diff --git a/Cargo.lock b/Cargo.lock
index 2dc03f5cc230..f5ebdc036c0d 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2806,9 +2806,9 @@ checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7"

 [[package]]
 name = "handlebars"
-version = "4.4.0"
+version = "5.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c39b3bc2a8f715298032cf5087e58573809374b08160aa7d750582bdb82d2683"
+checksum = "c73166c591e67fb4bf9bc04011b4e35f12e89fe8d676193aa263df065955a379"
 dependencies = [
  "log",
  "pest",
@@ -4517,9 +4517,9 @@ checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94"

 [[package]]
 name = "pest"
-version = "2.7.4"
+version = "2.7.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c022f1e7b65d6a24c0dbbd5fb344c66881bc01f3e5ae74a1c8100f2f985d98a4"
+checksum = "1f200d8d83c44a45b21764d1916299752ca035d15ecd46faca3e9a2a2bf6ad06"
 dependencies = [
  "memchr",
  "thiserror",
@@ -4528,9 +4528,9 @@ dependencies = [

 [[package]]
 name = "pest_derive"
-version = "2.7.4"
+version = "2.7.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "35513f630d46400a977c4cb58f78e1bfbe01434316e60c37d27b9ad6139c66d8"
+checksum = "bcd6ab1236bbdb3a49027e920e693192ebfe8913f6d60e294de57463a493cfde"
 dependencies = [
  "pest",
  "pest_generator",
@@ -4538,9 +4538,9 @@ dependencies = [

 [[package]]
 name = "pest_generator"
-version = "2.7.4"
+version = "2.7.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bc9fc1b9e7057baba189b5c626e2d6f40681ae5b6eb064dc7c7834101ec8123a"
+checksum = "2a31940305ffc96863a735bef7c7994a00b325a7138fdbc5bda0f1a0476d3275"
 dependencies = [
  "pest",
  "pest_meta",
@@ -4551,9 +4551,9 @@ dependencies = [

 [[package]]
 name = "pest_meta"
-version = "2.7.4"
+version = "2.7.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1df74e9e7ec4053ceb980e7c0c8bd3594e977fde1af91daba9c928e8e8c6708d"
+checksum = "a7ff62f5259e53b78d1af898941cdcdccfae7385cf7d793a6e55de5d05bb4b7d"
 dependencies = [
  "once_cell",
  "pest",
@@ -8547,12 +8547,14 @@ dependencies = [
  "zksync_eth_client",
  "zksync_eth_signer",
  "zksync_health_check",
+ "zksync_l1_contract_interface",
  "zksync_mempool",
  "zksync_merkle_tree",
  "zksync_mini_merkle_tree",
  "zksync_object_store",
  "zksync_protobuf",
  "zksync_protobuf_build",
+ "zksync_prover_interface",
  "zksync_queued_job_processor",
  "zksync_state",
  "zksync_storage",
@@ -8585,6 +8587,7 @@ dependencies = [
  "assert_matches",
  "bigdecimal",
  "bincode",
+ "chrono",
  "hex",
  "itertools 0.10.5",
  "num 0.4.1",
@@ -8706,6 +8709,16 @@ dependencies = [
  "tracing",
 ]

+[[package]]
+name = "zksync_l1_contract_interface"
+version = "0.1.0"
+dependencies = [
+ "codegen 0.1.0",
+ "zkevm_test_harness 1.3.3",
+ "zksync_prover_interface",
+ "zksync_types",
+]
+
 [[package]]
 name = "zksync_mempool"
 version = "0.1.0"
@@ -8735,6 +8748,7 @@ dependencies = [
  "tracing-subscriber",
  "vise",
  "zksync_crypto",
+ "zksync_prover_interface",
  "zksync_storage",
  "zksync_system_constants",
  "zksync_types",
@@ -8845,6 +8859,21 @@ dependencies = [
  "zksync_types",
 ]

+[[package]]
+name = "zksync_prover_interface"
+version = "0.1.0"
+dependencies = [
+ "bincode",
+ "chrono",
+ "serde",
+ "serde_with",
+ "strum",
+ "tokio",
+ "zkevm_test_harness 1.3.3",
+ "zksync_object_store",
+ "zksync_types",
+]
+
 [[package]]
 name = "zksync_queued_job_processor"
 version = "0.1.0"
@@ -8961,7 +8990,6 @@ dependencies = [
 "anyhow",
 "blake2 0.10.6 (registry+https://github.com/rust-lang/crates.io-index)",
 "chrono",
"codegen 0.1.0", "hex", "num 0.4.1", "num_enum", @@ -8975,7 +9003,6 @@ dependencies = [ "strum", "thiserror", "tokio", - "zkevm_test_harness 1.3.3", "zksync_basic_types", "zksync_config", "zksync_contracts", diff --git a/Cargo.toml b/Cargo.toml index 0481e5d96223..8213e01170d2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -23,12 +23,14 @@ members = [ "core/lib/env_config", "core/lib/eth_client", "core/lib/eth_signer", + "core/lib/l1_contract_interface", "core/lib/mempool", "core/lib/merkle_tree", "core/lib/mini_merkle_tree", "core/lib/node", "core/lib/object_store", "core/lib/prometheus_exporter", + "core/lib/prover_interface", "core/lib/queued_job_processor", "core/lib/state", "core/lib/storage", diff --git a/core/lib/basic_types/src/basic_fri_types.rs b/core/lib/basic_types/src/basic_fri_types.rs index 545acf8214cd..5aaba2ece6ee 100644 --- a/core/lib/basic_types/src/basic_fri_types.rs +++ b/core/lib/basic_types/src/basic_fri_types.rs @@ -1,5 +1,9 @@ //! Basic types for FRI prover. +// TODO (PLA-773): Should be moved to the prover workspace. + +use std::{convert::TryFrom, str::FromStr}; + use serde::{Deserialize, Serialize}; #[derive(Debug, Deserialize, Serialize, Clone, Eq, Hash, PartialEq, PartialOrd, Ord)] @@ -16,3 +20,82 @@ impl CircuitIdRoundTuple { } } } + +/// Represents the sequential number of the proof aggregation round. +/// Mostly used to be stored in `aggregation_round` column in `prover_jobs` table +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)] +pub enum AggregationRound { + BasicCircuits = 0, + LeafAggregation = 1, + NodeAggregation = 2, + Scheduler = 3, +} + +impl From for AggregationRound { + fn from(item: u8) -> Self { + match item { + 0 => AggregationRound::BasicCircuits, + 1 => AggregationRound::LeafAggregation, + 2 => AggregationRound::NodeAggregation, + 3 => AggregationRound::Scheduler, + _ => panic!("Invalid round"), + } + } +} + +impl AggregationRound { + pub fn next(&self) -> Option { + match self { + AggregationRound::BasicCircuits => Some(AggregationRound::LeafAggregation), + AggregationRound::LeafAggregation => Some(AggregationRound::NodeAggregation), + AggregationRound::NodeAggregation => Some(AggregationRound::Scheduler), + AggregationRound::Scheduler => None, + } + } +} + +impl std::fmt::Display for AggregationRound { + fn fmt(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + formatter.write_str(match self { + Self::BasicCircuits => "basic_circuits", + Self::LeafAggregation => "leaf_aggregation", + Self::NodeAggregation => "node_aggregation", + Self::Scheduler => "scheduler", + }) + } +} + +impl FromStr for AggregationRound { + type Err = String; + + fn from_str(s: &str) -> Result { + match s { + "basic_circuits" => Ok(AggregationRound::BasicCircuits), + "leaf_aggregation" => Ok(AggregationRound::LeafAggregation), + "node_aggregation" => Ok(AggregationRound::NodeAggregation), + "scheduler" => Ok(AggregationRound::Scheduler), + other => Err(format!( + "{} is not a valid round name for witness generation", + other + )), + } + } +} + +impl TryFrom for AggregationRound { + type Error = (); + + fn try_from(v: i32) -> Result { + match v { + x if x == AggregationRound::BasicCircuits as i32 => Ok(AggregationRound::BasicCircuits), + x if x == AggregationRound::LeafAggregation as i32 => { + Ok(AggregationRound::LeafAggregation) + } + x if x == AggregationRound::NodeAggregation as i32 => { + Ok(AggregationRound::NodeAggregation) + } + x if x == AggregationRound::Scheduler as i32 => 
diff --git a/core/lib/dal/Cargo.toml b/core/lib/dal/Cargo.toml
index c686ee291de0..7222b4f0ce8d 100644
--- a/core/lib/dal/Cargo.toml
+++ b/core/lib/dal/Cargo.toml
@@ -50,6 +50,7 @@ hex = "0.4"
 once_cell = "1.7"
 strum = { version = "0.24", features = ["derive"] }
 tracing = "0.1"
+chrono = { version = "0.4", features = ["serde"] }

 [dev-dependencies]
 assert_matches = "1.5.0"
diff --git a/core/lib/dal/src/fri_gpu_prover_queue_dal.rs b/core/lib/dal/src/fri_gpu_prover_queue_dal.rs
index a29298944449..56baa32ba9c8 100644
--- a/core/lib/dal/src/fri_gpu_prover_queue_dal.rs
+++ b/core/lib/dal/src/fri_gpu_prover_queue_dal.rs
@@ -1,8 +1,10 @@
 use std::time::Duration;

-use zksync_types::proofs::{GpuProverInstanceStatus, SocketAddress};
-
-use crate::{time_utils::pg_interval_from_duration, StorageProcessor};
+use crate::{
+    fri_prover_dal::types::{GpuProverInstanceStatus, SocketAddress},
+    time_utils::pg_interval_from_duration,
+    StorageProcessor,
+};

 #[derive(Debug)]
 pub struct FriGpuProverQueueDal<'a, 'c> {
diff --git a/core/lib/dal/src/fri_proof_compressor_dal.rs b/core/lib/dal/src/fri_proof_compressor_dal.rs
index ee331204ec43..959e4304b761 100644
--- a/core/lib/dal/src/fri_proof_compressor_dal.rs
+++ b/core/lib/dal/src/fri_proof_compressor_dal.rs
@@ -2,12 +2,10 @@ use std::{collections::HashMap, str::FromStr, time::Duration};

 use sqlx::Row;
 use strum::{Display, EnumString};
-use zksync_types::{
-    proofs::{JobCountStatistics, StuckJobs},
-    L1BatchNumber,
-};
+use zksync_types::L1BatchNumber;

 use crate::{
+    fri_prover_dal::types::{JobCountStatistics, StuckJobs},
     time_utils::{duration_to_naive_time, pg_interval_from_duration},
     StorageProcessor,
 };
diff --git a/core/lib/dal/src/fri_prover_dal.rs b/core/lib/dal/src/fri_prover_dal.rs
index d9446182b7f2..f3970f08092a 100644
--- a/core/lib/dal/src/fri_prover_dal.rs
+++ b/core/lib/dal/src/fri_prover_dal.rs
@@ -1,12 +1,12 @@
 use std::{collections::HashMap, convert::TryFrom, time::Duration};

 use zksync_types::{
-    basic_fri_types::CircuitIdRoundTuple,
-    proofs::{AggregationRound, FriProverJobMetadata, JobCountStatistics, StuckJobs},
+    basic_fri_types::{AggregationRound, CircuitIdRoundTuple},
     protocol_version::FriProtocolVersionId,
     L1BatchNumber,
 };

+use self::types::{FriProverJobMetadata, JobCountStatistics, StuckJobs};
 use crate::{
     instrument::InstrumentExt,
     metrics::MethodLatency,
@@ -14,6 +14,223 @@ use crate::{
     StorageProcessor,
 };

+// TODO (PLA-775): Should not be an embedded submodule in a concrete DAL file.
+pub mod types {
+    //! Types exposed by the prover DAL for general-purpose use.
+
+    use std::{net::IpAddr, ops::Add};
+
+    use sqlx::types::chrono::{DateTime, Utc};
+    use zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber};
+
+    #[derive(Debug, Clone)]
+    pub struct FriProverJobMetadata {
+        pub id: u32,
+        pub block_number: L1BatchNumber,
+        pub circuit_id: u8,
+        pub aggregation_round: AggregationRound,
+        pub sequence_number: usize,
+        pub depth: u16,
+        pub is_node_final_proof: bool,
+    }
+
+    #[derive(Debug, Clone, Copy, Default)]
+    pub struct JobCountStatistics {
+        pub queued: usize,
+        pub in_progress: usize,
+        pub failed: usize,
+        pub successful: usize,
+    }
+
+    impl Add for JobCountStatistics {
+        type Output = JobCountStatistics;
+
+        fn add(self, rhs: Self) -> Self::Output {
+            Self {
+                queued: self.queued + rhs.queued,
+                in_progress: self.in_progress + rhs.in_progress,
+                failed: self.failed + rhs.failed,
+                successful: self.successful + rhs.successful,
+            }
+        }
+    }
+
+    #[derive(Debug)]
+    pub struct StuckJobs {
+        pub id: u64,
+        pub status: String,
+        pub attempts: u64,
+    }
+
+    // TODO (PLA-774): Redundant structure, should be replaced with `std::net::SocketAddr`.
+    #[derive(Debug, Clone)]
+    pub struct SocketAddress {
+        pub host: IpAddr,
+        pub port: u16,
+    }
+
+    impl From<SocketAddress> for std::net::SocketAddr {
+        fn from(socket_address: SocketAddress) -> Self {
+            Self::new(socket_address.host, socket_address.port)
+        }
+    }
+
+    impl From<std::net::SocketAddr> for SocketAddress {
+        fn from(socket_address: std::net::SocketAddr) -> Self {
+            Self {
+                host: socket_address.ip(),
+                port: socket_address.port(),
+            }
+        }
+    }
+
+    #[derive(Debug, Clone)]
+    pub struct LeafAggregationJobMetadata {
+        pub id: u32,
+        pub block_number: L1BatchNumber,
+        pub circuit_id: u8,
+        pub prover_job_ids_for_proofs: Vec<u32>,
+    }
+
+    #[derive(Debug, Clone)]
+    pub struct NodeAggregationJobMetadata {
+        pub id: u32,
+        pub block_number: L1BatchNumber,
+        pub circuit_id: u8,
+        pub depth: u16,
+        pub prover_job_ids_for_proofs: Vec<u32>,
+    }
+
+    #[derive(Debug)]
+    pub struct JobPosition {
+        pub aggregation_round: AggregationRound,
+        pub sequence_number: usize,
+    }
+
+    #[derive(Debug, Default)]
+    pub struct ProverJobStatusFailed {
+        pub started_at: DateTime<Utc>,
+        pub error: String,
+    }
+
+    #[derive(Debug)]
+    pub struct ProverJobStatusSuccessful {
+        pub started_at: DateTime<Utc>,
+        pub time_taken: chrono::Duration,
+    }
+
+    impl Default for ProverJobStatusSuccessful {
+        fn default() -> Self {
+            ProverJobStatusSuccessful {
+                started_at: DateTime::default(),
+                time_taken: chrono::Duration::zero(),
+            }
+        }
+    }
+
+    #[derive(Debug, Default)]
+    pub struct ProverJobStatusInProgress {
+        pub started_at: DateTime<Utc>,
+    }
+
+    #[derive(Debug)]
+    pub struct WitnessJobStatusSuccessful {
+        pub started_at: DateTime<Utc>,
+        pub time_taken: chrono::Duration,
+    }
+
+    impl Default for WitnessJobStatusSuccessful {
+        fn default() -> Self {
+            WitnessJobStatusSuccessful {
+                started_at: DateTime::default(),
+                time_taken: chrono::Duration::zero(),
+            }
+        }
+    }
+
+    #[derive(Debug, Default)]
+    pub struct WitnessJobStatusFailed {
+        pub started_at: DateTime<Utc>,
+        pub error: String,
+    }
+
+    #[derive(Debug, strum::Display, strum::EnumString, strum::AsRefStr)]
+    pub enum ProverJobStatus {
+        #[strum(serialize = "queued")]
+        Queued,
+        #[strum(serialize = "in_progress")]
+        InProgress(ProverJobStatusInProgress),
+        #[strum(serialize = "successful")]
+        Successful(ProverJobStatusSuccessful),
+        #[strum(serialize = "failed")]
+        Failed(ProverJobStatusFailed),
+        #[strum(serialize = "skipped")]
+        Skipped,
+        #[strum(serialize = "ignored")]
+        Ignored,
+    }
+
+    #[derive(Debug, strum::Display, strum::EnumString, strum::AsRefStr)]
+    pub enum WitnessJobStatus {
+        #[strum(serialize = "failed")]
+        Failed(WitnessJobStatusFailed),
+        #[strum(serialize = "skipped")]
+        Skipped,
+        #[strum(serialize = "successful")]
+        Successful(WitnessJobStatusSuccessful),
+        #[strum(serialize = "waiting_for_artifacts")]
+        WaitingForArtifacts,
+        #[strum(serialize = "waiting_for_proofs")]
+        WaitingForProofs,
+        #[strum(serialize = "in_progress")]
+        InProgress,
+        #[strum(serialize = "queued")]
+        Queued,
+    }
+
+    #[derive(Debug)]
+    pub struct WitnessJobInfo {
+        pub block_number: L1BatchNumber,
+        pub created_at: DateTime<Utc>,
+        pub updated_at: DateTime<Utc>,
+        pub status: WitnessJobStatus,
+        pub position: JobPosition,
+    }
+
+    #[derive(Debug)]
+    pub struct ProverJobInfo {
+        pub id: u32,
+        pub block_number: L1BatchNumber,
+        pub circuit_type: String,
+        pub position: JobPosition,
+        pub input_length: u64,
+        pub status: ProverJobStatus,
+        pub attempts: u32,
+        pub created_at: DateTime<Utc>,
+        pub updated_at: DateTime<Utc>,
+    }
+
+    #[derive(Debug)]
+    pub struct JobExtendedStatistics {
+        pub successful_padding: L1BatchNumber,
+        pub queued_padding: L1BatchNumber,
+        pub queued_padding_len: u32,
+        pub active_area: Vec<ProverJobInfo>,
+    }
+
+    #[derive(Debug, Copy, Clone)]
+    pub enum GpuProverInstanceStatus {
+        // The instance is available for processing.
+        Available,
+        // The instance is running at full capacity.
+        Full,
+        // The instance is reserved by a synthesizer.
+        Reserved,
+        // The instance is not alive anymore.
+        Dead,
+    }
+}
+
 #[derive(Debug)]
 pub struct FriProverDal<'a, 'c> {
     pub(crate) storage: &'a mut StorageProcessor<'c>,
diff --git a/core/lib/dal/src/fri_witness_generator_dal.rs b/core/lib/dal/src/fri_witness_generator_dal.rs
index 874ad8d03689..57b45253dd76 100644
--- a/core/lib/dal/src/fri_witness_generator_dal.rs
+++ b/core/lib/dal/src/fri_witness_generator_dal.rs
@@ -2,15 +2,13 @@ use std::{collections::HashMap, convert::TryFrom, time::Duration};

 use sqlx::Row;
 use zksync_types::{
-    proofs::{
-        AggregationRound, JobCountStatistics, LeafAggregationJobMetadata,
-        NodeAggregationJobMetadata, StuckJobs,
-    },
-    protocol_version::FriProtocolVersionId,
-    L1BatchNumber,
+    basic_fri_types::AggregationRound, protocol_version::FriProtocolVersionId, L1BatchNumber,
 };

 use crate::{
+    fri_prover_dal::types::{
+        JobCountStatistics, LeafAggregationJobMetadata, NodeAggregationJobMetadata, StuckJobs,
+    },
     metrics::MethodLatency,
     time_utils::{duration_to_naive_time, pg_interval_from_duration},
     StorageProcessor,
diff --git a/core/lib/dal/src/models/storage_prover_job_info.rs b/core/lib/dal/src/models/storage_prover_job_info.rs
index 3242953b39dd..efe6e8cb69d9 100644
--- a/core/lib/dal/src/models/storage_prover_job_info.rs
+++ b/core/lib/dal/src/models/storage_prover_job_info.rs
@@ -1,12 +1,11 @@
 use std::{convert::TryFrom, panic, str::FromStr};

 use sqlx::types::chrono::{DateTime, NaiveDateTime, NaiveTime, Utc};
-use zksync_types::{
-    proofs::{
-        AggregationRound, JobPosition, ProverJobInfo, ProverJobStatus, ProverJobStatusFailed,
-        ProverJobStatusInProgress, ProverJobStatusSuccessful,
-    },
-    L1BatchNumber,
+use zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber};
+
+use crate::fri_prover_dal::types::{
+    JobPosition, ProverJobInfo, ProverJobStatus, ProverJobStatusFailed, ProverJobStatusInProgress,
+    ProverJobStatusSuccessful,
 };

 #[derive(sqlx::FromRow)]
diff --git a/core/lib/dal/src/models/storage_witness_job_info.rs b/core/lib/dal/src/models/storage_witness_job_info.rs
index 486b9f89681b..ea8e15fb9c99 100644
--- a/core/lib/dal/src/models/storage_witness_job_info.rs
+++ b/core/lib/dal/src/models/storage_witness_job_info.rs
@@ -1,12 +1,11 @@
 use std::{convert::TryFrom, str::FromStr};

 use sqlx::types::chrono::{DateTime, NaiveDateTime, NaiveTime, Utc};
-use zksync_types::{
-    proofs::{
-        AggregationRound, JobPosition, WitnessJobInfo, WitnessJobStatus, WitnessJobStatusFailed,
-        WitnessJobStatusSuccessful,
-    },
-    L1BatchNumber,
+use zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber};
+
+use crate::fri_prover_dal::types::{
+    JobPosition, WitnessJobInfo, WitnessJobStatus, WitnessJobStatusFailed,
+    WitnessJobStatusSuccessful,
 };

 #[derive(sqlx::FromRow)]
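For DAL consumers the net effect is an import move: job bookkeeping types now live next to the queries that return them. As a sketch from within the DAL crate, here is the field-wise `Add` impl folding per-round queue statistics together (a hypothetical helper, not part of the patch):

    use crate::fri_prover_dal::types::JobCountStatistics;

    fn total_jobs(per_round: &[JobCountStatistics]) -> JobCountStatistics {
        // `Add` sums each counter field, so statistics from several
        // aggregation rounds can be folded into one summary.
        per_round
            .iter()
            .copied()
            .fold(JobCountStatistics::default(), |acc, stats| acc + stats)
    }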
diff --git a/core/lib/l1_contract_interface/Cargo.toml b/core/lib/l1_contract_interface/Cargo.toml
new file mode 100644
index 000000000000..204198bdaecd
--- /dev/null
+++ b/core/lib/l1_contract_interface/Cargo.toml
@@ -0,0 +1,20 @@
+[package]
+name = "zksync_l1_contract_interface"
+version = "0.1.0"
+edition = "2018"
+authors = ["The Matter Labs Team <hello@matterlabs.dev>"]
+homepage = "https://zksync.io/"
+repository = "https://github.com/matter-labs/zksync-era"
+license = "MIT OR Apache-2.0"
+keywords = ["blockchain", "zksync"]
+categories = ["cryptography"]
+readme = "README.md"
+
+[dependencies]
+zksync_types = { path = "../types" }
+zksync_prover_interface = { path = "../prover_interface" }
+
+# Used to serialize proof data
+codegen = { git = "https://github.com/matter-labs/solidity_plonk_verifier.git", branch = "dev" }
+# Used to calculate commitment for vk from the old L1 verifier contract (backward compatibility needs)
+zkevm_test_harness = { git = "https://github.com/matter-labs/era-zkevm_test_harness.git", branch = "v1.3.3" }
diff --git a/core/lib/l1_contract_interface/src/i_executor/methods/commit_batches.rs b/core/lib/l1_contract_interface/src/i_executor/methods/commit_batches.rs
new file mode 100644
index 000000000000..fe6876930153
--- /dev/null
+++ b/core/lib/l1_contract_interface/src/i_executor/methods/commit_batches.rs
@@ -0,0 +1,26 @@
+use zksync_types::{commitment::L1BatchWithMetadata, ethabi::Token};
+
+use crate::{
+    i_executor::structures::{CommitBatchInfo, StoredBatchInfo},
+    Tokenizable, Tokenize,
+};
+
+/// Input required to encode `commitBatches` call.
+#[derive(Debug, Clone)]
+pub struct CommitBatches {
+    pub last_committed_l1_batch: L1BatchWithMetadata,
+    pub l1_batches: Vec<L1BatchWithMetadata>,
+}
+
+impl Tokenize for CommitBatches {
+    fn into_tokens(self) -> Vec<Token> {
+        let stored_batch_info = StoredBatchInfo(&self.last_committed_l1_batch).into_token();
+        let l1_batches_to_commit = self
+            .l1_batches
+            .iter()
+            .map(|batch| CommitBatchInfo(batch).into_token())
+            .collect();
+
+        vec![stored_batch_info, Token::Array(l1_batches_to_commit)]
+    }
+}
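A hedged sketch of how the `CommitBatches` output is meant to be consumed: the token vector feeds an ABI encoder. The contract handle and function lookup below are illustrative, not APIs introduced by this patch:

    use zksync_l1_contract_interface::{i_executor::methods::CommitBatches, Tokenize};
    use zksync_types::{commitment::L1BatchWithMetadata, ethabi};

    fn commit_calldata(
        zksync_contract: &ethabi::Contract,
        last_committed: L1BatchWithMetadata,
        batches: Vec<L1BatchWithMetadata>,
    ) -> Vec<u8> {
        // `into_tokens` yields exactly the argument list of `commitBatches`.
        let tokens = CommitBatches {
            last_committed_l1_batch: last_committed,
            l1_batches: batches,
        }
        .into_tokens();
        zksync_contract
            .function("commitBatches")
            .expect("function is present in the ABI")
            .encode_input(&tokens)
            .expect("tokens match the ABI signature")
    }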
diff --git a/core/lib/l1_contract_interface/src/i_executor/methods/execute_batches.rs b/core/lib/l1_contract_interface/src/i_executor/methods/execute_batches.rs
new file mode 100644
index 000000000000..9b759270a2ac
--- /dev/null
+++ b/core/lib/l1_contract_interface/src/i_executor/methods/execute_batches.rs
@@ -0,0 +1,22 @@
+use zksync_types::{
+    commitment::L1BatchWithMetadata, ethabi::Token, web3::contract::tokens::Tokenizable,
+};
+
+use crate::{i_executor::structures::StoredBatchInfo, Tokenize};
+
+/// Input required to encode `executeBatches` call.
+#[derive(Debug, Clone)]
+pub struct ExecuteBatches {
+    pub l1_batches: Vec<L1BatchWithMetadata>,
+}
+
+impl Tokenize for ExecuteBatches {
+    fn into_tokens(self) -> Vec<Token> {
+        vec![Token::Array(
+            self.l1_batches
+                .iter()
+                .map(|batch| StoredBatchInfo(batch).into_token())
+                .collect(),
+        )]
+    }
+}
diff --git a/core/lib/l1_contract_interface/src/i_executor/methods/mod.rs b/core/lib/l1_contract_interface/src/i_executor/methods/mod.rs
new file mode 100644
index 000000000000..765586edb3fa
--- /dev/null
+++ b/core/lib/l1_contract_interface/src/i_executor/methods/mod.rs
@@ -0,0 +1,9 @@
+//! Utilities for encoding input data for methods defined in `IExecutor.sol`.
+
+pub use self::{
+    commit_batches::CommitBatches, execute_batches::ExecuteBatches, prove_batches::ProveBatches,
+};
+
+mod commit_batches;
+mod execute_batches;
+mod prove_batches;
diff --git a/core/lib/l1_contract_interface/src/i_executor/methods/prove_batches.rs b/core/lib/l1_contract_interface/src/i_executor/methods/prove_batches.rs
new file mode 100644
index 000000000000..3c35677d240a
--- /dev/null
+++ b/core/lib/l1_contract_interface/src/i_executor/methods/prove_batches.rs
@@ -0,0 +1,69 @@
+use codegen::serialize_proof;
+use zksync_prover_interface::outputs::L1BatchProofForL1;
+use zksync_types::{
+    commitment::L1BatchWithMetadata, ethabi::Token, web3::contract::tokens::Tokenizable, U256,
+};
+
+use crate::{i_executor::structures::StoredBatchInfo, Tokenize};
+
+/// Input required to encode `proveBatches` call.
+#[derive(Debug, Clone)]
+pub struct ProveBatches {
+    pub prev_l1_batch: L1BatchWithMetadata,
+    pub l1_batches: Vec<L1BatchWithMetadata>,
+    pub proofs: Vec<L1BatchProofForL1>,
+    pub should_verify: bool,
+}
+
+impl Tokenize for ProveBatches {
+    fn into_tokens(self) -> Vec<Token> {
+        let prev_l1_batch = StoredBatchInfo(&self.prev_l1_batch).into_token();
+        let batches_arg = self
+            .l1_batches
+            .iter()
+            .map(|batch| StoredBatchInfo(batch).into_token())
+            .collect();
+        let batches_arg = Token::Array(batches_arg);
+
+        if self.should_verify {
+            // currently we only support submitting a single proof
+            assert_eq!(self.proofs.len(), 1);
+            assert_eq!(self.l1_batches.len(), 1);
+
+            let L1BatchProofForL1 {
+                aggregation_result_coords,
+                scheduler_proof,
+            } = self.proofs.first().unwrap();
+
+            let (_, proof) = serialize_proof(scheduler_proof);
+
+            let aggregation_result_coords = if self.l1_batches[0]
+                .header
+                .protocol_version
+                .unwrap()
+                .is_pre_boojum()
+            {
+                Token::Array(
+                    aggregation_result_coords
+                        .iter()
+                        .map(|bytes| Token::Uint(U256::from_big_endian(bytes)))
+                        .collect(),
+                )
+            } else {
+                Token::Array(Vec::new())
+            };
+            let proof_input = Token::Tuple(vec![
+                aggregation_result_coords,
+                Token::Array(proof.into_iter().map(Token::Uint).collect()),
+            ]);
+
+            vec![prev_l1_batch, batches_arg, proof_input]
+        } else {
+            vec![
+                prev_l1_batch,
+                batches_arg,
+                Token::Tuple(vec![Token::Array(vec![]), Token::Array(vec![])]),
+            ]
+        }
+    }
+}
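Note the invariant in the verifying branch: exactly one proof for exactly one batch. When verification is skipped, the third argument is still a well-formed, empty proof tuple. A small illustrative check (assumes two `L1BatchWithMetadata` values obtained elsewhere; not part of the patch):

    use zksync_l1_contract_interface::{i_executor::methods::ProveBatches, Tokenize};
    use zksync_types::{commitment::L1BatchWithMetadata, ethabi::Token};

    fn skip_verification_tokens(
        prev: L1BatchWithMetadata,
        batch: L1BatchWithMetadata,
    ) -> Vec<Token> {
        let op = ProveBatches {
            prev_l1_batch: prev,
            l1_batches: vec![batch],
            proofs: vec![], // proofs are only read when `should_verify` is true
            should_verify: false,
        };
        let tokens = op.into_tokens();
        // `proveBatches` always takes three arguments: the previous batch,
        // the batches being proven, and the (here empty) proof.
        assert_eq!(tokens.len(), 3);
        tokens
    }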
diff --git a/core/lib/l1_contract_interface/src/i_executor/mod.rs b/core/lib/l1_contract_interface/src/i_executor/mod.rs
new file mode 100644
index 000000000000..a866b45fef7a
--- /dev/null
+++ b/core/lib/l1_contract_interface/src/i_executor/mod.rs
@@ -0,0 +1,4 @@
+//! Different interfaces exposed by the `IExecutor.sol`.
+
+pub mod methods;
+pub mod structures;
diff --git a/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs b/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs
new file mode 100644
index 000000000000..c657ef9dcf52
--- /dev/null
+++ b/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs
@@ -0,0 +1,113 @@
+use zksync_types::{
+    commitment::L1BatchWithMetadata,
+    ethabi::Token,
+    web3::{contract::Error as Web3ContractError, error::Error as Web3ApiError},
+    U256,
+};
+
+use crate::Tokenizable;
+
+/// Encoding for `CommitBatchInfo` from `IExecutor.sol`
+#[derive(Debug)]
+pub struct CommitBatchInfo<'a>(pub &'a L1BatchWithMetadata);
+
+impl<'a> Tokenizable for CommitBatchInfo<'a> {
+    fn from_token(_token: Token) -> Result<Self, Web3ContractError>
+    where
+        Self: Sized,
+    {
+        // Currently there is no need to decode this struct.
+        // We still want to implement `Tokenizable` trait for it, so that *once* it's needed
+        // the implementation is provided here and not in some other inconsistent way.
+        Err(Web3ContractError::Api(Web3ApiError::Decoder(
+            "Not implemented".to_string(),
+        )))
+    }
+
+    fn into_token(self) -> Token {
+        if self.0.header.protocol_version.unwrap().is_pre_boojum() {
+            Token::Tuple(vec![
+                Token::Uint(U256::from(self.0.header.number.0)),
+                Token::Uint(U256::from(self.0.header.timestamp)),
+                Token::Uint(U256::from(self.0.metadata.rollup_last_leaf_index)),
+                Token::FixedBytes(self.0.metadata.merkle_root_hash.as_bytes().to_vec()),
+                Token::Uint(U256::from(self.0.header.l1_tx_count)),
+                Token::FixedBytes(self.0.metadata.l2_l1_merkle_root.as_bytes().to_vec()),
+                Token::FixedBytes(
+                    self.0
+                        .header
+                        .priority_ops_onchain_data_hash()
+                        .as_bytes()
+                        .to_vec(),
+                ),
+                Token::Bytes(self.0.metadata.initial_writes_compressed.clone()),
+                Token::Bytes(self.0.metadata.repeated_writes_compressed.clone()),
+                Token::Bytes(self.0.metadata.l2_l1_messages_compressed.clone()),
+                Token::Array(
+                    self.0
+                        .header
+                        .l2_to_l1_messages
+                        .iter()
+                        .map(|message| Token::Bytes(message.to_vec()))
+                        .collect(),
+                ),
+                Token::Array(
+                    self.0
+                        .factory_deps
+                        .iter()
+                        .map(|bytecode| Token::Bytes(bytecode.to_vec()))
+                        .collect(),
+                ),
+            ])
+        } else {
+            Token::Tuple(vec![
+                // `batchNumber`
+                Token::Uint(U256::from(self.0.header.number.0)),
+                // `timestamp`
+                Token::Uint(U256::from(self.0.header.timestamp)),
+                // `indexRepeatedStorageChanges`
+                Token::Uint(U256::from(self.0.metadata.rollup_last_leaf_index)),
+                // `newStateRoot`
+                Token::FixedBytes(self.0.metadata.merkle_root_hash.as_bytes().to_vec()),
+                // `numberOfLayer1Txs`
+                Token::Uint(U256::from(self.0.header.l1_tx_count)),
+                // `priorityOperationsHash`
+                Token::FixedBytes(
+                    self.0
+                        .header
+                        .priority_ops_onchain_data_hash()
+                        .as_bytes()
+                        .to_vec(),
+                ),
+                // `bootloaderHeapInitialContentsHash`
+                Token::FixedBytes(
+                    self.0
+                        .metadata
+                        .bootloader_initial_content_commitment
+                        .unwrap()
+                        .as_bytes()
+                        .to_vec(),
+                ),
+                // `eventsQueueStateHash`
+                Token::FixedBytes(
+                    self.0
+                        .metadata
+                        .events_queue_commitment
+                        .unwrap()
+                        .as_bytes()
+                        .to_vec(),
+                ),
+                // `systemLogs`
+                Token::Bytes(self.0.metadata.l2_l1_messages_compressed.clone()),
+                // `totalL2ToL1Pubdata`
+                Token::Bytes(
+                    self.0
+                        .header
+                        .pubdata_input
+                        .clone()
+                        .unwrap_or(self.0.construct_pubdata()),
+                ),
+            ])
+        }
+    }
+}
diff --git a/core/lib/l1_contract_interface/src/i_executor/structures/mod.rs b/core/lib/l1_contract_interface/src/i_executor/structures/mod.rs
new file mode 100644
index 000000000000..d1ed57e41f2e
--- /dev/null
+++ b/core/lib/l1_contract_interface/src/i_executor/structures/mod.rs
@@ -0,0 +1,6 @@
+//! Structures exposed by the `IExecutor.sol`.
+
+mod commit_batch_info;
+mod stored_batch_info;
+
+pub use self::{commit_batch_info::CommitBatchInfo, stored_batch_info::StoredBatchInfo};
diff --git a/core/lib/l1_contract_interface/src/i_executor/structures/stored_batch_info.rs b/core/lib/l1_contract_interface/src/i_executor/structures/stored_batch_info.rs
new file mode 100644
index 000000000000..10fccc0198d1
--- /dev/null
+++ b/core/lib/l1_contract_interface/src/i_executor/structures/stored_batch_info.rs
@@ -0,0 +1,53 @@
+use zksync_types::{
+    commitment::L1BatchWithMetadata,
+    ethabi::Token,
+    web3::{contract::Error as Web3ContractError, error::Error as Web3ApiError},
+    U256,
+};
+
+use crate::Tokenizable;
+
+/// Encoding for `StoredBatchInfo` from `IExecutor.sol`
+#[derive(Debug)]
+pub struct StoredBatchInfo<'a>(pub &'a L1BatchWithMetadata);
+
+impl<'a> Tokenizable for StoredBatchInfo<'a> {
+    fn from_token(_token: Token) -> Result<Self, Web3ContractError>
+    where
+        Self: Sized,
+    {
+        // Currently there is no need to decode this struct.
+        // We still want to implement `Tokenizable` trait for it, so that *once* it's needed
+        // the implementation is provided here and not in some other inconsistent way.
+        Err(Web3ContractError::Api(Web3ApiError::Decoder(
+            "Not implemented".to_string(),
+        )))
+    }
+
+    fn into_token(self) -> Token {
+        Token::Tuple(vec![
+            // `batchNumber`
+            Token::Uint(U256::from(self.0.header.number.0)),
+            // `batchHash`
+            Token::FixedBytes(self.0.metadata.root_hash.as_bytes().to_vec()),
+            // `indexRepeatedStorageChanges`
+            Token::Uint(U256::from(self.0.metadata.rollup_last_leaf_index)),
+            // `numberOfLayer1Txs`
+            Token::Uint(U256::from(self.0.header.l1_tx_count)),
+            // `priorityOperationsHash`
+            Token::FixedBytes(
+                self.0
+                    .header
+                    .priority_ops_onchain_data_hash()
+                    .as_bytes()
+                    .to_vec(),
+            ),
+            // `l2LogsTreeRoot`
+            Token::FixedBytes(self.0.metadata.l2_l1_merkle_root.as_bytes().to_vec()),
+            // timestamp
+            Token::Uint(U256::from(self.0.header.timestamp)),
+            // commitment
+            Token::FixedBytes(self.0.metadata.commitment.as_bytes().to_vec()),
+        ])
+    }
+}
diff --git a/core/lib/l1_contract_interface/src/lib.rs b/core/lib/l1_contract_interface/src/lib.rs
new file mode 100644
index 000000000000..f4f9d04ef248
--- /dev/null
+++ b/core/lib/l1_contract_interface/src/lib.rs
@@ -0,0 +1,19 @@
+//! Utilities for interacting with the zkSync L1 contract
+//!
+//! Provides utilities both to encode input data for the contract and to decode
+//! the data provided by the contract.
+//!
+//! This crate utilizes traits provided by the `web3` crate to encode and decode
+//! data. `Tokenizable` trait represents items that are encoded via single `Token`,
+//! while `Tokenize` trait represents items that are encoded via array of `Token`s
+//! (for example, transaction input).
+
+pub use zksync_types::web3::contract::tokens::{Detokenize, Tokenizable, Tokenize};
+
+/// Rust interface for (subset of) `IExecutor.sol`.
+pub mod i_executor;
+/// Utilities for interacting with `Multicall3` contract.
+pub mod multicall3;
+/// Utilities for interacting with the old verifier contract.
+/// Required for backward compatibility only.
+pub mod pre_boojum_verifier;
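The two re-exported traits split cleanly across the new modules: structures implement `Tokenizable` (one value, one `Token`), while method inputs implement `Tokenize` (a full argument list). A rough usage sketch, not part of the patch:

    use zksync_l1_contract_interface::{
        i_executor::{methods::ExecuteBatches, structures::StoredBatchInfo},
        Tokenizable, Tokenize,
    };
    use zksync_types::commitment::L1BatchWithMetadata;

    fn encode_both(batch: &L1BatchWithMetadata) {
        // `Tokenizable`: a single struct becomes a single token.
        let one_token = StoredBatchInfo(batch).into_token();
        // `Tokenize`: a whole call's input becomes an array of tokens.
        let arg_list = ExecuteBatches { l1_batches: vec![batch.clone()] }.into_tokens();
        let _ = (one_token, arg_list);
    }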
diff --git a/core/lib/types/src/contracts.rs b/core/lib/l1_contract_interface/src/multicall3/mod.rs
similarity index 99%
rename from core/lib/types/src/contracts.rs
rename to core/lib/l1_contract_interface/src/multicall3/mod.rs
index 6b72375202a4..a47d034d5866 100644
--- a/core/lib/types/src/contracts.rs
+++ b/core/lib/l1_contract_interface/src/multicall3/mod.rs
@@ -1,6 +1,6 @@
 use std::mem;

-use crate::{
+use zksync_types::{
     ethabi::Token,
     web3::contract::{tokens::Tokenizable, Error},
     Address,
diff --git a/core/lib/l1_contract_interface/src/pre_boojum_verifier/mod.rs b/core/lib/l1_contract_interface/src/pre_boojum_verifier/mod.rs
new file mode 100644
index 000000000000..b1af0b253739
--- /dev/null
+++ b/core/lib/l1_contract_interface/src/pre_boojum_verifier/mod.rs
@@ -0,0 +1,3 @@
+mod vk_transform;
+
+pub use self::vk_transform::old_l1_vk_commitment;
diff --git a/core/lib/types/src/vk_transform.rs b/core/lib/l1_contract_interface/src/pre_boojum_verifier/vk_transform.rs
similarity index 92%
rename from core/lib/types/src/vk_transform.rs
rename to core/lib/l1_contract_interface/src/pre_boojum_verifier/vk_transform.rs
index b19fdaef6927..70098230d9b8 100644
--- a/core/lib/types/src/vk_transform.rs
+++ b/core/lib/l1_contract_interface/src/pre_boojum_verifier/vk_transform.rs
@@ -1,3 +1,6 @@
+//! This module contains functions for transforming vk from the old L1 verifier contract to the hash
+//! that serves as its commitment.
+
 use std::str::FromStr;

 use zkevm_test_harness::{
@@ -13,16 +16,15 @@ use zkevm_test_harness::{
         recursive_aggregation::{compute_vk_encoding_and_committment, erase_vk_type},
     },
 };
-
-use crate::{ethabi::Token, H256};
+use zksync_types::{ethabi::Token, H256};

 /// Calculates commitment for vk from L1 verifier contract.
-pub fn l1_vk_commitment(token: Token) -> H256 {
+pub fn old_l1_vk_commitment(token: Token) -> H256 {
     let vk = vk_from_token(token);
     generate_vk_commitment(vk)
 }

-pub fn generate_vk_commitment(
+fn generate_vk_commitment(
     vk: VerificationKey<Bn256, ZkSyncCircuit<Bn256, VmWitnessOracle<Bn256>>>,
 ) -> H256 {
     let (_, scheduler_vk_commitment) = compute_vk_encoding_and_committment(erase_vk_type(vk));
diff --git a/core/lib/merkle_tree/Cargo.toml b/core/lib/merkle_tree/Cargo.toml
index bb38021c7aae..06cc0b67871e 100644
--- a/core/lib/merkle_tree/Cargo.toml
+++ b/core/lib/merkle_tree/Cargo.toml
@@ -14,6 +14,7 @@ vise = { git = "https://github.com/matter-labs/vise.git", version = "0.1.0", rev
 zksync_types = { path = "../types" }
 zksync_crypto = { path = "../crypto" }
 zksync_storage = { path = "../storage" }
+zksync_prover_interface = { path = "../prover_interface" }
 zksync_utils = { path = "../utils" }

 leb128 = "0.2.5"
diff --git a/core/lib/merkle_tree/src/domain.rs b/core/lib/merkle_tree/src/domain.rs
index 2fe4b59f8217..0724804a5a7b 100644
--- a/core/lib/merkle_tree/src/domain.rs
+++ b/core/lib/merkle_tree/src/domain.rs
@@ -2,8 +2,8 @@

 use rayon::{ThreadPool, ThreadPoolBuilder};
 use zksync_crypto::hasher::blake2::Blake2Hasher;
+use zksync_prover_interface::inputs::{PrepareBasicCircuitsJob, StorageLogMetadata};
 use zksync_types::{
-    proofs::{PrepareBasicCircuitsJob, StorageLogMetadata},
     writes::{InitialStorageWrite, RepeatedStorageWrite, StateDiffRecord},
     L1BatchNumber, StorageKey, U256,
 };
diff --git a/core/lib/merkle_tree/tests/integration/domain.rs b/core/lib/merkle_tree/tests/integration/domain.rs
index ee4ea973c956..565b4d5f0fe4 100644
--- a/core/lib/merkle_tree/tests/integration/domain.rs
+++ b/core/lib/merkle_tree/tests/integration/domain.rs
@@ -7,11 +7,10 @@ use serde_with::{hex::Hex, serde_as};
 use tempfile::TempDir;
 use zksync_crypto::hasher::blake2::Blake2Hasher;
 use zksync_merkle_tree::{domain::ZkSyncTree, HashTree, TreeEntry, TreeInstruction};
+use zksync_prover_interface::inputs::StorageLogMetadata;
 use zksync_storage::RocksDB;
 use zksync_system_constants::ACCOUNT_CODE_STORAGE_ADDRESS;
-use zksync_types::{
-    proofs::StorageLogMetadata, AccountTreeId, Address, L1BatchNumber, StorageKey, H256,
-};
+use zksync_types::{AccountTreeId, Address, L1BatchNumber, StorageKey, H256};

 fn gen_storage_logs() -> Vec<Vec<TreeInstruction<StorageKey>>> {
     let addrs = vec![
diff --git a/core/lib/object_store/src/lib.rs b/core/lib/object_store/src/lib.rs
index bf6630ef0606..0eddf3a61d53 100644
--- a/core/lib/object_store/src/lib.rs
+++ b/core/lib/object_store/src/lib.rs
@@ -39,6 +39,6 @@ pub mod _reexports {
 }

 pub use self::{
-    objects::{AggregationsKey, CircuitKey, ClosedFormInputKey, FriCircuitKey, StoredObject},
+    objects::StoredObject,
     raw::{Bucket, ObjectStore, ObjectStoreError, ObjectStoreFactory},
 };
diff --git a/core/lib/object_store/src/objects.rs b/core/lib/object_store/src/objects.rs
index e01519fa71db..75c0f5460ad4 100644
--- a/core/lib/object_store/src/objects.rs
+++ b/core/lib/object_store/src/objects.rs
@@ -7,8 +7,6 @@ use flate2::{read::GzDecoder, write::GzEncoder, Compression};
 use prost::Message;
 use zksync_protobuf::{decode, ProtoFmt};
 use zksync_types::{
-    aggregated_operations::L1BatchProofForL1,
-    proofs::{AggregationRound, PrepareBasicCircuitsJob},
     snapshots::{
         SnapshotFactoryDependencies, SnapshotStorageLogsChunk, SnapshotStorageLogsStorageKey,
     },
@@ -131,62 +129,6 @@ impl StoredObject for WitnessBlockState {
     serialize_using_bincode!();
 }

-impl StoredObject for PrepareBasicCircuitsJob {
-    const BUCKET: Bucket = Bucket::WitnessInput;
-    type Key<'a> = L1BatchNumber;
-
-    fn encode_key(key: Self::Key<'_>) -> String {
-        format!("merkel_tree_paths_{key}.bin")
-    }
-
-    serialize_using_bincode!();
-}
-
-/// Storage key for a [AggregationWrapper`].
-#[derive(Debug, Clone, Copy)]
-pub struct AggregationsKey {
-    pub block_number: L1BatchNumber,
-    pub circuit_id: u8,
-    pub depth: u16,
-}
-
-/// Storage key for a [ClosedFormInputWrapper`].
-#[derive(Debug, Clone, Copy)]
-pub struct ClosedFormInputKey {
-    pub block_number: L1BatchNumber,
-    pub circuit_id: u8,
-}
-
-/// Storage key for a [`CircuitWrapper`].
-#[derive(Debug, Clone, Copy)]
-pub struct FriCircuitKey {
-    pub block_number: L1BatchNumber,
-    pub sequence_number: usize,
-    pub circuit_id: u8,
-    pub aggregation_round: AggregationRound,
-    pub depth: u16,
-}
-
-/// Storage key for a [`ZkSyncCircuit`].
-#[derive(Debug, Clone, Copy)]
-pub struct CircuitKey<'a> {
-    pub block_number: L1BatchNumber,
-    pub sequence_number: usize,
-    pub circuit_type: &'a str,
-    pub aggregation_round: AggregationRound,
-}
-
-impl StoredObject for L1BatchProofForL1 {
-    const BUCKET: Bucket = Bucket::ProofsFri;
-    type Key<'a> = L1BatchNumber;
-
-    fn encode_key(key: Self::Key<'_>) -> String {
-        format!("l1_batch_proof_{key}.bin")
-    }
-
-    serialize_using_bincode!();
-}
-
 impl dyn ObjectStore + '_ {
     /// Fetches the value for the given key if it exists.
     ///
diff --git a/core/lib/prover_interface/Cargo.toml b/core/lib/prover_interface/Cargo.toml
new file mode 100644
index 000000000000..3bb9e65fe806
--- /dev/null
+++ b/core/lib/prover_interface/Cargo.toml
@@ -0,0 +1,26 @@
+[package]
+name = "zksync_prover_interface"
+version = "0.1.0"
+edition = "2018"
+authors = ["The Matter Labs Team <hello@matterlabs.dev>"]
+homepage = "https://zksync.io/"
+repository = "https://github.com/matter-labs/zksync-era"
+license = "MIT OR Apache-2.0"
+keywords = ["blockchain", "zksync"]
+categories = ["cryptography"]
+readme = "README.md"
+
+[dependencies]
+zksync_types = { path = "../types" }
+zksync_object_store = { path = "../object_store" }
+
+zkevm_test_harness = { git = "https://github.com/matter-labs/era-zkevm_test_harness.git", branch = "v1.3.3" }
+
+serde = "1.0.90"
+strum = { version = "0.24", features = ["derive"] }
+serde_with = { version = "1", features = ["base64"] }
+chrono = { version = "0.4", features = ["serde"] }
+
+[dev-dependencies]
+tokio = { version = "1.21.2", features = ["full"] }
+bincode = "1"
diff --git a/core/lib/types/src/prover_server_api/mod.rs b/core/lib/prover_interface/src/api.rs
similarity index 79%
rename from core/lib/types/src/prover_server_api/mod.rs
rename to core/lib/prover_interface/src/api.rs
index fdbbd57624f8..85cf88c4f908 100644
--- a/core/lib/types/src/prover_server_api/mod.rs
+++ b/core/lib/prover_interface/src/api.rs
@@ -1,12 +1,14 @@
-use serde::{Deserialize, Serialize};
-use zksync_basic_types::L1BatchNumber;
+//! Prover and server subsystems communicate via the API.
+//! This module defines the types used in the API.
-use crate::{
-    aggregated_operations::L1BatchProofForL1,
-    proofs::PrepareBasicCircuitsJob,
+use serde::{Deserialize, Serialize};
+use zksync_types::{
     protocol_version::{FriProtocolVersionId, L1VerifierConfig},
+    L1BatchNumber,
 };

+use crate::{inputs::PrepareBasicCircuitsJob, outputs::L1BatchProofForL1};
+
 #[derive(Debug, Serialize, Deserialize)]
 pub struct ProofGenerationData {
     pub l1_batch_number: L1BatchNumber,
diff --git a/core/lib/prover_interface/src/inputs.rs b/core/lib/prover_interface/src/inputs.rs
new file mode 100644
index 000000000000..44fe60edddd8
--- /dev/null
+++ b/core/lib/prover_interface/src/inputs.rs
@@ -0,0 +1,185 @@
+use std::{convert::TryInto, fmt::Debug};
+
+use serde::{Deserialize, Serialize};
+use serde_with::{serde_as, Bytes};
+use zksync_object_store::{serialize_using_bincode, Bucket, StoredObject};
+use zksync_types::{L1BatchNumber, H256, U256};
+
+const HASH_LEN: usize = H256::len_bytes();
+
+/// Metadata emitted by a Merkle tree after processing single storage log.
+#[serde_as]
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct StorageLogMetadata {
+    #[serde_as(as = "Bytes")]
+    pub root_hash: [u8; HASH_LEN],
+    pub is_write: bool,
+    pub first_write: bool,
+    #[serde_as(as = "Vec<Bytes>")]
+    pub merkle_paths: Vec<[u8; HASH_LEN]>,
+    pub leaf_hashed_key: U256,
+    pub leaf_enumeration_index: u64,
+    // **NB.** For compatibility reasons, `#[serde_as(as = "Bytes")]` attributes are not added below.
+    pub value_written: [u8; HASH_LEN],
+    pub value_read: [u8; HASH_LEN],
+}
+
+impl StorageLogMetadata {
+    pub fn leaf_hashed_key_array(&self) -> [u8; 32] {
+        let mut result = [0_u8; 32];
+        self.leaf_hashed_key.to_little_endian(&mut result);
+        result
+    }
+
+    pub fn into_merkle_paths_array<const PATH_LEN: usize>(self) -> Box<[[u8; HASH_LEN]; PATH_LEN]> {
+        let actual_len = self.merkle_paths.len();
+        self.merkle_paths.try_into().unwrap_or_else(|_| {
+            panic!(
+                "Unexpected length of Merkle paths in `StorageLogMetadata`: expected {}, got {}",
+                PATH_LEN, actual_len
+            );
+        })
+    }
+}
+
+/// Witness data produced by the Merkle tree as a result of processing a single block. Used
+/// as an input to the witness generator.
+///
+/// # Stability
+///
+/// This type is serialized using `bincode` to be passed from the metadata calculator
+/// to the witness generator. As such, changes in its `serde` serialization
+/// must be backwards-compatible.
+///
+/// # Compact form
+///
+/// In order to reduce storage space, this job supports a compact format. In this format,
+/// only the first item in `merkle_paths` is guaranteed to have the full Merkle path (i.e.,
+/// 256 items with the current Merkle tree). The following items may have fewer hashes in their
+/// Merkle paths; if this is the case, the starting hashes are skipped and are the same
+/// as in the first path.
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct PrepareBasicCircuitsJob {
+    // Merkle paths and some auxiliary information for each read / write operation in a block.
+    merkle_paths: Vec<StorageLogMetadata>,
+    next_enumeration_index: u64,
+}
+
+impl StoredObject for PrepareBasicCircuitsJob {
+    const BUCKET: Bucket = Bucket::WitnessInput;
+    type Key<'a> = L1BatchNumber;
+
+    fn encode_key(key: Self::Key<'_>) -> String {
+        format!("merkel_tree_paths_{key}.bin")
+    }
+
+    serialize_using_bincode!();
+}
+
+impl PrepareBasicCircuitsJob {
+    /// Creates a new job with the specified leaf index and no included paths.
+    pub fn new(next_enumeration_index: u64) -> Self {
+        Self {
+            merkle_paths: vec![],
+            next_enumeration_index,
+        }
+    }
+
+    /// Returns the next leaf index at the beginning of the block.
+    pub fn next_enumeration_index(&self) -> u64 {
+        self.next_enumeration_index
+    }
+
+    /// Reserves additional capacity for Merkle paths.
+    pub fn reserve(&mut self, additional_capacity: usize) {
+        self.merkle_paths.reserve(additional_capacity);
+    }
+
+    /// Pushes an additional Merkle path.
+    pub fn push_merkle_path(&mut self, mut path: StorageLogMetadata) {
+        let Some(first_path) = self.merkle_paths.first() else {
+            self.merkle_paths.push(path);
+            return;
+        };
+        assert_eq!(first_path.merkle_paths.len(), path.merkle_paths.len());
+
+        let mut hash_pairs = path.merkle_paths.iter().zip(&first_path.merkle_paths);
+        let first_unique_idx =
+            hash_pairs.position(|(hash, first_path_hash)| hash != first_path_hash);
+        let first_unique_idx = first_unique_idx.unwrap_or(path.merkle_paths.len());
+        path.merkle_paths = path.merkle_paths.split_off(first_unique_idx);
+        self.merkle_paths.push(path);
+    }
+
+    /// Converts this job into an iterator over the contained Merkle paths.
+    pub fn into_merkle_paths(self) -> impl ExactSizeIterator<Item = StorageLogMetadata> {
+        let mut merkle_paths = self.merkle_paths;
+        if let [first, rest @ ..] = merkle_paths.as_mut_slice() {
+            for path in rest {
+                assert!(
+                    path.merkle_paths.len() <= first.merkle_paths.len(),
+                    "Merkle paths in `PrepareBasicCircuitsJob` are malformed; the first path is not \
+                     the longest one"
+                );
+                let spliced_len = first.merkle_paths.len() - path.merkle_paths.len();
+                let spliced_hashes = &first.merkle_paths[0..spliced_len];
+                path.merkle_paths
+                    .splice(0..0, spliced_hashes.iter().cloned());
+                debug_assert_eq!(path.merkle_paths.len(), first.merkle_paths.len());
+            }
+        }
+        merkle_paths.into_iter()
+    }
+}
+
+/// Enriched `PrepareBasicCircuitsJob`. All the other fields are taken from the `l1_batches` table.
+#[derive(Debug, Clone)]
+pub struct BasicCircuitWitnessGeneratorInput {
+    pub block_number: L1BatchNumber,
+    pub previous_block_hash: H256,
+    pub previous_block_timestamp: u64,
+    pub block_timestamp: u64,
+    pub used_bytecodes_hashes: Vec<U256>,
+    pub initial_heap_content: Vec<(usize, U256)>,
+    pub merkle_paths_input: PrepareBasicCircuitsJob,
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn prepare_basic_circuits_job_roundtrip() {
+        let zero_hash = [0_u8; 32];
+        let logs = (0..10).map(|i| {
+            let mut merkle_paths = vec![zero_hash; 255];
+            merkle_paths.push([i as u8; 32]);
+            StorageLogMetadata {
+                root_hash: zero_hash,
+                is_write: i % 2 == 0,
+                first_write: i % 3 == 0,
+                merkle_paths,
+                leaf_hashed_key: U256::from(i),
+                leaf_enumeration_index: i + 1,
+                value_written: [i as u8; 32],
+                value_read: [0; 32],
+            }
+        });
+        let logs: Vec<_> = logs.collect();
+
+        let mut job = PrepareBasicCircuitsJob::new(4);
+        job.reserve(logs.len());
+        for log in &logs {
+            job.push_merkle_path(log.clone());
+        }
+
+        // Check that Merkle paths are compacted.
+        for (i, log) in job.merkle_paths.iter().enumerate() {
+            let expected_merkle_path_len = if i == 0 { 256 } else { 1 };
+            assert_eq!(log.merkle_paths.len(), expected_merkle_path_len);
+        }
+
+        let logs_from_job: Vec<_> = job.into_merkle_paths().collect();
+        assert_eq!(logs_from_job, logs);
+    }
+}
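Because the job implements `StoredObject` here, persistence goes through the generic object-store API. A sketch assuming the mock in-memory store exposed by `zksync_object_store` (mirroring the serialization test relocated below):

    use zksync_object_store::ObjectStoreFactory;
    use zksync_prover_interface::inputs::PrepareBasicCircuitsJob;
    use zksync_types::L1BatchNumber;

    async fn store_job(job: PrepareBasicCircuitsJob) {
        let store = ObjectStoreFactory::mock().create_store().await;
        let batch = L1BatchNumber(1);
        // Bincode-serializes the job under `merkel_tree_paths_1.bin`
        // (the key spelling is kept as-is for compatibility with stored data).
        store.put(batch, &job).await.unwrap();
        let _restored: PrepareBasicCircuitsJob = store.get(batch).await.unwrap();
    }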
diff --git a/core/lib/prover_interface/src/lib.rs b/core/lib/prover_interface/src/lib.rs
new file mode 100644
index 000000000000..31d66c2af1ea
--- /dev/null
+++ b/core/lib/prover_interface/src/lib.rs
@@ -0,0 +1,9 @@
+//! Point of interaction of the core subsystem with the prover subsystem.
+//! Defines the means of communication between the two subsystems without exposing the internal details of either.
+
+/// Types that define the API for interaction between prover and server subsystems.
+pub mod api;
+/// Inputs for proof generation provided by the core subsystem.
+pub mod inputs;
+/// Outputs of proof generation provided by the prover subsystem.
+pub mod outputs;
diff --git a/core/lib/prover_interface/src/outputs.rs b/core/lib/prover_interface/src/outputs.rs
new file mode 100644
index 000000000000..ebadc6101467
--- /dev/null
+++ b/core/lib/prover_interface/src/outputs.rs
@@ -0,0 +1,38 @@
+use core::fmt;
+
+use serde::{Deserialize, Serialize};
+use zkevm_test_harness::{
+    abstract_zksync_circuit::concrete_circuits::ZkSyncCircuit,
+    bellman::{bn256::Bn256, plonk::better_better_cs::proof::Proof},
+    witness::oracle::VmWitnessOracle,
+};
+use zksync_object_store::{serialize_using_bincode, Bucket, StoredObject};
+use zksync_types::L1BatchNumber;
+
+/// The only type of proof utilized by the core subsystem: a "final" proof that can be sent
+/// to the L1 contract.
+#[derive(Clone, Serialize, Deserialize)]
+pub struct L1BatchProofForL1 {
+    pub aggregation_result_coords: [[u8; 32]; 4],
+    pub scheduler_proof: Proof<Bn256, ZkSyncCircuit<Bn256, VmWitnessOracle<Bn256>>>,
+}
+
+impl fmt::Debug for L1BatchProofForL1 {
+    fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
+        formatter
+            .debug_struct("L1BatchProofForL1")
+            .field("aggregation_result_coords", &self.aggregation_result_coords)
+            .finish_non_exhaustive()
+    }
+}
+
+impl StoredObject for L1BatchProofForL1 {
+    const BUCKET: Bucket = Bucket::ProofsFri;
+    type Key<'a> = L1BatchNumber;
+
+    fn encode_key(key: Self::Key<'_>) -> String {
+        format!("l1_batch_proof_{key}.bin")
+    }
+
+    serialize_using_bincode!();
+}
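The `Debug` impl intentionally omits the bulky `scheduler_proof`, and `StoredObject` pins the storage layout: the proof for batch `n` is a bincode blob named `l1_batch_proof_{n}.bin` in the `ProofsFri` bucket. Fetching is symmetric to the witness-job sketch above (illustrative, not part of the patch):

    use zksync_object_store::ObjectStore;
    use zksync_prover_interface::outputs::L1BatchProofForL1;
    use zksync_types::L1BatchNumber;

    async fn fetch_proof(store: &dyn ObjectStore, batch: L1BatchNumber) -> L1BatchProofForL1 {
        // Reads and bincode-decodes `l1_batch_proof_{batch}.bin` from `Bucket::ProofsFri`.
        store.get(batch).await.expect("proof is present")
    }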
diff --git a/core/lib/object_store/tests/integration.rs b/core/lib/prover_interface/tests/job_serialization.rs
similarity index 92%
rename from core/lib/object_store/tests/integration.rs
rename to core/lib/prover_interface/tests/job_serialization.rs
index 9db2061f17fd..a71f7ea3ae39 100644
--- a/core/lib/object_store/tests/integration.rs
+++ b/core/lib/prover_interface/tests/job_serialization.rs
@@ -1,11 +1,9 @@
-//! Integration tests for object store.
+//! Integration tests for object store serialization of job objects.

 use tokio::fs;
 use zksync_object_store::{Bucket, ObjectStoreFactory};
-use zksync_types::{
-    proofs::{PrepareBasicCircuitsJob, StorageLogMetadata},
-    L1BatchNumber,
-};
+use zksync_prover_interface::inputs::{PrepareBasicCircuitsJob, StorageLogMetadata};
+use zksync_types::L1BatchNumber;

 /// Tests compatibility of the `PrepareBasicCircuitsJob` serialization to the previously used
 /// one.
diff --git a/core/lib/object_store/tests/snapshots/prepare-basic-circuits-job-full.bin b/core/lib/prover_interface/tests/snapshots/prepare-basic-circuits-job-full.bin
similarity index 100%
rename from core/lib/object_store/tests/snapshots/prepare-basic-circuits-job-full.bin
rename to core/lib/prover_interface/tests/snapshots/prepare-basic-circuits-job-full.bin
diff --git a/core/lib/types/Cargo.toml b/core/lib/types/Cargo.toml
index ec8bc1cb88c0..3a2a6e4eb54f 100644
--- a/core/lib/types/Cargo.toml
+++ b/core/lib/types/Cargo.toml
@@ -17,9 +17,6 @@ zksync_basic_types = { path = "../basic_types" }
 zksync_contracts = { path = "../contracts" }
 zksync_mini_merkle_tree = { path = "../mini_merkle_tree" }
 zksync_config = { path = "../config" }
-# We need this import because we wanat DAL to be responsible for (de)serialization
-codegen = { git = "https://github.com/matter-labs/solidity_plonk_verifier.git", branch = "dev" }
-zkevm_test_harness = { git = "https://github.com/matter-labs/era-zkevm_test_harness.git", branch = "v1.3.3" }
 zksync_protobuf = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "5b3d383d7a65b0fbe2a771fecf4313f5083be9ae" }

 anyhow = "1.0.75"
diff --git a/core/lib/types/src/aggregated_operations.rs b/core/lib/types/src/aggregated_operations.rs
index 006eca562e71..dadfad265cb2 100644
--- a/core/lib/types/src/aggregated_operations.rs
+++ b/core/lib/types/src/aggregated_operations.rs
@@ -1,158 +1,4 @@
-use std::{fmt, ops, str::FromStr};
-
-use codegen::serialize_proof;
-use serde::{Deserialize, Serialize};
-use zkevm_test_harness::{
-    abstract_zksync_circuit::concrete_circuits::ZkSyncCircuit,
-    bellman::{bn256::Bn256, plonk::better_better_cs::proof::Proof},
-    witness::oracle::VmWitnessOracle,
-};
-use zksync_basic_types::{ethabi::Token, L1BatchNumber};
-
-use crate::{commitment::L1BatchWithMetadata, ProtocolVersionId, U256};
-
-fn l1_batch_range_from_batches(
-    batches: &[L1BatchWithMetadata],
-) -> ops::RangeInclusive<L1BatchNumber> {
-    let start = batches
-        .first()
-        .map(|l1_batch| l1_batch.header.number)
-        .unwrap_or_default();
-    let end = batches
-        .last()
-        .map(|l1_batch| l1_batch.header.number)
-        .unwrap_or_default();
-    start..=end
-}
-
-#[derive(Debug, Clone)]
-pub struct L1BatchCommitOperation {
-    pub last_committed_l1_batch: L1BatchWithMetadata,
-    pub l1_batches: Vec<L1BatchWithMetadata>,
-}
-
-impl L1BatchCommitOperation {
-    pub fn get_eth_tx_args(&self) -> Vec<Token> {
-        let stored_batch_info = self.last_committed_l1_batch.l1_header_data();
-        let l1_batches_to_commit = self
-            .l1_batches
-            .iter()
-            .map(L1BatchWithMetadata::l1_commit_data)
-            .collect();
-
-        vec![stored_batch_info, Token::Array(l1_batches_to_commit)]
-    }
-
-    pub fn l1_batch_range(&self) -> ops::RangeInclusive<L1BatchNumber> {
-        l1_batch_range_from_batches(&self.l1_batches)
-    }
-}
-
-#[derive(Debug, Clone)]
-pub struct L1BatchCreateProofOperation {
-    pub l1_batches: Vec<L1BatchWithMetadata>,
-    pub proofs_to_pad: usize,
-}
-
-#[derive(Clone, Serialize, Deserialize)]
-pub struct L1BatchProofForL1 {
-    pub aggregation_result_coords: [[u8; 32]; 4],
-    pub scheduler_proof: Proof<Bn256, ZkSyncCircuit<Bn256, VmWitnessOracle<Bn256>>>,
-}
-
-impl fmt::Debug for L1BatchProofForL1 {
-    fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
-        formatter
-            .debug_struct("L1BatchProofForL1")
-            .field("aggregation_result_coords", &self.aggregation_result_coords)
-            .finish_non_exhaustive()
-    }
-}
-
-#[derive(Debug, Clone)]
-pub struct L1BatchProofOperation {
-    pub prev_l1_batch: L1BatchWithMetadata,
-    pub l1_batches: Vec<L1BatchWithMetadata>,
-    pub proofs: Vec<L1BatchProofForL1>,
-    pub should_verify: bool,
-}
-
-impl L1BatchProofOperation {
-    pub fn get_eth_tx_args(&self) -> Vec<Token> {
-        let prev_l1_batch = self.prev_l1_batch.l1_header_data();
-        let batches_arg = self
-            .l1_batches
-            .iter()
-            .map(L1BatchWithMetadata::l1_header_data)
-            .collect();
-        let batches_arg = Token::Array(batches_arg);
-
-        if self.should_verify {
-            // currently we only support submitting a single proof
-            assert_eq!(self.proofs.len(), 1);
-            assert_eq!(self.l1_batches.len(), 1);
-
-            let L1BatchProofForL1 {
-                aggregation_result_coords,
-                scheduler_proof,
-            } = self.proofs.first().unwrap();
-
-            let (_, proof) = serialize_proof(scheduler_proof);
-
-            let aggregation_result_coords = if self.l1_batches[0]
-                .header
-                .protocol_version
-                .unwrap()
-                .is_pre_boojum()
-            {
-                Token::Array(
-                    aggregation_result_coords
-                        .iter()
-                        .map(|bytes| Token::Uint(U256::from_big_endian(bytes)))
-                        .collect(),
-                )
-            } else {
-                Token::Array(Vec::new())
-            };
-            let proof_input = Token::Tuple(vec![
-                aggregation_result_coords,
-                Token::Array(proof.into_iter().map(Token::Uint).collect()),
-            ]);
-
-            vec![prev_l1_batch, batches_arg, proof_input]
-        } else {
-            vec![
-                prev_l1_batch,
-                batches_arg,
-                Token::Tuple(vec![Token::Array(vec![]), Token::Array(vec![])]),
-            ]
-        }
-    }
-
-    pub fn l1_batch_range(&self) -> ops::RangeInclusive<L1BatchNumber> {
-        l1_batch_range_from_batches(&self.l1_batches)
-    }
-}
-
-#[derive(Debug, Clone)]
-pub struct L1BatchExecuteOperation {
-    pub l1_batches: Vec<L1BatchWithMetadata>,
-}
-
-impl L1BatchExecuteOperation {
-    pub fn get_eth_tx_args(&self) -> Vec<Token> {
-        vec![Token::Array(
-            self.l1_batches
-                .iter()
-                .map(L1BatchWithMetadata::l1_header_data)
-                .collect(),
-        )]
-    }
-
-    pub fn l1_batch_range(&self) -> ops::RangeInclusive<L1BatchNumber> {
-        l1_batch_range_from_batches(&self.l1_batches)
-    }
-}
+use std::{fmt, str::FromStr};

 #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
 pub enum AggregatedActionType {
@@ -193,45 +39,3 @@ impl FromStr for AggregatedActionType {
         }
     }
 }
-
-#[allow(clippy::large_enum_variant)]
-#[derive(Debug, Clone)]
-pub enum AggregatedOperation {
-    Commit(L1BatchCommitOperation),
-    PublishProofOnchain(L1BatchProofOperation),
-    Execute(L1BatchExecuteOperation),
-}
-
-impl AggregatedOperation {
-    pub fn get_action_type(&self) -> AggregatedActionType {
-        match self {
-            Self::Commit(_) => AggregatedActionType::Commit,
-            Self::PublishProofOnchain(_) => AggregatedActionType::PublishProofOnchain,
-            Self::Execute(_) => AggregatedActionType::Execute,
-        }
-    }
-
-    pub fn l1_batch_range(&self) -> ops::RangeInclusive<L1BatchNumber> {
-        match self {
-            Self::Commit(op) => op.l1_batch_range(),
-            Self::PublishProofOnchain(op) => op.l1_batch_range(),
-            Self::Execute(op) => op.l1_batch_range(),
-        }
-    }
-
-    pub fn get_action_caption(&self) -> &'static str {
-        match self {
-            Self::Commit(_) => "commit",
-            Self::PublishProofOnchain(_) => "proof",
-            Self::Execute(_) => "execute",
-        }
-    }
-
-    pub fn protocol_version(&self) -> ProtocolVersionId {
-        match self {
-            Self::Commit(op) => op.l1_batches[0].header.protocol_version.unwrap(),
-            Self::PublishProofOnchain(op) => op.l1_batches[0].header.protocol_version.unwrap(),
-            Self::Execute(op) => op.l1_batches[0].header.protocol_version.unwrap(),
-        }
-    }
-}
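Nothing here is lost: each removed operation has a direct counterpart in `zksync_l1_contract_interface`, so call sites translate mechanically. A sketch of the commit case (the prove and execute cases map the same way; illustrative, not part of the patch):

    // Before: zksync_types::aggregated_operations::L1BatchCommitOperation::get_eth_tx_args().
    // After: the equivalent tokens come from `CommitBatches::into_tokens`.
    use zksync_l1_contract_interface::{i_executor::methods::CommitBatches, Tokenize};
    use zksync_types::{commitment::L1BatchWithMetadata, ethabi::Token};

    fn commit_tx_args(
        last_committed_l1_batch: L1BatchWithMetadata,
        l1_batches: Vec<L1BatchWithMetadata>,
    ) -> Vec<Token> {
        CommitBatches {
            last_committed_l1_batch,
            l1_batches,
        }
        .into_tokens()
    }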
diff --git a/core/lib/types/src/commitment.rs b/core/lib/types/src/commitment.rs
index 8a59bd4758fd..6356769242c7 100644
--- a/core/lib/types/src/commitment.rs
+++ b/core/lib/types/src/commitment.rs
@@ -17,14 +17,13 @@ use zksync_utils::u256_to_h256;

 use crate::{
     block::L1BatchHeader,
-    ethabi::Token,
     l2_to_l1_log::{L2ToL1Log, SystemL2ToL1Log, UserL2ToL1Log},
     web3::signing::keccak256,
     writes::{
         compress_state_diffs, InitialStorageWrite, RepeatedStorageWrite, StateDiffRecord,
         PADDED_ENCODED_STORAGE_DIFF_LEN_BYTES,
     },
-    ProtocolVersionId, H256, KNOWN_CODES_STORAGE_ADDRESS, U256,
+    ProtocolVersionId, H256, KNOWN_CODES_STORAGE_ADDRESS,
 };

 /// Type that can be serialized for commitment.
@@ -131,118 +130,6 @@ impl L1BatchWithMetadata {
         })
     }

-    /// Encodes L1Batch into `StorageBatchInfo` (see `IExecutor.sol`)
-    pub fn l1_header_data(&self) -> Token {
-        Token::Tuple(vec![
-            // `batchNumber`
-            Token::Uint(U256::from(self.header.number.0)),
-            // `batchHash`
-            Token::FixedBytes(self.metadata.root_hash.as_bytes().to_vec()),
-            // `indexRepeatedStorageChanges`
-            Token::Uint(U256::from(self.metadata.rollup_last_leaf_index)),
-            // `numberOfLayer1Txs`
-            Token::Uint(U256::from(self.header.l1_tx_count)),
-            // `priorityOperationsHash`
-            Token::FixedBytes(
-                self.header
-                    .priority_ops_onchain_data_hash()
-                    .as_bytes()
-                    .to_vec(),
-            ),
-            // `l2LogsTreeRoot`
-            Token::FixedBytes(self.metadata.l2_l1_merkle_root.as_bytes().to_vec()),
-            // timestamp
-            Token::Uint(U256::from(self.header.timestamp)),
-            // commitment
-            Token::FixedBytes(self.metadata.commitment.as_bytes().to_vec()),
-        ])
-    }
-
-    /// Encodes the L1Batch into CommitBatchInfo (see IExecutor.sol).
-    pub fn l1_commit_data(&self) -> Token {
-        if self.header.protocol_version.unwrap().is_pre_boojum() {
-            Token::Tuple(vec![
-                Token::Uint(U256::from(self.header.number.0)),
-                Token::Uint(U256::from(self.header.timestamp)),
-                Token::Uint(U256::from(self.metadata.rollup_last_leaf_index)),
-                Token::FixedBytes(self.metadata.merkle_root_hash.as_bytes().to_vec()),
-                Token::Uint(U256::from(self.header.l1_tx_count)),
-                Token::FixedBytes(self.metadata.l2_l1_merkle_root.as_bytes().to_vec()),
-                Token::FixedBytes(
-                    self.header
-                        .priority_ops_onchain_data_hash()
-                        .as_bytes()
-                        .to_vec(),
-                ),
-                Token::Bytes(self.metadata.initial_writes_compressed.clone()),
-                Token::Bytes(self.metadata.repeated_writes_compressed.clone()),
-                Token::Bytes(self.metadata.l2_l1_messages_compressed.clone()),
-                Token::Array(
-                    self.header
-                        .l2_to_l1_messages
-                        .iter()
-                        .map(|message| Token::Bytes(message.to_vec()))
-                        .collect(),
-                ),
-                Token::Array(
-                    self.factory_deps
-                        .iter()
-                        .map(|bytecode| Token::Bytes(bytecode.to_vec()))
-                        .collect(),
-                ),
-            ])
-        } else {
-            Token::Tuple(vec![
-                // `batchNumber`
-                Token::Uint(U256::from(self.header.number.0)),
-                // `timestamp`
-                Token::Uint(U256::from(self.header.timestamp)),
-                // `indexRepeatedStorageChanges`
-                Token::Uint(U256::from(self.metadata.rollup_last_leaf_index)),
-                // `newStateRoot`
-                Token::FixedBytes(self.metadata.merkle_root_hash.as_bytes().to_vec()),
-                // `numberOfLayer1Txs`
-                Token::Uint(U256::from(self.header.l1_tx_count)),
-                // `priorityOperationsHash`
-                Token::FixedBytes(
-                    self.header
-                        .priority_ops_onchain_data_hash()
-                        .as_bytes()
-                        .to_vec(),
-                ),
-                // `bootloaderHeapInitialContentsHash`
-                Token::FixedBytes(
-                    self.metadata
-                        .bootloader_initial_content_commitment
-                        .unwrap()
-                        .as_bytes()
-                        .to_vec(),
-                ),
-                // `eventsQueueStateHash`
-                Token::FixedBytes(
-                    self.metadata
-                        .events_queue_commitment
-                        .unwrap()
-                        .as_bytes()
-                        .to_vec(),
-                ),
-                // `systemLogs`
-                Token::Bytes(self.metadata.l2_l1_messages_compressed.clone()),
-                // `totalL2ToL1Pubdata`
-                Token::Bytes(
-                    self.header
-                        .pubdata_input
-                        .clone()
-                        .unwrap_or(self.construct_pubdata()),
-                ),
-            ])
-        }
-    }
-
-    pub fn l1_commit_data_size(&self) -> usize {
-        crate::ethabi::encode(&[Token::Array(vec![self.l1_commit_data()])]).len()
-    }
-
     /// Packs all pubdata needed for batch commitment in boojum into one bytes array. The packing contains the
array. The packing contains the /// following: logs, messages, bytecodes, and compressed state diffs. /// This data is currently part of calldata but will be submitted as part of the blob section post EIP-4844. diff --git a/core/lib/types/src/lib.rs b/core/lib/types/src/lib.rs index 5f8adfca9ed6..27cffb360a3f 100644 --- a/core/lib/types/src/lib.rs +++ b/core/lib/types/src/lib.rs @@ -30,7 +30,6 @@ pub mod block; pub mod circuit; pub mod commitment; pub mod contract_verification_api; -pub mod contracts; pub mod event; pub mod fee; pub mod fee_model; @@ -51,12 +50,9 @@ pub mod zk_evm_types; pub mod api; pub mod eth_sender; pub mod helpers; -pub mod proofs; pub mod proto; -pub mod prover_server_api; pub mod transaction_request; pub mod utils; -pub mod vk_transform; pub mod vm_version; /// Denotes the first byte of the special zkSync's EIP-712-signed transaction. diff --git a/core/lib/types/src/proofs.rs b/core/lib/types/src/proofs.rs deleted file mode 100644 index 28f9523a89ec..000000000000 --- a/core/lib/types/src/proofs.rs +++ /dev/null @@ -1,450 +0,0 @@ -use std::{ - convert::{TryFrom, TryInto}, - fmt::Debug, - net::IpAddr, - ops::Add, - str::FromStr, -}; - -use chrono::{DateTime, Utc}; -use serde::{Deserialize, Serialize}; -use serde_with::{serde_as, Bytes}; -use zksync_basic_types::{L1BatchNumber, H256, U256}; - -const HASH_LEN: usize = H256::len_bytes(); - -/// Metadata emitted by a Merkle tree after processing a single storage log. -#[serde_as] -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub struct StorageLogMetadata { - #[serde_as(as = "Bytes")] - pub root_hash: [u8; HASH_LEN], - pub is_write: bool, - pub first_write: bool, - #[serde_as(as = "Vec<Bytes>")] - pub merkle_paths: Vec<[u8; HASH_LEN]>, - pub leaf_hashed_key: U256, - pub leaf_enumeration_index: u64, - // **NB.** For compatibility reasons, `#[serde_as(as = "Bytes")]` attributes are not added below. - pub value_written: [u8; HASH_LEN], - pub value_read: [u8; HASH_LEN], -} - -impl StorageLogMetadata { - pub fn leaf_hashed_key_array(&self) -> [u8; 32] { - let mut result = [0_u8; 32]; - self.leaf_hashed_key.to_little_endian(&mut result); - result - } - - pub fn into_merkle_paths_array<const PATH_LEN: usize>(self) -> Box<[[u8; HASH_LEN]; PATH_LEN]> { - let actual_len = self.merkle_paths.len(); - self.merkle_paths.try_into().unwrap_or_else(|_| { - panic!( - "Unexpected length of Merkle paths in `StorageLogMetadata`: expected {}, got {}", - PATH_LEN, actual_len - ); - }) - } -} - -/// Represents the sequential number of the proof aggregation round.
-/// Mostly used to be stored in `aggregation_round` column in `prover_jobs` table -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)] -pub enum AggregationRound { - BasicCircuits = 0, - LeafAggregation = 1, - NodeAggregation = 2, - Scheduler = 3, -} - -impl From<u8> for AggregationRound { - fn from(item: u8) -> Self { - match item { - 0 => AggregationRound::BasicCircuits, - 1 => AggregationRound::LeafAggregation, - 2 => AggregationRound::NodeAggregation, - 3 => AggregationRound::Scheduler, - _ => panic!("Invalid round"), - } - } - } - -impl AggregationRound { - pub fn next(&self) -> Option<Self> { - match self { - AggregationRound::BasicCircuits => Some(AggregationRound::LeafAggregation), - AggregationRound::LeafAggregation => Some(AggregationRound::NodeAggregation), - AggregationRound::NodeAggregation => Some(AggregationRound::Scheduler), - AggregationRound::Scheduler => None, - } - } -} - -impl std::fmt::Display for AggregationRound { - fn fmt(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str(match self { - Self::BasicCircuits => "basic_circuits", - Self::LeafAggregation => "leaf_aggregation", - Self::NodeAggregation => "node_aggregation", - Self::Scheduler => "scheduler", - }) - } -} - -impl FromStr for AggregationRound { - type Err = String; - - fn from_str(s: &str) -> Result<Self, Self::Err> { - match s { - "basic_circuits" => Ok(AggregationRound::BasicCircuits), - "leaf_aggregation" => Ok(AggregationRound::LeafAggregation), - "node_aggregation" => Ok(AggregationRound::NodeAggregation), - "scheduler" => Ok(AggregationRound::Scheduler), - other => Err(format!( - "{} is not a valid round name for witness generation", - other - )), - } - } -} - -impl TryFrom<i32> for AggregationRound { - type Error = (); - - fn try_from(v: i32) -> Result<Self, Self::Error> { - match v { - x if x == AggregationRound::BasicCircuits as i32 => Ok(AggregationRound::BasicCircuits), - x if x == AggregationRound::LeafAggregation as i32 => { - Ok(AggregationRound::LeafAggregation) - } - x if x == AggregationRound::NodeAggregation as i32 => { - Ok(AggregationRound::NodeAggregation) - } - x if x == AggregationRound::Scheduler as i32 => Ok(AggregationRound::Scheduler), - _ => Err(()), - } - } -}
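A minimal sketch of driving the round progression above end to end (illustration only, not part of the patch; assumes the relocated `zksync_types::basic_fri_types::AggregationRound` introduced earlier in this diff):

use zksync_types::basic_fri_types::AggregationRound;

// Collects rounds in the order `next()` yields them:
// basic_circuits, leaf_aggregation, node_aggregation, scheduler.
fn all_rounds() -> Vec<AggregationRound> {
    let mut rounds = vec![AggregationRound::BasicCircuits];
    while let Some(next) = rounds.last().unwrap().next() {
        rounds.push(next);
    }
    rounds
}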
- -/// Witness data produced by the Merkle tree as a result of processing a single block. Used -/// as an input to the witness generator. -/// -/// # Stability -/// -/// This type is serialized using `bincode` to be passed from the metadata calculator -/// to the witness generator. As such, changes in its `serde` serialization -/// must be backwards-compatible. -/// -/// # Compact form -/// -/// In order to reduce storage space, this job supports a compact format. In this format, -/// only the first item in `merkle_paths` is guaranteed to have the full Merkle path (i.e., -/// 256 items with the current Merkle tree). The following items may have fewer hashes in their -/// Merkle paths; if this is the case, the starting hashes are skipped and are the same -/// as in the first path. -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct PrepareBasicCircuitsJob { - // Merkle paths and some auxiliary information for each read / write operation in a block. - merkle_paths: Vec<StorageLogMetadata>, - next_enumeration_index: u64, -} - -impl PrepareBasicCircuitsJob { - /// Creates a new job with the specified leaf index and no included paths. - pub fn new(next_enumeration_index: u64) -> Self { - Self { - merkle_paths: vec![], - next_enumeration_index, - } - } - - /// Returns the next leaf index at the beginning of the block. - pub fn next_enumeration_index(&self) -> u64 { - self.next_enumeration_index - } - - /// Reserves additional capacity for Merkle paths. - pub fn reserve(&mut self, additional_capacity: usize) { - self.merkle_paths.reserve(additional_capacity); - } - - /// Pushes an additional Merkle path. - pub fn push_merkle_path(&mut self, mut path: StorageLogMetadata) { - let Some(first_path) = self.merkle_paths.first() else { - self.merkle_paths.push(path); - return; - }; - assert_eq!(first_path.merkle_paths.len(), path.merkle_paths.len()); - - let mut hash_pairs = path.merkle_paths.iter().zip(&first_path.merkle_paths); - let first_unique_idx = - hash_pairs.position(|(hash, first_path_hash)| hash != first_path_hash); - let first_unique_idx = first_unique_idx.unwrap_or(path.merkle_paths.len()); - path.merkle_paths = path.merkle_paths.split_off(first_unique_idx); - self.merkle_paths.push(path); - } - - /// Converts this job into an iterator over the contained Merkle paths. - pub fn into_merkle_paths(self) -> impl ExactSizeIterator<Item = StorageLogMetadata> { - let mut merkle_paths = self.merkle_paths; - if let [first, rest @ ..] = merkle_paths.as_mut_slice() { - for path in rest { - assert!( - path.merkle_paths.len() <= first.merkle_paths.len(), - "Merkle paths in `PrepareBasicCircuitsJob` are malformed; the first path is not \ - the longest one" - ); - let spliced_len = first.merkle_paths.len() - path.merkle_paths.len(); - let spliced_hashes = &first.merkle_paths[0..spliced_len]; - path.merkle_paths - .splice(0..0, spliced_hashes.iter().cloned()); - debug_assert_eq!(path.merkle_paths.len(), first.merkle_paths.len()); - } - } - merkle_paths.into_iter() - } -}
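The compaction in `push_merkle_path` above stores only the suffix of each path that differs from the first stored path; `into_merkle_paths` re-splices the shared prefix. A toy version over plain byte slices (illustration only, not part of the patch):

// Returns the suffix of `next` after the longest common prefix with `first`.
fn compact(first: &[u8], next: &[u8]) -> Vec<u8> {
    let first_unique_idx = next
        .iter()
        .zip(first)
        .position(|(hash, first_hash)| hash != first_hash)
        .unwrap_or(next.len());
    next[first_unique_idx..].to_vec()
}

// compact(&[1, 2, 3], &[1, 2, 9]) == vec![9]
// compact(&[1, 2, 3], &[1, 2, 3]) == vec![]  (path fully shared with the first one)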
- -/// Enriched `PrepareBasicCircuitsJob`. All the other fields are taken from the `l1_batches` table. -#[derive(Debug, Clone)] -pub struct BasicCircuitWitnessGeneratorInput { - pub block_number: L1BatchNumber, - pub previous_block_hash: H256, - pub previous_block_timestamp: u64, - pub block_timestamp: u64, - pub used_bytecodes_hashes: Vec<U256>, - pub initial_heap_content: Vec<(usize, U256)>, - pub merkle_paths_input: PrepareBasicCircuitsJob, -} - -#[derive(Debug, Clone)] -pub struct FriProverJobMetadata { - pub id: u32, - pub block_number: L1BatchNumber, - pub circuit_id: u8, - pub aggregation_round: AggregationRound, - pub sequence_number: usize, - pub depth: u16, - pub is_node_final_proof: bool, -} - -#[derive(Debug, Clone)] -pub struct LeafAggregationJobMetadata { - pub id: u32, - pub block_number: L1BatchNumber, - pub circuit_id: u8, - pub prover_job_ids_for_proofs: Vec<u32>, -} - -#[derive(Debug, Clone)] -pub struct NodeAggregationJobMetadata { - pub id: u32, - pub block_number: L1BatchNumber, - pub circuit_id: u8, - pub depth: u16, - pub prover_job_ids_for_proofs: Vec<u32>, -} - -#[derive(Debug)] -pub struct JobPosition { - pub aggregation_round: AggregationRound, - pub sequence_number: usize, -} - -#[derive(Debug, Default)] -pub struct ProverJobStatusFailed { - pub started_at: DateTime<Utc>, - pub error: String, -} - -#[derive(Debug)] -pub struct ProverJobStatusSuccessful { - pub started_at: DateTime<Utc>, - pub time_taken: chrono::Duration, -} - -impl Default for ProverJobStatusSuccessful { - fn default() -> Self { - ProverJobStatusSuccessful { - started_at: DateTime::default(), - time_taken: chrono::Duration::zero(), - } - } -} - -#[derive(Debug, Default)] -pub struct ProverJobStatusInProgress { - pub started_at: DateTime<Utc>, -} - -#[derive(Debug)] -pub struct WitnessJobStatusSuccessful { - pub started_at: DateTime<Utc>, - pub time_taken: chrono::Duration, -} - -impl Default for WitnessJobStatusSuccessful { - fn default() -> Self { - WitnessJobStatusSuccessful { - started_at: DateTime::default(), - time_taken: chrono::Duration::zero(), - } - } -} - -#[derive(Debug, Default)] -pub struct WitnessJobStatusFailed { - pub started_at: DateTime<Utc>, - pub error: String, -} - -#[derive(Debug, strum::Display, strum::EnumString, strum::AsRefStr)] -pub enum ProverJobStatus { - #[strum(serialize = "queued")] - Queued, - #[strum(serialize = "in_progress")] - InProgress(ProverJobStatusInProgress), - #[strum(serialize = "successful")] - Successful(ProverJobStatusSuccessful), - #[strum(serialize = "failed")] - Failed(ProverJobStatusFailed), - #[strum(serialize = "skipped")] - Skipped, - #[strum(serialize = "ignored")] - Ignored, -} - -#[derive(Debug, strum::Display, strum::EnumString, strum::AsRefStr)] -pub enum WitnessJobStatus { - #[strum(serialize = "failed")] - Failed(WitnessJobStatusFailed), - #[strum(serialize = "skipped")] - Skipped, - #[strum(serialize = "successful")] - Successful(WitnessJobStatusSuccessful), - #[strum(serialize = "waiting_for_artifacts")] - WaitingForArtifacts, - #[strum(serialize = "waiting_for_proofs")] - WaitingForProofs, - #[strum(serialize = "in_progress")] - InProgress, - #[strum(serialize = "queued")] - Queued, -} - -#[derive(Debug)] -pub struct WitnessJobInfo { - pub block_number: L1BatchNumber, - pub created_at: DateTime<Utc>, - pub updated_at: DateTime<Utc>, - pub status: WitnessJobStatus, - pub position: JobPosition, -} - -#[derive(Debug)] -pub struct ProverJobInfo { - pub id: u32, - pub block_number: L1BatchNumber, - pub circuit_type: String, - pub position: JobPosition, - pub input_length: u64, - pub status: ProverJobStatus, - pub attempts: u32, - pub created_at: DateTime<Utc>, - pub updated_at: DateTime<Utc>, -}
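The strum derives above are what give these statuses their database string forms; a minimal round-trip sketch (illustration only, not part of the patch; `from_str` fills payload-carrying variants with their `Default` values):

use std::str::FromStr;

fn status_roundtrip() {
    let status = ProverJobStatus::from_str("successful").expect("unknown status string");
    // `Display` uses the same `#[strum(serialize = ...)]` strings.
    assert_eq!(status.to_string(), "successful");
}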
- -#[derive(Debug)] -pub struct JobExtendedStatistics { - pub successful_padding: L1BatchNumber, - pub queued_padding: L1BatchNumber, - pub queued_padding_len: u32, - pub active_area: Vec<ProverJobInfo>, -} - -#[derive(Debug, Clone, Copy, Default)] -pub struct JobCountStatistics { - pub queued: usize, - pub in_progress: usize, - pub failed: usize, - pub successful: usize, -} - -impl Add for JobCountStatistics { - type Output = JobCountStatistics; - - fn add(self, rhs: Self) -> Self::Output { - Self { - queued: self.queued + rhs.queued, - in_progress: self.in_progress + rhs.in_progress, - failed: self.failed + rhs.failed, - successful: self.successful + rhs.successful, - } - } -} - -#[derive(Debug)] -pub struct StuckJobs { - pub id: u64, - pub status: String, - pub attempts: u64, -} - -#[derive(Debug, Clone)] -pub struct SocketAddress { - pub host: IpAddr, - pub port: u16, -} - -#[derive(Debug, Copy, Clone)] -pub enum GpuProverInstanceStatus { - // The instance is available for processing. - Available, - // The instance is running at full capacity. - Full, - // The instance is reserved by a synthesizer. - Reserved, - // The instance is not alive anymore. - Dead, -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn prepare_basic_circuits_job_roundtrip() { - let zero_hash = [0_u8; 32]; - let logs = (0..10).map(|i| { - let mut merkle_paths = vec![zero_hash; 255]; - merkle_paths.push([i as u8; 32]); - StorageLogMetadata { - root_hash: zero_hash, - is_write: i % 2 == 0, - first_write: i % 3 == 0, - merkle_paths, - leaf_hashed_key: U256::from(i), - leaf_enumeration_index: i + 1, - value_written: [i as u8; 32], - value_read: [0; 32], - } - }); - let logs: Vec<_> = logs.collect(); - - let mut job = PrepareBasicCircuitsJob::new(4); - job.reserve(logs.len()); - for log in &logs { - job.push_merkle_path(log.clone()); - } - - // Check that Merkle paths are compacted.
- for (i, log) in job.merkle_paths.iter().enumerate() { - let expected_merkle_path_len = if i == 0 { 256 } else { 1 }; - assert_eq!(log.merkle_paths.len(), expected_merkle_path_len); - } - - let logs_from_job: Vec<_> = job.into_merkle_paths().collect(); - assert_eq!(logs_from_job, logs); - } -} diff --git a/core/lib/zksync_core/Cargo.toml b/core/lib/zksync_core/Cargo.toml index 1470ce11a4a9..032e58d2bcd1 100644 --- a/core/lib/zksync_core/Cargo.toml +++ b/core/lib/zksync_core/Cargo.toml @@ -25,6 +25,7 @@ zksync_system_constants = { path = "../../lib/constants" } zksync_commitment_utils = { path = "../commitment_utils" } zksync_eth_client = { path = "../eth_client" } zksync_eth_signer = { path = "../eth_signer" } +zksync_l1_contract_interface = { path = "../l1_contract_interface" } zksync_mempool = { path = "../mempool" } zksync_queued_job_processor = { path = "../queued_job_processor" } zksync_circuit_breaker = { path = "../circuit_breaker" } @@ -32,6 +33,7 @@ zksync_storage = { path = "../storage" } zksync_merkle_tree = { path = "../merkle_tree" } zksync_mini_merkle_tree = { path = "../mini_merkle_tree" } prometheus_exporter = { path = "../prometheus_exporter" } +zksync_prover_interface = { path = "../prover_interface" } zksync_web3_decl = { path = "../web3_decl", default-features = false, features = [ "server", "client", @@ -41,6 +43,7 @@ zksync_health_check = { path = "../health_check" } vlog = { path = "../vlog" } multivm = { path = "../multivm" } + # Consensus dependencies zksync_concurrency = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "5b3d383d7a65b0fbe2a771fecf4313f5083be9ae" } zksync_consensus_crypto = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "5b3d383d7a65b0fbe2a771fecf4313f5083be9ae" } diff --git a/core/lib/zksync_core/src/consistency_checker/mod.rs b/core/lib/zksync_core/src/consistency_checker/mod.rs index 4632343982bb..8441f927f8f5 100644 --- a/core/lib/zksync_core/src/consistency_checker/mod.rs +++ b/core/lib/zksync_core/src/consistency_checker/mod.rs @@ -5,6 +5,7 @@ use tokio::sync::watch; use zksync_contracts::PRE_BOOJUM_COMMIT_FUNCTION; use zksync_dal::{ConnectionPool, StorageProcessor}; use zksync_eth_client::{clients::QueryClient, Error as L1ClientError, EthInterface}; +use zksync_l1_contract_interface::{i_executor::structures::CommitBatchInfo, Tokenizable}; use zksync_types::{web3::ethabi, L1BatchNumber, H256}; use crate::{ @@ -113,7 +114,7 @@ impl LocalL1BatchCommitData { Ok(Some(Self { is_pre_boojum, - l1_commit_data: l1_batch.l1_commit_data(), + l1_commit_data: CommitBatchInfo(&l1_batch).into_token(), commit_tx_hash, })) } diff --git a/core/lib/zksync_core/src/consistency_checker/tests/mod.rs b/core/lib/zksync_core/src/consistency_checker/tests/mod.rs index 05232aac4ba6..0803b25d4a95 100644 --- a/core/lib/zksync_core/src/consistency_checker/tests/mod.rs +++ b/core/lib/zksync_core/src/consistency_checker/tests/mod.rs @@ -7,6 +7,7 @@ use test_casing::{test_casing, Product}; use tokio::sync::mpsc; use zksync_dal::StorageProcessor; use zksync_eth_client::clients::MockEthereum; +use zksync_l1_contract_interface::i_executor::structures::StoredBatchInfo; use zksync_types::{ aggregated_operations::AggregatedActionType, commitment::L1BatchWithMetadata, web3::contract::Options, L2ChainId, ProtocolVersion, ProtocolVersionId, H256, @@ -42,7 +43,9 @@ fn create_pre_boojum_l1_batch_with_metadata(number: u32) -> L1BatchWithMetadata }
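The helper below now tokenizes batches through the new interface crate; the call shape, isolated (illustration only, not part of the patch):

use zksync_l1_contract_interface::{i_executor::structures::CommitBatchInfo, Tokenizable};
use zksync_types::{commitment::L1BatchWithMetadata, ethabi::Token};

// `CommitBatchInfo` borrows the batch and produces the same token the
// removed `L1BatchWithMetadata::l1_commit_data` method used to build.
fn commit_token(batch: &L1BatchWithMetadata) -> Token {
    CommitBatchInfo(batch).into_token()
}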
fn build_commit_tx_input_data(batches: &[L1BatchWithMetadata]) -> Vec<u8> { - let commit_tokens = batches.iter().map(L1BatchWithMetadata::l1_commit_data); + let commit_tokens = batches .iter() .map(|batch| CommitBatchInfo(batch).into_token()); let commit_tokens = ethabi::Token::Array(commit_tokens.collect()); let mut encoded = vec![]; @@ -51,7 +54,7 @@ fn build_commit_tx_input_data(batches: &[L1BatchWithMetadata]) -> Vec<u8> { // Mock an additional argument used in real `commitBlocks` / `commitBatches`. In real transactions, // it's taken from the L1 batch previous to `batches[0]`, but since this argument is not checked, // it's OK to use `batches[0]`. - let prev_header_tokens = batches[0].l1_header_data(); + let prev_header_tokens = StoredBatchInfo(&batches[0]).into_token(); encoded.extend_from_slice(&ethabi::encode(&[prev_header_tokens, commit_tokens])); encoded } @@ -92,7 +95,7 @@ fn build_commit_tx_input_data_is_correct() { batch.header.number, ) .unwrap(); - assert_eq!(commit_data, batch.l1_commit_data()); + assert_eq!(commit_data, CommitBatchInfo(batch).into_token()); } } diff --git a/core/lib/zksync_core/src/eth_sender/aggregated_operations.rs b/core/lib/zksync_core/src/eth_sender/aggregated_operations.rs new file mode 100644 index 000000000000..bb7cf75e50d4 --- /dev/null +++ b/core/lib/zksync_core/src/eth_sender/aggregated_operations.rs @@ -0,0 +1,55 @@ +use std::ops; + +use zksync_l1_contract_interface::i_executor::methods::{ + CommitBatches, ExecuteBatches, ProveBatches, +}; +use zksync_types::{aggregated_operations::AggregatedActionType, L1BatchNumber, ProtocolVersionId}; + +#[allow(clippy::large_enum_variant)] +#[derive(Debug, Clone)] +pub enum AggregatedOperation { + Commit(CommitBatches), + PublishProofOnchain(ProveBatches), + Execute(ExecuteBatches), +} + +impl AggregatedOperation { + pub fn get_action_type(&self) -> AggregatedActionType { + match self { + Self::Commit(_) => AggregatedActionType::Commit, + Self::PublishProofOnchain(_) => AggregatedActionType::PublishProofOnchain, + Self::Execute(_) => AggregatedActionType::Execute, + } + } + + pub fn l1_batch_range(&self) -> ops::RangeInclusive<L1BatchNumber> { + let batches = match self { + Self::Commit(op) => &op.l1_batches, + Self::PublishProofOnchain(op) => &op.l1_batches, + Self::Execute(op) => &op.l1_batches, + }; + + if batches.is_empty() { + return L1BatchNumber(0)..=L1BatchNumber(0); + } + let first_batch = &batches[0]; + let last_batch = &batches[batches.len() - 1]; + first_batch.header.number..=last_batch.header.number + } + + pub fn get_action_caption(&self) -> &'static str { + match self { + Self::Commit(_) => "commit", + Self::PublishProofOnchain(_) => "proof", + Self::Execute(_) => "execute", + } + } + + pub fn protocol_version(&self) -> ProtocolVersionId { + match self { + Self::Commit(op) => op.l1_batches[0].header.protocol_version.unwrap(), + Self::PublishProofOnchain(op) => op.l1_batches[0].header.protocol_version.unwrap(), + Self::Execute(op) => op.l1_batches[0].header.protocol_version.unwrap(), + } + } +}
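Using the range helper above (illustration only, not part of the patch); endpoints come from the first and last batch headers, with 0..=0 as the empty-case sentinel:

// `op` is any AggregatedOperation built from this module's variants.
fn describe(op: &AggregatedOperation) -> String {
    let range = op.l1_batch_range();
    format!(
        "{} operation covering batches {}..={}",
        op.get_action_caption(),
        range.start().0,
        range.end().0,
    )
}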
diff --git a/core/lib/zksync_core/src/eth_sender/aggregator.rs b/core/lib/zksync_core/src/eth_sender/aggregator.rs index a043b871b1e8..fe887db6469e 100644 --- a/core/lib/zksync_core/src/eth_sender/aggregator.rs +++ b/core/lib/zksync_core/src/eth_sender/aggregator.rs @@ -3,21 +3,23 @@ use zksync_config::configs::eth_sender::{ProofLoadingMode, ProofSendingMode, SenderConfig}; use zksync_contracts::BaseSystemContractsHashes; use zksync_dal::StorageProcessor; +use zksync_l1_contract_interface::i_executor::methods::{ + CommitBatches, ExecuteBatches, ProveBatches, +}; use zksync_object_store::{ObjectStore, ObjectStoreError}; +use zksync_prover_interface::outputs::L1BatchProofForL1; use zksync_types::{ - aggregated_operations::{ - AggregatedActionType, AggregatedOperation, L1BatchCommitOperation, L1BatchExecuteOperation, - L1BatchProofForL1, L1BatchProofOperation, - }, - commitment::L1BatchWithMetadata, - helpers::unix_timestamp_ms, - protocol_version::L1VerifierConfig, - L1BatchNumber, ProtocolVersionId, + aggregated_operations::AggregatedActionType, commitment::L1BatchWithMetadata, + helpers::unix_timestamp_ms, protocol_version::L1VerifierConfig, L1BatchNumber, + ProtocolVersionId, }; -use super::publish_criterion::{ - DataSizeCriterion, GasCriterion, L1BatchPublishCriterion, NumberCriterion, - TimestampDeadlineCriterion, +use super::{ + aggregated_operations::AggregatedOperation, + publish_criterion::{ + DataSizeCriterion, GasCriterion, L1BatchPublishCriterion, NumberCriterion, + TimestampDeadlineCriterion, + }, }; #[derive(Debug)] @@ -142,7 +144,7 @@ impl Aggregator { storage: &mut StorageProcessor<'_>, limit: usize, last_sealed_l1_batch: L1BatchNumber, - ) -> Option<L1BatchExecuteOperation> { + ) -> Option<ExecuteBatches> { let max_l1_batch_timestamp_millis = self .config .l1_batch_min_age_before_execute_seconds @@ -160,7 +162,7 @@ impl Aggregator { ) .await; - l1_batches.map(|l1_batches| L1BatchExecuteOperation { l1_batches }) + l1_batches.map(|l1_batches| ExecuteBatches { l1_batches }) } async fn get_commit_operation( @@ -170,7 +172,7 @@ impl Aggregator { last_sealed_batch: L1BatchNumber, base_system_contracts_hashes: BaseSystemContractsHashes, protocol_version_id: ProtocolVersionId, - ) -> Option<L1BatchCommitOperation> { + ) -> Option<CommitBatches> { let mut blocks_dal = storage.blocks_dal(); let last_committed_l1_batch = blocks_dal .get_last_committed_to_eth_l1_batch() @@ -218,7 +220,7 @@ impl Aggregator { ) .await; - batches.map(|batches| L1BatchCommitOperation { + batches.map(|batches| CommitBatches { last_committed_l1_batch, l1_batches: batches, }) @@ -229,7 +231,7 @@ impl Aggregator { l1_verifier_config: L1VerifierConfig, proof_loading_mode: &ProofLoadingMode, blob_store: &dyn ObjectStore, - ) -> Option<L1BatchProofOperation> { + ) -> Option<ProveBatches> { let previous_proven_batch_number = storage .blocks_dal() .get_last_l1_batch_with_prove_tx() @@ -297,7 +299,7 @@ impl Aggregator { ); }); - Some(L1BatchProofOperation { + Some(ProveBatches { prev_l1_batch: previous_proven_batch_metadata, l1_batches: vec![metadata_for_batch_being_proved], proofs, @@ -310,7 +312,7 @@ impl Aggregator { storage: &mut StorageProcessor<'_>, ready_for_proof_l1_batches: Vec<L1BatchWithMetadata>, last_sealed_l1_batch: L1BatchNumber, - ) -> Option<L1BatchProofOperation> { + ) -> Option<ProveBatches> { let batches = extract_ready_subrange( storage, &mut self.proof_criteria, @@ -326,7 +328,7 @@ impl Aggregator { .await .unwrap()?; - Some(L1BatchProofOperation { + Some(ProveBatches { prev_l1_batch: prev_batch, l1_batches: batches, proofs: vec![], @@ -340,7 +342,7 @@ impl Aggregator { limit: usize, last_sealed_l1_batch: L1BatchNumber, l1_verifier_config: L1VerifierConfig, - ) -> Option<L1BatchProofOperation> { + ) -> Option<ProveBatches> { match self.config.proof_sending_mode { ProofSendingMode::OnlyRealProofs => { Self::load_real_proof_operation(
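The eth_tx_aggregator changes below move calldata encoding onto the interface crate's `Tokenize` impls; the per-operation shape, isolated (illustration only, not part of the patch):

use zksync_l1_contract_interface::{i_executor::methods::ExecuteBatches, Tokenize};
use zksync_types::ethabi::Function;

// `f` is the resolved `executeBatches` / `executeBlocks` ABI function.
fn execute_calldata(f: &Function, op: ExecuteBatches) -> Vec<u8> {
    f.encode_input(&op.into_tokens())
        .expect("Failed to encode execute transaction data")
}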
diff --git a/core/lib/zksync_core/src/eth_sender/eth_tx_aggregator.rs b/core/lib/zksync_core/src/eth_sender/eth_tx_aggregator.rs index 6211b6363849..70a8b3d8ae41 100644 --- a/core/lib/zksync_core/src/eth_sender/eth_tx_aggregator.rs +++ b/core/lib/zksync_core/src/eth_sender/eth_tx_aggregator.rs @@ -5,20 +5,20 @@ use zksync_config::configs::eth_sender::SenderConfig; use zksync_contracts::BaseSystemContractsHashes; use zksync_dal::{ConnectionPool, StorageProcessor}; use zksync_eth_client::{BoundEthInterface, CallFunctionArgs}; +use zksync_l1_contract_interface::{ + multicall3::{Multicall3Call, Multicall3Result}, + pre_boojum_verifier::old_l1_vk_commitment, + Detokenize, Tokenizable, Tokenize, +}; use zksync_types::{ - aggregated_operations::AggregatedOperation, - contracts::{Multicall3Call, Multicall3Result}, eth_sender::EthTx, ethabi::{Contract, Token}, protocol_version::{L1VerifierConfig, VerifierParams}, - vk_transform::l1_vk_commitment, - web3::contract::{ - tokens::{Detokenize, Tokenizable}, - Error, - }, + web3::contract::Error as Web3ContractError, Address, ProtocolVersionId, H256, U256, }; +use super::aggregated_operations::AggregatedOperation; use crate::{ eth_sender::{ metrics::{PubdataKind, METRICS}, @@ -189,9 +189,12 @@ impl EthTxAggregator { token: Token, ) -> Result<MulticallData, ETHSenderError> { let parse_error = |tokens: &[Token]| { - Err(ETHSenderError::ParseError(Error::InvalidOutputType( - format!("Failed to parse multicall token: {:?}", tokens), - ))) + Err(ETHSenderError::ParseError( + Web3ContractError::InvalidOutputType(format!( + "Failed to parse multicall token: {:?}", + tokens + )), + )) }; if let Token::Array(call_results) = token { @@ -205,24 +208,24 @@ impl EthTxAggregator { Multicall3Result::from_token(call_results_iterator.next().unwrap())?.return_data; if multicall3_bootloader.len() != 32 { - return Err(ETHSenderError::ParseError(Error::InvalidOutputType( - format!( + return Err(ETHSenderError::ParseError( + Web3ContractError::InvalidOutputType(format!( "multicall3 bootloader hash data is not of the len of 32: {:?}", multicall3_bootloader - ), - ))); + )), + )); } let bootloader = H256::from_slice(&multicall3_bootloader); let multicall3_default_aa = Multicall3Result::from_token(call_results_iterator.next().unwrap())?.return_data; if multicall3_default_aa.len() != 32 { - return Err(ETHSenderError::ParseError(Error::InvalidOutputType( - format!( + return Err(ETHSenderError::ParseError( + Web3ContractError::InvalidOutputType(format!( "multicall3 default aa hash data is not of the len of 32: {:?}", multicall3_default_aa - ), - ))); + )), + )); } let default_aa = H256::from_slice(&multicall3_default_aa); let base_system_contracts_hashes = BaseSystemContractsHashes { @@ -233,12 +236,12 @@ impl EthTxAggregator { let multicall3_verifier_params = Multicall3Result::from_token(call_results_iterator.next().unwrap())?.return_data; if multicall3_verifier_params.len() != 96 { - return Err(ETHSenderError::ParseError(Error::InvalidOutputType( - format!( + return Err(ETHSenderError::ParseError( + Web3ContractError::InvalidOutputType(format!( "multicall3 verifier params data is not of the len of 96: {:?}", multicall3_default_aa - ), - ))); + )), + )); } let recursion_node_level_vk_hash = H256::from_slice(&multicall3_verifier_params[..32]); let recursion_leaf_level_vk_hash = @@ -254,24 +257,24 @@ impl EthTxAggregator { let multicall3_verifier_address = Multicall3Result::from_token(call_results_iterator.next().unwrap())?.return_data; if multicall3_verifier_address.len() != 32 { - return Err(ETHSenderError::ParseError(Error::InvalidOutputType( - format!( + return Err(ETHSenderError::ParseError( + Web3ContractError::InvalidOutputType(format!( "multicall3 verifier address data is not of the len of 32: {:?}", multicall3_verifier_address - ), - ))); + )), + )); } let verifier_address = Address::from_slice(&multicall3_verifier_address[12..]); let multicall3_protocol_version =
Multicall3Result::from_token(call_results_iterator.next().unwrap())?.return_data; if multicall3_protocol_version.len() != 32 { - return Err(ETHSenderError::ParseError(Error::InvalidOutputType( - format!( + return Err(ETHSenderError::ParseError( + Web3ContractError::InvalidOutputType(format!( "multicall3 protocol version data is not of the len of 32: {:?}", multicall3_protocol_version - ), - ))); + )), + )); } let protocol_version_id = U256::from_big_endian(&multicall3_protocol_version) .try_into() @@ -310,7 +313,7 @@ impl EthTxAggregator { .for_contract(verifier_address, abi); let vk = self.eth_client.call_contract_function(args).await?; - Ok(l1_vk_commitment(Token::from_tokens(vk)?)) + Ok(old_l1_vk_commitment(Token::from_tokens(vk)?)) } else { let get_vk_hash = self.functions.verification_key_hash.as_ref(); tracing::debug!("Calling verificationKeyHash"); @@ -406,7 +409,7 @@ impl EthTxAggregator { // For "commit" and "prove" operations it's necessary that the contracts are of the same version as L1 batches are. // For "execute" it's not required, i.e. we can "execute" pre-boojum batches with post-boojum contracts. - match &op { + match op.clone() { AggregatedOperation::Commit(op) => { assert_eq!(contracts_are_pre_boojum, operation_is_pre_boojum); let f = if contracts_are_pre_boojum { @@ -417,7 +420,8 @@ impl EthTxAggregator { .as_ref() .expect("Missing ABI for commitBatches") }; - f.encode_input(&op.get_eth_tx_args()) + f.encode_input(&op.into_tokens()) + .expect("Failed to encode commit transaction data") } AggregatedOperation::PublishProofOnchain(op) => { assert_eq!(contracts_are_pre_boojum, operation_is_pre_boojum); @@ -429,7 +433,8 @@ impl EthTxAggregator { .as_ref() .expect("Missing ABI for proveBatches") }; - f.encode_input(&op.get_eth_tx_args()) + f.encode_input(&op.into_tokens()) + .expect("Failed to encode prove transaction data") } AggregatedOperation::Execute(op) => { let f = if contracts_are_pre_boojum { @@ -440,10 +445,10 @@ impl EthTxAggregator { .as_ref() .expect("Missing ABI for executeBatches") }; - f.encode_input(&op.get_eth_tx_args()) + f.encode_input(&op.into_tokens()) + .expect("Failed to encode execute transaction data") } } - .expect("Failed to encode transaction data") } pub(super) async fn save_eth_tx( diff --git a/core/lib/zksync_core/src/eth_sender/mod.rs b/core/lib/zksync_core/src/eth_sender/mod.rs index e5a47d3f62f8..010441dc0d1a 100644 --- a/core/lib/zksync_core/src/eth_sender/mod.rs +++ b/core/lib/zksync_core/src/eth_sender/mod.rs @@ -1,3 +1,4 @@ +mod aggregated_operations; mod aggregator; mod error; mod eth_tx_aggregator; diff --git a/core/lib/zksync_core/src/eth_sender/publish_criterion.rs b/core/lib/zksync_core/src/eth_sender/publish_criterion.rs index 85f6a46c960b..bc931a119490 100644 --- a/core/lib/zksync_core/src/eth_sender/publish_criterion.rs +++ b/core/lib/zksync_core/src/eth_sender/publish_criterion.rs @@ -3,8 +3,10 @@ use std::fmt; use async_trait::async_trait; use chrono::Utc; use zksync_dal::StorageProcessor; +use zksync_l1_contract_interface::{i_executor::structures::CommitBatchInfo, Tokenizable}; use zksync_types::{ - aggregated_operations::AggregatedActionType, commitment::L1BatchWithMetadata, L1BatchNumber, + aggregated_operations::AggregatedActionType, commitment::L1BatchWithMetadata, ethabi, + L1BatchNumber, }; use super::metrics::METRICS; @@ -215,13 +217,16 @@ impl L1BatchPublishCriterion for DataSizeCriterion { let mut data_size_left = self.data_limit - STORED_BLOCK_INFO_SIZE; for (index, l1_batch) in 
consecutive_l1_batches.iter().enumerate() { - if data_size_left < l1_batch.l1_commit_data_size() { + // TODO (PLA-771): Make sure that this estimation is correct. + let l1_commit_data_size = ethabi::encode(&[ethabi::Token::Array(vec![ + CommitBatchInfo(l1_batch).into_token(), + ])]) + .len(); + if data_size_left < l1_commit_data_size { if index == 0 { panic!( "L1 batch #{} requires {} data, which is more than the range limit of {}", - l1_batch.header.number, - l1_batch.l1_commit_data_size(), - self.data_limit + l1_batch.header.number, l1_commit_data_size, self.data_limit ); } @@ -236,7 +241,7 @@ impl L1BatchPublishCriterion for DataSizeCriterion { METRICS.block_aggregation_reason[&(self.op, "data_size").into()].inc(); return Some(output); } - data_size_left -= l1_batch.l1_commit_data_size(); + data_size_left -= l1_commit_data_size; } None diff --git a/core/lib/zksync_core/src/eth_sender/tests.rs b/core/lib/zksync_core/src/eth_sender/tests.rs index e70df8530c8f..723c765f3954 100644 --- a/core/lib/zksync_core/src/eth_sender/tests.rs +++ b/core/lib/zksync_core/src/eth_sender/tests.rs @@ -8,11 +8,11 @@ use zksync_config::{ }; use zksync_dal::{ConnectionPool, StorageProcessor}; use zksync_eth_client::{clients::MockEthereum, EthInterface}; +use zksync_l1_contract_interface::i_executor::methods::{ + CommitBatches, ExecuteBatches, ProveBatches, +}; use zksync_object_store::ObjectStoreFactory; use zksync_types::{ - aggregated_operations::{ - AggregatedOperation, L1BatchCommitOperation, L1BatchExecuteOperation, L1BatchProofOperation, - }, block::L1BatchHeader, commitment::{L1BatchMetaParameters, L1BatchMetadata, L1BatchWithMetadata}, ethabi::Token, @@ -23,7 +23,8 @@ use zksync_types::{ use crate::{ eth_sender::{ - eth_tx_manager::L1BlockNumbers, Aggregator, ETHSenderError, EthTxAggregator, EthTxManager, + aggregated_operations::AggregatedOperation, eth_tx_manager::L1BlockNumbers, Aggregator, + ETHSenderError, EthTxAggregator, EthTxManager, }, l1_gas_price::GasAdjuster, utils::testonly::create_l1_batch, @@ -33,7 +34,7 @@ use crate::{ type MockEthTxManager = EthTxManager; static DUMMY_OPERATION: Lazy<AggregatedOperation> = Lazy::new(|| { - AggregatedOperation::Execute(L1BatchExecuteOperation { + AggregatedOperation::Execute(ExecuteBatches { l1_batches: vec![L1BatchWithMetadata { header: create_l1_batch(1), metadata: default_l1_batch_metadata(), @@ -914,7 +915,7 @@ async fn execute_l1_batches( l1_batches: Vec<L1BatchHeader>, confirm: bool, ) -> H256 { - let operation = AggregatedOperation::Execute(L1BatchExecuteOperation { + let operation = AggregatedOperation::Execute(ExecuteBatches { l1_batches: l1_batches.into_iter().map(l1_batch_with_metadata).collect(), }); send_operation(tester, operation, confirm).await @@ -926,7 +927,7 @@ async fn prove_l1_batch( l1_batch: L1BatchHeader, confirm: bool, ) -> H256 { - let operation = AggregatedOperation::PublishProofOnchain(L1BatchProofOperation { + let operation = AggregatedOperation::PublishProofOnchain(ProveBatches { prev_l1_batch: l1_batch_with_metadata(last_committed_l1_batch), l1_batches: vec![l1_batch_with_metadata(l1_batch)], proofs: vec![], @@ -941,7 +942,7 @@ async fn commit_l1_batch( l1_batch: L1BatchHeader, confirm: bool, ) -> H256 { - let operation = AggregatedOperation::Commit(L1BatchCommitOperation { + let operation = AggregatedOperation::Commit(CommitBatches { last_committed_l1_batch: l1_batch_with_metadata(last_committed_l1_batch), l1_batches: vec![l1_batch_with_metadata(l1_batch)], }); diff --git a/core/lib/zksync_core/src/eth_watch/client.rs
b/core/lib/zksync_core/src/eth_watch/client.rs index 08e62c3f4ea0..28707ce4a4c5 100644 --- a/core/lib/zksync_core/src/eth_watch/client.rs +++ b/core/lib/zksync_core/src/eth_watch/client.rs @@ -2,9 +2,9 @@ use std::fmt; use zksync_contracts::verifier_contract; use zksync_eth_client::{CallFunctionArgs, Error as EthClientError, EthInterface}; +use zksync_l1_contract_interface::pre_boojum_verifier::old_l1_vk_commitment; use zksync_types::{ ethabi::{Contract, Token}, - vk_transform::l1_vk_commitment, web3::{ self, contract::tokens::Detokenize, @@ -126,7 +126,7 @@ impl EthClient for EthHttpQueryClient { let args = CallFunctionArgs::new("get_verification_key", ()) .for_contract(verifier_address, self.verifier_contract_abi.clone()); let vk = self.client.call_contract_function(args).await?; - Ok(l1_vk_commitment(Token::from_tokens(vk)?)) + Ok(old_l1_vk_commitment(Token::from_tokens(vk)?)) } } diff --git a/core/lib/zksync_core/src/house_keeper/fri_proof_compressor_queue_monitor.rs b/core/lib/zksync_core/src/house_keeper/fri_proof_compressor_queue_monitor.rs index 88e1f0f64654..0039b32bc77c 100644 --- a/core/lib/zksync_core/src/house_keeper/fri_proof_compressor_queue_monitor.rs +++ b/core/lib/zksync_core/src/house_keeper/fri_proof_compressor_queue_monitor.rs @@ -1,6 +1,5 @@ use async_trait::async_trait; -use zksync_dal::ConnectionPool; -use zksync_types::proofs::JobCountStatistics; +use zksync_dal::{fri_prover_dal::types::JobCountStatistics, ConnectionPool}; use crate::house_keeper::periodic_job::PeriodicJob; diff --git a/core/lib/zksync_core/src/house_keeper/fri_witness_generator_queue_monitor.rs b/core/lib/zksync_core/src/house_keeper/fri_witness_generator_queue_monitor.rs index f198d27d97b2..cf1cdc90314e 100644 --- a/core/lib/zksync_core/src/house_keeper/fri_witness_generator_queue_monitor.rs +++ b/core/lib/zksync_core/src/house_keeper/fri_witness_generator_queue_monitor.rs @@ -1,8 +1,8 @@ use std::collections::HashMap; use async_trait::async_trait; -use zksync_dal::ConnectionPool; -use zksync_types::proofs::{AggregationRound, JobCountStatistics}; +use zksync_dal::{fri_prover_dal::types::JobCountStatistics, ConnectionPool}; +use zksync_types::basic_fri_types::AggregationRound; use crate::house_keeper::periodic_job::PeriodicJob; diff --git a/core/lib/zksync_core/src/metadata_calculator/helpers.rs b/core/lib/zksync_core/src/metadata_calculator/helpers.rs index 58c51d7c4066..cdb44b67824c 100644 --- a/core/lib/zksync_core/src/metadata_calculator/helpers.rs +++ b/core/lib/zksync_core/src/metadata_calculator/helpers.rs @@ -437,7 +437,8 @@ impl L1BatchWithLogs { mod tests { use tempfile::TempDir; use zksync_dal::ConnectionPool; - use zksync_types::{proofs::PrepareBasicCircuitsJob, L2ChainId, StorageKey, StorageLog}; + use zksync_prover_interface::inputs::PrepareBasicCircuitsJob; + use zksync_types::{L2ChainId, StorageKey, StorageLog}; use super::*; use crate::{ diff --git a/core/lib/zksync_core/src/metadata_calculator/tests.rs b/core/lib/zksync_core/src/metadata_calculator/tests.rs index f06b2a547d76..4f49a052aa36 100644 --- a/core/lib/zksync_core/src/metadata_calculator/tests.rs +++ b/core/lib/zksync_core/src/metadata_calculator/tests.rs @@ -14,9 +14,10 @@ use zksync_dal::{ConnectionPool, StorageProcessor}; use zksync_health_check::{CheckHealth, HealthStatus}; use zksync_merkle_tree::domain::ZkSyncTree; use zksync_object_store::{ObjectStore, ObjectStoreFactory}; +use zksync_prover_interface::inputs::PrepareBasicCircuitsJob; use zksync_types::{ - block::L1BatchHeader, proofs::PrepareBasicCircuitsJob, 
AccountTreeId, Address, L1BatchNumber, - L2ChainId, MiniblockNumber, StorageKey, StorageLog, H256, + block::L1BatchHeader, AccountTreeId, Address, L1BatchNumber, L2ChainId, MiniblockNumber, + StorageKey, StorageLog, H256, }; use zksync_utils::u32_to_h256; diff --git a/core/lib/zksync_core/src/proof_data_handler/mod.rs b/core/lib/zksync_core/src/proof_data_handler/mod.rs index 7a5b8bc69b32..56a48e18cd67 100644 --- a/core/lib/zksync_core/src/proof_data_handler/mod.rs +++ b/core/lib/zksync_core/src/proof_data_handler/mod.rs @@ -9,9 +9,9 @@ use zksync_config::{ }; use zksync_dal::ConnectionPool; use zksync_object_store::ObjectStore; +use zksync_prover_interface::api::{ProofGenerationDataRequest, SubmitProofRequest}; use zksync_types::{ protocol_version::{L1VerifierConfig, VerifierParams}, - prover_server_api::{ProofGenerationDataRequest, SubmitProofRequest}, H256, }; diff --git a/core/lib/zksync_core/src/proof_data_handler/request_processor.rs b/core/lib/zksync_core/src/proof_data_handler/request_processor.rs index bc9873d99ed5..91b2f4124a0d 100644 --- a/core/lib/zksync_core/src/proof_data_handler/request_processor.rs +++ b/core/lib/zksync_core/src/proof_data_handler/request_processor.rs @@ -11,13 +11,13 @@ use zksync_config::configs::{ }; use zksync_dal::{ConnectionPool, SqlxError}; use zksync_object_store::{ObjectStore, ObjectStoreError}; +use zksync_prover_interface::api::{ + ProofGenerationData, ProofGenerationDataRequest, ProofGenerationDataResponse, + SubmitProofRequest, SubmitProofResponse, +}; use zksync_types::{ commitment::serialize_commitments, protocol_version::{FriProtocolVersionId, L1VerifierConfig}, - prover_server_api::{ - ProofGenerationData, ProofGenerationDataRequest, ProofGenerationDataResponse, - SubmitProofRequest, SubmitProofResponse, - }, web3::signing::keccak256, L1BatchNumber, H256, }; diff --git a/prover/Cargo.lock b/prover/Cargo.lock index 4aa17b93d4f0..889e763449b6 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -898,22 +898,6 @@ dependencies = [ "cc", ] -[[package]] -name = "codegen" -version = "0.1.0" -source = "git+https://github.com/matter-labs/solidity_plonk_verifier.git?branch=dev#82f96b7156551087f1c9bfe4f0ea68845b6debfc" -dependencies = [ - "ethereum-types", - "franklin-crypto 0.0.5 (git+https://github.com/matter-labs/franklin-crypto?branch=dev)", - "handlebars", - "hex", - "paste", - "rescue_poseidon 0.4.1 (git+https://github.com/matter-labs/rescue-poseidon)", - "serde", - "serde_derive", - "serde_json", -] - [[package]] name = "codegen" version = "0.2.0" @@ -2240,20 +2224,6 @@ version = "1.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7" -[[package]] -name = "handlebars" -version = "5.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94eae21d01d20dabef65d8eda734d83df6e2dea8166788804be9bd6bc92448fa" -dependencies = [ - "log", - "pest", - "pest_derive", - "serde", - "serde_json", - "thiserror", -] - [[package]] name = "hashbrown" version = "0.12.3" @@ -3627,51 +3597,6 @@ version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" -[[package]] -name = "pest" -version = "2.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f200d8d83c44a45b21764d1916299752ca035d15ecd46faca3e9a2a2bf6ad06" -dependencies = [ - "memchr", - "thiserror", - "ucd-trie", -] - -[[package]] -name = "pest_derive" 
-version = "2.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcd6ab1236bbdb3a49027e920e693192ebfe8913f6d60e294de57463a493cfde" -dependencies = [ - "pest", - "pest_generator", -] - -[[package]] -name = "pest_generator" -version = "2.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a31940305ffc96863a735bef7c7994a00b325a7138fdbc5bda0f1a0476d3275" -dependencies = [ - "pest", - "pest_meta", - "proc-macro2 1.0.76", - "quote 1.0.35", - "syn 2.0.48", -] - -[[package]] -name = "pest_meta" -version = "2.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7ff62f5259e53b78d1af898941cdcdccfae7385cf7d793a6e55de5d05bb4b7d" -dependencies = [ - "once_cell", - "pest", - "sha2 0.10.8", -] - [[package]] name = "petgraph" version = "0.6.4" @@ -6004,12 +5929,6 @@ version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" -[[package]] -name = "ucd-trie" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed646292ffc8188ef8ea4d1e0e0150fb15a5c2e12ad9b8fc191ae7a8a7f3c4b9" - [[package]] name = "uint" version = "0.9.5" @@ -6901,7 +6820,7 @@ source = "git+https://github.com/matter-labs/era-zkevm_test_harness.git?branch=v dependencies = [ "bincode", "circuit_testing", - "codegen 0.2.0", + "codegen", "crossbeam 0.8.4", "derivative", "env_logger 0.9.3", @@ -6928,7 +6847,7 @@ source = "git+https://github.com/matter-labs/era-zkevm_test_harness.git?branch=v dependencies = [ "bincode", "circuit_definitions 0.1.0 (git+https://github.com/matter-labs/era-zkevm_test_harness.git?branch=v1.4.0)", - "codegen 0.2.0", + "codegen", "crossbeam 0.8.4", "derivative", "env_logger 0.9.3", @@ -6980,7 +6899,7 @@ source = "git+https://github.com/matter-labs/era-zkevm_test_harness.git?branch=v dependencies = [ "bincode", "circuit_definitions 0.1.0 (git+https://github.com/matter-labs/era-zkevm_test_harness.git?branch=v1.4.1)", - "codegen 0.2.0", + "codegen", "crossbeam 0.8.4", "curl", "derivative", @@ -7137,6 +7056,7 @@ dependencies = [ "anyhow", "bigdecimal", "bincode", + "chrono", "hex", "itertools 0.10.5", "num 0.4.1", @@ -7240,6 +7160,7 @@ dependencies = [ "zksync_env_config", "zksync_object_store", "zksync_prover_fri_types", + "zksync_prover_interface", "zksync_queued_job_processor", "zksync_types", "zksync_utils", @@ -7331,6 +7252,7 @@ dependencies = [ "zksync_dal", "zksync_env_config", "zksync_object_store", + "zksync_prover_interface", "zksync_types", "zksync_utils", ] @@ -7363,6 +7285,19 @@ dependencies = [ "zksync_utils", ] +[[package]] +name = "zksync_prover_interface" +version = "0.1.0" +dependencies = [ + "chrono", + "serde", + "serde_with", + "strum", + "zkevm_test_harness 1.3.3", + "zksync_object_store", + "zksync_types", +] + [[package]] name = "zksync_queued_job_processor" version = "0.1.0" @@ -7420,7 +7355,6 @@ dependencies = [ "anyhow", "blake2 0.10.6 (registry+https://github.com/rust-lang/crates.io-index)", "chrono", - "codegen 0.1.0", "hex", "num 0.4.1", "num_enum", @@ -7433,7 +7367,6 @@ dependencies = [ "serde_with", "strum", "thiserror", - "zkevm_test_harness 1.3.3", "zksync_basic_types", "zksync_config", "zksync_contracts", @@ -7496,6 +7429,7 @@ dependencies = [ "zksync_object_store", "zksync_prover_fri_types", "zksync_prover_fri_utils", + "zksync_prover_interface", "zksync_queued_job_processor", "zksync_state", "zksync_system_constants", diff --git 
a/prover/proof_fri_compressor/Cargo.toml b/prover/proof_fri_compressor/Cargo.toml index f10dbf3ba5a9..6f5c8cf8ce96 100644 --- a/prover/proof_fri_compressor/Cargo.toml +++ b/prover/proof_fri_compressor/Cargo.toml @@ -13,6 +13,7 @@ zksync_dal = { path = "../../core/lib/dal" } zksync_config = { path = "../../core/lib/config" } zksync_env_config = { path = "../../core/lib/env_config" } zksync_object_store = { path = "../../core/lib/object_store" } +zksync_prover_interface = { path = "../../core/lib/prover_interface" } zksync_utils = { path = "../../core/lib/utils" } prometheus_exporter = { path = "../../core/lib/prometheus_exporter" } zksync_prover_fri_types = { path = "../prover_fri_types" } diff --git a/prover/proof_fri_compressor/src/compressor.rs b/prover/proof_fri_compressor/src/compressor.rs index d224ca60abe3..ecf26cacd4d1 100644 --- a/prover/proof_fri_compressor/src/compressor.rs +++ b/prover/proof_fri_compressor/src/compressor.rs @@ -23,8 +23,9 @@ use zksync_prover_fri_types::{ }, get_current_pod_name, AuxOutputWitnessWrapper, FriProofWrapper, }; +use zksync_prover_interface::outputs::L1BatchProofForL1; use zksync_queued_job_processor::JobProcessor; -use zksync_types::{aggregated_operations::L1BatchProofForL1, L1BatchNumber}; +use zksync_types::L1BatchNumber; use zksync_vk_setup_data_server_fri::{get_recursive_layer_vk_for_circuit_type, get_snark_vk}; use crate::metrics::METRICS; diff --git a/prover/prover_fri/src/gpu_prover_job_processor.rs b/prover/prover_fri/src/gpu_prover_job_processor.rs index f880a296af5e..82b78024a98d 100644 --- a/prover/prover_fri/src/gpu_prover_job_processor.rs +++ b/prover/prover_fri/src/gpu_prover_job_processor.rs @@ -6,7 +6,7 @@ pub mod gpu_prover { use shivini::{gpu_prove_from_external_witness_data, ProverContext}; use tokio::task::JoinHandle; use zksync_config::configs::{fri_prover_group::FriProverGroupConfig, FriProverConfig}; - use zksync_dal::ConnectionPool; + use zksync_dal::{fri_prover_dal::types::SocketAddress, ConnectionPool}; use zksync_env_config::FromEnv; use zksync_object_store::ObjectStore; use zksync_prover_fri_types::{ @@ -26,7 +26,7 @@ pub mod gpu_prover { CircuitWrapper, FriProofWrapper, ProverServiceDataKey, WitnessVectorArtifacts, }; use zksync_queued_job_processor::{async_trait, JobProcessor}; - use zksync_types::{basic_fri_types::CircuitIdRoundTuple, proofs::SocketAddress}; + use zksync_types::basic_fri_types::CircuitIdRoundTuple; use zksync_vk_setup_data_server_fri::{ get_setup_data_for_circuit_type, GoldilocksGpuProverSetupData, }; diff --git a/prover/prover_fri/src/main.rs b/prover/prover_fri/src/main.rs index ab2dfa30a9a0..d867fd5e93c9 100644 --- a/prover/prover_fri/src/main.rs +++ b/prover/prover_fri/src/main.rs @@ -11,7 +11,10 @@ use tokio::{ use zksync_config::configs::{ fri_prover_group::FriProverGroupConfig, FriProverConfig, PostgresConfig, }; -use zksync_dal::ConnectionPool; +use zksync_dal::{ + fri_prover_dal::types::{GpuProverInstanceStatus, SocketAddress}, + ConnectionPool, +}; use zksync_env_config::{ object_store::{ProverObjectStoreConfig, PublicObjectStoreConfig}, FromEnv, @@ -19,10 +22,7 @@ use zksync_env_config::{ use zksync_object_store::{ObjectStore, ObjectStoreFactory}; use zksync_prover_fri_utils::{get_all_circuit_id_round_tuples_for, region_fetcher::get_zone}; use zksync_queued_job_processor::JobProcessor; -use zksync_types::{ - basic_fri_types::CircuitIdRoundTuple, - proofs::{GpuProverInstanceStatus, SocketAddress}, -}; +use zksync_types::basic_fri_types::CircuitIdRoundTuple; use 
zksync_utils::wait_for_tasks::wait_for_tasks; mod gpu_prover_job_processor; diff --git a/prover/prover_fri/src/socket_listener.rs b/prover/prover_fri/src/socket_listener.rs index 0f84ad9587dd..8c564ea13a06 100644 --- a/prover/prover_fri/src/socket_listener.rs +++ b/prover/prover_fri/src/socket_listener.rs @@ -11,10 +11,13 @@ pub mod gpu_socket_listener { net::{TcpListener, TcpStream}, sync::watch, }; - use zksync_dal::ConnectionPool; + use zksync_dal::{ + fri_prover_dal::types::{GpuProverInstanceStatus, SocketAddress}, + ConnectionPool, + }; use zksync_object_store::bincode; use zksync_prover_fri_types::{CircuitWrapper, ProverServiceDataKey, WitnessVectorArtifacts}; - use zksync_types::proofs::{AggregationRound, GpuProverInstanceStatus, SocketAddress}; + use zksync_types::basic_fri_types::AggregationRound; use zksync_vk_setup_data_server_fri::{ get_finalization_hints, get_round_for_recursive_circuit_type, }; diff --git a/prover/prover_fri/src/utils.rs b/prover/prover_fri/src/utils.rs index 37f5eea645be..b111f22605c4 100644 --- a/prover/prover_fri/src/utils.rs +++ b/prover/prover_fri/src/utils.rs @@ -27,7 +27,10 @@ use zksync_prover_fri_types::{ CircuitWrapper, FriProofWrapper, ProverServiceDataKey, WitnessVectorArtifacts, }; use zksync_prover_fri_utils::get_base_layer_circuit_id_for_recursive_layer; -use zksync_types::{basic_fri_types::CircuitIdRoundTuple, proofs::AggregationRound, L1BatchNumber}; +use zksync_types::{ + basic_fri_types::{AggregationRound, CircuitIdRoundTuple}, + L1BatchNumber, +}; use crate::metrics::METRICS; diff --git a/prover/prover_fri/tests/basic_test.rs b/prover/prover_fri/tests/basic_test.rs index 89089ac8249e..ebcc43e93afc 100644 --- a/prover/prover_fri/tests/basic_test.rs +++ b/prover/prover_fri/tests/basic_test.rs @@ -4,10 +4,12 @@ use anyhow::Context as _; use serde::Serialize; use zksync_config::{configs::FriProverConfig, ObjectStoreConfig}; use zksync_env_config::FromEnv; -use zksync_object_store::{bincode, FriCircuitKey, ObjectStoreFactory}; +use zksync_object_store::{bincode, ObjectStoreFactory}; use zksync_prover_fri::prover_job_processor::Prover; -use zksync_prover_fri_types::{CircuitWrapper, ProverJob, ProverServiceDataKey}; -use zksync_types::{proofs::AggregationRound, L1BatchNumber}; +use zksync_prover_fri_types::{ + keys::FriCircuitKey, CircuitWrapper, ProverJob, ProverServiceDataKey, +}; +use zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber}; use zksync_vk_setup_data_server_fri::generate_cpu_base_layer_setup_data; fn compare_serialized<T: Serialize>(expected: &T, actual: &T) {
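Building an object-store key with the type relocated in the keys.rs hunk just below (illustration only, not part of the patch; field values are hypothetical):

use zksync_prover_fri_types::keys::FriCircuitKey;
use zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber};

fn example_key() -> FriCircuitKey {
    FriCircuitKey {
        block_number: L1BatchNumber(1),
        sequence_number: 0,
        circuit_id: 1,
        aggregation_round: AggregationRound::BasicCircuits,
        depth: 0,
    }
}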
diff --git a/prover/prover_fri_gateway/Cargo.toml b/prover/prover_fri_gateway/Cargo.toml index 3e826a2f5c5b..bfe772bef64d 100644 --- a/prover/prover_fri_gateway/Cargo.toml +++ b/prover/prover_fri_gateway/Cargo.toml @@ -11,6 +11,7 @@ zksync_dal = { path = "../../core/lib/dal" } zksync_config = { path = "../../core/lib/config" } zksync_env_config = { path = "../../core/lib/env_config" } zksync_object_store = { path = "../../core/lib/object_store" } +zksync_prover_interface = { path = "../../core/lib/prover_interface" } zksync_utils = { path = "../../core/lib/utils" } prometheus_exporter = { path = "../../core/lib/prometheus_exporter" } vlog = { path = "../../core/lib/vlog" } diff --git a/prover/prover_fri_gateway/src/main.rs b/prover/prover_fri_gateway/src/main.rs index 15329ce955a8..0ab2475d4196 100644 --- a/prover/prover_fri_gateway/src/main.rs +++ b/prover/prover_fri_gateway/src/main.rs @@ -6,7 +6,7 @@ use zksync_config::configs::{FriProverGatewayConfig, PostgresConfig}; use zksync_dal::ConnectionPool; use zksync_env_config::{object_store::ProverObjectStoreConfig, FromEnv}; use zksync_object_store::ObjectStoreFactory; -use zksync_types::prover_server_api::{ProofGenerationDataRequest, SubmitProofRequest}; +use zksync_prover_interface::api::{ProofGenerationDataRequest, SubmitProofRequest}; use zksync_utils::wait_for_tasks::wait_for_tasks; use crate::api_data_fetcher::{PeriodicApiStruct, PROOF_GENERATION_DATA_PATH, SUBMIT_PROOF_PATH}; diff --git a/prover/prover_fri_gateway/src/proof_gen_data_fetcher.rs b/prover/prover_fri_gateway/src/proof_gen_data_fetcher.rs index a25d447ad221..09d322ce940d 100644 --- a/prover/prover_fri_gateway/src/proof_gen_data_fetcher.rs +++ b/prover/prover_fri_gateway/src/proof_gen_data_fetcher.rs @@ -1,5 +1,5 @@ use async_trait::async_trait; -use zksync_types::prover_server_api::{ +use zksync_prover_interface::api::{ ProofGenerationData, ProofGenerationDataRequest, ProofGenerationDataResponse, }; diff --git a/prover/prover_fri_gateway/src/proof_submitter.rs b/prover/prover_fri_gateway/src/proof_submitter.rs index 78c7a6a6d8e7..3af3e81e20fb 100644 --- a/prover/prover_fri_gateway/src/proof_submitter.rs +++ b/prover/prover_fri_gateway/src/proof_submitter.rs @@ -1,9 +1,7 @@ use async_trait::async_trait; use zksync_dal::fri_proof_compressor_dal::ProofCompressionJobStatus; -use zksync_types::{ - prover_server_api::{SubmitProofRequest, SubmitProofResponse}, - L1BatchNumber, -}; +use zksync_prover_interface::api::{SubmitProofRequest, SubmitProofResponse}; +use zksync_types::L1BatchNumber; use crate::api_data_fetcher::{PeriodicApi, PeriodicApiStruct}; diff --git a/prover/prover_fri_types/src/keys.rs b/prover/prover_fri_types/src/keys.rs new file mode 100644 index 000000000000..729db7541788 --- /dev/null +++ b/prover/prover_fri_types/src/keys.rs @@ -0,0 +1,37 @@ +//! Different key types for object store. + +use zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber}; + +/// Storage key for a [`AggregationWrapper`]. +#[derive(Debug, Clone, Copy)] +pub struct AggregationsKey { + pub block_number: L1BatchNumber, + pub circuit_id: u8, + pub depth: u16, +} + +/// Storage key for a [`ClosedFormInputWrapper`]. +#[derive(Debug, Clone, Copy)] +pub struct ClosedFormInputKey { + pub block_number: L1BatchNumber, + pub circuit_id: u8, +} + +/// Storage key for a [`CircuitWrapper`]. +#[derive(Debug, Clone, Copy)] +pub struct FriCircuitKey { + pub block_number: L1BatchNumber, + pub sequence_number: usize, + pub circuit_id: u8, + pub aggregation_round: AggregationRound, + pub depth: u16, +} + +/// Storage key for a [`ZkSyncCircuit`].
+#[derive(Debug, Clone, Copy)]
+pub struct CircuitKey<'a> {
+    pub block_number: L1BatchNumber,
+    pub sequence_number: usize,
+    pub circuit_type: &'a str,
+    pub aggregation_round: AggregationRound,
+}
diff --git a/prover/prover_fri_types/src/lib.rs b/prover/prover_fri_types/src/lib.rs
index c244cb99f5a7..a1572ee2a2c2 100644
--- a/prover/prover_fri_types/src/lib.rs
+++ b/prover/prover_fri_types/src/lib.rs
@@ -11,9 +11,12 @@ use circuit_definitions::{
     zkevm_circuits::scheduler::block_header::BlockAuxilaryOutputWitness,
     ZkSyncDefaultRoundFunction,
 };
-use zksync_object_store::{serialize_using_bincode, Bucket, FriCircuitKey, StoredObject};
-use zksync_types::{proofs::AggregationRound, L1BatchNumber};
+use zksync_object_store::{serialize_using_bincode, Bucket, StoredObject};
+use zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber};
 
+use crate::keys::FriCircuitKey;
+
+pub mod keys;
 pub mod queue;
 
 #[derive(serde::Serialize, serde::Deserialize, Clone)]
diff --git a/prover/prover_fri_utils/src/lib.rs b/prover/prover_fri_utils/src/lib.rs
index 991683b7f9b8..39971555f932 100644
--- a/prover/prover_fri_utils/src/lib.rs
+++ b/prover/prover_fri_utils/src/lib.rs
@@ -1,7 +1,7 @@
 use std::time::Instant;
 
 use zksync_dal::StorageProcessor;
-use zksync_object_store::{FriCircuitKey, ObjectStore};
+use zksync_object_store::ObjectStore;
 use zksync_prover_fri_types::{
     circuit_definitions::{
         circuit_definitions::recursion_layer::{
@@ -9,10 +9,12 @@ use zksync_prover_fri_types::{
         },
         zkevm_circuits::scheduler::aux::BaseLayerCircuitType,
     },
-    get_current_pod_name, CircuitWrapper, ProverJob, ProverServiceDataKey,
+    get_current_pod_name,
+    keys::FriCircuitKey,
+    CircuitWrapper, ProverJob, ProverServiceDataKey,
 };
 use zksync_types::{
-    basic_fri_types::CircuitIdRoundTuple, proofs::AggregationRound,
+    basic_fri_types::{AggregationRound, CircuitIdRoundTuple},
     protocol_version::L1VerifierConfig,
 };
 
diff --git a/prover/prover_fri_utils/src/metrics.rs b/prover/prover_fri_utils/src/metrics.rs
index acb48bacb3e3..b33bcc6d4481 100644
--- a/prover/prover_fri_utils/src/metrics.rs
+++ b/prover/prover_fri_utils/src/metrics.rs
@@ -1,7 +1,7 @@
 use std::time::Duration;
 
 use vise::{Buckets, EncodeLabelSet, EncodeLabelValue, Family, Histogram, Metrics};
-use zksync_types::proofs::AggregationRound;
+use zksync_types::basic_fri_types::AggregationRound;
 
 #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelSet)]
 pub struct CircuitLabels {
diff --git a/prover/prover_fri_utils/src/socket_utils.rs b/prover/prover_fri_utils/src/socket_utils.rs
index c0c5ddcbcb9b..d6d7e80f8cb6 100644
--- a/prover/prover_fri_utils/src/socket_utils.rs
+++ b/prover/prover_fri_utils/src/socket_utils.rs
@@ -4,20 +4,17 @@ use std::{
     time::{Duration, Instant},
 };
 
-use zksync_types::proofs::SocketAddress;
-
 pub fn send_assembly(
     job_id: u32,
     mut serialized: &[u8],
-    address: &SocketAddress,
+    socket_address: &SocketAddr,
 ) -> Result<(Duration, u64), String> {
     tracing::trace!(
         "Sending assembly to {}:{}, job id {{{job_id}}}",
-        address.host,
-        address.port
+        socket_address.ip(),
+        socket_address.port()
     );
 
-    let socket_address = SocketAddr::new(address.host, address.port);
     let started_at = Instant::now();
     let mut error_messages = vec![];
 
diff --git a/prover/vk_setup_data_generator_server_fri/src/lib.rs b/prover/vk_setup_data_generator_server_fri/src/lib.rs
index bd3a8cfcb6fc..b3d286314582 100644
--- a/prover/vk_setup_data_generator_server_fri/src/lib.rs
+++ b/prover/vk_setup_data_generator_server_fri/src/lib.rs
@@ -51,7 +51,7 @@ use zksync_prover_fri_types::{
     },
     ProverServiceDataKey,
 };
-use zksync_types::proofs::AggregationRound;
+use zksync_types::basic_fri_types::AggregationRound;
 
 #[cfg(feature = "gpu")]
 use {shivini::cs::GpuSetup, std::alloc::Global};
diff --git a/prover/vk_setup_data_generator_server_fri/src/main.rs b/prover/vk_setup_data_generator_server_fri/src/main.rs
index 158a4390a967..ec4ef461d653 100644
--- a/prover/vk_setup_data_generator_server_fri/src/main.rs
+++ b/prover/vk_setup_data_generator_server_fri/src/main.rs
@@ -14,7 +14,7 @@ use zksync_prover_fri_types::{
     },
     ProverServiceDataKey,
 };
-use zksync_types::proofs::AggregationRound;
+use zksync_types::basic_fri_types::AggregationRound;
 use zksync_vk_setup_data_server_fri::{
     get_round_for_recursive_circuit_type, save_base_layer_vk, save_finalization_hints,
     save_recursive_layer_vk, save_snark_vk,
diff --git a/prover/vk_setup_data_generator_server_fri/src/setup_data_generator.rs b/prover/vk_setup_data_generator_server_fri/src/setup_data_generator.rs
index 5df4b75b3a65..bd36e8e2b3b9 100644
--- a/prover/vk_setup_data_generator_server_fri/src/setup_data_generator.rs
+++ b/prover/vk_setup_data_generator_server_fri/src/setup_data_generator.rs
@@ -14,7 +14,7 @@ use zksync_prover_fri_types::{
     },
     ProverServiceDataKey,
 };
-use zksync_types::proofs::AggregationRound;
+use zksync_types::basic_fri_types::AggregationRound;
 use zksync_vk_setup_data_server_fri::{
     generate_cpu_base_layer_setup_data, get_finalization_hints,
     get_recursive_layer_vk_for_circuit_type, get_round_for_recursive_circuit_type, save_setup_data,
diff --git a/prover/vk_setup_data_generator_server_fri/src/tests.rs b/prover/vk_setup_data_generator_server_fri/src/tests.rs
index 8c2c6fa9937f..0059a646fd87 100644
--- a/prover/vk_setup_data_generator_server_fri/src/tests.rs
+++ b/prover/vk_setup_data_generator_server_fri/src/tests.rs
@@ -8,7 +8,7 @@ use zksync_prover_fri_types::{
     },
     ProverServiceDataKey,
 };
-use zksync_types::proofs::AggregationRound;
+use zksync_types::basic_fri_types::AggregationRound;
 use zksync_vk_setup_data_server_fri::{
     get_base_layer_vk_for_circuit_type, get_base_path, get_file_path, get_finalization_hints,
     get_recursive_layer_vk_for_circuit_type, get_round_for_recursive_circuit_type,
diff --git a/prover/vk_setup_data_generator_server_fri/src/vk_generator.rs b/prover/vk_setup_data_generator_server_fri/src/vk_generator.rs
index 2b633bc6d08d..ced67af82e62 100644
--- a/prover/vk_setup_data_generator_server_fri/src/vk_generator.rs
+++ b/prover/vk_setup_data_generator_server_fri/src/vk_generator.rs
@@ -14,7 +14,7 @@ use zksync_prover_fri_types::{
     },
     ProverServiceDataKey,
 };
-use zksync_types::proofs::AggregationRound;
+use zksync_types::basic_fri_types::AggregationRound;
 use zksync_vk_setup_data_server_fri::{
     get_round_for_recursive_circuit_type, save_base_layer_vk, save_finalization_hints,
     save_recursive_layer_vk,
diff --git a/prover/witness_generator/Cargo.toml b/prover/witness_generator/Cargo.toml
index b01228cc17c4..5a207138ef95 100644
--- a/prover/witness_generator/Cargo.toml
+++ b/prover/witness_generator/Cargo.toml
@@ -15,6 +15,7 @@
 vise = { git = "https://github.com/matter-labs/vise.git", version = "0.1.0", rev
 zksync_dal = { path = "../../core/lib/dal" }
 zksync_config = { path = "../../core/lib/config" }
+zksync_prover_interface = { path = "../../core/lib/prover_interface" }
 zksync_env_config = { path = "../../core/lib/env_config" }
 zksync_system_constants = { path = "../../core/lib/constants" }
 prometheus_exporter = { path = "../../core/lib/prometheus_exporter" }
diff --git a/prover/witness_generator/src/basic_circuits.rs b/prover/witness_generator/src/basic_circuits.rs
index 62e0098dc793..6d7d15b78613 100644
--- a/prover/witness_generator/src/basic_circuits.rs
+++ b/prover/witness_generator/src/basic_circuits.rs
@@ -20,9 +20,7 @@ use serde::{Deserialize, Serialize};
 use zkevm_test_harness::{geometry_config::get_geometry_config, toolset::GeometryConfig};
 use zksync_config::configs::FriWitnessGeneratorConfig;
 use zksync_dal::{fri_witness_generator_dal::FriWitnessJobStatus, ConnectionPool};
-use zksync_object_store::{
-    Bucket, ClosedFormInputKey, ObjectStore, ObjectStoreFactory, StoredObject,
-};
+use zksync_object_store::{Bucket, ObjectStore, ObjectStoreFactory, StoredObject};
 use zksync_prover_fri_types::{
     circuit_definitions::{
         boojum::{
@@ -33,15 +31,17 @@ use zksync_prover_fri_types::{
             block_header::BlockAuxilaryOutputWitness, input::SchedulerCircuitInstanceWitness,
         },
     },
-    get_current_pod_name, AuxOutputWitnessWrapper,
+    get_current_pod_name,
+    keys::ClosedFormInputKey,
+    AuxOutputWitnessWrapper,
 };
 use zksync_prover_fri_utils::get_recursive_layer_circuit_id_for_base_layer;
+use zksync_prover_interface::inputs::{BasicCircuitWitnessGeneratorInput, PrepareBasicCircuitsJob};
 use zksync_queued_job_processor::JobProcessor;
 use zksync_state::{PostgresStorage, StorageView};
 use zksync_types::{
-    proofs::{AggregationRound, BasicCircuitWitnessGeneratorInput, PrepareBasicCircuitsJob},
-    protocol_version::FriProtocolVersionId,
-    Address, L1BatchNumber, ProtocolVersionId, BOOTLOADER_ADDRESS, H256, U256,
+    basic_fri_types::AggregationRound, protocol_version::FriProtocolVersionId, Address,
+    L1BatchNumber, ProtocolVersionId, BOOTLOADER_ADDRESS, H256, U256,
 };
 use zksync_utils::{bytes_to_chunks, h256_to_u256, u256_to_h256};
 
diff --git a/prover/witness_generator/src/leaf_aggregation.rs b/prover/witness_generator/src/leaf_aggregation.rs
index eb28936085fc..dd2b5805e42d 100644
--- a/prover/witness_generator/src/leaf_aggregation.rs
+++ b/prover/witness_generator/src/leaf_aggregation.rs
@@ -6,8 +6,8 @@ use zkevm_test_harness::witness::recursive_aggregation::{
     compute_leaf_params, create_leaf_witnesses,
 };
 use zksync_config::configs::FriWitnessGeneratorConfig;
-use zksync_dal::ConnectionPool;
-use zksync_object_store::{ClosedFormInputKey, ObjectStore, ObjectStoreFactory};
+use zksync_dal::{fri_prover_dal::types::LeafAggregationJobMetadata, ConnectionPool};
+use zksync_object_store::{ObjectStore, ObjectStoreFactory};
 use zksync_prover_fri_types::{
     circuit_definitions::{
         boojum::field::goldilocks::GoldilocksField,
@@ -21,14 +21,14 @@ use zksync_prover_fri_types::{
         encodings::recursion_request::RecursionQueueSimulator,
         zkevm_circuits::recursion::leaf_layer::input::RecursionLeafParametersWitness,
     },
-    get_current_pod_name, FriProofWrapper,
+    get_current_pod_name,
+    keys::ClosedFormInputKey,
+    FriProofWrapper,
 };
 use zksync_prover_fri_utils::get_recursive_layer_circuit_id_for_base_layer;
 use zksync_queued_job_processor::JobProcessor;
 use zksync_types::{
-    proofs::{AggregationRound, LeafAggregationJobMetadata},
-    protocol_version::FriProtocolVersionId,
-    L1BatchNumber,
+    basic_fri_types::AggregationRound, protocol_version::FriProtocolVersionId, L1BatchNumber,
 };
 use zksync_vk_setup_data_server_fri::{
     get_base_layer_vk_for_circuit_type, get_recursive_layer_vk_for_circuit_type,
diff --git a/prover/witness_generator/src/main.rs b/prover/witness_generator/src/main.rs
index 28f42037ca37..7e92397dd1c3 100644
--- a/prover/witness_generator/src/main.rs
+++ b/prover/witness_generator/src/main.rs
@@ -15,7 +15,7 @@ use zksync_dal::ConnectionPool;
 use zksync_env_config::{object_store::ProverObjectStoreConfig, FromEnv};
 use zksync_object_store::ObjectStoreFactory;
 use zksync_queued_job_processor::JobProcessor;
-use zksync_types::{proofs::AggregationRound, web3::futures::StreamExt};
+use zksync_types::{basic_fri_types::AggregationRound, web3::futures::StreamExt};
 use zksync_utils::wait_for_tasks::wait_for_tasks;
 use zksync_vk_setup_data_server_fri::commitment_utils::get_cached_commitments;
 
diff --git a/prover/witness_generator/src/node_aggregation.rs b/prover/witness_generator/src/node_aggregation.rs
index 5f817dd88865..3c46ed98d50e 100644
--- a/prover/witness_generator/src/node_aggregation.rs
+++ b/prover/witness_generator/src/node_aggregation.rs
@@ -6,8 +6,8 @@ use zkevm_test_harness::witness::recursive_aggregation::{
     compute_node_vk_commitment, create_node_witnesses,
 };
 use zksync_config::configs::FriWitnessGeneratorConfig;
-use zksync_dal::ConnectionPool;
-use zksync_object_store::{AggregationsKey, ObjectStore, ObjectStoreFactory};
+use zksync_dal::{fri_prover_dal::types::NodeAggregationJobMetadata, ConnectionPool};
+use zksync_object_store::{ObjectStore, ObjectStoreFactory};
 use zksync_prover_fri_types::{
     circuit_definitions::{
         boojum::field::goldilocks::GoldilocksField,
@@ -18,13 +18,13 @@ use zksync_prover_fri_types::{
         encodings::recursion_request::RecursionQueueSimulator,
         zkevm_circuits::recursion::leaf_layer::input::RecursionLeafParametersWitness,
     },
-    get_current_pod_name, FriProofWrapper,
+    get_current_pod_name,
+    keys::AggregationsKey,
+    FriProofWrapper,
 };
 use zksync_queued_job_processor::JobProcessor;
 use zksync_types::{
-    proofs::{AggregationRound, NodeAggregationJobMetadata},
-    protocol_version::FriProtocolVersionId,
-    L1BatchNumber,
+    basic_fri_types::AggregationRound, protocol_version::FriProtocolVersionId, L1BatchNumber,
 };
 use zksync_vk_setup_data_server_fri::{
     get_recursive_layer_vk_for_circuit_type, utils::get_leaf_vk_params,
diff --git a/prover/witness_generator/src/precalculated_merkle_paths_provider.rs b/prover/witness_generator/src/precalculated_merkle_paths_provider.rs
index c26efa4cf39e..2cfadc93fc6a 100644
--- a/prover/witness_generator/src/precalculated_merkle_paths_provider.rs
+++ b/prover/witness_generator/src/precalculated_merkle_paths_provider.rs
@@ -3,7 +3,7 @@ use zk_evm::blake2::Blake2s256;
 use zkevm_test_harness::witness::tree::{
     BinaryHasher, BinarySparseStorageTree, EnumeratedBinaryLeaf, LeafQuery, ZkSyncStorageLeaf,
 };
-use zksync_types::proofs::{PrepareBasicCircuitsJob, StorageLogMetadata};
+use zksync_prover_interface::inputs::{PrepareBasicCircuitsJob, StorageLogMetadata};
 
 #[derive(Debug, Clone, PartialEq, Deserialize, Serialize)]
 pub struct PrecalculatedMerklePathsProvider {
diff --git a/prover/witness_generator/src/scheduler.rs b/prover/witness_generator/src/scheduler.rs
index a6aa372b41e0..68e48f832890 100644
--- a/prover/witness_generator/src/scheduler.rs
+++ b/prover/witness_generator/src/scheduler.rs
@@ -4,7 +4,7 @@ use anyhow::Context as _;
 use async_trait::async_trait;
 use zksync_config::configs::FriWitnessGeneratorConfig;
 use zksync_dal::ConnectionPool;
-use zksync_object_store::{FriCircuitKey, ObjectStore, ObjectStoreFactory};
+use zksync_object_store::{ObjectStore, ObjectStoreFactory};
 use zksync_prover_fri_types::{
     circuit_definitions::{
         boojum::{
@@ -18,11 +18,13 @@ use zksync_prover_fri_types::{
         recursion_layer_proof_config,
         zkevm_circuits::scheduler::{input::SchedulerCircuitInstanceWitness, SchedulerConfig},
     },
-    get_current_pod_name, CircuitWrapper, FriProofWrapper,
+    get_current_pod_name,
+    keys::FriCircuitKey,
+    CircuitWrapper, FriProofWrapper,
 };
 use zksync_queued_job_processor::JobProcessor;
 use zksync_types::{
-    proofs::AggregationRound, protocol_version::FriProtocolVersionId, L1BatchNumber,
+    basic_fri_types::AggregationRound, protocol_version::FriProtocolVersionId, L1BatchNumber,
 };
 use zksync_vk_setup_data_server_fri::{
     get_recursive_layer_vk_for_circuit_type, utils::get_leaf_vk_params,
diff --git a/prover/witness_generator/src/tests.rs b/prover/witness_generator/src/tests.rs
index 7fd95a7c7d89..e167c82aba9e 100644
--- a/prover/witness_generator/src/tests.rs
+++ b/prover/witness_generator/src/tests.rs
@@ -2,10 +2,8 @@ use std::iter;
 
 use const_decoder::Decoder::Hex;
 use zkevm_test_harness::witness::tree::{BinarySparseStorageTree, ZkSyncStorageLeaf};
-use zksync_types::{
-    proofs::{PrepareBasicCircuitsJob, StorageLogMetadata},
-    U256,
-};
+use zksync_prover_interface::inputs::{PrepareBasicCircuitsJob, StorageLogMetadata};
+use zksync_types::U256;
 
 use super::precalculated_merkle_paths_provider::PrecalculatedMerklePathsProvider;
 
diff --git a/prover/witness_generator/src/utils.rs b/prover/witness_generator/src/utils.rs
index 63acee195cbd..17e6533344a9 100644
--- a/prover/witness_generator/src/utils.rs
+++ b/prover/witness_generator/src/utils.rs
@@ -4,10 +4,7 @@ use circuit_definitions::{
 };
 use multivm::utils::get_used_bootloader_memory_bytes;
 use zkevm_test_harness::boojum::field::goldilocks::GoldilocksField;
-use zksync_object_store::{
-    serialize_using_bincode, AggregationsKey, Bucket, ClosedFormInputKey, FriCircuitKey,
-    ObjectStore, StoredObject,
-};
+use zksync_object_store::{serialize_using_bincode, Bucket, ObjectStore, StoredObject};
 use zksync_prover_fri_types::{
     circuit_definitions::{
         boojum::{
@@ -22,9 +19,10 @@ use zksync_prover_fri_types::{
         zkevm_circuits::scheduler::input::SchedulerCircuitInstanceWitness,
         ZkSyncDefaultRoundFunction,
     },
+    keys::{AggregationsKey, ClosedFormInputKey, FriCircuitKey},
     CircuitWrapper, FriProofWrapper,
 };
-use zksync_types::{proofs::AggregationRound, L1BatchNumber, ProtocolVersionId, U256};
+use zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber, ProtocolVersionId, U256};
 
 pub fn expand_bootloader_contents(
     packed: &[(usize, U256)],
diff --git a/prover/witness_generator/tests/basic_test.rs b/prover/witness_generator/tests/basic_test.rs
index 16cce19929da..446ee71c9226 100644
--- a/prover/witness_generator/tests/basic_test.rs
+++ b/prover/witness_generator/tests/basic_test.rs
@@ -2,14 +2,15 @@ use std::time::Instant;
 
 use serde::Serialize;
 use zksync_config::ObjectStoreConfig;
+use zksync_dal::fri_prover_dal::types::{LeafAggregationJobMetadata, NodeAggregationJobMetadata};
 use zksync_env_config::FromEnv;
-use zksync_object_store::{AggregationsKey, FriCircuitKey, ObjectStoreFactory};
-use zksync_prover_fri_types::CircuitWrapper;
-use zksync_prover_fri_utils::get_recursive_layer_circuit_id_for_base_layer;
-use zksync_types::{
-    proofs::{AggregationRound, LeafAggregationJobMetadata, NodeAggregationJobMetadata},
-    L1BatchNumber,
+use zksync_object_store::ObjectStoreFactory;
+use zksync_prover_fri_types::{
+    keys::{AggregationsKey, FriCircuitKey},
+    CircuitWrapper,
 };
+use zksync_prover_fri_utils::get_recursive_layer_circuit_id_for_base_layer;
+use zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber};
 use zksync_witness_generator::{
     leaf_aggregation::{prepare_leaf_aggregation_job, LeafAggregationWitnessGenerator},
     node_aggregation,
diff --git a/prover/witness_vector_generator/src/generator.rs b/prover/witness_vector_generator/src/generator.rs
index 0c73a8df9008..cbc2da1f5f39 100644
--- a/prover/witness_vector_generator/src/generator.rs
+++ b/prover/witness_vector_generator/src/generator.rs
@@ -1,4 +1,5 @@
 use std::{
+    net::SocketAddr,
     sync::Arc,
     time::{Duration, Instant},
 };
@@ -7,7 +8,7 @@ use anyhow::Context as _;
 use async_trait::async_trait;
 use tokio::{task::JoinHandle, time::sleep};
 use zksync_config::configs::FriWitnessVectorGeneratorConfig;
-use zksync_dal::ConnectionPool;
+use zksync_dal::{fri_prover_dal::types::GpuProverInstanceStatus, ConnectionPool};
 use zksync_object_store::ObjectStore;
 use zksync_prover_fri_types::{
     circuit_definitions::boojum::field::goldilocks::GoldilocksField, CircuitWrapper, ProverJob,
@@ -17,11 +18,7 @@ use zksync_prover_fri_utils::{
     fetch_next_circuit, get_numeric_circuit_id, socket_utils::send_assembly,
 };
 use zksync_queued_job_processor::JobProcessor;
-use zksync_types::{
-    basic_fri_types::CircuitIdRoundTuple,
-    proofs::{GpuProverInstanceStatus, SocketAddress},
-    protocol_version::L1VerifierConfig,
-};
+use zksync_types::{basic_fri_types::CircuitIdRoundTuple, protocol_version::L1VerifierConfig};
 use zksync_vk_setup_data_server_fri::get_finalization_hints;
 
 use crate::metrics::METRICS;
@@ -155,6 +152,7 @@ impl JobProcessor for WitnessVectorGenerator {
             .await;
 
         if let Some(address) = prover {
+            let address = SocketAddr::from(address);
             tracing::info!(
                 "Found prover after {:?}. Sending witness vector job...",
                 now.elapsed()
@@ -216,7 +214,7 @@ impl JobProcessor for WitnessVectorGenerator {
 async fn handle_send_result(
     result: &Result<(Duration, u64), String>,
     job_id: u32,
-    address: &SocketAddress,
+    address: &SocketAddr,
     pool: &ConnectionPool,
     zone: String,
 ) {
@@ -250,7 +248,11 @@ async fn handle_send_result(
             .await
             .unwrap()
             .fri_gpu_prover_queue_dal()
-            .update_prover_instance_status(address.clone(), GpuProverInstanceStatus::Dead, zone)
+            .update_prover_instance_status(
+                (*address).into(),
+                GpuProverInstanceStatus::Dead,
+                zone,
+            )
             .await;
 
         // mark the job as failed
diff --git a/prover/witness_vector_generator/tests/basic_test.rs b/prover/witness_vector_generator/tests/basic_test.rs
index 648b1ee4d9e6..54898cf94d5d 100644
--- a/prover/witness_vector_generator/tests/basic_test.rs
+++ b/prover/witness_vector_generator/tests/basic_test.rs
@@ -1,7 +1,7 @@
 use std::fs;
 
 use zksync_prover_fri_types::{CircuitWrapper, ProverJob, ProverServiceDataKey};
-use zksync_types::{proofs::AggregationRound, L1BatchNumber};
+use zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber};
 use zksync_witness_vector_generator::generator::WitnessVectorGenerator;
 
 #[test]
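
As a quick illustration for reviewers (not part of the patch): a minimal sketch of how the
relocated types compose after this change. The import paths match the diff above; the `sketch`
function and all concrete values are made up.

use std::net::SocketAddr;

use zksync_prover_fri_types::keys::FriCircuitKey;
use zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber};

fn sketch() {
    // Object-store key for a basic-circuit artifact of batch 1. Both the key
    // struct (now in `zksync_prover_fri_types::keys`) and `AggregationRound`
    // (now in `zksync_types::basic_fri_types`) come from their new homes.
    let _key = FriCircuitKey {
        block_number: L1BatchNumber(1),
        sequence_number: 0,
        circuit_id: 1,
        aggregation_round: AggregationRound::BasicCircuits,
        depth: 0,
    };

    // The hand-rolled `SocketAddress { host, port }` type is gone; callers such
    // as `send_assembly` now take a plain `std::net::SocketAddr`.
    let _addr: SocketAddr = "127.0.0.1:3316".parse().expect("valid socket address");
}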