From df6e1ae6d84773a480864ea6e35c8bd142db938c Mon Sep 17 00:00:00 2001 From: xgreenx Date: Sun, 24 Dec 2023 04:08:14 +0100 Subject: [PATCH 01/28] Move storage traits implementation to the `fuel-core-storage` crate --- Cargo.lock | 17 +- Cargo.toml | 3 + crates/chain-config/Cargo.toml | 2 +- crates/fuel-core/Cargo.toml | 8 +- crates/fuel-core/src/database.rs | 312 +++------ crates/fuel-core/src/database/balances.rs | 478 +------------- crates/fuel-core/src/database/block.rs | 122 ++-- crates/fuel-core/src/database/code_root.rs | 122 ---- crates/fuel-core/src/database/coin.rs | 76 ++- crates/fuel-core/src/database/contracts.rs | 341 +--------- crates/fuel-core/src/database/message.rs | 111 ++-- crates/fuel-core/src/database/metadata.rs | 99 ++- crates/fuel-core/src/database/receipts.rs | 11 - crates/fuel-core/src/database/relayer.rs | 10 - crates/fuel-core/src/database/sealed_block.rs | 12 +- crates/fuel-core/src/database/state.rs | 478 +------------- crates/fuel-core/src/database/storage.rs | 391 ++++------- crates/fuel-core/src/database/transaction.rs | 7 +- crates/fuel-core/src/database/transactions.rs | 165 +++-- crates/fuel-core/src/service.rs | 2 +- .../src/service/adapters/executor.rs | 4 +- crates/fuel-core/src/state.rs | 53 +- .../src/state/in_memory/transaction.rs | 7 +- crates/services/executor/src/executor.rs | 2 +- crates/services/executor/src/ports.rs | 4 +- crates/services/relayer/Cargo.toml | 1 + crates/services/relayer/src/ports.rs | 22 + crates/storage/Cargo.toml | 13 +- crates/storage/src/codec.rs | 49 ++ crates/storage/src/codec/manual.rs | 42 ++ crates/storage/src/codec/postcard.rs | 29 + crates/storage/src/codec/primitive.rs | 102 +++ crates/storage/src/codec/raw.rs | 27 + crates/storage/src/column.rs | 179 ++++++ crates/storage/src/kv_store.rs | 151 +++++ crates/storage/src/lib.rs | 33 +- crates/storage/src/structure.rs | 102 +++ crates/storage/src/structure/plain.rs | 127 ++++ crates/storage/src/structure/sparse.rs | 440 +++++++++++++ 
crates/storage/src/structured_storage.rs | 607 ++++++++++++++++++ .../src/structured_storage/balances.rs | 85 +++ .../storage/src/structured_storage/blocks.rs | 24 + .../storage/src/structured_storage/coins.rs | 24 + .../src/structured_storage/contracts.rs | 88 +++ .../src/structured_storage/merkle_data.rs | 49 ++ .../src/structured_storage/messages.rs | 41 ++ .../src/structured_storage/receipts.rs | 29 + .../src/structured_storage/sealed_block.rs | 24 + .../storage/src/structured_storage/state.rs | 87 +++ .../src/structured_storage/transactions.rs | 24 + crates/storage/src/tables.rs | 114 +++- crates/types/Cargo.toml | 4 +- crates/types/src/blockchain/header.rs | 4 +- crates/types/src/blockchain/primitives.rs | 29 +- crates/types/src/entities/coins/coin.rs | 2 +- crates/types/src/lib.rs | 3 + 56 files changed, 3261 insertions(+), 2131 deletions(-) delete mode 100644 crates/fuel-core/src/database/code_root.rs delete mode 100644 crates/fuel-core/src/database/receipts.rs delete mode 100644 crates/fuel-core/src/database/relayer.rs create mode 100644 crates/storage/src/codec.rs create mode 100644 crates/storage/src/codec/manual.rs create mode 100644 crates/storage/src/codec/postcard.rs create mode 100644 crates/storage/src/codec/primitive.rs create mode 100644 crates/storage/src/codec/raw.rs create mode 100644 crates/storage/src/column.rs create mode 100644 crates/storage/src/structure.rs create mode 100644 crates/storage/src/structure/plain.rs create mode 100644 crates/storage/src/structure/sparse.rs create mode 100644 crates/storage/src/structured_storage.rs create mode 100644 crates/storage/src/structured_storage/balances.rs create mode 100644 crates/storage/src/structured_storage/blocks.rs create mode 100644 crates/storage/src/structured_storage/coins.rs create mode 100644 crates/storage/src/structured_storage/contracts.rs create mode 100644 crates/storage/src/structured_storage/merkle_data.rs create mode 100644 crates/storage/src/structured_storage/messages.rs 
create mode 100644 crates/storage/src/structured_storage/receipts.rs create mode 100644 crates/storage/src/structured_storage/sealed_block.rs create mode 100644 crates/storage/src/structured_storage/state.rs create mode 100644 crates/storage/src/structured_storage/transactions.rs diff --git a/Cargo.lock b/Cargo.lock index fae149e92dd..86852b53eb6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2642,14 +2642,12 @@ dependencies = [ "hyper", "itertools 0.10.5", "mockall", - "postcard", "proptest", "rand", "rocksdb", - "serde", "serde_json", - "strum 0.24.1", - "strum_macros 0.24.3", + "strum 0.25.0", + "strum_macros 0.25.3", "tempfile", "test-case", "test-strategy", @@ -2994,6 +2992,7 @@ dependencies = [ "mockall", "once_cell", "parking_lot", + "rand", "serde", "serde_json", "test-case", @@ -3023,10 +3022,18 @@ version = "0.22.0" dependencies = [ "anyhow", "derive_more", + "enum-iterator", "fuel-core-types", "fuel-vm", + "itertools 0.10.5", "mockall", + "paste", + "postcard", "primitive-types", + "rand", + "serde", + "strum 0.25.0", + "strum_macros 0.25.3", ] [[package]] @@ -3123,8 +3130,10 @@ version = "0.22.0" dependencies = [ "anyhow", "bs58", + "derivative", "derive_more", "fuel-vm", + "rand", "secrecy", "serde", "tai64", diff --git a/Cargo.toml b/Cargo.toml index 94aafb99c48..1b5d4df908f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -85,6 +85,7 @@ async-trait = "0.1" cynic = { version = "2.2.1", features = ["http-reqwest"] } clap = "4.1" derive_more = { version = "0.99" } +enum-iterator = "1.2" hyper = { version = "0.14.26" } primitive-types = { version = "0.12", default-features = false } rand = "0.8" @@ -100,6 +101,8 @@ tracing-attributes = "0.1" tracing-subscriber = "0.3" serde = "1.0" serde_json = "1.0" +strum = "0.25" +strum_macros = "0.25" # enable cookie store to support L7 sticky sessions reqwest = { version = "0.11.16", default-features = false, features = ["rustls-tls", "cookies"] } mockall = "0.11" diff --git a/crates/chain-config/Cargo.toml 
b/crates/chain-config/Cargo.toml index 4fc9d777c18..d5b89a84bed 100644 --- a/crates/chain-config/Cargo.toml +++ b/crates/chain-config/Cargo.toml @@ -17,7 +17,7 @@ fuel-core-storage = { workspace = true } fuel-core-types = { workspace = true, default-features = false, features = ["serde"] } hex = { version = "0.4", features = ["serde"] } itertools = { workspace = true } -postcard = { version = "1.0", features = ["alloc"] } +postcard = { workspace = true, features = ["alloc"] } rand = { workspace = true, optional = true } serde = { workspace = true, features = ["derive", "rc"] } serde_json = { version = "1.0", features = ["raw_value"], optional = true } diff --git a/crates/fuel-core/Cargo.toml b/crates/fuel-core/Cargo.toml index db8c5902570..7a54e142f0f 100644 --- a/crates/fuel-core/Cargo.toml +++ b/crates/fuel-core/Cargo.toml @@ -19,7 +19,7 @@ async-trait = { workspace = true } axum = { workspace = true } clap = { workspace = true, features = ["derive"] } derive_more = { version = "0.99" } -enum-iterator = "1.2" +enum-iterator = { workspace = true } fuel-core-chain-config = { workspace = true } fuel-core-consensus-module = { workspace = true } fuel-core-database = { workspace = true } @@ -41,16 +41,14 @@ futures = { workspace = true } hex = { version = "0.4", features = ["serde"] } hyper = { workspace = true } itertools = { workspace = true } -postcard = { workspace = true, features = ["use-std"] } rand = { workspace = true } rocksdb = { version = "0.21", default-features = false, features = [ "lz4", "multi-threaded-cf", ], optional = true } -serde = { workspace = true, features = ["derive"] } serde_json = { workspace = true, features = ["raw_value"] } -strum = "0.24" -strum_macros = "0.24" +strum = { workspace = true } +strum_macros = { workspace = true } tempfile = { workspace = true, optional = true } thiserror = "1.0" tokio = { workspace = true, features = ["macros", "rt-multi-thread"] } diff --git a/crates/fuel-core/src/database.rs 
b/crates/fuel-core/src/database.rs index d2fb65cfddd..5a05102ba95 100644 --- a/crates/fuel-core/src/database.rs +++ b/crates/fuel-core/src/database.rs @@ -14,15 +14,18 @@ use fuel_core_chain_config::{ use fuel_core_storage::{ iter::IterDirection, kv_store::{ - StorageColumn, + BatchOperations, + KeyValueStore, Value, WriteOperation, }, + structured_storage::StructuredStorage, transactional::{ StorageTransaction, Transactional, }, Error as StorageError, + Mappable, Result as StorageResult, }; use fuel_core_types::{ @@ -34,11 +37,6 @@ use fuel_core_types::{ }, tai64::Tai64, }; -use itertools::Itertools; -use serde::{ - de::DeserializeOwned, - Serialize, -}; use std::{ fmt::{ self, @@ -46,10 +44,8 @@ use std::{ Formatter, }, marker::Send, - ops::Deref, sync::Arc, }; -use strum::EnumCount; pub use fuel_core_database::Error; pub type Result = core::result::Result; @@ -59,20 +55,20 @@ type DatabaseResult = Result; // TODO: Extract `Database` and all belongs into `fuel-core-database`. #[cfg(feature = "rocksdb")] use crate::state::rocks_db::RocksDb; +use fuel_core_storage::{ + codec::Decode, + structure::Structure, + structured_storage::TableWithStructure, +}; #[cfg(feature = "rocksdb")] use std::path::Path; #[cfg(feature = "rocksdb")] use tempfile::TempDir; // Storages implementation -// TODO: Move to separate `database/storage` folder, because it is only implementation of storages traits. mod block; -mod code_root; mod contracts; mod message; -mod receipts; -#[cfg(feature = "relayer")] -mod relayer; mod sealed_block; mod state; @@ -84,97 +80,11 @@ pub mod storage; pub mod transaction; pub mod transactions; -/// Database tables column ids to the corresponding [`fuel_core_storage::Mappable`] table. 
-#[repr(u32)] -#[derive( - Copy, - Clone, - Debug, - strum_macros::EnumCount, - strum_macros::IntoStaticStr, - PartialEq, - Eq, - enum_iterator::Sequence, -)] -pub enum Column { - /// The column id of metadata about the blockchain - Metadata = 0, - /// See [`ContractsRawCode`](fuel_core_storage::tables::ContractsRawCode) - ContractsRawCode = 1, - /// See [`ContractsInfo`](fuel_core_storage::tables::ContractsInfo) - ContractsInfo = 2, - /// See [`ContractsState`](fuel_core_storage::tables::ContractsState) - ContractsState = 3, - /// See [`ContractsLatestUtxo`](fuel_core_storage::tables::ContractsLatestUtxo) - ContractsLatestUtxo = 4, - /// See [`ContractsAssets`](fuel_core_storage::tables::ContractsAssets) - ContractsAssets = 5, - /// See [`Coins`](fuel_core_storage::tables::Coins) - Coins = 6, - /// The column of the table that stores `true` if `owner` owns `Coin` with `coin_id` - OwnedCoins = 7, - /// See [`Transactions`](fuel_core_storage::tables::Transactions) - Transactions = 8, - /// Transaction id to current status - TransactionStatus = 9, - /// The column of the table of all `owner`'s transactions - TransactionsByOwnerBlockIdx = 10, - /// See [`Receipts`](fuel_core_storage::tables::Receipts) - Receipts = 11, - /// See [`FuelBlocks`](fuel_core_storage::tables::FuelBlocks) - FuelBlocks = 12, - /// See [`FuelBlockSecondaryKeyBlockHeights`](storage::FuelBlockSecondaryKeyBlockHeights) - FuelBlockSecondaryKeyBlockHeights = 13, - /// See [`Messages`](fuel_core_storage::tables::Messages) - Messages = 14, - /// The column of the table that stores `true` if `owner` owns `Message` with `message_id` - OwnedMessageIds = 15, - /// See [`SealedBlockConsensus`](fuel_core_storage::tables::SealedBlockConsensus) - FuelBlockConsensus = 16, - /// See [`FuelBlockMerkleData`](storage::FuelBlockMerkleData) - FuelBlockMerkleData = 17, - /// See [`FuelBlockMerkleMetadata`](storage::FuelBlockMerkleMetadata) - FuelBlockMerkleMetadata = 18, - /// Messages that have been spent. 
- /// Existence of a key in this column means that the message has been spent. - /// See [`SpentMessages`](fuel_core_storage::tables::SpentMessages) - SpentMessages = 19, - /// Metadata for the relayer - /// See [`RelayerMetadata`](fuel_core_relayer::ports::RelayerMetadata) - RelayerMetadata = 20, - /// See [`ContractsAssetsMerkleData`](storage::ContractsAssetsMerkleData) - ContractsAssetsMerkleData = 21, - /// See [`ContractsAssetsMerkleMetadata`](storage::ContractsAssetsMerkleMetadata) - ContractsAssetsMerkleMetadata = 22, - /// See [`ContractsStateMerkleData`](storage::ContractsStateMerkleData) - ContractsStateMerkleData = 23, - /// See [`ContractsStateMerkleMetadata`](storage::ContractsStateMerkleMetadata) - ContractsStateMerkleMetadata = 24, -} - -impl Column { - /// The total count of variants in the enum. - pub const COUNT: usize = ::COUNT; - - /// Returns the `usize` representation of the `Column`. - pub fn as_usize(&self) -> usize { - *self as usize - } -} - -impl StorageColumn for Column { - fn name(&self) -> &'static str { - self.into() - } - - fn id(&self) -> u32 { - *self as u32 - } -} +pub type Column = fuel_core_storage::column::Column; #[derive(Clone, Debug)] pub struct Database { - data: DataSource, + data: StructuredStorage, // used for RAII _drop: Arc, } @@ -209,9 +119,12 @@ impl Drop for DropResources { } impl Database { - pub fn new(data_source: DataSource) -> Self { + pub fn new(data_source: D) -> Self + where + D: Into, + { Self { - data: data_source, + data: StructuredStorage::new(data_source.into()), _drop: Default::default(), } } @@ -227,14 +140,14 @@ impl Database { let db = RocksDb::default_open(path, capacity.into()).map_err(Into::::into).context("Failed to open rocksdb, you may need to wipe a pre-existing incompatible db `rm -rf ~/.fuel/db`")?; Ok(Database { - data: Arc::new(db), + data: StructuredStorage::new(Arc::new(db).into()), _drop: Default::default(), }) } pub fn in_memory() -> Self { Self { - data: 
Arc::new(MemoryStore::default()), + data: StructuredStorage::new(Arc::new(MemoryStore::default()).into()), _drop: Default::default(), } } @@ -244,7 +157,7 @@ impl Database { let tmp_dir = TempDir::new().unwrap(); let db = RocksDb::default_open(tmp_dir.path(), None).unwrap(); Self { - data: Arc::new(db), + data: StructuredStorage::new(Arc::new(db).into()), _drop: Arc::new( { move || { @@ -262,189 +175,152 @@ impl Database { } pub fn checkpoint(&self) -> DatabaseResult { - self.data.checkpoint() + self.data.as_ref().checkpoint() } pub fn flush(self) -> DatabaseResult<()> { - self.data.flush() + self.data.as_ref().flush() } } -/// Mutable methods. -// TODO: Add `&mut self` to them. -impl Database { - fn insert, V: Serialize + ?Sized, R: DeserializeOwned>( - &self, - key: K, - column: Column, - value: &V, - ) -> StorageResult> { - let result = self.data.replace( - key.as_ref(), - column, - Arc::new(postcard::to_stdvec(value).map_err(|_| StorageError::Codec)?), - )?; - if let Some(previous) = result { - Ok(Some( - postcard::from_bytes(&previous).map_err(|_| StorageError::Codec)?, - )) - } else { - Ok(None) - } +impl KeyValueStore for DataSource { + type Column = Column; + + fn put(&self, key: &[u8], column: Self::Column, value: Value) -> StorageResult<()> { + self.as_ref().put(key, column, value) } - fn insert_raw, V: AsRef<[u8]>>( + fn replace( &self, - key: K, - column: Column, - value: V, + key: &[u8], + column: Self::Column, + value: Value, ) -> StorageResult> { - self.data - .replace(key.as_ref(), column, Arc::new(value.as_ref().to_vec())) + self.as_ref().replace(key, column, value) } - fn batch_insert, V: Serialize, S>( + fn write( &self, - column: Column, - set: S, - ) -> StorageResult<()> - where - S: Iterator, - { - let set: Vec<_> = set - .map(|(key, value)| { - let value = - postcard::to_stdvec(&value).map_err(|_| StorageError::Codec)?; - - let tuple = ( - key.as_ref().to_vec(), - column, - WriteOperation::Insert(Arc::new(value)), - ); - - Ok::<_, 
StorageError>(tuple) - }) - .try_collect()?; + key: &[u8], + column: Self::Column, + buf: &[u8], + ) -> StorageResult { + self.as_ref().write(key, column, buf) + } - self.data.batch_write(&mut set.into_iter()) + fn take(&self, key: &[u8], column: Self::Column) -> StorageResult> { + self.as_ref().take(key, column) } - fn take( - &self, - key: &[u8], - column: Column, - ) -> StorageResult> { - self.data - .take(key, column)? - .map(|val| postcard::from_bytes(&val).map_err(|_| StorageError::Codec)) - .transpose() + fn delete(&self, key: &[u8], column: Self::Column) -> StorageResult<()> { + self.as_ref().delete(key, column) } - fn take_raw(&self, key: &[u8], column: Column) -> StorageResult> { - self.data.take(key, column) + fn exists(&self, key: &[u8], column: Self::Column) -> StorageResult { + self.as_ref().exists(key, column) } -} -/// Read-only methods. -impl Database { - fn contains_key(&self, key: &[u8], column: Column) -> StorageResult { - self.data.exists(key, column) + fn size_of_value( + &self, + key: &[u8], + column: Self::Column, + ) -> StorageResult> { + self.as_ref().size_of_value(key, column) } - fn size_of_value(&self, key: &[u8], column: Column) -> StorageResult> { - self.data.size_of_value(key, column) + fn get(&self, key: &[u8], column: Self::Column) -> StorageResult> { + self.as_ref().get(key, column) } fn read( &self, key: &[u8], - column: Column, + column: Self::Column, buf: &mut [u8], ) -> StorageResult> { - self.data.read(key, column, buf) - } - - fn read_alloc(&self, key: &[u8], column: Column) -> StorageResult>> { - self.data - .get(key, column) - .map(|value| value.map(|value| value.deref().clone())) + self.as_ref().read(key, column, buf) } +} - fn get( +impl BatchOperations for DataSource { + fn batch_write( &self, - key: &[u8], - column: Column, - ) -> StorageResult> { - self.data - .get(key, column)? 
- .map(|val| postcard::from_bytes(&val).map_err(|_| StorageError::Codec)) - .transpose() + entries: &mut dyn Iterator, Self::Column, WriteOperation)>, + ) -> StorageResult<()> { + self.as_ref().batch_write(entries) } +} - fn iter_all( +/// Read-only methods. +impl Database { + fn iter_all( &self, - column: Column, direction: Option, - ) -> impl Iterator> + '_ + ) -> impl Iterator> + '_ where - K: From>, - V: DeserializeOwned, + M: Mappable + TableWithStructure, + M::Structure: Structure, { - self.iter_all_filtered::, Vec>(column, None, None, direction) + self.iter_all_filtered::, Vec>(None, None, direction) } - fn iter_all_by_prefix( + fn iter_all_by_prefix( &self, - column: Column, prefix: Option

, - ) -> impl Iterator> + '_ + ) -> impl Iterator> + '_ where - K: From>, - V: DeserializeOwned, + M: Mappable + TableWithStructure, + M::Structure: Structure, P: AsRef<[u8]>, { - self.iter_all_filtered::(column, prefix, None, None) + self.iter_all_filtered::(prefix, None, None) } - fn iter_all_by_start( + fn iter_all_by_start( &self, - column: Column, start: Option, direction: Option, - ) -> impl Iterator> + '_ + ) -> impl Iterator> + '_ where - K: From>, - V: DeserializeOwned, + M: Mappable + TableWithStructure, + M::Structure: Structure, S: AsRef<[u8]>, { - self.iter_all_filtered::(column, None, start, direction) + self.iter_all_filtered::(None, start, direction) } - fn iter_all_filtered( + fn iter_all_filtered( &self, - column: Column, prefix: Option

, start: Option, direction: Option, - ) -> impl Iterator> + '_ + ) -> impl Iterator> + '_ where - K: From>, - V: DeserializeOwned, + M: Mappable + TableWithStructure, + M::Structure: Structure, P: AsRef<[u8]>, S: AsRef<[u8]>, { self.data + .as_ref() .iter_all( - column, + M::column(), prefix.as_ref().map(|p| p.as_ref()), start.as_ref().map(|s| s.as_ref()), direction.unwrap_or_default(), ) .map(|val| { val.and_then(|(key, value)| { - let key = K::from(key); - let value: V = - postcard::from_bytes(&value).map_err(|_| StorageError::Codec)?; + let key = + >::KeyCodec::decode( + key.as_slice(), + ) + .map_err(|e| StorageError::Codec(anyhow::anyhow!(e)))?; + let value = + >::ValueCodec::decode( + value.as_slice(), + ) + .map_err(|e| StorageError::Codec(anyhow::anyhow!(e)))?; Ok((key, value)) }) }) diff --git a/crates/fuel-core/src/database/balances.rs b/crates/fuel-core/src/database/balances.rs index 0c92179adf9..84eb0c7f7e3 100644 --- a/crates/fuel-core/src/database/balances.rs +++ b/crates/fuel-core/src/database/balances.rs @@ -1,155 +1,18 @@ -use crate::database::{ - storage::{ - ContractsAssetsMerkleData, - ContractsAssetsMerkleMetadata, - DatabaseColumn, - SparseMerkleMetadata, - }, - Column, - Database, -}; +use crate::database::Database; use fuel_core_storage::{ tables::ContractsAssets, ContractsAssetKey, Error as StorageError, - Mappable, - MerkleRoot, - MerkleRootStorage, - StorageAsMut, - StorageAsRef, - StorageInspect, - StorageMutate, + StorageBatchMutate, }; use fuel_core_types::{ fuel_asm::Word, - fuel_merkle::{ - sparse, - sparse::{ - in_memory, - MerkleTree, - MerkleTreeKey, - }, - }, fuel_types::{ AssetId, ContractId, }, }; use itertools::Itertools; -use std::borrow::{ - BorrowMut, - Cow, -}; - -impl StorageInspect for Database { - type Error = StorageError; - - fn get( - &self, - key: &::Key, - ) -> Result::OwnedValue>>, Self::Error> { - self.get(key.as_ref(), Column::ContractsAssets) - .map_err(Into::into) - } - - fn contains_key( - &self, - key: 
&::Key, - ) -> Result { - self.contains_key(key.as_ref(), Column::ContractsAssets) - .map_err(Into::into) - } -} - -impl StorageMutate for Database { - fn insert( - &mut self, - key: &::Key, - value: &::Value, - ) -> Result::OwnedValue>, Self::Error> { - let prev = Database::insert(self, key.as_ref(), Column::ContractsAssets, value) - .map_err(Into::into); - - // Get latest metadata entry for this contract id - let prev_metadata = self - .storage::() - .get(key.contract_id())? - .unwrap_or_default(); - - let root = prev_metadata.root; - let storage = self.borrow_mut(); - let mut tree: MerkleTree = - MerkleTree::load(storage, &root) - .map_err(|err| StorageError::Other(anyhow::anyhow!("{err:?}")))?; - - // Update the contact's key-value dataset. The key is the asset id and the - // value the Word - tree.update(MerkleTreeKey::new(key), value.to_be_bytes().as_slice()) - .map_err(|err| StorageError::Other(anyhow::anyhow!("{err:?}")))?; - - // Generate new metadata for the updated tree - let root = tree.root(); - let metadata = SparseMerkleMetadata { root }; - self.storage::() - .insert(key.contract_id(), &metadata)?; - - prev - } - - fn remove( - &mut self, - key: &::Key, - ) -> Result::OwnedValue>, Self::Error> { - let prev = Database::take(self, key.as_ref(), Column::ContractsAssets) - .map_err(Into::into); - - // Get latest metadata entry for this contract id - let prev_metadata = self - .storage::() - .get(key.contract_id())?; - - if let Some(prev_metadata) = prev_metadata { - let root = prev_metadata.root; - - // Load the tree saved in metadata - let storage = self.borrow_mut(); - let mut tree: MerkleTree = - MerkleTree::load(storage, &root) - .map_err(|err| StorageError::Other(anyhow::anyhow!("{err:?}")))?; - - // Update the contract's key-value dataset. 
The key is the asset id and - // the value is the Word - tree.delete(MerkleTreeKey::new(key)) - .map_err(|err| StorageError::Other(anyhow::anyhow!("{err:?}")))?; - - let root = tree.root(); - if root == *sparse::empty_sum() { - // The tree is now empty; remove the metadata - self.storage::() - .remove(key.contract_id())?; - } else { - // Generate new metadata for the updated tree - let metadata = SparseMerkleMetadata { root }; - self.storage::() - .insert(key.contract_id(), &metadata)?; - } - } - - prev - } -} - -impl MerkleRootStorage for Database { - fn root(&self, parent: &ContractId) -> Result { - let metadata = self - .storage::() - .get(parent)?; - let root = metadata - .map(|metadata| metadata.root) - .unwrap_or_else(|| in_memory::MerkleTree::new().root()); - Ok(root) - } -} impl Database { /// Initialize the balances of the contract from the all leafs. @@ -162,56 +25,23 @@ impl Database { where S: Iterator, { - if self - .storage::() - .contains_key(contract_id)? - { - return Err( - anyhow::anyhow!("The contract balances is already initialized").into(), - ) - } - - let balances = balances.collect_vec(); - - // Keys and values should be original without any modifications. - // Key is `ContractId` ++ `AssetId` - self.batch_insert( - Column::ContractsAssets, - balances.clone().into_iter().map(|(asset, value)| { - (ContractsAssetKey::new(contract_id, &asset), value) - }), - )?; - - // Merkle data: - // - Asset key should be converted into `MerkleTreeKey` by `new` function that hashes them. - // - The balance value are original. 
- let balances = balances.into_iter().map(|(asset, value)| { - ( - MerkleTreeKey::new(ContractsAssetKey::new(contract_id, &asset)), - value.to_be_bytes(), - ) - }); - let (root, nodes) = in_memory::MerkleTree::nodes_from_set(balances); - self.batch_insert(ContractsAssetsMerkleData::column(), nodes.into_iter())?; - let metadata = SparseMerkleMetadata { root }; - self.storage::() - .insert(contract_id, &metadata)?; - - Ok(()) + let balances = balances + .map(|(asset, balance)| { + (ContractsAssetKey::new(contract_id, &asset), balance) + }) + .collect_vec(); + <_ as StorageBatchMutate>::init_storage( + &mut self.data, + &mut balances.iter().map(|(key, value)| (key, value)), + ) } } #[cfg(test)] mod tests { use super::*; - use fuel_core_storage::{ - StorageAsMut, - StorageAsRef, - }; - use fuel_core_types::fuel_types::{ - AssetId, - Word, - }; + use fuel_core_storage::StorageAsMut; + use fuel_core_types::fuel_types::AssetId; use rand::Rng; fn random_asset_id(rng: &mut R) -> AssetId @@ -223,255 +53,6 @@ mod tests { bytes.into() } - #[test] - fn get() { - let key = (&ContractId::from([1u8; 32]), &AssetId::new([1u8; 32])).into(); - let balance: Word = 100; - - let database = &mut Database::default(); - database - .storage::() - .insert(&key, &balance) - .unwrap(); - - assert_eq!( - database - .storage::() - .get(&key) - .unwrap() - .unwrap() - .into_owned(), - balance - ); - } - - #[test] - fn put() { - let key = (&ContractId::from([1u8; 32]), &AssetId::new([1u8; 32])).into(); - let balance: Word = 100; - - let database = &mut Database::default(); - database - .storage::() - .insert(&key, &balance) - .unwrap(); - - let returned = database - .storage::() - .get(&key) - .unwrap() - .unwrap(); - assert_eq!(*returned, balance); - } - - #[test] - fn remove() { - let key = (&ContractId::from([1u8; 32]), &AssetId::new([1u8; 32])).into(); - let balance: Word = 100; - - let database = &mut Database::default(); - database - .storage::() - .insert(&key, &balance) - .unwrap(); - - 
database.storage::().remove(&key).unwrap(); - - assert!(!database - .storage::() - .contains_key(&key) - .unwrap()); - } - - #[test] - fn exists() { - let key = (&ContractId::from([1u8; 32]), &AssetId::new([1u8; 32])).into(); - let balance: Word = 100; - - let database = &mut Database::default(); - database - .storage::() - .insert(&key, &balance) - .unwrap(); - - assert!(database - .storage::() - .contains_key(&key) - .unwrap()); - } - - #[test] - fn root() { - let key = (&ContractId::from([1u8; 32]), &AssetId::new([1u8; 32])).into(); - let balance: Word = 100; - - let mut database = Database::default(); - - StorageMutate::::insert(&mut database, &key, &balance).unwrap(); - - let root = database - .storage::() - .root(key.contract_id()); - assert!(root.is_ok()) - } - - #[test] - fn root_returns_empty_root_for_invalid_contract() { - let invalid_contract_id = ContractId::from([1u8; 32]); - let database = Database::default(); - let empty_root = in_memory::MerkleTree::new().root(); - let root = database - .storage::() - .root(&invalid_contract_id) - .unwrap(); - assert_eq!(root, empty_root) - } - - #[test] - fn put_updates_the_assets_merkle_root_for_the_given_contract() { - let contract_id = ContractId::from([1u8; 32]); - let database = &mut Database::default(); - - // Write the first contract asset - let asset_id = AssetId::new([1u8; 32]); - let key = (&contract_id, &asset_id).into(); - let balance: Word = 100; - database - .storage::() - .insert(&key, &balance) - .unwrap(); - - // Read the first Merkle root - let root_1 = database - .storage::() - .root(&contract_id) - .unwrap(); - - // Write the second contract asset - let asset_id = AssetId::new([2u8; 32]); - let key = (&contract_id, &asset_id).into(); - let balance: Word = 100; - database - .storage::() - .insert(&key, &balance) - .unwrap(); - - // Read the second Merkle root - let root_2 = database - .storage::() - .root(&contract_id) - .unwrap(); - - assert_ne!(root_1, root_2); - } - - #[test] - fn 
put_creates_merkle_metadata_when_empty() { - let contract_id = ContractId::from([1u8; 32]); - let asset_id = AssetId::new([1u8; 32]); - let key = (&contract_id, &asset_id).into(); - let database = &mut Database::default(); - - // Write a contract asset - let balance: Word = 100; - database - .storage::() - .insert(&key, &balance) - .unwrap(); - - // Read the Merkle metadata - let metadata = database - .storage::() - .get(&contract_id) - .unwrap(); - - assert!(metadata.is_some()); - } - - #[test] - fn remove_updates_the_assets_merkle_root_for_the_given_contract() { - let contract_id = ContractId::from([1u8; 32]); - let database = &mut Database::default(); - - // Write the first contract asset - let asset_id = AssetId::new([1u8; 32]); - let key = (&contract_id, &asset_id).into(); - let balance: Word = 100; - database - .storage::() - .insert(&key, &balance) - .unwrap(); - let root_0 = database - .storage::() - .root(&contract_id) - .unwrap(); - - // Write the second contract asset - let asset_id = AssetId::new([2u8; 32]); - let key = (&contract_id, &asset_id).into(); - let balance: Word = 100; - database - .storage::() - .insert(&key, &balance) - .unwrap(); - - // Read the first Merkle root - let root_1 = database - .storage::() - .root(&contract_id) - .unwrap(); - - // Remove the first contract asset - let asset_id = AssetId::new([2u8; 32]); - let key = (&contract_id, &asset_id).into(); - database.storage::().remove(&key).unwrap(); - - // Read the second Merkle root - let root_2 = database - .storage::() - .root(&contract_id) - .unwrap(); - - assert_ne!(root_1, root_2); - assert_eq!(root_0, root_2); - } - - #[test] - fn updating_foreign_contract_does_not_affect_the_given_contract_insertion() { - let given_contract_id = ContractId::from([1u8; 32]); - let foreign_contract_id = ContractId::from([2u8; 32]); - let database = &mut Database::default(); - - let asset_id = AssetId::new([0u8; 32]); - let balance: Word = 100; - - // Given - let given_contract_key = 
(&given_contract_id, &asset_id).into(); - let foreign_contract_key = (&foreign_contract_id, &asset_id).into(); - database - .storage::() - .insert(&given_contract_key, &balance) - .unwrap(); - - // When - database - .storage::() - .insert(&foreign_contract_key, &balance) - .unwrap(); - database - .storage::() - .remove(&foreign_contract_key) - .unwrap(); - - // Then - let result = database - .storage::() - .insert(&given_contract_key, &balance) - .unwrap(); - - assert!(result.is_some()); - } - #[test] fn init_contract_balances_works() { use rand::{ @@ -526,37 +107,4 @@ mod tests { assert_eq!(seq_value, value); } } - - #[test] - fn remove_deletes_merkle_metadata_when_empty() { - let contract_id = ContractId::from([1u8; 32]); - let asset_id = AssetId::new([1u8; 32]); - let key = (&contract_id, &asset_id).into(); - let database = &mut Database::default(); - - // Write a contract asset - let balance: Word = 100; - database - .storage::() - .insert(&key, &balance) - .unwrap(); - - // Read the Merkle metadata - database - .storage::() - .get(&contract_id) - .unwrap() - .expect("Expected Merkle metadata to be present"); - - // Remove the contract asset - database.storage::().remove(&key).unwrap(); - - // Read the Merkle metadata - let metadata = database - .storage::() - .get(&contract_id) - .unwrap(); - - assert!(metadata.is_none()); - } } diff --git a/crates/fuel-core/src/database/block.rs b/crates/fuel-core/src/database/block.rs index f4fbbe3342d..9677c2d8202 100644 --- a/crates/fuel-core/src/database/block.rs +++ b/crates/fuel-core/src/database/block.rs @@ -1,23 +1,29 @@ use crate::database::{ - storage::{ - DenseMerkleMetadata, - FuelBlockMerkleData, - FuelBlockMerkleMetadata, - FuelBlockSecondaryKeyBlockHeights, - ToDatabaseKey, - }, Column, Database, Error as DatabaseError, }; use fuel_core_storage::{ + basic_storage_tests, + codec::{ + primitive::Primitive, + raw::Raw, + }, iter::IterDirection, not_found, + structure::plain::Plain, + 
structured_storage::TableWithStructure, tables::{ + merkle::{ + DenseMerkleMetadata, + FuelBlockMerkleData, + FuelBlockMerkleMetadata, + }, FuelBlocks, Transactions, }, Error as StorageError, + Mappable, MerkleRootStorage, Result as StorageResult, StorageAsMut, @@ -39,27 +45,47 @@ use fuel_core_types::{ tai64::Tai64, }; use itertools::Itertools; -use std::{ - borrow::{ - BorrowMut, - Cow, - }, - convert::{ - TryFrom, - TryInto, - }, +use std::borrow::{ + BorrowMut, + Cow, }; +/// The table of fuel block's secondary key - `BlockHeight`. +/// It links the `BlockHeight` to corresponding `BlockId`. +pub struct FuelBlockSecondaryKeyBlockHeights; + +impl Mappable for FuelBlockSecondaryKeyBlockHeights { + /// Secondary key - `BlockHeight`. + type Key = BlockHeight; + type OwnedKey = Self::Key; + /// Primary key - `BlockId`. + type Value = BlockId; + type OwnedValue = Self::Value; +} + +impl TableWithStructure for FuelBlockSecondaryKeyBlockHeights { + type Structure = Plain, Raw>; + + fn column() -> Column { + Column::FuelBlockSecondaryKeyBlockHeights + } +} + +basic_storage_tests!( + FuelBlockSecondaryKeyBlockHeights, + ::Key::default(), + ::Value::default() +); + impl StorageInspect for Database { type Error = StorageError; fn get(&self, key: &BlockId) -> Result>, Self::Error> { - Database::get(self, key.as_slice(), Column::FuelBlocks).map_err(Into::into) + self.data.storage::().get(key) } fn contains_key(&self, key: &BlockId) -> Result { - Database::contains_key(self, key.as_slice(), Column::FuelBlocks) - .map_err(Into::into) + self.data.storage::().contains_key(key) } } @@ -69,7 +95,10 @@ impl StorageMutate for Database { key: &BlockId, value: &CompressedBlock, ) -> Result, Self::Error> { - let prev = Database::insert(self, key.as_slice(), Column::FuelBlocks, value)?; + let prev = self + .data + .storage_as_mut::() + .insert(key, value)?; let height = value.header().height(); self.storage::() @@ -77,10 +106,7 @@ impl StorageMutate for Database { // Get latest metadata 
entry let prev_metadata = self - .iter_all::, DenseMerkleMetadata>( - Column::FuelBlockMerkleMetadata, - Some(IterDirection::Reverse), - ) + .iter_all::(Some(IterDirection::Reverse)) .next() .transpose()? .map(|(_, metadata)| metadata) @@ -105,7 +131,7 @@ impl StorageMutate for Database { fn remove(&mut self, key: &BlockId) -> Result, Self::Error> { let prev: Option = - Database::take(self, key.as_slice(), Column::FuelBlocks)?; + self.data.storage_as_mut::().remove(key)?; if let Some(block) = &prev { let height = block.header().height(); @@ -148,12 +174,9 @@ impl Database { } pub fn get_block_id(&self, height: &BlockHeight) -> StorageResult> { - Database::get( - self, - height.database_key().as_ref(), - Column::FuelBlockSecondaryKeyBlockHeights, - ) - .map_err(Into::into) + self.storage::() + .get(height) + .map(|v| v.map(|v| v.into_owned())) } pub fn all_block_ids( @@ -162,48 +185,23 @@ impl Database { direction: IterDirection, ) -> impl Iterator> + '_ { let start = start.map(|b| b.to_bytes()); - self.iter_all_by_start::, BlockId, _>( - Column::FuelBlockSecondaryKeyBlockHeights, + self.iter_all_by_start::( start, Some(direction), ) - .map(|res| { - let (height, id) = res?; - let block_height_bytes: [u8; 4] = height - .as_slice() - .try_into() - .expect("block height always has correct number of bytes"); - Ok((block_height_bytes.into(), id)) - }) } pub fn ids_of_genesis_block(&self) -> StorageResult<(BlockHeight, BlockId)> { - self.iter_all( - Column::FuelBlockSecondaryKeyBlockHeights, - Some(IterDirection::Forward), - ) - .next() - .ok_or(DatabaseError::ChainUninitialized)? - .map(|(height, id): (Vec, BlockId)| { - let bytes = <[u8; 4]>::try_from(height.as_slice()) - .expect("all block heights are stored with the correct amount of bytes"); - (u32::from_be_bytes(bytes).into(), id) - }) + self.iter_all::(Some(IterDirection::Forward)) + .next() + .ok_or(DatabaseError::ChainUninitialized)? 
} pub fn ids_of_latest_block(&self) -> StorageResult> { let ids = self - .iter_all::, BlockId>( - Column::FuelBlockSecondaryKeyBlockHeights, - Some(IterDirection::Reverse), - ) + .iter_all::(Some(IterDirection::Reverse)) .next() - .transpose()? - .map(|(height, block)| { - // safety: we know that all block heights are stored with the correct amount of bytes - let bytes = <[u8; 4]>::try_from(height.as_slice()).unwrap(); - (u32::from_be_bytes(bytes).into(), block) - }); + .transpose()?; Ok(ids) } diff --git a/crates/fuel-core/src/database/code_root.rs b/crates/fuel-core/src/database/code_root.rs deleted file mode 100644 index 7474a85d2aa..00000000000 --- a/crates/fuel-core/src/database/code_root.rs +++ /dev/null @@ -1,122 +0,0 @@ -use crate::database::{ - storage::DatabaseColumn, - Column, -}; -use fuel_core_storage::tables::ContractsInfo; - -impl DatabaseColumn for ContractsInfo { - fn column() -> Column { - Column::ContractsInfo - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::database::Database; - use fuel_core_storage::StorageAsMut; - use fuel_core_types::{ - fuel_types::{ - Bytes32, - ContractId, - Salt, - }, - fuel_vm::Contract, - }; - use rand::{ - rngs::StdRng, - Rng, - SeedableRng, - }; - - #[test] - fn get() { - let rng = &mut StdRng::seed_from_u64(2322u64); - let contract_id: ContractId = ContractId::from([1u8; 32]); - let contract: Contract = Contract::from(vec![32u8]); - let root = contract.root(); - let salt: Salt = rng.gen(); - - let database = &mut Database::default(); - database - .storage::() - .insert(&contract_id, &(salt, root)) - .unwrap(); - - assert_eq!( - database - .storage::() - .get(&contract_id) - .unwrap() - .unwrap() - .into_owned(), - (salt, root) - ); - } - - #[test] - fn put() { - let rng = &mut StdRng::seed_from_u64(2322u64); - let contract_id: ContractId = ContractId::from([1u8; 32]); - let contract: Contract = Contract::from(vec![32u8]); - let root = contract.root(); - let salt: Salt = rng.gen(); - - let database = 
&mut Database::default(); - database - .storage::() - .insert(&contract_id, &(salt, root)) - .unwrap(); - - let returned: (Salt, Bytes32) = *database - .storage::() - .get(&contract_id) - .unwrap() - .unwrap(); - assert_eq!(returned, (salt, root)); - } - - #[test] - fn remove() { - let rng = &mut StdRng::seed_from_u64(2322u64); - let contract_id: ContractId = ContractId::from([1u8; 32]); - let contract: Contract = Contract::from(vec![32u8]); - let root = contract.root(); - let salt: Salt = rng.gen(); - - let database = &mut Database::default(); - database - .storage::() - .insert(&contract_id, &(salt, root)) - .unwrap(); - - database - .storage::() - .remove(&contract_id) - .unwrap(); - - assert!(!database - .contains_key(contract_id.as_ref(), Column::ContractsInfo) - .unwrap()); - } - - #[test] - fn exists() { - let rng = &mut StdRng::seed_from_u64(2322u64); - let contract_id: ContractId = ContractId::from([1u8; 32]); - let contract: Contract = Contract::from(vec![32u8]); - let root = contract.root(); - let salt: Salt = rng.gen(); - - let database = &mut Database::default(); - database - .storage::() - .insert(&contract_id, &(salt, root)) - .unwrap(); - - assert!(database - .storage::() - .contains_key(&contract_id) - .unwrap()); - } -} diff --git a/crates/fuel-core/src/database/coin.rs b/crates/fuel-core/src/database/coin.rs index b56ca30daf3..51ac80deaff 100644 --- a/crates/fuel-core/src/database/coin.rs +++ b/crates/fuel-core/src/database/coin.rs @@ -1,12 +1,19 @@ use crate::database::{ - storage::DatabaseColumn, Column, Database, }; use fuel_core_chain_config::CoinConfig; use fuel_core_storage::{ + basic_storage_tests, + codec::{ + postcard::Postcard, + primitive::utxo_id_to_bytes, + raw::Raw, + }, iter::IterDirection, not_found, + structure::plain::Plain, + structured_storage::TableWithStructure, tables::Coins, Error as StorageError, Mappable, @@ -21,7 +28,6 @@ use fuel_core_types::{ entities::coins::coin::CompressedCoin, fuel_tx::{ Address, - Bytes32, 
UtxoId, }, }; @@ -35,13 +41,6 @@ pub fn owner_coin_id_key(owner: &Address, coin_id: &UtxoId) -> OwnedCoinKey { default } -fn utxo_id_to_bytes(utxo_id: &UtxoId) -> [u8; TxId::LEN + 1] { - let mut default = [0; TxId::LEN + 1]; - default[0..TxId::LEN].copy_from_slice(utxo_id.tx_id().as_ref()); - default[TxId::LEN] = utxo_id.output_index(); - default -} - /// The storage table of owned coin ids. Maps addresses to owned coins. pub struct OwnedCoins; /// The storage key for owned coins: `Address ++ UtxoId` @@ -51,25 +50,41 @@ impl Mappable for OwnedCoins { type Key = Self::OwnedKey; type OwnedKey = OwnedCoinKey; type Value = Self::OwnedValue; - type OwnedValue = bool; + type OwnedValue = (); } -impl DatabaseColumn for OwnedCoins { +impl TableWithStructure for OwnedCoins { + type Structure = Plain; + fn column() -> Column { Column::OwnedCoins } } +#[cfg(test)] +fn generate_key(rng: &mut impl rand::Rng) -> ::Key { + let mut bytes = [0u8; 65]; + rng.fill(bytes.as_mut()); + bytes +} + +basic_storage_tests!( + OwnedCoins, + [0u8; 65], + ::Value::default(), + ::Value::default(), + generate_key +); + impl StorageInspect for Database { type Error = StorageError; fn get(&self, key: &UtxoId) -> Result>, Self::Error> { - Database::get(self, &utxo_id_to_bytes(key), Column::Coins).map_err(Into::into) + self.data.storage::().get(key) } fn contains_key(&self, key: &UtxoId) -> Result { - Database::contains_key(self, &utxo_id_to_bytes(key), Column::Coins) - .map_err(Into::into) + self.data.storage::().contains_key(key) } } @@ -81,16 +96,15 @@ impl StorageMutate for Database { ) -> Result, Self::Error> { let coin_by_owner = owner_coin_id_key(&value.owner, key); // insert primary record - let insert = Database::insert(self, utxo_id_to_bytes(key), Column::Coins, value)?; + let insert = self.data.storage_as_mut::().insert(key, value)?; // insert secondary index by owner self.storage_as_mut::() - .insert(&coin_by_owner, &true)?; + .insert(&coin_by_owner, &())?; Ok(insert) } fn remove(&mut 
self, key: &UtxoId) -> Result, Self::Error> { - let coin: Option = - Database::take(self, &utxo_id_to_bytes(key), Column::Coins)?; + let coin = self.data.storage_as_mut::().remove(key)?; // cleanup secondary index if let Some(coin) = &coin { @@ -109,8 +123,7 @@ impl Database { start_coin: Option, direction: Option, ) -> impl Iterator> + '_ { - self.iter_all_filtered::, bool, _, _>( - Column::OwnedCoins, + self.iter_all_filtered::( Some(*owner), start_coin.map(|b| owner_coin_id_key(owner, &b)), direction, @@ -138,22 +151,19 @@ impl Database { pub fn get_coin_config(&self) -> StorageResult>> { let configs = self - .iter_all::, CompressedCoin>(Column::Coins, None) + .iter_all::(None) .map(|raw_coin| -> StorageResult { - let coin = raw_coin?; - - let byte_id = Bytes32::new(coin.0[..32].try_into()?); - let output_index = coin.0[32]; + let (utxo_id, coin) = raw_coin?; Ok(CoinConfig { - tx_id: Some(byte_id), - output_index: Some(output_index), - tx_pointer_block_height: Some(coin.1.tx_pointer.block_height()), - tx_pointer_tx_idx: Some(coin.1.tx_pointer.tx_index()), - maturity: Some(coin.1.maturity), - owner: coin.1.owner, - amount: coin.1.amount, - asset_id: coin.1.asset_id, + tx_id: Some(*utxo_id.tx_id()), + output_index: Some(utxo_id.output_index()), + tx_pointer_block_height: Some(coin.tx_pointer.block_height()), + tx_pointer_tx_idx: Some(coin.tx_pointer.tx_index()), + maturity: Some(coin.maturity), + owner: coin.owner, + amount: coin.amount, + asset_id: coin.asset_id, }) }) .collect::>>()?; diff --git a/crates/fuel-core/src/database/contracts.rs b/crates/fuel-core/src/database/contracts.rs index 48cbb1a7809..0d56ed2c48c 100644 --- a/crates/fuel-core/src/database/contracts.rs +++ b/crates/fuel-core/src/database/contracts.rs @@ -1,29 +1,20 @@ -use crate::database::{ - storage::DatabaseColumn, - Column, - Database, -}; +use crate::database::Database; use fuel_core_chain_config::ContractConfig; use fuel_core_storage::{ iter::IterDirection, tables::{ + ContractsAssets, 
ContractsInfo, ContractsLatestUtxo, ContractsRawCode, + ContractsState, }, ContractsAssetKey, - Error as StorageError, - Mappable, Result as StorageResult, StorageAsRef, - StorageInspect, - StorageMutate, - StorageRead, - StorageSize, }; use fuel_core_types::{ entities::contract::ContractUtxoInfo, - fuel_tx::Contract, fuel_types::{ AssetId, Bytes32, @@ -31,80 +22,6 @@ use fuel_core_types::{ Word, }, }; -use std::borrow::Cow; - -impl DatabaseColumn for ContractsLatestUtxo { - fn column() -> Column { - Column::ContractsLatestUtxo - } -} - -impl StorageInspect for Database { - type Error = StorageError; - - fn get( - &self, - key: &::Key, - ) -> Result::OwnedValue>>, Self::Error> - { - Ok(self - .read_alloc(key.as_ref(), Column::ContractsRawCode)? - .map(|v| Cow::Owned(Contract::from(v)))) - } - - fn contains_key( - &self, - key: &::Key, - ) -> Result { - self.contains_key(key.as_ref(), Column::ContractsRawCode) - .map_err(Into::into) - } -} - -// # Dev-note: The value of the `ContractsRawCode` has a unique implementation of serialization -// and deserialization. Because the value is a contract byte code represented by bytes, -// we don't use `serde::Deserialization` and `serde::Serialization` for `Vec`, because we don't -// need to store the size of the contract. We store/load raw bytes. 
-impl StorageMutate for Database { - fn insert( - &mut self, - key: &::Key, - value: &::Value, - ) -> Result::OwnedValue>, Self::Error> { - let result = Database::insert_raw(self, key, Column::ContractsRawCode, value)?; - - Ok(result.map(|v| Contract::from(v.as_ref().clone()))) - } - - fn remove( - &mut self, - key: &::Key, - ) -> Result::OwnedValue>, Self::Error> { - let result = Database::take_raw(self, key.as_ref(), Column::ContractsRawCode)?; - - Ok(result.map(|v| Contract::from(v.as_ref().clone()))) - } -} - -impl StorageSize for Database { - fn size_of_value(&self, key: &ContractId) -> Result, Self::Error> { - self.size_of_value(key.as_ref(), Column::ContractsRawCode) - } -} - -impl StorageRead for Database { - fn read( - &self, - key: &ContractId, - buf: &mut [u8], - ) -> Result, Self::Error> { - self.read(key.as_ref(), Column::ContractsRawCode, buf) - } - - fn read_alloc(&self, key: &ContractId) -> Result>, Self::Error> { - self.read_alloc(key.as_ref(), Column::ContractsRawCode) - } -} impl Database { pub fn get_contract_config_by_id( @@ -136,37 +53,25 @@ impl Database { .into_owned(); let state = Some( - self.iter_all_by_prefix::, Bytes32, _>( - Column::ContractsState, - Some(contract_id.as_ref()), - ) - .map(|res| -> StorageResult<(Bytes32, Bytes32)> { - let safe_res = res?; - - // We don't need to store ContractId which is the first 32 bytes of this - // key, as this Vec is already attached to that ContractId - let state_key = Bytes32::new(safe_res.0[32..].try_into()?); - - Ok((state_key, safe_res.1)) - }) - .filter(|val| val.is_ok()) - .collect::>>()?, + self.iter_all_by_prefix::(Some(contract_id.as_ref())) + .map(|res| -> StorageResult<(Bytes32, Bytes32)> { + let (key, value) = res?; + + Ok((*key.state_key(), value)) + }) + .filter(|val| val.is_ok()) + .collect::>>()?, ); let balances = Some( - self.iter_all_by_prefix::, u64, _>( - Column::ContractsAssets, - Some(contract_id.as_ref()), - ) - .map(|res| { - let safe_res = res?; - - let asset_id = 
AssetId::new(safe_res.0[32..].try_into()?); - - Ok((asset_id, safe_res.1)) - }) - .filter(|val| val.is_ok()) - .collect::>>()?, + self.iter_all_by_prefix::(Some(contract_id.as_ref())) + .map(|res| { + let (key, value) = res?; + + Ok((*key.asset_id(), value)) + }) + .filter(|val| val.is_ok()) + .collect::>>()?, ); Ok(ContractConfig { @@ -188,25 +93,19 @@ impl Database { start_asset: Option, direction: Option, ) -> impl Iterator> + '_ { - self.iter_all_filtered::, Word, _, _>( - Column::ContractsAssets, + self.iter_all_filtered::( Some(contract), start_asset.map(|asset_id| ContractsAssetKey::new(&contract, &asset_id)), direction, ) - .map(|res| { - res.map(|(key, balance)| { - (AssetId::new(key[32..].try_into().unwrap()), balance) - }) - }) + .map(|res| res.map(|(key, balance)| (*key.asset_id(), balance))) } pub fn get_contract_config(&self) -> StorageResult>> { let configs = self - .iter_all::, Word>(Column::ContractsRawCode, None) + .iter_all::(None) .map(|raw_contract_id| -> StorageResult { - let contract_id = - ContractId::new(raw_contract_id.unwrap().0[..32].try_into()?); + let contract_id = raw_contract_id?.0; self.get_contract_config_by_id(contract_id) }) .collect::>>()?; @@ -219,60 +118,12 @@ impl Database { mod tests { use super::*; use fuel_core_storage::StorageAsMut; - use fuel_core_types::fuel_tx::{ - Contract, - TxId, - TxPointer, - UtxoId, - }; + use fuel_core_types::fuel_tx::Contract; use rand::{ RngCore, SeedableRng, }; - #[test] - fn raw_code_get() { - let contract_id: ContractId = ContractId::from([1u8; 32]); - let contract: Contract = Contract::from(vec![32u8]); - - let database = &mut Database::default(); - - database - .storage::() - .insert(&contract_id, contract.as_ref()) - .unwrap(); - - assert_eq!( - database - .storage::() - .get(&contract_id) - .unwrap() - .unwrap() - .into_owned(), - contract - ); - } - - #[test] - fn raw_code_put() { - let contract_id: ContractId = ContractId::from([1u8; 32]); - let contract: Contract = 
Contract::from(vec![32u8]); - - let database = &mut Database::default(); - database - .storage::() - .insert(&contract_id, contract.as_ref()) - .unwrap(); - - let returned: Contract = database - .storage::() - .get(&contract_id) - .unwrap() - .unwrap() - .into_owned(); - assert_eq!(returned, contract); - } - #[test] fn raw_code_put_huge_contract() { let rng = &mut rand::rngs::StdRng::seed_from_u64(2322u64); @@ -295,148 +146,4 @@ mod tests { .into_owned(); assert_eq!(returned, contract); } - - #[test] - fn raw_code_remove() { - let contract_id: ContractId = ContractId::from([1u8; 32]); - let contract: Contract = Contract::from(vec![32u8]); - - let database = &mut Database::default(); - database - .storage::() - .insert(&contract_id, contract.as_ref()) - .unwrap(); - - database - .storage::() - .remove(&contract_id) - .unwrap(); - - assert!(!database - .storage::() - .contains_key(&contract_id) - .unwrap()); - } - - #[test] - fn raw_code_exists() { - let contract_id: ContractId = ContractId::from([1u8; 32]); - let contract: Contract = Contract::from(vec![32u8]); - - let database = &mut Database::default(); - database - .storage::() - .insert(&contract_id, contract.as_ref()) - .unwrap(); - - assert!(database - .storage::() - .contains_key(&contract_id) - .unwrap()); - } - - #[test] - fn latest_utxo_get() { - let contract_id: ContractId = ContractId::from([1u8; 32]); - let utxo_id: UtxoId = UtxoId::new(TxId::new([2u8; 32]), 4); - let tx_pointer = TxPointer::new(1.into(), 5); - let utxo_info = ContractUtxoInfo { - utxo_id, - tx_pointer, - }; - let database = &mut Database::default(); - - database - .storage::() - .insert(&contract_id, &utxo_info) - .unwrap(); - - assert_eq!( - database - .storage::() - .get(&contract_id) - .unwrap() - .unwrap() - .into_owned(), - utxo_info - ); - } - - #[test] - fn latest_utxo_put() { - let contract_id: ContractId = ContractId::from([1u8; 32]); - let utxo_id: UtxoId = UtxoId::new(TxId::new([2u8; 32]), 4); - let tx_pointer = 
TxPointer::new(1.into(), 5); - let utxo_info = ContractUtxoInfo { - utxo_id, - tx_pointer, - }; - - let database = &mut Database::default(); - database - .storage::() - .insert(&contract_id, &utxo_info) - .unwrap(); - - let returned: ContractUtxoInfo = database - .storage::() - .get(&contract_id) - .unwrap() - .unwrap() - .into_owned(); - assert_eq!(returned, utxo_info); - } - - #[test] - fn latest_utxo_remove() { - let contract_id: ContractId = ContractId::from([1u8; 32]); - let utxo_id: UtxoId = UtxoId::new(TxId::new([2u8; 32]), 4); - let tx_pointer = TxPointer::new(1.into(), 5); - - let database = &mut Database::default(); - database - .storage::() - .insert( - &contract_id, - &ContractUtxoInfo { - utxo_id, - tx_pointer, - }, - ) - .unwrap(); - - database - .storage::() - .remove(&contract_id) - .unwrap(); - - assert!(!database - .storage::() - .contains_key(&contract_id) - .unwrap()); - } - - #[test] - fn latest_utxo_exists() { - let contract_id: ContractId = ContractId::from([1u8; 32]); - let utxo_id: UtxoId = UtxoId::new(TxId::new([2u8; 32]), 4); - let tx_pointer = TxPointer::new(1.into(), 5); - - let database = &mut Database::default(); - database - .storage::() - .insert( - &contract_id, - &ContractUtxoInfo { - utxo_id, - tx_pointer, - }, - ) - .unwrap(); - - assert!(database - .storage::() - .contains_key(&contract_id) - .unwrap()); - } } diff --git a/crates/fuel-core/src/database/message.rs b/crates/fuel-core/src/database/message.rs index cccbf8abb1c..6c928924994 100644 --- a/crates/fuel-core/src/database/message.rs +++ b/crates/fuel-core/src/database/message.rs @@ -1,17 +1,27 @@ use crate::database::{ - storage::ToDatabaseKey, Column, Database, }; use fuel_core_chain_config::MessageConfig; use fuel_core_storage::{ + codec::{ + manual::Manual, + postcard::Postcard, + Decode, + Encode, + }, iter::IterDirection, + structure::plain::Plain, + structured_storage::TableWithStructure, tables::{ Messages, SpentMessages, }, Error as StorageError, + Mappable, 
Result as StorageResult, + StorageAsMut, + StorageAsRef, StorageInspect, StorageMutate, }; @@ -27,19 +37,50 @@ use std::{ ops::Deref, }; -use super::storage::DatabaseColumn; +fuel_core_types::fuel_vm::double_key!(OwnedMessageKey, Address, address, Nonce, nonce); + +/// The table that stores all messages per owner. +pub struct OwnedMessageIds; + +impl Mappable for OwnedMessageIds { + type Key = OwnedMessageKey; + type OwnedKey = Self::Key; + type Value = (); + type OwnedValue = Self::Value; +} + +impl Encode for Manual { + type Encoder<'a> = Cow<'a, [u8]>; + + fn encode(t: &OwnedMessageKey) -> Self::Encoder<'_> { + Cow::Borrowed(t.as_ref()) + } +} + +impl Decode for Manual { + fn decode(bytes: &[u8]) -> anyhow::Result { + OwnedMessageKey::from_slice(bytes) + .map_err(|_| anyhow::anyhow!("Unable to decode bytes")) + } +} + +impl TableWithStructure for OwnedMessageIds { + type Structure = Plain, Postcard>; + + fn column() -> fuel_core_storage::column::Column { + Column::OwnedMessageIds + } +} impl StorageInspect for Database { type Error = StorageError; fn get(&self, key: &Nonce) -> Result>, Self::Error> { - let key = key.database_key(); - Database::get(self, key.as_ref(), Column::Messages).map_err(Into::into) + self.data.storage::().get(key) } fn contains_key(&self, key: &Nonce) -> Result { - let key = key.database_key(); - Database::contains_key(self, key.as_ref(), Column::Messages).map_err(Into::into) + self.data.storage::().contains_key(key) } } @@ -50,42 +91,28 @@ impl StorageMutate for Database { value: &Message, ) -> Result, Self::Error> { // insert primary record - let result = - Database::insert(self, key.database_key().as_ref(), Column::Messages, value)?; + let result = self.data.storage_as_mut::().insert(key, value)?; // insert secondary record by owner - let _: Option = Database::insert( - self, - owner_msg_id_key(&value.recipient, key), - Column::OwnedMessageIds, - &true, - )?; + self.storage_as_mut::() + .insert(&OwnedMessageKey::new(&value.recipient, 
key), &())?; Ok(result) } fn remove(&mut self, key: &Nonce) -> Result, Self::Error> { let result: Option = - Database::take(self, key.database_key().as_ref(), Column::Messages)?; + self.data.storage_as_mut::().remove(key)?; if let Some(message) = &result { - Database::take::( - self, - &owner_msg_id_key(&message.recipient, key), - Column::OwnedMessageIds, - )?; + self.storage_as_mut::() + .remove(&OwnedMessageKey::new(&message.recipient, key))?; } Ok(result) } } -impl DatabaseColumn for SpentMessages { - fn column() -> Column { - Column::SpentMessages - } -} - impl Database { pub fn owned_message_ids( &self, @@ -93,18 +120,12 @@ impl Database { start_message_id: Option, direction: Option, ) -> impl Iterator> + '_ { - self.iter_all_filtered::, bool, _, _>( - Column::OwnedMessageIds, + self.iter_all_filtered::( Some(*owner), - start_message_id.map(|msg_id| owner_msg_id_key(owner, &msg_id)), + start_message_id.map(|msg_id| OwnedMessageKey::new(owner, &msg_id)), direction, ) - .map(|res| { - res.map(|(key, _)| { - Nonce::try_from(&key[Address::LEN..Address::LEN + Nonce::LEN]) - .expect("key is always {Nonce::LEN} bytes") - }) - }) + .map(|res| res.map(|(key, _)| *key.nonce())) } pub fn all_messages( @@ -113,7 +134,7 @@ impl Database { direction: Option, ) -> impl Iterator> + '_ { let start = start.map(|v| v.deref().to_vec()); - self.iter_all_by_start::, Message, _>(Column::Messages, start, direction) + self.iter_all_by_start::(start, direction) .map(|res| res.map(|(_, message)| message)) } @@ -158,19 +179,9 @@ impl Database { } } -// TODO: Reuse `fuel_vm::storage::double_key` macro. 
-/// Get a Key by chaining Owner + Nonce -fn owner_msg_id_key(owner: &Address, nonce: &Nonce) -> [u8; Address::LEN + Nonce::LEN] { - let mut default = [0u8; Address::LEN + Nonce::LEN]; - default[0..Address::LEN].copy_from_slice(owner.as_ref()); - default[Address::LEN..].copy_from_slice(nonce.as_ref()); - default -} - #[cfg(test)] mod tests { use super::*; - use fuel_core_storage::StorageAsMut; #[test] fn owned_message_ids() { @@ -180,14 +191,14 @@ mod tests { // insert a message with the first id let first_id = 1.into(); let _ = db - .storage::() + .storage_as_mut::() .insert(&first_id, &message) .unwrap(); // insert a message with the second id with the same Owner let second_id = 2.into(); let _ = db - .storage::() + .storage_as_mut::() .insert(&second_id, &message) .unwrap(); @@ -196,7 +207,7 @@ mod tests { assert_eq!(owned_msg_ids.count(), 2); // remove the first message with its given id - let _ = db.storage::().remove(&first_id).unwrap(); + let _ = db.storage_as_mut::().remove(&first_id).unwrap(); // verify that only second ID is left let owned_msg_ids: Vec<_> = db @@ -206,7 +217,7 @@ mod tests { assert_eq!(owned_msg_ids.len(), 1); // remove the second message with its given id - let _ = db.storage::().remove(&second_id).unwrap(); + let _ = db.storage_as_mut::().remove(&second_id).unwrap(); let owned_msg_ids = db.owned_message_ids(&message.recipient, None, None); assert_eq!(owned_msg_ids.count(), 0); } diff --git a/crates/fuel-core/src/database/metadata.rs b/crates/fuel-core/src/database/metadata.rs index 5239e58401e..93cc113a543 100644 --- a/crates/fuel-core/src/database/metadata.rs +++ b/crates/fuel-core/src/database/metadata.rs @@ -1,27 +1,71 @@ -use crate::database::{ - Column, - Database, - Error as DatabaseError, +use crate::{ + database::{ + storage::UseStructuredImplementation, + Column, + Database, + Error as DatabaseError, + }, + state::DataSource, }; use fuel_core_chain_config::ChainConfig; -use fuel_core_storage::Result as StorageResult; +use 
fuel_core_storage::{ + codec::postcard::Postcard, + structure::plain::Plain, + structured_storage::{ + StructuredStorage, + TableWithStructure, + }, + Mappable, + Result as StorageResult, + StorageMutate, +}; + +pub struct MetadataTable(core::marker::PhantomData); + +impl Mappable for MetadataTable +where + V: Clone, +{ + type Key = str; + type OwnedKey = String; + type Value = V; + type OwnedValue = V; +} + +impl TableWithStructure for MetadataTable +where + V: Clone, +{ + type Structure = Plain; + + fn column() -> Column { + Column::Metadata + } +} + +impl UseStructuredImplementation> for StructuredStorage where + V: Clone +{ +} -pub(crate) const DB_VERSION_KEY: &[u8] = b"version"; -pub(crate) const CHAIN_NAME_KEY: &[u8] = b"chain_name"; +pub(crate) const DB_VERSION_KEY: &str = "version"; +pub(crate) const CHAIN_NAME_KEY: &str = "chain_name"; /// Tracks the total number of transactions written to the chain /// It's useful for analyzing TPS or other metrics. -pub(crate) const TX_COUNT: &[u8] = b"total_tx_count"; +pub(crate) const TX_COUNT: &str = "total_tx_count"; /// Can be used to perform migrations in the future. pub(crate) const DB_VERSION: u32 = 0x00; impl Database { /// Ensures the database is initialized and that the database version is correct - pub fn init(&self, config: &ChainConfig) -> StorageResult<()> { + pub fn init(&mut self, config: &ChainConfig) -> StorageResult<()> { + use fuel_core_storage::StorageAsMut; // initialize chain name if not set if self.get_chain_name()?.is_none() { - self.insert(CHAIN_NAME_KEY, Column::Metadata, &config.chain_name) - .and_then(|v: Option| { + self.storage::>() + .insert(CHAIN_NAME_KEY, &config.chain_name) + .and_then(|v| { if v.is_some() { Err(DatabaseError::ChainAlreadyInitialized.into()) } else { @@ -31,7 +75,8 @@ impl Database { } // Ensure the database version is correct - if let Some(version) = self.get::(DB_VERSION_KEY, Column::Metadata)? { + if let Some(version) = self.storage::>().get(DB_VERSION_KEY)? 
{ + let version = version.into_owned(); if version != DB_VERSION { return Err(DatabaseError::InvalidDatabaseVersion { found: version, @@ -39,28 +84,42 @@ impl Database { })? } } else { - let _: Option = - self.insert(DB_VERSION_KEY, Column::Metadata, &DB_VERSION)?; + self.storage::>() + .insert(DB_VERSION_KEY, &DB_VERSION)?; } Ok(()) } pub fn get_chain_name(&self) -> StorageResult> { - self.get(CHAIN_NAME_KEY, Column::Metadata) + use fuel_core_storage::StorageAsRef; + self.storage::>() + .get(CHAIN_NAME_KEY) + .map(|v| v.map(|v| v.into_owned())) } pub fn increase_tx_count(&self, new_txs: u64) -> StorageResult { + use fuel_core_storage::StorageAsRef; // TODO: how should tx count be initialized after regenesis? - let current_tx_count: u64 = - self.get(TX_COUNT, Column::Metadata)?.unwrap_or_default(); + let current_tx_count: u64 = self + .storage::>() + .get(TX_COUNT)? + .unwrap_or_default() + .into_owned(); // Using saturating_add because this value doesn't significantly impact the correctness of execution. 
let new_tx_count = current_tx_count.saturating_add(new_txs); - self.insert::<_, _, u64>(TX_COUNT, Column::Metadata, &new_tx_count)?; + <_ as StorageMutate>>::insert( + // TODO: Workaround to avoid a mutable borrow of self + &mut StructuredStorage::new(self.data.as_ref()), + TX_COUNT, + &new_tx_count, + )?; Ok(new_tx_count) } pub fn get_tx_count(&self) -> StorageResult { - self.get(TX_COUNT, Column::Metadata) - .map(|v| v.unwrap_or_default()) + use fuel_core_storage::StorageAsRef; + self.storage::>() + .get(TX_COUNT) + .map(|v| v.unwrap_or_default().into_owned()) } } diff --git a/crates/fuel-core/src/database/receipts.rs b/crates/fuel-core/src/database/receipts.rs deleted file mode 100644 index 41cdf0df95b..00000000000 --- a/crates/fuel-core/src/database/receipts.rs +++ /dev/null @@ -1,11 +0,0 @@ -use crate::database::{ - storage::DatabaseColumn, - Column, -}; -use fuel_core_storage::tables::Receipts; - -impl DatabaseColumn for Receipts { - fn column() -> Column { - Column::Receipts - } -} diff --git a/crates/fuel-core/src/database/relayer.rs b/crates/fuel-core/src/database/relayer.rs deleted file mode 100644 index 787182c01e3..00000000000 --- a/crates/fuel-core/src/database/relayer.rs +++ /dev/null @@ -1,10 +0,0 @@ -use crate::database::Column; -use fuel_core_relayer::ports::RelayerMetadata; - -use super::storage::DatabaseColumn; - -impl DatabaseColumn for RelayerMetadata { - fn column() -> Column { - Column::RelayerMetadata - } -} diff --git a/crates/fuel-core/src/database/sealed_block.rs b/crates/fuel-core/src/database/sealed_block.rs index 7b9f337fa20..a1cd34fa668 100644 --- a/crates/fuel-core/src/database/sealed_block.rs +++ b/crates/fuel-core/src/database/sealed_block.rs @@ -1,8 +1,4 @@ -use crate::database::{ - storage::DatabaseColumn, - Column, - Database, -}; +use crate::database::Database; use fuel_core_storage::{ not_found, tables::{ @@ -28,12 +24,6 @@ use fuel_core_types::{ }; use std::ops::Range; -impl DatabaseColumn for SealedBlockConsensus { - fn 
column() -> Column { - Column::FuelBlockConsensus - } -} - impl Database { pub fn get_sealed_block_by_id( &self, diff --git a/crates/fuel-core/src/database/state.rs b/crates/fuel-core/src/database/state.rs index d5af5db45d0..53bed4b8e8e 100644 --- a/crates/fuel-core/src/database/state.rs +++ b/crates/fuel-core/src/database/state.rs @@ -1,152 +1,15 @@ -use crate::database::{ - storage::{ - ContractsStateMerkleData, - ContractsStateMerkleMetadata, - DatabaseColumn, - SparseMerkleMetadata, - }, - Column, - Database, -}; +use crate::database::Database; use fuel_core_storage::{ tables::ContractsState, ContractsStateKey, Error as StorageError, - Mappable, - MerkleRoot, - MerkleRootStorage, - StorageAsMut, - StorageAsRef, - StorageInspect, - StorageMutate, + StorageBatchMutate, }; -use fuel_core_types::{ - fuel_merkle::{ - sparse, - sparse::{ - in_memory, - MerkleTree, - MerkleTreeKey, - }, - }, - fuel_types::{ - Bytes32, - ContractId, - }, +use fuel_core_types::fuel_types::{ + Bytes32, + ContractId, }; use itertools::Itertools; -use std::borrow::{ - BorrowMut, - Cow, -}; - -impl StorageInspect for Database { - type Error = StorageError; - - fn get( - &self, - key: &::Key, - ) -> Result::OwnedValue>>, Self::Error> { - self.get(key.as_ref(), Column::ContractsState) - .map_err(Into::into) - } - - fn contains_key( - &self, - key: &::Key, - ) -> Result { - self.contains_key(key.as_ref(), Column::ContractsState) - .map_err(Into::into) - } -} - -impl StorageMutate for Database { - fn insert( - &mut self, - key: &::Key, - value: &::Value, - ) -> Result::OwnedValue>, Self::Error> { - let prev = Database::insert(self, key.as_ref(), Column::ContractsState, value) - .map_err(Into::into); - - // Get latest metadata entry for this contract id - let prev_metadata = self - .storage::() - .get(key.contract_id())? 
- .unwrap_or_default(); - - let root = prev_metadata.root; - let storage = self.borrow_mut(); - let mut tree: MerkleTree = - MerkleTree::load(storage, &root) - .map_err(|err| StorageError::Other(anyhow::anyhow!("{err:?}")))?; - - // Update the contract's key-value dataset. The key is the state key and - // the value is the 32 bytes - tree.update(MerkleTreeKey::new(key), value.as_slice()) - .map_err(|err| StorageError::Other(anyhow::anyhow!("{err:?}")))?; - - // Generate new metadata for the updated tree - let root = tree.root(); - let metadata = SparseMerkleMetadata { root }; - self.storage::() - .insert(key.contract_id(), &metadata)?; - - prev - } - - fn remove( - &mut self, - key: &::Key, - ) -> Result::OwnedValue>, Self::Error> { - let prev = Database::take(self, key.as_ref(), Column::ContractsState) - .map_err(Into::into); - - // Get latest metadata entry for this contract id - let prev_metadata = self - .storage::() - .get(key.contract_id())?; - - if let Some(prev_metadata) = prev_metadata { - let root = prev_metadata.root; - - // Load the tree saved in metadata - let storage = self.borrow_mut(); - let mut tree: MerkleTree = - MerkleTree::load(storage, &root) - .map_err(|err| StorageError::Other(anyhow::anyhow!("{err:?}")))?; - - // Update the contract's key-value dataset. 
The key is the state key and - // the value is the 32 bytes - tree.delete(MerkleTreeKey::new(key)) - .map_err(|err| StorageError::Other(anyhow::anyhow!("{err:?}")))?; - - let root = tree.root(); - if root == *sparse::empty_sum() { - // The tree is now empty; remove the metadata - self.storage::() - .remove(key.contract_id())?; - } else { - // Generate new metadata for the updated tree - let metadata = SparseMerkleMetadata { root }; - self.storage::() - .insert(key.contract_id(), &metadata)?; - } - } - - prev - } -} - -impl MerkleRootStorage for Database { - fn root(&self, parent: &ContractId) -> Result { - let metadata = self.storage::().get(parent)?; - let root = metadata - .map(|metadata| metadata.root) - .unwrap_or_else(|| in_memory::MerkleTree::new().root()); - Ok(root) - } -} impl Database { /// Initialize the state of the contract from all leaves. @@ -159,55 +22,20 @@ impl Database { where S: Iterator, { - let slots = slots.collect_vec(); - - if slots.is_empty() { - return Ok(()) - } - - if self - .storage::() - .contains_key(contract_id)? - { - return Err(anyhow::anyhow!("The contract state is already initialized").into()) - } - - // Keys and values should be original without any modifications. - // Key is `ContractId` ++ `StorageKey` - self.batch_insert( - Column::ContractsState, - slots - .clone() - .into_iter() - .map(|(key, value)| (ContractsStateKey::new(contract_id, &key), value)), - )?; - - // Merkle data: - // - State key should be converted into `MerkleTreeKey` by `new` function that hashes them. - // - The state value are original. 
- let slots = slots.into_iter().map(|(key, value)| { - ( - MerkleTreeKey::new(ContractsStateKey::new(contract_id, &key)), - value, - ) - }); - let (root, nodes) = in_memory::MerkleTree::nodes_from_set(slots); - self.batch_insert(ContractsStateMerkleData::column(), nodes.into_iter())?; - let metadata = SparseMerkleMetadata { root }; - self.storage::() - .insert(contract_id, &metadata)?; - - Ok(()) + let slots = slots + .map(|(key, value)| (ContractsStateKey::new(contract_id, &key), value)) + .collect_vec(); + <_ as StorageBatchMutate>::init_storage( + &mut self.data, + &mut slots.iter().map(|(key, value)| (key, value)), + ) } } #[cfg(test)] mod tests { use super::*; - use fuel_core_storage::{ - StorageAsMut, - StorageAsRef, - }; + use fuel_core_storage::StorageAsMut; use fuel_core_types::fuel_types::Bytes32; use rand::Rng; @@ -220,253 +48,6 @@ mod tests { bytes.into() } - #[test] - fn get() { - let key = (&ContractId::from([1u8; 32]), &Bytes32::from([1u8; 32])).into(); - let stored_value: Bytes32 = Bytes32::from([2u8; 32]); - - let database = &mut Database::default(); - database - .storage::() - .insert(&key, &stored_value) - .unwrap(); - - assert_eq!( - *database - .storage::() - .get(&key) - .unwrap() - .unwrap(), - stored_value - ); - } - - #[test] - fn put() { - let key = (&ContractId::from([1u8; 32]), &Bytes32::from([1u8; 32])).into(); - let stored_value: Bytes32 = Bytes32::from([2u8; 32]); - - let database = &mut Database::default(); - database - .storage::() - .insert(&key, &stored_value) - .unwrap(); - - let returned: Bytes32 = *database - .storage::() - .get(&key) - .unwrap() - .unwrap(); - assert_eq!(returned, stored_value); - } - - #[test] - fn remove() { - let key = (&ContractId::from([1u8; 32]), &Bytes32::from([1u8; 32])).into(); - let stored_value: Bytes32 = Bytes32::from([2u8; 32]); - - let database = &mut Database::default(); - database - .storage::() - .insert(&key, &stored_value) - .unwrap(); - - database.storage::().remove(&key).unwrap(); - - 
assert!(!database - .storage::() - .contains_key(&key) - .unwrap()); - } - - #[test] - fn exists() { - let key = (&ContractId::from([1u8; 32]), &Bytes32::from([1u8; 32])).into(); - let stored_value: Bytes32 = Bytes32::from([2u8; 32]); - - let database = &mut Database::default(); - database - .storage::() - .insert(&key, &stored_value) - .unwrap(); - - assert!(database - .storage::() - .contains_key(&key) - .unwrap()); - } - - #[test] - fn root() { - let key = (&ContractId::from([1u8; 32]), &Bytes32::from([1u8; 32])).into(); - let stored_value: Bytes32 = Bytes32::from([2u8; 32]); - - let mut database = Database::default(); - - StorageMutate::::insert(&mut database, &key, &stored_value) - .unwrap(); - - let root = database.storage::().root(key.contract_id()); - assert!(root.is_ok()) - } - - #[test] - fn root_returns_empty_root_for_invalid_contract() { - let invalid_contract_id = ContractId::from([1u8; 32]); - let database = Database::default(); - let empty_root = in_memory::MerkleTree::new().root(); - let root = database - .storage::() - .root(&invalid_contract_id) - .unwrap(); - assert_eq!(root, empty_root) - } - - #[test] - fn put_updates_the_state_merkle_root_for_the_given_contract() { - let contract_id = ContractId::from([1u8; 32]); - let database = &mut Database::default(); - - // Write the first contract state - let state_key = Bytes32::from([1u8; 32]); - let state = Bytes32::from([0xff; 32]); - let key = (&contract_id, &state_key).into(); - database - .storage::() - .insert(&key, &state) - .unwrap(); - - // Read the first Merkle root - let root_1 = database - .storage::() - .root(&contract_id) - .unwrap(); - - // Write the second contract state - let state_key = Bytes32::from([2u8; 32]); - let state = Bytes32::from([0xff; 32]); - let key = (&contract_id, &state_key).into(); - database - .storage::() - .insert(&key, &state) - .unwrap(); - - // Read the second Merkle root - let root_2 = database - .storage::() - .root(&contract_id) - .unwrap(); - - 
assert_ne!(root_1, root_2); - } - - #[test] - fn put_creates_merkle_metadata_when_empty() { - let contract_id = ContractId::from([1u8; 32]); - let state_key = Bytes32::new([1u8; 32]); - let state = Bytes32::from([0xff; 32]); - let key = (&contract_id, &state_key).into(); - let database = &mut Database::default(); - - // Write a contract state - database - .storage::() - .insert(&key, &state) - .unwrap(); - - // Read the Merkle metadata - let metadata = database - .storage::() - .get(&contract_id) - .unwrap(); - - assert!(metadata.is_some()); - } - - #[test] - fn remove_updates_the_state_merkle_root_for_the_given_contract() { - let contract_id = ContractId::from([1u8; 32]); - let database = &mut Database::default(); - - // Write the first contract state - let state_key = Bytes32::new([1u8; 32]); - let state = Bytes32::from([0xff; 32]); - let key = (&contract_id, &state_key).into(); - database - .storage::() - .insert(&key, &state) - .unwrap(); - let root_0 = database - .storage::() - .root(&contract_id) - .unwrap(); - - // Write the second contract state - let state_key = Bytes32::new([2u8; 32]); - let state = Bytes32::from([0xff; 32]); - let key = (&contract_id, &state_key).into(); - database - .storage::() - .insert(&key, &state) - .unwrap(); - - // Read the first Merkle root - let root_1 = database - .storage::() - .root(&contract_id) - .unwrap(); - - // Remove the first contract state - let state_key = Bytes32::new([2u8; 32]); - let key = (&contract_id, &state_key).into(); - database.storage::().remove(&key).unwrap(); - - // Read the second Merkle root - let root_2 = database - .storage::() - .root(&contract_id) - .unwrap(); - - assert_ne!(root_1, root_2); - assert_eq!(root_0, root_2); - } - - #[test] - fn updating_foreign_contract_does_not_affect_the_given_contract_insertion() { - let given_contract_id = ContractId::from([1u8; 32]); - let foreign_contract_id = ContractId::from([2u8; 32]); - let database = &mut Database::default(); - - let state_key = 
Bytes32::new([1u8; 32]); - let state_value = Bytes32::from([0xff; 32]); - - // Given - let given_contract_key = (&given_contract_id, &state_key).into(); - let foreign_contract_key = (&foreign_contract_id, &state_key).into(); - database - .storage::() - .insert(&given_contract_key, &state_value) - .unwrap(); - - // When - database - .storage::() - .insert(&foreign_contract_key, &state_value) - .unwrap(); - database - .storage::() - .remove(&foreign_contract_key) - .unwrap(); - - // Then - let result = database - .storage::() - .insert(&given_contract_key, &state_value) - .unwrap(); - - assert!(result.is_some()); - } - #[test] fn init_contract_state_works() { use rand::{ @@ -520,37 +101,4 @@ mod tests { assert_eq!(seq_value, value); } } - - #[test] - fn remove_deletes_merkle_metadata_when_empty() { - let contract_id = ContractId::from([1u8; 32]); - let state_key = Bytes32::new([1u8; 32]); - let state = Bytes32::from([0xff; 32]); - let key = (&contract_id, &state_key).into(); - let database = &mut Database::default(); - - // Write a contract state - database - .storage::() - .insert(&key, &state) - .unwrap(); - - // Read the Merkle metadata - database - .storage::() - .get(&contract_id) - .unwrap() - .expect("Expected Merkle metadata to be present"); - - // Remove the contract asset - database.storage::().remove(&key).unwrap(); - - // Read the Merkle metadata - let metadata = database - .storage::() - .get(&contract_id) - .unwrap(); - - assert!(metadata.is_none()); - } } diff --git a/crates/fuel-core/src/database/storage.rs b/crates/fuel-core/src/database/storage.rs index 6ceab3a776b..19414196753 100644 --- a/crates/fuel-core/src/database/storage.rs +++ b/crates/fuel-core/src/database/storage.rs @@ -1,312 +1,159 @@ -use crate::database::{ - Column, - Database, +use crate::{ + database::{ + block::FuelBlockSecondaryKeyBlockHeights, + coin::OwnedCoins, + message::OwnedMessageIds, + transactions::{ + OwnedTransactions, + TransactionStatuses, + }, + Database, + }, + 
state::DataSource, }; use fuel_core_storage::{ + structured_storage::StructuredStorage, + tables::{ + merkle::{ + ContractsAssetsMerkleData, + ContractsAssetsMerkleMetadata, + ContractsStateMerkleData, + ContractsStateMerkleMetadata, + FuelBlockMerkleData, + FuelBlockMerkleMetadata, + }, + ContractsAssets, + ContractsInfo, + ContractsLatestUtxo, + ContractsRawCode, + ContractsState, + Receipts, + SealedBlockConsensus, + SpentMessages, + Transactions, + }, Error as StorageError, Mappable, MerkleRoot, + MerkleRootStorage, Result as StorageResult, + StorageAsMut, + StorageAsRef, StorageInspect, StorageMutate, + StorageRead, + StorageSize, }; -use fuel_core_types::{ - blockchain::primitives::BlockId, - fuel_merkle::{ - binary, - sparse, - }, - fuel_tx::TxId, - fuel_types::{ - BlockHeight, - ContractId, - Nonce, - }, -}; -use serde::{ - de::DeserializeOwned, - Serialize, -}; -use std::{ - borrow::Cow, - ops::Deref, -}; - -/// Metadata for dense Merkle trees -#[derive(Clone, serde::Serialize, serde::Deserialize)] -pub struct DenseMerkleMetadata { - /// The root hash of the dense Merkle tree structure - pub root: MerkleRoot, - /// The version of the dense Merkle tree structure is equal to the number of - /// leaves. Every time we append a new leaf to the Merkle tree data set, we - /// increment the version number. 
- pub version: u64, -} - -impl Default for DenseMerkleMetadata { - fn default() -> Self { - let empty_merkle_tree = binary::root_calculator::MerkleRootCalculator::new(); - Self { - root: empty_merkle_tree.root(), - version: 0, - } - } -} - -/// Metadata for sparse Merkle trees -#[derive(Clone, serde::Serialize, serde::Deserialize)] -pub struct SparseMerkleMetadata { - /// The root hash of the sparse Merkle tree structure - pub root: MerkleRoot, -} - -impl Default for SparseMerkleMetadata { - fn default() -> Self { - let empty_merkle_tree = sparse::in_memory::MerkleTree::new(); - Self { - root: empty_merkle_tree.root(), - } - } -} - -/// The table of fuel block's secondary key - `BlockHeight`. -/// It links the `BlockHeight` to corresponding `BlockId`. -pub struct FuelBlockSecondaryKeyBlockHeights; - -impl Mappable for FuelBlockSecondaryKeyBlockHeights { - /// Secondary key - `BlockHeight`. - type Key = BlockHeight; - type OwnedKey = Self::Key; - /// Primary key - `BlockId`. - type Value = BlockId; - type OwnedValue = Self::Value; -} - -/// The table of BMT data for Fuel blocks. -pub struct FuelBlockMerkleData; - -impl Mappable for FuelBlockMerkleData { - type Key = u64; - type OwnedKey = Self::Key; - type Value = binary::Primitive; - type OwnedValue = Self::Value; -} - -/// The metadata table for [`FuelBlockMerkleData`](FuelBlockMerkleData) table. -pub struct FuelBlockMerkleMetadata; - -impl Mappable for FuelBlockMerkleMetadata { - type Key = BlockHeight; - type OwnedKey = Self::Key; - type Value = DenseMerkleMetadata; - type OwnedValue = Self::Value; -} - -/// The table of SMT data for Contract assets. 
-pub struct ContractsAssetsMerkleData; - -impl Mappable for ContractsAssetsMerkleData { - type Key = [u8; 32]; - type OwnedKey = Self::Key; - type Value = sparse::Primitive; - type OwnedValue = Self::Value; -} - -/// The metadata table for [`ContractsAssetsMerkleData`](ContractsAssetsMerkleData) table -pub struct ContractsAssetsMerkleMetadata; - -impl Mappable for ContractsAssetsMerkleMetadata { - type Key = ContractId; - type OwnedKey = Self::Key; - type Value = SparseMerkleMetadata; - type OwnedValue = Self::Value; -} - -/// The table of SMT data for Contract state. -pub struct ContractsStateMerkleData; - -impl Mappable for ContractsStateMerkleData { - type Key = [u8; 32]; - type OwnedKey = Self::Key; - type Value = sparse::Primitive; - type OwnedValue = Self::Value; -} - -/// The metadata table for [`ContractsStateMerkleData`](ContractsStateMerkleData) table -pub struct ContractsStateMerkleMetadata; - -impl Mappable for ContractsStateMerkleMetadata { - type Key = ContractId; - type OwnedKey = Self::Key; - type Value = SparseMerkleMetadata; - type OwnedValue = Self::Value; -} - -/// The table has a corresponding column in the database. -/// -/// Using this trait allows the configured mappable type to have its' -/// database integration auto-implemented for single column interactions. -/// -/// If the mappable type requires access to multiple columns or custom logic during setting/getting -/// then its' storage interfaces should be manually implemented and this trait should be avoided. -pub trait DatabaseColumn { - /// The column of the table. 
- fn column() -> Column; -} - -impl DatabaseColumn for FuelBlockSecondaryKeyBlockHeights { - fn column() -> Column { - Column::FuelBlockSecondaryKeyBlockHeights - } -} - -impl DatabaseColumn for FuelBlockMerkleData { - fn column() -> Column { - Column::FuelBlockMerkleData - } -} +use std::borrow::Cow; -impl DatabaseColumn for FuelBlockMerkleMetadata { - fn column() -> Column { - Column::FuelBlockMerkleMetadata - } -} - -impl DatabaseColumn for ContractsAssetsMerkleData { - fn column() -> Column { - Column::ContractsAssetsMerkleData - } -} - -impl DatabaseColumn for ContractsAssetsMerkleMetadata { - fn column() -> Column { - Column::ContractsAssetsMerkleMetadata - } -} - -impl DatabaseColumn for ContractsStateMerkleData { - fn column() -> Column { - Column::ContractsStateMerkleData - } -} - -impl DatabaseColumn for ContractsStateMerkleMetadata { - fn column() -> Column { - Column::ContractsStateMerkleMetadata - } +pub trait UseStructuredImplementation +where + M: Mappable, +{ } -impl StorageInspect for Database +macro_rules! 
use_structured_implementation { + ($($m:ty),*) => { + $( + impl UseStructuredImplementation<$m> for StructuredStorage {} + )* + }; +} + +use_structured_implementation!( + ContractsRawCode, + ContractsAssets, + ContractsState, + ContractsLatestUtxo, + ContractsInfo, + SpentMessages, + SealedBlockConsensus, + Transactions, + Receipts, + ContractsStateMerkleMetadata, + ContractsStateMerkleData, + ContractsAssetsMerkleMetadata, + ContractsAssetsMerkleData, + OwnedCoins, + OwnedMessageIds, + OwnedTransactions, + TransactionStatuses, + FuelBlockSecondaryKeyBlockHeights, + FuelBlockMerkleData, + FuelBlockMerkleMetadata +); +#[cfg(feature = "relayer")] +use_structured_implementation!(fuel_core_relayer::ports::RelayerMetadata); + +impl StorageInspect for Database where - T: Mappable + DatabaseColumn, - T::Key: ToDatabaseKey, - T::OwnedValue: DeserializeOwned, + M: Mappable, + StructuredStorage: + StorageInspect + UseStructuredImplementation, { type Error = StorageError; - fn get(&self, key: &T::Key) -> StorageResult>> { - self.get(key.database_key().as_ref(), T::column()) - .map_err(Into::into) + fn get(&self, key: &M::Key) -> StorageResult>> { + self.data.storage::().get(key) } - fn contains_key(&self, key: &T::Key) -> StorageResult { - self.contains_key(key.database_key().as_ref(), T::column()) - .map_err(Into::into) + fn contains_key(&self, key: &M::Key) -> StorageResult { + self.data.storage::().contains_key(key) } } -impl StorageMutate for Database +impl StorageMutate for Database where - T: Mappable + DatabaseColumn, - T::Key: ToDatabaseKey, - T::Value: Serialize, - T::OwnedValue: DeserializeOwned, + M: Mappable, + StructuredStorage: + StorageMutate + UseStructuredImplementation, { fn insert( &mut self, - key: &T::Key, - value: &T::Value, - ) -> StorageResult> { - Database::insert(self, key.database_key().as_ref(), T::column(), &value) - .map_err(Into::into) + key: &M::Key, + value: &M::Value, + ) -> StorageResult> { + self.data.storage_as_mut::().insert(key, value) } 
- fn remove(&mut self, key: &T::Key) -> StorageResult> { - Database::take(self, key.database_key().as_ref(), T::column()).map_err(Into::into) + fn remove(&mut self, key: &M::Key) -> StorageResult> { + self.data.storage_as_mut::().remove(key) } } -/// Some keys requires pre-processing that could change their type. -pub trait ToDatabaseKey { - /// A new type of prepared database key that can be converted into bytes. - type Type<'a>: AsRef<[u8]> - where - Self: 'a; - - /// Coverts the key into database key that supports byte presentation. - fn database_key(&self) -> Self::Type<'_>; -} - -impl ToDatabaseKey for BlockHeight { - type Type<'a> = [u8; 4]; - - fn database_key(&self) -> Self::Type<'_> { - self.to_bytes() - } -} - -impl ToDatabaseKey for u64 { - type Type<'a> = [u8; 8]; - - fn database_key(&self) -> Self::Type<'_> { - self.to_be_bytes() - } -} - -impl ToDatabaseKey for Nonce { - type Type<'a> = &'a [u8; 32]; - - fn database_key(&self) -> Self::Type<'_> { - self.deref() - } -} - -impl ToDatabaseKey for ContractId { - type Type<'a> = &'a [u8; 32]; - - fn database_key(&self) -> Self::Type<'_> { - self.deref() - } -} - -impl ToDatabaseKey for BlockId { - type Type<'a> = &'a [u8]; - - fn database_key(&self) -> Self::Type<'_> { - self.as_slice() +impl MerkleRootStorage for Database +where + M: Mappable, + StructuredStorage: + MerkleRootStorage + UseStructuredImplementation, +{ + fn root(&self, key: &Key) -> StorageResult { + self.data.storage::().root(key) } } -impl ToDatabaseKey for TxId { - type Type<'a> = &'a [u8; 32]; - - fn database_key(&self) -> Self::Type<'_> { - self.deref() +impl StorageSize for Database +where + M: Mappable, + StructuredStorage: + StorageSize + UseStructuredImplementation, +{ + fn size_of_value(&self, key: &M::Key) -> StorageResult> { + <_ as StorageSize>::size_of_value(&self.data, key) } } -impl ToDatabaseKey for () { - type Type<'a> = &'a [u8]; - - fn database_key(&self) -> Self::Type<'_> { - &[] +impl StorageRead for Database +where + 
M: Mappable, + StructuredStorage: + StorageRead + UseStructuredImplementation, +{ + fn read(&self, key: &M::Key, buf: &mut [u8]) -> StorageResult> { + self.data.storage::().read(key, buf) } -} - -impl ToDatabaseKey for [u8; N] { - type Type<'a> = &'a [u8]; - fn database_key(&self) -> Self::Type<'_> { - self.as_slice() + fn read_alloc(&self, key: &M::Key) -> StorageResult>> { + self.data.storage::().read_alloc(key) } } diff --git a/crates/fuel-core/src/database/transaction.rs b/crates/fuel-core/src/database/transaction.rs index 2f8829ab406..ec3f3de67df 100644 --- a/crates/fuel-core/src/database/transaction.rs +++ b/crates/fuel-core/src/database/transaction.rs @@ -64,13 +64,10 @@ impl Transaction for DatabaseTransaction { impl From<&Database> for DatabaseTransaction { fn from(source: &Database) -> Self { - let data = Arc::new(MemoryTransactionView::new(source.data.clone())); + let data = Arc::new(MemoryTransactionView::new(source.data.as_ref().clone())); Self { changes: data.clone(), - database: Database { - data, - _drop: Default::default(), - }, + database: Database::new(data), } } } diff --git a/crates/fuel-core/src/database/transactions.rs b/crates/fuel-core/src/database/transactions.rs index e41e84b7ece..a0362ecb27e 100644 --- a/crates/fuel-core/src/database/transactions.rs +++ b/crates/fuel-core/src/database/transactions.rs @@ -1,11 +1,21 @@ use crate::database::{ - storage::DatabaseColumn, Column, Database, }; +use core::mem::size_of; use fuel_core_storage::{ + codec::{ + manual::Manual, + postcard::Postcard, + raw::Raw, + Decode, + Encode, + }, iter::IterDirection, + structure::plain::Plain, + structured_storage::TableWithStructure, tables::Transactions, + Mappable, Result as StorageResult, }; use fuel_core_types::{ @@ -21,17 +31,65 @@ use fuel_core_types::{ }, services::txpool::TransactionStatus, }; -use std::{ - mem::size_of, - ops::Deref, -}; +use std::array::TryFromSliceError; + +pub struct OwnedTransactions; + +impl Mappable for OwnedTransactions { + 
type Key = OwnedTransactionIndexKey; + type OwnedKey = Self::Key; + type Value = Bytes32; + type OwnedValue = Self::Value; +} + +impl TableWithStructure for OwnedTransactions { + type Structure = Plain, Raw>; -impl DatabaseColumn for Transactions { fn column() -> Column { - Column::Transactions + Column::TransactionsByOwnerBlockIdx } } +#[cfg(test)] +fn generate_key(rng: &mut impl rand::Rng) -> ::Key { + let mut bytes = [0u8; INDEX_SIZE]; + rng.fill(bytes.as_mut()); + bytes.into() +} + +fuel_core_storage::basic_storage_tests!( + OwnedTransactions, + [1u8; INDEX_SIZE].into(), + ::Value::default(), + ::Value::default(), + generate_key +); + +pub struct TransactionStatuses; + +impl Mappable for TransactionStatuses { + type Key = Bytes32; + type OwnedKey = Self::Key; + type Value = TransactionStatus; + type OwnedValue = Self::Value; +} + +impl TableWithStructure for TransactionStatuses { + type Structure = Plain; + + fn column() -> Column { + Column::TransactionStatus + } +} + +fuel_core_storage::basic_storage_tests!( + TransactionStatuses, + ::Key::default(), + TransactionStatus::Submitted { + time: fuel_core_types::tai64::Tai64::UNIX_EPOCH, + } +); + impl Database { pub fn all_transactions( &self, @@ -39,12 +97,8 @@ impl Database { direction: Option, ) -> impl Iterator> + '_ { let start = start.map(|b| b.as_ref().to_vec()); - self.iter_all_by_start::, Transaction, _>( - Column::Transactions, - start, - direction, - ) - .map(|res| res.map(|(_, tx)| tx)) + self.iter_all_by_start::(start, direction) + .map(|res| res.map(|(_, tx)| tx)) } /// Iterates over a KV mapping of `[address + block height + tx idx] => transaction id`. 
This @@ -59,44 +113,45 @@ impl Database { ) -> impl Iterator> + '_ { let start = start .map(|cursor| owned_tx_index_key(&owner, cursor.block_height, cursor.tx_idx)); - self.iter_all_filtered::( - Column::TransactionsByOwnerBlockIdx, - Some(owner), - start, - direction, - ) - .map(|res| { - res.map(|(key, tx_id)| (TxPointer::new(key.block_height, key.tx_idx), tx_id)) - }) + self.iter_all_filtered::(Some(owner), start, direction) + .map(|res| { + res.map(|(key, tx_id)| { + (TxPointer::new(key.block_height, key.tx_idx), tx_id) + }) + }) } pub fn record_tx_id_owner( - &self, + &mut self, owner: &Address, block_height: BlockHeight, tx_idx: TransactionIndex, tx_id: &Bytes32, ) -> StorageResult> { - self.insert( - owned_tx_index_key(owner, block_height, tx_idx), - Column::TransactionsByOwnerBlockIdx, + use fuel_core_storage::StorageAsMut; + self.storage::().insert( + &OwnedTransactionIndexKey::new(owner, block_height, tx_idx), tx_id, ) } pub fn update_tx_status( - &self, + &mut self, id: &Bytes32, status: TransactionStatus, ) -> StorageResult> { - self.insert(id, Column::TransactionStatus, &status) + use fuel_core_storage::StorageAsMut; + self.storage::().insert(id, &status) } pub fn get_tx_status( &self, id: &Bytes32, ) -> StorageResult> { - self.get(&id.deref()[..], Column::TransactionStatus) + use fuel_core_storage::StorageAsRef; + self.storage::() + .get(id) + .map(|v| v.map(|v| v.into_owned())) } } @@ -123,30 +178,68 @@ fn owned_tx_index_key( pub type TransactionIndex = u16; +#[derive(Clone)] pub struct OwnedTransactionIndexKey { + owner: Address, block_height: BlockHeight, tx_idx: TransactionIndex, } -impl From for OwnedTransactionIndexKey -where - T: AsRef<[u8]>, -{ - fn from(bytes: T) -> Self { +impl OwnedTransactionIndexKey { + pub fn new( + owner: &Address, + block_height: BlockHeight, + tx_idx: TransactionIndex, + ) -> Self { + Self { + owner: *owner, + block_height, + tx_idx, + } + } +} + +impl From<[u8; INDEX_SIZE]> for OwnedTransactionIndexKey { + fn 
from(bytes: [u8; INDEX_SIZE]) -> Self { + let owner: [u8; 32] = bytes[..32].try_into().expect("It's an array of 32 bytes"); // the first 32 bytes are the owner, which is already known when querying let mut block_height_bytes: [u8; 4] = Default::default(); - block_height_bytes.copy_from_slice(&bytes.as_ref()[32..36]); + block_height_bytes.copy_from_slice(&bytes[32..36]); let mut tx_idx_bytes: [u8; 2] = Default::default(); tx_idx_bytes.copy_from_slice(&bytes.as_ref()[36..38]); Self { - // owner: Address::from(owner_bytes), + owner: Address::from(owner), block_height: u32::from_be_bytes(block_height_bytes).into(), tx_idx: u16::from_be_bytes(tx_idx_bytes), } } } +impl TryFrom<&[u8]> for OwnedTransactionIndexKey { + type Error = TryFromSliceError; + + fn try_from(bytes: &[u8]) -> Result { + let bytes: [u8; INDEX_SIZE] = bytes.try_into()?; + Ok(Self::from(bytes)) + } +} + +impl Encode for Manual { + type Encoder<'a> = [u8; INDEX_SIZE]; + + fn encode(t: &OwnedTransactionIndexKey) -> Self::Encoder<'_> { + owned_tx_index_key(&t.owner, t.block_height, t.tx_idx) + } +} + +impl Decode for Manual { + fn decode(bytes: &[u8]) -> anyhow::Result { + OwnedTransactionIndexKey::try_from(bytes) + .map_err(|_| anyhow::anyhow!("Unable to decode bytes")) + } +} + #[derive(Clone, Debug, PartialOrd, Eq, PartialEq)] pub struct OwnedTransactionIndexCursor { pub block_height: BlockHeight, diff --git a/crates/fuel-core/src/service.rs b/crates/fuel-core/src/service.rs index a6497bfc4a7..fa643a3b108 100644 --- a/crates/fuel-core/src/service.rs +++ b/crates/fuel-core/src/service.rs @@ -184,7 +184,7 @@ pub struct Task { impl Task { /// Private inner method for initializing the fuel service task - pub fn new(database: Database, config: Config) -> anyhow::Result { + pub fn new(mut database: Database, config: Config) -> anyhow::Result { // initialize state tracing::info!("Initializing database"); database.init(&config.chain_conf)?; diff --git a/crates/fuel-core/src/service/adapters/executor.rs 
b/crates/fuel-core/src/service/adapters/executor.rs index bb6f27083f3..6eb22419509 100644 --- a/crates/fuel-core/src/service/adapters/executor.rs +++ b/crates/fuel-core/src/service/adapters/executor.rs @@ -93,7 +93,7 @@ impl fuel_core_executor::ports::TxIdOwnerRecorder for Database { type Error = StorageError; fn record_tx_id_owner( - &self, + &mut self, owner: &Address, block_height: BlockHeight, tx_idx: u16, @@ -103,7 +103,7 @@ impl fuel_core_executor::ports::TxIdOwnerRecorder for Database { } fn update_tx_status( - &self, + &mut self, id: &Bytes32, status: TransactionStatus, ) -> Result, Self::Error> { diff --git a/crates/fuel-core/src/state.rs b/crates/fuel-core/src/state.rs index 49ca2b7a73a..73f5cee37b2 100644 --- a/crates/fuel-core/src/state.rs +++ b/crates/fuel-core/src/state.rs @@ -1,8 +1,14 @@ -use crate::database::{ - Column, - Database, - Error as DatabaseError, - Result as DatabaseResult, +use crate::{ + database::{ + Column, + Database, + Error as DatabaseError, + Result as DatabaseResult, + }, + state::in_memory::{ + memory_store::MemoryStore, + transaction::MemoryTransactionView, + }, }; use fuel_core_storage::{ iter::{ @@ -16,7 +22,42 @@ use std::{ sync::Arc, }; -pub type DataSource = Arc>; +type DataSourceInner = Arc>; +#[derive(Clone, Debug)] +pub struct DataSource(DataSourceInner); + +impl From> for DataSource { + fn from(inner: Arc) -> Self { + Self(inner) + } +} + +#[cfg(feature = "rocksdb")] +impl From> for DataSource { + fn from(inner: Arc) -> Self { + Self(inner) + } +} + +impl From> for DataSource { + fn from(inner: Arc) -> Self { + Self(inner) + } +} + +impl core::ops::Deref for DataSource { + type Target = DataSourceInner; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl core::ops::DerefMut for DataSource { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} pub trait TransactableStorage: IteratorableStore + BatchOperations + Debug + Send + Sync diff --git 
a/crates/fuel-core/src/state/in_memory/transaction.rs b/crates/fuel-core/src/state/in_memory/transaction.rs index e249a3b5c78..7dcb96d8273 100644 --- a/crates/fuel-core/src/state/in_memory/transaction.rs +++ b/crates/fuel-core/src/state/in_memory/transaction.rs @@ -50,11 +50,14 @@ pub struct MemoryTransactionView { } impl MemoryTransactionView { - pub fn new(source: DataSource) -> Self { + pub fn new(source: D) -> Self + where + D: Into, + { Self { view_layer: MemoryStore::default(), changes: Default::default(), - data_source: source, + data_source: source.into(), } } diff --git a/crates/services/executor/src/executor.rs b/crates/services/executor/src/executor.rs index 15706793410..a336e4c8538 100644 --- a/crates/services/executor/src/executor.rs +++ b/crates/services/executor/src/executor.rs @@ -1650,7 +1650,7 @@ where fn persist_transaction_status( &self, result: &ExecutionResult, - db: &D, + db: &mut D, ) -> ExecutorResult<()> { let time = result.block.header().time(); let block_id = result.block.id(); diff --git a/crates/services/executor/src/ports.rs b/crates/services/executor/src/ports.rs index 0c4c32a1deb..96395a56670 100644 --- a/crates/services/executor/src/ports.rs +++ b/crates/services/executor/src/ports.rs @@ -93,7 +93,7 @@ pub trait TxIdOwnerRecorder { type Error; fn record_tx_id_owner( - &self, + &mut self, owner: &Address, block_height: BlockHeight, tx_idx: u16, @@ -101,7 +101,7 @@ pub trait TxIdOwnerRecorder { ) -> Result, Self::Error>; fn update_tx_status( - &self, + &mut self, id: &Bytes32, status: TransactionStatus, ) -> Result, Self::Error>; diff --git a/crates/services/relayer/Cargo.toml b/crates/services/relayer/Cargo.toml index 2f2be488b10..0d9ea134abc 100644 --- a/crates/services/relayer/Cargo.toml +++ b/crates/services/relayer/Cargo.toml @@ -40,6 +40,7 @@ fuel-core-services = { path = "../../services", features = ["test-helpers"] } fuel-core-storage = { path = "../../storage", features = ["test-helpers"] } fuel-core-trace = { path = 
"../../trace" } mockall = { workspace = true } +rand = { workspace = true } test-case = { workspace = true } tokio = { workspace = true, features = ["macros", "test-util"] } diff --git a/crates/services/relayer/src/ports.rs b/crates/services/relayer/src/ports.rs index 725c231d6dd..1df70dbbdd8 100644 --- a/crates/services/relayer/src/ports.rs +++ b/crates/services/relayer/src/ports.rs @@ -2,6 +2,14 @@ use async_trait::async_trait; use fuel_core_storage::{ + basic_storage_tests, + codec::{ + postcard::Postcard, + primitive::Primitive, + }, + column::Column, + structure::plain::Plain, + structured_storage::TableWithStructure, tables::Messages, transactional::Transactional, Error as StorageError, @@ -138,3 +146,17 @@ impl Mappable for RelayerMetadata { /// If the relayer metadata ever contains more than one key, this should be /// changed from a unit value. const METADATA_KEY: () = (); + +impl TableWithStructure for RelayerMetadata { + type Structure = Plain>; + + fn column() -> Column { + Column::RelayerMetadata + } +} + +basic_storage_tests!( + RelayerMetadata, + ::Key::default(), + ::Value::default() +); diff --git a/crates/storage/Cargo.toml b/crates/storage/Cargo.toml index 70f9a1c5d25..1b2cd17c0d7 100644 --- a/crates/storage/Cargo.toml +++ b/crates/storage/Cargo.toml @@ -19,10 +19,21 @@ version = { workspace = true } [dependencies] anyhow = { workspace = true } derive_more = { workspace = true } -fuel-core-types = { workspace = true, default-features = false } +enum-iterator = { workspace = true } +fuel-core-types = { workspace = true, default-features = false, features = ["serde"] } fuel-vm-private = { workspace = true, default-features = false } +itertools = { workspace = true } mockall = { workspace = true, optional = true } +paste = "1" +postcard = { workspace = true, features = ["alloc"] } primitive-types = { workspace = true, default-features = false } +serde = { workspace = true } +strum = { workspace = true } +strum_macros = { workspace = true } + 
+[dev-dependencies] +fuel-core-types = { workspace = true, default-features = false, features = ["serde", "random", "test-helpers"] } +rand = { workspace = true } [features] test-helpers = ["dep:mockall"] diff --git a/crates/storage/src/codec.rs b/crates/storage/src/codec.rs new file mode 100644 index 00000000000..c3f4f2189c2 --- /dev/null +++ b/crates/storage/src/codec.rs @@ -0,0 +1,49 @@ +use crate::kv_store::Value; +use std::{ + borrow::Cow, + ops::Deref, +}; + +pub mod manual; +pub mod postcard; +pub mod primitive; +pub mod raw; + +pub trait Encoder { + fn as_bytes(&self) -> Cow<[u8]>; +} + +pub trait Encode { + type Encoder<'a>: Encoder + where + T: 'a; + + fn encode(t: &T) -> Self::Encoder<'_>; + + fn encode_as_value(t: &T) -> Value { + Value::new(Self::encode(t).as_bytes().into_owned()) + } +} + +pub trait Decode { + fn decode(bytes: &[u8]) -> anyhow::Result; + + fn decode_from_value(value: Value) -> anyhow::Result { + Self::decode(value.deref()) + } +} + +impl<'a> Encoder for Cow<'a, [u8]> { + fn as_bytes(&self) -> Cow<[u8]> { + match self { + Cow::Borrowed(borrowed) => Cow::Borrowed(borrowed), + Cow::Owned(owned) => Cow::Borrowed(owned.as_ref()), + } + } +} + +impl Encoder for [u8; SIZE] { + fn as_bytes(&self) -> Cow<[u8]> { + Cow::Borrowed(self.as_slice()) + } +} diff --git a/crates/storage/src/codec/manual.rs b/crates/storage/src/codec/manual.rs new file mode 100644 index 00000000000..572ceca1201 --- /dev/null +++ b/crates/storage/src/codec/manual.rs @@ -0,0 +1,42 @@ +use crate::codec::{ + Decode, + Encode, +}; +use fuel_core_types::fuel_vm::ContractsAssetKey; +use fuel_vm_private::storage::ContractsStateKey; +use std::borrow::Cow; + +pub struct Manual(core::marker::PhantomData); + +// TODO: Use `Raw` instead of `Manual` for `ContractsAssetKey`, `ContractsStateKey`, and `OwnedMessageKey` +// when `double_key` macro will generate `TryFrom<&[u8]>` implementation. 
+ +impl Encode for Manual { + type Encoder<'a> = Cow<'a, [u8]>; + + fn encode(t: &ContractsAssetKey) -> Self::Encoder<'_> { + Cow::Borrowed(t.as_ref()) + } +} + +impl Decode for Manual { + fn decode(bytes: &[u8]) -> anyhow::Result { + ContractsAssetKey::from_slice(bytes) + .map_err(|_| anyhow::anyhow!("Unable to decode bytes")) + } +} + +impl Encode for Manual { + type Encoder<'a> = Cow<'a, [u8]>; + + fn encode(t: &ContractsStateKey) -> Self::Encoder<'_> { + Cow::Borrowed(t.as_ref()) + } +} + +impl Decode for Manual { + fn decode(bytes: &[u8]) -> anyhow::Result { + ContractsStateKey::from_slice(bytes) + .map_err(|_| anyhow::anyhow!("Unable to decode bytes")) + } +} diff --git a/crates/storage/src/codec/postcard.rs b/crates/storage/src/codec/postcard.rs new file mode 100644 index 00000000000..737e4139dae --- /dev/null +++ b/crates/storage/src/codec/postcard.rs @@ -0,0 +1,29 @@ +use crate::codec::{ + Decode, + Encode, +}; +use std::borrow::Cow; + +pub struct Postcard; + +impl Encode for Postcard +where + K: ?Sized + serde::Serialize, +{ + type Encoder<'a> = Cow<'a, [u8]> where K: 'a; + + fn encode(value: &K) -> Self::Encoder<'_> { + Cow::Owned(postcard::to_allocvec(value).expect( + "It should be impossible to fail unless serialization is not implemented, which is not true for our types.", + )) + } +} + +impl Decode for Postcard +where + V: serde::de::DeserializeOwned, +{ + fn decode(bytes: &[u8]) -> anyhow::Result { + Ok(postcard::from_bytes(bytes)?) 
+ } +} diff --git a/crates/storage/src/codec/primitive.rs b/crates/storage/src/codec/primitive.rs new file mode 100644 index 00000000000..f33a08161f6 --- /dev/null +++ b/crates/storage/src/codec/primitive.rs @@ -0,0 +1,102 @@ +use crate::codec::{ + Decode, + Encode, + Encoder, +}; +use fuel_core_types::{ + blockchain::primitives::DaBlockHeight, + fuel_tx::{ + TxId, + UtxoId, + }, + fuel_types::BlockHeight, +}; +use std::borrow::Cow; + +pub struct Primitive; + +pub struct PrimitiveEncoder([u8; SIZE]); + +impl Encoder for PrimitiveEncoder { + fn as_bytes(&self) -> Cow<[u8]> { + Cow::Borrowed(&self.0[..]) + } +} + +macro_rules! impl_encode { + ($($ty:ty, $size:expr),*) => { + $( + impl Encode<$ty> for Primitive<{ $size }> { + type Encoder<'a> = PrimitiveEncoder<{ $size }>; + + fn encode(t: &$ty) -> Self::Encoder<'_> { + PrimitiveEncoder(t.to_be_bytes()) + } + } + )* + }; +} +macro_rules! impl_decode { + ($($ty:ty, $size:expr),*) => { + $( + impl Decode<$ty> for Primitive<{ $size }> { + fn decode(bytes: &[u8]) -> anyhow::Result<$ty> { + Ok(<$ty>::from_be_bytes(<[u8; { $size }]>::try_from(bytes)?)) + } + } + )* + }; +} + +impl_encode! { + u8, 1, + u16, 2, + u32, 4, + BlockHeight, 4, + DaBlockHeight, 8, + u64, 8, + u128, 16 +} + +impl_decode! 
{ + u8, 1, + u16, 2, + u32, 4, + u64, 8, + u128, 16 +} + +impl Decode for Primitive<4> { + fn decode(bytes: &[u8]) -> anyhow::Result { + Ok(BlockHeight::from(<[u8; 4]>::try_from(bytes)?)) + } +} + +impl Decode for Primitive<8> { + fn decode(bytes: &[u8]) -> anyhow::Result { + Ok(DaBlockHeight::from(<[u8; 8]>::try_from(bytes)?)) + } +} + +pub fn utxo_id_to_bytes(utxo_id: &UtxoId) -> [u8; TxId::LEN + 1] { + let mut default = [0; TxId::LEN + 1]; + default[0..TxId::LEN].copy_from_slice(utxo_id.tx_id().as_ref()); + default[TxId::LEN] = utxo_id.output_index(); + default +} + +impl Encode for Primitive<{ TxId::LEN + 1 }> { + type Encoder<'a> = PrimitiveEncoder<{ TxId::LEN + 1 }>; + + fn encode(t: &UtxoId) -> Self::Encoder<'_> { + PrimitiveEncoder(utxo_id_to_bytes(t)) + } +} + +impl Decode for Primitive<{ TxId::LEN + 1 }> { + fn decode(bytes: &[u8]) -> anyhow::Result { + let bytes = <[u8; TxId::LEN + 1]>::try_from(bytes)?; + let tx_id: [u8; TxId::LEN] = bytes[0..TxId::LEN].try_into()?; + Ok(UtxoId::new(TxId::from(tx_id), bytes[TxId::LEN])) + } +} diff --git a/crates/storage/src/codec/raw.rs b/crates/storage/src/codec/raw.rs new file mode 100644 index 00000000000..dbda4f71ff4 --- /dev/null +++ b/crates/storage/src/codec/raw.rs @@ -0,0 +1,27 @@ +use crate::codec::{ + Decode, + Encode, +}; +use std::borrow::Cow; + +pub struct Raw; + +impl Encode for Raw +where + K: ?Sized + AsRef<[u8]>, +{ + type Encoder<'a> = Cow<'a, [u8]> where K: 'a; + + fn encode(t: &K) -> Self::Encoder<'_> { + Cow::Borrowed(t.as_ref()) + } +} + +impl Decode for Raw +where + for<'a> V: TryFrom<&'a [u8]>, +{ + fn decode(bytes: &[u8]) -> anyhow::Result { + V::try_from(bytes).map_err(|_| anyhow::anyhow!("Unable to decode bytes")) + } +} diff --git a/crates/storage/src/column.rs b/crates/storage/src/column.rs new file mode 100644 index 00000000000..5e52491fa7c --- /dev/null +++ b/crates/storage/src/column.rs @@ -0,0 +1,179 @@ +use crate::kv_store::StorageColumn; + +macro_rules! 
column_definition { + ($(#[$meta:meta])* $vis:vis enum $name:ident { + $(#[$complex_meta:meta])* $complex_variants:ident($body:ident), + $($(#[$const_meta:meta])* $const_variants:ident = $const_number:expr,)* + }) => { + $(#[$meta])* + $vis enum $name { + $($(#[$const_meta])* $const_variants = $const_number,)* + $(#[$complex_meta])* $complex_variants($body), + } + + impl $name { + /// Returns the `u32` representation of the `Self`. + pub fn as_u32(&self) -> u32 { + match self { + $($name::$const_variants => $const_number,)* + $name::$complex_variants(foreign) => foreign.id, + } + } + } + } +} + +column_definition! { + /// Database tables column ids to the corresponding [`crate::Mappable`] table. + #[repr(u32)] + #[derive( + Copy, + Clone, + Debug, + strum_macros::EnumCount, + strum_macros::IntoStaticStr, + PartialEq, + Eq, + enum_iterator::Sequence, + Hash, + )] + pub enum Column { + /// The foreign column is not related to the required tables. + ForeignColumn(ForeignColumn), + + // Tables that are required for the state transition and fraud proving. + + /// See [`ContractsRawCode`](crate::tables::ContractsRawCode) + ContractsRawCode = 0, + /// See [`ContractsInfo`](crate::tables::ContractsInfo) + ContractsInfo = 1, + /// See [`ContractsState`](crate::tables::ContractsState) + ContractsState = 2, + /// See [`ContractsLatestUtxo`](crate::tables::ContractsLatestUtxo) + ContractsLatestUtxo = 3, + /// See [`ContractsAssets`](crate::tables::ContractsAssets) + ContractsAssets = 4, + /// See [`Coins`](crate::tables::Coins) + Coins = 5, + /// See [`Transactions`](crate::tables::Transactions) + Transactions = 6, + /// See [`FuelBlocks`](crate::tables::FuelBlocks) + FuelBlocks = 7, + /// See [`FuelBlockMerkleData`](storage::FuelBlockMerkleData) + FuelBlockMerkleData = 8, + /// See [`FuelBlockMerkleMetadata`](storage::FuelBlockMerkleMetadata) + FuelBlockMerkleMetadata = 9, + /// Messages that have been spent. 
+ /// Existence of a key in this column means that the message has been spent. + /// See [`SpentMessages`](crate::tables::SpentMessages) + SpentMessages = 10, + /// See [`ContractsAssetsMerkleData`](storage::ContractsAssetsMerkleData) + ContractsAssetsMerkleData = 11, + /// See [`ContractsAssetsMerkleMetadata`](storage::ContractsAssetsMerkleMetadata) + ContractsAssetsMerkleMetadata = 12, + /// See [`ContractsStateMerkleData`](storage::ContractsStateMerkleData) + ContractsStateMerkleData = 13, + /// See [`ContractsStateMerkleMetadata`](storage::ContractsStateMerkleMetadata) + ContractsStateMerkleMetadata = 14, + /// See [`Messages`](crate::tables::Messages) + Messages = 15, + + // TODO: Extract the columns below into a separate enum to not mix + // required columns and non-required columns. It will break `MemoryStore` + // and `MemoryTransactionView` because they rely on linear index incrementation. + + // Below are the tables used for p2p, block production, starting the node. + + /// The column id of metadata about the blockchain + Metadata = 16, + /// See [`Receipts`](crate::tables::Receipts) + Receipts = 17, + /// See [`FuelBlockSecondaryKeyBlockHeights`](storage::FuelBlockSecondaryKeyBlockHeights) + FuelBlockSecondaryKeyBlockHeights = 18, + /// See [`SealedBlockConsensus`](crate::tables::SealedBlockConsensus) + FuelBlockConsensus = 19, + /// Metadata for the relayer + /// See [`RelayerMetadata`](fuel_core_relayer::ports::RelayerMetadata) + RelayerMetadata = 20, + + // Below are not required tables. They are used for API and may be removed or moved to another place in the future. 
+ + /// The column of the table that stores `true` if `owner` owns `Coin` with `coin_id` + OwnedCoins = 21, + /// Transaction id to current status + TransactionStatus = 22, + /// The column of the table of all `owner`'s transactions + TransactionsByOwnerBlockIdx = 23, + /// The column of the table that stores `true` if `owner` owns `Message` with `message_id` + OwnedMessageIds = 24, + } +} + +impl Column { + /// The total count of variants in the enum. + pub const COUNT: usize = ::COUNT; + + /// Returns the `usize` representation of the `Column`. + pub fn as_usize(&self) -> usize { + self.as_u32() as usize + } +} + +impl StorageColumn for Column { + fn name(&self) -> &'static str { + match self { + Column::ForeignColumn(foreign) => foreign.name, + variant => variant.into(), + } + } + + fn id(&self) -> u32 { + self.as_u32() + } +} + +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] +pub struct ForeignColumn { + id: u32, + name: &'static str, +} + +impl ForeignColumn { + /// Creates the foreign column ensuring that the id and name + /// are not already used by the [`Column`] required tables. 
+ pub fn new(id: u32, name: &'static str) -> anyhow::Result { + for column in enum_iterator::all::() { + if column.id() == id { + anyhow::bail!("Column id {} is already used by {}", id, column.name()); + } + if column.name() == name { + anyhow::bail!( + "Column name {} is already used by {}", + name, + column.name() + ); + } + } + Ok(Self { id, name }) + } +} + +impl enum_iterator::Sequence for ForeignColumn { + const CARDINALITY: usize = 0; + + fn next(&self) -> Option { + None + } + + fn previous(&self) -> Option { + None + } + + fn first() -> Option { + None + } + + fn last() -> Option { + None + } +} diff --git a/crates/storage/src/kv_store.rs b/crates/storage/src/kv_store.rs index 430d50f426a..7400bb37149 100644 --- a/crates/storage/src/kv_store.rs +++ b/crates/storage/src/kv_store.rs @@ -20,6 +20,8 @@ pub trait StorageColumn: Clone { fn id(&self) -> u32; } +// TODO: Use `&mut self` for all mutable methods. +// It requires refactoring of all services because right now, most of them work with `&self` storage. /// The definition of the key-value store. pub trait KeyValueStore { /// The type of the column. @@ -113,6 +115,7 @@ pub trait BatchOperations: KeyValueStore { &self, entries: &mut dyn Iterator, Self::Column, WriteOperation)>, ) -> StorageResult<()> { + // TODO: Optimize implementation for in-memory storages. 
for (key, column, op) in entries { match op { WriteOperation::Insert(value) => { @@ -126,3 +129,151 @@ pub trait BatchOperations: KeyValueStore { Ok(()) } } + +impl KeyValueStore for &T +where + T: KeyValueStore, +{ + type Column = T::Column; + + fn put(&self, key: &[u8], column: Self::Column, value: Value) -> StorageResult<()> { + (**self).put(key, column, value) + } + + fn replace( + &self, + key: &[u8], + column: Self::Column, + value: Value, + ) -> StorageResult> { + (**self).replace(key, column, value) + } + + fn write( + &self, + key: &[u8], + column: Self::Column, + buf: &[u8], + ) -> StorageResult { + (**self).write(key, column, buf) + } + + fn take(&self, key: &[u8], column: Self::Column) -> StorageResult> { + (**self).take(key, column) + } + + fn delete(&self, key: &[u8], column: Self::Column) -> StorageResult<()> { + (**self).delete(key, column) + } + + fn exists(&self, key: &[u8], column: Self::Column) -> StorageResult { + (**self).exists(key, column) + } + + fn size_of_value( + &self, + key: &[u8], + column: Self::Column, + ) -> StorageResult> { + (**self).size_of_value(key, column) + } + + fn get(&self, key: &[u8], column: Self::Column) -> StorageResult> { + (**self).get(key, column) + } + + fn read( + &self, + key: &[u8], + column: Self::Column, + buf: &mut [u8], + ) -> StorageResult> { + (**self).read(key, column, buf) + } +} + +impl KeyValueStore for &mut T +where + T: KeyValueStore, +{ + type Column = T::Column; + + fn put(&self, key: &[u8], column: Self::Column, value: Value) -> StorageResult<()> { + (**self).put(key, column, value) + } + + fn replace( + &self, + key: &[u8], + column: Self::Column, + value: Value, + ) -> StorageResult> { + (**self).replace(key, column, value) + } + + fn write( + &self, + key: &[u8], + column: Self::Column, + buf: &[u8], + ) -> StorageResult { + (**self).write(key, column, buf) + } + + fn take(&self, key: &[u8], column: Self::Column) -> StorageResult> { + (**self).take(key, column) + } + + fn delete(&self, key: 
&[u8], column: Self::Column) -> StorageResult<()> { + (**self).delete(key, column) + } + + fn exists(&self, key: &[u8], column: Self::Column) -> StorageResult { + (**self).exists(key, column) + } + + fn size_of_value( + &self, + key: &[u8], + column: Self::Column, + ) -> StorageResult> { + (**self).size_of_value(key, column) + } + + fn get(&self, key: &[u8], column: Self::Column) -> StorageResult> { + (**self).get(key, column) + } + + fn read( + &self, + key: &[u8], + column: Self::Column, + buf: &mut [u8], + ) -> StorageResult> { + (**self).read(key, column, buf) + } +} + +impl BatchOperations for &T +where + T: BatchOperations, +{ + fn batch_write( + &self, + entries: &mut dyn Iterator, Self::Column, WriteOperation)>, + ) -> StorageResult<()> { + (**self).batch_write(entries) + } +} + +impl BatchOperations for &mut T +where + T: BatchOperations, +{ + fn batch_write( + &self, + entries: &mut dyn Iterator, Self::Column, WriteOperation)>, + ) -> StorageResult<()> { + (**self).batch_write(entries) + } +} diff --git a/crates/storage/src/lib.rs b/crates/storage/src/lib.rs index e6a345a1ce5..35cab3d0670 100644 --- a/crates/storage/src/lib.rs +++ b/crates/storage/src/lib.rs @@ -7,7 +7,7 @@ #![deny(clippy::arithmetic_side_effects)] #![deny(clippy::cast_possible_truncation)] #![deny(unused_crate_dependencies)] -#![deny(missing_docs)] +// #![deny(missing_docs)] #![deny(warnings)] use core::array::TryFromSliceError; @@ -21,8 +21,12 @@ pub use fuel_vm_private::{ }, }; +pub mod codec; +pub mod column; pub mod iter; pub mod kv_store; +pub mod structure; +pub mod structured_storage; pub mod tables; #[cfg(feature = "test-helpers")] pub mod test_helpers; @@ -33,6 +37,7 @@ pub use fuel_vm_private::storage::{ ContractsAssetKey, ContractsStateKey, }; +pub use paste; /// The storage result alias. 
pub type Result = core::result::Result; @@ -42,8 +47,8 @@ pub type Result = core::result::Result; /// Error occurring during interaction with storage pub enum Error { /// Error occurred during serialization or deserialization of the entity. - #[display(fmt = "error performing serialization or deserialization")] - Codec, + #[display(fmt = "error performing serialization or deserialization `{_0}`")] + Codec(anyhow::Error), /// Error occurred during interaction with database. #[display(fmt = "error occurred in the underlying datastore `{_0:?}`")] DatabaseError(Box), @@ -107,6 +112,28 @@ impl IsNotFound for Result { } } +pub trait StorageBatchMutate: StorageMutate { + /// Initialize the storage with batch insertion. This method is more performant than + /// [`Self::insert_batch`] in some case. + /// + /// # Errors + /// + /// Returns an error if the storage is already initialized. + fn init_storage( + &mut self, + set: &mut dyn Iterator, + ) -> Result<()>; + + /// Inserts the key-value pair into the storage in batch. + fn insert_batch( + &mut self, + set: &mut dyn Iterator, + ) -> Result<()>; + + /// Removes the key-value pairs from the storage in batch. + fn remove_batch(&mut self, set: &mut dyn Iterator) -> Result<()>; +} + /// Creates `StorageError::NotFound` error with file and line information inside. 
/// /// # Examples diff --git a/crates/storage/src/structure.rs b/crates/storage/src/structure.rs new file mode 100644 index 00000000000..5af8e796842 --- /dev/null +++ b/crates/storage/src/structure.rs @@ -0,0 +1,102 @@ +use crate::{ + codec::{ + Decode, + Encode, + Encoder, + }, + kv_store::{ + BatchOperations, + KeyValueStore, + }, + Mappable, + Result as StorageResult, +}; + +pub mod plain; +pub mod sparse; + +pub trait Structure +where + M: Mappable, + S: KeyValueStore, +{ + type KeyCodec: Encode + Decode; + type ValueCodec: Encode + Decode; + + fn put( + storage: &mut S, + key: &M::Key, + column: S::Column, + value: &M::Value, + ) -> StorageResult<()>; + + fn replace( + storage: &mut S, + key: &M::Key, + column: S::Column, + value: &M::Value, + ) -> StorageResult>; + + fn take( + storage: &mut S, + key: &M::Key, + column: S::Column, + ) -> StorageResult>; + + fn delete(storage: &mut S, key: &M::Key, column: S::Column) -> StorageResult<()>; + + fn exists(storage: &S, key: &M::Key, column: S::Column) -> StorageResult { + let key_encoder = Self::KeyCodec::encode(key); + let key_bytes = key_encoder.as_bytes(); + storage.exists(key_bytes.as_ref(), column) + } + + fn size_of_value( + storage: &S, + key: &M::Key, + column: S::Column, + ) -> StorageResult> { + let key_encoder = Self::KeyCodec::encode(key); + let key_bytes = key_encoder.as_bytes(); + storage.size_of_value(key_bytes.as_ref(), column) + } + + fn get( + storage: &S, + key: &M::Key, + column: S::Column, + ) -> StorageResult> { + let key_encoder = Self::KeyCodec::encode(key); + let key_bytes = key_encoder.as_bytes(); + storage + .get(key_bytes.as_ref(), column)? 
+ .map(|value| { + Self::ValueCodec::decode_from_value(value).map_err(crate::Error::Codec) + }) + .transpose() + } +} + +pub trait BatchStructure: Structure +where + M: Mappable, + S: BatchOperations, +{ + fn init( + storage: &mut S, + column: S::Column, + set: &mut dyn Iterator, + ) -> StorageResult<()>; + + fn insert( + storage: &mut S, + column: S::Column, + set: &mut dyn Iterator, + ) -> StorageResult<()>; + + fn remove( + storage: &mut S, + column: S::Column, + set: &mut dyn Iterator, + ) -> StorageResult<()>; +} diff --git a/crates/storage/src/structure/plain.rs b/crates/storage/src/structure/plain.rs new file mode 100644 index 00000000000..bbda6c037cd --- /dev/null +++ b/crates/storage/src/structure/plain.rs @@ -0,0 +1,127 @@ +use crate::{ + codec::{ + Decode, + Encode, + Encoder, + }, + column::Column, + kv_store::{ + BatchOperations, + KeyValueStore, + WriteOperation, + }, + structure::{ + BatchStructure, + Structure, + }, + structured_storage::TableWithStructure, + Error as StorageError, + Mappable, + Result as StorageResult, +}; + +pub struct Plain { + _marker: core::marker::PhantomData<(KeyCodec, ValueCodec)>, +} + +impl Structure for Plain +where + M: Mappable, + S: KeyValueStore, + KeyCodec: Encode + Decode, + ValueCodec: Encode + Decode, +{ + type KeyCodec = KeyCodec; + type ValueCodec = ValueCodec; + + fn put( + storage: &mut S, + key: &M::Key, + column: S::Column, + value: &M::Value, + ) -> StorageResult<()> { + let key_encoder = KeyCodec::encode(key); + let key_bytes = key_encoder.as_bytes(); + let value = ValueCodec::encode_as_value(value); + storage.put(key_bytes.as_ref(), column, value) + } + + fn replace( + storage: &mut S, + key: &M::Key, + column: S::Column, + value: &M::Value, + ) -> StorageResult> { + let key_encoder = KeyCodec::encode(key); + let key_bytes = key_encoder.as_bytes(); + let value = ValueCodec::encode_as_value(value); + storage + .replace(key_bytes.as_ref(), column, value)? 
+ .map(|value| { + ValueCodec::decode_from_value(value).map_err(StorageError::Codec) + }) + .transpose() + } + + fn take( + storage: &mut S, + key: &M::Key, + column: S::Column, + ) -> StorageResult> { + let key_encoder = KeyCodec::encode(key); + let key_bytes = key_encoder.as_bytes(); + storage + .take(key_bytes.as_ref(), column)? + .map(|value| { + ValueCodec::decode_from_value(value).map_err(StorageError::Codec) + }) + .transpose() + } + + fn delete(storage: &mut S, key: &M::Key, column: S::Column) -> StorageResult<()> { + let key_encoder = KeyCodec::encode(key); + let key_bytes = key_encoder.as_bytes(); + storage.delete(key_bytes.as_ref(), column) + } +} + +impl BatchStructure for Plain +where + S: BatchOperations, + M: Mappable + TableWithStructure>, + M::Structure: Structure, +{ + fn init( + storage: &mut S, + column: S::Column, + set: &mut dyn Iterator, + ) -> StorageResult<()> { + Self::insert(storage, column, set) + } + + fn insert( + storage: &mut S, + column: S::Column, + set: &mut dyn Iterator, + ) -> StorageResult<()> { + storage.batch_write(&mut set.map(|(key, value)| { + let key_encoder = >::KeyCodec::encode(key); + let key_bytes = key_encoder.as_bytes().to_vec(); + let value = + >::ValueCodec::encode_as_value(value); + (key_bytes, column, WriteOperation::Insert(value)) + })) + } + + fn remove( + storage: &mut S, + column: S::Column, + set: &mut dyn Iterator, + ) -> StorageResult<()> { + storage.batch_write(&mut set.map(|key| { + let key_encoder = >::KeyCodec::encode(key); + let key_bytes = key_encoder.as_bytes().to_vec(); + (key_bytes, column, WriteOperation::Remove) + })) + } +} diff --git a/crates/storage/src/structure/sparse.rs b/crates/storage/src/structure/sparse.rs new file mode 100644 index 00000000000..19a6d21258e --- /dev/null +++ b/crates/storage/src/structure/sparse.rs @@ -0,0 +1,440 @@ +use crate::{ + codec::{ + Decode, + Encode, + Encoder, + }, + column::Column, + kv_store::{ + BatchOperations, + KeyValueStore, + StorageColumn, + 
WriteOperation, + }, + structure::{ + BatchStructure, + Structure, + }, + structured_storage::{ + StructuredStorage, + TableWithStructure, + }, + tables::merkle::SparseMerkleMetadata, + Error as StorageError, + Mappable, + MerkleRoot, + MerkleRootStorage, + Result as StorageResult, + StorageAsMut, + StorageInspect, + StorageMutate, +}; +use fuel_core_types::fuel_merkle::{ + sparse, + sparse::{ + in_memory, + MerkleTree, + MerkleTreeKey, + }, +}; +use itertools::Itertools; +use std::borrow::Cow; + +pub trait MetadataKey { + type InputKey: ?Sized; + type OutputKey: ?Sized; + + fn metadata_key(key: &Self::InputKey) -> &Self::OutputKey; +} + +pub struct Sparse { + _marker: + core::marker::PhantomData<(KeyCodec, ValueCodec, Metadata, Nodes, KeyConvertor)>, +} + +impl + Sparse +where + Metadata: Mappable, + Nodes: Mappable< + Key = MerkleRoot, + Value = sparse::Primitive, + OwnedValue = sparse::Primitive, + >, +{ + fn insert_into_tree( + storage: &mut S, + key: &K, + key_bytes: &[u8], + value_bytes: &[u8], + ) -> StorageResult<()> + where + K: ?Sized, + for<'a> StructuredStorage<&'a mut S>: StorageMutate + + StorageMutate, + KeyConvertor: MetadataKey, + { + let mut storage = StructuredStorage::new(storage); + let metadata_key = KeyConvertor::metadata_key(key); + // Get latest metadata entry for this `metadata_key` + let prev_metadata: Cow = storage + .storage::() + .get(metadata_key)? 
+ .unwrap_or_default(); + + let root = prev_metadata.root; + let mut tree: MerkleTree = MerkleTree::load(&mut storage, &root) + .map_err(|err| StorageError::Other(anyhow::anyhow!("{err:?}")))?; + + tree.update(MerkleTreeKey::new(key_bytes), value_bytes) + .map_err(|err| StorageError::Other(anyhow::anyhow!("{err:?}")))?; + + // Generate new metadata for the updated tree + let root = tree.root(); + let metadata = SparseMerkleMetadata { root }; + storage + .storage::() + .insert(metadata_key, &metadata)?; + Ok(()) + } + + fn remove_from_tree( + storage: &mut S, + key: &K, + key_bytes: &[u8], + ) -> StorageResult<()> + where + K: ?Sized, + for<'a> StructuredStorage<&'a mut S>: StorageMutate + + StorageMutate, + KeyConvertor: MetadataKey, + { + let mut storage = StructuredStorage::new(storage); + let metadata_key = KeyConvertor::metadata_key(key); + // Get latest metadata entry for this `metadata_key` + let prev_metadata: Option> = + storage.storage::().get(metadata_key)?; + + if let Some(prev_metadata) = prev_metadata { + let root = prev_metadata.root; + + let mut tree: MerkleTree = MerkleTree::load(&mut storage, &root) + .map_err(|err| StorageError::Other(anyhow::anyhow!("{err:?}")))?; + + tree.delete(MerkleTreeKey::new(key_bytes)) + .map_err(|err| StorageError::Other(anyhow::anyhow!("{err:?}")))?; + + let root = tree.root(); + if &root == MerkleTree::::empty_root() { + // The tree is now empty; remove the metadata + storage.storage::().remove(metadata_key)?; + } else { + // Generate new metadata for the updated tree + let metadata = SparseMerkleMetadata { root }; + storage + .storage::() + .insert(metadata_key, &metadata)?; + } + } + + Ok(()) + } +} + +impl Structure + for Sparse +where + M: Mappable, + S: KeyValueStore, + KeyCodec: Encode + Decode, + ValueCodec: Encode + Decode, + Metadata: Mappable, + Nodes: Mappable< + Key = MerkleRoot, + Value = sparse::Primitive, + OwnedValue = sparse::Primitive, + >, + KeyConvertor: MetadataKey, + for<'a> StructuredStorage<&'a 
mut S>: StorageMutate + + StorageMutate, +{ + type KeyCodec = KeyCodec; + type ValueCodec = ValueCodec; + + fn put( + storage: &mut S, + key: &M::Key, + column: S::Column, + value: &M::Value, + ) -> StorageResult<()> { + let key_encoder = KeyCodec::encode(key); + let key_bytes = key_encoder.as_bytes(); + let value = ValueCodec::encode_as_value(value); + storage.put(key_bytes.as_ref(), column, value.clone())?; + Self::insert_into_tree(storage, key, key_bytes.as_ref(), value.as_ref()) + } + + fn replace( + storage: &mut S, + key: &M::Key, + column: S::Column, + value: &M::Value, + ) -> StorageResult> { + let key_encoder = KeyCodec::encode(key); + let key_bytes = key_encoder.as_bytes(); + let value = ValueCodec::encode_as_value(value); + let prev = storage + .replace(key_bytes.as_ref(), column, value.clone())? + .map(|value| { + ValueCodec::decode_from_value(value).map_err(StorageError::Codec) + }) + .transpose()?; + + Self::insert_into_tree(storage, key, key_bytes.as_ref(), value.as_ref())?; + Ok(prev) + } + + fn take( + storage: &mut S, + key: &M::Key, + column: S::Column, + ) -> StorageResult> { + let key_encoder = KeyCodec::encode(key); + let key_bytes = key_encoder.as_bytes(); + let prev = storage + .take(key_bytes.as_ref(), column)? 
+ .map(|value| { + ValueCodec::decode_from_value(value).map_err(StorageError::Codec) + }) + .transpose()?; + Self::remove_from_tree(storage, key, key_bytes.as_ref())?; + Ok(prev) + } + + fn delete(storage: &mut S, key: &M::Key, column: S::Column) -> StorageResult<()> { + let key_encoder = KeyCodec::encode(key); + let key_bytes = key_encoder.as_bytes(); + storage.delete(key_bytes.as_ref(), column)?; + Self::remove_from_tree(storage, key, key_bytes.as_ref()) + } +} + +impl + MerkleRootStorage for StructuredStorage +where + S: KeyValueStore, + M: Mappable + + TableWithStructure< + Structure = Sparse, + >, + Self: StorageMutate + + StorageInspect, + Metadata: Mappable, + Metadata::Key: Sized, +{ + fn root(&self, key: &Metadata::Key) -> StorageResult { + use crate::StorageAsRef; + let metadata: Option> = + self.storage_as_ref::().get(key)?; + let root = metadata + .map(|metadata| metadata.root) + .unwrap_or_else(|| in_memory::MerkleTree::new().root()); + Ok(root) + } +} + +type NodeKeyCodec = + <::Structure as Structure>::KeyCodec; +type NodeValueCodec = + <::Structure as Structure>::ValueCodec; + +impl BatchStructure + for Sparse +where + S: BatchOperations, + M: Mappable + + TableWithStructure< + Structure = Sparse, + >, + KeyCodec: Encode + Decode, + ValueCodec: Encode + Decode, + Metadata: Mappable, + Nodes: Mappable< + Key = MerkleRoot, + Value = sparse::Primitive, + OwnedValue = sparse::Primitive, + > + TableWithStructure, + KeyConvertor: MetadataKey, + Nodes::Structure: Structure, + for<'a> StructuredStorage<&'a mut S>: StorageMutate + + StorageMutate + + StorageMutate, +{ + fn init( + storage: &mut S, + column: S::Column, + set: &mut dyn Iterator, + ) -> StorageResult<()> { + let mut set = set.peekable(); + + let metadata_key; + if let Some((key, _)) = set.peek() { + metadata_key = KeyConvertor::metadata_key(*key); + } else { + return Ok(()) + } + + let mut storage = StructuredStorage::new(storage); + + if storage.storage::().contains_key(metadata_key)? 
{ + return Err(anyhow::anyhow!( + "The {} is already initialized", + M::column().name() + ) + .into()) + } + + let encoded_set = set + .map(|(key, value)| { + let key = KeyCodec::encode(key).as_bytes().into_owned(); + let value = ValueCodec::encode(value).as_bytes().into_owned(); + (key, value) + }) + .collect_vec(); + + let (root, nodes) = in_memory::MerkleTree::nodes_from_set( + encoded_set + .iter() + .map(|(key, value)| (MerkleTreeKey::new(key), value)), + ); + + storage.as_mut().batch_write( + &mut encoded_set + .into_iter() + .map(|(key, value)| (key, column, WriteOperation::Insert(value.into()))), + )?; + + let mut nodes = nodes.iter().map(|(key, value)| { + let key = NodeKeyCodec::::encode(key) + .as_bytes() + .into_owned(); + let value = NodeValueCodec::::encode_as_value(value); + (key, Nodes::column(), WriteOperation::Insert(value)) + }); + storage.as_mut().batch_write(&mut nodes)?; + + let metadata = SparseMerkleMetadata { root }; + storage + .storage::() + .insert(metadata_key, &metadata)?; + + Ok(()) + } + + fn insert( + storage: &mut S, + column: S::Column, + set: &mut dyn Iterator, + ) -> StorageResult<()> { + let mut set = set.peekable(); + + let metadata_key; + if let Some((key, _)) = set.peek() { + metadata_key = KeyConvertor::metadata_key(*key); + } else { + return Ok(()) + } + + let mut storage = StructuredStorage::new(storage); + let prev_metadata: Cow = storage + .storage::() + .get(metadata_key)? 
+ .unwrap_or_default(); + + let root = prev_metadata.root; + let mut tree: MerkleTree = MerkleTree::load(&mut storage, &root) + .map_err(|err| StorageError::Other(anyhow::anyhow!("{err:?}")))?; + + let encoded_set = set + .map(|(key, value)| { + let key = KeyCodec::encode(key).as_bytes().into_owned(); + let value = ValueCodec::encode(value).as_bytes().into_owned(); + (key, value) + }) + .collect_vec(); + + for (key_bytes, value_bytes) in encoded_set.iter() { + tree.update(MerkleTreeKey::new(key_bytes), value_bytes) + .map_err(|err| StorageError::Other(anyhow::anyhow!("{err:?}")))?; + } + let root = tree.root(); + + storage.as_mut().batch_write( + &mut encoded_set + .into_iter() + .map(|(key, value)| (key, column, WriteOperation::Insert(value.into()))), + )?; + + // Generate new metadata for the updated tree + let metadata = SparseMerkleMetadata { root }; + storage + .storage::() + .insert(metadata_key, &metadata)?; + + Ok(()) + } + + fn remove( + storage: &mut S, + column: S::Column, + set: &mut dyn Iterator, + ) -> StorageResult<()> { + let mut set = set.peekable(); + + let metadata_key; + if let Some(key) = set.peek() { + metadata_key = KeyConvertor::metadata_key(*key); + } else { + return Ok(()) + } + + let mut storage = StructuredStorage::new(storage); + let prev_metadata: Cow = storage + .storage::() + .get(metadata_key)? 
+ .unwrap_or_default(); + + let root = prev_metadata.root; + let mut tree: MerkleTree = MerkleTree::load(&mut storage, &root) + .map_err(|err| StorageError::Other(anyhow::anyhow!("{err:?}")))?; + + let encoded_set = set + .map(|key| KeyCodec::encode(key).as_bytes().into_owned()) + .collect_vec(); + + for key_bytes in encoded_set.iter() { + tree.delete(MerkleTreeKey::new(key_bytes)) + .map_err(|err| StorageError::Other(anyhow::anyhow!("{err:?}")))?; + } + let root = tree.root(); + + storage.as_mut().batch_write( + &mut encoded_set + .into_iter() + .map(|key| (key, column, WriteOperation::Remove)), + )?; + + if &root == MerkleTree::::empty_root() { + // The tree is now empty; remove the metadata + storage.storage::().remove(metadata_key)?; + } else { + // Generate new metadata for the updated tree + let metadata = SparseMerkleMetadata { root }; + storage + .storage::() + .insert(metadata_key, &metadata)?; + } + + Ok(()) + } +} diff --git a/crates/storage/src/structured_storage.rs b/crates/storage/src/structured_storage.rs new file mode 100644 index 00000000000..ef14915f002 --- /dev/null +++ b/crates/storage/src/structured_storage.rs @@ -0,0 +1,607 @@ +use crate::{ + column::Column, + kv_store::{ + BatchOperations, + KeyValueStore, + }, + structure::{ + BatchStructure, + Structure, + }, + Error as StorageError, + Mappable, + StorageBatchMutate, + StorageInspect, + StorageMutate, + StorageSize, +}; +use std::borrow::Cow; + +pub mod balances; +pub mod blocks; +pub mod coins; +pub mod contracts; +pub mod merkle_data; +pub mod messages; +pub mod receipts; +pub mod sealed_block; +pub mod state; +pub mod transactions; + +pub trait TableWithStructure: Mappable + Sized { + type Structure; + + fn column() -> Column; +} + +#[derive(Clone, Debug)] +pub struct StructuredStorage { + pub(crate) storage: S, +} + +impl StructuredStorage { + pub fn new(storage: S) -> Self { + Self { storage } + } +} + +impl AsRef for StructuredStorage { + fn as_ref(&self) -> &S { + &self.storage + } 
+} + +impl AsMut for StructuredStorage { + fn as_mut(&mut self) -> &mut S { + &mut self.storage + } +} + +impl StorageInspect for StructuredStorage +where + S: KeyValueStore, + M: Mappable + TableWithStructure, + M::Structure: Structure, +{ + type Error = StorageError; + + fn get(&self, key: &M::Key) -> Result>, Self::Error> { + ::Structure::get(&self.storage, key, M::column()) + .map(|value| value.map(Cow::Owned)) + } + + fn contains_key(&self, key: &M::Key) -> Result { + ::Structure::exists(&self.storage, key, M::column()) + } +} + +impl StorageMutate for StructuredStorage +where + S: KeyValueStore, + M: Mappable + TableWithStructure, + M::Structure: Structure, +{ + fn insert( + &mut self, + key: &M::Key, + value: &M::Value, + ) -> Result, Self::Error> { + ::Structure::replace( + &mut self.storage, + key, + M::column(), + value, + ) + } + + fn remove(&mut self, key: &M::Key) -> Result, Self::Error> { + ::Structure::take(&mut self.storage, key, M::column()) + } +} + +impl StorageSize for StructuredStorage +where + S: KeyValueStore, + M: Mappable + TableWithStructure, + M::Structure: Structure, +{ + fn size_of_value(&self, key: &M::Key) -> Result, Self::Error> { + ::Structure::size_of_value( + &self.storage, + key, + M::column(), + ) + } +} + +impl StorageBatchMutate for StructuredStorage +where + S: BatchOperations, + M: Mappable + TableWithStructure, + M::Structure: BatchStructure, +{ + fn init_storage( + &mut self, + set: &mut dyn Iterator, + ) -> Result<(), Self::Error> { + ::Structure::init(&mut self.storage, M::column(), set) + } + + fn insert_batch( + &mut self, + set: &mut dyn Iterator, + ) -> Result<(), Self::Error> { + ::Structure::insert(&mut self.storage, M::column(), set) + } + + fn remove_batch( + &mut self, + set: &mut dyn Iterator, + ) -> Result<(), Self::Error> { + ::Structure::remove(&mut self.storage, M::column(), set) + } +} + +pub mod test { + use crate::{ + column::Column, + kv_store::{ + BatchOperations, + KeyValueStore, + Value, + }, + 
Result as StorageResult, + }; + use std::{ + cell::RefCell, + collections::HashMap, + }; + + type Storage = RefCell), Vec>>; + + #[derive(Default, Debug, PartialEq, Eq)] + pub struct InMemoryStorage { + storage: Storage, + } + + impl KeyValueStore for InMemoryStorage { + type Column = Column; + + fn write( + &self, + key: &[u8], + column: Self::Column, + buf: &[u8], + ) -> StorageResult { + let write = buf.len(); + self.storage + .borrow_mut() + .insert((column, key.to_vec()), buf.to_vec()); + Ok(write) + } + + fn delete(&self, key: &[u8], column: Self::Column) -> StorageResult<()> { + self.storage.borrow_mut().remove(&(column, key.to_vec())); + Ok(()) + } + + fn get(&self, key: &[u8], column: Self::Column) -> StorageResult> { + Ok(self + .storage + .borrow_mut() + .get(&(column, key.to_vec())) + .map(|v| v.clone().into())) + } + } + + impl BatchOperations for InMemoryStorage {} + + #[macro_export] + macro_rules! basic_storage_tests { + ($table:ident, $key:expr, $value_insert:expr, $value_return:expr, $random_key:expr) => { + $crate::paste::item! 
{ + #[cfg(test)] + #[allow(unused_imports)] + mod [< $table:snake _basic_tests >] { + use super::*; + use $crate::{ + structured_storage::{ + test::InMemoryStorage, + StructuredStorage, + }, + StorageAsMut, + }; + use $crate::StorageInspect; + use $crate::StorageMutate; + + #[allow(dead_code)] + fn random(rng: &mut R) -> T + where + rand::distributions::Standard: rand::distributions::Distribution, + R: rand::Rng, + { + use rand::Rng; + rng.gen() + } + + #[test] + fn get() { + let mut storage = InMemoryStorage::default(); + let mut structured_storage = StructuredStorage::new(&mut storage); + let key = $key; + + structured_storage + .storage_as_mut::<$table>() + .insert(&key, &$value_insert) + .unwrap(); + + assert_eq!( + structured_storage + .storage_as_mut::<$table>() + .get(&key) + .expect("Should get without errors") + .expect("Should not be empty") + .into_owned(), + $value_return + ); + } + + #[test] + fn insert() { + let mut storage = InMemoryStorage::default(); + let mut structured_storage = StructuredStorage::new(&mut storage); + let key = $key; + + structured_storage + .storage_as_mut::<$table>() + .insert(&key, &$value_insert) + .unwrap(); + + let returned = structured_storage + .storage_as_mut::<$table>() + .get(&key) + .unwrap() + .unwrap() + .into_owned(); + assert_eq!(returned, $value_return); + } + + #[test] + fn remove() { + let mut storage = InMemoryStorage::default(); + let mut structured_storage = StructuredStorage::new(&mut storage); + let key = $key; + + structured_storage + .storage_as_mut::<$table>() + .insert(&key, &$value_insert) + .unwrap(); + + structured_storage.storage_as_mut::<$table>().remove(&key).unwrap(); + + assert!(!structured_storage + .storage_as_mut::<$table>() + .contains_key(&key) + .unwrap()); + } + + #[test] + fn exists() { + let mut storage = InMemoryStorage::default(); + let mut structured_storage = StructuredStorage::new(&mut storage); + let key = $key; + + structured_storage + .storage_as_mut::<$table>() + .insert(&key, 
&$value_insert) + .unwrap(); + + assert!(structured_storage + .storage_as_mut::<$table>() + .contains_key(&key) + .unwrap()); + } + + #[test] + fn batch_mutate_works() { + use rand::{ + Rng, + rngs::StdRng, + RngCore, + SeedableRng, + }; + + let empty_storage = InMemoryStorage::default(); + + let mut init_storage = InMemoryStorage::default(); + let mut init_structured_storage = StructuredStorage::new(&mut init_storage); + + let mut rng = &mut StdRng::seed_from_u64(1234); + let gen = || Some($random_key(&mut rng)); + let data = core::iter::from_fn(gen).take(5_000).collect::>(); + let value = $value_insert; + + <_ as $crate::StorageBatchMutate<$table>>::init_storage( + &mut init_structured_storage, + &mut data.iter().map(|k| { + let value: &<$table as $crate::Mappable>::Value = &value; + (k, value) + }) + ).expect("Should initialize the storage successfully"); + + let mut insert_storage = InMemoryStorage::default(); + let mut insert_structured_storage = StructuredStorage::new(&mut insert_storage); + + <_ as $crate::StorageBatchMutate<$table>>::insert_batch( + &mut insert_structured_storage, + &mut data.iter().map(|k| { + let value: &<$table as $crate::Mappable>::Value = &value; + (k, value) + }) + ).expect("Should insert batch successfully"); + + assert_eq!(init_storage, insert_storage); + assert_ne!(init_storage, empty_storage); + assert_ne!(insert_storage, empty_storage); + + let mut remove_from_insert_structured_storage = StructuredStorage::new(&mut insert_storage); + <_ as $crate::StorageBatchMutate<$table>>::remove_batch( + &mut remove_from_insert_structured_storage, + &mut data.iter() + ).expect("Should remove all entries successfully from insert storage"); + assert_ne!(init_storage, insert_storage); + assert_eq!(insert_storage, empty_storage); + + let mut remove_from_init_structured_storage = StructuredStorage::new(&mut init_storage); + <_ as $crate::StorageBatchMutate<$table>>::remove_batch( + &mut remove_from_init_structured_storage, + &mut data.iter() + 
).expect("Should remove all entries successfully from init storage"); + assert_eq!(init_storage, insert_storage); + assert_eq!(init_storage, empty_storage); + } + }} + }; + ($table:ident, $key:expr, $value_insert:expr, $value_return:expr) => { + $crate::basic_storage_tests!($table, $key, $value_insert, $value_return, random); + }; + ($table:ident, $key:expr, $value:expr) => { + $crate::basic_storage_tests!($table, $key, $value, $value); + }; + } + + #[macro_export] + macro_rules! root_storage_tests { + ($table:ident, $metadata_table:ident, $current_key:expr, $foreign_key:expr, $generate_key:ident, $generate_value:ident) => { + paste::item! { + #[cfg(test)] + mod [< $table:snake _root_tests >] { + use super::*; + use $crate::{ + structured_storage::{ + test::InMemoryStorage, + StructuredStorage, + }, + StorageAsMut, + }; + use rand::{ + rngs::StdRng, + SeedableRng, + }; + + #[test] + fn root() { + let mut storage = InMemoryStorage::default(); + let mut structured_storage = StructuredStorage::new(&mut storage); + + let rng = &mut StdRng::seed_from_u64(1234); + let key = $generate_key(&$current_key, rng); + let value = $generate_value(rng); + structured_storage.storage_as_mut::<$table>().insert(&key, &value) + .unwrap(); + + let root = structured_storage.storage_as_mut::<$table>().root(&$current_key); + assert!(root.is_ok()) + } + + #[test] + fn root_returns_empty_root_for_empty_metadata() { + let mut storage = InMemoryStorage::default(); + let mut structured_storage = StructuredStorage::new(&mut storage); + + let empty_root = fuel_core_types::fuel_merkle::sparse::in_memory::MerkleTree::new().root(); + let root = structured_storage + .storage_as_mut::<$table>() + .root(&$current_key) + .unwrap(); + assert_eq!(root, empty_root) + } + + #[test] + fn put_updates_the_state_merkle_root_for_the_given_metadata() { + let mut storage = InMemoryStorage::default(); + let mut structured_storage = StructuredStorage::new(&mut storage); + + let rng = &mut 
StdRng::seed_from_u64(1234); + let key = $generate_key(&$current_key, rng); + let state = $generate_value(rng); + + // Write the first contract state + structured_storage + .storage_as_mut::<$table>() + .insert(&key, &state) + .unwrap(); + + // Read the first Merkle root + let root_1 = structured_storage + .storage_as_mut::<$table>() + .root(&$current_key) + .unwrap(); + + // Write the second contract state + let key = $generate_key(&$current_key, rng); + let state = $generate_value(rng); + structured_storage + .storage_as_mut::<$table>() + .insert(&key, &state) + .unwrap(); + + // Read the second Merkle root + let root_2 = structured_storage + .storage_as_mut::<$table>() + .root(&$current_key) + .unwrap(); + + assert_ne!(root_1, root_2); + } + + #[test] + fn remove_updates_the_state_merkle_root_for_the_given_metadata() { + let mut storage = InMemoryStorage::default(); + let mut structured_storage = StructuredStorage::new(&mut storage); + + let rng = &mut StdRng::seed_from_u64(1234); + + // Write the first contract state + let first_key = $generate_key(&$current_key, rng); + let first_state = $generate_value(rng); + structured_storage + .storage_as_mut::<$table>() + .insert(&first_key, &first_state) + .unwrap(); + let root_0 = structured_storage + .storage_as_mut::<$table>() + .root(&$current_key) + .unwrap(); + + // Write the second contract state + let second_key = $generate_key(&$current_key, rng); + let second_state = $generate_value(rng); + structured_storage + .storage_as_mut::<$table>() + .insert(&second_key, &second_state) + .unwrap(); + + // Read the first Merkle root + let root_1 = structured_storage + .storage_as_mut::<$table>() + .root(&$current_key) + .unwrap(); + + // Remove the second contract state + structured_storage.storage_as_mut::<$table>().remove(&second_key).unwrap(); + + // Read the second Merkle root + let root_2 = structured_storage + .storage_as_mut::<$table>() + .root(&$current_key) + .unwrap(); + + assert_ne!(root_1, root_2); + 
assert_eq!(root_0, root_2); + } + + #[test] + fn updating_foreign_metadata_does_not_affect_the_given_metadata_insertion() { + let given_metadata_key = $current_key; + let foreign_metadata_key = $foreign_key; + + let mut storage = InMemoryStorage::default(); + let mut structured_storage = StructuredStorage::new(&mut storage); + + let rng = &mut StdRng::seed_from_u64(1234); + + let state_value = $generate_value(rng); + + // Given + let given_key = $generate_key(&given_metadata_key, rng); + let foreign_key = $generate_key(&foreign_metadata_key, rng); + structured_storage + .storage_as_mut::<$table>() + .insert(&given_key, &state_value) + .unwrap(); + + // When + structured_storage + .storage_as_mut::<$table>() + .insert(&foreign_key, &state_value) + .unwrap(); + structured_storage + .storage_as_mut::<$table>() + .remove(&foreign_key) + .unwrap(); + + // Then + let result = structured_storage + .storage_as_mut::<$table>() + .insert(&given_key, &state_value) + .unwrap(); + + assert!(result.is_some()); + } + + #[test] + fn put_creates_merkle_metadata_when_empty() { + let mut storage = InMemoryStorage::default(); + let mut structured_storage = StructuredStorage::new(&mut storage); + + let rng = &mut StdRng::seed_from_u64(1234); + + // Given + let key = $generate_key(&$current_key, rng); + let state = $generate_value(rng); + + // Write a contract state + structured_storage + .storage_as_mut::<$table>() + .insert(&key, &state) + .unwrap(); + + // Read the Merkle metadata + let metadata = structured_storage + .storage_as_mut::<$metadata_table>() + .get(&$current_key) + .unwrap(); + + assert!(metadata.is_some()); + } + + #[test] + fn remove_deletes_merkle_metadata_when_empty() { + let mut storage = InMemoryStorage::default(); + let mut structured_storage = StructuredStorage::new(&mut storage); + + let rng = &mut StdRng::seed_from_u64(1234); + + // Given + let key = $generate_key(&$current_key, rng); + let state = $generate_value(rng); + + // Write a contract state + 
structured_storage + .storage_as_mut::<$table>() + .insert(&key, &state) + .unwrap(); + + // Read the Merkle metadata + structured_storage + .storage_as_mut::<$metadata_table>() + .get(&$current_key) + .unwrap() + .expect("Expected Merkle metadata to be present"); + + // Remove the contract asset + structured_storage.storage_as_mut::<$table>().remove(&key).unwrap(); + + // Read the Merkle metadata + let metadata = structured_storage + .storage_as_mut::<$metadata_table>() + .get(&$current_key) + .unwrap(); + + assert!(metadata.is_none()); + } + }} + }; + } +} diff --git a/crates/storage/src/structured_storage/balances.rs b/crates/storage/src/structured_storage/balances.rs new file mode 100644 index 00000000000..8bd758513fe --- /dev/null +++ b/crates/storage/src/structured_storage/balances.rs @@ -0,0 +1,85 @@ +use crate::{ + codec::{ + manual::Manual, + primitive::Primitive, + }, + column::Column, + structure::sparse::{ + MetadataKey, + Sparse, + }, + structured_storage::TableWithStructure, + tables::{ + merkle::{ + ContractsAssetsMerkleData, + ContractsAssetsMerkleMetadata, + }, + ContractsAssets, + }, + Mappable, +}; +use fuel_core_types::fuel_vm::ContractsAssetKey; + +pub struct KeyConvertor; + +impl MetadataKey for KeyConvertor { + type InputKey = ::Key; + type OutputKey = ::Key; + + fn metadata_key(key: &Self::InputKey) -> &Self::OutputKey { + key.contract_id() + } +} + +impl TableWithStructure for ContractsAssets { + type Structure = Sparse< + Manual, + Primitive<8>, + ContractsAssetsMerkleMetadata, + ContractsAssetsMerkleData, + KeyConvertor, + >; + + fn column() -> Column { + Column::ContractsAssets + } +} + +#[cfg(test)] +fn generate_key( + metadata_key: &::Key, + rng: &mut impl rand::Rng, +) -> ::Key { + let mut bytes = [0u8; 32]; + rng.fill(bytes.as_mut()); + ::Key::new(metadata_key, &bytes.into()) +} + +#[cfg(test)] +fn generate_key_for_same_contract( + rng: &mut impl rand::Rng, +) -> ::Key { + generate_key(&fuel_core_types::fuel_tx::ContractId::zeroed(), 
rng) +} + +crate::basic_storage_tests!( + ContractsAssets, + ::Key::default(), + ::Value::default(), + ::Value::default(), + generate_key_for_same_contract +); + +#[cfg(test)] +fn generate_value(rng: &mut impl rand::Rng) -> ::Value { + rng.gen() +} + +crate::root_storage_tests!( + ContractsAssets, + ContractsAssetsMerkleMetadata, + ::Key::from([1u8; 32]), + ::Key::from([2u8; 32]), + generate_key, + generate_value +); diff --git a/crates/storage/src/structured_storage/blocks.rs b/crates/storage/src/structured_storage/blocks.rs new file mode 100644 index 00000000000..a1c29c0ee13 --- /dev/null +++ b/crates/storage/src/structured_storage/blocks.rs @@ -0,0 +1,24 @@ +use crate::{ + codec::{ + postcard::Postcard, + raw::Raw, + }, + column::Column, + structure::plain::Plain, + structured_storage::TableWithStructure, + tables::FuelBlocks, +}; + +impl TableWithStructure for FuelBlocks { + type Structure = Plain; + + fn column() -> Column { + Column::FuelBlocks + } +} + +crate::basic_storage_tests!( + FuelBlocks, + ::Key::default(), + ::Value::default() +); diff --git a/crates/storage/src/structured_storage/coins.rs b/crates/storage/src/structured_storage/coins.rs new file mode 100644 index 00000000000..ff740c04565 --- /dev/null +++ b/crates/storage/src/structured_storage/coins.rs @@ -0,0 +1,24 @@ +use crate::{ + codec::{ + postcard::Postcard, + primitive::Primitive, + }, + column::Column, + structure::plain::Plain, + structured_storage::TableWithStructure, + tables::Coins, +}; + +impl TableWithStructure for Coins { + type Structure = Plain, Postcard>; + + fn column() -> Column { + Column::Coins + } +} + +crate::basic_storage_tests!( + Coins, + ::Key::default(), + ::Value::default() +); diff --git a/crates/storage/src/structured_storage/contracts.rs b/crates/storage/src/structured_storage/contracts.rs new file mode 100644 index 00000000000..ee5e8a5984a --- /dev/null +++ b/crates/storage/src/structured_storage/contracts.rs @@ -0,0 +1,88 @@ +use crate::{ + codec::{ + 
postcard::Postcard, + raw::Raw, + }, + column::Column, + kv_store::KeyValueStore, + structure::plain::Plain, + structured_storage::{ + StructuredStorage, + TableWithStructure, + }, + tables::{ + ContractsInfo, + ContractsLatestUtxo, + ContractsRawCode, + }, + StorageRead, +}; +use core::ops::Deref; +use fuel_core_types::fuel_tx::ContractId; + +// # Dev-note: The value of the `ContractsRawCode` has a unique implementation of serialization +// and deserialization and uses `Raw` codec. Because the value is a contract byte code represented +// by bytes, we don't use `serde::Deserialization` and `serde::Serialization` for `Vec`, +// because we don't need to store the size of the contract. We store/load raw bytes. +impl TableWithStructure for ContractsRawCode { + type Structure = Plain; + + fn column() -> Column { + Column::ContractsRawCode + } +} + +impl StorageRead for StructuredStorage +where + S: KeyValueStore, +{ + fn read( + &self, + key: &ContractId, + buf: &mut [u8], + ) -> Result, Self::Error> { + self.storage + .read(key.as_ref(), Column::ContractsRawCode, buf) + } + + fn read_alloc(&self, key: &ContractId) -> Result>, Self::Error> { + self.storage + .get(key.as_ref(), Column::ContractsRawCode) + .map(|value| value.map(|value| value.deref().clone())) + } +} + +impl TableWithStructure for ContractsInfo { + type Structure = Plain; + + fn column() -> Column { + Column::ContractsInfo + } +} + +impl TableWithStructure for ContractsLatestUtxo { + type Structure = Plain; + + fn column() -> Column { + Column::ContractsLatestUtxo + } +} + +crate::basic_storage_tests!( + ContractsRawCode, + ::Key::from([1u8; 32]), + vec![32u8], + ::OwnedValue::from(vec![32u8]) +); + +crate::basic_storage_tests!( + ContractsInfo, + ::Key::from([1u8; 32]), + ([2u8; 32].into(), [3u8; 32].into()) +); + +crate::basic_storage_tests!( + ContractsLatestUtxo, + ::Key::from([1u8; 32]), + ::Value::default() +); diff --git a/crates/storage/src/structured_storage/merkle_data.rs 
b/crates/storage/src/structured_storage/merkle_data.rs new file mode 100644 index 00000000000..efb709e2604 --- /dev/null +++ b/crates/storage/src/structured_storage/merkle_data.rs @@ -0,0 +1,49 @@ +use crate::{ + codec::{ + postcard::Postcard, + primitive::Primitive, + raw::Raw, + }, + column::Column, + structure::plain::Plain, + structured_storage::TableWithStructure, + tables::merkle::{ + ContractsAssetsMerkleData, + ContractsAssetsMerkleMetadata, + ContractsStateMerkleData, + ContractsStateMerkleMetadata, + FuelBlockMerkleData, + FuelBlockMerkleMetadata, + }, +}; + +macro_rules! merkle_table { + ($table:ident) => { + merkle_table!($table, Raw); + }; + ($table:ident, $key_codec:ident) => { + impl TableWithStructure for $table { + type Structure = Plain<$key_codec, Postcard>; + + fn column() -> Column { + Column::$table + } + } + + crate::basic_storage_tests!( + $table, + <$table as $crate::Mappable>::Key::default(), + <$table as $crate::Mappable>::Value::default() + ); + }; +} + +type U64Codec = Primitive<8>; +type BlockHeightCodec = Primitive<4>; + +merkle_table!(FuelBlockMerkleData, U64Codec); +merkle_table!(FuelBlockMerkleMetadata, BlockHeightCodec); +merkle_table!(ContractsAssetsMerkleData); +merkle_table!(ContractsAssetsMerkleMetadata); +merkle_table!(ContractsStateMerkleData); +merkle_table!(ContractsStateMerkleMetadata); diff --git a/crates/storage/src/structured_storage/messages.rs b/crates/storage/src/structured_storage/messages.rs new file mode 100644 index 00000000000..90a6db8d94a --- /dev/null +++ b/crates/storage/src/structured_storage/messages.rs @@ -0,0 +1,41 @@ +use crate::{ + codec::{ + postcard::Postcard, + raw::Raw, + }, + column::Column, + structure::plain::Plain, + structured_storage::TableWithStructure, + tables::{ + Messages, + SpentMessages, + }, +}; + +impl TableWithStructure for Messages { + type Structure = Plain; + + fn column() -> Column { + Column::Messages + } +} + +impl TableWithStructure for SpentMessages { + type Structure = 
Plain; + + fn column() -> Column { + Column::SpentMessages + } +} + +crate::basic_storage_tests!( + Messages, + ::Key::default(), + ::Value::default() +); + +crate::basic_storage_tests!( + SpentMessages, + ::Key::default(), + ::Value::default() +); diff --git a/crates/storage/src/structured_storage/receipts.rs b/crates/storage/src/structured_storage/receipts.rs new file mode 100644 index 00000000000..54c7789653a --- /dev/null +++ b/crates/storage/src/structured_storage/receipts.rs @@ -0,0 +1,29 @@ +use crate::{ + codec::{ + postcard::Postcard, + raw::Raw, + }, + column::Column, + structure::plain::Plain, + structured_storage::TableWithStructure, + tables::Receipts, +}; + +impl TableWithStructure for Receipts { + type Structure = Plain; + + fn column() -> Column { + Column::Receipts + } +} + +crate::basic_storage_tests!( + Receipts, + ::Key::from([1u8; 32]), + vec![fuel_core_types::fuel_tx::Receipt::ret( + Default::default(), + Default::default(), + Default::default(), + Default::default() + )] +); diff --git a/crates/storage/src/structured_storage/sealed_block.rs b/crates/storage/src/structured_storage/sealed_block.rs new file mode 100644 index 00000000000..3f2ce0dded5 --- /dev/null +++ b/crates/storage/src/structured_storage/sealed_block.rs @@ -0,0 +1,24 @@ +use crate::{ + codec::{ + postcard::Postcard, + raw::Raw, + }, + column::Column, + structure::plain::Plain, + structured_storage::TableWithStructure, + tables::SealedBlockConsensus, +}; + +impl TableWithStructure for SealedBlockConsensus { + type Structure = Plain; + + fn column() -> Column { + Column::FuelBlockConsensus + } +} + +crate::basic_storage_tests!( + SealedBlockConsensus, + ::Key::from([1u8; 32]), + ::Value::default() +); diff --git a/crates/storage/src/structured_storage/state.rs b/crates/storage/src/structured_storage/state.rs new file mode 100644 index 00000000000..326c7f15e04 --- /dev/null +++ b/crates/storage/src/structured_storage/state.rs @@ -0,0 +1,87 @@ +use crate::{ + codec::{ + 
manual::Manual, + raw::Raw, + }, + column::Column, + structure::sparse::{ + MetadataKey, + Sparse, + }, + structured_storage::TableWithStructure, + tables::{ + merkle::{ + ContractsStateMerkleData, + ContractsStateMerkleMetadata, + }, + ContractsState, + }, + Mappable, +}; +use fuel_core_types::fuel_vm::ContractsStateKey; + +pub struct KeyConvertor; + +impl MetadataKey for KeyConvertor { + type InputKey = ::Key; + type OutputKey = ::Key; + + fn metadata_key(key: &Self::InputKey) -> &Self::OutputKey { + key.contract_id() + } +} + +impl TableWithStructure for ContractsState { + type Structure = Sparse< + Manual, + Raw, + ContractsStateMerkleMetadata, + ContractsStateMerkleData, + KeyConvertor, + >; + + fn column() -> Column { + Column::ContractsState + } +} + +#[cfg(test)] +fn generate_key( + metadata_key: &::Key, + rng: &mut impl rand::Rng, +) -> ::Key { + let mut bytes = [0u8; 32]; + rng.fill(bytes.as_mut()); + ::Key::new(metadata_key, &bytes.into()) +} + +#[cfg(test)] +fn generate_key_for_same_contract( + rng: &mut impl rand::Rng, +) -> ::Key { + generate_key(&fuel_core_types::fuel_tx::ContractId::zeroed(), rng) +} + +crate::basic_storage_tests!( + ContractsState, + ::Key::default(), + ::Value::zeroed(), + ::Value::zeroed(), + generate_key_for_same_contract +); + +#[cfg(test)] +fn generate_value(rng: &mut impl rand::Rng) -> ::Value { + let mut bytes = [0u8; 32]; + rng.fill(bytes.as_mut()); + bytes.into() +} + +crate::root_storage_tests!( + ContractsState, + ContractsStateMerkleMetadata, + ::Key::from([1u8; 32]), + ::Key::from([2u8; 32]), + generate_key, + generate_value +); diff --git a/crates/storage/src/structured_storage/transactions.rs b/crates/storage/src/structured_storage/transactions.rs new file mode 100644 index 00000000000..47b5d9d8fba --- /dev/null +++ b/crates/storage/src/structured_storage/transactions.rs @@ -0,0 +1,24 @@ +use crate::{ + codec::{ + postcard::Postcard, + raw::Raw, + }, + column::Column, + structure::plain::Plain, + 
structured_storage::TableWithStructure, + tables::Transactions, +}; + +impl TableWithStructure for Transactions { + type Structure = Plain; + + fn column() -> Column { + Column::Transactions + } +} + +crate::basic_storage_tests!( + Transactions, + ::Key::from([1u8; 32]), + ::Value::default() +); diff --git a/crates/storage/src/tables.rs b/crates/storage/src/tables.rs index 2c2df585f13..1e1dfe3b22f 100644 --- a/crates/storage/src/tables.rs +++ b/crates/storage/src/tables.rs @@ -58,6 +58,7 @@ impl Mappable for ContractsLatestUtxo { type OwnedValue = ContractUtxoInfo; } +// TODO: Move definition to the service that is responsible for its usage. /// Receipts of different hidden internal operations. pub struct Receipts; @@ -121,5 +122,114 @@ impl Mappable for Transactions { type OwnedValue = Transaction; } -// TODO: Add macro to define all common tables to avoid copy/paste of the code. -// TODO: Add macro to define common unit tests. +pub mod merkle { + use crate::{ + Mappable, + MerkleRoot, + }; + use fuel_core_types::{ + fuel_merkle::{ + binary, + sparse, + }, + fuel_tx::ContractId, + fuel_types::BlockHeight, + }; + + /// Metadata for dense Merkle trees + #[derive(Debug, Clone, serde::Serialize, serde::Deserialize, PartialEq, Eq)] + pub struct DenseMerkleMetadata { + /// The root hash of the dense Merkle tree structure + pub root: MerkleRoot, + /// The version of the dense Merkle tree structure is equal to the number of + /// leaves. Every time we append a new leaf to the Merkle tree data set, we + /// increment the version number. 
+ pub version: u64, + } + + impl Default for DenseMerkleMetadata { + fn default() -> Self { + let empty_merkle_tree = binary::root_calculator::MerkleRootCalculator::new(); + Self { + root: empty_merkle_tree.root(), + version: 0, + } + } + } + + /// Metadata for sparse Merkle trees + #[derive(Debug, Clone, serde::Serialize, serde::Deserialize, PartialEq, Eq)] + pub struct SparseMerkleMetadata { + /// The root hash of the sparse Merkle tree structure + pub root: MerkleRoot, + } + + impl Default for SparseMerkleMetadata { + fn default() -> Self { + let empty_merkle_tree = sparse::in_memory::MerkleTree::new(); + Self { + root: empty_merkle_tree.root(), + } + } + } + + /// The table of BMT data for Fuel blocks. + pub struct FuelBlockMerkleData; + + impl Mappable for FuelBlockMerkleData { + type Key = u64; + type OwnedKey = Self::Key; + type Value = binary::Primitive; + type OwnedValue = Self::Value; + } + + /// The metadata table for [`FuelBlockMerkleData`] table. + pub struct FuelBlockMerkleMetadata; + + impl Mappable for FuelBlockMerkleMetadata { + type Key = BlockHeight; + type OwnedKey = Self::Key; + type Value = DenseMerkleMetadata; + type OwnedValue = Self::Value; + } + + /// The table of SMT data for Contract assets. + pub struct ContractsAssetsMerkleData; + + impl Mappable for ContractsAssetsMerkleData { + type Key = [u8; 32]; + type OwnedKey = Self::Key; + type Value = sparse::Primitive; + type OwnedValue = Self::Value; + } + + /// The metadata table for [`ContractsAssetsMerkleData`] table + pub struct ContractsAssetsMerkleMetadata; + + impl Mappable for ContractsAssetsMerkleMetadata { + type Key = ContractId; + type OwnedKey = Self::Key; + type Value = SparseMerkleMetadata; + type OwnedValue = Self::Value; + } + + /// The table of SMT data for Contract state. 
+ pub struct ContractsStateMerkleData; + + impl Mappable for ContractsStateMerkleData { + type Key = [u8; 32]; + type OwnedKey = Self::Key; + type Value = sparse::Primitive; + type OwnedValue = Self::Value; + } + + /// The metadata table for [`ContractsStateMerkleData`] table + pub struct ContractsStateMerkleMetadata; + + impl Mappable for ContractsStateMerkleMetadata { + type Key = ContractId; + type OwnedKey = Self::Key; + type Value = SparseMerkleMetadata; + type OwnedValue = Self::Value; + } +} diff --git a/crates/types/Cargo.toml b/crates/types/Cargo.toml index 0ad99859428..586408ab50e 100644 --- a/crates/types/Cargo.toml +++ b/crates/types/Cargo.toml @@ -19,8 +19,10 @@ version = { workspace = true } [dependencies] anyhow = { workspace = true } bs58 = "0.5" +derivative = { version = "2" } derive_more = { version = "0.99" } fuel-vm-private = { workspace = true, default-features = false, features = ["alloc"] } +rand = { workspace = true, optional = true } secrecy = "0.8" serde = { workspace = true, features = ["derive"], optional = true } tai64 = { version = "4.0", features = ["serde"] } @@ -31,5 +33,5 @@ zeroize = "1.5" default = ["std"] serde = ["dep:serde", "fuel-vm-private/serde"] std = ["fuel-vm-private/std"] -random = ["fuel-vm-private/random"] +random = ["dep:rand", "fuel-vm-private/random"] test-helpers = ["random", "fuel-vm-private/test-helpers"] diff --git a/crates/types/src/blockchain/header.rs b/crates/types/src/blockchain/header.rs index 68497d3f30c..d30e4d776d0 100644 --- a/crates/types/src/blockchain/header.rs +++ b/crates/types/src/blockchain/header.rs @@ -22,7 +22,8 @@ use tai64::Tai64; /// A fuel block header that has all the fields generated because it /// has been executed. -#[derive(Clone, Debug, PartialEq, Eq)] +#[derive(Clone, Debug, derivative::Derivative)] +#[derivative(PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct BlockHeader { /// The application header. 
@@ -32,6 +33,7 @@ pub struct BlockHeader { /// The header metadata calculated during creation. /// The field is private to enforce the use of the [`PartialBlockHeader::generate`] method. #[cfg_attr(feature = "serde", serde(skip))] + #[derivative(PartialEq = "ignore")] metadata: Option, } diff --git a/crates/types/src/blockchain/primitives.rs b/crates/types/src/blockchain/primitives.rs index 468df2e2407..a559407e096 100644 --- a/crates/types/src/blockchain/primitives.rs +++ b/crates/types/src/blockchain/primitives.rs @@ -5,6 +5,7 @@ use crate::{ fuel_crypto::SecretKey, fuel_types::Bytes32, }; +use core::array::TryFromSliceError; use derive_more::{ Add, AsRef, @@ -76,6 +77,13 @@ impl AsRef<[u8]> for BlockId { } } +#[cfg(feature = "random")] +impl rand::distributions::Distribution for rand::distributions::Standard { + fn sample(&self, rng: &mut R) -> BlockId { + BlockId(rng.gen()) + } +} + /// Block height of the data availability layer #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] #[derive( @@ -111,9 +119,20 @@ impl From for DaBlockHeight { } } +impl From<[u8; 8]> for DaBlockHeight { + fn from(n: [u8; 8]) -> Self { + DaBlockHeight(u64::from_be_bytes(n)) + } +} + impl DaBlockHeight { /// Convert to array of big endian bytes - pub fn to_bytes(self) -> [u8; 8] { + pub fn to_bytes(&self) -> [u8; 8] { + self.to_be_bytes() + } + + /// Convert to array of big endian bytes + pub fn to_be_bytes(&self) -> [u8; 8] { self.0.to_be_bytes() } @@ -144,3 +163,11 @@ impl From<[u8; 32]> for BlockId { Self(bytes.into()) } } + +impl TryFrom<&'_ [u8]> for BlockId { + type Error = TryFromSliceError; + + fn try_from(bytes: &[u8]) -> Result { + Ok(Self::from(TryInto::<[u8; 32]>::try_into(bytes)?)) + } +} diff --git a/crates/types/src/entities/coins/coin.rs b/crates/types/src/entities/coins/coin.rs index b28f6f65040..c22d8cd8e4f 100644 --- a/crates/types/src/entities/coins/coin.rs +++ b/crates/types/src/entities/coins/coin.rs @@ -53,7 +53,7 @@ impl Coin { /// 
The compressed version of the `Coin` with minimum fields required for /// the proper work of the blockchain. #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] -#[derive(Debug, Clone)] +#[derive(Default, Debug, Clone, PartialEq, Eq)] pub struct CompressedCoin { /// The address with permission to spend this coin pub owner: Address, diff --git a/crates/types/src/lib.rs b/crates/types/src/lib.rs index 5f7c4743ddc..2924c1390c1 100644 --- a/crates/types/src/lib.rs +++ b/crates/types/src/lib.rs @@ -35,6 +35,7 @@ pub mod fuel_vm { checked_transaction, consts, crypto, + double_key, error::PredicateVerificationFailed, interpreter, prelude::{ @@ -54,6 +55,8 @@ pub mod fuel_vm { }, script_with_data_offset, state, + storage::ContractsAssetKey, + storage::ContractsStateKey, util, }; } From de82e25949df1a968432ec8689664ab9967aff05 Mon Sep 17 00:00:00 2001 From: xgreenx Date: Tue, 26 Dec 2023 23:23:07 +0100 Subject: [PATCH 02/28] Added comments to all newly added stuff. Made self-review and applied fixes. 
--- Cargo.lock | 26 +++ crates/fuel-core/src/database.rs | 12 +- crates/fuel-core/src/database/block.rs | 4 +- crates/fuel-core/src/database/coin.rs | 4 +- crates/fuel-core/src/database/contracts.rs | 4 +- crates/fuel-core/src/database/metadata.rs | 2 + crates/fuel-core/src/database/storage.rs | 6 + crates/fuel-core/src/database/transactions.rs | 10 +- crates/fuel-core/src/state.rs | 9 +- crates/services/relayer/src/ports.rs | 4 +- crates/storage/Cargo.toml | 6 +- crates/storage/src/codec.rs | 16 ++ crates/storage/src/codec/manual.rs | 6 + crates/storage/src/codec/postcard.rs | 7 + crates/storage/src/codec/primitive.rs | 26 ++- crates/storage/src/codec/raw.rs | 5 + crates/storage/src/column.rs | 10 ++ crates/storage/src/kv_store.rs | 150 +----------------- crates/storage/src/lib.rs | 8 +- crates/storage/src/structure.rs | 32 +++- crates/storage/src/structure/plain.rs | 11 +- crates/storage/src/structure/sparse.rs | 82 ++++++---- crates/storage/src/structured_storage.rs | 36 +++-- .../src/structured_storage/balances.rs | 78 ++++----- .../storage/src/structured_storage/blocks.rs | 3 + .../storage/src/structured_storage/coins.rs | 3 + .../src/structured_storage/contracts.rs | 39 +++-- .../src/structured_storage/merkle_data.rs | 5 +- .../src/structured_storage/messages.rs | 29 ++-- .../src/structured_storage/receipts.rs | 3 + .../src/structured_storage/sealed_block.rs | 3 + .../storage/src/structured_storage/state.rs | 82 +++++----- .../src/structured_storage/transactions.rs | 3 + crates/storage/src/tables.rs | 1 + 34 files changed, 396 insertions(+), 329 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 86852b53eb6..2e092008f8d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3023,8 +3023,10 @@ dependencies = [ "anyhow", "derive_more", "enum-iterator", + "fuel-core-storage", "fuel-core-types", "fuel-vm", + "impl-tools", "itertools 0.10.5", "mockall", "paste", @@ -3996,6 +3998,30 @@ dependencies = [ "serde", ] +[[package]] +name = "impl-tools" +version = "0.10.0" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d82c305b1081f1a99fda262883c788e50ab57d36c00830bdd7e0a82894ad965c" +dependencies = [ + "autocfg", + "impl-tools-lib", + "proc-macro-error", + "syn 2.0.41", +] + +[[package]] +name = "impl-tools-lib" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85d3946d886eaab0702fa0c6585adcced581513223fa9df7ccfabbd9fa331a88" +dependencies = [ + "proc-macro-error", + "proc-macro2", + "quote", + "syn 2.0.41", +] + [[package]] name = "impl-trait-for-tuples" version = "0.2.2" diff --git a/crates/fuel-core/src/database.rs b/crates/fuel-core/src/database.rs index 5a05102ba95..eef1bd5bb95 100644 --- a/crates/fuel-core/src/database.rs +++ b/crates/fuel-core/src/database.rs @@ -12,6 +12,7 @@ use fuel_core_chain_config::{ MessageConfig, }; use fuel_core_storage::{ + codec::Decode, iter::IterDirection, kv_store::{ BatchOperations, @@ -19,7 +20,11 @@ use fuel_core_storage::{ Value, WriteOperation, }, - structured_storage::StructuredStorage, + structure::Structure, + structured_storage::{ + StructuredStorage, + TableWithStructure, + }, transactional::{ StorageTransaction, Transactional, @@ -55,11 +60,6 @@ type DatabaseResult = Result; // TODO: Extract `Database` and all belongs into `fuel-core-database`. 
#[cfg(feature = "rocksdb")] use crate::state::rocks_db::RocksDb; -use fuel_core_storage::{ - codec::Decode, - structure::Structure, - structured_storage::TableWithStructure, -}; #[cfg(feature = "rocksdb")] use std::path::Path; #[cfg(feature = "rocksdb")] diff --git a/crates/fuel-core/src/database/block.rs b/crates/fuel-core/src/database/block.rs index 9677c2d8202..5e3e04469df 100644 --- a/crates/fuel-core/src/database/block.rs +++ b/crates/fuel-core/src/database/block.rs @@ -4,7 +4,6 @@ use crate::database::{ Error as DatabaseError, }; use fuel_core_storage::{ - basic_storage_tests, codec::{ primitive::Primitive, raw::Raw, @@ -71,7 +70,8 @@ impl TableWithStructure for FuelBlockSecondaryKeyBlockHeights { } } -basic_storage_tests!( +#[cfg(test)] +fuel_core_storage::basic_storage_tests!( FuelBlockSecondaryKeyBlockHeights, ::Key::default(), ::Value::default() diff --git a/crates/fuel-core/src/database/coin.rs b/crates/fuel-core/src/database/coin.rs index 51ac80deaff..fdb25b47ef6 100644 --- a/crates/fuel-core/src/database/coin.rs +++ b/crates/fuel-core/src/database/coin.rs @@ -4,7 +4,6 @@ use crate::database::{ }; use fuel_core_chain_config::CoinConfig; use fuel_core_storage::{ - basic_storage_tests, codec::{ postcard::Postcard, primitive::utxo_id_to_bytes, @@ -68,7 +67,8 @@ fn generate_key(rng: &mut impl rand::Rng) -> ::Key { bytes } -basic_storage_tests!( +#[cfg(test)] +fuel_core_storage::basic_storage_tests!( OwnedCoins, [0u8; 65], ::Value::default(), diff --git a/crates/fuel-core/src/database/contracts.rs b/crates/fuel-core/src/database/contracts.rs index 0d56ed2c48c..ead374f4653 100644 --- a/crates/fuel-core/src/database/contracts.rs +++ b/crates/fuel-core/src/database/contracts.rs @@ -60,7 +60,7 @@ impl Database { Ok((*key.state_key(), value)) }) .filter(|val| val.is_ok()) - .collect::>>()?, + .collect::>>()?, ); let balances = Some( @@ -71,7 +71,7 @@ impl Database { Ok((*key.asset_id(), value)) }) .filter(|val| val.is_ok()) - .collect::>>()?, + .collect::>>()?, 
); Ok(ContractConfig { diff --git a/crates/fuel-core/src/database/metadata.rs b/crates/fuel-core/src/database/metadata.rs index 93cc113a543..a7bf078d053 100644 --- a/crates/fuel-core/src/database/metadata.rs +++ b/crates/fuel-core/src/database/metadata.rs @@ -20,6 +20,8 @@ use fuel_core_storage::{ StorageMutate, }; +/// The table that stores all metadata. Each key is a string, while the value depends on the context. +/// The tables mostly used to store metadata for correct work of the `fuel-core`. pub struct MetadataTable(core::marker::PhantomData); impl Mappable for MetadataTable diff --git a/crates/fuel-core/src/database/storage.rs b/crates/fuel-core/src/database/storage.rs index 19414196753..84900a79b0b 100644 --- a/crates/fuel-core/src/database/storage.rs +++ b/crates/fuel-core/src/database/storage.rs @@ -46,12 +46,18 @@ use fuel_core_storage::{ }; use std::borrow::Cow; +/// The trait allows selectively inheriting the implementation of storage traits from `StructuredStorage` +/// for the `Database`. Not all default implementations of the `StructuredStorage` are suitable +/// for the `Database`. Sometimes we want to override some of them and add a custom implementation +/// with additional logic. For example, we want to override the `StorageMutate` trait for the `Messages` +/// table to also track the owner of messages. pub trait UseStructuredImplementation where M: Mappable, { } +/// The trait allows to implementation of `UseStructuredImplementation` for the `StructuredStorage` for multiple tables. macro_rules! 
use_structured_implementation { ($($m:ty),*) => { $( diff --git a/crates/fuel-core/src/database/transactions.rs b/crates/fuel-core/src/database/transactions.rs index a0362ecb27e..61b9d435bad 100644 --- a/crates/fuel-core/src/database/transactions.rs +++ b/crates/fuel-core/src/database/transactions.rs @@ -2,7 +2,10 @@ use crate::database::{ Column, Database, }; -use core::mem::size_of; +use core::{ + array::TryFromSliceError, + mem::size_of, +}; use fuel_core_storage::{ codec::{ manual::Manual, @@ -31,8 +34,8 @@ use fuel_core_types::{ }, services::txpool::TransactionStatus, }; -use std::array::TryFromSliceError; +/// Teh tables allows to iterate over all transactions owned by an address. pub struct OwnedTransactions; impl Mappable for OwnedTransactions { @@ -57,6 +60,7 @@ fn generate_key(rng: &mut impl rand::Rng) -> ::Ke bytes.into() } +#[cfg(test)] fuel_core_storage::basic_storage_tests!( OwnedTransactions, [1u8; INDEX_SIZE].into(), @@ -65,6 +69,7 @@ fuel_core_storage::basic_storage_tests!( generate_key ); +/// The table stores the status of each transaction. 
pub struct TransactionStatuses; impl Mappable for TransactionStatuses { @@ -82,6 +87,7 @@ impl TableWithStructure for TransactionStatuses { } } +#[cfg(test)] fuel_core_storage::basic_storage_tests!( TransactionStatuses, ::Key::default(), diff --git a/crates/fuel-core/src/state.rs b/crates/fuel-core/src/state.rs index 73f5cee37b2..83c93851df0 100644 --- a/crates/fuel-core/src/state.rs +++ b/crates/fuel-core/src/state.rs @@ -22,7 +22,12 @@ use std::{ sync::Arc, }; +pub mod in_memory; +#[cfg(feature = "rocksdb")] +pub mod rocks_db; + type DataSourceInner = Arc>; + #[derive(Clone, Debug)] pub struct DataSource(DataSourceInner); @@ -70,7 +75,3 @@ pub trait TransactableStorage: fn flush(&self) -> DatabaseResult<()>; } - -pub mod in_memory; -#[cfg(feature = "rocksdb")] -pub mod rocks_db; diff --git a/crates/services/relayer/src/ports.rs b/crates/services/relayer/src/ports.rs index 1df70dbbdd8..e4bea0c252a 100644 --- a/crates/services/relayer/src/ports.rs +++ b/crates/services/relayer/src/ports.rs @@ -2,7 +2,6 @@ use async_trait::async_trait; use fuel_core_storage::{ - basic_storage_tests, codec::{ postcard::Postcard, primitive::Primitive, @@ -155,7 +154,8 @@ impl TableWithStructure for RelayerMetadata { } } -basic_storage_tests!( +#[cfg(test)] +fuel_core_storage::basic_storage_tests!( RelayerMetadata, ::Key::default(), ::Value::default() diff --git a/crates/storage/Cargo.toml b/crates/storage/Cargo.toml index 1b2cd17c0d7..7380c559905 100644 --- a/crates/storage/Cargo.toml +++ b/crates/storage/Cargo.toml @@ -22,18 +22,20 @@ derive_more = { workspace = true } enum-iterator = { workspace = true } fuel-core-types = { workspace = true, default-features = false, features = ["serde"] } fuel-vm-private = { workspace = true, default-features = false } +impl-tools = "0.10" itertools = { workspace = true } mockall = { workspace = true, optional = true } paste = "1" postcard = { workspace = true, features = ["alloc"] } primitive-types = { workspace = true, default-features = false } 
+rand = { workspace = true, optional = true } serde = { workspace = true } strum = { workspace = true } strum_macros = { workspace = true } [dev-dependencies] +fuel-core-storage = { path = ".", features = ["test-helpers"] } fuel-core-types = { workspace = true, default-features = false, features = ["serde", "random", "test-helpers"] } -rand = { workspace = true } [features] -test-helpers = ["dep:mockall"] +test-helpers = ["dep:mockall", "dep:rand"] diff --git a/crates/storage/src/codec.rs b/crates/storage/src/codec.rs index c3f4f2189c2..baf6a7ee7a7 100644 --- a/crates/storage/src/codec.rs +++ b/crates/storage/src/codec.rs @@ -1,3 +1,6 @@ +//! The module contains the traits for encoding and decoding the types(a.k.a Codec). +//! It implements common codecs and encoders, but it is always possible to define own codecs. + use crate::kv_store::Value; use std::{ borrow::Cow, @@ -9,25 +12,38 @@ pub mod postcard; pub mod primitive; pub mod raw; +/// The trait is usually implemented by the encoder that stores serialized objects. pub trait Encoder { + /// Returns the serialized object as a slice. fn as_bytes(&self) -> Cow<[u8]>; } +/// The trait encodes the type to the bytes and passes it to the `Encoder`, +/// which stores it and provides a reference to it. That allows gives more +/// flexibility and more performant encoding, allowing the use of slices and arrays +/// instead of vectors in some cases. Since the [`Encoder`] returns `Cow<[u8]>`, +/// it is always possible to take ownership of the serialized value. pub trait Encode { + /// The encoder type that stores serialized object. type Encoder<'a>: Encoder where T: 'a; + /// Encodes the object to the bytes and passes it to the `Encoder`. fn encode(t: &T) -> Self::Encoder<'_>; + /// Returns the serialized object as an [`Value`]. fn encode_as_value(t: &T) -> Value { Value::new(Self::encode(t).as_bytes().into_owned()) } } +/// The trait decodes the type from the bytes. 
pub trait Decode { + /// Decodes the type `T` from the bytes. fn decode(bytes: &[u8]) -> anyhow::Result; + /// Decodes the type `T` from the [`Value`]. fn decode_from_value(value: Value) -> anyhow::Result { Self::decode(value.deref()) } diff --git a/crates/storage/src/codec/manual.rs b/crates/storage/src/codec/manual.rs index 572ceca1201..020a389387a 100644 --- a/crates/storage/src/codec/manual.rs +++ b/crates/storage/src/codec/manual.rs @@ -1,3 +1,8 @@ +//! The module contains the implementation of the `Manual` codec. +//! The codec allows the definition of manual implementation for specific +//! types that don't follow any patterns from other codecs. Anyone can implement +//! a codec like that, and it's more of an example of how it can be done for foreign types. + use crate::codec::{ Decode, Encode, @@ -6,6 +11,7 @@ use fuel_core_types::fuel_vm::ContractsAssetKey; use fuel_vm_private::storage::ContractsStateKey; use std::borrow::Cow; +/// The codec allows the definition of manual implementation for specific type `T`. pub struct Manual(core::marker::PhantomData); // TODO: Use `Raw` instead of `Manual` for `ContractsAssetKey`, `ContractsStateKey`, and `OwnedMessageKey` diff --git a/crates/storage/src/codec/postcard.rs b/crates/storage/src/codec/postcard.rs index 737e4139dae..a8218fa8849 100644 --- a/crates/storage/src/codec/postcard.rs +++ b/crates/storage/src/codec/postcard.rs @@ -1,9 +1,16 @@ +//! The module contains the implementation of the `Postcard` codec. +//! Any type that implements `serde::Serialize` and `serde::Deserialize` +//! can use the `Postcard` codec to be encoded/decoded into/from bytes. +//! The `serde` serialization and deserialization add their own overhead, +//! so this codec shouldn't be used for simple types. + use crate::codec::{ Decode, Encode, }; use std::borrow::Cow; +/// The codec is used to serialized/deserialized types that supports `serde::Serialize` and `serde::Deserialize`. 
pub struct Postcard; impl Encode for Postcard diff --git a/crates/storage/src/codec/primitive.rs b/crates/storage/src/codec/primitive.rs index f33a08161f6..4f39ddb982f 100644 --- a/crates/storage/src/codec/primitive.rs +++ b/crates/storage/src/codec/primitive.rs @@ -1,7 +1,11 @@ +//! The module contains the implementation of the `Postcard` codec. +//! The codec is used for types that can be represented by an array. +//! It includes all primitive types and types that are arrays inside +//! or could be represented by arrays. + use crate::codec::{ Decode, Encode, - Encoder, }; use fuel_core_types::{ blockchain::primitives::DaBlockHeight, @@ -11,26 +15,19 @@ use fuel_core_types::{ }, fuel_types::BlockHeight, }; -use std::borrow::Cow; +/// The codec is used for types that can be represented by an array. +/// The `SIZE` const specifies the size of the array used to represent the type. pub struct Primitive; -pub struct PrimitiveEncoder([u8; SIZE]); - -impl Encoder for PrimitiveEncoder { - fn as_bytes(&self) -> Cow<[u8]> { - Cow::Borrowed(&self.0[..]) - } -} - macro_rules! impl_encode { ($($ty:ty, $size:expr),*) => { $( impl Encode<$ty> for Primitive<{ $size }> { - type Encoder<'a> = PrimitiveEncoder<{ $size }>; + type Encoder<'a> = [u8; { $size }]; fn encode(t: &$ty) -> Self::Encoder<'_> { - PrimitiveEncoder(t.to_be_bytes()) + t.to_be_bytes() } } )* @@ -78,6 +75,7 @@ impl Decode for Primitive<8> { } } +/// Converts the `UtxoId` into an array of bytes. 
pub fn utxo_id_to_bytes(utxo_id: &UtxoId) -> [u8; TxId::LEN + 1] { let mut default = [0; TxId::LEN + 1]; default[0..TxId::LEN].copy_from_slice(utxo_id.tx_id().as_ref()); @@ -86,10 +84,10 @@ pub fn utxo_id_to_bytes(utxo_id: &UtxoId) -> [u8; TxId::LEN + 1] { } impl Encode for Primitive<{ TxId::LEN + 1 }> { - type Encoder<'a> = PrimitiveEncoder<{ TxId::LEN + 1 }>; + type Encoder<'a> = [u8; TxId::LEN + 1]; fn encode(t: &UtxoId) -> Self::Encoder<'_> { - PrimitiveEncoder(utxo_id_to_bytes(t)) + utxo_id_to_bytes(t) } } diff --git a/crates/storage/src/codec/raw.rs b/crates/storage/src/codec/raw.rs index dbda4f71ff4..2a3a9d17b13 100644 --- a/crates/storage/src/codec/raw.rs +++ b/crates/storage/src/codec/raw.rs @@ -1,9 +1,14 @@ +//! The module contains the implementation of the `Raw` codec. +//! The codec is used for types that are already represented by bytes +//! and can be deserialized into bytes-based objects. + use crate::codec::{ Decode, Encode, }; use std::borrow::Cow; +/// The codec is used for types that are already represented by bytes. pub struct Raw; impl Encode for Raw diff --git a/crates/storage/src/column.rs b/crates/storage/src/column.rs index 5e52491fa7c..0e9605ab399 100644 --- a/crates/storage/src/column.rs +++ b/crates/storage/src/column.rs @@ -1,5 +1,10 @@ +//! The module defines the `Column` and default tables used by the current `fuel-core` codebase. +//! In the future, the `Column` enum should contain only the required tables for the execution. +//! All other tables should live in the downstream creates in the place where they are really used. + use crate::kv_store::StorageColumn; +/// Helper macro to generate the `Column` enum and its implementation for `as_u32` method. macro_rules! column_definition { ($(#[$meta:meta])* $vis:vis enum $name:ident { $(#[$complex_meta:meta])* $complex_variants:ident($body:ident), @@ -132,6 +137,8 @@ impl StorageColumn for Column { } } +/// The foreign column is not related to the required tables. 
+/// It can be used to extend the database with additional tables. #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] pub struct ForeignColumn { id: u32, @@ -158,6 +165,9 @@ impl ForeignColumn { } } +/// It is required to implement iteration over the variants of the enum. +/// The `ForeignColumn` is not iterable, so we implement the `Sequence` trait +/// to do nothing. impl enum_iterator::Sequence for ForeignColumn { const CARDINALITY: usize = 0; diff --git a/crates/storage/src/kv_store.rs b/crates/storage/src/kv_store.rs index 7400bb37149..2fa8b1602ce 100644 --- a/crates/storage/src/kv_store.rs +++ b/crates/storage/src/kv_store.rs @@ -23,6 +23,7 @@ pub trait StorageColumn: Clone { // TODO: Use `&mut self` for all mutable methods. // It requires refactoring of all services because right now, most of them work with `&self` storage. /// The definition of the key-value store. +#[impl_tools::autoimpl(for &T, &mut T, Box, Arc)] pub trait KeyValueStore { /// The type of the column. type Column: StorageColumn; @@ -109,6 +110,7 @@ pub enum WriteOperation { } /// The definition of the key-value store with batch operations. +#[impl_tools::autoimpl(for &T, &mut T, Box, Arc)] pub trait BatchOperations: KeyValueStore { /// Writes the batch of the entries into the storage. 
fn batch_write( @@ -129,151 +131,3 @@ pub trait BatchOperations: KeyValueStore { Ok(()) } } - -impl KeyValueStore for &T -where - T: KeyValueStore, -{ - type Column = T::Column; - - fn put(&self, key: &[u8], column: Self::Column, value: Value) -> StorageResult<()> { - (**self).put(key, column, value) - } - - fn replace( - &self, - key: &[u8], - column: Self::Column, - value: Value, - ) -> StorageResult> { - (**self).replace(key, column, value) - } - - fn write( - &self, - key: &[u8], - column: Self::Column, - buf: &[u8], - ) -> StorageResult { - (**self).write(key, column, buf) - } - - fn take(&self, key: &[u8], column: Self::Column) -> StorageResult> { - (**self).take(key, column) - } - - fn delete(&self, key: &[u8], column: Self::Column) -> StorageResult<()> { - (**self).delete(key, column) - } - - fn exists(&self, key: &[u8], column: Self::Column) -> StorageResult { - (**self).exists(key, column) - } - - fn size_of_value( - &self, - key: &[u8], - column: Self::Column, - ) -> StorageResult> { - (**self).size_of_value(key, column) - } - - fn get(&self, key: &[u8], column: Self::Column) -> StorageResult> { - (**self).get(key, column) - } - - fn read( - &self, - key: &[u8], - column: Self::Column, - buf: &mut [u8], - ) -> StorageResult> { - (**self).read(key, column, buf) - } -} - -impl KeyValueStore for &mut T -where - T: KeyValueStore, -{ - type Column = T::Column; - - fn put(&self, key: &[u8], column: Self::Column, value: Value) -> StorageResult<()> { - (**self).put(key, column, value) - } - - fn replace( - &self, - key: &[u8], - column: Self::Column, - value: Value, - ) -> StorageResult> { - (**self).replace(key, column, value) - } - - fn write( - &self, - key: &[u8], - column: Self::Column, - buf: &[u8], - ) -> StorageResult { - (**self).write(key, column, buf) - } - - fn take(&self, key: &[u8], column: Self::Column) -> StorageResult> { - (**self).take(key, column) - } - - fn delete(&self, key: &[u8], column: Self::Column) -> StorageResult<()> { - 
(**self).delete(key, column) - } - - fn exists(&self, key: &[u8], column: Self::Column) -> StorageResult { - (**self).exists(key, column) - } - - fn size_of_value( - &self, - key: &[u8], - column: Self::Column, - ) -> StorageResult> { - (**self).size_of_value(key, column) - } - - fn get(&self, key: &[u8], column: Self::Column) -> StorageResult> { - (**self).get(key, column) - } - - fn read( - &self, - key: &[u8], - column: Self::Column, - buf: &mut [u8], - ) -> StorageResult> { - (**self).read(key, column, buf) - } -} - -impl BatchOperations for &T -where - T: BatchOperations, -{ - fn batch_write( - &self, - entries: &mut dyn Iterator, Self::Column, WriteOperation)>, - ) -> StorageResult<()> { - (**self).batch_write(entries) - } -} - -impl BatchOperations for &mut T -where - T: BatchOperations, -{ - fn batch_write( - &self, - entries: &mut dyn Iterator, Self::Column, WriteOperation)>, - ) -> StorageResult<()> { - (**self).batch_write(entries) - } -} diff --git a/crates/storage/src/lib.rs b/crates/storage/src/lib.rs index 35cab3d0670..21116de17f3 100644 --- a/crates/storage/src/lib.rs +++ b/crates/storage/src/lib.rs @@ -7,7 +7,7 @@ #![deny(clippy::arithmetic_side_effects)] #![deny(clippy::cast_possible_truncation)] #![deny(unused_crate_dependencies)] -// #![deny(missing_docs)] +#![deny(missing_docs)] #![deny(warnings)] use core::array::TryFromSliceError; @@ -37,7 +37,11 @@ pub use fuel_vm_private::storage::{ ContractsAssetKey, ContractsStateKey, }; +#[doc(hidden)] pub use paste; +#[cfg(feature = "test-helpers")] +#[doc(hidden)] +pub use rand; /// The storage result alias. pub type Result = core::result::Result; @@ -112,6 +116,8 @@ impl IsNotFound for Result { } } +/// The traits allow work with the storage in batches. +/// Some implementations can perform batch operations faster than one by one. pub trait StorageBatchMutate: StorageMutate { /// Initialize the storage with batch insertion. This method is more performant than /// [`Self::insert_batch`] in some case. 
diff --git a/crates/storage/src/structure.rs b/crates/storage/src/structure.rs index 5af8e796842..067f17e698c 100644 --- a/crates/storage/src/structure.rs +++ b/crates/storage/src/structure.rs @@ -1,3 +1,8 @@ +//! The module defines structures for the [`Mappable`] tables. +//! Each table may have its structure that defines how it works with the storage. +//! The table may have a plain structure that simply works in CRUD mode, or it may be an SMT-based +//! structure that maintains a valid Merkle tree over the storage entries. + use crate::{ codec::{ Decode, @@ -15,14 +20,26 @@ use crate::{ pub mod plain; pub mod sparse; +/// This trait allows defining the agnostic implementation for all storage +/// traits(`StorageInspect,` `StorageMutate,` etc) while the main logic is +/// hidden inside the structure. It allows quickly adding support for new +/// structures only by implementing the trait and reusing the existing +/// infrastructure in other places. It allows changing the structure on the +/// fly in the definition of the table without affecting other areas of the codebase. +/// +/// The structure is responsible for encoding/decoding(usually it is done via `KeyCodec` and `ValueCodec`) +/// the key and value and putting/extracting it to/from the storage. pub trait Structure where M: Mappable, S: KeyValueStore, { + /// The codec used to encode and decode storage key. type KeyCodec: Encode + Decode; + /// The codec used to encode and decode storage value. type ValueCodec: Encode + Decode; + /// Puts the key-value pair into the storage. fn put( storage: &mut S, key: &M::Key, @@ -30,6 +47,7 @@ where value: &M::Value, ) -> StorageResult<()>; + /// Puts the key-value pair into the storage and returns the old value. fn replace( storage: &mut S, key: &M::Key, @@ -37,20 +55,25 @@ where value: &M::Value, ) -> StorageResult>; + /// Takes the value from the storage and returns it. + /// The value is removed from the storage. 
fn take( storage: &mut S, key: &M::Key, column: S::Column, ) -> StorageResult>; + /// Removes the value from the storage. fn delete(storage: &mut S, key: &M::Key, column: S::Column) -> StorageResult<()>; + /// Checks if the value exists in the storage. fn exists(storage: &S, key: &M::Key, column: S::Column) -> StorageResult { let key_encoder = Self::KeyCodec::encode(key); let key_bytes = key_encoder.as_bytes(); storage.exists(key_bytes.as_ref(), column) } + /// Returns the size of the value in the storage. fn size_of_value( storage: &S, key: &M::Key, @@ -61,6 +84,7 @@ where storage.size_of_value(key_bytes.as_ref(), column) } + /// Returns the value from the storage. fn get( storage: &S, key: &M::Key, @@ -77,23 +101,29 @@ where } } -pub trait BatchStructure: Structure +/// It is an extension of the structure that allows supporting batch operations. +/// Usually, they are more performant than initializing/inserting/removing values one by one. +pub trait SupportsBatching: Structure where M: Mappable, S: BatchOperations, { + /// Initializes the storage with a bunch of key-value pairs. + /// In some cases, this method may be more performant than [`Self::insert`]. fn init( storage: &mut S, column: S::Column, set: &mut dyn Iterator, ) -> StorageResult<()>; + /// Inserts the batch of key-value pairs into the storage. fn insert( storage: &mut S, column: S::Column, set: &mut dyn Iterator, ) -> StorageResult<()>; + /// Removes the batch of key-value pairs from the storage. fn remove( storage: &mut S, column: S::Column, diff --git a/crates/storage/src/structure/plain.rs b/crates/storage/src/structure/plain.rs index bbda6c037cd..d1a732b9a64 100644 --- a/crates/storage/src/structure/plain.rs +++ b/crates/storage/src/structure/plain.rs @@ -1,3 +1,8 @@ +//! This module implements the plain structure for the storage. +//! The plain structure is the simplest one. It doesn't maintain any additional data structures +//! and doesn't provide any additional functionality. 
It is just a key-value store that encodes/decodes +//! the key and value and puts/takes them into/from the storage. + use crate::{ codec::{ Decode, @@ -11,8 +16,8 @@ use crate::{ WriteOperation, }, structure::{ - BatchStructure, Structure, + SupportsBatching, }, structured_storage::TableWithStructure, Error as StorageError, @@ -20,6 +25,8 @@ use crate::{ Result as StorageResult, }; +/// The type that represents the plain structure. +/// The `KeyCodec` and `ValueCodec` are used to encode/decode the key and value. pub struct Plain { _marker: core::marker::PhantomData<(KeyCodec, ValueCodec)>, } @@ -85,7 +92,7 @@ where } } -impl BatchStructure for Plain +impl SupportsBatching for Plain where S: BatchOperations, M: Mappable + TableWithStructure>, diff --git a/crates/storage/src/structure/sparse.rs b/crates/storage/src/structure/sparse.rs index 19a6d21258e..3a43fc26b7f 100644 --- a/crates/storage/src/structure/sparse.rs +++ b/crates/storage/src/structure/sparse.rs @@ -1,3 +1,8 @@ +//! The module defines the `Sparse` structure for the storage. +//! The `Sparse` structure implements the sparse merkle tree on top of the storage. +//! It is like a [`Plain`](super::plain::Plain) structure that builds the sparse +//! merkle tree parallel to the normal storage and maintains it. + use crate::{ codec::{ Decode, @@ -12,8 +17,8 @@ use crate::{ WriteOperation, }, structure::{ - BatchStructure, Structure, + SupportsBatching, }, structured_storage::{ StructuredStorage, @@ -40,13 +45,30 @@ use fuel_core_types::fuel_merkle::{ use itertools::Itertools; use std::borrow::Cow; -pub trait MetadataKey { +/// The trait that allows to convert the key of the table into the key of the metadata table. +/// If the key comprises several entities, it is possible to build a Merkle tree over different primary keys. +/// The trait defines the key over which to build an SMT. +pub trait PrimaryKey { + /// The storage key of the table. type InputKey: ?Sized; + /// The extracted primary key. 
type OutputKey: ?Sized; - fn metadata_key(key: &Self::InputKey) -> &Self::OutputKey; + /// Converts the key of the table into the primary key of the metadata table. + fn primary_key(key: &Self::InputKey) -> &Self::OutputKey; } +/// The `Sparse` structure builds the storage as a [`Plain`](super::plain::Plain) +/// structure and maintains the sparse merkle tree by the `Metadata` and `Nodes` tables. +/// +/// It uses the `KeyCodec` and `ValueCodec` to encode/decode the key and value in the +/// same way as a plain structure. +/// +/// The `Metadata` table stores the metadata of the tree(like a root of the tree), +/// and the `Nodes` table stores the tree's nodes. The SMT is built over the encoded +/// keys and values using the same encoding as for main key-value pairs. +/// +/// The `KeyConvertor` is used to convert the key of the table into the primary key of the metadata table. pub struct Sparse { _marker: core::marker::PhantomData<(KeyCodec, ValueCodec, Metadata, Nodes, KeyConvertor)>, @@ -72,14 +94,14 @@ where K: ?Sized, for<'a> StructuredStorage<&'a mut S>: StorageMutate + StorageMutate, - KeyConvertor: MetadataKey, + KeyConvertor: PrimaryKey, { let mut storage = StructuredStorage::new(storage); - let metadata_key = KeyConvertor::metadata_key(key); - // Get latest metadata entry for this `metadata_key` + let primary_key = KeyConvertor::primary_key(key); + // Get latest metadata entry for this `primary_key` let prev_metadata: Cow = storage .storage::() - .get(metadata_key)? + .get(primary_key)? 
.unwrap_or_default(); let root = prev_metadata.root; @@ -94,7 +116,7 @@ where let metadata = SparseMerkleMetadata { root }; storage .storage::() - .insert(metadata_key, &metadata)?; + .insert(primary_key, &metadata)?; Ok(()) } @@ -107,13 +129,13 @@ where K: ?Sized, for<'a> StructuredStorage<&'a mut S>: StorageMutate + StorageMutate, - KeyConvertor: MetadataKey, + KeyConvertor: PrimaryKey, { let mut storage = StructuredStorage::new(storage); - let metadata_key = KeyConvertor::metadata_key(key); - // Get latest metadata entry for this `metadata_key` + let primary_key = KeyConvertor::primary_key(key); + // Get latest metadata entry for this `primary_key` let prev_metadata: Option> = - storage.storage::().get(metadata_key)?; + storage.storage::().get(primary_key)?; if let Some(prev_metadata) = prev_metadata { let root = prev_metadata.root; @@ -127,13 +149,13 @@ where let root = tree.root(); if &root == MerkleTree::::empty_root() { // The tree is now empty; remove the metadata - storage.storage::().remove(metadata_key)?; + storage.storage::().remove(primary_key)?; } else { // Generate new metadata for the updated tree let metadata = SparseMerkleMetadata { root }; storage .storage::() - .insert(metadata_key, &metadata)?; + .insert(primary_key, &metadata)?; } } @@ -154,7 +176,7 @@ where Value = sparse::Primitive, OwnedValue = sparse::Primitive, >, - KeyConvertor: MetadataKey, + KeyConvertor: PrimaryKey, for<'a> StructuredStorage<&'a mut S>: StorageMutate + StorageMutate, { @@ -248,7 +270,7 @@ type NodeKeyCodec = type NodeValueCodec = <::Structure as Structure>::ValueCodec; -impl BatchStructure +impl SupportsBatching for Sparse where S: BatchOperations, @@ -264,7 +286,7 @@ where Value = sparse::Primitive, OwnedValue = sparse::Primitive, > + TableWithStructure, - KeyConvertor: MetadataKey, + KeyConvertor: PrimaryKey, Nodes::Structure: Structure, for<'a> StructuredStorage<&'a mut S>: StorageMutate + StorageMutate @@ -277,16 +299,16 @@ where ) -> StorageResult<()> { let mut 
set = set.peekable(); - let metadata_key; + let primary_key; if let Some((key, _)) = set.peek() { - metadata_key = KeyConvertor::metadata_key(*key); + primary_key = KeyConvertor::primary_key(*key); } else { return Ok(()) } let mut storage = StructuredStorage::new(storage); - if storage.storage::().contains_key(metadata_key)? { + if storage.storage::().contains_key(primary_key)? { return Err(anyhow::anyhow!( "The {} is already initialized", M::column().name() @@ -326,7 +348,7 @@ where let metadata = SparseMerkleMetadata { root }; storage .storage::() - .insert(metadata_key, &metadata)?; + .insert(primary_key, &metadata)?; Ok(()) } @@ -338,9 +360,9 @@ where ) -> StorageResult<()> { let mut set = set.peekable(); - let metadata_key; + let primary_key; if let Some((key, _)) = set.peek() { - metadata_key = KeyConvertor::metadata_key(*key); + primary_key = KeyConvertor::primary_key(*key); } else { return Ok(()) } @@ -348,7 +370,7 @@ where let mut storage = StructuredStorage::new(storage); let prev_metadata: Cow = storage .storage::() - .get(metadata_key)? + .get(primary_key)? .unwrap_or_default(); let root = prev_metadata.root; @@ -379,7 +401,7 @@ where let metadata = SparseMerkleMetadata { root }; storage .storage::() - .insert(metadata_key, &metadata)?; + .insert(primary_key, &metadata)?; Ok(()) } @@ -391,9 +413,9 @@ where ) -> StorageResult<()> { let mut set = set.peekable(); - let metadata_key; + let primary_key; if let Some(key) = set.peek() { - metadata_key = KeyConvertor::metadata_key(*key); + primary_key = KeyConvertor::primary_key(*key); } else { return Ok(()) } @@ -401,7 +423,7 @@ where let mut storage = StructuredStorage::new(storage); let prev_metadata: Cow = storage .storage::() - .get(metadata_key)? + .get(primary_key)? 
.unwrap_or_default(); let root = prev_metadata.root; @@ -426,13 +448,13 @@ where if &root == MerkleTree::::empty_root() { // The tree is now empty; remove the metadata - storage.storage::().remove(metadata_key)?; + storage.storage::().remove(primary_key)?; } else { // Generate new metadata for the updated tree let metadata = SparseMerkleMetadata { root }; storage .storage::() - .insert(metadata_key, &metadata)?; + .insert(primary_key, &metadata)?; } Ok(()) diff --git a/crates/storage/src/structured_storage.rs b/crates/storage/src/structured_storage.rs index ef14915f002..974c2ae768e 100644 --- a/crates/storage/src/structured_storage.rs +++ b/crates/storage/src/structured_storage.rs @@ -1,3 +1,6 @@ +//! The module contains the [`StructuredStorage`] wrapper around the key-value storage +//! that implements the storage traits for the tables with structure. + use crate::{ column::Column, kv_store::{ @@ -5,8 +8,8 @@ use crate::{ KeyValueStore, }, structure::{ - BatchStructure, Structure, + SupportsBatching, }, Error as StorageError, Mappable, @@ -28,18 +31,26 @@ pub mod sealed_block; pub mod state; pub mod transactions; +/// The table can implement this trait to indicate that it has a structure. +/// It inherits the default implementation of the storage traits through the [`StructuredStorage`] +/// for the table. pub trait TableWithStructure: Mappable + Sized { + /// The type of the structure used by the table. type Structure; + /// The column occupied by the table. fn column() -> Column; } +/// The wrapper around the key-value storage that implements the storage traits for the tables +/// with structure. #[derive(Clone, Debug)] pub struct StructuredStorage { pub(crate) storage: S, } impl StructuredStorage { + /// Creates a new instance of the structured storage. 
pub fn new(storage: S) -> Self { Self { storage } } @@ -118,7 +129,7 @@ impl StorageBatchMutate for StructuredStorage where S: BatchOperations, M: Mappable + TableWithStructure, - M::Structure: BatchStructure, + M::Structure: SupportsBatching, { fn init_storage( &mut self, @@ -142,8 +153,11 @@ where } } +/// The module that provides helper macros for testing the structured storage. +#[cfg(feature = "test-helpers")] pub mod test { - use crate::{ + use crate as fuel_core_storage; + use fuel_core_storage::{ column::Column, kv_store::{ BatchOperations, @@ -159,6 +173,7 @@ pub mod test { type Storage = RefCell), Vec>>; + /// The in-memory storage for testing purposes. #[derive(Default, Debug, PartialEq, Eq)] pub struct InMemoryStorage { storage: Storage, @@ -196,6 +211,7 @@ pub mod test { impl BatchOperations for InMemoryStorage {} + /// The macro that generates basic storage tests for the table with [`InMemoryStorage`]. #[macro_export] macro_rules! basic_storage_tests { ($table:ident, $key:expr, $value_insert:expr, $value_return:expr, $random_key:expr) => { @@ -213,6 +229,7 @@ pub mod test { }; use $crate::StorageInspect; use $crate::StorageMutate; + use $crate::rand; #[allow(dead_code)] fn random(rng: &mut R) -> T @@ -304,7 +321,7 @@ pub mod test { #[test] fn batch_mutate_works() { - use rand::{ + use $crate::rand::{ Rng, rngs::StdRng, RngCore, @@ -370,6 +387,7 @@ pub mod test { }; } + /// The macro that generates SMT storage tests for the table with [`InMemoryStorage`]. #[macro_export] macro_rules! 
root_storage_tests { ($table:ident, $metadata_table:ident, $current_key:expr, $foreign_key:expr, $generate_key:ident, $generate_value:ident) => { @@ -384,7 +402,7 @@ pub mod test { }, StorageAsMut, }; - use rand::{ + use $crate::rand::{ rngs::StdRng, SeedableRng, }; @@ -503,8 +521,8 @@ pub mod test { #[test] fn updating_foreign_metadata_does_not_affect_the_given_metadata_insertion() { - let given_metadata_key = $current_key; - let foreign_metadata_key = $foreign_key; + let given_primary_key = $current_key; + let foreign_primary_key = $foreign_key; let mut storage = InMemoryStorage::default(); let mut structured_storage = StructuredStorage::new(&mut storage); @@ -514,8 +532,8 @@ pub mod test { let state_value = $generate_value(rng); // Given - let given_key = $generate_key(&given_metadata_key, rng); - let foreign_key = $generate_key(&foreign_metadata_key, rng); + let given_key = $generate_key(&given_primary_key, rng); + let foreign_key = $generate_key(&foreign_primary_key, rng); structured_storage .storage_as_mut::<$table>() .insert(&given_key, &state_value) diff --git a/crates/storage/src/structured_storage/balances.rs b/crates/storage/src/structured_storage/balances.rs index 8bd758513fe..349a1e15c93 100644 --- a/crates/storage/src/structured_storage/balances.rs +++ b/crates/storage/src/structured_storage/balances.rs @@ -1,3 +1,5 @@ +//! The module contains implementations and tests for the `ContractsAssets` table. + use crate::{ codec::{ manual::Manual, @@ -5,7 +7,7 @@ use crate::{ }, column::Column, structure::sparse::{ - MetadataKey, + PrimaryKey, Sparse, }, structured_storage::TableWithStructure, @@ -20,13 +22,15 @@ use crate::{ }; use fuel_core_types::fuel_vm::ContractsAssetKey; +/// The key convertor used to convert the key from the `ContractsAssets` table +/// to the key of the `ContractsAssetsMerkleMetadata` table. 
pub struct KeyConvertor; -impl MetadataKey for KeyConvertor { +impl PrimaryKey for KeyConvertor { type InputKey = ::Key; type OutputKey = ::Key; - fn metadata_key(key: &Self::InputKey) -> &Self::OutputKey { + fn primary_key(key: &Self::InputKey) -> &Self::OutputKey { key.contract_id() } } @@ -46,40 +50,42 @@ impl TableWithStructure for ContractsAssets { } #[cfg(test)] -fn generate_key( - metadata_key: &::Key, - rng: &mut impl rand::Rng, -) -> ::Key { - let mut bytes = [0u8; 32]; - rng.fill(bytes.as_mut()); - ::Key::new(metadata_key, &bytes.into()) -} +mod test { + use super::*; -#[cfg(test)] -fn generate_key_for_same_contract( - rng: &mut impl rand::Rng, -) -> ::Key { - generate_key(&fuel_core_types::fuel_tx::ContractId::zeroed(), rng) -} + fn generate_key( + primary_key: &::Key, + rng: &mut impl rand::Rng, + ) -> ::Key { + let mut bytes = [0u8; 32]; + rng.fill(bytes.as_mut()); + ::Key::new(primary_key, &bytes.into()) + } -crate::basic_storage_tests!( - ContractsAssets, - ::Key::default(), - ::Value::default(), - ::Value::default(), - generate_key_for_same_contract -); + fn generate_key_for_same_contract( + rng: &mut impl rand::Rng, + ) -> ::Key { + generate_key(&fuel_core_types::fuel_tx::ContractId::zeroed(), rng) + } -#[cfg(test)] -fn generate_value(rng: &mut impl rand::Rng) -> ::Value { - rng.gen() -} + crate::basic_storage_tests!( + ContractsAssets, + ::Key::default(), + ::Value::default(), + ::Value::default(), + generate_key_for_same_contract + ); + + fn generate_value(rng: &mut impl rand::Rng) -> ::Value { + rng.gen() + } -crate::root_storage_tests!( - ContractsAssets, - ContractsAssetsMerkleMetadata, - ::Key::from([1u8; 32]), - ::Key::from([2u8; 32]), - generate_key, - generate_value -); + crate::root_storage_tests!( + ContractsAssets, + ContractsAssetsMerkleMetadata, + ::Key::from([1u8; 32]), + ::Key::from([2u8; 32]), + generate_key, + generate_value + ); +} diff --git a/crates/storage/src/structured_storage/blocks.rs 
b/crates/storage/src/structured_storage/blocks.rs index a1c29c0ee13..2b2ba45c32c 100644 --- a/crates/storage/src/structured_storage/blocks.rs +++ b/crates/storage/src/structured_storage/blocks.rs @@ -1,3 +1,5 @@ +//! The module contains implementations and tests for the `FuelBlocks` table. + use crate::{ codec::{ postcard::Postcard, @@ -17,6 +19,7 @@ impl TableWithStructure for FuelBlocks { } } +#[cfg(test)] crate::basic_storage_tests!( FuelBlocks, ::Key::default(), diff --git a/crates/storage/src/structured_storage/coins.rs b/crates/storage/src/structured_storage/coins.rs index ff740c04565..e4e24d96ca5 100644 --- a/crates/storage/src/structured_storage/coins.rs +++ b/crates/storage/src/structured_storage/coins.rs @@ -1,3 +1,5 @@ +//! The module contains implementations and tests for the `Coins` table. + use crate::{ codec::{ postcard::Postcard, @@ -17,6 +19,7 @@ impl TableWithStructure for Coins { } } +#[cfg(test)] crate::basic_storage_tests!( Coins, ::Key::default(), diff --git a/crates/storage/src/structured_storage/contracts.rs b/crates/storage/src/structured_storage/contracts.rs index ee5e8a5984a..df79222ca42 100644 --- a/crates/storage/src/structured_storage/contracts.rs +++ b/crates/storage/src/structured_storage/contracts.rs @@ -1,3 +1,5 @@ +//! The module contains implementations and tests for the contracts tables. 
+ use crate::{ codec::{ postcard::Postcard, @@ -68,21 +70,26 @@ impl TableWithStructure for ContractsLatestUtxo { } } -crate::basic_storage_tests!( - ContractsRawCode, - ::Key::from([1u8; 32]), - vec![32u8], - ::OwnedValue::from(vec![32u8]) -); +#[cfg(test)] +mod test { + use super::*; + + crate::basic_storage_tests!( + ContractsRawCode, + ::Key::from([1u8; 32]), + vec![32u8], + ::OwnedValue::from(vec![32u8]) + ); -crate::basic_storage_tests!( - ContractsInfo, - ::Key::from([1u8; 32]), - ([2u8; 32].into(), [3u8; 32].into()) -); + crate::basic_storage_tests!( + ContractsInfo, + ::Key::from([1u8; 32]), + ([2u8; 32].into(), [3u8; 32].into()) + ); -crate::basic_storage_tests!( - ContractsLatestUtxo, - ::Key::from([1u8; 32]), - ::Value::default() -); + crate::basic_storage_tests!( + ContractsLatestUtxo, + ::Key::from([1u8; 32]), + ::Value::default() + ); +} diff --git a/crates/storage/src/structured_storage/merkle_data.rs b/crates/storage/src/structured_storage/merkle_data.rs index efb709e2604..27f40dff04d 100644 --- a/crates/storage/src/structured_storage/merkle_data.rs +++ b/crates/storage/src/structured_storage/merkle_data.rs @@ -1,3 +1,5 @@ +//! The module contains implementations and tests for merkle related tables. + use crate::{ codec::{ postcard::Postcard, @@ -30,7 +32,8 @@ macro_rules! merkle_table { } } - crate::basic_storage_tests!( + #[cfg(test)] + $crate::basic_storage_tests!( $table, <$table as $crate::Mappable>::Key::default(), <$table as $crate::Mappable>::Value::default() diff --git a/crates/storage/src/structured_storage/messages.rs b/crates/storage/src/structured_storage/messages.rs index 90a6db8d94a..0d9390fe6be 100644 --- a/crates/storage/src/structured_storage/messages.rs +++ b/crates/storage/src/structured_storage/messages.rs @@ -1,3 +1,5 @@ +//! The module contains implementations and tests for the messages tables. 
+ use crate::{ codec::{ postcard::Postcard, @@ -28,14 +30,19 @@ impl TableWithStructure for SpentMessages { } } -crate::basic_storage_tests!( - Messages, - ::Key::default(), - ::Value::default() -); - -crate::basic_storage_tests!( - SpentMessages, - ::Key::default(), - ::Value::default() -); +#[cfg(test)] +mod test { + use super::*; + + crate::basic_storage_tests!( + Messages, + ::Key::default(), + ::Value::default() + ); + + crate::basic_storage_tests!( + SpentMessages, + ::Key::default(), + ::Value::default() + ); +} diff --git a/crates/storage/src/structured_storage/receipts.rs b/crates/storage/src/structured_storage/receipts.rs index 54c7789653a..0e78ee36fdb 100644 --- a/crates/storage/src/structured_storage/receipts.rs +++ b/crates/storage/src/structured_storage/receipts.rs @@ -1,3 +1,5 @@ +//! The module contains implementations and tests for the `Receipts` table. + use crate::{ codec::{ postcard::Postcard, @@ -17,6 +19,7 @@ impl TableWithStructure for Receipts { } } +#[cfg(test)] crate::basic_storage_tests!( Receipts, ::Key::from([1u8; 32]), diff --git a/crates/storage/src/structured_storage/sealed_block.rs b/crates/storage/src/structured_storage/sealed_block.rs index 3f2ce0dded5..2c201f7623d 100644 --- a/crates/storage/src/structured_storage/sealed_block.rs +++ b/crates/storage/src/structured_storage/sealed_block.rs @@ -1,3 +1,5 @@ +//! The module contains implementations and tests for the `SealedBlockConsensus` table. + use crate::{ codec::{ postcard::Postcard, @@ -17,6 +19,7 @@ impl TableWithStructure for SealedBlockConsensus { } } +#[cfg(test)] crate::basic_storage_tests!( SealedBlockConsensus, ::Key::from([1u8; 32]), diff --git a/crates/storage/src/structured_storage/state.rs b/crates/storage/src/structured_storage/state.rs index 326c7f15e04..2d63470f1e3 100644 --- a/crates/storage/src/structured_storage/state.rs +++ b/crates/storage/src/structured_storage/state.rs @@ -1,3 +1,5 @@ +//! 
The module contains implementations and tests for the `ContractsState` table. + use crate::{ codec::{ manual::Manual, @@ -5,7 +7,7 @@ use crate::{ }, column::Column, structure::sparse::{ - MetadataKey, + PrimaryKey, Sparse, }, structured_storage::TableWithStructure, @@ -20,13 +22,15 @@ use crate::{ }; use fuel_core_types::fuel_vm::ContractsStateKey; +/// The key convertor used to convert the key from the `ContractsState` table +/// to the key of the `ContractsStateMerkleMetadata` table. pub struct KeyConvertor; -impl MetadataKey for KeyConvertor { +impl PrimaryKey for KeyConvertor { type InputKey = ::Key; type OutputKey = ::Key; - fn metadata_key(key: &Self::InputKey) -> &Self::OutputKey { + fn primary_key(key: &Self::InputKey) -> &Self::OutputKey { key.contract_id() } } @@ -46,42 +50,44 @@ impl TableWithStructure for ContractsState { } #[cfg(test)] -fn generate_key( - metadata_key: &::Key, - rng: &mut impl rand::Rng, -) -> ::Key { - let mut bytes = [0u8; 32]; - rng.fill(bytes.as_mut()); - ::Key::new(metadata_key, &bytes.into()) -} +mod test { + use super::*; -#[cfg(test)] -fn generate_key_for_same_contract( - rng: &mut impl rand::Rng, -) -> ::Key { - generate_key(&fuel_core_types::fuel_tx::ContractId::zeroed(), rng) -} + fn generate_key( + primary_key: &::Key, + rng: &mut impl rand::Rng, + ) -> ::Key { + let mut bytes = [0u8; 32]; + rng.fill(bytes.as_mut()); + ::Key::new(primary_key, &bytes.into()) + } -crate::basic_storage_tests!( - ContractsState, - ::Key::default(), - ::Value::zeroed(), - ::Value::zeroed(), - generate_key_for_same_contract -); + fn generate_key_for_same_contract( + rng: &mut impl rand::Rng, + ) -> ::Key { + generate_key(&fuel_core_types::fuel_tx::ContractId::zeroed(), rng) + } -#[cfg(test)] -fn generate_value(rng: &mut impl rand::Rng) -> ::Value { - let mut bytes = [0u8; 32]; - rng.fill(bytes.as_mut()); - bytes.into() -} + crate::basic_storage_tests!( + ContractsState, + ::Key::default(), + ::Value::zeroed(), + ::Value::zeroed(), + 
generate_key_for_same_contract + ); + + fn generate_value(rng: &mut impl rand::Rng) -> ::Value { + let mut bytes = [0u8; 32]; + rng.fill(bytes.as_mut()); + bytes.into() + } -crate::root_storage_tests!( - ContractsState, - ContractsStateMerkleMetadata, - ::Key::from([1u8; 32]), - ::Key::from([2u8; 32]), - generate_key, - generate_value -); + crate::root_storage_tests!( + ContractsState, + ContractsStateMerkleMetadata, + ::Key::from([1u8; 32]), + ::Key::from([2u8; 32]), + generate_key, + generate_value + ); +} diff --git a/crates/storage/src/structured_storage/transactions.rs b/crates/storage/src/structured_storage/transactions.rs index 47b5d9d8fba..1b9e99131a1 100644 --- a/crates/storage/src/structured_storage/transactions.rs +++ b/crates/storage/src/structured_storage/transactions.rs @@ -1,3 +1,5 @@ +//! The module contains implementations and tests for the `Transactions` table. + use crate::{ codec::{ postcard::Postcard, @@ -17,6 +19,7 @@ impl TableWithStructure for Transactions { } } +#[cfg(test)] crate::basic_storage_tests!( Transactions, ::Key::from([1u8; 32]), diff --git a/crates/storage/src/tables.rs b/crates/storage/src/tables.rs index 1e1dfe3b22f..75b8f25162d 100644 --- a/crates/storage/src/tables.rs +++ b/crates/storage/src/tables.rs @@ -122,6 +122,7 @@ impl Mappable for Transactions { type OwnedValue = Transaction; } +/// The module contains definition of merkle-related tables. 
pub mod merkle { use crate::{ Mappable, From 33661bcb68524af773dba5b101affcc59f90de52 Mon Sep 17 00:00:00 2001 From: xgreenx Date: Wed, 27 Dec 2023 00:43:59 +0100 Subject: [PATCH 03/28] Updated CHANGELOG.md --- CHANGELOG.md | 108 ++++++++++++++++++ crates/fuel-core/src/database/coin.rs | 29 ++--- crates/fuel-core/src/database/transactions.rs | 46 ++++---- 3 files changed, 148 insertions(+), 35 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b2cf0978023..c0562bc4478 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,114 @@ and this project adheres to [Semantic Versioning](http://semver.org/). Description of the upcoming release here. +### Changed + +#### Breaking +- [#1576](https://github.com/FuelLabs/fuel-core/pull/1576): The change moves the implementation of the storage traits for required tables from `fuel-core` to `fuel-core-storage` crate. The change also adds a more flexible configuration of the encoding/decoding per the table and allows the implementation of specific behaviors for the table in a much easier way. It unifies the encoding between database, SMTs, and iteration, preventing mismatching bytes representation on the Rust type system level. Plus, it increases the re-usage of the code by applying the same structure to other tables. + + It is a breaking PR because it changes database encoding/decoding for some tables. + + ### StructuredStorage + + The change adds a new type `StructuredStorage`. It is a wrapper around the key-value storage that implements the storage traits(`StorageInspect`, `StorageMutate`, `StorageRead`, etc) for the tables with structure. This structure works in tandem with the `TableWithStructure` trait. The table may implement `TableWithStructure` specifying the structure, as an example: + + ```rust + impl TableWithStructure for ContractsRawCode { + type Structure = Plain; + + fn column() -> Column { + Column::ContractsRawCode + } + } + ``` + + It is a definition of the structure for the `ContractsRawCode` table. 
It has a plain structure meaning it simply encodes/decodes bytes and stores/loads them into/from the storage. As a key codec and value codec, it uses a `Raw` encoding/decoding that simplifies writing bytes and loads them back into the memory without applying any serialization or deserialization algorithm. + + If the table implements `TableWithStructure` and the selected codec satisfies all structure requirements, the corresponding storage traits for that table are implemented on the `StructuredStorage` type. + + ### Codecs + + Each structure allows customizing the key and value codecs. It allows the use of different codecs for different tables, taking into account the complexity and weight of the data and providing a way of more optimal implementation. + + That property may be very useful to perform migration in a more easier way. Plus, it also can be a `no_std` migration potentially allowing its fraud proving. + + An example of migration: + + ```rust + /// Define the table for V1 value encoding/decoding. + impl TableWithStructure for ContractsRawCodeV1 { + type Structure = Plain; + + fn column() -> Column { + Column::ContractsRawCode + } + } + + /// Define the table for V2 value encoding/decoding. + /// It uses `Postcard` codec for the value instead of `Raw` codec. + /// + /// # Dev-note: The columns is the same. + impl TableWithStructure for ContractsRawCodeV2 { + type Structure = Plain; + + fn column() -> Column { + Column::ContractsRawCode + } + } + + fn migration(storage: &mut Database) { + let mut iter = storage.iter_all::(None); + while let Ok((key, value)) = iter.next() { + // Insert into the same table but with another codec. + storage.storage::().insert(key, value); + } + } + ``` + + ### Structures + + The structure of the table defines its behavior. As an example, a `Plain` structure simply encodes/decodes bytes and stores/loads them into/from the storage. The `SMT` structure builds a sparse merkle tree on top of the key-value pairs. 
+ + Implementing a structure one time, we can apply it to any table satisfying the requirements of this structure. It increases the re-usage of the code and minimizes duplication. + + It can be useful if we decide to create global roots for all required tables that are used in fraud proving. + + ```rust + impl TableWithStructure for SpentMessages { + type Structure = Plain; + + fn column() -> Column { + Column::SpentMessages + } + } + | + | + \|/ + + impl TableWithStructure for SpentMessages { + type Structure = + Sparse; + + fn column() -> Column { + Column::SpentMessages + } + } + ``` + + ### Side changes + + #### `iter_all` + The `iter_all` functionality now accepts the table instead of `K` and `V` generics. It is done to use the correct codec during deserialization. Also, the table definition provides the column. + + #### Duplicated unit tests + + The `fuel-core-storage` crate provides macros that generate unit tests. Almost all tables had the same test like `get`, `insert`, `remove`, `exist`. All duplicated tests were moved to macros. The unique one still stays at the same place where it was before. + + #### `StorageBatchMutate` + + Added a new `StorageBatchMutate` trait that we can move to `fuel-storage` crate later. It allows batch operations on the storage. It may be more performant in some cases. 
+ + ## [Version 0.22.0] ### Added diff --git a/crates/fuel-core/src/database/coin.rs b/crates/fuel-core/src/database/coin.rs index fdb25b47ef6..9a31ad3b07d 100644 --- a/crates/fuel-core/src/database/coin.rs +++ b/crates/fuel-core/src/database/coin.rs @@ -61,20 +61,23 @@ impl TableWithStructure for OwnedCoins { } #[cfg(test)] -fn generate_key(rng: &mut impl rand::Rng) -> ::Key { - let mut bytes = [0u8; 65]; - rng.fill(bytes.as_mut()); - bytes -} +mod test { + use super::*; -#[cfg(test)] -fuel_core_storage::basic_storage_tests!( - OwnedCoins, - [0u8; 65], - ::Value::default(), - ::Value::default(), - generate_key -); + fn generate_key(rng: &mut impl rand::Rng) -> ::Key { + let mut bytes = [0u8; 65]; + rng.fill(bytes.as_mut()); + bytes + } + + fuel_core_storage::basic_storage_tests!( + OwnedCoins, + [0u8; 65], + ::Value::default(), + ::Value::default(), + generate_key + ); +} impl StorageInspect for Database { type Error = StorageError; diff --git a/crates/fuel-core/src/database/transactions.rs b/crates/fuel-core/src/database/transactions.rs index 61b9d435bad..1ac351870c3 100644 --- a/crates/fuel-core/src/database/transactions.rs +++ b/crates/fuel-core/src/database/transactions.rs @@ -53,22 +53,6 @@ impl TableWithStructure for OwnedTransactions { } } -#[cfg(test)] -fn generate_key(rng: &mut impl rand::Rng) -> ::Key { - let mut bytes = [0u8; INDEX_SIZE]; - rng.fill(bytes.as_mut()); - bytes.into() -} - -#[cfg(test)] -fuel_core_storage::basic_storage_tests!( - OwnedTransactions, - [1u8; INDEX_SIZE].into(), - ::Value::default(), - ::Value::default(), - generate_key -); - /// The table stores the status of each transaction. 
pub struct TransactionStatuses; @@ -88,13 +72,31 @@ impl TableWithStructure for TransactionStatuses { } #[cfg(test)] -fuel_core_storage::basic_storage_tests!( - TransactionStatuses, - ::Key::default(), - TransactionStatus::Submitted { - time: fuel_core_types::tai64::Tai64::UNIX_EPOCH, +mod test { + use super::*; + + fn generate_key(rng: &mut impl rand::Rng) -> ::Key { + let mut bytes = [0u8; INDEX_SIZE]; + rng.fill(bytes.as_mut()); + bytes.into() } -); + + fuel_core_storage::basic_storage_tests!( + OwnedTransactions, + [1u8; INDEX_SIZE].into(), + ::Value::default(), + ::Value::default(), + generate_key + ); + + fuel_core_storage::basic_storage_tests!( + TransactionStatuses, + ::Key::default(), + TransactionStatus::Submitted { + time: fuel_core_types::tai64::Tai64::UNIX_EPOCH, + } + ); +} impl Database { pub fn all_transactions( From 82df7e2065fee76e98abe1278b6b8ebd01858191 Mon Sep 17 00:00:00 2001 From: xgreenx Date: Fri, 29 Dec 2023 18:17:22 +0100 Subject: [PATCH 04/28] Moved insertion of the blocks into the `BlockImporter` instead of the executor --- Cargo.lock | 1 + crates/fuel-core/src/service.rs | 3 + .../src/service/adapters/block_importer.rs | 46 +++-- .../service/adapters/consensus_module/poa.rs | 40 ++++- .../src/service/adapters/executor.rs | 9 +- .../src/service/adapters/producer.rs | 34 +++- crates/fuel-core/src/service/genesis.rs | 7 +- crates/fuel-core/src/service/sub_services.rs | 4 +- .../consensus_module/poa/src/ports.rs | 6 +- .../consensus_module/poa/src/service.rs | 55 ++++-- .../consensus_module/poa/src/service_test.rs | 6 +- .../service_test/manually_produce_tests.rs | 5 +- crates/services/executor/src/executor.rs | 12 -- crates/services/importer/Cargo.toml | 1 + crates/services/importer/src/importer.rs | 54 ++++-- crates/services/importer/src/importer/test.rs | 158 ++++++++++++------ crates/services/importer/src/ports.rs | 16 +- .../services/producer/src/block_producer.rs | 67 +++++++- .../producer/src/block_producer/tests.rs | 12 +- 
crates/services/producer/src/mocks.rs | 35 +--- crates/services/producer/src/ports.rs | 8 +- tests/Cargo.toml | 2 +- tests/tests/tx.rs | 115 ++++--------- 23 files changed, 423 insertions(+), 273 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 479ee9fa62f..cc1e1924e76 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2843,6 +2843,7 @@ version = "0.22.0" dependencies = [ "anyhow", "derive_more", + "fuel-core-chain-config", "fuel-core-metrics", "fuel-core-storage", "fuel-core-trace", diff --git a/crates/fuel-core/src/service.rs b/crates/fuel-core/src/service.rs index a6497bfc4a7..3d5240cab28 100644 --- a/crates/fuel-core/src/service.rs +++ b/crates/fuel-core/src/service.rs @@ -19,6 +19,7 @@ pub use config::{ }; pub use fuel_core_services::Service as ServiceTrait; +use crate::service::adapters::PoAAdapter; pub use fuel_core_consensus_module::RelayerVerifierConfig; use self::adapters::BlockImporterAdapter; @@ -32,6 +33,8 @@ pub mod sub_services; #[derive(Clone)] pub struct SharedState { + /// The PoA adaptor around the shared state of the consensus module. + pub poa_adapter: PoAAdapter, /// The transaction pool shared state. pub txpool: fuel_core_txpool::service::SharedState, /// The P2P network shared state. 
diff --git a/crates/fuel-core/src/service/adapters/block_importer.rs b/crates/fuel-core/src/service/adapters/block_importer.rs index 3fc939a0f7b..f44191d26c0 100644 --- a/crates/fuel-core/src/service/adapters/block_importer.rs +++ b/crates/fuel-core/src/service/adapters/block_importer.rs @@ -6,6 +6,7 @@ use crate::{ VerifierAdapter, }, }; +use fuel_core_chain_config::ChainConfig; use fuel_core_importer::{ ports::{ BlockVerifier, @@ -18,14 +19,20 @@ use fuel_core_importer::{ }; use fuel_core_poa::ports::RelayerPort; use fuel_core_storage::{ - tables::SealedBlockConsensus, + tables::{ + FuelBlocks, + SealedBlockConsensus, + }, transactional::StorageTransaction, Result as StorageResult, StorageAsMut, }; use fuel_core_types::{ blockchain::{ - block::Block, + block::{ + Block, + CompressedBlock, + }, consensus::Consensus, primitives::{ BlockId, @@ -42,16 +49,20 @@ use fuel_core_types::{ }; use std::sync::Arc; -use super::MaybeRelayerAdapter; +use super::{ + MaybeRelayerAdapter, + TransactionsSource, +}; impl BlockImporterAdapter { pub fn new( config: Config, + chain_config: &ChainConfig, database: Database, executor: ExecutorAdapter, verifier: VerifierAdapter, ) -> Self { - let importer = Importer::new(config, database, executor, verifier); + let importer = Importer::new(config, chain_config, database, executor, verifier); importer.init_metrics(); Self { block_importer: Arc::new(importer), @@ -112,8 +123,8 @@ impl RelayerPort for MaybeRelayerAdapter { } impl ImporterDatabase for Database { - fn latest_block_height(&self) -> StorageResult { - self.latest_height() + fn latest_block_height(&self) -> StorageResult> { + Ok(self.ids_of_latest_block()?.map(|(height, _)| height)) } fn increase_tx_count(&self, new_txs_count: u64) -> StorageResult { @@ -126,10 +137,21 @@ impl ExecutorDatabase for Database { &mut self, block_id: &BlockId, consensus: &Consensus, - ) -> StorageResult> { - self.storage::() - .insert(block_id, consensus) - .map_err(Into::into) + ) -> StorageResult> { 
+ Ok(self + .storage::() + .insert(block_id, consensus)? + .map(|_| ())) + } + fn block( + &mut self, + block_id: &BlockId, + block: &CompressedBlock, + ) -> StorageResult> { + Ok(self + .storage::() + .insert(block_id, block)? + .map(|_| ())) } } @@ -141,6 +163,8 @@ impl Executor for ExecutorAdapter { block: Block, ) -> ExecutorResult>> { - self._execute_without_commit(ExecutionTypes::Validation(block)) + self._execute_without_commit::(ExecutionTypes::Validation( + block, + )) } } diff --git a/crates/fuel-core/src/service/adapters/consensus_module/poa.rs b/crates/fuel-core/src/service/adapters/consensus_module/poa.rs index 46ed86fdc1b..e53b37e11bd 100644 --- a/crates/fuel-core/src/service/adapters/consensus_module/poa.rs +++ b/crates/fuel-core/src/service/adapters/consensus_module/poa.rs @@ -18,13 +18,19 @@ use fuel_core_poa::{ P2pPort, TransactionPool, }, - service::SharedState, + service::{ + Mode, + SharedState, + }, }; use fuel_core_services::stream::BoxStream; use fuel_core_storage::transactional::StorageTransaction; use fuel_core_types::{ fuel_asm::Word, - fuel_tx::TxId, + fuel_tx::{ + Transaction, + TxId, + }, fuel_types::BlockHeight, services::{ block_importer::{ @@ -45,6 +51,18 @@ impl PoAAdapter { pub fn new(shared_state: Option) -> Self { Self { shared_state } } + + pub async fn manually_produce_blocks( + &self, + start_time: Option, + mode: Mode, + ) -> anyhow::Result<()> { + self.shared_state + .as_ref() + .ok_or(anyhow!("The block production is disabled"))? + .manually_produce_block(start_time, mode) + .await + } } #[async_trait::async_trait] @@ -54,10 +72,7 @@ impl ConsensusModulePort for PoAAdapter { start_time: Option, number_of_blocks: u32, ) -> anyhow::Result<()> { - self.shared_state - .as_ref() - .ok_or(anyhow!("The block production is disabled"))? 
- .manually_produce_block(start_time, number_of_blocks) + self.manually_produce_blocks(start_time, Mode::Blocks { number_of_blocks }) .await } } @@ -91,11 +106,18 @@ impl fuel_core_poa::ports::BlockProducer for BlockProducerAdapter { &self, height: BlockHeight, block_time: Tai64, + txs: Option>, max_gas: Word, ) -> anyhow::Result>> { - self.block_producer - .produce_and_execute_block(height, block_time, max_gas) - .await + if let Some(txs) = txs { + self.block_producer + .produce_and_execute_block_transactions(height, block_time, txs, max_gas) + .await + } else { + self.block_producer + .produce_and_execute_block_txpool(height, block_time, max_gas) + .await + } } } diff --git a/crates/fuel-core/src/service/adapters/executor.rs b/crates/fuel-core/src/service/adapters/executor.rs index bb6f27083f3..bb8e46042db 100644 --- a/crates/fuel-core/src/service/adapters/executor.rs +++ b/crates/fuel-core/src/service/adapters/executor.rs @@ -50,10 +50,13 @@ impl fuel_core_executor::ports::TransactionsSource for TransactionsSource { } impl ExecutorAdapter { - pub(crate) fn _execute_without_commit( + pub(crate) fn _execute_without_commit( &self, - block: ExecutionBlockWithSource, - ) -> ExecutorResult>> { + block: ExecutionBlockWithSource, + ) -> ExecutorResult>> + where + TxSource: fuel_core_executor::ports::TransactionsSource, + { let executor = Executor { database: self.relayer.database.clone(), relayer: self.relayer.clone(), diff --git a/crates/fuel-core/src/service/adapters/producer.rs b/crates/fuel-core/src/service/adapters/producer.rs index 5def3cc1943..f966c48e337 100644 --- a/crates/fuel-core/src/service/adapters/producer.rs +++ b/crates/fuel-core/src/service/adapters/producer.rs @@ -11,6 +11,7 @@ use crate::{ sub_services::BlockProducerService, }, }; +use fuel_core_executor::executor::OnceTransactionsSource; use fuel_core_producer::ports::TxPool; use fuel_core_storage::{ not_found, @@ -25,7 +26,10 @@ use fuel_core_types::{ primitives, }, fuel_tx, - fuel_tx::Receipt, + 
fuel_tx::{ + Receipt, + Transaction, + }, fuel_types::{ BlockHeight, Bytes32, @@ -61,18 +65,38 @@ impl TxPool for TxPoolAdapter { } } -#[async_trait::async_trait] -impl fuel_core_producer::ports::Executor for ExecutorAdapter { +impl fuel_core_producer::ports::Executor for ExecutorAdapter { type Database = Database; - type TxSource = TransactionsSource; fn execute_without_commit( &self, - component: Components, + component: Components, ) -> ExecutorResult>> { self._execute_without_commit(ExecutionTypes::Production(component)) } +} + +impl fuel_core_producer::ports::Executor> for ExecutorAdapter { + type Database = Database; + + fn execute_without_commit( + &self, + component: Components>, + ) -> ExecutorResult>> { + let Components { + header_to_produce, + transactions_source, + gas_limit, + } = component; + self._execute_without_commit(ExecutionTypes::Production(Components { + header_to_produce, + transactions_source: OnceTransactionsSource::new(transactions_source), + gas_limit, + })) + } +} +impl fuel_core_producer::ports::DryRunner for ExecutorAdapter { fn dry_run( &self, block: Components, diff --git a/crates/fuel-core/src/service/genesis.rs b/crates/fuel-core/src/service/genesis.rs index 8da0fd49637..31c409b607e 100644 --- a/crates/fuel-core/src/service/genesis.rs +++ b/crates/fuel-core/src/service/genesis.rs @@ -16,7 +16,6 @@ use fuel_core_storage::{ ContractsInfo, ContractsLatestUtxo, ContractsRawCode, - FuelBlocks, Messages, }, transactional::Transactional, @@ -125,11 +124,6 @@ fn import_genesis_block( &[], ); - let block_id = block.id(); - database.storage::().insert( - &block_id, - &block.compress(&config.chain_conf.consensus_parameters.chain_id), - )?; let consensus = Consensus::Genesis(genesis); let block = SealedBlock { entity: block, @@ -138,6 +132,7 @@ fn import_genesis_block( let importer = Importer::new( config.block_importer.clone(), + &config.chain_conf, original_database.clone(), (), (), diff --git a/crates/fuel-core/src/service/sub_services.rs 
b/crates/fuel-core/src/service/sub_services.rs index 36abbf6c54b..0a027cc3d2e 100644 --- a/crates/fuel-core/src/service/sub_services.rs +++ b/crates/fuel-core/src/service/sub_services.rs @@ -89,6 +89,7 @@ pub fn init_sub_services( let importer_adapter = BlockImporterAdapter::new( config.block_importer.clone(), + &config.chain_conf, database.clone(), executor.clone(), verifier.clone(), @@ -205,13 +206,14 @@ pub fn init_sub_services( Box::new(database.clone()), Box::new(tx_pool_adapter), Box::new(producer_adapter), - Box::new(poa_adapter), + Box::new(poa_adapter.clone()), Box::new(p2p_adapter), config.query_log_threshold_time, config.api_request_timeout, )?; let shared = SharedState { + poa_adapter, txpool: txpool.shared.clone(), #[cfg(feature = "p2p")] network: network.as_ref().map(|n| n.shared.clone()), diff --git a/crates/services/consensus_module/poa/src/ports.rs b/crates/services/consensus_module/poa/src/ports.rs index 967f64ef94c..1866b871cbc 100644 --- a/crates/services/consensus_module/poa/src/ports.rs +++ b/crates/services/consensus_module/poa/src/ports.rs @@ -9,7 +9,10 @@ use fuel_core_types::{ primitives::DaBlockHeight, }, fuel_asm::Word, - fuel_tx::TxId, + fuel_tx::{ + Transaction, + TxId, + }, fuel_types::{ BlockHeight, Bytes32, @@ -49,6 +52,7 @@ pub trait BlockProducer: Send + Sync { &self, height: BlockHeight, block_time: Tai64, + txs: Option>, max_gas: Word, ) -> anyhow::Result>>; } diff --git a/crates/services/consensus_module/poa/src/service.rs b/crates/services/consensus_module/poa/src/service.rs index 46b84e14a26..918304f2a1f 100644 --- a/crates/services/consensus_module/poa/src/service.rs +++ b/crates/services/consensus_module/poa/src/service.rs @@ -42,7 +42,10 @@ use fuel_core_types::{ }, fuel_asm::Word, fuel_crypto::Signature, - fuel_tx::TxId, + fuel_tx::{ + Transaction, + TxId, + }, fuel_types::BlockHeight, secrecy::{ ExposeSecret, @@ -81,16 +84,13 @@ impl SharedState { pub async fn manually_produce_block( &self, start_time: Option, - 
number_of_blocks: u32, + mode: Mode, ) -> anyhow::Result<()> { let (sender, receiver) = oneshot::channel(); self.request_sender .send(Request::ManualBlocks(( - ManualProduction { - start_time, - number_of_blocks, - }, + ManualProduction { start_time, mode }, sender, ))) .await?; @@ -98,9 +98,16 @@ impl SharedState { } } +pub enum Mode { + /// Produces `number_of_blocks` blocks using `TxPool` as a source of transactions. + Blocks { number_of_blocks: u32 }, + /// Produces one block with the given transactions. + BlockWithTransactions(Vec), +} + struct ManualProduction { pub start_time: Option, - pub number_of_blocks: u32, + pub mode: Mode, } /// Requests accepted by the task. @@ -248,9 +255,10 @@ where &self, height: BlockHeight, block_time: Tai64, + txs: Option>, ) -> anyhow::Result>> { self.block_producer - .produce_and_execute_block(height, block_time, self.block_gas_limit) + .produce_and_execute_block(height, block_time, txs, self.block_gas_limit) .await } @@ -258,6 +266,7 @@ where self.produce_block( self.next_height(), self.next_time(RequestType::Trigger)?, + None, RequestType::Trigger, ) .await @@ -272,10 +281,28 @@ where } else { self.next_time(RequestType::Manual)? 
}; - for _ in 0..block_production.number_of_blocks { - self.produce_block(self.next_height(), block_time, RequestType::Manual) + match block_production.mode { + Mode::Blocks { number_of_blocks } => { + for _ in 0..number_of_blocks { + self.produce_block( + self.next_height(), + block_time, + None, + RequestType::Manual, + ) + .await?; + block_time = self.next_time(RequestType::Manual)?; + } + } + Mode::BlockWithTransactions(txs) => { + self.produce_block( + self.next_height(), + block_time, + Some(txs), + RequestType::Manual, + ) .await?; - block_time = self.next_time(RequestType::Manual)?; + } } Ok(()) } @@ -284,6 +311,7 @@ where &mut self, height: BlockHeight, block_time: Tai64, + txs: Option>, request_type: RequestType, ) -> anyhow::Result<()> { let last_block_created = Instant::now(); @@ -304,7 +332,10 @@ where tx_status, }, db_transaction, - ) = self.signal_produce_block(height, block_time).await?.into(); + ) = self + .signal_produce_block(height, block_time, txs) + .await? + .into(); let mut tx_ids_to_remove = Vec::with_capacity(skipped_transactions.len()); for (tx_id, err) in skipped_transactions { diff --git a/crates/services/consensus_module/poa/src/service_test.rs b/crates/services/consensus_module/poa/src/service_test.rs index 864a4e7d94b..44525e3be62 100644 --- a/crates/services/consensus_module/poa/src/service_test.rs +++ b/crates/services/consensus_module/poa/src/service_test.rs @@ -123,7 +123,7 @@ impl TestContextBuilder { let mut producer = MockBlockProducer::default(); producer .expect_produce_and_execute_block() - .returning(|_, _, _| { + .returning(|_, _, _, _| { Ok(UncommittedResult::new( ExecutionResult { block: Default::default(), @@ -272,7 +272,7 @@ async fn remove_skipped_transactions() { block_producer .expect_produce_and_execute_block() .times(1) - .returning(move |_, _, _| { + .returning(move |_, _, _, _| { Ok(UncommittedResult::new( ExecutionResult { block: Default::default(), @@ -357,7 +357,7 @@ async fn 
does_not_produce_when_txpool_empty_in_instant_mode() { block_producer .expect_produce_and_execute_block() - .returning(|_, _, _| panic!("Block production should not be called")); + .returning(|_, _, _, _| panic!("Block production should not be called")); let mut block_importer = MockBlockImporter::default(); diff --git a/crates/services/consensus_module/poa/src/service_test/manually_produce_tests.rs b/crates/services/consensus_module/poa/src/service_test/manually_produce_tests.rs index 47bb8d5e30f..3699fffb39b 100644 --- a/crates/services/consensus_module/poa/src/service_test/manually_produce_tests.rs +++ b/crates/services/consensus_module/poa/src/service_test/manually_produce_tests.rs @@ -1,3 +1,4 @@ +use crate::service::Mode; use fuel_core_types::{ blockchain::block::Block, tai64::Tai64, @@ -82,7 +83,7 @@ async fn can_manually_produce_block( let mut producer = MockBlockProducer::default(); producer .expect_produce_and_execute_block() - .returning(|_, time, _| { + .returning(|_, time, _, _| { let mut block = Block::default(); block.header_mut().consensus.time = time; block.header_mut().recalculate_metadata(); @@ -101,7 +102,7 @@ async fn can_manually_produce_block( ctx.service .shared - .manually_produce_block(Some(start_time), number_of_blocks) + .manually_produce_block(Some(start_time), Mode::Blocks { number_of_blocks }) .await .unwrap(); for tx in txs { diff --git a/crates/services/executor/src/executor.rs b/crates/services/executor/src/executor.rs index 15706793410..1a9f5cdb272 100644 --- a/crates/services/executor/src/executor.rs +++ b/crates/services/executor/src/executor.rs @@ -12,7 +12,6 @@ use fuel_core_storage::{ Coins, ContractsInfo, ContractsLatestUtxo, - FuelBlocks, Messages, Receipts, SpentMessages, @@ -458,17 +457,6 @@ where // ------------ GraphQL API Functionality END ------------ - // insert block into database - block_st_transaction - .as_mut() - .storage::() - .insert( - &finalized_block_id, - &result - .block - 
.compress(&self.config.consensus_parameters.chain_id), - )?; - // Get the complete fuel block. Ok(UncommittedResult::new(result, block_st_transaction)) } diff --git a/crates/services/importer/Cargo.toml b/crates/services/importer/Cargo.toml index a8b359ceb76..7cd93840428 100644 --- a/crates/services/importer/Cargo.toml +++ b/crates/services/importer/Cargo.toml @@ -12,6 +12,7 @@ description = "Fuel Block Importer" [dependencies] anyhow = { workspace = true } derive_more = { workspace = true } +fuel-core-chain-config = { workspace = true } fuel-core-metrics = { workspace = true } fuel-core-storage = { workspace = true } fuel-core-types = { workspace = true } diff --git a/crates/services/importer/src/importer.rs b/crates/services/importer/src/importer.rs index ca1256005bb..c3844cf2357 100644 --- a/crates/services/importer/src/importer.rs +++ b/crates/services/importer/src/importer.rs @@ -7,11 +7,12 @@ use crate::{ }, Config, }; +use fuel_core_chain_config::ChainConfig; use fuel_core_metrics::importer::importer_metrics; use fuel_core_storage::{ + not_found, transactional::StorageTransaction, Error as StorageError, - IsNotFound, }; use fuel_core_types::{ blockchain::{ @@ -22,7 +23,10 @@ use fuel_core_types::{ primitives::BlockId, SealedBlock, }, - fuel_types::BlockHeight, + fuel_types::{ + BlockHeight, + ChainId, + }, services::{ block_importer::{ ImportResult, @@ -59,8 +63,8 @@ pub enum Error { )] InvalidUnderlyingDatabaseGenesisState, #[display(fmt = "The wrong state of database after execution of the block.\ - The actual height is {_1}, when the next expected height is {_0}.")] - InvalidDatabaseStateAfterExecution(BlockHeight, BlockHeight), + The actual height is {_1:?}, when the next expected height is {_0:?}.")] + InvalidDatabaseStateAfterExecution(Option, Option), #[display(fmt = "Got overflow during increasing the height.")] Overflow, #[display(fmt = "The non-generic block can't have zero height.")] @@ -96,7 +100,7 @@ impl From for anyhow::Error { #[cfg(test)] 
impl PartialEq for Error { fn eq(&self, other: &Self) -> bool { - format!("{self:?}") == format!("{other:?}") + format!("{self}") == format!("{other}") } } @@ -104,18 +108,26 @@ pub struct Importer { database: D, executor: E, verifier: V, + chain_id: ChainId, broadcast: broadcast::Sender>, guard: tokio::sync::Semaphore, } impl Importer { - pub fn new(config: Config, database: D, executor: E, verifier: V) -> Self { + pub fn new( + config: Config, + chain_config: &ChainConfig, + database: D, + executor: E, + verifier: V, + ) -> Self { let (broadcast, _) = broadcast::channel(config.max_block_notify_buffer); Self { database, executor, verifier, + chain_id: chain_config.consensus_parameters.chain_id, broadcast, guard: tokio::sync::Semaphore::new(1), } @@ -196,9 +208,9 @@ where // database height + 1. let expected_next_height = match consensus { Consensus::Genesis(_) => { - let result = self.database.latest_block_height(); - let found = !result.is_not_found(); - // Because the genesis block is not committed, it should return non found error. + let result = self.database.latest_block_height()?; + let found = result.is_some(); + // Because the genesis block is not committed, it should return `None`. // If we find the latest height, something is wrong with the state of the database. if found { return Err(Error::InvalidUnderlyingDatabaseGenesisState) @@ -210,7 +222,10 @@ where return Err(Error::ZeroNonGenericHeight) } - let last_db_height = self.database.latest_block_height()?; + let last_db_height = self + .database + .latest_block_height()? + .ok_or(not_found!("Latest block height"))?; last_db_height .checked_add(1u32) .ok_or(Error::Overflow)? @@ -228,15 +243,19 @@ where let db_after_execution = db_tx.as_mut(); // Importer expects that `UncommittedResult` contains the result of block - // execution(It includes the block itself). + // execution without block itself. 
+ let expected_height = self.database.latest_block_height()?; let actual_height = db_after_execution.latest_block_height()?; - if expected_next_height != actual_height { + if expected_height != actual_height { return Err(Error::InvalidDatabaseStateAfterExecution( - expected_next_height, + expected_height, actual_height, )) } + db_after_execution + .block(&block_id, &block.compress(&self.chain_id))? + .should_be_unique(&expected_next_height)?; db_after_execution .seal_block(&block_id, &result.sealed_block.consensus)? .should_be_unique(&expected_next_height)?; @@ -252,7 +271,7 @@ where importer_metrics().total_txs_count.set(total_txs as i64); importer_metrics() .block_height - .set(*actual_height.deref() as i64); + .set(*actual_next_height.deref() as i64); let current_time = SystemTime::now() .duration_since(UNIX_EPOCH) .unwrap() @@ -273,8 +292,11 @@ where // Errors are optimistically handled via fallback to default values since the metrics // should get updated regularly anyways and these errors will be discovered and handled // correctly in more mission critical areas (such as _commit_result) - let current_block_height = - self.database.latest_block_height().unwrap_or_default(); + let current_block_height = self + .database + .latest_block_height() + .unwrap_or_default() + .unwrap_or_default(); let total_tx_count = self.database.increase_tx_count(0).unwrap_or_default(); importer_metrics() diff --git a/crates/services/importer/src/importer/test.rs b/crates/services/importer/src/importer/test.rs index 24db5d043c3..fe5effb25a9 100644 --- a/crates/services/importer/src/importer/test.rs +++ b/crates/services/importer/src/importer/test.rs @@ -10,7 +10,6 @@ use crate::{ }; use anyhow::anyhow; use fuel_core_storage::{ - not_found, transactional::{ StorageTransaction, Transaction as TransactionTrait, @@ -20,7 +19,10 @@ use fuel_core_storage::{ }; use fuel_core_types::{ blockchain::{ - block::Block, + block::{ + Block, + CompressedBlock, + }, consensus::Consensus, 
primitives::BlockId, SealedBlock, @@ -50,7 +52,7 @@ mockall::mock! { pub Database {} impl ImporterDatabase for Database { - fn latest_block_height(&self) -> StorageResult; + fn latest_block_height(&self) -> StorageResult>; fn increase_tx_count(&self, new_txs_count: u64) -> StorageResult; } @@ -59,7 +61,10 @@ mockall::mock! { &mut self, block_id: &BlockId, consensus: &Consensus, - ) -> StorageResult>; + ) -> StorageResult>; + + fn block(&mut self, block_id: &BlockId, block: &CompressedBlock) + -> StorageResult>; } impl TransactionTrait for Database { @@ -109,30 +114,38 @@ fn poa_block(height: u32) -> SealedBlock { fn underlying_db(result: R) -> impl Fn() -> MockDatabase where - R: Fn() -> StorageResult + Send + Clone + 'static, + R: Fn() -> StorageResult> + Send + Clone + 'static, { move || { let result = result.clone(); let mut db = MockDatabase::default(); db.expect_latest_block_height() - .returning(move || result().map(Into::into)); + .returning(move || result().map(|v| v.map(Into::into))); db.expect_increase_tx_count().returning(Ok); db } } -fn executor_db(height: H, seal: S, commits: usize) -> impl Fn() -> MockDatabase +fn executor_db( + height: H, + seal: S, + block: B, + commits: usize, +) -> impl Fn() -> MockDatabase where - H: Fn() -> StorageResult + Send + Clone + 'static, - S: Fn() -> StorageResult> + Send + Clone + 'static, + H: Fn() -> StorageResult> + Send + Clone + 'static, + S: Fn() -> StorageResult> + Send + Clone + 'static, + B: Fn() -> StorageResult> + Send + Clone + 'static, { move || { let height = height.clone(); let seal = seal.clone(); + let block = block.clone(); let mut db = MockDatabase::default(); db.expect_latest_block_height() - .returning(move || height().map(Into::into)); + .returning(move || height().map(|v| v.map(Into::into))); db.expect_seal_block().returning(move |_, _| seal()); + db.expect_block().returning(move |_, _| block()); db.expect_commit().times(commits).returning(|| Ok(())); db.expect_increase_tx_count().returning(Ok); 
db @@ -143,16 +156,12 @@ fn ok(entity: T) -> impl Fn() -> Result + Clone { move || Ok(entity.clone()) } -fn not_found() -> StorageResult { - Err(not_found!("Not found")) -} - fn storage_failure() -> StorageResult { Err(StorageError::Other(anyhow!("Some failure"))) } fn storage_failure_error() -> Error { - Error::StorageError(StorageError::Other(anyhow!("Some failure"))) + storage_failure::<()>().unwrap_err().into() } fn ex_result(height: u32, skipped_transactions: usize) -> MockExecutionResult { @@ -200,7 +209,7 @@ fn verification_failure() -> anyhow::Result { } fn verification_failure_error() -> Error { - Error::FailedVerification(anyhow!("Not verified")) + Error::FailedVerification(verification_failure::<()>().unwrap_err()) } fn verifier(result: R) -> MockBlockVerifier @@ -219,46 +228,53 @@ where //////////////// //////////// Genesis Block /////////// //////////////// #[test_case( genesis(0), - underlying_db(not_found), - executor_db(ok(0), ok(None), 1) + underlying_db(ok(None)), + executor_db(ok(None), ok(None), ok(None), 1) => Ok(()); "successfully imports genesis block when latest block not found" )] #[test_case( genesis(113), - underlying_db(not_found), - executor_db(ok(113), ok(None), 1) + underlying_db(ok(None)), + executor_db(ok(None), ok(None), ok(None), 1) => Ok(()); "successfully imports block at arbitrary height when executor db expects it and last block not found" )] #[test_case( genesis(0), underlying_db(storage_failure), - executor_db(ok(0), ok(None), 0) - => Err(Error::InvalidUnderlyingDatabaseGenesisState); + executor_db(ok(Some(0)), ok(None), ok(None), 0) + => Err(storage_failure_error()); "fails to import genesis when underlying database fails" )] #[test_case( genesis(0), - underlying_db(ok(0)), - executor_db(ok(0), ok(None), 0) + underlying_db(ok(Some(0))), + executor_db(ok(Some(0)), ok(None), ok(None), 0) => Err(Error::InvalidUnderlyingDatabaseGenesisState); "fails to import genesis block when already exists" )] #[test_case( genesis(1), - 
underlying_db(not_found), - executor_db(ok(0), ok(None), 0) - => Err(Error::InvalidDatabaseStateAfterExecution(1u32.into(), 0u32.into())); + underlying_db(ok(None)), + executor_db(ok(Some(0)), ok(None), ok(None), 0) + => Err(Error::InvalidDatabaseStateAfterExecution(None, Some(0u32.into()))); "fails to import genesis block when next height is not 0" )] #[test_case( genesis(0), - underlying_db(not_found), - executor_db(ok(0), ok(Some(Consensus::Genesis(Default::default()))), 0) + underlying_db(ok(None)), + executor_db(ok(None), ok(Some(())), ok(None), 0) => Err(Error::NotUnique(0u32.into())); "fails to import genesis block when consensus exists for height 0" )] +#[test_case( + genesis(0), + underlying_db(ok(None)), + executor_db(ok(None), ok(None), ok(Some(())), 0) + => Err(Error::NotUnique(0u32.into())); + "fails to import genesis block when block exists for height 0" +)] fn commit_result_genesis( sealed_block: SealedBlock, underlying_db: impl Fn() -> MockDatabase, @@ -270,67 +286,81 @@ fn commit_result_genesis( //////////////////////////// PoA Block //////////////////////////// #[test_case( poa_block(1), - underlying_db(ok(0)), - executor_db(ok(1), ok(None), 1) + underlying_db(ok(Some(0))), + executor_db(ok(Some(0)), ok(None), ok(None), 1) => Ok(()); "successfully imports block at height 1 when latest block is genesis" )] #[test_case( poa_block(113), - underlying_db(ok(112)), - executor_db(ok(113), ok(None), 1) + underlying_db(ok(Some(112))), + executor_db(ok(Some(112)), ok(None), ok(None), 1) => Ok(()); "successfully imports block at arbitrary height when latest block height is one fewer and executor db expects it" )] #[test_case( poa_block(0), - underlying_db(ok(0)), - executor_db(ok(1), ok(None), 0) + underlying_db(ok(Some(0))), + executor_db(ok(Some(1)), ok(None), ok(None), 0) => Err(Error::ZeroNonGenericHeight); "fails to import PoA block with height 0" )] #[test_case( poa_block(113), - underlying_db(ok(111)), - executor_db(ok(113), ok(None), 0) + 
underlying_db(ok(Some(111))), + executor_db(ok(Some(113)), ok(None), ok(None), 0) => Err(Error::IncorrectBlockHeight(112u32.into(), 113u32.into())); "fails to import block at height 113 when latest block height is 111" )] #[test_case( poa_block(113), - underlying_db(ok(114)), - executor_db(ok(113), ok(None), 0) + underlying_db(ok(Some(114))), + executor_db(ok(Some(113)), ok(None), ok(None), 0) => Err(Error::IncorrectBlockHeight(115u32.into(), 113u32.into())); "fails to import block at height 113 when latest block height is 114" )] #[test_case( poa_block(113), - underlying_db(ok(112)), - executor_db(ok(114), ok(None), 0) - => Err(Error::InvalidDatabaseStateAfterExecution(113u32.into(), 114u32.into())); + underlying_db(ok(Some(112))), + executor_db(ok(Some(114)), ok(None), ok(None), 0) + => Err(Error::InvalidDatabaseStateAfterExecution(Some(112u32.into()), Some(114u32.into()))); "fails to import block 113 when executor db expects height 114" )] #[test_case( poa_block(113), - underlying_db(ok(112)), - executor_db(storage_failure, ok(None), 0) + underlying_db(ok(Some(112))), + executor_db(storage_failure, ok(None), ok(None), 0) => Err(storage_failure_error()); "fails to import block when executor db fails to find latest block" )] #[test_case( poa_block(113), - underlying_db(ok(112)), - executor_db(ok(113), ok(Some(Consensus::PoA(Default::default()))), 0) + underlying_db(ok(Some(112))), + executor_db(ok(Some(112)), ok(Some(())), ok(None), 0) => Err(Error::NotUnique(113u32.into())); "fails to import block when consensus exists for block" )] #[test_case( poa_block(113), - underlying_db(ok(112)), - executor_db(ok(113), storage_failure, 0) + underlying_db(ok(Some(112))), + executor_db(ok(Some(112)), storage_failure, ok(None), 0) => Err(storage_failure_error()); "fails to import block when executor db fails to find consensus" )] +#[test_case( + poa_block(113), + underlying_db(ok(Some(112))), + executor_db(ok(Some(112)), ok(None), ok(Some(())), 0) + => 
Err(Error::NotUnique(113u32.into())); + "fails to import block when block exists" +)] +#[test_case( + poa_block(113), + underlying_db(ok(Some(112))), + executor_db(ok(Some(112)), ok(None), storage_failure, 0) + => Err(storage_failure_error()); + "fails to import block when executor db fails to find block" +)] fn commit_result_and_execute_and_commit_poa( sealed_block: SealedBlock, underlying_db: impl Fn() -> MockDatabase, @@ -357,7 +387,13 @@ fn commit_result_assert( executor_db: MockDatabase, ) -> Result<(), Error> { let expected_to_broadcast = sealed_block.clone(); - let importer = Importer::new(Default::default(), underlying_db, (), ()); + let importer = Importer::new( + Default::default(), + &Default::default(), + underlying_db, + (), + (), + ); let uncommitted_result = UncommittedResult::new( ImportResult::new_from_local(sealed_block, vec![]), StorageTransaction::new(executor_db), @@ -387,7 +423,13 @@ fn execute_and_commit_assert( verifier: MockBlockVerifier, ) -> Result<(), Error> { let expected_to_broadcast = sealed_block.clone(); - let importer = Importer::new(Default::default(), underlying_db, executor, verifier); + let importer = Importer::new( + Default::default(), + &Default::default(), + underlying_db, + executor, + verifier, + ); let mut imported_blocks = importer.subscribe(); let result = importer.execute_and_commit(sealed_block); @@ -408,7 +450,13 @@ fn execute_and_commit_assert( #[test] fn commit_result_fail_when_locked() { - let importer = Importer::new(Default::default(), MockDatabase::default(), (), ()); + let importer = Importer::new( + Default::default(), + &Default::default(), + MockDatabase::default(), + (), + (), + ); let uncommitted_result = UncommittedResult::new( ImportResult::default(), StorageTransaction::new(MockDatabase::default()), @@ -425,6 +473,7 @@ fn commit_result_fail_when_locked() { fn execute_and_commit_fail_when_locked() { let importer = Importer::new( Default::default(), + &Default::default(), MockDatabase::default(), 
MockExecutor::default(), MockBlockVerifier::default(), @@ -441,6 +490,7 @@ fn execute_and_commit_fail_when_locked() { fn one_lock_at_the_same_time() { let importer = Importer::new( Default::default(), + &Default::default(), MockDatabase::default(), MockExecutor::default(), MockBlockVerifier::default(), @@ -513,10 +563,10 @@ where let previous_height = expected_height.checked_sub(1).unwrap_or_default(); let execute_and_commit_result = execute_and_commit_assert( sealed_block, - underlying_db(ok(previous_height))(), + underlying_db(ok(Some(previous_height)))(), executor( block_after_execution, - executor_db(ok(expected_height), ok(None), commits)(), + executor_db(ok(Some(previous_height)), ok(None), ok(None), commits)(), ), verifier(verifier_result), ); @@ -535,6 +585,7 @@ where { let importer = Importer::new( Default::default(), + &Default::default(), MockDatabase::default(), executor(block_after_execution, MockDatabase::default()), verifier(verifier_result), @@ -547,6 +598,7 @@ where fn verify_and_execute_allowed_when_locked() { let importer = Importer::new( Default::default(), + &Default::default(), MockDatabase::default(), executor(ok(ex_result(13, 0)), MockDatabase::default()), verifier(ok(())), diff --git a/crates/services/importer/src/ports.rs b/crates/services/importer/src/ports.rs index ce0449a8743..32b46cbc440 100644 --- a/crates/services/importer/src/ports.rs +++ b/crates/services/importer/src/ports.rs @@ -4,7 +4,10 @@ use fuel_core_storage::{ }; use fuel_core_types::{ blockchain::{ - block::Block, + block::{ + Block, + CompressedBlock, + }, consensus::Consensus, primitives::BlockId, }, @@ -32,7 +35,7 @@ pub trait Executor: Send + Sync { /// The database port used by the block importer. pub trait ImporterDatabase { /// Returns the latest block height. - fn latest_block_height(&self) -> StorageResult; + fn latest_block_height(&self) -> StorageResult>; /// Update metadata about the total number of transactions on the chain. 
/// Returns the total count after the update. fn increase_tx_count(&self, new_txs_count: u64) -> StorageResult; @@ -46,7 +49,14 @@ pub trait ExecutorDatabase: ImporterDatabase { &mut self, block_id: &BlockId, consensus: &Consensus, - ) -> StorageResult>; + ) -> StorageResult>; + + /// Inserts the `CompressedBlock` under the `block_id`. + fn block( + &mut self, + block_id: &BlockId, + block: &CompressedBlock, + ) -> StorageResult>; } #[cfg_attr(test, mockall::automock)] diff --git a/crates/services/producer/src/block_producer.rs b/crates/services/producer/src/block_producer.rs index 3e57c794195..93a5949c541 100644 --- a/crates/services/producer/src/block_producer.rs +++ b/crates/services/producer/src/block_producer.rs @@ -72,20 +72,21 @@ pub struct Producer { pub lock: Mutex<()>, } -impl - Producer +impl Producer where Database: ports::BlockProducerDatabase + 'static, - TxPool: ports::TxPool + 'static, - Executor: ports::Executor + 'static, { - /// Produces and execute block for the specified height - pub async fn produce_and_execute_block( + /// Produces and execute block for the specified height. + async fn produce_and_execute( &self, height: BlockHeight, block_time: Tai64, + tx_source: impl FnOnce(BlockHeight) -> TxSource, max_gas: Word, - ) -> anyhow::Result>> { + ) -> anyhow::Result>> + where + Executor: ports::Executor + 'static, + { // - get previous block info (hash, root, etc) // - select best da_height from relayer // - get available txs from txpool @@ -97,7 +98,7 @@ where // prevent simultaneous block production calls, the guard will drop at the end of this fn. let _production_guard = self.lock.lock().await; - let source = self.txpool.get_source(height); + let source = tx_source(height); let header = self.new_header(height, block_time).await?; @@ -107,7 +108,7 @@ where gas_limit: max_gas, }; - // Store the context string incase we error. + // Store the context string in case we error. 
let context_string = format!("Failed to produce block {height:?} due to execution failure"); let result = self @@ -119,7 +120,55 @@ where debug!("Produced block with result: {:?}", result.result()); Ok(result) } +} +impl + Producer +where + Database: ports::BlockProducerDatabase + 'static, + TxPool: ports::TxPool + 'static, + Executor: ports::Executor + 'static, +{ + /// Produces and execute block for the specified height with transactions from the `TxPool`. + pub async fn produce_and_execute_block_txpool( + &self, + height: BlockHeight, + block_time: Tai64, + max_gas: Word, + ) -> anyhow::Result>> { + self.produce_and_execute( + height, + block_time, + |height| self.txpool.get_source(height), + max_gas, + ) + .await + } +} + +impl Producer +where + Database: ports::BlockProducerDatabase + 'static, + Executor: ports::Executor, Database = ExecutorDB> + 'static, +{ + /// Produces and execute block for the specified height with `transactions`. + pub async fn produce_and_execute_block_transactions( + &self, + height: BlockHeight, + block_time: Tai64, + transactions: Vec, + max_gas: Word, + ) -> anyhow::Result>> { + self.produce_and_execute(height, block_time, |_| transactions, max_gas) + .await + } +} + +impl Producer +where + Database: ports::BlockProducerDatabase + 'static, + Executor: ports::DryRunner + 'static, +{ // TODO: Support custom `block_time` for `dry_run`. /// Simulate a transaction without altering any state. 
Does not aquire the production lock /// since it is basically a "read only" operation and shouldn't get in the way of normal diff --git a/crates/services/producer/src/block_producer/tests.rs b/crates/services/producer/src/block_producer/tests.rs index f9e959d16c8..2263004c925 100644 --- a/crates/services/producer/src/block_producer/tests.rs +++ b/crates/services/producer/src/block_producer/tests.rs @@ -42,7 +42,7 @@ async fn cant_produce_at_genesis_height() { let producer = ctx.producer(); let err = producer - .produce_and_execute_block(0u32.into(), Tai64::now(), 1_000_000_000) + .produce_and_execute_block_txpool(0u32.into(), Tai64::now(), 1_000_000_000) .await .expect_err("expected failure"); @@ -58,7 +58,7 @@ async fn can_produce_initial_block() { let producer = ctx.producer(); let result = producer - .produce_and_execute_block(1u32.into(), Tai64::now(), 1_000_000_000) + .produce_and_execute_block_txpool(1u32.into(), Tai64::now(), 1_000_000_000) .await; assert!(result.is_ok()); @@ -93,7 +93,7 @@ async fn can_produce_next_block() { let ctx = TestContext::default_from_db(db); let producer = ctx.producer(); let result = producer - .produce_and_execute_block( + .produce_and_execute_block_txpool( prev_height .succ() .expect("The block height should be valid"), @@ -112,7 +112,7 @@ async fn cant_produce_if_no_previous_block() { let producer = ctx.producer(); let err = producer - .produce_and_execute_block(100u32.into(), Tai64::now(), 1_000_000_000) + .produce_and_execute_block_txpool(100u32.into(), Tai64::now(), 1_000_000_000) .await .expect_err("expected failure"); @@ -156,7 +156,7 @@ async fn cant_produce_if_previous_block_da_height_too_high() { let producer = ctx.producer(); let err = producer - .produce_and_execute_block( + .produce_and_execute_block_txpool( prev_height .succ() .expect("The block height should be valid"), @@ -187,7 +187,7 @@ async fn production_fails_on_execution_error() { let producer = ctx.producer(); let err = producer - 
.produce_and_execute_block(1u32.into(), Tai64::now(), 1_000_000_000) + .produce_and_execute_block_txpool(1u32.into(), Tai64::now(), 1_000_000_000) .await .expect_err("expected failure"); diff --git a/crates/services/producer/src/mocks.rs b/crates/services/producer/src/mocks.rs index 69ca3d482dd..eadfcfed0df 100644 --- a/crates/services/producer/src/mocks.rs +++ b/crates/services/producer/src/mocks.rs @@ -20,8 +20,6 @@ use fuel_core_types::{ }, primitives::DaBlockHeight, }, - fuel_tx, - fuel_tx::Receipt, fuel_types::{ Address, BlockHeight, @@ -133,14 +131,12 @@ fn to_block(component: Components>) -> Block { Block::new(component.header_to_produce, transactions, &[]) } -impl Executor for MockExecutor { +impl Executor> for MockExecutor { type Database = MockDb; - /// The source of transaction used by the executor. - type TxSource = Vec; fn execute_without_commit( &self, - component: Components, + component: Components>, ) -> ExecutorResult>> { let block = to_block(component); // simulate executor inserting a block @@ -158,26 +154,16 @@ impl Executor for MockExecutor { StorageTransaction::new(self.0.clone()), )) } - - fn dry_run( - &self, - _block: Components, - _utxo_validation: Option, - ) -> ExecutorResult>> { - Ok(Default::default()) - } } pub struct FailingMockExecutor(pub Mutex>); -impl Executor for FailingMockExecutor { +impl Executor> for FailingMockExecutor { type Database = MockDb; - /// The source of transaction used by the executor. 
- type TxSource = Vec; fn execute_without_commit( &self, - component: Components, + component: Components>, ) -> ExecutorResult>> { // simulate an execution failure let mut err = self.0.lock().unwrap(); @@ -195,19 +181,6 @@ impl Executor for FailingMockExecutor { )) } } - - fn dry_run( - &self, - _block: Components, - _utxo_validation: Option, - ) -> ExecutorResult>> { - let mut err = self.0.lock().unwrap(); - if let Some(err) = err.take() { - Err(err) - } else { - Ok(Default::default()) - } - } } #[derive(Clone, Default, Debug)] diff --git a/crates/services/producer/src/ports.rs b/crates/services/producer/src/ports.rs index fb53df1934d..1af44bc9d46 100644 --- a/crates/services/producer/src/ports.rs +++ b/crates/services/producer/src/ports.rs @@ -58,19 +58,19 @@ pub trait Relayer: Send + Sync { ) -> anyhow::Result; } -pub trait Executor: Send + Sync { +pub trait Executor: Send + Sync { /// The database used by the executor. type Database; - /// The source of transaction used by the executor. - type TxSource; /// Executes the block and returns the result of execution with uncommitted database /// transaction. fn execute_without_commit( &self, - component: Components, + component: Components, ) -> ExecutorResult>>; +} +pub trait DryRunner: Send + Sync { /// Executes the block without committing it to the database. During execution collects the /// receipts to return them. The `utxo_validation` field can be used to disable the validation /// of utxos during execution. 
diff --git a/tests/Cargo.toml b/tests/Cargo.toml index 9ed4728fb71..9faa23ec731 100644 --- a/tests/Cargo.toml +++ b/tests/Cargo.toml @@ -24,7 +24,7 @@ ethers = "2" fuel-core = { path = "../crates/fuel-core", default-features = false, features = ["test-helpers"] } fuel-core-benches = { path = "../benches" } fuel-core-client = { path = "../crates/client", features = ["test-helpers"] } -fuel-core-executor = { workspace = true, features = ["test-helpers"] } +fuel-core-executor = { workspace = true } fuel-core-p2p = { path = "../crates/services/p2p", features = ["test-helpers"], optional = true } fuel-core-poa = { path = "../crates/services/consensus_module/poa" } fuel-core-relayer = { path = "../crates/services/relayer", features = [ diff --git a/tests/tests/tx.rs b/tests/tests/tx.rs index 82948de6b0e..da1db7b1beb 100644 --- a/tests/tests/tx.rs +++ b/tests/tests/tx.rs @@ -1,9 +1,7 @@ use crate::helpers::TestContext; use fuel_core::{ - database::Database, schema::tx::receipt::all_receipts, service::{ - adapters::MaybeRelayerAdapter, Config, FuelService, }, @@ -16,20 +14,12 @@ use fuel_core_client::client::{ types::TransactionStatus, FuelClient, }; -use fuel_core_executor::executor::Executor; +use fuel_core_poa::service::Mode; use fuel_core_types::{ - blockchain::{ - block::PartialFuelBlock, - header::{ - ConsensusHeader, - PartialBlockHeader, - }, - }, fuel_asm::*, + fuel_crypto::SecretKey, fuel_tx::*, fuel_types::ChainId, - services::executor::ExecutionBlock, - tai64::Tai64, }; use itertools::Itertools; use rand::{ @@ -503,55 +493,30 @@ async fn get_transactions_by_owner_supports_cursor(direction: PageDirection) { #[tokio::test] async fn get_transactions_from_manual_blocks() { - let (executor, db) = get_executor_and_db(); - // get access to a client - let context = initialize_client(db).await; + let context = TestContext::new(100).await; // create 10 txs - let txs: Vec = (0..10).map(create_mock_tx).collect(); + let txs: Vec<_> = (0..10).map(create_mock_tx).collect(); 
// make 1st test block - let first_test_block = PartialFuelBlock { - header: PartialBlockHeader { - consensus: ConsensusHeader { - height: 1u32.into(), - time: Tai64::now(), - ..Default::default() - }, - ..Default::default() - }, - - // set the first 5 ids of the manually saved txs - transactions: txs.iter().take(5).cloned().collect(), - }; + let first_batch = txs.iter().take(5).cloned().collect(); + context + .srv + .shared + .poa_adapter + .manually_produce_blocks(None, Mode::BlockWithTransactions(first_batch)) + .await + .expect("Should produce first block with first 5 transactions."); // make 2nd test block - let second_test_block = PartialFuelBlock { - header: PartialBlockHeader { - consensus: ConsensusHeader { - height: 2u32.into(), - time: Tai64::now(), - ..Default::default() - }, - ..Default::default() - }, - // set the last 5 ids of the manually saved txs - transactions: txs.iter().skip(5).take(5).cloned().collect(), - }; - - // process blocks and save block height - executor - .execute_and_commit( - ExecutionBlock::Production(first_test_block), - Default::default(), - ) - .unwrap(); - executor - .execute_and_commit( - ExecutionBlock::Production(second_test_block), - Default::default(), - ) - .unwrap(); + let second_batch = txs.iter().skip(5).take(5).cloned().collect(); + context + .srv + .shared + .poa_adapter + .manually_produce_blocks(None, Mode::BlockWithTransactions(second_batch)) + .await + .expect("Should produce block with last 5 transactions."); // Query for first 4: [0, 1, 2, 3] let page_request_forwards = PaginationRequest { @@ -672,38 +637,18 @@ async fn get_owned_transactions() { assert_eq!(&charlie_txs, &[tx1, tx2, tx3]); } -fn get_executor_and_db() -> (Executor, Database) { - let db = Database::default(); - let relayer = MaybeRelayerAdapter { - database: db.clone(), - #[cfg(feature = "relayer")] - relayer_synced: None, - #[cfg(feature = "relayer")] - da_deploy_height: 0u64.into(), - }; - let executor = Executor { - relayer, - database: 
db.clone(), - config: Default::default(), - }; - - (executor, db) -} - -async fn initialize_client(db: Database) -> TestContext { - let config = Config::local_node(); - let srv = FuelService::from_database(db, config).await.unwrap(); - let client = FuelClient::from(srv.bound_address); - TestContext { - srv, - rng: StdRng::seed_from_u64(0x123), - client, - } -} - // add random val for unique tx fn create_mock_tx(val: u64) -> Transaction { + let mut rng = StdRng::seed_from_u64(val); + TransactionBuilder::script(val.to_be_bytes().to_vec(), Default::default()) - .add_random_fee_input() + .add_unsigned_coin_input( + SecretKey::random(&mut rng), + rng.gen(), + 1_000_000, + Default::default(), + Default::default(), + Default::default(), + ) .finalize_as_transaction() } From d4a4d0a1d26abe47270637c0d375fa1bac1c4747 Mon Sep 17 00:00:00 2001 From: xgreenx Date: Fri, 29 Dec 2023 18:44:14 +0100 Subject: [PATCH 05/28] Update CHANGELOG.md --- CHANGELOG.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index b2cf0978023..e8ada0fcdd1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,11 @@ and this project adheres to [Semantic Versioning](http://semver.org/). Description of the upcoming release here. + +### Changed + +- [#1577](https://github.com/FuelLabs/fuel-core/pull/1577): Moved insertion of the blocks into the `BlockImporter` instead of the executor. 
+ ## [Version 0.22.0] ### Added From 5033d8cd23b146069f35ed7ff678d11eb471a677 Mon Sep 17 00:00:00 2001 From: xgreenx Date: Fri, 29 Dec 2023 19:23:49 +0100 Subject: [PATCH 06/28] Cleanup --- crates/services/executor/src/ports.rs | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/crates/services/executor/src/ports.rs b/crates/services/executor/src/ports.rs index 0c4c32a1deb..dd221166a2a 100644 --- a/crates/services/executor/src/ports.rs +++ b/crates/services/executor/src/ports.rs @@ -6,7 +6,6 @@ use fuel_core_storage::{ ContractsLatestUtxo, ContractsRawCode, ContractsState, - FuelBlocks, Messages, Receipts, SpentMessages, @@ -109,11 +108,9 @@ pub trait TxIdOwnerRecorder { // TODO: Remove `Clone` bound pub trait ExecutorDatabaseTrait: - StorageMutate - + StorageMutate + StorageMutate + StorageMutate - + MerkleRootStorage - + StorageInspect + + MerkleRootStorage + MessageIsSpent + StorageMutate + StorageMutate From ab88c67432d3dcda8ca0bc0edfad5dc20cf6f6b5 Mon Sep 17 00:00:00 2001 From: xgreenx Date: Fri, 29 Dec 2023 21:49:47 +0100 Subject: [PATCH 07/28] Moved insertion of the whole block with transactions --- CHANGELOG.md | 2 +- crates/fuel-core/src/database.rs | 2 + crates/fuel-core/src/database/storage.rs | 7 ++ crates/fuel-core/src/executor.rs | 37 +-------- .../src/service/adapters/block_importer.rs | 51 ++++++------ crates/services/executor/src/executor.rs | 12 +-- crates/services/executor/src/ports.rs | 4 +- crates/services/importer/src/importer.rs | 6 +- crates/services/importer/src/importer/test.rs | 83 ++++++------------- crates/services/importer/src/ports.rs | 28 +++---- crates/storage/src/tables.rs | 11 +++ 11 files changed, 96 insertions(+), 147 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e8ada0fcdd1..5870b438e50 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,7 +11,7 @@ Description of the upcoming release here. 
### Changed -- [#1577](https://github.com/FuelLabs/fuel-core/pull/1577): Moved insertion of the blocks into the `BlockImporter` instead of the executor. +- [#1577](https://github.com/FuelLabs/fuel-core/pull/1577): Moved insertion of sealed blocks into the `BlockImporter` instead of the executor. ## [Version 0.22.0] diff --git a/crates/fuel-core/src/database.rs b/crates/fuel-core/src/database.rs index d2fb65cfddd..29ace79dcd1 100644 --- a/crates/fuel-core/src/database.rs +++ b/crates/fuel-core/src/database.rs @@ -150,6 +150,8 @@ pub enum Column { ContractsStateMerkleData = 23, /// See [`ContractsStateMerkleMetadata`](storage::ContractsStateMerkleMetadata) ContractsStateMerkleMetadata = 24, + /// See [`ProcessedTransactions`](storage::ProcessedTransactions) + ProcessedTransactions = 25, } impl Column { diff --git a/crates/fuel-core/src/database/storage.rs b/crates/fuel-core/src/database/storage.rs index 6ceab3a776b..2c2c5333c6e 100644 --- a/crates/fuel-core/src/database/storage.rs +++ b/crates/fuel-core/src/database/storage.rs @@ -3,6 +3,7 @@ use crate::database::{ Database, }; use fuel_core_storage::{ + tables::ProcessedTransactions, Error as StorageError, Mappable, MerkleRoot, @@ -160,6 +161,12 @@ impl DatabaseColumn for FuelBlockSecondaryKeyBlockHeights { } } +impl DatabaseColumn for ProcessedTransactions { + fn column() -> Column { + Column::ProcessedTransactions + } +} + impl DatabaseColumn for FuelBlockMerkleData { fn column() -> Column { Column::FuelBlockMerkleData diff --git a/crates/fuel-core/src/executor.rs b/crates/fuel-core/src/executor.rs index 36ece1ca9a8..04a770582ce 100644 --- a/crates/fuel-core/src/executor.rs +++ b/crates/fuel-core/src/executor.rs @@ -20,7 +20,6 @@ mod tests { ContractsRawCode, Messages, Receipts, - Transactions, }, StorageAsMut, }; @@ -1571,7 +1570,7 @@ mod tests { .into(); let db = &mut Database::default(); - let mut executor = create_executor( + let executor = create_executor( db.clone(), Config { utxo_validation_default: false, 
@@ -1607,16 +1606,6 @@ mod tests { assert_eq!(executed_tx.inputs()[0].balance_root(), Some(&empty_state)); assert_eq!(executed_tx.outputs()[0].state_root(), Some(&empty_state)); assert_eq!(executed_tx.outputs()[0].balance_root(), Some(&empty_state)); - - let expected_tx = block.transactions()[1].clone(); - let storage_tx = executor - .database - .storage::() - .get(&executed_tx.id(&ChainId::default())) - .unwrap() - .unwrap() - .into_owned(); - assert_eq!(storage_tx, expected_tx); } #[test] @@ -1638,7 +1627,7 @@ mod tests { .into(); let db = &mut Database::default(); - let mut executor = create_executor( + let executor = create_executor( db.clone(), Config { utxo_validation_default: false, @@ -1680,16 +1669,6 @@ mod tests { ); assert_eq!(executed_tx.inputs()[0].state_root(), Some(&empty_state)); assert_eq!(executed_tx.inputs()[0].balance_root(), Some(&empty_state)); - - let expected_tx = block.transactions()[1].clone(); - let storage_tx = executor - .database - .storage::() - .get(&expected_tx.id(&ChainId::default())) - .unwrap() - .unwrap() - .into_owned(); - assert_eq!(storage_tx, expected_tx); } #[test] @@ -1751,7 +1730,7 @@ mod tests { .clone(); let db = &mut Database::default(); - let mut executor = create_executor( + let executor = create_executor( db.clone(), Config { utxo_validation_default: false, @@ -1793,16 +1772,6 @@ mod tests { executed_tx.inputs()[0].balance_root(), executed_tx.outputs()[0].balance_root() ); - - let expected_tx = block.transactions()[1].clone(); - let storage_tx = executor - .database - .storage::() - .get(&expected_tx.id(&ChainId::default())) - .unwrap() - .unwrap() - .into_owned(); - assert_eq!(storage_tx, expected_tx); } #[test] diff --git a/crates/fuel-core/src/service/adapters/block_importer.rs b/crates/fuel-core/src/service/adapters/block_importer.rs index f44191d26c0..399eade7312 100644 --- a/crates/fuel-core/src/service/adapters/block_importer.rs +++ b/crates/fuel-core/src/service/adapters/block_importer.rs @@ -22,6 +22,7 @@ 
use fuel_core_storage::{ tables::{ FuelBlocks, SealedBlockConsensus, + Transactions, }, transactional::StorageTransaction, Result as StorageResult, @@ -29,18 +30,16 @@ use fuel_core_storage::{ }; use fuel_core_types::{ blockchain::{ - block::{ - Block, - CompressedBlock, - }, + block::Block, consensus::Consensus, - primitives::{ - BlockId, - DaBlockHeight, - }, + primitives::DaBlockHeight, SealedBlock, }, - fuel_types::BlockHeight, + fuel_tx::UniqueIdentifier, + fuel_types::{ + BlockHeight, + ChainId, + }, services::executor::{ ExecutionTypes, Result as ExecutorResult, @@ -133,25 +132,29 @@ impl ImporterDatabase for Database { } impl ExecutorDatabase for Database { - fn seal_block( - &mut self, - block_id: &BlockId, - consensus: &Consensus, - ) -> StorageResult> { - Ok(self - .storage::() - .insert(block_id, consensus)? - .map(|_| ())) - } fn block( &mut self, - block_id: &BlockId, - block: &CompressedBlock, + chain_id: &ChainId, + block: &SealedBlock, ) -> StorageResult> { - Ok(self + let block_id = block.entity.id(); + let mut found = self .storage::() - .insert(block_id, block)? - .map(|_| ())) + .insert(&block_id, &block.entity.compress(chain_id))? + .is_some(); + found |= self + .storage::() + .insert(&block_id, &block.consensus)? + .is_some(); + + // TODO: Use `batch_insert` from https://github.com/FuelLabs/fuel-core/pull/1576 + for tx in block.entity.transactions() { + found |= self + .storage::() + .insert(&tx.id(chain_id), tx)? 
+ .is_some(); + } + Ok(found.then_some(())) } } diff --git a/crates/services/executor/src/executor.rs b/crates/services/executor/src/executor.rs index 1a9f5cdb272..6be1e94498a 100644 --- a/crates/services/executor/src/executor.rs +++ b/crates/services/executor/src/executor.rs @@ -13,9 +13,9 @@ use fuel_core_storage::{ ContractsInfo, ContractsLatestUtxo, Messages, + ProcessedTransactions, Receipts, SpentMessages, - Transactions, }, transactional::{ StorageTransaction, @@ -617,7 +617,7 @@ where // Throw a clear error if the transaction id is a duplicate if tx_st_transaction .as_ref() - .storage::() + .storage::() .contains_key(tx_id)? { return Err(ExecutorError::TransactionIdCollision(*tx_id)) @@ -811,8 +811,8 @@ where if block_st_transaction .as_mut() - .storage::() - .insert(&coinbase_id, &tx)? + .storage::() + .insert(&coinbase_id, &())? .is_some() { return Err(ExecutorError::TransactionIdCollision(coinbase_id)) @@ -967,8 +967,8 @@ where // Store tx into the block db transaction tx_st_transaction .as_mut() - .storage::() - .insert(&tx_id, &final_tx)?; + .storage::() + .insert(&tx_id, &())?; // persist receipts self.persist_receipts(&tx_id, &receipts, tx_st_transaction.as_mut())?; diff --git a/crates/services/executor/src/ports.rs b/crates/services/executor/src/ports.rs index dd221166a2a..1ca5a5058fd 100644 --- a/crates/services/executor/src/ports.rs +++ b/crates/services/executor/src/ports.rs @@ -7,9 +7,9 @@ use fuel_core_storage::{ ContractsRawCode, ContractsState, Messages, + ProcessedTransactions, Receipts, SpentMessages, - Transactions, }, transactional::Transactional, vm_storage::VmStorageRequirements, @@ -109,7 +109,7 @@ pub trait TxIdOwnerRecorder { // TODO: Remove `Clone` bound pub trait ExecutorDatabaseTrait: StorageMutate - + StorageMutate + + StorageMutate + MerkleRootStorage + MessageIsSpent + StorageMutate diff --git a/crates/services/importer/src/importer.rs b/crates/services/importer/src/importer.rs index c3844cf2357..b16a73725c1 100644 --- 
a/crates/services/importer/src/importer.rs +++ b/crates/services/importer/src/importer.rs @@ -199,7 +199,6 @@ where let (result, mut db_tx) = result.into(); let block = &result.sealed_block.entity; let consensus = &result.sealed_block.consensus; - let block_id = block.id(); let actual_next_height = *block.header().height(); // During importing of the genesis block, the database should not be initialized @@ -254,10 +253,7 @@ where } db_after_execution - .block(&block_id, &block.compress(&self.chain_id))? - .should_be_unique(&expected_next_height)?; - db_after_execution - .seal_block(&block_id, &result.sealed_block.consensus)? + .block(&self.chain_id, &result.sealed_block)? .should_be_unique(&expected_next_height)?; // Update the total tx count in chain metadata diff --git a/crates/services/importer/src/importer/test.rs b/crates/services/importer/src/importer/test.rs index fe5effb25a9..7d8bac9990a 100644 --- a/crates/services/importer/src/importer/test.rs +++ b/crates/services/importer/src/importer/test.rs @@ -19,16 +19,15 @@ use fuel_core_storage::{ }; use fuel_core_types::{ blockchain::{ - block::{ - Block, - CompressedBlock, - }, + block::Block, consensus::Consensus, - primitives::BlockId, SealedBlock, }, fuel_tx::TxId, - fuel_types::BlockHeight, + fuel_types::{ + BlockHeight, + ChainId, + }, services::{ block_importer::{ ImportResult, @@ -57,14 +56,11 @@ mockall::mock! 
{ } impl ExecutorDatabase for Database { - fn seal_block( + fn block( &mut self, - block_id: &BlockId, - consensus: &Consensus, + chain_id: &ChainId, + block: &SealedBlock, ) -> StorageResult>; - - fn block(&mut self, block_id: &BlockId, block: &CompressedBlock) - -> StorageResult>; } impl TransactionTrait for Database { @@ -126,25 +122,17 @@ where } } -fn executor_db( - height: H, - seal: S, - block: B, - commits: usize, -) -> impl Fn() -> MockDatabase +fn executor_db(height: H, block: B, commits: usize) -> impl Fn() -> MockDatabase where H: Fn() -> StorageResult> + Send + Clone + 'static, - S: Fn() -> StorageResult> + Send + Clone + 'static, B: Fn() -> StorageResult> + Send + Clone + 'static, { move || { let height = height.clone(); - let seal = seal.clone(); let block = block.clone(); let mut db = MockDatabase::default(); db.expect_latest_block_height() .returning(move || height().map(|v| v.map(Into::into))); - db.expect_seal_block().returning(move |_, _| seal()); db.expect_block().returning(move |_, _| block()); db.expect_commit().times(commits).returning(|| Ok(())); db.expect_increase_tx_count().returning(Ok); @@ -229,49 +217,42 @@ where #[test_case( genesis(0), underlying_db(ok(None)), - executor_db(ok(None), ok(None), ok(None), 1) + executor_db(ok(None), ok(None), 1) => Ok(()); "successfully imports genesis block when latest block not found" )] #[test_case( genesis(113), underlying_db(ok(None)), - executor_db(ok(None), ok(None), ok(None), 1) + executor_db(ok(None), ok(None), 1) => Ok(()); "successfully imports block at arbitrary height when executor db expects it and last block not found" )] #[test_case( genesis(0), underlying_db(storage_failure), - executor_db(ok(Some(0)), ok(None), ok(None), 0) + executor_db(ok(Some(0)), ok(None), 0) => Err(storage_failure_error()); "fails to import genesis when underlying database fails" )] #[test_case( genesis(0), underlying_db(ok(Some(0))), - executor_db(ok(Some(0)), ok(None), ok(None), 0) + executor_db(ok(Some(0)), 
ok(None), 0) => Err(Error::InvalidUnderlyingDatabaseGenesisState); "fails to import genesis block when already exists" )] #[test_case( genesis(1), underlying_db(ok(None)), - executor_db(ok(Some(0)), ok(None), ok(None), 0) + executor_db(ok(Some(0)), ok(None), 0) => Err(Error::InvalidDatabaseStateAfterExecution(None, Some(0u32.into()))); "fails to import genesis block when next height is not 0" )] #[test_case( genesis(0), underlying_db(ok(None)), - executor_db(ok(None), ok(Some(())), ok(None), 0) - => Err(Error::NotUnique(0u32.into())); - "fails to import genesis block when consensus exists for height 0" -)] -#[test_case( - genesis(0), - underlying_db(ok(None)), - executor_db(ok(None), ok(None), ok(Some(())), 0) + executor_db(ok(None), ok(Some(())), 0) => Err(Error::NotUnique(0u32.into())); "fails to import genesis block when block exists for height 0" )] @@ -287,77 +268,63 @@ fn commit_result_genesis( #[test_case( poa_block(1), underlying_db(ok(Some(0))), - executor_db(ok(Some(0)), ok(None), ok(None), 1) + executor_db(ok(Some(0)), ok(None), 1) => Ok(()); "successfully imports block at height 1 when latest block is genesis" )] #[test_case( poa_block(113), underlying_db(ok(Some(112))), - executor_db(ok(Some(112)), ok(None), ok(None), 1) + executor_db(ok(Some(112)), ok(None), 1) => Ok(()); "successfully imports block at arbitrary height when latest block height is one fewer and executor db expects it" )] #[test_case( poa_block(0), underlying_db(ok(Some(0))), - executor_db(ok(Some(1)), ok(None), ok(None), 0) + executor_db(ok(Some(1)), ok(None), 0) => Err(Error::ZeroNonGenericHeight); "fails to import PoA block with height 0" )] #[test_case( poa_block(113), underlying_db(ok(Some(111))), - executor_db(ok(Some(113)), ok(None), ok(None), 0) + executor_db(ok(Some(113)), ok(None), 0) => Err(Error::IncorrectBlockHeight(112u32.into(), 113u32.into())); "fails to import block at height 113 when latest block height is 111" )] #[test_case( poa_block(113), 
underlying_db(ok(Some(114))), - executor_db(ok(Some(113)), ok(None), ok(None), 0) + executor_db(ok(Some(113)), ok(None), 0) => Err(Error::IncorrectBlockHeight(115u32.into(), 113u32.into())); "fails to import block at height 113 when latest block height is 114" )] #[test_case( poa_block(113), underlying_db(ok(Some(112))), - executor_db(ok(Some(114)), ok(None), ok(None), 0) + executor_db(ok(Some(114)), ok(None), 0) => Err(Error::InvalidDatabaseStateAfterExecution(Some(112u32.into()), Some(114u32.into()))); "fails to import block 113 when executor db expects height 114" )] #[test_case( poa_block(113), underlying_db(ok(Some(112))), - executor_db(storage_failure, ok(None), ok(None), 0) + executor_db(storage_failure, ok(None), 0) => Err(storage_failure_error()); "fails to import block when executor db fails to find latest block" )] #[test_case( poa_block(113), underlying_db(ok(Some(112))), - executor_db(ok(Some(112)), ok(Some(())), ok(None), 0) - => Err(Error::NotUnique(113u32.into())); - "fails to import block when consensus exists for block" -)] -#[test_case( - poa_block(113), - underlying_db(ok(Some(112))), - executor_db(ok(Some(112)), storage_failure, ok(None), 0) - => Err(storage_failure_error()); - "fails to import block when executor db fails to find consensus" -)] -#[test_case( - poa_block(113), - underlying_db(ok(Some(112))), - executor_db(ok(Some(112)), ok(None), ok(Some(())), 0) + executor_db(ok(Some(112)), ok(Some(())), 0) => Err(Error::NotUnique(113u32.into())); "fails to import block when block exists" )] #[test_case( poa_block(113), underlying_db(ok(Some(112))), - executor_db(ok(Some(112)), ok(None), storage_failure, 0) + executor_db(ok(Some(112)), storage_failure, 0) => Err(storage_failure_error()); "fails to import block when executor db fails to find block" )] @@ -566,7 +533,7 @@ where underlying_db(ok(Some(previous_height)))(), executor( block_after_execution, - executor_db(ok(Some(previous_height)), ok(None), ok(None), commits)(), + 
executor_db(ok(Some(previous_height)), ok(None), commits)(), ), verifier(verifier_result), ); diff --git a/crates/services/importer/src/ports.rs b/crates/services/importer/src/ports.rs index 32b46cbc440..c26d4bab4a0 100644 --- a/crates/services/importer/src/ports.rs +++ b/crates/services/importer/src/ports.rs @@ -4,14 +4,14 @@ use fuel_core_storage::{ }; use fuel_core_types::{ blockchain::{ - block::{ - Block, - CompressedBlock, - }, + block::Block, consensus::Consensus, - primitives::BlockId, + SealedBlock, + }, + fuel_types::{ + BlockHeight, + ChainId, }, - fuel_types::BlockHeight, services::executor::{ Result as ExecutorResult, UncommittedResult, @@ -43,19 +43,13 @@ pub trait ImporterDatabase { /// The port for returned database from the executor. pub trait ExecutorDatabase: ImporterDatabase { - /// Assigns the `Consensus` data to the block under the `block_id`. - /// Return the previous value at the `height`, if any. - fn seal_block( - &mut self, - block_id: &BlockId, - consensus: &Consensus, - ) -> StorageResult>; - - /// Inserts the `CompressedBlock` under the `block_id`. + /// Inserts the `SealedBlock` under the `block_id`. + // TODO: Remove `chain_id` from the signature, but for that transactions inside + // the block should have `cached_id`. We need to guarantee that from the Rust-type system. fn block( &mut self, - block_id: &BlockId, - block: &CompressedBlock, + chain_id: &ChainId, + block: &SealedBlock, ) -> StorageResult>; } diff --git a/crates/storage/src/tables.rs b/crates/storage/src/tables.rs index 2c2df585f13..27f5cb2fb23 100644 --- a/crates/storage/src/tables.rs +++ b/crates/storage/src/tables.rs @@ -121,5 +121,16 @@ impl Mappable for Transactions { type OwnedValue = Transaction; } +/// The storage table of processed transactions that were executed in the past. +/// The table helps to drop duplicated transactions. 
+pub struct ProcessedTransactions; + +impl Mappable for ProcessedTransactions { + type Key = Self::OwnedKey; + type OwnedKey = TxId; + type Value = Self::OwnedValue; + type OwnedValue = (); +} + // TODO: Add macro to define all common tables to avoid copy/paste of the code. // TODO: Add macro to define common unit tests. From a49d96d3fd0925278aab0e02d738a3452e218d98 Mon Sep 17 00:00:00 2001 From: xgreenx Date: Sat, 30 Dec 2023 15:18:34 +0100 Subject: [PATCH 08/28] Use `store_block` as a name instead of `block` --- .../fuel-core/src/service/adapters/block_importer.rs | 2 +- crates/services/importer/src/importer.rs | 2 +- crates/services/importer/src/importer/test.rs | 12 ++++++++---- crates/services/importer/src/ports.rs | 2 +- 4 files changed, 11 insertions(+), 7 deletions(-) diff --git a/crates/fuel-core/src/service/adapters/block_importer.rs b/crates/fuel-core/src/service/adapters/block_importer.rs index 399eade7312..1c439171df5 100644 --- a/crates/fuel-core/src/service/adapters/block_importer.rs +++ b/crates/fuel-core/src/service/adapters/block_importer.rs @@ -132,7 +132,7 @@ impl ImporterDatabase for Database { } impl ExecutorDatabase for Database { - fn block( + fn store_block( &mut self, chain_id: &ChainId, block: &SealedBlock, diff --git a/crates/services/importer/src/importer.rs b/crates/services/importer/src/importer.rs index b16a73725c1..42fe5f1d5f7 100644 --- a/crates/services/importer/src/importer.rs +++ b/crates/services/importer/src/importer.rs @@ -253,7 +253,7 @@ where } db_after_execution - .block(&self.chain_id, &result.sealed_block)? + .store_block(&self.chain_id, &result.sealed_block)? 
.should_be_unique(&expected_next_height)?; // Update the total tx count in chain metadata diff --git a/crates/services/importer/src/importer/test.rs b/crates/services/importer/src/importer/test.rs index 7d8bac9990a..f448af76e7d 100644 --- a/crates/services/importer/src/importer/test.rs +++ b/crates/services/importer/src/importer/test.rs @@ -56,7 +56,7 @@ mockall::mock! { } impl ExecutorDatabase for Database { - fn block( + fn store_block( &mut self, chain_id: &ChainId, block: &SealedBlock, @@ -122,18 +122,22 @@ where } } -fn executor_db(height: H, block: B, commits: usize) -> impl Fn() -> MockDatabase +fn executor_db( + height: H, + store_block: B, + commits: usize, +) -> impl Fn() -> MockDatabase where H: Fn() -> StorageResult> + Send + Clone + 'static, B: Fn() -> StorageResult> + Send + Clone + 'static, { move || { let height = height.clone(); - let block = block.clone(); + let store_block = store_block.clone(); let mut db = MockDatabase::default(); db.expect_latest_block_height() .returning(move || height().map(|v| v.map(Into::into))); - db.expect_block().returning(move |_, _| block()); + db.expect_store_block().returning(move |_, _| store_block()); db.expect_commit().times(commits).returning(|| Ok(())); db.expect_increase_tx_count().returning(Ok); db diff --git a/crates/services/importer/src/ports.rs b/crates/services/importer/src/ports.rs index c26d4bab4a0..9f188c472a5 100644 --- a/crates/services/importer/src/ports.rs +++ b/crates/services/importer/src/ports.rs @@ -46,7 +46,7 @@ pub trait ExecutorDatabase: ImporterDatabase { /// Inserts the `SealedBlock` under the `block_id`. // TODO: Remove `chain_id` from the signature, but for that transactions inside // the block should have `cached_id`. We need to guarantee that from the Rust-type system. 
- fn block( + fn store_block( &mut self, chain_id: &ChainId, block: &SealedBlock, From f5c38f245f81d65195c1133964e076e280f1cbe5 Mon Sep 17 00:00:00 2001 From: xgreenx Date: Fri, 5 Jan 2024 16:09:20 +0100 Subject: [PATCH 09/28] Move `ChainId` to `fuel_core_importer::Config` --- bin/fuel-core/src/cli/run.rs | 6 ++-- .../src/service/adapters/block_importer.rs | 4 +-- crates/fuel-core/src/service/config.rs | 6 ++-- crates/fuel-core/src/service/genesis.rs | 1 - crates/fuel-core/src/service/sub_services.rs | 1 - crates/services/importer/src/config.rs | 16 +++++++++++ crates/services/importer/src/importer.rs | 11 ++------ crates/services/importer/src/importer/test.rs | 28 ++----------------- 8 files changed, 29 insertions(+), 44 deletions(-) diff --git a/bin/fuel-core/src/cli/run.rs b/bin/fuel-core/src/cli/run.rs index e6210d8330c..9b1faeff4ad 100644 --- a/bin/fuel-core/src/cli/run.rs +++ b/bin/fuel-core/src/cli/run.rs @@ -300,6 +300,9 @@ impl Command { max_wait_time: max_wait_time.into(), }; + let block_importer = + fuel_core::service::config::fuel_core_importer::Config::new(&chain_conf); + let config = Config { addr, api_request_timeout: api_request_timeout.into(), @@ -328,8 +331,7 @@ impl Command { coinbase_recipient, metrics, }, - block_executor: Default::default(), - block_importer: Default::default(), + block_importer, #[cfg(feature = "relayer")] relayer: relayer_cfg, #[cfg(feature = "p2p")] diff --git a/crates/fuel-core/src/service/adapters/block_importer.rs b/crates/fuel-core/src/service/adapters/block_importer.rs index 1c439171df5..e286e0c2dd7 100644 --- a/crates/fuel-core/src/service/adapters/block_importer.rs +++ b/crates/fuel-core/src/service/adapters/block_importer.rs @@ -6,7 +6,6 @@ use crate::{ VerifierAdapter, }, }; -use fuel_core_chain_config::ChainConfig; use fuel_core_importer::{ ports::{ BlockVerifier, @@ -56,12 +55,11 @@ use super::{ impl BlockImporterAdapter { pub fn new( config: Config, - chain_config: &ChainConfig, database: Database, executor: 
ExecutorAdapter, verifier: VerifierAdapter, ) -> Self { - let importer = Importer::new(config, chain_config, database, executor, verifier); + let importer = Importer::new(config, database, executor, verifier); importer.init_metrics(); Self { block_importer: Arc::new(importer), diff --git a/crates/fuel-core/src/service/config.rs b/crates/fuel-core/src/service/config.rs index f0cabfda032..5aafec6446b 100644 --- a/crates/fuel-core/src/service/config.rs +++ b/crates/fuel-core/src/service/config.rs @@ -30,6 +30,7 @@ use fuel_core_p2p::config::{ #[cfg(feature = "relayer")] use fuel_core_relayer::Config as RelayerConfig; +pub use fuel_core_importer; pub use fuel_core_poa::Trigger; #[derive(Clone, Debug)] @@ -51,7 +52,6 @@ pub struct Config { pub vm: VMConfig, pub txpool: fuel_core_txpool::Config, pub block_producer: fuel_core_producer::Config, - pub block_executor: fuel_core_executor::Config, pub block_importer: fuel_core_importer::Config, #[cfg(feature = "relayer")] pub relayer: Option, @@ -73,6 +73,7 @@ pub struct Config { impl Config { pub fn local_node() -> Self { let chain_conf = ChainConfig::local_testnet(); + let block_importer = fuel_core_importer::Config::new(&chain_conf); let utxo_validation = false; let min_gas_price = 0; @@ -99,8 +100,7 @@ impl Config { ..fuel_core_txpool::Config::default() }, block_producer: Default::default(), - block_executor: Default::default(), - block_importer: Default::default(), + block_importer, #[cfg(feature = "relayer")] relayer: None, #[cfg(feature = "p2p")] diff --git a/crates/fuel-core/src/service/genesis.rs b/crates/fuel-core/src/service/genesis.rs index 31c409b607e..8039f438d12 100644 --- a/crates/fuel-core/src/service/genesis.rs +++ b/crates/fuel-core/src/service/genesis.rs @@ -132,7 +132,6 @@ fn import_genesis_block( let importer = Importer::new( config.block_importer.clone(), - &config.chain_conf, original_database.clone(), (), (), diff --git a/crates/fuel-core/src/service/sub_services.rs 
b/crates/fuel-core/src/service/sub_services.rs index 0a027cc3d2e..1523fe41c15 100644 --- a/crates/fuel-core/src/service/sub_services.rs +++ b/crates/fuel-core/src/service/sub_services.rs @@ -89,7 +89,6 @@ pub fn init_sub_services( let importer_adapter = BlockImporterAdapter::new( config.block_importer.clone(), - &config.chain_conf, database.clone(), executor.clone(), verifier.clone(), diff --git a/crates/services/importer/src/config.rs b/crates/services/importer/src/config.rs index ddb17391427..c551127c68a 100644 --- a/crates/services/importer/src/config.rs +++ b/crates/services/importer/src/config.rs @@ -1,14 +1,30 @@ +use fuel_core_chain_config::ChainConfig; +use fuel_core_types::fuel_types::ChainId; + #[derive(Debug, Clone)] pub struct Config { pub max_block_notify_buffer: usize, pub metrics: bool, + pub chain_id: ChainId, +} + +impl Config { + pub fn new(chain_config: &ChainConfig) -> Self { + Self { + max_block_notify_buffer: 1 << 10, + metrics: false, + chain_id: chain_config.consensus_parameters.chain_id, + } + } } +#[cfg(test)] impl Default for Config { fn default() -> Self { Self { max_block_notify_buffer: 1 << 10, metrics: false, + chain_id: ChainId::default(), } } } diff --git a/crates/services/importer/src/importer.rs b/crates/services/importer/src/importer.rs index 42fe5f1d5f7..daf129eb73d 100644 --- a/crates/services/importer/src/importer.rs +++ b/crates/services/importer/src/importer.rs @@ -7,7 +7,6 @@ use crate::{ }, Config, }; -use fuel_core_chain_config::ChainConfig; use fuel_core_metrics::importer::importer_metrics; use fuel_core_storage::{ not_found, @@ -114,20 +113,14 @@ pub struct Importer { } impl Importer { - pub fn new( - config: Config, - chain_config: &ChainConfig, - database: D, - executor: E, - verifier: V, - ) -> Self { + pub fn new(config: Config, database: D, executor: E, verifier: V) -> Self { let (broadcast, _) = broadcast::channel(config.max_block_notify_buffer); Self { database, executor, verifier, - chain_id: 
chain_config.consensus_parameters.chain_id, + chain_id: config.chain_id, broadcast, guard: tokio::sync::Semaphore::new(1), } diff --git a/crates/services/importer/src/importer/test.rs b/crates/services/importer/src/importer/test.rs index f448af76e7d..ad8e9aa3402 100644 --- a/crates/services/importer/src/importer/test.rs +++ b/crates/services/importer/src/importer/test.rs @@ -358,13 +358,7 @@ fn commit_result_assert( executor_db: MockDatabase, ) -> Result<(), Error> { let expected_to_broadcast = sealed_block.clone(); - let importer = Importer::new( - Default::default(), - &Default::default(), - underlying_db, - (), - (), - ); + let importer = Importer::new(Default::default(), underlying_db, (), ()); let uncommitted_result = UncommittedResult::new( ImportResult::new_from_local(sealed_block, vec![]), StorageTransaction::new(executor_db), @@ -394,13 +388,7 @@ fn execute_and_commit_assert( verifier: MockBlockVerifier, ) -> Result<(), Error> { let expected_to_broadcast = sealed_block.clone(); - let importer = Importer::new( - Default::default(), - &Default::default(), - underlying_db, - executor, - verifier, - ); + let importer = Importer::new(Default::default(), underlying_db, executor, verifier); let mut imported_blocks = importer.subscribe(); let result = importer.execute_and_commit(sealed_block); @@ -421,13 +409,7 @@ fn execute_and_commit_assert( #[test] fn commit_result_fail_when_locked() { - let importer = Importer::new( - Default::default(), - &Default::default(), - MockDatabase::default(), - (), - (), - ); + let importer = Importer::new(Default::default(), MockDatabase::default(), (), ()); let uncommitted_result = UncommittedResult::new( ImportResult::default(), StorageTransaction::new(MockDatabase::default()), @@ -444,7 +426,6 @@ fn commit_result_fail_when_locked() { fn execute_and_commit_fail_when_locked() { let importer = Importer::new( Default::default(), - &Default::default(), MockDatabase::default(), MockExecutor::default(), MockBlockVerifier::default(), @@ 
-461,7 +442,6 @@ fn execute_and_commit_fail_when_locked() { fn one_lock_at_the_same_time() { let importer = Importer::new( Default::default(), - &Default::default(), MockDatabase::default(), MockExecutor::default(), MockBlockVerifier::default(), @@ -556,7 +536,6 @@ where { let importer = Importer::new( Default::default(), - &Default::default(), MockDatabase::default(), executor(block_after_execution, MockDatabase::default()), verifier(verifier_result), @@ -569,7 +548,6 @@ where fn verify_and_execute_allowed_when_locked() { let importer = Importer::new( Default::default(), - &Default::default(), MockDatabase::default(), executor(ok(ex_result(13, 0)), MockDatabase::default()), verifier(ok(())), From 4c3f18cfa4fa7f60f65d75e47acb0956b11c4977 Mon Sep 17 00:00:00 2001 From: xgreenx Date: Fri, 5 Jan 2024 16:48:16 +0100 Subject: [PATCH 10/28] Apply suggestions from the PR --- Cargo.lock | 4 +- crates/storage/src/lib.rs | 2 +- crates/storage/src/structure/sparse.rs | 42 +++++++++---------- .../src/structured_storage/balances.rs | 6 +-- .../storage/src/structured_storage/state.rs | 6 +-- 5 files changed, 30 insertions(+), 30 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c97b8d67068..1208e97f32f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4007,7 +4007,7 @@ dependencies = [ "autocfg", "impl-tools-lib", "proc-macro-error", - "syn 2.0.41", + "syn 2.0.42", ] [[package]] @@ -4019,7 +4019,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.41", + "syn 2.0.42", ] [[package]] diff --git a/crates/storage/src/lib.rs b/crates/storage/src/lib.rs index 21116de17f3..9460834cce4 100644 --- a/crates/storage/src/lib.rs +++ b/crates/storage/src/lib.rs @@ -120,7 +120,7 @@ impl IsNotFound for Result { /// Some implementations can perform batch operations faster than one by one. pub trait StorageBatchMutate: StorageMutate { /// Initialize the storage with batch insertion. This method is more performant than - /// [`Self::insert_batch`] in some case. 
+ /// [`Self::insert_batch`] in some cases. /// /// # Errors /// diff --git a/crates/storage/src/structure/sparse.rs b/crates/storage/src/structure/sparse.rs index 3a43fc26b7f..028eef57056 100644 --- a/crates/storage/src/structure/sparse.rs +++ b/crates/storage/src/structure/sparse.rs @@ -68,14 +68,14 @@ pub trait PrimaryKey { /// and the `Nodes` table stores the tree's nodes. The SMT is built over the encoded /// keys and values using the same encoding as for main key-value pairs. /// -/// The `KeyConvertor` is used to convert the key of the table into the primary key of the metadata table. -pub struct Sparse { +/// The `KeyConverter` is used to convert the key of the table into the primary key of the metadata table. +pub struct Sparse { _marker: - core::marker::PhantomData<(KeyCodec, ValueCodec, Metadata, Nodes, KeyConvertor)>, + core::marker::PhantomData<(KeyCodec, ValueCodec, Metadata, Nodes, KeyConverter)>, } -impl - Sparse +impl + Sparse where Metadata: Mappable, Nodes: Mappable< @@ -94,10 +94,10 @@ where K: ?Sized, for<'a> StructuredStorage<&'a mut S>: StorageMutate + StorageMutate, - KeyConvertor: PrimaryKey, + KeyConverter: PrimaryKey, { let mut storage = StructuredStorage::new(storage); - let primary_key = KeyConvertor::primary_key(key); + let primary_key = KeyConverter::primary_key(key); // Get latest metadata entry for this `primary_key` let prev_metadata: Cow = storage .storage::() @@ -129,10 +129,10 @@ where K: ?Sized, for<'a> StructuredStorage<&'a mut S>: StorageMutate + StorageMutate, - KeyConvertor: PrimaryKey, + KeyConverter: PrimaryKey, { let mut storage = StructuredStorage::new(storage); - let primary_key = KeyConvertor::primary_key(key); + let primary_key = KeyConverter::primary_key(key); // Get latest metadata entry for this `primary_key` let prev_metadata: Option> = storage.storage::().get(primary_key)?; @@ -163,8 +163,8 @@ where } } -impl Structure - for Sparse +impl Structure + for Sparse where M: Mappable, S: KeyValueStore, @@ -176,7 
+176,7 @@ where Value = sparse::Primitive, OwnedValue = sparse::Primitive, >, - KeyConvertor: PrimaryKey, + KeyConverter: PrimaryKey, for<'a> StructuredStorage<&'a mut S>: StorageMutate + StorageMutate, { @@ -241,13 +241,13 @@ where } } -impl +impl MerkleRootStorage for StructuredStorage where S: KeyValueStore, M: Mappable + TableWithStructure< - Structure = Sparse, + Structure = Sparse, >, Self: StorageMutate + StorageInspect, @@ -270,13 +270,13 @@ type NodeKeyCodec = type NodeValueCodec = <::Structure as Structure>::ValueCodec; -impl SupportsBatching - for Sparse +impl SupportsBatching + for Sparse where S: BatchOperations, M: Mappable + TableWithStructure< - Structure = Sparse, + Structure = Sparse, >, KeyCodec: Encode + Decode, ValueCodec: Encode + Decode, @@ -286,7 +286,7 @@ where Value = sparse::Primitive, OwnedValue = sparse::Primitive, > + TableWithStructure, - KeyConvertor: PrimaryKey, + KeyConverter: PrimaryKey, Nodes::Structure: Structure, for<'a> StructuredStorage<&'a mut S>: StorageMutate + StorageMutate @@ -301,7 +301,7 @@ where let primary_key; if let Some((key, _)) = set.peek() { - primary_key = KeyConvertor::primary_key(*key); + primary_key = KeyConverter::primary_key(*key); } else { return Ok(()) } @@ -362,7 +362,7 @@ where let primary_key; if let Some((key, _)) = set.peek() { - primary_key = KeyConvertor::primary_key(*key); + primary_key = KeyConverter::primary_key(*key); } else { return Ok(()) } @@ -415,7 +415,7 @@ where let primary_key; if let Some(key) = set.peek() { - primary_key = KeyConvertor::primary_key(*key); + primary_key = KeyConverter::primary_key(*key); } else { return Ok(()) } diff --git a/crates/storage/src/structured_storage/balances.rs b/crates/storage/src/structured_storage/balances.rs index 349a1e15c93..fd2d274d713 100644 --- a/crates/storage/src/structured_storage/balances.rs +++ b/crates/storage/src/structured_storage/balances.rs @@ -24,9 +24,9 @@ use fuel_core_types::fuel_vm::ContractsAssetKey; /// The key convertor used to 
convert the key from the `ContractsAssets` table /// to the key of the `ContractsAssetsMerkleMetadata` table. -pub struct KeyConvertor; +pub struct KeyConverter; -impl PrimaryKey for KeyConvertor { +impl PrimaryKey for KeyConverter { type InputKey = ::Key; type OutputKey = ::Key; @@ -41,7 +41,7 @@ impl TableWithStructure for ContractsAssets { Primitive<8>, ContractsAssetsMerkleMetadata, ContractsAssetsMerkleData, - KeyConvertor, + KeyConverter, >; fn column() -> Column { diff --git a/crates/storage/src/structured_storage/state.rs b/crates/storage/src/structured_storage/state.rs index 2d63470f1e3..a0e3b76ff4a 100644 --- a/crates/storage/src/structured_storage/state.rs +++ b/crates/storage/src/structured_storage/state.rs @@ -24,9 +24,9 @@ use fuel_core_types::fuel_vm::ContractsStateKey; /// The key convertor used to convert the key from the `ContractsState` table /// to the key of the `ContractsStateMerkleMetadata` table. -pub struct KeyConvertor; +pub struct KeyConverter; -impl PrimaryKey for KeyConvertor { +impl PrimaryKey for KeyConverter { type InputKey = ::Key; type OutputKey = ::Key; @@ -41,7 +41,7 @@ impl TableWithStructure for ContractsState { Raw, ContractsStateMerkleMetadata, ContractsStateMerkleData, - KeyConvertor, + KeyConverter, >; fn column() -> Column { From c1ee5f8c628bad3af0631cfea292302d336992f4 Mon Sep 17 00:00:00 2001 From: xgreenx Date: Fri, 5 Jan 2024 22:39:38 +0100 Subject: [PATCH 11/28] Apply suggestions from the PR --- .../src/service/adapters/block_importer.rs | 6 +-- .../service/adapters/consensus_module/poa.rs | 29 +++++++------- .../consensus_module/poa/src/ports.rs | 10 ++++- .../consensus_module/poa/src/service.rs | 15 +++---- crates/services/importer/src/importer.rs | 6 +-- crates/services/importer/src/importer/test.rs | 39 ++++++++++--------- crates/services/importer/src/ports.rs | 8 ++-- 7 files changed, 64 insertions(+), 49 deletions(-) diff --git a/crates/fuel-core/src/service/adapters/block_importer.rs 
b/crates/fuel-core/src/service/adapters/block_importer.rs index e286e0c2dd7..89627483c8d 100644 --- a/crates/fuel-core/src/service/adapters/block_importer.rs +++ b/crates/fuel-core/src/service/adapters/block_importer.rs @@ -130,11 +130,11 @@ impl ImporterDatabase for Database { } impl ExecutorDatabase for Database { - fn store_block( + fn store_new_block( &mut self, chain_id: &ChainId, block: &SealedBlock, - ) -> StorageResult> { + ) -> StorageResult { let block_id = block.entity.id(); let mut found = self .storage::() @@ -152,7 +152,7 @@ impl ExecutorDatabase for Database { .insert(&tx.id(chain_id), tx)? .is_some(); } - Ok(found.then_some(())) + Ok(!found) } } diff --git a/crates/fuel-core/src/service/adapters/consensus_module/poa.rs b/crates/fuel-core/src/service/adapters/consensus_module/poa.rs index e53b37e11bd..ac446c71675 100644 --- a/crates/fuel-core/src/service/adapters/consensus_module/poa.rs +++ b/crates/fuel-core/src/service/adapters/consensus_module/poa.rs @@ -17,6 +17,7 @@ use fuel_core_poa::{ BlockImporter, P2pPort, TransactionPool, + TransactionsSource, }, service::{ Mode, @@ -27,10 +28,7 @@ use fuel_core_services::stream::BoxStream; use fuel_core_storage::transactional::StorageTransaction; use fuel_core_types::{ fuel_asm::Word, - fuel_tx::{ - Transaction, - TxId, - }, + fuel_tx::TxId, fuel_types::BlockHeight, services::{ block_importer::{ @@ -106,17 +104,22 @@ impl fuel_core_poa::ports::BlockProducer for BlockProducerAdapter { &self, height: BlockHeight, block_time: Tai64, - txs: Option>, + source: TransactionsSource, max_gas: Word, ) -> anyhow::Result>> { - if let Some(txs) = txs { - self.block_producer - .produce_and_execute_block_transactions(height, block_time, txs, max_gas) - .await - } else { - self.block_producer - .produce_and_execute_block_txpool(height, block_time, max_gas) - .await + match source { + TransactionsSource::TxPool => { + self.block_producer + .produce_and_execute_block_txpool(height, block_time, max_gas) + .await + } + 
TransactionsSource::SpecificTransactions(txs) => { + self.block_producer + .produce_and_execute_block_transactions( + height, block_time, txs, max_gas, + ) + .await + } } } } diff --git a/crates/services/consensus_module/poa/src/ports.rs b/crates/services/consensus_module/poa/src/ports.rs index 1866b871cbc..fdb8a2d11de 100644 --- a/crates/services/consensus_module/poa/src/ports.rs +++ b/crates/services/consensus_module/poa/src/ports.rs @@ -43,6 +43,14 @@ pub trait TransactionPool: Send + Sync { #[cfg(test)] use fuel_core_storage::test_helpers::EmptyStorage; +/// The source of transactions for the block. +pub enum TransactionsSource { + /// The source of transactions for the block is the `TxPool`. + TxPool, + /// Use specific transactions for the block. + SpecificTransactions(Vec), +} + #[cfg_attr(test, mockall::automock(type Database=EmptyStorage;))] #[async_trait::async_trait] pub trait BlockProducer: Send + Sync { @@ -52,7 +60,7 @@ pub trait BlockProducer: Send + Sync { &self, height: BlockHeight, block_time: Tai64, - txs: Option>, + source: TransactionsSource, max_gas: Word, ) -> anyhow::Result>>; } diff --git a/crates/services/consensus_module/poa/src/service.rs b/crates/services/consensus_module/poa/src/service.rs index 918304f2a1f..3ec7b8727d8 100644 --- a/crates/services/consensus_module/poa/src/service.rs +++ b/crates/services/consensus_module/poa/src/service.rs @@ -8,6 +8,7 @@ use crate::{ BlockProducer, P2pPort, TransactionPool, + TransactionsSource, }, sync::{ SyncState, @@ -255,10 +256,10 @@ where &self, height: BlockHeight, block_time: Tai64, - txs: Option>, + source: TransactionsSource, ) -> anyhow::Result>> { self.block_producer - .produce_and_execute_block(height, block_time, txs, self.block_gas_limit) + .produce_and_execute_block(height, block_time, source, self.block_gas_limit) .await } @@ -266,7 +267,7 @@ where self.produce_block( self.next_height(), self.next_time(RequestType::Trigger)?, - None, + TransactionsSource::TxPool, 
RequestType::Trigger, ) .await @@ -287,7 +288,7 @@ where self.produce_block( self.next_height(), block_time, - None, + TransactionsSource::TxPool, RequestType::Manual, ) .await?; @@ -298,7 +299,7 @@ where self.produce_block( self.next_height(), block_time, - Some(txs), + TransactionsSource::SpecificTransactions(txs), RequestType::Manual, ) .await?; @@ -311,7 +312,7 @@ where &mut self, height: BlockHeight, block_time: Tai64, - txs: Option>, + source: TransactionsSource, request_type: RequestType, ) -> anyhow::Result<()> { let last_block_created = Instant::now(); @@ -333,7 +334,7 @@ where }, db_transaction, ) = self - .signal_produce_block(height, block_time, txs) + .signal_produce_block(height, block_time, source) .await? .into(); diff --git a/crates/services/importer/src/importer.rs b/crates/services/importer/src/importer.rs index daf129eb73d..056c4010410 100644 --- a/crates/services/importer/src/importer.rs +++ b/crates/services/importer/src/importer.rs @@ -245,9 +245,9 @@ where )) } - db_after_execution - .store_block(&self.chain_id, &result.sealed_block)? - .should_be_unique(&expected_next_height)?; + if !db_after_execution.store_new_block(&self.chain_id, &result.sealed_block)? { + return Err(Error::NotUnique(expected_next_height)) + } // Update the total tx count in chain metadata let total_txs = db_after_execution diff --git a/crates/services/importer/src/importer/test.rs b/crates/services/importer/src/importer/test.rs index ad8e9aa3402..897be9f9945 100644 --- a/crates/services/importer/src/importer/test.rs +++ b/crates/services/importer/src/importer/test.rs @@ -56,11 +56,11 @@ mockall::mock! 
{ } impl ExecutorDatabase for Database { - fn store_block( + fn store_new_block( &mut self, chain_id: &ChainId, block: &SealedBlock, - ) -> StorageResult>; + ) -> StorageResult; } impl TransactionTrait for Database { @@ -129,7 +129,7 @@ fn executor_db( ) -> impl Fn() -> MockDatabase where H: Fn() -> StorageResult> + Send + Clone + 'static, - B: Fn() -> StorageResult> + Send + Clone + 'static, + B: Fn() -> StorageResult + Send + Clone + 'static, { move || { let height = height.clone(); @@ -137,7 +137,8 @@ where let mut db = MockDatabase::default(); db.expect_latest_block_height() .returning(move || height().map(|v| v.map(Into::into))); - db.expect_store_block().returning(move |_, _| store_block()); + db.expect_store_new_block() + .returning(move |_, _| store_block()); db.expect_commit().times(commits).returning(|| Ok(())); db.expect_increase_tx_count().returning(Ok); db @@ -221,42 +222,42 @@ where #[test_case( genesis(0), underlying_db(ok(None)), - executor_db(ok(None), ok(None), 1) + executor_db(ok(None), ok(true), 1) => Ok(()); "successfully imports genesis block when latest block not found" )] #[test_case( genesis(113), underlying_db(ok(None)), - executor_db(ok(None), ok(None), 1) + executor_db(ok(None), ok(true), 1) => Ok(()); "successfully imports block at arbitrary height when executor db expects it and last block not found" )] #[test_case( genesis(0), underlying_db(storage_failure), - executor_db(ok(Some(0)), ok(None), 0) + executor_db(ok(Some(0)), ok(true), 0) => Err(storage_failure_error()); "fails to import genesis when underlying database fails" )] #[test_case( genesis(0), underlying_db(ok(Some(0))), - executor_db(ok(Some(0)), ok(None), 0) + executor_db(ok(Some(0)), ok(true), 0) => Err(Error::InvalidUnderlyingDatabaseGenesisState); "fails to import genesis block when already exists" )] #[test_case( genesis(1), underlying_db(ok(None)), - executor_db(ok(Some(0)), ok(None), 0) + executor_db(ok(Some(0)), ok(true), 0) => 
Err(Error::InvalidDatabaseStateAfterExecution(None, Some(0u32.into()))); "fails to import genesis block when next height is not 0" )] #[test_case( genesis(0), underlying_db(ok(None)), - executor_db(ok(None), ok(Some(())), 0) + executor_db(ok(None), ok(false), 0) => Err(Error::NotUnique(0u32.into())); "fails to import genesis block when block exists for height 0" )] @@ -272,56 +273,56 @@ fn commit_result_genesis( #[test_case( poa_block(1), underlying_db(ok(Some(0))), - executor_db(ok(Some(0)), ok(None), 1) + executor_db(ok(Some(0)), ok(true), 1) => Ok(()); "successfully imports block at height 1 when latest block is genesis" )] #[test_case( poa_block(113), underlying_db(ok(Some(112))), - executor_db(ok(Some(112)), ok(None), 1) + executor_db(ok(Some(112)), ok(true), 1) => Ok(()); "successfully imports block at arbitrary height when latest block height is one fewer and executor db expects it" )] #[test_case( poa_block(0), underlying_db(ok(Some(0))), - executor_db(ok(Some(1)), ok(None), 0) + executor_db(ok(Some(1)), ok(true), 0) => Err(Error::ZeroNonGenericHeight); "fails to import PoA block with height 0" )] #[test_case( poa_block(113), underlying_db(ok(Some(111))), - executor_db(ok(Some(113)), ok(None), 0) + executor_db(ok(Some(113)), ok(true), 0) => Err(Error::IncorrectBlockHeight(112u32.into(), 113u32.into())); "fails to import block at height 113 when latest block height is 111" )] #[test_case( poa_block(113), underlying_db(ok(Some(114))), - executor_db(ok(Some(113)), ok(None), 0) + executor_db(ok(Some(113)), ok(true), 0) => Err(Error::IncorrectBlockHeight(115u32.into(), 113u32.into())); "fails to import block at height 113 when latest block height is 114" )] #[test_case( poa_block(113), underlying_db(ok(Some(112))), - executor_db(ok(Some(114)), ok(None), 0) + executor_db(ok(Some(114)), ok(true), 0) => Err(Error::InvalidDatabaseStateAfterExecution(Some(112u32.into()), Some(114u32.into()))); "fails to import block 113 when executor db expects height 114" )] 
#[test_case( poa_block(113), underlying_db(ok(Some(112))), - executor_db(storage_failure, ok(None), 0) + executor_db(storage_failure, ok(true), 0) => Err(storage_failure_error()); "fails to import block when executor db fails to find latest block" )] #[test_case( poa_block(113), underlying_db(ok(Some(112))), - executor_db(ok(Some(112)), ok(Some(())), 0) + executor_db(ok(Some(112)), ok(false), 0) => Err(Error::NotUnique(113u32.into())); "fails to import block when block exists" )] @@ -517,7 +518,7 @@ where underlying_db(ok(Some(previous_height)))(), executor( block_after_execution, - executor_db(ok(Some(previous_height)), ok(None), commits)(), + executor_db(ok(Some(previous_height)), ok(true), commits)(), ), verifier(verifier_result), ); diff --git a/crates/services/importer/src/ports.rs b/crates/services/importer/src/ports.rs index 9f188c472a5..51c14e5085b 100644 --- a/crates/services/importer/src/ports.rs +++ b/crates/services/importer/src/ports.rs @@ -43,14 +43,16 @@ pub trait ImporterDatabase { /// The port for returned database from the executor. pub trait ExecutorDatabase: ImporterDatabase { - /// Inserts the `SealedBlock` under the `block_id`. + /// Inserts the `SealedBlock`. + /// + /// The method returns `true` if the block is a new, otherwise `false`. // TODO: Remove `chain_id` from the signature, but for that transactions inside // the block should have `cached_id`. We need to guarantee that from the Rust-type system. 
- fn store_block( + fn store_new_block( &mut self, chain_id: &ChainId, block: &SealedBlock, - ) -> StorageResult>; + ) -> StorageResult; } #[cfg_attr(test, mockall::automock)] From c594d45366ba59a76248663589df65537bf11313 Mon Sep 17 00:00:00 2001 From: xgreenx Date: Thu, 4 Jan 2024 12:09:14 +0100 Subject: [PATCH 12/28] Extract off chain logic from the executor --- Cargo.lock | 1 + crates/fuel-core/src/coins_query.rs | 55 ++-- crates/fuel-core/src/executor.rs | 37 +-- crates/fuel-core/src/graphql_api.rs | 5 +- .../{service.rs => api_service.rs} | 38 ++- crates/fuel-core/src/graphql_api/database.rs | 223 ++++++++++++++ crates/fuel-core/src/graphql_api/ports.rs | 125 +++++--- .../src/graphql_api/view_extension.rs | 44 +++ .../src/graphql_api/worker_service.rs | 277 ++++++++++++++++++ crates/fuel-core/src/query/balance.rs | 4 +- .../src/query/balance/asset_query.rs | 10 +- crates/fuel-core/src/query/block.rs | 6 +- crates/fuel-core/src/query/chain.rs | 4 +- crates/fuel-core/src/query/coin.rs | 7 +- crates/fuel-core/src/query/contract.rs | 4 +- crates/fuel-core/src/query/message.rs | 10 +- crates/fuel-core/src/query/tx.rs | 15 +- crates/fuel-core/src/schema/balance.rs | 8 +- crates/fuel-core/src/schema/block.rs | 30 +- crates/fuel-core/src/schema/chain.rs | 10 +- crates/fuel-core/src/schema/coins.rs | 12 +- crates/fuel-core/src/schema/contract.rs | 20 +- crates/fuel-core/src/schema/message.rs | 19 +- crates/fuel-core/src/schema/node_info.rs | 2 +- crates/fuel-core/src/schema/tx.rs | 45 +-- crates/fuel-core/src/schema/tx/types.rs | 24 +- crates/fuel-core/src/service.rs | 6 +- .../src/service/adapters/block_importer.rs | 6 +- .../service/adapters/consensus_module/poa.rs | 8 +- .../src/service/adapters/executor.rs | 39 +-- .../src/service/adapters/graphql_api.rs | 210 ++----------- .../service/adapters/graphql_api/off_chain.rs | 116 ++++++++ .../service/adapters/graphql_api/on_chain.rs | 139 +++++++++ crates/fuel-core/src/service/adapters/sync.rs | 1 + 
.../fuel-core/src/service/adapters/txpool.rs | 14 +- crates/fuel-core/src/service/genesis.rs | 3 +- crates/fuel-core/src/service/sub_services.rs | 39 ++- .../consensus_module/poa/src/ports.rs | 3 +- .../consensus_module/poa/src/service.rs | 10 +- crates/services/executor/src/executor.rs | 180 ++---------- crates/services/executor/src/ports.rs | 40 +-- crates/services/importer/Cargo.toml | 1 + crates/services/importer/src/config.rs | 2 +- crates/services/importer/src/importer.rs | 152 ++++++++-- crates/services/importer/src/importer/test.rs | 62 ++-- crates/services/importer/src/ports.rs | 4 +- crates/services/txpool/src/mock_db.rs | 7 - crates/services/txpool/src/ports.rs | 8 +- crates/services/txpool/src/service.rs | 13 +- .../txpool/src/service/test_helpers.rs | 9 +- crates/services/txpool/src/txpool.rs | 27 +- crates/storage/src/transactional.rs | 11 + crates/types/src/services/block_importer.rs | 17 +- crates/types/src/services/executor.rs | 3 + crates/types/src/services/txpool.rs | 30 +- 55 files changed, 1428 insertions(+), 767 deletions(-) rename crates/fuel-core/src/graphql_api/{service.rs => api_service.rs} (89%) create mode 100644 crates/fuel-core/src/graphql_api/database.rs create mode 100644 crates/fuel-core/src/graphql_api/view_extension.rs create mode 100644 crates/fuel-core/src/graphql_api/worker_service.rs create mode 100644 crates/fuel-core/src/service/adapters/graphql_api/off_chain.rs create mode 100644 crates/fuel-core/src/service/adapters/graphql_api/on_chain.rs diff --git a/Cargo.lock b/Cargo.lock index 264c9099a88..9ae03c137dc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2850,6 +2850,7 @@ dependencies = [ "mockall", "test-case", "tokio", + "tokio-rayon", "tracing", ] diff --git a/crates/fuel-core/src/coins_query.rs b/crates/fuel-core/src/coins_query.rs index 254e5f1b7f3..7314889b402 100644 --- a/crates/fuel-core/src/coins_query.rs +++ b/crates/fuel-core/src/coins_query.rs @@ -1,5 +1,5 @@ use crate::{ - fuel_core_graphql_api::service::Database, 
+ fuel_core_graphql_api::database::ReadView, query::asset_query::{ AssetQuery, AssetSpendTarget, @@ -95,7 +95,7 @@ impl SpendQuery { } /// Return [`AssetQuery`]s. - pub fn asset_queries<'a>(&'a self, db: &'a Database) -> Vec> { + pub fn asset_queries<'a>(&'a self, db: &'a ReadView) -> Vec> { self.query_per_asset .iter() .map(|asset| { @@ -159,7 +159,7 @@ pub fn largest_first(query: &AssetQuery) -> Result, CoinsQueryErro // An implementation of the method described on: https://iohk.io/en/blog/posts/2018/07/03/self-organisation-in-coin-selection/ pub fn random_improve( - db: &Database, + db: &ReadView, spend_query: &SpendQuery, ) -> Result>, CoinsQueryError> { let mut coins_per_asset = vec![]; @@ -229,7 +229,7 @@ mod tests { SpendQuery, }, database::Database, - fuel_core_graphql_api::service::Database as ServiceDatabase, + fuel_core_graphql_api::api_service::ReadDatabase as ServiceDatabase, query::asset_query::{ AssetQuery, AssetSpendTarget, @@ -323,15 +323,19 @@ mod tests { let result: Vec<_> = spend_query .iter() .map(|asset| { - largest_first(&AssetQuery::new(owner, asset, base_asset_id, None, db)) - .map(|coins| { - coins - .iter() - .map(|coin| { - (*coin.asset_id(base_asset_id), coin.amount()) - }) - .collect() - }) + largest_first(&AssetQuery::new( + owner, + asset, + base_asset_id, + None, + &db.view(), + )) + .map(|coins| { + coins + .iter() + .map(|coin| (*coin.asset_id(base_asset_id), coin.amount())) + .collect() + }) }) .try_collect()?; Ok(result) @@ -484,7 +488,7 @@ mod tests { db: &ServiceDatabase, ) -> Result, CoinsQueryError> { let coins = random_improve( - db, + &db.view(), &SpendQuery::new(owner, &query_per_asset, None, base_asset_id)?, ); @@ -682,7 +686,7 @@ mod tests { Some(excluded_ids), base_asset_id, )?; - let coins = random_improve(&db.service_database(), &spend_query); + let coins = random_improve(&db.service_database().view(), &spend_query); // Transform result for convenience coins.map(|coins| { @@ -840,7 +844,7 @@ mod tests { } let coins = 
random_improve( - &db.service_database(), + &db.service_database().view(), &SpendQuery::new( owner, &[AssetSpendTarget { @@ -930,7 +934,8 @@ mod tests { } fn service_database(&self) -> ServiceDatabase { - Box::new(self.database.clone()) + let database = self.database.clone(); + ServiceDatabase::new(database.clone(), database) } } @@ -980,18 +985,22 @@ mod tests { pub fn owned_coins(&self, owner: &Address) -> Vec { use crate::query::CoinQueryData; - let db = self.service_database(); - db.owned_coins_ids(owner, None, IterDirection::Forward) - .map(|res| res.map(|id| db.coin(id).unwrap())) + let query = self.service_database(); + let query = query.view(); + query + .owned_coins_ids(owner, None, IterDirection::Forward) + .map(|res| res.map(|id| query.coin(id).unwrap())) .try_collect() .unwrap() } pub fn owned_messages(&self, owner: &Address) -> Vec { use crate::query::MessageQueryData; - let db = self.service_database(); - db.owned_message_ids(owner, None, IterDirection::Forward) - .map(|res| res.map(|id| db.message(&id).unwrap())) + let query = self.service_database(); + let query = query.view(); + query + .owned_message_ids(owner, None, IterDirection::Forward) + .map(|res| res.map(|id| query.message(&id).unwrap())) .try_collect() .unwrap() } diff --git a/crates/fuel-core/src/executor.rs b/crates/fuel-core/src/executor.rs index 04a770582ce..c719aadeb54 100644 --- a/crates/fuel-core/src/executor.rs +++ b/crates/fuel-core/src/executor.rs @@ -19,7 +19,6 @@ mod tests { Coins, ContractsRawCode, Messages, - Receipts, }, StorageAsMut, }; @@ -662,23 +661,18 @@ mod tests { coinbase_recipient: config_coinbase, ..Default::default() }; - let mut producer = create_executor(Default::default(), config); + let producer = create_executor(Default::default(), config); let mut block = Block::default(); *block.transactions_mut() = vec![script.clone().into()]; - assert!(producer + let ExecutionResult { tx_status, .. 
} = producer .execute_and_commit( ExecutionBlock::Production(block.into()), - Default::default() + Default::default(), ) - .is_ok()); - let receipts = producer - .database - .storage::() - .get(&script.id(&producer.config.consensus_parameters.chain_id)) - .unwrap() - .unwrap(); + .expect("Should execute the block"); + let receipts = &tx_status[0].receipts; if let Some(Receipt::Return { val, .. }) = receipts.get(0) { *val == 1 @@ -2756,20 +2750,16 @@ mod tests { }, ); - executor + let ExecutionResult { tx_status, .. } = executor .execute_and_commit( ExecutionBlock::Production(block), ExecutionOptions { utxo_validation: true, }, ) - .unwrap(); + .expect("Should execute the block"); - let receipts = database - .storage::() - .get(&tx.id(&ChainId::default())) - .unwrap() - .unwrap(); + let receipts = &tx_status[0].receipts; assert_eq!(block_height as u64, receipts[0].val().unwrap()); } @@ -2835,21 +2825,16 @@ mod tests { }, ); - executor + let ExecutionResult { tx_status, .. } = executor .execute_and_commit( ExecutionBlock::Production(block), ExecutionOptions { utxo_validation: true, }, ) - .unwrap(); - - let receipts = database - .storage::() - .get(&tx.id(&ChainId::default())) - .unwrap() - .unwrap(); + .expect("Should execute the block"); + let receipts = &tx_status[0].receipts; assert_eq!(time.0, receipts[0].val().unwrap()); } } diff --git a/crates/fuel-core/src/graphql_api.rs b/crates/fuel-core/src/graphql_api.rs index 3fd27a3c19b..12603d964a5 100644 --- a/crates/fuel-core/src/graphql_api.rs +++ b/crates/fuel-core/src/graphql_api.rs @@ -9,9 +9,12 @@ use fuel_core_types::{ }; use std::net::SocketAddr; +pub mod api_service; +pub mod database; pub(crate) mod metrics_extension; pub mod ports; -pub mod service; +pub(crate) mod view_extension; +pub mod worker_service; #[derive(Clone, Debug)] pub struct Config { diff --git a/crates/fuel-core/src/graphql_api/service.rs b/crates/fuel-core/src/graphql_api/api_service.rs similarity index 89% rename from 
crates/fuel-core/src/graphql_api/service.rs rename to crates/fuel-core/src/graphql_api/api_service.rs index 6c6879ae308..15023a5995f 100644 --- a/crates/fuel-core/src/graphql_api/service.rs +++ b/crates/fuel-core/src/graphql_api/api_service.rs @@ -1,13 +1,17 @@ use crate::{ - fuel_core_graphql_api::ports::{ - BlockProducerPort, - ConsensusModulePort, - DatabasePort, - P2pPort, - TxPoolPort, - }, - graphql_api::{ + fuel_core_graphql_api::{ + database::{ + OffChainView, + OnChainView, + }, metrics_extension::MetricsExtension, + ports::{ + BlockProducerPort, + ConsensusModulePort, + P2pPort, + TxPoolPort, + }, + view_extension::ViewExtension, Config, }, schema::{ @@ -55,6 +59,7 @@ use fuel_core_services::{ RunnableTask, StateWatcher, }; +use fuel_core_storage::transactional::AtomicView; use futures::Stream; use serde_json::json; use std::{ @@ -75,7 +80,7 @@ use tower_http::{ pub type Service = fuel_core_services::ServiceRunner; -pub type Database = Box; +pub use super::database::ReadDatabase; pub type BlockProducer = Box; // In the future GraphQL should not be aware of `TxPool`. 
It should @@ -160,28 +165,35 @@ impl RunnableTask for Task { // Need a seperate Data Object for each Query endpoint, cannot be avoided #[allow(clippy::too_many_arguments)] -pub fn new_service( +pub fn new_service( config: Config, schema: CoreSchemaBuilder, - database: Database, + on_database: OnChain, + off_database: OffChain, txpool: TxPool, producer: BlockProducer, consensus_module: ConsensusModule, p2p_service: P2pService, log_threshold_ms: Duration, request_timeout: Duration, -) -> anyhow::Result { +) -> anyhow::Result +where + OnChain: AtomicView + 'static, + OffChain: AtomicView + 'static, +{ let network_addr = config.addr; + let combined_read_database = ReadDatabase::new(on_database, off_database); let schema = schema .data(config) - .data(database) + .data(combined_read_database) .data(txpool) .data(producer) .data(consensus_module) .data(p2p_service) .extension(async_graphql::extensions::Tracing) .extension(MetricsExtension::new(log_threshold_ms)) + .extension(ViewExtension::new()) .finish(); let router = Router::new() diff --git a/crates/fuel-core/src/graphql_api/database.rs b/crates/fuel-core/src/graphql_api/database.rs new file mode 100644 index 00000000000..175f0610f18 --- /dev/null +++ b/crates/fuel-core/src/graphql_api/database.rs @@ -0,0 +1,223 @@ +use crate::fuel_core_graphql_api::ports::{ + DatabaseBlocks, + DatabaseChain, + DatabaseContracts, + DatabaseMessageProof, + DatabaseMessages, + OffChainDatabase, + OnChainDatabase, +}; +use fuel_core_storage::{ + iter::{ + BoxedIter, + IterDirection, + }, + tables::Receipts, + transactional::AtomicView, + Error as StorageError, + Mappable, + Result as StorageResult, + StorageInspect, +}; +use fuel_core_txpool::types::{ + ContractId, + TxId, +}; +use fuel_core_types::{ + blockchain::primitives::{ + BlockId, + DaBlockHeight, + }, + entities::message::{ + MerkleProof, + Message, + }, + fuel_tx::{ + Address, + AssetId, + TxPointer, + UtxoId, + }, + fuel_types::{ + BlockHeight, + Nonce, + }, + services::{ + 
graphql_api::ContractBalance, + txpool::TransactionStatus, + }, +}; +use std::{ + borrow::Cow, + sync::Arc, +}; + +pub type OnChainView = Arc; +pub type OffChainView = Arc; + +pub struct ReadDatabase { + on_chain: Box>, + off_chain: Box>, +} + +impl ReadDatabase { + pub fn new(on_chain: OnChain, off_chain: OffChain) -> Self + where + OnChain: AtomicView + 'static, + OffChain: AtomicView + 'static, + { + Self { + on_chain: Box::new(on_chain), + off_chain: Box::new(off_chain), + } + } + + pub fn view(&self) -> ReadView { + ReadView { + on_chain: self.on_chain.latest_view(), + off_chain: self.off_chain.latest_view(), + } + } +} + +pub struct ReadView { + on_chain: OnChainView, + off_chain: OffChainView, +} + +impl DatabaseBlocks for ReadView { + fn block_id(&self, height: &BlockHeight) -> StorageResult { + self.on_chain.block_id(height) + } + + fn blocks_ids( + &self, + start: Option, + direction: IterDirection, + ) -> BoxedIter<'_, StorageResult<(BlockHeight, BlockId)>> { + self.on_chain.blocks_ids(start, direction) + } + + fn ids_of_latest_block(&self) -> StorageResult<(BlockHeight, BlockId)> { + self.on_chain.ids_of_latest_block() + } +} + +impl StorageInspect for ReadView +where + M: Mappable, + dyn OnChainDatabase: StorageInspect, +{ + type Error = StorageError; + + fn get(&self, key: &M::Key) -> StorageResult>> { + self.on_chain.get(key) + } + + fn contains_key(&self, key: &M::Key) -> StorageResult { + self.on_chain.contains_key(key) + } +} + +impl DatabaseMessages for ReadView { + fn all_messages( + &self, + start_message_id: Option, + direction: IterDirection, + ) -> BoxedIter<'_, StorageResult> { + self.on_chain.all_messages(start_message_id, direction) + } + + fn message_is_spent(&self, nonce: &Nonce) -> StorageResult { + self.on_chain.message_is_spent(nonce) + } + + fn message_exists(&self, nonce: &Nonce) -> StorageResult { + self.on_chain.message_exists(nonce) + } +} + +impl DatabaseContracts for ReadView { + fn contract_balances( + &self, + contract: 
ContractId, + start_asset: Option, + direction: IterDirection, + ) -> BoxedIter> { + self.on_chain + .contract_balances(contract, start_asset, direction) + } +} + +impl DatabaseChain for ReadView { + fn chain_name(&self) -> StorageResult { + self.on_chain.chain_name() + } + + fn da_height(&self) -> StorageResult { + self.on_chain.da_height() + } +} + +impl DatabaseMessageProof for ReadView { + fn block_history_proof( + &self, + message_block_height: &BlockHeight, + commit_block_height: &BlockHeight, + ) -> StorageResult { + self.on_chain + .block_history_proof(message_block_height, commit_block_height) + } +} + +impl OnChainDatabase for ReadView {} + +impl StorageInspect for ReadView { + type Error = StorageError; + + fn get( + &self, + key: &::Key, + ) -> StorageResult::OwnedValue>>> { + self.off_chain.get(key) + } + + fn contains_key(&self, key: &::Key) -> StorageResult { + self.off_chain.contains_key(key) + } +} + +impl OffChainDatabase for ReadView { + fn owned_message_ids( + &self, + owner: &Address, + start_message_id: Option, + direction: IterDirection, + ) -> BoxedIter<'_, StorageResult> { + self.off_chain + .owned_message_ids(owner, start_message_id, direction) + } + + fn owned_coins_ids( + &self, + owner: &Address, + start_coin: Option, + direction: IterDirection, + ) -> BoxedIter<'_, StorageResult> { + self.off_chain.owned_coins_ids(owner, start_coin, direction) + } + + fn tx_status(&self, tx_id: &TxId) -> StorageResult { + self.off_chain.tx_status(tx_id) + } + + fn owned_transactions_ids( + &self, + owner: Address, + start: Option, + direction: IterDirection, + ) -> BoxedIter> { + self.off_chain + .owned_transactions_ids(owner, start, direction) + } +} diff --git a/crates/fuel-core/src/graphql_api/ports.rs b/crates/fuel-core/src/graphql_api/ports.rs index b897acb2489..44ff62b79b3 100644 --- a/crates/fuel-core/src/graphql_api/ports.rs +++ b/crates/fuel-core/src/graphql_api/ports.rs @@ -14,7 +14,6 @@ use fuel_core_storage::{ Messages, Receipts, 
SealedBlockConsensus, - SpentMessages, Transactions, }, Error as StorageError, @@ -57,14 +56,41 @@ use fuel_core_types::{ }; use std::sync::Arc; -/// The database port expected by GraphQL API service. -pub trait DatabasePort: +pub trait OffChainDatabase: + Send + Sync + StorageInspect +{ + fn owned_message_ids( + &self, + owner: &Address, + start_message_id: Option, + direction: IterDirection, + ) -> BoxedIter<'_, StorageResult>; + + fn owned_coins_ids( + &self, + owner: &Address, + start_coin: Option, + direction: IterDirection, + ) -> BoxedIter<'_, StorageResult>; + + fn tx_status(&self, tx_id: &TxId) -> StorageResult; + + fn owned_transactions_ids( + &self, + owner: Address, + start: Option, + direction: IterDirection, + ) -> BoxedIter>; +} + +/// The on chain database port expected by GraphQL API service. +pub trait OnChainDatabase: Send + Sync + DatabaseBlocks - + DatabaseTransactions + + StorageInspect + DatabaseMessages - + DatabaseCoins + + StorageInspect + DatabaseContracts + DatabaseChain + DatabaseMessageProof @@ -87,33 +113,8 @@ pub trait DatabaseBlocks: fn ids_of_latest_block(&self) -> StorageResult<(BlockHeight, BlockId)>; } -/// Trait that specifies all the getters required for transactions. -pub trait DatabaseTransactions: - StorageInspect - + StorageInspect -{ - fn tx_status(&self, tx_id: &TxId) -> StorageResult; - - fn owned_transactions_ids( - &self, - owner: Address, - start: Option, - direction: IterDirection, - ) -> BoxedIter>; -} - /// Trait that specifies all the getters required for messages. 
-pub trait DatabaseMessages: - StorageInspect - + StorageInspect -{ - fn owned_message_ids( - &self, - owner: &Address, - start_message_id: Option, - direction: IterDirection, - ) -> BoxedIter<'_, StorageResult>; - +pub trait DatabaseMessages: StorageInspect { fn all_messages( &self, start_message_id: Option, @@ -125,16 +126,6 @@ pub trait DatabaseMessages: fn message_exists(&self, nonce: &Nonce) -> StorageResult; } -/// Trait that specifies all the getters required for coins. -pub trait DatabaseCoins: StorageInspect { - fn owned_coins_ids( - &self, - owner: &Address, - start_coin: Option, - direction: IterDirection, - ) -> BoxedIter<'_, StorageResult>; -} - /// Trait that specifies all the getters required for contract. pub trait DatabaseContracts: StorageInspect @@ -174,7 +165,7 @@ pub trait TxPoolPort: Send + Sync { } #[async_trait] -pub trait DryRunExecution { +pub trait BlockProducerPort: Send + Sync { async fn dry_run_tx( &self, transaction: Transaction, @@ -183,8 +174,6 @@ pub trait DryRunExecution { ) -> anyhow::Result>; } -pub trait BlockProducerPort: Send + Sync + DryRunExecution {} - #[async_trait::async_trait] pub trait ConsensusModulePort: Send + Sync { async fn manually_produce_blocks( @@ -209,3 +198,51 @@ pub trait DatabaseMessageProof: Send + Sync { pub trait P2pPort: Send + Sync { async fn all_peer_info(&self) -> anyhow::Result>; } + +pub mod worker { + use fuel_core_services::stream::BoxStream; + use fuel_core_storage::{ + tables::Receipts, + transactional::Transactional, + Error as StorageError, + Result as StorageResult, + StorageMutate, + }; + use fuel_core_types::{ + fuel_tx::{ + Address, + Bytes32, + }, + fuel_types::BlockHeight, + services::{ + block_importer::SharedImportResult, + txpool::TransactionStatus, + }, + }; + + pub trait OffChainDatabase: + Send + + Sync + + StorageMutate + + Transactional + { + fn record_tx_id_owner( + &mut self, + owner: &Address, + block_height: BlockHeight, + tx_idx: u16, + tx_id: &Bytes32, + ) -> 
StorageResult>; + + fn update_tx_status( + &mut self, + id: &Bytes32, + status: TransactionStatus, + ) -> StorageResult>; + } + + pub trait BlockImporter { + /// Returns a stream of imported block. + fn block_events(&self) -> BoxStream; + } +} diff --git a/crates/fuel-core/src/graphql_api/view_extension.rs b/crates/fuel-core/src/graphql_api/view_extension.rs new file mode 100644 index 00000000000..ca482fe9878 --- /dev/null +++ b/crates/fuel-core/src/graphql_api/view_extension.rs @@ -0,0 +1,44 @@ +use crate::graphql_api::database::ReadDatabase; +use async_graphql::{ + extensions::{ + Extension, + ExtensionContext, + ExtensionFactory, + NextPrepareRequest, + }, + Request, + ServerResult, +}; +use std::sync::Arc; + +/// The extension that adds the `ReadView` to the request context. +/// It guarantees that the request works with the one view of the database, +/// and external database modification cannot affect the result. +pub(crate) struct ViewExtension; + +impl ViewExtension { + pub fn new() -> Self { + Self + } +} + +impl ExtensionFactory for ViewExtension { + fn create(&self) -> Arc { + Arc::new(ViewExtension::new()) + } +} + +#[async_trait::async_trait] +impl Extension for ViewExtension { + async fn prepare_request( + &self, + ctx: &ExtensionContext<'_>, + request: Request, + next: NextPrepareRequest<'_>, + ) -> ServerResult { + let database: &ReadDatabase = ctx.data_unchecked(); + let view = database.view(); + let request = request.data(view); + next.run(ctx, request).await + } +} diff --git a/crates/fuel-core/src/graphql_api/worker_service.rs b/crates/fuel-core/src/graphql_api/worker_service.rs new file mode 100644 index 00000000000..fe904d7f7d8 --- /dev/null +++ b/crates/fuel-core/src/graphql_api/worker_service.rs @@ -0,0 +1,277 @@ +use crate::fuel_core_graphql_api::ports; +use fuel_core_services::{ + stream::BoxStream, + EmptyShared, + RunnableService, + RunnableTask, + ServiceRunner, + StateWatcher, +}; +use fuel_core_storage::{ + tables::Receipts, + Result 
as StorageResult, + StorageAsMut, +}; +use fuel_core_types::{ + blockchain::block::Block, + fuel_tx::{ + field::{ + Inputs, + Outputs, + }, + input::coin::{ + CoinPredicate, + CoinSigned, + }, + Input, + Output, + Receipt, + Transaction, + TxId, + UniqueIdentifier, + }, + fuel_types::{ + BlockHeight, + Bytes32, + }, + services::{ + block_importer::{ + ImportResult, + SharedImportResult, + }, + executor::TransactionExecutionStatus, + txpool::from_executor_to_status, + }, +}; +use futures::{ + FutureExt, + StreamExt, +}; + +pub struct Task { + block_importer: BoxStream, + database: D, +} + +impl Task +where + D: ports::worker::OffChainDatabase, +{ + fn process_block(&mut self, result: SharedImportResult) -> anyhow::Result<()> { + let mut transaction = self.database.transaction(); + // save the status for every transaction using the finalized block id + self.persist_transaction_status(&result, transaction.as_mut())?; + + // save the associated owner for each transaction in the block + self.index_tx_owners_for_block( + &result.sealed_block.entity, + transaction.as_mut(), + )?; + transaction.commit()?; + + Ok(()) + } + + /// Associate all transactions within a block to their respective UTXO owners + fn index_tx_owners_for_block( + &self, + block: &Block, + block_st_transaction: &mut D, + ) -> anyhow::Result<()> { + for (tx_idx, tx) in block.transactions().iter().enumerate() { + let block_height = *block.header().height(); + let inputs; + let outputs; + let tx_idx = u16::try_from(tx_idx).map_err(|e| { + anyhow::anyhow!("The block has more than `u16::MAX` transactions, {}", e) + })?; + let tx_id = tx.cached_id().expect( + "The imported block should contains only transactions with cached id", + ); + match tx { + Transaction::Script(tx) => { + inputs = tx.inputs().as_slice(); + outputs = tx.outputs().as_slice(); + } + Transaction::Create(tx) => { + inputs = tx.inputs().as_slice(); + outputs = tx.outputs().as_slice(); + } + Transaction::Mint(_) => continue, + } + 
self.persist_owners_index( + block_height, + inputs, + outputs, + &tx_id, + tx_idx, + block_st_transaction, + )?; + } + Ok(()) + } + + /// Index the tx id by owner for all of the inputs and outputs + fn persist_owners_index( + &self, + block_height: BlockHeight, + inputs: &[Input], + outputs: &[Output], + tx_id: &Bytes32, + tx_idx: u16, + db: &mut D, + ) -> StorageResult<()> { + let mut owners = vec![]; + for input in inputs { + if let Input::CoinSigned(CoinSigned { owner, .. }) + | Input::CoinPredicate(CoinPredicate { owner, .. }) = input + { + owners.push(owner); + } + } + + for output in outputs { + match output { + Output::Coin { to, .. } + | Output::Change { to, .. } + | Output::Variable { to, .. } => { + owners.push(to); + } + Output::Contract(_) | Output::ContractCreated { .. } => {} + } + } + + // dedupe owners from inputs and outputs prior to indexing + owners.sort(); + owners.dedup(); + + for owner in owners { + db.record_tx_id_owner(owner, block_height, tx_idx, tx_id)?; + } + + Ok(()) + } + + fn persist_transaction_status( + &self, + import_result: &ImportResult, + db: &mut D, + ) -> StorageResult<()> { + for TransactionExecutionStatus { + id, + result, + receipts, + } in import_result.tx_status.iter() + { + let status = from_executor_to_status( + &import_result.sealed_block.entity, + result.clone(), + ); + + if db.update_tx_status(id, status)?.is_some() { + return Err(anyhow::anyhow!( + "Transaction status already exists for tx {}", + id + ) + .into()); + } + + self.persist_receipts(id, receipts, db)?; + } + Ok(()) + } + + fn persist_receipts( + &self, + tx_id: &TxId, + receipts: &[Receipt], + db: &mut D, + ) -> StorageResult<()> { + if db.storage::().insert(tx_id, receipts)?.is_some() { + return Err(anyhow::anyhow!("Receipts already exist for tx {}", tx_id).into()); + } + Ok(()) + } +} + +#[async_trait::async_trait] +impl RunnableService for Task +where + D: ports::worker::OffChainDatabase, +{ + const NAME: &'static str = "GraphQL_Off_Chain_Worker"; + 
type SharedData = EmptyShared; + type Task = Self; + type TaskParams = (); + + fn shared_data(&self) -> Self::SharedData { + EmptyShared + } + + async fn into_task( + self, + _: &StateWatcher, + _: Self::TaskParams, + ) -> anyhow::Result { + // TODO: It is possible that the node was shut down before we processed all imported blocks. + // It could lead to some missed blocks and the database's inconsistent state. + // Because the result of block execution is not stored on the chain, it is impossible + // to actualize the database without executing the block at the previous state + // of the blockchain. When `AtomicView::view_at` is implemented, we can + // process all missed blocks and actualize the database here. + Ok(self) + } +} + +#[async_trait::async_trait] +impl RunnableTask for Task +where + D: ports::worker::OffChainDatabase, +{ + async fn run(&mut self, watcher: &mut StateWatcher) -> anyhow::Result { + let should_continue; + tokio::select! { + biased; + + _ = watcher.while_started() => { + should_continue = false; + } + + result = self.block_importer.next() => { + if let Some(block) = result { + self.process_block(block)?; + + should_continue = true + } else { + should_continue = false + } + } + } + Ok(should_continue) + } + + async fn shutdown(mut self) -> anyhow::Result<()> { + loop { + let result = self.block_importer.next().now_or_never(); + + if let Some(Some(block)) = result { + self.process_block(block)?; + } else { + break; + } + } + Ok(()) + } +} + +pub fn new_service(block_importer: I, database: D) -> ServiceRunner> +where + I: ports::worker::BlockImporter, + D: ports::worker::OffChainDatabase, +{ + let block_importer = block_importer.block_events(); + ServiceRunner::new(Task { + block_importer, + database, + }) +} diff --git a/crates/fuel-core/src/query/balance.rs b/crates/fuel-core/src/query/balance.rs index c5977422257..ecbc47620bd 100644 --- a/crates/fuel-core/src/query/balance.rs +++ b/crates/fuel-core/src/query/balance.rs @@ -1,4 +1,4 @@ -use 
crate::fuel_core_graphql_api::service::Database; +use crate::fuel_core_graphql_api::database::ReadView; use asset_query::{ AssetQuery, AssetSpendTarget, @@ -43,7 +43,7 @@ pub trait BalanceQueryData: Send + Sync { ) -> BoxedIter>; } -impl BalanceQueryData for Database { +impl BalanceQueryData for ReadView { fn balance( &self, owner: Address, diff --git a/crates/fuel-core/src/query/balance/asset_query.rs b/crates/fuel-core/src/query/balance/asset_query.rs index e93c9d0f304..ee0266b1245 100644 --- a/crates/fuel-core/src/query/balance/asset_query.rs +++ b/crates/fuel-core/src/query/balance/asset_query.rs @@ -1,5 +1,5 @@ use crate::{ - graphql_api::service::Database, + graphql_api::database::ReadView, query::{ CoinQueryData, MessageQueryData, @@ -58,7 +58,7 @@ pub struct AssetsQuery<'a> { pub owner: &'a Address, pub assets: Option>, pub exclude: Option<&'a Exclude>, - pub database: &'a Database, + pub database: &'a ReadView, pub base_asset_id: &'a AssetId, } @@ -67,7 +67,7 @@ impl<'a> AssetsQuery<'a> { owner: &'a Address, assets: Option>, exclude: Option<&'a Exclude>, - database: &'a Database, + database: &'a ReadView, base_asset_id: &'a AssetId, ) -> Self { Self { @@ -171,7 +171,7 @@ pub struct AssetQuery<'a> { pub owner: &'a Address, pub asset: &'a AssetSpendTarget, pub exclude: Option<&'a Exclude>, - pub database: &'a Database, + pub database: &'a ReadView, query: AssetsQuery<'a>, } @@ -181,7 +181,7 @@ impl<'a> AssetQuery<'a> { asset: &'a AssetSpendTarget, base_asset_id: &'a AssetId, exclude: Option<&'a Exclude>, - database: &'a Database, + database: &'a ReadView, ) -> Self { let mut allowed = HashSet::new(); allowed.insert(&asset.id); diff --git a/crates/fuel-core/src/query/block.rs b/crates/fuel-core/src/query/block.rs index 66cba1f941b..8aeed56f76d 100644 --- a/crates/fuel-core/src/query/block.rs +++ b/crates/fuel-core/src/query/block.rs @@ -1,4 +1,4 @@ -use crate::graphql_api::ports::DatabasePort; +use crate::fuel_core_graphql_api::ports::OnChainDatabase; use 
fuel_core_storage::{ iter::{ BoxedIter, @@ -26,7 +26,7 @@ pub trait SimpleBlockData: Send + Sync { fn block(&self, id: &BlockId) -> StorageResult; } -impl SimpleBlockData for D { +impl SimpleBlockData for D { fn block(&self, id: &BlockId) -> StorageResult { let block = self .storage::() @@ -56,7 +56,7 @@ pub trait BlockQueryData: Send + Sync + SimpleBlockData { fn consensus(&self, id: &BlockId) -> StorageResult; } -impl BlockQueryData for D { +impl BlockQueryData for D { fn block_id(&self, height: &BlockHeight) -> StorageResult { self.block_id(height) } diff --git a/crates/fuel-core/src/query/chain.rs b/crates/fuel-core/src/query/chain.rs index 88ce035ba1b..b9408ddfcd3 100644 --- a/crates/fuel-core/src/query/chain.rs +++ b/crates/fuel-core/src/query/chain.rs @@ -1,4 +1,4 @@ -use crate::graphql_api::ports::DatabasePort; +use crate::fuel_core_graphql_api::ports::OnChainDatabase; use fuel_core_storage::Result as StorageResult; use fuel_core_types::blockchain::primitives::DaBlockHeight; @@ -8,7 +8,7 @@ pub trait ChainQueryData: Send + Sync { fn da_height(&self) -> StorageResult; } -impl ChainQueryData for D { +impl ChainQueryData for D { fn name(&self) -> StorageResult { self.chain_name() } diff --git a/crates/fuel-core/src/query/coin.rs b/crates/fuel-core/src/query/coin.rs index d31b60690e9..427379a728b 100644 --- a/crates/fuel-core/src/query/coin.rs +++ b/crates/fuel-core/src/query/coin.rs @@ -1,4 +1,7 @@ -use crate::graphql_api::ports::DatabasePort; +use crate::fuel_core_graphql_api::ports::{ + OffChainDatabase, + OnChainDatabase, +}; use fuel_core_storage::{ iter::{ BoxedIter, @@ -34,7 +37,7 @@ pub trait CoinQueryData: Send + Sync { ) -> BoxedIter>; } -impl CoinQueryData for D { +impl CoinQueryData for D { fn coin(&self, utxo_id: UtxoId) -> StorageResult { let coin = self .storage::() diff --git a/crates/fuel-core/src/query/contract.rs b/crates/fuel-core/src/query/contract.rs index d05d90999bb..d4bbb8b5d62 100644 --- a/crates/fuel-core/src/query/contract.rs +++ 
b/crates/fuel-core/src/query/contract.rs @@ -1,4 +1,4 @@ -use crate::graphql_api::ports::DatabasePort; +use crate::fuel_core_graphql_api::ports::OnChainDatabase; use fuel_core_storage::{ iter::{ BoxedIter, @@ -43,7 +43,7 @@ pub trait ContractQueryData: Send + Sync { ) -> BoxedIter>; } -impl ContractQueryData for D { +impl ContractQueryData for D { fn contract_id(&self, id: ContractId) -> StorageResult { let contract_exists = self.storage::().contains_key(&id)?; if contract_exists { diff --git a/crates/fuel-core/src/query/message.rs b/crates/fuel-core/src/query/message.rs index b1ce17e4bb9..334c24dc0d7 100644 --- a/crates/fuel-core/src/query/message.rs +++ b/crates/fuel-core/src/query/message.rs @@ -3,7 +3,8 @@ use crate::{ ports::{ DatabaseMessageProof, DatabaseMessages, - DatabasePort, + OffChainDatabase, + OnChainDatabase, }, IntoApiResult, }, @@ -80,7 +81,7 @@ pub trait MessageQueryData: Send + Sync { ) -> BoxedIter>; } -impl MessageQueryData for D { +impl MessageQueryData for D { fn message(&self, id: &Nonce) -> StorageResult { self.storage::() .get(id)? 
@@ -128,7 +129,10 @@ pub trait MessageProofData: ) -> StorageResult; } -impl MessageProofData for D { +impl MessageProofData for D +where + D: OnChainDatabase + OffChainDatabase + ?Sized, +{ fn transaction_status( &self, transaction_id: &TxId, diff --git a/crates/fuel-core/src/query/tx.rs b/crates/fuel-core/src/query/tx.rs index 74d325e33ae..09994be55fe 100644 --- a/crates/fuel-core/src/query/tx.rs +++ b/crates/fuel-core/src/query/tx.rs @@ -1,4 +1,7 @@ -use crate::graphql_api::ports::DatabasePort; +use crate::fuel_core_graphql_api::ports::{ + OffChainDatabase, + OnChainDatabase, +}; use fuel_core_storage::{ iter::{ BoxedIter, @@ -32,7 +35,10 @@ pub trait SimpleTransactionData: Send + Sync { fn transaction(&self, transaction_id: &TxId) -> StorageResult; } -impl SimpleTransactionData for D { +impl SimpleTransactionData for D +where + D: OffChainDatabase + OnChainDatabase + ?Sized, +{ fn transaction(&self, tx_id: &TxId) -> StorageResult { self.storage::() .get(tx_id) @@ -57,7 +63,10 @@ pub trait TransactionQueryData: Send + Sync + SimpleTransactionData { ) -> BoxedIter>; } -impl TransactionQueryData for D { +impl TransactionQueryData for D +where + D: OnChainDatabase + OffChainDatabase + ?Sized, +{ fn status(&self, tx_id: &TxId) -> StorageResult { self.tx_status(tx_id) } diff --git a/crates/fuel-core/src/schema/balance.rs b/crates/fuel-core/src/schema/balance.rs index 9188696a897..da5a72ada58 100644 --- a/crates/fuel-core/src/schema/balance.rs +++ b/crates/fuel-core/src/schema/balance.rs @@ -1,6 +1,6 @@ use crate::{ fuel_core_graphql_api::{ - service::Database, + database::ReadView, Config, }, query::BalanceQueryData, @@ -56,12 +56,12 @@ impl BalanceQuery { #[graphql(desc = "address of the owner")] owner: Address, #[graphql(desc = "asset_id of the coin")] asset_id: AssetId, ) -> async_graphql::Result { - let data: &Database = ctx.data_unchecked(); + let query: &ReadView = ctx.data_unchecked(); let base_asset_id = *ctx .data_unchecked::() .consensus_parameters 
.base_asset_id(); - let balance = data.balance(owner.0, asset_id.0, base_asset_id)?.into(); + let balance = query.balance(owner.0, asset_id.0, base_asset_id)?.into(); Ok(balance) } @@ -82,7 +82,7 @@ impl BalanceQuery { if before.is_some() || after.is_some() { return Err(anyhow!("pagination is not yet supported").into()) } - let query: &Database = ctx.data_unchecked(); + let query: &ReadView = ctx.data_unchecked(); crate::schema::query_pagination(after, before, first, last, |_, direction| { let owner = filter.owner.into(); let base_asset_id = *ctx diff --git a/crates/fuel-core/src/schema/block.rs b/crates/fuel-core/src/schema/block.rs index 5d503f281bc..a092600c071 100644 --- a/crates/fuel-core/src/schema/block.rs +++ b/crates/fuel-core/src/schema/block.rs @@ -4,13 +4,11 @@ use super::scalars::{ }; use crate::{ fuel_core_graphql_api::{ - service::{ - ConsensusModule, - Database, - }, + api_service::ConsensusModule, + database::ReadView, Config as GraphQLConfig, + IntoApiResult, }, - graphql_api::IntoApiResult, query::{ BlockQueryData, SimpleBlockData, @@ -96,7 +94,7 @@ impl Block { } async fn consensus(&self, ctx: &Context<'_>) -> async_graphql::Result { - let query: &Database = ctx.data_unchecked(); + let query: &ReadView = ctx.data_unchecked(); let id = self.0.header().id(); let consensus = query.consensus(&id)?; @@ -107,7 +105,7 @@ impl Block { &self, ctx: &Context<'_>, ) -> async_graphql::Result> { - let query: &Database = ctx.data_unchecked(); + let query: &ReadView = ctx.data_unchecked(); self.0 .transactions() .iter() @@ -192,7 +190,7 @@ impl BlockQuery { #[graphql(desc = "ID of the block")] id: Option, #[graphql(desc = "Height of the block")] height: Option, ) -> async_graphql::Result> { - let data: &Database = ctx.data_unchecked(); + let query: &ReadView = ctx.data_unchecked(); let id = match (id, height) { (Some(_), Some(_)) => { return Err(async_graphql::Error::new( @@ -202,14 +200,14 @@ impl BlockQuery { (Some(id), None) => Ok(id.0.into()), (None, 
Some(height)) => { let height: u32 = height.into(); - data.block_id(&height.into()) + query.block_id(&height.into()) } (None, None) => { return Err(async_graphql::Error::new("Missing either id or height")) } }; - id.and_then(|id| data.block(&id)).into_api_result() + id.and_then(|id| query.block(&id)).into_api_result() } async fn blocks( @@ -220,9 +218,9 @@ impl BlockQuery { last: Option, before: Option, ) -> async_graphql::Result> { - let db: &Database = ctx.data_unchecked(); + let query: &ReadView = ctx.data_unchecked(); crate::schema::query_pagination(after, before, first, last, |start, direction| { - Ok(blocks_query(db, start.map(Into::into), direction)) + Ok(blocks_query(query, start.map(Into::into), direction)) }) .await } @@ -253,16 +251,16 @@ impl HeaderQuery { last: Option, before: Option, ) -> async_graphql::Result> { - let db: &Database = ctx.data_unchecked(); + let query: &ReadView = ctx.data_unchecked(); crate::schema::query_pagination(after, before, first, last, |start, direction| { - Ok(blocks_query(db, start.map(Into::into), direction)) + Ok(blocks_query(query, start.map(Into::into), direction)) }) .await } } fn blocks_query( - query: &Database, + query: &ReadView, start: Option, direction: IterDirection, ) -> BoxedIter> @@ -292,7 +290,7 @@ impl BlockMutation { start_timestamp: Option, blocks_to_produce: U32, ) -> async_graphql::Result { - let query: &Database = ctx.data_unchecked(); + let query: &ReadView = ctx.data_unchecked(); let consensus_module = ctx.data_unchecked::(); let config = ctx.data_unchecked::().clone(); diff --git a/crates/fuel-core/src/schema/chain.rs b/crates/fuel-core/src/schema/chain.rs index e1df56c7eb2..7c8bb918aa3 100644 --- a/crates/fuel-core/src/schema/chain.rs +++ b/crates/fuel-core/src/schema/chain.rs @@ -1,6 +1,6 @@ use crate::{ fuel_core_graphql_api::{ - service::Database, + database::ReadView, Config as GraphQLConfig, }, query::{ @@ -683,19 +683,19 @@ impl HeavyOperation { #[Object] impl ChainInfo { async fn name(&self, 
ctx: &Context<'_>) -> async_graphql::Result { - let data: &Database = ctx.data_unchecked(); - Ok(data.name()?) + let query: &ReadView = ctx.data_unchecked(); + Ok(query.name()?) } async fn latest_block(&self, ctx: &Context<'_>) -> async_graphql::Result { - let query: &Database = ctx.data_unchecked(); + let query: &ReadView = ctx.data_unchecked(); let latest_block = query.latest_block()?.into(); Ok(latest_block) } async fn da_height(&self, ctx: &Context<'_>) -> U64 { - let query: &Database = ctx.data_unchecked(); + let query: &ReadView = ctx.data_unchecked(); let height = query .da_height() diff --git a/crates/fuel-core/src/schema/coins.rs b/crates/fuel-core/src/schema/coins.rs index 60a75add8f9..476058016be 100644 --- a/crates/fuel-core/src/schema/coins.rs +++ b/crates/fuel-core/src/schema/coins.rs @@ -4,10 +4,10 @@ use crate::{ SpendQuery, }, fuel_core_graphql_api::{ + database::ReadView, Config as GraphQLConfig, IntoApiResult, }, - graphql_api::service::Database, query::{ asset_query::AssetSpendTarget, CoinQueryData, @@ -152,8 +152,8 @@ impl CoinQuery { ctx: &Context<'_>, #[graphql(desc = "The ID of the coin")] utxo_id: UtxoId, ) -> async_graphql::Result> { - let data: &Database = ctx.data_unchecked(); - data.coin(utxo_id.0).into_api_result() + let query: &ReadView = ctx.data_unchecked(); + query.coin(utxo_id.0).into_api_result() } /// Gets all unspent coins of some `owner` maybe filtered with by `asset_id` per page. 
@@ -166,7 +166,7 @@ impl CoinQuery { last: Option, before: Option, ) -> async_graphql::Result> { - let query: &Database = ctx.data_unchecked(); + let query: &ReadView = ctx.data_unchecked(); crate::schema::query_pagination(after, before, first, last, |start, direction| { let owner: fuel_tx::Address = filter.owner.into(); let coins = query @@ -240,9 +240,9 @@ impl CoinQuery { let spend_query = SpendQuery::new(owner, &query_per_asset, excluded_ids, *base_asset_id)?; - let db = ctx.data_unchecked::(); + let query: &ReadView = ctx.data_unchecked(); - let coins = random_improve(db, &spend_query)? + let coins = random_improve(query, &spend_query)? .into_iter() .map(|coins| { coins diff --git a/crates/fuel-core/src/schema/contract.rs b/crates/fuel-core/src/schema/contract.rs index 2409041925d..16a26b87704 100644 --- a/crates/fuel-core/src/schema/contract.rs +++ b/crates/fuel-core/src/schema/contract.rs @@ -1,6 +1,6 @@ use crate::{ fuel_core_graphql_api::{ - service::Database, + database::ReadView, IntoApiResult, }, query::ContractQueryData, @@ -41,16 +41,16 @@ impl Contract { } async fn bytecode(&self, ctx: &Context<'_>) -> async_graphql::Result { - let context: &Database = ctx.data_unchecked(); - context + let query: &ReadView = ctx.data_unchecked(); + query .contract_bytecode(self.0) .map(HexString) .map_err(Into::into) } async fn salt(&self, ctx: &Context<'_>) -> async_graphql::Result { - let context: &Database = ctx.data_unchecked(); - context + let query: &ReadView = ctx.data_unchecked(); + query .contract_salt(self.0) .map(Into::into) .map_err(Into::into) @@ -67,8 +67,8 @@ impl ContractQuery { ctx: &Context<'_>, #[graphql(desc = "ID of the Contract")] id: ContractId, ) -> async_graphql::Result> { - let data: &Database = ctx.data_unchecked(); - data.contract_id(id.0).into_api_result() + let query: &ReadView = ctx.data_unchecked(); + query.contract_id(id.0).into_api_result() } } @@ -108,8 +108,8 @@ impl ContractBalanceQuery { ) -> async_graphql::Result { let 
contract_id = contract.into(); let asset_id = asset.into(); - let context: &Database = ctx.data_unchecked(); - context + let query: &ReadView = ctx.data_unchecked(); + query .contract_balance(contract_id, asset_id) .into_api_result() .map(|result| { @@ -135,7 +135,7 @@ impl ContractBalanceQuery { ) -> async_graphql::Result< Connection, > { - let query: &Database = ctx.data_unchecked(); + let query: &ReadView = ctx.data_unchecked(); crate::schema::query_pagination(after, before, first, last, |start, direction| { let balances = query diff --git a/crates/fuel-core/src/schema/message.rs b/crates/fuel-core/src/schema/message.rs index 75707190e22..dfc17606864 100644 --- a/crates/fuel-core/src/schema/message.rs +++ b/crates/fuel-core/src/schema/message.rs @@ -1,5 +1,3 @@ -use std::ops::Deref; - use super::{ block::Header, scalars::{ @@ -12,7 +10,10 @@ use super::{ }, }; use crate::{ - fuel_core_graphql_api::service::Database, + fuel_core_graphql_api::{ + database::ReadView, + ports::DatabaseBlocks, + }, query::MessageQueryData, schema::scalars::{ BlockId, @@ -75,7 +76,7 @@ impl MessageQuery { before: Option, ) -> async_graphql::Result> { - let query: &Database = ctx.data_unchecked(); + let query: &ReadView = ctx.data_unchecked(); crate::schema::query_pagination( after, before, @@ -114,12 +115,12 @@ impl MessageQuery { commit_block_id: Option, commit_block_height: Option, ) -> async_graphql::Result> { - let data: &Database = ctx.data_unchecked(); + let query: &ReadView = ctx.data_unchecked(); let block_id = match (commit_block_id, commit_block_height) { (Some(commit_block_id), None) => commit_block_id.0.into(), (None, Some(commit_block_height)) => { let block_height = commit_block_height.0.into(); - data.block_id(&block_height)? + query.block_id(&block_height)? 
} _ => Err(anyhow::anyhow!( "Either `commit_block_id` or `commit_block_height` must be provided exclusively" @@ -127,7 +128,7 @@ impl MessageQuery { }; Ok(crate::query::message_proof( - data.deref(), + query, transaction_id.into(), nonce.into(), block_id, @@ -140,8 +141,8 @@ impl MessageQuery { ctx: &Context<'_>, nonce: Nonce, ) -> async_graphql::Result { - let data: &Database = ctx.data_unchecked(); - let status = crate::query::message_status(data.deref(), nonce.into())?; + let query: &ReadView = ctx.data_unchecked(); + let status = crate::query::message_status(query, nonce.into())?; Ok(status.into()) } } diff --git a/crates/fuel-core/src/schema/node_info.rs b/crates/fuel-core/src/schema/node_info.rs index 97ef85167c0..647b0c4215e 100644 --- a/crates/fuel-core/src/schema/node_info.rs +++ b/crates/fuel-core/src/schema/node_info.rs @@ -47,7 +47,7 @@ impl NodeInfo { async fn peers(&self, _ctx: &Context<'_>) -> async_graphql::Result> { #[cfg(feature = "p2p")] { - let p2p: &crate::fuel_core_graphql_api::service::P2pService = + let p2p: &crate::fuel_core_graphql_api::api_service::P2pService = _ctx.data_unchecked(); let peer_info = p2p.all_peer_info().await?; let peers = peer_info.into_iter().map(PeerInfo).collect(); diff --git a/crates/fuel-core/src/schema/tx.rs b/crates/fuel-core/src/schema/tx.rs index 0d772b86854..19a8599b10c 100644 --- a/crates/fuel-core/src/schema/tx.rs +++ b/crates/fuel-core/src/schema/tx.rs @@ -1,25 +1,29 @@ use crate::{ fuel_core_graphql_api::{ - service::{ + api_service::{ BlockProducer, - Database, TxPool, }, + database::ReadView, + ports::OffChainDatabase, + Config, IntoApiResult, }, - graphql_api::Config, query::{ transaction_status_change, BlockQueryData, SimpleTransactionData, TransactionQueryData, }, - schema::scalars::{ - Address, - HexString, - SortedTxCursor, - TransactionId, - TxPointer, + schema::{ + scalars::{ + Address, + HexString, + SortedTxCursor, + TransactionId, + TxPointer, + }, + tx::types::TransactionStatus, }, }; use 
async_graphql::{ @@ -48,7 +52,10 @@ use fuel_core_types::{ }, fuel_types, fuel_types::canonical::Deserialize, - fuel_vm::checked_transaction::EstimatePredicates, + fuel_vm::checked_transaction::{ + CheckPredicateParams, + EstimatePredicates, + }, services::txpool, }; use futures::{ @@ -63,9 +70,6 @@ use std::{ use tokio_stream::StreamExt; use types::Transaction; -use self::types::TransactionStatus; -use fuel_core_types::fuel_vm::checked_transaction::CheckPredicateParams; - pub mod input; pub mod output; pub mod receipt; @@ -81,7 +85,7 @@ impl TxQuery { ctx: &Context<'_>, #[graphql(desc = "The ID of the transaction")] id: TransactionId, ) -> async_graphql::Result> { - let query: &Database = ctx.data_unchecked(); + let query: &ReadView = ctx.data_unchecked(); let id = id.0; let txpool = ctx.data_unchecked::(); @@ -105,8 +109,7 @@ impl TxQuery { ) -> async_graphql::Result< Connection, > { - let db_query: &Database = ctx.data_unchecked(); - let tx_query: &Database = ctx.data_unchecked(); + let query: &ReadView = ctx.data_unchecked(); crate::schema::query_pagination( after, before, @@ -115,7 +118,7 @@ impl TxQuery { |start: &Option, direction| { let start = *start; let block_id = start.map(|sorted| sorted.block_height); - let all_block_ids = db_query.compressed_blocks(block_id, direction); + let all_block_ids = query.compressed_blocks(block_id, direction); let all_txs = all_block_ids .map(move |block| { @@ -145,7 +148,7 @@ impl TxQuery { }); let all_txs = all_txs.map(|result: StorageResult| { result.and_then(|sorted| { - let tx = tx_query.transaction(&sorted.tx_id.0)?; + let tx = query.transaction(&sorted.tx_id.0)?; Ok((sorted, Transaction::from_tx(sorted.tx_id.0, tx))) }) @@ -167,7 +170,7 @@ impl TxQuery { before: Option, ) -> async_graphql::Result> { - let query: &Database = ctx.data_unchecked(); + let query: &ReadView = ctx.data_unchecked(); let config = ctx.data_unchecked::(); let owner = fuel_types::Address::from(owner); @@ -298,11 +301,11 @@ impl 
TxStatusSubscription { ) -> anyhow::Result> + 'a> { let txpool = ctx.data_unchecked::(); - let db = ctx.data_unchecked::(); + let query: &ReadView = ctx.data_unchecked(); let rx = txpool.tx_update_subscribe(id.into())?; Ok(transaction_status_change( - move |id| match db.tx_status(&id) { + move |id| match query.tx_status(&id) { Ok(status) => Ok(Some(status)), Err(StorageError::NotFound(_, _)) => Ok(txpool .submission_time(id) diff --git a/crates/fuel-core/src/schema/tx/types.rs b/crates/fuel-core/src/schema/tx/types.rs index 41b06f5cb3c..fcd0e110ff2 100644 --- a/crates/fuel-core/src/schema/tx/types.rs +++ b/crates/fuel-core/src/schema/tx/types.rs @@ -5,10 +5,8 @@ use super::{ }; use crate::{ fuel_core_graphql_api::{ - service::{ - Database, - TxPool, - }, + api_service::TxPool, + database::ReadView, Config, IntoApiResult, }, @@ -160,7 +158,7 @@ impl SuccessStatus { } async fn block(&self, ctx: &Context<'_>) -> async_graphql::Result { - let query: &Database = ctx.data_unchecked(); + let query: &ReadView = ctx.data_unchecked(); let block = query.block(&self.block_id)?; Ok(block.into()) } @@ -174,8 +172,8 @@ impl SuccessStatus { } async fn receipts(&self, ctx: &Context<'_>) -> async_graphql::Result> { - let db = ctx.data_unchecked::(); - let receipts = db + let query: &ReadView = ctx.data_unchecked(); + let receipts = query .receipts(&self.tx_id) .unwrap_or_default() .into_iter() @@ -201,7 +199,7 @@ impl FailureStatus { } async fn block(&self, ctx: &Context<'_>) -> async_graphql::Result { - let query: &Database = ctx.data_unchecked(); + let query: &ReadView = ctx.data_unchecked(); let block = query.block(&self.block_id)?; Ok(block.into()) } @@ -219,8 +217,8 @@ impl FailureStatus { } async fn receipts(&self, ctx: &Context<'_>) -> async_graphql::Result> { - let db = ctx.data_unchecked::(); - let receipts = db + let query: &ReadView = ctx.data_unchecked(); + let receipts = query .receipts(&self.tx_id) .unwrap_or_default() .into_iter() @@ -526,7 +524,7 @@ impl Transaction 
{ ctx: &Context<'_>, ) -> async_graphql::Result> { let id = self.1; - let query: &Database = ctx.data_unchecked(); + let query: &ReadView = ctx.data_unchecked(); let txpool = ctx.data_unchecked::(); get_tx_status(id, query, txpool).map_err(Into::into) } @@ -535,7 +533,7 @@ impl Transaction { &self, ctx: &Context<'_>, ) -> async_graphql::Result>> { - let query: &Database = ctx.data_unchecked(); + let query: &ReadView = ctx.data_unchecked(); let receipts = query .receipts(&self.1) .into_api_result::, async_graphql::Error>()?; @@ -622,7 +620,7 @@ impl Transaction { #[tracing::instrument(level = "debug", skip(query, txpool), ret, err)] pub(crate) fn get_tx_status( id: fuel_core_types::fuel_types::Bytes32, - query: &Database, + query: &ReadView, txpool: &TxPool, ) -> Result, StorageError> { match query diff --git a/crates/fuel-core/src/service.rs b/crates/fuel-core/src/service.rs index 3d5240cab28..70004f2fbaa 100644 --- a/crates/fuel-core/src/service.rs +++ b/crates/fuel-core/src/service.rs @@ -44,7 +44,7 @@ pub struct SharedState { /// The Relayer shared state. pub relayer: Option>, /// The GraphQL shared state. - pub graph_ql: crate::fuel_core_graphql_api::service::SharedState, + pub graph_ql: crate::fuel_core_graphql_api::api_service::SharedState, /// The underlying database. pub database: Database, /// Subscribe to new block production. @@ -305,9 +305,9 @@ mod tests { i += 1; } - // current services: graphql, txpool, PoA + // current services: graphql, graphql worker, txpool, PoA #[allow(unused_mut)] - let mut expected_services = 3; + let mut expected_services = 4; // Relayer service is disabled with `Config::local_node`. 
// #[cfg(feature = "relayer")] diff --git a/crates/fuel-core/src/service/adapters/block_importer.rs b/crates/fuel-core/src/service/adapters/block_importer.rs index 89627483c8d..7fdfb2c3035 100644 --- a/crates/fuel-core/src/service/adapters/block_importer.rs +++ b/crates/fuel-core/src/service/adapters/block_importer.rs @@ -70,11 +70,7 @@ impl BlockImporterAdapter { &self, sealed_block: SealedBlock, ) -> anyhow::Result<()> { - tokio::task::spawn_blocking({ - let importer = self.block_importer.clone(); - move || importer.execute_and_commit(sealed_block) - }) - .await??; + self.block_importer.execute_and_commit(sealed_block).await?; Ok(()) } } diff --git a/crates/fuel-core/src/service/adapters/consensus_module/poa.rs b/crates/fuel-core/src/service/adapters/consensus_module/poa.rs index ac446c71675..9e57c2cf0ed 100644 --- a/crates/fuel-core/src/service/adapters/consensus_module/poa.rs +++ b/crates/fuel-core/src/service/adapters/consensus_module/poa.rs @@ -1,5 +1,3 @@ -use std::ops::Deref; - use crate::{ database::Database, fuel_core_graphql_api::ports::ConsensusModulePort, @@ -124,15 +122,17 @@ impl fuel_core_poa::ports::BlockProducer for BlockProducerAdapter { } } +#[async_trait::async_trait] impl BlockImporter for BlockImporterAdapter { type Database = Database; - fn commit_result( + async fn commit_result( &self, result: UncommittedImporterResult>, ) -> anyhow::Result<()> { self.block_importer .commit_result(result) + .await .map_err(Into::into) } @@ -140,7 +140,7 @@ impl BlockImporter for BlockImporterAdapter { Box::pin( BroadcastStream::new(self.block_importer.subscribe()) .filter_map(|result| result.ok()) - .map(|r| r.deref().into()), + .map(BlockImportInfo::from), ) } } diff --git a/crates/fuel-core/src/service/adapters/executor.rs b/crates/fuel-core/src/service/adapters/executor.rs index bb8e46042db..dbeece6c739 100644 --- a/crates/fuel-core/src/service/adapters/executor.rs +++ b/crates/fuel-core/src/service/adapters/executor.rs @@ -16,26 +16,19 @@ use 
fuel_core_executor::{ use fuel_core_storage::{ transactional::StorageTransaction, Error as StorageError, - Result as StorageResult, }; use fuel_core_types::{ blockchain::primitives::DaBlockHeight, entities::message::Message, fuel_tx, fuel_tx::Receipt, - fuel_types::{ - Address, - BlockHeight, - Bytes32, - Nonce, - }, + fuel_types::Nonce, services::{ block_producer::Components, executor::{ Result as ExecutorResult, UncommittedResult, }, - txpool::TransactionStatus, }, }; @@ -84,36 +77,6 @@ impl fuel_core_executor::refs::ContractStorageTrait for Database { type InnerError = StorageError; } -impl fuel_core_executor::ports::MessageIsSpent for Database { - type Error = StorageError; - - fn message_is_spent(&self, nonce: &Nonce) -> StorageResult { - self.message_is_spent(nonce) - } -} - -impl fuel_core_executor::ports::TxIdOwnerRecorder for Database { - type Error = StorageError; - - fn record_tx_id_owner( - &self, - owner: &Address, - block_height: BlockHeight, - tx_idx: u16, - tx_id: &Bytes32, - ) -> Result, Self::Error> { - self.record_tx_id_owner(owner, block_height, tx_idx, tx_id) - } - - fn update_tx_status( - &self, - id: &Bytes32, - status: TransactionStatus, - ) -> Result, Self::Error> { - self.update_tx_status(id, status) - } -} - impl fuel_core_executor::ports::ExecutorDatabaseTrait for Database {} impl fuel_core_executor::ports::RelayerPort for MaybeRelayerAdapter { diff --git a/crates/fuel-core/src/service/adapters/graphql_api.rs b/crates/fuel-core/src/service/adapters/graphql_api.rs index 4faea60040a..e83efc44e08 100644 --- a/crates/fuel-core/src/service/adapters/graphql_api.rs +++ b/crates/fuel-core/src/service/adapters/graphql_api.rs @@ -1,20 +1,13 @@ -use super::BlockProducerAdapter; +use super::{ + BlockImporterAdapter, + BlockProducerAdapter, +}; use crate::{ - database::{ - transactions::OwnedTransactionIndexCursor, - Database, - }, + database::Database, fuel_core_graphql_api::ports::{ + worker, BlockProducerPort, - DatabaseBlocks, - DatabaseChain, - 
DatabaseCoins, - DatabaseContracts, DatabaseMessageProof, - DatabaseMessages, - DatabasePort, - DatabaseTransactions, - DryRunExecution, P2pPort, TxPoolPort, }, @@ -25,51 +18,22 @@ use crate::{ }; use async_trait::async_trait; use fuel_core_services::stream::BoxStream; -use fuel_core_storage::{ - iter::{ - BoxedIter, - IntoBoxedIter, - IterDirection, - }, - not_found, - Error as StorageError, - Result as StorageResult, -}; +use fuel_core_storage::Result as StorageResult; use fuel_core_txpool::{ service::TxStatusMessage, - types::{ - ContractId, - TxId, - }, + types::TxId, }; use fuel_core_types::{ - blockchain::primitives::{ - BlockId, - DaBlockHeight, - }, - entities::message::{ - MerkleProof, - Message, - }, + entities::message::MerkleProof, fuel_tx::{ - Address, - AssetId, Receipt as TxReceipt, Transaction, - TxPointer, - UtxoId, - }, - fuel_types::{ - BlockHeight, - Nonce, }, + fuel_types::BlockHeight, services::{ - graphql_api::ContractBalance, + block_importer::SharedImportResult, p2p::PeerInfo, - txpool::{ - InsertionResult, - TransactionStatus, - }, + txpool::InsertionResult, }, tai64::Tai64, }; @@ -78,140 +42,8 @@ use std::{ sync::Arc, }; -impl DatabaseBlocks for Database { - fn block_id(&self, height: &BlockHeight) -> StorageResult { - self.get_block_id(height) - .and_then(|height| height.ok_or(not_found!("BlockId"))) - } - - fn blocks_ids( - &self, - start: Option, - direction: IterDirection, - ) -> BoxedIter<'_, StorageResult<(BlockHeight, BlockId)>> { - self.all_block_ids(start, direction) - .map(|result| result.map_err(StorageError::from)) - .into_boxed() - } - - fn ids_of_latest_block(&self) -> StorageResult<(BlockHeight, BlockId)> { - self.ids_of_latest_block() - .transpose() - .ok_or(not_found!("BlockId"))? - } -} - -impl DatabaseTransactions for Database { - fn tx_status(&self, tx_id: &TxId) -> StorageResult { - self.get_tx_status(tx_id) - .transpose() - .ok_or(not_found!("TransactionId"))? 
- } - - fn owned_transactions_ids( - &self, - owner: Address, - start: Option, - direction: IterDirection, - ) -> BoxedIter> { - let start = start.map(|tx_pointer| OwnedTransactionIndexCursor { - block_height: tx_pointer.block_height(), - tx_idx: tx_pointer.tx_index(), - }); - self.owned_transactions(owner, start, Some(direction)) - .map(|result| result.map_err(StorageError::from)) - .into_boxed() - } -} - -impl DatabaseMessages for Database { - fn owned_message_ids( - &self, - owner: &Address, - start_message_id: Option, - direction: IterDirection, - ) -> BoxedIter<'_, StorageResult> { - self.owned_message_ids(owner, start_message_id, Some(direction)) - .map(|result| result.map_err(StorageError::from)) - .into_boxed() - } - - fn all_messages( - &self, - start_message_id: Option, - direction: IterDirection, - ) -> BoxedIter<'_, StorageResult> { - self.all_messages(start_message_id, Some(direction)) - .map(|result| result.map_err(StorageError::from)) - .into_boxed() - } - - fn message_is_spent(&self, nonce: &Nonce) -> StorageResult { - self.message_is_spent(nonce) - } - - fn message_exists(&self, nonce: &Nonce) -> StorageResult { - self.message_exists(nonce) - } -} - -impl DatabaseCoins for Database { - fn owned_coins_ids( - &self, - owner: &Address, - start_coin: Option, - direction: IterDirection, - ) -> BoxedIter<'_, StorageResult> { - self.owned_coins_ids(owner, start_coin, Some(direction)) - .map(|res| res.map_err(StorageError::from)) - .into_boxed() - } -} - -impl DatabaseContracts for Database { - fn contract_balances( - &self, - contract: ContractId, - start_asset: Option, - direction: IterDirection, - ) -> BoxedIter> { - self.contract_balances(contract, start_asset, Some(direction)) - .map(move |result| { - result - .map_err(StorageError::from) - .map(|(asset_id, amount)| ContractBalance { - owner: contract, - amount, - asset_id, - }) - }) - .into_boxed() - } -} - -impl DatabaseChain for Database { - fn chain_name(&self) -> StorageResult { - pub const 
DEFAULT_NAME: &str = "Fuel.testnet"; - - Ok(self - .get_chain_name()? - .unwrap_or_else(|| DEFAULT_NAME.to_string())) - } - - fn da_height(&self) -> StorageResult { - #[cfg(feature = "relayer")] - { - use fuel_core_relayer::ports::RelayerDb; - self.get_finalized_da_height() - } - #[cfg(not(feature = "relayer"))] - { - Ok(0u64.into()) - } - } -} - -impl DatabasePort for Database {} +mod off_chain; +mod on_chain; #[async_trait] impl TxPoolPort for TxPoolAdapter { @@ -253,7 +85,7 @@ impl DatabaseMessageProof for Database { } #[async_trait] -impl DryRunExecution for BlockProducerAdapter { +impl BlockProducerPort for BlockProducerAdapter { async fn dry_run_tx( &self, transaction: Transaction, @@ -266,8 +98,6 @@ impl DryRunExecution for BlockProducerAdapter { } } -impl BlockProducerPort for BlockProducerAdapter {} - #[async_trait::async_trait] impl P2pPort for P2PAdapter { async fn all_peer_info(&self) -> anyhow::Result> { @@ -305,3 +135,13 @@ impl P2pPort for P2PAdapter { } } } + +impl worker::BlockImporter for BlockImporterAdapter { + fn block_events(&self) -> BoxStream { + use futures::StreamExt; + fuel_core_services::stream::IntoBoxStream::into_boxed( + tokio_stream::wrappers::BroadcastStream::new(self.block_importer.subscribe()) + .filter_map(|r| futures::future::ready(r.ok())), + ) + } +} diff --git a/crates/fuel-core/src/service/adapters/graphql_api/off_chain.rs b/crates/fuel-core/src/service/adapters/graphql_api/off_chain.rs new file mode 100644 index 00000000000..ba23d77bd5a --- /dev/null +++ b/crates/fuel-core/src/service/adapters/graphql_api/off_chain.rs @@ -0,0 +1,116 @@ +use crate::{ + database::{ + transactions::OwnedTransactionIndexCursor, + Database, + }, + fuel_core_graphql_api::{ + database::OffChainView, + ports::{ + worker, + OffChainDatabase, + }, + }, +}; +use fuel_core_storage::{ + iter::{ + BoxedIter, + IntoBoxedIter, + IterDirection, + }, + not_found, + transactional::AtomicView, + Error as StorageError, + Result as StorageResult, +}; +use 
fuel_core_txpool::types::TxId; +use fuel_core_types::{ + fuel_tx::{ + Address, + Bytes32, + TxPointer, + UtxoId, + }, + fuel_types::{ + BlockHeight, + Nonce, + }, + services::txpool::TransactionStatus, +}; +use std::sync::Arc; + +impl OffChainDatabase for Database { + fn owned_message_ids( + &self, + owner: &Address, + start_message_id: Option, + direction: IterDirection, + ) -> BoxedIter<'_, StorageResult> { + self.owned_message_ids(owner, start_message_id, Some(direction)) + .map(|result| result.map_err(StorageError::from)) + .into_boxed() + } + + fn owned_coins_ids( + &self, + owner: &Address, + start_coin: Option, + direction: IterDirection, + ) -> BoxedIter<'_, StorageResult> { + self.owned_coins_ids(owner, start_coin, Some(direction)) + .map(|res| res.map_err(StorageError::from)) + .into_boxed() + } + + fn tx_status(&self, tx_id: &TxId) -> StorageResult { + self.get_tx_status(tx_id) + .transpose() + .ok_or(not_found!("TransactionId"))? + } + + fn owned_transactions_ids( + &self, + owner: Address, + start: Option, + direction: IterDirection, + ) -> BoxedIter> { + let start = start.map(|tx_pointer| OwnedTransactionIndexCursor { + block_height: tx_pointer.block_height(), + tx_idx: tx_pointer.tx_index(), + }); + self.owned_transactions(owner, start, Some(direction)) + .map(|result| result.map_err(StorageError::from)) + .into_boxed() + } +} + +impl AtomicView for Database { + fn view_at(&self, _: BlockHeight) -> StorageResult { + unimplemented!( + "Unimplemented until of the https://github.com/FuelLabs/fuel-core/issues/451" + ) + } + + fn latest_view(&self) -> OffChainView { + Arc::new(self.clone()) + } +} + +impl worker::OffChainDatabase for Database { + fn record_tx_id_owner( + &mut self, + owner: &Address, + block_height: BlockHeight, + tx_idx: u16, + tx_id: &Bytes32, + ) -> StorageResult> { + Database::record_tx_id_owner(self, owner, block_height, tx_idx, tx_id) + } + + fn update_tx_status( + &mut self, + id: &Bytes32, + status: TransactionStatus, + ) -> 
StorageResult> { + Database::update_tx_status(self, id, status) + } +} diff --git a/crates/fuel-core/src/service/adapters/graphql_api/on_chain.rs b/crates/fuel-core/src/service/adapters/graphql_api/on_chain.rs new file mode 100644 index 00000000000..d0bcaf3eeb2 --- /dev/null +++ b/crates/fuel-core/src/service/adapters/graphql_api/on_chain.rs @@ -0,0 +1,139 @@ +use crate::{ + database::Database, + fuel_core_graphql_api::{ + database::OnChainView, + ports::{ + DatabaseBlocks, + DatabaseChain, + DatabaseContracts, + DatabaseMessages, + OnChainDatabase, + }, + }, +}; +use fuel_core_storage::{ + iter::{ + BoxedIter, + IntoBoxedIter, + IterDirection, + }, + not_found, + transactional::AtomicView, + Error as StorageError, + Result as StorageResult, +}; +use fuel_core_txpool::types::ContractId; +use fuel_core_types::{ + blockchain::primitives::{ + BlockId, + DaBlockHeight, + }, + entities::message::Message, + fuel_tx::AssetId, + fuel_types::{ + BlockHeight, + Nonce, + }, + services::graphql_api::ContractBalance, +}; +use std::sync::Arc; + +impl DatabaseBlocks for Database { + fn block_id(&self, height: &BlockHeight) -> StorageResult { + self.get_block_id(height) + .and_then(|height| height.ok_or(not_found!("BlockId"))) + } + + fn blocks_ids( + &self, + start: Option, + direction: IterDirection, + ) -> BoxedIter<'_, StorageResult<(BlockHeight, BlockId)>> { + self.all_block_ids(start, direction) + .map(|result| result.map_err(StorageError::from)) + .into_boxed() + } + + fn ids_of_latest_block(&self) -> StorageResult<(BlockHeight, BlockId)> { + self.ids_of_latest_block() + .transpose() + .ok_or(not_found!("BlockId"))? 
+ } +} + +impl DatabaseMessages for Database { + fn all_messages( + &self, + start_message_id: Option, + direction: IterDirection, + ) -> BoxedIter<'_, StorageResult> { + self.all_messages(start_message_id, Some(direction)) + .map(|result| result.map_err(StorageError::from)) + .into_boxed() + } + + fn message_is_spent(&self, nonce: &Nonce) -> StorageResult { + self.message_is_spent(nonce) + } + + fn message_exists(&self, nonce: &Nonce) -> StorageResult { + self.message_exists(nonce) + } +} + +impl DatabaseContracts for Database { + fn contract_balances( + &self, + contract: ContractId, + start_asset: Option, + direction: IterDirection, + ) -> BoxedIter> { + self.contract_balances(contract, start_asset, Some(direction)) + .map(move |result| { + result + .map_err(StorageError::from) + .map(|(asset_id, amount)| ContractBalance { + owner: contract, + amount, + asset_id, + }) + }) + .into_boxed() + } +} + +impl DatabaseChain for Database { + fn chain_name(&self) -> StorageResult { + pub const DEFAULT_NAME: &str = "Fuel.testnet"; + + Ok(self + .get_chain_name()? 
+ .unwrap_or_else(|| DEFAULT_NAME.to_string())) + } + + fn da_height(&self) -> StorageResult { + #[cfg(feature = "relayer")] + { + use fuel_core_relayer::ports::RelayerDb; + self.get_finalized_da_height() + } + #[cfg(not(feature = "relayer"))] + { + Ok(0u64.into()) + } + } +} + +impl OnChainDatabase for Database {} + +impl AtomicView for Database { + fn view_at(&self, _: BlockHeight) -> StorageResult { + unimplemented!( + "Unimplemented until of the https://github.com/FuelLabs/fuel-core/issues/451" + ) + } + + fn latest_view(&self) -> OnChainView { + Arc::new(self.clone()) + } +} diff --git a/crates/fuel-core/src/service/adapters/sync.rs b/crates/fuel-core/src/service/adapters/sync.rs index 1b63c8c25e1..ddbcd2ed6d0 100644 --- a/crates/fuel-core/src/service/adapters/sync.rs +++ b/crates/fuel-core/src/service/adapters/sync.rs @@ -137,6 +137,7 @@ impl BlockImporterPort for BlockImporterAdapter { }), ) } + async fn execute_and_commit(&self, block: SealedBlock) -> anyhow::Result<()> { self.execute_and_commit(block).await } diff --git a/crates/fuel-core/src/service/adapters/txpool.rs b/crates/fuel-core/src/service/adapters/txpool.rs index 6f1593f6d77..ccd33474df6 100644 --- a/crates/fuel-core/src/service/adapters/txpool.rs +++ b/crates/fuel-core/src/service/adapters/txpool.rs @@ -7,7 +7,6 @@ use crate::{ }; use fuel_core_services::stream::BoxStream; use fuel_core_storage::{ - not_found, tables::{ Coins, ContractsRawCode, @@ -33,7 +32,7 @@ use fuel_core_types::{ Nonce, }, services::{ - block_importer::ImportResult, + block_importer::SharedImportResult, p2p::{ GossipsubMessageAcceptance, GossipsubMessageInfo, @@ -44,7 +43,7 @@ use fuel_core_types::{ use std::sync::Arc; impl BlockImporter for BlockImporterAdapter { - fn block_events(&self) -> BoxStream> { + fn block_events(&self) -> BoxStream { use tokio_stream::{ wrappers::BroadcastStream, StreamExt, @@ -144,13 +143,4 @@ impl fuel_core_txpool::ports::TxPoolDb for Database { fn current_block_height(&self) -> StorageResult { 
self.latest_height() } - - fn transaction_status( - &self, - tx_id: &fuel_core_types::fuel_types::Bytes32, - ) -> StorageResult { - self.get_tx_status(tx_id) - .transpose() - .ok_or(not_found!("TransactionId"))? - } } diff --git a/crates/fuel-core/src/service/genesis.rs b/crates/fuel-core/src/service/genesis.rs index 8039f438d12..9942df0a810 100644 --- a/crates/fuel-core/src/service/genesis.rs +++ b/crates/fuel-core/src/service/genesis.rs @@ -136,7 +136,8 @@ fn import_genesis_block( (), (), ); - importer.commit_result(UncommittedImportResult::new( + // We commit Genesis block before start of any service, so there is no listeners. + importer.commit_result_without_awaiting_listeners(UncommittedImportResult::new( ImportResult::new_from_local(block, vec![]), database_transaction, ))?; diff --git a/crates/fuel-core/src/service/sub_services.rs b/crates/fuel-core/src/service/sub_services.rs index 1523fe41c15..ba8dc05e93a 100644 --- a/crates/fuel-core/src/service/sub_services.rs +++ b/crates/fuel-core/src/service/sub_services.rs @@ -3,6 +3,7 @@ use super::adapters::P2PAdapter; use crate::{ database::Database, + fuel_core_graphql_api, fuel_core_graphql_api::Config as GraphQLConfig, schema::build_schema, service::{ @@ -41,7 +42,7 @@ pub type BlockProducerService = fuel_core_producer::block_producer::Producer< TxPoolAdapter, ExecutorAdapter, >; -pub type GraphQL = crate::fuel_core_graphql_api::service::Service; +pub type GraphQL = crate::fuel_core_graphql_api::api_service::Service; pub fn init_sub_services( config: &Config, @@ -189,20 +190,28 @@ pub fn init_sub_services( ) .data(database.clone()); - let graph_ql = crate::fuel_core_graphql_api::service::new_service( - GraphQLConfig { - addr: config.addr, - utxo_validation: config.utxo_validation, - debug: config.debug, - vm_backtrace: config.vm.backtrace, - min_gas_price: config.txpool.min_gas_price, - max_tx: config.txpool.max_tx, - max_depth: config.txpool.max_depth, - consensus_parameters: 
config.chain_conf.consensus_parameters.clone(), - consensus_key: config.consensus_key.clone(), - }, + let graphql_worker = fuel_core_graphql_api::worker_service::new_service( + importer_adapter.clone(), + database.clone(), + ); + + let graphql_config = GraphQLConfig { + addr: config.addr, + utxo_validation: config.utxo_validation, + debug: config.debug, + vm_backtrace: config.vm.backtrace, + min_gas_price: config.txpool.min_gas_price, + max_tx: config.txpool.max_tx, + max_depth: config.txpool.max_depth, + consensus_parameters: config.chain_conf.consensus_parameters.clone(), + consensus_key: config.consensus_key.clone(), + }; + + let graph_ql = fuel_core_graphql_api::api_service::new_service( + graphql_config, schema, - Box::new(database.clone()), + database.clone(), + database.clone(), Box::new(tx_pool_adapter), Box::new(producer_adapter), Box::new(poa_adapter.clone()), @@ -249,5 +258,7 @@ pub fn init_sub_services( } } + services.push(Box::new(graphql_worker)); + Ok((services, shared)) } diff --git a/crates/services/consensus_module/poa/src/ports.rs b/crates/services/consensus_module/poa/src/ports.rs index fdb8a2d11de..c93180645bc 100644 --- a/crates/services/consensus_module/poa/src/ports.rs +++ b/crates/services/consensus_module/poa/src/ports.rs @@ -66,10 +66,11 @@ pub trait BlockProducer: Send + Sync { } #[cfg_attr(test, mockall::automock(type Database=EmptyStorage;))] +#[async_trait::async_trait] pub trait BlockImporter: Send + Sync { type Database; - fn commit_result( + async fn commit_result( &self, result: UncommittedImportResult>, ) -> anyhow::Result<()>; diff --git a/crates/services/consensus_module/poa/src/service.rs b/crates/services/consensus_module/poa/src/service.rs index 3ec7b8727d8..4fd65a220e4 100644 --- a/crates/services/consensus_module/poa/src/service.rs +++ b/crates/services/consensus_module/poa/src/service.rs @@ -356,10 +356,12 @@ where consensus: seal, }; // Import the sealed block - self.block_importer.commit_result(Uncommitted::new( - 
ImportResult::new_from_local(block, tx_status), - db_transaction, - ))?; + self.block_importer + .commit_result(Uncommitted::new( + ImportResult::new_from_local(block, tx_status), + db_transaction, + )) + .await?; // Update last block time self.last_height = height; diff --git a/crates/services/executor/src/executor.rs b/crates/services/executor/src/executor.rs index 6be1e94498a..a2041c56f4d 100644 --- a/crates/services/executor/src/executor.rs +++ b/crates/services/executor/src/executor.rs @@ -14,7 +14,6 @@ use fuel_core_storage::{ ContractsLatestUtxo, Messages, ProcessedTransactions, - Receipts, SpentMessages, }, transactional::{ @@ -23,7 +22,6 @@ use fuel_core_storage::{ }, StorageAsMut, StorageAsRef, - StorageInspect, }; use fuel_core_types::{ blockchain::{ @@ -45,11 +43,9 @@ use fuel_core_types::{ fuel_tx::{ field::{ InputContract, - Inputs, MintAmount, MintAssetId, OutputContract, - Outputs, TxPointer as TxPointerField, }, input, @@ -79,7 +75,6 @@ use fuel_core_types::{ Transaction, TxId, TxPointer, - UniqueIdentifier, UtxoId, }, fuel_types::{ @@ -123,7 +118,6 @@ use fuel_core_types::{ TransactionValidityError, UncommittedResult, }, - txpool::TransactionStatus, }, }; use parking_lot::Mutex as ParkingMutex; @@ -267,11 +261,11 @@ where let ( ExecutionResult { - block, skipped_transactions, + tx_status, .. }, - temporary_db, + _temporary_db, ) = self .execute_without_commit(ExecutionTypes::DryRun(component), options)? .into(); @@ -281,19 +275,11 @@ where return Err(err) } - block - .transactions() - .iter() - .map(|tx| { - let id = tx.id(&self.config.consensus_parameters.chain_id); - StorageInspect::::get(temporary_db.as_ref(), &id) - .transpose() - .unwrap_or_else(|| Ok(Default::default())) - .map(|v| v.into_owned()) - }) - .collect::>, _>>() - .map_err(Into::into) - // drop `temporary_db` without committing to avoid altering state. 
+ Ok(tx_status + .into_iter() + .map(|tx| tx.receipts) + .collect::>>()) + // drop `_temporary_db` without committing to avoid altering state. } } @@ -447,16 +433,6 @@ where tx_status, }; - // ------------ GraphQL API Functionality BEGIN ------------ - - // save the status for every transaction using the finalized block id - self.persist_transaction_status(&result, block_st_transaction.as_mut())?; - - // save the associated owner for each transaction in the block - self.index_tx_owners_for_block(&result.block, block_st_transaction.as_mut())?; - - // ------------ GraphQL API Functionality END ------------ - // Get the complete fuel block. Ok(UncommittedResult::new(result, block_st_transaction)) } @@ -807,6 +783,7 @@ where execution_data.tx_status.push(TransactionExecutionStatus { id: coinbase_id, result: TransactionExecutionResult::Success { result: None }, + receipts: vec![], }); if block_st_transaction @@ -895,7 +872,10 @@ where debug_assert_eq!(tx.id(&self.config.consensus_parameters.chain_id), tx_id); } - // Wrap inputs in the execution kind. + // TODO: We need to call this function before `vm.transact` but we can't do that because of + // `Checked` immutability requirements. So we do it here after its execution for now. + // But it should be fixed in the future. 
+ // https://github.com/FuelLabs/fuel-vm/issues/651 self.compute_inputs( match execution_kind { ExecutionKind::DryRun => ExecutionTypes::DryRun(tx.inputs_mut()), @@ -970,9 +950,6 @@ where .storage::() .insert(&tx_id, &())?; - // persist receipts - self.persist_receipts(&tx_id, &receipts, tx_st_transaction.as_mut())?; - let status = if reverted { self.log_backtrace(&vm, &receipts); // get reason for revert @@ -1004,14 +981,15 @@ where .checked_add(tx_fee) .ok_or(ExecutorError::FeeOverflow)?; execution_data.used_gas = execution_data.used_gas.saturating_add(used_gas); + execution_data + .message_ids + .extend(receipts.iter().filter_map(|r| r.message_id())); // queue up status for this tx to be stored once block id is finalized. execution_data.tx_status.push(TransactionExecutionStatus { id: tx_id, result: status, + receipts, }); - execution_data - .message_ids - .extend(receipts.iter().filter_map(|r| r.message_id())); Ok(final_tx) } @@ -1070,7 +1048,7 @@ where | Input::MessageDataSigned(MessageDataSigned { nonce, .. }) | Input::MessageDataPredicate(MessageDataPredicate { nonce, .. }) => { // Eagerly return already spent if status is known. - if db.message_is_spent(nonce)? { + if db.storage::().contains_key(nonce)? 
{ return Err( TransactionValidityError::MessageAlreadySpent(*nonce).into() ) @@ -1545,130 +1523,6 @@ where Ok(()) } - - fn persist_receipts( - &self, - tx_id: &TxId, - receipts: &[Receipt], - db: &mut D, - ) -> ExecutorResult<()> { - if db.storage::().insert(tx_id, receipts)?.is_some() { - return Err(ExecutorError::OutputAlreadyExists) - } - Ok(()) - } - - /// Associate all transactions within a block to their respective UTXO owners - fn index_tx_owners_for_block( - &self, - block: &Block, - block_st_transaction: &mut D, - ) -> ExecutorResult<()> { - for (tx_idx, tx) in block.transactions().iter().enumerate() { - let block_height = *block.header().height(); - let inputs; - let outputs; - let tx_idx = - u16::try_from(tx_idx).map_err(|_| ExecutorError::TooManyTransactions)?; - let tx_id = tx.id(&self.config.consensus_parameters.chain_id); - match tx { - Transaction::Script(tx) => { - inputs = tx.inputs().as_slice(); - outputs = tx.outputs().as_slice(); - } - Transaction::Create(tx) => { - inputs = tx.inputs().as_slice(); - outputs = tx.outputs().as_slice(); - } - Transaction::Mint(_) => continue, - } - self.persist_owners_index( - block_height, - inputs, - outputs, - &tx_id, - tx_idx, - block_st_transaction, - )?; - } - Ok(()) - } - - /// Index the tx id by owner for all of the inputs and outputs - fn persist_owners_index( - &self, - block_height: BlockHeight, - inputs: &[Input], - outputs: &[Output], - tx_id: &Bytes32, - tx_idx: u16, - db: &mut D, - ) -> ExecutorResult<()> { - let mut owners = vec![]; - for input in inputs { - if let Input::CoinSigned(CoinSigned { owner, .. }) - | Input::CoinPredicate(CoinPredicate { owner, .. }) = input - { - owners.push(owner); - } - } - - for output in outputs { - match output { - Output::Coin { to, .. } - | Output::Change { to, .. } - | Output::Variable { to, .. } => { - owners.push(to); - } - Output::Contract(_) | Output::ContractCreated { .. 
} => {} - } - } - - // dedupe owners from inputs and outputs prior to indexing - owners.sort(); - owners.dedup(); - - for owner in owners { - db.record_tx_id_owner(owner, block_height, tx_idx, tx_id)?; - } - - Ok(()) - } - - fn persist_transaction_status( - &self, - result: &ExecutionResult, - db: &D, - ) -> ExecutorResult<()> { - let time = result.block.header().time(); - let block_id = result.block.id(); - for TransactionExecutionStatus { id, result } in result.tx_status.iter() { - match result { - TransactionExecutionResult::Success { result } => { - db.update_tx_status( - id, - TransactionStatus::Success { - block_id, - time, - result: *result, - }, - )?; - } - TransactionExecutionResult::Failed { result, reason } => { - db.update_tx_status( - id, - TransactionStatus::Failed { - block_id, - time, - result: *result, - reason: reason.clone(), - }, - )?; - } - } - } - Ok(()) - } } trait Fee { diff --git a/crates/services/executor/src/ports.rs b/crates/services/executor/src/ports.rs index 1ca5a5058fd..e9c5b1b9b4e 100644 --- a/crates/services/executor/src/ports.rs +++ b/crates/services/executor/src/ports.rs @@ -8,14 +8,12 @@ use fuel_core_storage::{ ContractsState, Messages, ProcessedTransactions, - Receipts, SpentMessages, }, transactional::Transactional, vm_storage::VmStorageRequirements, Error as StorageError, MerkleRootStorage, - StorageInspect, StorageMutate, StorageRead, }; @@ -25,18 +23,14 @@ use fuel_core_types::{ entities::message::Message, fuel_tx, fuel_tx::{ - Address, - Bytes32, TxId, UniqueIdentifier, }, fuel_types::{ - BlockHeight, ChainId, Nonce, }, fuel_vm::checked_transaction::CheckedTransaction, - services::txpool::TransactionStatus, }; use fuel_core_types::fuel_tx::ContractId; @@ -79,50 +73,20 @@ pub trait RelayerPort { ) -> anyhow::Result>; } -pub trait MessageIsSpent: - StorageInspect - + StorageInspect -{ - type Error; - - fn message_is_spent(&self, nonce: &Nonce) -> Result; -} - -pub trait TxIdOwnerRecorder { - type Error; - - fn 
record_tx_id_owner( - &self, - owner: &Address, - block_height: BlockHeight, - tx_idx: u16, - tx_id: &Bytes32, - ) -> Result, Self::Error>; - - fn update_tx_status( - &self, - id: &Bytes32, - status: TransactionStatus, - ) -> Result, Self::Error>; -} - // TODO: Remove `Clone` bound pub trait ExecutorDatabaseTrait: - StorageMutate + StorageMutate + StorageMutate + MerkleRootStorage - + MessageIsSpent + StorageMutate + StorageMutate + StorageMutate - + StorageMutate + StorageMutate - + StorageRead + + StorageRead + StorageMutate + MerkleRootStorage + VmStorageRequirements + Transactional - + TxIdOwnerRecorder + Clone { } diff --git a/crates/services/importer/Cargo.toml b/crates/services/importer/Cargo.toml index 7cd93840428..6b47a8272f3 100644 --- a/crates/services/importer/Cargo.toml +++ b/crates/services/importer/Cargo.toml @@ -17,6 +17,7 @@ fuel-core-metrics = { workspace = true } fuel-core-storage = { workspace = true } fuel-core-types = { workspace = true } tokio = { workspace = true, features = ["full"] } +tokio-rayon = { workspace = true } tracing = { workspace = true } [dev-dependencies] diff --git a/crates/services/importer/src/config.rs b/crates/services/importer/src/config.rs index c551127c68a..0e9d938be93 100644 --- a/crates/services/importer/src/config.rs +++ b/crates/services/importer/src/config.rs @@ -22,7 +22,7 @@ impl Config { impl Default for Config { fn default() -> Self { Self { - max_block_notify_buffer: 1 << 10, + max_block_notify_buffer: 1, metrics: false, chain_id: ChainId::default(), } diff --git a/crates/services/importer/src/importer.rs b/crates/services/importer/src/importer.rs index 056c4010410..d75709e1c9e 100644 --- a/crates/services/importer/src/importer.rs +++ b/crates/services/importer/src/importer.rs @@ -29,6 +29,7 @@ use fuel_core_types::{ services::{ block_importer::{ ImportResult, + SharedImportResult, UncommittedResult, }, executor, @@ -38,7 +39,10 @@ use fuel_core_types::{ }; use std::{ ops::Deref, - sync::Arc, + sync::{ + Arc, 
+ Mutex, + }, time::{ Instant, SystemTime, @@ -47,6 +51,7 @@ use std::{ }; use tokio::sync::{ broadcast, + oneshot, TryAcquireError, }; @@ -105,10 +110,14 @@ impl PartialEq for Error { pub struct Importer { database: D, - executor: E, - verifier: V, + executor: Arc, + verifier: Arc, chain_id: ChainId, - broadcast: broadcast::Sender>, + broadcast: broadcast::Sender, + /// The channel to notify about the end of the processing of the previous block by all listeners. + /// It is used to await until all receivers of the notification process the `SharedImportResult` + /// before starting committing a new block. + prev_block_process_result: Mutex>>, guard: tokio::sync::Semaphore, } @@ -118,15 +127,16 @@ impl Importer { Self { database, - executor, - verifier, + executor: Arc::new(executor), + verifier: Arc::new(verifier), chain_id: config.chain_id, broadcast, + prev_block_process_result: Default::default(), guard: tokio::sync::Semaphore::new(1), } } - pub fn subscribe(&self) -> broadcast::Receiver> { + pub fn subscribe(&self) -> broadcast::Receiver { self.broadcast.subscribe() } @@ -162,7 +172,7 @@ where /// /// Only one commit may be in progress at the time. All other calls will fail. /// Returns an error if called while another call is in progress. - pub fn commit_result( + pub async fn commit_result( &self, result: UncommittedResult>, ) -> Result<(), Error> @@ -170,9 +180,36 @@ where ExecutorDatabase: ports::ExecutorDatabase, { let _guard = self.lock()?; + // It is safe to unwrap the channel because we have the `_guard`. + let previous_block_result = self + .prev_block_process_result + .lock() + .expect("poisoned") + .take(); + + // Await until all receivers of the notification process the result. + if let Some(channel) = previous_block_result { + let _ = channel.await; + } + self._commit_result(result) } + /// The method works in the same way as [`Importer::commit_result`], but it doesn't + /// wait for listeners to process the result. 
+ pub fn commit_result_without_awaiting_listeners( + &self, + result: UncommittedResult>, + ) -> Result<(), Error> + where + ExecutorDatabase: ports::ExecutorDatabase, + { + let _guard = self.lock()?; + self._commit_result(result)?; + Ok(()) + } + + /// The method commits the result of the block execution and notifies about a new imported block. #[tracing::instrument( skip_all, fields( @@ -270,7 +307,13 @@ where .set(current_time); tracing::info!("Committed block {:#x}", result.sealed_block.entity.id()); - let _ = self.broadcast.send(Arc::new(result)); + + // The `tokio::sync::oneshot::Sender` is used to notify about the end + // of the processing of a new block by all listeners. + let (sender, receiver) = oneshot::channel(); + let _ = self.broadcast.send(Arc::new(Awaiter::new(result, sender))); + *self.prev_block_process_result.lock().expect("poisoned") = Some(receiver); + Ok(()) } @@ -324,13 +367,24 @@ where pub fn verify_and_execute_block( &self, sealed_block: SealedBlock, + ) -> Result>, Error> { + Self::verify_and_execute_block_inner( + self.executor.clone(), + self.verifier.clone(), + sealed_block, + ) + } + + fn verify_and_execute_block_inner( + executor: Arc, + verifier: Arc, + sealed_block: SealedBlock, ) -> Result>, Error> { let consensus = sealed_block.consensus; let block = sealed_block.entity; let sealed_block_id = block.id(); - let result_of_verification = - self.verifier.verify_block_fields(&consensus, &block); + let result_of_verification = verifier.verify_block_fields(&consensus, &block); if let Err(err) = result_of_verification { return Err(Error::FailedVerification(err)) } @@ -350,8 +404,7 @@ where tx_status, }, db_tx, - ) = self - .executor + ) = executor .execute_without_commit(block) .map_err(Error::FailedExecution)? 
.into(); @@ -380,19 +433,47 @@ where impl Importer where - IDatabase: ImporterDatabase, - E: Executor, - V: BlockVerifier, + IDatabase: ImporterDatabase + 'static, + E: Executor + 'static, + V: BlockVerifier + 'static, { /// The method validates the `Block` fields and commits the `SealedBlock`. /// It is a combination of the [`Importer::verify_and_execute_block`] and [`Importer::commit_result`]. - pub fn execute_and_commit(&self, sealed_block: SealedBlock) -> Result<(), Error> { + pub async fn execute_and_commit( + &self, + sealed_block: SealedBlock, + ) -> Result<(), Error> { let _guard = self.lock()?; + + let executor = self.executor.clone(); + let verifier = self.verifier.clone(); + let (result, execute_time) = tokio_rayon::spawn_fifo(|| { + let start = Instant::now(); + let result = + Self::verify_and_execute_block_inner(executor, verifier, sealed_block); + let execute_time = start.elapsed().as_secs_f64(); + (result, execute_time) + }) + .await; + + let result = result?; + + // It is safe to unwrap the channel because we have the `_guard`. + let previous_block_result = self + .prev_block_process_result + .lock() + .expect("poisoned") + .take(); + + // Await until all receivers of the notification process the result. + if let Some(channel) = previous_block_result { + let _ = channel.await; + } + let start = Instant::now(); - let result = self.verify_and_execute_block(sealed_block)?; let commit_result = self._commit_result(result); - // record the execution time to prometheus - let time = start.elapsed().as_secs_f64(); + let commit_time = start.elapsed().as_secs_f64(); + let time = execute_time + commit_time; importer_metrics().execute_and_commit_duration.observe(time); // return execution result commit_result @@ -412,3 +493,34 @@ impl ShouldBeUnique for Option { } } } + +/// The wrapper around `ImportResult` to notify about the end of the processing of a new block. 
+struct Awaiter { + result: ImportResult, + release_channel: Option>, +} + +impl Drop for Awaiter { + fn drop(&mut self) { + if let Some(release_channel) = core::mem::take(&mut self.release_channel) { + let _ = release_channel.send(()); + } + } +} + +impl Deref for Awaiter { + type Target = ImportResult; + + fn deref(&self) -> &Self::Target { + &self.result + } +} + +impl Awaiter { + fn new(result: ImportResult, channel: oneshot::Sender<()>) -> Self { + Self { + result, + release_channel: Some(channel), + } + } +} diff --git a/crates/services/importer/src/importer/test.rs b/crates/services/importer/src/importer/test.rs index 897be9f9945..717271093fd 100644 --- a/crates/services/importer/src/importer/test.rs +++ b/crates/services/importer/src/importer/test.rs @@ -261,12 +261,13 @@ where => Err(Error::NotUnique(0u32.into())); "fails to import genesis block when block exists for height 0" )] -fn commit_result_genesis( +#[tokio::test] +async fn commit_result_genesis( sealed_block: SealedBlock, underlying_db: impl Fn() -> MockDatabase, executor_db: impl Fn() -> MockDatabase, ) -> Result<(), Error> { - commit_result_assert(sealed_block, underlying_db(), executor_db()) + commit_result_assert(sealed_block, underlying_db(), executor_db()).await } //////////////////////////// PoA Block //////////////////////////// @@ -333,7 +334,8 @@ fn commit_result_genesis( => Err(storage_failure_error()); "fails to import block when executor db fails to find block" )] -fn commit_result_and_execute_and_commit_poa( +#[tokio::test] +async fn commit_result_and_execute_and_commit_poa( sealed_block: SealedBlock, underlying_db: impl Fn() -> MockDatabase, executor_db: impl Fn() -> MockDatabase, @@ -342,18 +344,19 @@ fn commit_result_and_execute_and_commit_poa( // validation rules(-> test cases) during committing the result. 
let height = *sealed_block.entity.header().height(); let commit_result = - commit_result_assert(sealed_block.clone(), underlying_db(), executor_db()); + commit_result_assert(sealed_block.clone(), underlying_db(), executor_db()).await; let execute_and_commit_result = execute_and_commit_assert( sealed_block, underlying_db(), executor(ok(ex_result(height.into(), 0)), executor_db()), verifier(ok(())), - ); + ) + .await; assert_eq!(commit_result, execute_and_commit_result); commit_result } -fn commit_result_assert( +async fn commit_result_assert( sealed_block: SealedBlock, underlying_db: MockDatabase, executor_db: MockDatabase, @@ -366,23 +369,22 @@ fn commit_result_assert( ); let mut imported_blocks = importer.subscribe(); - let result = importer.commit_result(uncommitted_result); + let result = importer.commit_result(uncommitted_result).await; if result.is_ok() { let actual_sealed_block = imported_blocks.try_recv().unwrap(); assert_eq!(actual_sealed_block.sealed_block, expected_to_broadcast); - assert_eq!( - imported_blocks - .try_recv() - .expect_err("We should broadcast only one block"), - TryRecvError::Empty - ) + if let Err(err) = imported_blocks.try_recv() { + assert_eq!(err, TryRecvError::Empty); + } else { + panic!("We should broadcast only one block"); + } } result } -fn execute_and_commit_assert( +async fn execute_and_commit_assert( sealed_block: SealedBlock, underlying_db: MockDatabase, executor: MockExecutor, @@ -392,24 +394,24 @@ fn execute_and_commit_assert( let importer = Importer::new(Default::default(), underlying_db, executor, verifier); let mut imported_blocks = importer.subscribe(); - let result = importer.execute_and_commit(sealed_block); + let result = importer.execute_and_commit(sealed_block).await; if result.is_ok() { let actual_sealed_block = imported_blocks.try_recv().unwrap(); assert_eq!(actual_sealed_block.sealed_block, expected_to_broadcast); - assert_eq!( - imported_blocks - .try_recv() - .expect_err("We should broadcast only one block"), 
- TryRecvError::Empty - ) + + if let Err(err) = imported_blocks.try_recv() { + assert_eq!(err, TryRecvError::Empty); + } else { + panic!("We should broadcast only one block"); + } } result } -#[test] -fn commit_result_fail_when_locked() { +#[tokio::test] +async fn commit_result_fail_when_locked() { let importer = Importer::new(Default::default(), MockDatabase::default(), (), ()); let uncommitted_result = UncommittedResult::new( ImportResult::default(), @@ -418,13 +420,13 @@ fn commit_result_fail_when_locked() { let _guard = importer.lock(); assert_eq!( - importer.commit_result(uncommitted_result), + importer.commit_result(uncommitted_result).await, Err(Error::SemaphoreError(TryAcquireError::NoPermits)) ); } -#[test] -fn execute_and_commit_fail_when_locked() { +#[tokio::test] +async fn execute_and_commit_fail_when_locked() { let importer = Importer::new( Default::default(), MockDatabase::default(), @@ -434,7 +436,7 @@ fn execute_and_commit_fail_when_locked() { let _guard = importer.lock(); assert_eq!( - importer.execute_and_commit(Default::default()), + importer.execute_and_commit(Default::default()).await, Err(Error::SemaphoreError(TryAcquireError::NoPermits)) ); } @@ -491,7 +493,8 @@ fn one_lock_at_the_same_time() { => Err(verification_failure_error()); "commit fails if verification fails" )] -fn execute_and_commit_and_verify_and_execute_block_poa( +#[tokio::test] +async fn execute_and_commit_and_verify_and_execute_block_poa( sealed_block: SealedBlock, block_after_execution: P, verifier_result: V, @@ -521,7 +524,8 @@ where executor_db(ok(Some(previous_height)), ok(true), commits)(), ), verifier(verifier_result), - ); + ) + .await; assert_eq!(verify_and_execute_result, execute_and_commit_result); execute_and_commit_result } diff --git a/crates/services/importer/src/ports.rs b/crates/services/importer/src/ports.rs index 51c14e5085b..99f097fefe5 100644 --- a/crates/services/importer/src/ports.rs +++ b/crates/services/importer/src/ports.rs @@ -33,7 +33,7 @@ pub trait 
Executor: Send + Sync { } /// The database port used by the block importer. -pub trait ImporterDatabase { +pub trait ImporterDatabase: Send + Sync { /// Returns the latest block height. fn latest_block_height(&self) -> StorageResult>; /// Update metadata about the total number of transactions on the chain. @@ -57,7 +57,7 @@ pub trait ExecutorDatabase: ImporterDatabase { #[cfg_attr(test, mockall::automock)] /// The verifier of the block. -pub trait BlockVerifier { +pub trait BlockVerifier: Send + Sync { /// Verifies the consistency of the block fields for the block's height. /// It includes the verification of **all** fields, it includes the consensus rules for /// the corresponding height. diff --git a/crates/services/txpool/src/mock_db.rs b/crates/services/txpool/src/mock_db.rs index 157e5e7f27a..5435585a3f1 100644 --- a/crates/services/txpool/src/mock_db.rs +++ b/crates/services/txpool/src/mock_db.rs @@ -95,11 +95,4 @@ impl TxPoolDb for MockDb { fn current_block_height(&self) -> StorageResult { Ok(Default::default()) } - - fn transaction_status( - &self, - _tx_id: &fuel_core_types::fuel_types::Bytes32, - ) -> StorageResult { - unimplemented!() - } } diff --git a/crates/services/txpool/src/ports.rs b/crates/services/txpool/src/ports.rs index de51f429e93..375d7066982 100644 --- a/crates/services/txpool/src/ports.rs +++ b/crates/services/txpool/src/ports.rs @@ -11,18 +11,16 @@ use fuel_core_types::{ }, fuel_types::{ BlockHeight, - Bytes32, ContractId, Nonce, }, services::{ - block_importer::ImportResult, + block_importer::SharedImportResult, p2p::{ GossipsubMessageAcceptance, GossipsubMessageInfo, NetworkData, }, - txpool::TransactionStatus, }, }; use std::sync::Arc; @@ -46,7 +44,7 @@ pub trait PeerToPeer: Send + Sync { pub trait BlockImporter: Send + Sync { /// Wait until the next block is available - fn block_events(&self) -> BoxStream>; + fn block_events(&self) -> BoxStream; } pub trait TxPoolDb: Send + Sync { @@ -59,6 +57,4 @@ pub trait TxPoolDb: Send + Sync { 
fn is_message_spent(&self, message_id: &Nonce) -> StorageResult; fn current_block_height(&self) -> StorageResult; - - fn transaction_status(&self, tx_id: &Bytes32) -> StorageResult; } diff --git a/crates/services/txpool/src/service.rs b/crates/services/txpool/src/service.rs index e247e196a77..38ac9b75929 100644 --- a/crates/services/txpool/src/service.rs +++ b/crates/services/txpool/src/service.rs @@ -34,7 +34,6 @@ use fuel_core_types::{ Bytes32, }, services::{ - block_importer::ImportResult, p2p::{ GossipData, GossipsubMessageAcceptance, @@ -52,6 +51,7 @@ use fuel_core_types::{ }; use anyhow::anyhow; +use fuel_core_types::services::block_importer::SharedImportResult; use parking_lot::Mutex as ParkingMutex; use std::{ sync::Arc, @@ -143,7 +143,7 @@ impl Clone for SharedState { pub struct Task { gossiped_tx_stream: BoxStream, - committed_block_stream: BoxStream>, + committed_block_stream: BoxStream, shared: SharedState, ttl_timer: tokio::time::Interval, } @@ -201,14 +201,13 @@ where result = self.committed_block_stream.next() => { if let Some(result) = result { - let block = result + let block = &result .sealed_block - .entity - .compress(&self.shared.consensus_params.chain_id); + .entity; self.shared.txpool.lock().block_update( &self.shared.tx_status_sender, - block.header().height(), - block.transactions() + block, + &result.tx_status, ); should_continue = true; } else { diff --git a/crates/services/txpool/src/service/test_helpers.rs b/crates/services/txpool/src/service/test_helpers.rs index decaf2f98d1..3cf532bfa8b 100644 --- a/crates/services/txpool/src/service/test_helpers.rs +++ b/crates/services/txpool/src/service/test_helpers.rs @@ -21,7 +21,10 @@ use fuel_core_types::{ TransactionBuilder, Word, }, - services::p2p::GossipsubMessageAcceptance, + services::{ + block_importer::ImportResult, + p2p::GossipsubMessageAcceptance, + }, }; use std::cell::RefCell; @@ -103,7 +106,7 @@ mockall::mock! 
{ pub Importer {} impl BlockImporter for Importer { - fn block_events(&self) -> BoxStream>; + fn block_events(&self) -> BoxStream; } } @@ -115,7 +118,7 @@ impl MockImporter { let stream = fuel_core_services::stream::unfold(blocks, |mut blocks| async { let block = blocks.pop(); if let Some(sealed_block) = block { - let result = + let result: SharedImportResult = Arc::new(ImportResult::new_from_local(sealed_block, vec![])); Some((result, blocks)) diff --git a/crates/services/txpool/src/txpool.rs b/crates/services/txpool/src/txpool.rs index 50c7d2484e0..1c3c0376e8d 100644 --- a/crates/services/txpool/src/txpool.rs +++ b/crates/services/txpool/src/txpool.rs @@ -35,8 +35,16 @@ use fuel_core_types::{ tai64::Tai64, }; +use crate::service::TxStatusMessage; use fuel_core_metrics::txpool_metrics::txpool_metrics; -use fuel_core_types::fuel_vm::checked_transaction::CheckPredicateParams; +use fuel_core_types::{ + blockchain::block::Block, + fuel_vm::checked_transaction::CheckPredicateParams, + services::{ + executor::TransactionExecutionStatus, + txpool::from_executor_to_status, + }, +}; use std::{ cmp::Reverse, collections::HashMap, @@ -315,14 +323,19 @@ where pub fn block_update( &mut self, tx_status_sender: &TxStatusChange, - height: &BlockHeight, - transactions: &[TxId], + block: &Block, + tx_status: &[TransactionExecutionStatus], // spend_outputs: [Input], added_outputs: [AddedOutputs] ) { - for tx_id in transactions { - let tx_id = *tx_id; - let result = self.database.transaction_status(&tx_id); - tx_status_sender.send_complete(tx_id, height, result); + let height = block.header().height(); + for status in tx_status { + let tx_id = status.id; + let status = from_executor_to_status(block, status.result.clone()); + tx_status_sender.send_complete( + tx_id, + height, + TxStatusMessage::Status(status), + ); self.remove_committed_tx(&tx_id); } } diff --git a/crates/storage/src/transactional.rs b/crates/storage/src/transactional.rs index d44041113b2..854557bd117 100644 --- 
a/crates/storage/src/transactional.rs +++ b/crates/storage/src/transactional.rs @@ -1,6 +1,7 @@ //! The primitives to work with storage in transactional mode. use crate::Result as StorageResult; +use fuel_core_types::fuel_types::BlockHeight; #[cfg_attr(feature = "test-helpers", mockall::automock(type Storage = crate::test_helpers::EmptyStorage;))] /// The types is transactional and may create `StorageTransaction`. @@ -75,3 +76,13 @@ impl StorageTransaction { self.transaction.commit() } } + +/// Provides a view of the storage at the given height. +/// It guarantees to be atomic, meaning the view is immutable to outside modifications. +pub trait AtomicView: Send + Sync { + /// Returns the view of the storage at the given `height`. + fn view_at(&self, height: BlockHeight) -> StorageResult; + + /// Returns the view of the storage for the latest block height. + fn latest_view(&self) -> View; +} diff --git a/crates/types/src/services/block_importer.rs b/crates/types/src/services/block_importer.rs index 494abb8b572..276a305b960 100644 --- a/crates/types/src/services/block_importer.rs +++ b/crates/types/src/services/block_importer.rs @@ -10,11 +10,16 @@ use crate::{ Uncommitted, }, }; +use core::ops::Deref; +use std::sync::Arc; /// The uncommitted result of the block importing. pub type UncommittedResult = Uncommitted; +/// The alias for the `ImportResult` that can be shared between threads. +pub type SharedImportResult = Arc + Send + Sync>; + /// The result of the block import. #[derive(Debug)] #[cfg_attr(any(test, feature = "test-helpers"), derive(Default))] @@ -27,6 +32,14 @@ pub struct ImportResult { pub source: Source, } +impl Deref for ImportResult { + type Target = Self; + + fn deref(&self) -> &Self::Target { + self + } +} + /// The source producer of the block. 
#[derive(Debug, Clone, Copy, PartialEq, Default)] pub enum Source { @@ -87,8 +100,8 @@ impl BlockImportInfo { } } -impl From<&ImportResult> for BlockImportInfo { - fn from(result: &ImportResult) -> Self { +impl From for BlockImportInfo { + fn from(result: SharedImportResult) -> Self { Self { block_header: result.sealed_block.entity.header().clone(), source: result.source, diff --git a/crates/types/src/services/executor.rs b/crates/types/src/services/executor.rs index 8f48c815e79..a51c5b564d5 100644 --- a/crates/types/src/services/executor.rs +++ b/crates/types/src/services/executor.rs @@ -9,6 +9,7 @@ use crate::{ primitives::BlockId, }, fuel_tx::{ + Receipt, TxId, UtxoId, ValidityError, @@ -53,6 +54,8 @@ pub struct TransactionExecutionStatus { pub id: Bytes32, /// The result of the executed transaction. pub result: TransactionExecutionResult, + /// The receipts generated by the executed transaction. + pub receipts: Vec, } /// The result of transaction execution. diff --git a/crates/types/src/services/txpool.rs b/crates/types/src/services/txpool.rs index c323761ec82..4cc483e6c7b 100644 --- a/crates/types/src/services/txpool.rs +++ b/crates/types/src/services/txpool.rs @@ -1,7 +1,10 @@ //! Types for interoperability with the txpool service use crate::{ - blockchain::primitives::BlockId, + blockchain::{ + block::Block, + primitives::BlockId, + }, fuel_asm::Word, fuel_tx::{ field::{ @@ -27,6 +30,7 @@ use crate::{ checked_transaction::Checked, ProgramState, }, + services::executor::TransactionExecutionResult, }; use fuel_vm_private::checked_transaction::CheckedTransaction; use std::{ @@ -199,6 +203,30 @@ pub enum TransactionStatus { }, } +/// Converts the transaction execution result to the transaction status. 
+pub fn from_executor_to_status( + block: &Block, + result: TransactionExecutionResult, +) -> TransactionStatus { + let time = block.header().time(); + let block_id = block.id(); + match result { + TransactionExecutionResult::Success { result } => TransactionStatus::Success { + block_id, + time, + result, + }, + TransactionExecutionResult::Failed { result, reason } => { + TransactionStatus::Failed { + block_id, + time, + result, + reason: reason.clone(), + } + } + } +} + #[allow(missing_docs)] #[derive(thiserror::Error, Debug, PartialEq, Eq, Clone)] #[non_exhaustive] From bd60793f0a5451676a4bfea01ee42787c8a5bd4b Mon Sep 17 00:00:00 2001 From: xgreenx Date: Fri, 5 Jan 2024 15:48:10 +0100 Subject: [PATCH 13/28] Added comments and linked todos --- crates/fuel-core/src/graphql_api/database.rs | 11 +++++++++++ crates/fuel-core/src/graphql_api/worker_service.rs | 7 +++++++ crates/fuel-core/src/query/coin.rs | 2 +- crates/fuel-core/src/query/tx.rs | 2 +- .../src/service/adapters/graphql_api/off_chain.rs | 1 + .../src/service/adapters/graphql_api/on_chain.rs | 1 + crates/fuel-core/src/service/adapters/sync.rs | 1 - crates/storage/src/tables.rs | 1 + 8 files changed, 23 insertions(+), 3 deletions(-) diff --git a/crates/fuel-core/src/graphql_api/database.rs b/crates/fuel-core/src/graphql_api/database.rs index 175f0610f18..5aa4ba0f975 100644 --- a/crates/fuel-core/src/graphql_api/database.rs +++ b/crates/fuel-core/src/graphql_api/database.rs @@ -52,15 +52,22 @@ use std::{ sync::Arc, }; +/// The on-chain view of the database used by the [`ReadView`] to fetch on-chain data. pub type OnChainView = Arc; +/// The off-chain view of the database used by the [`ReadView`] to fetch off-chain data. pub type OffChainView = Arc; +/// The container of the on-chain and off-chain database view provides. +/// It is used only by [`ViewExtension`](super::view_extension::ViewExtension) to create a [`ReadView`]. pub struct ReadDatabase { + /// The on-chain database view provider. 
on_chain: Box>, + /// The off-chain database view provider. off_chain: Box>, } impl ReadDatabase { + /// Creates a new [`ReadDatabase`] with the given on-chain and off-chain database view providers. pub fn new(on_chain: OnChain, off_chain: OffChain) -> Self where OnChain: AtomicView + 'static, @@ -72,7 +79,11 @@ impl ReadDatabase { } } + /// Creates a consistent view of the database. pub fn view(&self) -> ReadView { + // TODO: Use the same height for both views to guarantee consistency. + // It is not possible to implement until `view_at` is implemented for the `AtomicView`. + // https://github.com/FuelLabs/fuel-core/issues/1582 ReadView { on_chain: self.on_chain.latest_view(), off_chain: self.off_chain.latest_view(), diff --git a/crates/fuel-core/src/graphql_api/worker_service.rs b/crates/fuel-core/src/graphql_api/worker_service.rs index fe904d7f7d8..22f54719227 100644 --- a/crates/fuel-core/src/graphql_api/worker_service.rs +++ b/crates/fuel-core/src/graphql_api/worker_service.rs @@ -48,6 +48,8 @@ use futures::{ StreamExt, }; +/// The off-chain GraphQL API worker task processes the imported blocks +/// and actualize the information used by the GraphQL service. pub struct Task { block_importer: BoxStream, database: D, @@ -58,6 +60,9 @@ where D: ports::worker::OffChainDatabase, { fn process_block(&mut self, result: SharedImportResult) -> anyhow::Result<()> { + // TODO: Implement the creation of indexes for the messages and coins. + // Implement table `BlockId -> BlockHeight` to get the block height by block id. + // https://github.com/FuelLabs/fuel-core/issues/1583 let mut transaction = self.database.transaction(); // save the status for every transaction using the finalized block id self.persist_transaction_status(&result, transaction.as_mut())?; @@ -219,6 +224,7 @@ where // to actualize the database without executing the block at the previous state // of the blockchain. 
When `AtomicView::view_at` is implemented, we can // process all missed blocks and actualize the database here. + // https://github.com/FuelLabs/fuel-core/issues/1584 Ok(self) } } @@ -251,6 +257,7 @@ where } async fn shutdown(mut self) -> anyhow::Result<()> { + // Process all remaining blocks before shutdown to not lose any data. loop { let result = self.block_importer.next().now_or_never(); diff --git a/crates/fuel-core/src/query/coin.rs b/crates/fuel-core/src/query/coin.rs index 427379a728b..171a88168bd 100644 --- a/crates/fuel-core/src/query/coin.rs +++ b/crates/fuel-core/src/query/coin.rs @@ -37,7 +37,7 @@ pub trait CoinQueryData: Send + Sync { ) -> BoxedIter>; } -impl CoinQueryData for D { +impl CoinQueryData for D { fn coin(&self, utxo_id: UtxoId) -> StorageResult { let coin = self .storage::() diff --git a/crates/fuel-core/src/query/tx.rs b/crates/fuel-core/src/query/tx.rs index 09994be55fe..ebc2531f27f 100644 --- a/crates/fuel-core/src/query/tx.rs +++ b/crates/fuel-core/src/query/tx.rs @@ -37,7 +37,7 @@ pub trait SimpleTransactionData: Send + Sync { impl SimpleTransactionData for D where - D: OffChainDatabase + OnChainDatabase + ?Sized, + D: OnChainDatabase + OffChainDatabase + ?Sized, { fn transaction(&self, tx_id: &TxId) -> StorageResult { self.storage::() diff --git a/crates/fuel-core/src/service/adapters/graphql_api/off_chain.rs b/crates/fuel-core/src/service/adapters/graphql_api/off_chain.rs index ba23d77bd5a..86fc7002a02 100644 --- a/crates/fuel-core/src/service/adapters/graphql_api/off_chain.rs +++ b/crates/fuel-core/src/service/adapters/graphql_api/off_chain.rs @@ -91,6 +91,7 @@ impl AtomicView for Database { } fn latest_view(&self) -> OffChainView { + // TODO: https://github.com/FuelLabs/fuel-core/issues/1581 Arc::new(self.clone()) } } diff --git a/crates/fuel-core/src/service/adapters/graphql_api/on_chain.rs b/crates/fuel-core/src/service/adapters/graphql_api/on_chain.rs index d0bcaf3eeb2..dd9c9937ffa 100644 --- 
a/crates/fuel-core/src/service/adapters/graphql_api/on_chain.rs +++ b/crates/fuel-core/src/service/adapters/graphql_api/on_chain.rs @@ -134,6 +134,7 @@ impl AtomicView for Database { } fn latest_view(&self) -> OnChainView { + // TODO: https://github.com/FuelLabs/fuel-core/issues/1581 Arc::new(self.clone()) } } diff --git a/crates/fuel-core/src/service/adapters/sync.rs b/crates/fuel-core/src/service/adapters/sync.rs index ddbcd2ed6d0..1b63c8c25e1 100644 --- a/crates/fuel-core/src/service/adapters/sync.rs +++ b/crates/fuel-core/src/service/adapters/sync.rs @@ -137,7 +137,6 @@ impl BlockImporterPort for BlockImporterAdapter { }), ) } - async fn execute_and_commit(&self, block: SealedBlock) -> anyhow::Result<()> { self.execute_and_commit(block).await } diff --git a/crates/storage/src/tables.rs b/crates/storage/src/tables.rs index 27f5cb2fb23..5e7762ea762 100644 --- a/crates/storage/src/tables.rs +++ b/crates/storage/src/tables.rs @@ -40,6 +40,7 @@ impl Mappable for FuelBlocks { /// Unique identifier of the fuel block. type Key = Self::OwnedKey; // TODO: Seems it would be faster to use `BlockHeight` as primary key. + // https://github.com/FuelLabs/fuel-core/issues/1580. type OwnedKey = BlockId; type Value = Self::OwnedValue; type OwnedValue = CompressedBlock; From 8221443f1e4418b7419afaa27343ab7614a8adb7 Mon Sep 17 00:00:00 2001 From: xgreenx Date: Fri, 5 Jan 2024 15:49:34 +0100 Subject: [PATCH 14/28] Updated CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5870b438e50..2227d4e107f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,7 @@ Description of the upcoming release here. ### Changed +- [#1579](https://github.com/FuelLabs/fuel-core/pull/1579): The change extracts the off-chain-related logic from the executor and moves it to the GraphQL off-chain worker. 
It creates two new concepts - Off-chain and On-chain databases where the GraphQL worker has exclusive ownership of the database and may modify it without intersecting with the On-chain database. - [#1577](https://github.com/FuelLabs/fuel-core/pull/1577): Moved insertion of sealed blocks into the `BlockImporter` instead of the executor. ## [Version 0.22.0] From b0ed3e9704c30b7c9e365dcb57ca015a5158fe2e Mon Sep 17 00:00:00 2001 From: xgreenx Date: Sat, 6 Jan 2024 03:06:35 +0100 Subject: [PATCH 15/28] Fixed compilation --- Cargo.lock | 4 ++-- crates/fuel-core/src/database/storage.rs | 2 ++ crates/storage/src/column.rs | 20 ++++++++++--------- .../src/structured_storage/transactions.rs | 20 ++++++++++++++++++- 4 files changed, 34 insertions(+), 12 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 304799815b3..beb61aef6b4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4007,7 +4007,7 @@ dependencies = [ "autocfg", "impl-tools-lib", "proc-macro-error", - "syn 2.0.42", + "syn 2.0.43", ] [[package]] @@ -4019,7 +4019,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.42", + "syn 2.0.43", ] [[package]] diff --git a/crates/fuel-core/src/database/storage.rs b/crates/fuel-core/src/database/storage.rs index 84900a79b0b..e63a64323d5 100644 --- a/crates/fuel-core/src/database/storage.rs +++ b/crates/fuel-core/src/database/storage.rs @@ -27,6 +27,7 @@ use fuel_core_storage::{ ContractsLatestUtxo, ContractsRawCode, ContractsState, + ProcessedTransactions, Receipts, SealedBlockConsensus, SpentMessages, @@ -75,6 +76,7 @@ use_structured_implementation!( SpentMessages, SealedBlockConsensus, Transactions, + ProcessedTransactions, Receipts, ContractsStateMerkleMetadata, ContractsStateMerkleData, diff --git a/crates/storage/src/column.rs b/crates/storage/src/column.rs index 0e9605ab399..70813e0de7d 100644 --- a/crates/storage/src/column.rs +++ b/crates/storage/src/column.rs @@ -82,6 +82,8 @@ column_definition! 
{ ContractsStateMerkleMetadata = 14, /// See [`Messages`](crate::tables::Messages) Messages = 15, + /// See [`ProcessedTransactions`](storage::ProcessedTransactions) + ProcessedTransactions = 16, // TODO: Extract the columns below into a separate enum to not mix // required columns and non-required columns. It will break `MemoryStore` @@ -90,27 +92,27 @@ column_definition! { // Below are the tables used for p2p, block production, starting the node. /// The column id of metadata about the blockchain - Metadata = 16, + Metadata = 17, /// See [`Receipts`](crate::tables::Receipts) - Receipts = 17, + Receipts = 18, /// See [`FuelBlockSecondaryKeyBlockHeights`](storage::FuelBlockSecondaryKeyBlockHeights) - FuelBlockSecondaryKeyBlockHeights = 18, + FuelBlockSecondaryKeyBlockHeights = 19, /// See [`SealedBlockConsensus`](crate::tables::SealedBlockConsensus) - FuelBlockConsensus = 19, + FuelBlockConsensus = 20, /// Metadata for the relayer /// See [`RelayerMetadata`](fuel_core_relayer::ports::RelayerMetadata) - RelayerMetadata = 20, + RelayerMetadata = 21, // Below are not required tables. They are used for API and may be removed or moved to another place in the future. 
/// The column of the table that stores `true` if `owner` owns `Coin` with `coin_id` - OwnedCoins = 21, + OwnedCoins = 22, /// Transaction id to current status - TransactionStatus = 22, + TransactionStatus = 23, /// The column of the table of all `owner`'s transactions - TransactionsByOwnerBlockIdx = 23, + TransactionsByOwnerBlockIdx = 24, /// The column of the table that stores `true` if `owner` owns `Message` with `message_id` - OwnedMessageIds = 24, + OwnedMessageIds = 25, } } diff --git a/crates/storage/src/structured_storage/transactions.rs b/crates/storage/src/structured_storage/transactions.rs index 1b9e99131a1..c2cc0affcd4 100644 --- a/crates/storage/src/structured_storage/transactions.rs +++ b/crates/storage/src/structured_storage/transactions.rs @@ -8,7 +8,10 @@ use crate::{ column::Column, structure::plain::Plain, structured_storage::TableWithStructure, - tables::Transactions, + tables::{ + ProcessedTransactions, + Transactions, + }, }; impl TableWithStructure for Transactions { @@ -25,3 +28,18 @@ crate::basic_storage_tests!( ::Key::from([1u8; 32]), ::Value::default() ); + +impl TableWithStructure for ProcessedTransactions { + type Structure = Plain; + + fn column() -> Column { + Column::ProcessedTransactions + } +} + +#[cfg(test)] +crate::basic_storage_tests!( + ProcessedTransactions, + ::Key::from([1u8; 32]), + ::Value::default() +); From d2b55045d691ba80ac6a163f8249a11c696aa596 Mon Sep 17 00:00:00 2001 From: xgreenx Date: Sat, 6 Jan 2024 23:36:25 +0100 Subject: [PATCH 16/28] Use `BlockHeight` as a primary key for the `FuelsBlock` table --- crates/fuel-core/src/database.rs | 73 ++++++----- crates/fuel-core/src/database/block.rs | 116 +++++++++--------- crates/fuel-core/src/database/coin.rs | 6 +- crates/fuel-core/src/database/contracts.rs | 6 +- crates/fuel-core/src/database/message.rs | 14 +-- crates/fuel-core/src/database/sealed_block.rs | 56 +++------ crates/fuel-core/src/database/transactions.rs | 22 ++-- crates/fuel-core/src/graphql_api/ports.rs | 
19 +-- crates/fuel-core/src/query/block.rs | 42 +++---- crates/fuel-core/src/query/message.rs | 12 +- crates/fuel-core/src/query/message/test.rs | 35 ++++-- crates/fuel-core/src/schema/block.rs | 14 ++- crates/fuel-core/src/schema/dap.rs | 3 +- crates/fuel-core/src/schema/message.rs | 11 +- crates/fuel-core/src/schema/tx/types.rs | 6 +- .../src/service/adapters/block_importer.rs | 13 +- .../src/service/adapters/graphql_api.rs | 33 ++--- crates/fuel-core/src/service/adapters/p2p.rs | 2 +- .../src/service/adapters/producer.rs | 3 +- crates/fuel-core/src/service/genesis.rs | 7 +- .../storage/src/structured_storage/blocks.rs | 4 +- .../src/structured_storage/sealed_block.rs | 6 +- crates/storage/src/tables.rs | 7 +- tests/tests/blocks.rs | 8 +- tests/tests/poa.rs | 8 +- 25 files changed, 265 insertions(+), 261 deletions(-) diff --git a/crates/fuel-core/src/database.rs b/crates/fuel-core/src/database.rs index eef1bd5bb95..57f4d3f6465 100644 --- a/crates/fuel-core/src/database.rs +++ b/crates/fuel-core/src/database.rs @@ -12,7 +12,11 @@ use fuel_core_chain_config::{ MessageConfig, }; use fuel_core_storage::{ - codec::Decode, + codec::{ + Decode, + Encode, + Encoder, + }, iter::IterDirection, kv_store::{ BatchOperations, @@ -253,7 +257,7 @@ impl BatchOperations for DataSource { /// Read-only methods. impl Database { - fn iter_all( + pub(crate) fn iter_all( &self, direction: Option, ) -> impl Iterator> + '_ @@ -261,10 +265,10 @@ impl Database { M: Mappable + TableWithStructure, M::Structure: Structure, { - self.iter_all_filtered::, Vec>(None, None, direction) + self.iter_all_filtered::(None, None, direction) } - fn iter_all_by_prefix( + pub(crate) fn iter_all_by_prefix( &self, prefix: Option

, ) -> impl Iterator> + '_ @@ -273,57 +277,64 @@ impl Database { M::Structure: Structure, P: AsRef<[u8]>, { - self.iter_all_filtered::(prefix, None, None) + self.iter_all_filtered::(prefix, None, None) } - fn iter_all_by_start( + pub(crate) fn iter_all_by_start( &self, - start: Option, + start: Option<&M::Key>, direction: Option, ) -> impl Iterator> + '_ where M: Mappable + TableWithStructure, M::Structure: Structure, - S: AsRef<[u8]>, { - self.iter_all_filtered::(None, start, direction) + self.iter_all_filtered::(None, start, direction) } - fn iter_all_filtered( + pub(crate) fn iter_all_filtered( &self, prefix: Option

, - start: Option, + start: Option<&M::Key>, direction: Option, ) -> impl Iterator> + '_ where M: Mappable + TableWithStructure, M::Structure: Structure, P: AsRef<[u8]>, - S: AsRef<[u8]>, { - self.data - .as_ref() - .iter_all( + let iter = if let Some(start) = start { + let encoder = + >::KeyCodec::encode(start); + + self.data.as_ref().iter_all( M::column(), prefix.as_ref().map(|p| p.as_ref()), - start.as_ref().map(|s| s.as_ref()), + Some(encoder.as_bytes().as_ref()), direction.unwrap_or_default(), ) - .map(|val| { - val.and_then(|(key, value)| { - let key = - >::KeyCodec::decode( - key.as_slice(), - ) - .map_err(|e| StorageError::Codec(anyhow::anyhow!(e)))?; - let value = - >::ValueCodec::decode( - value.as_slice(), - ) - .map_err(|e| StorageError::Codec(anyhow::anyhow!(e)))?; - Ok((key, value)) - }) + } else { + self.data.as_ref().iter_all( + M::column(), + prefix.as_ref().map(|p| p.as_ref()), + None, + direction.unwrap_or_default(), + ) + }; + iter.map(|val| { + val.and_then(|(key, value)| { + let key = >::KeyCodec::decode( + key.as_slice(), + ) + .map_err(|e| StorageError::Codec(anyhow::anyhow!(e)))?; + let value = + >::ValueCodec::decode( + value.as_slice(), + ) + .map_err(|e| StorageError::Codec(anyhow::anyhow!(e)))?; + Ok((key, value)) }) + }) } } @@ -379,7 +390,7 @@ impl ChainConfigDb for Database { } fn get_block_height(&self) -> StorageResult { - Self::latest_height(self) + self.latest_height() } } diff --git a/crates/fuel-core/src/database/block.rs b/crates/fuel-core/src/database/block.rs index 5e3e04469df..21f9c8f4664 100644 --- a/crates/fuel-core/src/database/block.rs +++ b/crates/fuel-core/src/database/block.rs @@ -1,7 +1,6 @@ use crate::database::{ Column, Database, - Error as DatabaseError, }; use fuel_core_storage::{ codec::{ @@ -49,21 +48,21 @@ use std::borrow::{ Cow, }; -/// The table of fuel block's secondary key - `BlockHeight`. -/// It links the `BlockHeight` to corresponding `BlockId`. +/// The table of fuel block's secondary key - `BlockId`. 
+/// It links the `BlockId` to corresponding `BlockHeight`. pub struct FuelBlockSecondaryKeyBlockHeights; impl Mappable for FuelBlockSecondaryKeyBlockHeights { - /// Secondary key - `BlockHeight`. - type Key = BlockHeight; - type OwnedKey = Self::Key; /// Primary key - `BlockId`. - type Value = BlockId; + type Key = BlockId; + type OwnedKey = Self::Key; + /// Secondary key - `BlockHeight`. + type Value = BlockHeight; type OwnedValue = Self::Value; } impl TableWithStructure for FuelBlockSecondaryKeyBlockHeights { - type Structure = Plain, Raw>; + type Structure = Plain>; fn column() -> Column { Column::FuelBlockSecondaryKeyBlockHeights @@ -80,11 +79,17 @@ fuel_core_storage::basic_storage_tests!( impl StorageInspect for Database { type Error = StorageError; - fn get(&self, key: &BlockId) -> Result>, Self::Error> { + fn get( + &self, + key: &::Key, + ) -> Result::OwnedValue>>, Self::Error> { self.data.storage::().get(key) } - fn contains_key(&self, key: &BlockId) -> Result { + fn contains_key( + &self, + key: &::Key, + ) -> Result { self.data.storage::().contains_key(key) } } @@ -92,17 +97,18 @@ impl StorageInspect for Database { impl StorageMutate for Database { fn insert( &mut self, - key: &BlockId, - value: &CompressedBlock, - ) -> Result, Self::Error> { + key: &::Key, + value: &::Value, + ) -> Result::OwnedValue>, Self::Error> { let prev = self .data .storage_as_mut::() .insert(key, value)?; let height = value.header().height(); + let block_id = value.id(); self.storage::() - .insert(height, key)?; + .insert(&block_id, key)?; // Get latest metadata entry let prev_metadata = self @@ -116,8 +122,7 @@ impl StorageMutate for Database { let mut tree: MerkleTree = MerkleTree::load(storage, prev_metadata.version) .map_err(|err| StorageError::Other(anyhow::anyhow!(err)))?; - let data = key.as_slice(); - tree.push(data)?; + tree.push(block_id.as_slice())?; // Generate new metadata for the updated tree let version = tree.leaves_count(); @@ -129,7 +134,10 @@ impl 
StorageMutate for Database { Ok(prev) } - fn remove(&mut self, key: &BlockId) -> Result, Self::Error> { + fn remove( + &mut self, + key: &::Key, + ) -> Result::OwnedValue>, Self::Error> { let prev: Option = self.data.storage_as_mut::().remove(key)?; @@ -137,7 +145,7 @@ impl StorageMutate for Database { let height = block.header().height(); let _ = self .storage::() - .remove(height); + .remove(&block.id()); // We can't clean up `MerkleTree`. // But if we plan to insert a new block, it will override old values in the // `FuelBlockMerkleData` table. @@ -150,68 +158,56 @@ impl StorageMutate for Database { impl Database { pub fn latest_height(&self) -> StorageResult { - self.ids_of_latest_block()? - .map(|(height, _)| height) - .ok_or(not_found!("BlockHeight")) + let pair = self + .iter_all::(Some(IterDirection::Reverse)) + .next() + .transpose()?; + + let (block_height, _) = pair.ok_or(not_found!("BlockHeight"))?; + + Ok(block_height) + } + + pub fn latest_compressed_block(&self) -> StorageResult> { + let pair = self + .iter_all::(Some(IterDirection::Reverse)) + .next() + .transpose()?; + + Ok(pair.map(|(_, compressed_block)| compressed_block)) } /// Get the current block at the head of the chain. - pub fn get_current_block(&self) -> StorageResult>> { - let block_ids = self.ids_of_latest_block()?; - match block_ids { - Some((_, id)) => Ok(StorageAsRef::storage::(self).get(&id)?), - None => Ok(None), - } + pub fn get_current_block(&self) -> StorageResult> { + self.latest_compressed_block() } pub fn block_time(&self, height: &BlockHeight) -> StorageResult { - let id = self.get_block_id(height)?.unwrap_or_default(); let block = self .storage::() - .get(&id)? + .get(height)? 
.ok_or(not_found!(FuelBlocks))?; Ok(block.header().time().to_owned()) } pub fn get_block_id(&self, height: &BlockHeight) -> StorageResult> { - self.storage::() + self.storage::() .get(height) - .map(|v| v.map(|v| v.into_owned())) + .map(|v| v.map(|v| v.id())) } - pub fn all_block_ids( - &self, - start: Option, - direction: IterDirection, - ) -> impl Iterator> + '_ { - let start = start.map(|b| b.to_bytes()); - self.iter_all_by_start::( - start, - Some(direction), - ) - } - - pub fn ids_of_genesis_block(&self) -> StorageResult<(BlockHeight, BlockId)> { - self.iter_all::(Some(IterDirection::Forward)) - .next() - .ok_or(DatabaseError::ChainUninitialized)? - } - - pub fn ids_of_latest_block(&self) -> StorageResult> { - let ids = self - .iter_all::(Some(IterDirection::Reverse)) - .next() - .transpose()?; - - Ok(ids) + pub fn get_block_height(&self, id: &BlockId) -> StorageResult> { + self.storage::() + .get(id) + .map(|v| v.map(|v| v.into_owned())) } /// Retrieve the full block and all associated transactions pub(crate) fn get_full_block( &self, - block_id: &BlockId, + height: &BlockHeight, ) -> StorageResult> { - let db_block = self.storage::().get(block_id)?; + let db_block = self.storage::().get(height)?; if let Some(block) = db_block { // fetch all the transactions // TODO: optimize with multi-key get @@ -334,7 +330,7 @@ mod tests { for block in &blocks { StorageMutate::::insert( &mut database, - &block.id(), + block.header().height(), &block.compress(&ChainId::default()), ) .unwrap(); @@ -398,7 +394,7 @@ mod tests { for block in &blocks { StorageMutate::::insert( database, - &block.id(), + block.header().height(), &block.compress(&ChainId::default()), ) .unwrap(); diff --git a/crates/fuel-core/src/database/coin.rs b/crates/fuel-core/src/database/coin.rs index 9a31ad3b07d..506e9f02661 100644 --- a/crates/fuel-core/src/database/coin.rs +++ b/crates/fuel-core/src/database/coin.rs @@ -126,9 +126,9 @@ impl Database { start_coin: Option, direction: Option, ) -> impl 
Iterator> + '_ { - self.iter_all_filtered::( - Some(*owner), - start_coin.map(|b| owner_coin_id_key(owner, &b)), + let start_coin = start_coin.map(|b| owner_coin_id_key(owner, &b)); + self.iter_all_filtered::( + Some(*owner), start_coin.as_ref(), direction, ) // Safety: key is always 64 bytes diff --git a/crates/fuel-core/src/database/contracts.rs b/crates/fuel-core/src/database/contracts.rs index ead374f4653..2dd4418ea51 100644 --- a/crates/fuel-core/src/database/contracts.rs +++ b/crates/fuel-core/src/database/contracts.rs @@ -93,9 +93,11 @@ impl Database { start_asset: Option, direction: Option, ) -> impl Iterator> + '_ { - self.iter_all_filtered::( + let start_asset = + start_asset.map(|asset| ContractsAssetKey::new(&contract, &asset)); + self.iter_all_filtered::( Some(contract), - start_asset.map(|asset_id| ContractsAssetKey::new(&contract, &asset_id)), + start_asset.as_ref(), direction, ) .map(|res| res.map(|(key, balance)| (*key.asset_id(), balance))) diff --git a/crates/fuel-core/src/database/message.rs b/crates/fuel-core/src/database/message.rs index 6c928924994..37e468e842a 100644 --- a/crates/fuel-core/src/database/message.rs +++ b/crates/fuel-core/src/database/message.rs @@ -32,10 +32,7 @@ use fuel_core_types::{ Nonce, }, }; -use std::{ - borrow::Cow, - ops::Deref, -}; +use std::borrow::Cow; fuel_core_types::fuel_vm::double_key!(OwnedMessageKey, Address, address, Nonce, nonce); @@ -120,9 +117,11 @@ impl Database { start_message_id: Option, direction: Option, ) -> impl Iterator> + '_ { - self.iter_all_filtered::( + let start_message_id = + start_message_id.map(|msg_id| OwnedMessageKey::new(owner, &msg_id)); + self.iter_all_filtered::( Some(*owner), - start_message_id.map(|msg_id| OwnedMessageKey::new(owner, &msg_id)), + start_message_id.as_ref(), direction, ) .map(|res| res.map(|(key, _)| *key.nonce())) @@ -133,8 +132,7 @@ impl Database { start: Option, direction: Option, ) -> impl Iterator> + '_ { - let start = start.map(|v| v.deref().to_vec()); - 
self.iter_all_by_start::(start, direction) + self.iter_all_by_start::(start.as_ref(), direction) .map(|res| res.map(|(_, message)| message)) } diff --git a/crates/fuel-core/src/database/sealed_block.rs b/crates/fuel-core/src/database/sealed_block.rs index a1cd34fa668..c7fec5f5d3e 100644 --- a/crates/fuel-core/src/database/sealed_block.rs +++ b/crates/fuel-core/src/database/sealed_block.rs @@ -1,5 +1,6 @@ use crate::database::Database; use fuel_core_storage::{ + iter::IterDirection, not_found, tables::{ FuelBlocks, @@ -15,7 +16,6 @@ use fuel_core_types::{ Genesis, Sealed, }, - primitives::BlockId, SealedBlock, SealedBlockHeader, }, @@ -25,14 +25,15 @@ use fuel_core_types::{ use std::ops::Range; impl Database { - pub fn get_sealed_block_by_id( + /// Returns `SealedBlock` by `height`. + /// Reusable across different trait implementations + pub fn get_sealed_block_by_height( &self, - block_id: &BlockId, + height: &BlockHeight, ) -> StorageResult> { // combine the block and consensus metadata into a sealed fuel block type - - let block = self.get_full_block(block_id)?; - let consensus = self.storage::().get(block_id)?; + let block = self.get_full_block(height)?; + let consensus = self.storage::().get(height)?; if let (Some(block), Some(consensus)) = (block, consensus) { let sealed_block = SealedBlock { @@ -46,51 +47,26 @@ impl Database { } } - /// Returns `SealedBlock` by `height`. - /// Reusable across different trait implementations - pub fn get_sealed_block_by_height( - &self, - height: &BlockHeight, - ) -> StorageResult> { - let block_id = match self.get_block_id(height)? { - Some(i) => i, - None => return Ok(None), - }; - self.get_sealed_block_by_id(&block_id) - } - pub fn get_genesis(&self) -> StorageResult { - let (_, genesis_block_id) = self.ids_of_genesis_block()?; - let consensus = self - .storage::() - .get(&genesis_block_id)? 
- .map(|c| c.into_owned()); + let pair = self + .iter_all::(Some(IterDirection::Forward)) + .next() + .transpose()?; - if let Some(Consensus::Genesis(genesis)) = consensus { + if let Some((_, Consensus::Genesis(genesis))) = pair { Ok(genesis) } else { Err(not_found!(SealedBlockConsensus)) } } - pub fn get_sealed_block_header_by_height( - &self, - height: &BlockHeight, - ) -> StorageResult> { - let block_id = match self.get_block_id(height)? { - Some(i) => i, - None => return Ok(None), - }; - self.get_sealed_block_header(&block_id) - } - pub fn get_sealed_block_headers( &self, block_height_range: Range, ) -> StorageResult> { let headers = block_height_range .map(BlockHeight::from) - .map(|height| self.get_sealed_block_header_by_height(&height)) + .map(|height| self.get_sealed_block_header(&height)) .collect::>>()? .into_iter() .flatten() @@ -100,10 +76,10 @@ impl Database { pub fn get_sealed_block_header( &self, - block_id: &BlockId, + height: &BlockHeight, ) -> StorageResult> { - let header = self.storage::().get(block_id)?; - let consensus = self.storage::().get(block_id)?; + let header = self.storage::().get(height)?; + let consensus = self.storage::().get(height)?; if let (Some(header), Some(consensus)) = (header, consensus) { let sealed_block = SealedBlockHeader { diff --git a/crates/fuel-core/src/database/transactions.rs b/crates/fuel-core/src/database/transactions.rs index 1ac351870c3..46d15caab55 100644 --- a/crates/fuel-core/src/database/transactions.rs +++ b/crates/fuel-core/src/database/transactions.rs @@ -104,8 +104,7 @@ impl Database { start: Option<&Bytes32>, direction: Option, ) -> impl Iterator> + '_ { - let start = start.map(|b| b.as_ref().to_vec()); - self.iter_all_by_start::(start, direction) + self.iter_all_by_start::(start, direction) .map(|res| res.map(|(_, tx)| tx)) } @@ -119,14 +118,17 @@ impl Database { start: Option, direction: Option, ) -> impl Iterator> + '_ { - let start = start - .map(|cursor| owned_tx_index_key(&owner, 
cursor.block_height, cursor.tx_idx)); - self.iter_all_filtered::(Some(owner), start, direction) - .map(|res| { - res.map(|(key, tx_id)| { - (TxPointer::new(key.block_height, key.tx_idx), tx_id) - }) - }) + let start = start.map(|cursor| { + OwnedTransactionIndexKey::new(&owner, cursor.block_height, cursor.tx_idx) + }); + self.iter_all_filtered::( + Some(owner), + start.as_ref(), + direction, + ) + .map(|res| { + res.map(|(key, tx_id)| (TxPointer::new(key.block_height, key.tx_idx), tx_id)) + }) } pub fn record_tx_id_owner( diff --git a/crates/fuel-core/src/graphql_api/ports.rs b/crates/fuel-core/src/graphql_api/ports.rs index b897acb2489..e25497bc63d 100644 --- a/crates/fuel-core/src/graphql_api/ports.rs +++ b/crates/fuel-core/src/graphql_api/ports.rs @@ -23,9 +23,12 @@ use fuel_core_storage::{ }; use fuel_core_txpool::service::TxStatusMessage; use fuel_core_types::{ - blockchain::primitives::{ - BlockId, - DaBlockHeight, + blockchain::{ + block::CompressedBlock, + primitives::{ + BlockId, + DaBlockHeight, + }, }, entities::message::{ MerkleProof, @@ -76,15 +79,15 @@ pub trait DatabaseBlocks: StorageInspect + StorageInspect { - fn block_id(&self, height: &BlockHeight) -> StorageResult; + fn block_height(&self, block_id: &BlockId) -> StorageResult; - fn blocks_ids( + fn blocks( &self, - start: Option, + height: Option, direction: IterDirection, - ) -> BoxedIter<'_, StorageResult<(BlockHeight, BlockId)>>; + ) -> BoxedIter<'_, StorageResult>; - fn ids_of_latest_block(&self) -> StorageResult<(BlockHeight, BlockId)>; + fn latest_height(&self) -> StorageResult; } /// Trait that specifies all the getters required for transactions. 
diff --git a/crates/fuel-core/src/query/block.rs b/crates/fuel-core/src/query/block.rs index 66cba1f941b..108f827a9ea 100644 --- a/crates/fuel-core/src/query/block.rs +++ b/crates/fuel-core/src/query/block.rs @@ -2,7 +2,6 @@ use crate::graphql_api::ports::DatabasePort; use fuel_core_storage::{ iter::{ BoxedIter, - IntoBoxedIter, IterDirection, }, not_found, @@ -23,11 +22,13 @@ use fuel_core_types::{ }; pub trait SimpleBlockData: Send + Sync { - fn block(&self, id: &BlockId) -> StorageResult; + fn block(&self, id: &BlockHeight) -> StorageResult; + + fn block_by_id(&self, id: &BlockId) -> StorageResult; } impl SimpleBlockData for D { - fn block(&self, id: &BlockId) -> StorageResult { + fn block(&self, id: &BlockHeight) -> StorageResult { let block = self .storage::() .get(id)? @@ -36,13 +37,14 @@ impl SimpleBlockData for D { Ok(block) } + + fn block_by_id(&self, id: &BlockId) -> StorageResult { + let height = self.block_height(id)?; + self.block(&height) + } } pub trait BlockQueryData: Send + Sync + SimpleBlockData { - fn block_id(&self, height: &BlockHeight) -> StorageResult; - - fn latest_block_id(&self) -> StorageResult; - fn latest_block_height(&self) -> StorageResult; fn latest_block(&self) -> StorageResult; @@ -53,24 +55,16 @@ pub trait BlockQueryData: Send + Sync + SimpleBlockData { direction: IterDirection, ) -> BoxedIter>; - fn consensus(&self, id: &BlockId) -> StorageResult; + fn consensus(&self, id: &BlockHeight) -> StorageResult; } impl BlockQueryData for D { - fn block_id(&self, height: &BlockHeight) -> StorageResult { - self.block_id(height) - } - - fn latest_block_id(&self) -> StorageResult { - self.ids_of_latest_block().map(|(_, id)| id) - } - fn latest_block_height(&self) -> StorageResult { - self.ids_of_latest_block().map(|(height, _)| height) + self.latest_height() } fn latest_block(&self) -> StorageResult { - self.latest_block_id().and_then(|id| self.block(&id)) + self.block(&self.latest_block_height()?) 
} fn compressed_blocks( @@ -78,18 +72,10 @@ impl BlockQueryData for D { start: Option, direction: IterDirection, ) -> BoxedIter> { - self.blocks_ids(start.map(Into::into), direction) - .map(|result| { - result.and_then(|(_, id)| { - let block = self.block(&id)?; - - Ok(block) - }) - }) - .into_boxed() + self.blocks(start, direction) } - fn consensus(&self, id: &BlockId) -> StorageResult { + fn consensus(&self, id: &BlockHeight) -> StorageResult { self.storage::() .get(id) .map(|c| c.map(|c| c.into_owned()))? diff --git a/crates/fuel-core/src/query/message.rs b/crates/fuel-core/src/query/message.rs index b1ce17e4bb9..1d7f79f3743 100644 --- a/crates/fuel-core/src/query/message.rs +++ b/crates/fuel-core/src/query/message.rs @@ -26,10 +26,7 @@ use fuel_core_storage::{ StorageAsRef, }; use fuel_core_types::{ - blockchain::{ - block::CompressedBlock, - primitives::BlockId, - }, + blockchain::block::CompressedBlock, entities::message::{ MerkleProof, Message, @@ -44,6 +41,7 @@ use fuel_core_types::{ }, fuel_types::{ Address, + BlockHeight, Bytes32, MessageId, Nonce, @@ -143,7 +141,7 @@ pub fn message_proof( database: &T, transaction_id: Bytes32, desired_nonce: Nonce, - commit_block_id: BlockId, + commit_block_height: BlockHeight, ) -> StorageResult> { // Check if the receipts for this transaction actually contain this message id or exit. let receipt = database @@ -181,7 +179,7 @@ pub fn message_proof( // Get the message fuel block header. let (message_block_header, message_block_txs) = match database - .block(&message_block_id) + .block_by_id(&message_block_id) .into_api_result::()? { Some(t) => t.into_inner(), @@ -198,7 +196,7 @@ pub fn message_proof( // Get the commit fuel block header. let commit_block_header = match database - .block(&commit_block_id) + .block(&commit_block_height) .into_api_result::()? 
{ Some(t) => t.into_inner().0, diff --git a/crates/fuel-core/src/query/message/test.rs b/crates/fuel-core/src/query/message/test.rs index e8ca628066f..aa8415cfa35 100644 --- a/crates/fuel-core/src/query/message/test.rs +++ b/crates/fuel-core/src/query/message/test.rs @@ -1,10 +1,13 @@ use std::ops::Deref; use fuel_core_types::{ - blockchain::header::{ - ApplicationHeader, - ConsensusHeader, - PartialBlockHeader, + blockchain::{ + header::{ + ApplicationHeader, + ConsensusHeader, + PartialBlockHeader, + }, + primitives::BlockId, }, entities::message::MerkleProof, fuel_tx::{ @@ -59,7 +62,8 @@ fn receipt(i: Option) -> Receipt { mockall::mock! { pub ProofDataStorage {} impl SimpleBlockData for ProofDataStorage { - fn block(&self, block_id: &BlockId) -> StorageResult; + fn block(&self, height: &BlockHeight) -> StorageResult; + fn block_by_id(&self, id: &BlockId) -> StorageResult; } impl DatabaseMessageProof for ProofDataStorage { @@ -182,16 +186,25 @@ async fn can_build_message_proof() { }) }); - data.expect_block().times(2).returning({ + data.expect_block().times(1).returning({ let commit_block = commit_block.clone(); + move |block_height| { + let block = if commit_block.header().height() == block_height { + commit_block.clone() + } else { + panic!("Shouldn't request any other block") + }; + Ok(block) + } + }); + + data.expect_block_by_id().times(1).returning({ let message_block = message_block.clone(); move |block_id| { - let block = if &commit_block.id() == block_id { - commit_block.clone() - } else if &message_block.id() == block_id { + let block = if &message_block.id() == block_id { message_block.clone() } else { - panic!("Should request any other block") + panic!("Shouldn't request any other block") }; Ok(block) } @@ -203,7 +216,7 @@ async fn can_build_message_proof() { data.deref(), transaction_id, nonce.to_owned(), - commit_block.id(), + *commit_block.header().height(), ) .unwrap() .unwrap(); diff --git a/crates/fuel-core/src/schema/block.rs 
b/crates/fuel-core/src/schema/block.rs index 5d503f281bc..6c75459726e 100644 --- a/crates/fuel-core/src/schema/block.rs +++ b/crates/fuel-core/src/schema/block.rs @@ -97,8 +97,8 @@ impl Block { async fn consensus(&self, ctx: &Context<'_>) -> async_graphql::Result { let query: &Database = ctx.data_unchecked(); - let id = self.0.header().id(); - let consensus = query.consensus(&id)?; + let height = self.0.header().height(); + let consensus = query.consensus(height)?; Ok(consensus.into()) } @@ -193,23 +193,25 @@ impl BlockQuery { #[graphql(desc = "Height of the block")] height: Option, ) -> async_graphql::Result> { let data: &Database = ctx.data_unchecked(); - let id = match (id, height) { + let height = match (id, height) { (Some(_), Some(_)) => { return Err(async_graphql::Error::new( "Can't provide both an id and a height", )) } - (Some(id), None) => Ok(id.0.into()), + (Some(id), None) => data.block_height(&id.0.into()), (None, Some(height)) => { let height: u32 = height.into(); - data.block_id(&height.into()) + Ok(height.into()) } (None, None) => { return Err(async_graphql::Error::new("Missing either id or height")) } }; - id.and_then(|id| data.block(&id)).into_api_result() + height + .and_then(|height| data.block(&height)) + .into_api_result() } async fn blocks( diff --git a/crates/fuel-core/src/schema/dap.rs b/crates/fuel-core/src/schema/dap.rs index 8283336e640..832d92a1339 100644 --- a/crates/fuel-core/src/schema/dap.rs +++ b/crates/fuel-core/src/schema/dap.rs @@ -159,8 +159,7 @@ impl ConcreteStorage { fn vm_database(storage: &DatabaseTransaction) -> anyhow::Result> { let block = storage .get_current_block()? - .ok_or(not_found!("Block for VMDatabase"))? 
- .into_owned(); + .ok_or(not_found!("Block for VMDatabase"))?; let vm_database = VmStorage::new( storage.as_ref().clone(), diff --git a/crates/fuel-core/src/schema/message.rs b/crates/fuel-core/src/schema/message.rs index 75707190e22..d0cbeb75959 100644 --- a/crates/fuel-core/src/schema/message.rs +++ b/crates/fuel-core/src/schema/message.rs @@ -115,11 +115,12 @@ impl MessageQuery { commit_block_height: Option, ) -> async_graphql::Result> { let data: &Database = ctx.data_unchecked(); - let block_id = match (commit_block_id, commit_block_height) { - (Some(commit_block_id), None) => commit_block_id.0.into(), + let height = match (commit_block_id, commit_block_height) { + (Some(commit_block_id), None) => { + data.block_height(&commit_block_id.0.into())? + }, (None, Some(commit_block_height)) => { - let block_height = commit_block_height.0.into(); - data.block_id(&block_height)? + commit_block_height.0.into() } _ => Err(anyhow::anyhow!( "Either `commit_block_id` or `commit_block_height` must be provided exclusively" @@ -130,7 +131,7 @@ impl MessageQuery { data.deref(), transaction_id.into(), nonce.into(), - block_id, + height, )? 
.map(MessageProof)) } diff --git a/crates/fuel-core/src/schema/tx/types.rs b/crates/fuel-core/src/schema/tx/types.rs index 41b06f5cb3c..74f6e69ec22 100644 --- a/crates/fuel-core/src/schema/tx/types.rs +++ b/crates/fuel-core/src/schema/tx/types.rs @@ -161,7 +161,8 @@ impl SuccessStatus { async fn block(&self, ctx: &Context<'_>) -> async_graphql::Result { let query: &Database = ctx.data_unchecked(); - let block = query.block(&self.block_id)?; + let height = query.block_height(&self.block_id)?; + let block = query.block(&height)?; Ok(block.into()) } @@ -202,7 +203,8 @@ impl FailureStatus { async fn block(&self, ctx: &Context<'_>) -> async_graphql::Result { let query: &Database = ctx.data_unchecked(); - let block = query.block(&self.block_id)?; + let height = query.block_height(&self.block_id)?; + let block = query.block(&height)?; Ok(block.into()) } diff --git a/crates/fuel-core/src/service/adapters/block_importer.rs b/crates/fuel-core/src/service/adapters/block_importer.rs index 89627483c8d..b1f4cb307bc 100644 --- a/crates/fuel-core/src/service/adapters/block_importer.rs +++ b/crates/fuel-core/src/service/adapters/block_importer.rs @@ -18,6 +18,7 @@ use fuel_core_importer::{ }; use fuel_core_poa::ports::RelayerPort; use fuel_core_storage::{ + iter::IterDirection, tables::{ FuelBlocks, SealedBlockConsensus, @@ -121,7 +122,11 @@ impl RelayerPort for MaybeRelayerAdapter { impl ImporterDatabase for Database { fn latest_block_height(&self) -> StorageResult> { - Ok(self.ids_of_latest_block()?.map(|(height, _)| height)) + Ok(self + .iter_all::(Some(IterDirection::Reverse)) + .next() + .transpose()? 
+ .map(|(height, _)| height)) } fn increase_tx_count(&self, new_txs_count: u64) -> StorageResult { @@ -135,14 +140,14 @@ impl ExecutorDatabase for Database { chain_id: &ChainId, block: &SealedBlock, ) -> StorageResult { - let block_id = block.entity.id(); + let height = block.entity.header().height(); let mut found = self .storage::() - .insert(&block_id, &block.entity.compress(chain_id))? + .insert(height, &block.entity.compress(chain_id))? .is_some(); found |= self .storage::() - .insert(&block_id, &block.consensus)? + .insert(height, &block.consensus)? .is_some(); // TODO: Use `batch_insert` from https://github.com/FuelLabs/fuel-core/pull/1576 diff --git a/crates/fuel-core/src/service/adapters/graphql_api.rs b/crates/fuel-core/src/service/adapters/graphql_api.rs index 4faea60040a..1225437d92c 100644 --- a/crates/fuel-core/src/service/adapters/graphql_api.rs +++ b/crates/fuel-core/src/service/adapters/graphql_api.rs @@ -24,6 +24,7 @@ use crate::{ }, }; use async_trait::async_trait; +use fuel_core_importer::ports::ImporterDatabase; use fuel_core_services::stream::BoxStream; use fuel_core_storage::{ iter::{ @@ -32,6 +33,7 @@ use fuel_core_storage::{ IterDirection, }, not_found, + tables::FuelBlocks, Error as StorageError, Result as StorageResult, }; @@ -43,9 +45,12 @@ use fuel_core_txpool::{ }, }; use fuel_core_types::{ - blockchain::primitives::{ - BlockId, - DaBlockHeight, + blockchain::{ + block::CompressedBlock, + primitives::{ + BlockId, + DaBlockHeight, + }, }, entities::message::{ MerkleProof, @@ -79,25 +84,25 @@ use std::{ }; impl DatabaseBlocks for Database { - fn block_id(&self, height: &BlockHeight) -> StorageResult { - self.get_block_id(height) - .and_then(|height| height.ok_or(not_found!("BlockId"))) + fn block_height(&self, id: &BlockId) -> StorageResult { + self.get_block_height(id) + .and_then(|height| height.ok_or(not_found!("BlockHeight"))) } - fn blocks_ids( + fn blocks( &self, - start: Option, + height: Option, direction: IterDirection, - ) -> 
BoxedIter<'_, StorageResult<(BlockHeight, BlockId)>> { - self.all_block_ids(start, direction) - .map(|result| result.map_err(StorageError::from)) + ) -> BoxedIter<'_, StorageResult> { + self.iter_all_by_start::(height.as_ref(), Some(direction)) + .map(|result| result.map(|(_, block)| block)) .into_boxed() } - fn ids_of_latest_block(&self) -> StorageResult<(BlockHeight, BlockId)> { - self.ids_of_latest_block() + fn latest_height(&self) -> StorageResult { + self.latest_block_height() .transpose() - .ok_or(not_found!("BlockId"))? + .ok_or(not_found!("BlockHeight"))? } } diff --git a/crates/fuel-core/src/service/adapters/p2p.rs b/crates/fuel-core/src/service/adapters/p2p.rs index aa3e0766d70..35dbac0f918 100644 --- a/crates/fuel-core/src/service/adapters/p2p.rs +++ b/crates/fuel-core/src/service/adapters/p2p.rs @@ -28,7 +28,7 @@ impl P2pDb for Database { &self, height: &BlockHeight, ) -> StorageResult> { - self.get_sealed_block_header_by_height(height) + self.get_sealed_block_header(height) } fn get_sealed_headers( diff --git a/crates/fuel-core/src/service/adapters/producer.rs b/crates/fuel-core/src/service/adapters/producer.rs index f966c48e337..5e5845287ef 100644 --- a/crates/fuel-core/src/service/adapters/producer.rs +++ b/crates/fuel-core/src/service/adapters/producer.rs @@ -135,9 +135,8 @@ impl fuel_core_producer::ports::Relayer for MaybeRelayerAdapter { impl fuel_core_producer::ports::BlockProducerDatabase for Database { fn get_block(&self, height: &BlockHeight) -> StorageResult> { - let id = self.get_block_id(height)?.ok_or(not_found!("BlockId"))?; self.storage::() - .get(&id)? + .get(height)? 
.ok_or(not_found!(FuelBlocks)) } diff --git a/crates/fuel-core/src/service/genesis.rs b/crates/fuel-core/src/service/genesis.rs index 8039f438d12..db8a0ecd874 100644 --- a/crates/fuel-core/src/service/genesis.rs +++ b/crates/fuel-core/src/service/genesis.rs @@ -19,6 +19,7 @@ use fuel_core_storage::{ Messages, }, transactional::Transactional, + IsNotFound, MerkleRoot, StorageAsMut, }; @@ -66,8 +67,10 @@ pub fn maybe_initialize_state( database: &Database, ) -> anyhow::Result<()> { // check if chain is initialized - if database.ids_of_latest_block()?.is_none() { - import_genesis_block(config, database)?; + if let Err(err) = database.get_genesis() { + if err.is_not_found() { + import_genesis_block(config, database)?; + } } Ok(()) diff --git a/crates/storage/src/structured_storage/blocks.rs b/crates/storage/src/structured_storage/blocks.rs index 2b2ba45c32c..7728dec59f8 100644 --- a/crates/storage/src/structured_storage/blocks.rs +++ b/crates/storage/src/structured_storage/blocks.rs @@ -3,7 +3,7 @@ use crate::{ codec::{ postcard::Postcard, - raw::Raw, + primitive::Primitive, }, column::Column, structure::plain::Plain, @@ -12,7 +12,7 @@ use crate::{ }; impl TableWithStructure for FuelBlocks { - type Structure = Plain; + type Structure = Plain, Postcard>; fn column() -> Column { Column::FuelBlocks diff --git a/crates/storage/src/structured_storage/sealed_block.rs b/crates/storage/src/structured_storage/sealed_block.rs index 2c201f7623d..f7b8f165ceb 100644 --- a/crates/storage/src/structured_storage/sealed_block.rs +++ b/crates/storage/src/structured_storage/sealed_block.rs @@ -3,7 +3,7 @@ use crate::{ codec::{ postcard::Postcard, - raw::Raw, + primitive::Primitive, }, column::Column, structure::plain::Plain, @@ -12,7 +12,7 @@ use crate::{ }; impl TableWithStructure for SealedBlockConsensus { - type Structure = Plain; + type Structure = Plain, Postcard>; fn column() -> Column { Column::FuelBlockConsensus @@ -22,6 +22,6 @@ impl TableWithStructure for SealedBlockConsensus { 
#[cfg(test)] crate::basic_storage_tests!( SealedBlockConsensus, - ::Key::from([1u8; 32]), + ::Key::default(), ::Value::default() ); diff --git a/crates/storage/src/tables.rs b/crates/storage/src/tables.rs index 042820c022d..65e2597be36 100644 --- a/crates/storage/src/tables.rs +++ b/crates/storage/src/tables.rs @@ -6,7 +6,6 @@ use fuel_core_types::{ blockchain::{ block::CompressedBlock, consensus::Consensus, - primitives::BlockId, }, entities::{ coins::coin::CompressedCoin, @@ -20,6 +19,7 @@ use fuel_core_types::{ UtxoId, }, fuel_types::{ + BlockHeight, Bytes32, ContractId, Nonce, @@ -39,8 +39,7 @@ pub struct FuelBlocks; impl Mappable for FuelBlocks { /// Unique identifier of the fuel block. type Key = Self::OwnedKey; - // TODO: Seems it would be faster to use `BlockHeight` as primary key. - type OwnedKey = BlockId; + type OwnedKey = BlockHeight; type Value = Self::OwnedValue; type OwnedValue = CompressedBlock; } @@ -75,7 +74,7 @@ pub struct SealedBlockConsensus; impl Mappable for SealedBlockConsensus { type Key = Self::OwnedKey; - type OwnedKey = BlockId; + type OwnedKey = BlockHeight; type Value = Self::OwnedValue; type OwnedValue = Consensus; } diff --git a/tests/tests/blocks.rs b/tests/tests/blocks.rs index 05f3ba38a12..4473dffcaa1 100644 --- a/tests/tests/blocks.rs +++ b/tests/tests/blocks.rs @@ -45,7 +45,7 @@ use std::{ async fn block() { // setup test data in the node let block = CompressedBlock::default(); - let id = block.id(); + let height = block.header().height(); let mut db = Database::default(); // setup server & client let srv = FuelService::from_database(db.clone(), Config::local_node()) @@ -53,13 +53,13 @@ async fn block() { .unwrap(); let client = FuelClient::from(srv.bound_address); - db.storage::().insert(&id, &block).unwrap(); + db.storage::().insert(height, &block).unwrap(); db.storage::() - .insert(&id, &Consensus::PoA(Default::default())) + .insert(height, &Consensus::PoA(Default::default())) .unwrap(); // run test - let block = 
client.block(&id.into()).await.unwrap(); + let block = client.block_by_height(**height).await.unwrap(); assert!(block.is_some()); } diff --git a/tests/tests/poa.rs b/tests/tests/poa.rs index 10fb590a955..cd8f32cb76d 100644 --- a/tests/tests/poa.rs +++ b/tests/tests/poa.rs @@ -1,5 +1,6 @@ use fuel_core::{ database::Database, + fuel_core_graphql_api::ports::DatabaseBlocks, service::{ Config, FuelService, @@ -9,6 +10,7 @@ use fuel_core_client::client::{ types::TransactionStatus, FuelClient, }; +use fuel_core_p2p::ports::P2pDb; use fuel_core_types::{ blockchain::{ consensus::Consensus, @@ -52,9 +54,10 @@ async fn can_get_sealed_block_from_poa_produced_block() { let block_id = BlockId::from_str(&block_id).unwrap(); + let block_height = db.block_height(&block_id).unwrap(); // check sealed block header is correct let sealed_block_header = db - .get_sealed_block_header(&block_id) + .get_sealed_block_header(&block_height) .unwrap() .expect("expected sealed header to be available"); @@ -68,9 +71,10 @@ async fn can_get_sealed_block_from_poa_produced_block() { .verify(&poa_public, &block_id.into_message()) .expect("failed to verify signature"); + let block_height = db.block_height(&block_id).unwrap(); // check sealed block is correct let sealed_block = db - .get_sealed_block_by_id(&block_id) + .get_sealed_block(&block_height) .unwrap() .expect("expected sealed header to be available"); From 359192efbd36455ff14ee5f742353cf0a052e5ad Mon Sep 17 00:00:00 2001 From: xgreenx Date: Sat, 6 Jan 2024 23:38:26 +0100 Subject: [PATCH 17/28] Updated CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index aac3f7af647..53a20f16c17 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,6 +10,7 @@ Description of the upcoming release here. ### Changed +- [#1587](https://github.com/FuelLabs/fuel-core/pull/1587): Use `BlockHeight` as a primary key for the `FuelsBlock` table. 
- [#1577](https://github.com/FuelLabs/fuel-core/pull/1577): Moved insertion of sealed blocks into the `BlockImporter` instead of the executor. #### Breaking From e27996a4cdf6f777191500232618cb03e6665d9b Mon Sep 17 00:00:00 2001 From: xgreenx Date: Sat, 6 Jan 2024 23:50:24 +0100 Subject: [PATCH 18/28] Fix compilation --- tests/tests/poa.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tests/tests/poa.rs b/tests/tests/poa.rs index cd8f32cb76d..b48b2799aed 100644 --- a/tests/tests/poa.rs +++ b/tests/tests/poa.rs @@ -10,7 +10,6 @@ use fuel_core_client::client::{ types::TransactionStatus, FuelClient, }; -use fuel_core_p2p::ports::P2pDb; use fuel_core_types::{ blockchain::{ consensus::Consensus, @@ -74,7 +73,7 @@ async fn can_get_sealed_block_from_poa_produced_block() { let block_height = db.block_height(&block_id).unwrap(); // check sealed block is correct let sealed_block = db - .get_sealed_block(&block_height) + .get_sealed_block_by_height(&block_height) .unwrap() .expect("expected sealed header to be available"); From 1be054c5fe0b8a2c3396fc7e8c797e1842c6def1 Mon Sep 17 00:00:00 2001 From: xgreenx Date: Mon, 8 Jan 2024 01:10:33 +0100 Subject: [PATCH 19/28] Use `AtomicView` in the `TxPool` --- .../fuel-core/src/graphql_api/api_service.rs | 12 +- crates/fuel-core/src/graphql_api/database.rs | 64 ++- .../src/service/adapters/graphql_api.rs | 20 +- .../service/adapters/graphql_api/off_chain.rs | 24 +- .../service/adapters/graphql_api/on_chain.rs | 30 +- .../fuel-core/src/service/adapters/txpool.rs | 5 - crates/fuel-core/src/service/sub_services.rs | 2 + crates/services/txpool/Cargo.toml | 1 + crates/services/txpool/src/mock_db.rs | 19 +- crates/services/txpool/src/ports.rs | 3 - crates/services/txpool/src/service.rs | 83 +-- .../txpool/src/service/test_helpers.rs | 13 +- .../service/update_sender/tests/test_e2e.rs | 2 +- .../update_sender/tests/test_subscribe.rs | 2 +- crates/services/txpool/src/test_helpers.rs | 147 +++-- 
crates/services/txpool/src/txpool.rs | 285 +++++----- crates/services/txpool/src/txpool/tests.rs | 528 +++++++++--------- crates/storage/src/transactional.rs | 9 +- 18 files changed, 681 insertions(+), 568 deletions(-) diff --git a/crates/fuel-core/src/graphql_api/api_service.rs b/crates/fuel-core/src/graphql_api/api_service.rs index 15023a5995f..6f4e26c2fbb 100644 --- a/crates/fuel-core/src/graphql_api/api_service.rs +++ b/crates/fuel-core/src/graphql_api/api_service.rs @@ -1,13 +1,11 @@ use crate::{ fuel_core_graphql_api::{ - database::{ - OffChainView, - OnChainView, - }, metrics_extension::MetricsExtension, ports::{ BlockProducerPort, ConsensusModulePort, + OffChainDatabase, + OnChainDatabase, P2pPort, TxPoolPort, }, @@ -178,8 +176,10 @@ pub fn new_service( request_timeout: Duration, ) -> anyhow::Result where - OnChain: AtomicView + 'static, - OffChain: AtomicView + 'static, + OnChain: AtomicView + 'static, + OffChain: AtomicView + 'static, + OnChain::View: OnChainDatabase, + OffChain::View: OffChainDatabase, { let network_addr = config.addr; let combined_read_database = ReadDatabase::new(on_database, off_database); diff --git a/crates/fuel-core/src/graphql_api/database.rs b/crates/fuel-core/src/graphql_api/database.rs index 5aa4ba0f975..81445539d73 100644 --- a/crates/fuel-core/src/graphql_api/database.rs +++ b/crates/fuel-core/src/graphql_api/database.rs @@ -57,25 +57,77 @@ pub type OnChainView = Arc; /// The off-chain view of the database used by the [`ReadView`] to fetch off-chain data. pub type OffChainView = Arc; +/// The GraphQL can't work with the generics in [`async_graphql::Context::data_unchecked`] and requires a known type. +/// It is an `Arc` wrapper around the generic for on-chain and off-chain databases. 
+struct ArcWrapper { + inner: Provider, + _marker: core::marker::PhantomData, +} + +impl ArcWrapper { + fn new(inner: Provider) -> Self { + Self { + inner, + _marker: core::marker::PhantomData, + } + } +} + +impl AtomicView for ArcWrapper +where + Provider: AtomicView, + View: OnChainDatabase + 'static, +{ + type View = OnChainView; + + fn view_at(&self, height: BlockHeight) -> StorageResult { + let view = self.inner.view_at(height)?; + Ok(Arc::new(view)) + } + + fn latest_view(&self) -> Self::View { + Arc::new(self.inner.latest_view()) + } +} + +impl AtomicView for ArcWrapper +where + Provider: AtomicView, + View: OffChainDatabase + 'static, +{ + type View = OffChainView; + + fn view_at(&self, height: BlockHeight) -> StorageResult { + let view = self.inner.view_at(height)?; + Ok(Arc::new(view)) + } + + fn latest_view(&self) -> Self::View { + Arc::new(self.inner.latest_view()) + } +} + /// The container of the on-chain and off-chain database view provides. /// It is used only by [`ViewExtension`](super::view_extension::ViewExtension) to create a [`ReadView`]. pub struct ReadDatabase { /// The on-chain database view provider. - on_chain: Box>, + on_chain: Box>, /// The off-chain database view provider. - off_chain: Box>, + off_chain: Box>, } impl ReadDatabase { /// Creates a new [`ReadDatabase`] with the given on-chain and off-chain database view providers. 
pub fn new(on_chain: OnChain, off_chain: OffChain) -> Self where - OnChain: AtomicView + 'static, - OffChain: AtomicView + 'static, + OnChain: AtomicView + 'static, + OffChain: AtomicView + 'static, + OnChain::View: OnChainDatabase, + OffChain::View: OffChainDatabase, { Self { - on_chain: Box::new(on_chain), - off_chain: Box::new(off_chain), + on_chain: Box::new(ArcWrapper::new(on_chain)), + off_chain: Box::new(ArcWrapper::new(off_chain)), } } diff --git a/crates/fuel-core/src/service/adapters/graphql_api.rs b/crates/fuel-core/src/service/adapters/graphql_api.rs index e83efc44e08..b6f303a9b89 100644 --- a/crates/fuel-core/src/service/adapters/graphql_api.rs +++ b/crates/fuel-core/src/service/adapters/graphql_api.rs @@ -18,7 +18,10 @@ use crate::{ }; use async_trait::async_trait; use fuel_core_services::stream::BoxStream; -use fuel_core_storage::Result as StorageResult; +use fuel_core_storage::{ + transactional::AtomicView, + Result as StorageResult, +}; use fuel_core_txpool::{ service::TxStatusMessage, types::TxId, @@ -145,3 +148,18 @@ impl worker::BlockImporter for BlockImporterAdapter { ) } } + +impl AtomicView for Database { + type View = Database; + + fn view_at(&self, _: BlockHeight) -> StorageResult { + unimplemented!( + "Unimplemented until of the https://github.com/FuelLabs/fuel-core/issues/451" + ) + } + + fn latest_view(&self) -> Self::View { + // TODO: https://github.com/FuelLabs/fuel-core/issues/1581 + self.clone() + } +} diff --git a/crates/fuel-core/src/service/adapters/graphql_api/off_chain.rs b/crates/fuel-core/src/service/adapters/graphql_api/off_chain.rs index 86fc7002a02..a892b84c2bf 100644 --- a/crates/fuel-core/src/service/adapters/graphql_api/off_chain.rs +++ b/crates/fuel-core/src/service/adapters/graphql_api/off_chain.rs @@ -3,12 +3,9 @@ use crate::{ transactions::OwnedTransactionIndexCursor, Database, }, - fuel_core_graphql_api::{ - database::OffChainView, - ports::{ - worker, - OffChainDatabase, - }, + fuel_core_graphql_api::ports::{ + 
worker, + OffChainDatabase, }, }; use fuel_core_storage::{ @@ -18,7 +15,6 @@ use fuel_core_storage::{ IterDirection, }, not_found, - transactional::AtomicView, Error as StorageError, Result as StorageResult, }; @@ -36,7 +32,6 @@ use fuel_core_types::{ }, services::txpool::TransactionStatus, }; -use std::sync::Arc; impl OffChainDatabase for Database { fn owned_message_ids( @@ -83,19 +78,6 @@ impl OffChainDatabase for Database { } } -impl AtomicView for Database { - fn view_at(&self, _: BlockHeight) -> StorageResult { - unimplemented!( - "Unimplemented until of the https://github.com/FuelLabs/fuel-core/issues/451" - ) - } - - fn latest_view(&self) -> OffChainView { - // TODO: https://github.com/FuelLabs/fuel-core/issues/1581 - Arc::new(self.clone()) - } -} - impl worker::OffChainDatabase for Database { fn record_tx_id_owner( &mut self, diff --git a/crates/fuel-core/src/service/adapters/graphql_api/on_chain.rs b/crates/fuel-core/src/service/adapters/graphql_api/on_chain.rs index dd9c9937ffa..40931831bd2 100644 --- a/crates/fuel-core/src/service/adapters/graphql_api/on_chain.rs +++ b/crates/fuel-core/src/service/adapters/graphql_api/on_chain.rs @@ -1,14 +1,11 @@ use crate::{ database::Database, - fuel_core_graphql_api::{ - database::OnChainView, - ports::{ - DatabaseBlocks, - DatabaseChain, - DatabaseContracts, - DatabaseMessages, - OnChainDatabase, - }, + fuel_core_graphql_api::ports::{ + DatabaseBlocks, + DatabaseChain, + DatabaseContracts, + DatabaseMessages, + OnChainDatabase, }, }; use fuel_core_storage::{ @@ -18,7 +15,6 @@ use fuel_core_storage::{ IterDirection, }, not_found, - transactional::AtomicView, Error as StorageError, Result as StorageResult, }; @@ -36,7 +32,6 @@ use fuel_core_types::{ }, services::graphql_api::ContractBalance, }; -use std::sync::Arc; impl DatabaseBlocks for Database { fn block_id(&self, height: &BlockHeight) -> StorageResult { @@ -125,16 +120,3 @@ impl DatabaseChain for Database { } impl OnChainDatabase for Database {} - -impl 
AtomicView for Database { - fn view_at(&self, _: BlockHeight) -> StorageResult { - unimplemented!( - "Unimplemented until of the https://github.com/FuelLabs/fuel-core/issues/451" - ) - } - - fn latest_view(&self) -> OnChainView { - // TODO: https://github.com/FuelLabs/fuel-core/issues/1581 - Arc::new(self.clone()) - } -} diff --git a/crates/fuel-core/src/service/adapters/txpool.rs b/crates/fuel-core/src/service/adapters/txpool.rs index ccd33474df6..d06fc1face0 100644 --- a/crates/fuel-core/src/service/adapters/txpool.rs +++ b/crates/fuel-core/src/service/adapters/txpool.rs @@ -27,7 +27,6 @@ use fuel_core_types::{ UtxoId, }, fuel_types::{ - BlockHeight, ContractId, Nonce, }, @@ -139,8 +138,4 @@ impl fuel_core_txpool::ports::TxPoolDb for Database { fn is_message_spent(&self, id: &Nonce) -> StorageResult { self.storage::().contains_key(id) } - - fn current_block_height(&self) -> StorageResult { - self.latest_height() - } } diff --git a/crates/fuel-core/src/service/sub_services.rs b/crates/fuel-core/src/service/sub_services.rs index ba8dc05e93a..84e941e15f2 100644 --- a/crates/fuel-core/src/service/sub_services.rs +++ b/crates/fuel-core/src/service/sub_services.rs @@ -51,6 +51,7 @@ pub fn init_sub_services( let last_block = database.get_current_block()?.ok_or(anyhow::anyhow!( "The blockchain is not initialized with any block" ))?; + let last_height = *last_block.header().height(); #[cfg(feature = "relayer")] let relayer_service = if let Some(config) = &config.relayer { Some(fuel_core_relayer::new_service( @@ -140,6 +141,7 @@ pub fn init_sub_services( database.clone(), importer_adapter.clone(), p2p_adapter.clone(), + last_height, ); let tx_pool_adapter = TxPoolAdapter::new(txpool.shared.clone()); diff --git a/crates/services/txpool/Cargo.toml b/crates/services/txpool/Cargo.toml index 9c07108646c..fa0cee10746 100644 --- a/crates/services/txpool/Cargo.toml +++ b/crates/services/txpool/Cargo.toml @@ -28,6 +28,7 @@ tracing = { workspace = true } [dev-dependencies] 
fuel-core-trace = { path = "./../../trace" } fuel-core-txpool = { path = "", features = ["test-helpers"] } +fuel-core-types = { path = "../../types", features = ["test-helpers"] } itertools = { workspace = true } mockall = { workspace = true } proptest = { workspace = true } diff --git a/crates/services/txpool/src/mock_db.rs b/crates/services/txpool/src/mock_db.rs index 5435585a3f1..b12c1c1fd9a 100644 --- a/crates/services/txpool/src/mock_db.rs +++ b/crates/services/txpool/src/mock_db.rs @@ -1,5 +1,8 @@ use crate::ports::TxPoolDb; -use fuel_core_storage::Result as StorageResult; +use fuel_core_storage::{ + transactional::AtomicView, + Result as StorageResult, +}; use fuel_core_types::{ entities::{ coins::coin::{ @@ -91,8 +94,18 @@ impl TxPoolDb for MockDb { fn is_message_spent(&self, id: &Nonce) -> StorageResult { Ok(self.data.lock().unwrap().spent_messages.contains(id)) } +} + +pub struct MockDBProvider(pub MockDb); + +impl AtomicView for MockDBProvider { + type View = MockDb; + + fn view_at(&self, _: BlockHeight) -> StorageResult { + Ok(self.latest_view()) + } - fn current_block_height(&self) -> StorageResult { - Ok(Default::default()) + fn latest_view(&self) -> Self::View { + self.0.clone() } } diff --git a/crates/services/txpool/src/ports.rs b/crates/services/txpool/src/ports.rs index 375d7066982..7a32746c7ef 100644 --- a/crates/services/txpool/src/ports.rs +++ b/crates/services/txpool/src/ports.rs @@ -10,7 +10,6 @@ use fuel_core_types::{ UtxoId, }, fuel_types::{ - BlockHeight, ContractId, Nonce, }, @@ -55,6 +54,4 @@ pub trait TxPoolDb: Send + Sync { fn message(&self, message_id: &Nonce) -> StorageResult>; fn is_message_spent(&self, message_id: &Nonce) -> StorageResult; - - fn current_block_height(&self) -> StorageResult; } diff --git a/crates/services/txpool/src/service.rs b/crates/services/txpool/src/service.rs index 38ac9b75929..50e61fab098 100644 --- a/crates/services/txpool/src/service.rs +++ b/crates/services/txpool/src/service.rs @@ -51,6 +51,7 @@ use 
fuel_core_types::{ }; use anyhow::anyhow; +use fuel_core_storage::transactional::AtomicView; use fuel_core_types::services::block_importer::SharedImportResult; use parking_lot::Mutex as ParkingMutex; use std::{ @@ -119,45 +120,46 @@ impl TxStatusChange { } } -pub struct SharedState { +pub struct SharedState { tx_status_sender: TxStatusChange, - txpool: Arc>>, + txpool: Arc>>, p2p: Arc, consensus_params: ConsensusParameters, - db: DB, + current_height: Arc>, config: Config, } -impl Clone for SharedState { +impl Clone for SharedState { fn clone(&self) -> Self { Self { tx_status_sender: self.tx_status_sender.clone(), txpool: self.txpool.clone(), p2p: self.p2p.clone(), consensus_params: self.consensus_params.clone(), - db: self.db.clone(), + current_height: self.current_height.clone(), config: self.config.clone(), } } } -pub struct Task { +pub struct Task { gossiped_tx_stream: BoxStream, committed_block_stream: BoxStream, - shared: SharedState, + shared: SharedState, ttl_timer: tokio::time::Interval, } #[async_trait::async_trait] -impl RunnableService for Task +impl RunnableService for Task where - P2P: PeerToPeer + Send + Sync, - DB: TxPoolDb + Clone, + P2P: PeerToPeer, + ViewProvider: AtomicView, + View: TxPoolDb, { const NAME: &'static str = "TxPool"; - type SharedData = SharedState; - type Task = Task; + type SharedData = SharedState; + type Task = Task; type TaskParams = (); fn shared_data(&self) -> Self::SharedData { @@ -175,10 +177,11 @@ where } #[async_trait::async_trait] -impl RunnableTask for Task +impl RunnableTask for Task where - P2P: PeerToPeer + Send + Sync, - DB: TxPoolDb, + P2P: PeerToPeer, + ViewProvider: AtomicView, + View: TxPoolDb, { async fn run(&mut self, watcher: &mut StateWatcher) -> anyhow::Result { let should_continue; @@ -201,14 +204,22 @@ where result = self.committed_block_stream.next() => { if let Some(result) = result { + let new_height = *result + .sealed_block + .entity.header().height(); + let block = &result .sealed_block .entity; - 
self.shared.txpool.lock().block_update( - &self.shared.tx_status_sender, - block, - &result.tx_status, - ); + { + let mut lock = self.shared.txpool.lock(); + lock.block_update( + &self.shared.tx_status_sender, + block, + &result.tx_status, + ); + *self.shared.current_height.lock() = new_height; + } should_continue = true; } else { should_continue = false; @@ -218,7 +229,7 @@ where new_transaction = self.gossiped_tx_stream.next() => { if let Some(GossipData { data: Some(tx), message_id, peer_id }) = new_transaction { let id = tx.id(&self.shared.consensus_params.chain_id); - let current_height = self.shared.db.current_block_height()?; + let current_height = *self.shared.current_height.lock(); // verify tx let checked_tx = check_single_tx(tx, current_height, &self.shared.config).await; @@ -282,10 +293,7 @@ where // Instead, `fuel-core` can create a `DatabaseWithTxPool` that aggregates `TxPool` and // storage `Database` together. GraphQL will retrieve data from this `DatabaseWithTxPool` via // `StorageInspect` trait. 
-impl SharedState -where - DB: TxPoolDb, -{ +impl SharedState { pub fn pending_number(&self) -> usize { self.txpool.lock().pending_number() } @@ -337,10 +345,11 @@ where } } -impl SharedState +impl SharedState where P2P: PeerToPeer, - DB: TxPoolDb, + ViewProvider: AtomicView, + View: TxPoolDb, { #[tracing::instrument(name = "insert_submitted_txn", skip_all)] pub async fn insert( @@ -348,11 +357,7 @@ where txs: Vec>, ) -> Vec> { // verify txs - let block_height = self.db.current_block_height(); - let current_height = match block_height { - Ok(val) => val, - Err(e) => return vec![Err(e.into())], - }; + let current_height = *self.current_height.lock(); let checked_txs = check_transactions(&txs, current_height, &self.config).await; @@ -430,16 +435,18 @@ pub enum TxStatusMessage { FailedStatus, } -pub fn new_service( +pub fn new_service( config: Config, - db: DB, + provider: ViewProvider, importer: Importer, p2p: P2P, -) -> Service + current_height: BlockHeight, +) -> Service where Importer: BlockImporter, P2P: PeerToPeer + 'static, - DB: TxPoolDb + Clone + 'static, + ViewProvider: AtomicView, + ViewProvider::View: TxPoolDb, { let p2p = Arc::new(p2p); let gossiped_tx_stream = p2p.gossiped_transaction_events(); @@ -448,7 +455,7 @@ where ttl_timer.set_missed_tick_behavior(MissedTickBehavior::Skip); let consensus_params = config.chain_config.consensus_parameters.clone(); let number_of_active_subscription = config.number_of_active_subscription; - let txpool = Arc::new(ParkingMutex::new(TxPool::new(config.clone(), db.clone()))); + let txpool = Arc::new(ParkingMutex::new(TxPool::new(config.clone(), provider))); let task = Task { gossiped_tx_stream, committed_block_stream, @@ -464,7 +471,7 @@ where txpool, p2p, consensus_params, - db, + current_height: Arc::new(ParkingMutex::new(current_height)), config, }, ttl_timer, diff --git a/crates/services/txpool/src/service/test_helpers.rs b/crates/services/txpool/src/service/test_helpers.rs index 3cf532bfa8b..3aea0044ff2 100644 --- 
a/crates/services/txpool/src/service/test_helpers.rs +++ b/crates/services/txpool/src/service/test_helpers.rs @@ -1,5 +1,6 @@ use super::*; use crate::{ + mock_db::MockDBProvider, ports::BlockImporter, MockDb, }; @@ -31,7 +32,7 @@ use std::cell::RefCell; type GossipedTransaction = GossipData; pub struct TestContext { - pub(crate) service: Service, + pub(crate) service: Service, mock_db: MockDb, rng: RefCell, } @@ -41,7 +42,7 @@ impl TestContext { TestContextBuilder::new().build_and_start().await } - pub fn service(&self) -> &Service { + pub fn service(&self) -> &Service { &self.service } @@ -193,7 +194,13 @@ impl TestContextBuilder { .importer .unwrap_or_else(|| MockImporter::with_blocks(vec![])); - let service = new_service(config, mock_db.clone(), importer, p2p); + let service = new_service( + config, + MockDBProvider(mock_db.clone()), + importer, + p2p, + Default::default(), + ); TestContext { service, diff --git a/crates/services/txpool/src/service/update_sender/tests/test_e2e.rs b/crates/services/txpool/src/service/update_sender/tests/test_e2e.rs index b3871b06e86..482839b6679 100644 --- a/crates/services/txpool/src/service/update_sender/tests/test_e2e.rs +++ b/crates/services/txpool/src/service/update_sender/tests/test_e2e.rs @@ -144,7 +144,7 @@ fn test_update_sender_inner(ops: Vec) { Op::DropRecv(i) => { // Real if i < receivers.len() { - receivers.remove(i); + let _ = receivers.remove(i); } // Model if i < model_receivers.len() { diff --git a/crates/services/txpool/src/service/update_sender/tests/test_subscribe.rs b/crates/services/txpool/src/service/update_sender/tests/test_subscribe.rs index 4c0795be410..936cbadaae9 100644 --- a/crates/services/txpool/src/service/update_sender/tests/test_subscribe.rs +++ b/crates/services/txpool/src/service/update_sender/tests/test_subscribe.rs @@ -23,7 +23,7 @@ fn test_subscriber(input: Input) { let Input { tx_id, senders } = input; let mut senders = box_senders(senders); let len_before = senders.values().map(|v| 
v.len()).sum::(); - subscribe::<_, MockCreateChannel>( + let _ = subscribe::<_, MockCreateChannel>( Bytes32::from([tx_id; 32]), &mut senders, Box::new(()), diff --git a/crates/services/txpool/src/test_helpers.rs b/crates/services/txpool/src/test_helpers.rs index 3c487ccb5c5..5586abee542 100644 --- a/crates/services/txpool/src/test_helpers.rs +++ b/crates/services/txpool/src/test_helpers.rs @@ -1,7 +1,12 @@ // Rust isn't smart enough to detect cross module test deps #![allow(dead_code)] -use crate::MockDb; +use crate::{ + mock_db::MockDBProvider, + Config, + MockDb, + TxPool, +}; use fuel_core_types::{ entities::coins::coin::{ Coin, @@ -11,6 +16,7 @@ use fuel_core_types::{ fuel_crypto::rand::{ rngs::StdRng, Rng, + SeedableRng, }, fuel_tx::{ field::Inputs, @@ -39,6 +45,85 @@ use fuel_core_types::{ // the byte and gas price fees. pub const TEST_COIN_AMOUNT: u64 = 100_000_000u64; +pub(crate) struct TextContext { + mock_db: MockDb, + rng: StdRng, + config: Option, +} + +impl Default for TextContext { + fn default() -> Self { + Self { + mock_db: MockDb::default(), + rng: StdRng::seed_from_u64(0), + config: None, + } + } +} + +impl TextContext { + pub(crate) fn database_mut(&mut self) -> &mut MockDb { + &mut self.mock_db + } + + pub(crate) fn config(self, config: Config) -> Self { + Self { + config: Some(config), + ..self + } + } + + pub(crate) fn build(self) -> TxPool { + TxPool::new( + self.config.unwrap_or_default(), + MockDBProvider(self.mock_db), + ) + } + + pub(crate) fn setup_coin(&mut self) -> (Coin, Input) { + setup_coin(&mut self.rng, Some(&self.mock_db)) + } + + pub(crate) fn create_output_and_input( + &mut self, + amount: Word, + ) -> (Output, UnsetInput) { + let input = self.random_predicate(AssetId::BASE, amount, None); + let output = Output::coin(*input.input_owner().unwrap(), amount, AssetId::BASE); + (output, UnsetInput(input)) + } + + pub(crate) fn random_predicate( + &mut self, + asset_id: AssetId, + amount: Word, + utxo_id: Option, + ) -> Input { + 
random_predicate(&mut self.rng, asset_id, amount, utxo_id) + } + + pub(crate) fn custom_predicate( + &mut self, + asset_id: AssetId, + amount: Word, + code: Vec, + utxo_id: Option, + ) -> Input { + let owner = Input::predicate_owner(&code); + Input::coin_predicate( + utxo_id.unwrap_or_else(|| self.rng.gen()), + owner, + amount, + asset_id, + Default::default(), + Default::default(), + Default::default(), + code, + vec![], + ) + } +} + pub(crate) fn setup_coin(rng: &mut StdRng, mock_db: Option<&MockDb>) -> (Coin, Input) { let input = random_predicate(rng, AssetId::BASE, TEST_COIN_AMOUNT, None); add_coin_to_state(input, mock_db) @@ -64,32 +149,6 @@ pub(crate) fn add_coin_to_state(input: Input, mock_db: Option<&MockDb>) -> (Coin (coin.uncompress(utxo_id), input) } -pub(crate) fn create_output_and_input( - rng: &mut StdRng, - amount: Word, -) -> (Output, UnsetInput) { - let input = random_predicate(rng, AssetId::BASE, amount, None); - let output = Output::coin(*input.input_owner().unwrap(), amount, AssetId::BASE); - (output, UnsetInput(input)) -} - -pub struct UnsetInput(Input); - -impl UnsetInput { - pub fn into_input(self, new_utxo_id: UtxoId) -> Input { - let mut input = self.0; - match &mut input { - Input::CoinSigned(CoinSigned { utxo_id, .. }) - | Input::CoinPredicate(CoinPredicate { utxo_id, .. }) - | Input::Contract(Contract { utxo_id, .. 
}) => { - *utxo_id = new_utxo_id; - } - _ => {} - } - input - } -} - pub(crate) fn random_predicate( rng: &mut StdRng, asset_id: AssetId, @@ -115,25 +174,21 @@ pub(crate) fn random_predicate( .into_default_estimated() } -pub(crate) fn custom_predicate( - rng: &mut StdRng, - asset_id: AssetId, - amount: Word, - code: Vec, - utxo_id: Option, -) -> Input { - let owner = Input::predicate_owner(&code); - Input::coin_predicate( - utxo_id.unwrap_or_else(|| rng.gen()), - owner, - amount, - asset_id, - Default::default(), - Default::default(), - Default::default(), - code, - vec![], - ) +pub struct UnsetInput(Input); + +impl UnsetInput { + pub fn into_input(self, new_utxo_id: UtxoId) -> Input { + let mut input = self.0; + match &mut input { + Input::CoinSigned(CoinSigned { utxo_id, .. }) + | Input::CoinPredicate(CoinPredicate { utxo_id, .. }) + | Input::Contract(Contract { utxo_id, .. }) => { + *utxo_id = new_utxo_id; + } + _ => {} + } + input + } } pub trait IntoEstimated { diff --git a/crates/services/txpool/src/txpool.rs b/crates/services/txpool/src/txpool.rs index 1c3c0376e8d..63a84a803b5 100644 --- a/crates/services/txpool/src/txpool.rs +++ b/crates/services/txpool/src/txpool.rs @@ -37,6 +37,7 @@ use fuel_core_types::{ use crate::service::TxStatusMessage; use fuel_core_metrics::txpool_metrics::txpool_metrics; +use fuel_core_storage::transactional::AtomicView; use fuel_core_types::{ blockchain::block::Block, fuel_vm::checked_transaction::CheckPredicateParams, @@ -54,20 +55,17 @@ use std::{ use tokio_rayon::AsyncRayonHandle; #[derive(Debug, Clone)] -pub struct TxPool { +pub struct TxPool { by_hash: HashMap, by_gas_price: PriceSort, by_time: TimeSort, by_dependency: Dependency, config: Config, - database: DB, + database: ViewProvider, } -impl TxPool -where - DB: TxPoolDb, -{ - pub fn new(config: Config, database: DB) -> Self { +impl TxPool { + pub fn new(config: Config, database: ViewProvider) -> Self { let max_depth = config.max_depth; Self { @@ -93,94 +91,6 @@ where 
&self.by_dependency } - #[tracing::instrument(level = "info", skip_all, fields(tx_id = %tx.id()), ret, err)] - // this is atomic operation. Return removed(pushed out/replaced) transactions - fn insert_inner( - &mut self, - tx: Checked, - ) -> anyhow::Result { - let tx: CheckedTransaction = tx.into(); - - let tx = Arc::new(match tx { - CheckedTransaction::Script(script) => PoolTransaction::Script(script), - CheckedTransaction::Create(create) => PoolTransaction::Create(create), - CheckedTransaction::Mint(_) => { - return Err(anyhow::anyhow!("Mint transactions is not supported")) - } - }); - - if !tx.is_computed() { - return Err(Error::NoMetadata.into()) - } - - // verify max gas is less than block limit - if tx.max_gas() > self.config.chain_config.block_gas_limit { - return Err(Error::NotInsertedMaxGasLimit { - tx_gas: tx.max_gas(), - block_limit: self.config.chain_config.block_gas_limit, - } - .into()) - } - - if self.by_hash.contains_key(&tx.id()) { - return Err(Error::NotInsertedTxKnown.into()) - } - - let mut max_limit_hit = false; - // check if we are hitting limit of pool - if self.by_hash.len() >= self.config.max_tx { - max_limit_hit = true; - // limit is hit, check if we can push out lowest priced tx - let lowest_price = self.by_gas_price.lowest_value().unwrap_or_default(); - if lowest_price >= tx.price() { - return Err(Error::NotInsertedLimitHit.into()) - } - } - if self.config.metrics { - txpool_metrics() - .gas_price_histogram - .observe(tx.price() as f64); - - txpool_metrics() - .tx_size_histogram - .observe(tx.metered_bytes_size() as f64); - } - // check and insert dependency - let rem = self - .by_dependency - .insert(&self.by_hash, &self.database, &tx)?; - let info = TxInfo::new(tx.clone()); - let submitted_time = info.submitted_time(); - self.by_gas_price.insert(&info); - self.by_time.insert(&info); - self.by_hash.insert(tx.id(), info); - - // if some transaction were removed so we don't need to check limit - let removed = if rem.is_empty() { - if 
max_limit_hit { - // remove last tx from sort - let rem_tx = self.by_gas_price.lowest_tx().unwrap(); // safe to unwrap limit is hit - self.remove_inner(&rem_tx); - vec![rem_tx] - } else { - Vec::new() - } - } else { - // remove ret from by_hash and from by_price - for rem in rem.iter() { - self.remove_tx(&rem.id()); - } - - rem - }; - - Ok(InsertionResult { - inserted: tx, - submitted_time, - removed, - }) - } - /// Return all sorted transactions that are includable in next block. pub fn sorted_includable(&self) -> impl Iterator + '_ { self.by_gas_price @@ -228,47 +138,6 @@ where self.remove_by_tx_id(tx_id) } - #[tracing::instrument(level = "info", skip_all)] - /// Import a set of transactions from network gossip or GraphQL endpoints. - pub fn insert( - &mut self, - tx_status_sender: &TxStatusChange, - txs: Vec>, - ) -> Vec> { - // Check if that data is okay (witness match input/output, and if recovered signatures ara valid). - // should be done before transaction comes to txpool, or before it enters RwLocked region. - let mut res = Vec::new(); - - for tx in txs.into_iter() { - res.push(self.insert_inner(tx)); - } - - // announce to subscribers - for ret in res.iter() { - match ret { - Ok(InsertionResult { - removed, - inserted, - submitted_time, - }) => { - for removed in removed { - // small todo there is possibility to have removal reason (ReplacedByHigherGas, DependencyRemoved) - // but for now it is okay to just use Error::Removed. 
- tx_status_sender.send_squeezed_out(removed.id(), Error::Removed); - } - tx_status_sender.send_submitted( - inserted.id(), - Tai64::from_unix(submitted_time.as_secs() as i64), - ); - } - Err(_) => { - // @dev should not broadcast tx if error occurred - } - } - } - res - } - /// find all tx by its hash pub fn find(&self, hashes: &[TxId]) -> Vec> { let mut res = Vec::with_capacity(hashes.len()); @@ -385,6 +254,150 @@ where } } +impl TxPool +where + ViewProvider: AtomicView, + View: TxPoolDb, +{ + #[cfg(test)] + fn insert_single( + &mut self, + tx: Checked, + ) -> anyhow::Result { + let view = self.database.latest_view(); + self.insert_inner(tx, &view) + } + + #[tracing::instrument(level = "info", skip_all, fields(tx_id = %tx.id()), ret, err)] + // this is atomic operation. Return removed(pushed out/replaced) transactions + fn insert_inner( + &mut self, + tx: Checked, + view: &View, + ) -> anyhow::Result { + let tx: CheckedTransaction = tx.into(); + + let tx = Arc::new(match tx { + CheckedTransaction::Script(script) => PoolTransaction::Script(script), + CheckedTransaction::Create(create) => PoolTransaction::Create(create), + CheckedTransaction::Mint(_) => { + return Err(anyhow::anyhow!("Mint transactions is not supported")) + } + }); + + if !tx.is_computed() { + return Err(Error::NoMetadata.into()) + } + + // verify max gas is less than block limit + if tx.max_gas() > self.config.chain_config.block_gas_limit { + return Err(Error::NotInsertedMaxGasLimit { + tx_gas: tx.max_gas(), + block_limit: self.config.chain_config.block_gas_limit, + } + .into()) + } + + if self.by_hash.contains_key(&tx.id()) { + return Err(Error::NotInsertedTxKnown.into()) + } + + let mut max_limit_hit = false; + // check if we are hitting limit of pool + if self.by_hash.len() >= self.config.max_tx { + max_limit_hit = true; + // limit is hit, check if we can push out lowest priced tx + let lowest_price = self.by_gas_price.lowest_value().unwrap_or_default(); + if lowest_price >= tx.price() { + 
return Err(Error::NotInsertedLimitHit.into()) + } + } + if self.config.metrics { + txpool_metrics() + .gas_price_histogram + .observe(tx.price() as f64); + + txpool_metrics() + .tx_size_histogram + .observe(tx.metered_bytes_size() as f64); + } + // check and insert dependency + let rem = self.by_dependency.insert(&self.by_hash, view, &tx)?; + let info = TxInfo::new(tx.clone()); + let submitted_time = info.submitted_time(); + self.by_gas_price.insert(&info); + self.by_time.insert(&info); + self.by_hash.insert(tx.id(), info); + + // if some transaction were removed so we don't need to check limit + let removed = if rem.is_empty() { + if max_limit_hit { + // remove last tx from sort + let rem_tx = self.by_gas_price.lowest_tx().unwrap(); // safe to unwrap limit is hit + self.remove_inner(&rem_tx); + vec![rem_tx] + } else { + Vec::new() + } + } else { + // remove ret from by_hash and from by_price + for rem in rem.iter() { + self.remove_tx(&rem.id()); + } + + rem + }; + + Ok(InsertionResult { + inserted: tx, + submitted_time, + removed, + }) + } + + #[tracing::instrument(level = "info", skip_all)] + /// Import a set of transactions from network gossip or GraphQL endpoints. + pub fn insert( + &mut self, + tx_status_sender: &TxStatusChange, + txs: Vec>, + ) -> Vec> { + // Check if that data is okay (witness match input/output, and if recovered signatures ara valid). + // should be done before transaction comes to txpool, or before it enters RwLocked region. + let mut res = Vec::new(); + let view = self.database.latest_view(); + + for tx in txs.into_iter() { + res.push(self.insert_inner(tx, &view)); + } + + // announce to subscribers + for ret in res.iter() { + match ret { + Ok(InsertionResult { + removed, + inserted, + submitted_time, + }) => { + for removed in removed { + // small todo there is possibility to have removal reason (ReplacedByHigherGas, DependencyRemoved) + // but for now it is okay to just use Error::Removed. 
+ tx_status_sender.send_squeezed_out(removed.id(), Error::Removed); + } + tx_status_sender.send_submitted( + inserted.id(), + Tai64::from_unix(submitted_time.as_secs() as i64), + ); + } + Err(_) => { + // @dev should not broadcast tx if error occurred + } + } + } + res + } +} + pub async fn check_transactions( txs: &[Arc], current_height: BlockHeight, diff --git a/crates/services/txpool/src/txpool/tests.rs b/crates/services/txpool/src/txpool/tests.rs index 2e4c7706d56..8e572c2abd5 100644 --- a/crates/services/txpool/src/txpool/tests.rs +++ b/crates/services/txpool/src/txpool/tests.rs @@ -1,12 +1,7 @@ use crate::{ - ports::TxPoolDb, test_helpers::{ - add_coin_to_state, - create_output_and_input, - custom_predicate, - random_predicate, - setup_coin, IntoEstimated, + TextContext, TEST_COIN_AMOUNT, }, txpool::test_helpers::{ @@ -17,8 +12,6 @@ use crate::{ }, Config, Error, - MockDb, - TxPool, }; use fuel_core_types::{ fuel_asm::{ @@ -26,10 +19,6 @@ use fuel_core_types::{ RegId, Word, }, - fuel_crypto::rand::{ - rngs::StdRng, - SeedableRng, - }, fuel_tx::{ input::coin::CoinPredicate, Address, @@ -45,7 +34,6 @@ use fuel_core_types::{ fuel_types::ChainId, fuel_vm::checked_transaction::Checked, }; - use std::{ cmp::Reverse, collections::HashMap, @@ -56,51 +44,43 @@ use super::check_single_tx; const GAS_LIMIT: Word = 1000; -async fn check_unwrap_tx( - tx: Transaction, - db: MockDb, - config: &Config, -) -> Checked { - check_single_tx(tx, db.current_block_height().unwrap(), config) +async fn check_unwrap_tx(tx: Transaction, config: &Config) -> Checked { + check_single_tx(tx, Default::default(), config) .await .expect("Transaction should be checked") } async fn check_tx( tx: Transaction, - db: MockDb, config: &Config, ) -> anyhow::Result> { - check_single_tx(tx, db.current_block_height().unwrap(), config).await + check_single_tx(tx, Default::default(), config).await } #[tokio::test] async fn insert_simple_tx_succeeds() { - let mut rng = StdRng::seed_from_u64(0); - let db = 
MockDb::default(); - let mut txpool = TxPool::new(Default::default(), db.clone()); + let mut context = TextContext::default(); - let (_, gas_coin) = setup_coin(&mut rng, Some(&txpool.database)); + let (_, gas_coin) = context.setup_coin(); let tx = TransactionBuilder::script(vec![], vec![]) .script_gas_limit(GAS_LIMIT) .add_input(gas_coin) .finalize_as_transaction(); - let tx = check_unwrap_tx(tx, db.clone(), &txpool.config).await; + let mut txpool = context.build(); + let tx = check_unwrap_tx(tx, &txpool.config).await; txpool - .insert_inner(tx) + .insert_single(tx) .expect("Transaction should be OK, got Err"); } #[tokio::test] async fn insert_simple_tx_dependency_chain_succeeds() { - let mut rng = StdRng::seed_from_u64(0); - let db = MockDb::default(); - let mut txpool = TxPool::new(Default::default(), db.clone()); + let mut context = TextContext::default(); - let (_, gas_coin) = setup_coin(&mut rng, Some(&txpool.database)); - let (output, unset_input) = create_output_and_input(&mut rng, 1); + let (_, gas_coin) = context.setup_coin(); + let (output, unset_input) = context.create_output_and_input(1); let tx1 = TransactionBuilder::script(vec![], vec![]) .gas_price(1) .script_gas_limit(GAS_LIMIT) @@ -108,7 +88,7 @@ async fn insert_simple_tx_dependency_chain_succeeds() { .add_output(output) .finalize_as_transaction(); - let (_, gas_coin) = setup_coin(&mut rng, Some(&txpool.database)); + let (_, gas_coin) = context.setup_coin(); let input = unset_input.into_input(UtxoId::new(tx1.id(&Default::default()), 0)); let tx2 = TransactionBuilder::script(vec![], vec![]) .gas_price(1) @@ -117,26 +97,27 @@ async fn insert_simple_tx_dependency_chain_succeeds() { .add_input(gas_coin) .finalize_as_transaction(); - let tx1 = check_unwrap_tx(tx1, db.clone(), &txpool.config).await; - let tx2 = check_unwrap_tx(tx2, db.clone(), &txpool.config).await; + let mut txpool = context.build(); + let tx1 = check_unwrap_tx(tx1, &txpool.config).await; + let tx2 = check_unwrap_tx(tx2, 
&txpool.config).await; - txpool.insert_inner(tx1).expect("Tx1 should be OK, got Err"); txpool - .insert_inner(tx2) + .insert_single(tx1) + .expect("Tx1 should be OK, got Err"); + txpool + .insert_single(tx2) .expect("Tx2 dependent should be OK, got Err"); } #[tokio::test] async fn faulty_t2_collided_on_contract_id_from_tx1() { - let mut rng = StdRng::seed_from_u64(0); - let db = MockDb::default(); - let mut txpool = TxPool::new(Default::default(), db.clone()); + let mut context = TextContext::default(); let contract_id = Contract::EMPTY_CONTRACT_ID; // contract creation tx - let (_, gas_coin) = setup_coin(&mut rng, Some(&txpool.database)); - let (output, unset_input) = create_output_and_input(&mut rng, 10); + let (_, gas_coin) = context.setup_coin(); + let (output, unset_input) = context.create_output_and_input(10); let tx = TransactionBuilder::create( Default::default(), Default::default(), @@ -148,7 +129,7 @@ async fn faulty_t2_collided_on_contract_id_from_tx1() { .add_output(output) .finalize_as_transaction(); - let (_, gas_coin) = setup_coin(&mut rng, Some(&txpool.database)); + let (_, gas_coin) = context.setup_coin(); let input = unset_input.into_input(UtxoId::new(tx.id(&Default::default()), 1)); // attempt to insert a different creation tx with a valid dependency on the first tx, @@ -165,13 +146,14 @@ async fn faulty_t2_collided_on_contract_id_from_tx1() { .add_output(output) .finalize_as_transaction(); - let tx = check_unwrap_tx(tx, db.clone(), &txpool.config).await; - txpool.insert_inner(tx).expect("Tx1 should be Ok, got Err"); + let mut txpool = context.build(); + let tx = check_unwrap_tx(tx, &txpool.config).await; + txpool.insert_single(tx).expect("Tx1 should be Ok, got Err"); - let tx_faulty = check_unwrap_tx(tx_faulty, db.clone(), &txpool.config).await; + let tx_faulty = check_unwrap_tx(tx_faulty, &txpool.config).await; let err = txpool - .insert_inner(tx_faulty) + .insert_single(tx_faulty) .expect_err("Tx2 should be Err, got Ok"); assert!(matches!( 
err.downcast_ref::(), @@ -181,12 +163,10 @@ async fn faulty_t2_collided_on_contract_id_from_tx1() { #[tokio::test] async fn fail_to_insert_tx_with_dependency_on_invalid_utxo_type() { - let mut rng = StdRng::seed_from_u64(0); - let db = MockDb::default(); - let mut txpool = TxPool::new(Default::default(), db.clone()); + let mut context = TextContext::default(); let contract_id = Contract::EMPTY_CONTRACT_ID; - let (_, gas_coin) = setup_coin(&mut rng, Some(&txpool.database)); + let (_, gas_coin) = context.setup_coin(); let tx_faulty = TransactionBuilder::create( Default::default(), Default::default(), @@ -201,25 +181,25 @@ async fn fail_to_insert_tx_with_dependency_on_invalid_utxo_type() { let tx = TransactionBuilder::script(vec![], vec![]) .gas_price(1) .script_gas_limit(GAS_LIMIT) - .add_input(random_predicate( - &mut rng, + .add_input(context.random_predicate( AssetId::BASE, TEST_COIN_AMOUNT, Some(UtxoId::new(tx_faulty.id(&Default::default()), 0)), )) .finalize_as_transaction(); + let mut txpool = context.build(); let tx_faulty_id = tx_faulty.id(&ChainId::default()); - let tx_faulty = check_unwrap_tx(tx_faulty, db.clone(), &txpool.config).await; + let tx_faulty = check_unwrap_tx(tx_faulty, &txpool.config).await; txpool - .insert_inner(tx_faulty.clone()) + .insert_single(tx_faulty.clone()) .expect("Tx1 should be Ok, got Err"); - let tx = check_unwrap_tx(tx, db.clone(), &txpool.config).await; + let tx = check_unwrap_tx(tx, &txpool.config).await; let err = txpool - .insert_inner(tx) + .insert_single(tx) .expect_err("Tx2 should be Err, got Ok"); assert!(matches!( err.downcast_ref::(), @@ -233,18 +213,18 @@ async fn not_inserted_known_tx() { utxo_validation: false, ..Default::default() }; - let db = MockDb::default(); - let mut txpool = TxPool::new(config, db.clone()); + let context = TextContext::default().config(config); + let mut txpool = context.build(); let tx = Transaction::default_test_tx(); - let tx = check_unwrap_tx(tx, db.clone(), &txpool.config).await; + let 
tx = check_unwrap_tx(tx, &txpool.config).await; txpool - .insert_inner(tx.clone()) + .insert_single(tx.clone()) .expect("Tx1 should be Ok, got Err"); let err = txpool - .insert_inner(tx) + .insert_single(tx) .expect_err("Second insertion of Tx1 should be Err, got Ok"); assert!(matches!( err.downcast_ref::(), @@ -254,20 +234,20 @@ async fn not_inserted_known_tx() { #[tokio::test] async fn try_to_insert_tx2_missing_utxo() { - let mut rng = StdRng::seed_from_u64(0); - let mut txpool = TxPool::new(Default::default(), MockDb::default()); + let mut context = TextContext::default(); - let (_, input) = setup_coin(&mut rng, None); + let input = context.random_predicate(AssetId::BASE, TEST_COIN_AMOUNT, None); let tx = TransactionBuilder::script(vec![], vec![]) .gas_price(10) .script_gas_limit(GAS_LIMIT) .add_input(input) .finalize_as_transaction(); - let tx = check_unwrap_tx(tx, txpool.database.clone(), &txpool.config).await; + let mut txpool = context.build(); + let tx = check_unwrap_tx(tx, &txpool.config).await; let err = txpool - .insert_inner(tx) + .insert_single(tx) .expect_err("Tx should be Err, got Ok"); assert!(matches!( err.downcast_ref::(), @@ -277,11 +257,9 @@ async fn try_to_insert_tx2_missing_utxo() { #[tokio::test] async fn higher_priced_tx_removes_lower_priced_tx() { - let mut rng = StdRng::seed_from_u64(0); - let db = MockDb::default(); - let mut txpool = TxPool::new(Default::default(), db.clone()); + let mut context = TextContext::default(); - let (_, coin_input) = setup_coin(&mut rng, Some(&txpool.database)); + let (_, coin_input) = context.setup_coin(); let tx1 = TransactionBuilder::script(vec![], vec![]) .gas_price(10) @@ -296,26 +274,27 @@ async fn higher_priced_tx_removes_lower_priced_tx() { .finalize_as_transaction(); let tx1_id = tx1.id(&ChainId::default()); - let tx1 = check_unwrap_tx(tx1, db.clone(), &txpool.config).await; + let mut txpool = context.build(); + let tx1 = check_unwrap_tx(tx1, &txpool.config).await; txpool - .insert_inner(tx1.clone()) 
+ .insert_single(tx1.clone()) .expect("Tx1 should be Ok, got Err"); - let tx2 = check_unwrap_tx(tx2, db.clone(), &txpool.config).await; + let tx2 = check_unwrap_tx(tx2, &txpool.config).await; - let vec = txpool.insert_inner(tx2).expect("Tx2 should be Ok, got Err"); + let vec = txpool + .insert_single(tx2) + .expect("Tx2 should be Ok, got Err"); assert_eq!(vec.removed[0].id(), tx1_id, "Tx1 id should be removed"); } #[tokio::test] async fn underpriced_tx1_not_included_coin_collision() { - let mut rng = StdRng::seed_from_u64(0); - let db = MockDb::default(); - let mut txpool = TxPool::new(Default::default(), db.clone()); + let mut context = TextContext::default(); - let (_, gas_coin) = setup_coin(&mut rng, Some(&txpool.database)); - let (output, unset_input) = create_output_and_input(&mut rng, 10); + let (_, gas_coin) = context.setup_coin(); + let (output, unset_input) = context.create_output_and_input(10); let tx1 = TransactionBuilder::script(vec![], vec![]) .gas_price(20) .script_gas_limit(GAS_LIMIT) @@ -337,19 +316,20 @@ async fn underpriced_tx1_not_included_coin_collision() { .add_input(input) .finalize_as_transaction(); - let tx1_checked = check_unwrap_tx(tx1.clone(), db.clone(), txpool.config()).await; + let mut txpool = context.build(); + let tx1_checked = check_unwrap_tx(tx1.clone(), txpool.config()).await; txpool - .insert_inner(tx1_checked) + .insert_single(tx1_checked) .expect("Tx1 should be Ok, got Err"); - let tx2_checked = check_unwrap_tx(tx2.clone(), db.clone(), txpool.config()).await; + let tx2_checked = check_unwrap_tx(tx2.clone(), txpool.config()).await; txpool - .insert_inner(tx2_checked) + .insert_single(tx2_checked) .expect("Tx2 should be Ok, got Err"); - let tx3_checked = check_unwrap_tx(tx3, db.clone(), txpool.config()).await; + let tx3_checked = check_unwrap_tx(tx3, txpool.config()).await; let err = txpool - .insert_inner(tx3_checked) + .insert_single(tx3_checked) .expect_err("Tx3 should be Err, got Ok"); assert!(matches!( err.downcast_ref::(), 
@@ -359,12 +339,10 @@ async fn underpriced_tx1_not_included_coin_collision() { #[tokio::test] async fn overpriced_tx_contract_input_not_inserted() { - let mut rng = StdRng::seed_from_u64(0); - let db = MockDb::default(); - let mut txpool = TxPool::new(Default::default(), db.clone()); + let mut context = TextContext::default(); let contract_id = Contract::EMPTY_CONTRACT_ID; - let (_, gas_funds) = setup_coin(&mut rng, Some(&txpool.database)); + let (_, gas_funds) = context.setup_coin(); let tx1 = TransactionBuilder::create( Default::default(), Default::default(), @@ -375,7 +353,7 @@ async fn overpriced_tx_contract_input_not_inserted() { .add_output(create_contract_output(contract_id)) .finalize_as_transaction(); - let (_, gas_funds) = setup_coin(&mut rng, Some(&txpool.database)); + let (_, gas_funds) = context.setup_coin(); let tx2 = TransactionBuilder::script(vec![], vec![]) .gas_price(11) .script_gas_limit(GAS_LIMIT) @@ -388,12 +366,15 @@ async fn overpriced_tx_contract_input_not_inserted() { .add_output(Output::contract(1, Default::default(), Default::default())) .finalize_as_transaction(); - let tx1 = check_unwrap_tx(tx1, db.clone(), &txpool.config).await; - txpool.insert_inner(tx1).expect("Tx1 should be Ok, got err"); + let mut txpool = context.build(); + let tx1 = check_unwrap_tx(tx1, &txpool.config).await; + txpool + .insert_single(tx1) + .expect("Tx1 should be Ok, got err"); - let tx2 = check_unwrap_tx(tx2, db.clone(), &txpool.config).await; + let tx2 = check_unwrap_tx(tx2, &txpool.config).await; let err = txpool - .insert_inner(tx2) + .insert_single(tx2) .expect_err("Tx2 should be Err, got Ok"); assert!( matches!( @@ -406,12 +387,10 @@ async fn overpriced_tx_contract_input_not_inserted() { #[tokio::test] async fn dependent_contract_input_inserted() { - let mut rng = StdRng::seed_from_u64(0); - let db = MockDb::default(); - let mut txpool = TxPool::new(Default::default(), db.clone()); + let mut context = TextContext::default(); let contract_id = 
Contract::EMPTY_CONTRACT_ID; - let (_, gas_funds) = setup_coin(&mut rng, Some(&txpool.database)); + let (_, gas_funds) = context.setup_coin(); let tx1 = TransactionBuilder::create( Default::default(), Default::default(), @@ -422,7 +401,7 @@ async fn dependent_contract_input_inserted() { .add_output(create_contract_output(contract_id)) .finalize_as_transaction(); - let (_, gas_funds) = setup_coin(&mut rng, Some(&txpool.database)); + let (_, gas_funds) = context.setup_coin(); let tx2 = TransactionBuilder::script(vec![], vec![]) .gas_price(10) .script_gas_limit(GAS_LIMIT) @@ -435,21 +414,24 @@ async fn dependent_contract_input_inserted() { .add_output(Output::contract(1, Default::default(), Default::default())) .finalize_as_transaction(); - let tx1 = check_unwrap_tx(tx1, db.clone(), &txpool.config).await; - let tx2 = check_unwrap_tx(tx2, db.clone(), &txpool.config).await; - txpool.insert_inner(tx1).expect("Tx1 should be Ok, got Err"); - txpool.insert_inner(tx2).expect("Tx2 should be Ok, got Err"); + let mut txpool = context.build(); + let tx1 = check_unwrap_tx(tx1, &txpool.config).await; + let tx2 = check_unwrap_tx(tx2, &txpool.config).await; + txpool + .insert_single(tx1) + .expect("Tx1 should be Ok, got Err"); + txpool + .insert_single(tx2) + .expect("Tx2 should be Ok, got Err"); } #[tokio::test] async fn more_priced_tx3_removes_tx1_and_dependent_tx2() { - let mut rng = StdRng::seed_from_u64(0); - let db = MockDb::default(); - let mut txpool = TxPool::new(Default::default(), db.clone()); + let mut context = TextContext::default(); - let (_, gas_coin) = setup_coin(&mut rng, Some(&txpool.database)); + let (_, gas_coin) = context.setup_coin(); - let (output, unset_input) = create_output_and_input(&mut rng, 10); + let (output, unset_input) = context.create_output_and_input(10); let tx1 = TransactionBuilder::script(vec![], vec![]) .gas_price(10) .script_gas_limit(GAS_LIMIT) @@ -473,17 +455,20 @@ async fn more_priced_tx3_removes_tx1_and_dependent_tx2() { let tx1_id = 
tx1.id(&ChainId::default()); let tx2_id = tx2.id(&ChainId::default()); - let tx1 = check_unwrap_tx(tx1, db.clone(), &txpool.config).await; - let tx2 = check_unwrap_tx(tx2, db.clone(), &txpool.config).await; - let tx3 = check_unwrap_tx(tx3, db.clone(), &txpool.config).await; + let mut txpool = context.build(); + let tx1 = check_unwrap_tx(tx1, &txpool.config).await; + let tx2 = check_unwrap_tx(tx2, &txpool.config).await; + let tx3 = check_unwrap_tx(tx3, &txpool.config).await; txpool - .insert_inner(tx1.clone()) + .insert_single(tx1.clone()) .expect("Tx1 should be OK, got Err"); txpool - .insert_inner(tx2.clone()) + .insert_single(tx2.clone()) .expect("Tx2 should be OK, got Err"); - let vec = txpool.insert_inner(tx3).expect("Tx3 should be OK, got Err"); + let vec = txpool + .insert_single(tx3) + .expect("Tx3 should be OK, got Err"); assert_eq!( vec.removed.len(), 2, @@ -495,11 +480,9 @@ async fn more_priced_tx3_removes_tx1_and_dependent_tx2() { #[tokio::test] async fn more_priced_tx2_removes_tx1_and_more_priced_tx3_removes_tx2() { - let mut rng = StdRng::seed_from_u64(0); - let db = MockDb::default(); - let mut txpool = TxPool::new(Default::default(), db.clone()); + let mut context = TextContext::default(); - let (_, gas_coin) = setup_coin(&mut rng, Some(&txpool.database)); + let (_, gas_coin) = context.setup_coin(); let tx1 = TransactionBuilder::script(vec![], vec![]) .gas_price(10) @@ -519,14 +502,21 @@ async fn more_priced_tx2_removes_tx1_and_more_priced_tx3_removes_tx2() { .add_input(gas_coin) .finalize_as_transaction(); - let tx1 = check_unwrap_tx(tx1, db.clone(), &txpool.config).await; - let tx2 = check_unwrap_tx(tx2, db.clone(), &txpool.config).await; - let tx3 = check_unwrap_tx(tx3, db.clone(), &txpool.config).await; + let mut txpool = context.build(); + let tx1 = check_unwrap_tx(tx1, &txpool.config).await; + let tx2 = check_unwrap_tx(tx2, &txpool.config).await; + let tx3 = check_unwrap_tx(tx3, &txpool.config).await; - txpool.insert_inner(tx1).expect("Tx1 
should be OK, got Err"); - let squeezed = txpool.insert_inner(tx2).expect("Tx2 should be OK, got Err"); + txpool + .insert_single(tx1) + .expect("Tx1 should be OK, got Err"); + let squeezed = txpool + .insert_single(tx2) + .expect("Tx2 should be OK, got Err"); assert_eq!(squeezed.removed.len(), 1); - let squeezed = txpool.insert_inner(tx3).expect("Tx3 should be OK, got Err"); + let squeezed = txpool + .insert_single(tx3) + .expect("Tx3 should be OK, got Err"); assert_eq!( squeezed.removed.len(), 1, @@ -536,35 +526,33 @@ async fn more_priced_tx2_removes_tx1_and_more_priced_tx3_removes_tx2() { #[tokio::test] async fn tx_limit_hit() { - let mut rng = StdRng::seed_from_u64(0); - let db = MockDb::default(); - let mut txpool = TxPool::new( - Config { - max_tx: 1, - ..Default::default() - }, - db.clone(), - ); + let mut context = TextContext::default().config(Config { + max_tx: 1, + ..Default::default() + }); - let (_, gas_coin) = setup_coin(&mut rng, Some(&txpool.database)); + let (_, gas_coin) = context.setup_coin(); let tx1 = TransactionBuilder::script(vec![], vec![]) .script_gas_limit(GAS_LIMIT) .add_input(gas_coin) .add_output(create_coin_output()) .finalize_as_transaction(); - let (_, gas_coin) = setup_coin(&mut rng, Some(&txpool.database)); + let (_, gas_coin) = context.setup_coin(); let tx2 = TransactionBuilder::script(vec![], vec![]) .script_gas_limit(GAS_LIMIT) .add_input(gas_coin) .finalize_as_transaction(); - let tx1 = check_unwrap_tx(tx1, db.clone(), &txpool.config).await; - let tx2 = check_unwrap_tx(tx2, db.clone(), &txpool.config).await; - txpool.insert_inner(tx1).expect("Tx1 should be Ok, got Err"); + let mut txpool = context.build(); + let tx1 = check_unwrap_tx(tx1, &txpool.config).await; + let tx2 = check_unwrap_tx(tx2, &txpool.config).await; + txpool + .insert_single(tx1) + .expect("Tx1 should be Ok, got Err"); let err = txpool - .insert_inner(tx2) + .insert_single(tx2) .expect_err("Tx2 should be Err, got Ok"); assert!(matches!( err.downcast_ref::(), @@ 
-574,18 +562,13 @@ async fn tx_limit_hit() { #[tokio::test] async fn tx_depth_hit() { - let mut rng = StdRng::seed_from_u64(0); - let db = MockDb::default(); - let mut txpool = TxPool::new( - Config { - max_depth: 2, - ..Default::default() - }, - db.clone(), - ); + let mut context = TextContext::default().config(Config { + max_depth: 2, + ..Default::default() + }); - let (_, gas_coin) = setup_coin(&mut rng, Some(&txpool.database)); - let (output, unset_input) = create_output_and_input(&mut rng, 10_000); + let (_, gas_coin) = context.setup_coin(); + let (output, unset_input) = context.create_output_and_input(10_000); let tx1 = TransactionBuilder::script(vec![], vec![]) .script_gas_limit(GAS_LIMIT) .add_input(gas_coin) @@ -593,7 +576,7 @@ async fn tx_depth_hit() { .finalize_as_transaction(); let input = unset_input.into_input(UtxoId::new(tx1.id(&Default::default()), 0)); - let (output, unset_input) = create_output_and_input(&mut rng, 5_000); + let (output, unset_input) = context.create_output_and_input(5_000); let tx2 = TransactionBuilder::script(vec![], vec![]) .script_gas_limit(GAS_LIMIT) .add_input(input) @@ -606,15 +589,20 @@ async fn tx_depth_hit() { .add_input(input) .finalize_as_transaction(); - let tx1 = check_unwrap_tx(tx1, db.clone(), &txpool.config).await; - let tx2 = check_unwrap_tx(tx2, db.clone(), &txpool.config).await; - let tx3 = check_unwrap_tx(tx3, db.clone(), &txpool.config).await; + let mut txpool = context.build(); + let tx1 = check_unwrap_tx(tx1, &txpool.config).await; + let tx2 = check_unwrap_tx(tx2, &txpool.config).await; + let tx3 = check_unwrap_tx(tx3, &txpool.config).await; - txpool.insert_inner(tx1).expect("Tx1 should be OK, got Err"); - txpool.insert_inner(tx2).expect("Tx2 should be OK, got Err"); + txpool + .insert_single(tx1) + .expect("Tx1 should be OK, got Err"); + txpool + .insert_single(tx2) + .expect("Tx2 should be OK, got Err"); let err = txpool - .insert_inner(tx3) + .insert_single(tx3) .expect_err("Tx3 should be Err, got Ok"); 
assert!(matches!( err.downcast_ref::(), @@ -624,25 +612,23 @@ async fn tx_depth_hit() { #[tokio::test] async fn sorted_out_tx1_2_4() { - let mut rng = StdRng::seed_from_u64(0); - let db = MockDb::default(); - let mut txpool = TxPool::new(Default::default(), db.clone()); + let mut context = TextContext::default(); - let (_, gas_coin) = setup_coin(&mut rng, Some(&txpool.database)); + let (_, gas_coin) = context.setup_coin(); let tx1 = TransactionBuilder::script(vec![], vec![]) .gas_price(10) .script_gas_limit(GAS_LIMIT) .add_input(gas_coin) .finalize_as_transaction(); - let (_, gas_coin) = setup_coin(&mut rng, Some(&txpool.database)); + let (_, gas_coin) = context.setup_coin(); let tx2 = TransactionBuilder::script(vec![], vec![]) .gas_price(9) .script_gas_limit(GAS_LIMIT) .add_input(gas_coin) .finalize_as_transaction(); - let (_, gas_coin) = setup_coin(&mut rng, Some(&txpool.database)); + let (_, gas_coin) = context.setup_coin(); let tx3 = TransactionBuilder::script(vec![], vec![]) .gas_price(20) .script_gas_limit(GAS_LIMIT) @@ -653,13 +639,20 @@ async fn sorted_out_tx1_2_4() { let tx2_id = tx2.id(&ChainId::default()); let tx3_id = tx3.id(&ChainId::default()); - let tx1 = check_unwrap_tx(tx1, db.clone(), &txpool.config).await; - let tx2 = check_unwrap_tx(tx2, db.clone(), &txpool.config).await; - let tx3 = check_unwrap_tx(tx3, db.clone(), &txpool.config).await; + let mut txpool = context.build(); + let tx1 = check_unwrap_tx(tx1, &txpool.config).await; + let tx2 = check_unwrap_tx(tx2, &txpool.config).await; + let tx3 = check_unwrap_tx(tx3, &txpool.config).await; - txpool.insert_inner(tx1).expect("Tx1 should be Ok, got Err"); - txpool.insert_inner(tx2).expect("Tx2 should be Ok, got Err"); - txpool.insert_inner(tx3).expect("Tx4 should be Ok, got Err"); + txpool + .insert_single(tx1) + .expect("Tx1 should be Ok, got Err"); + txpool + .insert_single(tx2) + .expect("Tx2 should be Ok, got Err"); + txpool + .insert_single(tx3) + .expect("Tx4 should be Ok, got Err"); let txs = 
txpool.sorted_includable().collect::>(); @@ -671,12 +664,10 @@ async fn sorted_out_tx1_2_4() { #[tokio::test] async fn find_dependent_tx1_tx2() { - let mut rng = StdRng::seed_from_u64(0); - let db = MockDb::default(); - let mut txpool = TxPool::new(Default::default(), db.clone()); + let mut context = TextContext::default(); - let (_, gas_coin) = setup_coin(&mut rng, Some(&txpool.database)); - let (output, unset_input) = create_output_and_input(&mut rng, 10_000); + let (_, gas_coin) = context.setup_coin(); + let (output, unset_input) = context.create_output_and_input(10_000); let tx1 = TransactionBuilder::script(vec![], vec![]) .gas_price(11) .script_gas_limit(GAS_LIMIT) @@ -685,7 +676,7 @@ async fn find_dependent_tx1_tx2() { .finalize_as_transaction(); let input = unset_input.into_input(UtxoId::new(tx1.id(&Default::default()), 0)); - let (output, unset_input) = create_output_and_input(&mut rng, 7_500); + let (output, unset_input) = context.create_output_and_input(7_500); let tx2 = TransactionBuilder::script(vec![], vec![]) .gas_price(10) .script_gas_limit(GAS_LIMIT) @@ -704,13 +695,20 @@ async fn find_dependent_tx1_tx2() { let tx2_id = tx2.id(&ChainId::default()); let tx3_id = tx3.id(&ChainId::default()); - let tx1 = check_unwrap_tx(tx1, db.clone(), &txpool.config).await; - let tx2 = check_unwrap_tx(tx2, db.clone(), &txpool.config).await; - let tx3 = check_unwrap_tx(tx3, db.clone(), &txpool.config).await; + let mut txpool = context.build(); + let tx1 = check_unwrap_tx(tx1, &txpool.config).await; + let tx2 = check_unwrap_tx(tx2, &txpool.config).await; + let tx3 = check_unwrap_tx(tx3, &txpool.config).await; - txpool.insert_inner(tx1).expect("Tx0 should be Ok, got Err"); - txpool.insert_inner(tx2).expect("Tx1 should be Ok, got Err"); - let tx3_result = txpool.insert_inner(tx3).expect("Tx2 should be Ok, got Err"); + txpool + .insert_single(tx1) + .expect("Tx0 should be Ok, got Err"); + txpool + .insert_single(tx2) + .expect("Tx1 should be Ok, got Err"); + let 
tx3_result = txpool + .insert_single(tx3) + .expect("Tx2 should be Ok, got Err"); let mut seen = HashMap::new(); txpool @@ -728,33 +726,28 @@ async fn find_dependent_tx1_tx2() { #[tokio::test] async fn tx_at_least_min_gas_price_is_insertable() { - let mut rng = StdRng::seed_from_u64(0); - let db = MockDb::default(); - let mut txpool = TxPool::new( - Config { - min_gas_price: 10, - ..Default::default() - }, - db.clone(), - ); + let mut context = TextContext::default().config(Config { + min_gas_price: 10, + ..Default::default() + }); - let (_, gas_coin) = setup_coin(&mut rng, Some(&txpool.database)); + let (_, gas_coin) = context.setup_coin(); let tx = TransactionBuilder::script(vec![], vec![]) .gas_price(10) .script_gas_limit(GAS_LIMIT) .add_input(gas_coin) .finalize_as_transaction(); - let tx = check_unwrap_tx(tx, txpool.database.clone(), &txpool.config).await; - txpool.insert_inner(tx).expect("Tx should be Ok, got Err"); + let mut txpool = context.build(); + let tx = check_unwrap_tx(tx, &txpool.config).await; + txpool.insert_single(tx).expect("Tx should be Ok, got Err"); } #[tokio::test] async fn tx_below_min_gas_price_is_not_insertable() { - let mut rng = StdRng::seed_from_u64(0); - let db = MockDb::default(); + let mut context = TextContext::default(); - let (_, gas_coin) = setup_coin(&mut rng, Some(&db)); + let gas_coin = context.random_predicate(AssetId::BASE, TEST_COIN_AMOUNT, None); let tx = TransactionBuilder::script(vec![], vec![]) .gas_price(10) .script_gas_limit(GAS_LIMIT) @@ -763,7 +756,6 @@ async fn tx_below_min_gas_price_is_not_insertable() { let err = check_tx( tx, - db, &Config { min_gas_price: 11, ..Default::default() @@ -780,6 +772,7 @@ async fn tx_below_min_gas_price_is_not_insertable() { #[tokio::test] async fn tx_inserted_into_pool_when_input_message_id_exists_in_db() { + let mut context = TextContext::default(); let (message, input) = create_message_predicate_from_message(5000, 0); let tx = TransactionBuilder::script(vec![], vec![]) @@ -787,14 
+780,13 @@ async fn tx_inserted_into_pool_when_input_message_id_exists_in_db() { .add_input(input) .finalize_as_transaction(); - let db = MockDb::default(); - db.insert_message(message); + context.database_mut().insert_message(message); let tx1_id = tx.id(&ChainId::default()); - let mut txpool = TxPool::new(Default::default(), db.clone()); + let mut txpool = context.build(); - let tx = check_unwrap_tx(tx, db.clone(), &txpool.config).await; - txpool.insert_inner(tx).expect("should succeed"); + let tx = check_unwrap_tx(tx, &txpool.config).await; + txpool.insert_single(tx).expect("should succeed"); let tx_info = txpool.find_one(&tx1_id).unwrap(); assert_eq!(tx_info.tx().id(), tx1_id); @@ -802,6 +794,7 @@ async fn tx_inserted_into_pool_when_input_message_id_exists_in_db() { #[tokio::test] async fn tx_rejected_when_input_message_id_is_spent() { + let mut context = TextContext::default(); let (message, input) = create_message_predicate_from_message(5_000, 0); let tx = TransactionBuilder::script(vec![], vec![]) @@ -809,13 +802,12 @@ async fn tx_rejected_when_input_message_id_is_spent() { .add_input(input) .finalize_as_transaction(); - let db = MockDb::default(); - db.insert_message(message.clone()); - db.spend_message(*message.id()); - let mut txpool = TxPool::new(Default::default(), db.clone()); + context.database_mut().insert_message(message.clone()); + context.database_mut().spend_message(*message.id()); + let mut txpool = context.build(); - let tx = check_unwrap_tx(tx, db.clone(), &txpool.config).await; - let err = txpool.insert_inner(tx).expect_err("should fail"); + let tx = check_unwrap_tx(tx, &txpool.config).await; + let err = txpool.insert_single(tx).expect_err("should fail"); // check error assert!(matches!( @@ -826,18 +818,18 @@ async fn tx_rejected_when_input_message_id_is_spent() { #[tokio::test] async fn tx_rejected_from_pool_when_input_message_id_does_not_exist_in_db() { + let context = TextContext::default(); let (message, input) = 
create_message_predicate_from_message(5000, 0); let tx = TransactionBuilder::script(vec![], vec![]) .script_gas_limit(GAS_LIMIT) .add_input(input) .finalize_as_transaction(); - let db = MockDb::default(); // Do not insert any messages into the DB to ensure there is no matching message for the // tx. - let mut txpool = TxPool::new(Default::default(), db.clone()); - let tx = check_unwrap_tx(tx, db.clone(), &txpool.config).await; - let err = txpool.insert_inner(tx).expect_err("should fail"); + let mut txpool = context.build(); + let tx = check_unwrap_tx(tx, &txpool.config).await; + let err = txpool.insert_single(tx).expect_err("should fail"); // check error assert!(matches!( @@ -849,6 +841,7 @@ async fn tx_rejected_from_pool_when_input_message_id_does_not_exist_in_db() { #[tokio::test] async fn tx_rejected_from_pool_when_gas_price_is_lower_than_another_tx_with_same_message_id( ) { + let mut context = TextContext::default(); let message_amount = 10_000; let gas_price_high = 2u64; let gas_price_low = 1u64; @@ -867,25 +860,24 @@ async fn tx_rejected_from_pool_when_gas_price_is_lower_than_another_tx_with_same .add_input(conflicting_message_input) .finalize_as_transaction(); - let db = MockDb::default(); - db.insert_message(message.clone()); + context.database_mut().insert_message(message.clone()); - let mut txpool = TxPool::new(Default::default(), db.clone()); + let mut txpool = context.build(); let tx_high_id = tx_high.id(&ChainId::default()); - let tx_high = check_unwrap_tx(tx_high, db.clone(), &txpool.config).await; + let tx_high = check_unwrap_tx(tx_high, &txpool.config).await; // Insert a tx for the message id with a high gas amount txpool - .insert_inner(tx_high) + .insert_single(tx_high) .expect("expected successful insertion"); - let tx_low = check_unwrap_tx(tx_low, db.clone(), &txpool.config).await; + let tx_low = check_unwrap_tx(tx_low, &txpool.config).await; // Insert a tx for the message id with a low gas amount // Because the new transaction's id matches an 
existing transaction, we compare the gas // prices of both the new and existing transactions. Since the existing transaction's gas // price is higher, we must now reject the new transaction. - let err = txpool.insert_inner(tx_low).expect_err("expected failure"); + let err = txpool.insert_single(tx_low).expect_err("expected failure"); // check error assert!(matches!( @@ -896,6 +888,7 @@ async fn tx_rejected_from_pool_when_gas_price_is_lower_than_another_tx_with_same #[tokio::test] async fn higher_priced_tx_squeezes_out_lower_priced_tx_with_same_message_id() { + let mut context = TextContext::default(); let message_amount = 10_000; let gas_price_high = 2u64; let gas_price_low = 1u64; @@ -909,13 +902,12 @@ async fn higher_priced_tx_squeezes_out_lower_priced_tx_with_same_message_id() { .add_input(conflicting_message_input.clone()) .finalize_as_transaction(); - let db = MockDb::default(); - db.insert_message(message); + context.database_mut().insert_message(message); - let mut txpool = TxPool::new(Default::default(), db.clone()); + let mut txpool = context.build(); let tx_low_id = tx_low.id(&ChainId::default()); - let tx_low = check_unwrap_tx(tx_low, db.clone(), &txpool.config).await; - txpool.insert_inner(tx_low).expect("should succeed"); + let tx_low = check_unwrap_tx(tx_low, &txpool.config).await; + txpool.insert_single(tx_low).expect("should succeed"); // Insert a tx for the message id with a high gas amount // Because the new transaction's id matches an existing transaction, we compare the gas @@ -926,8 +918,8 @@ async fn higher_priced_tx_squeezes_out_lower_priced_tx_with_same_message_id() { .script_gas_limit(GAS_LIMIT) .add_input(conflicting_message_input) .finalize_as_transaction(); - let tx_high = check_unwrap_tx(tx_high, db.clone(), &txpool.config).await; - let squeezed_out_txs = txpool.insert_inner(tx_high).expect("should succeed"); + let tx_high = check_unwrap_tx(tx_high, &txpool.config).await; + let squeezed_out_txs = 
txpool.insert_single(tx_high).expect("should succeed"); assert_eq!(squeezed_out_txs.removed.len(), 1); assert_eq!(squeezed_out_txs.removed[0].id(), tx_low_id,); @@ -941,6 +933,7 @@ async fn message_of_squeezed_out_tx_can_be_resubmitted_at_lower_gas_price() { // tx3 (message 2) gas_price 1 // works since tx1 is no longer part of txpool state even though gas price is less + let mut context = TextContext::default(); let (message_1, message_input_1) = create_message_predicate_from_message(10_000, 0); let (message_2, message_input_2) = create_message_predicate_from_message(20_000, 1); @@ -964,38 +957,35 @@ async fn message_of_squeezed_out_tx_can_be_resubmitted_at_lower_gas_price() { .add_input(message_input_2) .finalize_as_transaction(); - let db = MockDb::default(); - db.insert_message(message_1); - db.insert_message(message_2); - let mut txpool = TxPool::new(Default::default(), db.clone()); + context.database_mut().insert_message(message_1); + context.database_mut().insert_message(message_2); + let mut txpool = context.build(); - let tx1 = check_unwrap_tx(tx1, db.clone(), &txpool.config).await; - let tx2 = check_unwrap_tx(tx2, db.clone(), &txpool.config).await; - let tx3 = check_unwrap_tx(tx3, db.clone(), &txpool.config).await; + let tx1 = check_unwrap_tx(tx1, &txpool.config).await; + let tx2 = check_unwrap_tx(tx2, &txpool.config).await; + let tx3 = check_unwrap_tx(tx3, &txpool.config).await; - txpool.insert_inner(tx1).expect("should succeed"); + txpool.insert_single(tx1).expect("should succeed"); - txpool.insert_inner(tx2).expect("should succeed"); + txpool.insert_single(tx2).expect("should succeed"); - txpool.insert_inner(tx3).expect("should succeed"); + txpool.insert_single(tx3).expect("should succeed"); } #[tokio::test] async fn predicates_with_incorrect_owner_fails() { - let mut rng = StdRng::seed_from_u64(0); - let db = MockDb::default(); - let mut coin = random_predicate(&mut rng, AssetId::BASE, TEST_COIN_AMOUNT, None); + let mut context = 
TextContext::default(); + let mut coin = context.random_predicate(AssetId::BASE, TEST_COIN_AMOUNT, None); if let Input::CoinPredicate(CoinPredicate { owner, .. }) = &mut coin { *owner = Address::zeroed(); } - let (_, gas_coin) = add_coin_to_state(coin, Some(&db.clone())); let tx = TransactionBuilder::script(vec![], vec![]) .script_gas_limit(GAS_LIMIT) - .add_input(gas_coin) + .add_input(coin) .finalize_as_transaction(); - let err = check_tx(tx, db.clone(), &Default::default()) + let err = check_tx(tx, &Default::default()) .await .expect_err("Transaction should be err, got ok"); @@ -1007,8 +997,7 @@ async fn predicates_with_incorrect_owner_fails() { #[tokio::test] async fn predicate_without_enough_gas_returns_out_of_gas() { - let mut rng = StdRng::seed_from_u64(0); - let db = MockDb::default(); + let mut context = TextContext::default(); let mut config = Config::default(); config .chain_config @@ -1020,23 +1009,22 @@ async fn predicate_without_enough_gas_returns_out_of_gas() { .consensus_parameters .tx_params .max_gas_per_tx = 10000; - let coin = custom_predicate( - &mut rng, - AssetId::BASE, - TEST_COIN_AMOUNT, - // forever loop - vec![op::jmp(RegId::ZERO)].into_iter().collect(), - None, - ) - .into_estimated(&config.chain_config.consensus_parameters); + let coin = context + .custom_predicate( + AssetId::BASE, + TEST_COIN_AMOUNT, + // forever loop + vec![op::jmp(RegId::ZERO)].into_iter().collect(), + None, + ) + .into_estimated(&config.chain_config.consensus_parameters); - let (_, gas_coin) = add_coin_to_state(coin, Some(&db.clone())); let tx = TransactionBuilder::script(vec![], vec![]) .script_gas_limit(GAS_LIMIT) - .add_input(gas_coin) + .add_input(coin) .finalize_as_transaction(); - let err = check_tx(tx, db.clone(), &Default::default()) + let err = check_tx(tx, &Default::default()) .await .expect_err("Transaction should be err, got ok"); @@ -1049,25 +1037,23 @@ async fn predicate_without_enough_gas_returns_out_of_gas() { #[tokio::test] async fn 
predicate_that_returns_false_is_invalid() { - let mut rng = StdRng::seed_from_u64(0); - let db = MockDb::default(); - let coin = custom_predicate( - &mut rng, - AssetId::BASE, - TEST_COIN_AMOUNT, - // forever loop - vec![op::ret(RegId::ZERO)].into_iter().collect(), - None, - ) - .into_default_estimated(); + let mut context = TextContext::default(); + let coin = context + .custom_predicate( + AssetId::BASE, + TEST_COIN_AMOUNT, + // forever loop + vec![op::ret(RegId::ZERO)].into_iter().collect(), + None, + ) + .into_default_estimated(); - let (_, gas_coin) = add_coin_to_state(coin, Some(&db.clone())); let tx = TransactionBuilder::script(vec![], vec![]) .script_gas_limit(GAS_LIMIT) - .add_input(gas_coin) + .add_input(coin) .finalize_as_transaction(); - let err = check_tx(tx, db.clone(), &Default::default()) + let err = check_tx(tx, &Default::default()) .await .expect_err("Transaction should be err, got ok"); diff --git a/crates/storage/src/transactional.rs b/crates/storage/src/transactional.rs index 854557bd117..31b4ac51fe3 100644 --- a/crates/storage/src/transactional.rs +++ b/crates/storage/src/transactional.rs @@ -79,10 +79,13 @@ impl StorageTransaction { /// Provides a view of the storage at the given height. /// It guarantees to be atomic, meaning the view is immutable to outside modifications. -pub trait AtomicView: Send + Sync { +pub trait AtomicView: Send + Sync { + /// The type of the storage view. + type View; + /// Returns the view of the storage at the given `height`. - fn view_at(&self, height: BlockHeight) -> StorageResult; + fn view_at(&self, height: BlockHeight) -> StorageResult; /// Returns the view of the storage for the latest block height. 
- fn latest_view(&self) -> View; + fn latest_view(&self) -> Self::View; } From ea69532f977ef37513fe7a8a310a73930092949d Mon Sep 17 00:00:00 2001 From: xgreenx Date: Mon, 8 Jan 2024 02:19:00 +0100 Subject: [PATCH 20/28] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f2d057265e4..0dbe2824bfd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,7 @@ Description of the upcoming release here. ### Changed +- [#1590](https://github.com/FuelLabs/fuel-core/pull/1590): Use `AtomicView` in the `TxPool` to read the state of the database during insertion of the transactions. - [#1585](https://github.com/FuelLabs/fuel-core/pull/1585): Let `NetworkBehaviour` macro generate `FuelBehaviorEvent` in p2p - [#1579](https://github.com/FuelLabs/fuel-core/pull/1579): The change extracts the off-chain-related logic from the executor and moves it to the GraphQL off-chain worker. It creates two new concepts - Off-chain and On-chain databases where the GraphQL worker has exclusive ownership of the database and may modify it without intersecting with the On-chain database. - [#1577](https://github.com/FuelLabs/fuel-core/pull/1577): Moved insertion of sealed blocks into the `BlockImporter` instead of the executor. 
From c5956a8ceed2f5c75a08e8913ab528ab4bd94dec Mon Sep 17 00:00:00 2001 From: xgreenx Date: Fri, 19 Jan 2024 03:12:56 +0100 Subject: [PATCH 21/28] Use "blueprint" instead of "structure" --- CHANGELOG.md | 34 ++++++------ Cargo.lock | 4 +- crates/fuel-core/src/database.rs | 24 ++++----- crates/fuel-core/src/database/block.rs | 8 +-- crates/fuel-core/src/database/coin.rs | 8 +-- crates/fuel-core/src/database/message.rs | 8 +-- crates/fuel-core/src/database/metadata.rs | 8 +-- crates/fuel-core/src/database/transactions.rs | 12 ++--- crates/services/relayer/src/ports.rs | 8 +-- .../src/{structure.rs => blueprint.rs} | 18 +++---- .../src/{structure => blueprint}/plain.rs | 28 +++++----- .../src/{structure => blueprint}/sparse.rs | 40 +++++++------- crates/storage/src/lib.rs | 2 +- crates/storage/src/structured_storage.rs | 52 +++++++++---------- .../src/structured_storage/balances.rs | 14 ++--- .../storage/src/structured_storage/blocks.rs | 8 +-- .../storage/src/structured_storage/coins.rs | 8 +-- .../src/structured_storage/contracts.rs | 16 +++--- .../src/structured_storage/merkle_data.rs | 8 +-- .../src/structured_storage/messages.rs | 12 ++--- .../src/structured_storage/receipts.rs | 8 +-- .../src/structured_storage/sealed_block.rs | 8 +-- .../storage/src/structured_storage/state.rs | 14 ++--- .../src/structured_storage/transactions.rs | 12 ++--- 24 files changed, 181 insertions(+), 181 deletions(-) rename crates/storage/src/{structure.rs => blueprint.rs} (87%) rename crates/storage/src/{structure => blueprint}/plain.rs (85%) rename crates/storage/src/{structure => blueprint}/sparse.rs (94%) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4ff19589e71..4aad039f8b5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,17 +18,17 @@ Description of the upcoming release here. 
#### Breaking - [#1593](https://github.com/FuelLabs/fuel-core/pull/1593) Make `Block` type a version-able enum -- [#1576](https://github.com/FuelLabs/fuel-core/pull/1576): The change moves the implementation of the storage traits for required tables from `fuel-core` to `fuel-core-storage` crate. The change also adds a more flexible configuration of the encoding/decoding per the table and allows the implementation of specific behaviors for the table in a much easier way. It unifies the encoding between database, SMTs, and iteration, preventing mismatching bytes representation on the Rust type system level. Plus, it increases the re-usage of the code by applying the same structure to other tables. +- [#1576](https://github.com/FuelLabs/fuel-core/pull/1576): The change moves the implementation of the storage traits for required tables from `fuel-core` to `fuel-core-storage` crate. The change also adds a more flexible configuration of the encoding/decoding per the table and allows the implementation of specific behaviors for the table in a much easier way. It unifies the encoding between database, SMTs, and iteration, preventing mismatching bytes representation on the Rust type system level. Plus, it increases the re-usage of the code by applying the same blueprint to other tables. It is a breaking PR because it changes database encoding/decoding for some tables. ### StructuredStorage - The change adds a new type `StructuredStorage`. It is a wrapper around the key-value storage that implements the storage traits(`StorageInspect`, `StorageMutate`, `StorageRead`, etc) for the tables with structure. This structure works in tandem with the `TableWithStructure` trait. The table may implement `TableWithStructure` specifying the structure, as an example: + The change adds a new type `StructuredStorage`. It is a wrapper around the key-value storage that implements the storage traits(`StorageInspect`, `StorageMutate`, `StorageRead`, etc) for the tables with blueprint. 
This blueprint works in tandem with the `TableWithBlueprint` trait. The table may implement `TableWithBlueprint` specifying the blueprint, as an example: ```rust - impl TableWithStructure for ContractsRawCode { - type Structure = Plain; + impl TableWithBlueprint for ContractsRawCode { + type Blueprint = Plain; fn column() -> Column { Column::ContractsRawCode @@ -36,13 +36,13 @@ Description of the upcoming release here. } ``` - It is a definition of the structure for the `ContractsRawCode` table. It has a plain structure meaning it simply encodes/decodes bytes and stores/loads them into/from the storage. As a key codec and value codec, it uses a `Raw` encoding/decoding that simplifies writing bytes and loads them back into the memory without applying any serialization or deserialization algorithm. + It is a definition of the blueprint for the `ContractsRawCode` table. It has a plain blueprint meaning it simply encodes/decodes bytes and stores/loads them into/from the storage. As a key codec and value codec, it uses a `Raw` encoding/decoding that simplifies writing bytes and loads them back into the memory without applying any serialization or deserialization algorithm. - If the table implements `TableWithStructure` and the selected codec satisfies all structure requirements, the corresponding storage traits for that table are implemented on the `StructuredStorage` type. + If the table implements `TableWithBlueprint` and the selected codec satisfies all blueprint requirements, the corresponding storage traits for that table are implemented on the `StructuredStorage` type. ### Codecs - Each structure allows customizing the key and value codecs. It allows the use of different codecs for different tables, taking into account the complexity and weight of the data and providing a way of more optimal implementation. + Each blueprint allows customizing the key and value codecs. 
It allows the use of different codecs for different tables, taking into account the complexity and weight of the data and providing a way of more optimal implementation. That property may be very useful to perform migration in an easier way. Plus, it also can be a `no_std` migration potentially allowing its fraud proving. @@ -50,8 +50,8 @@ Description of the upcoming release here. ```rust /// Define the table for V1 value encoding/decoding. - impl TableWithStructure for ContractsRawCodeV1 { - type Structure = Plain; + impl TableWithBlueprint for ContractsRawCodeV1 { + type Blueprint = Plain; fn column() -> Column { Column::ContractsRawCode @@ -62,8 +62,8 @@ Description of the upcoming release here. /// It uses `Postcard` codec for the value instead of `Raw` codec. /// /// # Dev-note: The columns is the same. - impl TableWithStructure for ContractsRawCodeV2 { - type Structure = Plain; + impl TableWithBlueprint for ContractsRawCodeV2 { + type Blueprint = Plain; fn column() -> Column { Column::ContractsRawCode @@ -81,15 +81,15 @@ Description of the upcoming release here. ### Structures - The structure of the table defines its behavior. As an example, a `Plain` structure simply encodes/decodes bytes and stores/loads them into/from the storage. The `SMT` structure builds a sparse merkle tree on top of the key-value pairs. + The blueprint of the table defines its behavior. As an example, a `Plain` blueprint simply encodes/decodes bytes and stores/loads them into/from the storage. The `SMT` blueprint builds a sparse merkle tree on top of the key-value pairs. - Implementing a structure one time, we can apply it to any table satisfying the requirements of this structure. It increases the re-usage of the code and minimizes duplication. + Implementing a blueprint one time, we can apply it to any table satisfying the requirements of this blueprint. It increases the re-usage of the code and minimizes duplication.
It can be useful if we decide to create global roots for all required tables that are used in fraud proving. ```rust - impl TableWithStructure for SpentMessages { - type Structure = Plain; + impl TableWithBlueprint for SpentMessages { + type Blueprint = Plain; fn column() -> Column { Column::SpentMessages @@ -99,8 +99,8 @@ Description of the upcoming release here. | \|/ - impl TableWithStructure for SpentMessages { - type Structure = + impl TableWithBlueprint for SpentMessages { + type Blueprint = Sparse; fn column() -> Column { diff --git a/Cargo.lock b/Cargo.lock index 82749705c8d..96c51e0ff38 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3986,7 +3986,7 @@ dependencies = [ "autocfg", "impl-tools-lib", "proc-macro-error", - "syn 2.0.43", + "syn 2.0.48", ] [[package]] @@ -3998,7 +3998,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.43", + "syn 2.0.48", ] [[package]] diff --git a/crates/fuel-core/src/database.rs b/crates/fuel-core/src/database.rs index eef1bd5bb95..8d4538b2d32 100644 --- a/crates/fuel-core/src/database.rs +++ b/crates/fuel-core/src/database.rs @@ -12,6 +12,7 @@ use fuel_core_chain_config::{ MessageConfig, }; use fuel_core_storage::{ + blueprint::Blueprint, codec::Decode, iter::IterDirection, kv_store::{ @@ -20,10 +21,9 @@ use fuel_core_storage::{ Value, WriteOperation, }, - structure::Structure, structured_storage::{ StructuredStorage, - TableWithStructure, + TableWithBlueprint, }, transactional::{ StorageTransaction, @@ -258,8 +258,8 @@ impl Database { direction: Option, ) -> impl Iterator> + '_ where - M: Mappable + TableWithStructure, - M::Structure: Structure, + M: Mappable + TableWithBlueprint, + M::Blueprint: Blueprint, { self.iter_all_filtered::, Vec>(None, None, direction) } @@ -269,8 +269,8 @@ impl Database { prefix: Option

, ) -> impl Iterator> + '_ where - M: Mappable + TableWithStructure, - M::Structure: Structure, + M: Mappable + TableWithBlueprint, + M::Blueprint: Blueprint, P: AsRef<[u8]>, { self.iter_all_filtered::(prefix, None, None) @@ -282,8 +282,8 @@ impl Database { direction: Option, ) -> impl Iterator> + '_ where - M: Mappable + TableWithStructure, - M::Structure: Structure, + M: Mappable + TableWithBlueprint, + M::Blueprint: Blueprint, S: AsRef<[u8]>, { self.iter_all_filtered::(None, start, direction) @@ -296,8 +296,8 @@ impl Database { direction: Option, ) -> impl Iterator> + '_ where - M: Mappable + TableWithStructure, - M::Structure: Structure, + M: Mappable + TableWithBlueprint, + M::Blueprint: Blueprint, P: AsRef<[u8]>, S: AsRef<[u8]>, { @@ -312,12 +312,12 @@ impl Database { .map(|val| { val.and_then(|(key, value)| { let key = - >::KeyCodec::decode( + >::KeyCodec::decode( key.as_slice(), ) .map_err(|e| StorageError::Codec(anyhow::anyhow!(e)))?; let value = - >::ValueCodec::decode( + >::ValueCodec::decode( value.as_slice(), ) .map_err(|e| StorageError::Codec(anyhow::anyhow!(e)))?; diff --git a/crates/fuel-core/src/database/block.rs b/crates/fuel-core/src/database/block.rs index 5e3e04469df..f270e581f6f 100644 --- a/crates/fuel-core/src/database/block.rs +++ b/crates/fuel-core/src/database/block.rs @@ -4,14 +4,14 @@ use crate::database::{ Error as DatabaseError, }; use fuel_core_storage::{ + blueprint::plain::Plain, codec::{ primitive::Primitive, raw::Raw, }, iter::IterDirection, not_found, - structure::plain::Plain, - structured_storage::TableWithStructure, + structured_storage::TableWithBlueprint, tables::{ merkle::{ DenseMerkleMetadata, @@ -62,8 +62,8 @@ impl Mappable for FuelBlockSecondaryKeyBlockHeights { type OwnedValue = Self::Value; } -impl TableWithStructure for FuelBlockSecondaryKeyBlockHeights { - type Structure = Plain, Raw>; +impl TableWithBlueprint for FuelBlockSecondaryKeyBlockHeights { + type Blueprint = Plain, Raw>; fn column() -> Column { 
Column::FuelBlockSecondaryKeyBlockHeights diff --git a/crates/fuel-core/src/database/coin.rs b/crates/fuel-core/src/database/coin.rs index 9a31ad3b07d..d1979c86ff0 100644 --- a/crates/fuel-core/src/database/coin.rs +++ b/crates/fuel-core/src/database/coin.rs @@ -4,6 +4,7 @@ use crate::database::{ }; use fuel_core_chain_config::CoinConfig; use fuel_core_storage::{ + blueprint::plain::Plain, codec::{ postcard::Postcard, primitive::utxo_id_to_bytes, @@ -11,8 +12,7 @@ use fuel_core_storage::{ }, iter::IterDirection, not_found, - structure::plain::Plain, - structured_storage::TableWithStructure, + structured_storage::TableWithBlueprint, tables::Coins, Error as StorageError, Mappable, @@ -52,8 +52,8 @@ impl Mappable for OwnedCoins { type OwnedValue = (); } -impl TableWithStructure for OwnedCoins { - type Structure = Plain; +impl TableWithBlueprint for OwnedCoins { + type Blueprint = Plain; fn column() -> Column { Column::OwnedCoins diff --git a/crates/fuel-core/src/database/message.rs b/crates/fuel-core/src/database/message.rs index 6c928924994..96ed1984479 100644 --- a/crates/fuel-core/src/database/message.rs +++ b/crates/fuel-core/src/database/message.rs @@ -4,6 +4,7 @@ use crate::database::{ }; use fuel_core_chain_config::MessageConfig; use fuel_core_storage::{ + blueprint::plain::Plain, codec::{ manual::Manual, postcard::Postcard, @@ -11,8 +12,7 @@ use fuel_core_storage::{ Encode, }, iter::IterDirection, - structure::plain::Plain, - structured_storage::TableWithStructure, + structured_storage::TableWithBlueprint, tables::{ Messages, SpentMessages, @@ -64,8 +64,8 @@ impl Decode for Manual { } } -impl TableWithStructure for OwnedMessageIds { - type Structure = Plain, Postcard>; +impl TableWithBlueprint for OwnedMessageIds { + type Blueprint = Plain, Postcard>; fn column() -> fuel_core_storage::column::Column { Column::OwnedMessageIds diff --git a/crates/fuel-core/src/database/metadata.rs b/crates/fuel-core/src/database/metadata.rs index a7bf078d053..665b72e42f8 100644 
--- a/crates/fuel-core/src/database/metadata.rs +++ b/crates/fuel-core/src/database/metadata.rs @@ -9,11 +9,11 @@ use crate::{ }; use fuel_core_chain_config::ChainConfig; use fuel_core_storage::{ + blueprint::plain::Plain, codec::postcard::Postcard, - structure::plain::Plain, structured_storage::{ StructuredStorage, - TableWithStructure, + TableWithBlueprint, }, Mappable, Result as StorageResult, @@ -34,11 +34,11 @@ where type OwnedValue = V; } -impl TableWithStructure for MetadataTable +impl TableWithBlueprint for MetadataTable where V: Clone, { - type Structure = Plain; + type Blueprint = Plain; fn column() -> Column { Column::Metadata diff --git a/crates/fuel-core/src/database/transactions.rs b/crates/fuel-core/src/database/transactions.rs index 1ac351870c3..027439c08c0 100644 --- a/crates/fuel-core/src/database/transactions.rs +++ b/crates/fuel-core/src/database/transactions.rs @@ -7,6 +7,7 @@ use core::{ mem::size_of, }; use fuel_core_storage::{ + blueprint::plain::Plain, codec::{ manual::Manual, postcard::Postcard, @@ -15,8 +16,7 @@ use fuel_core_storage::{ Encode, }, iter::IterDirection, - structure::plain::Plain, - structured_storage::TableWithStructure, + structured_storage::TableWithBlueprint, tables::Transactions, Mappable, Result as StorageResult, @@ -45,8 +45,8 @@ impl Mappable for OwnedTransactions { type OwnedValue = Self::Value; } -impl TableWithStructure for OwnedTransactions { - type Structure = Plain, Raw>; +impl TableWithBlueprint for OwnedTransactions { + type Blueprint = Plain, Raw>; fn column() -> Column { Column::TransactionsByOwnerBlockIdx @@ -63,8 +63,8 @@ impl Mappable for TransactionStatuses { type OwnedValue = Self::Value; } -impl TableWithStructure for TransactionStatuses { - type Structure = Plain; +impl TableWithBlueprint for TransactionStatuses { + type Blueprint = Plain; fn column() -> Column { Column::TransactionStatus diff --git a/crates/services/relayer/src/ports.rs b/crates/services/relayer/src/ports.rs index 
e4bea0c252a..2dbd210678c 100644 --- a/crates/services/relayer/src/ports.rs +++ b/crates/services/relayer/src/ports.rs @@ -2,13 +2,13 @@ use async_trait::async_trait; use fuel_core_storage::{ + blueprint::plain::Plain, codec::{ postcard::Postcard, primitive::Primitive, }, column::Column, - structure::plain::Plain, - structured_storage::TableWithStructure, + structured_storage::TableWithBlueprint, tables::Messages, transactional::Transactional, Error as StorageError, @@ -146,8 +146,8 @@ impl Mappable for RelayerMetadata { /// changed from a unit value. const METADATA_KEY: () = (); -impl TableWithStructure for RelayerMetadata { - type Structure = Plain>; +impl TableWithBlueprint for RelayerMetadata { + type Blueprint = Plain>; fn column() -> Column { Column::RelayerMetadata diff --git a/crates/storage/src/structure.rs b/crates/storage/src/blueprint.rs similarity index 87% rename from crates/storage/src/structure.rs rename to crates/storage/src/blueprint.rs index 067f17e698c..53bb1d853a6 100644 --- a/crates/storage/src/structure.rs +++ b/crates/storage/src/blueprint.rs @@ -1,7 +1,7 @@ //! The module defines structures for the [`Mappable`] tables. -//! Each table may have its structure that defines how it works with the storage. -//! The table may have a plain structure that simply works in CRUD mode, or it may be an SMT-based -//! structure that maintains a valid Merkle tree over the storage entries. +//! Each table may have its blueprint that defines how it works with the storage. +//! The table may have a plain blueprint that simply works in CRUD mode, or it may be an SMT-based +//! blueprint that maintains a valid Merkle tree over the storage entries. use crate::{ codec::{ @@ -22,14 +22,14 @@ pub mod sparse; /// This trait allows defining the agnostic implementation for all storage /// traits(`StorageInspect,` `StorageMutate,` etc) while the main logic is -/// hidden inside the structure. It allows quickly adding support for new +/// hidden inside the blueprint. 
It allows quickly adding support for new /// structures only by implementing the trait and reusing the existing -/// infrastructure in other places. It allows changing the structure on the +/// infrastructure in other places. It allows changing the blueprint on the /// fly in the definition of the table without affecting other areas of the codebase. /// -/// The structure is responsible for encoding/decoding(usually it is done via `KeyCodec` and `ValueCodec`) +/// The blueprint is responsible for encoding/decoding(usually it is done via `KeyCodec` and `ValueCodec`) /// the key and value and putting/extracting it to/from the storage. -pub trait Structure +pub trait Blueprint where M: Mappable, S: KeyValueStore, @@ -101,9 +101,9 @@ where } } -/// It is an extension of the structure that allows supporting batch operations. +/// It is an extension of the blueprint that allows supporting batch operations. /// Usually, they are more performant than initializing/inserting/removing values one by one. -pub trait SupportsBatching: Structure +pub trait SupportsBatching: Blueprint where M: Mappable, S: BatchOperations, diff --git a/crates/storage/src/structure/plain.rs b/crates/storage/src/blueprint/plain.rs similarity index 85% rename from crates/storage/src/structure/plain.rs rename to crates/storage/src/blueprint/plain.rs index d1a732b9a64..3eeac8bb510 100644 --- a/crates/storage/src/structure/plain.rs +++ b/crates/storage/src/blueprint/plain.rs @@ -1,9 +1,13 @@ -//! This module implements the plain structure for the storage. -//! The plain structure is the simplest one. It doesn't maintain any additional data structures +//! This module implements the plain blueprint for the storage. +//! The plain blueprint is the simplest one. It doesn't maintain any additional data structures //! and doesn't provide any additional functionality. It is just a key-value store that encodes/decodes //! the key and value and puts/takes them into/from the storage. 
use crate::{ + blueprint::{ + Blueprint, + SupportsBatching, + }, codec::{ Decode, Encode, @@ -15,23 +19,19 @@ use crate::{ KeyValueStore, WriteOperation, }, - structure::{ - Structure, - SupportsBatching, - }, - structured_storage::TableWithStructure, + structured_storage::TableWithBlueprint, Error as StorageError, Mappable, Result as StorageResult, }; -/// The type that represents the plain structure. +/// The type that represents the plain blueprint. /// The `KeyCodec` and `ValueCodec` are used to encode/decode the key and value. pub struct Plain { _marker: core::marker::PhantomData<(KeyCodec, ValueCodec)>, } -impl Structure for Plain +impl Blueprint for Plain where M: Mappable, S: KeyValueStore, @@ -95,8 +95,8 @@ where impl SupportsBatching for Plain where S: BatchOperations, - M: Mappable + TableWithStructure>, - M::Structure: Structure, + M: Mappable + TableWithBlueprint>, + M::Blueprint: Blueprint, { fn init( storage: &mut S, @@ -112,10 +112,10 @@ where set: &mut dyn Iterator, ) -> StorageResult<()> { storage.batch_write(&mut set.map(|(key, value)| { - let key_encoder = >::KeyCodec::encode(key); + let key_encoder = >::KeyCodec::encode(key); let key_bytes = key_encoder.as_bytes().to_vec(); let value = - >::ValueCodec::encode_as_value(value); + >::ValueCodec::encode_as_value(value); (key_bytes, column, WriteOperation::Insert(value)) })) } @@ -126,7 +126,7 @@ where set: &mut dyn Iterator, ) -> StorageResult<()> { storage.batch_write(&mut set.map(|key| { - let key_encoder = >::KeyCodec::encode(key); + let key_encoder = >::KeyCodec::encode(key); let key_bytes = key_encoder.as_bytes().to_vec(); (key_bytes, column, WriteOperation::Remove) })) diff --git a/crates/storage/src/structure/sparse.rs b/crates/storage/src/blueprint/sparse.rs similarity index 94% rename from crates/storage/src/structure/sparse.rs rename to crates/storage/src/blueprint/sparse.rs index 028eef57056..39768c2047d 100644 --- a/crates/storage/src/structure/sparse.rs +++ 
b/crates/storage/src/blueprint/sparse.rs @@ -1,9 +1,13 @@ -//! The module defines the `Sparse` structure for the storage. -//! The `Sparse` structure implements the sparse merkle tree on top of the storage. -//! It is like a [`Plain`](super::plain::Plain) structure that builds the sparse +//! The module defines the `Sparse` blueprint for the storage. +//! The `Sparse` blueprint implements the sparse merkle tree on top of the storage. +//! It is like a [`Plain`](super::plain::Plain) blueprint that builds the sparse //! merkle tree parallel to the normal storage and maintains it. use crate::{ + blueprint::{ + Blueprint, + SupportsBatching, + }, codec::{ Decode, Encode, @@ -16,13 +20,9 @@ use crate::{ StorageColumn, WriteOperation, }, - structure::{ - Structure, - SupportsBatching, - }, structured_storage::{ StructuredStorage, - TableWithStructure, + TableWithBlueprint, }, tables::merkle::SparseMerkleMetadata, Error as StorageError, @@ -58,11 +58,11 @@ pub trait PrimaryKey { fn primary_key(key: &Self::InputKey) -> &Self::OutputKey; } -/// The `Sparse` structure builds the storage as a [`Plain`](super::plain::Plain) -/// structure and maintains the sparse merkle tree by the `Metadata` and `Nodes` tables. +/// The `Sparse` blueprint builds the storage as a [`Plain`](super::plain::Plain) +/// blueprint and maintains the sparse merkle tree by the `Metadata` and `Nodes` tables. /// /// It uses the `KeyCodec` and `ValueCodec` to encode/decode the key and value in the -/// same way as a plain structure. +/// same way as a plain blueprint. /// /// The `Metadata` table stores the metadata of the tree(like a root of the tree), /// and the `Nodes` table stores the tree's nodes. 
The SMT is built over the encoded @@ -163,7 +163,7 @@ where } } -impl Structure +impl Blueprint for Sparse where M: Mappable, @@ -246,8 +246,8 @@ impl where S: KeyValueStore, M: Mappable - + TableWithStructure< - Structure = Sparse, + + TableWithBlueprint< + Blueprint = Sparse, >, Self: StorageMutate + StorageInspect, @@ -266,17 +266,17 @@ where } type NodeKeyCodec = - <::Structure as Structure>::KeyCodec; + <::Blueprint as Blueprint>::KeyCodec; type NodeValueCodec = - <::Structure as Structure>::ValueCodec; + <::Blueprint as Blueprint>::ValueCodec; impl SupportsBatching for Sparse where S: BatchOperations, M: Mappable - + TableWithStructure< - Structure = Sparse, + + TableWithBlueprint< + Blueprint = Sparse, >, KeyCodec: Encode + Decode, ValueCodec: Encode + Decode, @@ -285,9 +285,9 @@ where Key = MerkleRoot, Value = sparse::Primitive, OwnedValue = sparse::Primitive, - > + TableWithStructure, + > + TableWithBlueprint, KeyConverter: PrimaryKey, - Nodes::Structure: Structure, + Nodes::Blueprint: Blueprint, for<'a> StructuredStorage<&'a mut S>: StorageMutate + StorageMutate + StorageMutate, diff --git a/crates/storage/src/lib.rs b/crates/storage/src/lib.rs index 9460834cce4..facb1886609 100644 --- a/crates/storage/src/lib.rs +++ b/crates/storage/src/lib.rs @@ -21,11 +21,11 @@ pub use fuel_vm_private::{ }, }; +pub mod blueprint; pub mod codec; pub mod column; pub mod iter; pub mod kv_store; -pub mod structure; pub mod structured_storage; pub mod tables; #[cfg(feature = "test-helpers")] diff --git a/crates/storage/src/structured_storage.rs b/crates/storage/src/structured_storage.rs index 974c2ae768e..63647b03104 100644 --- a/crates/storage/src/structured_storage.rs +++ b/crates/storage/src/structured_storage.rs @@ -1,16 +1,16 @@ //! The module contains the [`StructuredStorage`] wrapper around the key-value storage -//! that implements the storage traits for the tables with structure. +//! that implements the storage traits for the tables with blueprint. 
use crate::{ + blueprint::{ + Blueprint, + SupportsBatching, + }, column::Column, kv_store::{ BatchOperations, KeyValueStore, }, - structure::{ - Structure, - SupportsBatching, - }, Error as StorageError, Mappable, StorageBatchMutate, @@ -31,19 +31,19 @@ pub mod sealed_block; pub mod state; pub mod transactions; -/// The table can implement this trait to indicate that it has a structure. +/// The table can implement this trait to indicate that it has a blueprint. /// It inherits the default implementation of the storage traits through the [`StructuredStorage`] /// for the table. -pub trait TableWithStructure: Mappable + Sized { - /// The type of the structure used by the table. - type Structure; +pub trait TableWithBlueprint: Mappable + Sized { + /// The type of the blueprint used by the table. + type Blueprint; /// The column occupied by the table. fn column() -> Column; } /// The wrapper around the key-value storage that implements the storage traits for the tables -/// with structure. +/// with blueprint. 
#[derive(Clone, Debug)] pub struct StructuredStorage { pub(crate) storage: S, @@ -71,33 +71,33 @@ impl AsMut for StructuredStorage { impl StorageInspect for StructuredStorage where S: KeyValueStore, - M: Mappable + TableWithStructure, - M::Structure: Structure, + M: Mappable + TableWithBlueprint, + M::Blueprint: Blueprint, { type Error = StorageError; fn get(&self, key: &M::Key) -> Result>, Self::Error> { - ::Structure::get(&self.storage, key, M::column()) + ::Blueprint::get(&self.storage, key, M::column()) .map(|value| value.map(Cow::Owned)) } fn contains_key(&self, key: &M::Key) -> Result { - ::Structure::exists(&self.storage, key, M::column()) + ::Blueprint::exists(&self.storage, key, M::column()) } } impl StorageMutate for StructuredStorage where S: KeyValueStore, - M: Mappable + TableWithStructure, - M::Structure: Structure, + M: Mappable + TableWithBlueprint, + M::Blueprint: Blueprint, { fn insert( &mut self, key: &M::Key, value: &M::Value, ) -> Result, Self::Error> { - ::Structure::replace( + ::Blueprint::replace( &mut self.storage, key, M::column(), @@ -106,18 +106,18 @@ where } fn remove(&mut self, key: &M::Key) -> Result, Self::Error> { - ::Structure::take(&mut self.storage, key, M::column()) + ::Blueprint::take(&mut self.storage, key, M::column()) } } impl StorageSize for StructuredStorage where S: KeyValueStore, - M: Mappable + TableWithStructure, - M::Structure: Structure, + M: Mappable + TableWithBlueprint, + M::Blueprint: Blueprint, { fn size_of_value(&self, key: &M::Key) -> Result, Self::Error> { - ::Structure::size_of_value( + ::Blueprint::size_of_value( &self.storage, key, M::column(), @@ -128,28 +128,28 @@ where impl StorageBatchMutate for StructuredStorage where S: BatchOperations, - M: Mappable + TableWithStructure, - M::Structure: SupportsBatching, + M: Mappable + TableWithBlueprint, + M::Blueprint: SupportsBatching, { fn init_storage( &mut self, set: &mut dyn Iterator, ) -> Result<(), Self::Error> { - ::Structure::init(&mut self.storage, 
M::column(), set) + ::Blueprint::init(&mut self.storage, M::column(), set) } fn insert_batch( &mut self, set: &mut dyn Iterator, ) -> Result<(), Self::Error> { - ::Structure::insert(&mut self.storage, M::column(), set) + ::Blueprint::insert(&mut self.storage, M::column(), set) } fn remove_batch( &mut self, set: &mut dyn Iterator, ) -> Result<(), Self::Error> { - ::Structure::remove(&mut self.storage, M::column(), set) + ::Blueprint::remove(&mut self.storage, M::column(), set) } } diff --git a/crates/storage/src/structured_storage/balances.rs b/crates/storage/src/structured_storage/balances.rs index fd2d274d713..2bd9019e9cc 100644 --- a/crates/storage/src/structured_storage/balances.rs +++ b/crates/storage/src/structured_storage/balances.rs @@ -1,16 +1,16 @@ //! The module contains implementations and tests for the `ContractsAssets` table. use crate::{ + blueprint::sparse::{ + PrimaryKey, + Sparse, + }, codec::{ manual::Manual, primitive::Primitive, }, column::Column, - structure::sparse::{ - PrimaryKey, - Sparse, - }, - structured_storage::TableWithStructure, + structured_storage::TableWithBlueprint, tables::{ merkle::{ ContractsAssetsMerkleData, @@ -35,8 +35,8 @@ impl PrimaryKey for KeyConverter { } } -impl TableWithStructure for ContractsAssets { - type Structure = Sparse< +impl TableWithBlueprint for ContractsAssets { + type Blueprint = Sparse< Manual, Primitive<8>, ContractsAssetsMerkleMetadata, diff --git a/crates/storage/src/structured_storage/blocks.rs b/crates/storage/src/structured_storage/blocks.rs index 2b2ba45c32c..f31cbef5800 100644 --- a/crates/storage/src/structured_storage/blocks.rs +++ b/crates/storage/src/structured_storage/blocks.rs @@ -1,18 +1,18 @@ //! The module contains implementations and tests for the `FuelBlocks` table. 
use crate::{ + blueprint::plain::Plain, codec::{ postcard::Postcard, raw::Raw, }, column::Column, - structure::plain::Plain, - structured_storage::TableWithStructure, + structured_storage::TableWithBlueprint, tables::FuelBlocks, }; -impl TableWithStructure for FuelBlocks { - type Structure = Plain; +impl TableWithBlueprint for FuelBlocks { + type Blueprint = Plain; fn column() -> Column { Column::FuelBlocks diff --git a/crates/storage/src/structured_storage/coins.rs b/crates/storage/src/structured_storage/coins.rs index e4e24d96ca5..53d45f6ca64 100644 --- a/crates/storage/src/structured_storage/coins.rs +++ b/crates/storage/src/structured_storage/coins.rs @@ -1,18 +1,18 @@ //! The module contains implementations and tests for the `Coins` table. use crate::{ + blueprint::plain::Plain, codec::{ postcard::Postcard, primitive::Primitive, }, column::Column, - structure::plain::Plain, - structured_storage::TableWithStructure, + structured_storage::TableWithBlueprint, tables::Coins, }; -impl TableWithStructure for Coins { - type Structure = Plain, Postcard>; +impl TableWithBlueprint for Coins { + type Blueprint = Plain, Postcard>; fn column() -> Column { Column::Coins diff --git a/crates/storage/src/structured_storage/contracts.rs b/crates/storage/src/structured_storage/contracts.rs index df79222ca42..5e935a2f078 100644 --- a/crates/storage/src/structured_storage/contracts.rs +++ b/crates/storage/src/structured_storage/contracts.rs @@ -1,16 +1,16 @@ //! The module contains implementations and tests for the contracts tables. use crate::{ + blueprint::plain::Plain, codec::{ postcard::Postcard, raw::Raw, }, column::Column, kv_store::KeyValueStore, - structure::plain::Plain, structured_storage::{ StructuredStorage, - TableWithStructure, + TableWithBlueprint, }, tables::{ ContractsInfo, @@ -26,8 +26,8 @@ use fuel_core_types::fuel_tx::ContractId; // and deserialization and uses `Raw` codec. 
Because the value is a contract byte code represented // by bytes, we don't use `serde::Deserialization` and `serde::Serialization` for `Vec`, // because we don't need to store the size of the contract. We store/load raw bytes. -impl TableWithStructure for ContractsRawCode { - type Structure = Plain; +impl TableWithBlueprint for ContractsRawCode { + type Blueprint = Plain; fn column() -> Column { Column::ContractsRawCode @@ -54,16 +54,16 @@ where } } -impl TableWithStructure for ContractsInfo { - type Structure = Plain; +impl TableWithBlueprint for ContractsInfo { + type Blueprint = Plain; fn column() -> Column { Column::ContractsInfo } } -impl TableWithStructure for ContractsLatestUtxo { - type Structure = Plain; +impl TableWithBlueprint for ContractsLatestUtxo { + type Blueprint = Plain; fn column() -> Column { Column::ContractsLatestUtxo diff --git a/crates/storage/src/structured_storage/merkle_data.rs b/crates/storage/src/structured_storage/merkle_data.rs index 27f40dff04d..b597be35f82 100644 --- a/crates/storage/src/structured_storage/merkle_data.rs +++ b/crates/storage/src/structured_storage/merkle_data.rs @@ -1,14 +1,14 @@ //! The module contains implementations and tests for merkle related tables. use crate::{ + blueprint::plain::Plain, codec::{ postcard::Postcard, primitive::Primitive, raw::Raw, }, column::Column, - structure::plain::Plain, - structured_storage::TableWithStructure, + structured_storage::TableWithBlueprint, tables::merkle::{ ContractsAssetsMerkleData, ContractsAssetsMerkleMetadata, @@ -24,8 +24,8 @@ macro_rules! 
merkle_table { merkle_table!($table, Raw); }; ($table:ident, $key_codec:ident) => { - impl TableWithStructure for $table { - type Structure = Plain<$key_codec, Postcard>; + impl TableWithBlueprint for $table { + type Blueprint = Plain<$key_codec, Postcard>; fn column() -> Column { Column::$table diff --git a/crates/storage/src/structured_storage/messages.rs b/crates/storage/src/structured_storage/messages.rs index 0d9390fe6be..08addab8ea5 100644 --- a/crates/storage/src/structured_storage/messages.rs +++ b/crates/storage/src/structured_storage/messages.rs @@ -1,29 +1,29 @@ //! The module contains implementations and tests for the messages tables. use crate::{ + blueprint::plain::Plain, codec::{ postcard::Postcard, raw::Raw, }, column::Column, - structure::plain::Plain, - structured_storage::TableWithStructure, + structured_storage::TableWithBlueprint, tables::{ Messages, SpentMessages, }, }; -impl TableWithStructure for Messages { - type Structure = Plain; +impl TableWithBlueprint for Messages { + type Blueprint = Plain; fn column() -> Column { Column::Messages } } -impl TableWithStructure for SpentMessages { - type Structure = Plain; +impl TableWithBlueprint for SpentMessages { + type Blueprint = Plain; fn column() -> Column { Column::SpentMessages diff --git a/crates/storage/src/structured_storage/receipts.rs b/crates/storage/src/structured_storage/receipts.rs index 0e78ee36fdb..5e40cd2e4db 100644 --- a/crates/storage/src/structured_storage/receipts.rs +++ b/crates/storage/src/structured_storage/receipts.rs @@ -1,18 +1,18 @@ //! The module contains implementations and tests for the `Receipts` table. 
use crate::{ + blueprint::plain::Plain, codec::{ postcard::Postcard, raw::Raw, }, column::Column, - structure::plain::Plain, - structured_storage::TableWithStructure, + structured_storage::TableWithBlueprint, tables::Receipts, }; -impl TableWithStructure for Receipts { - type Structure = Plain; +impl TableWithBlueprint for Receipts { + type Blueprint = Plain; fn column() -> Column { Column::Receipts diff --git a/crates/storage/src/structured_storage/sealed_block.rs b/crates/storage/src/structured_storage/sealed_block.rs index 2c201f7623d..c0fb6d8db21 100644 --- a/crates/storage/src/structured_storage/sealed_block.rs +++ b/crates/storage/src/structured_storage/sealed_block.rs @@ -1,18 +1,18 @@ //! The module contains implementations and tests for the `SealedBlockConsensus` table. use crate::{ + blueprint::plain::Plain, codec::{ postcard::Postcard, raw::Raw, }, column::Column, - structure::plain::Plain, - structured_storage::TableWithStructure, + structured_storage::TableWithBlueprint, tables::SealedBlockConsensus, }; -impl TableWithStructure for SealedBlockConsensus { - type Structure = Plain; +impl TableWithBlueprint for SealedBlockConsensus { + type Blueprint = Plain; fn column() -> Column { Column::FuelBlockConsensus diff --git a/crates/storage/src/structured_storage/state.rs b/crates/storage/src/structured_storage/state.rs index a0e3b76ff4a..c28b8c2a304 100644 --- a/crates/storage/src/structured_storage/state.rs +++ b/crates/storage/src/structured_storage/state.rs @@ -1,16 +1,16 @@ //! The module contains implementations and tests for the `ContractsState` table. 
use crate::{ + blueprint::sparse::{ + PrimaryKey, + Sparse, + }, codec::{ manual::Manual, raw::Raw, }, column::Column, - structure::sparse::{ - PrimaryKey, - Sparse, - }, - structured_storage::TableWithStructure, + structured_storage::TableWithBlueprint, tables::{ merkle::{ ContractsStateMerkleData, @@ -35,8 +35,8 @@ impl PrimaryKey for KeyConverter { } } -impl TableWithStructure for ContractsState { - type Structure = Sparse< +impl TableWithBlueprint for ContractsState { + type Blueprint = Sparse< Manual, Raw, ContractsStateMerkleMetadata, diff --git a/crates/storage/src/structured_storage/transactions.rs b/crates/storage/src/structured_storage/transactions.rs index c2cc0affcd4..5605ecdbe19 100644 --- a/crates/storage/src/structured_storage/transactions.rs +++ b/crates/storage/src/structured_storage/transactions.rs @@ -1,21 +1,21 @@ //! The module contains implementations and tests for the `Transactions` table. use crate::{ + blueprint::plain::Plain, codec::{ postcard::Postcard, raw::Raw, }, column::Column, - structure::plain::Plain, - structured_storage::TableWithStructure, + structured_storage::TableWithBlueprint, tables::{ ProcessedTransactions, Transactions, }, }; -impl TableWithStructure for Transactions { - type Structure = Plain; +impl TableWithBlueprint for Transactions { + type Blueprint = Plain; fn column() -> Column { Column::Transactions @@ -29,8 +29,8 @@ crate::basic_storage_tests!( ::Value::default() ); -impl TableWithStructure for ProcessedTransactions { - type Structure = Plain; +impl TableWithBlueprint for ProcessedTransactions { + type Blueprint = Plain; fn column() -> Column { Column::ProcessedTransactions From cc9966c1d9d30592a9f6a4a2056600726862bffc Mon Sep 17 00:00:00 2001 From: xgreenx Date: Fri, 19 Jan 2024 03:41:05 +0100 Subject: [PATCH 22/28] Fix documents --- ci_checks.sh | 1 + crates/storage/src/column.rs | 18 +++++++++--------- 2 files changed, 10 insertions(+), 9 deletions(-) diff --git a/ci_checks.sh b/ci_checks.sh index 
d1ffaa75e0f..b78fae2781f 100755 --- a/ci_checks.sh +++ b/ci_checks.sh @@ -11,6 +11,7 @@ cargo +nightly fmt --all -- --check && cargo sort -w --check && source .github/workflows/scripts/verify_openssl.sh && cargo clippy --all-targets --all-features && +cargo doc --all-features --workspace && cargo make check --locked && cargo make check --all-features --locked && cargo check -p fuel-core-types --target wasm32-unknown-unknown --no-default-features && diff --git a/crates/storage/src/column.rs b/crates/storage/src/column.rs index 70813e0de7d..45d4cbc11e9 100644 --- a/crates/storage/src/column.rs +++ b/crates/storage/src/column.rs @@ -64,25 +64,25 @@ column_definition! { Transactions = 6, /// See [`FuelBlocks`](crate::tables::FuelBlocks) FuelBlocks = 7, - /// See [`FuelBlockMerkleData`](storage::FuelBlockMerkleData) + /// See [`FuelBlockMerkleData`](crate::tables::merkle::FuelBlockMerkleData) FuelBlockMerkleData = 8, - /// See [`FuelBlockMerkleMetadata`](storage::FuelBlockMerkleMetadata) + /// See [`FuelBlockMerkleMetadata`](crate::tables::merkle::FuelBlockMerkleMetadata) FuelBlockMerkleMetadata = 9, /// Messages that have been spent. /// Existence of a key in this column means that the message has been spent. 
/// See [`SpentMessages`](crate::tables::SpentMessages) SpentMessages = 10, - /// See [`ContractsAssetsMerkleData`](storage::ContractsAssetsMerkleData) + /// See [`ContractsAssetsMerkleData`](crate::tables::merkle::ContractsAssetsMerkleData) ContractsAssetsMerkleData = 11, - /// See [`ContractsAssetsMerkleMetadata`](storage::ContractsAssetsMerkleMetadata) + /// See [`ContractsAssetsMerkleMetadata`](crate::tables::merkle::ContractsAssetsMerkleMetadata) ContractsAssetsMerkleMetadata = 12, - /// See [`ContractsStateMerkleData`](storage::ContractsStateMerkleData) + /// See [`ContractsStateMerkleData`](crate::tables::merkle::ContractsStateMerkleData) ContractsStateMerkleData = 13, - /// See [`ContractsStateMerkleMetadata`](storage::ContractsStateMerkleMetadata) + /// See [`ContractsStateMerkleMetadata`](crate::tables::merkle::ContractsStateMerkleMetadata) ContractsStateMerkleMetadata = 14, /// See [`Messages`](crate::tables::Messages) Messages = 15, - /// See [`ProcessedTransactions`](storage::ProcessedTransactions) + /// See [`ProcessedTransactions`](crate::tables::ProcessedTransactions) ProcessedTransactions = 16, // TODO: Extract the columns below into a separate enum to not mix @@ -95,12 +95,12 @@ column_definition! { Metadata = 17, /// See [`Receipts`](crate::tables::Receipts) Receipts = 18, - /// See [`FuelBlockSecondaryKeyBlockHeights`](storage::FuelBlockSecondaryKeyBlockHeights) + /// See `FuelBlockSecondaryKeyBlockHeights` FuelBlockSecondaryKeyBlockHeights = 19, /// See [`SealedBlockConsensus`](crate::tables::SealedBlockConsensus) FuelBlockConsensus = 20, /// Metadata for the relayer - /// See [`RelayerMetadata`](fuel_core_relayer::ports::RelayerMetadata) + /// See `RelayerMetadata` RelayerMetadata = 21, // Below are not required tables. They are used for API and may be removed or moved to another place in the future. 
From 2d3e471ac683777b84cce89d2adcfd3fe5f9de64 Mon Sep 17 00:00:00 2001 From: xgreenx Date: Fri, 19 Jan 2024 05:23:12 +0100 Subject: [PATCH 23/28] Merge latest modifications from move storage PR --- crates/fuel-core/src/database.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/fuel-core/src/database.rs b/crates/fuel-core/src/database.rs index 90bf8a4d884..67fbe750e3d 100644 --- a/crates/fuel-core/src/database.rs +++ b/crates/fuel-core/src/database.rs @@ -12,6 +12,7 @@ use fuel_core_chain_config::{ MessageConfig, }; use fuel_core_storage::{ + blueprint::Blueprint, codec::{ Decode, Encode, @@ -24,7 +25,6 @@ use fuel_core_storage::{ Value, WriteOperation, }, - structure::Blueprint, structured_storage::{ StructuredStorage, TableWithBlueprint, @@ -305,7 +305,7 @@ impl Database { { let iter = if let Some(start) = start { let encoder = - >::KeyCodec::encode(start); + >::KeyCodec::encode(start); self.data.as_ref().iter_all( M::column(), From c7bce203dabe92cd4024f7b1cd3681132bec1b1b Mon Sep 17 00:00:00 2001 From: xgreenx Date: Fri, 19 Jan 2024 05:31:10 +0100 Subject: [PATCH 24/28] Fix conflicts --- crates/fuel-core/src/graphql_api/database.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/fuel-core/src/graphql_api/database.rs b/crates/fuel-core/src/graphql_api/database.rs index 5aa4ba0f975..feb9a638c18 100644 --- a/crates/fuel-core/src/graphql_api/database.rs +++ b/crates/fuel-core/src/graphql_api/database.rs @@ -58,7 +58,7 @@ pub type OnChainView = Arc; pub type OffChainView = Arc; /// The container of the on-chain and off-chain database view provides. -/// It is used only by [`ViewExtension`](super::view_extension::ViewExtension) to create a [`ReadView`]. +/// It is used only by `ViewExtension` to create a [`ReadView`]. pub struct ReadDatabase { /// The on-chain database view provider. 
on_chain: Box>, From 36445b7cd95825ee3c41b8a63e7b8c9fd16c6532 Mon Sep 17 00:00:00 2001 From: xgreenx Date: Fri, 19 Jan 2024 18:50:05 +0100 Subject: [PATCH 25/28] Merged master --- crates/fuel-core/src/graphql_api/database.rs | 25 +++++++------- crates/fuel-core/src/query/block.rs | 8 ----- crates/fuel-core/src/schema/block.rs | 1 + crates/fuel-core/src/schema/tx/types.rs | 1 + .../src/service/adapters/graphql_api.rs | 1 - .../service/adapters/graphql_api/on_chain.rs | 33 +++++++++++-------- 6 files changed, 35 insertions(+), 34 deletions(-) diff --git a/crates/fuel-core/src/graphql_api/database.rs b/crates/fuel-core/src/graphql_api/database.rs index feb9a638c18..eb0a3c00f93 100644 --- a/crates/fuel-core/src/graphql_api/database.rs +++ b/crates/fuel-core/src/graphql_api/database.rs @@ -24,9 +24,12 @@ use fuel_core_txpool::types::{ TxId, }; use fuel_core_types::{ - blockchain::primitives::{ - BlockId, - DaBlockHeight, + blockchain::{ + block::CompressedBlock, + primitives::{ + BlockId, + DaBlockHeight, + }, }, entities::message::{ MerkleProof, @@ -97,20 +100,20 @@ pub struct ReadView { } impl DatabaseBlocks for ReadView { - fn block_id(&self, height: &BlockHeight) -> StorageResult { - self.on_chain.block_id(height) + fn block_height(&self, block_id: &BlockId) -> StorageResult { + self.on_chain.block_height(block_id) } - fn blocks_ids( + fn blocks( &self, - start: Option, + height: Option, direction: IterDirection, - ) -> BoxedIter<'_, StorageResult<(BlockHeight, BlockId)>> { - self.on_chain.blocks_ids(start, direction) + ) -> BoxedIter<'_, StorageResult> { + self.on_chain.blocks(height, direction) } - fn ids_of_latest_block(&self) -> StorageResult<(BlockHeight, BlockId)> { - self.on_chain.ids_of_latest_block() + fn latest_height(&self) -> StorageResult { + self.on_chain.latest_height() } } diff --git a/crates/fuel-core/src/query/block.rs b/crates/fuel-core/src/query/block.rs index 830c1833a73..9740b645938 100644 --- a/crates/fuel-core/src/query/block.rs +++ 
b/crates/fuel-core/src/query/block.rs @@ -59,14 +59,6 @@ pub trait BlockQueryData: Send + Sync + SimpleBlockData { } impl BlockQueryData for D { - fn block_id(&self, height: &BlockHeight) -> StorageResult { - self.block_id(height) - } - - fn latest_block_id(&self) -> StorageResult { - self.ids_of_latest_block().map(|(_, id)| id) - } - fn latest_block_height(&self) -> StorageResult { self.latest_height() } diff --git a/crates/fuel-core/src/schema/block.rs b/crates/fuel-core/src/schema/block.rs index cd8b62323bf..570373516e5 100644 --- a/crates/fuel-core/src/schema/block.rs +++ b/crates/fuel-core/src/schema/block.rs @@ -6,6 +6,7 @@ use crate::{ fuel_core_graphql_api::{ api_service::ConsensusModule, database::ReadView, + ports::DatabaseBlocks, Config as GraphQLConfig, IntoApiResult, }, diff --git a/crates/fuel-core/src/schema/tx/types.rs b/crates/fuel-core/src/schema/tx/types.rs index 8aeba588a2a..efd58aeeaec 100644 --- a/crates/fuel-core/src/schema/tx/types.rs +++ b/crates/fuel-core/src/schema/tx/types.rs @@ -7,6 +7,7 @@ use crate::{ fuel_core_graphql_api::{ api_service::TxPool, database::ReadView, + ports::DatabaseBlocks, Config, IntoApiResult, }, diff --git a/crates/fuel-core/src/service/adapters/graphql_api.rs b/crates/fuel-core/src/service/adapters/graphql_api.rs index 35bb44e624e..e83efc44e08 100644 --- a/crates/fuel-core/src/service/adapters/graphql_api.rs +++ b/crates/fuel-core/src/service/adapters/graphql_api.rs @@ -17,7 +17,6 @@ use crate::{ }, }; use async_trait::async_trait; -use fuel_core_importer::ports::ImporterDatabase; use fuel_core_services::stream::BoxStream; use fuel_core_storage::Result as StorageResult; use fuel_core_txpool::{ diff --git a/crates/fuel-core/src/service/adapters/graphql_api/on_chain.rs b/crates/fuel-core/src/service/adapters/graphql_api/on_chain.rs index dd9c9937ffa..d09f045cfb0 100644 --- a/crates/fuel-core/src/service/adapters/graphql_api/on_chain.rs +++ b/crates/fuel-core/src/service/adapters/graphql_api/on_chain.rs @@ -11,6 
+11,7 @@ use crate::{ }, }, }; +use fuel_core_importer::ports::ImporterDatabase; use fuel_core_storage::{ iter::{ BoxedIter, @@ -18,15 +19,19 @@ use fuel_core_storage::{ IterDirection, }, not_found, + tables::FuelBlocks, transactional::AtomicView, Error as StorageError, Result as StorageResult, }; use fuel_core_txpool::types::ContractId; use fuel_core_types::{ - blockchain::primitives::{ - BlockId, - DaBlockHeight, + blockchain::{ + block::CompressedBlock, + primitives::{ + BlockId, + DaBlockHeight, + }, }, entities::message::Message, fuel_tx::AssetId, @@ -39,25 +44,25 @@ use fuel_core_types::{ use std::sync::Arc; impl DatabaseBlocks for Database { - fn block_id(&self, height: &BlockHeight) -> StorageResult { - self.get_block_id(height) - .and_then(|height| height.ok_or(not_found!("BlockId"))) + fn block_height(&self, id: &BlockId) -> StorageResult { + self.get_block_height(id) + .and_then(|height| height.ok_or(not_found!("BlockHeight"))) } - fn blocks_ids( + fn blocks( &self, - start: Option, + height: Option, direction: IterDirection, - ) -> BoxedIter<'_, StorageResult<(BlockHeight, BlockId)>> { - self.all_block_ids(start, direction) - .map(|result| result.map_err(StorageError::from)) + ) -> BoxedIter<'_, StorageResult> { + self.iter_all_by_start::(height.as_ref(), Some(direction)) + .map(|result| result.map(|(_, block)| block)) .into_boxed() } - fn ids_of_latest_block(&self) -> StorageResult<(BlockHeight, BlockId)> { - self.ids_of_latest_block() + fn latest_height(&self) -> StorageResult { + self.latest_block_height() .transpose() - .ok_or(not_found!("BlockId"))? + .ok_or(not_found!("BlockHeight"))? 
} } From 6a503b9a118ad72351fca18baebc2ec53d923d8f Mon Sep 17 00:00:00 2001 From: xgreenx Date: Fri, 19 Jan 2024 19:03:26 +0100 Subject: [PATCH 26/28] Apply comments --- crates/fuel-core/src/database.rs | 49 +++++++++++++--------------- crates/fuel-core/src/query/block.rs | 6 ++-- crates/fuel-core/src/schema/block.rs | 4 +-- 3 files changed, 28 insertions(+), 31 deletions(-) diff --git a/crates/fuel-core/src/database.rs b/crates/fuel-core/src/database.rs index 67fbe750e3d..913bc445f16 100644 --- a/crates/fuel-core/src/database.rs +++ b/crates/fuel-core/src/database.rs @@ -303,38 +303,35 @@ impl Database { M::Blueprint: Blueprint, P: AsRef<[u8]>, { - let iter = if let Some(start) = start { - let encoder = - >::KeyCodec::encode(start); + let encoder = start.map(|start| { + >::KeyCodec::encode(start) + }); - self.data.as_ref().iter_all( - M::column(), - prefix.as_ref().map(|p| p.as_ref()), - Some(encoder.as_bytes().as_ref()), - direction.unwrap_or_default(), - ) - } else { - self.data.as_ref().iter_all( + let start = encoder.as_ref().map(|encoder| encoder.as_bytes()); + + self.data + .as_ref() + .iter_all( M::column(), prefix.as_ref().map(|p| p.as_ref()), - None, + start.as_ref().map(|cow| cow.as_ref()), direction.unwrap_or_default(), ) - }; - iter.map(|val| { - val.and_then(|(key, value)| { - let key = >::KeyCodec::decode( - key.as_slice(), - ) - .map_err(|e| StorageError::Codec(anyhow::anyhow!(e)))?; - let value = - >::ValueCodec::decode( - value.as_slice(), - ) - .map_err(|e| StorageError::Codec(anyhow::anyhow!(e)))?; - Ok((key, value)) + .map(|val| { + val.and_then(|(key, value)| { + let key = + >::KeyCodec::decode( + key.as_slice(), + ) + .map_err(|e| StorageError::Codec(anyhow::anyhow!(e)))?; + let value = + >::ValueCodec::decode( + value.as_slice(), + ) + .map_err(|e| StorageError::Codec(anyhow::anyhow!(e)))?; + Ok((key, value)) + }) }) - }) } } diff --git a/crates/fuel-core/src/query/block.rs b/crates/fuel-core/src/query/block.rs index 9740b645938..2d7edbd0b3f 
100644 --- a/crates/fuel-core/src/query/block.rs +++ b/crates/fuel-core/src/query/block.rs @@ -51,7 +51,7 @@ pub trait BlockQueryData: Send + Sync + SimpleBlockData { fn compressed_blocks( &self, - start: Option, + height: Option, direction: IterDirection, ) -> BoxedIter>; @@ -69,10 +69,10 @@ impl BlockQueryData for D { fn compressed_blocks( &self, - start: Option, + height: Option, direction: IterDirection, ) -> BoxedIter> { - self.blocks(start, direction) + self.blocks(height, direction) } fn consensus(&self, id: &BlockHeight) -> StorageResult { diff --git a/crates/fuel-core/src/schema/block.rs b/crates/fuel-core/src/schema/block.rs index 570373516e5..41c3f75b92f 100644 --- a/crates/fuel-core/src/schema/block.rs +++ b/crates/fuel-core/src/schema/block.rs @@ -264,14 +264,14 @@ impl HeaderQuery { fn blocks_query( query: &ReadView, - start: Option, + height: Option, direction: IterDirection, ) -> BoxedIter> where T: async_graphql::OutputType, T: From, { - let blocks = query.compressed_blocks(start, direction).map(|result| { + let blocks = query.compressed_blocks(height, direction).map(|result| { result.map(|block| ((*block.header().height()).into(), block.into())) }); From 067792b5f11d6006ce5430922b1526ddf1700523 Mon Sep 17 00:00:00 2001 From: xgreenx Date: Fri, 19 Jan 2024 19:19:03 +0100 Subject: [PATCH 27/28] Apply comments --- crates/fuel-core/src/service/adapters/graphql_api/on_chain.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/crates/fuel-core/src/service/adapters/graphql_api/on_chain.rs b/crates/fuel-core/src/service/adapters/graphql_api/on_chain.rs index 1a7eb47fec2..09ec40a9897 100644 --- a/crates/fuel-core/src/service/adapters/graphql_api/on_chain.rs +++ b/crates/fuel-core/src/service/adapters/graphql_api/on_chain.rs @@ -16,6 +16,7 @@ use fuel_core_storage::{ IterDirection, }, not_found, + tables::FuelBlocks, Error as StorageError, Result as StorageResult, }; From 8369cd5555901751583c3470842a09cefa2d1b67 Mon Sep 17 00:00:00 2001 From: xgreenx Date: 
Fri, 19 Jan 2024 19:22:57 +0100 Subject: [PATCH 28/28] Move arc wrapper into its own module --- crates/fuel-core/src/graphql_api/database.rs | 71 ++++--------------- .../src/graphql_api/database/arc_wrapper.rs | 66 +++++++++++++++++ 2 files changed, 79 insertions(+), 58 deletions(-) create mode 100644 crates/fuel-core/src/graphql_api/database/arc_wrapper.rs diff --git a/crates/fuel-core/src/graphql_api/database.rs b/crates/fuel-core/src/graphql_api/database.rs index 26711491a66..3b59cfb7723 100644 --- a/crates/fuel-core/src/graphql_api/database.rs +++ b/crates/fuel-core/src/graphql_api/database.rs @@ -1,11 +1,16 @@ -use crate::fuel_core_graphql_api::ports::{ - DatabaseBlocks, - DatabaseChain, - DatabaseContracts, - DatabaseMessageProof, - DatabaseMessages, - OffChainDatabase, - OnChainDatabase, +mod arc_wrapper; + +use crate::fuel_core_graphql_api::{ + database::arc_wrapper::ArcWrapper, + ports::{ + DatabaseBlocks, + DatabaseChain, + DatabaseContracts, + DatabaseMessageProof, + DatabaseMessages, + OffChainDatabase, + OnChainDatabase, + }, }; use fuel_core_storage::{ iter::{ @@ -60,56 +65,6 @@ pub type OnChainView = Arc; /// The off-chain view of the database used by the [`ReadView`] to fetch off-chain data. pub type OffChainView = Arc; -/// The GraphQL can't work with the generics in [`async_graphql::Context::data_unchecked`] and requires a known type. -/// It is an `Arc` wrapper around the generic for on-chain and off-chain databases. 
-struct ArcWrapper { - inner: Provider, - _marker: core::marker::PhantomData, -} - -impl ArcWrapper { - fn new(inner: Provider) -> Self { - Self { - inner, - _marker: core::marker::PhantomData, - } - } -} - -impl AtomicView for ArcWrapper -where - Provider: AtomicView, - View: OnChainDatabase + 'static, -{ - type View = OnChainView; - - fn view_at(&self, height: BlockHeight) -> StorageResult { - let view = self.inner.view_at(height)?; - Ok(Arc::new(view)) - } - - fn latest_view(&self) -> Self::View { - Arc::new(self.inner.latest_view()) - } -} - -impl AtomicView for ArcWrapper -where - Provider: AtomicView, - View: OffChainDatabase + 'static, -{ - type View = OffChainView; - - fn view_at(&self, height: BlockHeight) -> StorageResult { - let view = self.inner.view_at(height)?; - Ok(Arc::new(view)) - } - - fn latest_view(&self) -> Self::View { - Arc::new(self.inner.latest_view()) - } -} - /// The container of the on-chain and off-chain database view provides. /// It is used only by `ViewExtension` to create a [`ReadView`]. pub struct ReadDatabase { diff --git a/crates/fuel-core/src/graphql_api/database/arc_wrapper.rs b/crates/fuel-core/src/graphql_api/database/arc_wrapper.rs new file mode 100644 index 00000000000..470e7e9b81a --- /dev/null +++ b/crates/fuel-core/src/graphql_api/database/arc_wrapper.rs @@ -0,0 +1,66 @@ +use crate::fuel_core_graphql_api::{ + database::{ + OffChainView, + OnChainView, + }, + ports::{ + OffChainDatabase, + OnChainDatabase, + }, +}; +use fuel_core_storage::{ + transactional::AtomicView, + Result as StorageResult, +}; +use fuel_core_types::fuel_types::BlockHeight; +use std::sync::Arc; + +/// The GraphQL can't work with the generics in [`async_graphql::Context::data_unchecked`] and requires a known type. +/// It is an `Arc` wrapper around the generic for on-chain and off-chain databases. 
+pub struct ArcWrapper { + inner: Provider, + _marker: core::marker::PhantomData, +} + +impl ArcWrapper { + pub fn new(inner: Provider) -> Self { + Self { + inner, + _marker: core::marker::PhantomData, + } + } +} + +impl AtomicView for ArcWrapper +where + Provider: AtomicView, + View: OnChainDatabase + 'static, +{ + type View = OnChainView; + + fn view_at(&self, height: BlockHeight) -> StorageResult { + let view = self.inner.view_at(height)?; + Ok(Arc::new(view)) + } + + fn latest_view(&self) -> Self::View { + Arc::new(self.inner.latest_view()) + } +} + +impl AtomicView for ArcWrapper +where + Provider: AtomicView, + View: OffChainDatabase + 'static, +{ + type View = OffChainView; + + fn view_at(&self, height: BlockHeight) -> StorageResult { + let view = self.inner.view_at(height)?; + Ok(Arc::new(view)) + } + + fn latest_view(&self) -> Self::View { + Arc::new(self.inner.latest_view()) + } +}