diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 66554e31..e8ee3d27 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -8,6 +8,7 @@ on: pull_request: branches: - master + workflow_dispatch: env: CARGO_TERM_COLOR: always diff --git a/Cargo.toml b/Cargo.toml index 450b4372..caf6a403 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -44,11 +44,11 @@ members = ["tools/benchmark", "aerospike-core", "aerospike-rt", "aerospike-sync" [dev-dependencies] log = "0.4" -env_logger = "0.9.3" +env_logger = "0.10.0" hex = "0.4" -bencher = "0.1" +criterion = { version = "0.5.1", features = ["async_tokio", "async_futures", "async"]} serde_json = "1.0" -rand = "0.7" +rand = "0.8.5" lazy_static = "1.4" aerospike-macro = {path = "./aerospike-macro"} aerospike-rt = {path = "./aerospike-rt"} diff --git a/aerospike-core/Cargo.toml b/aerospike-core/Cargo.toml index c34737da..33eef745 100644 --- a/aerospike-core/Cargo.toml +++ b/aerospike-core/Cargo.toml @@ -17,8 +17,10 @@ error-chain = "0.12" pwhash = "0.3" serde = { version = "1.0", features = ["derive"], optional = true } aerospike-rt = {path = "../aerospike-rt"} +aerospike-macro = {path = "../aerospike-macro"} futures = {version = "0.3.16" } async-trait = "0.1.51" +num = "0.4.0" [features] serialization = ["serde"] diff --git a/aerospike-core/src/batch/batch_executor.rs b/aerospike-core/src/batch/batch_executor.rs index 7acf8ae7..e3d258b8 100644 --- a/aerospike-core/src/batch/batch_executor.rs +++ b/aerospike-core/src/batch/batch_executor.rs @@ -21,6 +21,7 @@ use crate::batch::BatchRead; use crate::cluster::partition::Partition; use crate::cluster::{Cluster, Node}; use crate::commands::BatchReadCommand; +use crate::derive::readable::ReadableBins; use crate::errors::{Error, Result}; use crate::policy::{BatchPolicy, Concurrency}; use crate::Key; @@ -35,29 +36,29 @@ impl BatchExecutor { BatchExecutor { cluster } } - pub async fn execute_batch_read( + pub async fn execute_batch_read( &self, policy: &BatchPolicy, - batch_reads: Vec, - ) -> Result> { + batch_reads: Vec>, + ) -> Result>> { let mut batch_nodes = self.get_batch_nodes(&batch_reads).await?; let jobs = batch_nodes .drain() .map(|(node, reads)| BatchReadCommand::new(policy, node, reads)) .collect(); let reads = self.execute_batch_jobs(jobs, &policy.concurrency).await?; - let mut res: Vec = vec![]; + let mut res: Vec> = vec![]; for mut read in reads { res.append(&mut read.batch_reads); } Ok(res) } - async fn execute_batch_jobs( + async fn execute_batch_jobs( &self, - jobs: Vec, + jobs: Vec>, concurrency: &Concurrency, - ) -> Result> { + ) -> Result>> { let threads = match *concurrency { Concurrency::Sequential => 1, Concurrency::Parallel => jobs.len(), @@ -97,12 +98,12 @@ impl BatchExecutor { } } - async fn get_batch_nodes( + async fn get_batch_nodes( &self, - batch_reads: &[BatchRead], - ) -> Result, Vec>> { + batch_reads: &[BatchRead], + ) -> Result, Vec>>> { let mut map = HashMap::new(); - for (_, batch_read) in batch_reads.iter().enumerate() { + for batch_read in batch_reads { let node = self.node_for_key(&batch_read.key).await?; map.entry(node) .or_insert_with(Vec::new) diff --git a/aerospike-core/src/batch/batch_read.rs b/aerospike-core/src/batch/batch_read.rs index 420f634a..6445642c 100644 --- a/aerospike-core/src/batch/batch_read.rs +++ b/aerospike-core/src/batch/batch_read.rs @@ -13,6 +13,7 @@ // License for the specific language governing permissions and limitations under // the License. 
+use crate::derive::readable::ReadableBins; use crate::Bins; use crate::Key; use crate::Record; @@ -22,7 +23,7 @@ use serde::Serialize; /// Key and bin names used in batch read commands where variable bins are needed for each key. #[cfg_attr(feature = "serialization", derive(Serialize))] #[derive(Debug, Clone)] -pub struct BatchRead { +pub struct BatchRead { /// Key. pub key: Key, @@ -30,10 +31,10 @@ pub struct BatchRead { pub bins: Bins, /// Will contain the record after the batch read operation. - pub record: Option, + pub record: Option>, } -impl BatchRead { +impl BatchRead { /// Create a new `BatchRead` instance for the given key and bin selector. pub const fn new(key: Key, bins: Bins) -> Self { BatchRead { @@ -44,7 +45,7 @@ impl BatchRead { } #[doc(hidden)] - pub fn match_header(&self, other: &BatchRead, match_set: bool) -> bool { + pub fn match_header(&self, other: &BatchRead, match_set: bool) -> bool { let key = &self.key; let other_key = &other.key; (key.namespace == other_key.namespace) diff --git a/aerospike-core/src/client.rs b/aerospike-core/src/client.rs index f9e0751d..6ad5a0eb 100644 --- a/aerospike-core/src/client.rs +++ b/aerospike-core/src/client.rs @@ -13,6 +13,7 @@ // License for the specific language governing permissions and limitations under // the License. +use std::collections::HashMap; use std::path::Path; use std::str; use std::sync::Arc; @@ -24,14 +25,16 @@ use crate::commands::{ DeleteCommand, ExecuteUDFCommand, ExistsCommand, OperateCommand, QueryCommand, ReadCommand, ScanCommand, TouchCommand, WriteCommand, }; +use crate::derive::readable::ReadableBins; +use crate::derive::writable::WritableBins; use crate::errors::{ErrorKind, Result, ResultExt}; use crate::net::ToHosts; use crate::operations::{Operation, OperationType}; use crate::policy::{BatchPolicy, ClientPolicy, QueryPolicy, ReadPolicy, ScanPolicy, WritePolicy}; use crate::task::{IndexTask, RegisterTask}; use crate::{ - BatchRead, Bin, Bins, CollectionIndexType, IndexType, Key, Record, Recordset, ResultCode, - Statement, UDFLang, Value, + BatchRead, Bins, CollectionIndexType, IndexType, Key, Record, Recordset, ResultCode, Statement, + UDFLang, Value, }; use aerospike_rt::fs::File; #[cfg(all(any(feature = "rt-tokio"), not(feature = "rt-async-std")))] @@ -179,7 +182,12 @@ impl Client { /// /// # Panics /// Panics if the return is invalid - pub async fn get(&self, policy: &ReadPolicy, key: &Key, bins: T) -> Result + pub async fn get( + &self, + policy: &ReadPolicy, + key: &Key, + bins: T, + ) -> Result> where T: Into + Send + Sync + 'static, { @@ -223,11 +231,11 @@ impl Client { /// => println!("Error executing batch request: {}", err), /// } /// ``` - pub async fn batch_get( + pub async fn batch_get( &self, policy: &BatchPolicy, - batch_reads: Vec, - ) -> Result> { + batch_reads: Vec>, + ) -> Result>> { let executor = BatchExecutor::new(self.cluster.clone()); executor.execute_batch_read(policy, batch_reads).await } @@ -268,7 +276,12 @@ impl Client { /// Err(err) => println!("Error writing record: {}", err), /// } /// ``` - pub async fn put(&self, policy: &WritePolicy, key: &Key, bins: &[Bin]) -> Result<()> { + pub async fn put( + &self, + policy: &WritePolicy, + key: &Key, + bins: &T, + ) -> Result<()> { let mut command = WriteCommand::new( policy, self.cluster.clone(), @@ -301,7 +314,12 @@ impl Client { /// Err(err) => println!("Error writing record: {}", err), /// } /// ``` - pub async fn add(&self, policy: &WritePolicy, key: &Key, bins: &[Bin]) -> Result<()> { + pub async fn add( + &self, + policy: 
&WritePolicy, + key: &Key, + bins: &T, + ) -> Result<()> { let mut command = WriteCommand::new(policy, self.cluster.clone(), key, bins, OperationType::Incr); command.execute().await @@ -310,7 +328,12 @@ impl Client { /// Append bin string values to existing record bin values. The policy specifies the /// transaction timeout, record expiration and how the transaction is handled when the record /// already exists. This call only works for string values. - pub async fn append(&self, policy: &WritePolicy, key: &Key, bins: &[Bin]) -> Result<()> { + pub async fn append( + &self, + policy: &WritePolicy, + key: &Key, + bins: &T, + ) -> Result<()> { let mut command = WriteCommand::new( policy, self.cluster.clone(), @@ -324,7 +347,12 @@ impl Client { /// Prepend bin string values to existing record bin values. The policy specifies the /// transaction timeout, record expiration and how the transaction is handled when the record /// already exists. This call only works for string values. - pub async fn prepend(&self, policy: &WritePolicy, key: &Key, bins: &[Bin]) -> Result<()> { + pub async fn prepend( + &self, + policy: &WritePolicy, + key: &Key, + bins: &T, + ) -> Result<()> { let mut command = WriteCommand::new( policy, self.cluster.clone(), @@ -421,12 +449,12 @@ impl Client { /// ``` /// # Panics /// Panics if the return is invalid - pub async fn operate( + pub async fn operate( &self, policy: &WritePolicy, key: &Key, ops: &[Operation<'_>], - ) -> Result { + ) -> Result> { let mut command = OperateCommand::new(policy, self.cluster.clone(), key, ops); command.execute().await?; Ok(command.read_command.record.unwrap()) @@ -556,7 +584,7 @@ impl Client { function_name: &str, args: Option<&[Value]>, ) -> Result> { - let mut command = ExecuteUDFCommand::new( + let mut command: ExecuteUDFCommand> = ExecuteUDFCommand::new( policy, self.cluster.clone(), key, @@ -616,13 +644,13 @@ impl Client { /// /// # Panics /// Panics if the async block fails - pub async fn scan( + pub async fn scan( &self, policy: &ScanPolicy, namespace: &str, set_name: &str, bins: T, - ) -> Result> + ) -> Result>> where T: Into + Send + Sync + 'static, { @@ -638,7 +666,7 @@ impl Client { let set_name = set_name.to_owned(); let bins = bins.clone(); - aerospike_rt::spawn(async move { + let _ = aerospike_rt::spawn(async move { let mut command = ScanCommand::new( &policy, node, &namespace, &set_name, bins, recordset, partitions, ); @@ -657,14 +685,14 @@ impl Client { /// /// # Panics /// panics if the async block fails - pub async fn scan_node( + pub async fn scan_node( &self, policy: &ScanPolicy, node: Arc, namespace: &str, set_name: &str, bins: T, - ) -> Result> + ) -> Result>> where T: Into + Send + Sync + 'static, { @@ -676,7 +704,7 @@ impl Client { let namespace = namespace.to_owned(); let set_name = set_name.to_owned(); - aerospike_rt::spawn(async move { + let _ = aerospike_rt::spawn(async move { let mut command = ScanCommand::new( &policy, node, @@ -718,11 +746,11 @@ impl Client { /// /// # Panics /// Panics if the async block fails - pub async fn query( + pub async fn query( &self, policy: &QueryPolicy, statement: Statement, - ) -> Result> { + ) -> Result>> { statement.validate()?; let statement = Arc::new(statement); @@ -737,7 +765,7 @@ impl Client { let t_recordset = recordset.clone(); let policy = policy.clone(); let statement = statement.clone(); - aerospike_rt::spawn(async move { + let _ = aerospike_rt::spawn(async move { let mut command = QueryCommand::new(&policy, node, statement, t_recordset, partitions); 
command.execute().await.unwrap(); @@ -753,12 +781,12 @@ impl Client { /// /// # Panics /// Panics when the async block fails - pub async fn query_node( + pub async fn query_node( &self, policy: &QueryPolicy, node: Arc, statement: Statement, - ) -> Result> { + ) -> Result>> { statement.validate()?; let recordset = Arc::new(Recordset::new(policy.record_queue_size, 1)); @@ -770,7 +798,7 @@ impl Client { .node_partitions(node.as_ref(), &statement.namespace) .await; - aerospike_rt::spawn(async move { + let _ = aerospike_rt::spawn(async move { let mut command = QueryCommand::new(&policy, node, statement, t_recordset, partitions); command.execute().await.unwrap(); }) diff --git a/aerospike-core/src/commands/batch_read_command.rs b/aerospike-core/src/commands/batch_read_command.rs index d6487bbd..5d913bf2 100644 --- a/aerospike-core/src/commands/batch_read_command.rs +++ b/aerospike-core/src/commands/batch_read_command.rs @@ -13,31 +13,31 @@ // limitations under the License. use aerospike_rt::time::{Duration, Instant}; -use std::collections::HashMap; use std::sync::Arc; use crate::cluster::Node; use crate::commands::{self, Command}; +use crate::derive::readable::ReadableBins; use crate::errors::{ErrorKind, Result, ResultExt}; use crate::net::Connection; use crate::policy::{BatchPolicy, Policy, PolicyLike}; -use crate::{value, BatchRead, Record, ResultCode, Value}; +use crate::{BatchRead, Record, ResultCode}; use aerospike_rt::sleep; -struct BatchRecord { +struct BatchRecord { batch_index: usize, - record: Option, + record: Option>, } #[derive(Clone, Debug)] -pub struct BatchReadCommand { +pub struct BatchReadCommand { policy: BatchPolicy, pub node: Arc, - pub batch_reads: Vec, + pub batch_reads: Vec>, } -impl BatchReadCommand { - pub fn new(policy: &BatchPolicy, node: Arc, batch_reads: Vec) -> Self { +impl BatchReadCommand { + pub fn new(policy: &BatchPolicy, node: Arc, batch_reads: Vec>) -> Self { BatchReadCommand { policy: policy.clone(), node, @@ -146,7 +146,7 @@ impl BatchReadCommand { Ok(true) } - async fn parse_record(&mut self, conn: &mut Connection) -> Result> { + async fn parse_record(&mut self, conn: &mut Connection) -> Result>> { let found_key = match ResultCode::from(conn.buffer.read_u8(Some(5))) { ResultCode::Ok => true, ResultCode::KeyNotFoundError => false, @@ -166,26 +166,11 @@ impl BatchReadCommand { let field_count = conn.buffer.read_u16(None) as usize; // almost certainly 0 let op_count = conn.buffer.read_u16(None) as usize; - let key = commands::StreamCommand::parse_key(conn, field_count).await?; + let key = commands::StreamCommand::::parse_key(conn, field_count).await?; let record = if found_key { - let mut bins: HashMap = HashMap::with_capacity(op_count); - - for _ in 0..op_count { - conn.read_buffer(8).await?; - let op_size = conn.buffer.read_u32(None) as usize; - conn.buffer.skip(1); - let particle_type = conn.buffer.read_u8(None); - conn.buffer.skip(1); - let name_size = conn.buffer.read_u8(None) as usize; - conn.read_buffer(name_size).await?; - let name = conn.buffer.read_str(name_size)?; - let particle_bytes_size = op_size - (4 + name_size); - conn.read_buffer(particle_bytes_size).await?; - let value = - value::bytes_to_particle(particle_type, &mut conn.buffer, particle_bytes_size)?; - bins.insert(name, value); - } + let mut data_points = conn.pre_parse_stream_bins(op_count).await?; + let bins = T::read_bins_from_bytes(&mut data_points)?; Some(Record::new(Some(key), bins, generation, expiration)) } else { @@ -199,7 +184,7 @@ impl BatchReadCommand { } 
#[async_trait::async_trait] -impl commands::Command for BatchReadCommand { +impl commands::Command for BatchReadCommand { async fn write_timeout( &mut self, conn: &mut Connection, @@ -209,10 +194,6 @@ impl commands::Command for BatchReadCommand { Ok(()) } - async fn write_buffer(&mut self, conn: &mut Connection) -> Result<()> { - conn.flush().await - } - fn prepare_buffer(&mut self, conn: &mut Connection) -> Result<()> { conn.buffer .set_batch_read(&self.policy, self.batch_reads.clone()) @@ -233,4 +214,8 @@ impl commands::Command for BatchReadCommand { } Ok(()) } + + async fn write_buffer(&mut self, conn: &mut Connection) -> Result<()> { + conn.flush().await + } } diff --git a/aerospike-core/src/commands/buffer.rs b/aerospike-core/src/commands/buffer.rs index 02f087e7..51b1fdd3 100644 --- a/aerospike-core/src/commands/buffer.rs +++ b/aerospike-core/src/commands/buffer.rs @@ -18,6 +18,8 @@ use std::time::Duration; use byteorder::{ByteOrder, LittleEndian, NetworkEndian}; use crate::commands::field_type::FieldType; +use crate::derive::readable::ReadableBins; +use crate::derive::writable::WritableBins; use crate::errors::Result; use crate::expressions::FilterExpression; use crate::msgpack::encoder; @@ -95,15 +97,16 @@ const AS_MSG_TYPE: u8 = 3; // LDT elements in your queries. const MAX_BUFFER_SIZE: usize = 1024 * 1024 + 8; // 1 MB + header -// Holds data buffer for the command -#[derive(Debug, Default)] +/// Aerospike Wire Buffer. This holds the raw communication Buffer for the commands to read and write. +#[derive(Debug, Default, Clone)] pub struct Buffer { pub data_buffer: Vec, pub data_offset: usize, - pub reclaim_threshold: usize, + pub(crate) reclaim_threshold: usize, } impl Buffer { + /// Create new Buffer Instance pub fn new(reclaim_threshold: usize) -> Self { Buffer { data_buffer: Vec::with_capacity(1024), @@ -116,11 +119,12 @@ impl Buffer { self.data_offset = MSG_TOTAL_HEADER_SIZE as usize; } - pub fn size_buffer(&mut self) -> Result<()> { + pub(crate) fn size_buffer(&mut self) -> Result<()> { let offset = self.data_offset; self.resize_buffer(offset) } + /// Size the Buffer Byte Vec to the given length pub fn resize_buffer(&mut self, size: usize) -> Result<()> { // Corrupted data streams can result in a huge length. // Do a sanity check here. 
@@ -137,12 +141,13 @@ impl Buffer { Ok(()) } + /// Reset the Buffer Offset to 0 pub fn reset_offset(&mut self) { // reset data offset self.data_offset = 0; } - pub fn end(&mut self) { + pub(crate) fn end(&mut self) { let size = ((self.data_offset - 8) as i64) | ((i64::from(CL_MSG_VERSION) << 56) as i64) | (i64::from(AS_MSG_TYPE) << 48); @@ -153,12 +158,12 @@ impl Buffer { } // Writes the command for write operations - pub fn set_write( + pub(crate) fn set_write( &mut self, policy: &WritePolicy, op_type: OperationType, key: &Key, - bins: &[Bin], + bins: &T, ) -> Result<()> { self.begin(); let mut field_count = self.estimate_key_size(key, policy.send_key); @@ -167,9 +172,10 @@ impl Buffer { field_count += 1; } - for bin in bins { - self.estimate_operation_size_for_bin(bin.as_ref()); - } + self.data_offset += bins.writable_bins_size(); + //for bin in bins { + // self.estimate_operation_size_for_bin(bin.as_ref()); + //} self.size_buffer()?; self.write_header_with_policy( @@ -177,23 +183,24 @@ impl Buffer { 0, INFO2_WRITE, field_count as u16, - bins.len() as u16, + bins.writable_bins_count() as u16, ); self.write_key(key, policy.send_key); if let Some(filter) = policy.filter_expression() { self.write_filter_expression(filter, filter_size); } - for bin in bins { + bins.write_as_bins(self, op_type as u8)?; + /*for bin in bins { self.write_operation_for_bin(bin.as_ref(), op_type); - } + }*/ self.end(); Ok(()) } // Writes the command for write operations - pub fn set_delete(&mut self, policy: &WritePolicy, key: &Key) -> Result<()> { + pub(crate) fn set_delete(&mut self, policy: &WritePolicy, key: &Key) -> Result<()> { self.begin(); let mut field_count = self.estimate_key_size(key, false); let filter_size = self.estimate_filter_size(policy.filter_expression()); @@ -214,7 +221,7 @@ impl Buffer { } // Writes the command for touch operations - pub fn set_touch(&mut self, policy: &WritePolicy, key: &Key) -> Result<()> { + pub(crate) fn set_touch(&mut self, policy: &WritePolicy, key: &Key) -> Result<()> { self.begin(); let mut field_count = self.estimate_key_size(key, policy.send_key); let filter_size = self.estimate_filter_size(policy.filter_expression()); @@ -236,7 +243,7 @@ impl Buffer { } // Writes the command for exist operations - pub fn set_exists(&mut self, policy: &WritePolicy, key: &Key) -> Result<()> { + pub(crate) fn set_exists(&mut self, policy: &WritePolicy, key: &Key) -> Result<()> { self.begin(); let mut field_count = self.estimate_key_size(key, false); let filter_size = self.estimate_filter_size(policy.filter_expression()); @@ -263,7 +270,7 @@ impl Buffer { } // Writes the command for get operations - pub fn set_read(&mut self, policy: &ReadPolicy, key: &Key, bins: &Bins) -> Result<()> { + pub(crate) fn set_read(&mut self, policy: &ReadPolicy, key: &Key, bins: &Bins) -> Result<()> { match bins { Bins::None => self.set_read_header(policy, key), Bins::All => self.set_read_for_key_only(policy, key), @@ -297,7 +304,7 @@ impl Buffer { } // Writes the command for getting metadata operations - pub fn set_read_header(&mut self, policy: &ReadPolicy, key: &Key) -> Result<()> { + pub(crate) fn set_read_header(&mut self, policy: &ReadPolicy, key: &Key) -> Result<()> { self.begin(); let mut field_count = self.estimate_key_size(key, false); let filter_size = self.estimate_filter_size(policy.filter_expression()); @@ -319,7 +326,7 @@ impl Buffer { Ok(()) } - pub fn set_read_for_key_only(&mut self, policy: &ReadPolicy, key: &Key) -> Result<()> { + pub(crate) fn set_read_for_key_only(&mut self, policy: 
&ReadPolicy, key: &Key) -> Result<()> { self.begin(); let mut field_count = self.estimate_key_size(key, false); @@ -341,10 +348,10 @@ } // Writes the command for batch read operations - pub fn set_batch_read( + pub(crate) fn set_batch_read( &mut self, policy: &BatchPolicy, - batch_reads: Vec, + batch_reads: Vec>, ) -> Result<()> { let field_count_row = if policy.send_set_name { 2 } else { 1 }; @@ -357,7 +364,7 @@ field_count += 1; } - let mut prev: Option<&BatchRead> = None; + let mut prev: Option<&BatchRead> = None; for batch_read in &batch_reads { self.data_offset += batch_read.key.digest.len() + 4; match prev { @@ -462,7 +469,7 @@ } // Writes the command for getting metadata operations - pub fn set_operate<'a>( + pub(crate) fn set_operate<'a>( &mut self, policy: &WritePolicy, key: &Key, @@ -549,7 +556,7 @@ Ok(()) } - pub fn set_udf( + pub(crate) fn set_udf( &mut self, policy: &WritePolicy, key: &Key, @@ -581,7 +588,7 @@ Ok(()) } - pub fn set_scan( + pub(crate) fn set_scan( &mut self, policy: &ScanPolicy, namespace: &str, @@ -696,7 +703,7 @@ } #[allow(clippy::cognitive_complexity)] - pub fn set_query( + pub(crate) fn set_query( &mut self, policy: &QueryPolicy, statement: &Statement, @@ -1156,22 +1163,26 @@ // Data buffer implementations + /// Get the current Data Offset pub const fn data_offset(&self) -> usize { self.data_offset } - pub fn skip_bytes(&mut self, count: usize) { + pub(crate) fn skip_bytes(&mut self, count: usize) { self.data_offset += count; } + /// Skips the amount of next bytes pub fn skip(&mut self, count: usize) { self.data_offset += count; } + /// Returns the current byte without moving the index pub fn peek(&self) -> u8 { self.data_buffer[self.data_offset] } + /// Reads a u8 Value from the Buffer #[allow(clippy::option_if_let_else)] pub fn read_u8(&mut self, pos: Option) -> u8 { if let Some(pos) = pos { @@ -1183,6 +1194,7 @@ } } + /// Reads a i8 Value from the Buffer #[allow(clippy::option_if_let_else)] pub fn read_i8(&mut self, pos: Option) -> i8 { if let Some(pos) = pos { @@ -1194,6 +1206,7 @@ } } + /// Reads a u16 Value from the Buffer #[allow(clippy::option_if_let_else)] pub fn read_u16(&mut self, pos: Option) -> u16 { let len = 2; @@ -1208,11 +1221,13 @@ } } + /// Reads a i16 Value from the Buffer pub fn read_i16(&mut self, pos: Option) -> i16 { let val = self.read_u16(pos); val as i16 } + /// Reads a u32 Value from the Buffer #[allow(clippy::option_if_let_else)] pub fn read_u32(&mut self, pos: Option) -> u32 { let len = 4; @@ -1227,11 +1242,13 @@ } } + /// Reads a i32 Value from the Buffer pub fn read_i32(&mut self, pos: Option) -> i32 { let val = self.read_u32(pos); val as i32 } + /// Reads a u64 Value from the Buffer #[allow(clippy::option_if_let_else)] pub fn read_u64(&mut self, pos: Option) -> u64 { let len = 8; @@ -1246,17 +1263,19 @@ } } + /// Reads a i64 Value from the Buffer pub fn read_i64(&mut self, pos: Option) -> i64 { let val = self.read_u64(pos); val as i64 } - pub fn read_msg_size(&mut self, pos: Option) -> usize { + pub(crate) fn read_msg_size(&mut self, pos: Option) -> usize { let size = self.read_i64(pos); let size = size & 0xFFFF_FFFF_FFFF; size as usize } + /// Reads a f32 Value from the Buffer #[allow(clippy::option_if_let_else)] pub fn read_f32(&mut self, pos: Option) -> f32 { let len = 4; @@ -1271,6 +1290,7 @@ } } + /// Reads a f64 Value from the Buffer
#[allow(clippy::option_if_let_else)] pub fn read_f64(&mut self, pos: Option) -> f64 { let len = 8; @@ -1285,26 +1305,31 @@ } } + /// Reads a String Value from the Buffer pub fn read_str(&mut self, len: usize) -> Result { let s = str::from_utf8(&self.data_buffer[self.data_offset..self.data_offset + len])?; self.data_offset += len; Ok(s.to_owned()) } + /// Reads a raw Bytes from the Buffer pub fn read_bytes(&mut self, pos: usize, count: usize) -> &[u8] { &self.data_buffer[pos..pos + count] } + /// Reads a byte Slice from the Buffer pub fn read_slice(&mut self, count: usize) -> &[u8] { &self.data_buffer[self.data_offset..self.data_offset + count] } + /// Reads a Blob Value from the Buffer pub fn read_blob(&mut self, len: usize) -> Vec { let val = self.data_buffer[self.data_offset..self.data_offset + len].to_vec(); self.data_offset += len; val } + /// Reads a bool Value from the Buffer pub fn read_bool(&mut self, len: usize) -> bool { if len <= 0 { false @@ -1315,18 +1340,21 @@ } } + /// Writes a u8 Value to the Buffer pub fn write_u8(&mut self, val: u8) -> usize { self.data_buffer[self.data_offset] = val; self.data_offset += 1; 1 } + /// Writes a i8 Value to the Buffer pub fn write_i8(&mut self, val: i8) -> usize { self.data_buffer[self.data_offset] = val as u8; self.data_offset += 1; 1 } + /// Writes a u16 Value to the Buffer pub fn write_u16(&mut self, val: u16) -> usize { NetworkEndian::write_u16( &mut self.data_buffer[self.data_offset..self.data_offset + 2], @@ -1336,6 +1364,7 @@ 2 } + /// Writes a u16 as Little Endian Value to the Buffer pub fn write_u16_little_endian(&mut self, val: u16) -> usize { LittleEndian::write_u16( &mut self.data_buffer[self.data_offset..self.data_offset + 2], @@ -1345,10 +1374,12 @@ 2 } + /// Writes a i16 Value to the Buffer pub fn write_i16(&mut self, val: i16) -> usize { self.write_u16(val as u16) } + /// Writes a u32 Value to the Buffer pub fn write_u32(&mut self, val: u32) -> usize { NetworkEndian::write_u32( &mut self.data_buffer[self.data_offset..self.data_offset + 4], @@ -1358,10 +1389,12 @@ 4 } + /// Writes a i32 Value to the Buffer pub fn write_i32(&mut self, val: i32) -> usize { self.write_u32(val as u32) } + /// Writes a u64 Value to the Buffer pub fn write_u64(&mut self, val: u64) -> usize { NetworkEndian::write_u64( &mut self.data_buffer[self.data_offset..self.data_offset + 8], @@ -1371,15 +1404,18 @@ 8 } + /// Writes a i64 Value to the Buffer pub fn write_i64(&mut self, val: i64) -> usize { self.write_u64(val as u64) } + /// Writes a bool Value to the Buffer pub fn write_bool(&mut self, val: bool) -> usize { let val = if val { 1 } else { 0 }; self.write_i8(val) } + /// Writes a f32 Value to the Buffer pub fn write_f32(&mut self, val: f32) -> usize { NetworkEndian::write_f32( &mut self.data_buffer[self.data_offset..self.data_offset + 4], @@ -1389,6 +1425,7 @@ 4 } + /// Writes a f64 Value to the Buffer pub fn write_f64(&mut self, val: f64) -> usize { NetworkEndian::write_f64( &mut self.data_buffer[self.data_offset..self.data_offset + 8], @@ -1398,6 +1435,7 @@ 8 } + /// Writes raw Bytes to the Buffer pub fn write_bytes(&mut self, bytes: &[u8]) -> usize { for b in bytes { self.write_u8(*b); } bytes.len() } + /// Writes a String Reference to the Buffer pub fn write_str(&mut self, val: &str) -> usize { self.write_bytes(val.as_bytes()) } + /// Writes a GeoJSON Value to the Buffer pub fn write_geo(&mut self,
value: &str) -> usize { self.write_u8(0); self.write_u8(0); @@ -1417,14 +1457,24 @@ impl Buffer { 3 + value.len() } - pub fn write_timeout(&mut self, val: Option) { + pub(crate) fn write_timeout(&mut self, val: Option) { if let Some(val) = val { let millis: i32 = (val.as_secs() * 1_000) as i32 + val.subsec_millis() as i32; NetworkEndian::write_i32(&mut self.data_buffer[22..22 + 4], millis); } } - pub fn dump_buffer(&self) { + pub(crate) fn dump_buffer(&self) { println!(">>>>>>>>>>>>>>> {:?}", self.data_buffer.clone()); } + + pub(crate) fn dump_hex(&self) { + self.data_buffer.iter().enumerate().for_each(|f| { + if f.0 == self.data_offset { + println!("{:#04x} <--- Current Offset", f.1); + } else { + println!("{:#04x}", f.1); + } + }); + } } diff --git a/aerospike-core/src/commands/execute_udf_command.rs b/aerospike-core/src/commands/execute_udf_command.rs index 1db65621..252c09b0 100644 --- a/aerospike-core/src/commands/execute_udf_command.rs +++ b/aerospike-core/src/commands/execute_udf_command.rs @@ -18,20 +18,21 @@ use std::time::Duration; use crate::cluster::{Cluster, Node}; use crate::commands::{Command, ReadCommand, SingleCommand}; +use crate::derive::readable::ReadableBins; use crate::errors::Result; use crate::net::Connection; use crate::policy::WritePolicy; use crate::{Bins, Key, Value}; -pub struct ExecuteUDFCommand<'a> { - pub read_command: ReadCommand<'a>, +pub struct ExecuteUDFCommand<'a, T: ReadableBins> { + pub read_command: ReadCommand<'a, T>, policy: &'a WritePolicy, package_name: &'a str, function_name: &'a str, args: Option<&'a [Value]>, } -impl<'a> ExecuteUDFCommand<'a> { +impl<'a, T: ReadableBins> ExecuteUDFCommand<'a, T> { pub fn new( policy: &'a WritePolicy, cluster: Arc, @@ -55,7 +56,7 @@ impl<'a> ExecuteUDFCommand<'a> { } #[async_trait::async_trait] -impl<'a> Command for ExecuteUDFCommand<'a> { +impl<'a, T: ReadableBins> Command for ExecuteUDFCommand<'a, T> { async fn write_timeout( &mut self, conn: &mut Connection, @@ -65,10 +66,6 @@ impl<'a> Command for ExecuteUDFCommand<'a> { Ok(()) } - async fn write_buffer(&mut self, conn: &mut Connection) -> Result<()> { - conn.flush().await - } - fn prepare_buffer(&mut self, conn: &mut Connection) -> Result<()> { conn.buffer.set_udf( self.policy, @@ -86,4 +83,8 @@ impl<'a> Command for ExecuteUDFCommand<'a> { async fn parse_result(&mut self, conn: &mut Connection) -> Result<()> { self.read_command.parse_result(conn).await } + + async fn write_buffer(&mut self, conn: &mut Connection) -> Result<()> { + conn.flush().await + } } diff --git a/aerospike-core/src/commands/operate_command.rs b/aerospike-core/src/commands/operate_command.rs index 8cf23985..b874430a 100644 --- a/aerospike-core/src/commands/operate_command.rs +++ b/aerospike-core/src/commands/operate_command.rs @@ -17,19 +17,20 @@ use std::time::Duration; use crate::cluster::{Cluster, Node}; use crate::commands::{Command, ReadCommand, SingleCommand}; +use crate::derive::readable::ReadableBins; use crate::errors::Result; use crate::net::Connection; use crate::operations::Operation; use crate::policy::WritePolicy; use crate::{Bins, Key}; -pub struct OperateCommand<'a> { - pub read_command: ReadCommand<'a>, +pub struct OperateCommand<'a, T: ReadableBins> { + pub read_command: ReadCommand<'a, T>, policy: &'a WritePolicy, operations: &'a [Operation<'a>], } -impl<'a> OperateCommand<'a> { +impl<'a, T: ReadableBins> OperateCommand<'a, T> { pub fn new( policy: &'a WritePolicy, cluster: Arc, @@ -49,7 +50,7 @@ impl<'a> OperateCommand<'a> { } #[async_trait::async_trait] -impl<'a> 
Command for OperateCommand<'a> { +impl<'a, T: ReadableBins> Command for OperateCommand<'a, T> { async fn write_timeout( &mut self, conn: &mut Connection, @@ -59,10 +60,6 @@ impl<'a> Command for OperateCommand<'a> { Ok(()) } - async fn write_buffer(&mut self, conn: &mut Connection) -> Result<()> { - conn.flush().await - } - fn prepare_buffer(&mut self, conn: &mut Connection) -> Result<()> { conn.buffer.set_operate( self.policy, @@ -78,4 +75,8 @@ impl<'a> Command for OperateCommand<'a> { async fn parse_result(&mut self, conn: &mut Connection) -> Result<()> { self.read_command.parse_result(conn).await } + + async fn write_buffer(&mut self, conn: &mut Connection) -> Result<()> { + conn.flush().await + } } diff --git a/aerospike-core/src/commands/query_command.rs b/aerospike-core/src/commands/query_command.rs index 28a28dab..a748f509 100644 --- a/aerospike-core/src/commands/query_command.rs +++ b/aerospike-core/src/commands/query_command.rs @@ -17,24 +17,25 @@ use std::time::Duration; use crate::cluster::Node; use crate::commands::{Command, SingleCommand, StreamCommand}; +use crate::derive::readable::ReadableBins; use crate::errors::Result; use crate::net::Connection; use crate::policy::QueryPolicy; use crate::{Recordset, Statement}; -pub struct QueryCommand<'a> { - stream_command: StreamCommand, +pub struct QueryCommand<'a, T: ReadableBins> { + stream_command: StreamCommand, policy: &'a QueryPolicy, statement: Arc, partitions: Vec, } -impl<'a> QueryCommand<'a> { +impl<'a, T: ReadableBins> QueryCommand<'a, T> { pub fn new( policy: &'a QueryPolicy, node: Arc, statement: Arc, - recordset: Arc, + recordset: Arc>, partitions: Vec, ) -> Self { QueryCommand { @@ -51,7 +52,7 @@ impl<'a> QueryCommand<'a> { } #[async_trait::async_trait] -impl<'a> Command for QueryCommand<'a> { +impl<'a, T: ReadableBins> Command for QueryCommand<'a, T> { async fn write_timeout( &mut self, conn: &mut Connection, @@ -61,10 +62,6 @@ impl<'a> Command for QueryCommand<'a> { Ok(()) } - async fn write_buffer(&mut self, conn: &mut Connection) -> Result<()> { - conn.flush().await - } - fn prepare_buffer(&mut self, conn: &mut Connection) -> Result<()> { conn.buffer.set_query( self.policy, @@ -82,4 +79,8 @@ impl<'a> Command for QueryCommand<'a> { async fn parse_result(&mut self, conn: &mut Connection) -> Result<()> { StreamCommand::parse_result(&mut self.stream_command, conn).await } + + async fn write_buffer(&mut self, conn: &mut Connection) -> Result<()> { + conn.flush().await + } } diff --git a/aerospike-core/src/commands/read_command.rs b/aerospike-core/src/commands/read_command.rs index be79be1b..e3ab5a93 100644 --- a/aerospike-core/src/commands/read_command.rs +++ b/aerospike-core/src/commands/read_command.rs @@ -12,28 +12,26 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use std::collections::hash_map::Entry::{Occupied, Vacant}; -use std::collections::HashMap; use std::sync::Arc; use std::time::Duration; use crate::cluster::{Cluster, Node}; use crate::commands::buffer; use crate::commands::{Command, SingleCommand}; +use crate::derive::readable::ReadableBins; use crate::errors::{ErrorKind, Result}; use crate::net::Connection; use crate::policy::ReadPolicy; -use crate::value::bytes_to_particle; -use crate::{Bins, Key, Record, ResultCode, Value}; +use crate::{Bins, Key, Record, ResultCode}; -pub struct ReadCommand<'a> { +pub struct ReadCommand<'a, T: ReadableBins> { pub single_command: SingleCommand<'a>, - pub record: Option, + pub record: Option>, policy: &'a ReadPolicy, bins: Bins, } -impl<'a> ReadCommand<'a> { +impl<'a, T: ReadableBins> ReadCommand<'a, T> { pub fn new(policy: &'a ReadPolicy, cluster: Arc, key: &'a Key, bins: Bins) -> Self { ReadCommand { single_command: SingleCommand::new(cluster, key), @@ -47,56 +45,54 @@ impl<'a> ReadCommand<'a> { SingleCommand::execute(self.policy, self).await } - fn parse_record( + async fn parse_record( &mut self, conn: &mut Connection, op_count: usize, field_count: usize, generation: u32, expiration: u32, - ) -> Result { - let mut bins: HashMap = HashMap::with_capacity(op_count); - + ) -> Result> { // There can be fields in the response (setname etc). For now, ignore them. Expose them to // the API if needed in the future. for _ in 0..field_count { + conn.read_buffer(4).await?; let field_size = conn.buffer.read_u32(None) as usize; - conn.buffer.skip(4 + field_size); + conn.read_buffer(field_size).await?; + conn.buffer.skip(field_size); } - for _ in 0..op_count { - let op_size = conn.buffer.read_u32(None) as usize; - conn.buffer.skip(1); - let particle_type = conn.buffer.read_u8(None); - conn.buffer.skip(1); - let name_size = conn.buffer.read_u8(None) as usize; - let name: String = conn.buffer.read_str(name_size)?; - - let particle_bytes_size = op_size - (4 + name_size); - let value = bytes_to_particle(particle_type, &mut conn.buffer, particle_bytes_size)?; - - if !value.is_nil() { - // list/map operations may return multiple values for the same bin. - match bins.entry(name) { - Vacant(entry) => { - entry.insert(value); - } - Occupied(entry) => match *entry.into_mut() { - Value::List(ref mut list) => list.push(value), - ref mut prev => { - *prev = as_list!(prev.clone(), value); - } - }, - } - } + let mut pre_data = conn.pre_parse_stream_bins(op_count).await?; + let bins = T::read_bins_from_bytes(&mut pre_data)?; + Ok(Record::new(None, bins, generation, expiration)) + } + + async fn parse_udf_error( + &mut self, + conn: &mut Connection, + op_count: usize, + field_count: usize, + ) -> Result { + // There can be fields in the response (setname etc). For now, ignore them. Expose them to + // the API if needed in the future. 
+ for _ in 0..field_count { + conn.read_buffer(4).await?; + let field_size = conn.buffer.read_u32(None) as usize; + conn.read_buffer(field_size).await?; + conn.buffer.skip(field_size); } - Ok(Record::new(None, bins, generation, expiration)) + let pre_data = conn.pre_parse_stream_bins(op_count).await?; + let fail = pre_data.get("FAILURE"); + if let Some(fail) = fail { + return fail.value.buffer.clone().read_str(fail.value.byte_length); + } + Ok(String::from("UDF Error")) } } #[async_trait::async_trait] -impl<'a> Command for ReadCommand<'a> { +impl<'a, T: ReadableBins> Command for ReadCommand<'a, T> { async fn write_timeout( &mut self, conn: &mut Connection, @@ -106,10 +102,6 @@ impl<'a> Command for ReadCommand<'a> { Ok(()) } - async fn write_buffer(&mut self, conn: &mut Connection) -> Result<()> { - conn.flush().await - } - fn prepare_buffer(&mut self, conn: &mut Connection) -> Result<()> { conn.buffer .set_read(self.policy, self.single_command.key, &self.bins) @@ -129,44 +121,34 @@ impl<'a> Command for ReadCommand<'a> { } conn.buffer.reset_offset(); - let sz = conn.buffer.read_u64(Some(0)); - let header_length = conn.buffer.read_u8(Some(8)); + conn.buffer.skip(9); let result_code = conn.buffer.read_u8(Some(13)); let generation = conn.buffer.read_u32(Some(14)); let expiration = conn.buffer.read_u32(Some(18)); let field_count = conn.buffer.read_u16(Some(26)) as usize; // almost certainly 0 let op_count = conn.buffer.read_u16(Some(28)) as usize; - let receive_size = ((sz & 0xFFFF_FFFF_FFFF) - u64::from(header_length)) as usize; - - // Read remaining message bytes - if receive_size > 0 { - if let Err(err) = conn.read_buffer(receive_size).await { - warn!("Parse result error: {}", err); - bail!(err); - } - } match ResultCode::from(result_code) { ResultCode::Ok => { let record = if self.bins.is_none() { - Record::new(None, HashMap::new(), generation, expiration) + Record::new(None, T::new_empty()?, generation, expiration) } else { - self.parse_record(conn, op_count, field_count, generation, expiration)? + self.parse_record(conn, op_count, field_count, generation, expiration) + .await? 
}; self.record = Some(record); Ok(()) } ResultCode::UdfBadResponse => { // record bin "FAILURE" contains details about the UDF error - let record = - self.parse_record(conn, op_count, field_count, generation, expiration)?; - let reason = record - .bins - .get("FAILURE") - .map_or(String::from("UDF Error"), ToString::to_string); + let reason = self.parse_udf_error(conn, op_count, field_count).await?; Err(ErrorKind::UdfBadResponse(reason).into()) } rc => Err(ErrorKind::ServerError(rc).into()), } } + + async fn write_buffer(&mut self, conn: &mut Connection) -> Result<()> { + conn.flush().await + } } diff --git a/aerospike-core/src/commands/scan_command.rs b/aerospike-core/src/commands/scan_command.rs index 808ad434..37d549e1 100644 --- a/aerospike-core/src/commands/scan_command.rs +++ b/aerospike-core/src/commands/scan_command.rs @@ -18,13 +18,14 @@ use std::time::Duration; use crate::cluster::Node; use crate::commands::{Command, SingleCommand, StreamCommand}; +use crate::derive::readable::ReadableBins; use crate::errors::Result; use crate::net::Connection; use crate::policy::ScanPolicy; use crate::{Bins, Recordset}; -pub struct ScanCommand<'a> { - stream_command: StreamCommand, +pub struct ScanCommand<'a, T: ReadableBins> { + stream_command: StreamCommand, policy: &'a ScanPolicy, namespace: &'a str, set_name: &'a str, @@ -32,14 +33,14 @@ pub struct ScanCommand<'a> { partitions: Vec, } -impl<'a> ScanCommand<'a> { +impl<'a, T: ReadableBins> ScanCommand<'a, T> { pub fn new( policy: &'a ScanPolicy, node: Arc, namespace: &'a str, set_name: &'a str, bins: Bins, - recordset: Arc, + recordset: Arc>, partitions: Vec, ) -> Self { ScanCommand { @@ -58,7 +59,7 @@ impl<'a> ScanCommand<'a> { } #[async_trait::async_trait] -impl<'a> Command for ScanCommand<'a> { +impl<'a, T: ReadableBins> Command for ScanCommand<'a, T> { async fn write_timeout( &mut self, conn: &mut Connection, @@ -68,10 +69,6 @@ impl<'a> Command for ScanCommand<'a> { Ok(()) } - async fn write_buffer(&mut self, conn: &mut Connection) -> Result<()> { - conn.flush().await - } - fn prepare_buffer(&mut self, conn: &mut Connection) -> Result<()> { conn.buffer.set_scan( self.policy, @@ -90,4 +87,8 @@ impl<'a> Command for ScanCommand<'a> { async fn parse_result(&mut self, conn: &mut Connection) -> Result<()> { StreamCommand::parse_result(&mut self.stream_command, conn).await } + + async fn write_buffer(&mut self, conn: &mut Connection) -> Result<()> { + conn.flush().await + } } diff --git a/aerospike-core/src/commands/stream_command.rs b/aerospike-core/src/commands/stream_command.rs index 05989f34..a5aaf7a4 100644 --- a/aerospike-core/src/commands/stream_command.rs +++ b/aerospike-core/src/commands/stream_command.rs @@ -12,7 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use std::collections::HashMap; use std::sync::Arc; use std::thread; use std::time::Duration; @@ -21,30 +20,34 @@ use crate::cluster::Node; use crate::commands::buffer; use crate::commands::field_type::FieldType; use crate::commands::Command; +use crate::derive::readable::ReadableBins; use crate::errors::{ErrorKind, Result}; use crate::net::Connection; use crate::query::Recordset; use crate::value::bytes_to_particle; use crate::{Key, Record, ResultCode, Value}; -pub struct StreamCommand { +pub struct StreamCommand { node: Arc, - pub recordset: Arc, + pub recordset: Arc>, } -impl Drop for StreamCommand { +impl Drop for StreamCommand { fn drop(&mut self) { // signal_end self.recordset.signal_end(); } } -impl StreamCommand { - pub fn new(node: Arc, recordset: Arc) -> Self { +impl StreamCommand { + pub fn new(node: Arc, recordset: Arc>) -> Self { StreamCommand { node, recordset } } - async fn parse_record(conn: &mut Connection, size: usize) -> Result<(Option, bool)> { + async fn parse_record(conn: &mut Connection, size: usize) -> Result<(Option>, bool)> + where + T: ReadableBins, + { let result_code = ResultCode::from(conn.buffer.read_u8(Some(5))); if result_code != ResultCode::Ok { if conn.bytes_read() < size { @@ -71,31 +74,15 @@ impl StreamCommand { let field_count = conn.buffer.read_u16(None) as usize; // almost certainly 0 let op_count = conn.buffer.read_u16(None) as usize; - let key = StreamCommand::parse_key(conn, field_count).await?; + let key = StreamCommand::::parse_key(conn, field_count).await?; // Partition is done, don't go further if info3 & buffer::_INFO3_PARTITION_DONE != 0 { return Ok((None, true)); } - let mut bins: HashMap = HashMap::with_capacity(op_count); - - for _ in 0..op_count { - conn.read_buffer(8).await?; - let op_size = conn.buffer.read_u32(None) as usize; - conn.buffer.skip(1); - let particle_type = conn.buffer.read_u8(None); - conn.buffer.skip(1); - let name_size = conn.buffer.read_u8(None) as usize; - conn.read_buffer(name_size).await?; - let name: String = conn.buffer.read_str(name_size)?; - - let particle_bytes_size = op_size - (4 + name_size); - conn.read_buffer(particle_bytes_size).await?; - let value = bytes_to_particle(particle_type, &mut conn.buffer, particle_bytes_size)?; - - bins.insert(name, value); - } + let mut data_points = conn.pre_parse_stream_bins(op_count).await?; + let bins = T::read_bins_from_bytes(&mut data_points)?; let record = Record::new(Some(key), bins, generation, expiration); Ok((Some(record), true)) @@ -181,7 +168,7 @@ impl StreamCommand { } #[async_trait::async_trait] -impl Command for StreamCommand { +impl Command for StreamCommand { async fn write_timeout( &mut self, conn: &mut Connection, @@ -191,10 +178,6 @@ impl Command for StreamCommand { Ok(()) } - async fn write_buffer(&mut self, conn: &mut Connection) -> Result<()> { - conn.flush().await - } - #[allow(unused_variables)] fn prepare_buffer(&mut self, conn: &mut Connection) -> Result<()> { // should be implemented downstream @@ -221,4 +204,8 @@ impl Command for StreamCommand { Ok(()) } + + async fn write_buffer(&mut self, conn: &mut Connection) -> Result<()> { + conn.flush().await + } } diff --git a/aerospike-core/src/commands/write_command.rs b/aerospike-core/src/commands/write_command.rs index e92695d1..15f9f5f0 100644 --- a/aerospike-core/src/commands/write_command.rs +++ b/aerospike-core/src/commands/write_command.rs @@ -18,25 +18,26 @@ use std::time::Duration; use crate::cluster::{Cluster, Node}; use crate::commands::buffer; use crate::commands::{Command, SingleCommand}; 
+use crate::derive::writable::WritableBins; use crate::errors::{ErrorKind, Result}; use crate::net::Connection; use crate::operations::OperationType; use crate::policy::WritePolicy; -use crate::{Bin, Key, ResultCode}; +use crate::{Key, ResultCode}; -pub struct WriteCommand<'a> { +pub struct WriteCommand<'a, T: WritableBins> { single_command: SingleCommand<'a>, policy: &'a WritePolicy, - bins: &'a [Bin], + bins: &'a T, operation: OperationType, } -impl<'a> WriteCommand<'a> { +impl<'a, T: WritableBins> WriteCommand<'a, T> { pub fn new( policy: &'a WritePolicy, cluster: Arc, key: &'a Key, - bins: &'a [Bin], + bins: &'a T, operation: OperationType, ) -> Self { WriteCommand { @@ -53,7 +54,7 @@ impl<'a> WriteCommand<'a> { } #[async_trait::async_trait] -impl<'a> Command for WriteCommand<'a> { +impl<'a, T: WritableBins> Command for WriteCommand<'a, T> { async fn write_timeout( &mut self, conn: &mut Connection, diff --git a/aerospike-core/src/derive/mod.rs b/aerospike-core/src/derive/mod.rs new file mode 100644 index 00000000..024f86b3 --- /dev/null +++ b/aerospike-core/src/derive/mod.rs @@ -0,0 +1,19 @@ +// Copyright 2015-2020 Aerospike, Inc. +// +// Portions may be licensed to Aerospike, Inc. under one or more contributor +// license agreements. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not +// use this file except in compliance with the License. You may obtain a copy of +// the License at http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations under +// the License. + +//! Traits and Implementations for reading and writing data from/into structs and variables + +pub mod readable; +pub mod writable; diff --git a/aerospike-core/src/derive/readable.rs b/aerospike-core/src/derive/readable.rs new file mode 100644 index 00000000..6bec5f70 --- /dev/null +++ b/aerospike-core/src/derive/readable.rs @@ -0,0 +1,637 @@ +// Copyright 2015-2020 Aerospike, Inc. +// +// Portions may be licensed to Aerospike, Inc. under one or more contributor +// license agreements. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not +// use this file except in compliance with the License. You may obtain a copy of +// the License at http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations under +// the License. + +//! Traits and Implementations for reading data into structs and variables + +use crate::errors::{ErrorKind, Result}; +use crate::{Buffer, ParticleType, Value}; +use rand::distributions::weighted::alias_method::Weight; +use std::collections::HashMap; + +use crate::value::bytes_to_particle; +pub use aerospike_macro::{ReadableBins, ReadableValue}; + +/// The ReadableBins Trait is used to convert Aerospike Wire Data to Objects +pub trait ReadableBins: Sync + Sized + Send + Clone { + /// Convert the pre-parsed Bins to a compatible Object + /// The String in `data_points` is the field name returned by the Server. 
+ /// This can vary from the actual name in the Object if the rename attribute is used. + fn read_bins_from_bytes(data_points: &mut HashMap) -> Result; + /// Default Fallback for Empty Bins + /// Should be implemented for Types like Options and Lists. + /// Defaults to throwing an Error + fn new_empty() -> Result { + bail!("No empty implementation found") + } +} + +/// The ReadableValue Trait is used to convert Aerospike Wire Data into the Value of Objects +pub trait ReadableValue: Sync + Sized + Send + Clone { + /// Read the data from the Wire Buffer. + /// This method is primarily used for pre-parsing checks + fn read_value_from_bytes(data_point: &mut PreParsedBin) -> Result; + /// Actual conversion of the bytes to the value + fn parse_value(data_point: &mut PreParsedValue) -> Result; + /// CDT values are parsed differently from normal Values. This buffer is not a copy, so modifications can cause wrong behavior. + fn parse_cdt_value(buff: &mut Buffer) -> Result; +} + +/// Before giving data to the Readable Traits, the client pre-parses the wire data into this format +#[derive(Debug)] +pub struct PreParsedBin { + /// Value is always given for any datatype. + pub value: PreParsedValue, + /// Lists and Maps can have other sub-values for entries. In this case, they need to be appended to the parsed value + pub sub_values: Vec, +} + +/// Includes the data for the Value part of a Bin. +#[derive(Debug)] +pub struct PreParsedValue { + /// The Particle Type the Server stored the Value as. + pub particle_type: u8, + /// Part of the wire Buffer with only the relevant Value Data inside. Value starts at offset 0 without meta around it. + pub buffer: Buffer, + /// Amount of bytes that should be parsed as Value in the buffer. Should be used instead of buffer length for safety reasons.
+ pub byte_length: usize, +} + +impl ReadableBins for HashMap { + fn read_bins_from_bytes(data_points: &mut HashMap) -> Result { + let mut hm = HashMap::new(); + for (k, d) in data_points { + let x = Value::read_value_from_bytes(d)?; + hm.insert(k.to_string(), x); + } + + Ok(hm) + } + + fn new_empty() -> Result { + Ok(HashMap::new()) + } +} + +impl ReadableValue for Value { + fn read_value_from_bytes(data_point: &mut PreParsedBin) -> Result { + let mut val = Value::parse_value(&mut data_point.value)?; + + for sv in &mut data_point.sub_values { + let sval = Value::parse_value(sv)?; + match val { + Value::List(ref mut list) => list.push(sval), + ref mut prev => { + *prev = as_list!(prev.clone(), sval); + } + } + } + return Ok(val); + } + + fn parse_value(data_point: &mut PreParsedValue) -> Result { + bytes_to_particle( + data_point.particle_type, + &mut data_point.buffer, + data_point.byte_length, + ) + } + + fn parse_cdt_value(buff: &mut Buffer) -> Result { + crate::msgpack::decoder::unpack_value(buff) + } +} + +impl ReadableValue for i64 { + fn read_value_from_bytes(data_point: &mut PreParsedBin) -> Result { + if data_point.value.particle_type == 0 { + bail!(ErrorKind::Derive( + "No Value received for Integer".to_string() + )) + } + if !data_point.sub_values.is_empty() { + bail!(ErrorKind::Derive( + "Multiple Values received for Integer".to_string() + )) + } + return Self::parse_value(&mut data_point.value); + } + + fn parse_value(data_point: &mut PreParsedValue) -> Result { + return Ok(data_point.buffer.read_i64(None)); + } + + fn parse_cdt_value(buff: &mut Buffer) -> Result { + let ptype = buff.read_u8(None); + match ptype { + 0x00..=0x7f => Ok(i64::from(ptype) as i64), + 0xcc => Ok(buff.read_u8(None) as i64), + 0xcd => Ok(buff.read_u16(None) as i64), + 0xce => Ok(buff.read_u32(None) as i64), + 0xcf => Ok(buff.read_u64(None) as i64), + 0xd0 => Ok(buff.read_i8(None) as i64), + 0xd1 => Ok(buff.read_i16(None) as i64), + 0xd2 => Ok(buff.read_i32(None) as i64), + 0xd3 => Ok(buff.read_i64(None) as i64), + _ => Err( + ErrorKind::Derive("Invalid Data Type for derive i64 CDT Value".to_string()).into(), + ), + } + } +} + +impl ReadableValue for i32 { + fn read_value_from_bytes(data_point: &mut PreParsedBin) -> Result { + if data_point.value.particle_type == 0 { + bail!(ErrorKind::Derive( + "No Value received for Integer".to_string() + )) + } + if !data_point.sub_values.is_empty() { + bail!(ErrorKind::Derive( + "Multiple Values received for Integer".to_string() + )) + } + return Self::parse_value(&mut data_point.value); + } + + fn parse_value(data_point: &mut PreParsedValue) -> Result { + let data = data_point.buffer.read_i64(None); + if data > i32::MAX as i64 { + bail!(ErrorKind::Derive(format!( + "Tried to cast an Integer > {} to i32", + i32::MAX + ))) + } + Ok(data as i32) + } + + fn parse_cdt_value(buff: &mut Buffer) -> Result { + let ptype = buff.read_u8(None); + let data = match ptype { + 0x00..=0x7f => i64::from(ptype) as i64, + 0xcc => buff.read_u8(None) as i64, + 0xcd => buff.read_u16(None) as i64, + 0xce => buff.read_u32(None) as i64, + 0xcf => buff.read_u64(None) as i64, + 0xd0 => buff.read_i8(None) as i64, + 0xd1 => buff.read_i16(None) as i64, + 0xd2 => buff.read_i32(None) as i64, + 0xd3 => buff.read_i64(None) as i64, + _ => { + return Err(ErrorKind::Derive( + "Invalid Data Type for derive i32 CDT Value".to_string(), + ) + .into()) + } + }; + if data > i32::MAX as i64 { + bail!(ErrorKind::Derive(format!( + "Tried to cast an Integer > {} to i32", + i32::MAX + ))) + } + Ok(data as i32) + 
} +} + +impl ReadableValue for i16 { + fn read_value_from_bytes(data_point: &mut PreParsedBin) -> Result { + if data_point.value.particle_type == 0 { + bail!(ErrorKind::Derive( + "No Value received for Integer".to_string() + )) + } + if !data_point.sub_values.is_empty() { + bail!(ErrorKind::Derive( + "Multiple Values received for Integer".to_string() + )) + } + return Self::parse_value(&mut data_point.value); + } + + fn parse_value(data_point: &mut PreParsedValue) -> Result { + let data = data_point.buffer.read_i64(None); + if data > i16::MAX as i64 { + bail!(ErrorKind::Derive(format!( + "Tried to cast an Integer > {} to i16", + i16::MAX + ))) + } + Ok(data as i16) + } + + fn parse_cdt_value(buff: &mut Buffer) -> Result { + let ptype = buff.read_u8(None); + let data = match ptype { + 0x00..=0x7f => i64::from(ptype) as i64, + 0xcc => buff.read_u8(None) as i64, + 0xcd => buff.read_u16(None) as i64, + 0xce => buff.read_u32(None) as i64, + 0xcf => buff.read_u64(None) as i64, + 0xd0 => buff.read_i8(None) as i64, + 0xd1 => buff.read_i16(None) as i64, + 0xd2 => buff.read_i32(None) as i64, + 0xd3 => buff.read_i64(None) as i64, + _ => { + return Err(ErrorKind::Derive( + "Invalid Data Type for derive i16 CDT Value".to_string(), + ) + .into()) + } + }; + if data > i16::MAX as i64 { + bail!(ErrorKind::Derive(format!( + "Tried to cast an Integer > {} to i16", + i16::MAX + ))) + } + Ok(data as i16) + } +} + +impl ReadableValue for i8 { + fn read_value_from_bytes(data_point: &mut PreParsedBin) -> Result { + if data_point.value.particle_type == 0 { + bail!(ErrorKind::Derive( + "No Value received for Integer".to_string() + )) + } + if !data_point.sub_values.is_empty() { + bail!(ErrorKind::Derive( + "Multiple Values received for Integer".to_string() + )) + } + return Self::parse_value(&mut data_point.value); + } + + fn parse_value(data_point: &mut PreParsedValue) -> Result { + let data = data_point.buffer.read_i64(None); + if data > i8::MAX as i64 { + bail!(ErrorKind::Derive(format!( + "Tried to cast an Integer > {} to i8", + i8::MAX + ))) + } + Ok(data as i8) + } + + fn parse_cdt_value(buff: &mut Buffer) -> Result { + let ptype = buff.read_u8(None); + let data = match ptype { + 0x00..=0x7f => i64::from(ptype), + 0xcc => buff.read_u8(None) as i64, + 0xcd => buff.read_u16(None) as i64, + 0xce => buff.read_u32(None) as i64, + 0xcf => buff.read_u64(None) as i64, + 0xd0 => buff.read_i8(None) as i64, + 0xd1 => buff.read_i16(None) as i64, + 0xd2 => buff.read_i32(None) as i64, + 0xd3 => buff.read_i64(None) as i64, + _ => { + return Err(ErrorKind::Derive( + "Invalid Data Type for derive i8 CDT Value".to_string(), + ) + .into()) + } + }; + if data > i8::MAX as i64 { + bail!(ErrorKind::Derive(format!( + "Tried to cast an Integer > {} to i8", + i8::MAX + ))) + } + Ok(data as i8) + } +} + +impl ReadableValue for isize { + fn read_value_from_bytes(data_point: &mut PreParsedBin) -> Result { + if data_point.value.particle_type == 0 { + bail!(ErrorKind::Derive( + "No Value received for Integer".to_string() + )) + } + if !data_point.sub_values.is_empty() { + bail!(ErrorKind::Derive( + "Multiple Values received for Integer".to_string() + )) + } + return Self::parse_value(&mut data_point.value); + } + + fn parse_value(data_point: &mut PreParsedValue) -> Result { + return Ok(data_point.buffer.read_i64(None) as isize); + } + + fn parse_cdt_value(buff: &mut Buffer) -> Result { + let ptype = buff.read_u8(None); + match ptype { + 0x00..=0x7f => Ok(i64::from(ptype) as isize), + 0xcc => Ok(buff.read_u8(None) as isize), + 0xcd => 
Ok(buff.read_u16(None) as isize), + 0xce => Ok(buff.read_u32(None) as isize), + 0xcf => Ok(buff.read_u64(None) as isize), + 0xd0 => Ok(buff.read_i8(None) as isize), + 0xd1 => Ok(buff.read_i16(None) as isize), + 0xd2 => Ok(buff.read_i32(None) as isize), + 0xd3 => Ok(buff.read_i64(None) as isize), + _ => Err( + ErrorKind::Derive("Invalid Data Type for derive isize CDT Value".to_string()) + .into(), + ), + } + } +} + +impl ReadableValue for f64 { + fn read_value_from_bytes(data_point: &mut PreParsedBin) -> Result { + if data_point.value.particle_type == 0 { + bail!(ErrorKind::Derive("No Value received for Float".to_string())) + } + if !data_point.sub_values.is_empty() { + bail!(ErrorKind::Derive( + "Multiple Values received for Float".to_string() + )) + } + return Self::parse_value(&mut data_point.value); + } + + fn parse_value(data_point: &mut PreParsedValue) -> Result { + return Ok(data_point.buffer.read_f64(None)); + } + + fn parse_cdt_value(buff: &mut Buffer) -> Result { + let ptype = buff.read_u8(None); + match ptype { + 0xca => Ok(buff.read_f32(None) as f64), + 0xcb => Ok(buff.read_f64(None)), + _ => bail!(ErrorKind::Derive( + "Invalid Data Type for derive float CDT Value".to_string() + )), + } + } +} + +impl ReadableValue for String { + fn read_value_from_bytes(data_point: &mut PreParsedBin) -> Result { + if data_point.value.particle_type == 0 { + bail!(ErrorKind::Derive( + "No Value received for String".to_string() + )) + } + if !data_point.sub_values.is_empty() { + bail!(ErrorKind::Derive( + "Multiple Values received for string".to_string() + )) + } + return Self::parse_value(&mut data_point.value); + } + + fn parse_value(data_point: &mut PreParsedValue) -> Result { + return data_point.buffer.read_str(data_point.byte_length); + } + + fn parse_cdt_value(buff: &mut Buffer) -> Result { + // todo: fix this + let len = (buff.read_u8(None) & 0x1f) as usize; + let ptype = buff.read_u8(None); + if ptype != ParticleType::STRING as u8 { + bail!(ErrorKind::Derive( + "Invalid Data Type for derive string CDT Value".to_string() + )) + } + return buff.read_str(len - 1); + } +} + +impl ReadableValue for Option { + fn read_value_from_bytes(data_point: &mut PreParsedBin) -> Result { + if data_point.value.particle_type == 0 { + return Ok(None); + } + Ok(Some(T::read_value_from_bytes(data_point)?)) + } + + fn parse_value(data_point: &mut PreParsedValue) -> Result { + Ok(Some(T::parse_value(data_point)?)) + } + + fn parse_cdt_value(buff: &mut Buffer) -> Result { + if buff.data_buffer.len() > 0 { + Ok(Some(T::parse_cdt_value(buff)?)) + } else { + Ok(None) + } + } +} + +impl ReadableValue for Vec { + fn read_value_from_bytes(data_point: &mut PreParsedBin) -> Result { + let mut v: Vec = Vec::with_capacity(data_point.sub_values.len() + 1); + v.extend(Self::parse_value(&mut data_point.value)?); + for sv in &mut data_point.sub_values { + v.extend(Self::parse_value(sv)?) 
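+            // Sub-values are present when the server splits a large list across
+            // several ops for the same bin; each one is appended to the same Vec.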
+        }
+        return Ok(v);
+    }
+
+    fn parse_value(data_point: &mut PreParsedValue) -> Result {
+        if data_point.particle_type == ParticleType::LIST as u8 {
+            Self::parse_cdt_value(&mut data_point.buffer)
+        } else {
+            bail!(ErrorKind::Derive(
+                "Invalid Data Type for derive List CDT Type".to_string()
+            ))
+        }
+    }
+
+    fn parse_cdt_value(buff: &mut Buffer) -> Result {
+        let ltype = buff.read_u8(None);
+        let count: usize = match ltype {
+            0x90..=0x9f => (ltype & 0x0f) as usize,
+            0xdc => buff.read_u16(None) as usize,
+            0xdd => buff.read_u32(None) as usize,
+            _ => {
+                bail!(ErrorKind::Derive(
+                    "Invalid Data Type for derive List CDT Type".to_string()
+                ))
+            }
+        };
+
+        let mut list = Vec::with_capacity(count);
+        for _ in 0..count {
+            list.push(T::parse_cdt_value(buff)?);
+        }
+        return Ok(list);
+    }
+}
+
+impl ReadableValue for HashMap {
+    fn read_value_from_bytes(data_point: &mut PreParsedBin) -> Result {
+        let mut v: HashMap = HashMap::with_capacity(data_point.sub_values.len() + 1);
+        let v1 = Self::parse_value(&mut data_point.value)?;
+        v1.into_iter().for_each(|(key, val)| { v.insert(key, val); });
+        for sv in &mut data_point.sub_values {
+            let v2 = Self::parse_value(sv)?;
+            v2.into_iter().for_each(|(key, val)| { v.insert(key, val); });
+        }
+        return Ok(v);
+    }
+
+    fn parse_value(data_point: &mut PreParsedValue) -> Result {
+        if data_point.particle_type == ParticleType::MAP as u8 {
+            Self::parse_cdt_value(&mut data_point.buffer)
+        } else {
+            bail!(ErrorKind::Derive(
+                "Invalid Data Type for derive Map CDT Type".to_string()
+            ))
+        }
+    }
+
+    fn parse_cdt_value(buff: &mut Buffer) -> Result {
+        if buff.data_buffer.is_empty() {
+            return Ok(HashMap::new());
+        }
+
+        let ltype = buff.read_u8(None);
+
+        let count: usize = match ltype {
+            0x80..=0x8f => (ltype & 0x0f) as usize,
+            0xde => buff.read_u16(None) as usize,
+            0xdf => buff.read_u32(None) as usize,
+            _ => {
+                bail!(ErrorKind::Derive(
+                    "Invalid Data Type for derive Map CDT Type".to_string()
+                ))
+            }
+        };
+
+        let mut map: HashMap = HashMap::with_capacity(count);
+
+        for _ in 0..count {
+            let key = T::parse_cdt_value(buff)?;
+            let val = S::parse_cdt_value(buff)?;
+            map.insert(key, val);
+        }
+        return Ok(map);
+    }
+}
+
+/// Used to skip values for struct derives
+pub fn skip_map_value_bytes(buff: &mut Buffer) -> Result<()> {
+    let vtype = buff.read_u8(None);
+    match vtype as usize {
+        0x00..=0x7f | 0xc2 | 0xc3 | 0xe0..=0xff => {}
+        0x80..=0x8f => {
+            let len = (vtype & 0x0f) as usize;
+            for _ in 0..len {
+                skip_map_value_bytes(buff)?;
+                skip_map_value_bytes(buff)?;
+            }
+        },
+        0x90..=0x9f => {
+            let len = vtype & 0x0f;
+            for _ in 0..len {
+                skip_map_value_bytes(buff)?;
+            }
+        }
+        0xa0..=0xbf => {
+            // fixstr: the payload is (vtype & 0x1f) raw bytes, not nested values
+            buff.skip((vtype & 0x1f) as usize)
+        }
+        0xdc => {
+            let len = buff.read_u16(None);
+            for _ in 0..len {
+                skip_map_value_bytes(buff)?;
+            }
+        }
+        0xdd => {
+            let len = 0..buff.read_u32(None);
+            for _ in len {
+                skip_map_value_bytes(buff)?;
+            }
+        }
+        0xde => {
+            let len = 0..buff.read_u16(None);
+            for _ in len {
+                skip_map_value_bytes(buff)?;
+                skip_map_value_bytes(buff)?;
+            }
+        }
+        0xdf => {
+            let len = 0..buff.read_u32(None);
+            for _ in len {
+                skip_map_value_bytes(buff)?;
+                skip_map_value_bytes(buff)?;
+            }
+        }
+        // bin8/str8, bin16/str16 and bin32/str32 carry a length prefix followed by raw bytes
+        0xc4 | 0xd9 => {
+            let l = buff.read_u8(None) as usize;
+            buff.skip(l)
+        }
+        0xc5 | 0xda => {
+            let l = buff.read_u16(None) as usize;
+            buff.skip(l)
+        }
+        0xc6 | 0xdb => {
+            let l = buff.read_u32(None) as usize;
+            buff.skip(l)
+        }
+        0xc7 => {
+            let l = buff.read_u8(None);
+            buff.skip(1 + l as usize)
+        }
+        0xc8 =>
{ + let l = buff.read_u16(None); + buff.skip(1 + l as usize) + } + 0xc9 => { + let l = buff.read_u32(None); + buff.skip(1 + l as usize) + } + 0xcc | 0xd0 => buff.skip(1), + 0xcd | 0xd1 | 0xd4 => buff.skip(2), + 0xce | 0xd2 | 0xca => buff.skip(4), + 0xcf | 0xd3 | 0xcb => buff.skip(8), + 0xd5 => buff.skip(3), + 0xd6 => buff.skip(5), + 0xd7 => buff.skip(9), + 0xd8 => buff.skip(17), + _ => { + return Err(ErrorKind::BadResponse(format!( + "Error unpacking value of type '{:x}'", + vtype + )) + .into()) + } + }; + Ok(()) +} + +/// Used to parse values for struct derives +pub fn read_map_value_bytes(buff: &mut Buffer, offset: Option<&usize>) -> Result { + println!("{:?}", offset); + if let Some(offset) = offset { + buff.data_offset = *offset; + let v: T = ReadableValue::parse_cdt_value(buff)?; + Ok(v) + } else { + let mut tmp = Buffer::new(0); + let v: T = ReadableValue::parse_cdt_value(&mut tmp)?; + Ok(v) + } +} diff --git a/aerospike-core/src/derive/writable.rs b/aerospike-core/src/derive/writable.rs new file mode 100644 index 00000000..0bb099c7 --- /dev/null +++ b/aerospike-core/src/derive/writable.rs @@ -0,0 +1,293 @@ +// Copyright 2015-2020 Aerospike, Inc. +// +// Portions may be licensed to Aerospike, Inc. under one or more contributor +// license agreements. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not +// use this file except in compliance with the License. You may obtain a copy of +// the License at http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations under +// the License. + +//! Traits and Implementations for writing data from structs and variables + +use std::collections::HashMap; +use crate::errors::{ErrorKind, Result}; +use crate::{Bin, Buffer, ParticleType}; + +pub use aerospike_macro::{WritableBins, WritableValue}; + +/// The WritableBins Trait is used to convert Objects to Aerospike Wire Data +pub trait WritableBins: Sync { + /// Writes the Object as Bins to the Wire + fn write_as_bins(&self, buffer: &mut Buffer, op_type: u8) -> Result<()>; + /// The encoded size to size the buffer and set the offsets accordingly. + /// Calculated by bin_name_bytes + value_bytes + 8 + /// Defaults to 0 + fn writable_bins_size(&self) -> usize { + 0 + } + /// The amount of bins that will be processed. This is usually just the amount of struct members or list entries. + /// Defaults to 0 + fn writable_bins_count(&self) -> usize { + 0 + } +} + +/// The WritableValue Trait is used to convert Object Values to Aerospike Wire Data +pub trait WritableValue: Sync { + /// Write the Object as Value of a Bin + /// Requires `writable_value_size` and `writable_value_particle_type` to be overwritten to return the correct values + /// Needs to return the byte size of the value + fn write_as_value(&self, buffer: &mut Option<&mut Buffer>) -> usize; + /// Writes the Object as Value of a CDT + /// Most CDT Objects (content of maps/lists etc.) are encoded differently from the normal Values + fn write_as_cdt_value(&self, buffer: &mut Option<&mut Buffer>) -> usize { + self.write_as_value(buffer) + } + /// The particle Type of the value to write. 
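+    /// For example, the numeric implementations below return `ParticleType::INTEGER`
+    /// and the string implementations return `ParticleType::STRING`.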
+ /// This sets the Value Type for the Aerospike Server + fn writable_value_particle_type(&self) -> ParticleType; + /// Defines if the Object can be encoded + /// For example empty Lists or Options should return false if no data is inside + /// Defaults to true + fn writable_value_encodable(&self) -> bool { + true + } +} + +macro_rules! impl_writable_value_for_num { + ($ty:ident) => { + impl WritableValue for $ty { + fn write_as_value(&self, buffer: &mut Option<&mut Buffer>) -> usize { + if let Some(ref mut buf) = *buffer { + buf.write_i64(*self as i64); + } + 8 + } + fn write_as_cdt_value(&self, buffer: &mut Option<&mut Buffer>) -> usize { + crate::msgpack::encoder::pack_integer(buffer, *self as i64) + } + + fn writable_value_particle_type(&self) -> ParticleType { + ParticleType::INTEGER + } + } + }; +} + +macro_rules! impl_writable_value_for_float { + ($ty:ident) => { + impl WritableValue for $ty { + fn write_as_value(&self, buffer: &mut Option<&mut Buffer>) -> usize { + if let Some(ref mut buf) = *buffer { + buf.write_f64(f64::from(*self)); + } + 8 + } + fn write_as_cdt_value(&self, buffer: &mut Option<&mut Buffer>) -> usize { + crate::msgpack::encoder::pack_f64(buffer, f64::from(*self)) + } + + fn writable_value_particle_type(&self) -> ParticleType { + ParticleType::FLOAT + } + } + }; +} + +impl_writable_value_for_num!(u8); +impl_writable_value_for_num!(i8); +impl_writable_value_for_num!(u16); +impl_writable_value_for_num!(i16); +impl_writable_value_for_num!(u32); +impl_writable_value_for_num!(i32); +impl_writable_value_for_num!(u64); +impl_writable_value_for_num!(i64); +impl_writable_value_for_num!(usize); +impl_writable_value_for_num!(isize); +impl_writable_value_for_float!(f64); +impl_writable_value_for_float!(f32); + +impl WritableValue for Option { + fn write_as_value(&self, buffer: &mut Option<&mut Buffer>) -> usize { + if let Some(v) = self { + return v.write_as_value(buffer); + } + 0 + } + + fn write_as_cdt_value(&self, buffer: &mut Option<&mut Buffer>) -> usize { + if let Some(v) = self { + return v.write_as_cdt_value(buffer); + } + 0 + } + + fn writable_value_particle_type(&self) -> ParticleType { + if let Some(v) = self { + return v.writable_value_particle_type(); + } + ParticleType::NULL + } + + fn writable_value_encodable(&self) -> bool { + self.is_some() + } +} + +impl WritableValue for Vec { + fn write_as_value(&self, buffer: &mut Option<&mut Buffer>) -> usize { + let mut size = 0; + size += crate::msgpack::encoder::pack_array_begin(buffer, self.len()); + for v in self { + size += v.write_as_cdt_value(buffer) + } + size + } + + fn writable_value_particle_type(&self) -> ParticleType { + ParticleType::LIST + } + + fn writable_value_encodable(&self) -> bool { + !self.is_empty() + } +} + +impl WritableValue for HashMap { + fn write_as_value(&self, buffer: &mut Option<&mut Buffer>) -> usize { + let mut size = 0; + size += crate::msgpack::encoder::pack_map_begin(buffer, self.len()); + for (k, v) in self { + size += k.write_as_cdt_value(buffer); + size += v.write_as_cdt_value(buffer); + } + size + } + fn writable_value_particle_type(&self) -> ParticleType { + ParticleType::MAP + } + fn writable_value_encodable(&self) -> bool { + !self.is_empty() + } +} + +impl WritableValue for String { + fn write_as_value(&self, buffer: &mut Option<&mut Buffer>) -> usize { + if let Some(ref mut buf) = *buffer { + buf.write_str(&self); + } + self.len() + } + + fn write_as_cdt_value(&self, buffer: &mut Option<&mut Buffer>) -> usize { + crate::msgpack::encoder::pack_string(buffer, &self) + } + + 
fn writable_value_particle_type(&self) -> ParticleType { + ParticleType::STRING + } + + fn writable_value_encodable(&self) -> bool { + !self.is_empty() + } +} + +impl WritableValue for bool { + fn write_as_value(&self, buffer: &mut Option<&mut Buffer>) -> usize { + if let Some(ref mut buf) = *buffer { + buf.write_bool(*self); + } + 1 + } + + fn write_as_cdt_value(&self, buffer: &mut Option<&mut Buffer>) -> usize { + crate::msgpack::encoder::pack_bool(buffer, *self) + } + + fn writable_value_particle_type(&self) -> ParticleType { + ParticleType::BOOL + } +} + +impl WritableValue for &str { + fn write_as_value(&self, buffer: &mut Option<&mut Buffer>) -> usize { + if let Some(ref mut buf) = *buffer { + buf.write_str(self); + } + self.len() + } + + fn write_as_cdt_value(&self, buffer: &mut Option<&mut Buffer>) -> usize { + crate::msgpack::encoder::pack_string(buffer, self) + } + + fn writable_value_particle_type(&self) -> ParticleType { + ParticleType::STRING + } + + fn writable_value_encodable(&self) -> bool { + !self.is_empty() + } +} +impl WritableBins for [Bin; COUNT] { + fn write_as_bins(&self, buffer: &mut Buffer, op_type: u8) -> Result<()> { + legacy_bins_slice_write_wire(self.as_slice(), buffer, op_type) + } + fn writable_bins_size(&self) -> usize { + legacy_bins_slice_writable_size(self.as_slice()) + } + fn writable_bins_count(&self) -> usize { + self.len() + } +} + +impl WritableBins for &[Bin] { + fn write_as_bins(&self, buffer: &mut Buffer, op_type: u8) -> Result<()> { + legacy_bins_slice_write_wire(&self, buffer, op_type) + } + fn writable_bins_size(&self) -> usize { + legacy_bins_slice_writable_size(&self) + } + fn writable_bins_count(&self) -> usize { + self.len() + } +} + +impl WritableBins for Vec { + fn write_as_bins(&self, buffer: &mut Buffer, op_type: u8) -> Result<()> { + legacy_bins_slice_write_wire(self.as_slice(), buffer, op_type) + } + fn writable_bins_size(&self) -> usize { + legacy_bins_slice_writable_size(self.as_slice()) + } + fn writable_bins_count(&self) -> usize { + self.len() + } +} + +fn legacy_bins_slice_write_wire(bins: &[Bin], buffer: &mut Buffer, op_type: u8) -> Result<()> { + bins.iter().for_each(|b| { + buffer.write_i32((b.name.len() + b.value.estimate_size() + 4) as i32); + buffer.write_u8(op_type); + buffer.write_u8(b.value.particle_type() as u8); + buffer.write_u8(0); + buffer.write_u8(b.name.len() as u8); + buffer.write_str(&b.name); + b.value.write_to(buffer); + }); + Ok(()) +} + +fn legacy_bins_slice_writable_size(bins: &[Bin]) -> usize { + let mut size: usize = 0; + bins.iter().for_each(|b| { + size += b.name.len() + b.value.estimate_size() + 8; + }); + size +} diff --git a/aerospike-core/src/errors.rs b/aerospike-core/src/errors.rs index f4344920..b8a238a5 100644 --- a/aerospike-core/src/errors.rs +++ b/aerospike-core/src/errors.rs @@ -121,11 +121,16 @@ error_chain! { display("UDF Bad Response: {}", details) } -/// Error returned when a tasked timeed out before it could be completed. +/// Error returned when a tasked timed out before it could be completed. Timeout(details: String) { description("Timeout") display("Timeout: {}", details) } +/// Error returned when a derive operation fails to encode/decode data. 
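+/// Raised by the derive (de)serialization implementations, e.g. when a bin's wire
+/// type does not match the requested Rust type.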
+ Derive(details: String) { + description("Derive") + display("Derive error: {}", details) + } } } diff --git a/aerospike-core/src/lib.rs b/aerospike-core/src/lib.rs index 1a98c3f7..6e0a9fa7 100644 --- a/aerospike-core/src/lib.rs +++ b/aerospike-core/src/lib.rs @@ -183,8 +183,11 @@ mod batch; mod client; mod cluster; mod commands; + +pub use commands::buffer::Buffer; + pub mod expressions; -mod msgpack; +pub mod msgpack; mod net; pub mod operations; pub mod policy; @@ -196,3 +199,5 @@ mod user; #[cfg(test)] extern crate hex; + +pub mod derive; diff --git a/aerospike-core/src/msgpack/decoder.rs b/aerospike-core/src/msgpack/decoder.rs index a9ff80bb..58137b78 100644 --- a/aerospike-core/src/msgpack/decoder.rs +++ b/aerospike-core/src/msgpack/decoder.rs @@ -13,6 +13,8 @@ // License for the specific language governing permissions and limitations under // the License. +//! General Functions to Decode Aerospike Wire Data + use std::collections::HashMap; use std::vec::Vec; @@ -21,7 +23,7 @@ use crate::commands::ParticleType; use crate::errors::{ErrorKind, Result}; use crate::value::Value; -pub fn unpack_value_list(buf: &mut Buffer) -> Result { +pub(crate) fn unpack_value_list(buf: &mut Buffer) -> Result { if buf.data_buffer.is_empty() { return Ok(Value::List(vec![])); } @@ -32,13 +34,15 @@ pub fn unpack_value_list(buf: &mut Buffer) -> Result { 0x90..=0x9f => (ltype & 0x0f) as usize, 0xdc => buf.read_u16(None) as usize, 0xdd => buf.read_u32(None) as usize, - _ => unreachable!(), + _ => { + unreachable!() + } }; unpack_list(buf, count) } -pub fn unpack_value_map(buf: &mut Buffer) -> Result { +pub(crate) fn unpack_value_map(buf: &mut Buffer) -> Result { if buf.data_buffer.is_empty() { return Ok(Value::from(HashMap::with_capacity(0))); } @@ -111,7 +115,7 @@ fn unpack_blob(buf: &mut Buffer, count: usize) -> Result { } } -fn unpack_value(buf: &mut Buffer) -> Result { +pub(crate) fn unpack_value(buf: &mut Buffer) -> Result { let obj_type = buf.read_u8(None); match obj_type { @@ -219,6 +223,6 @@ fn unpack_value(buf: &mut Buffer) -> Result { } } -const fn is_ext(byte: u8) -> bool { +pub(crate) const fn is_ext(byte: u8) -> bool { matches!(byte, 0xc7 | 0xc8 | 0xc9 | 0xd4 | 0xd5 | 0xd6 | 0xd7 | 0xd8) } diff --git a/aerospike-core/src/msgpack/encoder.rs b/aerospike-core/src/msgpack/encoder.rs index bc772bf8..929a61c7 100644 --- a/aerospike-core/src/msgpack/encoder.rs +++ b/aerospike-core/src/msgpack/encoder.rs @@ -13,6 +13,8 @@ // License for the specific language governing permissions and limitations under // the License. +//! 
General Functions to Encode Aerospike Wire Data + use std::collections::HashMap; use std::num::Wrapping; use std::{i16, i32, i64, i8}; @@ -24,7 +26,7 @@ use crate::operations::cdt_context::CdtContext; use crate::value::{FloatValue, Value}; #[doc(hidden)] -pub fn pack_value(buf: &mut Option<&mut Buffer>, val: &Value) -> usize { +pub(crate) fn pack_value(buf: &mut Option<&mut Buffer>, val: &Value) -> usize { match *val { Value::Nil => pack_nil(buf), Value::Int(ref val) => pack_integer(buf, *val), @@ -44,7 +46,7 @@ pub fn pack_value(buf: &mut Option<&mut Buffer>, val: &Value) -> usize { } #[doc(hidden)] -pub fn pack_empty_args_array(buf: &mut Option<&mut Buffer>) -> usize { +pub(crate) fn pack_empty_args_array(buf: &mut Option<&mut Buffer>) -> usize { let mut size = 0; size += pack_array_begin(buf, 0); @@ -52,7 +54,7 @@ pub fn pack_empty_args_array(buf: &mut Option<&mut Buffer>) -> usize { } #[doc(hidden)] -pub fn pack_cdt_op( +pub(crate) fn pack_cdt_op( buf: &mut Option<&mut Buffer>, cdt_op: &CdtOperation, ctx: &[CdtContext], @@ -98,7 +100,7 @@ pub fn pack_cdt_op( } #[doc(hidden)] -pub fn pack_hll_op( +pub(crate) fn pack_hll_op( buf: &mut Option<&mut Buffer>, hll_op: &CdtOperation, _ctx: &[CdtContext], @@ -122,7 +124,7 @@ pub fn pack_hll_op( } #[doc(hidden)] -pub fn pack_cdt_bit_op( +pub(crate) fn pack_cdt_bit_op( buf: &mut Option<&mut Buffer>, cdt_op: &CdtOperation, ctx: &[CdtContext], @@ -162,7 +164,7 @@ pub fn pack_cdt_bit_op( } #[doc(hidden)] -pub fn pack_array(buf: &mut Option<&mut Buffer>, values: &[Value]) -> usize { +pub(crate) fn pack_array(buf: &mut Option<&mut Buffer>, values: &[Value]) -> usize { let mut size = 0; size += pack_array_begin(buf, values.len()); @@ -174,7 +176,7 @@ pub fn pack_array(buf: &mut Option<&mut Buffer>, values: &[Value]) -> usize { } #[doc(hidden)] -pub fn pack_map(buf: &mut Option<&mut Buffer>, map: &HashMap) -> usize { +pub(crate) fn pack_map(buf: &mut Option<&mut Buffer>, map: &HashMap) -> usize { let mut size = 0; size += pack_map_begin(buf, map.len()); @@ -205,13 +207,15 @@ const MSGPACK_MARKER_NI64: u8 = 0xd3; // This method is not compatible with MsgPack specs and is only used by aerospike client<->server // for wire transfer only #[doc(hidden)] -pub fn pack_raw_u16(buf: &mut Option<&mut Buffer>, value: u16) -> usize { +pub(crate) fn pack_raw_u16(buf: &mut Option<&mut Buffer>, value: u16) -> usize { if let Some(ref mut buf) = *buf { buf.write_u16(value); } 2 } +/// Packs a byte without Marker. Writes it to the Buffer if given. +/// Returns the size of the packaged Data #[doc(hidden)] pub fn pack_half_byte(buf: &mut Option<&mut Buffer>, value: u8) -> usize { if let Some(ref mut buf) = *buf { @@ -220,6 +224,8 @@ pub fn pack_half_byte(buf: &mut Option<&mut Buffer>, value: u8) -> usize { 1 } +/// Packs a byte with Marker. Writes it to the Buffer if given. +/// Returns the size of the packaged Data #[doc(hidden)] pub fn pack_byte(buf: &mut Option<&mut Buffer>, marker: u8, value: u8) -> usize { if let Some(ref mut buf) = *buf { @@ -230,13 +236,15 @@ pub fn pack_byte(buf: &mut Option<&mut Buffer>, marker: u8, value: u8) -> usize } #[doc(hidden)] -pub fn pack_nil(buf: &mut Option<&mut Buffer>) -> usize { +pub(crate) fn pack_nil(buf: &mut Option<&mut Buffer>) -> usize { if let Some(ref mut buf) = *buf { buf.write_u8(MSGPACK_MARKER_NIL); } 1 } +/// Packs a bool. Writes it to the Buffer if given. 
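+/// In MessagePack, `true` and `false` are single one-byte markers (0xc3 and 0xc2).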
+/// Returns the size of the packaged Data #[doc(hidden)] pub fn pack_bool(buf: &mut Option<&mut Buffer>, value: bool) -> usize { if let Some(ref mut buf) = *buf { @@ -249,6 +257,8 @@ pub fn pack_bool(buf: &mut Option<&mut Buffer>, value: bool) -> usize { 1 } +/// Packs a Map Marker including the length. Writes it to the Buffer if given. +/// Returns the size of the packaged Data #[doc(hidden)] pub fn pack_map_begin(buf: &mut Option<&mut Buffer>, length: usize) -> usize { match length { @@ -258,6 +268,8 @@ pub fn pack_map_begin(buf: &mut Option<&mut Buffer>, length: usize) -> usize { } } +/// Packs a List Marker including the length. Writes it to the Buffer if given. +/// Returns the size of the packaged Data #[doc(hidden)] pub fn pack_array_begin(buf: &mut Option<&mut Buffer>, length: usize) -> usize { match length { @@ -267,6 +279,8 @@ pub fn pack_array_begin(buf: &mut Option<&mut Buffer>, length: usize) -> usize { } } +/// Packs a Byte Array Marker. Writes it to the Buffer if given. +/// Returns the size of the packaged Data #[doc(hidden)] pub fn pack_byte_array_begin(buf: &mut Option<&mut Buffer>, length: usize) -> usize { match length { @@ -276,6 +290,8 @@ pub fn pack_byte_array_begin(buf: &mut Option<&mut Buffer>, length: usize) -> us } } +/// Packs a blob. Writes it to the Buffer if given. +/// Returns the size of the packaged Data #[doc(hidden)] pub fn pack_blob(buf: &mut Option<&mut Buffer>, value: &[u8]) -> usize { let mut size = value.len() + 1; @@ -289,6 +305,8 @@ pub fn pack_blob(buf: &mut Option<&mut Buffer>, value: &[u8]) -> usize { size } +/// Packs a string including Marker. Writes it to the Buffer if given. +/// Returns the size of the packaged Data #[doc(hidden)] pub fn pack_string(buf: &mut Option<&mut Buffer>, value: &str) -> usize { let mut size = value.len() + 1; @@ -302,6 +320,8 @@ pub fn pack_string(buf: &mut Option<&mut Buffer>, value: &str) -> usize { size } +/// Packs a String without Marker. Writes it to the Buffer if given. +/// Returns the size of the packaged Data #[doc(hidden)] pub fn pack_raw_string(buf: &mut Option<&mut Buffer>, value: &str) -> usize { let mut size = value.len(); @@ -314,6 +334,8 @@ pub fn pack_raw_string(buf: &mut Option<&mut Buffer>, value: &str) -> usize { size } +/// Packs a GeoJSON Object. Writes it to the Buffer if given. +/// Returns the size of the packaged Data #[doc(hidden)] fn pack_geo_json(buf: &mut Option<&mut Buffer>, value: &str) -> usize { let mut size = value.len() + 1; @@ -327,6 +349,9 @@ fn pack_geo_json(buf: &mut Option<&mut Buffer>, value: &str) -> usize { size } +/// Packs a Integer. Writes it to the Buffer if given. +/// Returns the size of the packaged Data +/// The exact Integer size is handled by this function. 
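+/// For example, small values in `0..=127` fit into a single positive fixint byte,
+/// while larger magnitudes fall back to the wider 16/32/64-bit integer markers.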
#[doc(hidden)] pub fn pack_integer(buf: &mut Option<&mut Buffer>, val: i64) -> usize { match val { @@ -361,7 +386,7 @@ pub fn pack_integer(buf: &mut Option<&mut Buffer>, val: i64) -> usize { } #[doc(hidden)] -pub fn pack_i16(buf: &mut Option<&mut Buffer>, marker: u8, value: i16) -> usize { +pub(crate) fn pack_i16(buf: &mut Option<&mut Buffer>, marker: u8, value: i16) -> usize { if let Some(ref mut buf) = *buf { buf.write_u8(marker); buf.write_i16(value); @@ -370,7 +395,7 @@ pub fn pack_i16(buf: &mut Option<&mut Buffer>, marker: u8, value: i16) -> usize } #[doc(hidden)] -pub fn pack_i32(buf: &mut Option<&mut Buffer>, marker: u8, value: i32) -> usize { +pub(crate) fn pack_i32(buf: &mut Option<&mut Buffer>, marker: u8, value: i32) -> usize { if let Some(ref mut buf) = *buf { buf.write_u8(marker); buf.write_i32(value); @@ -379,7 +404,7 @@ pub fn pack_i32(buf: &mut Option<&mut Buffer>, marker: u8, value: i32) -> usize } #[doc(hidden)] -pub fn pack_i64(buf: &mut Option<&mut Buffer>, marker: u8, value: i64) -> usize { +pub(crate) fn pack_i64(buf: &mut Option<&mut Buffer>, marker: u8, value: i64) -> usize { if let Some(ref mut buf) = *buf { buf.write_u8(marker); buf.write_i64(value); @@ -388,7 +413,7 @@ pub fn pack_i64(buf: &mut Option<&mut Buffer>, marker: u8, value: i64) -> usize } #[doc(hidden)] -pub fn pack_u64(buf: &mut Option<&mut Buffer>, value: u64) -> usize { +pub(crate) fn pack_u64(buf: &mut Option<&mut Buffer>, value: u64) -> usize { if value <= i64::max_value() as u64 { return pack_integer(buf, value as i64); } diff --git a/aerospike-core/src/msgpack/mod.rs b/aerospike-core/src/msgpack/mod.rs index 1ee543a4..0c9bafe4 100644 --- a/aerospike-core/src/msgpack/mod.rs +++ b/aerospike-core/src/msgpack/mod.rs @@ -13,5 +13,7 @@ // License for the specific language governing permissions and limitations under // the License. +//! 
General Functions for Aerospike Wire encoding and decoding + pub mod decoder; pub mod encoder; diff --git a/aerospike-core/src/net/connection.rs b/aerospike-core/src/net/connection.rs index d17f220c..d6fbb318 100644 --- a/aerospike-core/src/net/connection.rs +++ b/aerospike-core/src/net/connection.rs @@ -15,6 +15,7 @@ use crate::commands::admin_command::AdminCommand; use crate::commands::buffer::Buffer; +use crate::derive::readable::{PreParsedBin, PreParsedValue}; use crate::errors::{ErrorKind, Result}; use crate::policy::ClientPolicy; #[cfg(all(any(feature = "rt-async-std"), not(feature = "rt-tokio")))] @@ -25,6 +26,8 @@ use aerospike_rt::net::TcpStream; use aerospike_rt::time::{Duration, Instant}; #[cfg(all(any(feature = "rt-async-std"), not(feature = "rt-tokio")))] use futures::{AsyncReadExt, AsyncWriteExt}; +use std::collections::hash_map::Entry::{Occupied, Vacant}; +use std::collections::HashMap; use std::ops::Add; #[derive(Debug)] @@ -132,4 +135,54 @@ impl Connection { pub const fn bytes_read(&self) -> usize { self.bytes_read } + + pub(crate) async fn pre_parse_stream_bins( + &mut self, + op_count: usize, + ) -> Result> { + let mut data_points: HashMap = HashMap::new(); + + for _ in 0..op_count { + self.read_buffer(8).await?; + + let op_size = self.buffer.read_u32(None) as usize; + self.buffer.skip(1); + let particle_type = self.buffer.read_u8(None); + self.buffer.skip(1); + let name_size = self.buffer.read_u8(None) as usize; + self.read_buffer(name_size).await?; + + let name: String = self.buffer.read_str(name_size)?; + let particle_bytes_size = op_size - (4 + name_size); + self.read_buffer(particle_bytes_size).await?; + + if particle_type != 0 { + let pre_parsed = PreParsedValue { + particle_type, + // Needs to be cloned since buffer will be changed in the meantime + buffer: self.buffer.clone(), + byte_length: particle_bytes_size, + }; + + match data_points.entry(name) { + Vacant(entry) => { + let pre_bin = PreParsedBin { + value: pre_parsed, + sub_values: Vec::new(), + }; + entry.insert(pre_bin); + } + Occupied(entry) => { + let ent = entry.into_mut(); + ent.sub_values.push(pre_parsed); + } + } + } + + // Value Data starts at current offset. We dont want to parse that now, so skip to let the loop continue at the next bin. + self.buffer.skip(particle_bytes_size); + } + + Ok(data_points) + } } diff --git a/aerospike-core/src/query/recordset.rs b/aerospike-core/src/query/recordset.rs index 9712473a..6c8a0a49 100644 --- a/aerospike-core/src/query/recordset.rs +++ b/aerospike-core/src/query/recordset.rs @@ -21,6 +21,7 @@ use std::thread; use crossbeam_queue::SegQueue; use rand::Rng; +use crate::derive::readable::ReadableBins; use crate::errors::Result; use crate::Record; @@ -28,16 +29,19 @@ use crate::Record; /// multiple threads will retrieve records from the server nodes and put these records on an /// internal queue managed by the recordset. The single user thread consumes these records from the /// queue. 
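+/// The type parameter `T` is the user type the record bins are decoded into; it must
+/// implement `ReadableBins` (e.g. `HashMap<String, Value>` or a `#[derive(ReadableBins)]` struct).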
-pub struct Recordset { +pub struct Recordset +where + T: ReadableBins, +{ instances: AtomicUsize, record_queue_count: AtomicUsize, record_queue_size: AtomicUsize, - record_queue: SegQueue>, + record_queue: SegQueue>>, active: AtomicBool, task_id: AtomicUsize, } -impl Recordset { +impl Recordset { #[doc(hidden)] pub fn new(rec_queue_size: usize, nodes: usize) -> Self { let mut rng = rand::thread_rng(); @@ -64,7 +68,7 @@ impl Recordset { } #[doc(hidden)] - pub fn push(&self, record: Result) -> Option> { + pub fn push(&self, record: Result>) -> Option>> { if self.record_queue_count.fetch_add(1, Ordering::Relaxed) < self.record_queue_size.load(Ordering::Relaxed) { @@ -88,10 +92,10 @@ impl Recordset { } } -impl<'a> Iterator for &'a Recordset { - type Item = Result; +impl<'a, T: ReadableBins> Iterator for &'a Recordset { + type Item = Result>; - fn next(&mut self) -> Option> { + fn next(&mut self) -> Option>> { loop { if self.is_active() || !self.record_queue.is_empty() { let result = self.record_queue.pop().ok(); diff --git a/aerospike-core/src/record.rs b/aerospike-core/src/record.rs index 8804de53..583bd626 100644 --- a/aerospike-core/src/record.rs +++ b/aerospike-core/src/record.rs @@ -16,12 +16,10 @@ #[cfg(feature = "serialization")] use serde::Serialize; -use std::collections::HashMap; -use std::fmt; +use crate::derive::readable::ReadableBins; use std::time::{Duration, SystemTime, UNIX_EPOCH}; use crate::Key; -use crate::Value; lazy_static! { // Fri Jan 1 00:00:00 UTC 2010 @@ -31,13 +29,13 @@ lazy_static! { /// Container object for a database record. #[derive(Debug, Clone)] #[cfg_attr(feature = "serialization", derive(Serialize))] -pub struct Record { +pub struct Record { /// Record key. When reading a record from the database, the key is not set in the returned /// Record struct. pub key: Option, /// Map of named record bins. - pub bins: HashMap, + pub bins: T, /// Record modification count. pub generation: u32, @@ -46,15 +44,10 @@ pub struct Record { expiration: u32, } -impl Record { +impl Record { /// Construct a new Record. For internal use only. #[doc(hidden)] - pub const fn new( - key: Option, - bins: HashMap, - generation: u32, - expiration: u32, - ) -> Self { + pub const fn new(key: Option, bins: T, generation: u32, expiration: u32) -> Self { Record { key, bins, @@ -81,25 +74,6 @@ impl Record { } } -impl fmt::Display for Record { - fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { - write!(f, "key: {:?}", self.key)?; - write!(f, ", bins: {{")?; - for (i, (k, v)) in self.bins.iter().enumerate() { - if i > 0 { - write!(f, ", ")?; - } - write!(f, "{}: {}", k, v)?; - } - write!(f, "}}, generation: {}", self.generation)?; - write!(f, ", ttl: ")?; - match self.time_to_live() { - None => "none".fmt(f), - Some(duration) => duration.as_secs().fmt(f), - } - } -} - #[cfg(test)] mod tests { use super::{Record, CITRUSLEAF_EPOCH}; diff --git a/aerospike-core/src/traits.rs b/aerospike-core/src/traits.rs new file mode 100644 index 00000000..56f2b9b8 --- /dev/null +++ b/aerospike-core/src/traits.rs @@ -0,0 +1,586 @@ +// Copyright 2015-2018 Aerospike, Inc. 
+// +// Commonly used Traits for Data input and output handling + +use crate::commands::buffer::Buffer; +use crate::errors::Result; +use crate::{Bin, ParticleType, Value}; +use std::collections::HashMap; + +use crate::value::bytes_to_particle; +pub use aerospike_macro::{ReadableBins, WritableBins, WritableValue}; + +/// The WritableBins Trait is used to convert Objects to Aerospike Wire Data +pub trait WritableBins: Sync { + /// Writes the Object as Bins to the Wire + fn write_as_bins(&self, buffer: &mut Buffer, op_type: u8) -> Result<()>; + /// The encoded size to size the buffer and set the offsets accordingly. + /// Calculated by bin_name_bytes + value_bytes + 8 + /// Defaults to 0 + fn writable_bins_size(&self) -> usize { + 0 + } + /// The amount of bins that will be processed. This is usually just the amount of struct members or list entries. + /// Defaults to 0 + fn writable_bins_count(&self) -> usize { + 0 + } +} + +/// The ReadableBins Trait is used to convert Aerospike Wire Data to Objects +pub trait ReadableBins: Sync + Sized + Send + Clone { + /// Convert the pre-parsed Bins to a compatible Object + /// The String in `data_points` is the field name returned by the Server. + /// This can vary from the actual name in the Object if the rename attribute is used. + fn read_bins_from_bytes(data_points: &mut HashMap) -> Result; + /// Default Fallback for Empty Bins + /// Should be implemented for Types like Options and Lists. + /// Defaults to throwing an Error + fn new_empty() -> Result { + bail!("No empty implementation found") + } +} + +/// The ReadableValue Trait is used to convert Aerospike Wire Data into the Value of Objects +pub trait ReadableValue: Sync + Sized + Send + Clone { + /// Read the data from the Wire Buffer. + /// This method is primarily used for pre-parsing checks + fn read_value_from_bytes(data_point: &mut PreParsedBin) -> Result; + /// Actual conversion of the bytes to the value + fn parse_value(data_point: &mut PreParsedValue) -> Result; + /// CDT values are parsed differently from normal Values. This buffer is not a copy, so modifications can cause wrong behavior. + fn parse_cdt_value(buff: &mut Buffer) -> Result; +} + +/// Before giving data to the Readable Traits, the client pre-parses the wire data into this format#[derive(Debug)] +#[derive(Debug)] +pub struct PreParsedBin { + /// Value is always given for any datatype. + pub value: PreParsedValue, + /// Lists and Maps can have other sub-values for entries. In this case, they need to be appended to the parsed value + pub sub_values: Vec, +} + +/// Includes the data for the Value part of a Bin. +#[derive(Debug)] +pub struct PreParsedValue { + /// The Particle Type the Sever stored the Value as. + pub particle_type: u8, + /// Part of the wire Buffer with only the relevant Value Data inside. Value starts at offset 0 without meta around it. + pub buffer: Buffer, + /// Amount of bytes that should be parsed as Value in the buffer. Should be used instead of buffer length for safety reasons. + pub byte_length: usize, +} + +/// The WritableValue Trait is used to convert Object Values to Aerospike Wire Data +pub trait WritableValue: Sync { + /// Write the Object as Value of a Bin + /// Requires `writable_value_size` and `writable_value_particle_type` to be overwritten to return the correct values + /// Needs to return the byte size of the value + fn write_as_value(&self, buffer: &mut Option<&mut Buffer>) -> usize; + /// Writes the Object as Value of a CDT + /// Most CDT Objects (content of maps/lists etc.) 
are encoded differently from the normal Values + fn write_as_cdt_value(&self, buffer: &mut Option<&mut Buffer>) -> usize { + self.write_as_value(buffer) + } + /// The particle Type of the value to write. + /// This sets the Value Type for the Aerospike Server + fn writable_value_particle_type(&self) -> ParticleType; + /// Defines if the Object can be encoded + /// For example empty Lists or Options should return false if no data is inside + /// Defaults to true + fn writable_value_encodable(&self) -> bool { + true + } +} + +macro_rules! impl_writable_value_for_num { + ($ty:ident) => { + impl WritableValue for $ty { + fn write_as_value(&self, buffer: &mut Option<&mut Buffer>) -> usize { + if let Some(ref mut buf) = *buffer { + buf.write_i64(*self as i64); + } + 8 + } + fn write_as_cdt_value(&self, buffer: &mut Option<&mut Buffer>) -> usize { + crate::msgpack::encoder::pack_integer(buffer, *self as i64) + } + + fn writable_value_particle_type(&self) -> ParticleType { + ParticleType::INTEGER + } + } + }; +} + +macro_rules! impl_writable_value_for_float { + ($ty:ident) => { + impl WritableValue for $ty { + fn write_as_value(&self, buffer: &mut Option<&mut Buffer>) -> usize { + if let Some(ref mut buf) = *buffer { + buf.write_f64(f64::from(*self)); + } + 8 + } + fn write_as_cdt_value(&self, buffer: &mut Option<&mut Buffer>) -> usize { + crate::msgpack::encoder::pack_f64(buffer, f64::from(*self)) + } + + fn writable_value_particle_type(&self) -> ParticleType { + ParticleType::FLOAT + } + } + }; +} + +impl_writable_value_for_num!(u8); +impl_writable_value_for_num!(i8); +impl_writable_value_for_num!(u16); +impl_writable_value_for_num!(i16); +impl_writable_value_for_num!(u32); +impl_writable_value_for_num!(i32); +impl_writable_value_for_num!(u64); +impl_writable_value_for_num!(i64); +impl_writable_value_for_num!(usize); +impl_writable_value_for_num!(isize); +impl_writable_value_for_float!(f64); +impl_writable_value_for_float!(f32); + +impl WritableValue for Option { + fn write_as_value(&self, buffer: &mut Option<&mut Buffer>) -> usize { + if let Some(v) = self { + return v.write_as_value(buffer); + } + 0 + } + + fn write_as_cdt_value(&self, buffer: &mut Option<&mut Buffer>) -> usize { + if let Some(v) = self { + return v.write_as_cdt_value(buffer); + } + 0 + } + + fn writable_value_particle_type(&self) -> ParticleType { + if let Some(v) = self { + return v.writable_value_particle_type(); + } + ParticleType::NULL + } + + fn writable_value_encodable(&self) -> bool { + self.is_some() + } +} + +impl WritableValue for Vec { + fn write_as_value(&self, buffer: &mut Option<&mut Buffer>) -> usize { + let mut size = 0; + size += crate::msgpack::encoder::pack_array_begin(buffer, self.len()); + for v in self { + size += v.write_as_cdt_value(buffer) + } + size + } + + fn writable_value_particle_type(&self) -> ParticleType { + ParticleType::LIST + } + + fn writable_value_encodable(&self) -> bool { + !self.is_empty() + } +} + +impl WritableValue for String { + fn write_as_value(&self, buffer: &mut Option<&mut Buffer>) -> usize { + if let Some(ref mut buf) = *buffer { + buf.write_str(&self); + } + self.len() + } + + fn write_as_cdt_value(&self, buffer: &mut Option<&mut Buffer>) -> usize { + crate::msgpack::encoder::pack_string(buffer, &self) + } + + fn writable_value_particle_type(&self) -> ParticleType { + ParticleType::STRING + } + + fn writable_value_encodable(&self) -> bool { + !self.is_empty() + } +} + +impl WritableValue for bool { + fn write_as_value(&self, buffer: &mut Option<&mut Buffer>) -> usize { + if 
let Some(ref mut buf) = *buffer { + buf.write_bool(*self); + } + 1 + } + + fn write_as_cdt_value(&self, buffer: &mut Option<&mut Buffer>) -> usize { + crate::msgpack::encoder::pack_bool(buffer, *self) + } + + fn writable_value_particle_type(&self) -> ParticleType { + ParticleType::BOOL + } +} + +impl WritableValue for &str { + fn write_as_value(&self, buffer: &mut Option<&mut Buffer>) -> usize { + if let Some(ref mut buf) = *buffer { + buf.write_str(self); + } + self.len() + } + + fn write_as_cdt_value(&self, buffer: &mut Option<&mut Buffer>) -> usize { + crate::msgpack::encoder::pack_string(buffer, self) + } + + fn writable_value_particle_type(&self) -> ParticleType { + ParticleType::STRING + } + + fn writable_value_encodable(&self) -> bool { + !self.is_empty() + } +} +impl WritableBins for [Bin; COUNT] { + fn write_as_bins(&self, buffer: &mut Buffer, op_type: u8) -> Result<()> { + legacy_bins_slice_write_wire(self.as_slice(), buffer, op_type) + } + fn writable_bins_size(&self) -> usize { + legacy_bins_slice_writable_size(self.as_slice()) + } + fn writable_bins_count(&self) -> usize { + self.len() + } +} + +impl WritableBins for &[Bin] { + fn write_as_bins(&self, buffer: &mut Buffer, op_type: u8) -> Result<()> { + legacy_bins_slice_write_wire(&self, buffer, op_type) + } + fn writable_bins_size(&self) -> usize { + legacy_bins_slice_writable_size(&self) + } + fn writable_bins_count(&self) -> usize { + self.len() + } +} + +impl WritableBins for Vec { + fn write_as_bins(&self, buffer: &mut Buffer, op_type: u8) -> Result<()> { + legacy_bins_slice_write_wire(self.as_slice(), buffer, op_type) + } + fn writable_bins_size(&self) -> usize { + legacy_bins_slice_writable_size(self.as_slice()) + } + fn writable_bins_count(&self) -> usize { + self.len() + } +} + +fn legacy_bins_slice_write_wire(bins: &[Bin], buffer: &mut Buffer, op_type: u8) -> Result<()> { + bins.iter().for_each(|b| { + buffer.write_i32((b.name.len() + b.value.estimate_size() + 4) as i32); + buffer.write_u8(op_type); + buffer.write_u8(b.value.particle_type() as u8); + buffer.write_u8(0); + buffer.write_u8(b.name.len() as u8); + buffer.write_str(&b.name); + b.value.write_to(buffer); + }); + Ok(()) +} + +fn legacy_bins_slice_writable_size(bins: &[Bin]) -> usize { + let mut size: usize = 0; + bins.iter().for_each(|b| { + size += b.name.len() + b.value.estimate_size() + 8; + }); + size +} + +impl ReadableValue for Value { + fn read_value_from_bytes(data_point: &mut PreParsedBin) -> Result { + let mut val = Value::parse_value(&mut data_point.value)?; + + for sv in &mut data_point.sub_values { + let sval = Value::parse_value(sv)?; + match val { + Value::List(ref mut list) => list.push(sval), + ref mut prev => { + *prev = as_list!(prev.clone(), sval); + } + } + } + return Ok(val); + } + + fn parse_value(data_point: &mut PreParsedValue) -> Result { + bytes_to_particle( + data_point.particle_type, + &mut data_point.buffer, + data_point.byte_length, + ) + } + + fn parse_cdt_value(buff: &mut Buffer) -> Result { + crate::msgpack::decoder::unpack_value(buff) + } +} + +impl ReadableBins for HashMap { + fn read_bins_from_bytes(data_points: &mut HashMap) -> Result { + let mut hm = HashMap::new(); + for (k, d) in data_points { + let x = Value::read_value_from_bytes(d)?; + hm.insert(k.to_string(), x); + } + + Ok(hm) + } + + fn new_empty() -> Result { + Ok(HashMap::new()) + } +} + +impl ReadableValue for i64 { + fn read_value_from_bytes(data_point: &mut PreParsedBin) -> Result { + if data_point.value.particle_type == 0 { + bail!("No Value received for 
Integer") + } + if !data_point.sub_values.is_empty() { + bail!("Multiple Values received for Integer") + } + return Self::parse_value(&mut data_point.value); + } + + fn parse_value(data_point: &mut PreParsedValue) -> Result { + return Ok(data_point.buffer.read_i64(None)); + } + + fn parse_cdt_value(buff: &mut Buffer) -> Result { + let ptype = buff.read_u8(None); + match ptype { + 0x00..=0x7f => Ok(i64::from(ptype) as i64), + 0xcc => Ok(buff.read_u8(None) as i64), + 0xcd => Ok(buff.read_u16(None) as i64), + 0xce => Ok(buff.read_u32(None) as i64), + 0xcf => Ok(buff.read_u64(None) as i64), + 0xd0 => Ok(buff.read_i8(None) as i64), + 0xd1 => Ok(buff.read_i16(None) as i64), + 0xd2 => Ok(buff.read_i32(None) as i64), + 0xd3 => Ok(buff.read_i64(None) as i64), + _ => bail!("Invalid Data Type for derive i64 CDT Value"), + } + } +} + +impl ReadableValue for usize { + fn read_value_from_bytes(data_point: &mut PreParsedBin) -> Result { + if data_point.value.particle_type == 0 { + bail!("No Value received for Integer") + } + if !data_point.sub_values.is_empty() { + bail!("Multiple Values received for Integer") + } + return Self::parse_value(&mut data_point.value); + } + + fn parse_value(data_point: &mut PreParsedValue) -> Result { + return Ok(data_point.buffer.read_i64(None) as usize); + } + + fn parse_cdt_value(buff: &mut Buffer) -> Result { + let ptype = buff.read_u8(None); + match ptype { + 0x00..=0x7f => Ok(i64::from(ptype) as usize), + 0xcc => Ok(buff.read_u8(None) as usize), + 0xcd => Ok(buff.read_u16(None) as usize), + 0xce => Ok(buff.read_u32(None) as usize), + 0xcf => Ok(buff.read_u64(None) as usize), + 0xd0 => Ok(buff.read_i8(None) as usize), + 0xd1 => Ok(buff.read_i16(None) as usize), + 0xd2 => Ok(buff.read_i32(None) as usize), + 0xd3 => Ok(buff.read_i64(None) as usize), + _ => bail!("Invalid Data Type for derive usize CDT Value"), + } + } +} + +impl ReadableValue for isize { + fn read_value_from_bytes(data_point: &mut PreParsedBin) -> Result { + if data_point.value.particle_type == 0 { + bail!("No Value received for Integer") + } + if !data_point.sub_values.is_empty() { + bail!("Multiple Values received for Integer") + } + return Self::parse_value(&mut data_point.value); + } + + fn parse_value(data_point: &mut PreParsedValue) -> Result { + return Ok(data_point.buffer.read_i64(None) as isize); + } + + fn parse_cdt_value(buff: &mut Buffer) -> Result { + let ptype = buff.read_u8(None); + match ptype { + 0x00..=0x7f => Ok(i64::from(ptype) as isize), + 0xcc => Ok(buff.read_u8(None) as isize), + 0xcd => Ok(buff.read_u16(None) as isize), + 0xce => Ok(buff.read_u32(None) as isize), + 0xcf => Ok(buff.read_u64(None) as isize), + 0xd0 => Ok(buff.read_i8(None) as isize), + 0xd1 => Ok(buff.read_i16(None) as isize), + 0xd2 => Ok(buff.read_i32(None) as isize), + 0xd3 => Ok(buff.read_i64(None) as isize), + _ => bail!("Invalid Data Type for derive isize CDT Value"), + } + } +} + +impl ReadableValue for u64 { + fn read_value_from_bytes(data_point: &mut PreParsedBin) -> Result { + if data_point.value.particle_type == 0 { + bail!("No Value received for Integer") + } + if !data_point.sub_values.is_empty() { + bail!("Multiple Values received for Integer") + } + return Self::parse_value(&mut data_point.value); + } + + fn parse_value(data_point: &mut PreParsedValue) -> Result { + return Ok(data_point.buffer.read_i64(None) as u64); + } + + fn parse_cdt_value(buff: &mut Buffer) -> Result { + let ptype = buff.read_u8(None); + match ptype { + 0x00..=0x7f => Ok(i64::from(ptype) as u64), + 0xcc => Ok(buff.read_u8(None) as 
u64), + 0xcd => Ok(buff.read_u16(None) as u64), + 0xce => Ok(buff.read_u32(None) as u64), + 0xcf => Ok(buff.read_u64(None)), + 0xd0 => Ok(buff.read_i8(None) as u64), + 0xd1 => Ok(buff.read_i16(None) as u64), + 0xd2 => Ok(buff.read_i32(None) as u64), + 0xd3 => Ok(buff.read_i64(None) as u64), + _ => bail!("Invalid Data Type for derive u64 CDT Value"), + } + } +} + +impl ReadableValue for f64 { + fn read_value_from_bytes(data_point: &mut PreParsedBin) -> Result { + if data_point.value.particle_type == 0 { + bail!("No Value received for Float") + } + if !data_point.sub_values.is_empty() { + bail!("Multiple Values received for Float") + } + return Self::parse_value(&mut data_point.value); + } + + fn parse_value(data_point: &mut PreParsedValue) -> Result { + return Ok(data_point.buffer.read_f64(None)); + } + + fn parse_cdt_value(buff: &mut Buffer) -> Result { + let ptype = buff.read_u8(None); + match ptype { + 0xca => Ok(buff.read_f32(None) as f64), + 0xcb => Ok(buff.read_f64(None)), + _ => bail!("Invalid Data Type for derive float CDT Value"), + } + } +} + +impl ReadableValue for String { + fn read_value_from_bytes(data_point: &mut PreParsedBin) -> Result { + if data_point.value.particle_type == 0 { + bail!("No Value received for String") + } + if !data_point.sub_values.is_empty() { + bail!("Multiple Values received for string") + } + return Self::parse_value(&mut data_point.value); + } + + fn parse_value(data_point: &mut PreParsedValue) -> Result { + return data_point.buffer.read_str(data_point.byte_length); + } + + fn parse_cdt_value(buff: &mut Buffer) -> Result { + let len = (buff.read_u8(None) & 0x1f) as usize; + let ptype = buff.read_u8(None); + if ptype != ParticleType::STRING as u8 { + bail!("Invalid Data Type for derive string CDT Value") + } + return buff.read_str(len - 1); + } +} + +impl ReadableValue for Option { + fn read_value_from_bytes(data_point: &mut PreParsedBin) -> Result { + if data_point.value.particle_type == 0 { + return Ok(None); + } + Ok(Some(T::read_value_from_bytes(data_point)?)) + } + + fn parse_value(data_point: &mut PreParsedValue) -> Result { + Ok(Some(T::parse_value(data_point)?)) + } + + fn parse_cdt_value(buff: &mut Buffer) -> Result { + Ok(Some(T::parse_cdt_value(buff)?)) + } +} + +impl ReadableValue for Vec { + fn read_value_from_bytes(data_point: &mut PreParsedBin) -> Result { + let mut v: Vec = Vec::with_capacity(data_point.sub_values.len() + 1); + v.extend(Self::parse_value(&mut data_point.value)?); + for sv in &mut data_point.sub_values { + v.extend(Self::parse_value(sv)?) 
+ } + return Ok(v); + } + + fn parse_value(data_point: &mut PreParsedValue) -> Result { + if data_point.particle_type == ParticleType::LIST as u8 { + Self::parse_cdt_value(&mut data_point.buffer) + } else { + bail!("Invalid Data Type for derive List CDT Type") + } + } + + fn parse_cdt_value(buff: &mut Buffer) -> Result { + let ltype = buff.read_u8(None); + let count: usize = match ltype { + 0x90..=0x9f => (ltype & 0x0f) as usize, + 0xdc => buff.read_u16(None) as usize, + 0xdd => buff.read_u32(None) as usize, + _ => { + bail!("Invalid Data Type for derive List CDT Type") + } + }; + + let mut list = Vec::with_capacity(count); + for _ in 0..count { + list.push(T::parse_cdt_value(buff)?); + } + return Ok(list); + } +} diff --git a/aerospike-macro/Cargo.toml b/aerospike-macro/Cargo.toml index 2202bd07..6a9aee83 100644 --- a/aerospike-macro/Cargo.toml +++ b/aerospike-macro/Cargo.toml @@ -9,7 +9,7 @@ proc-macro = true [dependencies] proc-macro2 = "1.0.28" -syn = {version = "1.0.30", default-features = false, features = ["full"]} +syn = {version = "2.0.18", default-features = false, features = ["full", "extra-traits"]} quote = {version = "1.0.6"} aerospike-rt = {path = "../aerospike-rt"} diff --git a/aerospike-macro/src/lib.rs b/aerospike-macro/src/lib.rs index 63abec43..f9083301 100644 --- a/aerospike-macro/src/lib.rs +++ b/aerospike-macro/src/lib.rs @@ -1,10 +1,16 @@ -extern crate proc_macro; -use proc_macro::TokenStream; +use crate::traits::readable::{convert_readable_bins, convert_readable_value_source}; +use crate::traits::writable::{build_writable, convert_writable_value_source}; use quote::quote; +use syn::{parse_macro_input, parse_quote, DeriveInput, GenericParam}; + +mod traits; #[doc(hidden)] #[proc_macro_attribute] -pub fn test(_attr: TokenStream, input: TokenStream) -> TokenStream { +pub fn test( + _attr: proc_macro::TokenStream, + input: proc_macro::TokenStream, +) -> proc_macro::TokenStream { let input = syn::parse_macro_input!(input as syn::ItemFn); let ret = &input.sig.output; @@ -34,3 +40,104 @@ pub fn test(_attr: TokenStream, input: TokenStream) -> TokenStream { result.into() } + +#[proc_macro_derive(WritableBins, attributes(aerospike))] +pub fn writable_bins(input: proc_macro::TokenStream) -> proc_macro::TokenStream { + let mut input = parse_macro_input!(input as DeriveInput); + let name = input.ident; + for param in &mut input.generics.params { + if let GenericParam::Type(ref mut type_param) = *param { + type_param + .bounds + .push(parse_quote!(aerospike::WritableValue)); + } + } + let (impl_generics, ty_generics, where_clause) = input.generics.split_for_impl(); + let ops = build_writable(&input.data); + + let expanded = quote! { + impl #impl_generics WritableBins for #name #ty_generics #where_clause { + #ops + } + }; + proc_macro::TokenStream::from(expanded) +} + +#[proc_macro_derive(WritableValue, attributes(aerospike))] +pub fn writable_value(input: proc_macro::TokenStream) -> proc_macro::TokenStream { + let mut input = parse_macro_input!(input as DeriveInput); + let name = input.ident; + for param in &mut input.generics.params { + if let GenericParam::Type(ref mut type_param) = *param { + type_param + .bounds + .push(parse_quote!(aerospike::derive::writable::WritableValue)); + } + } + let (impl_generics, ty_generics, where_clause) = input.generics.split_for_impl(); + let vals = convert_writable_value_source(&input.data); + + let expanded = quote! 
{ + impl #impl_generics WritableValue for #name #ty_generics #where_clause { + fn write_as_value(&self, buffer: &mut Option<&mut aerospike::Buffer>) -> usize{ + // Vec for bin Values + // Bins token Stream + let mut size: usize = 0; + #vals + size + } + + fn writable_value_particle_type(&self) -> aerospike::ParticleType { + aerospike::ParticleType::MAP + } + } + }; + proc_macro::TokenStream::from(expanded) +} + +#[proc_macro_derive(ReadableBins, attributes(aerospike))] +pub fn readable_bins(input: proc_macro::TokenStream) -> proc_macro::TokenStream { + let mut input = parse_macro_input!(input as DeriveInput); + let name = input.ident; + for param in &mut input.generics.params { + if let GenericParam::Type(ref mut type_param) = *param { + type_param + .bounds + .push(parse_quote!(aerospike::derive::readable::ReadableValue)); + } + } + let (impl_generics, ty_generics, where_clause) = input.generics.split_for_impl(); + let vals = convert_readable_bins(&input.data, &name); + + let expanded = quote! { + impl #impl_generics ReadableBins for #name #ty_generics #where_clause { + fn read_bins_from_bytes(data_points: &mut HashMap) -> aerospike::errors::Result{ + #vals + } + + } + }; + proc_macro::TokenStream::from(expanded) +} + +#[proc_macro_derive(ReadableValue, attributes(aerospike))] +pub fn readable_value(input: proc_macro::TokenStream) -> proc_macro::TokenStream { + let mut input = parse_macro_input!(input as DeriveInput); + let name = input.ident; + for param in &mut input.generics.params { + if let GenericParam::Type(ref mut type_param) = *param { + type_param + .bounds + .push(parse_quote!(aerospike::derive::readable::ReadableValue)); + } + } + let (impl_generics, ty_generics, where_clause) = input.generics.split_for_impl(); + let vals = convert_readable_value_source(&input.data, &name); + + let expanded = quote! 
{ + impl #impl_generics ReadableValue for #name #ty_generics #where_clause { + #vals + } + }; + proc_macro::TokenStream::from(expanded) +} diff --git a/aerospike-macro/src/traits/mod.rs b/aerospike-macro/src/traits/mod.rs new file mode 100644 index 00000000..5393aef6 --- /dev/null +++ b/aerospike-macro/src/traits/mod.rs @@ -0,0 +1,2 @@ +pub mod readable; +pub mod writable; diff --git a/aerospike-macro/src/traits/readable.rs b/aerospike-macro/src/traits/readable.rs new file mode 100644 index 00000000..a35b1958 --- /dev/null +++ b/aerospike-macro/src/traits/readable.rs @@ -0,0 +1,184 @@ +use quote::{quote, quote_spanned}; +use syn::spanned::Spanned; +use syn::Expr::{Assign, Lit, Path}; +use syn::{Data, Expr, Field, Fields, Ident, Type}; + +pub struct ReadableFieldAttributes<'a> { + field: &'a Field, + ident: &'a Option, + name: String, + ty: Type, +} + +fn readable_field_arguments(field: &Field) -> ReadableFieldAttributes { + // Collect initial Information + let mut attributes = ReadableFieldAttributes { + field, + ident: &field.ident, + name: field.ident.clone().unwrap().to_string(), + ty: field.ty.clone(), + }; + + for a in &field.attrs { + // Filter for aerospike() attributes + if !a.path().is_ident("aerospike") { + continue; + } + + // Parse field attributes to Expression + let expr: Expr = a.parse_args().unwrap(); + + match expr { + // Assign based Attributes like rename + Assign(assign) => { + match assign.left.as_ref() { + Path(path) => { + // Rename Attribute extraction + if path.path.is_ident("rename") { + if let Lit(lit) = *assign.right { + // Currently only accepts Strings as Field Name + if let syn::Lit::Str(ls) = lit.lit { + attributes.name = ls.value(); + } else { + panic!("Invalid Aerospike Rename Value") + } + } else { + panic!("Invalid Aerospike Rename Value") + } + } + } + _ => { + panic!("Invalid Aerospike Derive Attribute") + } + } + } + // Path based Attributes that just serve as markers + /*Path(path) => { + if let Some(ident) = path.path.get_ident() { + match ident.to_string().as_ref() { + _ => { panic!("Invalid Aerospike Derive Attribute") } + } + } + }*/ + _ => { + panic!("Invalid Aerospike Derive Attribute") + } + } + } + if attributes.name.len() > 15 { + panic!("Aerospike Derive Bin Names can not be longer than 15 bytes!") + } + attributes +} + +pub(crate) fn convert_readable_bins(data: &Data, name: &Ident) -> proc_macro2::TokenStream { + match *data { + Data::Struct(ref data) => { + match data.fields { + Fields::Named(ref fields) => { + let field_args = fields + .named + .iter() + .map(|f| readable_field_arguments(&f)) + .collect::>(); + + let field_recurse = field_args.iter().map(|f| { + let field_name = &f.name; + let name = f.ident; + quote_spanned! {f.field.span()=> + #name: aerospike::derive::readable::ReadableValue::read_value_from_bytes(data_points.get_mut(#field_name).unwrap_or(&mut aerospike::derive::readable::PreParsedBin {sub_values: vec![], value: aerospike::derive::readable::PreParsedValue { particle_type: 0, buffer: Default::default(), byte_length: 0 }}))?, + } + }); + quote! 
{ + // Build the final struct + Ok(#name { + #(#field_recurse)* + }) + } + } + _ => unimplemented!(), + } + } + Data::Enum(_) | Data::Union(_) => unimplemented!(), + } +} + +pub(crate) fn convert_readable_value_source(data: &Data, name: &Ident) -> proc_macro2::TokenStream { + match *data { + Data::Struct(ref data) => { + match data.fields { + Fields::Named(ref fields) => { + let field_args = fields + .named + .iter() + .map(|f| readable_field_arguments(&f)) + .collect::<Vec<_>>(); + + let field_recurse = field_args.iter().map(|f| { + let field_name = &f.name; + let name = f.ident; + let ty = &f.ty; + quote_spanned! {f.field.span()=> + #name: aerospike::derive::readable::read_map_value_bytes(buff, keys.get(#field_name))?, + } + }); + quote! { + fn read_value_from_bytes(data_point: &mut aerospike::derive::readable::PreParsedBin) -> aerospike::errors::Result<Self> { + let mut nbuf = aerospike::Buffer::new(1); + let mut op_count: u64 = 0; + let mut ndbuf = vec![]; + let mut ndval = vec![&mut data_point.value]; + ndval.extend(&mut data_point.sub_values); + for sv in &mut ndval { + let ltype = sv.buffer.peek(); + let count: usize = match ltype { + 0x80..=0x8f => (ltype & 0x0f) as usize, + 0xde => sv.buffer.read_u16(None) as usize, + 0xdf => sv.buffer.read_u32(None) as usize, + _ => { + return Err(aerospike::ErrorKind::Derive("Invalid Data Type for derive Map (Struct) CDT Type".to_string()).into()) + } + }; + op_count += count as u64; + ndbuf.extend(sv.buffer.data_buffer[1..].iter()); + } + nbuf.resize_buffer(8)?; + nbuf.reset_offset(); + nbuf.write_u64(op_count); + nbuf.data_buffer.extend(ndbuf.iter()); + Self::parse_cdt_value(&mut nbuf) + } + + fn parse_value(data_point: &mut aerospike::derive::readable::PreParsedValue) -> aerospike::errors::Result<Self> { + unreachable!() + } + + fn parse_cdt_value(buff: &mut aerospike::Buffer) -> aerospike::errors::Result<Self> { + buff.reset_offset(); + let count = buff.read_u64(None); + + let mut keys: std::collections::HashMap<String, usize> = std::collections::HashMap::new(); + for _ in 0..count { + let len = buff.read_u8(None) as usize & 0x1f; + let vtype = buff.read_u8(None); + if vtype != 3 { + return Err(aerospike::ErrorKind::Derive("Only map keys of type string are allowed for Struct reading".to_string()).into()) + } + let name = buff.read_str(len - 1)?; + let ofs = buff.data_offset(); + aerospike::derive::readable::skip_map_value_bytes(buff)?; + keys.insert(name.clone(), ofs); + } + + Ok(#name { + #(#field_recurse)* + }) + } + } + } + _ => unimplemented!(), + } + } + Data::Enum(_) | Data::Union(_) => unimplemented!(), + } +} diff --git a/aerospike-macro/src/traits/writable.rs b/aerospike-macro/src/traits/writable.rs new file mode 100644 index 00000000..98d7d0e7 --- /dev/null +++ b/aerospike-macro/src/traits/writable.rs @@ -0,0 +1,400 @@ +use proc_macro2::TokenStream; +use quote::{quote, quote_spanned}; +use syn::spanned::Spanned; +use syn::Expr::{Assign, Lit, Path}; +use syn::{Data, Expr, Field, Fields, Ident}; + +pub struct WritableFieldAttributes<'a> { + field: &'a Field, + ident: &'a Option<Ident>, + name: String, + default: Option<syn::Lit>, + skip: bool, +} + +impl<'a> WritableFieldAttributes<'a> { + pub fn default_write_value_token_stream(&self) -> (TokenStream, usize, u8) { + // Unwrap is fine since this function can only get called if default is Some. + let default = self.default.clone().unwrap(); + match default { + syn::Lit::Str(s) => { + let val = &s.value(); + return ( + quote!
{ + buffer.write_str(#val); + }, + val.len(), + 3, + ); + } + syn::Lit::Int(i) => { + if let Ok(val) = i.base10_parse::<i64>() { + return ( + quote! { + buffer.write_i64(#val); + }, + 8, + 1, + ); + } else { + panic!("Aerospike Default value could not be parsed as i64") + } + } + syn::Lit::Float(f) => { + if let Ok(val) = f.base10_parse::<f64>() { + return ( + quote! { + buffer.write_f64(#val); + }, + 8, + 2, + ); + } else { + panic!("Aerospike Default value could not be parsed as f64") + } + } + syn::Lit::Bool(b) => { + let val = b.value(); + return ( + quote! { + buffer.write_bool(#val); + }, + 1, + 17, + ); + } + _ => { + panic!( + "Aerospike Default value is not supported for the value on {}", + &self.name + ) + } + } + } + + pub fn default_write_value_cdt_token_stream(&self) -> TokenStream { + // Unwrap is fine since this function can only get called if default is Some. + let default = self.default.clone().unwrap(); + match default { + syn::Lit::Str(s) => { + let val = &s.value(); + return quote! { + size += aerospike::msgpack::encoder::pack_string(buffer, #val); + }; + } + syn::Lit::Int(i) => { + if let Ok(val) = i.base10_parse::<i64>() { + return quote! { + size += aerospike::msgpack::encoder::pack_integer(buffer, #val); + }; + } else { + panic!("Aerospike Default value could not be parsed as i64") + } + } + syn::Lit::Float(f) => { + if let Ok(val) = f.base10_parse::<f64>() { + // Default Values are always encoded as f64 + return quote! { + size += aerospike::msgpack::encoder::pack_f64(buffer, #val); + }; + } else { + panic!("Aerospike Default value could not be parsed as f64") + } + } + syn::Lit::Bool(b) => { + let val = b.value(); + return quote! { + size += aerospike::msgpack::encoder::pack_bool(buffer, #val); + }; + } + _ => { + panic!( + "Aerospike Default value is not supported for the value on {}", + &self.name + ) + } + } + } +} + +fn writable_field_arguments(field: &Field) -> WritableFieldAttributes { + // Collect initial Information + let mut attributes = WritableFieldAttributes { + field, + ident: &field.ident, + name: field.ident.clone().unwrap().to_string(), + default: None, + skip: false, + }; + + for a in &field.attrs { + // Filter for aerospike() attributes + if !a.path().is_ident("aerospike") { + continue; + } + + // Parse field attributes to Expression + let expr: Expr = a.parse_args().unwrap(); + + match expr { + // Assign based Attributes like rename + Assign(assign) => { + match assign.left.as_ref() { + Path(path) => { + // Rename Attribute extraction + if path.path.is_ident("rename") { + if let Lit(lit) = *assign.right { + // Currently only accepts Strings as Field Name + if let syn::Lit::Str(ls) = lit.lit { + attributes.name = ls.value(); + } else { + panic!("Invalid Aerospike Rename Value") + } + } else { + panic!("Invalid Aerospike Rename Value") + } + } else if path.path.is_ident("default") { + if let Lit(lit) = *assign.right { + attributes.default = Some(lit.lit); + } + } + } + _ => { + panic!("Invalid Aerospike Derive Attribute") + } + } + } + // Path based Attributes that just serve as markers + Path(path) => { + if let Some(ident) = path.path.get_ident() { + match ident.to_string().as_ref() { + // Ignore Attribute with skip as alias + "ignore" | "skip" => attributes.skip = true, + _ => { + panic!("Invalid Aerospike Derive Attribute") + } + } + } + } + _ => { + panic!("Invalid Aerospike Derive Attribute") + } + } + } + if attributes.name.len() > 15 { + panic!("Aerospike Derive Bin Names cannot be longer than 15 bytes!") + } + attributes +} + +pub(crate) fn
build_writable(data: &Data) -> TokenStream { + match *data { + Data::Struct(ref data) => { + match data.fields { + Fields::Named(ref fields) => { + // Collect all the Field Info + let field_args = fields + .named + .iter() + .map(|f| writable_field_arguments(&f)) + .collect::>(); + + // Build the `write_as_bins` function + let writer_recurse = field_args.iter().map(|f| { + let name = f.ident; + let skip = f.skip; + let name_str = &f.name; + + let has_default = f.default.is_some(); + // Build the bin Token Stream. + if has_default { + let default = f.default_write_value_token_stream(); + let default_writer = default.0; + let default_length = default.1; + let default_type = default.2; + + quote_spanned! {f.field.span()=> + if !#skip { + { + let encodable = aerospike::derive::writable::WritableValue::writable_value_encodable(&self.#name); + if encodable { + buffer.write_i32((#name_str.len() + aerospike::derive::writable::WritableValue::write_as_value(&self.#name, &mut None) + 4) as i32); + } else { + buffer.write_i32((#name_str.len() + #default_length + 4) as i32); + } + buffer.write_u8(op_type); + + if encodable { + buffer.write_u8(aerospike::derive::writable::WritableValue::writable_value_particle_type(&self.#name) as u8); + } else { + buffer.write_u8(#default_type); + } + buffer.write_u8(0); + buffer.write_u8(#name_str.len() as u8); + buffer.write_str(#name_str); + if encodable { + aerospike::derive::writable::WritableValue::write_as_value(&self.#name, &mut Some(buffer)); + } else { + #default_writer + } + } + } + } + + } else { + quote_spanned! {f.field.span()=> + if !#skip && aerospike::derive::writable::WritableValue::writable_value_encodable(&self.#name) { + buffer.write_i32((#name_str.len() + aerospike::derive::writable::WritableValue::write_as_value(&self.#name, &mut None) + 4) as i32); + buffer.write_u8(op_type); + buffer.write_u8(aerospike::derive::writable::WritableValue::writable_value_particle_type(&self.#name) as u8); + buffer.write_u8(0); + buffer.write_u8(#name_str.len() as u8); + buffer.write_str(#name_str); + aerospike::derive::writable::WritableValue::write_as_value(&self.#name, &mut Some(buffer)); + } + } + } + + }); + + // Build the `writable_bins_size` function + let length_recurse = field_args.iter().map(|f| { + let name = f.ident; + let name_len = f.name.len(); + let skip = f.skip; + let has_default = f.default.is_some(); + // Build the bin Token Stream. + if has_default { + let default = f.default_write_value_token_stream(); + let default_length = default.1; + + quote_spanned! {f.field.span()=> + if !#skip { + if aerospike::derive::writable::WritableValue::writable_value_encodable(&self.#name) { + size += #name_len + aerospike::derive::writable::WritableValue::write_as_value(&self.#name, &mut None) + 8; + } else { + size += #name_len + #default_length + 8; + } + } + } + } else { + quote_spanned! {f.field.span()=> + if !#skip && aerospike::derive::writable::WritableValue::writable_value_encodable(&self.#name) { + size += #name_len + aerospike::derive::writable::WritableValue::write_as_value(&self.#name, &mut None) + 8; + } + } + } + }); + + // Build the `writable_bins_count` function + let op_count_recurse = field_args.iter().map(|f| { + let name = f.ident; + let skip = f.skip; + let has_default = f.default.is_some(); + if has_default { + quote_spanned! {f.field.span()=> + if !#skip { + count += 1; + } + } + } else { + quote_spanned! 
{f.field.span()=> + if !#skip && aerospike::derive::writable::WritableValue::writable_value_encodable(&self.#name) { + count += 1; + } + } + } + + }); + + // Build the final functions for the Trait impl + quote! { + fn write_as_bins(&self, buffer: &mut aerospike::Buffer, op_type: u8) -> aerospike::errors::Result<()> { + #(#writer_recurse)* + Ok(()) + } + + fn writable_bins_size(&self) -> usize { + let mut size: usize = 0; + #(#length_recurse)* + size + } + + fn writable_bins_count(&self) -> usize { + let mut count: usize = 0; + #(#op_count_recurse)* + count + } + + } + } + _ => panic!("Aerospike Bin Derive is not supported for unnamed Structs"), + } + } + Data::Enum(_) | Data::Union(_) => { + panic!("Aerospike Bin Derive is not supported for Enums and Unions") + } + } +} + +// WritableValue +pub(crate) fn convert_writable_value_source(data: &Data) -> proc_macro2::TokenStream { + match *data { + Data::Struct(ref data) => match data.fields { + Fields::Named(ref fields) => { + let field_args = fields + .named + .iter() + .map(|f| writable_field_arguments(&f)) + .collect::<Vec<_>>(); + + let recurse = field_args.iter().map(|f| { + let name = f.ident; + let name_str = &f.name; + let skip = &f.skip; + let has_default = f.default.is_some(); + + if has_default { + let default_writer = f.default_write_value_cdt_token_stream(); + quote_spanned! {f.field.span()=> + if !#skip { + size += aerospike::msgpack::encoder::pack_string(buffer, #name_str); + if aerospike::derive::writable::WritableValue::writable_value_encodable(&self.#name) { + size += aerospike::derive::writable::WritableValue::write_as_cdt_value(&self.#name, buffer); + } else { + #default_writer + } + } + } + } else { + quote_spanned! {f.field.span()=> + if !#skip && aerospike::derive::writable::WritableValue::writable_value_encodable(&self.#name) { + size += aerospike::msgpack::encoder::pack_string(buffer, #name_str); + size += aerospike::derive::writable::WritableValue::write_as_cdt_value(&self.#name, buffer); + } + } + } + }); + let len_recurse = field_args.iter().map(|f| { + let skip = f.skip; + let name = f.ident; + let has_default = f.default.is_some(); + quote_spanned! {f.field.span()=> + if !#skip && (#has_default || aerospike::derive::writable::WritableValue::writable_value_encodable(&self.#name)) { + len += 1; + } + } + }); + quote! { + let mut len = 0; + #(#len_recurse)* + + size += aerospike::msgpack::encoder::pack_map_begin(buffer, len); + #(#recurse)* + } + } + _ => unimplemented!(), + }, + Data::Enum(_) | Data::Union(_) => unimplemented!(), + } +} diff --git a/aerospike-rt/src/lib.rs b/aerospike-rt/src/lib.rs index 2d0e73f1..3107903e 100644 --- a/aerospike-rt/src/lib.rs +++ b/aerospike-rt/src/lib.rs @@ -1,4 +1,5 @@ #[cfg(not(any(feature = "rt-tokio", feature = "rt-async-std")))] + compile_error!("Please select a runtime from ['rt-tokio', 'rt-async-std']"); #[cfg(any(all(feature = "rt-async-std", feature = "rt-tokio")))] diff --git a/benches/client_server.rs b/benches/client_server.rs index 19263bf0..df454e7b 100644 --- a/benches/client_server.rs +++ b/benches/client_server.rs @@ -13,16 +13,16 @@ // License for the specific language governing permissions and limitations under // the License.
-#[macro_use] -extern crate bencher; #[macro_use] extern crate lazy_static; extern crate rand; -use aerospike::{Bins, ReadPolicy, WritePolicy}; - use aerospike::{as_bin, as_key}; -use bencher::Bencher; +use aerospike::{Bins, Client, Key, ReadPolicy, Value, WritePolicy}; +use criterion::{criterion_group, criterion_main, Criterion}; +use std::collections::HashMap; +use std::sync::Arc; +use std::time::Duration; #[path = "../tests/common/mod.rs"] mod common; @@ -31,34 +31,59 @@ lazy_static! { static ref TEST_SET: String = common::rand_str(10); } -fn single_key_read(bench: &mut Bencher) { - let client = common::client(); - let namespace = common::namespace(); - let key = as_key!(namespace, &TEST_SET, common::rand_str(10)); - let wbin = as_bin!("i", 1); - let bins = vec![&wbin]; +async fn single_key_read(client: Arc, key: &Key) { let rpolicy = ReadPolicy::default(); - let wpolicy = WritePolicy::default(); - client.put(&wpolicy, &key, &bins).unwrap(); - - bench.iter(|| client.get(&rpolicy, &key, Bins::All).unwrap()); + client + .get::, Bins>(&rpolicy, &key, Bins::All) + .await + .unwrap(); } -fn single_key_read_header(bench: &mut Bencher) { - let client = common::client(); +fn run_single_key_read(bench: &mut Criterion) { + let rt = tokio::runtime::Runtime::new().unwrap(); + let client = Arc::new(rt.block_on(common::client())); let namespace = common::namespace(); let key = as_key!(namespace, &TEST_SET, common::rand_str(10)); let wbin = as_bin!("i", 1); - let bins = vec![&wbin]; - let rpolicy = ReadPolicy::default(); + let bins = vec![wbin]; + let wpolicy = WritePolicy::default(); - client.put(&wpolicy, &key, &bins).unwrap(); + rt.block_on(client.put(&wpolicy, &key, &bins)).unwrap(); + + let key2 = as_key!(namespace, &TEST_SET, common::rand_str(10)); + rt.block_on(client.put(&wpolicy, &key2, &bins)).unwrap(); + + let mut group = bench.benchmark_group("single operations"); + group + .sample_size(1000) + .measurement_time(Duration::from_secs(40)); - bench.iter(|| client.get(&rpolicy, &key, Bins::None).unwrap()); + group.bench_function("single_key_read", |b| { + b.to_async(&rt) + .iter(|| single_key_read(client.clone(), &key)) + }); + + group.bench_function("single_key_read_header", |b| { + b.to_async(&rt) + .iter(|| single_key_read_header(client.clone(), &key2)) + }); + + group.bench_function("single_key_write", |b| { + b.to_async(&rt).iter(|| single_key_write(client.clone())) + }); + + group.finish() } -fn single_key_write(bench: &mut Bencher) { - let client = common::client(); +async fn single_key_read_header(client: Arc, key: &Key) { + let rpolicy = ReadPolicy::default(); + client + .get::, Bins>(&rpolicy, &key, Bins::None) + .await + .unwrap(); +} + +async fn single_key_write(client: Arc) { let namespace = common::namespace(); let key = as_key!(namespace, &TEST_SET, common::rand_str(10)); let wpolicy = WritePolicy::default(); @@ -69,15 +94,13 @@ fn single_key_write(bench: &mut Bencher) { let bin4 = as_bin!("str1", common::rand_str(256)); let bins = [bin1, bin2, bin3, bin4]; - bench.iter(|| { - client.put(&wpolicy, &key, &bins).unwrap(); - }); + client.put(&wpolicy, &key, &bins).await.unwrap(); } -benchmark_group!( +criterion_group!( benches, - single_key_read, - single_key_read_header, - single_key_write, + run_single_key_read, + //single_key_read_header, + //single_key_write, ); -benchmark_main!(benches); +criterion_main!(benches); diff --git a/tests/common/mod.rs b/tests/common/mod.rs index 8ff8477a..f4453f4b 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -60,5 +60,5 @@ pub 
async fn client() -> Client { pub fn rand_str(sz: usize) -> String { let rng = rand::thread_rng(); - rng.sample_iter(&Alphanumeric).take(sz).collect() + String::from_utf8(rng.sample_iter(&Alphanumeric).take(sz).collect::>()).unwrap() } diff --git a/tests/src/batch.rs b/tests/src/batch.rs index 1caf09f0..85a6a9ab 100644 --- a/tests/src/batch.rs +++ b/tests/src/batch.rs @@ -15,7 +15,8 @@ use aerospike::BatchRead; use aerospike::Bins; -use aerospike::{as_bin, as_key, BatchPolicy, Concurrency, WritePolicy}; +use aerospike::{as_bin, as_key, BatchPolicy, Concurrency, Value, WritePolicy}; +use std::collections::HashMap; use env_logger; @@ -58,7 +59,8 @@ async fn batch_get() { BatchRead::new(key3.clone(), none.clone()), BatchRead::new(key4.clone(), none), ]; - let mut results = client.batch_get(&bpolicy, batch).await.unwrap(); + let mut results: Vec>> = + client.batch_get(&bpolicy, batch).await.unwrap(); let result = results.remove(0); assert_eq!(result.key, key1); diff --git a/tests/src/cdt_bitwise.rs b/tests/src/cdt_bitwise.rs index d3213be0..3617c162 100644 --- a/tests/src/cdt_bitwise.rs +++ b/tests/src/cdt_bitwise.rs @@ -15,6 +15,7 @@ use crate::common; use env_logger; +use std::collections::HashMap; use aerospike::operations::bitwise; use aerospike::operations::bitwise::{BitPolicy, BitwiseOverflowActions}; @@ -42,12 +43,18 @@ async fn cdt_bitwise() { bitwise::insert("bin", 0, &val, &bpolicy), bitwise::get("bin", 9, 5), ]; - let rec = client.operate(&wpolicy, &key, ops).await.unwrap(); + let rec = client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); assert_eq!(*rec.bins.get("bin").unwrap(), Value::Blob(vec![0b10000000])); // Verify the Count command let ops = &vec![bitwise::count("bin", 20, 4)]; - let rec = client.operate(&wpolicy, &key, ops).await.unwrap(); + let rec = client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); assert_eq!(*rec.bins.get("bin").unwrap(), Value::Int(2)); // Verify the set command @@ -56,7 +63,10 @@ async fn cdt_bitwise() { bitwise::set("bin", 13, 3, &val, &bpolicy), bitwise::get("bin", 0, 40), ]; - let rec = client.operate(&wpolicy, &key, ops).await.unwrap(); + let rec = client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); assert_eq!( *rec.bins.get("bin").unwrap(), Value::Blob(vec![ @@ -69,7 +79,10 @@ async fn cdt_bitwise() { bitwise::remove("bin", 0, 1, &bpolicy), bitwise::get("bin", 0, 8), ]; - let rec = client.operate(&wpolicy, &key, ops).await.unwrap(); + let rec = client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); assert_eq!(*rec.bins.get("bin").unwrap(), Value::Blob(vec![0b01000111])); // Verify OR command @@ -78,7 +91,10 @@ async fn cdt_bitwise() { bitwise::or("bin", 0, 8, &val, &bpolicy), bitwise::get("bin", 0, 8), ]; - let rec = client.operate(&wpolicy, &key, ops).await.unwrap(); + let rec = client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); assert_eq!(*rec.bins.get("bin").unwrap(), Value::Blob(vec![0b11101111])); // Verify XOR command @@ -87,7 +103,10 @@ async fn cdt_bitwise() { bitwise::xor("bin", 0, 8, &val, &bpolicy), bitwise::get("bin", 0, 8), ]; - let rec = client.operate(&wpolicy, &key, ops).await.unwrap(); + let rec = client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); assert_eq!(*rec.bins.get("bin").unwrap(), Value::Blob(vec![0b01000011])); // Verify AND command @@ -96,7 +115,10 @@ async fn cdt_bitwise() { bitwise::and("bin", 0, 8, &val, &bpolicy), bitwise::get("bin", 0, 8), ]; - let rec = client.operate(&wpolicy, &key, ops).await.unwrap(); + let rec = client + 
.operate::>(&wpolicy, &key, ops) + .await + .unwrap(); assert_eq!(*rec.bins.get("bin").unwrap(), Value::Blob(vec![0b01000010])); // Verify NOT command @@ -104,7 +126,10 @@ async fn cdt_bitwise() { bitwise::not("bin", 0, 8, &bpolicy), bitwise::get("bin", 0, 8), ]; - let rec = client.operate(&wpolicy, &key, ops).await.unwrap(); + let rec = client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); assert_eq!(*rec.bins.get("bin").unwrap(), Value::Blob(vec![0b10111101])); // Verify LSHIFT command @@ -112,7 +137,10 @@ async fn cdt_bitwise() { bitwise::lshift("bin", 24, 8, 3, &bpolicy), bitwise::get("bin", 24, 8), ]; - let rec = client.operate(&wpolicy, &key, ops).await.unwrap(); + let rec = client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); assert_eq!(*rec.bins.get("bin").unwrap(), Value::Blob(vec![0b00101000])); // Verify RSHIFT command @@ -120,7 +148,10 @@ async fn cdt_bitwise() { bitwise::rshift("bin", 0, 9, 1, &bpolicy), bitwise::get("bin", 0, 16), ]; - let rec = client.operate(&wpolicy, &key, ops).await.unwrap(); + let rec = client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); assert_eq!( *rec.bins.get("bin").unwrap(), Value::Blob(vec![0b01011110, 0b10000011]) @@ -139,7 +170,10 @@ async fn cdt_bitwise() { ), bitwise::get("bin", 0, 32), ]; - let rec = client.operate(&wpolicy, &key, ops).await.unwrap(); + let rec = client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); assert_eq!( *rec.bins.get("bin").unwrap(), Value::Blob(vec![0b11011110, 0b10000011, 0b00000100, 0b00101000]) @@ -158,7 +192,10 @@ async fn cdt_bitwise() { ), bitwise::get("bin", 0, 32), ]; - let rec = client.operate(&wpolicy, &key, ops).await.unwrap(); + let rec = client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); assert_eq!( *rec.bins.get("bin").unwrap(), Value::Blob(vec![0b01011110, 0b10000011, 0b00000100, 0b00101000]) @@ -169,7 +206,10 @@ async fn cdt_bitwise() { bitwise::set_int("bin", 8, 8, 255, &bpolicy), bitwise::get("bin", 0, 32), ]; - let rec = client.operate(&wpolicy, &key, ops).await.unwrap(); + let rec = client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); assert_eq!( *rec.bins.get("bin").unwrap(), Value::Blob(vec![0b01011110, 0b11111111, 0b00000100, 0b00101000]) @@ -177,17 +217,26 @@ async fn cdt_bitwise() { // Verify the get int command let ops = &vec![bitwise::get_int("bin", 8, 8, false)]; - let rec = client.operate(&wpolicy, &key, ops).await.unwrap(); + let rec = client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); assert_eq!(*rec.bins.get("bin").unwrap(), Value::Int(255)); // Verify the LSCAN command let ops = &vec![bitwise::lscan("bin", 19, 8, true)]; - let rec = client.operate(&wpolicy, &key, ops).await.unwrap(); + let rec = client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); assert_eq!(*rec.bins.get("bin").unwrap(), Value::Int(2)); // Verify the RSCAN command let ops = &vec![bitwise::rscan("bin", 19, 8, true)]; - let rec = client.operate(&wpolicy, &key, ops).await.unwrap(); + let rec = client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); assert_eq!(*rec.bins.get("bin").unwrap(), Value::Int(7)); client.close().await.unwrap(); } diff --git a/tests/src/cdt_list.rs b/tests/src/cdt_list.rs index d34f7483..052dbfbb 100644 --- a/tests/src/cdt_list.rs +++ b/tests/src/cdt_list.rs @@ -15,11 +15,13 @@ use crate::common; use env_logger; +use std::collections::HashMap; use aerospike::operations; use aerospike::operations::lists; use aerospike::operations::lists::{ListPolicy, ListReturnType, ListSortFlags}; use 
aerospike::{as_bin, as_key, as_list, as_val, as_values, Bins, ReadPolicy, Value, WritePolicy}; +use aerospike_core::Record; #[aerospike_macro::test] fn cdt_list() { @@ -41,11 +43,14 @@ fn cdt_list() { client.delete(&wpolicy, &key).await.unwrap(); client.put(&wpolicy, &key, &bins).await.unwrap(); - let rec = client.get(&policy, &key, Bins::All).await.unwrap(); + let rec: Record> = client.get(&policy, &key, Bins::All).await.unwrap(); assert_eq!(*rec.bins.get("bin").unwrap(), val); let ops = &vec![lists::size("bin")]; - let rec = client.operate(&wpolicy, &key, ops).await.unwrap(); + let rec = client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); assert_eq!(*rec.bins.get("bin").unwrap(), Value::from(3)); let values = vec![as_val!(9), as_val!(8), as_val!(7)]; @@ -53,28 +58,40 @@ fn cdt_list() { lists::insert_items(&lpolicy, "bin", 1, &values), operations::get_bin("bin"), ]; - let rec = client.operate(&wpolicy, &key, ops).await.unwrap(); + let rec = client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); assert_eq!( *rec.bins.get("bin").unwrap(), as_list!(6, as_list!("0", 9, 8, 7, 1, 2.1f64)) ); let ops = &vec![lists::pop("bin", 0), operations::get_bin("bin")]; - let rec = client.operate(&wpolicy, &key, ops).await.unwrap(); + let rec = client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); assert_eq!( *rec.bins.get("bin").unwrap(), as_list!("0", as_list!(9, 8, 7, 1, 2.1f64)) ); let ops = &vec![lists::pop_range("bin", 0, 2), operations::get_bin("bin")]; - let rec = client.operate(&wpolicy, &key, ops).await.unwrap(); + let rec = client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); assert_eq!( *rec.bins.get("bin").unwrap(), as_list!(9, 8, as_list!(7, 1, 2.1f64)) ); let ops = &vec![lists::pop_range_from("bin", 1), operations::get_bin("bin")]; - let rec = client.operate(&wpolicy, &key, ops).await.unwrap(); + let rec = client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); assert_eq!( *rec.bins.get("bin").unwrap(), as_list!(1, 2.1f64, as_list!(7)) @@ -86,25 +103,37 @@ fn cdt_list() { lists::append_items(&lpolicy, "bin", &values), operations::get_bin("bin"), ]; - let rec = client.operate(&wpolicy, &key, ops).await.unwrap(); + let rec = client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); assert_eq!( *rec.bins.get("bin").unwrap(), as_list!(6, as_list!("0", 9, 8, 7, 1, 2.1f64)) ); let ops = &vec![lists::increment(&lpolicy, "bin", 1, 4)]; - let rec = client.operate(&wpolicy, &key, ops).await.unwrap(); + let rec = client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); assert_eq!(*rec.bins.get("bin").unwrap(), Value::from(13)); let ops = &vec![lists::remove("bin", 1), operations::get_bin("bin")]; - let rec = client.operate(&wpolicy, &key, ops).await.unwrap(); + let rec = client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); assert_eq!( *rec.bins.get("bin").unwrap(), as_list!(1, as_list!("0", 8, 7, 1, 2.1f64)) ); let ops = &vec![lists::remove_range("bin", 1, 2), operations::get_bin("bin")]; - let rec = client.operate(&wpolicy, &key, ops).await.unwrap(); + let rec = client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); assert_eq!( *rec.bins.get("bin").unwrap(), as_list!(2, as_list!("0", 1, 2.1f64)) @@ -114,12 +143,18 @@ fn cdt_list() { lists::remove_range_from("bin", -1), operations::get_bin("bin"), ]; - let rec = client.operate(&wpolicy, &key, ops).await.unwrap(); + let rec = client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); assert_eq!(*rec.bins.get("bin").unwrap(), as_list!(1, as_list!("0", 1))); let v = 
as_val!(2); let ops = &vec![lists::set("bin", -1, &v), operations::get_bin("bin")]; - let rec = client.operate(&wpolicy, &key, ops).await.unwrap(); + let rec = client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); assert_eq!(*rec.bins.get("bin").unwrap(), as_list!("0", 2)); let values = as_values!["0", 9, 8, 7, 1, 2.1f64, -1]; @@ -128,14 +163,20 @@ fn cdt_list() { lists::append_items(&lpolicy, "bin", &values), operations::get_bin("bin"), ]; - let rec = client.operate(&wpolicy, &key, ops).await.unwrap(); + let rec = client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); assert_eq!( *rec.bins.get("bin").unwrap(), as_list!(7, as_list!("0", 9, 8, 7, 1, 2.1f64, -1)) ); let ops = &vec![lists::trim("bin", 1, 1), operations::get_bin("bin")]; - let rec = client.operate(&wpolicy, &key, ops).await.unwrap(); + let rec = client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); assert_eq!(*rec.bins.get("bin").unwrap(), as_list!(6, as_list!(9))); let values = as_values!["0", 9, 8, 7, 1, 2.1f64, -1]; @@ -144,30 +185,45 @@ fn cdt_list() { lists::append_items(&lpolicy, "bin", &values), operations::get_bin("bin"), ]; - let rec = client.operate(&wpolicy, &key, ops).await.unwrap(); + let rec = client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); assert_eq!( *rec.bins.get("bin").unwrap(), as_list!(7, as_list!("0", 9, 8, 7, 1, 2.1f64, -1)) ); let ops = &vec![lists::get("bin", 1)]; - let rec = client.operate(&wpolicy, &key, ops).await.unwrap(); + let rec = client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); assert_eq!(*rec.bins.get("bin").unwrap(), as_val!(9)); let ops = &vec![lists::get_range("bin", 1, -1)]; - let rec = client.operate(&wpolicy, &key, ops).await.unwrap(); + let rec = client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); assert_eq!( *rec.bins.get("bin").unwrap(), as_list!(9, 8, 7, 1, 2.1f64, -1) ); let ops = &vec![lists::get_range_from("bin", 2)]; - let rec = client.operate(&wpolicy, &key, ops).await.unwrap(); + let rec = client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); assert_eq!(*rec.bins.get("bin").unwrap(), as_list!(8, 7, 1, 2.1f64, -1)); let rval = Value::from(9); let ops = &vec![lists::remove_by_value("bin", &rval, ListReturnType::Count)]; - let rec = client.operate(&wpolicy, &key, ops).await.unwrap(); + let rec = client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); assert_eq!(*rec.bins.get("bin").unwrap(), Value::from(1)); let rval = vec![Value::from(8), Value::from(7)]; @@ -176,7 +232,10 @@ fn cdt_list() { &rval, ListReturnType::Count, )]; - let rec = client.operate(&wpolicy, &key, ops).await.unwrap(); + let rec = client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); assert_eq!(*rec.bins.get("bin").unwrap(), Value::from(2)); let values = as_values!["0", 9, 8, 7, 1, 2.1f64, -1]; @@ -185,7 +244,10 @@ fn cdt_list() { lists::append_items(&lpolicy, "bin", &values), operations::get_bin("bin"), ]; - let rec = client.operate(&wpolicy, &key, ops).await.unwrap(); + let rec = client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); assert_eq!( *rec.bins.get("bin").unwrap(), as_list!(7, as_list!("0", 9, 8, 7, 1, 2.1f64, -1)) @@ -199,7 +261,10 @@ fn cdt_list() { &beg, &end, )]; - let rec = client.operate(&wpolicy, &key, ops).await.unwrap(); + let rec = client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); assert_eq!(*rec.bins.get("bin").unwrap(), Value::from(2)); let values = as_values!["0", 9, 8, 7, 1, 2.1f64, -1]; @@ -208,24 +273,36 @@ fn cdt_list() { lists::append_items(&lpolicy, "bin", 
&values), operations::get_bin("bin"), ]; - let rec = client.operate(&wpolicy, &key, ops).await.unwrap(); + let rec = client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); assert_eq!( *rec.bins.get("bin").unwrap(), as_list!(7, as_list!("0", 9, 8, 7, 1, 2.1f64, -1)) ); let ops = &vec![lists::sort("bin", ListSortFlags::Default)]; - client.operate(&wpolicy, &key, ops).await.unwrap(); + client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); let ops = &vec![operations::get_bin("bin")]; - let rec = client.operate(&wpolicy, &key, ops).await.unwrap(); + let rec = client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); assert_eq!( *rec.bins.get("bin").unwrap(), as_list!(-1, 1, 7, 8, 9, "0", 2.1f64) ); let ops = &vec![lists::remove_by_index("bin", 1, ListReturnType::Values)]; - let rec = client.operate(&wpolicy, &key, ops).await.unwrap(); + let rec = client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); assert_eq!(*rec.bins.get("bin").unwrap(), Value::from(1)); let ops = &vec![lists::remove_by_index_range( @@ -233,7 +310,10 @@ fn cdt_list() { 4, ListReturnType::Values, )]; - let rec = client.operate(&wpolicy, &key, ops).await.unwrap(); + let rec = client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); assert_eq!(*rec.bins.get("bin").unwrap(), as_list!("0", 2.1f64)); let values = as_values!["0", 9, 8, 7, 1, 2.1f64, -1]; @@ -242,7 +322,10 @@ fn cdt_list() { lists::append_items(&lpolicy, "bin", &values), operations::get_bin("bin"), ]; - let rec = client.operate(&wpolicy, &key, ops).await.unwrap(); + let rec = client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); assert_eq!( *rec.bins.get("bin").unwrap(), as_list!(7, as_list!("0", 9, 8, 7, 1, 2.1f64, -1)) @@ -254,11 +337,17 @@ fn cdt_list() { 2, ListReturnType::Values, )]; - let rec = client.operate(&wpolicy, &key, ops).await.unwrap(); + let rec = client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); assert_eq!(*rec.bins.get("bin").unwrap(), as_list!("0", 9)); let ops = &vec![lists::remove_by_rank("bin", 2, ListReturnType::Values)]; - let rec = client.operate(&wpolicy, &key, ops).await.unwrap(); + let rec = client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); assert_eq!(*rec.bins.get("bin").unwrap(), Value::from(7)); let ops = &vec![lists::remove_by_rank_range( @@ -266,7 +355,10 @@ fn cdt_list() { 2, ListReturnType::Values, )]; - let rec = client.operate(&wpolicy, &key, ops).await.unwrap(); + let rec = client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); assert_eq!(*rec.bins.get("bin").unwrap(), as_list!(8, 2.1f64)); let values = as_values!["0", 9, 8, 7, 1, 2.1f64, -1]; @@ -275,7 +367,10 @@ fn cdt_list() { lists::append_items(&lpolicy, "bin", &values), operations::get_bin("bin"), ]; - let rec = client.operate(&wpolicy, &key, ops).await.unwrap(); + let rec = client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); assert_eq!( *rec.bins.get("bin").unwrap(), as_list!(7, as_list!("0", 9, 8, 7, 1, 2.1f64, -1)) @@ -287,7 +382,10 @@ fn cdt_list() { 2, ListReturnType::Values, )]; - let rec = client.operate(&wpolicy, &key, ops).await.unwrap(); + let rec = client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); assert_eq!(*rec.bins.get("bin").unwrap(), as_list!(8, 7)); let values = as_values!["0", 9, 8, 7, 1, 2.1f64, -1]; @@ -296,7 +394,10 @@ fn cdt_list() { lists::append_items(&lpolicy, "bin", &values), operations::get_bin("bin"), ]; - let rec = client.operate(&wpolicy, &key, ops).await.unwrap(); + let rec = client + .operate::>(&wpolicy, &key, ops) + .await + 
.unwrap(); assert_eq!( *rec.bins.get("bin").unwrap(), as_list!(7, as_list!("0", 9, 8, 7, 1, 2.1f64, -1)) @@ -309,7 +410,10 @@ fn cdt_list() { &val, 1, )]; - let rec = client.operate(&wpolicy, &key, ops).await.unwrap(); + let rec = client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); assert_eq!( *rec.bins.get("bin").unwrap(), as_list!(7, 8, 9, "0", 2.1f64) @@ -321,7 +425,10 @@ fn cdt_list() { lists::append_items(&lpolicy, "bin", &values), operations::get_bin("bin"), ]; - let rec = client.operate(&wpolicy, &key, ops).await.unwrap(); + let rec = client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); assert_eq!( *rec.bins.get("bin").unwrap(), as_list!(7, as_list!("0", 9, 8, 7, 1, 2.1f64, -1)) @@ -335,7 +442,10 @@ fn cdt_list() { 1, 2, )]; - let rec = client.operate(&wpolicy, &key, ops).await.unwrap(); + let rec = client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); assert_eq!(*rec.bins.get("bin").unwrap(), as_list!(8, 7)); let values = as_values!["0", 9, 8, 7, 1, 2.1f64, -1]; @@ -344,7 +454,10 @@ fn cdt_list() { lists::append_items(&lpolicy, "bin", &values), operations::get_bin("bin"), ]; - let rec = client.operate(&wpolicy, &key, ops).await.unwrap(); + let rec = client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); assert_eq!( *rec.bins.get("bin").unwrap(), as_list!(7, as_list!("0", 9, 8, 7, 1, 2.1f64, -1)) @@ -358,17 +471,26 @@ fn cdt_list() { 2, ListReturnType::Values, )]; - let rec = client.operate(&wpolicy, &key, ops).await.unwrap(); + let rec = client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); assert_eq!(*rec.bins.get("bin").unwrap(), as_list!(8, 9)); let val = Value::from(1); let ops = &vec![lists::get_by_value("bin", &val, ListReturnType::Count)]; - let rec = client.operate(&wpolicy, &key, ops).await.unwrap(); + let rec = client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); assert_eq!(*rec.bins.get("bin").unwrap(), Value::from(1)); let val = vec![Value::from(1), Value::from("0")]; let ops = &vec![lists::get_by_value_list("bin", &val, ListReturnType::Count)]; - let rec = client.operate(&wpolicy, &key, ops).await.unwrap(); + let rec = client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); assert_eq!(*rec.bins.get("bin").unwrap(), Value::from(2)); let beg = Value::from(1); @@ -379,15 +501,24 @@ fn cdt_list() { &end, ListReturnType::Count, )]; - let rec = client.operate(&wpolicy, &key, ops).await.unwrap(); + let rec = client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); assert_eq!(*rec.bins.get("bin").unwrap(), Value::from(3)); let ops = &vec![lists::get_by_index("bin", 3, ListReturnType::Values)]; - let rec = client.operate(&wpolicy, &key, ops).await.unwrap(); + let rec = client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); assert_eq!(*rec.bins.get("bin").unwrap(), Value::from(7)); let ops = &vec![lists::get_by_index_range("bin", 3, ListReturnType::Values)]; - let rec = client.operate(&wpolicy, &key, ops).await.unwrap(); + let rec = client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); assert_eq!(*rec.bins.get("bin").unwrap(), as_list!(7, 1, 2.1f64, -1)); let ops = &vec![lists::get_by_index_range_count( @@ -396,7 +527,10 @@ fn cdt_list() { 2, ListReturnType::Values, )]; - let rec = client.operate(&wpolicy, &key, ops).await.unwrap(); + let rec = client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); assert_eq!(*rec.bins.get("bin").unwrap(), as_list!("0", 9)); let values = as_values!["0", 9, 8, 7, 1, 2.1f64, -1]; @@ -405,18 +539,27 @@ fn cdt_list() { 
lists::append_items(&lpolicy, "bin", &values), operations::get_bin("bin"), ]; - let rec = client.operate(&wpolicy, &key, ops).await.unwrap(); + let rec = client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); assert_eq!( *rec.bins.get("bin").unwrap(), as_list!(7, as_list!("0", 9, 8, 7, 1, 2.1f64, -1)) ); let ops = &vec![lists::get_by_rank("bin", 2, ListReturnType::Values)]; - let rec = client.operate(&wpolicy, &key, ops).await.unwrap(); + let rec = client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); assert_eq!(*rec.bins.get("bin").unwrap(), Value::from(7)); let ops = &vec![lists::get_by_rank_range("bin", 4, ListReturnType::Values)]; - let rec = client.operate(&wpolicy, &key, ops).await.unwrap(); + let rec = client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); assert_eq!(*rec.bins.get("bin").unwrap(), as_list!(9, "0", 2.1f64)); let ops = &vec![lists::get_by_rank_range_count( @@ -425,7 +568,10 @@ fn cdt_list() { 2, ListReturnType::Values, )]; - let rec = client.operate(&wpolicy, &key, ops).await.unwrap(); + let rec = client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); assert_eq!(*rec.bins.get("bin").unwrap(), as_list!(8, 7)); let val = Value::from(1); @@ -435,7 +581,10 @@ fn cdt_list() { 2, ListReturnType::Values, )]; - let rec = client.operate(&wpolicy, &key, ops).await.unwrap(); + let rec = client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); assert_eq!(*rec.bins.get("bin").unwrap(), as_list!(8, 9, "0", 2.1f64)); let val = Value::from(1); @@ -446,7 +595,10 @@ fn cdt_list() { 2, ListReturnType::Values, )]; - let rec = client.operate(&wpolicy, &key, ops).await.unwrap(); + let rec = client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); assert_eq!(*rec.bins.get("bin").unwrap(), as_list!(8, 9)); client.close().await.unwrap(); } diff --git a/tests/src/cdt_map.rs b/tests/src/cdt_map.rs index ef218c3e..00453951 100644 --- a/tests/src/cdt_map.rs +++ b/tests/src/cdt_map.rs @@ -21,8 +21,8 @@ use env_logger; use aerospike::operations::cdt_context::{ctx_map_key, ctx_map_key_create}; use aerospike::operations::{maps, MapOrder}; use aerospike::{ - as_bin, as_key, as_list, as_map, as_val, Bins, MapPolicy, MapReturnType, ReadPolicy, - WritePolicy, + as_bin, as_key, as_list, as_map, as_val, Bins, MapPolicy, MapReturnType, ReadPolicy, Record, + Value, WritePolicy, }; #[aerospike_macro::test] @@ -51,16 +51,16 @@ async fn map_operations() { let (k, v) = (as_val!("c"), as_val!(3)); let op = maps::put(&mpolicy, bin_name, &k, &v); - let rec = client.operate(&wpolicy, &key, &[op]).await.unwrap(); + let rec: Record> = client.operate(&wpolicy, &key, &[op]).await.unwrap(); // returns size of map after put assert_eq!(*rec.bins.get(bin_name).unwrap(), as_val!(3)); let op = maps::size(bin_name); - let rec = client.operate(&wpolicy, &key, &[op]).await.unwrap(); + let rec: Record> = client.operate(&wpolicy, &key, &[op]).await.unwrap(); // returns size of map assert_eq!(*rec.bins.get(bin_name).unwrap(), as_val!(3)); - let rec = client.get(&rpolicy, &key, Bins::All).await.unwrap(); + let rec: Record> = client.get(&rpolicy, &key, Bins::All).await.unwrap(); assert_eq!( *rec.bins.get(bin_name).unwrap(), as_map!("a" => 1, "b" => 2, "c" => 3) @@ -70,24 +70,24 @@ async fn map_operations() { items.insert(as_val!("d"), as_val!(4)); items.insert(as_val!("e"), as_val!(5)); let op = maps::put_items(&mpolicy, bin_name, &items); - let rec = client.operate(&wpolicy, &key, &[op]).await.unwrap(); + let rec: Record> = client.operate(&wpolicy, &key, &[op]).await.unwrap(); // 
returns size of map after put assert_eq!(*rec.bins.get(bin_name).unwrap(), as_val!(5)); let k = as_val!("e"); let op = maps::remove_by_key(bin_name, &k, MapReturnType::Value); - let rec = client.operate(&wpolicy, &key, &[op]).await.unwrap(); + let rec: Record> = client.operate(&wpolicy, &key, &[op]).await.unwrap(); assert_eq!(*rec.bins.get(bin_name).unwrap(), as_val!(5)); let (k, i) = (as_val!("a"), as_val!(19)); let op = maps::increment_value(&mpolicy, bin_name, &k, &i); - let rec = client.operate(&wpolicy, &key, &[op]).await.unwrap(); + let rec: Record> = client.operate(&wpolicy, &key, &[op]).await.unwrap(); // returns value of the key after increment assert_eq!(*rec.bins.get(bin_name).unwrap(), as_val!(20)); let (k, i) = (as_val!("a"), as_val!(10)); let op = maps::decrement_value(&mpolicy, bin_name, &k, &i); - let rec = client.operate(&wpolicy, &key, &[op]).await.unwrap(); + let rec: Record> = client.operate(&wpolicy, &key, &[op]).await.unwrap(); // returns value of the key after decrement assert_eq!(*rec.bins.get(bin_name).unwrap(), as_val!(10)); @@ -95,12 +95,13 @@ async fn map_operations() { let dec = maps::decrement_value(&mpolicy, bin_name, &k, &i); let (k, i) = (as_val!("a"), as_val!(7)); let inc = maps::increment_value(&mpolicy, bin_name, &k, &i); - let rec = client.operate(&wpolicy, &key, &[dec, inc]).await.unwrap(); + let rec: Record> = + client.operate(&wpolicy, &key, &[dec, inc]).await.unwrap(); // returns values from multiple ops returned as list assert_eq!(*rec.bins.get(bin_name).unwrap(), as_list!(5, 12)); let op = maps::clear(bin_name); - let rec = client.operate(&wpolicy, &key, &[op]).await.unwrap(); + let rec: Record> = client.operate(&wpolicy, &key, &[op]).await.unwrap(); // map_clear returns no result assert!(rec.bins.get(bin_name).is_none()); @@ -114,75 +115,75 @@ async fn map_operations() { client.put(&wpolicy, &key, &bins.as_slice()).await.unwrap(); let op = maps::get_by_index(bin_name, 0, MapReturnType::Value); - let rec = client.operate(&wpolicy, &key, &[op]).await.unwrap(); + let rec: Record> = client.operate(&wpolicy, &key, &[op]).await.unwrap(); assert_eq!(*rec.bins.get(bin_name).unwrap(), as_val!(1)); let op = maps::get_by_index_range(bin_name, 1, 2, MapReturnType::Value); - let rec = client.operate(&wpolicy, &key, &[op]).await.unwrap(); + let rec: Record> = client.operate(&wpolicy, &key, &[op]).await.unwrap(); assert_eq!(*rec.bins.get(bin_name).unwrap(), as_list!(2, 3)); let op = maps::get_by_index_range_from(bin_name, 3, MapReturnType::Value); - let rec = client.operate(&wpolicy, &key, &[op]).await.unwrap(); + let rec: Record> = client.operate(&wpolicy, &key, &[op]).await.unwrap(); assert_eq!(*rec.bins.get(bin_name).unwrap(), as_list!(4, 5)); let val = as_val!(5); let op = maps::get_by_value(bin_name, &val, MapReturnType::Index); - let rec = client.operate(&wpolicy, &key, &[op]).await.unwrap(); + let rec: Record> = client.operate(&wpolicy, &key, &[op]).await.unwrap(); assert_eq!(*rec.bins.get(bin_name).unwrap(), as_list!(4)); let beg = as_val!(3); let end = as_val!(5); let op = maps::get_by_value_range(bin_name, &beg, &end, MapReturnType::Count); - let rec = client.operate(&wpolicy, &key, &[op]).await.unwrap(); + let rec: Record> = client.operate(&wpolicy, &key, &[op]).await.unwrap(); assert_eq!(*rec.bins.get(bin_name).unwrap(), as_val!(2)); let op = maps::get_by_rank(bin_name, 2, MapReturnType::Value); - let rec = client.operate(&wpolicy, &key, &[op]).await.unwrap(); + let rec: Record> = client.operate(&wpolicy, &key, &[op]).await.unwrap(); 
assert_eq!(*rec.bins.get(bin_name).unwrap(), as_val!(3)); let op = maps::get_by_rank_range(bin_name, 2, 3, MapReturnType::Value); - let rec = client.operate(&wpolicy, &key, &[op]).await.unwrap(); + let rec: Record> = client.operate(&wpolicy, &key, &[op]).await.unwrap(); assert_eq!(*rec.bins.get(bin_name).unwrap(), as_list!(3, 4, 5)); let op = maps::get_by_rank_range_from(bin_name, 2, MapReturnType::Count); - let rec = client.operate(&wpolicy, &key, &[op]).await.unwrap(); + let rec: Record> = client.operate(&wpolicy, &key, &[op]).await.unwrap(); assert_eq!(*rec.bins.get(bin_name).unwrap(), as_val!(3)); let mkey = as_val!("b"); let op = maps::get_by_key(bin_name, &mkey, MapReturnType::Value); - let rec = client.operate(&wpolicy, &key, &[op]).await.unwrap(); + let rec: Record> = client.operate(&wpolicy, &key, &[op]).await.unwrap(); assert_eq!(*rec.bins.get(bin_name).unwrap(), as_val!(2)); let mkey = as_val!("b"); let mkey2 = as_val!("d"); let op = maps::get_by_key_range(bin_name, &mkey, &mkey2, MapReturnType::Count); - let rec = client.operate(&wpolicy, &key, &[op]).await.unwrap(); + let rec: Record> = client.operate(&wpolicy, &key, &[op]).await.unwrap(); assert_eq!(*rec.bins.get(bin_name).unwrap(), as_val!(2)); let mkey = vec![as_val!("b"), as_val!("d")]; let op = maps::get_by_key_list(bin_name, &mkey, MapReturnType::Count); - let rec = client.operate(&wpolicy, &key, &[op]).await.unwrap(); + let rec: Record> = client.operate(&wpolicy, &key, &[op]).await.unwrap(); assert_eq!(*rec.bins.get(bin_name).unwrap(), as_val!(2)); let mkey = vec![as_val!(2), as_val!(3)]; let op = maps::get_by_value_list(bin_name, &mkey, MapReturnType::Count); - let rec = client.operate(&wpolicy, &key, &[op]).await.unwrap(); + let rec: Record> = client.operate(&wpolicy, &key, &[op]).await.unwrap(); assert_eq!(*rec.bins.get(bin_name).unwrap(), as_val!(2)); let mkey = vec![as_val!("b"), as_val!("d")]; let op = maps::remove_by_key_list(bin_name, &mkey, MapReturnType::Count); - let rec = client.operate(&wpolicy, &key, &[op]).await.unwrap(); + let rec: Record> = client.operate(&wpolicy, &key, &[op]).await.unwrap(); assert_eq!(*rec.bins.get(bin_name).unwrap(), as_val!(2)); let mkey = as_val!("a"); let mkey2 = as_val!("c"); let op = maps::remove_by_key_range(bin_name, &mkey, &mkey2, MapReturnType::Count); - let rec = client.operate(&wpolicy, &key, &[op]).await.unwrap(); + let rec: Record> = client.operate(&wpolicy, &key, &[op]).await.unwrap(); assert_eq!(*rec.bins.get(bin_name).unwrap(), as_val!(1)); let mkey = as_val!(5); let op = maps::remove_by_value(bin_name, &mkey, MapReturnType::Count); - let rec = client.operate(&wpolicy, &key, &[op]).await.unwrap(); + let rec: Record> = client.operate(&wpolicy, &key, &[op]).await.unwrap(); assert_eq!(*rec.bins.get(bin_name).unwrap(), as_val!(1)); client.delete(&wpolicy, &key).await.unwrap(); @@ -190,46 +191,46 @@ async fn map_operations() { let mkey = vec![as_val!(4), as_val!(5)]; let op = maps::remove_by_value_list(bin_name, &mkey, MapReturnType::Count); - let rec = client.operate(&wpolicy, &key, &[op]).await.unwrap(); + let rec: Record> = client.operate(&wpolicy, &key, &[op]).await.unwrap(); assert_eq!(*rec.bins.get(bin_name).unwrap(), as_val!(2)); let mkey = as_val!(1); let mkey2 = as_val!(3); let op = maps::remove_by_value_range(bin_name, &mkey, &mkey2, MapReturnType::Count); - let rec = client.operate(&wpolicy, &key, &[op]).await.unwrap(); + let rec: Record> = client.operate(&wpolicy, &key, &[op]).await.unwrap(); assert_eq!(*rec.bins.get(bin_name).unwrap(), as_val!(2)); 
client.delete(&wpolicy, &key).await.unwrap(); client.put(&wpolicy, &key, &bins).await.unwrap(); let op = maps::remove_by_index(bin_name, 1, MapReturnType::Value); - let rec = client.operate(&wpolicy, &key, &[op]).await.unwrap(); + let rec: Record> = client.operate(&wpolicy, &key, &[op]).await.unwrap(); assert_eq!(*rec.bins.get(bin_name).unwrap(), as_val!(2)); let op = maps::remove_by_index_range(bin_name, 1, 2, MapReturnType::Value); - let rec = client.operate(&wpolicy, &key, &[op]).await.unwrap(); + let rec: Record> = client.operate(&wpolicy, &key, &[op]).await.unwrap(); assert_eq!(*rec.bins.get(bin_name).unwrap(), as_list!(3, 4)); let op = maps::remove_by_index_range_from(bin_name, 1, MapReturnType::Value); - let rec = client.operate(&wpolicy, &key, &[op]).await.unwrap(); + let rec: Record> = client.operate(&wpolicy, &key, &[op]).await.unwrap(); assert_eq!(*rec.bins.get(bin_name).unwrap(), as_list!(5)); client.delete(&wpolicy, &key).await.unwrap(); client.put(&wpolicy, &key, &bins).await.unwrap(); let op = maps::remove_by_rank(bin_name, 1, MapReturnType::Value); - let rec = client.operate(&wpolicy, &key, &[op]).await.unwrap(); + let rec: Record> = client.operate(&wpolicy, &key, &[op]).await.unwrap(); assert_eq!(*rec.bins.get(bin_name).unwrap(), as_val!(2)); let op = maps::remove_by_rank_range(bin_name, 1, 2, MapReturnType::Value); - let rec = client.operate(&wpolicy, &key, &[op]).await.unwrap(); + let rec: Record> = client.operate(&wpolicy, &key, &[op]).await.unwrap(); assert_eq!(*rec.bins.get(bin_name).unwrap(), as_list!(3, 4)); client.delete(&wpolicy, &key).await.unwrap(); client.put(&wpolicy, &key, &bins).await.unwrap(); let op = maps::remove_by_rank_range_from(bin_name, 3, MapReturnType::Value); - let rec = client.operate(&wpolicy, &key, &[op]).await.unwrap(); + let rec: Record> = client.operate(&wpolicy, &key, &[op]).await.unwrap(); assert_eq!(*rec.bins.get(bin_name).unwrap(), as_list!(4, 5)); client.delete(&wpolicy, &key).await.unwrap(); @@ -237,13 +238,13 @@ async fn map_operations() { let mkey = as_val!("b"); let op = maps::remove_by_key_relative_index_range(bin_name, &mkey, 2, MapReturnType::Value); - let rec = client.operate(&wpolicy, &key, &[op]).await.unwrap(); + let rec: Record> = client.operate(&wpolicy, &key, &[op]).await.unwrap(); assert_eq!(*rec.bins.get(bin_name).unwrap(), as_list!(4, 5)); let mkey = as_val!("c"); let op = maps::remove_by_key_relative_index_range_count(bin_name, &mkey, 0, 2, MapReturnType::Value); - let rec = client.operate(&wpolicy, &key, &[op]).await.unwrap(); + let rec: Record> = client.operate(&wpolicy, &key, &[op]).await.unwrap(); assert_eq!(*rec.bins.get(bin_name).unwrap(), as_list!(3)); client.delete(&wpolicy, &key).await.unwrap(); @@ -257,12 +258,12 @@ async fn map_operations() { 2, MapReturnType::Value, ); - let rec = client.operate(&wpolicy, &key, &[op]).await.unwrap(); + let rec: Record> = client.operate(&wpolicy, &key, &[op]).await.unwrap(); assert_eq!(*rec.bins.get(bin_name).unwrap(), as_list!(5)); let mkey = as_val!(2); let op = maps::remove_by_value_relative_rank_range(bin_name, &mkey, 1, MapReturnType::Value); - let rec = client.operate(&wpolicy, &key, &[op]).await.unwrap(); + let rec: Record> = client.operate(&wpolicy, &key, &[op]).await.unwrap(); assert_eq!(*rec.bins.get(bin_name).unwrap(), as_list!(3, 4)); client.delete(&wpolicy, &key).await.unwrap(); @@ -270,35 +271,38 @@ async fn map_operations() { let mkey = as_val!("a"); let op = maps::get_by_key_relative_index_range(bin_name, &mkey, 1, MapReturnType::Value); - let rec = 
client.operate(&wpolicy, &key, &[op]).await.unwrap(); + let rec: Record> = client.operate(&wpolicy, &key, &[op]).await.unwrap(); assert_eq!(*rec.bins.get(bin_name).unwrap(), as_list!(2, 3, 4, 5)); let mkey = as_val!("a"); let op = maps::get_by_key_relative_index_range_count(bin_name, &mkey, 1, 2, MapReturnType::Value); - let rec = client.operate(&wpolicy, &key, &[op]).await.unwrap(); + let rec: Record> = client.operate(&wpolicy, &key, &[op]).await.unwrap(); assert_eq!(*rec.bins.get(bin_name).unwrap(), as_list!(2, 3)); let mkey = as_val!(2); let op = maps::get_by_value_relative_rank_range(bin_name, &mkey, 1, MapReturnType::Value); - let rec = client.operate(&wpolicy, &key, &[op]).await.unwrap(); + let rec: Record> = client.operate(&wpolicy, &key, &[op]).await.unwrap(); assert_eq!(*rec.bins.get(bin_name).unwrap(), as_list!(3, 4, 5)); let mkey = as_val!(2); let op = maps::get_by_value_relative_rank_range_count(bin_name, &mkey, 1, 1, MapReturnType::Value); - let rec = client.operate(&wpolicy, &key, &[op]).await.unwrap(); + let rec: Record> = client.operate(&wpolicy, &key, &[op]).await.unwrap(); assert_eq!(*rec.bins.get(bin_name).unwrap(), as_list!(3)); let mkey = as_val!("ctxtest"); let mval = as_map!("x" => 7, "y" => 8, "z" => 9); let op = maps::put(&mpolicy, bin_name, &mkey, &mval); - client.operate(&wpolicy, &key, &[op]).await.unwrap(); + client + .operate::>(&wpolicy, &key, &[op]) + .await + .unwrap(); let ctx = &vec![ctx_map_key(mkey)]; let xkey = as_val!("y"); let op = maps::get_by_key(bin_name, &xkey, MapReturnType::Value).set_context(ctx); - let rec = client.operate(&wpolicy, &key, &[op]).await.unwrap(); + let rec: Record> = client.operate(&wpolicy, &key, &[op]).await.unwrap(); assert_eq!(*rec.bins.get(bin_name).unwrap(), as_val!(8)); let mkey = as_val!("ctxtest2"); @@ -306,9 +310,12 @@ async fn map_operations() { let xkey = as_val!("y"); let xval = as_val!(8); let op = [maps::put(&mpolicy, bin_name, &xkey, &xval).set_context(ctx)]; - client.operate(&wpolicy, &key, &op).await.unwrap(); + client + .operate::>(&wpolicy, &key, &op) + .await + .unwrap(); let op = [maps::get_by_key(bin_name, &xkey, MapReturnType::Value).set_context(ctx)]; - let rec = client.operate(&wpolicy, &key, &op).await.unwrap(); + let rec: Record> = client.operate(&wpolicy, &key, &op).await.unwrap(); assert_eq!(*rec.bins.get(bin_name).unwrap(), as_val!(8)); let mkey2 = as_val!("ctxtest3"); @@ -319,9 +326,12 @@ async fn map_operations() { let xkey = as_val!("c"); let xval = as_val!(9); let op = [maps::put(&mpolicy, bin_name, &xkey, &xval).set_context(ctx)]; - client.operate(&wpolicy, &key, &op).await.unwrap(); + client + .operate::>(&wpolicy, &key, &op) + .await + .unwrap(); let op = [maps::get_by_key(bin_name, &xkey, MapReturnType::Value).set_context(ctx)]; - let rec = client.operate(&wpolicy, &key, &op).await.unwrap(); + let rec: Record> = client.operate(&wpolicy, &key, &op).await.unwrap(); assert_eq!(*rec.bins.get(bin_name).unwrap(), as_val!(9)); client.close().await.unwrap(); diff --git a/tests/src/derive.rs b/tests/src/derive.rs new file mode 100644 index 00000000..11da507e --- /dev/null +++ b/tests/src/derive.rs @@ -0,0 +1,278 @@ +use crate::common; +use aerospike::as_key; +use aerospike::as_val; +use aerospike::derive::readable::{ReadableBins, ReadableValue}; +use aerospike::derive::writable::{WritableBins, WritableValue}; +use aerospike::WritePolicy; +use aerospike::{Bins, ReadPolicy, Record, Value}; +use std::collections::HashMap; + +#[aerospike_macro::test] +async fn derive_writable() { + let client = 
common::client().await; + let namespace: &str = common::namespace(); + let set_name = &common::rand_str(10); + let key = as_key!(namespace, set_name, "derive_struct"); + + #[derive(WritableValue)] + struct TestValue { + string: String, + no_val_cdt: Option, + has_val_cdt: Option, + #[aerospike(default = 1.231f64)] + default_val_cdt: Option, + #[aerospike(rename = "int32")] + int: i32, + #[aerospike(skip)] + skipped_bool: bool, + } + + #[derive(WritableBins)] + struct TestData<'a> { + #[aerospike(rename = "renamed_int32")] + int32: i32, + string: String, + refstr: &'a str, + uint16: u16, + test: TestValue, + #[aerospike(skip)] + no_write: i32, + #[aerospike(default = "test")] + default_value: Option, + no_value: Option, + has_value: Option, + list: Vec, + } + + let testv = TestValue { + string: "asd".to_string(), + no_val_cdt: None, + has_val_cdt: Some(123456), + default_val_cdt: None, + int: 1234, + skipped_bool: true, + }; + + let test = TestData { + int32: 65521, + string: "string".to_string(), + refstr: "str", + uint16: 7, + test: testv, + no_write: 123, + default_value: None, + no_value: None, + has_value: Some(12345), + list: Vec::from(["test1".to_string(), "test2".to_string()]), + }; + + let res = client.put(&WritePolicy::default(), &key, &test).await; + assert_eq!(res.is_ok(), true, "Derive writer failed"); + let res: Record> = client + .get(&ReadPolicy::default(), &key, Bins::All) + .await + .unwrap(); + + let bins = res.bins; + + assert_eq!(bins.get("int32"), None, "Derive Bin renaming failed"); + assert_eq!( + bins.get("renamed_int32"), + Some(&as_val!(65521)), + "Derive Bin renaming failed" + ); + + assert_eq!(bins.get("no_value"), None, "Derive Bin empty Option failed"); + assert_eq!( + bins.get("has_value"), + Some(&as_val!(12345)), + "Derive Bin filled Option failed" + ); + + assert_eq!( + bins.get("default_value"), + Some(&as_val!("test")), + "Derive Bin default value failed" + ); + + assert_eq!(bins.get("no_write"), None, "Derive Bin skipping failed"); + + assert_eq!( + bins.get("uint16"), + Some(&as_val!(7)), + "Derive Bin encoding failed for uint16" + ); + assert_eq!( + bins.get("string"), + Some(&as_val!("string")), + "Derive Bin encoding failed for string" + ); + assert_eq!( + bins.get("refstr"), + Some(&as_val!("str")), + "Derive Bin encoding failed for refstr" + ); + + assert_eq!( + bins.get("test").is_some(), + true, + "Derive Bin encoding failed for cdt map" + ); + + assert_eq!( + bins.get("list"), + Some(&Value::List(Vec::from([ + as_val!("test1"), + as_val!("test2") + ]))), + "Derive Bin encoding for list failed" + ); + + if let Some(bin) = bins.get("test") { + match bin { + Value::HashMap(m) => { + assert_eq!( + m.get(&as_val!("string")), + Some(&as_val!("asd")), + "Derive Value encoding failed for string" + ); + assert_eq!( + m.get(&as_val!("no_val_cdt")), + None, + "Derive Value encoding failed for no_val_cdt" + ); + assert_eq!( + m.get(&as_val!("default_val_cdt")), + Some(&as_val!(1.231f64)), + "Derive Value encoding failed for default_val_cdt" + ); + assert_eq!( + m.get(&as_val!("int32")), + Some(&as_val!(1234)), + "Derive Value encoding failed for renamed int" + ); + assert_eq!( + m.get(&as_val!("has_val_cdt")), + Some(&as_val!(123456)), + "Derive Value encoding failed for has_val_cdt" + ); + assert_eq!( + m.get(&as_val!("skipped_bool")), + None, + "Derive Value encoding failed for skipped_bool" + ); + } + _ => panic!("Derive Bin encoding for map returned wrong type"), + } + } else { + panic!("Derive Bin encoding for map undefined") + } +} + 
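+// Recap of the #[aerospike(...)] attributes exercised by derive_writable above (illustrative sketch only; +// the field and literal names below are made up, the behavior mirrors the assertions in that test): +// +// #[derive(WritableBins)] +// struct Sketch { +//     #[aerospike(rename = "short")] renamed: i32,             // stored under the bin name "short" instead of the field name +//     #[aerospike(skip)] ignored: i32,                         // never written ("ignore" is accepted as an alias) +//     #[aerospike(default = "fallback")] opt: Option<String>,  // writes the literal "fallback" when the field is None +// } +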
+#[aerospike_macro::test] +async fn derive_readable() { + let client = common::client().await; + let namespace: &str = common::namespace(); + let set_name = &common::rand_str(10); + let key = as_key!(namespace, set_name, "derive_struct"); + + #[derive(ReadableValue, WritableValue, Clone, Debug)] + struct TestValue { + string: String, + int: i64, + float: f64, + option: Option, + list: Vec, + list_i: Vec, + no_val: Option, + map: HashMap, + nested_list: Vec>, + } + + #[derive(ReadableBins, WritableBins, Clone, Debug)] + struct TestData { + string: String, + int: i64, + float: f64, + option: Option, + list: Vec, + list_i: Vec, + no_val: Option, + nested_list: Vec>, + test: TestValue, + map: HashMap + } + + let tv = TestValue { + string: "eeeeeeee".to_string(), + int: 56789, + float: 567.765, + option: Some("ffffff".to_string()), + list_i: vec![5, 2, 1, 11, 15], + list: vec!["eee".to_string(), "fff".to_string(), "ggg".to_string()], + no_val: None, + map: HashMap::from([("asdasd".to_string(), 1), ("sdfsdf".to_string(), 1234567890)]), + nested_list: vec![vec![3, 2, 1], vec![6, 5, 4]], + }; + + let write_data = TestData { + string: "asdfsd".to_string(), + int: 1234, + float: 123.456, + option: Some("asd".to_string()), + list_i: vec![1, 5, 8, 9, 15], + list: vec!["asd".to_string(), "ase".to_string(), "asf".to_string()], + no_val: None, + nested_list: vec![vec![1, 2, 3], vec![4, 5, 6]], + test: tv.clone(), + map: HashMap::from([("testasd".to_string(), 1), ("testfgh".to_string(), 1234567890)]) + }; + + let res = client.put(&WritePolicy::default(), &key, &write_data).await; + let res: aerospike::errors::Result> = + client.get(&ReadPolicy::default(), &key, Bins::All).await; + println!("{:?}", res); + assert_eq!(res.is_ok(), true, "Aerospike derive reader failed"); + let res = res.unwrap().bins; + assert_eq!( + res.string, "asdfsd", + "Aerospike derive reader failed for String" + ); + assert_eq!(res.int, 1234, "Aerospike derive reader failed for Int"); + assert_eq!( + res.float, 123.456, + "Aerospike derive reader failed for Float" + ); + assert_eq!( + res.option, + Some("asd".to_string()), + "Aerospike derive reader failed for Option Some" + ); + assert_eq!( + res.no_val, None, + "Aerospike derive reader failed for Option None" + ); + assert_eq!( + res.list_i, + vec![1, 5, 8, 9, 15], + "Aerospike derive reader failed for Int List" + ); + assert_eq!( + res.nested_list, + vec![vec![1, 2, 3], vec![4, 5, 6]], + "Aerospike derive reader failed for Nested List" + ); + assert_eq!( + res.list, + vec!["asd".to_string(), "ase".to_string(), "asf".to_string()], + "Aerospike derive reader failed for String List" + ); + assert_eq!( + res.test.string, tv.string, + "Aerospike derive reader failed for ReadableValue string" + ); + assert_eq!( + res.test.int, tv.int, + "Aerospike derive reader failed for ReadableValue int" + ); +} diff --git a/tests/src/exp.rs b/tests/src/exp.rs index 3a216138..1cfcda99 100644 --- a/tests/src/exp.rs +++ b/tests/src/exp.rs @@ -1,3 +1,4 @@ +use std::collections::HashMap; // Copyright 2015-2020 Aerospike, Inc. // // Portions may be licensed to Aerospike, Inc. 
under one or more contributor @@ -16,8 +17,9 @@ use crate::common; use env_logger; use aerospike::expressions::*; -use aerospike::ParticleType; use aerospike::*; +use aerospike::{ParticleType, Record}; +use aerospike_core::Recordset; use std::sync::Arc; const EXPECTED: usize = 100; @@ -646,11 +648,11 @@ async fn expression_commands() { // GET let key = as_key!(namespace, &set_name, 35); rpolicy.filter_expression = Some(eq(int_bin("bin".to_string()), int_val(15))); - let test = client.get(&rpolicy, &key, Bins::All).await; + let test: Result>> = client.get(&rpolicy, &key, Bins::All).await; assert_eq!(test.is_err(), true, "GET Err Test Failed"); rpolicy.filter_expression = Some(eq(int_bin("bin".to_string()), int_val(35))); - let test = client.get(&rpolicy, &key, Bins::All).await; + let test: Result>> = client.get(&rpolicy, &key, Bins::All).await; assert_eq!(test.is_ok(), true, "GET Ok Test Failed"); // EXISTS @@ -703,7 +705,10 @@ async fn expression_commands() { // SCAN spolicy.filter_expression = Some(eq(int_bin("bin".to_string()), int_val(75))); - match client.scan(&spolicy, namespace, &set_name, Bins::All).await { + + let res: aerospike::Result>>> = + client.scan(&spolicy, namespace, &set_name, Bins::All).await; + match res { Ok(records) => { let mut count = 0; for record in &*records { @@ -723,12 +728,16 @@ async fn expression_commands() { let key = as_key!(namespace, &set_name, 85); wpolicy.filter_expression = Some(eq(int_bin("bin".to_string()), int_val(15))); - let op = client.operate(&wpolicy, &key, &ops).await; + let op = client + .operate::>(&wpolicy, &key, &ops) + .await; assert_eq!(op.is_err(), true, "OPERATE Err Test Failed"); let key = as_key!(namespace, &set_name, 85); wpolicy.filter_expression = Some(eq(int_bin("bin".to_string()), int_val(85))); - let op = client.operate(&wpolicy, &key, &ops).await; + let op = client + .operate::>(&wpolicy, &key, &ops) + .await; assert_eq!(op.is_ok(), true, "OPERATE Ok Test Failed"); // BATCH GET @@ -736,7 +745,7 @@ async fn expression_commands() { let b = Bins::from(["bin"]); for i in 85..90 { let key = as_key!(namespace, &set_name, i); - batch_reads.push(BatchRead::new(key, b.clone())); + batch_reads.push(BatchRead::>::new(key, b.clone())); } bpolicy.filter_expression = Some(gt(int_bin("bin".to_string()), int_val(84))); match client.batch_get(&bpolicy, batch_reads).await { @@ -756,7 +765,11 @@ async fn expression_commands() { client.close().await.unwrap(); } -async fn test_filter(client: &Client, filter: FilterExpression, set_name: &str) -> Arc { +async fn test_filter( + client: &Client, + filter: FilterExpression, + set_name: &str, +) -> Arc>> { let namespace = common::namespace(); let mut qpolicy = QueryPolicy::default(); @@ -766,7 +779,7 @@ async fn test_filter(client: &Client, filter: FilterExpression, set_name: &str) client.query(&qpolicy, statement).await.unwrap() } -fn count_results(rs: Arc) -> usize { +fn count_results(rs: Arc>>) -> usize { let mut count = 0; for res in &*rs { diff --git a/tests/src/exp_bitwise.rs b/tests/src/exp_bitwise.rs index 990384ae..f7faa6d3 100644 --- a/tests/src/exp_bitwise.rs +++ b/tests/src/exp_bitwise.rs @@ -1,3 +1,4 @@ +use std::collections::HashMap; // Copyright 2015-2020 Aerospike, Inc. // // Portions may be licensed to Aerospike, Inc. 
under one or more contributor @@ -46,7 +47,7 @@ async fn expression_bitwise() { let set_name = create_test_set(&client, EXPECTED).await; // EQ - let rs = test_filter( + let rs: Arc>> = test_filter( &client, eq( count(int_val(0), int_val(16), blob_bin("bin".to_string())), @@ -58,7 +59,7 @@ async fn expression_bitwise() { let item_count = count_results(rs); assert_eq!(item_count, 100, "COUNT Test Failed"); - let rs = test_filter( + let rs: Arc>> = test_filter( &client, eq( count( @@ -79,7 +80,7 @@ async fn expression_bitwise() { let item_count = count_results(rs); assert_eq!(item_count, 100, "RESIZE Test Failed"); - let rs = test_filter( + let rs: Arc>> = test_filter( &client, eq( count( @@ -100,7 +101,7 @@ async fn expression_bitwise() { let item_count = count_results(rs); assert_eq!(item_count, 100, "INSERT Test Failed"); - let rs = test_filter( + let rs: Arc>> = test_filter( &client, eq( count( @@ -121,7 +122,7 @@ async fn expression_bitwise() { let item_count = count_results(rs); assert_eq!(item_count, 100, "REMOVE Test Failed"); - let rs = test_filter( + let rs: Arc>> = test_filter( &client, eq( count( @@ -143,7 +144,7 @@ async fn expression_bitwise() { let item_count = count_results(rs); assert_eq!(item_count, 100, "SET Test Failed"); - let rs = test_filter( + let rs: Arc>> = test_filter( &client, eq( count( @@ -165,7 +166,7 @@ async fn expression_bitwise() { let item_count = count_results(rs); assert_eq!(item_count, 100, "OR Test Failed"); - let rs = test_filter( + let rs: Arc>> = test_filter( &client, eq( count( @@ -187,7 +188,7 @@ async fn expression_bitwise() { let item_count = count_results(rs); assert_eq!(item_count, 100, "XOR Test Failed"); - let rs = test_filter( + let rs: Arc>> = test_filter( &client, eq( count( @@ -209,7 +210,7 @@ async fn expression_bitwise() { let item_count = count_results(rs); assert_eq!(item_count, 100, "AND Test Failed"); - let rs = test_filter( + let rs: Arc>> = test_filter( &client, eq( count( @@ -230,7 +231,7 @@ async fn expression_bitwise() { let item_count = count_results(rs); assert_eq!(item_count, 100, "NOT Test Failed"); - let rs = test_filter( + let rs: Arc>> = test_filter( &client, eq( count( @@ -252,7 +253,7 @@ async fn expression_bitwise() { let item_count = count_results(rs); assert_eq!(item_count, 100, "LSHIFT Test Failed"); - let rs = test_filter( + let rs: Arc>> = test_filter( &client, eq( count( @@ -274,7 +275,7 @@ async fn expression_bitwise() { let item_count = count_results(rs); assert_eq!(item_count, 100, "RSHIFT Test Failed"); - let rs = test_filter( + let rs: Arc>> = test_filter( &client, eq( count( @@ -298,7 +299,7 @@ async fn expression_bitwise() { let item_count = count_results(rs); assert_eq!(item_count, 100, "ADD Test Failed"); - let rs = test_filter( + let rs: Arc>> = test_filter( &client, eq( count( @@ -322,7 +323,7 @@ async fn expression_bitwise() { let item_count = count_results(rs); assert_eq!(item_count, 100, "SUBTRACT Test Failed"); - let rs = test_filter( + let rs: Arc>> = test_filter( &client, eq( count( @@ -344,7 +345,7 @@ async fn expression_bitwise() { let item_count = count_results(rs); assert_eq!(item_count, 100, "SET INT Test Failed"); - let rs = test_filter( + let rs: Arc>> = test_filter( &client, eq( get(int_val(0), int_val(8), blob_bin("bin".to_string())), @@ -356,7 +357,7 @@ async fn expression_bitwise() { let item_count = count_results(rs); assert_eq!(item_count, 100, "GET Test Failed"); - let rs = test_filter( + let rs: Arc>> = test_filter( &client, eq( lscan( @@ -373,7 +374,7 @@ async fn 
expression_bitwise() { let item_count = count_results(rs); assert_eq!(item_count, 100, "LSCAN Test Failed"); - let rs = test_filter( + let rs: Arc>> = test_filter( &client, eq( rscan( @@ -390,7 +391,7 @@ async fn expression_bitwise() { let item_count = count_results(rs); assert_eq!(item_count, 100, "RSCAN Test Failed"); - let rs = test_filter( + let rs: Arc>> = test_filter( &client, eq( get_int(int_val(0), int_val(8), false, blob_bin("bin".to_string())), @@ -405,7 +406,11 @@ async fn expression_bitwise() { client.close().await.unwrap(); } -async fn test_filter(client: &Client, filter: FilterExpression, set_name: &str) -> Arc { +async fn test_filter( + client: &Client, + filter: FilterExpression, + set_name: &str, +) -> Arc>> { let namespace = common::namespace(); let mut qpolicy = QueryPolicy::default(); @@ -415,7 +420,7 @@ async fn test_filter(client: &Client, filter: FilterExpression, set_name: &str) client.query(&qpolicy, statement).await.unwrap() } -fn count_results(rs: Arc) -> usize { +fn count_results(rs: Arc>>) -> usize { let mut count = 0; for res in &*rs { diff --git a/tests/src/exp_hll.rs b/tests/src/exp_hll.rs index ad7aeb0c..dffef929 100644 --- a/tests/src/exp_hll.rs +++ b/tests/src/exp_hll.rs @@ -15,6 +15,7 @@ use crate::common; use env_logger; +use std::collections::HashMap; use aerospike::expressions::hll::*; use aerospike::expressions::lists::*; @@ -57,7 +58,7 @@ async fn create_test_set(client: &Client, no_records: usize) -> String { ), ]; client - .operate(&WritePolicy::default(), &key, &ops) + .operate::>(&WritePolicy::default(), &key, &ops) .await .unwrap(); } @@ -70,7 +71,7 @@ async fn expression_hll() { let client = common::client().await; let set_name = create_test_set(&client, EXPECTED).await; - let rs = test_filter( + let rs: Arc>> = test_filter( &client, eq( get_count(add_with_index_and_min_hash( @@ -88,7 +89,7 @@ async fn expression_hll() { let count = count_results(rs); assert_eq!(count, 99, "HLL INIT Test Failed"); - let rs = test_filter( + let rs: Arc>> = test_filter( &client, eq( may_contain( @@ -103,7 +104,7 @@ async fn expression_hll() { let count = count_results(rs); assert_eq!(count, 1, "HLL MAY CONTAIN Test Failed"); - let rs = test_filter( + let rs: Arc>> = test_filter( &client, lt( get_by_index( @@ -121,7 +122,7 @@ async fn expression_hll() { let count = count_results(rs); assert_eq!(count, 100, "HLL DESCRIBE Test Failed"); - let rs = test_filter( + let rs: Arc>> = test_filter( &client, eq( get_count(get_union( @@ -136,7 +137,7 @@ async fn expression_hll() { let count = count_results(rs); assert_eq!(count, 98, "HLL GET UNION Test Failed"); - let rs = test_filter( + let rs: Arc>> = test_filter( &client, eq( get_union_count( @@ -151,7 +152,7 @@ async fn expression_hll() { let count = count_results(rs); assert_eq!(count, 98, "HLL GET UNION COUNT Test Failed"); - let rs = test_filter( + let rs: Arc>> = test_filter( &client, eq( get_intersect_count( @@ -166,7 +167,7 @@ async fn expression_hll() { let count = count_results(rs); assert_eq!(count, 99, "HLL GET INTERSECT COUNT Test Failed"); - let rs = test_filter( + let rs: Arc>> = test_filter( &client, gt( get_similarity( @@ -184,7 +185,11 @@ async fn expression_hll() { client.close().await.unwrap(); } -async fn test_filter(client: &Client, filter: FilterExpression, set_name: &str) -> Arc { +async fn test_filter( + client: &Client, + filter: FilterExpression, + set_name: &str, +) -> Arc>> { let namespace = common::namespace(); let mut qpolicy = QueryPolicy::default(); @@ -194,7 +199,7 @@ async fn 
test_filter(client: &Client, filter: FilterExpression, set_name: &str) client.query(&qpolicy, statement).await.unwrap() } -fn count_results(rs: Arc) -> usize { +fn count_results(rs: Arc>>) -> usize { let mut count = 0; for res in &*rs { diff --git a/tests/src/exp_list.rs b/tests/src/exp_list.rs index ae83d794..f2866a0e 100644 --- a/tests/src/exp_list.rs +++ b/tests/src/exp_list.rs @@ -1,5 +1,6 @@ use crate::common; use env_logger; +use std::collections::HashMap; use aerospike::expressions::lists::*; use aerospike::expressions::*; @@ -558,7 +559,11 @@ async fn expression_list() { client.close().await.unwrap(); } -async fn test_filter(client: &Client, filter: FilterExpression, set_name: &str) -> Arc { +async fn test_filter( + client: &Client, + filter: FilterExpression, + set_name: &str, +) -> Arc>> { let namespace = common::namespace(); let mut qpolicy = QueryPolicy::default(); @@ -568,7 +573,7 @@ async fn test_filter(client: &Client, filter: FilterExpression, set_name: &str) client.query(&qpolicy, statement).await.unwrap() } -fn count_results(rs: Arc) -> usize { +fn count_results(rs: Arc>>) -> usize { let mut count = 0; for res in &*rs { diff --git a/tests/src/exp_map.rs b/tests/src/exp_map.rs index fbbf626a..d8513056 100644 --- a/tests/src/exp_map.rs +++ b/tests/src/exp_map.rs @@ -658,7 +658,11 @@ fn expression_map() { client.close().await.unwrap(); } -async fn test_filter(client: &Client, filter: FilterExpression, set_name: &str) -> Arc { +async fn test_filter( + client: &Client, + filter: FilterExpression, + set_name: &str, +) -> Arc>> { let namespace = common::namespace(); let mut qpolicy = QueryPolicy::default(); @@ -668,7 +672,7 @@ async fn test_filter(client: &Client, filter: FilterExpression, set_name: &str) client.query(&qpolicy, statement).await.unwrap() } -fn count_results(rs: Arc) -> usize { +fn count_results(rs: Arc>>) -> usize { let mut count = 0; for res in &*rs { diff --git a/tests/src/exp_op.rs b/tests/src/exp_op.rs index 918c4196..4f01d98c 100644 --- a/tests/src/exp_op.rs +++ b/tests/src/exp_op.rs @@ -1,7 +1,8 @@ use crate::common; use aerospike::expressions::{int_bin, int_val, num_add}; use aerospike::operations::exp::{read_exp, write_exp, ExpReadFlags, ExpWriteFlags}; -use aerospike::{as_bin, as_key, as_val, Bins, ReadPolicy, WritePolicy}; +use aerospike::{as_bin, as_key, as_val, Bins, ReadPolicy, Record, Value, WritePolicy}; +use std::collections::HashMap; #[aerospike_macro::test] async fn exp_ops() { @@ -21,7 +22,7 @@ async fn exp_ops() { client.delete(&wpolicy, &key).await.unwrap(); client.put(&wpolicy, &key, &bins).await.unwrap(); - let rec = client.get(&policy, &key, Bins::All).await.unwrap(); + let rec: Record> = client.get(&policy, &key, Bins::All).await.unwrap(); assert_eq!( *rec.bins.get("bin").unwrap(), as_val!(25), @@ -29,7 +30,9 @@ async fn exp_ops() { ); let flt = num_add(vec![int_bin("bin".to_string()), int_val(4)]); let ops = &vec![read_exp("example", &flt, ExpReadFlags::Default)]; - let rec = client.operate(&wpolicy, &key, ops).await; + let rec = client + .operate::>(&wpolicy, &key, ops) + .await; let rec = rec.unwrap(); assert_eq!( @@ -44,7 +47,9 @@ async fn exp_ops() { read_exp("example", &flt2, ExpReadFlags::Default), ]; - let rec = client.operate(&wpolicy, &key, ops).await; + let rec = client + .operate::>(&wpolicy, &key, ops) + .await; let rec = rec.unwrap(); assert_eq!( diff --git a/tests/src/hll.rs b/tests/src/hll.rs index a7ad14dd..21e099ac 100644 --- a/tests/src/hll.rs +++ b/tests/src/hll.rs @@ -1,9 +1,11 @@ use crate::common; use env_logger; 
+use std::collections::HashMap; use aerospike::operations::hll; use aerospike::operations::hll::HLLPolicy; use aerospike::{as_key, as_list, as_val, Bins, FloatValue, ReadPolicy, Value, WritePolicy}; +use aerospike_core::Record; #[aerospike_macro::test] async fn hll() { @@ -20,11 +22,17 @@ async fn hll() { let rpolicy = ReadPolicy::default(); let ops = &vec![hll::init(&hpolicy, "bin", 4)]; - client.operate(&wpolicy, &key, ops).await.unwrap(); + client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); let v = vec![Value::from("asd123")]; let ops = &vec![hll::add(&hpolicy, "bin", &v)]; - let rec = client.operate(&wpolicy, &key, ops).await.unwrap(); + let rec = client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); assert_eq!( *rec.bins.get("bin").unwrap(), Value::Int(1), @@ -32,7 +40,10 @@ async fn hll() { ); let ops = &vec![hll::get_count("bin")]; - let rec = client.operate(&wpolicy, &key, ops).await.unwrap(); + let rec = client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); assert_eq!( *rec.bins.get("bin").unwrap(), Value::Int(1), @@ -40,14 +51,23 @@ async fn hll() { ); let ops = &vec![hll::init_with_min_hash(&hpolicy, "bin2", 8, 0)]; - client.operate(&wpolicy, &key, ops).await.unwrap(); + client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); let ops = &vec![hll::fold("bin2", 6)]; - client.operate(&wpolicy, &key, ops).await.unwrap(); + client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); let v2 = vec![Value::from("123asd")]; let ops = &vec![hll::add(&hpolicy, "bin2", &v2)]; - let rec = client.operate(&wpolicy, &key, ops).await.unwrap(); + let rec = client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); assert_eq!( *rec.bins.get("bin2").unwrap(), Value::Int(1), @@ -55,21 +75,27 @@ async fn hll() { ); let ops = &vec![hll::describe("bin")]; - let rec = client.operate(&wpolicy, &key, ops).await.unwrap(); + let rec = client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); assert_eq!( *rec.bins.get("bin").unwrap(), as_list!(4, 0), "Index bits did not match" ); - let rec = client + let rec: Record> = client .get(&rpolicy, &key, Bins::from(["bin2"])) .await .unwrap(); let bin2val = vec![rec.bins.get("bin2").unwrap().clone()]; let ops = &vec![hll::get_intersect_count("bin", &bin2val)]; - let rec = client.operate(&wpolicy, &key, ops).await.unwrap(); + let rec = client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); assert_eq!( *rec.bins.get("bin").unwrap(), Value::from(0), @@ -77,7 +103,10 @@ async fn hll() { ); let ops = &vec![hll::get_union_count("bin", &bin2val)]; - let rec = client.operate(&wpolicy, &key, ops).await.unwrap(); + let rec = client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); assert_eq!( *rec.bins.get("bin").unwrap(), Value::from(2), @@ -85,14 +114,20 @@ async fn hll() { ); let ops = &vec![hll::get_union("bin", &bin2val)]; - let rec = client.operate(&wpolicy, &key, ops).await.unwrap(); + let rec = client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); let val = Value::HLL(vec![ 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, ]); assert_eq!(*rec.bins.get("bin").unwrap(), val, "Union does not match"); let ops = &vec![hll::refresh_count("bin")]; - let rec = client.operate(&wpolicy, &key, ops).await.unwrap(); + let rec = client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); assert_eq!( *rec.bins.get("bin").unwrap(), Value::Int(1), @@ -103,7 +138,10 @@ async fn hll() { hll::set_union(&hpolicy, "bin", &bin2val), hll::get_count("bin"), ]; - let rec = 
client.operate(&wpolicy, &key, ops).await.unwrap(); + let rec = client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); assert_eq!( *rec.bins.get("bin").unwrap(), Value::from(2), @@ -111,7 +149,10 @@ async fn hll() { ); let ops = &vec![hll::get_similarity("bin", &bin2val)]; - let rec = client.operate(&wpolicy, &key, ops).await.unwrap(); + let rec = client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); assert_eq!( *rec.bins.get("bin").unwrap(), Value::Float(FloatValue::F64(4602678819172646912)), diff --git a/tests/src/kv.rs b/tests/src/kv.rs index 307a0acb..e3cbf44b 100644 --- a/tests/src/kv.rs +++ b/tests/src/kv.rs @@ -1,3 +1,4 @@ +use std::collections::HashMap; // Copyright 2015-2018 Aerospike, Inc. // // Portions may be licensed to Aerospike, Inc. under one or more contributor @@ -16,6 +17,7 @@ use aerospike::operations; use aerospike::{ as_bin, as_blob, as_geo, as_key, as_list, as_map, as_val, Bins, ReadPolicy, Value, WritePolicy, }; +use aerospike_core::Record; use env_logger; use crate::common; @@ -52,7 +54,8 @@ async fn connect() { ]; client.put(&wpolicy, &key, &bins).await.unwrap(); - let record = client.get(&policy, &key, Bins::All).await.unwrap(); + let record: Record> = + client.get(&policy, &key, Bins::All).await.unwrap(); let bins = record.bins; assert_eq!(bins.len(), 8); assert_eq!(bins.get("bin bool"), Some(&Value::Bool(true))); @@ -81,10 +84,11 @@ async fn connect() { client.touch(&wpolicy, &key).await.unwrap(); let bins = Bins::from(["bin999", "bin f64"]); - let record = client.get(&policy, &key, bins).await.unwrap(); + let record: Record> = client.get(&policy, &key, bins).await.unwrap(); assert_eq!(record.bins.len(), 2); - let record = client.get(&policy, &key, Bins::None).await.unwrap(); + let record: Record> = + client.get(&policy, &key, Bins::None).await.unwrap(); assert_eq!(record.bins.len(), 0); let exists = client.exists(&wpolicy, &key).await.unwrap(); @@ -92,7 +96,10 @@ async fn connect() { let bin = as_bin!("bin999", "test string"); let ops = &vec![operations::put(&bin), operations::get()]; - client.operate(&wpolicy, &key, ops).await.unwrap(); + client + .operate::>(&wpolicy, &key, ops) + .await + .unwrap(); let existed = client.delete(&wpolicy, &key).await.unwrap(); assert!(existed); diff --git a/tests/src/mod.rs b/tests/src/mod.rs index 1d3765d9..2a68dea7 100644 --- a/tests/src/mod.rs +++ b/tests/src/mod.rs @@ -17,6 +17,7 @@ mod batch; mod cdt_bitwise; mod cdt_list; mod cdt_map; +mod derive; mod exp; mod exp_bitwise; mod exp_hll; diff --git a/tests/src/query.rs b/tests/src/query.rs index 8ac79586..da35b0f0 100644 --- a/tests/src/query.rs +++ b/tests/src/query.rs @@ -13,6 +13,7 @@ // License for the specific language governing permissions and limitations under // the License. 
+use std::collections::HashMap; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; use std::thread; @@ -65,7 +66,8 @@ async fn query_single_consumer() { // Filter Query let mut statement = Statement::new(namespace, &set_name, Bins::All); statement.add_filter(as_eq!("bin", 1)); - let rs = client.query(&qpolicy, statement).await.unwrap(); + let rs: Arc>> = + client.query(&qpolicy, statement).await.unwrap(); let mut count = 0; for res in &*rs { match res { @@ -81,7 +83,8 @@ async fn query_single_consumer() { // Range Query let mut statement = Statement::new(namespace, &set_name, Bins::All); statement.add_filter(as_range!("bin", 0, 9)); - let rs = client.query(&qpolicy, statement).await.unwrap(); + let rs: Arc>> = + client.query(&qpolicy, statement).await.unwrap(); let mut count = 0; for res in &*rs { match res { @@ -110,7 +113,8 @@ async fn query_nobins() { let mut statement = Statement::new(namespace, &set_name, Bins::None); statement.add_filter(as_range!("bin", 0, 9)); - let rs = client.query(&qpolicy, statement).await.unwrap(); + let rs: Arc>> = + client.query(&qpolicy, statement).await.unwrap(); let mut count = 0; for res in &*rs { match res { @@ -140,7 +144,8 @@ async fn query_multi_consumer() { let mut statement = Statement::new(namespace, &set_name, Bins::All); let f = as_range!("bin", 0, 9); statement.add_filter(f); - let rs = client.query(&qpolicy, statement).await.unwrap(); + let rs: Arc>> = + client.query(&qpolicy, statement).await.unwrap(); let count = Arc::new(AtomicUsize::new(0)); let mut threads = vec![]; @@ -191,7 +196,8 @@ async fn query_node() { let qpolicy = QueryPolicy::default(); let mut statement = Statement::new(namespace, &set_name, Bins::All); statement.add_filter(as_range!("bin", 0, 99)); - let rs = client.query_node(&qpolicy, node, statement).await.unwrap(); + let rs: Arc>> = + client.query_node(&qpolicy, node, statement).await.unwrap(); let ok = (&*rs).filter(Result::is_ok).count(); count.fetch_add(ok, Ordering::Relaxed); })); @@ -230,7 +236,8 @@ async fn query_large_i64() { .filter_expression .replace(aerospike::expressions::eq(bin_name, bin_val)); let stmt = aerospike::Statement::new(common::namespace(), SET, aerospike::Bins::All); - let recordset = client.query(&qpolicy, stmt).await.unwrap(); + let recordset: Arc>> = + client.query(&qpolicy, stmt).await.unwrap(); for r in &*recordset { assert!(r.is_ok()); diff --git a/tests/src/scan.rs b/tests/src/scan.rs index 114af5d5..832d8060 100644 --- a/tests/src/scan.rs +++ b/tests/src/scan.rs @@ -13,6 +13,7 @@ // License for the specific language governing permissions and limitations under // the License. 
+use std::collections::HashMap; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; use std::thread; @@ -49,7 +50,7 @@ async fn scan_single_consumer() { let set_name = create_test_set(&client, EXPECTED).await; let spolicy = ScanPolicy::default(); - let rs = client + let rs: Arc<Recordset<HashMap<String, Value>>> = client .scan(&spolicy, namespace, &set_name, Bins::All) .await .unwrap(); @@ -70,7 +71,7 @@ async fn scan_multi_consumer() { let mut spolicy = ScanPolicy::default(); spolicy.record_queue_size = 4096; - let rs = client + let rs: Arc<Recordset<HashMap<String, Value>>> = client .scan(&spolicy, namespace, &set_name, Bins::All) .await .unwrap(); @@ -113,7 +114,7 @@ async fn scan_node() { let set_name = set_name.clone(); threads.push(aerospike_rt::spawn(async move { let spolicy = ScanPolicy::default(); - let rs = client + let rs: Arc<Recordset<HashMap<String, Value>>> = client .scan_node(&spolicy, node, namespace, &set_name, Bins::All) .await .unwrap(); diff --git a/tests/src/serialization.rs b/tests/src/serialization.rs index de5dd129..641a6eaf 100644 --- a/tests/src/serialization.rs +++ b/tests/src/serialization.rs @@ -1,3 +1,4 @@ +use std::collections::HashMap; // Copyright 2015-2018 Aerospike, Inc. // // Portions may be licensed to Aerospike, Inc. under one or more contributor @@ -13,7 +14,8 @@ // License for the specific language governing permissions and limitations under // the License. use aerospike::{ - as_bin, as_blob, as_geo, as_key, as_list, as_map, as_val, Bins, ReadPolicy, WritePolicy, + as_bin, as_blob, as_geo, as_key, as_list, as_map, as_val, Bins, ReadPolicy, Record, Value, + WritePolicy, }; use env_logger; @@ -50,7 +52,8 @@ async fn serialize() { ]; client.put(&wpolicy, &key, &bins).await.unwrap(); - let record = client.get(&policy, &key, Bins::All).await.unwrap(); + let record: Record<HashMap<String, Value>> = + client.get(&policy, &key, Bins::All).await.unwrap(); let json = serde_json::to_string(&record); if json.is_err() {