From 96a0cf5af7574476be23ae49bbb4502057cf1467 Mon Sep 17 00:00:00 2001 From: "zeli.lwb" Date: Mon, 5 Jun 2023 17:24:03 +0800 Subject: [PATCH 1/9] [Feat] async client --- src/client/mod.rs | 24 +- src/client/query.rs | 109 ++-- src/client/table.rs | 117 ++--- src/client/table_client.rs | 182 +++---- src/lib.rs | 4 +- src/rpc/conn_pool.rs | 103 ++-- src/rpc/mod.rs | 131 +++-- src/rpc/proxy.rs | 10 +- tests/test_cse_table.rs | 244 +++++---- tests/test_hbase_client.rs | 189 ++++--- tests/test_table_client.rs | 124 +++-- tests/test_table_client_base.rs | 722 +++++++++++++++----------- tests/test_table_client_hash.rs | 22 +- tests/test_table_client_key.rs | 286 +++++----- tests/test_table_client_range.rs | 125 +++-- tests/test_table_client_sql.rs | 105 ++-- ycsb-rs/Makefile | 2 +- ycsb-rs/src/db.rs | 3 +- ycsb-rs/src/main.rs | 34 +- ycsb-rs/src/obkv_client.rs | 24 +- ycsb-rs/src/properties.rs | 6 + ycsb-rs/src/runtime.rs | 45 ++ ycsb-rs/src/workload.rs | 6 +- ycsb-rs/src/workload/core_workload.rs | 72 +-- ycsb-rs/workloads/workload_obkv.toml | 4 +- 25 files changed, 1503 insertions(+), 1190 deletions(-) create mode 100644 ycsb-rs/src/runtime.rs diff --git a/src/client/mod.rs b/src/client/mod.rs index 7283ddd..6bc303c 100644 --- a/src/client/mod.rs +++ b/src/client/mod.rs @@ -37,7 +37,7 @@ pub enum TableOpResult { pub trait Table { /// Insert a record - fn insert( + async fn insert( &self, table_name: &str, row_keys: Vec, @@ -46,7 +46,7 @@ pub trait Table { ) -> Result; /// Update a record - fn update( + async fn update( &self, table_name: &str, row_keys: Vec, @@ -56,7 +56,7 @@ pub trait Table { /// Insert or update a record, if the record exists, update it. /// Otherwise insert a new one. - fn insert_or_update( + async fn insert_or_update( &self, table_name: &str, row_keys: Vec, @@ -65,7 +65,7 @@ pub trait Table { ) -> Result; /// Replace a record. 
- fn replace( + async fn replace( &self, table_name: &str, row_keys: Vec, @@ -74,7 +74,7 @@ pub trait Table { ) -> Result; /// Append - fn append( + async fn append( &self, table_name: &str, row_keys: Vec, @@ -83,7 +83,7 @@ pub trait Table { ) -> Result; /// Increment - fn increment( + async fn increment( &self, table_name: &str, row_keys: Vec, @@ -92,10 +92,10 @@ pub trait Table { ) -> Result; /// Delete records by row keys. - fn delete(&self, table_name: &str, row_keys: Vec) -> Result; + async fn delete(&self, table_name: &str, row_keys: Vec) -> Result; /// Retrieve a record by row keys. - fn get( + async fn get( &self, table_name: &str, row_keys: Vec, @@ -105,7 +105,7 @@ pub trait Table { /// Create a batch operation fn batch_operation(&self, ops_num_hint: usize) -> ObTableBatchOperation; // Execute a batch operation - fn execute_batch( + async fn execute_batch( &self, table_name: &str, batch_op: ObTableBatchOperation, @@ -152,9 +152,11 @@ pub struct ClientConfig { pub max_conns_per_server: usize, pub min_idle_conns_per_server: usize, - pub conn_init_thread_num: usize, pub query_concurrency_limit: Option, + pub batch_op_thread_num: usize, + pub query_thread_num: usize, + pub conn_init_thread_num: usize, pub conn_reader_thread_num: usize, pub conn_writer_thread_num: usize, pub default_thread_num: usize, @@ -205,6 +207,8 @@ impl Default for ClientConfig { conn_init_thread_num: 2, query_concurrency_limit: None, + batch_op_thread_num: 2, + query_thread_num: 2, conn_reader_thread_num: 4, conn_writer_thread_num: 2, default_thread_num: 2, diff --git a/src/client/query.rs b/src/client/query.rs index 8a4d0a0..9bf2d09 100644 --- a/src/client/query.rs +++ b/src/client/query.rs @@ -25,7 +25,7 @@ use std::{ /// Query API for ob table use super::ObTable; use crate::{ - client::table_client::OBKV_CLIENT_METRICS, + client::table_client::{ObTableClientStreamQuerier, OBKV_CLIENT_METRICS}, error::{CommonErrCode, Error::Common as CommonErr, Result}, rpc::protocol::{ 
payloads::ObTableEntityType, @@ -35,6 +35,7 @@ use crate::{ }, DEFAULT_FLAG, }, + runtime::RuntimeRef, serde_obkv::value::Value, }; @@ -43,25 +44,27 @@ use crate::{ const ZERO_TIMEOUT_MS: Duration = Duration::from_millis(0); pub trait StreamQuerier { - fn execute_query( + async fn execute_query( &self, result: &mut QueryStreamResult, part_id_and_table: (i64, Arc), payload: &mut ObTableQueryRequest, ) -> Result; - fn execute_stream( + async fn execute_stream( &self, result: &mut QueryStreamResult, part_id_and_table: (i64, Arc), payload: &mut ObTableStreamRequest, ) -> Result; + + fn get_runtime(&self) -> RuntimeRef; } type PartitionQueryResultDeque = VecDeque<((i64, Arc), ObTableQueryResult)>; pub struct QueryStreamResult { - querier: Arc, + querier: Arc, initialized: bool, eof: bool, closed: bool, @@ -87,7 +90,7 @@ impl fmt::Debug for QueryStreamResult { } impl QueryStreamResult { - pub fn new(querier: Arc, table_query: ObTableQuery) -> Self { + pub fn new(querier: Arc, table_query: ObTableQuery) -> Self { Self { querier, initialized: false, @@ -106,7 +109,10 @@ impl QueryStreamResult { } } - fn refer_to_new_partition(&mut self, (part_id, ob_table): (i64, Arc)) -> Result { + async fn refer_to_new_partition( + &mut self, + (part_id, ob_table): (i64, Arc), + ) -> Result { let mut req = ObTableQueryRequest::new( &self.table_name, part_id, @@ -120,16 +126,17 @@ impl QueryStreamResult { let result = self .querier .clone() - .execute_query(self, (part_id, ob_table), &mut req); + .execute_query(self, (part_id, ob_table), &mut req) + .await; if result.is_err() { - self.close_eagerly("err"); + self.close_eagerly("err").await; } result } - fn refer_to_last_stream_result( + async fn refer_to_last_stream_result( &mut self, (part_id, ob_table): (i64, Arc), last_result: &ObTableQueryResult, @@ -144,10 +151,11 @@ impl QueryStreamResult { let result = self .querier .clone() - .execute_stream(self, (part_id, ob_table), &mut req); + .execute_stream(self, (part_id, ob_table), &mut 
req) + .await; if result.is_err() { - self.close_eagerly("err"); + self.close_eagerly("err").await; } result @@ -192,16 +200,16 @@ impl QueryStreamResult { } } - pub fn init(&mut self) -> Result<()> { + pub async fn init(&mut self) -> Result<()> { if self.initialized { return Ok(()); } if self.table_query.batch_size() == -1 { - let tuples = std::mem::take(&mut self.expectant); + let tuples = mem::take(&mut self.expectant); for (_, tuple) in tuples { - self.refer_to_new_partition(tuple)?; + self.refer_to_new_partition(tuple).await?; } } @@ -214,11 +222,10 @@ impl QueryStreamResult { self.cache_rows.len() } - pub fn close(&mut self) -> Result<()> { + pub async fn close(&mut self) -> Result<()> { if self.closed { return Ok(()); } - self.closed = true; let last_result_num = self.partition_last_result.len(); @@ -229,7 +236,7 @@ impl QueryStreamResult { None => break, Some((tuple, last_result)) => { if last_result.is_stream() && last_result.is_stream_next() { - if let Err(e) = self.close_last_stream_result(tuple, last_result) { + if let Err(e) = self.close_last_stream_result(tuple, last_result).await { debug!( "QueryStreamResult::close fail to close \ last stream result, err: {}", @@ -249,6 +256,9 @@ impl QueryStreamResult { last_result_num, loop_cnt ); } + + self.closed = true; + Ok(()) } @@ -271,7 +281,7 @@ impl QueryStreamResult { ZERO_TIMEOUT_MS } - fn close_last_stream_result( + async fn close_last_stream_result( &mut self, (part_id, ob_table): (i64, Arc), last_result: ObTableQueryResult, @@ -286,6 +296,7 @@ impl QueryStreamResult { self.querier .clone() .execute_stream(self, (part_id, ob_table), &mut req) + .await } fn pop_next_row_from_cache(&mut self) -> Result>> { @@ -294,8 +305,8 @@ impl QueryStreamResult { } #[inline] - fn close_eagerly(&mut self, tag: &str) { - if let Err(e) = self.close() { + async fn close_eagerly(&mut self, tag: &str) { + if let Err(e) = self.close().await { error!( "QueryStreamResult::close_eagerly fail to close stream result, err: {}", e 
@@ -312,7 +323,7 @@ impl QueryStreamResult { self.cache_properties.clone() } - pub fn fetch_next_row(&mut self) -> Result>> { + pub async fn fetch_next_row(&mut self) -> Result>> { if !self.initialized { return Err(CommonErr( CommonErrCode::NotInitialized, @@ -331,19 +342,21 @@ impl QueryStreamResult { )); } - //1. Found from cache. + // 1. Found from cache. if !self.cache_rows.is_empty() { return self.pop_next_row_from_cache(); } - //2. Get from the last stream request result + // 2. Get from the last stream request result loop { let last_part_result = self.partition_last_result.pop_front(); match last_part_result { None => break, Some((tuple, last_result)) => { if last_result.is_stream() && last_result.is_stream_next() { - let row_count = self.refer_to_last_stream_result(tuple, &last_result)?; + let row_count = self + .refer_to_last_stream_result(tuple, &last_result) + .await?; if row_count == 0 { continue; } @@ -353,13 +366,13 @@ impl QueryStreamResult { } } - //3. Query from new parttion + // 3. Query from new parttion let mut referred_partitions = vec![]; let mut has_next = false; for (k, tuple) in self.expectant.clone() { referred_partitions.push(k); - let row_count = self.refer_to_new_partition(tuple)?; + let row_count = self.refer_to_new_partition(tuple).await?; if row_count != 0 { has_next = true; @@ -376,7 +389,7 @@ impl QueryStreamResult { } else { //4. Reach the end. 
self.eof = true; - self.close_eagerly("eof"); + self.close_eagerly("eof").await; Ok(None) } } @@ -384,9 +397,10 @@ impl QueryStreamResult { impl Drop for QueryStreamResult { fn drop(&mut self) { - match self.close() { - Ok(()) => (), - Err(e) => error!("QueryStreamResult::close fail: #{:?}", e), + if self.closed { + () + } else { + error!("QueryStreamResult::close fail") } } } @@ -425,20 +439,35 @@ impl QueryResultSet { pub fn close(&mut self) -> Result<()> { match self { QueryResultSet::None => Ok(()), - QueryResultSet::Some(stream_result) => stream_result.close(), + QueryResultSet::Some(stream_result) => { + // TODO: async close + if stream_result.closed { + return Ok(()); + } else { + Err(CommonErr( + CommonErrCode::Rpc, + "QueryResultSet is not closed".to_owned(), + )) + } + } } } -} -impl Iterator for QueryResultSet { - type Item = Result>; + pub async fn async_close(&mut self) -> Result<()> { + match self { + QueryResultSet::None => Ok(()), + QueryResultSet::Some(stream_result) => stream_result.close().await, + } + } +} - fn next(&mut self) -> Option { +impl QueryResultSet { + pub async fn next(&mut self) -> Option>> { match self { QueryResultSet::None => None, QueryResultSet::Some(ref mut stream_result) => { - match stream_result.fetch_next_row() { - //Find a row. + match stream_result.fetch_next_row().await { + // Find a row. 
Ok(Some(mut row)) => { let mut names = stream_result.cache_properties(); assert_eq!(names.len(), row.len()); @@ -452,9 +481,9 @@ impl Iterator for QueryResultSet { } Some(Ok(row_map)) } - //Reach end + // Reach end Ok(None) => None, - //Error happens + // Error happens Err(e) => Some(Err(e)), } } @@ -476,7 +505,7 @@ impl Drop for QueryResultSet { const PRIMARY_INDEX_NAME: &str = "PRIMARY"; pub trait TableQuery { - fn execute(&self) -> Result; + async fn execute(&self) -> Result; fn get_table_name(&self) -> String; fn set_entity_type(&mut self, entity_type: ObTableEntityType); fn entity_type(&self) -> ObTableEntityType; diff --git a/src/client/table.rs b/src/client/table.rs index 9c1d9e5..93a6c81 100644 --- a/src/client/table.rs +++ b/src/client/table.rs @@ -18,7 +18,7 @@ use std::{collections::HashMap, fmt::Formatter, sync::Arc, time::Duration}; use super::{ - query::{QueryResultSet, QueryStreamResult, StreamQuerier, TableQuery}, + query::{QueryResultSet, TableQuery}, ClientConfig, Table, TableOpResult, }; use crate::{ @@ -28,8 +28,7 @@ use crate::{ codes::ResultCodes, payloads::*, query::{ - ObHTableFilter, ObNewRange, ObScanOrder, ObTableQuery, ObTableQueryRequest, - ObTableQueryResult, ObTableStreamRequest, + ObHTableFilter, ObNewRange, ObScanOrder, ObTableQuery }, ObPayload, }, @@ -63,12 +62,12 @@ impl std::fmt::Debug for ObTable { } impl ObTable { - pub fn execute_payload( + pub async fn execute_payload( &self, payload: &mut T, result: &mut R, ) -> Result<()> { - self.rpc_proxy.execute(payload, result)?; + self.rpc_proxy.execute(payload, result).await?; Ok(()) } @@ -80,7 +79,7 @@ impl ObTable { self.config.rpc_operation_timeout } - fn execute( + async fn execute( &self, table_name: &str, operation_type: ObTableOperationType, @@ -98,7 +97,7 @@ impl ObTable { self.config.log_level_flag, ); let mut result = ObTableOperationResult::new(); - self.execute_payload(&mut payload, &mut result)?; + self.execute_payload(&mut payload, &mut result).await?; Ok(result) } } @@ 
-175,7 +174,7 @@ impl Builder { // TODO: Table has no retry for any operation impl Table for ObTable { - fn insert( + async fn insert( &self, table_name: &str, row_keys: Vec, @@ -189,11 +188,12 @@ impl Table for ObTable { row_keys, Some(columns), Some(properties), - )? + ) + .await? .affected_rows()) } - fn update( + async fn update( &self, table_name: &str, row_keys: Vec, @@ -207,11 +207,12 @@ impl Table for ObTable { row_keys, Some(columns), Some(properties), - )? + ) + .await? .affected_rows()) } - fn insert_or_update( + async fn insert_or_update( &self, table_name: &str, row_keys: Vec, @@ -225,11 +226,12 @@ impl Table for ObTable { row_keys, Some(columns), Some(properties), - )? + ) + .await? .affected_rows()) } - fn replace( + async fn replace( &self, table_name: &str, row_keys: Vec, @@ -243,11 +245,12 @@ impl Table for ObTable { row_keys, Some(columns), Some(properties), - )? + ) + .await? .affected_rows()) } - fn append( + async fn append( &self, table_name: &str, row_keys: Vec, @@ -261,11 +264,12 @@ impl Table for ObTable { row_keys, Some(columns), Some(properties), - )? + ) + .await? .affected_rows()) } - fn increment( + async fn increment( &self, table_name: &str, row_keys: Vec, @@ -279,17 +283,19 @@ impl Table for ObTable { row_keys, Some(columns), Some(properties), - )? + ) + .await? .affected_rows()) } - fn delete(&self, table_name: &str, row_keys: Vec) -> Result { + async fn delete(&self, table_name: &str, row_keys: Vec) -> Result { Ok(self - .execute(table_name, ObTableOperationType::Del, row_keys, None, None)? + .execute(table_name, ObTableOperationType::Del, row_keys, None, None) + .await? .affected_rows()) } - fn get( + async fn get( &self, table_name: &str, row_keys: Vec, @@ -302,7 +308,8 @@ impl Table for ObTable { row_keys, Some(columns), None, - )? + ) + .await? 
.take_entity() .take_properties()) } @@ -311,7 +318,7 @@ impl Table for ObTable { ObTableBatchOperation::with_ops_num(ops_num_hint) } - fn execute_batch( + async fn execute_batch( &self, _table_name: &str, batch_op: ObTableBatchOperation, @@ -323,7 +330,7 @@ impl Table for ObTable { ); let mut result = ObTableBatchOperationResult::new(); - self.rpc_proxy.execute(&mut payload, &mut result)?; + self.rpc_proxy.execute(&mut payload, &mut result).await?; result.into() } @@ -355,44 +362,7 @@ impl From for Result> { } } -struct ObTableStreamQuerier; - -impl ObTableStreamQuerier { - pub fn new() -> ObTableStreamQuerier { - ObTableStreamQuerier {} - } -} - -impl StreamQuerier for ObTableStreamQuerier { - fn execute_query( - &self, - stream_result: &mut QueryStreamResult, - (part_id, ob_table): (i64, Arc), - payload: &mut ObTableQueryRequest, - ) -> Result { - let mut result = ObTableQueryResult::new(); - ob_table.rpc_proxy.execute(payload, &mut result)?; - let row_count = result.row_count(); - stream_result.cache_stream_next((part_id, ob_table), result); - Ok(row_count) - } - - fn execute_stream( - &self, - stream_result: &mut QueryStreamResult, - (part_id, ob_table): (i64, Arc), - payload: &mut ObTableStreamRequest, - ) -> Result { - let mut result = ObTableQueryResult::new(); - let is_stream_next = payload.is_stream_next(); - ob_table.rpc_proxy.execute(payload, &mut result)?; - let row_count = result.row_count(); - if is_stream_next { - stream_result.cache_stream_next((part_id, ob_table), result); - } - Ok(row_count) - } -} +// impl ObTableStreamQuerier for obtable pub struct ObTableQueryImpl { operation_timeout: Option, @@ -420,25 +390,8 @@ impl ObTableQueryImpl { } impl TableQuery for ObTableQueryImpl { - fn execute(&self) -> Result { - let mut partition_table: HashMap)> = HashMap::new(); - partition_table.insert(0, (0, self.table.clone())); - - self.table_query.verify()?; - - let mut stream_result = QueryStreamResult::new( - Arc::new(ObTableStreamQuerier::new()), - 
self.table_query.clone(), - ); - - stream_result.set_entity_type(self.entity_type()); - stream_result.set_table_name(&self.table_name); - stream_result.set_expectant(partition_table); - stream_result.set_operation_timeout(self.operation_timeout); - stream_result.set_flag(self.table.config.log_level_flag); - stream_result.init()?; - - Ok(QueryResultSet::from_stream_result(stream_result)) + async fn execute(&self) -> Result { + todo!() } fn get_table_name(&self) -> String { diff --git a/src/client/table_client.rs b/src/client/table_client.rs index c43862b..50f834f 100644 --- a/src/client/table_client.rs +++ b/src/client/table_client.rs @@ -27,8 +27,6 @@ use std::{ time::{Duration, Instant}, }; -use futures::{future, Future}; -use futures_cpupool::{Builder as CpuPoolBuilder, CpuPool}; use rand::{seq::SliceRandom, thread_rng}; use scheduled_thread_pool::ScheduledThreadPool; @@ -216,10 +214,6 @@ struct ObTableClientInner { refresh_metadata_mutex: Lock, last_refresh_metadata_ts: AtomicUsize, - conn_init_thread_pool: Arc, - - // table_name => thread pool - table_batch_op_thread_pools: Arc>>>, // query concurrency control query_permits: Option, } @@ -238,7 +232,6 @@ impl ObTableClientInner { config: ClientConfig, runtimes: Arc, ) -> Result { - let conn_init_thread_num = config.conn_init_thread_num; let ocp_manager = ObOcpModelManager::new(config.rslist_acquire_timeout, &config.ocp_model_cache_file)?; @@ -275,13 +268,6 @@ impl ObTableClientInner { refresh_metadata_mutex: Mutex::new(0), last_refresh_metadata_ts: AtomicUsize::new(0), - conn_init_thread_pool: Arc::new( - ScheduledThreadPool::builder() - .num_threads(conn_init_thread_num) - .thread_name_pattern("conn_init_{}") - .build(), - ), - table_batch_op_thread_pools: Arc::new(RwLock::new(HashMap::new())), query_permits, }) } @@ -614,7 +600,6 @@ impl ObTableClientInner { ConnPoolBuilder::new() .max_conn_num(self.config.max_conns_per_server) .min_conn_num(self.config.min_idle_conns_per_server) - 
.conn_init_thread_pool(self.conn_init_thread_pool.clone()) .conn_builder(conn_builder) .build()?, ); @@ -1055,29 +1040,6 @@ impl ObTableClientInner { } } - fn get_or_create_batch_op_thread_pool(&self, table_name: &str) -> Arc { - let pools = self.table_batch_op_thread_pools.rl(); - if let Some(pool) = pools.get(table_name) { - pool.clone() - } else { - drop(pools); - let mut pools = self.table_batch_op_thread_pools.wl(); - if let Some(pool) = pools.get(table_name) { - pool.clone() - } else { - let pool = Arc::new( - CpuPoolBuilder::new() - .name_prefix(format!("batch-ops-for-{table_name}")) - .pool_size(self.config.table_batch_op_thread_num) - .create(), - ); - - pools.insert(table_name.to_owned(), pool.clone()); - pool - } - } - } - fn invalidate_table(&self, table_name: &str) { let mutex = { let table_mutexs = self.table_mutexs.rl(); @@ -1093,7 +1055,6 @@ impl ObTableClientInner { self.table_row_key_element.wl().remove(table_name); self.table_continuous_failures.wl().remove(table_name); self.table_mutexs.wl().remove(table_name); - self.table_batch_op_thread_pools.wl().remove(table_name); } fn execute_sql(&self, sql: &str) -> Result<()> { @@ -1376,7 +1337,7 @@ impl ObTableClientInner { Ok(()) } - fn execute_once( + async fn execute_once( &self, table_name: &str, operation_type: ObTableOperationType, @@ -1401,14 +1362,14 @@ impl ObTableClientInner { ); payload.set_partition_id(part_id); let mut result = ObTableOperationResult::new(); - table.execute_payload(&mut payload, &mut result)?; + table.execute_payload(&mut payload, &mut result).await?; OBKV_CLIENT_METRICS.observe_operation_opt_rt(operation_type, start.elapsed()); Ok(result) } - fn execute( + async fn execute( &self, table_name: &str, operation_type: ObTableOperationType, @@ -1419,13 +1380,16 @@ impl ObTableClientInner { let mut retry_num = 0; loop { retry_num += 1; - match self.execute_once( - table_name, - operation_type, - row_keys.clone(), - columns.clone(), - properties.clone(), - ) { + return match 
self + .execute_once( + table_name, + operation_type, + row_keys.clone(), + columns.clone(), + properties.clone(), + ) + .await + { Ok(result) => { let error_no = result.header().errorno(); let result_code = ResultCodes::from_i32(error_no); @@ -1441,7 +1405,7 @@ impl ObTableClientInner { ), )) }; - return result; + result } Err(e) => { debug!( @@ -1470,7 +1434,7 @@ impl ObTableClientInner { table_name:{}, op_type:{:?}, retry_num:{}, err:{}", table_name, operation_type, retry_num, e ); - return Err(e); + Err(e) } } } @@ -1491,6 +1455,12 @@ pub type RuntimesRef = Arc; /// OBKV Table Runtime #[derive(Clone, Debug)] pub struct ObClientRuntimes { + /// Runtime for multi-batch operation + pub batch_op_runtime: RuntimeRef, + /// Runtime for query + pub query_runtime: RuntimeRef, + /// Runtime for init connection + pub conn_init_runtime: RuntimeRef, /// Runtime for connection to read data pub reader_runtime: RuntimeRef, /// Runtime for connection to write data @@ -1502,6 +1472,9 @@ pub struct ObClientRuntimes { impl ObClientRuntimes { pub fn test_default() -> ObClientRuntimes { ObClientRuntimes { + batch_op_runtime: Arc::new(build_runtime("ob-batch-executor", 1)), + query_runtime: Arc::new(build_runtime("ob-query-executor", 1)), + conn_init_runtime: Arc::new(build_runtime("ob-conn-initer", 1)), reader_runtime: Arc::new(build_runtime("ob-conn-reader", 1)), writer_runtime: Arc::new(build_runtime("ob-conn-writer", 1)), default_runtime: Arc::new(build_runtime("ob-default", 1)), @@ -1520,6 +1493,12 @@ fn build_runtime(name: &str, threads_num: usize) -> runtime::Runtime { fn build_obkv_runtimes(config: &ClientConfig) -> ObClientRuntimes { ObClientRuntimes { + batch_op_runtime: Arc::new(build_runtime( + "ob-batch-executor", + config.batch_op_thread_num, + )), + query_runtime: Arc::new(build_runtime("ob-query-executor", config.query_thread_num)), + conn_init_runtime: Arc::new(build_runtime("ob-conn-initer", config.conn_init_thread_num)), reader_runtime: Arc::new(build_runtime( 
"ob-conn-reader", config.conn_reader_thread_num, @@ -1536,7 +1515,6 @@ fn build_obkv_runtimes(config: &ClientConfig) -> ObClientRuntimes { #[derive(Clone)] #[allow(dead_code)] pub struct ObTableClient { - runtimes: Arc, inner: Arc, refresh_thread_pool: Arc, } @@ -1553,7 +1531,7 @@ impl ObTableClient { } /// Create a TableQuery instance for table. - pub fn query(&self, table_name: &str) -> impl TableQuery { + pub fn query(&self, table_name: &str) -> ObTableClientQueryImpl { ObTableClientQueryImpl::new(table_name, self.inner.clone()) } @@ -1607,7 +1585,7 @@ impl ObTableClient { self.inner.get_table(table_name, row_key, refresh) } - fn execute_batch_once( + async fn execute_batch_once( &self, table_name: &str, batch_op: ObTableBatchOperation, @@ -1633,8 +1611,6 @@ impl ObTableClient { return Ok(Vec::new()); } - let start = Instant::now(); - OBKV_CLIENT_METRICS.observe_misc("partitioned_batch_ops", part_batch_ops.len() as f64); // fast path: to process batch operations involving only one partition @@ -1646,7 +1622,7 @@ impl ObTableClient { let (_, table) = self .inner .get_or_create_table(table_name, &table_entry, part_id)?; - return table.execute_batch(table_name, part_batch_op); + return table.execute_batch(table_name, part_batch_op).await; } // atomic now only support single partition @@ -1661,37 +1637,33 @@ impl ObTableClient { // slow path: have to process operations involving multiple partitions // concurrent send the batch ops by partition - let pool = self.inner.get_or_create_batch_op_thread_pool(table_name); + let mut all_results = Vec::new(); + let mut handles = Vec::with_capacity(part_batch_ops.len()); - // prepare all the runners - let mut runners = Vec::with_capacity(part_batch_ops.len()); for (part_id, mut batch_op) in part_batch_ops { let (_, table) = self .inner .get_or_create_table(table_name, &table_entry, part_id)?; let table_name = table_name.to_owned(); - runners.push(move || { + handles.push(self.inner.runtimes.batch_op_runtime.spawn(async move { 
batch_op.set_partition_id(part_id); batch_op.set_table_name(table_name.clone()); - table.execute_batch(&table_name, batch_op) - }); + table.execute_batch(&table_name, batch_op).await + })); } - // join all runners into one future - let put_all = future::join_all(runners.into_iter().map(|runner| pool.spawn_fn(runner))); - - // wait for all futures done - let results = put_all.wait()?; - - OBKV_CLIENT_METRICS.observe_operation_ort_rt(ObClientOpRecordType::Batch, start.elapsed()); + for handle in handles { + let results = handle.await??; + all_results.extend(results); + } - Ok(results.into_iter().flatten().collect()) + Ok(all_results) } } impl Table for ObTableClient { #[inline] - fn insert( + async fn insert( &self, table_name: &str, row_keys: Vec, @@ -1706,12 +1678,13 @@ impl Table for ObTableClient { row_keys, Some(columns), Some(properties), - )? + ) + .await? .affected_rows()) } #[inline] - fn update( + async fn update( &self, table_name: &str, row_keys: Vec, @@ -1726,12 +1699,13 @@ impl Table for ObTableClient { row_keys, Some(columns), Some(properties), - )? + ) + .await? .affected_rows()) } #[inline] - fn insert_or_update( + async fn insert_or_update( &self, table_name: &str, row_keys: Vec, @@ -1746,12 +1720,13 @@ impl Table for ObTableClient { row_keys, Some(columns), Some(properties), - )? + ) + .await? .affected_rows()) } #[inline] - fn replace( + async fn replace( &self, table_name: &str, row_keys: Vec, @@ -1766,12 +1741,13 @@ impl Table for ObTableClient { row_keys, Some(columns), Some(properties), - )? + ) + .await? .affected_rows()) } #[inline] - fn append( + async fn append( &self, table_name: &str, row_keys: Vec, @@ -1786,12 +1762,13 @@ impl Table for ObTableClient { row_keys, Some(columns), Some(properties), - )? + ) + .await? .affected_rows()) } #[inline] - fn increment( + async fn increment( &self, table_name: &str, row_keys: Vec, @@ -1806,20 +1783,22 @@ impl Table for ObTableClient { row_keys, Some(columns), Some(properties), - )? + ) + .await? 
.affected_rows()) } #[inline] - fn delete(&self, table_name: &str, row_keys: Vec) -> Result { + async fn delete(&self, table_name: &str, row_keys: Vec) -> Result { Ok(self .inner - .execute(table_name, ObTableOperationType::Del, row_keys, None, None)? + .execute(table_name, ObTableOperationType::Del, row_keys, None, None) + .await? .affected_rows()) } #[inline] - fn get( + async fn get( &self, table_name: &str, row_keys: Vec, @@ -1833,7 +1812,8 @@ impl Table for ObTableClient { row_keys, Some(columns), None, - )? + ) + .await? .take_entity() .take_properties()) } @@ -1843,7 +1823,7 @@ impl Table for ObTableClient { ObTableBatchOperation::with_ops_num_raw(ops_num_hint) } - fn execute_batch( + async fn execute_batch( &self, table_name: &str, batch_op: ObTableBatchOperation, @@ -1851,7 +1831,7 @@ impl Table for ObTableClient { let mut retry_num = 0; loop { retry_num += 1; - match self.execute_batch_once(table_name, batch_op.clone()) { + match self.execute_batch_once(table_name, batch_op.clone()).await { Ok(res) => { self.inner.reset_table_failure(table_name); return Ok(res); @@ -1891,7 +1871,7 @@ impl Table for ObTableClient { } } -struct ObTableClientStreamQuerier { +pub struct ObTableClientStreamQuerier { client: Arc, table_name: String, start_execute_ts: AtomicI64, @@ -1922,7 +1902,7 @@ impl Drop for ObTableClientStreamQuerier { } impl StreamQuerier for ObTableClientStreamQuerier { - fn execute_query( + async fn execute_query( &self, stream_result: &mut QueryStreamResult, (part_id, ob_table): (i64, Arc), @@ -1934,7 +1914,7 @@ impl StreamQuerier for ObTableClientStreamQuerier { .store(current_time_millis(), Ordering::Relaxed); let mut result = ObTableQueryResult::new(); - match ob_table.execute_payload(payload, &mut result) { + match ob_table.execute_payload(payload, &mut result).await { Ok(()) => self.client.reset_table_failure(&self.table_name), Err(e) => { if let Err(e) = self.client.on_table_op_failure(&self.table_name, &e) { @@ -1952,7 +1932,7 @@ impl 
StreamQuerier for ObTableClientStreamQuerier { Ok(row_count) } - fn execute_stream( + async fn execute_stream( &self, stream_result: &mut QueryStreamResult, (part_id, ob_table): (i64, Arc), @@ -1961,7 +1941,7 @@ impl StreamQuerier for ObTableClientStreamQuerier { let is_stream_next = payload.is_stream_next(); let mut result = ObTableQueryResult::new(); - match ob_table.execute_payload(payload, &mut result) { + match ob_table.execute_payload(payload, &mut result).await { Ok(()) => self.client.reset_table_failure(&self.table_name), Err(e) => { if let Err(e) = self.client.on_table_op_failure(&self.table_name, &e) { @@ -1981,10 +1961,14 @@ impl StreamQuerier for ObTableClientStreamQuerier { } Ok(row_count) } + + fn get_runtime(&self) -> RuntimeRef { + self.client.runtimes.query_runtime.clone() + } } /// TODO refactor with ObTableQueryImpl -struct ObTableClientQueryImpl { +pub struct ObTableClientQueryImpl { operation_timeout: Option, entity_type: ObTableEntityType, table_name: String, @@ -2009,7 +1993,7 @@ impl ObTableClientQueryImpl { } impl TableQuery for ObTableClientQueryImpl { - fn execute(&self) -> Result { + async fn execute(&self) -> Result { let mut partition_table: HashMap)> = HashMap::new(); self.table_query.verify()?; @@ -2026,6 +2010,9 @@ impl TableQuery for ObTableClientQueryImpl { )?; for (part_id, ob_table) in pairs { + if partition_table.contains_key(&part_id) { + continue; + } partition_table.insert(part_id, (part_id, ob_table)); } } @@ -2045,7 +2032,7 @@ impl TableQuery for ObTableClientQueryImpl { stream_result.set_expectant(partition_table); stream_result.set_operation_timeout(self.operation_timeout); stream_result.set_flag(self.client.config.log_level_flag); - stream_result.init()?; + stream_result.init().await?; let result = QueryResultSet::from_stream_result(stream_result); @@ -2386,7 +2373,6 @@ impl Builder { let runtimes = Arc::new(build_obkv_runtimes(&self.config)); Ok(ObTableClient { - runtimes: runtimes.clone(), inner: 
Arc::new(ObTableClientInner::internal_new( self.param_url, self.full_user_name, diff --git a/src/lib.rs b/src/lib.rs index b147674..79bd484 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -15,6 +15,8 @@ * #L% */ +#![feature(async_fn_in_trait)] + extern crate bytes; extern crate chrono; extern crate crossbeam; @@ -51,7 +53,7 @@ pub mod error; mod location; pub mod monitors; mod rpc; -mod runtime; +pub mod runtime; pub mod serde_obkv; mod util; pub use self::{ diff --git a/src/rpc/conn_pool.rs b/src/rpc/conn_pool.rs index 7b3485c..1db2ca9 100644 --- a/src/rpc/conn_pool.rs +++ b/src/rpc/conn_pool.rs @@ -22,10 +22,11 @@ use std::{ u32, }; -use scheduled_thread_pool::ScheduledThreadPool; +use tokio::time::sleep; use super::{Builder as ConnBuilder, Connection}; use crate::{ + client::table_client::RuntimesRef, error::{CommonErrCode, Error::Common as CommonErr, Result}, proxy::OBKV_PROXY_METRICS, }; @@ -114,13 +115,11 @@ impl ConnPool { fn internal_new( min_conn_num: usize, max_conn_num: usize, - conn_init_thread_pool: Arc, builder: ConnBuilder, ) -> Result { let shared_pool = Arc::new(SharedPool::internal_new( min_conn_num, max_conn_num, - conn_init_thread_pool, builder, )?); Ok(Self { @@ -169,34 +168,32 @@ impl ConnPool { shared_pool: &Arc, delay: Duration, min_build_retry_interval: Duration, - retry_num: usize, + mut retry_num: usize, build_retry_limit: usize, ) { - if retry_num > build_retry_limit { - let mut inner = shared_pool.inner.lock().unwrap(); - inner.unpend_conn(); - error!("ConnPool::add_connection_background::bg_add fail to build connection after {} retries", retry_num); - return; - } - - let weak_shared_pool = Arc::downgrade(shared_pool); - shared_pool.conn_init_thread_pool.execute_after(delay, move || { - let shared_pool = match weak_shared_pool.upgrade() { - None => return, - Some(p) => p, - }; - - match shared_pool.build_conn() { - Ok(conn) => { - let mut inner = shared_pool.inner.lock().unwrap(); - inner.add_conn(conn); - shared_pool.cond.notify_all(); - } 
- Err(e) => { - error!("ConnPool::add_connection_background::bg_add fail to build a connection after {} retries, err:{}", retry_num, e); - let delay = cmp::max(min_build_retry_interval, delay); - let delay = cmp::min(shared_pool.conn_builder.connect_timeout / 2, delay * 2); - bg_add(&shared_pool, delay, min_build_retry_interval, retry_num + 1, build_retry_limit); + let shared_pool = shared_pool.clone(); + shared_pool.clone().runtimes.conn_init_runtime.spawn(async move { + loop { + match shared_pool.build_conn().await { + Ok(conn) => { + let mut inner = shared_pool.inner.lock().unwrap(); + inner.add_conn(conn); + shared_pool.cond.notify_all(); + break; + } + Err(e) => { + retry_num += 1; + if retry_num > build_retry_limit { + let mut inner = shared_pool.inner.lock().unwrap(); + inner.unpend_conn(); + error!("ConnPool::add_connection_background::bg_add fail to build connection after {} retries", retry_num); + return; + } + error!("ConnPool::add_connection_background::bg_add fail to build a connection after {} retries, err:{}", retry_num, e); + let delay = cmp::max(min_build_retry_interval, delay); + let delay = cmp::min(shared_pool.conn_builder.connect_timeout / 2, delay * 2); + sleep(delay.into()).await; + } } } }); @@ -249,7 +246,7 @@ impl ConnPool { Ok(()) } - pub fn get(&self) -> Result> { + pub async fn get(&self) -> Result> { let start = Instant::now(); let pool = &self.shared_pool; @@ -319,34 +316,35 @@ struct SharedPool { conn_builder: ConnBuilder, inner: Mutex, cond: Condvar, - conn_init_thread_pool: Arc, + runtimes: RuntimesRef, } impl SharedPool { fn internal_new( min_conn_num: usize, max_conn_num: usize, - conn_init_thread_pool: Arc, builder: ConnBuilder, ) -> Result { + let runtimes = builder.runtimes.as_ref().unwrap().clone(); Ok(Self { min_conn_num, max_conn_num, conn_builder: builder, inner: Mutex::new(PoolInner::new(max_conn_num)), cond: Condvar::new(), - conn_init_thread_pool, + runtimes, }) } - fn build_conn(&self) -> Result { - let mut conn = 
self.conn_builder.clone().build()?; + async fn build_conn(&self) -> Result { + let mut conn = self.conn_builder.clone().build().await?; conn.connect( &self.conn_builder.tenant_name, &self.conn_builder.user_name, &self.conn_builder.database_name, &self.conn_builder.password, - )?; + ) + .await?; Ok(conn) } } @@ -354,7 +352,6 @@ impl SharedPool { pub struct Builder { min_conn_num: usize, max_conn_num: usize, - conn_init_thread_pool: Option>, conn_builder: Option, } @@ -363,7 +360,6 @@ impl Default for Builder { Self { min_conn_num: 1, max_conn_num: 3, - conn_init_thread_pool: None, conn_builder: None, } } @@ -389,20 +385,11 @@ impl Builder { self } - pub fn conn_init_thread_pool(mut self, thread_pool: Arc) -> Self { - self.conn_init_thread_pool = Some(thread_pool); - self - } - pub fn build(self) -> Result { assert!( self.conn_builder.is_some(), "missing necessary conn builder" ); - assert!( - self.conn_init_thread_pool.is_some(), - "missing necessary conn init thread pool" - ); assert!( self.min_conn_num <= self.max_conn_num, "min_conn_num({}) must equal or less than max_conn_num({})", @@ -412,7 +399,6 @@ impl Builder { let pool = ConnPool::internal_new( self.min_conn_num, self.max_conn_num, - self.conn_init_thread_pool.unwrap(), self.conn_builder.unwrap(), )?; pool.wait_for_initialized()?; @@ -445,18 +431,16 @@ mod test { fn gen_test_conn_pool(min_conn_num: usize, max_conn_num: usize) -> ConnPool { let conn_builder = gen_test_conn_builder(); - let thread_pool = ScheduledThreadPool::new(2); let builder = Builder::new() .min_conn_num(min_conn_num) .max_conn_num(max_conn_num) - .conn_init_thread_pool(Arc::new(thread_pool)) .conn_builder(conn_builder); builder.build().expect("fail to build ConnPool") } #[test] #[ignore] - fn check_conn_valid() { + async fn check_conn_valid() { let (min_conn_num, max_conn_num) = (2, 3); let pool = gen_test_conn_pool(min_conn_num, max_conn_num); let conn_num = pool.idle_conn_num(); @@ -465,25 +449,31 @@ mod test { "conn_num({conn_num}) 
should in the range: [{min_conn_num}, {max_conn_num}]", ); - let conn = pool.get().expect("fail to get connection from the pool"); + let conn = pool + .get() + .await + .expect("fail to get connection from the pool"); assert!(conn.is_active(), "conn should be active"); } #[test] #[ignore] - fn rebuild_conn() { + async fn rebuild_conn() { let (min_conn_num, max_conn_num) = (3, 5); let pool = gen_test_conn_pool(min_conn_num, max_conn_num); for _ in 0..max_conn_num * 2 { - let conn = pool.get().expect("fail to get connection from the pool"); + let conn = pool + .get() + .await + .expect("fail to get connection from the pool"); assert!(conn.is_active(), "should get active connection"); conn.active.store(false, Ordering::SeqCst); } } - #[test] + #[tokio::test] #[ignore] - fn test_pool_inner_remove() { + async fn test_pool_inner_remove() { let max_conn_num = 2; let mut pool_inner = PoolInner::new(max_conn_num); let conn_builder = gen_test_conn_builder(); @@ -491,6 +481,7 @@ mod test { let conn = conn_builder .clone() .build() + .await .expect("fail to build connection"); assert!(!conn.is_active(), "should be inactive"); pool_inner.pend_conn(); diff --git a/src/rpc/mod.rs b/src/rpc/mod.rs index 3a33b38..80e3741 100644 --- a/src/rpc/mod.rs +++ b/src/rpc/mod.rs @@ -77,7 +77,6 @@ const CONN_CONTINUOUS_TIMEOUT_CEILING: usize = 10; pub struct ConnectionSender { sender: mpsc::Sender, writer: Option>>, - default_runtime: RuntimeRef, // TODO: remove this } impl ConnectionSender { @@ -86,7 +85,6 @@ impl ConnectionSender { requests: RequestsMap, active: Arc, sender_runtime: RuntimeRef, - default_runtime: RuntimeRef, channel_capacity: usize, ) -> ConnectionSender { let (sender, mut receiver): (mpsc::Sender, mpsc::Receiver) = @@ -171,7 +169,6 @@ impl ConnectionSender { ConnectionSender { sender, writer: Some(writer), - default_runtime, } } @@ -179,14 +176,12 @@ impl ConnectionSender { /// ///It can fail only when connection gets closed. 
///Which means OBKV connection is no longer valid. - pub fn request(&self, message: ObTablePacket) -> Result<()> { - // TODO: remove block_on with sender.send().await - self.default_runtime - .block_on(async move { self.sender.send(message).await.map_err(Self::broken_pipe) }) + pub async fn request(&self, message: ObTablePacket) -> Result<()> { + self.sender.send(message).await.map_err(Self::broken_pipe) } - fn close(&mut self) -> Result<()> { - self.request(ObTablePacket::ClosePoison)?; + async fn close(&mut self) -> Result<()> { + self.request(ObTablePacket::ClosePoison).await?; let writer = mem::replace(&mut self.writer, None); let drop_helper = AbortOnDropMany(vec![writer.unwrap()]); drop(drop_helper); @@ -278,7 +273,6 @@ impl Connection { requests.clone(), active.clone(), runtimes.writer_runtime.clone(), - runtimes.default_runtime.clone(), channel_capacity, ), requests, @@ -494,7 +488,7 @@ impl Connection { // payload & response should keep Idempotent // NOTE: caller should know response wont be be updated when a no-reply request // is execute - pub fn execute( + pub async fn execute( &self, payload: &mut T, response: &mut R, @@ -523,7 +517,7 @@ impl Connection { let channel_id = match req.channel_id() { None => { debug!("Connection::execute: send no reply request"); - self.sender.request(req).map_err(|e| { + self.sender.request(req).await.map_err(|e| { error!( "Connection::execute fail to send no-reply request, err:{}", e @@ -535,40 +529,38 @@ impl Connection { Some(id) => id, }; - let rx = self.send(req, channel_id)?; + let rx = self.send(req, channel_id).await?; if payload.timeout_millis() == 0 { // no-wait request,return Ok directly return Ok(()); } - // TODO: remove block_on with rx.await - let resp = self.runtimes.default_runtime.block_on(async move { - match tokio::time::timeout(timeout, rx).await { - Ok(resp) => { - self.on_recv_in_time(); - resp.map_err(|e| { - error!( - "Connection::execute: fail to fetch rpc response, addr:{}, trace_id:{}, err:{}", 
- self.addr, trace_id, e - ); - e - }) - } - Err(err) => { + // Get result from receiver + let resp = match tokio::time::timeout(timeout, rx).await { + Ok(resp) => { + self.on_recv_in_time(); + resp.map_err(|e| { error!( - "Connection::execute: wait for rpc response timeout, addr:{}, trace_id:{}, err:{}", - self.addr, trace_id, err - ); - - self.on_recv_timeout(); - return Err(CommonErr( - CommonErrCode::Rpc, - format!("wait for rpc response timeout, err:{err}"), - )); - } - }.map_err(|err| CommonErr(CommonErrCode::Rpc, format!("Tokio timeout error: {err:?}")))? - }); + "Connection::execute: fail to fetch rpc response, addr:{}, trace_id:{}, err:{}", + self.addr, trace_id, e + ); + e + }) + } + Err(err) => { + error!( + "Connection::execute: wait for rpc response timeout, addr:{}, trace_id:{}, err:{}", + self.addr, trace_id, err + ); + + self.on_recv_timeout(); + return Err(CommonErr( + CommonErrCode::Rpc, + format!("wait for rpc response timeout, err:{err}"), + )); + } + }.map_err(|err| CommonErr(CommonErrCode::Rpc, format!("Tokio timeout error: {err:?}")))?; match resp { Ok(ObTablePacket::ServerPacket { @@ -610,7 +602,7 @@ impl Connection { } } - pub fn connect( + pub async fn connect( &mut self, tenant_name: &str, user_name: &str, @@ -618,9 +610,10 @@ impl Connection { password: &str, ) -> Result<()> { self.login(tenant_name, user_name, database_name, password) + .await } - fn login( + async fn login( &mut self, tenant_name: &str, user_name: &str, @@ -633,7 +626,7 @@ impl Connection { let mut login_result = ObTableLoginResult::new(); - self.execute(&mut payload, &mut login_result)?; + self.execute(&mut payload, &mut login_result).await?; debug!("Connection::login, login result {:?}", login_result); @@ -679,11 +672,13 @@ impl Connection { /// invalidated. 
/// ///For info on default settings see [Builder](struct.Builder.html) - pub fn new() -> Result { - Builder::new().build() + pub async fn new() -> Result { + Builder::new().build().await } /// close the connection + /// close is used by Drop, since Drop is sync, we need to use block_on to + /// wait for the future fn close(&mut self) -> Result<()> { if self.reader.is_none() { return Ok(()); @@ -691,12 +686,15 @@ impl Connection { self.set_active(false); // 1. close writer - if let Err(e) = self.sender.close() { + if let Err(e) = self + .runtimes + .default_runtime + .block_on(async { self.sender.close().await }) + { error!("Connection::close fail to close writer, err: {}.", e); } // 2. close reader - // TODO: remove block_on if let Err(e) = self.runtimes.default_runtime.block_on(async { self.reader_signal_sender .send(()) @@ -721,14 +719,14 @@ impl Connection { /// ///It can fail only when connection gets closed. ///Which means OBKV connection is no longer valid. - pub fn send( + pub async fn send( &self, message: ObTablePacket, channel_id: i32, ) -> Result>> { let (tx, rx) = oneshot::channel(); self.requests.lock().unwrap().insert(channel_id, tx); - self.sender.request(message).map_err(|e| { + self.sender.request(message).await.map_err(|e| { error!("Connection::send: fail to send message, err:{}", e); self.requests.lock().unwrap().remove(&channel_id); e @@ -865,13 +863,13 @@ impl Builder { self } - pub fn build(self) -> Result { + pub async fn build(self) -> Result { let uuid = Uuid::new_v4(); let id = BigEndian::read_u32(uuid.as_bytes()); - self.build_with_id(id) + self.build_with_id(id).await } - pub fn build_with_id(self, id: u32) -> Result { + pub async fn build_with_id(self, id: u32) -> Result { let addr = (&self.ip[..], self.port).to_socket_addrs()?.next(); if let Some(addr) = addr { @@ -891,22 +889,14 @@ impl Builder { let tokio_socket = TcpSocket::from_std_stream(socket2_socket.into()); - // TODO: remove block_on - let stream = self - .runtimes - .clone() 
- .unwrap() - .default_runtime - .block_on(async move { - tokio_socket - .connect(addr) - .await - .map_err(|e| { - error!("Builder::build fail to connect to {}, err: {}.", addr, e); - e - }) - .unwrap() - }); + let stream = tokio_socket + .connect(addr) + .await + .map_err(|e| { + error!("Builder::build fail to connect to {}, err: {}.", addr, e); + e + }) + .unwrap(); debug!("Builder::build succeeds in connecting to {}.", addr); @@ -950,19 +940,20 @@ mod test { } } - #[test] + #[tokio::test] #[ignore] - fn test_connect() { + async fn test_connect() { let packet = gen_test_server_packet(100); let mut builder = Builder::new(); builder = builder.ip(TEST_SERVER_IP).port(TEST_SERVER_PORT); - let mut conn: Connection = builder.build().expect("Create OBKV Client"); + let mut conn: Connection = builder.build().await.expect("Create OBKV Client"); let channel_id = packet.channel_id().unwrap(); let res = conn .send(packet, channel_id) + .await .expect("fail to send request") .try_recv(); assert!(res.is_ok()); diff --git a/src/rpc/proxy.rs b/src/rpc/proxy.rs index f68bd6c..3ae32c7 100644 --- a/src/rpc/proxy.rs +++ b/src/rpc/proxy.rs @@ -39,18 +39,18 @@ impl Proxy { Proxy(conn_pool) } - pub fn execute( + pub async fn execute( &self, payload: &mut T, response: &mut R, ) -> Result<()> { // the connection is ensured to be active now by checking conn.is_active // but it may be actually broken already. 
- let conn = self.0.get()?; + let conn = self.0.get().await?; OBKV_PROXY_METRICS.observe_proxy_misc("conn_load", conn.load() as f64); - let res = conn.execute(payload, response); + let res = conn.execute(payload, response).await; if res.is_ok() || conn.is_active() { return res; } @@ -80,8 +80,8 @@ impl Proxy { retry_cnt, err ); - let conn = self.0.get()?; - let res = conn.execute(payload, response); + let conn = self.0.get().await?; + let res = conn.execute(payload, response).await; if res.is_ok() || conn.is_active() { OBKV_PROXY_METRICS.observe_proxy_misc("retry_times", retry_cnt as f64); return res; diff --git a/tests/test_cse_table.rs b/tests/test_cse_table.rs index e4b8eb9..976e54a 100644 --- a/tests/test_cse_table.rs +++ b/tests/test_cse_table.rs @@ -22,7 +22,7 @@ use std::collections::HashSet; use obkv::{client::query::TableQuery, Table, Value}; #[allow(unused_imports)] use serial_test_derive::serial; -use test_log::test; +use tokio::task; // ```sql // create table cse_data_20190308_1 ( @@ -40,10 +40,11 @@ use test_log::test; // partition p19 values less than(72000), partition p20 values less than(75600), partition p21 values less than(79200), partition p22 values less than(82800), // partition p23 values less than(86400), partition p24 values less than(MAXVALUE)); // ``` -#[test] +#[tokio::test] #[serial] -fn test_cse_data_range_table() { - let client = utils::common::build_normal_client(); +async fn test_cse_data_range_table() { + let client_handle = task::spawn_blocking(|| utils::common::build_normal_client()); + let client = client_handle.await.unwrap(); let cse_table = "cse_data_20190308_1"; client @@ -58,37 +59,46 @@ fn test_cse_data_range_table() { ], ); let rowk_keys = vec![Value::from(11i64), Value::from(1i32), Value::from(3600i32)]; - let result = client.delete(cse_table, rowk_keys.clone()); + let result = client.delete(cse_table, rowk_keys.clone()).await; + assert!(result.is_ok()); result.unwrap(); - let result = client.insert( - cse_table, - 
rowk_keys.clone(), - vec!["value".to_owned()], - vec![Value::from("aa")], - ); + let result = client + .insert( + cse_table, + rowk_keys.clone(), + vec!["value".to_owned()], + vec![Value::from("aa")], + ) + .await; let result = result.unwrap(); assert_eq!(1, result); - let result = client.get(cse_table, rowk_keys.clone(), vec!["value".to_owned()]); + let result = client + .get(cse_table, rowk_keys.clone(), vec!["value".to_owned()]) + .await; let mut result = result.unwrap(); assert_eq!(1, result.len()); let value = result.remove("value").unwrap(); assert!(value.is_bytes()); assert_eq!("aa".to_owned().into_bytes(), value.as_bytes()); - let result = client.update( - cse_table, - rowk_keys.clone(), - vec!["value".to_owned()], - vec![Value::from("bb")], - ); + let result = client + .update( + cse_table, + rowk_keys.clone(), + vec!["value".to_owned()], + vec![Value::from("bb")], + ) + .await; let result = result.unwrap(); assert_eq!(1, result); - let result = client.get(cse_table, rowk_keys, vec!["value".to_owned()]); + let result = client + .get(cse_table, rowk_keys, vec!["value".to_owned()]) + .await; let mut result = result.unwrap(); assert_eq!(1, result.len()); let value = result.remove("value").unwrap(); @@ -96,10 +106,11 @@ fn test_cse_data_range_table() { assert_eq!("bb".to_owned().into_bytes(), value.as_bytes()); } -#[test] +#[tokio::test] #[serial] -fn test_data_range_part() { - let client = utils::common::build_normal_client(); +async fn test_data_range_part() { + let client_handle = task::spawn_blocking(|| utils::common::build_normal_client()); + let client = client_handle.await.unwrap(); let cse_table = "cse_data_20190308_1"; client .truncate_table(cse_table) @@ -131,11 +142,13 @@ fn test_data_range_part() { // interval_ms INT DEFAULT 0, // PRIMARY KEY(id), UNIQUE KEY data_table_loc(data_table_name, data_table_start_time_ms)); // ``` -#[test] +#[tokio::test] #[serial] -fn test_cse_meta_data_table() { - let client = utils::common::build_normal_client(); 
+async fn test_cse_meta_data_table() { + let client_handle = task::spawn_blocking(|| utils::common::build_normal_client()); + let client = client_handle.await.unwrap(); let cse_table = "cse_meta_data_0"; + client .truncate_table(cse_table) .expect("Fail to truncate table"); @@ -151,6 +164,7 @@ fn test_cse_meta_data_table() { ], vec![Value::from("data_000_0"), Value::from(0i32)], ) + .await .expect("Fail to insert one test entry"); let mut batch_op = client.batch_operation(1); @@ -177,6 +191,7 @@ fn test_cse_meta_data_table() { ); client .execute_batch(cse_table, batch_op) + .await .expect("Fail to update row"); } @@ -189,10 +204,11 @@ fn test_cse_meta_data_table() { // PRIMARY KEY(measurement, tag_key, tag_value)) // partition by key(measurement, tag_key, tag_value) partitions 13; // ``` -#[test] +#[tokio::test] #[serial] -fn test_cse_index_key_table() { - let client = utils::common::build_normal_client(); +async fn test_cse_index_key_table() { + let client_handle = task::spawn_blocking(|| utils::common::build_normal_client()); + let client = client_handle.await.unwrap(); let cse_table = "cse_index_1"; client @@ -228,6 +244,7 @@ fn test_cse_index_key_table() { } let res = client .execute_batch(cse_table, batch_ops) + .await .expect("Fail to execute batch operations"); assert_eq!(100, res.len()); @@ -244,29 +261,46 @@ fn test_cse_index_key_table() { vec![Value::get_max(), Value::get_max(), Value::get_max()], false, ); - let result_set = query.execute().expect("Fail to execute"); - - for res in result_set { - let mut res = res.expect("fail to query"); - let row: Vec = columns - .iter() - .map(|name| String::from_utf8(res.remove(name).unwrap().as_bytes().to_vec()).unwrap()) - .collect(); - let (m, k, v, series_ids) = ( - row[0].to_owned(), - row[1].to_owned(), - row[2].to_owned(), - row[3].to_owned(), - ); - assert!(rows.contains(&vec![m, k, v])); - assert_eq!(ids, series_ids); + let mut result_set = query.execute().await.expect("Fail to execute"); + + for i in 
0..result_set.cache_size() { + match result_set.next().await { + Some(Ok(mut res)) => { + let row: Vec = columns + .iter() + .map(|name| { + String::from_utf8(res.remove(name).unwrap().as_bytes().to_vec()).unwrap() + }) + .collect(); + let (m, k, v, series_ids) = ( + row[0].to_owned(), + row[1].to_owned(), + row[2].to_owned(), + row[3].to_owned(), + ); + assert_eq!(rows.contains(&vec![m, k, v]), true); + assert_eq!(ids, series_ids); + } + None => { + assert_eq!(i, 100); + break; + } + Some(Err(e)) => { + panic!("Error: {:?}", e); + } + } } + result_set + .async_close() + .await + .expect("Fail to close result set"); } -#[test] +#[tokio::test] #[serial] -fn test_index_key_part() { - let client = utils::common::build_normal_client(); +async fn test_index_key_part() { + let client_handle = task::spawn_blocking(|| utils::common::build_normal_client()); + let client = client_handle.await.unwrap(); let cse_table = "cse_index_1"; client @@ -295,10 +329,11 @@ fn test_index_key_part() { // PRIMARY KEY(measurement, field_name)) // partition by key(measurement, field_name) partitions 13; // ``` -#[test] +#[tokio::test] #[serial] -fn test_cse_field_key_table() { - let client = utils::common::build_normal_client(); +async fn test_cse_field_key_table() { + let client_handle = task::spawn_blocking(|| utils::common::build_normal_client()); + let client = client_handle.await.unwrap(); let cse_table = "cse_field_1"; client @@ -309,23 +344,27 @@ fn test_cse_field_key_table() { vec!["measurement".to_string(), "field_name".to_string()], ); let rowk_keys = vec![Value::from("a"), Value::from("site")]; - let result = client.delete(cse_table, rowk_keys.clone()); + let result = client.delete(cse_table, rowk_keys.clone()).await; result.unwrap(); - let result = client.insert( - cse_table, - rowk_keys.clone(), - vec!["field_type".to_owned(), "id".to_owned()], - vec![Value::from(1i32), Value::from(2i32)], - ); + let result = client + .insert( + cse_table, + rowk_keys.clone(), + 
vec!["field_type".to_owned(), "id".to_owned()], + vec![Value::from(1i32), Value::from(2i32)], + ) + .await; let result = result.unwrap(); assert_eq!(1, result); - let result = client.get( - cse_table, - rowk_keys.clone(), - vec!["field_type".to_owned(), "id".to_owned()], - ); + let result = client + .get( + cse_table, + rowk_keys.clone(), + vec!["field_type".to_owned(), "id".to_owned()], + ) + .await; let mut result = result.unwrap(); assert_eq!(2, result.len()); let value = result.remove("field_type").unwrap(); @@ -335,22 +374,26 @@ fn test_cse_field_key_table() { assert!(value.is_i32()); assert_eq!(2i32, value.as_i32()); - let result = client.update( - cse_table, - rowk_keys.clone(), - vec!["field_type".to_owned(), "id".to_owned()], - vec![Value::from(3i32), Value::from(4i32)], - ); + let result = client + .update( + cse_table, + rowk_keys.clone(), + vec!["field_type".to_owned(), "id".to_owned()], + vec![Value::from(3i32), Value::from(4i32)], + ) + .await; let result = result.unwrap(); assert_eq!(1, result); - let result = client.get( - cse_table, - rowk_keys, - vec!["field_type".to_owned(), "id".to_owned()], - ); + let result = client + .get( + cse_table, + rowk_keys, + vec!["field_type".to_owned(), "id".to_owned()], + ) + .await; let mut result = result.unwrap(); assert_eq!(2, result.len()); let value = result.remove("field_type").unwrap(); @@ -361,10 +404,11 @@ fn test_cse_field_key_table() { assert_eq!(4i32, value.as_i32()); } -#[test] +#[tokio::test] #[serial] -fn test_field_key_part() { - let client = utils::common::build_normal_client(); +async fn test_field_key_part() { + let client_handle = task::spawn_blocking(|| utils::common::build_normal_client()); + let client = client_handle.await.unwrap(); let cse_table = "cse_field_1"; client @@ -388,30 +432,36 @@ fn test_field_key_part() { // series_id BIGINT NOT NULL, // PRIMARY KEY(series_key), KEY index_id(series_id)); // ``` -#[test] +#[tokio::test] #[serial] -fn test_series_key_table() { - let client = 
utils::common::build_normal_client(); - +async fn test_series_key_table() { + let client_handle = task::spawn_blocking(|| utils::common::build_normal_client()); + let client = client_handle.await.unwrap(); let cse_table = "cse_series_key_to_id_1"; + client .truncate_table(cse_table) .expect("Fail to truncate table"); client.add_row_key_element(cse_table, vec!["series_key".to_string()]); let rowk_keys = vec![Value::from("a")]; - let result = client.delete(cse_table, rowk_keys.clone()); + let result = client.delete(cse_table, rowk_keys.clone()).await; + assert!(result.is_ok()); result.unwrap(); - let result = client.insert( - cse_table, - rowk_keys.clone(), - vec!["series_id".to_owned()], - vec![Value::from(1i64)], - ); + let result = client + .insert( + cse_table, + rowk_keys.clone(), + vec!["series_id".to_owned()], + vec![Value::from(1i64)], + ) + .await; let result = result.unwrap(); assert_eq!(1i64, result); - let result = client.get(cse_table, rowk_keys.clone(), vec!["series_id".to_owned()]); + let result = client + .get(cse_table, rowk_keys.clone(), vec!["series_id".to_owned()]) + .await; let mut result = result.unwrap(); assert_eq!(1, result.len()); println!("result get:{result:?}"); @@ -419,18 +469,22 @@ fn test_series_key_table() { assert!(value.is_i64()); assert_eq!(1i64, value.as_i64()); - let result = client.update( - cse_table, - rowk_keys.clone(), - vec!["series_id".to_owned()], - vec![Value::from(3i64)], - ); + let result = client + .update( + cse_table, + rowk_keys.clone(), + vec!["series_id".to_owned()], + vec![Value::from(3i64)], + ) + .await; let result = result.unwrap(); assert_eq!(1i64, result); - let result = client.get(cse_table, rowk_keys, vec!["series_id".to_owned()]); + let result = client + .get(cse_table, rowk_keys, vec!["series_id".to_owned()]) + .await; let mut result = result.unwrap(); assert_eq!(1, result.len()); let value = result.remove("series_id").unwrap(); diff --git a/tests/test_hbase_client.rs b/tests/test_hbase_client.rs index 
e49d914..e5a3a66 100644 --- a/tests/test_hbase_client.rs +++ b/tests/test_hbase_client.rs @@ -20,7 +20,7 @@ mod utils; use obkv::{Table, Value}; -use test_log::test; +use tokio::task; // ```sql // CREATE TABLE TEST_HBASE_HASH( @@ -31,9 +31,10 @@ use test_log::test; // PRIMARY KEY (K, Q, T) // ) partition by hash(K) partitions 16; // ``` -#[test] -fn test_obtable_partition_hash_crud() { - let client = utils::common::build_hbase_client(); +#[tokio::test] +async fn test_obtable_partition_hash_crud() { + let client_handle = task::spawn_blocking(|| utils::common::build_hbase_client()); + let client = client_handle.await.unwrap(); const TEST_TABLE: &str = "TEST_HBASE_HASH"; let rowk_keys = vec![ @@ -41,20 +42,24 @@ fn test_obtable_partition_hash_crud() { Value::from("partition"), Value::from(1550225864000i64), ]; - let result = client.delete(TEST_TABLE, rowk_keys.clone()); + let result = client.delete(TEST_TABLE, rowk_keys.clone()).await; assert!(result.is_ok()); - let result = client.insert( - TEST_TABLE, - rowk_keys.clone(), - vec!["V".to_owned()], - vec![Value::from("aa")], - ); + let result = client + .insert( + TEST_TABLE, + rowk_keys.clone(), + vec!["V".to_owned()], + vec![Value::from("aa")], + ) + .await; assert!(result.is_ok()); let result = result.unwrap(); assert_eq!(1, result); - let result = client.get(TEST_TABLE, rowk_keys.clone(), vec!["V".to_owned()]); + let result = client + .get(TEST_TABLE, rowk_keys.clone(), vec!["V".to_owned()]) + .await; assert!(result.is_ok()); let mut result = result.unwrap(); assert_eq!(1, result.len()); @@ -62,12 +67,14 @@ fn test_obtable_partition_hash_crud() { assert!(value.is_bytes()); assert_eq!("aa".to_owned().into_bytes(), value.as_bytes()); - let result = client.update( - TEST_TABLE, - rowk_keys.clone(), - vec!["V".to_owned()], - vec![Value::from("bb")], - ); + let result = client + .update( + TEST_TABLE, + rowk_keys.clone(), + vec!["V".to_owned()], + vec![Value::from("bb")], + ) + .await; assert!(result.is_ok()); @@ -75,7 
+82,9 @@ fn test_obtable_partition_hash_crud() { assert_eq!(1, result); - let result = client.get(TEST_TABLE, rowk_keys, vec!["V".to_owned()]); + let result = client + .get(TEST_TABLE, rowk_keys, vec!["V".to_owned()]) + .await; assert!(result.is_ok()); let mut result = result.unwrap(); assert_eq!(1, result.len()); @@ -94,9 +103,10 @@ fn test_obtable_partition_hash_crud() { // ) DEFAULT CHARSET = utf8mb4 COLLATE UTF8MB4_BIN COMPRESSION = 'lz4_1.0' REPLICA_NUM = 3 BLOCK_SIZE = 16384 USE_BLOOM_FILTER = FALSE TABLET_SIZE = 134217728 PCTFREE = 10 // partition by key(k) partitions 15; // ``` -#[test] -fn test_obtable_partition_key_varbinary_crud() { - let client = utils::common::build_hbase_client(); +#[tokio::test] +async fn test_obtable_partition_key_varbinary_crud() { + let client_handle = task::spawn_blocking(|| utils::common::build_hbase_client()); + let client = client_handle.await.unwrap(); const TEST_TABLE: &str = "TEST_HBASE_PARTITION"; // same as java sdk, when k = partitionKey, after get_partition(&table_entry, @@ -106,21 +116,25 @@ fn test_obtable_partition_key_varbinary_crud() { Value::from("partition"), Value::from(1550225864000i64), ]; - let result = client.delete(TEST_TABLE, rowk_keys.clone()); + let result = client.delete(TEST_TABLE, rowk_keys.clone()).await; result.unwrap(); // assert!(result.is_ok()); - let result = client.insert( - TEST_TABLE, - rowk_keys.clone(), - vec!["V".to_owned()], - vec![Value::from("aa")], - ); + let result = client + .insert( + TEST_TABLE, + rowk_keys.clone(), + vec!["V".to_owned()], + vec![Value::from("aa")], + ) + .await; assert!(result.is_ok()); let result = result.unwrap(); assert_eq!(1, result); - let result = client.get(TEST_TABLE, rowk_keys.clone(), vec!["V".to_owned()]); + let result = client + .get(TEST_TABLE, rowk_keys.clone(), vec!["V".to_owned()]) + .await; assert!(result.is_ok()); let mut result = result.unwrap(); assert_eq!(1, result.len()); @@ -128,12 +142,14 @@ fn test_obtable_partition_key_varbinary_crud() { 
assert!(value.is_bytes()); assert_eq!("aa".to_owned().into_bytes(), value.as_bytes()); - let result = client.update( - TEST_TABLE, - rowk_keys.clone(), - vec!["V".to_owned()], - vec![Value::from("bb")], - ); + let result = client + .update( + TEST_TABLE, + rowk_keys.clone(), + vec!["V".to_owned()], + vec![Value::from("bb")], + ) + .await; assert!(result.is_ok()); @@ -141,7 +157,9 @@ fn test_obtable_partition_key_varbinary_crud() { assert_eq!(1, result); - let result = client.get(TEST_TABLE, rowk_keys, vec!["V".to_owned()]); + let result = client + .get(TEST_TABLE, rowk_keys, vec!["V".to_owned()]) + .await; assert!(result.is_ok()); let mut result = result.unwrap(); assert_eq!(1, result.len()); @@ -162,9 +180,10 @@ fn test_obtable_partition_key_varbinary_crud() { // REPLICA_NUM = 3 BLOCK_SIZE = 16384 USE_BLOOM_FILTER = FALSE TABLET_SIZE = // 134217728 PCTFREE = 10 partition by key(k) partitions 15; // ``` -#[test] -fn test_obtable_partition_key_varchar_crud() { - let client = utils::common::build_hbase_client(); +#[tokio::test] +async fn test_obtable_partition_key_varchar_crud() { + let client_handle = task::spawn_blocking(|| utils::common::build_hbase_client()); + let client = client_handle.await.unwrap(); const TABLE_NAME: &str = "TEST_HBASE_PARTITION"; // same as java sdk, when k = partitionKey2, part_id = 9 @@ -173,21 +192,25 @@ fn test_obtable_partition_key_varchar_crud() { Value::from("partition"), Value::from(1550225864000i64), ]; - let result = client.delete(TABLE_NAME, rowk_keys.clone()); + let result = client.delete(TABLE_NAME, rowk_keys.clone()).await; result.unwrap(); // assert!(result.is_ok()); - let result = client.insert( - TABLE_NAME, - rowk_keys.clone(), - vec!["V".to_owned()], - vec![Value::from("aa")], - ); + let result = client + .insert( + TABLE_NAME, + rowk_keys.clone(), + vec!["V".to_owned()], + vec![Value::from("aa")], + ) + .await; assert!(result.is_ok()); let result = result.unwrap(); assert_eq!(1, result); - let result = 
client.get(TABLE_NAME, rowk_keys.clone(), vec!["V".to_owned()]); + let result = client + .get(TABLE_NAME, rowk_keys.clone(), vec!["V".to_owned()]) + .await; assert!(result.is_ok()); let mut result = result.unwrap(); assert_eq!(1, result.len()); @@ -195,12 +218,14 @@ fn test_obtable_partition_key_varchar_crud() { assert!(value.is_bytes()); assert_eq!("aa".to_owned().into_bytes(), value.as_bytes()); - let result = client.update( - TABLE_NAME, - rowk_keys.clone(), - vec!["V".to_owned()], - vec![Value::from("bb")], - ); + let result = client + .update( + TABLE_NAME, + rowk_keys.clone(), + vec!["V".to_owned()], + vec![Value::from("bb")], + ) + .await; assert!(result.is_ok()); @@ -208,7 +233,9 @@ fn test_obtable_partition_key_varchar_crud() { assert_eq!(1, result); - let result = client.get(TABLE_NAME, rowk_keys, vec!["V".to_owned()]); + let result = client + .get(TABLE_NAME, rowk_keys, vec!["V".to_owned()]) + .await; assert!(result.is_ok()); let mut result = result.unwrap(); assert_eq!(1, result.len()); @@ -226,9 +253,10 @@ fn test_obtable_partition_key_varchar_crud() { // primary key(K, Q, T)) DEFAULT CHARSET = utf8mb4 COLLATE UTF8MB4_BIN COMPRESSION = 'lz4_1.0' REPLICA_NUM = 3 BLOCK_SIZE = 16384 USE_BLOOM_FILTER = FALSE TABLET_SIZE = 134217728 PCTFREE = 10 // PARTITION BY RANGE columns (K) (PARTITION p0 VALUES LESS THAN ('a'), PARTITION p1 VALUES LESS THAN ('w'), PARTITION p2 VALUES LESS THAN MAXVALUE); // ``` -#[test] -fn test_obtable_partition_range_crud() { - let client = utils::common::build_hbase_client(); +#[tokio::test] +async fn test_obtable_partition_range_crud() { + let client_handle = task::spawn_blocking(|| utils::common::build_hbase_client()); + let client = client_handle.await.unwrap(); const TABLE_NAME: &str = "TEST_HBASE_RANGE"; let rowk_keys = vec![ @@ -236,21 +264,26 @@ fn test_obtable_partition_range_crud() { Value::from("partition"), Value::from(1550225864000i64), ]; - let result = client.delete(TABLE_NAME, rowk_keys.clone()); - result.unwrap(); - 
// assert!(result.is_ok()); - - let result = client.insert( - TABLE_NAME, - rowk_keys.clone(), - vec!["V".to_owned()], - vec![Value::from("aa")], - ); + let result = client.delete(TABLE_NAME, rowk_keys.clone()).await; + assert!(result.is_ok()); + let result = result.unwrap(); + assert_eq!(1, result); + + let result = client + .insert( + TABLE_NAME, + rowk_keys.clone(), + vec!["V".to_owned()], + vec![Value::from("aa")], + ) + .await; assert!(result.is_ok()); let result = result.unwrap(); assert_eq!(1, result); - let result = client.get(TABLE_NAME, rowk_keys.clone(), vec!["V".to_owned()]); + let result = client + .get(TABLE_NAME, rowk_keys.clone(), vec!["V".to_owned()]) + .await; assert!(result.is_ok()); let mut result = result.unwrap(); assert_eq!(1, result.len()); @@ -258,12 +291,14 @@ fn test_obtable_partition_range_crud() { assert!(value.is_bytes()); assert_eq!("aa".to_owned().into_bytes(), value.as_bytes()); - let result = client.update( - TABLE_NAME, - rowk_keys.clone(), - vec!["V".to_owned()], - vec![Value::from("bb")], - ); + let result = client + .update( + TABLE_NAME, + rowk_keys.clone(), + vec!["V".to_owned()], + vec![Value::from("bb")], + ) + .await; assert!(result.is_ok()); @@ -271,7 +306,9 @@ fn test_obtable_partition_range_crud() { assert_eq!(1, result); - let result = client.get(TABLE_NAME, rowk_keys, vec!["V".to_owned()]); + let result = client + .get(TABLE_NAME, rowk_keys, vec!["V".to_owned()]) + .await; assert!(result.is_ok()); let mut result = result.unwrap(); assert_eq!(1, result.len()); diff --git a/tests/test_table_client.rs b/tests/test_table_client.rs index b382022..62f036e 100644 --- a/tests/test_table_client.rs +++ b/tests/test_table_client.rs @@ -20,31 +20,38 @@ mod utils; use obkv::{ResultCodes, Table, TableQuery, Value}; -use test_log::test; +use tokio::task; -#[test] -fn test_obtable_client_curd() { - let client = utils::common::build_normal_client(); +#[tokio::test] +async fn test_obtable_client_curd() { + let client_handle = 
task::spawn_blocking(|| utils::common::build_normal_client()); + let client = client_handle.await.unwrap(); const TEST_TABLE_NAME: &str = "test_varchar_table"; - let result = client.delete(TEST_TABLE_NAME, vec![Value::from("foo")]); + let result = client + .delete(TEST_TABLE_NAME, vec![Value::from("foo")]) + .await; assert!(result.is_ok()); - let result = client.insert( - TEST_TABLE_NAME, - vec![Value::from("foo")], - vec!["c2".to_owned()], - vec![Value::from("bar")], - ); + let result = client + .insert( + TEST_TABLE_NAME, + vec![Value::from("foo")], + vec!["c2".to_owned()], + vec![Value::from("bar")], + ) + .await; assert!(result.is_ok()); let result = result.unwrap(); assert_eq!(1, result); - let result = client.get( - TEST_TABLE_NAME, - vec![Value::from("foo")], - vec!["c2".to_owned()], - ); + let result = client + .get( + TEST_TABLE_NAME, + vec![Value::from("foo")], + vec!["c2".to_owned()], + ) + .await; assert!(result.is_ok()); let mut result = result.unwrap(); assert_eq!(1, result.len()); @@ -52,19 +59,21 @@ fn test_obtable_client_curd() { assert!(value.is_string()); assert_eq!("bar", value.as_string()); - let result = client.update( - TEST_TABLE_NAME, - vec![Value::from("foo")], - vec!["c2".to_owned()], - vec![Value::from("car")], - ); + let result = client + .update( + TEST_TABLE_NAME, + vec![Value::from("foo")], + vec!["c2".to_owned()], + vec![Value::from("car")], + ) + .await; let query = client .query(TEST_TABLE_NAME) .select(vec!["c1".to_owned(), "c2".to_owned()]) .add_scan_range(vec![Value::get_min()], true, vec![Value::get_max()], true); - let result_set = query.execute(); + let result_set = query.execute().await; assert!(result_set.is_ok()); @@ -72,21 +81,19 @@ fn test_obtable_client_curd() { assert!(result_set.cache_size() > 0); - for row in result_set { - println!("{row:?}"); - } - assert!(result.is_ok()); let result = result.unwrap(); assert_eq!(1, result); - let result = client.get( - TEST_TABLE_NAME, - vec![Value::from("foo")], - 
vec!["c2".to_owned()], - ); + let result = client + .get( + TEST_TABLE_NAME, + vec![Value::from("foo")], + vec!["c2".to_owned()], + ) + .await; assert!(result.is_ok()); let mut result = result.unwrap(); assert_eq!(1, result.len()); @@ -94,13 +101,16 @@ fn test_obtable_client_curd() { assert!(value.is_string()); assert_eq!("car", value.as_string()); - let result = client.delete(TEST_TABLE_NAME, vec![Value::from("foo")]); + let result = client + .delete(TEST_TABLE_NAME, vec![Value::from("foo")]) + .await; assert!(result.is_ok()); } -#[test] -fn test_obtable_client_batch_op() { - let client = utils::common::build_normal_client(); +#[tokio::test] +async fn test_obtable_client_batch_op() { + let client_handle = task::spawn_blocking(|| utils::common::build_normal_client()); + let client = client_handle.await.unwrap(); const TEST_TABLE_NAME: &str = "test_varchar_table"; let test_key1 = "batchop-row-key-1"; @@ -119,14 +129,16 @@ fn test_obtable_client_batch_op() { vec!["c2".to_owned()], vec![Value::from("p2")], ); - let result = client.execute_batch(TEST_TABLE_NAME, batch_op); + let result = client.execute_batch(TEST_TABLE_NAME, batch_op).await; assert!(result.is_ok()); - let result = client.get( - TEST_TABLE_NAME, - vec![Value::from(test_key1)], - vec!["c2".to_owned()], - ); + let result = client + .get( + TEST_TABLE_NAME, + vec![Value::from(test_key1)], + vec!["c2".to_owned()], + ) + .await; assert!(result.is_ok()); let mut result = result.unwrap(); assert_eq!(1, result.len()); @@ -152,14 +164,16 @@ fn test_obtable_client_batch_op() { vec![Value::from("p4")], ); - let result = client.execute_batch(TEST_TABLE_NAME, batch_op); + let result = client.execute_batch(TEST_TABLE_NAME, batch_op).await; assert!(result.is_ok()); - let result = client.get( - TEST_TABLE_NAME, - vec![Value::from(test_key2)], - vec!["c2".to_owned()], - ); + let result = client + .get( + TEST_TABLE_NAME, + vec![Value::from(test_key2)], + vec!["c2".to_owned()], + ) + .await; assert!(result.is_ok()); let 
mut result = result.unwrap(); assert_eq!(1, result.len()); @@ -186,16 +200,18 @@ fn test_obtable_client_batch_op() { vec![Value::from("p0")], ); - let result = client.execute_batch(TEST_TABLE_NAME, batch_op); + let result = client.execute_batch(TEST_TABLE_NAME, batch_op).await; assert!(result.is_err()); let code = result.err().unwrap().ob_result_code().unwrap(); assert_eq!(code, ResultCodes::OB_ERR_PRIMARY_KEY_DUPLICATE); - let result = client.get( - TEST_TABLE_NAME, - vec![Value::from(test_key2)], - vec!["c2".to_owned()], - ); + let result = client + .get( + TEST_TABLE_NAME, + vec![Value::from(test_key2)], + vec!["c2".to_owned()], + ) + .await; assert!(result.is_ok()); let mut result = result.unwrap(); assert_eq!(1, result.len()); diff --git a/tests/test_table_client_base.rs b/tests/test_table_client_base.rs index 30c5dc7..2772134 100644 --- a/tests/test_table_client_base.rs +++ b/tests/test_table_client_base.rs @@ -28,6 +28,7 @@ use std::{ }; use obkv::{error::CommonErrCode, ObTableClient, ResultCodes, Table, TableQuery, Value}; +use tokio::task; pub struct BaseTest { client: Arc, @@ -35,7 +36,7 @@ pub struct BaseTest { impl BaseTest { const ROW_NUM: usize = 400; - const THREAD_NUM: usize = 10; + const THREAD_NUM: usize = 5; pub fn new(client: ObTableClient) -> BaseTest { BaseTest { @@ -43,15 +44,14 @@ impl BaseTest { } } - pub fn test_varchar_concurrent(&self, table_name: &'static str) { + pub async fn test_varchar_concurrent(&self, table_name: &'static str) { let mut handles = vec![]; let start = SystemTime::now(); - let counter = Arc::new(AtomicUsize::new(0)); for _ in 0..BaseTest::THREAD_NUM { let client = self.client.clone(); let counter = counter.clone(); - handles.push(thread::spawn(move || { + handles.push(task::spawn(async move { for i in 0..BaseTest::ROW_NUM { let key = format!("foo{i}"); let value = format!("bar{i}"); @@ -62,12 +62,14 @@ impl BaseTest { vec!["c2".to_owned()], vec![Value::from(value.to_owned())], ) + .await .expect("fail to insert_or 
update"); assert_eq!(1, result); let start_time = SystemTime::now(); let mut result = client .get(table_name, vec![Value::from(key)], vec!["c2".to_owned()]) + .await .expect("fail to get"); let end_time = SystemTime::now(); if end_time.duration_since(start_time).unwrap().as_millis() > 500 { @@ -87,7 +89,7 @@ impl BaseTest { } for handle in handles { - handle.join().expect("should succeed to join"); + handle.await.unwrap(); } assert_eq!( BaseTest::ROW_NUM * BaseTest::THREAD_NUM, @@ -100,14 +102,14 @@ impl BaseTest { ); } - pub fn test_bigint_concurrent(&self, table_name: &'static str) { + pub async fn test_bigint_concurrent(&self, table_name: &'static str) { let mut handles = vec![]; let start = SystemTime::now(); let counter = Arc::new(AtomicUsize::new(0)); for _ in 0..10 { let client = self.client.clone(); let counter = counter.clone(); - handles.push(thread::spawn(move || { + handles.push(task::spawn(async move { for i in 0..100 { let key: i64 = i; let value = format!("value{i}"); @@ -118,11 +120,13 @@ impl BaseTest { vec!["c2".to_owned()], vec![Value::from(value.to_owned())], ) + .await .expect("fail to insert_or update"); assert_eq!(1, result); let mut result = client .get(table_name, vec![Value::from(key)], vec!["c2".to_owned()]) + .await .expect("fail to get"); assert_eq!(1, result.len()); let v = result.remove("c2").unwrap(); @@ -135,7 +139,7 @@ impl BaseTest { } for handle in handles { - handle.join().expect("should succeed to join"); + handle.await.unwrap(); } assert_eq!(1000, counter.load(Ordering::SeqCst)); println!( @@ -145,25 +149,29 @@ impl BaseTest { ); } - pub fn test_varchar_insert(&self, table_name: &str) { + pub async fn test_varchar_insert(&self, table_name: &str) { let client = &self.client; - let result = client.insert( - table_name, - vec![Value::from("foo")], - vec!["c2".to_owned()], - vec![Value::from("bar")], - ); + let result = client + .insert( + table_name, + vec![Value::from("foo")], + vec!["c2".to_owned()], + vec![Value::from("bar")], 
+ ) + .await; assert!(result.is_ok()); let result = result.unwrap(); assert_eq!(1, result); - let result = client.insert( - table_name, - vec![Value::from("foo")], - vec!["c2".to_owned()], - vec![Value::from("baz")], - ); + let result = client + .insert( + table_name, + vec![Value::from("foo")], + vec!["c2".to_owned()], + vec![Value::from("baz")], + ) + .await; let e = result.unwrap_err(); assert!(e.is_ob_exception()); @@ -172,12 +180,14 @@ impl BaseTest { e.ob_result_code().unwrap() ); - let result = client.insert( - table_name, - vec![Value::from("foo")], - vec!["c2".to_owned()], - vec![Value::from("bar")], - ); + let result = client + .insert( + table_name, + vec![Value::from("foo")], + vec!["c2".to_owned()], + vec![Value::from("bar")], + ) + .await; let e = result.unwrap_err(); assert!(e.is_ob_exception()); assert_eq!( @@ -186,12 +196,15 @@ impl BaseTest { ); } - fn assert_varchar_get_result(&self, table_name: &str, row_key: &str, expected: &str) { - let result = self.client.get( - table_name, - vec![Value::from(row_key)], - vec!["c2".to_owned()], - ); + async fn assert_varchar_get_result(&self, table_name: &str, row_key: &str, expected: &str) { + let result = self + .client + .get( + table_name, + vec![Value::from(row_key)], + vec!["c2".to_owned()], + ) + .await; assert!(result.is_ok()); let mut result = result.unwrap(); assert_eq!(1, result.len()); @@ -200,127 +213,165 @@ impl BaseTest { assert_eq!(expected, value.as_string()); } - pub fn test_varchar_get(&self, table_name: &str) { + pub async fn test_varchar_get(&self, table_name: &str) { let result = self .client - .get(table_name, vec![Value::from("bar")], vec!["c2".to_owned()]); + .get(table_name, vec![Value::from("bar")], vec!["c2".to_owned()]) + .await; assert!(result.is_ok()); assert!(result.unwrap().is_empty()); - self.assert_varchar_get_result(table_name, "foo", "bar"); + self.assert_varchar_get_result(table_name, "foo", "bar") + .await; } - pub fn test_varchar_update(&self, table_name: &str) { - let 
result = self.client.update( - table_name, - vec![Value::from("foo")], - vec!["c2".to_owned()], - vec![Value::from("baz")], - ); + pub async fn test_varchar_update(&self, table_name: &str) { + let result = self + .client + .update( + table_name, + vec![Value::from("foo")], + vec!["c2".to_owned()], + vec![Value::from("baz")], + ) + .await; assert!(result.is_ok()); assert_eq!(1, result.unwrap()); - self.assert_varchar_get_result(table_name, "foo", "baz"); + self.assert_varchar_get_result(table_name, "foo", "baz") + .await; } - pub fn test_varchar_insert_or_update(&self, table_name: &str) { - let result = self.client.insert_or_update( - table_name, - vec![Value::from("foo")], - vec!["c2".to_owned()], - vec![Value::from("quux")], - ); + pub async fn test_varchar_insert_or_update(&self, table_name: &str) { + let result = self + .client + .insert_or_update( + table_name, + vec![Value::from("foo")], + vec!["c2".to_owned()], + vec![Value::from("quux")], + ) + .await; assert!(result.is_ok()); assert_eq!(1, result.unwrap()); - self.assert_varchar_get_result(table_name, "foo", "quux"); + self.assert_varchar_get_result(table_name, "foo", "quux") + .await; - let result = self.client.insert_or_update( - table_name, - vec![Value::from("bar")], - vec!["c2".to_owned()], - vec![Value::from("baz")], - ); + let result = self + .client + .insert_or_update( + table_name, + vec![Value::from("bar")], + vec!["c2".to_owned()], + vec![Value::from("baz")], + ) + .await; assert!(result.is_ok()); assert_eq!(1, result.unwrap()); - self.assert_varchar_get_result(table_name, "bar", "baz"); + self.assert_varchar_get_result(table_name, "bar", "baz") + .await; } - pub fn test_varchar_replace(&self, table_name: &str) { - let result = self.client.replace( - table_name, - vec![Value::from("foo")], - vec!["c2".to_owned()], - vec![Value::from("bar")], - ); + pub async fn test_varchar_replace(&self, table_name: &str) { + let result = self + .client + .replace( + table_name, + vec![Value::from("foo")], + 
vec!["c2".to_owned()], + vec![Value::from("bar")], + ) + .await; assert!(result.is_ok()); assert_eq!(2, result.unwrap()); - self.assert_varchar_get_result(table_name, "foo", "bar"); + self.assert_varchar_get_result(table_name, "foo", "bar") + .await; - let result = self.client.replace( - table_name, - vec![Value::from("bar")], - vec!["c2".to_owned()], - vec![Value::from("baz")], - ); + let result = self + .client + .replace( + table_name, + vec![Value::from("bar")], + vec!["c2".to_owned()], + vec![Value::from("baz")], + ) + .await; assert!(result.is_ok()); assert_eq!(2, result.unwrap()); - self.assert_varchar_get_result(table_name, "bar", "baz"); + self.assert_varchar_get_result(table_name, "bar", "baz") + .await; - let result = self.client.replace( - table_name, - vec![Value::from("unknown")], - vec!["c2".to_owned()], - vec![Value::from("baz")], - ); + let result = self + .client + .replace( + table_name, + vec![Value::from("unknown")], + vec!["c2".to_owned()], + vec![Value::from("baz")], + ) + .await; assert!(result.is_ok()); assert_eq!(1, result.unwrap()); - self.assert_varchar_get_result(table_name, "unknown", "baz"); + self.assert_varchar_get_result(table_name, "unknown", "baz") + .await; } - pub fn test_varchar_append(&self, table_name: &str) { - let result = self.client.append( - table_name, - vec![Value::from("foo")], - vec!["c2".to_owned()], - vec![Value::from("_append")], - ); + pub async fn test_varchar_append(&self, table_name: &str) { + let result = self + .client + .append( + table_name, + vec![Value::from("foo")], + vec!["c2".to_owned()], + vec![Value::from("_append")], + ) + .await; assert!(result.is_ok()); assert_eq!(1, result.unwrap()); - self.assert_varchar_get_result(table_name, "foo", "bar_append"); + self.assert_varchar_get_result(table_name, "foo", "bar_append") + .await; } - pub fn test_varchar_increment(&self, table_name: &str) { - let result = self.client.increment( - table_name, - vec![Value::from("foo")], - vec!["c3".to_owned()], - 
vec![Value::from(10i64)], - ); + pub async fn test_varchar_increment(&self, table_name: &str) { + let result = self + .client + .increment( + table_name, + vec![Value::from("foo")], + vec!["c3".to_owned()], + vec![Value::from(10i64)], + ) + .await; assert!(result.is_ok()); assert_eq!(1, result.unwrap()); let result = self .client - .get(table_name, vec![Value::from("foo")], vec!["c3".to_owned()]); + .get(table_name, vec![Value::from("foo")], vec!["c3".to_owned()]) + .await; assert!(result.is_ok()); let mut result = result.unwrap(); assert_eq!(1, result.len()); let value = result.remove("c3").unwrap(); assert_eq!(10i64, value.as_i64()); - let result = self.client.increment( - table_name, - vec![Value::from("foo")], - vec!["c3".to_owned()], - vec![Value::from(15i64)], - ); + let result = self + .client + .increment( + table_name, + vec![Value::from("foo")], + vec!["c3".to_owned()], + vec![Value::from(15i64)], + ) + .await; assert!(result.is_ok()); assert_eq!(1, result.unwrap()); let result = self .client - .get(table_name, vec![Value::from("foo")], vec!["c3".to_owned()]); + .get(table_name, vec![Value::from("foo")], vec!["c3".to_owned()]) + .await; assert!(result.is_ok()); let mut result = result.unwrap(); assert_eq!(1, result.len()); @@ -328,52 +379,67 @@ impl BaseTest { assert_eq!(25i64, value.as_i64()); } - pub fn clean_varchar_table(&self, table_name: &str) { - let result = self.client.delete(table_name, vec![Value::from("unknown")]); + pub async fn clean_varchar_table(&self, table_name: &str) { + let result = self + .client + .delete(table_name, vec![Value::from("unknown")]) + .await; assert!(result.is_ok()); - let result = self.client.delete(table_name, vec![Value::from("foo")]); + let result = self + .client + .delete(table_name, vec![Value::from("foo")]) + .await; assert!(result.is_ok()); - let result = self.client.delete(table_name, vec![Value::from("bar")]); + let result = self + .client + .delete(table_name, vec![Value::from("bar")]) + .await; 
assert!(result.is_ok()); - let result = self.client.delete(table_name, vec![Value::from("baz")]); + let result = self + .client + .delete(table_name, vec![Value::from("baz")]) + .await; assert!(result.is_ok()); for i in 0..100 { let key = format!("foo{i}"); - let result = self.client.delete(table_name, vec![Value::from(key)]); + let result = self.client.delete(table_name, vec![Value::from(key)]).await; assert!(result.is_ok()); } } - pub fn clean_bigint_table(&self, table_name: &str) { + pub async fn clean_bigint_table(&self, table_name: &str) { for i in 0..100 { let key: i64 = i; - let result = self.client.delete(table_name, vec![Value::from(key)]); + let result = self.client.delete(table_name, vec![Value::from(key)]).await; assert!(result.is_ok()); } } - pub fn test_blob_insert(&self, table_name: &str) { + pub async fn test_blob_insert(&self, table_name: &str) { let client = &self.client; - let bs = "hello".as_bytes(); - let result = client.insert( - table_name, - vec![Value::from("foo")], - vec!["c2".to_owned()], - vec![Value::from(bs)], - ); + let result = client + .insert( + table_name, + vec![Value::from("foo")], + vec!["c2".to_owned()], + vec![Value::from(bs)], + ) + .await; assert!(result.is_ok()); let result = result.unwrap(); assert_eq!(1, result); - let result = client.insert( - table_name, - vec![Value::from("foo")], - vec!["c2".to_owned()], - vec![Value::from(bs)], - ); + let result = client + .insert( + table_name, + vec![Value::from("foo")], + vec!["c2".to_owned()], + vec![Value::from(bs)], + ) + .await; let e = result.unwrap_err(); assert!(e.is_ob_exception()); @@ -383,24 +449,29 @@ impl BaseTest { ); //test insert string - let result = client.insert( - table_name, - vec![Value::from("qux")], - vec!["c2".to_owned()], - vec![Value::from("qux")], - ); + let result = client + .insert( + table_name, + vec![Value::from("qux")], + vec!["c2".to_owned()], + vec![Value::from("qux")], + ) + .await; assert!(result.is_ok()); let result = result.unwrap(); 
assert_eq!(1, result); } - fn assert_blob_get_result(&self, table_name: &str, row_key: &str, expected: &str) { - let result = self.client.get( - table_name, - vec![Value::from(row_key)], - vec!["c2".to_owned()], - ); + async fn assert_blob_get_result(&self, table_name: &str, row_key: &str, expected: &str) { + let result = self + .client + .get( + table_name, + vec![Value::from(row_key)], + vec!["c2".to_owned()], + ) + .await; assert!(result.is_ok()); let mut result = result.unwrap(); assert_eq!(1, result.len()); @@ -409,124 +480,155 @@ impl BaseTest { assert_eq!(expected, String::from_utf8(value.as_bytes()).unwrap()); } - pub fn test_blob_get(&self, table_name: &str) { + pub async fn test_blob_get(&self, table_name: &str) { let result = self .client - .get(table_name, vec![Value::from("bar")], vec!["c2".to_owned()]); + .get(table_name, vec![Value::from("bar")], vec!["c2".to_owned()]) + .await; assert!(result.is_ok()); assert!(result.unwrap().is_empty()); - self.assert_blob_get_result(table_name, "foo", "hello"); - self.assert_blob_get_result(table_name, "qux", "qux"); + self.assert_blob_get_result(table_name, "foo", "hello") + .await; + self.assert_blob_get_result(table_name, "qux", "qux").await; } - pub fn test_blob_update(&self, table_name: &str) { - let result = self.client.update( - table_name, - vec![Value::from("foo")], - vec!["c2".to_owned()], - vec![Value::from("baz".as_bytes())], - ); + pub async fn test_blob_update(&self, table_name: &str) { + let result = self + .client + .update( + table_name, + vec![Value::from("foo")], + vec!["c2".to_owned()], + vec![Value::from("baz".as_bytes())], + ) + .await; assert!(result.is_ok()); assert_eq!(1, result.unwrap()); - self.assert_blob_get_result(table_name, "foo", "baz"); + self.assert_blob_get_result(table_name, "foo", "baz").await; - let result = self.client.update( - table_name, - vec![Value::from("qux")], - vec!["c2".to_owned()], - vec![Value::from("baz".as_bytes())], - ); + let result = self + .client + 
.update( + table_name, + vec![Value::from("qux")], + vec!["c2".to_owned()], + vec![Value::from("baz".as_bytes())], + ) + .await; assert!(result.is_ok()); assert_eq!(1, result.unwrap()); - self.assert_blob_get_result(table_name, "qux", "baz"); + self.assert_blob_get_result(table_name, "qux", "baz").await; } - pub fn test_blob_insert_or_update(&self, table_name: &str) { - let result = self.client.insert_or_update( - table_name, - vec![Value::from("foo")], - vec!["c2".to_owned()], - vec![Value::from("quux".as_bytes())], - ); + pub async fn test_blob_insert_or_update(&self, table_name: &str) { + let result = self + .client + .insert_or_update( + table_name, + vec![Value::from("foo")], + vec!["c2".to_owned()], + vec![Value::from("quux".as_bytes())], + ) + .await; assert!(result.is_ok()); assert_eq!(1, result.unwrap()); - self.assert_blob_get_result(table_name, "foo", "quux"); + self.assert_blob_get_result(table_name, "foo", "quux").await; - let result = self.client.insert_or_update( - table_name, - vec![Value::from("bar")], - vec!["c2".to_owned()], - vec![Value::from("baz")], - ); + let result = self + .client + .insert_or_update( + table_name, + vec![Value::from("bar")], + vec!["c2".to_owned()], + vec![Value::from("baz")], + ) + .await; assert!(result.is_ok()); assert_eq!(1, result.unwrap()); - self.assert_blob_get_result(table_name, "bar", "baz"); + self.assert_blob_get_result(table_name, "bar", "baz").await; } - pub fn test_blob_replace(&self, table_name: &str) { - let result = self.client.replace( - table_name, - vec![Value::from("foo")], - vec!["c2".to_owned()], - vec![Value::from("bar")], - ); + pub async fn test_blob_replace(&self, table_name: &str) { + let result = self + .client + .replace( + table_name, + vec![Value::from("foo")], + vec!["c2".to_owned()], + vec![Value::from("bar")], + ) + .await; assert!(result.is_ok()); assert_eq!(2, result.unwrap()); - self.assert_blob_get_result(table_name, "foo", "bar"); + self.assert_blob_get_result(table_name, "foo", 
"bar").await; - let result = self.client.replace( - table_name, - vec![Value::from("bar")], - vec!["c2".to_owned()], - vec![Value::from("baz")], - ); + let result = self + .client + .replace( + table_name, + vec![Value::from("bar")], + vec!["c2".to_owned()], + vec![Value::from("baz")], + ) + .await; assert!(result.is_ok()); assert_eq!(2, result.unwrap()); - self.assert_blob_get_result(table_name, "bar", "baz"); + self.assert_blob_get_result(table_name, "bar", "baz").await; - let result = self.client.replace( - table_name, - vec![Value::from("baz")], - vec!["c2".to_owned()], - vec![Value::from("baz")], - ); + let result = self + .client + .replace( + table_name, + vec![Value::from("baz")], + vec!["c2".to_owned()], + vec![Value::from("baz")], + ) + .await; assert!(result.is_ok()); assert_eq!(1, result.unwrap()); - self.assert_blob_get_result(table_name, "baz", "baz"); + self.assert_blob_get_result(table_name, "baz", "baz").await; } - pub fn clean_blob_table(&self, table_name: &str) { + pub async fn clean_blob_table(&self, table_name: &str) { self.client .delete(table_name, vec![Value::from("qux")]) + .await .expect("fail to delete row"); self.client .delete(table_name, vec![Value::from("bar")]) + .await .expect("fail to delete row"); self.client .delete(table_name, vec![Value::from("baz")]) + .await .expect("fail to delete row"); self.client .delete(table_name, vec![Value::from("foo")]) + .await .expect("fail to delete row"); } - pub fn test_varchar_exceptions(&self, table_name: &str) { + pub async fn test_varchar_exceptions(&self, table_name: &str) { // delete exception_key let result = self .client - .delete(table_name, vec![Value::from("exception_key")]); + .delete(table_name, vec![Value::from("exception_key")]) + .await; // assert result is ok assert!(result.is_ok()); //table not exists - let result = self.client.insert( - "not_exist_table", - vec![Value::from("exception_key")], - vec!["c2".to_owned()], - vec![Value::from("baz")], - ); + let result = self + 
.client + .insert( + "not_exist_table", + vec![Value::from("exception_key")], + vec!["c2".to_owned()], + vec![Value::from("baz")], + ) + .await; let e = result.unwrap_err(); assert!(e.is_ob_exception()); @@ -536,12 +638,15 @@ impl BaseTest { ); // column not found - let result = self.client.insert( - table_name, - vec![Value::from("exception_key")], - vec!["c4".to_owned()], - vec![Value::from("baz")], - ); + let result = self + .client + .insert( + table_name, + vec![Value::from("exception_key")], + vec!["c4".to_owned()], + vec![Value::from("baz")], + ) + .await; let e = result.unwrap_err(); assert!(e.is_ob_exception()); @@ -562,46 +667,56 @@ impl BaseTest { // assert!(e.is_ob_exception()); // assert_eq!(ResultCodes::OB_OBJ_TYPE_ERROR, e.ob_result_code().unwrap()); - let result = self.client.insert( - table_name, - vec![Value::from("exception_key")], - vec!["c2".to_owned()], - vec![Value::from(1)], - ); + let result = self + .client + .insert( + table_name, + vec![Value::from("exception_key")], + vec!["c2".to_owned()], + vec![Value::from(1)], + ) + .await; let e = result.unwrap_err(); assert!(e.is_ob_exception()); assert_eq!(ResultCodes::OB_OBJ_TYPE_ERROR, e.ob_result_code().unwrap()); // null value - let result = self.client.insert( - table_name, - vec![Value::from("exception_key")], - vec!["c2".to_owned()], - vec![Value::default()], - ); + let result = self + .client + .insert( + table_name, + vec![Value::from("exception_key")], + vec!["c2".to_owned()], + vec![Value::default()], + ) + .await; // assert result is ok assert!(result.is_ok()); // delete exception_key let result = self .client - .delete(table_name, vec![Value::from("exception_key")]); + .delete(table_name, vec![Value::from("exception_key")]) + .await; // assert result is ok assert!(result.is_ok()); } - pub fn insert_query_test_record(&self, table_name: &str, row_key: &str, value: &str) { - let result = self.client.insert_or_update( - table_name, - vec![Value::from(row_key)], - vec!["c2".to_owned()], - 
vec![Value::from(value)], - ); + pub async fn insert_query_test_record(&self, table_name: &str, row_key: &str, value: &str) { + let result = self + .client + .insert_or_update( + table_name, + vec![Value::from(row_key)], + vec!["c2".to_owned()], + vec![Value::from(value)], + ) + .await; assert!(result.is_ok()); assert_eq!(1, result.unwrap()); } - pub fn test_stream_query(&self, table_name: &str) { + pub async fn test_stream_query(&self, table_name: &str) { println!("test_stream_query for table name: {table_name} is unsupported now"); // for i in 0..10 { // let key = format!("{}", i); @@ -637,12 +752,12 @@ impl BaseTest { // assert_eq!(10, i); } - pub fn test_query(&self, table_name: &str) { - self.insert_query_test_record(table_name, "123", "123c2"); - self.insert_query_test_record(table_name, "124", "124c2"); - self.insert_query_test_record(table_name, "234", "234c2"); - self.insert_query_test_record(table_name, "456", "456c2"); - self.insert_query_test_record(table_name, "567", "567c2"); + pub async fn test_query(&self, table_name: &str) { + self.insert_query_test_record(table_name, "123", "123c2").await; + self.insert_query_test_record(table_name, "124", "124c2").await; + self.insert_query_test_record(table_name, "234", "234c2").await; + self.insert_query_test_record(table_name, "456", "456c2").await; + self.insert_query_test_record(table_name, "567", "567c2").await; let query = self .client @@ -656,17 +771,18 @@ impl BaseTest { true, ); - let result_set = query.execute(); + let result_set = query.execute().await; assert!(result_set.is_ok()); - let result_set = result_set.unwrap(); + let mut result_set = result_set.unwrap(); assert_eq!(5, result_set.cache_size()); - for (i, row) in result_set.enumerate() { - assert!(row.is_ok()); - let mut row = row.unwrap(); + for i in 0..5 { + let result = result_set.next().await.unwrap(); + assert!(result.is_ok()); + let mut row = result.unwrap(); match i { 0 => assert_eq!("123c2", row.remove("c2").unwrap().as_string()), 1 => 
assert_eq!("124c2", row.remove("c2").unwrap().as_string()), @@ -691,17 +807,18 @@ impl BaseTest { true, ); - let result_set = query.execute(); + let result_set = query.execute().await; assert!(result_set.is_ok()); - let result_set = result_set.unwrap(); + let mut result_set = result_set.unwrap(); assert_eq!(5, result_set.cache_size()); - for (i, row) in result_set.enumerate() { - assert!(row.is_ok()); - let mut row = row.unwrap(); + for i in 0..5 { + let result = result_set.next().await.unwrap(); + assert!(result.is_ok()); + let mut row = result.unwrap(); match i { 0 => assert_eq!("567c2", row.remove("c2").unwrap().as_string()), 1 => assert_eq!("456c2", row.remove("c2").unwrap().as_string()), @@ -725,7 +842,7 @@ impl BaseTest { true, ); - let result_set = query.execute(); + let result_set = query.execute().await; assert!(result_set.is_ok()); @@ -737,6 +854,7 @@ impl BaseTest { "123c2", result_set .next() + .await .unwrap() .unwrap() .remove("c2") @@ -756,17 +874,18 @@ impl BaseTest { true, ); - let result_set = query.execute(); + let result_set = query.execute().await; assert!(result_set.is_ok()); - let result_set = result_set.unwrap(); + let mut result_set = result_set.unwrap(); assert_eq!(3, result_set.cache_size()); - for (i, row) in result_set.enumerate() { - assert!(row.is_ok()); - let mut row = row.unwrap(); + for i in 0..3 { + let result = result_set.next().await.unwrap(); + assert!(result.is_ok()); + let mut row = result.unwrap(); match i { 0 => assert_eq!("124c2", row.remove("c2").unwrap().as_string()), 1 => assert_eq!("234c2", row.remove("c2").unwrap().as_string()), @@ -787,17 +906,18 @@ impl BaseTest { false, ); - let result_set = query.execute(); + let result_set = query.execute().await; assert!(result_set.is_ok()); - let result_set = result_set.unwrap(); + let mut result_set = result_set.unwrap(); assert_eq!(3, result_set.cache_size()); - for (i, row) in result_set.enumerate() { - assert!(row.is_ok()); - let mut row = row.unwrap(); + for i in 0..3 { + 
let result = result_set.next().await.unwrap(); + assert!(result.is_ok()); + let mut row = result.unwrap(); match i { 0 => assert_eq!("124c2", row.remove("c2").unwrap().as_string()), 1 => assert_eq!("234c2", row.remove("c2").unwrap().as_string()), @@ -818,17 +938,18 @@ impl BaseTest { true, ); - let result_set = query.execute(); + let result_set = query.execute().await; assert!(result_set.is_ok()); - let result_set = result_set.unwrap(); + let mut result_set = result_set.unwrap(); assert_eq!(4, result_set.cache_size()); - for (i, row) in result_set.enumerate() { - assert!(row.is_ok()); - let mut row = row.unwrap(); + for i in 0..4 { + let result = result_set.next().await.unwrap(); + assert!(result.is_ok()); + let mut row = result.unwrap(); match i { 0 => assert_eq!("124c2", row.remove("c2").unwrap().as_string()), 1 => assert_eq!("234c2", row.remove("c2").unwrap().as_string()), @@ -850,17 +971,18 @@ impl BaseTest { false, ); - let result_set = query.execute(); + let result_set = query.execute().await; assert!(result_set.is_ok()); - let result_set = result_set.unwrap(); + let mut result_set = result_set.unwrap(); assert_eq!(4, result_set.cache_size()); - for (i, row) in result_set.enumerate() { - assert!(row.is_ok()); - let mut row = row.unwrap(); + for i in 0..4 { + let result = result_set.next().await.unwrap(); + assert!(result.is_ok()); + let mut row = result.unwrap(); match i { 0 => assert_eq!("123c2", row.remove("c2").unwrap().as_string()), 1 => assert_eq!("124c2", row.remove("c2").unwrap().as_string()), @@ -882,17 +1004,18 @@ impl BaseTest { true, ); - let result_set = query.execute(); + let result_set = query.execute().await; assert!(result_set.is_ok()); - let result_set = result_set.unwrap(); + let mut result_set = result_set.unwrap(); assert_eq!(2, result_set.cache_size()); - for (i, row) in result_set.enumerate() { - assert!(row.is_ok()); - let mut row = row.unwrap(); + for i in 0..2 { + let result = result_set.next().await.unwrap(); + 
assert!(result.is_ok()); + let mut row = result.unwrap(); match i { 0 => assert_eq!("123c2", row.remove("c2").unwrap().as_string()), 1 => assert_eq!("124c2", row.remove("c2").unwrap().as_string()), @@ -918,13 +1041,15 @@ impl BaseTest { true, ); - let result_set = query.execute(); + let result_set = query.execute().await; assert!(result_set.is_ok()); - let result_set = result_set.unwrap(); + let mut result_set = result_set.unwrap(); assert_eq!(4, result_set.cache_size()); - for (i, row) in result_set.enumerate() { - assert!(row.is_ok()); - let mut row = row.unwrap(); + + for i in 0..4 { + let result = result_set.next().await.unwrap(); + assert!(result.is_ok()); + let mut row = result.unwrap(); match i { 0 => assert_eq!("123c2", row.remove("c2").unwrap().as_string()), 1 => assert_eq!("124c2", row.remove("c2").unwrap().as_string()), @@ -947,7 +1072,7 @@ impl BaseTest { true, ); - let result_set = query.execute(); + let result_set = query.execute().await; assert!(result_set.is_ok()); @@ -959,6 +1084,7 @@ impl BaseTest { "124c2", result_set .next() + .await .unwrap() .unwrap() .remove("c2") @@ -979,7 +1105,7 @@ impl BaseTest { true, ); - let result_set = query.execute(); + let result_set = query.execute().await; assert!(result_set.is_ok()); let result_set = result_set.unwrap(); assert_eq!(0, result_set.cache_size()); @@ -1004,13 +1130,15 @@ impl BaseTest { true, ); - let query_result_set = query.execute(); + let query_result_set = query.execute().await; assert!(query_result_set.is_ok()); - let query_result_set = query_result_set.unwrap(); + let mut query_result_set = query_result_set.unwrap(); assert_eq!(4, query_result_set.cache_size()); - for (i, row) in query_result_set.enumerate() { - assert!(row.is_ok()); - let mut row = row.unwrap(); + + for i in 0..4 { + let result = query_result_set.next().await.unwrap(); + assert!(result.is_ok()); + let mut row = result.unwrap(); match i { 0 => assert_eq!("123c2", row.remove("c2").unwrap().as_string()), 1 => 
assert_eq!("124c2", row.remove("c2").unwrap().as_string()), @@ -1040,12 +1168,12 @@ impl BaseTest { true, ); - let result_set = query.execute(); + let result_set = query.execute().await; assert!(result_set.is_ok()); let mut result_set = result_set.unwrap(); assert_eq!(0, result_set.cache_size()); for i in 0..1 { - let row = result_set.next().unwrap(); + let row = result_set.next().await.unwrap(); assert!(row.is_ok()); let mut row = row.unwrap(); match i { @@ -1053,10 +1181,10 @@ impl BaseTest { _ => unreachable!(), } } - let ret = result_set.close(); + let ret = result_set.async_close().await; assert!(ret.is_ok()); - match result_set.next() { + match result_set.next().await { Some(Err(e)) => { assert!(e.is_common_err()); assert_eq!(CommonErrCode::AlreadyClosed, e.common_err_code().unwrap()); @@ -1125,22 +1253,22 @@ impl BaseTest { true, ); - let result_set = query.execute(); + let result_set = query.execute().await; assert!(result_set.is_ok()); let mut result_set = result_set.unwrap(); assert_eq!(0, result_set.cache_size()); - let row = result_set.next(); + let row = result_set.next().await; assert!(row.is_some()); - thread::sleep(Duration::from_secs(2)); - let row = result_set.next(); + tokio::time::sleep(Duration::from_secs(2)).await; + let row = result_set.next().await; assert!(row.is_some()); let row = row.unwrap(); - println!( - "TODO: could not find data, row error code: {:?}", - row.unwrap_err().ob_result_code() - ); - // assert!(row.is_ok()); + // println!( + // "TODO: could not find data, row error code: {:?}", + // row.unwrap_err().ob_result_code() + // ); + assert!(row.is_ok()); } } diff --git a/tests/test_table_client_hash.rs b/tests/test_table_client_hash.rs index ecbea2d..d94cb6f 100644 --- a/tests/test_table_client_hash.rs +++ b/tests/test_table_client_hash.rs @@ -22,7 +22,7 @@ mod utils; use obkv::{Table, Value}; use serial_test_derive::serial; -use test_log::test; +use tokio::task; // ```sql // CREATE TABLE `TEST_VARCHAR_TABLE_HASH_CONCURRENT` ( @@ 
-32,16 +32,17 @@ use test_log::test; // ) DEFAULT CHARSET = utf8mb4 COMPRESSION = 'lz4_1.0' REPLICA_NUM = 3 BLOCK_SIZE = 16384 USE_BLOOM_FILTER = FALSE TABLET_SIZE = 134217728 PCTFREE = 10 // partition by hash(c1) partitions 16; // ``` -#[test] +#[tokio::test] #[serial] -fn test_concurrent() { - let client = utils::common::build_normal_client(); +async fn test_concurrent() { + let client_handle = task::spawn_blocking(|| utils::common::build_normal_client()); + let client = client_handle.await.unwrap(); const TABLE_NAME: &str = "TEST_VARCHAR_TABLE_HASH_CONCURRENT"; client.add_row_key_element(TABLE_NAME, vec!["c1".to_string()]); let test = test_table_client_base::BaseTest::new(client); - test.test_bigint_concurrent(TABLE_NAME); - test.clean_bigint_table(TABLE_NAME); + test.test_bigint_concurrent(TABLE_NAME).await; + test.clean_bigint_table(TABLE_NAME).await; } // ```sql @@ -52,9 +53,10 @@ fn test_concurrent() { // PRIMARY KEY (`c1`, `c1sk`)) DEFAULT CHARSET = utf8mb4 ROW_FORMAT = DYNAMIC COMPRESSION = 'lz4_1.0' REPLICA_NUM = 3 BLOCK_SIZE = 16384 USE_BLOOM_FILTER = FALSE TABLET_SIZE = 134217728 PCTFREE = 10 // partition by hash(`c1`) partitions 16; // ``` -#[test] -fn test_obtable_client_hash() { - let client = utils::common::build_normal_client(); +#[tokio::test] +async fn test_obtable_client_hash() { + let client_handle = task::spawn_blocking(|| utils::common::build_normal_client()); + let client = client_handle.await.unwrap(); const TABLE_NAME: &str = "TEST_TABLE_BATCH_HASH"; client.add_row_key_element(TABLE_NAME, vec!["c1".to_string(), "c1sb".to_string()]); @@ -74,6 +76,6 @@ fn test_obtable_client_hash() { vec!["c2".to_owned()], vec![Value::from("batchValue_1")], ); - let result = client.execute_batch(TABLE_NAME, batch_op); + let result = client.execute_batch(TABLE_NAME, batch_op).await; assert!(result.is_ok()); } diff --git a/tests/test_table_client_key.rs b/tests/test_table_client_key.rs index 34a0f47..a017a13 100644 --- a/tests/test_table_client_key.rs +++ 
b/tests/test_table_client_key.rs @@ -23,7 +23,7 @@ mod utils; use obkv::{ObTableClient, Table, TableQuery, Value}; use rand::{distributions::Alphanumeric, thread_rng, Rng}; use serial_test_derive::serial; -use test_log::test; +use tokio::task; // ```sql // CREATE TABLE `TEST_VARCHAR_TABLE_KEY` ( @@ -33,24 +33,25 @@ use test_log::test; // ) DEFAULT CHARSET = utf8mb4 COMPRESSION = 'lz4_1.0' REPLICA_NUM = 3 BLOCK_SIZE = 16384 USE_BLOOM_FILTER = FALSE TABLET_SIZE = 134217728 PCTFREE = 10 // partition by key(c1) partitions 16; // ``` -#[test] -fn test_varchar_all_ob() { - let client = utils::common::build_normal_client(); +#[tokio::test] +async fn test_varchar_all_ob() { + let client_handle = task::spawn_blocking(|| utils::common::build_normal_client()); + let client = client_handle.await.unwrap(); const TABLE_NAME: &str = "TEST_VARCHAR_TABLE_KEY"; client.add_row_key_element(TABLE_NAME, vec!["c1".to_string()]); let test = test_table_client_base::BaseTest::new(client); - test.clean_varchar_table(TABLE_NAME); - test.test_varchar_insert(TABLE_NAME); + test.clean_varchar_table(TABLE_NAME).await; + test.test_varchar_insert(TABLE_NAME).await; for _ in 0..10 { - test.test_varchar_get(TABLE_NAME); + test.test_varchar_get(TABLE_NAME).await; } - test.test_varchar_update(TABLE_NAME); - test.test_varchar_insert_or_update(TABLE_NAME); - test.test_varchar_replace(TABLE_NAME); - test.test_varchar_append(TABLE_NAME); - test.test_varchar_increment(TABLE_NAME); - test.clean_varchar_table(TABLE_NAME); + test.test_varchar_update(TABLE_NAME).await; + test.test_varchar_insert_or_update(TABLE_NAME).await; + test.test_varchar_replace(TABLE_NAME).await; + test.test_varchar_append(TABLE_NAME).await; + test.test_varchar_increment(TABLE_NAME).await; + test.clean_varchar_table(TABLE_NAME).await; } // ```sql @@ -61,41 +62,50 @@ fn test_varchar_all_ob() { // ) DEFAULT CHARSET = utf8mb4 COMPRESSION = 'lz4_1.0' REPLICA_NUM = 3 BLOCK_SIZE = 16384 USE_BLOOM_FILTER = FALSE TABLET_SIZE = 134217728 PCTFREE 
= 10 // partition by key(c1) partitions 16; // ``` -#[test] -fn test_blob_all() { - let client = utils::common::build_normal_client(); +#[tokio::test] +async fn test_blob_all() { + let client_handle = task::spawn_blocking(|| utils::common::build_normal_client()); + let client = client_handle.await.unwrap(); const TABLE_NAME: &str = "TEST_BLOB_TABLE_KEY"; client.add_row_key_element(TABLE_NAME, vec!["c1".to_string()]); let test = test_table_client_base::BaseTest::new(client); - test.clean_blob_table(TABLE_NAME); - test.test_blob_insert(TABLE_NAME); + test.clean_blob_table(TABLE_NAME).await; + test.test_blob_insert(TABLE_NAME).await; for _ in 0..10 { - test.test_blob_get(TABLE_NAME); + test.test_blob_get(TABLE_NAME).await; } - test.test_blob_update(TABLE_NAME); - test.test_blob_insert_or_update(TABLE_NAME); - test.test_blob_replace(TABLE_NAME); - test.clean_blob_table(TABLE_NAME); + test.test_blob_update(TABLE_NAME).await; + test.test_blob_insert_or_update(TABLE_NAME).await; + test.test_blob_replace(TABLE_NAME).await; + test.clean_blob_table(TABLE_NAME).await; } -#[test] -fn test_ob_exceptions() { - let client = utils::common::build_normal_client(); +#[tokio::test] +async fn test_ob_exceptions() { + let client_handle = task::spawn_blocking(|| utils::common::build_normal_client()); + let client = client_handle.await.unwrap(); const TABLE_NAME: &str = "TEST_VARCHAR_TABLE_KEY"; client.add_row_key_element(TABLE_NAME, vec!["c1".to_string()]); let test = test_table_client_base::BaseTest::new(client); - test.test_varchar_exceptions(TABLE_NAME); + test.test_varchar_exceptions(TABLE_NAME).await; } -fn insert_query_test_record(client: &ObTableClient, table_name: &str, row_key: &str, value: &str) { - let result = client.insert_or_update( - table_name, - vec![Value::from(row_key)], - vec!["c2".to_owned()], - vec![Value::from(value)], - ); +async fn insert_query_test_record( + client: &ObTableClient, + table_name: &str, + row_key: &str, + value: &str, +) { + let result = client + 
.insert_or_update( + table_name, + vec![Value::from(row_key)], + vec!["c2".to_owned()], + vec![Value::from(value)], + ) + .await; assert!(result.is_ok()); assert_eq!(1, result.unwrap()); } @@ -108,18 +118,22 @@ fn insert_query_test_record(client: &ObTableClient, table_name: &str, row_key: & // ) DEFAULT CHARSET = utf8mb4 COMPRESSION = 'lz4_1.0' REPLICA_NUM = 3 BLOCK_SIZE = 16384 USE_BLOOM_FILTER = FALSE TABLET_SIZE = 134217728 PCTFREE = 10 // partition by key(c1) partitions 16; // ``` -#[test] +#[tokio::test] #[serial] -fn test_query() { - let client = utils::common::build_normal_client(); +async fn test_query() { + let client_handle = task::spawn_blocking(|| utils::common::build_normal_client()); + let client = client_handle.await.unwrap(); + const TABLE_NAME: &str = "TEST_QUERY_TABLE_KEY"; + clean_table(client.to_owned(), TABLE_NAME); client.add_row_key_element(TABLE_NAME, vec!["c1".to_string()]); - insert_query_test_record(&client, TABLE_NAME, "123", "123c2"); - insert_query_test_record(&client, TABLE_NAME, "124", "124c2"); - insert_query_test_record(&client, TABLE_NAME, "234", "234c2"); - insert_query_test_record(&client, TABLE_NAME, "456", "456c2"); - insert_query_test_record(&client, TABLE_NAME, "567", "567c2"); + insert_query_test_record(&client, TABLE_NAME, "123", "123c2").await; + insert_query_test_record(&client, TABLE_NAME, "124", "124c2").await; + insert_query_test_record(&client, TABLE_NAME, "234", "234c2").await; + insert_query_test_record(&client, TABLE_NAME, "456", "456c2").await; + insert_query_test_record(&client, TABLE_NAME, "567", "567c2").await; + println!("test_query"); let query = client .query(TABLE_NAME) .select(vec!["c2".to_owned()]) @@ -131,7 +145,8 @@ fn test_query() { true, ); - let result_set = query.execute(); + let result_set = query.execute().await; + println!("result_set: {:?}", result_set); assert!(result_set.is_ok()); let result_set = result_set.unwrap(); assert_eq!(5, result_set.cache_size()); @@ -148,7 +163,7 @@ fn test_query() 
{ true, ); - let result_set = query.execute(); + let result_set = query.execute().await; assert!(result_set.is_ok()); let mut result_set = result_set.unwrap(); assert_eq!(1, result_set.cache_size()); @@ -156,6 +171,7 @@ fn test_query() { "123c2", result_set .next() + .await .unwrap() .unwrap() .remove("c2") @@ -175,7 +191,7 @@ fn test_query() { true, ); - let result_set = query.execute(); + let result_set = query.execute().await; assert!(result_set.is_ok()); let result_set = result_set.unwrap(); assert_eq!(3, result_set.cache_size()); @@ -192,7 +208,7 @@ fn test_query() { false, ); - let result_set = query.execute(); + let result_set = query.execute().await; assert!(result_set.is_ok()); let result_set = result_set.unwrap(); assert_eq!(3, result_set.cache_size()); @@ -209,7 +225,7 @@ fn test_query() { true, ); - let result_set = query.execute(); + let result_set = query.execute().await; assert!(result_set.is_ok()); let result_set = result_set.unwrap(); assert_eq!(4, result_set.cache_size()); @@ -226,7 +242,7 @@ fn test_query() { false, ); - let result_set = query.execute(); + let result_set = query.execute().await; assert!(result_set.is_ok()); let result_set = result_set.unwrap(); assert_eq!(4, result_set.cache_size()); @@ -243,7 +259,7 @@ fn test_query() { true, ); - let result_set = query.execute(); + let result_set = query.execute().await; assert!(result_set.is_ok()); let result_set = result_set.unwrap(); assert_eq!(2, result_set.cache_size()); @@ -266,7 +282,7 @@ fn test_query() { true, ); - let result_set = query.execute(); + let result_set = query.execute().await; assert!(result_set.is_ok()); let result_set = result_set.unwrap(); assert_eq!(4, result_set.cache_size()); @@ -283,7 +299,7 @@ fn test_query() { true, ); - let result_set = query.execute(); + let result_set = query.execute().await; assert!(result_set.is_ok()); let mut result_set = result_set.unwrap(); assert_eq!(1, result_set.cache_size()); @@ -291,6 +307,7 @@ fn test_query() { "124c2", result_set 
.next() + .await .unwrap() .unwrap() .remove("c2") @@ -310,7 +327,7 @@ fn test_query() { true, ); - let result_set = query.execute(); + let result_set = query.execute().await; assert!(result_set.is_ok()); let result_set = result_set.unwrap(); assert_eq!(0, result_set.cache_size()); @@ -326,15 +343,16 @@ fn test_query() { // ) DEFAULT CHARSET = utf8mb4 COMPRESSION = 'lz4_1.0' REPLICA_NUM = 3 BLOCK_SIZE = 16384 USE_BLOOM_FILTER = FALSE TABLET_SIZE = 134217728 PCTFREE = 10 // partition by key(c1) partitions 16; // ``` -#[test] +#[tokio::test] #[serial] -fn test_stream_query() { - let client = utils::common::build_normal_client(); +async fn test_stream_query() { + let client_handle = task::spawn_blocking(|| utils::common::build_normal_client()); + let client = client_handle.await.unwrap(); const TABLE_NAME: &str = "TEST_STREAM_QUERY_TABLE_KEY"; client.add_row_key_element(TABLE_NAME, vec!["c1".to_string()]); let test = test_table_client_base::BaseTest::new(client); - test.test_stream_query(TABLE_NAME); + test.test_stream_query(TABLE_NAME).await; } // ```sql @@ -345,15 +363,16 @@ fn test_stream_query() { // ) DEFAULT CHARSET = utf8mb4 COMPRESSION = 'lz4_1.0' REPLICA_NUM = 3 BLOCK_SIZE = 16384 USE_BLOOM_FILTER = FALSE TABLET_SIZE = 134217728 PCTFREE = 10 // partition by key(c1) partitions 16; // ``` -#[test] -fn test_concurrent() { - let client = utils::common::build_normal_client(); +#[tokio::test] +async fn test_concurrent() { + let client_handle = task::spawn_blocking(|| utils::common::build_normal_client()); + let client = client_handle.await.unwrap(); const TABLE_NAME: &str = "TEST_VARCHAR_TABLE_KEY_CONCURRENT"; client.add_row_key_element(TABLE_NAME, vec!["c1".to_string()]); let test = test_table_client_base::BaseTest::new(client); - test.test_varchar_concurrent(TABLE_NAME); - test.clean_varchar_table(TABLE_NAME); + test.test_varchar_concurrent(TABLE_NAME).await; + test.clean_varchar_table(TABLE_NAME).await; } // ```sql @@ -364,9 +383,10 @@ fn test_concurrent() { // 
PRIMARY KEY (`c1`, `c1sk`)) DEFAULT CHARSET = utf8mb4 ROW_FORMAT = DYNAMIC COMPRESSION = 'lz4_1.0' REPLICA_NUM = 3 BLOCK_SIZE = 16384 USE_BLOOM_FILTER = FALSE TABLET_SIZE = 134217728 PCTFREE = 10 // partition by key(`c1`) partitions 16; // ``` -#[test] -fn test_batch() { - let client = utils::common::build_normal_client(); +#[tokio::test] +async fn test_batch() { + let client_handle = task::spawn_blocking(|| utils::common::build_normal_client()); + let client = client_handle.await.unwrap(); const TABLE_NAME: &str = "TEST_TABLE_BATCH_KEY"; client.add_row_key_element(TABLE_NAME, vec!["c1".to_string(), "c1sb".to_string()]); @@ -384,7 +404,7 @@ fn test_batch() { vec!["c2".to_owned()], vec![Value::from("batchValue_1")], ); - let result = client.execute_batch(TABLE_NAME, batch_op); + let result = client.execute_batch(TABLE_NAME, batch_op).await; assert!(result.is_ok()); } @@ -393,27 +413,32 @@ fn clean_table(client: ObTableClient, table_name: &str) { client.execute_sql(&sql).expect("clean table failed"); } -#[test] -fn test_partition_varchar_general_ci() { - let client = utils::common::build_normal_client(); +#[tokio::test] +async fn test_partition_varchar_general_ci() { + let client_handle = task::spawn_blocking(|| utils::common::build_normal_client()); + let client = client_handle.await.unwrap(); const VARCHAR_TABLE_NAME: &str = "TEST_TABLE_PARTITION_VARCHAR_KEY"; client.add_row_key_element(VARCHAR_TABLE_NAME, vec!["c1".to_string()]); // test varchar partition for i in 926..977 { let rowkey = format!("{i}"); - let result = client.delete(VARCHAR_TABLE_NAME, vec![Value::from(rowkey.to_owned())]); + let result = client + .delete(VARCHAR_TABLE_NAME, vec![Value::from(rowkey.to_owned())]) + .await; assert!(result.is_ok()); let insert_sql = format!("insert into {VARCHAR_TABLE_NAME} values({rowkey}, 'value');"); client.execute_sql(&insert_sql).expect("fail to insert"); } for i in 926..977 { let rowkey = format!("{i}"); - let result = client.get( - VARCHAR_TABLE_NAME, - 
vec![Value::from(rowkey.to_owned())], - vec!["c2".to_owned()], - ); + let result = client + .get( + VARCHAR_TABLE_NAME, + vec![Value::from(rowkey.to_owned())], + vec!["c2".to_owned()], + ) + .await; assert!(result.is_ok()); let result = result.unwrap(); assert_eq!(1, result.len()); @@ -425,24 +450,29 @@ fn test_partition_varchar_general_ci() { .take(512) .collect(); let sql_rowkey = format!("'{rowkey}'"); - let result = client.delete(VARCHAR_TABLE_NAME, vec![Value::from(rowkey.to_owned())]); + let result = client + .delete(VARCHAR_TABLE_NAME, vec![Value::from(rowkey.to_owned())]) + .await; assert!(result.is_ok()); let insert_sql = format!("insert into {VARCHAR_TABLE_NAME} values({sql_rowkey}, 'value');"); client.execute_sql(&insert_sql).expect("fail to insert"); - let result = client.get( - VARCHAR_TABLE_NAME, - vec![Value::from(rowkey.to_owned())], - vec!["c2".to_owned()], - ); + let result = client + .get( + VARCHAR_TABLE_NAME, + vec![Value::from(rowkey.to_owned())], + vec!["c2".to_owned()], + ) + .await; assert!(result.is_ok()); let result = result.unwrap(); assert_eq!(1, result.len()); } } -#[test] -fn test_partition_complex() { - let client = utils::common::build_normal_client(); +#[tokio::test] +async fn test_partition_complex() { + let client_handle = task::spawn_blocking(|| utils::common::build_normal_client()); + let client = client_handle.await.unwrap(); const TABLE_NAME: &str = "TEST_TABLE_PARTITION_COMPLEX_KEY"; client.add_row_key_element( TABLE_NAME, @@ -462,37 +492,42 @@ fn test_partition_complex() { .collect(); let sql_rowkeyc2 = format!("'{rowkey_c2}'"); let sql_rowkeyc3 = format!("'{rowkey_c3}'"); - let result = client.delete( - TABLE_NAME, - vec![ - Value::from(i as u64), - Value::from(rowkey_c2.to_owned()), - Value::from(rowkey_c3.to_owned()), - ], - ); + let result = client + .delete( + TABLE_NAME, + vec![ + Value::from(i as u64), + Value::from(rowkey_c2.to_owned()), + Value::from(rowkey_c3.to_owned()), + ], + ) + .await; 
assert!(result.is_ok()); let insert_sql = format!( "insert into {TABLE_NAME} values({i}, {sql_rowkeyc2}, {sql_rowkeyc3}, 'value');" ); client.execute_sql(&insert_sql).expect("fail to insert"); - let result = client.get( - TABLE_NAME, - vec![ - Value::from(i as u64), - Value::from(rowkey_c2.to_owned()), - Value::from(rowkey_c3.to_owned()), - ], - vec!["c4".to_owned()], - ); + let result = client + .get( + TABLE_NAME, + vec![ + Value::from(i as u64), + Value::from(rowkey_c2.to_owned()), + Value::from(rowkey_c3.to_owned()), + ], + vec!["c4".to_owned()], + ) + .await; assert!(result.is_ok()); let result = result.unwrap(); assert_eq!(1, result.len()); } } -#[test] -fn test_sub_partition_complex() { - let client = utils::common::build_normal_client(); +#[tokio::test] +async fn test_sub_partition_complex() { + let client_handle = task::spawn_blocking(|| utils::common::build_normal_client()); + let client = client_handle.await.unwrap(); const TABLE_NAME: &str = "TEST_TABLE_SUB_PARTITION_COMPLEX_KEY"; client.add_row_key_element( TABLE_NAME, @@ -505,6 +540,7 @@ fn test_sub_partition_complex() { ); clean_table(client.to_owned(), TABLE_NAME); + for i in 0..16 { let rowkey_c2: String = thread_rng() .sample_iter(&Alphanumeric) @@ -524,28 +560,32 @@ fn test_sub_partition_complex() { let sql_rowkeyc2 = format!("'{rowkey_c2}'"); let sql_rowkeyc3 = format!("'{rowkey_c3}'"); let sql_rowkeyc4 = format!("'{rowkey_c4}'"); - let result = client.delete( - TABLE_NAME, - vec![ - Value::from(i as i64), - Value::from(rowkey_c2.to_owned()), - Value::from(rowkey_c3.to_owned()), - Value::from(rowkey_c4.to_owned()), - ], - ); + let result = client + .delete( + TABLE_NAME, + vec![ + Value::from(i as i64), + Value::from(rowkey_c2.to_owned()), + Value::from(rowkey_c3.to_owned()), + Value::from(rowkey_c4.to_owned()), + ], + ) + .await; assert!(result.is_ok()); - let insert_sql = format!("insert into {TABLE_NAME} values({i}, {sql_rowkeyc2}, {sql_rowkeyc3}, {sql_rowkeyc4}, 'value');"); + let insert_sql 
= format!("insert into {TABLE_NAME} values({i}, {sql_rowkeyc2}, {sql_rowkeyc3}, {sql_rowkeyc4}, 'value');"); client.execute_sql(&insert_sql).expect("fail to insert"); - let result = client.get( - TABLE_NAME, - vec![ - Value::from(i as i64), - Value::from(rowkey_c2.to_owned()), - Value::from(rowkey_c3.to_owned()), - Value::from(rowkey_c4.to_owned()), - ], - vec!["c5".to_owned()], - ); + let result = client + .get( + TABLE_NAME, + vec![ + Value::from(i as i64), + Value::from(rowkey_c2.to_owned()), + Value::from(rowkey_c3.to_owned()), + Value::from(rowkey_c4.to_owned()), + ], + vec!["c5".to_owned()], + ) + .await; assert!(result.is_ok()); let result = result.unwrap(); assert_eq!(1, result.len()); diff --git a/tests/test_table_client_range.rs b/tests/test_table_client_range.rs index 65f3cdc..c31fa98 100644 --- a/tests/test_table_client_range.rs +++ b/tests/test_table_client_range.rs @@ -22,7 +22,7 @@ mod utils; use obkv::{Table, Value}; use serial_test_derive::serial; -use test_log::test; +use tokio::task; // ```sql // CREATE TABLE `TEST_VARCHAR_TABLE_RANGE` ( @@ -32,24 +32,25 @@ use test_log::test; // ) DEFAULT CHARSET = utf8mb4 COMPRESSION = 'lz4_1.0' REPLICA_NUM = 3 BLOCK_SIZE = 16384 USE_BLOOM_FILTER = FALSE TABLET_SIZE = 134217728 PCTFREE = 10 // partition by range columns (c1) (PARTITION p0 VALUES LESS THAN ('a'), PARTITION p1 VALUES LESS THAN ('w'), PARTITION p2 VALUES LESS THAN MAXVALUE); // ``` -#[test] -fn test_varchar_all_ob() { - let client = utils::common::build_normal_client(); +#[tokio::test] +async fn test_varchar_all_ob() { + let client_handle = task::spawn_blocking(|| utils::common::build_normal_client()); + let client = client_handle.await.unwrap(); const TABLE_NAME: &str = "TEST_VARCHAR_TABLE_RANGE"; client.add_row_key_element(TABLE_NAME, vec!["c1".to_string()]); let test = test_table_client_base::BaseTest::new(client); - test.clean_varchar_table(TABLE_NAME); - test.test_varchar_insert(TABLE_NAME); + test.clean_varchar_table(TABLE_NAME).await; + 
test.test_varchar_insert(TABLE_NAME).await; for _ in 0..10 { - test.test_varchar_get(TABLE_NAME); + test.test_varchar_get(TABLE_NAME).await; } - test.test_varchar_update(TABLE_NAME); - test.test_varchar_insert_or_update(TABLE_NAME); - test.test_varchar_replace(TABLE_NAME); - test.test_varchar_append(TABLE_NAME); - test.test_varchar_increment(TABLE_NAME); - test.clean_varchar_table(TABLE_NAME); + test.test_varchar_update(TABLE_NAME).await; + test.test_varchar_insert_or_update(TABLE_NAME).await; + test.test_varchar_replace(TABLE_NAME).await; + test.test_varchar_append(TABLE_NAME).await; + test.test_varchar_increment(TABLE_NAME).await; + test.clean_varchar_table(TABLE_NAME).await; } // ```sql @@ -60,32 +61,34 @@ fn test_varchar_all_ob() { // ) DEFAULT CHARSET = utf8mb4 COMPRESSION = 'lz4_1.0' REPLICA_NUM = 3 BLOCK_SIZE = 16384 USE_BLOOM_FILTER = FALSE TABLET_SIZE = 134217728 PCTFREE = 10 // partition by range columns (c1) (PARTITION p0 VALUES LESS THAN ('a'), PARTITION p1 VALUES LESS THAN ('w'), PARTITION p2 VALUES LESS THAN MAXVALUE); // ``` -#[test] -fn test_blob_all() { - let client = utils::common::build_normal_client(); +#[tokio::test] +async fn test_blob_all() { + let client_handle = task::spawn_blocking(|| utils::common::build_normal_client()); + let client = client_handle.await.unwrap(); const TABLE_NAME: &str = "TEST_BLOB_TABLE_RANGE"; client.add_row_key_element(TABLE_NAME, vec!["c1".to_string()]); let test = test_table_client_base::BaseTest::new(client); - test.clean_blob_table(TABLE_NAME); - test.test_blob_insert(TABLE_NAME); + test.clean_blob_table(TABLE_NAME).await; + test.test_blob_insert(TABLE_NAME).await; for _ in 0..10 { - test.test_blob_get(TABLE_NAME); + test.test_blob_get(TABLE_NAME).await; } - test.test_blob_update(TABLE_NAME); - test.test_blob_insert_or_update(TABLE_NAME); - test.test_blob_replace(TABLE_NAME); - test.clean_blob_table(TABLE_NAME); + test.test_blob_update(TABLE_NAME).await; + test.test_blob_insert_or_update(TABLE_NAME).await; + 
test.test_blob_replace(TABLE_NAME).await; + test.clean_blob_table(TABLE_NAME).await; } -#[test] -fn test_ob_exceptions() { - let client = utils::common::build_normal_client(); +#[tokio::test] +async fn test_ob_exceptions() { + let client_handle = task::spawn_blocking(|| utils::common::build_normal_client()); + let client = client_handle.await.unwrap(); const TABLE_NAME: &str = "TEST_VARCHAR_TABLE_RANGE"; client.add_row_key_element(TABLE_NAME, vec!["c1".to_string()]); let test = test_table_client_base::BaseTest::new(client); - test.test_varchar_exceptions(TABLE_NAME); + test.test_varchar_exceptions(TABLE_NAME).await; } // ```sql @@ -96,15 +99,16 @@ fn test_ob_exceptions() { // ) DEFAULT CHARSET = utf8mb4 COMPRESSION = 'lz4_1.0' REPLICA_NUM = 3 BLOCK_SIZE = 16384 USE_BLOOM_FILTER = FALSE TABLET_SIZE = 134217728 PCTFREE = 10 // partition by range columns (c1) (PARTITION p0 VALUES LESS THAN ('a'), PARTITION p1 VALUES LESS THAN ('w'), PARTITION p2 VALUES LESS THAN MAXVALUE); // ``` -#[test] +#[tokio::test] #[serial] -fn test_query() { - let client = utils::common::build_normal_client(); +async fn test_query() { + let client_handle = task::spawn_blocking(|| utils::common::build_normal_client()); + let client = client_handle.await.unwrap(); const TABLE_NAME: &str = "TEST_QUERY_TABLE_RANGE"; client.add_row_key_element(TABLE_NAME, vec!["c1".to_string()]); let test = test_table_client_base::BaseTest::new(client); - test.test_query(TABLE_NAME); + test.test_query(TABLE_NAME).await; } // ```sql @@ -115,15 +119,16 @@ fn test_query() { // ) DEFAULT CHARSET = utf8mb4 COMPRESSION = 'lz4_1.0' REPLICA_NUM = 3 BLOCK_SIZE = 16384 USE_BLOOM_FILTER = FALSE TABLET_SIZE = 134217728 PCTFREE = 10 // partition by range columns (c1) (PARTITION p0 VALUES LESS THAN ('a'), PARTITION p1 VALUES LESS THAN ('w'), PARTITION p2 VALUES LESS THAN MAXVALUE); // ``` -#[test] +#[tokio::test] #[serial] -fn test_stream_query() { - let client = utils::common::build_normal_client(); +async fn 
test_stream_query() { + let client_handle = task::spawn_blocking(|| utils::common::build_normal_client()); + let client = client_handle.await.unwrap(); const TABLE_NAME: &str = "TEST_STREAM_QUERY_TABLE_RANGE"; client.add_row_key_element(TABLE_NAME, vec!["c1".to_string()]); let test = test_table_client_base::BaseTest::new(client); - test.test_stream_query(TABLE_NAME); + test.test_stream_query(TABLE_NAME).await; } // ```sql @@ -134,15 +139,16 @@ fn test_stream_query() { // ) DEFAULT CHARSET = utf8mb4 COMPRESSION = 'lz4_1.0' REPLICA_NUM = 3 BLOCK_SIZE = 16384 USE_BLOOM_FILTER = FALSE TABLET_SIZE = 134217728 PCTFREE = 10 // partition by range columns (c1) (PARTITION p0 VALUES LESS THAN ('a'), PARTITION p1 VALUES LESS THAN ('w'), PARTITION p2 VALUES LESS THAN MAXVALUE); // ``` -#[test] -fn test_concurrent() { - let client = utils::common::build_normal_client(); +#[tokio::test] +async fn test_concurrent() { + let client_handle = task::spawn_blocking(|| utils::common::build_normal_client()); + let client = client_handle.await.unwrap(); const TABLE_NAME: &str = "TEST_VARCHAR_TABLE_RANGE_CONCURRENT"; client.add_row_key_element(TABLE_NAME, vec!["c1".to_string()]); let test = test_table_client_base::BaseTest::new(client); - test.test_varchar_concurrent(TABLE_NAME); - test.clean_varchar_table(TABLE_NAME); + test.test_varchar_concurrent(TABLE_NAME).await; + test.clean_varchar_table(TABLE_NAME).await; } // ```sql @@ -161,11 +167,12 @@ fn test_concurrent() { // partition by range(`c1`)(partition p0 values less than(200), // partition p1 values less than(500), partition p2 values less than(900)); // ``` -#[test] -fn test_obtable_client_batch_atomic_op() { +#[tokio::test] +async fn test_obtable_client_batch_atomic_op() { + let client_handle = task::spawn_blocking(|| utils::common::build_normal_client()); + let client = client_handle.await.unwrap(); const TABLE_NAME: &str = "TEST_TABLE_BATCH_RANGE"; const TABLE_NAME_COMPLEX: &str = "TEST_TABLE_BATCH_RANGE_COMPLEX"; - let client = 
utils::common::build_normal_client(); client.add_row_key_element(TABLE_NAME, vec!["c1".to_string()]); client.add_row_key_element( TABLE_NAME_COMPLEX, @@ -189,14 +196,16 @@ fn test_obtable_client_batch_atomic_op() { vec!["c2".to_owned()], vec![Value::from("batchValue_1")], ); - let result = client.execute_batch(TABLE_NAME, batch_op); + let result = client.execute_batch(TABLE_NAME, batch_op).await; assert!(result.is_ok()); - let result = client.get( - TABLE_NAME, - vec![Value::from(test_key0)], - vec!["c2".to_owned()], - ); + let result = client + .get( + TABLE_NAME, + vec![Value::from(test_key0)], + vec!["c2".to_owned()], + ) + .await; assert!(result.is_ok()); let mut result = result.unwrap(); assert_eq!(1, result.len()); @@ -223,18 +232,20 @@ fn test_obtable_client_batch_atomic_op() { vec![Value::from("AlterValue_3")], ); - let result = client.execute_batch(TABLE_NAME, batch_op); + let result = client.execute_batch(TABLE_NAME, batch_op).await; assert!(result.is_err()); assert_eq!( obkv::ResultCodes::OB_ERR_PRIMARY_KEY_DUPLICATE, result.expect_err("Common").ob_result_code().unwrap() ); - let result = client.get( - TABLE_NAME, - vec![Value::from(test_key0)], - vec!["c2".to_owned()], - ); + let result = client + .get( + TABLE_NAME, + vec![Value::from(test_key0)], + vec!["c2".to_owned()], + ) + .await; assert!(result.is_ok()); let mut result = result.unwrap(); assert_eq!(1, result.len()); @@ -262,7 +273,7 @@ fn test_obtable_client_batch_atomic_op() { vec![Value::from("AlterValue_1")], ); - let result = client.execute_batch(TABLE_NAME, batch_op); + let result = client.execute_batch(TABLE_NAME, batch_op).await; assert!(result.is_err()); assert_eq!( obkv::ResultCodes::OB_INVALID_PARTITION, @@ -283,6 +294,6 @@ fn test_obtable_client_batch_atomic_op() { vec!["c2".to_owned()], vec![Value::from("batchValue_1")], ); - let result = client.execute_batch(TABLE_NAME_COMPLEX, batch_op); + let result = client.execute_batch(TABLE_NAME_COMPLEX, batch_op).await; 
assert!(result.is_ok()); } diff --git a/tests/test_table_client_sql.rs b/tests/test_table_client_sql.rs index 67929ee..4c2e4c8 100644 --- a/tests/test_table_client_sql.rs +++ b/tests/test_table_client_sql.rs @@ -19,11 +19,13 @@ #[allow(unused)] mod utils; -use obkv::{Table, Value}; +use obkv::{ObTableClient, Table, Value}; +use tokio::task; -#[test] -fn test_execute_sql() { - let client = utils::common::build_normal_client(); +#[tokio::test] +async fn test_execute_sql() { + let client_handle = task::spawn_blocking(|| utils::common::build_normal_client()); + let client = client_handle.await.unwrap(); let test_table_name = "test_execute_sql"; let create_table = format!("create table IF NOT EXISTS {test_table_name}(id int, PRIMARY KEY(id));"); @@ -32,9 +34,10 @@ fn test_execute_sql() { .expect("fail to create table"); } -#[test] -fn test_check_table_exists() { - let client = utils::common::build_normal_client(); +#[tokio::test] +async fn test_check_table_exists() { + let client_handle = task::spawn_blocking(|| utils::common::build_normal_client()); + let client = client_handle.await.unwrap(); let test_table_name = "test_check_table_exists"; let drop_table = format!("drop table IF EXISTS {test_table_name};"); @@ -60,48 +63,54 @@ fn test_check_table_exists() { assert!(exists, "should exists"); } -#[test] -fn test_truncate_table() { - let client = utils::common::build_normal_client(); +async fn truncate_table(client: &ObTableClient, test_table_name: &str) { + client + .truncate_table(test_table_name) + .expect("Fail to truncate first test table"); + + let result = client + .get( + test_table_name, + vec![Value::from("foo")], + vec!["c2".to_owned()], + ) + .await + .expect("Fail to get row"); + assert!(result.is_empty()); + + let result = client + .insert( + test_table_name, + vec![Value::from("foo")], + vec!["c2".to_owned()], + vec![Value::from("bar")], + ) + .await + .expect("Fail to insert row"); + assert_eq!(result, 1); + + client + .truncate_table(test_table_name) + 
.expect("Fail to truncate first test table"); + + let result = client + .get( + test_table_name, + vec![Value::from("foo")], + vec!["c2".to_owned()], + ) + .await + .expect("Fail to get row"); + assert!(result.is_empty()); +} + +#[tokio::test] +async fn test_truncate_table() { + let client_handle = task::spawn_blocking(|| utils::common::build_normal_client()); + let client = client_handle.await.unwrap(); let test_table_name = "test_varchar_table"; - let truncate_once = || { - client - .truncate_table(test_table_name) - .expect("Fail to truncate first test table"); - - let result = client - .get( - test_table_name, - vec![Value::from("foo")], - vec!["c2".to_owned()], - ) - .expect("Fail to get row"); - assert!(result.is_empty()); - - let result = client - .insert( - test_table_name, - vec![Value::from("foo")], - vec!["c2".to_owned()], - vec![Value::from("bar")], - ) - .expect("Fail to insert row"); - assert_eq!(result, 1); - - client - .truncate_table(test_table_name) - .expect("Fail to truncate first test table"); - - let result = client - .get( - test_table_name, - vec![Value::from("foo")], - vec!["c2".to_owned()], - ) - .expect("Fail to get row"); - assert!(result.is_empty()); - }; + for _ in 0..1 { - truncate_once(); + truncate_table(&client, test_table_name).await; } } diff --git a/ycsb-rs/Makefile b/ycsb-rs/Makefile index d166479..dfa8d93 100644 --- a/ycsb-rs/Makefile +++ b/ycsb-rs/Makefile @@ -5,7 +5,7 @@ export OBKV_YCSB_BINARY ?= $(ROOT)/../target/$(MODE)/obkv-ycsb database ?= obkv workload ?= $(ROOT)/workloads/workload_obkv.toml -threads ?= 200 +threads ?= 400 build-test: cargo build --$(MODE) diff --git a/ycsb-rs/src/db.rs b/ycsb-rs/src/db.rs index 6da4590..a39ee36 100644 --- a/ycsb-rs/src/db.rs +++ b/ycsb-rs/src/db.rs @@ -21,10 +21,9 @@ pub trait DB { ) -> Result<()>; } -pub fn create_db(db: &str, config: Arc) -> Result> { +pub fn create_db(db: &str, _config: Arc) -> Result> { match db { "sqlite" => Ok(Rc::new(SQLite::new()?)), - "obkv" => 
Ok(Rc::new(OBKVClient::build_normal_client(config)?)), db => Err(anyhow!("{} is an invalid database name", db)), } } diff --git a/ycsb-rs/src/main.rs b/ycsb-rs/src/main.rs index b34c18c..5ce9d3f 100644 --- a/ycsb-rs/src/main.rs +++ b/ycsb-rs/src/main.rs @@ -1,4 +1,5 @@ use std::{cell::RefCell, fs, rc::Rc, sync::Arc, thread, time::Instant}; +use std::sync::Mutex; use anyhow::{bail, Result}; use obkv::dump_metrics; @@ -19,6 +20,7 @@ pub mod obkv_client; pub mod properties; pub mod sqlite; pub mod workload; +mod runtime; #[derive(StructOpt, Debug)] #[structopt(name = "ycsb")] @@ -45,20 +47,20 @@ fn run(wl: Arc, db: Rc, rng: Rc>, operat } } -fn load_ob(wl: Arc, db: Arc, operation_count: usize) { +async fn load_ob(wl: Arc, db: Arc, operation_count: usize) { for _ in 0..operation_count { - wl.ob_insert(db.clone()); + wl.ob_insert(db.clone()).await; } } -fn run_ob( +async fn run_ob( wl: Arc, db: Arc, - rng: Rc>, + rng: Arc>, operation_count: usize, ) { for _ in 0..operation_count { - wl.ob_transaction(rng.clone(), db.clone()); + wl.ob_transaction(rng.clone(), db.clone()).await; } } @@ -84,6 +86,7 @@ fn main() -> Result<()> { let actual_client_count = opt.threads / props.obkv_client_reuse; for cmd in opt.commands { let start = Instant::now(); + let mut tasks = vec![]; let mut threads = vec![]; println!( "Database: {database}, Command: {cmd}, Counts Per Threads: {thread_operation_count}" @@ -93,6 +96,7 @@ fn main() -> Result<()> { props.obkv_client_reuse ); if database.eq_ignore_ascii_case("obkv") { + let runtimes = runtime::build_ycsb_runtimes(props.clone()); for _client_idx in 0..actual_client_count { let database = database.clone(); let db = db::create_ob(&database, config.clone()).unwrap(); @@ -100,17 +104,23 @@ fn main() -> Result<()> { let db = db.clone(); let wl = wl.clone(); let cmd = cmd.clone(); - threads.push(thread::spawn(move || { - let rng = Rc::new(RefCell::new(SmallRng::from_entropy())); + let runtime = runtimes.default_runtime.clone(); + 
tasks.push(runtime.spawn(async move { + let rng = Arc::new(Mutex::new(SmallRng::from_entropy())); db.init().unwrap(); match &cmd[..] { - "load" => load_ob(wl.clone(), db, thread_operation_count), - "run" => run_ob(wl.clone(), db, rng, thread_operation_count), + "load" => load_ob(wl.clone(), db, thread_operation_count).await, + "run" => run_ob(wl.clone(), db, rng, thread_operation_count).await, cmd => panic!("invalid command: {cmd}"), }; })); } } + runtimes.block_runtime.block_on(async move { + for task in tasks { + task.await.expect("task failed"); + } + }); } else { for _ in 0..opt.threads { let database = database.clone(); @@ -130,9 +140,9 @@ fn main() -> Result<()> { }; })); } - } - for t in threads { - let _ = t.join(); + for t in threads { + let _ = t.join(); + } } let runtime = start.elapsed().as_millis(); println!("[OVERALL], ThreadCount, {}", opt.threads); diff --git a/ycsb-rs/src/obkv_client.rs b/ycsb-rs/src/obkv_client.rs index b6e558c..51cbcd2 100644 --- a/ycsb-rs/src/obkv_client.rs +++ b/ycsb-rs/src/obkv_client.rs @@ -22,7 +22,7 @@ use anyhow::Result; use obkv::error::CommonErrCode; use obkv::{Builder, ClientConfig, ObTableClient, RunningMode, Table, TableQuery, Value}; -use crate::{db::DB, properties::Properties}; +use crate::{properties::Properties}; const PRIMARY_KEY: &str = "ycsb_key"; const COLUMN_NAMES: [&str; 10] = [ @@ -127,14 +127,12 @@ impl OBKVClient { pub fn build_hbase_client(config: Arc) -> Result { Self::build_client(config, RunningMode::HBase) } -} -impl DB for OBKVClient { - fn init(&self) -> Result<()> { + pub fn init(&self) -> Result<()> { Ok(()) } - fn insert(&self, table: &str, key: &str, values: &HashMap<&str, String>) -> Result<()> { + pub async fn insert(&self, table: &str, key: &str, values: &HashMap<&str, String>) -> Result<()> { let mut columns: Vec = Vec::new(); let mut properties: Vec = Vec::new(); for (key, value) in values { @@ -149,6 +147,7 @@ impl DB for OBKVClient { columns, properties, ) + .await .expect("fail to 
insert_or update"); assert_eq!(1, result); @@ -156,19 +155,19 @@ impl DB for OBKVClient { } #[allow(unused)] - fn read(&self, table: &str, key: &str, result: &mut HashMap) -> Result<()> { + pub async fn read(&self, table: &str, key: &str, result: &mut HashMap) -> Result<()> { let result = self.client.get( table, vec![Value::from(key)], COLUMN_NAMES.iter().map(|s| s.to_string()).collect(), - ); + ).await; assert!(result.is_ok()); assert_eq!(10, result?.len()); Ok(()) } - fn update(&self, table: &str, key: &str, values: &HashMap<&str, String>) -> Result<()> { + pub async fn update(&self, table: &str, key: &str, values: &HashMap<&str, String>) -> Result<()> { let mut columns: Vec = Vec::new(); let mut properties: Vec = Vec::new(); for (key, value) in values { @@ -183,6 +182,7 @@ impl DB for OBKVClient { columns, properties, ) + .await .expect("fail to insert_or update"); assert_eq!(10, result); @@ -190,14 +190,14 @@ impl DB for OBKVClient { } #[allow(unused)] - fn scan( + pub async fn scan( &self, table: &str, startkey: &str, endkey: &str, result: &mut HashMap, ) -> Result<()> { - let result = self + let query = self .client .query(table) .select(COLUMN_NAMES.iter().map(|s| s.to_string()).collect()) @@ -207,8 +207,8 @@ impl DB for OBKVClient { true, vec![Value::from(endkey)], true, - ) - .execute(); + ); + let result = query.execute().await; assert!(result.is_ok()); Ok(()) } diff --git a/ycsb-rs/src/properties.rs b/ycsb-rs/src/properties.rs index 369e297..0f748bc 100644 --- a/ycsb-rs/src/properties.rs +++ b/ycsb-rs/src/properties.rs @@ -112,6 +112,10 @@ fn conn_writer_thread_num_default() -> usize { 4 } +fn ycsb_thread_num_default() -> usize { + 16 +} + #[derive(Deserialize, Debug)] pub struct Properties { #[serde(default = "zero_u64", rename = "insertstart")] @@ -219,4 +223,6 @@ pub struct Properties { rename = "conn_writer_thread_num" )] pub conn_writer_thread_num: usize, + #[serde(default = "ycsb_thread_num_default", rename = "ycsb_thread_num")] + pub 
ycsb_thread_num: usize, } diff --git a/ycsb-rs/src/runtime.rs b/ycsb-rs/src/runtime.rs new file mode 100644 index 0000000..ebecb1f --- /dev/null +++ b/ycsb-rs/src/runtime.rs @@ -0,0 +1,45 @@ +/*- + * #%L + * OBKV Table Client Framework + * %% + * Copyright (C) 2021 OceanBase + * %% + * OBKV Table Client Framework is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the + * Mulan PSL v2. You may obtain a copy of Mulan PSL v2 at: + * http://license.coscl.org.cn/MulanPSL2 + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY + * KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO + * NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + * #L% + */ + +use std::sync::Arc; +use obkv::runtime; +use crate::properties::Properties; + +/// OBKV Table Runtime +#[derive(Clone, Debug)] +pub struct ObYCSBRuntimes { + /// Default runtime tasks + pub default_runtime: runtime::RuntimeRef, + /// Runtime for block_on + pub block_runtime: runtime::RuntimeRef, +} + +fn build_runtime(name: &str, threads_num: usize) -> runtime::Runtime { + runtime::Builder::default() + .worker_threads(threads_num) + .thread_name(name) + .enable_all() + .build() + .expect("Failed to create runtime") +} + +pub fn build_ycsb_runtimes(props: Arc) -> ObYCSBRuntimes { + ObYCSBRuntimes { + default_runtime: Arc::new(build_runtime("ycsb-default", props.ycsb_thread_num)), + block_runtime: Arc::new(build_runtime("ycsb-block", 1)), + } +} \ No newline at end of file diff --git a/ycsb-rs/src/workload.rs b/ycsb-rs/src/workload.rs index c2d94d0..92bb462 100644 --- a/ycsb-rs/src/workload.rs +++ b/ycsb-rs/src/workload.rs @@ -1,15 +1,13 @@ mod core_workload; -use std::{cell::RefCell, rc::Rc, sync::Arc}; +use std::{cell::RefCell, rc::Rc}; pub use core_workload::CoreWorkload; use rand::rngs::SmallRng; -use crate::{db::DB, obkv_client::OBKVClient}; +use crate::{db::DB}; pub trait 
Workload { fn do_insert(&self, db: Rc); fn do_transaction(&self, rng: Rc>, db: Rc); - fn ob_insert(&self, db: Arc); - fn ob_transaction(&self, rng: Rc>, db: Arc); } diff --git a/ycsb-rs/src/workload/core_workload.rs b/ycsb-rs/src/workload/core_workload.rs index f08a36a..a974656 100644 --- a/ycsb-rs/src/workload/core_workload.rs +++ b/ycsb-rs/src/workload/core_workload.rs @@ -110,7 +110,7 @@ impl CoreWorkload { db.insert(&self.table, &dbkey, &values).unwrap(); } - fn ob_transaction_insert(&self, db: Arc) { + async fn ob_transaction_insert(&self, db: Arc) { let keynum = self.next_key_num(); let dbkey = format!("{}", fnvhash64(keynum)); let mut values = HashMap::new(); @@ -124,7 +124,7 @@ impl CoreWorkload { .sample_string::(&mut self.rng.lock().unwrap(), field_len as usize); values.insert(&field_name[..], s); } - db.insert(&self.table, &dbkey, &values).unwrap(); + db.insert(&self.table, &dbkey, &values).await.unwrap(); } fn do_transaction_read(&self, db: Rc) { @@ -135,11 +135,11 @@ impl CoreWorkload { // TODO: verify rows } - fn ob_transaction_read(&self, db: Arc) { + async fn ob_transaction_read(&self, db: Arc) { let keynum = self.next_key_num(); let dbkey = format!("{}", fnvhash64(keynum)); let mut result = HashMap::new(); - db.read(&self.table, &dbkey, &mut result).unwrap(); + db.read(&self.table, &dbkey, &mut result).await.unwrap(); // TODO: verify rows } @@ -160,7 +160,7 @@ impl CoreWorkload { db.update(&self.table, &dbkey, &values).unwrap(); } - fn ob_transaction_update(&self, db: Arc) { + async fn ob_transaction_update(&self, db: Arc) { let keynum = self.next_key_num(); let dbkey = format!("{}", fnvhash64(keynum)); let mut values = HashMap::new(); @@ -174,15 +174,15 @@ impl CoreWorkload { .sample_string::(&mut self.rng.lock().unwrap(), field_len as usize); values.insert(&field_name[..], s); } - db.update(&self.table, &dbkey, &values).unwrap(); + db.update(&self.table, &dbkey, &values).await.unwrap(); } - fn ob_transaction_scan(&self, db: Arc) { + async fn 
ob_transaction_scan(&self, db: Arc) { let start = self.next_key_num(); let dbstart = format!("{}", fnvhash64(start)); let dbend = format!("{}", fnvhash64(start)); let mut result = HashMap::new(); - db.scan(&self.table, &dbstart, &dbend, &mut result).unwrap(); + db.scan(&self.table, &dbstart, &dbend, &mut result).await.unwrap(); } fn next_key_num(&self) -> u64 { @@ -194,10 +194,8 @@ impl CoreWorkload { .unwrap() .next_value(&mut self.rng.lock().unwrap()) } -} -impl Workload for CoreWorkload { - fn do_insert(&self, db: Rc) { + pub async fn ob_insert(&self, db: Arc) { let dbkey = self .key_sequence .lock() @@ -215,10 +213,35 @@ impl Workload for CoreWorkload { .sample_string::(&mut self.rng.lock().unwrap(), field_len as usize); values.insert(&field_name[..], s); } - db.insert(&self.table, &dbkey, &values).unwrap(); + db.insert(&self.table, &dbkey, &values).await.unwrap(); } - fn ob_insert(&self, db: Arc) { + pub async fn ob_transaction(&self, rng: Arc>, db: Arc) { + let op = self + .operation_chooser + .lock() + .unwrap() + .next_value(&mut *rng.lock().unwrap()); + match op { + CoreOperation::Insert => { + self.ob_transaction_insert(db).await; + } + CoreOperation::Read => { + self.ob_transaction_read(db).await; + } + CoreOperation::Update => { + self.ob_transaction_update(db).await; + } + CoreOperation::Scan => { + self.ob_transaction_scan(db).await; + } + _ => todo!(), + } + } +} + +impl Workload for CoreWorkload { + fn do_insert(&self, db: Rc) { let dbkey = self .key_sequence .lock() @@ -258,29 +281,6 @@ impl Workload for CoreWorkload { _ => todo!(), } } - - fn ob_transaction(&self, rng: Rc>, db: Arc) { - let op = self - .operation_chooser - .lock() - .unwrap() - .next_value(rng.borrow_mut().deref_mut()); - match op { - CoreOperation::Insert => { - self.ob_transaction_insert(db); - } - CoreOperation::Read => { - self.ob_transaction_read(db); - } - CoreOperation::Update => { - self.ob_transaction_update(db); - } - CoreOperation::Scan => { - 
self.ob_transaction_scan(db); - } - _ => todo!(), - } - } } // http://en.wikipedia.org/wiki/Fowler_Noll_Vo_hash diff --git a/ycsb-rs/workloads/workload_obkv.toml b/ycsb-rs/workloads/workload_obkv.toml index 1b33790..6398dd9 100644 --- a/ycsb-rs/workloads/workload_obkv.toml +++ b/ycsb-rs/workloads/workload_obkv.toml @@ -50,7 +50,9 @@ rpc_retry_interval = 0 refresh_workers_num = 1 max_conns_per_server = 10 min_idle_conns_per_server = 10 -conn_init_thread_num = 1 +ycsb_thread_num = 10 + +conn_init_thread_num = 2 conn_reader_thread_num = 6 conn_writer_thread_num = 4 From 8f1304b3b75f0e756012693a4b7302cc74233891 Mon Sep 17 00:00:00 2001 From: "zeli.lwb" Date: Tue, 6 Jun 2023 15:49:50 +0800 Subject: [PATCH 2/9] [Feat] async client --- benches/concurrent_insert/mod.rs | 20 +- docs/simple_demo/simple_operation/demo.md | 47 ++--- src/client/mod.rs | 19 +- src/client/query.rs | 13 +- src/client/table.rs | 217 +++------------------- src/client/table_client.rs | 108 +++++------ src/lib.rs | 2 - src/rpc/conn_pool.rs | 6 +- tests/test_cse_table.rs | 22 +-- tests/test_hbase_client.rs | 10 +- tests/test_table_client.rs | 6 +- tests/test_table_client_base.rs | 17 +- tests/test_table_client_hash.rs | 6 +- tests/test_table_client_key.rs | 24 +-- tests/test_table_client_range.rs | 16 +- tests/test_table_client_sql.rs | 8 +- ycsb-rs/src/main.rs | 12 +- ycsb-rs/src/obkv_client.rs | 38 +++- ycsb-rs/src/runtime.rs | 4 +- ycsb-rs/src/workload.rs | 2 +- ycsb-rs/src/workload/core_workload.rs | 6 +- 21 files changed, 233 insertions(+), 370 deletions(-) diff --git a/benches/concurrent_insert/mod.rs b/benches/concurrent_insert/mod.rs index 96b9f3a..ffab463 100644 --- a/benches/concurrent_insert/mod.rs +++ b/benches/concurrent_insert/mod.rs @@ -17,9 +17,10 @@ extern crate obkv; -use std::{sync::Arc, thread, time}; +use std::{sync::Arc, time}; -use obkv::{serde_obkv::value::Value, Builder, ObTableClient, RunningMode, Table}; +use obkv::{serde_obkv::value::Value, Builder, ObTableClient, 
RunningMode}; +use tokio::task; // TODO: use test conf to control which environments to test. const TEST_FULL_USER_NAME: &str = "test"; @@ -54,11 +55,11 @@ const TABLE_NAME: &str = "series_key_to_id_0"; // PRIMARY KEY(series_key), // KEY index_id(series_id) // ); -fn concurrent_insert(client: Arc) { +async fn concurrent_insert(client: Arc) { let mut thds = Vec::with_capacity(20); for i in 0..50 { let client = client.clone(); - let thd = thread::spawn(move || { + let thd = task::spawn(async move { for j in i * 100..(i * 100 + 50) { let series_key = format!("series_key_test_padding_padding_{j}"); let series_id = j * j; @@ -69,6 +70,7 @@ fn concurrent_insert(client: Arc) { vec!["series_id".to_owned()], vec![Value::from(series_id as i64)], ) + .await .unwrap_or_else(|err| { panic!("fail to insert row:{series_key} {series_id}, err:{err}") }); @@ -78,18 +80,20 @@ fn concurrent_insert(client: Arc) { } for (i, thd) in thds.into_iter().enumerate() { - thd.join() + thd.await .unwrap_or_else(|_| panic!("thread#{i} fail to join")); } } -fn main() { - let client = build_client(RunningMode::Normal); +#[tokio::main] +async fn main() { + let client_handle = task::spawn_blocking(|| build_client(RunningMode::Normal)); + let client = client_handle.await.unwrap(); client .truncate_table(TABLE_NAME) .expect("fail to truncate the table"); let start = time::Instant::now(); - concurrent_insert(Arc::new(client)); + concurrent_insert(Arc::new(client)).await; let elapsed = time::Instant::now() - start; println!("Benches::concurrent_insert cost time:{elapsed:?}"); } diff --git a/docs/simple_demo/simple_operation/demo.md b/docs/simple_demo/simple_operation/demo.md index d7cd3d7..da86a6c 100644 --- a/docs/simple_demo/simple_operation/demo.md +++ b/docs/simple_demo/simple_operation/demo.md @@ -1,5 +1,5 @@ # Demo for obkv-table-client-rs -Edited by OBKV developers on March 3, 2023. +Edited by OBKV developers on June 6, 2023. 
## Introduction obkv-table-client-rs is Rust Library that can access table data from OceanBase storage layer. @@ -13,34 +13,32 @@ Now we provide an interface to access data from OceanBase, which we will introdu obkv-table-client-rs support several simple operations, such as get, insert, update, insert_or_update, replace, append, increment, delete. ```rust Table and ObTableClient -impl Table for ObTableClient { - // implement operation in Table - // ... -} - -pub trait Table { - fn insert( +impl ObTableClient { + // implement operation + #[inline] + pub async fn insert( &self, table_name: &str, row_keys: Vec, columns: Vec, properties: Vec, - ) -> Result; + ) -> Result {} - fn update( + #[inline] + pub async fn update( &self, table_name: &str, row_keys: Vec, columns: Vec, properties: Vec, - ) -> Result; + ) -> Result {} // ... } ``` A simple operation example is shown below: ```rust simple operation example -fn simple_operation() { +async fn simple_operation() { let client = build_normal_client(); let result = client.insert( @@ -48,7 +46,7 @@ fn simple_operation() { vec![Value::from("foo")], vec!["c2".to_owned()], vec![Value::from("baz")], - ); + ).await; assert!(result.is_ok()); } @@ -72,7 +70,7 @@ impl ObTableBatchOperation { ``` A simple batch operation example is shown below: ```rust batch operation example -fn batch_operation() { +async fn batch_operation() { let client = utils::common::build_normal_client(); // set number of operations in batch_op @@ -87,7 +85,7 @@ fn batch_operation() { ); // execute - let result = client.execute_batch("your_table_name", batch_op); + let result = client.execute_batch("your_table_name", batch_op).await; assert!(result.is_ok()); } ``` @@ -97,30 +95,25 @@ More [demos](https://github.com/oceanbase/obkv-table-client-rs/blob/main/tests/t Query is different from get, it allows the user to get a range of data. 
A **Query** could get from **ObTableClient** by calling ```query()``` method, then you could customize your query by calling methods in **ObTableClientQueryImpl** and **TableQuery**. ```rust ObTableClientQueryImpll -impl TableQuery for ObTableClientQueryImpl { - // implement methods from TableQuery - // ... -} - -pub trait TableQuery { - fn execute(&self) -> Result; - fn select(self, columns: Vec) -> Self; +impl ObTableClientQueryImpl { + pub async fn execute(&self) -> Result {} + pub fn select(self, columns: Vec) -> Self {} // ... - fn clear(&mut self); + pub fn clear(&mut self) {} } ``` A simple query example is shown below: ```rust query example -fn query() { +async fn query() { let client = utils::common::build_normal_client(); let query = client .query("your_table_name") .select(vec!["c1".to_owned()]) .scan_order(false) - .add_scan_range(vec![Value::from("123")], true, vec![Value::from("567")], true) + .add_scan_range(vec![Value::from("123")], true, vec![Value::from("567")], true); - let result = query.execute(); + let result = query.execute().await; assert!(result.is_ok()); } ``` diff --git a/src/client/mod.rs b/src/client/mod.rs index 6bc303c..ad5b4dd 100644 --- a/src/client/mod.rs +++ b/src/client/mod.rs @@ -36,8 +36,9 @@ pub enum TableOpResult { } pub trait Table { + // TODO: async operation support /// Insert a record - async fn insert( + fn insert( &self, table_name: &str, row_keys: Vec, @@ -46,7 +47,7 @@ pub trait Table { ) -> Result; /// Update a record - async fn update( + fn update( &self, table_name: &str, row_keys: Vec, @@ -56,7 +57,7 @@ pub trait Table { /// Insert or update a record, if the record exists, update it. /// Otherwise insert a new one. - async fn insert_or_update( + fn insert_or_update( &self, table_name: &str, row_keys: Vec, @@ -65,7 +66,7 @@ pub trait Table { ) -> Result; /// Replace a record. 
- async fn replace( + fn replace( &self, table_name: &str, row_keys: Vec, @@ -74,7 +75,7 @@ pub trait Table { ) -> Result; /// Append - async fn append( + fn append( &self, table_name: &str, row_keys: Vec, @@ -83,7 +84,7 @@ pub trait Table { ) -> Result; /// Increment - async fn increment( + fn increment( &self, table_name: &str, row_keys: Vec, @@ -92,10 +93,10 @@ pub trait Table { ) -> Result; /// Delete records by row keys. - async fn delete(&self, table_name: &str, row_keys: Vec) -> Result; + fn delete(&self, table_name: &str, row_keys: Vec) -> Result; /// Retrieve a record by row keys. - async fn get( + fn get( &self, table_name: &str, row_keys: Vec, @@ -105,7 +106,7 @@ pub trait Table { /// Create a batch operation fn batch_operation(&self, ops_num_hint: usize) -> ObTableBatchOperation; // Execute a batch operation - async fn execute_batch( + fn execute_batch( &self, table_name: &str, batch_op: ObTableBatchOperation, diff --git a/src/client/query.rs b/src/client/query.rs index 9bf2d09..4cd57f4 100644 --- a/src/client/query.rs +++ b/src/client/query.rs @@ -44,14 +44,15 @@ use crate::{ const ZERO_TIMEOUT_MS: Duration = Duration::from_millis(0); pub trait StreamQuerier { - async fn execute_query( + // TODO: `async` trait functions are not currently supported + fn execute_query( &self, result: &mut QueryStreamResult, part_id_and_table: (i64, Arc), payload: &mut ObTableQueryRequest, ) -> Result; - async fn execute_stream( + fn execute_stream( &self, result: &mut QueryStreamResult, part_id_and_table: (i64, Arc), @@ -398,7 +399,6 @@ impl QueryStreamResult { impl Drop for QueryStreamResult { fn drop(&mut self) { if self.closed { - () } else { error!("QueryStreamResult::close fail") } @@ -442,7 +442,7 @@ impl QueryResultSet { QueryResultSet::Some(stream_result) => { // TODO: async close if stream_result.closed { - return Ok(()); + Ok(()) } else { Err(CommonErr( CommonErrCode::Rpc, @@ -502,10 +502,11 @@ impl Drop for QueryResultSet { /// Table Query Trait -const 
PRIMARY_INDEX_NAME: &str = "PRIMARY"; +pub const PRIMARY_INDEX_NAME: &str = "PRIMARY"; pub trait TableQuery { - async fn execute(&self) -> Result; + // TODO: async execute / `async` trait functions are not currently supported + fn execute(&self) -> Result; fn get_table_name(&self) -> String; fn set_entity_type(&mut self, entity_type: ObTableEntityType); fn entity_type(&self) -> ObTableEntityType; diff --git a/src/client/table.rs b/src/client/table.rs index 93a6c81..856c7de 100644 --- a/src/client/table.rs +++ b/src/client/table.rs @@ -15,21 +15,16 @@ * #L% */ -use std::{collections::HashMap, fmt::Formatter, sync::Arc, time::Duration}; +use std::{fmt::Formatter, sync::Arc, time::Duration}; -use super::{ - query::{QueryResultSet, TableQuery}, - ClientConfig, Table, TableOpResult, -}; +use super::{query::QueryResultSet, ClientConfig, TableOpResult}; use crate::{ error::{CommonErrCode, Error::Common as CommonErr, Result}, rpc::{ protocol::{ codes::ResultCodes, payloads::*, - query::{ - ObHTableFilter, ObNewRange, ObScanOrder, ObTableQuery - }, + query::{ObHTableFilter, ObNewRange, ObScanOrder, ObTableQuery}, ObPayload, }, proxy::Proxy, @@ -61,6 +56,7 @@ impl std::fmt::Debug for ObTable { } } +// TODO: impl Table for ObTable impl ObTable { pub async fn execute_payload( &self, @@ -71,34 +67,32 @@ impl ObTable { Ok(()) } - pub fn query(&self, table_name: &str) -> impl TableQuery { - ObTableQueryImpl::new(table_name, Arc::new(self.clone())) + pub fn query(&self, _table_name: &str) { + // TODO: return impl TableQuery + todo!() + // ObTableQueryImpl::new(table_name, Arc::new(self.clone())) } pub fn operation_timeout(&self) -> Duration { self.config.rpc_operation_timeout } - async fn execute( + /// Execute a batch operation on a table + pub async fn execute_batch( &self, - table_name: &str, - operation_type: ObTableOperationType, - row_keys: Vec, - columns: Option>, - properties: Option>, - ) -> Result { - let mut payload = ObTableOperationRequest::new( - table_name, - 
operation_type, - row_keys, - columns, - properties, + _table_name: &str, + batch_op: ObTableBatchOperation, + ) -> Result> { + let mut payload = ObTableBatchOperationRequest::new( + batch_op, self.config.rpc_operation_timeout, self.config.log_level_flag, ); - let mut result = ObTableOperationResult::new(); - self.execute_payload(&mut payload, &mut result).await?; - Ok(result) + let mut result = ObTableBatchOperationResult::new(); + + self.rpc_proxy.execute(&mut payload, &mut result).await?; + + result.into() } } @@ -172,170 +166,6 @@ impl Builder { } } -// TODO: Table has no retry for any operation -impl Table for ObTable { - async fn insert( - &self, - table_name: &str, - row_keys: Vec, - columns: Vec, - properties: Vec, - ) -> Result { - Ok(self - .execute( - table_name, - ObTableOperationType::Insert, - row_keys, - Some(columns), - Some(properties), - ) - .await? - .affected_rows()) - } - - async fn update( - &self, - table_name: &str, - row_keys: Vec, - columns: Vec, - properties: Vec, - ) -> Result { - Ok(self - .execute( - table_name, - ObTableOperationType::Update, - row_keys, - Some(columns), - Some(properties), - ) - .await? - .affected_rows()) - } - - async fn insert_or_update( - &self, - table_name: &str, - row_keys: Vec, - columns: Vec, - properties: Vec, - ) -> Result { - Ok(self - .execute( - table_name, - ObTableOperationType::InsertOrUpdate, - row_keys, - Some(columns), - Some(properties), - ) - .await? - .affected_rows()) - } - - async fn replace( - &self, - table_name: &str, - row_keys: Vec, - columns: Vec, - properties: Vec, - ) -> Result { - Ok(self - .execute( - table_name, - ObTableOperationType::Replace, - row_keys, - Some(columns), - Some(properties), - ) - .await? - .affected_rows()) - } - - async fn append( - &self, - table_name: &str, - row_keys: Vec, - columns: Vec, - properties: Vec, - ) -> Result { - Ok(self - .execute( - table_name, - ObTableOperationType::Append, - row_keys, - Some(columns), - Some(properties), - ) - .await? 
- .affected_rows()) - } - - async fn increment( - &self, - table_name: &str, - row_keys: Vec, - columns: Vec, - properties: Vec, - ) -> Result { - Ok(self - .execute( - table_name, - ObTableOperationType::Increment, - row_keys, - Some(columns), - Some(properties), - ) - .await? - .affected_rows()) - } - - async fn delete(&self, table_name: &str, row_keys: Vec) -> Result { - Ok(self - .execute(table_name, ObTableOperationType::Del, row_keys, None, None) - .await? - .affected_rows()) - } - - async fn get( - &self, - table_name: &str, - row_keys: Vec, - columns: Vec, - ) -> Result> { - Ok(self - .execute( - table_name, - ObTableOperationType::Get, - row_keys, - Some(columns), - None, - ) - .await? - .take_entity() - .take_properties()) - } - - fn batch_operation(&self, ops_num_hint: usize) -> ObTableBatchOperation { - ObTableBatchOperation::with_ops_num(ops_num_hint) - } - - async fn execute_batch( - &self, - _table_name: &str, - batch_op: ObTableBatchOperation, - ) -> Result> { - let mut payload = ObTableBatchOperationRequest::new( - batch_op, - self.config.rpc_operation_timeout, - self.config.log_level_flag, - ); - let mut result = ObTableBatchOperationResult::new(); - - self.rpc_proxy.execute(&mut payload, &mut result).await?; - - result.into() - } -} - impl From for Result> { fn from(batch_result: ObTableBatchOperationResult) -> Result> { let op_results = batch_result.take_op_results(); @@ -362,8 +192,8 @@ impl From for Result> { } } +#[allow(dead_code)] // impl ObTableStreamQuerier for obtable - pub struct ObTableQueryImpl { operation_timeout: Option, entity_type: ObTableEntityType, @@ -389,8 +219,9 @@ impl ObTableQueryImpl { } } -impl TableQuery for ObTableQueryImpl { - async fn execute(&self) -> Result { +#[allow(dead_code)] +impl ObTableQueryImpl { + fn execute(&self) -> Result { todo!() } diff --git a/src/client/table_client.rs b/src/client/table_client.rs index 50f834f..8cce7ed 100644 --- a/src/client/table_client.rs +++ b/src/client/table_client.rs @@ -32,9 
+32,9 @@ use scheduled_thread_pool::ScheduledThreadPool; use super::{ ocp::{ObOcpModelManager, OcpModel}, - query::{QueryResultSet, QueryStreamResult, StreamQuerier, TableQuery}, + query::{QueryResultSet, QueryStreamResult}, table::{self, ObTable}, - ClientConfig, Table, TableOpResult, + ClientConfig, TableOpResult, }; use crate::{ error::{self, CommonErrCode, Error::Common as CommonErr, Result}, @@ -1393,7 +1393,7 @@ impl ObTableClientInner { Ok(result) => { let error_no = result.header().errorno(); let result_code = ResultCodes::from_i32(error_no); - let result = if result_code == ResultCodes::OB_SUCCESS { + if result_code == ResultCodes::OB_SUCCESS { self.reset_table_failure(table_name); Ok(result) } else { @@ -1404,8 +1404,7 @@ impl ObTableClientInner { result.header().message() ), )) - }; - result + } } Err(e) => { debug!( @@ -1436,7 +1435,7 @@ impl ObTableClientInner { ); Err(e) } - } + }; } } } @@ -1659,11 +1658,10 @@ impl ObTableClient { Ok(all_results) } -} -impl Table for ObTableClient { + // TODO: impl ObTable async methods #[inline] - async fn insert( + pub async fn insert( &self, table_name: &str, row_keys: Vec, @@ -1684,7 +1682,7 @@ impl Table for ObTableClient { } #[inline] - async fn update( + pub async fn update( &self, table_name: &str, row_keys: Vec, @@ -1705,7 +1703,7 @@ impl Table for ObTableClient { } #[inline] - async fn insert_or_update( + pub async fn insert_or_update( &self, table_name: &str, row_keys: Vec, @@ -1726,7 +1724,7 @@ impl Table for ObTableClient { } #[inline] - async fn replace( + pub async fn replace( &self, table_name: &str, row_keys: Vec, @@ -1747,7 +1745,7 @@ impl Table for ObTableClient { } #[inline] - async fn append( + pub async fn append( &self, table_name: &str, row_keys: Vec, @@ -1768,7 +1766,7 @@ impl Table for ObTableClient { } #[inline] - async fn increment( + pub async fn increment( &self, table_name: &str, row_keys: Vec, @@ -1789,7 +1787,7 @@ impl Table for ObTableClient { } #[inline] - async fn delete(&self, 
table_name: &str, row_keys: Vec) -> Result { + pub async fn delete(&self, table_name: &str, row_keys: Vec) -> Result { Ok(self .inner .execute(table_name, ObTableOperationType::Del, row_keys, None, None) @@ -1798,7 +1796,7 @@ impl Table for ObTableClient { } #[inline] - async fn get( + pub async fn get( &self, table_name: &str, row_keys: Vec, @@ -1819,11 +1817,11 @@ impl Table for ObTableClient { } #[inline] - fn batch_operation(&self, ops_num_hint: usize) -> ObTableBatchOperation { + pub fn batch_operation(&self, ops_num_hint: usize) -> ObTableBatchOperation { ObTableBatchOperation::with_ops_num_raw(ops_num_hint) } - async fn execute_batch( + pub async fn execute_batch( &self, table_name: &str, batch_op: ObTableBatchOperation, @@ -1877,16 +1875,6 @@ pub struct ObTableClientStreamQuerier { start_execute_ts: AtomicI64, } -impl ObTableClientStreamQuerier { - fn new(table_name: &str, client: Arc) -> Self { - Self { - client, - table_name: table_name.to_owned(), - start_execute_ts: AtomicI64::new(0), - } - } -} - impl Drop for ObTableClientStreamQuerier { fn drop(&mut self) { let start_ts = self.start_execute_ts.load(Ordering::Relaxed); @@ -1901,8 +1889,17 @@ impl Drop for ObTableClientStreamQuerier { } } -impl StreamQuerier for ObTableClientStreamQuerier { - async fn execute_query( +impl ObTableClientStreamQuerier { + fn new(table_name: &str, client: Arc) -> Self { + Self { + client, + table_name: table_name.to_owned(), + start_execute_ts: AtomicI64::new(0), + } + } + + // TODO: impl StreamQuerier for ObTableClientStreamQuerier + pub async fn execute_query( &self, stream_result: &mut QueryStreamResult, (part_id, ob_table): (i64, Arc), @@ -1932,7 +1929,7 @@ impl StreamQuerier for ObTableClientStreamQuerier { Ok(row_count) } - async fn execute_stream( + pub async fn execute_stream( &self, stream_result: &mut QueryStreamResult, (part_id, ob_table): (i64, Arc), @@ -1961,12 +1958,10 @@ impl StreamQuerier for ObTableClientStreamQuerier { } Ok(row_count) } - - fn 
get_runtime(&self) -> RuntimeRef { - self.client.runtimes.query_runtime.clone() - } } +pub const PRIMARY_INDEX_NAME: &str = "PRIMARY"; + /// TODO refactor with ObTableQueryImpl pub struct ObTableClientQueryImpl { operation_timeout: Option, @@ -1990,10 +1985,9 @@ impl ObTableClientQueryImpl { fn reset(&mut self) { self.table_query = ObTableQuery::new(); } -} -impl TableQuery for ObTableClientQueryImpl { - async fn execute(&self) -> Result { + // TODO: impl TableQuery for ObTableClientQueryImpl + pub async fn execute(&self) -> Result { let mut partition_table: HashMap)> = HashMap::new(); self.table_query.verify()?; @@ -2042,22 +2036,22 @@ impl TableQuery for ObTableClientQueryImpl { } #[inline] - fn get_table_name(&self) -> String { + pub fn get_table_name(&self) -> String { self.table_name.to_owned() } #[inline] - fn set_entity_type(&mut self, entity_type: ObTableEntityType) { + pub fn set_entity_type(&mut self, entity_type: ObTableEntityType) { self.entity_type = entity_type; } #[inline] - fn entity_type(&self) -> ObTableEntityType { + pub fn entity_type(&self) -> ObTableEntityType { self.entity_type } #[inline] - fn select(mut self, columns: Vec) -> Self + pub fn select(mut self, columns: Vec) -> Self where Self: Sized, { @@ -2066,7 +2060,7 @@ impl TableQuery for ObTableClientQueryImpl { } #[inline] - fn limit(mut self, offset: Option, limit: i32) -> Self + pub fn limit(mut self, offset: Option, limit: i32) -> Self where Self: Sized, { @@ -2077,7 +2071,7 @@ impl TableQuery for ObTableClientQueryImpl { self } - fn add_scan_range( + pub fn add_scan_range( mut self, start: Vec, start_equals: bool, @@ -2104,7 +2098,7 @@ impl TableQuery for ObTableClientQueryImpl { self } - fn add_scan_range_starts_with(mut self, start: Vec, start_equals: bool) -> Self + pub fn add_scan_range_starts_with(mut self, start: Vec, start_equals: bool) -> Self where Self: Sized, { @@ -2126,7 +2120,7 @@ impl TableQuery for ObTableClientQueryImpl { self } - fn add_scan_range_ends_with(mut self, 
end: Vec, end_equals: bool) -> Self + pub fn add_scan_range_ends_with(mut self, end: Vec, end_equals: bool) -> Self where Self: Sized, { @@ -2149,7 +2143,7 @@ impl TableQuery for ObTableClientQueryImpl { } #[inline] - fn scan_order(mut self, forward: bool) -> Self + pub fn scan_order(mut self, forward: bool) -> Self where Self: Sized, { @@ -2159,7 +2153,7 @@ impl TableQuery for ObTableClientQueryImpl { } #[inline] - fn index_name(mut self, index_name: &str) -> Self + pub fn index_name(mut self, index_name: &str) -> Self where Self: Sized, { @@ -2168,7 +2162,15 @@ impl TableQuery for ObTableClientQueryImpl { } #[inline] - fn filter_string(mut self, filter_string: &str) -> Self + pub fn primary_index(self) -> Self + where + Self: Sized, + { + self.index_name(PRIMARY_INDEX_NAME) + } + + #[inline] + pub fn filter_string(mut self, filter_string: &str) -> Self where Self: Sized, { @@ -2177,7 +2179,7 @@ impl TableQuery for ObTableClientQueryImpl { } #[inline] - fn htable_filter(mut self, filter: ObHTableFilter) -> Self + pub fn htable_filter(mut self, filter: ObHTableFilter) -> Self where Self: Sized, { @@ -2186,7 +2188,7 @@ impl TableQuery for ObTableClientQueryImpl { } #[inline] - fn batch_size(mut self, batch_size: i32) -> Self + pub fn batch_size(mut self, batch_size: i32) -> Self where Self: Sized, { @@ -2195,7 +2197,7 @@ impl TableQuery for ObTableClientQueryImpl { } #[inline] - fn operation_timeout(mut self, timeout: Duration) -> Self + pub fn operation_timeout(mut self, timeout: Duration) -> Self where Self: Sized, { @@ -2204,7 +2206,7 @@ impl TableQuery for ObTableClientQueryImpl { } #[inline] - fn clear(&mut self) { + pub fn clear(&mut self) { self.reset(); } } diff --git a/src/lib.rs b/src/lib.rs index 79bd484..b174818 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -15,8 +15,6 @@ * #L% */ -#![feature(async_fn_in_trait)] - extern crate bytes; extern crate chrono; extern crate crossbeam; diff --git a/src/rpc/conn_pool.rs b/src/rpc/conn_pool.rs index 1db2ca9..069aa66 
100644 --- a/src/rpc/conn_pool.rs +++ b/src/rpc/conn_pool.rs @@ -192,7 +192,7 @@ impl ConnPool { error!("ConnPool::add_connection_background::bg_add fail to build a connection after {} retries, err:{}", retry_num, e); let delay = cmp::max(min_build_retry_interval, delay); let delay = cmp::min(shared_pool.conn_builder.connect_timeout / 2, delay * 2); - sleep(delay.into()).await; + sleep(delay).await; } } } @@ -438,7 +438,7 @@ mod test { builder.build().expect("fail to build ConnPool") } - #[test] + #[tokio::test] #[ignore] async fn check_conn_valid() { let (min_conn_num, max_conn_num) = (2, 3); @@ -456,7 +456,7 @@ mod test { assert!(conn.is_active(), "conn should be active"); } - #[test] + #[tokio::test] #[ignore] async fn rebuild_conn() { let (min_conn_num, max_conn_num) = (3, 5); diff --git a/tests/test_cse_table.rs b/tests/test_cse_table.rs index 976e54a..7e64106 100644 --- a/tests/test_cse_table.rs +++ b/tests/test_cse_table.rs @@ -19,7 +19,7 @@ mod utils; use std::collections::HashSet; -use obkv::{client::query::TableQuery, Table, Value}; +use obkv::Value; #[allow(unused_imports)] use serial_test_derive::serial; use tokio::task; @@ -43,7 +43,7 @@ use tokio::task; #[tokio::test] #[serial] async fn test_cse_data_range_table() { - let client_handle = task::spawn_blocking(|| utils::common::build_normal_client()); + let client_handle = task::spawn_blocking(utils::common::build_normal_client); let client = client_handle.await.unwrap(); let cse_table = "cse_data_20190308_1"; @@ -109,7 +109,7 @@ async fn test_cse_data_range_table() { #[tokio::test] #[serial] async fn test_data_range_part() { - let client_handle = task::spawn_blocking(|| utils::common::build_normal_client()); + let client_handle = task::spawn_blocking(utils::common::build_normal_client); let client = client_handle.await.unwrap(); let cse_table = "cse_data_20190308_1"; client @@ -145,7 +145,7 @@ async fn test_data_range_part() { #[tokio::test] #[serial] async fn test_cse_meta_data_table() { - let 
client_handle = task::spawn_blocking(|| utils::common::build_normal_client()); + let client_handle = task::spawn_blocking(utils::common::build_normal_client); let client = client_handle.await.unwrap(); let cse_table = "cse_meta_data_0"; @@ -207,7 +207,7 @@ async fn test_cse_meta_data_table() { #[tokio::test] #[serial] async fn test_cse_index_key_table() { - let client_handle = task::spawn_blocking(|| utils::common::build_normal_client()); + let client_handle = task::spawn_blocking(utils::common::build_normal_client); let client = client_handle.await.unwrap(); let cse_table = "cse_index_1"; @@ -278,7 +278,7 @@ async fn test_cse_index_key_table() { row[2].to_owned(), row[3].to_owned(), ); - assert_eq!(rows.contains(&vec![m, k, v]), true); + assert!(rows.contains(&vec![m, k, v])); assert_eq!(ids, series_ids); } None => { @@ -286,7 +286,7 @@ async fn test_cse_index_key_table() { break; } Some(Err(e)) => { - panic!("Error: {:?}", e); + panic!("Error: {e:?}"); } } } @@ -299,7 +299,7 @@ async fn test_cse_index_key_table() { #[tokio::test] #[serial] async fn test_index_key_part() { - let client_handle = task::spawn_blocking(|| utils::common::build_normal_client()); + let client_handle = task::spawn_blocking(utils::common::build_normal_client); let client = client_handle.await.unwrap(); let cse_table = "cse_index_1"; @@ -332,7 +332,7 @@ async fn test_index_key_part() { #[tokio::test] #[serial] async fn test_cse_field_key_table() { - let client_handle = task::spawn_blocking(|| utils::common::build_normal_client()); + let client_handle = task::spawn_blocking(utils::common::build_normal_client); let client = client_handle.await.unwrap(); let cse_table = "cse_field_1"; @@ -407,7 +407,7 @@ async fn test_cse_field_key_table() { #[tokio::test] #[serial] async fn test_field_key_part() { - let client_handle = task::spawn_blocking(|| utils::common::build_normal_client()); + let client_handle = task::spawn_blocking(utils::common::build_normal_client); let client = 
client_handle.await.unwrap(); let cse_table = "cse_field_1"; @@ -435,7 +435,7 @@ async fn test_field_key_part() { #[tokio::test] #[serial] async fn test_series_key_table() { - let client_handle = task::spawn_blocking(|| utils::common::build_normal_client()); + let client_handle = task::spawn_blocking(utils::common::build_normal_client); let client = client_handle.await.unwrap(); let cse_table = "cse_series_key_to_id_1"; diff --git a/tests/test_hbase_client.rs b/tests/test_hbase_client.rs index e5a3a66..2843740 100644 --- a/tests/test_hbase_client.rs +++ b/tests/test_hbase_client.rs @@ -19,7 +19,7 @@ #[allow(unused)] mod utils; -use obkv::{Table, Value}; +use obkv::Value; use tokio::task; // ```sql @@ -33,7 +33,7 @@ use tokio::task; // ``` #[tokio::test] async fn test_obtable_partition_hash_crud() { - let client_handle = task::spawn_blocking(|| utils::common::build_hbase_client()); + let client_handle = task::spawn_blocking(utils::common::build_hbase_client); let client = client_handle.await.unwrap(); const TEST_TABLE: &str = "TEST_HBASE_HASH"; @@ -105,7 +105,7 @@ async fn test_obtable_partition_hash_crud() { // ``` #[tokio::test] async fn test_obtable_partition_key_varbinary_crud() { - let client_handle = task::spawn_blocking(|| utils::common::build_hbase_client()); + let client_handle = task::spawn_blocking(utils::common::build_hbase_client); let client = client_handle.await.unwrap(); const TEST_TABLE: &str = "TEST_HBASE_PARTITION"; @@ -182,7 +182,7 @@ async fn test_obtable_partition_key_varbinary_crud() { // ``` #[tokio::test] async fn test_obtable_partition_key_varchar_crud() { - let client_handle = task::spawn_blocking(|| utils::common::build_hbase_client()); + let client_handle = task::spawn_blocking(utils::common::build_hbase_client); let client = client_handle.await.unwrap(); const TABLE_NAME: &str = "TEST_HBASE_PARTITION"; @@ -255,7 +255,7 @@ async fn test_obtable_partition_key_varchar_crud() { // ``` #[tokio::test] async fn 
test_obtable_partition_range_crud() { - let client_handle = task::spawn_blocking(|| utils::common::build_hbase_client()); + let client_handle = task::spawn_blocking(utils::common::build_hbase_client); let client = client_handle.await.unwrap(); const TABLE_NAME: &str = "TEST_HBASE_RANGE"; diff --git a/tests/test_table_client.rs b/tests/test_table_client.rs index 62f036e..dba88ac 100644 --- a/tests/test_table_client.rs +++ b/tests/test_table_client.rs @@ -19,12 +19,12 @@ #[allow(unused)] mod utils; -use obkv::{ResultCodes, Table, TableQuery, Value}; +use obkv::{ResultCodes, Value}; use tokio::task; #[tokio::test] async fn test_obtable_client_curd() { - let client_handle = task::spawn_blocking(|| utils::common::build_normal_client()); + let client_handle = task::spawn_blocking(utils::common::build_normal_client); let client = client_handle.await.unwrap(); const TEST_TABLE_NAME: &str = "test_varchar_table"; @@ -109,7 +109,7 @@ async fn test_obtable_client_curd() { #[tokio::test] async fn test_obtable_client_batch_op() { - let client_handle = task::spawn_blocking(|| utils::common::build_normal_client()); + let client_handle = task::spawn_blocking(utils::common::build_normal_client); let client = client_handle.await.unwrap(); const TEST_TABLE_NAME: &str = "test_varchar_table"; diff --git a/tests/test_table_client_base.rs b/tests/test_table_client_base.rs index 2772134..b71204a 100644 --- a/tests/test_table_client_base.rs +++ b/tests/test_table_client_base.rs @@ -27,7 +27,7 @@ use std::{ time::Duration, }; -use obkv::{error::CommonErrCode, ObTableClient, ResultCodes, Table, TableQuery, Value}; +use obkv::{error::CommonErrCode, ObTableClient, ResultCodes, Value}; use tokio::task; pub struct BaseTest { @@ -753,11 +753,16 @@ impl BaseTest { } pub async fn test_query(&self, table_name: &str) { - self.insert_query_test_record(table_name, "123", "123c2").await; - self.insert_query_test_record(table_name, "124", "124c2").await; - self.insert_query_test_record(table_name, "234", 
"234c2").await; - self.insert_query_test_record(table_name, "456", "456c2").await; - self.insert_query_test_record(table_name, "567", "567c2").await; + self.insert_query_test_record(table_name, "123", "123c2") + .await; + self.insert_query_test_record(table_name, "124", "124c2") + .await; + self.insert_query_test_record(table_name, "234", "234c2") + .await; + self.insert_query_test_record(table_name, "456", "456c2") + .await; + self.insert_query_test_record(table_name, "567", "567c2") + .await; let query = self .client diff --git a/tests/test_table_client_hash.rs b/tests/test_table_client_hash.rs index d94cb6f..6a2eddc 100644 --- a/tests/test_table_client_hash.rs +++ b/tests/test_table_client_hash.rs @@ -20,7 +20,7 @@ pub mod test_table_client_base; #[allow(unused)] mod utils; -use obkv::{Table, Value}; +use obkv::Value; use serial_test_derive::serial; use tokio::task; @@ -35,7 +35,7 @@ use tokio::task; #[tokio::test] #[serial] async fn test_concurrent() { - let client_handle = task::spawn_blocking(|| utils::common::build_normal_client()); + let client_handle = task::spawn_blocking(utils::common::build_normal_client); let client = client_handle.await.unwrap(); const TABLE_NAME: &str = "TEST_VARCHAR_TABLE_HASH_CONCURRENT"; client.add_row_key_element(TABLE_NAME, vec!["c1".to_string()]); @@ -55,7 +55,7 @@ async fn test_concurrent() { // ``` #[tokio::test] async fn test_obtable_client_hash() { - let client_handle = task::spawn_blocking(|| utils::common::build_normal_client()); + let client_handle = task::spawn_blocking(utils::common::build_normal_client); let client = client_handle.await.unwrap(); const TABLE_NAME: &str = "TEST_TABLE_BATCH_HASH"; client.add_row_key_element(TABLE_NAME, vec!["c1".to_string(), "c1sb".to_string()]); diff --git a/tests/test_table_client_key.rs b/tests/test_table_client_key.rs index a017a13..5d90745 100644 --- a/tests/test_table_client_key.rs +++ b/tests/test_table_client_key.rs @@ -20,7 +20,7 @@ pub mod test_table_client_base; 
#[allow(unused)] mod utils; -use obkv::{ObTableClient, Table, TableQuery, Value}; +use obkv::{ObTableClient, Value}; use rand::{distributions::Alphanumeric, thread_rng, Rng}; use serial_test_derive::serial; use tokio::task; @@ -35,7 +35,7 @@ use tokio::task; // ``` #[tokio::test] async fn test_varchar_all_ob() { - let client_handle = task::spawn_blocking(|| utils::common::build_normal_client()); + let client_handle = task::spawn_blocking(utils::common::build_normal_client); let client = client_handle.await.unwrap(); const TABLE_NAME: &str = "TEST_VARCHAR_TABLE_KEY"; client.add_row_key_element(TABLE_NAME, vec!["c1".to_string()]); @@ -64,7 +64,7 @@ async fn test_varchar_all_ob() { // ``` #[tokio::test] async fn test_blob_all() { - let client_handle = task::spawn_blocking(|| utils::common::build_normal_client()); + let client_handle = task::spawn_blocking(utils::common::build_normal_client); let client = client_handle.await.unwrap(); const TABLE_NAME: &str = "TEST_BLOB_TABLE_KEY"; client.add_row_key_element(TABLE_NAME, vec!["c1".to_string()]); @@ -83,7 +83,7 @@ async fn test_blob_all() { #[tokio::test] async fn test_ob_exceptions() { - let client_handle = task::spawn_blocking(|| utils::common::build_normal_client()); + let client_handle = task::spawn_blocking(utils::common::build_normal_client); let client = client_handle.await.unwrap(); const TABLE_NAME: &str = "TEST_VARCHAR_TABLE_KEY"; client.add_row_key_element(TABLE_NAME, vec!["c1".to_string()]); @@ -121,7 +121,7 @@ async fn insert_query_test_record( #[tokio::test] #[serial] async fn test_query() { - let client_handle = task::spawn_blocking(|| utils::common::build_normal_client()); + let client_handle = task::spawn_blocking(utils::common::build_normal_client); let client = client_handle.await.unwrap(); const TABLE_NAME: &str = "TEST_QUERY_TABLE_KEY"; @@ -146,7 +146,7 @@ async fn test_query() { ); let result_set = query.execute().await; - println!("result_set: {:?}", result_set); + println!("result_set: 
{result_set:?}"); assert!(result_set.is_ok()); let result_set = result_set.unwrap(); assert_eq!(5, result_set.cache_size()); @@ -346,7 +346,7 @@ async fn test_query() { #[tokio::test] #[serial] async fn test_stream_query() { - let client_handle = task::spawn_blocking(|| utils::common::build_normal_client()); + let client_handle = task::spawn_blocking(utils::common::build_normal_client); let client = client_handle.await.unwrap(); const TABLE_NAME: &str = "TEST_STREAM_QUERY_TABLE_KEY"; client.add_row_key_element(TABLE_NAME, vec!["c1".to_string()]); @@ -365,7 +365,7 @@ async fn test_stream_query() { // ``` #[tokio::test] async fn test_concurrent() { - let client_handle = task::spawn_blocking(|| utils::common::build_normal_client()); + let client_handle = task::spawn_blocking(utils::common::build_normal_client); let client = client_handle.await.unwrap(); const TABLE_NAME: &str = "TEST_VARCHAR_TABLE_KEY_CONCURRENT"; client.add_row_key_element(TABLE_NAME, vec!["c1".to_string()]); @@ -385,7 +385,7 @@ async fn test_concurrent() { // ``` #[tokio::test] async fn test_batch() { - let client_handle = task::spawn_blocking(|| utils::common::build_normal_client()); + let client_handle = task::spawn_blocking(utils::common::build_normal_client); let client = client_handle.await.unwrap(); const TABLE_NAME: &str = "TEST_TABLE_BATCH_KEY"; client.add_row_key_element(TABLE_NAME, vec!["c1".to_string(), "c1sb".to_string()]); @@ -415,7 +415,7 @@ fn clean_table(client: ObTableClient, table_name: &str) { #[tokio::test] async fn test_partition_varchar_general_ci() { - let client_handle = task::spawn_blocking(|| utils::common::build_normal_client()); + let client_handle = task::spawn_blocking(utils::common::build_normal_client); let client = client_handle.await.unwrap(); const VARCHAR_TABLE_NAME: &str = "TEST_TABLE_PARTITION_VARCHAR_KEY"; client.add_row_key_element(VARCHAR_TABLE_NAME, vec!["c1".to_string()]); @@ -471,7 +471,7 @@ async fn test_partition_varchar_general_ci() { #[tokio::test] 
async fn test_partition_complex() { - let client_handle = task::spawn_blocking(|| utils::common::build_normal_client()); + let client_handle = task::spawn_blocking(utils::common::build_normal_client); let client = client_handle.await.unwrap(); const TABLE_NAME: &str = "TEST_TABLE_PARTITION_COMPLEX_KEY"; client.add_row_key_element( @@ -526,7 +526,7 @@ async fn test_partition_complex() { #[tokio::test] async fn test_sub_partition_complex() { - let client_handle = task::spawn_blocking(|| utils::common::build_normal_client()); + let client_handle = task::spawn_blocking(utils::common::build_normal_client); let client = client_handle.await.unwrap(); const TABLE_NAME: &str = "TEST_TABLE_SUB_PARTITION_COMPLEX_KEY"; client.add_row_key_element( diff --git a/tests/test_table_client_range.rs b/tests/test_table_client_range.rs index c31fa98..f24670f 100644 --- a/tests/test_table_client_range.rs +++ b/tests/test_table_client_range.rs @@ -20,7 +20,7 @@ pub mod test_table_client_base; #[allow(unused)] mod utils; -use obkv::{Table, Value}; +use obkv::Value; use serial_test_derive::serial; use tokio::task; @@ -34,7 +34,7 @@ use tokio::task; // ``` #[tokio::test] async fn test_varchar_all_ob() { - let client_handle = task::spawn_blocking(|| utils::common::build_normal_client()); + let client_handle = task::spawn_blocking(utils::common::build_normal_client); let client = client_handle.await.unwrap(); const TABLE_NAME: &str = "TEST_VARCHAR_TABLE_RANGE"; client.add_row_key_element(TABLE_NAME, vec!["c1".to_string()]); @@ -63,7 +63,7 @@ async fn test_varchar_all_ob() { // ``` #[tokio::test] async fn test_blob_all() { - let client_handle = task::spawn_blocking(|| utils::common::build_normal_client()); + let client_handle = task::spawn_blocking(utils::common::build_normal_client); let client = client_handle.await.unwrap(); const TABLE_NAME: &str = "TEST_BLOB_TABLE_RANGE"; client.add_row_key_element(TABLE_NAME, vec!["c1".to_string()]); @@ -82,7 +82,7 @@ async fn test_blob_all() { 
#[tokio::test] async fn test_ob_exceptions() { - let client_handle = task::spawn_blocking(|| utils::common::build_normal_client()); + let client_handle = task::spawn_blocking(utils::common::build_normal_client); let client = client_handle.await.unwrap(); const TABLE_NAME: &str = "TEST_VARCHAR_TABLE_RANGE"; client.add_row_key_element(TABLE_NAME, vec!["c1".to_string()]); @@ -102,7 +102,7 @@ async fn test_ob_exceptions() { #[tokio::test] #[serial] async fn test_query() { - let client_handle = task::spawn_blocking(|| utils::common::build_normal_client()); + let client_handle = task::spawn_blocking(utils::common::build_normal_client); let client = client_handle.await.unwrap(); const TABLE_NAME: &str = "TEST_QUERY_TABLE_RANGE"; client.add_row_key_element(TABLE_NAME, vec!["c1".to_string()]); @@ -122,7 +122,7 @@ async fn test_query() { #[tokio::test] #[serial] async fn test_stream_query() { - let client_handle = task::spawn_blocking(|| utils::common::build_normal_client()); + let client_handle = task::spawn_blocking(utils::common::build_normal_client); let client = client_handle.await.unwrap(); const TABLE_NAME: &str = "TEST_STREAM_QUERY_TABLE_RANGE"; client.add_row_key_element(TABLE_NAME, vec!["c1".to_string()]); @@ -141,7 +141,7 @@ async fn test_stream_query() { // ``` #[tokio::test] async fn test_concurrent() { - let client_handle = task::spawn_blocking(|| utils::common::build_normal_client()); + let client_handle = task::spawn_blocking(utils::common::build_normal_client); let client = client_handle.await.unwrap(); const TABLE_NAME: &str = "TEST_VARCHAR_TABLE_RANGE_CONCURRENT"; client.add_row_key_element(TABLE_NAME, vec!["c1".to_string()]); @@ -169,7 +169,7 @@ async fn test_concurrent() { // ``` #[tokio::test] async fn test_obtable_client_batch_atomic_op() { - let client_handle = task::spawn_blocking(|| utils::common::build_normal_client()); + let client_handle = task::spawn_blocking(utils::common::build_normal_client); let client = client_handle.await.unwrap(); const 
TABLE_NAME: &str = "TEST_TABLE_BATCH_RANGE"; const TABLE_NAME_COMPLEX: &str = "TEST_TABLE_BATCH_RANGE_COMPLEX"; diff --git a/tests/test_table_client_sql.rs b/tests/test_table_client_sql.rs index 4c2e4c8..bbe633d 100644 --- a/tests/test_table_client_sql.rs +++ b/tests/test_table_client_sql.rs @@ -19,12 +19,12 @@ #[allow(unused)] mod utils; -use obkv::{ObTableClient, Table, Value}; +use obkv::{ObTableClient, Value}; use tokio::task; #[tokio::test] async fn test_execute_sql() { - let client_handle = task::spawn_blocking(|| utils::common::build_normal_client()); + let client_handle = task::spawn_blocking(utils::common::build_normal_client); let client = client_handle.await.unwrap(); let test_table_name = "test_execute_sql"; let create_table = @@ -36,7 +36,7 @@ async fn test_execute_sql() { #[tokio::test] async fn test_check_table_exists() { - let client_handle = task::spawn_blocking(|| utils::common::build_normal_client()); + let client_handle = task::spawn_blocking(utils::common::build_normal_client); let client = client_handle.await.unwrap(); let test_table_name = "test_check_table_exists"; let drop_table = format!("drop table IF EXISTS {test_table_name};"); @@ -106,7 +106,7 @@ async fn truncate_table(client: &ObTableClient, test_table_name: &str) { #[tokio::test] async fn test_truncate_table() { - let client_handle = task::spawn_blocking(|| utils::common::build_normal_client()); + let client_handle = task::spawn_blocking(utils::common::build_normal_client); let client = client_handle.await.unwrap(); let test_table_name = "test_varchar_table"; diff --git a/ycsb-rs/src/main.rs b/ycsb-rs/src/main.rs index 5ce9d3f..55c7a17 100644 --- a/ycsb-rs/src/main.rs +++ b/ycsb-rs/src/main.rs @@ -1,5 +1,11 @@ -use std::{cell::RefCell, fs, rc::Rc, sync::Arc, thread, time::Instant}; -use std::sync::Mutex; +use std::{ + cell::RefCell, + fs, + rc::Rc, + sync::{Arc, Mutex}, + thread, + time::Instant, +}; use anyhow::{bail, Result}; use obkv::dump_metrics; @@ -18,9 +24,9 @@ pub mod db; 
pub mod generator; pub mod obkv_client; pub mod properties; +mod runtime; pub mod sqlite; pub mod workload; -mod runtime; #[derive(StructOpt, Debug)] #[structopt(name = "ycsb")] diff --git a/ycsb-rs/src/obkv_client.rs b/ycsb-rs/src/obkv_client.rs index 51cbcd2..dea6983 100644 --- a/ycsb-rs/src/obkv_client.rs +++ b/ycsb-rs/src/obkv_client.rs @@ -20,9 +20,9 @@ use std::{collections::HashMap, sync::Arc, time::Duration}; use anyhow::Result; #[allow(unused)] use obkv::error::CommonErrCode; -use obkv::{Builder, ClientConfig, ObTableClient, RunningMode, Table, TableQuery, Value}; +use obkv::{Builder, ClientConfig, ObTableClient, RunningMode, Value}; -use crate::{properties::Properties}; +use crate::properties::Properties; const PRIMARY_KEY: &str = "ycsb_key"; const COLUMN_NAMES: [&str; 10] = [ @@ -132,7 +132,12 @@ impl OBKVClient { Ok(()) } - pub async fn insert(&self, table: &str, key: &str, values: &HashMap<&str, String>) -> Result<()> { + pub async fn insert( + &self, + table: &str, + key: &str, + values: &HashMap<&str, String>, + ) -> Result<()> { let mut columns: Vec = Vec::new(); let mut properties: Vec = Vec::new(); for (key, value) in values { @@ -155,19 +160,32 @@ impl OBKVClient { } #[allow(unused)] - pub async fn read(&self, table: &str, key: &str, result: &mut HashMap) -> Result<()> { - let result = self.client.get( - table, - vec![Value::from(key)], - COLUMN_NAMES.iter().map(|s| s.to_string()).collect(), - ).await; + pub async fn read( + &self, + table: &str, + key: &str, + result: &mut HashMap, + ) -> Result<()> { + let result = self + .client + .get( + table, + vec![Value::from(key)], + COLUMN_NAMES.iter().map(|s| s.to_string()).collect(), + ) + .await; assert!(result.is_ok()); assert_eq!(10, result?.len()); Ok(()) } - pub async fn update(&self, table: &str, key: &str, values: &HashMap<&str, String>) -> Result<()> { + pub async fn update( + &self, + table: &str, + key: &str, + values: &HashMap<&str, String>, + ) -> Result<()> { let mut columns: Vec = 
Vec::new(); let mut properties: Vec = Vec::new(); for (key, value) in values { diff --git a/ycsb-rs/src/runtime.rs b/ycsb-rs/src/runtime.rs index ebecb1f..313282f 100644 --- a/ycsb-rs/src/runtime.rs +++ b/ycsb-rs/src/runtime.rs @@ -16,7 +16,9 @@ */ use std::sync::Arc; + use obkv::runtime; + use crate::properties::Properties; /// OBKV Table Runtime @@ -42,4 +44,4 @@ pub fn build_ycsb_runtimes(props: Arc) -> ObYCSBRuntimes { default_runtime: Arc::new(build_runtime("ycsb-default", props.ycsb_thread_num)), block_runtime: Arc::new(build_runtime("ycsb-block", 1)), } -} \ No newline at end of file +} diff --git a/ycsb-rs/src/workload.rs b/ycsb-rs/src/workload.rs index 92bb462..fc06445 100644 --- a/ycsb-rs/src/workload.rs +++ b/ycsb-rs/src/workload.rs @@ -5,7 +5,7 @@ use std::{cell::RefCell, rc::Rc}; pub use core_workload::CoreWorkload; use rand::rngs::SmallRng; -use crate::{db::DB}; +use crate::db::DB; pub trait Workload { fn do_insert(&self, db: Rc); diff --git a/ycsb-rs/src/workload/core_workload.rs b/ycsb-rs/src/workload/core_workload.rs index a974656..a1ca888 100644 --- a/ycsb-rs/src/workload/core_workload.rs +++ b/ycsb-rs/src/workload/core_workload.rs @@ -182,7 +182,9 @@ impl CoreWorkload { let dbstart = format!("{}", fnvhash64(start)); let dbend = format!("{}", fnvhash64(start)); let mut result = HashMap::new(); - db.scan(&self.table, &dbstart, &dbend, &mut result).await.unwrap(); + db.scan(&self.table, &dbstart, &dbend, &mut result) + .await + .unwrap(); } fn next_key_num(&self) -> u64 { @@ -221,7 +223,7 @@ impl CoreWorkload { .operation_chooser .lock() .unwrap() - .next_value(&mut *rng.lock().unwrap()); + .next_value(&mut rng.lock().unwrap()); match op { CoreOperation::Insert => { self.ob_transaction_insert(db).await; From e9c1c082a0fe74ac37d9fa4c900d128ac8609b33 Mon Sep 17 00:00:00 2001 From: "zeli.lwb" Date: Wed, 7 Jun 2023 20:51:49 +0800 Subject: [PATCH 3/9] [Fix] remove useless APIs --- src/client/mod.rs | 18 ++-- src/client/query.rs | 64 +----------- 
src/client/table.rs | 205 +------------------------------------ src/client/table_client.rs | 61 ++++------- src/lib.rs | 2 +- src/rpc/conn_pool.rs | 10 +- src/rpc/mod.rs | 17 ++- ycsb-rs/src/runtime.rs | 2 +- 8 files changed, 52 insertions(+), 327 deletions(-) diff --git a/src/client/mod.rs b/src/client/mod.rs index ad5b4dd..2a60c58 100644 --- a/src/client/mod.rs +++ b/src/client/mod.rs @@ -155,12 +155,9 @@ pub struct ClientConfig { pub min_idle_conns_per_server: usize, pub query_concurrency_limit: Option, - pub batch_op_thread_num: usize, - pub query_thread_num: usize, - pub conn_init_thread_num: usize, - pub conn_reader_thread_num: usize, - pub conn_writer_thread_num: usize, - pub default_thread_num: usize, + pub tcp_recv_thread_num: usize, + pub tcp_send_thread_num: usize, + pub bg_thread_num: usize, pub max_inflight_reqs_per_conn: usize, @@ -205,14 +202,11 @@ impl Default for ClientConfig { max_conns_per_server: 10, min_idle_conns_per_server: 5, - conn_init_thread_num: 2, query_concurrency_limit: None, - batch_op_thread_num: 2, - query_thread_num: 2, - conn_reader_thread_num: 4, - conn_writer_thread_num: 2, - default_thread_num: 2, + tcp_recv_thread_num: 4, + tcp_send_thread_num: 2, + bg_thread_num: 2, max_inflight_reqs_per_conn: 100, diff --git a/src/client/query.rs b/src/client/query.rs index 4cd57f4..5b4d8ca 100644 --- a/src/client/query.rs +++ b/src/client/query.rs @@ -25,17 +25,16 @@ use std::{ /// Query API for ob table use super::ObTable; use crate::{ - client::table_client::{ObTableClientStreamQuerier, OBKV_CLIENT_METRICS}, + client::table_client::{StreamQuerier, OBKV_CLIENT_METRICS}, error::{CommonErrCode, Error::Common as CommonErr, Result}, rpc::protocol::{ payloads::ObTableEntityType, query::{ - ObHTableFilter, ObTableQuery, ObTableQueryRequest, ObTableQueryResult, + ObTableQuery, ObTableQueryRequest, ObTableQueryResult, ObTableStreamRequest, }, DEFAULT_FLAG, }, - runtime::RuntimeRef, serde_obkv::value::Value, }; @@ -43,29 +42,10 @@ use crate::{ // 
Zero timeout means no-wait request. const ZERO_TIMEOUT_MS: Duration = Duration::from_millis(0); -pub trait StreamQuerier { - // TODO: `async` trait functions are not currently supported - fn execute_query( - &self, - result: &mut QueryStreamResult, - part_id_and_table: (i64, Arc), - payload: &mut ObTableQueryRequest, - ) -> Result; - - fn execute_stream( - &self, - result: &mut QueryStreamResult, - part_id_and_table: (i64, Arc), - payload: &mut ObTableStreamRequest, - ) -> Result; - - fn get_runtime(&self) -> RuntimeRef; -} - type PartitionQueryResultDeque = VecDeque<((i64, Arc), ObTableQueryResult)>; pub struct QueryStreamResult { - querier: Arc, + querier: Arc, initialized: bool, eof: bool, closed: bool, @@ -91,7 +71,7 @@ impl fmt::Debug for QueryStreamResult { } impl QueryStreamResult { - pub fn new(querier: Arc, table_query: ObTableQuery) -> Self { + pub fn new(querier: Arc, table_query: ObTableQuery) -> Self { Self { querier, initialized: false, @@ -499,39 +479,3 @@ impl Drop for QueryResultSet { } } } - -/// Table Query Trait - -pub const PRIMARY_INDEX_NAME: &str = "PRIMARY"; - -pub trait TableQuery { - // TODO: async execute / `async` trait functions are not currently supported - fn execute(&self) -> Result; - fn get_table_name(&self) -> String; - fn set_entity_type(&mut self, entity_type: ObTableEntityType); - fn entity_type(&self) -> ObTableEntityType; - fn select(self, columns: Vec) -> Self; - fn limit(self, offset: Option, limit: i32) -> Self; - fn add_scan_range( - self, - start: Vec, - start_equals: bool, - end: Vec, - end_equals: bool, - ) -> Self; - fn add_scan_range_starts_with(self, start: Vec, start_equals: bool) -> Self; - fn add_scan_range_ends_with(self, end: Vec, end_equals: bool) -> Self; - fn scan_order(self, forward: bool) -> Self; - fn index_name(self, index_name: &str) -> Self; - fn primary_index(self) -> Self - where - Self: Sized, - { - self.index_name(PRIMARY_INDEX_NAME) - } - fn filter_string(self, filter_string: &str) -> Self; - fn 
htable_filter(self, filter: ObHTableFilter) -> Self; - fn batch_size(self, batch_size: i32) -> Self; - fn operation_timeout(self, timeout: Duration) -> Self; - fn clear(&mut self); -} diff --git a/src/client/table.rs b/src/client/table.rs index 856c7de..714d950 100644 --- a/src/client/table.rs +++ b/src/client/table.rs @@ -15,21 +15,19 @@ * #L% */ -use std::{fmt::Formatter, sync::Arc, time::Duration}; +use std::{fmt::Formatter, time::Duration}; -use super::{query::QueryResultSet, ClientConfig, TableOpResult}; +use super::{ClientConfig, TableOpResult}; use crate::{ error::{CommonErrCode, Error::Common as CommonErr, Result}, rpc::{ protocol::{ codes::ResultCodes, payloads::*, - query::{ObHTableFilter, ObNewRange, ObScanOrder, ObTableQuery}, ObPayload, }, proxy::Proxy, }, - serde_obkv::value::Value, }; #[derive(Clone)] @@ -56,8 +54,8 @@ impl std::fmt::Debug for ObTable { } } -// TODO: impl Table for ObTable impl ObTable { + /// execute partition payload pub async fn execute_payload( &self, payload: &mut T, @@ -67,17 +65,11 @@ impl ObTable { Ok(()) } - pub fn query(&self, _table_name: &str) { - // TODO: return impl TableQuery - todo!() - // ObTableQueryImpl::new(table_name, Arc::new(self.clone())) - } - pub fn operation_timeout(&self) -> Duration { self.config.rpc_operation_timeout } - /// Execute a batch operation on a table + /// Execute a batch operation on a partition table pub async fn execute_batch( &self, _table_name: &str, @@ -191,192 +183,3 @@ impl From for Result> { Ok(results) } } - -#[allow(dead_code)] -// impl ObTableStreamQuerier for obtable -pub struct ObTableQueryImpl { - operation_timeout: Option, - entity_type: ObTableEntityType, - table_name: String, - table: Arc, - table_query: ObTableQuery, -} - -impl ObTableQueryImpl { - pub fn new(table_name: &str, table: Arc) -> Self { - Self { - operation_timeout: None, - entity_type: ObTableEntityType::Dynamic, - table_name: table_name.to_owned(), - table, - table_query: ObTableQuery::new(), - } - } - - fn 
reset(&mut self) { - //FIXME table query should set partition_id - self.table_query = ObTableQuery::new(); - } -} - -#[allow(dead_code)] -impl ObTableQueryImpl { - fn execute(&self) -> Result { - todo!() - } - - fn get_table_name(&self) -> String { - self.table_name.to_owned() - } - - fn set_entity_type(&mut self, entity_type: ObTableEntityType) { - self.entity_type = entity_type; - } - - fn entity_type(&self) -> ObTableEntityType { - self.entity_type - } - - fn select(mut self, columns: Vec) -> Self - where - Self: Sized, - { - self.table_query.select_columns(columns); - self - } - - fn limit(mut self, offset: Option, limit: i32) -> Self - where - Self: Sized, - { - if let Some(v) = offset { - self.table_query.set_offset(v); - } - self.table_query.set_limit(limit); - self - } - - fn add_scan_range( - mut self, - start: Vec, - start_equals: bool, - end: Vec, - end_equals: bool, - ) -> Self - where - Self: Sized, - { - let mut range = ObNewRange::from_keys(start, end); - if start_equals { - range.set_inclusive_start(); - } else { - range.unset_inclusive_start(); - } - - if end_equals { - range.set_inclusive_end(); - } else { - range.unset_inclusive_end(); - } - - self.table_query.add_key_range(range); - self - } - - fn add_scan_range_starts_with(mut self, start: Vec, start_equals: bool) -> Self - where - Self: Sized, - { - let mut end = Vec::with_capacity(start.len()); - - for _ in 0..start.len() { - end.push(Value::get_max()); - } - - let mut range = ObNewRange::from_keys(start, end); - - if start_equals { - range.set_inclusive_start(); - } else { - range.unset_inclusive_start(); - } - - self.table_query.add_key_range(range); - self - } - - fn add_scan_range_ends_with(mut self, end: Vec, end_equals: bool) -> Self - where - Self: Sized, - { - let mut start = Vec::with_capacity(end.len()); - - for _ in 0..end.len() { - start.push(Value::get_min()); - } - - let mut range = ObNewRange::from_keys(start, end); - - if end_equals { - range.set_inclusive_end(); - } else { - 
range.unset_inclusive_end(); - } - - self.table_query.add_key_range(range); - self - } - - fn scan_order(mut self, forward: bool) -> Self - where - Self: Sized, - { - self.table_query - .set_scan_order(ObScanOrder::from_bool(forward)); - self - } - - fn index_name(mut self, index_name: &str) -> Self - where - Self: Sized, - { - self.table_query.set_index_name(index_name.to_owned()); - self - } - - fn filter_string(mut self, filter_string: &str) -> Self - where - Self: Sized, - { - self.table_query.set_filter_string(filter_string.to_owned()); - self - } - - fn htable_filter(mut self, filter: ObHTableFilter) -> Self - where - Self: Sized, - { - self.table_query.set_htable_filter(filter); - self - } - - fn batch_size(mut self, batch_size: i32) -> Self - where - Self: Sized, - { - self.table_query.set_batch_size(batch_size); - self - } - - fn operation_timeout(mut self, timeout: Duration) -> Self - where - Self: Sized, - { - self.operation_timeout = Some(timeout); - self - } - - fn clear(&mut self) { - self.reset(); - } -} diff --git a/src/client/table_client.rs b/src/client/table_client.rs index 8cce7ed..3d567d2 100644 --- a/src/client/table_client.rs +++ b/src/client/table_client.rs @@ -1454,29 +1454,20 @@ pub type RuntimesRef = Arc; /// OBKV Table Runtime #[derive(Clone, Debug)] pub struct ObClientRuntimes { - /// Runtime for multi-batch operation - pub batch_op_runtime: RuntimeRef, - /// Runtime for query - pub query_runtime: RuntimeRef, - /// Runtime for init connection - pub conn_init_runtime: RuntimeRef, /// Runtime for connection to read data - pub reader_runtime: RuntimeRef, + pub tcp_send_runtime: RuntimeRef, /// Runtime for connection to write data - pub writer_runtime: RuntimeRef, - /// Runtime for some other tasks - pub default_runtime: RuntimeRef, + pub tcp_recv_runtime: RuntimeRef, + /// Runtime for background task such as: conn_init / batch operation + pub bg_runtime: RuntimeRef, } impl ObClientRuntimes { pub fn test_default() -> ObClientRuntimes { 
ObClientRuntimes { - batch_op_runtime: Arc::new(build_runtime("ob-batch-executor", 1)), - query_runtime: Arc::new(build_runtime("ob-query-executor", 1)), - conn_init_runtime: Arc::new(build_runtime("ob-conn-initer", 1)), - reader_runtime: Arc::new(build_runtime("ob-conn-reader", 1)), - writer_runtime: Arc::new(build_runtime("ob-conn-writer", 1)), - default_runtime: Arc::new(build_runtime("ob-default", 1)), + tcp_recv_runtime: Arc::new(build_runtime("ob-tcp-reviever", 1)), + tcp_send_runtime: Arc::new(build_runtime("ob-tcp-sender", 1)), + bg_runtime: Arc::new(build_runtime("ob-default", 1)), } } } @@ -1492,21 +1483,15 @@ fn build_runtime(name: &str, threads_num: usize) -> runtime::Runtime { fn build_obkv_runtimes(config: &ClientConfig) -> ObClientRuntimes { ObClientRuntimes { - batch_op_runtime: Arc::new(build_runtime( - "ob-batch-executor", - config.batch_op_thread_num, + tcp_recv_runtime: Arc::new(build_runtime( + "ob-tcp-reviever", + config.tcp_recv_thread_num, )), - query_runtime: Arc::new(build_runtime("ob-query-executor", config.query_thread_num)), - conn_init_runtime: Arc::new(build_runtime("ob-conn-initer", config.conn_init_thread_num)), - reader_runtime: Arc::new(build_runtime( - "ob-conn-reader", - config.conn_reader_thread_num, + tcp_send_runtime: Arc::new(build_runtime( + "ob-tcp-sender", + config.tcp_send_thread_num, )), - writer_runtime: Arc::new(build_runtime( - "ob-conn-writer", - config.conn_writer_thread_num, - )), - default_runtime: Arc::new(build_runtime("ob-default", config.default_thread_num)), + bg_runtime: Arc::new(build_runtime("ob_bg", config.bg_thread_num)), } } @@ -1644,7 +1629,7 @@ impl ObTableClient { .inner .get_or_create_table(table_name, &table_entry, part_id)?; let table_name = table_name.to_owned(); - handles.push(self.inner.runtimes.batch_op_runtime.spawn(async move { + handles.push(self.inner.runtimes.bg_runtime.spawn(async move { batch_op.set_partition_id(part_id); batch_op.set_table_name(table_name.clone()); 
table.execute_batch(&table_name, batch_op).await @@ -1659,7 +1644,6 @@ impl ObTableClient { Ok(all_results) } - // TODO: impl ObTable async methods #[inline] pub async fn insert( &self, @@ -1869,13 +1853,13 @@ impl ObTableClient { } } -pub struct ObTableClientStreamQuerier { +pub struct StreamQuerier { client: Arc, table_name: String, start_execute_ts: AtomicI64, } -impl Drop for ObTableClientStreamQuerier { +impl Drop for StreamQuerier { fn drop(&mut self) { let start_ts = self.start_execute_ts.load(Ordering::Relaxed); @@ -1889,7 +1873,7 @@ impl Drop for ObTableClientStreamQuerier { } } -impl ObTableClientStreamQuerier { +impl StreamQuerier { fn new(table_name: &str, client: Arc) -> Self { Self { client, @@ -1898,7 +1882,6 @@ impl ObTableClientStreamQuerier { } } - // TODO: impl StreamQuerier for ObTableClientStreamQuerier pub async fn execute_query( &self, stream_result: &mut QueryStreamResult, @@ -1916,7 +1899,7 @@ impl ObTableClientStreamQuerier { Err(e) => { if let Err(e) = self.client.on_table_op_failure(&self.table_name, &e) { error!( - "ObTableClientStreamQuerier::execute_query on_table_op_failure err: {}.", + "StreamQuerier::execute_query on_table_op_failure err: {}.", e ); } @@ -1943,7 +1926,7 @@ impl ObTableClientStreamQuerier { Err(e) => { if let Err(e) = self.client.on_table_op_failure(&self.table_name, &e) { error!( - "ObTableClientStreamQuerier::execute_query on_table_op_failure err: {}.", + "StreamQuerier::execute_query on_table_op_failure err: {}.", e ); } @@ -1962,7 +1945,6 @@ impl ObTableClientStreamQuerier { pub const PRIMARY_INDEX_NAME: &str = "PRIMARY"; -/// TODO refactor with ObTableQueryImpl pub struct ObTableClientQueryImpl { operation_timeout: Option, entity_type: ObTableEntityType, @@ -1986,7 +1968,6 @@ impl ObTableClientQueryImpl { self.table_query = ObTableQuery::new(); } - // TODO: impl TableQuery for ObTableClientQueryImpl pub async fn execute(&self) -> Result { let mut partition_table: HashMap)> = HashMap::new(); @@ -2014,7 +1995,7 
@@ impl ObTableClientQueryImpl { let start = Instant::now(); let mut stream_result = QueryStreamResult::new( - Arc::new(ObTableClientStreamQuerier::new( + Arc::new(StreamQuerier::new( &self.table_name, self.client.clone(), )), diff --git a/src/lib.rs b/src/lib.rs index b174818..e1ff2de 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -56,7 +56,7 @@ pub mod serde_obkv; mod util; pub use self::{ client::{ - query::{QueryResultSet, TableQuery}, + query::{QueryResultSet}, table::ObTable, table_client::{Builder, ObTableClient, RunningMode}, ClientConfig, Table, TableOpResult, diff --git a/src/rpc/conn_pool.rs b/src/rpc/conn_pool.rs index 069aa66..ccb6df1 100644 --- a/src/rpc/conn_pool.rs +++ b/src/rpc/conn_pool.rs @@ -51,7 +51,7 @@ impl PoolInner { } } - // TODO: use more random/fair policy to pick a connection + // TODO: use more random/fair policy to pick a connection / async remove conn fn try_get(&mut self) -> (Option>, usize) { let mut removed = 0usize; while !self.conns.is_empty() { @@ -171,9 +171,13 @@ impl ConnPool { mut retry_num: usize, build_retry_limit: usize, ) { - let shared_pool = shared_pool.clone(); - shared_pool.clone().runtimes.conn_init_runtime.spawn(async move { + let weak_shared_pool = Arc::downgrade(shared_pool); + shared_pool.clone().runtimes.bg_runtime.spawn(async move { loop { + let shared_pool = match weak_shared_pool.upgrade() { + None => return, + Some(p) => p, + }; match shared_pool.build_conn().await { Ok(conn) => { let mut inner = shared_pool.inner.lock().unwrap(); diff --git a/src/rpc/mod.rs b/src/rpc/mod.rs index 80e3741..ec17794 100644 --- a/src/rpc/mod.rs +++ b/src/rpc/mod.rs @@ -199,7 +199,7 @@ impl ConnectionSender { /// A Connection to OBKV Server pub struct Connection { - //remote addr + // remote addr addr: SocketAddr, reader: Option>>, reader_signal_sender: mpsc::Sender<()>, @@ -214,7 +214,7 @@ pub struct Connection { trace_id_counter: AtomicU32, load: AtomicUsize, // TODO: check unused runtime - runtimes: RuntimesRef, + bg_runtime: 
RuntimeRef, } const OB_MYSQL_MAX_PACKET_LENGTH: usize = 1 << 24; @@ -252,7 +252,7 @@ impl Connection { let read_active = active.clone(); let (sender, receiver): (mpsc::Sender<()>, mpsc::Receiver<()>) = mpsc::channel(1); - let join_handle = runtimes.reader_runtime.spawn(async move { + let join_handle = runtimes.tcp_recv_runtime.spawn(async move { let addr = read_stream.peer_addr()?; Connection::process_reading_data(receiver, read_stream, read_requests.clone(), &addr) @@ -272,7 +272,7 @@ impl Connection { write_stream, requests.clone(), active.clone(), - runtimes.writer_runtime.clone(), + runtimes.tcp_send_runtime.clone(), channel_capacity, ), requests, @@ -285,7 +285,7 @@ impl Connection { id, trace_id_counter: AtomicU32::new(0), load: AtomicUsize::new(0), - runtimes: runtimes.clone(), + bg_runtime: runtimes.bg_runtime.clone(), }) } @@ -687,15 +687,14 @@ impl Connection { // 1. close writer if let Err(e) = self - .runtimes - .default_runtime + .bg_runtime .block_on(async { self.sender.close().await }) { error!("Connection::close fail to close writer, err: {}.", e); } // 2. close reader - if let Err(e) = self.runtimes.default_runtime.block_on(async { + if let Err(e) = self.bg_runtime.block_on(async { self.reader_signal_sender .send(()) .await @@ -957,6 +956,6 @@ mod test { .expect("fail to send request") .try_recv(); assert!(res.is_ok()); - assert!(conn.close().is_ok()); + assert!(conn.close().await.is_ok()); } } diff --git a/ycsb-rs/src/runtime.rs b/ycsb-rs/src/runtime.rs index 313282f..92b40a2 100644 --- a/ycsb-rs/src/runtime.rs +++ b/ycsb-rs/src/runtime.rs @@ -2,7 +2,7 @@ * #%L * OBKV Table Client Framework * %% - * Copyright (C) 2021 OceanBase + * Copyright (C) 2023 OceanBase * %% * OBKV Table Client Framework is licensed under Mulan PSL v2. 
* You can use this software according to the terms and conditions of the From 5164c233e4c20988acb5891756ddbc02cff6f9c9 Mon Sep 17 00:00:00 2001 From: "zeli.lwb" Date: Thu, 8 Jun 2023 17:12:30 +0800 Subject: [PATCH 4/9] [Fix] modify runtime/query --- src/client/query.rs | 16 ++++++---------- src/client/table.rs | 6 +----- src/client/table_client.rs | 15 +++------------ src/lib.rs | 2 +- src/rpc/conn_pool.rs | 12 +++--------- src/rpc/mod.rs | 4 ++-- src/rpc/proxy.rs | 4 ++-- tests/test_cse_table.rs | 5 +---- tests/test_hbase_client.rs | 3 +-- tests/test_table_client.rs | 8 +++++++- tests/test_table_client_base.rs | 2 +- ycsb-rs/src/obkv_client.rs | 18 +++++++++--------- ycsb-rs/src/properties.rs | 25 +++++++++++-------------- ycsb-rs/workloads/workload_obkv.toml | 6 +++--- 14 files changed, 51 insertions(+), 75 deletions(-) diff --git a/src/client/query.rs b/src/client/query.rs index 5b4d8ca..0c9bd12 100644 --- a/src/client/query.rs +++ b/src/client/query.rs @@ -29,10 +29,7 @@ use crate::{ error::{CommonErrCode, Error::Common as CommonErr, Result}, rpc::protocol::{ payloads::ObTableEntityType, - query::{ - ObTableQuery, ObTableQueryRequest, ObTableQueryResult, - ObTableStreamRequest, - }, + query::{ObTableQuery, ObTableQueryRequest, ObTableQueryResult, ObTableStreamRequest}, DEFAULT_FLAG, }, serde_obkv::value::Value, @@ -378,9 +375,8 @@ impl QueryStreamResult { impl Drop for QueryStreamResult { fn drop(&mut self) { - if self.closed { - } else { - error!("QueryStreamResult::close fail") + if !self.closed { + error!("QueryStreamResult::drop stream is not closed when drop") } } } @@ -416,7 +412,7 @@ impl QueryResultSet { } } - pub fn close(&mut self) -> Result<()> { + pub fn check_close(&mut self) -> Result<()> { match self { QueryResultSet::None => Ok(()), QueryResultSet::Some(stream_result) => { @@ -433,7 +429,7 @@ impl QueryResultSet { } } - pub async fn async_close(&mut self) -> Result<()> { + pub async fn close(&mut self) -> Result<()> { match self { 
QueryResultSet::None => Ok(()), QueryResultSet::Some(stream_result) => stream_result.close().await, @@ -473,7 +469,7 @@ impl QueryResultSet { impl Drop for QueryResultSet { fn drop(&mut self) { - match self.close() { + match self.check_close() { Ok(()) => (), Err(e) => error!("QueryResultSet:drop failed: {:?}", e), } diff --git a/src/client/table.rs b/src/client/table.rs index 714d950..23e8bd3 100644 --- a/src/client/table.rs +++ b/src/client/table.rs @@ -21,11 +21,7 @@ use super::{ClientConfig, TableOpResult}; use crate::{ error::{CommonErrCode, Error::Common as CommonErr, Result}, rpc::{ - protocol::{ - codes::ResultCodes, - payloads::*, - ObPayload, - }, + protocol::{codes::ResultCodes, payloads::*, ObPayload}, proxy::Proxy, }, }; diff --git a/src/client/table_client.rs b/src/client/table_client.rs index 3d567d2..63c7fa2 100644 --- a/src/client/table_client.rs +++ b/src/client/table_client.rs @@ -1483,14 +1483,8 @@ fn build_runtime(name: &str, threads_num: usize) -> runtime::Runtime { fn build_obkv_runtimes(config: &ClientConfig) -> ObClientRuntimes { ObClientRuntimes { - tcp_recv_runtime: Arc::new(build_runtime( - "ob-tcp-reviever", - config.tcp_recv_thread_num, - )), - tcp_send_runtime: Arc::new(build_runtime( - "ob-tcp-sender", - config.tcp_send_thread_num, - )), + tcp_recv_runtime: Arc::new(build_runtime("ob-tcp-reviever", config.tcp_recv_thread_num)), + tcp_send_runtime: Arc::new(build_runtime("ob-tcp-sender", config.tcp_send_thread_num)), bg_runtime: Arc::new(build_runtime("ob_bg", config.bg_thread_num)), } } @@ -1995,10 +1989,7 @@ impl ObTableClientQueryImpl { let start = Instant::now(); let mut stream_result = QueryStreamResult::new( - Arc::new(StreamQuerier::new( - &self.table_name, - self.client.clone(), - )), + Arc::new(StreamQuerier::new(&self.table_name, self.client.clone())), self.table_query.clone(), ); diff --git a/src/lib.rs b/src/lib.rs index e1ff2de..53f71d2 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -56,7 +56,7 @@ pub mod serde_obkv; mod 
util; pub use self::{ client::{ - query::{QueryResultSet}, + query::QueryResultSet, table::ObTable, table_client::{Builder, ObTableClient, RunningMode}, ClientConfig, Table, TableOpResult, diff --git a/src/rpc/conn_pool.rs b/src/rpc/conn_pool.rs index ccb6df1..7d6292d 100644 --- a/src/rpc/conn_pool.rs +++ b/src/rpc/conn_pool.rs @@ -250,7 +250,7 @@ impl ConnPool { Ok(()) } - pub async fn get(&self) -> Result> { + pub fn get(&self) -> Result> { let start = Instant::now(); let pool = &self.shared_pool; @@ -453,10 +453,7 @@ mod test { "conn_num({conn_num}) should in the range: [{min_conn_num}, {max_conn_num}]", ); - let conn = pool - .get() - .await - .expect("fail to get connection from the pool"); + let conn = pool.get().expect("fail to get connection from the pool"); assert!(conn.is_active(), "conn should be active"); } @@ -466,10 +463,7 @@ mod test { let (min_conn_num, max_conn_num) = (3, 5); let pool = gen_test_conn_pool(min_conn_num, max_conn_num); for _ in 0..max_conn_num * 2 { - let conn = pool - .get() - .await - .expect("fail to get connection from the pool"); + let conn = pool.get().expect("fail to get connection from the pool"); assert!(conn.is_active(), "should get active connection"); conn.active.store(false, Ordering::SeqCst); } diff --git a/src/rpc/mod.rs b/src/rpc/mod.rs index ec17794..f2c6ce8 100644 --- a/src/rpc/mod.rs +++ b/src/rpc/mod.rs @@ -672,7 +672,7 @@ impl Connection { /// invalidated. 
/// ///For info on default settings see [Builder](struct.Builder.html) - pub async fn new() -> Result { + pub async fn try_new() -> Result { Builder::new().build().await } @@ -956,6 +956,6 @@ mod test { .expect("fail to send request") .try_recv(); assert!(res.is_ok()); - assert!(conn.close().await.is_ok()); + assert!(conn.close().is_ok()); } } diff --git a/src/rpc/proxy.rs b/src/rpc/proxy.rs index 3ae32c7..b77e1a8 100644 --- a/src/rpc/proxy.rs +++ b/src/rpc/proxy.rs @@ -46,7 +46,7 @@ impl Proxy { ) -> Result<()> { // the connection is ensured to be active now by checking conn.is_active // but it may be actually broken already. - let conn = self.0.get().await?; + let conn = self.0.get()?; OBKV_PROXY_METRICS.observe_proxy_misc("conn_load", conn.load() as f64); @@ -80,7 +80,7 @@ impl Proxy { retry_cnt, err ); - let conn = self.0.get().await?; + let conn = self.0.get()?; let res = conn.execute(payload, response).await; if res.is_ok() || conn.is_active() { OBKV_PROXY_METRICS.observe_proxy_misc("retry_times", retry_cnt as f64); diff --git a/tests/test_cse_table.rs b/tests/test_cse_table.rs index 7e64106..339c625 100644 --- a/tests/test_cse_table.rs +++ b/tests/test_cse_table.rs @@ -290,10 +290,7 @@ async fn test_cse_index_key_table() { } } } - result_set - .async_close() - .await - .expect("Fail to close result set"); + result_set.close().await.expect("Fail to close result set"); } #[tokio::test] diff --git a/tests/test_hbase_client.rs b/tests/test_hbase_client.rs index 2843740..42930b4 100644 --- a/tests/test_hbase_client.rs +++ b/tests/test_hbase_client.rs @@ -266,8 +266,7 @@ async fn test_obtable_partition_range_crud() { ]; let result = client.delete(TABLE_NAME, rowk_keys.clone()).await; assert!(result.is_ok()); - let result = result.unwrap(); - assert_eq!(1, result); + result.unwrap(); let result = client .insert( diff --git a/tests/test_table_client.rs b/tests/test_table_client.rs index dba88ac..9f39603 100644 --- a/tests/test_table_client.rs +++ 
b/tests/test_table_client.rs @@ -77,7 +77,7 @@ async fn test_obtable_client_curd() { assert!(result_set.is_ok()); - let result_set = result_set.unwrap(); + let mut result_set = result_set.unwrap(); assert!(result_set.cache_size() > 0); @@ -87,6 +87,12 @@ async fn test_obtable_client_curd() { assert_eq!(1, result); + if let Err(e) = result_set.close().await { + println!("Error: {e}"); + } + + drop(result_set); + let result = client .get( TEST_TABLE_NAME, diff --git a/tests/test_table_client_base.rs b/tests/test_table_client_base.rs index b71204a..b217088 100644 --- a/tests/test_table_client_base.rs +++ b/tests/test_table_client_base.rs @@ -1186,7 +1186,7 @@ impl BaseTest { _ => unreachable!(), } } - let ret = result_set.async_close().await; + let ret = result_set.close().await; assert!(ret.is_ok()); match result_set.next().await { diff --git a/ycsb-rs/src/obkv_client.rs b/ycsb-rs/src/obkv_client.rs index dea6983..f8cace7 100644 --- a/ycsb-rs/src/obkv_client.rs +++ b/ycsb-rs/src/obkv_client.rs @@ -48,10 +48,10 @@ pub struct OBKVClientInitStruct { pub max_conns_per_server: usize, pub min_idle_conns_per_server: usize, - pub conn_init_thread_num: usize, - pub conn_reader_thread_num: usize, - pub conn_writer_thread_num: usize, + pub bg_thread_num: usize, + pub tcp_recv_thread_num: usize, + pub tcp_send_thread_num: usize, } impl OBKVClientInitStruct { @@ -71,9 +71,9 @@ impl OBKVClientInitStruct { refresh_workers_num: props.refresh_workers_num, max_conns_per_server: props.max_conns_per_server, min_idle_conns_per_server: props.min_idle_conns_per_server, - conn_init_thread_num: props.conn_init_thread_num, - conn_reader_thread_num: props.conn_reader_thread_num, - conn_writer_thread_num: props.conn_writer_thread_num, + bg_thread_num: props.bg_thread_num, + tcp_recv_thread_num: props.tcp_recv_thread_num, + tcp_send_thread_num: props.tcp_send_thread_num, } } } @@ -94,9 +94,9 @@ impl OBKVClient { refresh_workers_num: config.refresh_workers_num, max_conns_per_server: 
config.max_conns_per_server, min_idle_conns_per_server: config.min_idle_conns_per_server, - conn_init_thread_num: config.conn_init_thread_num, - conn_reader_thread_num: config.conn_reader_thread_num, - conn_writer_thread_num: config.conn_writer_thread_num, + bg_thread_num: config.bg_thread_num, + tcp_recv_thread_num: config.tcp_recv_thread_num, + tcp_send_thread_num: config.tcp_send_thread_num, ..Default::default() }; let builder = Builder::new() diff --git a/ycsb-rs/src/properties.rs b/ycsb-rs/src/properties.rs index 0f748bc..8510f84 100644 --- a/ycsb-rs/src/properties.rs +++ b/ycsb-rs/src/properties.rs @@ -100,15 +100,15 @@ fn min_idle_conns_per_server_default() -> usize { 1 } -fn conn_init_thread_num_default() -> usize { +fn bg_thread_num_default() -> usize { 2 } -fn conn_reader_thread_num_default() -> usize { +fn tcp_recv_thread_num_default() -> usize { 6 } -fn conn_writer_thread_num_default() -> usize { +fn tcp_send_thread_num_default() -> usize { 4 } @@ -208,21 +208,18 @@ pub struct Properties { rename = "min_idle_conns_per_server" )] pub min_idle_conns_per_server: usize, + #[serde(default = "bg_thread_num_default", rename = "bg_thread_num")] + pub bg_thread_num: usize, #[serde( - default = "conn_init_thread_num_default", - rename = "conn_init_thread_num" + default = "tcp_recv_thread_num_default", + rename = "tcp_recv_thread_num" )] - pub conn_init_thread_num: usize, + pub tcp_recv_thread_num: usize, #[serde( - default = "conn_reader_thread_num_default", - rename = "conn_reader_thread_num" + default = "tcp_send_thread_num_default", + rename = "tcp_send_thread_num" )] - pub conn_reader_thread_num: usize, - #[serde( - default = "conn_writer_thread_num_default", - rename = "conn_writer_thread_num" - )] - pub conn_writer_thread_num: usize, + pub tcp_send_thread_num: usize, #[serde(default = "ycsb_thread_num_default", rename = "ycsb_thread_num")] pub ycsb_thread_num: usize, } diff --git a/ycsb-rs/workloads/workload_obkv.toml b/ycsb-rs/workloads/workload_obkv.toml 
index 6398dd9..b0733d4 100644 --- a/ycsb-rs/workloads/workload_obkv.toml +++ b/ycsb-rs/workloads/workload_obkv.toml @@ -53,6 +53,6 @@ min_idle_conns_per_server = 10 ycsb_thread_num = 10 -conn_init_thread_num = 2 -conn_reader_thread_num = 6 -conn_writer_thread_num = 4 +bg_thread_num = 2 +tcp_recv_thread_num = 6 +tcp_send_thread_num = 4 From 7805abe339efbfbdaef561814abf8112e7cd5e7d Mon Sep 17 00:00:00 2001 From: "zeli.lwb" Date: Fri, 9 Jun 2023 11:26:30 +0800 Subject: [PATCH 5/9] [Feat] batch ycsb --- ycsb-rs/src/obkv_client.rs | 93 +++++++++++++++++++++++---- ycsb-rs/src/properties.rs | 24 +++++++ ycsb-rs/src/workload/core_workload.rs | 79 ++++++++++++++++++++++- ycsb-rs/workloads/workload_obkv.toml | 5 ++ 4 files changed, 186 insertions(+), 15 deletions(-) diff --git a/ycsb-rs/src/obkv_client.rs b/ycsb-rs/src/obkv_client.rs index f8cace7..3c65a3c 100644 --- a/ycsb-rs/src/obkv_client.rs +++ b/ycsb-rs/src/obkv_client.rs @@ -20,15 +20,11 @@ use std::{collections::HashMap, sync::Arc, time::Duration}; use anyhow::Result; #[allow(unused)] use obkv::error::CommonErrCode; -use obkv::{Builder, ClientConfig, ObTableClient, RunningMode, Value}; +use obkv::{Builder, ClientConfig, ObTableClient, RunningMode, TableOpResult, Value}; use crate::properties::Properties; const PRIMARY_KEY: &str = "ycsb_key"; -const COLUMN_NAMES: [&str; 10] = [ - "field0", "field1", "field2", "field3", "field4", "field5", "field6", "field7", "field8", - "field9", -]; pub struct OBKVClientInitStruct { pub full_user_name: String, @@ -164,15 +160,12 @@ impl OBKVClient { &self, table: &str, key: &str, + columns: &Vec, result: &mut HashMap, ) -> Result<()> { let result = self .client - .get( - table, - vec![Value::from(key)], - COLUMN_NAMES.iter().map(|s| s.to_string()).collect(), - ) + .get(table, vec![Value::from(key)], columns.to_owned()) .await; assert!(result.is_ok()); assert_eq!(10, result?.len()); @@ -201,7 +194,7 @@ impl OBKVClient { properties, ) .await - .expect("fail to insert_or update"); + 
.expect("fail to insert or update"); assert_eq!(10, result); Ok(()) @@ -213,12 +206,13 @@ impl OBKVClient { table: &str, startkey: &str, endkey: &str, + columns: &Vec, result: &mut HashMap, ) -> Result<()> { let query = self .client .query(table) - .select(COLUMN_NAMES.iter().map(|s| s.to_string()).collect()) + .select(columns.to_owned()) .primary_index() .add_scan_range( vec![Value::from(startkey)], @@ -230,4 +224,79 @@ impl OBKVClient { assert!(result.is_ok()); Ok(()) } + + #[allow(unused)] + pub async fn batch_read( + &self, + table: &str, + keys: &Vec, + columns: &Vec, + result: &mut HashMap, + ) -> Result<()> { + let mut batch_op = self.client.batch_operation(keys.len()); + for key in keys { + batch_op.get(vec![Value::from(key.to_owned())], columns.to_owned()); + } + let results = self.client.execute_batch(table, batch_op).await; + + // Verify the results + assert!(results.is_ok()); + let results = results.unwrap(); + assert_eq!(results.len(), keys.len()); + for result in results { + match result { + TableOpResult::RetrieveRows(rows) => { + assert_eq!(10, rows.len()); + } + _ => { + unreachable!() + } + } + } + + Ok(()) + } + + #[allow(unused)] + pub async fn batch_insertup( + &self, + table: &str, + keys: &Vec, + fields: &Vec, + values: &Vec, + ) -> Result<()> { + let mut batch_op = self.client.batch_operation(keys.len()); + for key in keys { + let mut properties: Vec = Vec::new(); + for value in values { + properties.push(Value::from(value.to_owned())); + } + batch_op.insert_or_update( + vec![Value::from(key.to_owned())], + fields.to_owned(), + properties, + ); + } + let results = self.client.execute_batch(table, batch_op).await; + + // Verify the results + if results.is_err() { + println!("Error: {:?}", results.as_ref().err()); + } + assert!(results.is_ok()); + let results = results.unwrap(); + assert_eq!(results.len(), keys.len()); + for result in results { + match result { + TableOpResult::AffectedRows(affected_rows) => { + assert_eq!(1, affected_rows); + 
} + _ => { + unreachable!() + } + } + } + + Ok(()) + } } diff --git a/ycsb-rs/src/properties.rs b/ycsb-rs/src/properties.rs index 8510f84..1796e98 100644 --- a/ycsb-rs/src/properties.rs +++ b/ycsb-rs/src/properties.rs @@ -20,6 +20,10 @@ fn field_length_default() -> u64 { 100 } +fn batch_count_default() -> u64 { + 100 +} + fn read_proportion_default() -> f64 { 0.95 } @@ -40,6 +44,14 @@ fn read_modify_write_proportion_default() -> f64 { 0.0 } +fn batch_read_proportion_default() -> f64 { + 0.0 +} + +fn batch_insertup_proportion_default() -> f64 { + 0.0 +} + fn full_user_name_default() -> String { "FULL_USER_NAME".to_string() } @@ -145,6 +157,8 @@ pub struct Properties { pub request_distribution: String, #[serde(default = "field_length_default", rename = "fieldlength")] pub field_length: u64, + #[serde(default = "batch_count_default", rename = "batchcount")] + pub batch_count: u64, // read, update, insert, scan, read-modify-write #[serde(default = "read_proportion_default", rename = "readproportion")] @@ -160,6 +174,16 @@ pub struct Properties { rename = "readmodifywriteproportion" )] pub read_modify_write_proportion: f64, + #[serde( + default = "batch_read_proportion_default", + rename = "batchreadproportion" + )] + pub batch_read_proportion: f64, + #[serde( + default = "batch_insertup_proportion_default", + rename = "batchinsertupproportion" + )] + pub batch_insertup_proportion: f64, #[serde(default = "full_user_name_default", rename = "full_user_name")] pub full_user_name: String, diff --git a/ycsb-rs/src/workload/core_workload.rs b/ycsb-rs/src/workload/core_workload.rs index a1ca888..e8d4da7 100644 --- a/ycsb-rs/src/workload/core_workload.rs +++ b/ycsb-rs/src/workload/core_workload.rs @@ -30,6 +30,8 @@ pub enum CoreOperation { Insert, Scan, ReadModifyWrite, + BatchRead, + BatchInsertUp, } impl std::fmt::Display for CoreOperation { @@ -44,6 +46,7 @@ pub struct CoreWorkload { table: String, field_count: u64, field_names: Vec, + batch_count: u64, 
field_length_generator: Mutex + Send>>, read_all_fields: bool, write_all_fields: bool, @@ -75,6 +78,7 @@ impl CoreWorkload { table: String::from("usertable"), field_count, field_names, + batch_count: prop.batch_count, field_length_generator: Mutex::new(get_field_length_generator(prop)), read_all_fields: true, write_all_fields: true, @@ -139,8 +143,9 @@ impl CoreWorkload { let keynum = self.next_key_num(); let dbkey = format!("{}", fnvhash64(keynum)); let mut result = HashMap::new(); - db.read(&self.table, &dbkey, &mut result).await.unwrap(); - // TODO: verify rows + db.read(&self.table, &dbkey, &self.field_names, &mut result) + .await + .unwrap(); } fn do_transaction_update(&self, db: Rc) { @@ -182,7 +187,57 @@ impl CoreWorkload { let dbstart = format!("{}", fnvhash64(start)); let dbend = format!("{}", fnvhash64(start)); let mut result = HashMap::new(); - db.scan(&self.table, &dbstart, &dbend, &mut result) + db.scan( + &self.table, + &dbstart, + &dbend, + &self.field_names, + &mut result, + ) + .await + .unwrap(); + } + + async fn ob_transaction_batchread(&self, db: Arc) { + let mut keys = Vec::new(); + for _ in 0..self.batch_count { + // generate key + let keynum = self.next_key_num(); + let dbkey = format!("{}", fnvhash64(keynum)); + keys.push(dbkey); + } + + let mut result = HashMap::new(); + db.batch_read(&self.table, &keys, &self.field_names, &mut result) + .await + .unwrap(); + } + + async fn ob_transaction_batchinsertup(&self, db: Arc) { + let mut keys = Vec::new(); + let mut field_values = Vec::new(); + + // generate value for each field + // operation in batch will reuse field values + for _ in 0..self.field_count { + let field_len = self + .field_length_generator + .lock() + .unwrap() + .next_value(&mut self.rng.lock().unwrap()); + let s = Alphanumeric + .sample_string::(&mut self.rng.lock().unwrap(), field_len as usize); + field_values.push(s); + } + + // generate key + for _ in 0..self.batch_count { + let keynum = self.next_key_num(); + let dbkey = 
format!("{}", fnvhash64(keynum)); + keys.push(dbkey); + } + + db.batch_insertup(&self.table, &keys, &self.field_names, &field_values) .await .unwrap(); } @@ -237,6 +292,12 @@ impl CoreWorkload { CoreOperation::Scan => { self.ob_transaction_scan(db).await; } + CoreOperation::BatchRead => { + self.ob_transaction_batchread(db).await; + } + CoreOperation::BatchInsertUp => { + self.ob_transaction_batchinsertup(db).await; + } _ => todo!(), } } @@ -354,6 +415,18 @@ fn create_operation_generator(prop: &Properties) -> DiscreteGenerator 0.0 { + pairs.push(WeightPair::new( + prop.batch_read_proportion, + CoreOperation::BatchRead, + )); + } + if prop.batch_insertup_proportion > 0.0 { + pairs.push(WeightPair::new( + prop.batch_insertup_proportion, + CoreOperation::BatchInsertUp, + )); + } DiscreteGenerator::new(pairs) } diff --git a/ycsb-rs/workloads/workload_obkv.toml b/ycsb-rs/workloads/workload_obkv.toml index b0733d4..4764755 100644 --- a/ycsb-rs/workloads/workload_obkv.toml +++ b/ycsb-rs/workloads/workload_obkv.toml @@ -24,10 +24,15 @@ measurementtype = "histogram" readallfields = true +# The number of operation in one batch +batchcount = 100 + insertproportion = 0 readproportion = 1.0 scanproportion = 0 updateproportion = 0 +batchreadproportion = 0 +batchinsertupproportion = 0 requestdistribution = "uniform" From 38d695047bd539695b0c01b6f01a2310232aabe7 Mon Sep 17 00:00:00 2001 From: "zeli.lwb" Date: Fri, 9 Jun 2023 14:36:07 +0800 Subject: [PATCH 6/9] [Fix] review --- src/rpc/conn_pool.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/rpc/conn_pool.rs b/src/rpc/conn_pool.rs index 7d6292d..63c23af 100644 --- a/src/rpc/conn_pool.rs +++ b/src/rpc/conn_pool.rs @@ -26,9 +26,9 @@ use tokio::time::sleep; use super::{Builder as ConnBuilder, Connection}; use crate::{ - client::table_client::RuntimesRef, error::{CommonErrCode, Error::Common as CommonErr, Result}, proxy::OBKV_PROXY_METRICS, + runtime::RuntimeRef, }; const MIN_BUILD_RETRY_INTERVAL_MS: 
u64 = 50 * 1000; @@ -172,7 +172,7 @@ impl ConnPool { build_retry_limit: usize, ) { let weak_shared_pool = Arc::downgrade(shared_pool); - shared_pool.clone().runtimes.bg_runtime.spawn(async move { + shared_pool.clone().runtime.spawn(async move { loop { let shared_pool = match weak_shared_pool.upgrade() { None => return, @@ -320,7 +320,7 @@ struct SharedPool { conn_builder: ConnBuilder, inner: Mutex, cond: Condvar, - runtimes: RuntimesRef, + runtime: RuntimeRef, } impl SharedPool { @@ -336,7 +336,7 @@ impl SharedPool { conn_builder: builder, inner: Mutex::new(PoolInner::new(max_conn_num)), cond: Condvar::new(), - runtimes, + runtime: runtimes.bg_runtime.clone(), }) } From 54a62fc68b1505c77c7ce3ae3d972d9387d87074 Mon Sep 17 00:00:00 2001 From: "zeli.lwb" Date: Tue, 13 Jun 2023 09:54:36 +0800 Subject: [PATCH 7/9] [Fix] connection close process --- src/client/mod.rs | 4 -- src/client/table.rs | 2 +- src/client/table_client.rs | 2 +- src/rpc/conn_pool.rs | 2 +- src/rpc/mod.rs | 85 ++++++++++++++++++++++++++------------ 5 files changed, 61 insertions(+), 34 deletions(-) diff --git a/src/client/mod.rs b/src/client/mod.rs index 2a60c58..ced0554 100644 --- a/src/client/mod.rs +++ b/src/client/mod.rs @@ -137,8 +137,6 @@ pub struct ClientConfig { pub table_entry_refresh_try_interval: Duration, pub table_entry_refresh_continuous_failure_ceiling: usize, - pub table_batch_op_thread_num: usize, - pub server_address_priority_timeout: Duration, pub runtime_continuous_failure_ceiling: usize, @@ -186,8 +184,6 @@ impl Default for ClientConfig { table_entry_refresh_try_interval: Duration::from_millis(20), table_entry_refresh_continuous_failure_ceiling: 10, - table_batch_op_thread_num: 16, - server_address_priority_timeout: Duration::from_secs(1800), runtime_continuous_failure_ceiling: 100, diff --git a/src/client/table.rs b/src/client/table.rs index 23e8bd3..62aa309 100644 --- a/src/client/table.rs +++ b/src/client/table.rs @@ -65,7 +65,7 @@ impl ObTable { 
self.config.rpc_operation_timeout } - /// Execute a batch operation on a partition table + /// Execute batch operation pub async fn execute_batch( &self, _table_name: &str, diff --git a/src/client/table_client.rs b/src/client/table_client.rs index 63c7fa2..b7f175d 100644 --- a/src/client/table_client.rs +++ b/src/client/table_client.rs @@ -1485,7 +1485,7 @@ fn build_obkv_runtimes(config: &ClientConfig) -> ObClientRuntimes { ObClientRuntimes { tcp_recv_runtime: Arc::new(build_runtime("ob-tcp-reviever", config.tcp_recv_thread_num)), tcp_send_runtime: Arc::new(build_runtime("ob-tcp-sender", config.tcp_send_thread_num)), - bg_runtime: Arc::new(build_runtime("ob_bg", config.bg_thread_num)), + bg_runtime: Arc::new(build_runtime("ob-bg", config.bg_thread_num)), } } diff --git a/src/rpc/conn_pool.rs b/src/rpc/conn_pool.rs index 63c23af..483dfe4 100644 --- a/src/rpc/conn_pool.rs +++ b/src/rpc/conn_pool.rs @@ -51,7 +51,7 @@ impl PoolInner { } } - // TODO: use more random/fair policy to pick a connection / async remove conn + // TODO: use more random / fair policy to pick a connection / async remove conn fn try_get(&mut self) -> (Option>, usize) { let mut removed = 0usize; while !self.conns.is_empty() { diff --git a/src/rpc/mod.rs b/src/rpc/mod.rs index f2c6ce8..57f2b03 100644 --- a/src/rpc/mod.rs +++ b/src/rpc/mod.rs @@ -180,8 +180,15 @@ impl ConnectionSender { self.sender.send(message).await.map_err(Self::broken_pipe) } + #[allow(dead_code)] + /// close the connection async fn close(&mut self) -> Result<()> { self.request(ObTablePacket::ClosePoison).await?; + self.shutdown() + } + + /// Shutdown the sender without closing remote + fn shutdown(&mut self) -> Result<()> { let writer = mem::replace(&mut self.writer, None); let drop_helper = AbortOnDropMany(vec![writer.unwrap()]); drop(drop_helper); @@ -202,7 +209,7 @@ pub struct Connection { // remote addr addr: SocketAddr, reader: Option>>, - reader_signal_sender: mpsc::Sender<()>, + reader_signal_sender: Option>, sender: 
ConnectionSender, requests: RequestsMap, continuous_timeout_failures: AtomicUsize, @@ -213,8 +220,6 @@ pub struct Connection { id: u32, trace_id_counter: AtomicU32, load: AtomicUsize, - // TODO: check unused runtime - bg_runtime: RuntimeRef, } const OB_MYSQL_MAX_PACKET_LENGTH: usize = 1 << 24; @@ -250,7 +255,7 @@ impl Connection { let active = Arc::new(AtomicBool::new(false)); let read_active = active.clone(); - let (sender, receiver): (mpsc::Sender<()>, mpsc::Receiver<()>) = mpsc::channel(1); + let (sender, receiver) = oneshot::channel(); let join_handle = runtimes.tcp_recv_runtime.spawn(async move { let addr = read_stream.peer_addr()?; @@ -278,14 +283,13 @@ impl Connection { requests, continuous_timeout_failures: AtomicUsize::new(0), continuous_timeout_failures_ceiling: CONN_CONTINUOUS_TIMEOUT_CEILING, - reader_signal_sender: sender, + reader_signal_sender: Some(sender), credential: None, tenant_id: None, active, id, trace_id_counter: AtomicU32::new(0), load: AtomicUsize::new(0), - bg_runtime: runtimes.bg_runtime.clone(), }) } @@ -294,7 +298,7 @@ impl Connection { } async fn process_reading_data( - mut signal_receiver: mpsc::Receiver<()>, + mut signal_receiver: oneshot::Receiver<()>, mut read_stream: OwnedReadHalf, read_requests: RequestsMap, addr: &SocketAddr, @@ -482,6 +486,7 @@ impl Connection { ); self.set_active(false); Connection::cancel_requests(&self.requests); + // TODO: although TCP connection may be closed by remote, we should do async close } } @@ -676,34 +681,60 @@ impl Connection { Builder::new().build().await } + #[allow(dead_code)] /// close the connection - /// close is used by Drop, since Drop is sync, we need to use block_on to - /// wait for the future - fn close(&mut self) -> Result<()> { + /// + /// client should use close() if client close the connection voluntarily + async fn close(&mut self) -> Result<()> { if self.reader.is_none() { return Ok(()); } self.set_active(false); // 1. 
close writer - if let Err(e) = self - .bg_runtime - .block_on(async { self.sender.close().await }) - { + if let Err(e) = self.sender.close().await { error!("Connection::close fail to close writer, err: {}.", e); } // 2. close reader - if let Err(e) = self.bg_runtime.block_on(async { - self.reader_signal_sender - .send(()) - .await - .map_err(ConnectionSender::broken_pipe) - }) { - error!( - "Connection::close fail to send signal to reader, err: {}.", - e - ); + if let Some(sender) = self.reader_signal_sender.take() { + if let Err(e) = sender.send(()).map_err(ConnectionSender::broken_pipe) { + error!( + "Connection::close fail to send signal to reader, err: {}.", + e + ); + } + } + + let reader = mem::replace(&mut self.reader, None); + + drop(reader); + + Ok(()) + } + + /// shutdown the connection + /// + /// shutdown the connection without closing the TCP connection + fn shutdown(&mut self) -> Result<()> { + if self.reader.is_none() { + return Ok(()); + } + self.set_active(false); + + // 1. shutdown writer + if let Err(e) = self.sender.shutdown() { + error!("Connection::shutdown fail to shutdown writer, err: {}.", e); + } + + // 2. 
close reader + if let Some(sender) = self.reader_signal_sender.take() { + if let Err(e) = sender.send(()).map_err(ConnectionSender::broken_pipe) { + error!( + "Connection::shutdown fail to send signal to reader, err: {}.", + e + ); + } } let reader = mem::replace(&mut self.reader, None); @@ -741,8 +772,8 @@ impl Connection { impl Drop for Connection { fn drop(&mut self) { - if let Err(err) = self.close() { - warn!("Connection::drop fail to close connection, err: {}.", err) + if let Err(err) = self.shutdown() { + warn!("Connection::drop fail to shutdown connection, err: {}.", err) } let mut requests = self.requests.lock().unwrap(); for (_id, sender) in requests.drain() { @@ -956,6 +987,6 @@ mod test { .expect("fail to send request") .try_recv(); assert!(res.is_ok()); - assert!(conn.close().is_ok()); + assert!(conn.close().await.is_ok()); } } From f136b2196c037b57addbd9fe1799a631f5fecd20 Mon Sep 17 00:00:00 2001 From: "zeli.lwb" Date: Tue, 13 Jun 2023 16:58:01 +0800 Subject: [PATCH 8/9] [Fix] remove trait Table & fix review --- src/client/mod.rs | 84 +------------------------------------------ src/lib.rs | 2 +- src/rpc/mod.rs | 63 ++++++++++++++++---------------- tests/utils/common.rs | 2 +- 4 files changed, 34 insertions(+), 117 deletions(-) diff --git a/src/client/mod.rs b/src/client/mod.rs index ced0554..a95826d 100644 --- a/src/client/mod.rs +++ b/src/client/mod.rs @@ -17,11 +17,7 @@ use std::{collections::HashMap, time::Duration}; -use crate::{ - error::Result, - rpc::protocol::{payloads::ObTableBatchOperation, DEFAULT_FLAG}, - serde_obkv::value::Value, -}; +use crate::{rpc::protocol::DEFAULT_FLAG, serde_obkv::value::Value}; mod ocp; pub mod query; @@ -35,84 +31,6 @@ pub enum TableOpResult { RetrieveRows(HashMap), } -pub trait Table { - // TODO: async operation support - /// Insert a record - fn insert( - &self, - table_name: &str, - row_keys: Vec, - columns: Vec, - properties: Vec, - ) -> Result; - - /// Update a record - fn update( - &self, - table_name: &str, 
- row_keys: Vec, - columns: Vec, - properties: Vec, - ) -> Result; - - /// Insert or update a record, if the record exists, update it. - /// Otherwise insert a new one. - fn insert_or_update( - &self, - table_name: &str, - row_keys: Vec, - columns: Vec, - properties: Vec, - ) -> Result; - - /// Replace a record. - fn replace( - &self, - table_name: &str, - row_keys: Vec, - columns: Vec, - properties: Vec, - ) -> Result; - - /// Append - fn append( - &self, - table_name: &str, - row_keys: Vec, - columns: Vec, - properties: Vec, - ) -> Result; - - /// Increment - fn increment( - &self, - table_name: &str, - row_keys: Vec, - columns: Vec, - properties: Vec, - ) -> Result; - - /// Delete records by row keys. - fn delete(&self, table_name: &str, row_keys: Vec) -> Result; - - /// Retrieve a record by row keys. - fn get( - &self, - table_name: &str, - row_keys: Vec, - columns: Vec, - ) -> Result>; - - /// Create a batch operation - fn batch_operation(&self, ops_num_hint: usize) -> ObTableBatchOperation; - // Execute a batch operation - fn execute_batch( - &self, - table_name: &str, - batch_op: ObTableBatchOperation, - ) -> Result>; -} - /// ObTable client config #[derive(Clone, Debug, Eq, PartialEq)] pub struct ClientConfig { diff --git a/src/lib.rs b/src/lib.rs index 53f71d2..2653dcb 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -59,7 +59,7 @@ pub use self::{ query::QueryResultSet, table::ObTable, table_client::{Builder, ObTableClient, RunningMode}, - ClientConfig, Table, TableOpResult, + ClientConfig, TableOpResult, }, monitors::prometheus::dump_metrics, rpc::{ diff --git a/src/rpc/mod.rs b/src/rpc/mod.rs index 57f2b03..a922afa 100644 --- a/src/rpc/mod.rs +++ b/src/rpc/mod.rs @@ -181,17 +181,23 @@ impl ConnectionSender { } #[allow(dead_code)] - /// close the connection + /// Close the connection + /// Requests in requests map will be cancelled when the writer closed async fn close(&mut self) -> Result<()> { self.request(ObTablePacket::ClosePoison).await?; - 
self.shutdown() + if let Some(writer) = mem::take(&mut self.writer) { + writer.await?? + } + self.sender.closed().await; + Ok(()) } /// Shutdown the sender without closing remote + /// Requests in the requests map will not be cancelled fn shutdown(&mut self) -> Result<()> { - let writer = mem::replace(&mut self.writer, None); - let drop_helper = AbortOnDropMany(vec![writer.unwrap()]); - drop(drop_helper); + if let Some(writer) = mem::take(&mut self.writer) { + let _drop_helper = AbortOnDropMany(vec![writer]); + } Ok(()) } @@ -362,10 +368,16 @@ impl Connection { fn cancel_requests(requests: &RequestsMap) { let mut requests = requests.lock().unwrap(); for (_, sender) in requests.drain() { - if let Err(e) = sender.send(Err(CommonErr( - CommonErrCode::Rpc, - "connection reader exits".to_owned(), - ))) { + if let Err(e) = sender + .send(Ok(ObTablePacket::TransportPacket { + error: CommonErr( + CommonErrCode::BrokenPipe, + "No longer able to send messages".to_owned(), + ), + code: TransportCode::SendFailure, + })) + .map_err(ConnectionSender::broken_pipe) + { error!("Connection::cancel_requests: fail to send cancel message, err:{e:?}"); } } @@ -486,7 +498,8 @@ impl Connection { ); self.set_active(false); Connection::cancel_requests(&self.requests); - // TODO: although TCP connection may be closed by remote, we should do async close + // TODO: although TCP connection may be closed by remote, we should + // do async close } } @@ -705,9 +718,8 @@ impl Connection { ); } } - - let reader = mem::replace(&mut self.reader, None); - + let reader = mem::take(&mut self.reader); + Connection::cancel_requests(&self.requests); drop(reader); Ok(()) @@ -736,9 +748,8 @@ impl Connection { ); } } - - let reader = mem::replace(&mut self.reader, None); - + let reader = mem::take(&mut self.reader); + Connection::cancel_requests(&self.requests); drop(reader); Ok(()) @@ -773,22 +784,10 @@ impl Connection { impl Drop for Connection { fn drop(&mut self) { if let Err(err) = self.shutdown() { - 
warn!("Connection::drop fail to shutdown connection, err: {}.", err) - } - let mut requests = self.requests.lock().unwrap(); - for (_id, sender) in requests.drain() { - if let Err(e) = sender - .send(Ok(ObTablePacket::TransportPacket { - error: CommonErr( - CommonErrCode::BrokenPipe, - "No longer able to send messages".to_owned(), - ), - code: TransportCode::SendFailure, - })) - .map_err(ConnectionSender::broken_pipe) - { - error!("Connection::drop fail to notify senders, err: {}.", e); - } + error!( + "Connection::drop fail to shutdown connection, err: {}.", + err + ) } } } diff --git a/tests/utils/common.rs b/tests/utils/common.rs index 31fdd0c..c934f91 100644 --- a/tests/utils/common.rs +++ b/tests/utils/common.rs @@ -28,7 +28,7 @@ use std::{ #[allow(unused)] use obkv::error::CommonErrCode; -use obkv::{Builder, ObTableClient, RunningMode, Table}; +use obkv::{Builder, ObTableClient, RunningMode}; // TODO: use test conf to control which environments to test. const TEST_FULL_USER_NAME: &str = "test"; From ed7c4bfd29c873a102aa5d2d717c7bc01d95b8bc Mon Sep 17 00:00:00 2001 From: "zeli.lwb" Date: Tue, 13 Jun 2023 21:40:16 +0800 Subject: [PATCH 9/9] [Fix] review --- src/rpc/mod.rs | 5 ++--- src/runtime/mod.rs | 6 ++++++ 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/src/rpc/mod.rs b/src/rpc/mod.rs index a922afa..5e5ea98 100644 --- a/src/rpc/mod.rs +++ b/src/rpc/mod.rs @@ -57,7 +57,7 @@ use crate::{ error::{CommonErrCode, Error, Error::Common as CommonErr, Result}, monitors::{prometheus::OBKV_CLIENT_REGISTRY, rpc_metrics::RpcMetrics}, rpc::{protocol::TraceId, util::checksum::ob_crc64::ObCrc64Sse42}, - runtime::{AbortOnDropMany, JoinHandle, RuntimeRef}, + runtime::{JoinHandle, RuntimeRef}, }; lazy_static! { @@ -188,7 +188,6 @@ impl ConnectionSender { if let Some(writer) = mem::take(&mut self.writer) { writer.await?? 
} - self.sender.closed().await; Ok(()) } /// Shutdown the sender without closing remote /// Requests in the requests map will not be cancelled fn shutdown(&mut self) -> Result<()> { if let Some(writer) = mem::take(&mut self.writer) { - let _drop_helper = AbortOnDropMany(vec![writer]); + writer.abort() } Ok(()) } diff --git a/src/runtime/mod.rs b/src/runtime/mod.rs index e307dc5..b07e17f 100644 --- a/src/runtime/mod.rs +++ b/src/runtime/mod.rs @@ -78,6 +78,12 @@ pin_project! { } } +impl JoinHandle { + pub fn abort(&self) { + self.inner.abort(); + } +} + impl Future for JoinHandle { type Output = Result;