Skip to content

feat: compact segment #8261

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 31 commits into from
Oct 19, 2022
Merged
Show file tree
Hide file tree
Changes from 23 commits
Commits
Show all changes
31 commits
Select commit Hold shift + click to select a range
a387934
merge segments
dantengsky Oct 17, 2022
717b40d
refactor: rename block_select to target_select
dantengsky Oct 17, 2022
89cc209
minor refactor
dantengsky Oct 17, 2022
10c0849
retry & abort
dantengsky Oct 17, 2022
d747d81
add sqlogic test
dantengsky Oct 17, 2022
a42f046
Merge remote-tracking branch 'origin/main' into feat-compact-segment
dantengsky Oct 17, 2022
1347c22
resolve merge conflicts
dantengsky Oct 17, 2022
cfac597
adjust compact syntax
dantengsky Oct 18, 2022
891cec6
add unit test
dantengsky Oct 18, 2022
6c7ebdd
remove debug prints
dantengsky Oct 18, 2022
8aa847b
make lint
dantengsky Oct 18, 2022
db592e8
refact Table::compact
dantengsky Oct 18, 2022
bec6688
fix typo
dantengsky Oct 18, 2022
56d6719
Lowering memory usage and fix ut
dantengsky Oct 18, 2022
febf084
make lint
dantengsky Oct 18, 2022
a1d04ad
refine logictest
dantengsky Oct 18, 2022
6e66512
minor code gc
dantengsky Oct 18, 2022
717382d
Merge remote-tracking branch 'origin/main' into feat-compact-segment
dantengsky Oct 18, 2022
f84a24b
Merge remote-tracking branch 'origin/main' into feat-compact-segment
dantengsky Oct 18, 2022
83d0a7b
Explicitly dropping
dantengsky Oct 18, 2022
b31756b
fix typo & minor refactor
dantengsky Oct 18, 2022
f33b821
do not write down segment info if there is only one accumulated
dantengsky Oct 19, 2022
c0589a4
refine doc
dantengsky Oct 19, 2022
6889dae
Merge branch 'zuqian-compact' into feat-compact-segment
dantengsky Oct 19, 2022
ac259a6
use remove_file_in_batch
dantengsky Oct 19, 2022
2f25fdf
Merge remote-tracking branch 'origin/main' into feat-compact-segment
dantengsky Oct 19, 2022
3c0b4c0
refact: unify logic of conflicts detection
dantengsky Oct 19, 2022
fa0197e
refine doc
dantengsky Oct 19, 2022
0f4ebb4
code de-duplication
dantengsky Oct 19, 2022
8c3f4a1
remove snapshot id checking in conflict detection
dantengsky Oct 19, 2022
148cdf7
remove unnecessary checking in commit_mutation
dantengsky Oct 19, 2022
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

7 changes: 4 additions & 3 deletions src/common/base/src/base/runtime.rs
Original file line number Diff line number Diff line change
Expand Up @@ -205,9 +205,10 @@ impl Runtime {
{
let semaphore = Arc::new(semaphore);
let iter = futures.into_iter().map(|v| {
|permit| {
let _permit = permit;
v
|permit| async {
let r = v.await;
drop(permit);
r
}
});
self.try_spawn_batch_with_owned_semaphore(semaphore, iter)
Expand Down
17 changes: 15 additions & 2 deletions src/query/ast/src/ast/statements/table.rs
Original file line number Diff line number Diff line change
Expand Up @@ -444,19 +444,32 @@ impl Display for Engine {
}
}

/// Granularity of an `OPTIMIZE TABLE ... COMPACT` statement at the AST level.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum CompactTarget {
    // `COMPACT BLOCK` — also what a bare `COMPACT` parses to (see the parser's
    // fallback `COMPACT` rule).
    Block,
    // `COMPACT SEGMENT` — compact segments only.
    Segment,
}

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum OptimizeTableAction {
All,
Purge,
Compact,
Compact(CompactTarget),
}

/// Renders the action as the SQL keyword(s) that follow `OPTIMIZE TABLE ...`.
impl Display for OptimizeTableAction {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // The scrape had kept the stale pre-change arm
        // `OptimizeTableAction::Compact => write!(f, "COMPACT")`, which no
        // longer matches the payload-carrying variant; it is dropped here.
        // The nested match is flattened into one exhaustive match.
        match self {
            OptimizeTableAction::All => write!(f, "ALL"),
            OptimizeTableAction::Purge => write!(f, "PURGE"),
            OptimizeTableAction::Compact(CompactTarget::Block) => write!(f, "COMPACT BLOCK"),
            OptimizeTableAction::Compact(CompactTarget::Segment) => write!(f, "COMPACT SEGMENT"),
        }
    }
}
Expand Down
11 changes: 9 additions & 2 deletions src/query/ast/src/parser/statement.rs
Original file line number Diff line number Diff line change
Expand Up @@ -941,7 +941,7 @@ pub fn statement(i: Input) -> IResult<StatementMsg> {
| #alter_table : "`ALTER TABLE [<database>.]<table> <action>`"
| #rename_table : "`RENAME TABLE [<database>.]<table> TO <new_table>`"
| #truncate_table : "`TRUNCATE TABLE [<database>.]<table> [PURGE]`"
| #optimize_table : "`OPTIMIZE TABLE [<database>.]<table> (ALL | PURGE | COMPACT)`"
| #optimize_table : "`OPTIMIZE TABLE [<database>.]<table> (ALL | PURGE | COMPACT [SEGMENT])`"
| #exists_table : "`EXISTS TABLE [<database>.]<table>`"
),
rule!(
Expand Down Expand Up @@ -1364,7 +1364,14 @@ pub fn optimize_table_action(i: Input) -> IResult<OptimizeTableAction> {
alt((
value(OptimizeTableAction::All, rule! { ALL }),
value(OptimizeTableAction::Purge, rule! { PURGE }),
value(OptimizeTableAction::Compact, rule! { COMPACT }),
value(
OptimizeTableAction::Compact(CompactTarget::Segment),
rule! { COMPACT ~ SEGMENT},
),
value(
OptimizeTableAction::Compact(CompactTarget::Block),
rule! { COMPACT},
),
))(i)
}

Expand Down
2 changes: 2 additions & 0 deletions src/query/ast/src/parser/token.rs
Original file line number Diff line number Diff line change
Expand Up @@ -601,6 +601,8 @@ pub enum TokenKind {
SECOND,
#[token("SELECT", ignore(ascii_case))]
SELECT,
#[token("SEGMENT", ignore(ascii_case))]
SEGMENT,
#[token("SET", ignore(ascii_case))]
SET,
#[token("SETTINGS", ignore(ascii_case))]
Expand Down
8 changes: 7 additions & 1 deletion src/query/catalog/src/table.rs
Original file line number Diff line number Diff line change
Expand Up @@ -232,9 +232,10 @@ pub trait Table: Sync + Send {
async fn compact(
&self,
ctx: Arc<dyn TableContext>,
target: CompactTarget,
pipeline: &mut Pipeline,
) -> Result<Option<Arc<dyn TableMutator>>> {
let (_, _) = (ctx, pipeline);
let (_, _, _) = (ctx, target, pipeline);

Err(ErrorCode::UnImplement(format!(
"table {}, of engine type {}, does not support compact",
Expand Down Expand Up @@ -303,6 +304,11 @@ pub struct ColumnStatistics {
pub number_of_distinct_values: u64,
}

/// The storage-level target of a table compaction, passed to `Table::compact`.
///
/// Mirrors the AST's `CompactTarget` (`Block` / `Segment`) but lives in the
/// catalog crate so storage engines need not depend on the AST. Derives are
/// added for parity with the AST enum (which is `Debug, Clone, Copy,
/// PartialEq, Eq`).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum CompactTarget {
    /// Compact data blocks (plain `OPTIMIZE TABLE ... COMPACT`).
    Blocks,
    /// Compact segments only; the tests assert block_count is unchanged.
    Segments,
}

pub trait ColumnStatisticsProvider {
// returns the statistics of the given column, if any.
// column_id is just the index of the column in table's schema
Expand Down
2 changes: 1 addition & 1 deletion src/query/catalog/src/table_mutator.rs
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,6 @@ use common_meta_app::schema::TableInfo;

/// A pending table mutation (e.g. block or segment compaction): targets are
/// selected first, then the prepared mutation is committed to the table.
///
/// The scrape had retained the pre-rename declaration `blocks_select` next to
/// `target_select` (this PR renames block_select to target_select); the stale
/// line is removed here.
#[async_trait::async_trait]
pub trait TableMutator: Send + Sync {
    /// Select the targets (blocks or segments) this mutator will rewrite.
    ///
    /// NOTE(review): presumably returns `Ok(false)` when there is nothing to
    /// mutate, letting callers skip `try_commit` — confirm with implementors.
    async fn target_select(&mut self) -> Result<bool>;

    /// Attempt to commit the prepared mutation against `table_info`.
    async fn try_commit(&self, table_info: &TableInfo) -> Result<()>;
}
3 changes: 2 additions & 1 deletion src/query/planner/src/plans/optimize_table.rs
Original file line number Diff line number Diff line change
Expand Up @@ -34,5 +34,6 @@ impl OptimizeTablePlan {
/// The resolved action of an `OPTIMIZE TABLE` plan.
///
/// The binder maps AST `CompactTarget::Block` to `CompactBlocks` and
/// `CompactTarget::Segment` to `CompactSegments`. The scrape had retained the
/// removed pre-change variant `Compact`; it is dropped here to reflect the
/// post-change state.
pub enum OptimizeTableAction {
    /// Purge history and also compact blocks (see the interpreter's matches!).
    All,
    /// Purge historical data only.
    Purge,
    /// Compact data blocks.
    CompactBlocks,
    /// Compact segments only.
    CompactSegments,
}
24 changes: 20 additions & 4 deletions src/query/service/src/interpreters/interpreter_table_optimize.rs
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@

use std::sync::Arc;

use common_catalog::table::CompactTarget;
use common_exception::Result;
use common_planner::plans::OptimizeTableAction;
use common_planner::plans::OptimizeTablePlan;
Expand Down Expand Up @@ -56,14 +57,29 @@ impl Interpreter for OptimizeTableInterpreter {
action,
OptimizeTableAction::Purge | OptimizeTableAction::All
);
let do_compact = matches!(
let do_compact_blocks = matches!(
action,
OptimizeTableAction::Compact | OptimizeTableAction::All
OptimizeTableAction::CompactBlocks | OptimizeTableAction::All
);

if do_compact {
let do_compact_segments_only = matches!(action, OptimizeTableAction::CompactSegments);

if do_compact_segments_only {
let mut pipeline = Pipeline::create();
if let Some(mutator) = table
.compact(ctx.clone(), CompactTarget::Segments, &mut pipeline)
.await?
{
mutator.try_commit(table.get_table_info()).await?;
return Ok(PipelineBuildResult::create());
}
}

if do_compact_blocks {
let mut pipeline = Pipeline::create();
let mutator = table.compact(ctx.clone(), &mut pipeline).await?;
let mutator = table
.compact(ctx.clone(), CompactTarget::Blocks, &mut pipeline)
.await?;

if let Some(mutator) = mutator {
let settings = ctx.get_settings();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -358,7 +358,7 @@ impl<W: AsyncWrite + Send + Unpin> InteractiveWorkerBase<W> {
}
}

//#[tracing::instrument(level = "debug", skip(interpreter, context))]
#[tracing::instrument(level = "debug", skip(interpreter, context))]
async fn exec_query(
interpreter: Arc<dyn Interpreter>,
context: &Arc<QueryContext>,
Expand Down
5 changes: 4 additions & 1 deletion src/query/service/src/sql/planner/binder/ddl/table.rs
Original file line number Diff line number Diff line change
Expand Up @@ -781,7 +781,10 @@ impl<'a> Binder {
let action = action.map_or(OptimizeTableAction::Purge, |v| match v {
AstOptimizeTableAction::All => OptimizeTableAction::All,
AstOptimizeTableAction::Purge => OptimizeTableAction::Purge,
AstOptimizeTableAction::Compact => OptimizeTableAction::Compact,
AstOptimizeTableAction::Compact(target) => match target {
CompactTarget::Block => OptimizeTableAction::CompactBlocks,
CompactTarget::Segment => OptimizeTableAction::CompactSegments,
},
});

Ok(Plan::OptimizeTable(Box::new(OptimizeTablePlan {
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,194 @@
// Copyright 2022 Datafuse Labs.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Arc;

use common_base::base::tokio;
use common_catalog::table::CompactTarget;
use common_catalog::table::Table;
use common_datablocks::DataBlock;
use common_exception::ErrorCode;
use common_exception::Result;
use common_storages_fuse::FuseTable;
use common_streams::SendableDataBlockStream;
use databend_query::sessions::QueryContext;
use databend_query::sessions::TableContext;
use futures_util::TryStreamExt;

use crate::storages::fuse::table_test_fixture::execute_command;
use crate::storages::fuse::table_test_fixture::execute_query;
use crate::storages::fuse::table_test_fixture::TestFixture;

/// Happy path: N single-row appends, then a segment compaction collapses all
/// segments into one while leaving the data blocks untouched.
#[tokio::test]
async fn test_compact_segment_normal_case() -> Result<()> {
    let fixture = TestFixture::new().await;
    let ctx = fixture.ctx();

    // setup
    let qry = "create table t(c int) block_per_segment=10";
    execute_command(ctx.clone(), qry).await?;

    let catalog = ctx.get_catalog("default")?;

    let num_inserts = 9;
    append_rows(ctx.clone(), num_inserts).await?;

    // check row count — was a hard-coded 9; use num_inserts for consistency
    // with the sibling tests in this file
    let count_qry = "select count(*) from t";
    let stream = execute_query(fixture.ctx(), count_qry).await?;
    assert_eq!(num_inserts as u64, check_count(stream).await?);

    // prepare and commit a segment compaction
    let table = catalog
        .get_table(ctx.get_tenant().as_str(), "default", "t")
        .await?;
    let fuse_table = FuseTable::try_from_table(table.as_ref())?;
    let mut pipeline = common_pipeline_core::Pipeline::create();
    let mutator = fuse_table
        .compact(ctx.clone(), CompactTarget::Segments, &mut pipeline)
        .await?;
    assert!(mutator.is_some());
    let mutator = mutator.unwrap();
    mutator.try_commit(table.get_table_info()).await?;

    // check segment count
    let qry = "select segment_count as count from fuse_snapshot('default', 't') limit 1";
    let stream = execute_query(fixture.ctx(), qry).await?;
    // after compact, in our case, there should be only 1 segment left
    assert_eq!(1, check_count(stream).await?);

    // check block count — segment compaction must not change the blocks
    let qry = "select block_count as count from fuse_snapshot('default', 't') limit 1";
    let stream = execute_query(fixture.ctx(), qry).await?;
    assert_eq!(num_inserts as u64, check_count(stream).await?);
    Ok(())
}

/// Concurrent appends land between preparing and committing a segment
/// compaction; the commit must detect and resolve the conflict, keeping the
/// newly appended segments uncompacted.
#[tokio::test]
async fn test_compact_segment_resolvable_conflict() -> Result<()> {
    let fixture = TestFixture::new().await;
    let ctx = fixture.ctx();

    // setup
    let create_tbl_command = "create table t(c int) block_per_segment=10";
    execute_command(ctx.clone(), create_tbl_command).await?;

    let catalog = ctx.get_catalog("default")?;

    let num_inserts = 9;
    append_rows(ctx.clone(), num_inserts).await?;

    // check row count — was a hard-coded 9; use num_inserts for consistency
    let count_qry = "select count(*) from t";
    let stream = execute_query(fixture.ctx(), count_qry).await?;
    assert_eq!(num_inserts as u64, check_count(stream).await?);

    // prepare a segment compaction, but do not commit it yet
    let table = catalog
        .get_table(ctx.get_tenant().as_str(), "default", "t")
        .await?;
    let fuse_table = FuseTable::try_from_table(table.as_ref())?;
    let mut pipeline = common_pipeline_core::Pipeline::create();
    let mutator = fuse_table
        .compact(ctx.clone(), CompactTarget::Segments, &mut pipeline)
        .await?;
    assert!(mutator.is_some());
    let mutator = mutator.unwrap();

    // before committing the compaction, give 9 concurrent append commits
    // (the original re-bound `num_inserts` to the same value 9; the redundant
    // shadowing is removed — later arithmetic is unchanged)
    append_rows(ctx.clone(), num_inserts).await?;

    mutator.try_commit(table.get_table_info()).await?;

    // check segment count
    let count_seg = "select segment_count as count from fuse_snapshot('default', 't') limit 1";
    let stream = execute_query(fixture.ctx(), count_seg).await?;
    // after compact, in our case, there should be only 1 + num_inserts segments left
    // during compact retry, newly appended segments will NOT be compacted again
    assert_eq!(1 + num_inserts as u64, check_count(stream).await?);

    // check block count: both rounds of appends are present
    let count_block = "select block_count as count from fuse_snapshot('default', 't') limit 1";
    let stream = execute_query(fixture.ctx(), count_block).await?;
    assert_eq!(num_inserts as u64 * 2, check_count(stream).await?);
    Ok(())
}

/// A second segment compaction, prepared and committed while another one is
/// still pending, makes the pending mutator's commit fail with an
/// unresolvable conflict (reported as a storage error).
#[tokio::test]
async fn test_compact_segment_unresolvable_conflict() -> Result<()> {
    let fixture = TestFixture::new().await;
    let ctx = fixture.ctx();

    // setup
    let create_tbl_command = "create table t(c int) block_per_segment=10";
    execute_command(ctx.clone(), create_tbl_command).await?;

    let catalog = ctx.get_catalog("default")?;

    let num_inserts = 9;
    append_rows(ctx.clone(), num_inserts).await?;

    // sanity-check the row count
    let count_qry = "select count(*) from t";
    let row_count_stream = execute_query(fixture.ctx(), count_qry).await?;
    assert_eq!(num_inserts as u64, check_count(row_count_stream).await?);

    // prepare a segment compaction, but keep it pending
    let table = catalog
        .get_table(ctx.get_tenant().as_str(), "default", "t")
        .await?;
    let fuse_table = FuseTable::try_from_table(table.as_ref())?;
    let mut pipeline = common_pipeline_core::Pipeline::create();
    let maybe_mutator = fuse_table
        .compact(ctx.clone(), CompactTarget::Segments, &mut pipeline)
        .await?;
    assert!(maybe_mutator.is_some());
    let pending = maybe_mutator.unwrap();

    {
        // inject an unresolvable commit: another compaction, committed while
        // `pending` is still outstanding
        compact_segment(ctx.clone(), table.as_ref()).await?;
    }

    // the compaction committed later must fail
    let commit_result = pending.try_commit(table.get_table_info()).await;
    assert!(commit_result.is_err());
    assert_eq!(
        commit_result.err().unwrap().code(),
        ErrorCode::storage_other_code()
    );

    Ok(())
}

/// Runs `insert into t values(1)` `n` times — one commit (hence one new
/// segment) per statement.
async fn append_rows(ctx: Arc<QueryContext>, n: usize) -> Result<()> {
    let insert_stmt = "insert into t values(1)";
    let mut remaining = n;
    while remaining > 0 {
        execute_command(ctx.clone(), insert_stmt).await?;
        remaining -= 1;
    }
    Ok(())
}

/// Drains `result_stream` and returns the first value of the first column of
/// the first block as a u64 (the "count" produced by the queries above).
/// Panics if the stream yielded no blocks, which would itself be a test bug.
async fn check_count(result_stream: SendableDataBlockStream) -> Result<u64> {
    let blocks = result_stream.try_collect::<Vec<DataBlock>>().await?;
    let first_block = &blocks[0];
    first_block.column(0).get_u64(0)
}

/// Prepares and immediately commits a segment compaction on `table`.
/// The pipeline is required by the `compact` signature but unused for the
/// segment target in these tests.
async fn compact_segment(ctx: Arc<QueryContext>, table: &dyn Table) -> Result<()> {
    let fuse = FuseTable::try_from_table(table)?;
    let mut unused_pipeline = common_pipeline_core::Pipeline::create();
    let maybe_mutator = fuse
        .compact(ctx, CompactTarget::Segments, &mut unused_pipeline)
        .await?;
    let mutator = maybe_mutator.unwrap();
    mutator.try_commit(table.get_table_info()).await
}
Original file line number Diff line number Diff line change
Expand Up @@ -12,5 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.

mod compact_segments_mutator;
mod deletion_mutator;
mod recluster_mutator;
Loading