#include <ydb/core/tx/schemeshard/ut_helpers/helpers.h>
#include <ydb/core/tx/schemeshard/schemeshard_billing_helpers.h>
+#include <ydb/core/testlib/actors/block_events.h>
#include <ydb/core/testlib/tablet_helpers.h>

#include <ydb/core/tx/datashard/datashard.h>
#include <ydb/core/metering/metering.h>

+#include <ydb/public/sdk/cpp/client/ydb_table/table.h>
+
using namespace NKikimr;
using namespace NSchemeShard;
using namespace NSchemeShardUT_Private;
@@ -759,6 +762,154 @@ Y_UNIT_TEST_SUITE(IndexBuildTest) {

    }

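+    // Index impl table shards must not be merged while the index is still being built;
+    // once the index becomes Ready, SchemeShard is allowed to merge them.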
+    Y_UNIT_TEST(MergeIndexTableShardsOnlyWhenReady) {
+        TTestBasicRuntime runtime;
+
+        TTestEnvOptions opts;
+        opts.EnableBackgroundCompaction(false);
+        opts.DisableStatsBatching(true);
+        TTestEnv env(runtime, opts);
+
+        // Make datashards report table stats without delay, so SchemeShard
+        // receives partition statistics promptly.
+        NDataShard::gDbStatsReportInterval = TDuration::Seconds(0);
+
+        ui64 txId = 100;
+
+        TestCreateTable(runtime, ++txId, "/MyRoot", R"(
+            Name: "Table"
+            Columns { Name: "key" Type: "Uint64" }
+            Columns { Name: "value" Type: "Uint64" }
+            KeyColumnNames: ["key"]
+        )");
+        env.TestWaitNotification(runtime, txId);
+
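+        // Pre-split the index impl table into 4 shards using 3 explicit split points.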
+        Ydb::Table::GlobalIndexSettings settings;
+        UNIT_ASSERT(google::protobuf::TextFormat::ParseFromString(R"(
+            partition_at_keys {
+                split_points {
+                    type { tuple_type { elements { optional_type { item { type_id: UINT64 } } } } }
+                    value { items { uint64_value: 10 } }
+                }
+                split_points {
+                    type { tuple_type { elements { optional_type { item { type_id: UINT64 } } } } }
+                    value { items { uint64_value: 20 } }
+                }
+                split_points {
+                    type { tuple_type { elements { optional_type { item { type_id: UINT64 } } } } }
+                    value { items { uint64_value: 30 } }
+                }
+            }
+        )", &settings));
+
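+        // Hold back ESchemeOpApplyIndexBuild so the index stays in the WriteOnly state
+        // until the blocker is released near the end of the test.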
+        TBlockEvents<TEvSchemeShard::TEvModifySchemeTransaction> indexApplicationBlocker(runtime, [](const auto& ev) {
+            const auto& modifyScheme = ev->Get()->Record.GetTransaction(0);
+            return modifyScheme.GetOperationType() == NKikimrSchemeOp::ESchemeOpApplyIndexBuild;
+        });
+
+        ui64 indexInitializationTx = 0;
+        using TEvent = TEvSchemeShard::TEvModifySchemeTransaction;
+        auto indexInitializationObserver = runtime.AddObserver<TEvent>([&indexInitializationTx](const TEvent::TPtr& ev) {
+            const auto& record = ev->Get()->Record;
+            if (record.GetTransaction(0).GetOperationType() == NKikimrSchemeOp::ESchemeOpCreateIndexBuild) {
+                indexInitializationTx = record.GetTxId();
+            }
+        });
+
+        const ui64 buildIndexTx = ++txId;
+        TestBuildIndex(runtime, buildIndexTx, TTestTxConfig::SchemeShard, "/MyRoot", "/MyRoot/Table", TBuildIndexConfig{
+            "ByValue", NKikimrSchemeOp::EIndexTypeGlobal, { "value" }, {},
+            { NYdb::NTable::TGlobalIndexSettings::FromProto(settings) }
+        });
+
+        runtime.WaitFor("index initialization", [&indexInitializationTx]{
+            return indexInitializationTx != 0;
+        });
+        env.TestWaitNotification(runtime, indexInitializationTx);
+
+        TestDescribeResult(DescribePrivatePath(runtime, "/MyRoot/Table/ByValue"), {
+            NLs::PathExist,
+            NLs::IndexState(NKikimrSchemeOp::EIndexStateWriteOnly)
+        });
+
+        TVector<ui64> indexShards;
+        auto shardCollector = [&indexShards](const NKikimrScheme::TEvDescribeSchemeResult& record) {
+            UNIT_ASSERT_VALUES_EQUAL(record.GetStatus(), NKikimrScheme::StatusSuccess);
+            const auto& partitions = record.GetPathDescription().GetTablePartitions();
+            indexShards.clear();
+            indexShards.reserve(partitions.size());
+            for (const auto& partition : partitions) {
+                indexShards.emplace_back(partition.GetDatashardId());
+            }
+        };
+        TestDescribeResult(DescribePrivatePath(runtime, "/MyRoot/Table/ByValue/indexImplTable", true), {
+            NLs::PathExist,
+            NLs::PartitionCount(4),
+            shardCollector
+        });
+        UNIT_ASSERT_VALUES_EQUAL(indexShards.size(), 4);
+
+        {
+            // Make sure no shards are merged while the index is not yet ready.
+            TBlockEvents<TEvSchemeShard::TEvModifySchemeTransaction> mergeBlocker(runtime, [](const auto& ev) {
+                const auto& modifyScheme = ev->Get()->Record.GetTransaction(0);
+                return modifyScheme.GetOperationType() == NKikimrSchemeOp::ESchemeOpSplitMergeTablePartitions;
+            });
+
+            {
+                // Wait for all index shards to send statistics.
+                THashSet<ui64> shardsWithStats;
+                using TEvType = TEvDataShard::TEvPeriodicTableStats;
+                auto statsObserver = runtime.AddObserver<TEvType>([&shardsWithStats](const TEvType::TPtr& ev) {
+                    shardsWithStats.emplace(ev->Get()->Record.GetDatashardId());
+                });
+
+                runtime.WaitFor("all index shards to send statistics", [&]{
+                    return AllOf(indexShards, [&shardsWithStats](ui64 indexShard) {
+                        return shardsWithStats.contains(indexShard);
+                    });
+                });
+            }
+
+            // No merge attempts should have been observed so far.
+            UNIT_ASSERT(mergeBlocker.empty());
+
+            // Wait for a minute to ensure that SchemeShard does not start any merges.
+            env.SimulateSleep(runtime, TDuration::Minutes(1));
+            UNIT_ASSERT(mergeBlocker.empty());
+        }
+
+        // Splits are allowed even if the index is not ready.
+        TestSplitTable(runtime, ++txId, "/MyRoot/Table/ByValue/indexImplTable", Sprintf(R"(
+                SourceTabletId: %lu
+                SplitBoundary { KeyPrefix { Tuple { Optional { Uint64: 5 } } } }
+            )",
+            indexShards.front()
+        ));
+        env.TestWaitNotification(runtime, txId);
+
+        indexApplicationBlocker.Stop().Unblock();
+        env.TestWaitNotification(runtime, buildIndexTx);
+
+        TestDescribeResult(DescribePrivatePath(runtime, "/MyRoot/Table/ByValue"), {
+            NLs::IndexState(NKikimrSchemeOp::EIndexStateReady)
+        });
+
+        // Wait until all index impl table shards are merged into one.
+        while (true) {
+            TestDescribeResult(DescribePrivatePath(runtime, "/MyRoot/Table/ByValue/indexImplTable", true), {
+                shardCollector
+            });
+            if (indexShards.size() > 1) {
+                // If a merge happens, old shards are deleted and replaced with a new one.
+                // That is why we need to wait for *all* the shards to be deleted.
+                env.TestWaitTabletDeletion(runtime, indexShards);
+            } else {
+                break;
+            }
+        }
+    }
+
    Y_UNIT_TEST(DropIndex) {
        TTestBasicRuntime runtime;
        TTestEnv env(runtime);