@@ -22,24 +22,23 @@ def setup_class(cls):
22
22
def teardown_class(cls):
    """Stop the test cluster once every test in this class has finished."""
    cls.cluster.stop()
- def test (self ):
26
- """As per https://github.com/ydb-platform/ydb/issues/13529"""
27
-
25
def make_session(self):
    """Open a driver against node 1 of the test cluster and return a query session pool.

    The driver is waited on (5 s, fail-fast) so connection problems
    surface here rather than on first query.
    """
    grpc_port = self.cluster.nodes[1].grpc_port
    driver = ydb.Driver(endpoint=f'grpc://localhost:{grpc_port}', database='/Root')
    pool = ydb.QuerySessionPool(driver)
    driver.wait(5, fail_fast=True)
    return pool
def create_test_table(self, session, table):
    """Create a column-store table *table* with schema (k Int32 PRIMARY KEY, v Uint64)."""
    ddl = f"""
        CREATE TABLE {table} (
            k Int32 NOT NULL,
            v Uint64,
            PRIMARY KEY (k)
        ) WITH (STORE = COLUMN)
    """
    return session.execute_with_retries(ddl)
- def upsert_chunk ( table , chunk_id , retries = 10 ):
42
- return session .execute_with_retries (f"""
40
+ def upsert_test_chunk ( self , session , table , chunk_id , retries = 10 ):
41
+ return session .execute_with_retries (f"""
43
42
$n = { ROWS_CHUNK_SIZE } ;
44
43
$values_list = ListReplicate(42ul, $n);
45
44
$rows_list = ListFoldMap($values_list, { chunk_id * ROWS_CHUNK_SIZE } , ($val, $i) -> ((<|k:$i, v:$val|>, $i + 1)));
@@ -48,17 +47,54 @@ def upsert_chunk(table, chunk_id, retries=10):
48
47
SELECT * FROM AS_TABLE($rows_list);
49
48
""" , None , ydb .retries .RetrySettings (max_retries = retries ))
50
49
51
- create_table ('huge' )
52
-
50
def upsert_until_overload(self, session, table):
    """Upsert row chunks into *table* until the database reports Overloaded.

    Stops quietly on the first ydb.issues.Overloaded; any other exception
    propagates. If no overload occurs within ROWS_CHUNKS_COUNT chunks the
    loop simply completes.
    """
    try:
        for chunk in range(ROWS_CHUNKS_COUNT):
            result = self.upsert_test_chunk(session, table, chunk, retries=0)
            print(f"upsert #{chunk} ok, result:", result, file=sys.stderr)
    except ydb.issues.Overloaded:
        print('upsert: got overload issue', file=sys.stderr)
+ def test (self ):
59
+ """As per https://github.com/ydb-platform/ydb/issues/13529"""
60
+ session = self .make_session ()
59
61
62
+ # Overflow the database
63
+ self .create_test_table (session , 'huge' )
64
+ self .upsert_until_overload (session , 'huge' )
65
+
66
+ # Cleanup
60
67
session .execute_with_retries ("""DROP TABLE huge""" )
61
68
62
69
# Check database health after cleanup
63
- create_table ('small' )
64
- upsert_chunk ('small' , 0 )
70
+ self .create_test_table (session , 'small' )
71
+ self .upsert_test_chunk (session , 'small' , 0 )
72
+
73
def delete_test_chunk(self, session, table, chunk_id, retries=10):
    """Delete the rows belonging to chunk *chunk_id* from *table*.

    Chunk c covers c*ROWS_CHUNK_SIZE <= k < (c+1)*ROWS_CHUNK_SIZE, matching
    the key ranges produced by upsert_test_chunk. The previous inclusive
    upper bound (k <= c*N + N) was off by one and also deleted the first
    row of chunk c+1.

    retries=0 makes this a single attempt so an Overloaded error surfaces
    immediately.
    """
    session.execute_with_retries(f"""
        DELETE FROM {table}
        WHERE {chunk_id * ROWS_CHUNK_SIZE} <= k AND k < {(chunk_id + 1) * ROWS_CHUNK_SIZE}
    """, None, ydb.retries.RetrySettings(max_retries=retries))
def delete_until_overload(self, session, table):
    """Delete chunks from *table* one by one until the database reports Overloaded.

    Returns the index of the chunk whose deletion hit the overload.
    NOTE(review): if no overload occurs within ROWS_CHUNKS_COUNT chunks this
    falls through and returns None — callers currently assume an overload
    does happen; confirm that assumption holds.
    """
    for chunk in range(ROWS_CHUNKS_COUNT):
        try:
            self.delete_test_chunk(session, table, chunk, retries=0)
        except ydb.issues.Overloaded:
            print('delete: got overload issue', file=sys.stderr)
            return chunk
        print(f"delete #{chunk} ok", file=sys.stderr)
+ def test_delete (self ):
89
+ """As per https://github.com/ydb-platform/ydb/issues/13653"""
90
+ session = self .make_session ()
91
+
92
+ # Overflow the database
93
+ self .create_test_table (session , 'huge' )
94
+ self .upsert_until_overload (session , 'huge' )
95
+
96
+ # Check that deletions will lead to overflow, too
97
+ i = self .delete_until_overload (session , 'huge' )
98
+
99
+ # Try to wait until deletion works again (after compaction)
100
+ self .delete_test_chunk (session , 'huge' , i )
0 commit comments