@@ -1,3 +1,5 @@
+import os
+import subprocess
 import sys
 
 import ydb
@@ -22,24 +24,23 @@ def setup_class(cls):
     def teardown_class(cls):
         cls.cluster.stop()
 
-    def test(self):
-        """As per https://github.com/ydb-platform/ydb/issues/13529"""
-
+    def make_session(self):
         driver = ydb.Driver(endpoint=f'grpc://localhost:{self.cluster.nodes[1].grpc_port}', database='/Root')
         session = ydb.QuerySessionPool(driver)
         driver.wait(5, fail_fast=True)
+        return session
 
-        def create_table(table):
-            return session.execute_with_retries(f"""
+    def create_test_table(self, session, table):
+        return session.execute_with_retries(f"""
             CREATE TABLE {table} (
                 k Int32 NOT NULL,
                 v Uint64,
                 PRIMARY KEY (k)
             ) WITH (STORE = COLUMN)
         """)
 
-        def upsert_chunk(table, chunk_id, retries=10):
-            return session.execute_with_retries(f"""
+    def upsert_test_chunk(self, session, table, chunk_id, retries=10):
+        return session.execute_with_retries(f"""
             $n = {ROWS_CHUNK_SIZE};
             $values_list = ListReplicate(42ul, $n);
             $rows_list = ListFoldMap($values_list, {chunk_id * ROWS_CHUNK_SIZE}, ($val, $i) -> ((<|k:$i, v:$val|>, $i + 1)));
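A note on the chunk-generating YQL above (the query's SELECT * FROM AS_TABLE($rows_list) tail appears at the top of the next hunk): ListReplicate(42ul, $n) builds a list of $n copies of the value 42, and ListFoldMap threads a counter seeded with chunk_id * ROWS_CHUNK_SIZE through that list, emitting one struct <|k: counter, v: 42|> per element. As a worked example, with an illustrative ROWS_CHUNK_SIZE of 3 (the real constant is defined earlier in the file and is not shown in this diff), upsert_test_chunk(session, 'huge', chunk_id=2) evaluates

    $values_list = [42, 42, 42]
    $rows_list   = [<|k: 6, v: 42|>, <|k: 7, v: 42|>, <|k: 8, v: 42|>]

so consecutive chunk_ids write disjoint, consecutive key ranges, which is what lets the tests fill the column table chunk by chunk until the database reports an overload.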
@@ -48,17 +49,95 @@ def upsert_chunk(table, chunk_id, retries=10):
             SELECT * FROM AS_TABLE($rows_list);
         """, None, ydb.retries.RetrySettings(max_retries=retries))
 
-        create_table('huge')
-
+    def upsert_until_overload(self, session, table):
         try:
             for i in range(ROWS_CHUNKS_COUNT):
-                res = upsert_chunk('huge', i, retries=0)
-                print(f"query #{i} ok, result:", res, file=sys.stderr)
+                res = self.upsert_test_chunk(session, table, i, retries=0)
+                print(f"upsert #{i} ok, result:", res, file=sys.stderr)
         except ydb.issues.Overloaded:
-            print('got overload issue', file=sys.stderr)
+            print('upsert: got overload issue', file=sys.stderr)
+
+    def test(self):
+        """As per https://github.com/ydb-platform/ydb/issues/13529"""
+        session = self.make_session()
+
+        # Overflow the database
+        self.create_test_table(session, 'huge')
+        self.upsert_until_overload(session, 'huge')
 
+        # Cleanup
         session.execute_with_retries("""DROP TABLE huge""")
 
         # Check database health after cleanup
-        create_table('small')
-        upsert_chunk('small', 0)
+        self.create_test_table(session, 'small')
+        self.upsert_test_chunk(session, 'small', 0)
+
+    def delete_test_chunk(self, session, table, chunk_id, retries=10):
+        session.execute_with_retries(f"""
+            DELETE FROM {table}
+            WHERE {chunk_id * ROWS_CHUNK_SIZE} <= k AND k <= {chunk_id * ROWS_CHUNK_SIZE + ROWS_CHUNK_SIZE}
+        """, None, ydb.retries.RetrySettings(max_retries=retries))
+
+    def delete_until_overload(self, session, table):
+        for i in range(ROWS_CHUNKS_COUNT):
+            try:
+                self.delete_test_chunk(session, table, i, retries=0)
+                print(f"delete #{i} ok", file=sys.stderr)
+            except ydb.issues.Overloaded:
+                print('delete: got overload issue', file=sys.stderr)
+                return i
+
+    def ydbcli_db_schema_exec(self, node, operation_proto):
+        endpoint = f"{node.host}:{node.port}"
+        args = [
+            node.binary_path,
+            f"--server=grpc://{endpoint}",
+            "db",
+            "schema",
+            "exec",
+            operation_proto,
+        ]
+        command = subprocess.run(args, capture_output=True)
+        assert command.returncode == 0, command.stderr.decode("utf-8")
+
+    def alter_database_quotas(self, node, database_path, database_quotas):
+        alter_proto = """ModifyScheme {
+            OperationType: ESchemeOpAlterSubDomain
+            WorkingDir: "%s"
+            SubDomain {
+                Name: "%s"
+                DatabaseQuotas {
+                    %s
+                }
+            }
+        }""" % (
+            os.path.dirname(database_path),
+            os.path.basename(database_path),
+            database_quotas,
+        )
+
+        self.ydbcli_db_schema_exec(node, alter_proto)
+
+    def test_delete(self):
+        """As per https://github.com/ydb-platform/ydb/issues/13653"""
+        session = self.make_session()
+
+        # Set soft and hard quotas to 6 GB
+        self.alter_database_quotas(self.cluster.nodes[1], '/Root', """
+            data_size_hard_quota: 6000000000
+            data_size_soft_quota: 6000000000
+        """)
+
+        # Overflow the database
+        self.create_test_table(session, 'huge')
+        self.upsert_until_overload(session, 'huge')
+
+        # Check that deletion works at least the first time
+        # self.delete_test_chunk(session, 'huge', 0)
+        # ^ uncomment after fixing https://github.com/ydb-platform/ydb/issues/13808
+
+        # Check that deletions lead to an overflow at some point
+        i = self.delete_until_overload(session, 'huge')
+
+        # Try to wait until deletion works again (after compaction)
+        self.delete_test_chunk(session, 'huge', i)
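One more note on the retries=0 convention shared by upsert_until_overload and delete_until_overload: passing ydb.retries.RetrySettings(max_retries=0) caps the SDK retry loop at zero retries, so an out-of-space rejection surfaces right away as ydb.issues.Overloaded instead of being retried with backoff, and catching that exception is how both helpers detect quota exhaustion. A minimal sketch of the same probe, assuming pool is a ydb.QuerySessionPool and query is some write statement (hits_overload is a hypothetical helper, not part of this change):

    import ydb

    def hits_overload(pool, query):
        # Run the statement once, with client-side retries disabled, and
        # report whether the database rejected it as overloaded.
        try:
            pool.execute_with_retries(query, None, ydb.retries.RetrySettings(max_retries=0))
            return False
        except ydb.issues.Overloaded:
            return True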