1
+ import os
2
+ import subprocess
1
3
import sys
4
+ import time
2
5
3
6
import ydb
4
7
from ydb .tests .library .harness .kikimr_config import KikimrConfigGenerator
@@ -22,24 +25,23 @@ def setup_class(cls):
22
25
    def teardown_class(cls):
        # Stop the KiKiMR test cluster started in setup_class (not visible in
        # this chunk) so no node processes outlive the test class.
        cls.cluster.stop()
- def test (self ):
26
- """As per https://github.com/ydb-platform/ydb/issues/13529"""
27
-
28
+ def make_session (self ):
28
29
driver = ydb .Driver (endpoint = f'grpc://localhost:{ self .cluster .nodes [1 ].grpc_port } ' , database = '/Root' )
29
30
session = ydb .QuerySessionPool (driver )
30
31
driver .wait (5 , fail_fast = True )
32
+ return session
31
33
32
    def create_test_table(self, session, table):
        """Create a column-store table *table* with schema (k Int32 PRIMARY KEY, v Uint64).

        Retried via the pool's default retry policy; returns whatever
        execute_with_retries returns.
        """
        # NOTE(review): *table* is interpolated directly into the query text —
        # fine for test-controlled names, unsafe for untrusted input.
        return session.execute_with_retries(f"""
            CREATE TABLE {table} (
                k Int32 NOT NULL,
                v Uint64,
                PRIMARY KEY (k)
            ) WITH (STORE = COLUMN)
            """)
- def upsert_chunk ( table , chunk_id , retries = 10 ):
42
- return session .execute_with_retries (f"""
43
+ def upsert_test_chunk ( self , session , table , chunk_id , retries = 10 ):
44
+ return session .execute_with_retries (f"""
43
45
$n = { ROWS_CHUNK_SIZE } ;
44
46
$values_list = ListReplicate(42ul, $n);
45
47
$rows_list = ListFoldMap($values_list, { chunk_id * ROWS_CHUNK_SIZE } , ($val, $i) -> ((<|k:$i, v:$val|>, $i + 1)));
@@ -48,17 +50,96 @@ def upsert_chunk(table, chunk_id, retries=10):
48
50
SELECT * FROM AS_TABLE($rows_list);
49
51
""" , None , ydb .retries .RetrySettings (max_retries = retries ))
50
52
51
- create_table ('huge' )
52
-
53
    def upsert_until_overload(self, session, table):
        """Upsert fixed-size chunks into *table* until the database reports Overloaded.

        Retries are disabled (retries=0) so the first Overloaded status escapes
        immediately; the exception is swallowed because hitting the quota is the
        expected terminal state of this helper.
        """
        try:
            for i in range(ROWS_CHUNKS_COUNT):
                res = self.upsert_test_chunk(session, table, i, retries=0)
                print(f"upsert #{i} ok, result:", res, file=sys.stderr)
        except ydb.issues.Overloaded:
            print('upsert: got overload issue', file=sys.stderr)
        # NOTE(review): if the loop completes without ever overloading, the
        # caller cannot tell — consider failing loudly in that case.
    def test(self):
        """Overflow the DB, drop the big table, then verify writes work again.

        As per https://github.com/ydb-platform/ydb/issues/13529
        """
        session = self.make_session()

        # Overflow the database
        self.create_test_table(session, 'huge')
        self.upsert_until_overload(session, 'huge')

        # Cleanup
        session.execute_with_retries("""DROP TABLE huge""")

        # Check database health after cleanup: a fresh table must accept writes.
        self.create_test_table(session, 'small')
        self.upsert_test_chunk(session, 'small', 0)
+ def delete_test_chunk (self , session , table , chunk_id , retries = 10 ):
77
+ session .execute_with_retries (f"""
78
+ DELETE FROM { table }
79
+ WHERE { chunk_id * ROWS_CHUNK_SIZE } <= k AND k <= { chunk_id * ROWS_CHUNK_SIZE + ROWS_CHUNK_SIZE }
80
+ """ , None , ydb .retries .RetrySettings (max_retries = retries ))
81
+
82
+ def delete_until_overload (self , session , table ):
83
+ for i in range (ROWS_CHUNKS_COUNT ):
84
+ try :
85
+ self .delete_test_chunk (session , table , i , retries = 0 )
86
+ print (f"delete #{ i } ok" , file = sys .stderr )
87
+ except ydb .issues .Overloaded :
88
+ print ('delete: got overload issue' , file = sys .stderr )
89
+ return i
90
+
91
+ def ydbcli_db_schema_exec (self , node , operation_proto ):
92
+ endpoint = f"{ node .host } :{ node .port } "
93
+ args = [
94
+ node .binary_path ,
95
+ f"--server=grpc://{ endpoint } " ,
96
+ "db" ,
97
+ "schema" ,
98
+ "exec" ,
99
+ operation_proto ,
100
+ ]
101
+ command = subprocess .run (args , capture_output = True )
102
+ assert command .returncode == 0 , command .stderr .decode ("utf-8" )
103
+
104
+
105
    def alter_database_quotas(self, node, database_path, database_quotas):
        """Apply *database_quotas* (a DatabaseQuotas textproto body) to *database_path*.

        Builds an ESchemeOpAlterSubDomain ModifyScheme textproto and submits it
        through the ydb CLI on *node* (see ydbcli_db_schema_exec).
        """
        # WorkingDir/Name are derived by splitting the database path, e.g.
        # '/Root' -> WorkingDir '/', Name 'Root'.
        alter_proto = """ModifyScheme {
            OperationType: ESchemeOpAlterSubDomain
            WorkingDir: "%s"
            SubDomain {
                Name: "%s"
                DatabaseQuotas {
                    %s
                }
            }
        }""" % (
            os.path.dirname(database_path),
            os.path.basename(database_path),
            database_quotas,
        )

        self.ydbcli_db_schema_exec(node, alter_proto)
    def test_delete(self):
        """Verify DELETE behavior on a quota-overflowed database.

        As per https://github.com/ydb-platform/ydb/issues/13653
        """
        session = self.make_session()

        # Set soft and hard quotas to 6GB
        self.alter_database_quotas(self.cluster.nodes[1], '/Root', """
            data_size_hard_quota: 6000000000
            data_size_soft_quota: 6000000000
        """)

        # Overflow the database
        self.create_test_table(session, 'huge')
        self.upsert_until_overload(session, 'huge')

        # Check that deletion works at least first time
        # self.delete_test_chunk(session, 'huge', 0)
        # ^ uncomment after fixing https://github.com/ydb-platform/ydb/issues/13808

        # Check that deletions will lead to overflow at some moment
        i = self.delete_until_overload(session, 'huge')

        # Try to wait until deletion works again (after compaction)
        self.delete_test_chunk(session, 'huge', i)
0 commit comments