Skip to content

Commit dc365ca

Browse files
Vadim Averin, blinkov
Vadim Averin
authored and committed
Add test for storage exhaustion when deleting rows (#13760)
1 parent 5468ab0 commit dc365ca

File tree

1 file changed

+94
-13
lines changed

1 file changed

+94
-13
lines changed

ydb/tests/olap/test_quota_exhaustion.py

Lines changed: 94 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,5 @@
1+
import os
2+
import subprocess
13
import sys
24

35
import ydb
@@ -23,23 +25,23 @@ def setup_class(cls):
2325
def teardown_class(cls):
    """Shut the shared test cluster down after the last test in the class."""
    cls.cluster.stop()
2527

26-
@link_test_case("#13529")
27-
def test(self):
28+
def make_session(self):
    """Open a query session pool against node 1 of the test cluster.

    Returns a ``ydb.QuerySessionPool`` ready for ``execute_with_retries``.
    NOTE(review): the driver is never closed explicitly — presumably cluster
    teardown makes that acceptable for these tests; confirm.
    """
    grpc_port = self.cluster.nodes[1].grpc_port
    drv = ydb.Driver(endpoint=f'grpc://localhost:{grpc_port}', database='/Root')
    pool = ydb.QuerySessionPool(drv)
    # Fail fast if the node is not reachable within 5 seconds.
    drv.wait(5, fail_fast=True)
    return pool
3133

32-
def create_table(table):
33-
return session.execute_with_retries(f"""
34+
def create_test_table(self, session, table):
    """Create a column-store table `table` with schema (k Int32 PK, v Uint64)."""
    ddl = f"""
        CREATE TABLE {table} (
            k Int32 NOT NULL,
            v Uint64,
            PRIMARY KEY (k)
        ) WITH (STORE = COLUMN)
    """
    return session.execute_with_retries(ddl)
4042

41-
def upsert_chunk(table, chunk_id, retries=10):
42-
return session.execute_with_retries(f"""
43+
def upsert_test_chunk(self, session, table, chunk_id, retries=10):
44+
return session.execute_with_retries(f"""
4345
$n = {ROWS_CHUNK_SIZE};
4446
$values_list = ListReplicate(42ul, $n);
4547
$rows_list = ListFoldMap($values_list, {chunk_id * ROWS_CHUNK_SIZE}, ($val, $i) -> ((<|k:$i, v:$val|>, $i + 1)));
@@ -48,17 +50,96 @@ def upsert_chunk(table, chunk_id, retries=10):
4850
SELECT * FROM AS_TABLE($rows_list);
4951
""", None, ydb.retries.RetrySettings(max_retries=retries))
5052

51-
create_table('huge')
52-
53+
def upsert_until_overload(self, session, table):
    """Upsert chunks into `table` until the cluster raises Overloaded.

    Each chunk is written with retries disabled so the first Overloaded
    response stops the loop instead of being retried away.
    """
    try:
        for chunk in range(ROWS_CHUNKS_COUNT):
            result = self.upsert_test_chunk(session, table, chunk, retries=0)
            print(f"upsert #{chunk} ok, result:", result, file=sys.stderr)
    except ydb.issues.Overloaded:
        # Expected terminal condition: the database ran out of quota.
        print('upsert: got overload issue', file=sys.stderr)
5960

61+
@link_test_case("#13529")
def test(self):
    """As per https://github.com/ydb-platform/ydb/issues/13529"""
    pool = self.make_session()

    # Fill the database until the storage quota is exhausted.
    self.create_test_table(pool, 'huge')
    self.upsert_until_overload(pool, 'huge')

    # Drop the oversized table to release the quota.
    pool.execute_with_retries("""DROP TABLE huge""")

    # The database must be usable again after cleanup.
    self.create_test_table(pool, 'small')
    self.upsert_test_chunk(pool, 'small', 0)
76+
77+
def delete_test_chunk(self, session, table, chunk_id, retries=10):
    """Delete exactly the rows of chunk `chunk_id` from `table`.

    Chunk `c` owns keys ``c*ROWS_CHUNK_SIZE .. c*ROWS_CHUNK_SIZE + ROWS_CHUNK_SIZE - 1``
    (see the key generation in upsert_test_chunk, which folds from
    ``chunk_id * ROWS_CHUNK_SIZE`` upwards).

    Fix: the upper bound is now strict (``k <``). The previous inclusive
    bound (``k <= c*N + N``) also deleted the first row of chunk ``c + 1``.
    """
    session.execute_with_retries(f"""
        DELETE FROM {table}
        WHERE {chunk_id * ROWS_CHUNK_SIZE} <= k AND k < {(chunk_id + 1) * ROWS_CHUNK_SIZE}
    """, None, ydb.retries.RetrySettings(max_retries=retries))
82+
83+
def delete_until_overload(self, session, table):
    """Delete chunks one at a time until the cluster raises Overloaded.

    Returns the index of the chunk whose deletion hit the overload.

    Fix: if every deletion succeeds without an overload (the loop runs to
    completion), the original implicitly returned None, and the caller's
    follow-up ``delete_test_chunk(session, table, None)`` would crash on
    ``None * ROWS_CHUNK_SIZE``. We now return the last attempted index so
    the caller's retry remains a valid (idempotent) delete.
    """
    for i in range(ROWS_CHUNKS_COUNT):
        try:
            # retries=0 so the first Overloaded surfaces instead of retrying.
            self.delete_test_chunk(session, table, i, retries=0)
            print(f"delete #{i} ok", file=sys.stderr)
        except ydb.issues.Overloaded:
            print('delete: got overload issue', file=sys.stderr)
            return i
    # No overload occurred; report the last chunk index rather than None.
    return ROWS_CHUNKS_COUNT - 1
91+
92+
def ydbcli_db_schema_exec(self, node, operation_proto):
    """Execute a scheme operation proto via the node's `ydb db schema exec` CLI.

    Asserts that the CLI exits with status 0; on failure the assertion
    message carries the decoded stderr of the command.
    """
    result = subprocess.run(
        [
            node.binary_path,
            f"--server=grpc://{node.host}:{node.port}",
            "db",
            "schema",
            "exec",
            operation_proto,
        ],
        capture_output=True,
    )
    assert result.returncode == 0, result.stderr.decode("utf-8")
104+
105+
def alter_database_quotas(self, node, database_path, database_quotas):
    """Apply `database_quotas` to the database at `database_path`.

    Builds an ESchemeOpAlterSubDomain ModifyScheme proto (splitting the path
    into WorkingDir / Name) and submits it through the node's CLI.
    """
    working_dir = os.path.dirname(database_path)
    db_name = os.path.basename(database_path)
    proto = """ModifyScheme {
        OperationType: ESchemeOpAlterSubDomain
        WorkingDir: "%s"
        SubDomain {
            Name: "%s"
            DatabaseQuotas {
                %s
            }
        }
    }""" % (working_dir, db_name, database_quotas)

    self.ydbcli_db_schema_exec(node, proto)
122+
123+
def test_delete(self):
    """As per https://github.com/ydb-platform/ydb/issues/13653"""
    pool = self.make_session()

    # Set soft and hard quotas to 6GB
    self.alter_database_quotas(self.cluster.nodes[1], '/Root', """
        data_size_hard_quota: 6000000000
        data_size_soft_quota: 6000000000
    """)

    # Fill the database until the quota is exhausted.
    self.create_test_table(pool, 'huge')
    self.upsert_until_overload(pool, 'huge')

    # Check that deletion works at least first time
    # self.delete_test_chunk(session, 'huge', 0)
    # ^ uncomment after fixing https://github.com/ydb-platform/ydb/issues/13808

    # Deleting rows also consumes storage, so deletions themselves are
    # expected to hit the quota at some point.
    overloaded_chunk = self.delete_until_overload(pool, 'huge')

    # After compaction frees space, deleting that chunk should succeed
    # (execute_with_retries keeps retrying until it does).
    self.delete_test_chunk(pool, 'huge', overloaded_chunk)

0 commit comments

Comments
 (0)