
Commit fcec41e

Simplify parameterization to plain python
1 parent 1b7902c commit fcec41e

2 files changed, +45 -21 lines changed


setup.py (+1 -2)

@@ -34,8 +34,7 @@ def read(filename):
         'Cython',
         'asyncpg>=0.18.2, < 0.20',
         'pyodbc',
-        'psycopg2-binary>=2.7.5',
-        'parameterized'
+        'psycopg2-binary>=2.7.5'
     ],
     python_requires='>=3.6',
     classifiers=[

tests/bwc/test_recovery.py (+44 -19)
@@ -2,15 +2,14 @@
 import unittest

 from cr8.run_crate import get_crate, _extract_version
-from parameterized import parameterized
 from crate.client import connect
 import random
 from random import sample

 from crate.qa.tests import NodeProvider, insert_data, UpgradePath

-UPGRADE_PATHS = [(UpgradePath('4.2.x', '4.3.x'),), (UpgradePath('4.3.x', 'latest-nightly'),)]
-UPGRADE_PATHS_FROM_43 = [(UpgradePath('4.3.x', 'latest-nightly'),)]
+UPGRADE_PATHS = [UpgradePath('4.2.x', '4.3.x'), UpgradePath('4.3.x', 'latest-nightly')]
+UPGRADE_PATHS_FROM_43 = [UpgradePath('4.3.x', 'latest-nightly')]


 class RecoveryTest(NodeProvider, unittest.TestCase):
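
A note on the hunk above: parameterized.expand treats each list entry as the positional-argument tuple for one generated test, and a tuple-like value (assuming UpgradePath is a namedtuple) would otherwise be star-unpacked into several arguments; hence the one-element tuples, which the plain list no longer needs. A minimal sketch of that old convention, with hypothetical names (Path, TupleConventionDemo):

import unittest
from collections import namedtuple

from parameterized import parameterized

# Hypothetical stand-in for UpgradePath, assuming a tuple-like type.
Path = namedtuple('Path', ['from_version', 'to_version'])


class TupleConventionDemo(unittest.TestCase):

    # Each entry supplies the positional arguments of one generated test;
    # a bare Path(...) would be unpacked into two arguments, so it had to
    # be wrapped in a one-element tuple.
    @parameterized.expand([
        (Path('4.2.x', '4.3.x'),),
        (Path('4.3.x', 'latest-nightly'),),
    ])
    def test_path(self, path):
        self.assertIsInstance(path.from_version, str)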
@@ -83,12 +82,23 @@ def _upgrade_cluster(self, cluster, version: str, nodes: int) -> None:
             new_node = self.upgrade_node(node, version)
             cluster[i] = new_node

-    @parameterized.expand(UPGRADE_PATHS)
-    def test_recovery_with_concurrent_indexing(self, path):
+    def _run_upgrade_paths(self, test, paths):
+        for p in paths:
+            try:
+                self.setUp()
+                test(p)
+            finally:
+                self.tearDown()
+
+    def test_recovery_with_concurrent_indexing(self):
         """
         This test creates a new table and insert data at every stage of the
         rolling upgrade.
         """
+
+        self._run_upgrade_paths(self._test_recovery_with_concurrent_indexing, UPGRADE_PATHS)
+
+    def _test_recovery_with_concurrent_indexing(self, path):
         cluster = self._new_cluster(path.from_version, self.NUMBER_OF_NODES)
         cluster.start()

@@ -143,8 +153,10 @@ def test_recovery_with_concurrent_indexing(self, path):
            for node_id in node_ids:
                self.assert_busy(lambda: self._assert_num_docs_by_node_id(conn, 'doc', 'test', node_id[0], 105))

-    @parameterized.expand(UPGRADE_PATHS)
-    def test_relocation_with_concurrent_indexing(self, path):
+    def test_relocation_with_concurrent_indexing(self):
+        self._run_upgrade_paths(self._test_relocation_with_concurrent_indexing, UPGRADE_PATHS)
+
+    def _test_relocation_with_concurrent_indexing(self, path):
         cluster = self._new_cluster(path.from_version, self.NUMBER_OF_NODES)
         cluster.start()

@@ -216,13 +228,15 @@ def _assert_shard_state(self, conn, schema, table_name, node_id, state):
         self.assertTrue(current_state)
         self.assertEqual(current_state[0], state)

-    @parameterized.expand(UPGRADE_PATHS)
-    def test_recovery(self, path):
+    def test_recovery(self):
         """
         This test creates a new table, insert data and asserts the state at every stage of the
         rolling upgrade.
         """

+        self._run_upgrade_paths(self._test_recovery, UPGRADE_PATHS)
+
+    def _test_recovery(self, path):
         cluster = self._new_cluster(path.from_version, self.NUMBER_OF_NODES)
         cluster.start()

@@ -253,13 +267,15 @@ def test_recovery(self, path):

            self.assert_busy(lambda: self._assert_is_green(conn, 'doc', 'test'))

-    @parameterized.expand(UPGRADE_PATHS)
-    def test_recovery_closed_index(self, path):
+    def test_recovery_closed_index(self):
         """
         This test creates a table in the non upgraded cluster and closes it. It then
         checks that the table is effectively closed and potentially replicated.
         """

+        self._run_upgrade_paths(self._test_recovery_closed_index, UPGRADE_PATHS)
+
+    def _test_recovery_closed_index(self, path):
         cluster = self._new_cluster(path.from_version, self.NUMBER_OF_NODES)
         cluster.start()

@@ -284,8 +300,10 @@ def test_recovery_closed_index(self, path):

            self._assert_is_closed(conn, 'doc', 'test')

-    @parameterized.expand(UPGRADE_PATHS)
-    def test_closed_index_during_rolling_upgrade(self, path):
+    def test_closed_index_during_rolling_upgrade(self):
+        self._run_upgrade_paths(self._test_closed_index_during_rolling_upgrade, UPGRADE_PATHS)
+
+    def _test_closed_index_during_rolling_upgrade(self, path):
         """
         This test creates and closes a new table at every stage of the rolling
         upgrade. It then checks that the table is effectively closed and
@@ -334,12 +352,15 @@ def test_closed_index_during_rolling_upgrade(self, path):

            self._assert_is_closed(conn, 'doc', 'upgraded_cluster')

-    @parameterized.expand(UPGRADE_PATHS)
-    def test_update_docs(self, path):
+    def test_update_docs(self):
         """
         This test creates a new table, insert data and updates data at every state at every stage of the
         rolling upgrade.
         """
+
+        self._run_upgrade_paths(self._test_update_docs, UPGRADE_PATHS)
+
+    def _test_update_docs(self, path):
         cluster = self._new_cluster(path.from_version, self.NUMBER_OF_NODES)
         cluster.start()
         with connect(cluster.node().http_url, error_trace=True) as conn:
@@ -384,8 +405,7 @@ def test_update_docs(self, path):
            for result in res:
                self.assertEqual(result['rowcount'], 1)

-    @parameterized.expand(UPGRADE_PATHS_FROM_43)
-    def test_operation_based_recovery(self, path):
+    def test_operation_based_recovery(self):
         """
         Tests that we should perform an operation-based recovery if there were
         some but not too many uncommitted documents (i.e., less than 10% of
@@ -394,6 +414,9 @@ def test_operation_based_recovery(self, path):
         based peer recoveries.
         """

+        self._run_upgrade_paths(self._test_operation_based_recovery, UPGRADE_PATHS_FROM_43)
+
+    def _test_operation_based_recovery(self, path):
         cluster = self._new_cluster(path.from_version, self.NUMBER_OF_NODES)
         cluster.start()

@@ -435,13 +458,15 @@ def test_operation_based_recovery(self, path):

            self._assert_ensure_checkpoints_are_synced(conn, 'doc', 'test')

-    @parameterized.expand(UPGRADE_PATHS_FROM_43)
-    def test_turnoff_translog_retention_after_upgraded(self, path):
+    def test_turnoff_translog_retention_after_upgraded(self):
         """
         Verifies that once all shard copies on the new version, we should turn
         off the translog retention for indices with soft-deletes.
         """

+        self._run_upgrade_paths(self._test_turnoff_translog_retention_after_upgraded, UPGRADE_PATHS_FROM_43)
+
+    def _test_turnoff_translog_retention_after_upgraded(self, path):
         cluster = self._new_cluster(path.from_version, self.NUMBER_OF_NODES)
         cluster.start()
