Skip to content

Commit ebfec5e

Browse files
committed
Only test chunking where chunk size is greater than window size (i.e. remove for single-window tests).
1 parent a05beea commit ebfec5e

File tree

1 file changed

+12
-13
lines changed

1 file changed

+12
-13
lines changed

sgkit/tests/test_popgen.py

+12-13
Original file line numberDiff line numberDiff line change
@@ -125,10 +125,11 @@ def test_divergence(size, n_cohorts, chunks):
125125

126126

127127
@pytest.mark.parametrize("size, n_cohorts", [(10, 2)])
128-
def test_divergence__windowed(size, n_cohorts):
128+
@pytest.mark.parametrize("chunks", [(-1, -1), (50, -1)])
129+
def test_divergence__windowed(size, n_cohorts, chunks):
129130
ts = msprime.simulate(size, length=200, mutation_rate=0.05, random_seed=42)
130131
subsets = np.array_split(ts.samples(), n_cohorts)
131-
ds = ts_to_dataset(ts) # type: ignore[no-untyped-call]
132+
ds = ts_to_dataset(ts, chunks) # type: ignore[no-untyped-call]
132133
sample_cohorts = np.concatenate(
133134
[np.full_like(subset, i) for i, subset in enumerate(subsets)]
134135
)
@@ -167,13 +168,12 @@ def test_divergence__windowed(size, n_cohorts):
167168

168169

169170
@pytest.mark.parametrize("size", [2, 3, 10, 100])
170-
@pytest.mark.parametrize("chunks", [(-1, -1), (10, -1)])
171-
def test_Fst__Hudson(size, chunks):
171+
def test_Fst__Hudson(size):
172172
# scikit-allel can only calculate Fst for pairs of cohorts (populations)
173173
n_cohorts = 2
174174
ts = msprime.simulate(size, length=100, mutation_rate=0.05, random_seed=42)
175175
subsets = np.array_split(ts.samples(), n_cohorts)
176-
ds = ts_to_dataset(ts, chunks) # type: ignore[no-untyped-call]
176+
ds = ts_to_dataset(ts) # type: ignore[no-untyped-call]
177177
sample_cohorts = np.concatenate(
178178
[np.full_like(subset, i) for i, subset in enumerate(subsets)]
179179
)
@@ -198,11 +198,10 @@ def test_Fst__Hudson(size, chunks):
198198
"size, n_cohorts",
199199
[(2, 2), (3, 2), (3, 3), (10, 2), (10, 3), (10, 4), (100, 2), (100, 3), (100, 4)],
200200
)
201-
@pytest.mark.parametrize("chunks", [(-1, -1), (10, -1)])
202-
def test_Fst__Nei(size, n_cohorts, chunks):
201+
def test_Fst__Nei(size, n_cohorts):
203202
ts = msprime.simulate(size, length=100, mutation_rate=0.05, random_seed=42)
204203
subsets = np.array_split(ts.samples(), n_cohorts)
205-
ds = ts_to_dataset(ts, chunks) # type: ignore[no-untyped-call]
204+
ds = ts_to_dataset(ts) # type: ignore[no-untyped-call]
206205
sample_cohorts = np.concatenate(
207206
[np.full_like(subset, i) for i, subset in enumerate(subsets)]
208207
)
@@ -234,10 +233,11 @@ def test_Fst__unknown_estimator():
234233
"size, n_cohorts",
235234
[(10, 2)],
236235
)
237-
def test_Fst__windowed(size, n_cohorts):
236+
@pytest.mark.parametrize("chunks", [(-1, -1), (50, -1)])
237+
def test_Fst__windowed(size, n_cohorts, chunks):
238238
ts = msprime.simulate(size, length=200, mutation_rate=0.05, random_seed=42)
239239
subsets = np.array_split(ts.samples(), n_cohorts)
240-
ds = ts_to_dataset(ts) # type: ignore[no-untyped-call]
240+
ds = ts_to_dataset(ts, chunks) # type: ignore[no-untyped-call]
241241
sample_cohorts = np.concatenate(
242242
[np.full_like(subset, i) for i, subset in enumerate(subsets)]
243243
)
@@ -275,10 +275,9 @@ def test_Fst__windowed(size, n_cohorts):
275275

276276

277277
@pytest.mark.parametrize("size", [2, 3, 10, 100])
278-
@pytest.mark.parametrize("chunks", [(-1, -1), (10, -1)])
279-
def test_Tajimas_D(size, chunks):
278+
def test_Tajimas_D(size):
280279
ts = msprime.simulate(size, length=100, mutation_rate=0.05, random_seed=42)
281-
ds = ts_to_dataset(ts, chunks) # type: ignore[no-untyped-call]
280+
ds = ts_to_dataset(ts) # type: ignore[no-untyped-call]
282281
sample_cohorts = np.full_like(ts.samples(), 0)
283282
ds["sample_cohort"] = xr.DataArray(sample_cohorts, dims="samples")
284283
n_variants = ds.dims["variants"]

0 commit comments

Comments (0)