@@ -125,10 +125,11 @@ def test_divergence(size, n_cohorts, chunks):
 
 
 @pytest.mark.parametrize("size, n_cohorts", [(10, 2)])
-def test_divergence__windowed(size, n_cohorts):
+@pytest.mark.parametrize("chunks", [(-1, -1), (50, -1)])
+def test_divergence__windowed(size, n_cohorts, chunks):
     ts = msprime.simulate(size, length=200, mutation_rate=0.05, random_seed=42)
     subsets = np.array_split(ts.samples(), n_cohorts)
-    ds = ts_to_dataset(ts)  # type: ignore[no-untyped-call]
+    ds = ts_to_dataset(ts, chunks)  # type: ignore[no-untyped-call]
     sample_cohorts = np.concatenate(
         [np.full_like(subset, i) for i, subset in enumerate(subsets)]
     )
@@ -167,13 +168,12 @@ def test_divergence__windowed(size, n_cohorts):
 
 
 @pytest.mark.parametrize("size", [2, 3, 10, 100])
-@pytest.mark.parametrize("chunks", [(-1, -1), (10, -1)])
-def test_Fst__Hudson(size, chunks):
+def test_Fst__Hudson(size):
     # scikit-allel can only calculate Fst for pairs of cohorts (populations)
     n_cohorts = 2
     ts = msprime.simulate(size, length=100, mutation_rate=0.05, random_seed=42)
     subsets = np.array_split(ts.samples(), n_cohorts)
-    ds = ts_to_dataset(ts, chunks)  # type: ignore[no-untyped-call]
+    ds = ts_to_dataset(ts)  # type: ignore[no-untyped-call]
     sample_cohorts = np.concatenate(
         [np.full_like(subset, i) for i, subset in enumerate(subsets)]
     )
@@ -198,11 +198,10 @@ def test_Fst__Hudson(size, chunks):
     "size, n_cohorts",
     [(2, 2), (3, 2), (3, 3), (10, 2), (10, 3), (10, 4), (100, 2), (100, 3), (100, 4)],
 )
-@pytest.mark.parametrize("chunks", [(-1, -1), (10, -1)])
-def test_Fst__Nei(size, n_cohorts, chunks):
+def test_Fst__Nei(size, n_cohorts):
     ts = msprime.simulate(size, length=100, mutation_rate=0.05, random_seed=42)
     subsets = np.array_split(ts.samples(), n_cohorts)
-    ds = ts_to_dataset(ts, chunks)  # type: ignore[no-untyped-call]
+    ds = ts_to_dataset(ts)  # type: ignore[no-untyped-call]
     sample_cohorts = np.concatenate(
         [np.full_like(subset, i) for i, subset in enumerate(subsets)]
     )
@@ -234,10 +233,11 @@ def test_Fst__unknown_estimator():
     "size, n_cohorts",
     [(10, 2)],
 )
-def test_Fst__windowed(size, n_cohorts):
+@pytest.mark.parametrize("chunks", [(-1, -1), (50, -1)])
+def test_Fst__windowed(size, n_cohorts, chunks):
     ts = msprime.simulate(size, length=200, mutation_rate=0.05, random_seed=42)
     subsets = np.array_split(ts.samples(), n_cohorts)
-    ds = ts_to_dataset(ts)  # type: ignore[no-untyped-call]
+    ds = ts_to_dataset(ts, chunks)  # type: ignore[no-untyped-call]
     sample_cohorts = np.concatenate(
         [np.full_like(subset, i) for i, subset in enumerate(subsets)]
     )
@@ -275,10 +275,9 @@ def test_Fst__windowed(size, n_cohorts):
 
 
 @pytest.mark.parametrize("size", [2, 3, 10, 100])
-@pytest.mark.parametrize("chunks", [(-1, -1), (10, -1)])
-def test_Tajimas_D(size, chunks):
+def test_Tajimas_D(size):
     ts = msprime.simulate(size, length=100, mutation_rate=0.05, random_seed=42)
-    ds = ts_to_dataset(ts, chunks)  # type: ignore[no-untyped-call]
+    ds = ts_to_dataset(ts)  # type: ignore[no-untyped-call]
     sample_cohorts = np.full_like(ts.samples(), 0)
     ds["sample_cohort"] = xr.DataArray(sample_cohorts, dims="samples")
     n_variants = ds.dims["variants"]
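
Note: the change above moves the chunks parametrization from the plain estimator tests onto the windowed tests and threads it into ts_to_dataset(ts, chunks). For context, below is a minimal sketch of what a ts_to_dataset-style helper can look like. This is an illustrative assumption built on tskit, numpy, and xarray, not the helper defined in this test suite; the real function may construct the dataset differently (for example with an explicit ploidy dimension).

# Hypothetical sketch of a ts_to_dataset-style helper; not the test suite's
# actual implementation. A chunk size of -1 means "one chunk for the whole
# dimension" (dask convention).
import numpy as np
import xarray as xr


def ts_to_dataset_sketch(ts, chunks=None):
    # One row per variant: the genotype of every sample at that site
    # (0/1 for biallelic sites simulated by msprime).
    genotypes = np.array([v.genotypes for v in ts.variants()], dtype="int8")
    ds = xr.Dataset(
        {
            "call_genotype": (("variants", "samples"), genotypes),
            "variant_position": ("variants", ts.tables.sites.position),
        }
    )
    if chunks is not None:
        # Rechunk with dask so downstream computation runs blockwise.
        ds = ds.chunk({"variants": chunks[0], "samples": chunks[1]})
    return ds

Under this sketch, ts_to_dataset_sketch(ts, chunks=(50, -1)) would split the variants dimension into blocks of 50 while keeping all samples in a single chunk, mirroring the (50, -1) case the windowed tests now exercise.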