
Commit 5f3d0e4

Adjust load SplitIndexIT#testSplitIndexPrimaryTerm (#42477)
SplitIndexIT#testSplitIndexPrimaryTerm sometimes times out because it relocates many shards. This change adjusts the load and increases the timeout; the new shard-count selection is sketched below.
1 parent eda3da3 commit 5f3d0e4
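The reworked test first bounds the target at 20 shards and then draws a source shard count that evenly divides it, since a split requires the target shard count to be a multiple of the source shard count. A minimal standalone sketch of that selection, using plain java.util.concurrent.ThreadLocalRandom in place of the ESTestCase helpers randomIntBetween, randomValueOtherThanMany and between (the class name ShardCountSketch is purely illustrative):

import java.util.concurrent.ThreadLocalRandom;

// Illustrative sketch of the shard-count selection added in this commit.
public class ShardCountSketch {
    public static void main(String[] args) {
        ThreadLocalRandom random = ThreadLocalRandom.current();

        // Cap the split target at 20 shards so the test never has to allocate
        // and relocate an unbounded number of shards.
        int numberOfTargetShards = random.nextInt(2, 21); // inclusive 2..20

        // The source index needs a shard count that evenly divides the target
        // count, otherwise the split is rejected; retry until a divisor is
        // drawn (this mirrors randomValueOtherThanMany in the test).
        int numberOfShards;
        do {
            numberOfShards = random.nextInt(1, numberOfTargetShards); // 1..target-1
        } while (numberOfTargetShards % numberOfShards != 0);

        System.out.println("source shards: " + numberOfShards
            + ", target shards: " + numberOfTargetShards);
    }
}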

1 file changed: +7 -25 lines

server/src/test/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java

Lines changed: 7 additions & 25 deletions
@@ -45,8 +45,8 @@
 import org.elasticsearch.cluster.routing.Murmur3HashFunction;
 import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
-import org.elasticsearch.common.collect.ImmutableOpenMap;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.index.Index;
 import org.elasticsearch.index.IndexService;
@@ -62,7 +62,6 @@
 import java.io.UncheckedIOException;
 import java.util.Arrays;
 import java.util.HashSet;
-import java.util.List;
 import java.util.Set;
 import java.util.function.BiFunction;
 import java.util.stream.IntStream;
@@ -75,7 +74,6 @@
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.equalTo;
-import static org.hamcrest.Matchers.greaterThanOrEqualTo;
 
 public class SplitIndexIT extends ESIntegTestCase {
 
@@ -184,9 +182,6 @@ private void splitToN(int sourceShards, int firstSplitShards, int secondSplitSha
             }
         }
 
-        ImmutableOpenMap<String, DiscoveryNode> dataNodes = client().admin().cluster().prepareState().get().getState().nodes()
-            .getDataNodes();
-        assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2);
         ensureYellow();
         client().admin().indices().prepareUpdateSettings("source")
             .setSettings(Settings.builder()
@@ -287,19 +282,13 @@ public void assertAllUniqueDocs(SearchResponse response, int numDocs) {
     }
 
     public void testSplitIndexPrimaryTerm() throws Exception {
-        final List<Integer> factors = Arrays.asList(1, 2, 4, 8);
-        final List<Integer> numberOfShardsFactors = randomSubsetOf(scaledRandomIntBetween(1, factors.size()), factors);
-        final int numberOfShards = randomSubsetOf(numberOfShardsFactors).stream().reduce(1, (x, y) -> x * y);
-        final int numberOfTargetShards = numberOfShardsFactors.stream().reduce(2, (x, y) -> x * y);
+        int numberOfTargetShards = randomIntBetween(2, 20);
+        int numberOfShards = randomValueOtherThanMany(n -> numberOfTargetShards % n != 0, () -> between(1, numberOfTargetShards - 1));
         internalCluster().ensureAtLeastNumDataNodes(2);
         prepareCreate("source").setSettings(Settings.builder().put(indexSettings())
             .put("number_of_shards", numberOfShards)
             .put("index.number_of_routing_shards", numberOfTargetShards)).get();
-
-        final ImmutableOpenMap<String, DiscoveryNode> dataNodes =
-            client().admin().cluster().prepareState().get().getState().nodes().getDataNodes();
-        assertThat(dataNodes.size(), greaterThanOrEqualTo(2));
-        ensureYellow();
+        ensureGreen(TimeValue.timeValueSeconds(120)); // needs more than the default to allocate many shards
 
         // fail random primary shards to force primary terms to increase
        final Index source = resolveIndex("source");
@@ -352,7 +341,7 @@ public void testSplitIndexPrimaryTerm() throws Exception {
             .setResizeType(ResizeType.SPLIT)
             .setSettings(splitSettings).get());
 
-        ensureGreen();
+        ensureGreen(TimeValue.timeValueSeconds(120)); // needs more than the default to relocate many shards
 
         final IndexMetaData aftersplitIndexMetaData = indexMetaData(client(), "target");
         for (int shardId = 0; shardId < numberOfTargetShards; shardId++) {
@@ -365,9 +354,7 @@ private static IndexMetaData indexMetaData(final Client client, final String ind
         return clusterStateResponse.getState().metaData().index(index);
     }
 
-    public void testCreateSplitIndex() {
-        internalCluster().ensureAtLeastNumDataNodes(2);
-
+    public void testCreateSplitIndex() throws Exception {
         Version version = VersionUtils.randomIndexCompatibleVersion(random());
         prepareCreate("source").setSettings(Settings.builder().put(indexSettings())
             .put("number_of_shards", 1)
@@ -378,9 +365,7 @@ public void testCreateSplitIndex() {
             client().prepareIndex("source", "type")
                 .setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get();
         }
-        ImmutableOpenMap<String, DiscoveryNode> dataNodes =
-            client().admin().cluster().prepareState().get().getState().nodes().getDataNodes();
-        assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2);
+        internalCluster().ensureAtLeastNumDataNodes(2);
         // ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node
         // if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due
         // to the require._name below.
@@ -486,9 +471,6 @@ public void testCreateSplitWithIndexSort() throws Exception {
             client().prepareIndex("source", "type", Integer.toString(i))
                 .setSource("{\"foo\" : \"bar\", \"id\" : " + i + "}", XContentType.JSON).get();
         }
-        ImmutableOpenMap<String, DiscoveryNode> dataNodes = client().admin().cluster().prepareState().get().getState().nodes()
-            .getDataNodes();
-        assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2);
         // ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node
         // if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due
        // to the require._name below.
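Both ensureGreen calls in the reworked test now pass an explicit 120-second timeout, because allocating and then relocating up to 20 shards can outlast the default wait. A minimal sketch of that pattern in an ESIntegTestCase subclass, assuming the Elasticsearch test framework is on the classpath (the class and test names here are hypothetical):

import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.test.ESIntegTestCase;

// Illustrative sketch only: waits for a green cluster with a longer timeout
// than the ensureGreen() default before asserting anything about the shards.
public class LongerGreenTimeoutIT extends ESIntegTestCase {

    public void testWaitsLongerForManyShards() throws Exception {
        internalCluster().ensureAtLeastNumDataNodes(2);
        prepareCreate("source")
            .setSettings(Settings.builder().put(indexSettings()).put("number_of_shards", 20))
            .get();
        // Allocating many shards (plus replicas) can take longer than the
        // default wait, so give the cluster up to two minutes to go green.
        ensureGreen(TimeValue.timeValueSeconds(120));
    }
}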
