Skip to content

Commit ceaa28e

Browse files
committed
Increase timeout in testFollowIndexWithConcurrentMappingChanges (#60534)
The test failed because the leader was consuming a lot of CPU to process many mapping updates. This commit reduces the number of mapping updates, increases the timeout, and adds more debug info. Closes #59832
1 parent bf7eecf commit ceaa28e

File tree

2 files changed

+32
-32
lines changed

2 files changed

+32
-32
lines changed

x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java

Lines changed: 11 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -249,38 +249,23 @@ public void testFollowIndexWithConcurrentMappingChanges() throws Exception {
249249
final int firstBatchNumDocs = randomIntBetween(2, 64);
250250
logger.info("Indexing [{}] docs as first batch", firstBatchNumDocs);
251251
for (int i = 0; i < firstBatchNumDocs; i++) {
252-
final String source = String.format(Locale.ROOT, "{\"f\":%d}", i);
253-
leaderClient().prepareIndex("index1", "doc", Integer.toString(i)).setSource(source, XContentType.JSON).get();
252+
leaderClient().prepareIndex("index1", "_doc").setId(Integer.toString(i)).setSource("f", i).get();
254253
}
255254

256255
AtomicBoolean isRunning = new AtomicBoolean(true);
257256

258257
// Concurrently index new docs with mapping changes
258+
int numFields = between(10, 20);
259259
Thread thread = new Thread(() -> {
260-
int docID = 10000;
261-
char[] chars = "abcdeghijklmnopqrstuvwxyz".toCharArray();
262-
for (char c : chars) {
260+
int numDocs = between(10, 200);
261+
for (int i = 0; i < numDocs; i++) {
263262
if (isRunning.get() == false) {
264263
break;
265264
}
266-
final String source;
267-
long valueToPutInDoc = randomLongBetween(0, 50000);
268-
if (randomBoolean()) {
269-
source = String.format(Locale.ROOT, "{\"%c\":%d}", c, valueToPutInDoc);
270-
} else {
271-
source = String.format(Locale.ROOT, "{\"%c\":\"%d\"}", c, valueToPutInDoc);
272-
}
273-
for (int i = 1; i < 10; i++) {
274-
if (isRunning.get() == false) {
275-
break;
276-
}
277-
leaderClient().prepareIndex("index1", "doc", Long.toString(docID++)).setSource(source, XContentType.JSON).get();
278-
if (rarely()) {
279-
leaderClient().admin().indices().prepareFlush("index1").setForce(true).get();
280-
}
281-
}
282-
if (between(0, 100) < 20) {
283-
leaderClient().admin().indices().prepareFlush("index1").setForce(false).setWaitIfOngoing(false).get();
265+
final String field = "f-" + between(1, numFields);
266+
leaderClient().prepareIndex("index1", "_doc").setSource(field, between(0, 1000)).get();
267+
if (rarely()) {
268+
leaderClient().admin().indices().prepareFlush("index1").setWaitIfOngoing(false).setForce(false).get();
284269
}
285270
}
286271
});
@@ -298,16 +283,14 @@ public void testFollowIndexWithConcurrentMappingChanges() throws Exception {
298283
final int secondBatchNumDocs = randomIntBetween(2, 64);
299284
logger.info("Indexing [{}] docs as second batch", secondBatchNumDocs);
300285
for (int i = firstBatchNumDocs; i < firstBatchNumDocs + secondBatchNumDocs; i++) {
301-
final String source = String.format(Locale.ROOT, "{\"f\":%d}", i);
302-
leaderClient().prepareIndex("index1", "doc", Integer.toString(i)).setSource(source, XContentType.JSON).get();
286+
leaderClient().prepareIndex("index1", "_doc").setId(Integer.toString(i)).setSource("f", i).get();
303287
}
304-
305-
for (int i = firstBatchNumDocs; i < firstBatchNumDocs + secondBatchNumDocs; i++) {
288+
for (int i = 0; i < firstBatchNumDocs + secondBatchNumDocs; i++) {
306289
assertBusy(assertExpectedDocumentRunnable(i), 1, TimeUnit.MINUTES);
307290
}
308-
309291
isRunning.set(false);
310292
thread.join();
293+
assertIndexFullyReplicatedToFollower("index1", "index2");
311294
}
312295

313296
public void testFollowIndexWithoutWaitForComplete() throws Exception {

x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java

Lines changed: 21 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,7 @@
1010
import org.elasticsearch.action.ActionListener;
1111
import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest;
1212
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
13+
import org.elasticsearch.action.admin.cluster.node.hotthreads.NodeHotThreads;
1314
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksAction;
1415
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest;
1516
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse;
@@ -365,7 +366,7 @@ protected final ClusterHealthStatus ensureFollowerGreen(String... indices) {
365366

366367
protected final ClusterHealthStatus ensureFollowerGreen(boolean waitForNoInitializingShards, String... indices) {
367368
logger.info("ensure green follower indices {}", Arrays.toString(indices));
368-
return ensureColor(clusterGroup.followerCluster, ClusterHealthStatus.GREEN, TimeValue.timeValueSeconds(30),
369+
return ensureColor(clusterGroup.followerCluster, ClusterHealthStatus.GREEN, TimeValue.timeValueSeconds(60),
369370
waitForNoInitializingShards, indices);
370371
}
371372

@@ -387,10 +388,21 @@ private ClusterHealthStatus ensureColor(TestCluster testCluster,
387388

388389
ClusterHealthResponse actionGet = testCluster.client().admin().cluster().health(healthRequest).actionGet();
389390
if (actionGet.isTimedOut()) {
390-
logger.info("{} timed out, cluster state:\n{}\n{}",
391+
logger.info("{} timed out: " +
392+
"\nleader cluster state:\n{}" +
393+
"\nleader cluster hot threads:\n{}" +
394+
"\nleader cluster tasks:\n{}" +
395+
"\nfollower cluster state:\n{}" +
396+
"\nfollower cluster hot threads:\n{}" +
397+
"\nfollower cluster tasks:\n{}",
391398
method,
392-
testCluster.client().admin().cluster().prepareState().get().getState(),
393-
testCluster.client().admin().cluster().preparePendingClusterTasks().get());
399+
leaderClient().admin().cluster().prepareState().get().getState(),
400+
getHotThreads(leaderClient()),
401+
leaderClient().admin().cluster().preparePendingClusterTasks().get(),
402+
followerClient().admin().cluster().prepareState().get().getState(),
403+
getHotThreads(followerClient()),
404+
followerClient().admin().cluster().preparePendingClusterTasks().get()
405+
);
394406
fail("timed out waiting for " + color + " state");
395407
}
396408
assertThat("Expected at least " + clusterHealthStatus + " but got " + actionGet.getStatus(),
@@ -399,6 +411,11 @@ private ClusterHealthStatus ensureColor(TestCluster testCluster,
399411
return actionGet.getStatus();
400412
}
401413

414+
static String getHotThreads(Client client) {
415+
return client.admin().cluster().prepareNodesHotThreads().setThreads(99999).setIgnoreIdleThreads(false)
416+
.get().getNodes().stream().map(NodeHotThreads::getHotThreads).collect(Collectors.joining("\n"));
417+
}
418+
402419
protected final Index resolveLeaderIndex(String index) {
403420
GetIndexResponse getIndexResponse = leaderClient().admin().indices().prepareGetIndex().setIndices(index).get();
404421
assertTrue("index " + index + " not found", getIndexResponse.getSettings().containsKey(index));

0 commit comments

Comments
 (0)