Skip to content

Commit 5cfcd7c

Browse files
Re-fetch shard info of primary when new node joins (#47035)
Today, we don't clear the shard info of the primary shard when a new node joins; we then risk making replica-allocation decisions based on stale information from the primary. The serious problem is that we can cancel a current recovery that is more advanced than the copy on the new node, because of the old info we hold from the primary. With this change, we ensure the shard info from the primary is not older than any node when allocating replicas. Relates #46959. This work was done by Henning in #42518. Co-authored-by: Henning Andersen <[email protected]>
1 parent ff15495 commit 5cfcd7c

File tree

4 files changed

+244
-0
lines changed

4 files changed

+244
-0
lines changed

server/src/main/java/org/elasticsearch/gateway/AsyncShardFetch.java

+7
Original file line numberDiff line numberDiff line change
@@ -232,6 +232,13 @@ protected synchronized void processAsyncFetch(List<T> responses, List<FailedNode
232232
*/
233233
protected abstract void reroute(ShardId shardId, String reason);
234234

235+
/**
236+
* Clear cache for node, ensuring next fetch will fetch a fresh copy.
237+
*/
238+
synchronized void clearCacheForNode(String nodeId) {
239+
cache.remove(nodeId);
240+
}
241+
235242
/**
236243
* Fills the shard fetched data with new (data) nodes and a fresh NodeEntry, and removes from
237244
* it nodes that are no longer part of the state.

server/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java

+47
Original file line numberDiff line numberDiff line change
@@ -19,12 +19,15 @@
1919

2020
package org.elasticsearch.gateway;
2121

22+
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
2223
import org.apache.logging.log4j.LogManager;
2324
import org.apache.logging.log4j.Logger;
2425
import org.apache.logging.log4j.message.ParameterizedMessage;
2526
import org.elasticsearch.action.ActionListener;
2627
import org.elasticsearch.action.support.nodes.BaseNodeResponse;
2728
import org.elasticsearch.action.support.nodes.BaseNodesResponse;
29+
import org.elasticsearch.cluster.node.DiscoveryNode;
30+
import org.elasticsearch.cluster.node.DiscoveryNodes;
2831
import org.elasticsearch.cluster.routing.RerouteService;
2932
import org.elasticsearch.cluster.routing.RoutingNodes;
3033
import org.elasticsearch.cluster.routing.ShardRouting;
@@ -35,11 +38,16 @@
3538
import org.elasticsearch.common.inject.Inject;
3639
import org.elasticsearch.common.lease.Releasables;
3740
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
41+
import org.elasticsearch.common.util.set.Sets;
3842
import org.elasticsearch.index.shard.ShardId;
3943
import org.elasticsearch.indices.store.TransportNodesListShardStoreMetaData;
4044

45+
import java.util.Collections;
4146
import java.util.List;
47+
import java.util.Set;
4248
import java.util.concurrent.ConcurrentMap;
49+
import java.util.stream.Collectors;
50+
import java.util.stream.StreamSupport;
4351

4452
public class GatewayAllocator {
4553

@@ -54,6 +62,7 @@ public class GatewayAllocator {
5462
asyncFetchStarted = ConcurrentCollections.newConcurrentMap();
5563
private final ConcurrentMap<ShardId, AsyncShardFetch<TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData>>
5664
asyncFetchStore = ConcurrentCollections.newConcurrentMap();
65+
private Set<String> lastSeenEphemeralIds = Collections.emptySet();
5766

5867
@Inject
5968
public GatewayAllocator(RerouteService rerouteService,
@@ -106,6 +115,7 @@ public void applyFailedShards(final RoutingAllocation allocation, final List<Fai
106115
public void allocateUnassigned(final RoutingAllocation allocation) {
107116
assert primaryShardAllocator != null;
108117
assert replicaShardAllocator != null;
118+
ensureAsyncFetchStorePrimaryRecency(allocation);
109119
innerAllocatedUnassigned(allocation, primaryShardAllocator, replicaShardAllocator);
110120
}
111121

@@ -138,6 +148,43 @@ public AllocateUnassignedDecision decideUnassignedShardAllocation(ShardRouting u
138148
}
139149
}
140150

151+
/**
152+
* Clear the fetched data for the primary to ensure we do not cancel recoveries based on excessively stale data.
153+
*/
154+
private void ensureAsyncFetchStorePrimaryRecency(RoutingAllocation allocation) {
155+
DiscoveryNodes nodes = allocation.nodes();
156+
if (hasNewNodes(nodes)) {
157+
final Set<String> newEphemeralIds = StreamSupport.stream(nodes.getDataNodes().spliterator(), false)
158+
.map(node -> node.value.getEphemeralId()).collect(Collectors.toSet());
159+
// Invalidate the cache if a data node has been added to the cluster. This ensures that we do not cancel a recovery if a node
160+
// drops out, we fetch the shard data, then some indexing happens and then the node rejoins the cluster again. There are other
161+
// ways we could decide to cancel a recovery based on stale data (e.g. changing allocation filters or a primary failure) but
162+
// making the wrong decision here is not catastrophic so we only need to cover the common case.
163+
logger.trace(() -> new ParameterizedMessage(
164+
"new nodes {} found, clearing primary async-fetch-store cache", Sets.difference(newEphemeralIds, lastSeenEphemeralIds)));
165+
asyncFetchStore.values().forEach(fetch -> clearCacheForPrimary(fetch, allocation));
166+
// recalc to also (lazily) clear out old nodes.
167+
this.lastSeenEphemeralIds = newEphemeralIds;
168+
}
169+
}
170+
171+
private static void clearCacheForPrimary(AsyncShardFetch<TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData> fetch,
172+
RoutingAllocation allocation) {
173+
ShardRouting primary = allocation.routingNodes().activePrimary(fetch.shardId);
174+
if (primary != null) {
175+
fetch.clearCacheForNode(primary.currentNodeId());
176+
}
177+
}
178+
179+
private boolean hasNewNodes(DiscoveryNodes nodes) {
180+
for (ObjectObjectCursor<String, DiscoveryNode> node : nodes.getDataNodes()) {
181+
if (lastSeenEphemeralIds.contains(node.value.getEphemeralId()) == false) {
182+
return true;
183+
}
184+
}
185+
return false;
186+
}
187+
141188
class InternalAsyncFetch<T extends BaseNodeResponse> extends AsyncShardFetch<T> {
142189

143190
InternalAsyncFetch(Logger logger, String type, ShardId shardId, Lister<? extends BaseNodesResponse<T>, T> action) {

server/src/test/java/org/elasticsearch/gateway/AsyncShardFetchTests.java

+80
Original file line numberDiff line numberDiff line change
@@ -46,6 +46,7 @@ public class AsyncShardFetchTests extends ESTestCase {
4646
private final DiscoveryNode node1 = new DiscoveryNode("node1", buildNewFakeTransportAddress(), Collections.emptyMap(),
4747
Collections.singleton(DiscoveryNodeRole.DATA_ROLE), Version.CURRENT);
4848
private final Response response1 = new Response(node1);
49+
private final Response response1_2 = new Response(node1);
4950
private final Throwable failure1 = new Throwable("simulated failure 1");
5051
private final DiscoveryNode node2 = new DiscoveryNode("node2", buildNewFakeTransportAddress(), Collections.emptyMap(),
5152
Collections.singleton(DiscoveryNodeRole.DATA_ROLE), Version.CURRENT);
@@ -274,6 +275,85 @@ public void testTwoNodesAddedInBetween() throws Exception {
274275
assertThat(fetchData.getData().get(node2), sameInstance(response2));
275276
}
276277

278+
public void testClearCache() throws Exception {
279+
DiscoveryNodes nodes = DiscoveryNodes.builder().add(node1).build();
280+
test.addSimulation(node1.getId(), response1);
281+
282+
// must work also with no data
283+
test.clearCacheForNode(node1.getId());
284+
285+
// no fetched data, request still on going
286+
AsyncShardFetch.FetchResult<Response> fetchData = test.fetchData(nodes, emptySet());
287+
assertThat(fetchData.hasData(), equalTo(false));
288+
assertThat(test.reroute.get(), equalTo(0));
289+
290+
test.fireSimulationAndWait(node1.getId());
291+
assertThat(test.reroute.get(), equalTo(1));
292+
293+
// verify we get back right data from node
294+
fetchData = test.fetchData(nodes, emptySet());
295+
assertThat(fetchData.hasData(), equalTo(true));
296+
assertThat(fetchData.getData().size(), equalTo(1));
297+
assertThat(fetchData.getData().get(node1), sameInstance(response1));
298+
299+
// second fetch gets same data
300+
fetchData = test.fetchData(nodes, emptySet());
301+
assertThat(fetchData.hasData(), equalTo(true));
302+
assertThat(fetchData.getData().size(), equalTo(1));
303+
assertThat(fetchData.getData().get(node1), sameInstance(response1));
304+
305+
test.clearCacheForNode(node1.getId());
306+
307+
// prepare next request
308+
test.addSimulation(node1.getId(), response1_2);
309+
310+
// no fetched data, new request on going
311+
fetchData = test.fetchData(nodes, emptySet());
312+
assertThat(fetchData.hasData(), equalTo(false));
313+
314+
test.fireSimulationAndWait(node1.getId());
315+
assertThat(test.reroute.get(), equalTo(2));
316+
317+
// verify we get new data back
318+
fetchData = test.fetchData(nodes, emptySet());
319+
assertThat(fetchData.hasData(), equalTo(true));
320+
assertThat(fetchData.getData().size(), equalTo(1));
321+
assertThat(fetchData.getData().get(node1), sameInstance(response1_2));
322+
}
323+
324+
public void testConcurrentRequestAndClearCache() throws Exception {
325+
DiscoveryNodes nodes = DiscoveryNodes.builder().add(node1).build();
326+
test.addSimulation(node1.getId(), response1);
327+
328+
// no fetched data, request still on going
329+
AsyncShardFetch.FetchResult<Response> fetchData = test.fetchData(nodes, emptySet());
330+
assertThat(fetchData.hasData(), equalTo(false));
331+
assertThat(test.reroute.get(), equalTo(0));
332+
333+
// clear cache while request is still on going, before it is processed
334+
test.clearCacheForNode(node1.getId());
335+
336+
test.fireSimulationAndWait(node1.getId());
337+
assertThat(test.reroute.get(), equalTo(1));
338+
339+
// prepare next request
340+
test.addSimulation(node1.getId(), response1_2);
341+
342+
// verify still no fetched data, request still on going
343+
fetchData = test.fetchData(nodes, emptySet());
344+
assertThat(fetchData.hasData(), equalTo(false));
345+
346+
test.fireSimulationAndWait(node1.getId());
347+
assertThat(test.reroute.get(), equalTo(2));
348+
349+
// verify we get new data back
350+
fetchData = test.fetchData(nodes, emptySet());
351+
assertThat(fetchData.hasData(), equalTo(true));
352+
assertThat(fetchData.getData().size(), equalTo(1));
353+
assertThat(fetchData.getData().get(node1), sameInstance(response1_2));
354+
355+
}
356+
277357
static class TestFetch extends AsyncShardFetch<Response> {
278358

279359
static class Entry {
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,110 @@
1+
/*
2+
* Licensed to Elasticsearch under one or more contributor
3+
* license agreements. See the NOTICE file distributed with
4+
* this work for additional information regarding copyright
5+
* ownership. Elasticsearch licenses this file to you under
6+
* the Apache License, Version 2.0 (the "License"); you may
7+
* not use this file except in compliance with the License.
8+
* You may obtain a copy of the License at
9+
*
10+
* http://www.apache.org/licenses/LICENSE-2.0
11+
*
12+
* Unless required by applicable law or agreed to in writing,
13+
* software distributed under the License is distributed on an
14+
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15+
* KIND, either express or implied. See the License for the
16+
* specific language governing permissions and limitations
17+
* under the License.
18+
*/
19+
20+
package org.elasticsearch.gateway;
21+
22+
import org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse;
23+
import org.elasticsearch.cluster.metadata.IndexMetaData;
24+
import org.elasticsearch.cluster.routing.UnassignedInfo;
25+
import org.elasticsearch.common.Priority;
26+
import org.elasticsearch.common.settings.Settings;
27+
import org.elasticsearch.index.IndexService;
28+
import org.elasticsearch.index.IndexSettings;
29+
import org.elasticsearch.indices.recovery.PeerRecoveryTargetService;
30+
import org.elasticsearch.plugins.Plugin;
31+
import org.elasticsearch.test.ESIntegTestCase;
32+
import org.elasticsearch.test.InternalSettingsPlugin;
33+
import org.elasticsearch.test.InternalTestCluster;
34+
import org.elasticsearch.test.transport.MockTransportService;
35+
import org.elasticsearch.transport.TransportService;
36+
37+
import java.util.Arrays;
38+
import java.util.Collection;
39+
import java.util.concurrent.CountDownLatch;
40+
import java.util.stream.Collectors;
41+
import java.util.stream.IntStream;
42+
43+
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
44+
import static org.hamcrest.Matchers.equalTo;
45+
import static org.hamcrest.Matchers.hasItem;
46+
47+
@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0)
48+
public class ReplicaShardAllocatorIT extends ESIntegTestCase {
49+
50+
@Override
51+
protected Collection<Class<? extends Plugin>> nodePlugins() {
52+
return Arrays.asList(MockTransportService.TestPlugin.class, InternalSettingsPlugin.class);
53+
}
54+
55+
public void testRecentPrimaryInformation() throws Exception {
56+
String indexName = "test";
57+
String nodeWithPrimary = internalCluster().startNode();
58+
assertAcked(
59+
client().admin().indices().prepareCreate(indexName)
60+
.setSettings(Settings.builder()
61+
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
62+
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
63+
.put(IndexSettings.FILE_BASED_RECOVERY_THRESHOLD_SETTING.getKey(), 1.0f)
64+
.put(IndexService.GLOBAL_CHECKPOINT_SYNC_INTERVAL_SETTING.getKey(), "100ms")
65+
.put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "1ms")));
66+
String nodeWithReplica = internalCluster().startDataOnlyNode();
67+
Settings nodeWithReplicaSettings = internalCluster().dataPathSettings(nodeWithReplica);
68+
ensureGreen(indexName);
69+
indexRandom(randomBoolean(), randomBoolean(), randomBoolean(), IntStream.range(0, between(10, 100))
70+
.mapToObj(n -> client().prepareIndex(indexName, "_doc").setSource("f", "v")).collect(Collectors.toList()));
71+
assertBusy(() -> {
72+
SyncedFlushResponse syncedFlushResponse = client().admin().indices().prepareSyncedFlush(indexName).get();
73+
assertThat(syncedFlushResponse.successfulShards(), equalTo(2));
74+
});
75+
internalCluster().stopRandomNode(InternalTestCluster.nameFilter(nodeWithReplica));
76+
if (randomBoolean()) {
77+
indexRandom(randomBoolean(), randomBoolean(), randomBoolean(), IntStream.range(0, between(10, 100))
78+
.mapToObj(n -> client().prepareIndex(indexName, "_doc").setSource("f", "v")).collect(Collectors.toList()));
79+
}
80+
CountDownLatch blockRecovery = new CountDownLatch(1);
81+
CountDownLatch recoveryStarted = new CountDownLatch(1);
82+
MockTransportService transportServiceOnPrimary
83+
= (MockTransportService) internalCluster().getInstance(TransportService.class, nodeWithPrimary);
84+
transportServiceOnPrimary.addSendBehavior((connection, requestId, action, request, options) -> {
85+
if (PeerRecoveryTargetService.Actions.FILES_INFO.equals(action)) {
86+
recoveryStarted.countDown();
87+
try {
88+
blockRecovery.await();
89+
} catch (InterruptedException e) {
90+
Thread.currentThread().interrupt();
91+
}
92+
}
93+
connection.sendRequest(requestId, action, request, options);
94+
});
95+
String newNode = internalCluster().startDataOnlyNode();
96+
recoveryStarted.await();
97+
// destroy sync_id after the recovery on the new node has started
98+
client().admin().indices().prepareFlush(indexName).setForce(true).get();
99+
// AllocationService only calls GatewayAllocator if there are unassigned shards
100+
assertAcked(client().admin().indices().prepareCreate("dummy-index").setWaitForActiveShards(0)
101+
.setSettings(Settings.builder().put("index.routing.allocation.require.attr", "not-found")));
102+
internalCluster().startDataOnlyNode(nodeWithReplicaSettings);
103+
// need to wait for events to ensure the reroute has happened since we perform it async when a new node joins.
104+
client().admin().cluster().prepareHealth(indexName).setWaitForYellowStatus().setWaitForEvents(Priority.LANGUID).get();
105+
blockRecovery.countDown();
106+
ensureGreen(indexName);
107+
assertThat(internalCluster().nodesInclude(indexName), hasItem(newNode));
108+
transportServiceOnPrimary.clearAllRules();
109+
}
110+
}

0 commit comments

Comments
 (0)