Commit d7179ca
Add a soft limit on the number of shards that can be queried in a single search request. #17396
This commit adds the new `action.search.shard_count.limit` setting which configures the maximum number of shards that can be queried in a single search request. It has a default value of 1000.
1 parent e25ccb9 commit d7179ca
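When the limit is exceeded, the coordinating node rejects the request up front with an `IllegalArgumentException` instead of fanning the query out to the shards. From a client's point of view that looks roughly like the sketch below (the `logs-*` pattern, the class name, and the shard count in the comment are made up for illustration):

import org.elasticsearch.client.Client;

// Sketch of the client-visible behaviour added by this commit. "client" is any
// connected Client; "logs-*" is assumed to resolve to more than 1000 shards.
public class ShardLimitRejectionExample {
    public static void search(Client client) {
        try {
            client.prepareSearch("logs-*").get();
        } catch (IllegalArgumentException e) {
            // The message begins with:
            // "Trying to query <N> shards, which is over the limit of 1000. ..."
            System.err.println(e.getMessage());
        }
    }
}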

File tree: 5 files changed, +98 -1 lines changed


core/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java

Lines changed: 10 additions & 1 deletion

@@ -107,7 +107,16 @@ protected AbstractSearchAsyncAction(ESLogger logger, SearchTransportService sear
                 request.indices());
 
         shardsIts = clusterService.operationRouting().searchShards(clusterState, concreteIndices, routingMap, request.preference());
-        expectedSuccessfulOps = shardsIts.size();
+        final int shardCount = shardsIts.size();
+        final long shardCountLimit = clusterService.getClusterSettings().get(TransportSearchAction.SHARD_COUNT_LIMIT_SETTING);
+        if (shardCount > shardCountLimit) {
+            throw new IllegalArgumentException("Trying to query " + shardCount + " shards, which is over the limit of "
+                + shardCountLimit + ". This limit exists because querying many shards at the same time can make the "
+                + "job of the coordinating node very CPU and/or memory intensive. It is usually a better idea to "
+                + "have a smaller number of larger shards. Update [" + TransportSearchAction.SHARD_COUNT_LIMIT_SETTING.getKey()
+                + "] to a greater value if you really want to query that many shards at the same time.");
+        }
+        expectedSuccessfulOps = shardCount;
         // we need to add 1 for non active partition, since we count it in the total!
         expectedTotalOps = shardsIts.totalSizeWith1ForEmpty();

core/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java

Lines changed: 6 additions & 0 deletions

@@ -26,6 +26,8 @@
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Setting;
+import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.IndexNotFoundException;
 import org.elasticsearch.indices.IndexClosedException;
@@ -45,6 +47,10 @@
  */
 public class TransportSearchAction extends HandledTransportAction<SearchRequest, SearchResponse> {
 
+    /** The maximum number of shards for a single search request. */
+    public static final Setting<Long> SHARD_COUNT_LIMIT_SETTING = Setting.longSetting(
+            "action.search.shard_count.limit", 1000L, 1L, Property.Dynamic, Property.NodeScope);
+
     private final ClusterService clusterService;
     private final SearchTransportService searchTransportService;
     private final SearchPhaseController searchPhaseController;

core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java

Lines changed: 2 additions & 0 deletions

@@ -19,6 +19,7 @@
 package org.elasticsearch.common.settings;
 
 import org.elasticsearch.action.admin.indices.close.TransportCloseIndexAction;
+import org.elasticsearch.action.search.TransportSearchAction;
 import org.elasticsearch.action.support.AutoCreateIndex;
 import org.elasticsearch.action.support.DestructiveOperations;
 import org.elasticsearch.action.support.master.TransportMasterNodeReadAction;
@@ -258,6 +259,7 @@ public void apply(Settings value, Settings current, Settings previous) {
     ClusterService.CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING,
     SearchService.DEFAULT_SEARCH_TIMEOUT_SETTING,
     ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING,
+    TransportSearchAction.SHARD_COUNT_LIMIT_SETTING,
     TransportService.TRACE_LOG_EXCLUDE_SETTING,
     TransportService.TRACE_LOG_INCLUDE_SETTING,
     TransportCloseIndexAction.CLUSTER_INDICES_CLOSE_ENABLE_SETTING,
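Taken together, the three changes above follow the usual pattern for a dynamic, node-scoped setting: define it with `Setting.longSetting(..., Property.Dynamic, Property.NodeScope)` in `TransportSearchAction`, register it in `ClusterSettings` so that updates to it are accepted, and read the current value from `clusterService.getClusterSettings()` when a search request arrives. A condensed sketch of that pattern (class and method names here are illustrative; only the setting itself comes from the commit):

import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;

// Illustrative sketch, not part of the commit.
public class DynamicShardLimitSketch {

    // 1) Define the setting (mirrors TransportSearchAction.SHARD_COUNT_LIMIT_SETTING):
    //    a long with default 1000, minimum 1, dynamically updatable, node-scoped.
    public static final Setting<Long> SHARD_COUNT_LIMIT_SETTING = Setting.longSetting(
            "action.search.shard_count.limit", 1000L, 1L, Property.Dynamic, Property.NodeScope);

    // 2) The setting must also be registered in ClusterSettings (as the diff above does);
    //    without that registration, dynamic updates to the key would be rejected.

    // 3) Read the current value at request time; because the setting is dynamic, this
    //    reflects any transient or persistent cluster-settings update.
    static long currentLimit(ClusterService clusterService) {
        return clusterService.getClusterSettings().get(SHARD_COUNT_LIMIT_SETTING);
    }
}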
core/src/test/java/org/elasticsearch/action/search/TransportSearchIT.java

Lines changed: 73 additions & 0 deletions

@@ -0,0 +1,73 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.search;
+
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.test.ESIntegTestCase;
+
+import java.util.Collections;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.containsString;
+
+public class TransportSearchIT extends ESIntegTestCase {
+
+    public void testShardCountLimit() throws Exception {
+        try {
+            final int numPrimaries1 = randomIntBetween(2, 10);
+            final int numPrimaries2 = randomIntBetween(1, 10);
+            assertAcked(prepareCreate("test1")
+                    .setSettings(IndexMetaData.SETTING_NUMBER_OF_SHARDS, numPrimaries1));
+            assertAcked(prepareCreate("test2")
+                    .setSettings(IndexMetaData.SETTING_NUMBER_OF_SHARDS, numPrimaries2));
+            ensureYellow("test1", "test2");
+
+            // no exception
+            client().prepareSearch("test1").get();
+
+            assertAcked(client().admin().cluster().prepareUpdateSettings()
+                    .setTransientSettings(Collections.singletonMap(
+                            TransportSearchAction.SHARD_COUNT_LIMIT_SETTING.getKey(), numPrimaries1 - 1)));
+
+            IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
+                    () -> client().prepareSearch("test1").get());
+            assertThat(e.getMessage(), containsString("Trying to query " + numPrimaries1
+                    + " shards, which is over the limit of " + (numPrimaries1 - 1)));
+
+            assertAcked(client().admin().cluster().prepareUpdateSettings()
+                    .setTransientSettings(Collections.singletonMap(
+                            TransportSearchAction.SHARD_COUNT_LIMIT_SETTING.getKey(), numPrimaries1)));
+
+            // no exception
+            client().prepareSearch("test1").get();
+
+            e = expectThrows(IllegalArgumentException.class,
+                    () -> client().prepareSearch("test1", "test2").get());
+            assertThat(e.getMessage(), containsString("Trying to query " + (numPrimaries1 + numPrimaries2)
+                    + " shards, which is over the limit of " + numPrimaries1));
+
+        } finally {
+            assertAcked(client().admin().cluster().prepareUpdateSettings()
+                    .setTransientSettings(Collections.singletonMap(
+                            TransportSearchAction.SHARD_COUNT_LIMIT_SETTING.getKey(), null)));
+        }
+    }
+
+}

docs/reference/search/search.asciidoc

Lines changed: 7 additions & 0 deletions

@@ -49,3 +49,10 @@ Or even search across all indices and all types:
 --------------------------------------------------
 $ curl -XGET 'http://localhost:9200/_search?q=tag:wow'
 --------------------------------------------------
+
+By default elasticsearch rejects search requests that would query more than
+1000 shards. The reason is that such large numbers of shards make the job of
+the coordinating node very CPU and memory intensive. It is usually a better
+idea to organize data in such a way that there are fewer larger shards. In
+case you would like to bypass this limit, which is discouraged, you can update
+the `action.search.shard_count.limit` cluster setting to a greater value.
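Because the setting is dynamic, the limit can be raised at runtime rather than in `elasticsearch.yml`. Below is a minimal sketch using the Java client, following the same transient-settings call the integration test above uses (the value 2000 and the class name are examples only); the equivalent REST call is a settings update against the `_cluster/settings` endpoint with the same key under `transient`:

import java.util.Collections;
import org.elasticsearch.client.Client;

// Sketch: raise the soft shard-count limit to 2000 through a transient cluster
// setting. Passing null instead of a number resets it to the default of 1000,
// as the integration test's finally block does.
public class RaiseShardCountLimit {
    public static void raiseTo2000(Client client) {
        client.admin().cluster().prepareUpdateSettings()
                .setTransientSettings(Collections.singletonMap(
                        "action.search.shard_count.limit", 2000))
                .get();
    }
}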
