
Commit 1166c19

Merge branch '6.x' into ccr-6.x
* 6.x:
  Fixed byte buffer leak in Netty4 request handler
  Avoid uid creation in ParsedDocument (#27241)
  Upgrade to Lucene 7.1 (#27225)
  Add additional explanations around discovery.zen.ping_timeout (#27231)
  Fix compile error
  Remove unused searcher parameter in SearchService#createContext (#27227)
  Fix sequence number assertions in BWC tests
  Move IndexShard#getWritingBytes() under InternalEngine (#27209)
  Adjust bwc version for exists query tests

2 parents 0ab744e + 7cb202b commit 1166c19

61 files changed: +107 -136 lines changed


buildSrc/version.properties

+1 -1

@@ -1,6 +1,6 @@
 # When updating elasticsearch, please update 'rest' version in core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy
 elasticsearch = 6.1.0
-lucene = 7.1.0-snapshot-f33ed4ba12a
+lucene = 7.1.0

 # optional dependencies
 spatial4j = 0.6

core/licenses/lucene-analyzers-common-7.1.0-snapshot-f33ed4ba12a.jar.sha1

-1
This file was deleted.

core/licenses/lucene-analyzers-common-7.1.0.jar.sha1

+1
@@ -0,0 +1 @@
+a508bf6b580471ee568dab7d2acfedfa5aadce70

core/licenses/lucene-backward-codecs-7.1.0-snapshot-f33ed4ba12a.jar.sha1

-1
This file was deleted.

core/licenses/lucene-backward-codecs-7.1.0.jar.sha1

+1
@@ -0,0 +1 @@
+804a7ce82bba3d085733486bfde4846ecb77ce01

core/licenses/lucene-core-7.1.0-snapshot-f33ed4ba12a.jar.sha1

-1
This file was deleted.

core/licenses/lucene-core-7.1.0.jar.sha1

+1
@@ -0,0 +1 @@
+dd291b7ebf4845483895724d2562214dc7f40049

core/licenses/lucene-grouping-7.1.0-snapshot-f33ed4ba12a.jar.sha1

-1
This file was deleted.

core/licenses/lucene-grouping-7.1.0.jar.sha1

+1
@@ -0,0 +1 @@
+0732d16c16421fca058a2a07ca4081ec7696365b

core/licenses/lucene-highlighter-7.1.0-snapshot-f33ed4ba12a.jar.sha1

-1
This file was deleted.

core/licenses/lucene-highlighter-7.1.0.jar.sha1

+1
@@ -0,0 +1 @@
+596550daabae765ad685112e0fe7c4f0fdfccb3f

core/licenses/lucene-join-7.1.0-snapshot-f33ed4ba12a.jar.sha1

-1
This file was deleted.

core/licenses/lucene-join-7.1.0.jar.sha1

+1
@@ -0,0 +1 @@
+5f26dd64c195258a81175772ef7fe105e7d60a26

core/licenses/lucene-memory-7.1.0-snapshot-f33ed4ba12a.jar.sha1

-1
This file was deleted.

core/licenses/lucene-memory-7.1.0.jar.sha1

+1
@@ -0,0 +1 @@
+3ef64c58d0c09ca40d848efa96b585b7476271f2

core/licenses/lucene-misc-7.1.0-snapshot-f33ed4ba12a.jar.sha1

-1
This file was deleted.

core/licenses/lucene-misc-7.1.0.jar.sha1

+1
@@ -0,0 +1 @@
+1496ee5fa62206ee5ddf51042a340d6a9ee3b5de

core/licenses/lucene-queries-7.1.0-snapshot-f33ed4ba12a.jar.sha1

-1
This file was deleted.

core/licenses/lucene-queries-7.1.0.jar.sha1

+1
@@ -0,0 +1 @@
+1554920ab207a3245fa408d022a5c90ad3a1fea3

core/licenses/lucene-queryparser-7.1.0-snapshot-f33ed4ba12a.jar.sha1

-1
This file was deleted.

core/licenses/lucene-queryparser-7.1.0.jar.sha1

+1
@@ -0,0 +1 @@
+5767c15c5ee97926829fd8a4337e434fa95f3c08

core/licenses/lucene-sandbox-7.1.0-snapshot-f33ed4ba12a.jar.sha1

-1
This file was deleted.

core/licenses/lucene-sandbox-7.1.0.jar.sha1

+1
@@ -0,0 +1 @@
+691f7b9ac05f3ad2ac7e80733ef70247904bd3ae

core/licenses/lucene-spatial-7.1.0-snapshot-f33ed4ba12a.jar.sha1

-1
This file was deleted.

core/licenses/lucene-spatial-7.1.0.jar.sha1

+1
@@ -0,0 +1 @@
+6c64c04d802badb800516a8a574cb993929c3805

core/licenses/lucene-spatial-extras-7.1.0-snapshot-f33ed4ba12a.jar.sha1

-1
This file was deleted.

core/licenses/lucene-spatial-extras-7.1.0.jar.sha1

+1
@@ -0,0 +1 @@
+3f1bc1aada8f06b176b782da24b9d7ad9641c41a

core/licenses/lucene-spatial3d-7.1.0-snapshot-f33ed4ba12a.jar.sha1

-1
This file was deleted.

core/licenses/lucene-spatial3d-7.1.0.jar.sha1

+1
@@ -0,0 +1 @@
+8ded650aed23efb775f17be496e3e3870214e23b

core/licenses/lucene-suggest-7.1.0-snapshot-f33ed4ba12a.jar.sha1

-1
This file was deleted.

core/licenses/lucene-suggest-7.1.0.jar.sha1

+1
@@ -0,0 +1 @@
+8d0ed1589ebdccf34e888c6efc0134a13a238c85

core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java

+1 -1

@@ -155,7 +155,7 @@ protected ShardValidateQueryResponse shardOperation(ShardValidateQueryRequest re
         String error = null;
         ShardSearchLocalRequest shardSearchLocalRequest = new ShardSearchLocalRequest(request.shardId(), request.types(),
             request.nowInMillis(), request.filteringAliases());
-        SearchContext searchContext = searchService.createSearchContext(shardSearchLocalRequest, SearchService.NO_TIMEOUT, null);
+        SearchContext searchContext = searchService.createSearchContext(shardSearchLocalRequest, SearchService.NO_TIMEOUT);
         try {
             ParsedQuery parsedQuery = searchContext.getQueryShardContext().toQuery(request.query());
             searchContext.parsedQuery(parsedQuery);

core/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java

+1 -1

@@ -90,7 +90,7 @@ protected void resolveRequest(ClusterState state, InternalRequest request) {
     protected ExplainResponse shardOperation(ExplainRequest request, ShardId shardId) throws IOException {
         ShardSearchLocalRequest shardSearchLocalRequest = new ShardSearchLocalRequest(shardId,
             new String[]{request.type()}, request.nowInMillis, request.filteringAlias());
-        SearchContext context = searchService.createSearchContext(shardSearchLocalRequest, SearchService.NO_TIMEOUT, null);
+        SearchContext context = searchService.createSearchContext(shardSearchLocalRequest, SearchService.NO_TIMEOUT);
         Engine.GetResult result = null;
         try {
             Term uidTerm = context.mapperService().createUidTerm(request.type(), request.id());

core/src/main/java/org/elasticsearch/index/engine/Engine.java

+4 -1

@@ -187,6 +187,9 @@ public MergeStats getMergeStats() {
     /** returns the history uuid for the engine */
     public abstract String getHistoryUUID();

+    /** Returns how many bytes we are currently moving from heap to disk */
+    public abstract long getWritingBytes();
+
     /**
      * A throttling class that can be activated, causing the
      * {@code acquireThrottle} method to block on a lock when throttling
@@ -707,7 +710,7 @@ protected void writerSegmentStats(SegmentsStats stats) {
     }

     /** How much heap is used that would be freed by a refresh. Note that this may throw {@link AlreadyClosedException}. */
-    public abstract long getIndexBufferRAMBytesUsed();
+    public abstract long getIndexBufferRAMBytesUsed();

     protected Segment[] getSegmentInfo(SegmentInfos lastCommittedSegmentInfos, boolean verbose) {
         ensureOpen();

core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java

+21 -19

@@ -140,6 +140,12 @@ public class InternalEngine extends Engine {
     private final AtomicLong maxUnsafeAutoIdTimestamp = new AtomicLong(-1);
     private final CounterMetric numVersionLookups = new CounterMetric();
     private final CounterMetric numIndexVersionsLookups = new CounterMetric();
+    /**
+     * How many bytes we are currently moving to disk, via either IndexWriter.flush or refresh. IndexingMemoryController polls this
+     * across all shards to decide if throttling is necessary because moving bytes to disk is falling behind vs incoming documents
+     * being indexed/deleted.
+     */
+    private final AtomicLong writingBytes = new AtomicLong();

     @Nullable
     private final String historyUUID;
@@ -422,6 +428,12 @@ public String getHistoryUUID() {
         return historyUUID;
     }

+    /** Returns how many bytes we are currently moving from indexing buffer to segments on disk */
+    @Override
+    public long getWritingBytes() {
+        return writingBytes.get();
+    }
+
     /**
      * Reads the current stored translog ID from the IW commit data. If the id is not found, recommits the current
      * translog id into lucene and returns null.
@@ -1230,21 +1242,26 @@ public void refresh(String source) throws EngineException {
     }

     final void refresh(String source, SearcherScope scope) throws EngineException {
+        long bytes = 0;
         // we obtain a read lock here, since we don't want a flush to happen while we are refreshing
         // since it flushes the index as well (though, in terms of concurrency, we are allowed to do it)
         try (ReleasableLock lock = readLock.acquire()) {
             ensureOpen();
+            bytes = indexWriter.ramBytesUsed();
             switch (scope) {
                 case EXTERNAL:
                     // even though we maintain 2 managers we really do the heavy-lifting only once.
                     // the second refresh will only do the extra work we have to do for warming caches etc.
+                    writingBytes.addAndGet(bytes);
                     externalSearcherManager.maybeRefreshBlocking();
                     // the break here is intentional we never refresh both internal / external together
                     break;
                 case INTERNAL:
+                    final long versionMapBytes = versionMap.ramBytesUsedForRefresh();
+                    bytes += versionMapBytes;
+                    writingBytes.addAndGet(bytes);
                     internalSearcherManager.maybeRefreshBlocking();
                     break;
-
                 default:
                     throw new IllegalArgumentException("unknown scope: " + scope);
             }
@@ -1258,6 +1275,8 @@ final void refresh(String source, SearcherScope scope) throws EngineException {
                 e.addSuppressed(inner);
             }
             throw new RefreshFailedEngineException(shardId, e);
+        } finally {
+            writingBytes.addAndGet(-bytes);
         }

         // TODO: maybe we should just put a scheduled job in threadPool?
@@ -1271,24 +1290,7 @@ final void refresh(String source, SearcherScope scope) throws EngineException {
     public void writeIndexingBuffer() throws EngineException {
         // we obtain a read lock here, since we don't want a flush to happen while we are writing
         // since it flushes the index as well (though, in terms of concurrency, we are allowed to do it)
-        try (ReleasableLock lock = readLock.acquire()) {
-            ensureOpen();
-            final long versionMapBytes = versionMap.ramBytesUsedForRefresh();
-            final long indexingBufferBytes = indexWriter.ramBytesUsed();
-            logger.debug("use refresh to write indexing buffer (heap size=[{}]), to also clear version map (heap size=[{}])",
-                new ByteSizeValue(indexingBufferBytes), new ByteSizeValue(versionMapBytes));
-            refresh("write indexing buffer", SearcherScope.INTERNAL);
-        } catch (AlreadyClosedException e) {
-            failOnTragicEvent(e);
-            throw e;
-        } catch (Exception e) {
-            try {
-                failEngine("writeIndexingBuffer failed", e);
-            } catch (Exception inner) {
-                e.addSuppressed(inner);
-            }
-            throw new RefreshFailedEngineException(shardId, e);
-        }
+        refresh("write indexing buffer", SearcherScope.INTERNAL);
     }

     @Override

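The hunks above move the writingBytes accounting from IndexShard into the engine's own refresh path. As a rough orientation aid, here is a minimal, self-contained sketch of that pattern; it is not Elasticsearch code, and every class, field, and helper below (WritingBytesSketch, bufferedBytes, doBlockingRefresh) is invented for illustration. It only demonstrates the shape of the change: a shared AtomicLong is incremented by the number of buffered bytes before the blocking refresh and is always decremented in a finally block, so a poller observes a non-zero value only while bytes are actually being moved to disk.

import java.util.concurrent.atomic.AtomicLong;

// Hypothetical stand-in for InternalEngine's byte accounting; all names are illustrative only.
public class WritingBytesSketch {

    // Polled concurrently (by an IndexingMemoryController-like component in the real code).
    private final AtomicLong writingBytes = new AtomicLong();

    // Stand-in for indexWriter.ramBytesUsed() (plus versionMap.ramBytesUsedForRefresh() for internal refreshes).
    private long bufferedBytes() {
        return 42_000_000L;
    }

    // Stand-in for searcherManager.maybeRefreshBlocking().
    private void doBlockingRefresh() throws InterruptedException {
        Thread.sleep(100);
    }

    public long getWritingBytes() {
        return writingBytes.get();
    }

    public void refresh() throws InterruptedException {
        long bytes = 0;
        try {
            bytes = bufferedBytes();
            writingBytes.addAndGet(bytes);    // visible to pollers while the refresh is in flight
            doBlockingRefresh();
        } finally {
            writingBytes.addAndGet(-bytes);   // always undone, even if the refresh throws
        }
    }

    public static void main(String[] args) throws Exception {
        WritingBytesSketch engine = new WritingBytesSketch();
        Thread refresher = new Thread(() -> {
            try {
                engine.refresh();
            } catch (InterruptedException ignored) {
            }
        });
        refresher.start();
        Thread.sleep(20);
        System.out.println("while refreshing: " + engine.getWritingBytes()); // ~42000000
        refresher.join();
        System.out.println("after refresh: " + engine.getWritingBytes());    // 0
    }
}

The finally block mirrors the @@ -1258,6 +1275,8 @@ hunk: whichever switch branch incremented the counter, the decrement runs exactly once, even when the refresh fails.
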
core/src/main/java/org/elasticsearch/index/mapper/ParsedDocument.java

+1 -6

@@ -20,7 +20,6 @@
 package org.elasticsearch.index.mapper;

 import org.apache.lucene.document.Field;
-import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.index.mapper.ParseContext.Document;
@@ -35,7 +34,6 @@ public class ParsedDocument {
     private final Field version;

     private final String id, type;
-    private final BytesRef uid;
     private final SeqNoFieldMapper.SequenceIDFields seqID;

     private final String routing;
@@ -62,7 +60,6 @@ public ParsedDocument(Field version,
         this.seqID = seqID;
         this.id = id;
         this.type = type;
-        this.uid = Uid.createUidAsBytes(type, id);
         this.routing = routing;
         this.documents = documents;
         this.source = source;
@@ -140,9 +137,7 @@ public void addDynamicMappingsUpdate(Mapping update) {

     @Override
     public String toString() {
-        StringBuilder sb = new StringBuilder();
-        sb.append("Document ").append("uid[").append(uid).append("] doc [").append(documents).append("]");
-        return sb.toString();
+        return "Document uid[" + Uid.createUidAsBytes(type, id) + "] doc [" + documents + ']';
     }

 }

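The ParsedDocument change above drops a per-document uid allocation: the uid bytes used to be built eagerly in the constructor even though they were only read in toString(), and are now built lazily when toString() is called. Below is a tiny invented example of the same before/after shape; LazyUidDoc and makeUid are hypothetical names, not the Elasticsearch API.

// Invented example; not the Elasticsearch classes.
public class LazyUidDoc {

    private final String type;
    private final String id;

    public LazyUidDoc(String type, String id) {
        this.type = type;
        this.id = id;
        // Before the change, the analogue of Uid.createUidAsBytes(type, id) was computed here
        // and stored in a field, paying the allocation for every document that was parsed.
    }

    // Stand-in for Uid.createUidAsBytes(type, id).
    private static String makeUid(String type, String id) {
        return type + "#" + id;
    }

    @Override
    public String toString() {
        // After the change the uid is only materialized when someone actually asks for it.
        return "Document uid[" + makeUid(type, id) + "] doc [...]";
    }

    public static void main(String[] args) {
        System.out.println(new LazyUidDoc("doc", "1"));
    }
}

Since toString() is rarely called on the indexing hot path, deferring the concatenation avoids work for every parsed document.
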
core/src/main/java/org/elasticsearch/index/shard/IndexShard.java

+9 -49

@@ -182,12 +182,6 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
     private final QueryCachingPolicy cachingPolicy;
     private final Supplier<Sort> indexSortSupplier;

-    /**
-     * How many bytes we are currently moving to disk, via either IndexWriter.flush or refresh. IndexingMemoryController polls this
-     * across all shards to decide if throttling is necessary because moving bytes to disk is falling behind vs incoming documents
-     * being indexed/deleted.
-     */
-    private final AtomicLong writingBytes = new AtomicLong();
     private final SearchOperationListener searchOperationListener;

     protected volatile ShardRouting shardRouting;
@@ -323,12 +317,6 @@ public Store store() {
     public Sort getIndexSort() {
         return indexSortSupplier.get();
     }
-    /**
-     * returns true if this shard supports indexing (i.e., write) operations.
-     */
-    public boolean canIndex() {
-        return true;
-    }

     public ShardGetService getService() {
         return this.getService;
@@ -839,34 +827,21 @@ public Engine.GetResult get(Engine.Get get) {
      */
     public void refresh(String source) {
         verifyNotClosed();
-
-        if (canIndex()) {
-            long bytes = getEngine().getIndexBufferRAMBytesUsed();
-            writingBytes.addAndGet(bytes);
-            try {
-                if (logger.isTraceEnabled()) {
-                    logger.trace("refresh with source [{}] indexBufferRAMBytesUsed [{}]", source, new ByteSizeValue(bytes));
-                }
-                getEngine().refresh(source);
-            } finally {
-                if (logger.isTraceEnabled()) {
-                    logger.trace("remove [{}] writing bytes for shard [{}]", new ByteSizeValue(bytes), shardId());
-                }
-                writingBytes.addAndGet(-bytes);
-            }
-        } else {
-            if (logger.isTraceEnabled()) {
-                logger.trace("refresh with source [{}]", source);
-            }
-            getEngine().refresh(source);
+        if (logger.isTraceEnabled()) {
+            logger.trace("refresh with source [{}]", source);
         }
+        getEngine().refresh(source);
     }

     /**
      * Returns how many bytes we are currently moving from heap to disk
      */
     public long getWritingBytes() {
-        return writingBytes.get();
+        Engine engine = getEngineOrNull();
+        if (engine == null) {
+            return 0;
+        }
+        return engine.getWritingBytes();
     }

     public RefreshStats refreshStats() {
@@ -1677,24 -1652,9 @@ private void handleRefreshException(Exception e) {
      * Called when our shard is using too much heap and should move buffered indexed/deleted documents to disk.
      */
     public void writeIndexingBuffer() {
-        if (canIndex() == false) {
-            throw new UnsupportedOperationException();
-        }
         try {
             Engine engine = getEngine();
-            long bytes = engine.getIndexBufferRAMBytesUsed();
-
-            // NOTE: this can be an overestimate by up to 20%, if engine uses IW.flush not refresh, because version map
-            // memory is low enough, but this is fine because after the writes finish, IMC will poll again and see that
-            // there's still up to the 20% being used and continue writing if necessary:
-            logger.debug("add [{}] writing bytes for shard [{}]", new ByteSizeValue(bytes), shardId());
-            writingBytes.addAndGet(bytes);
-            try {
-                engine.writeIndexingBuffer();
-            } finally {
-                writingBytes.addAndGet(-bytes);
-                logger.debug("remove [{}] writing bytes for shard [{}]", new ByteSizeValue(bytes), shardId());
-            }
+            engine.writeIndexingBuffer();
         } catch (Exception e) {
             handleRefreshException(e);
         }

core/src/main/java/org/elasticsearch/indices/IndexingMemoryController.java

+1 -2

@@ -152,8 +152,7 @@ ByteSizeValue indexingBufferSize() {
     protected List<IndexShard> availableShards() {
         List<IndexShard> availableShards = new ArrayList<>();
        for (IndexShard shard : indexShards) {
-            // shadow replica doesn't have an indexing buffer
-            if (shard.canIndex() && CAN_WRITE_INDEX_BUFFER_STATES.contains(shard.state())) {
+            if (CAN_WRITE_INDEX_BUFFER_STATES.contains(shard.state())) {
                 availableShards.add(shard);
             }
         }

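For the consumer side of getWritingBytes(), the javadoc moved into InternalEngine says IndexingMemoryController polls the value across all shards to decide whether writing to disk is falling behind indexing. The sketch below is an assumption-laden illustration of such a poller, not the actual IndexingMemoryController logic: ShardLike, writingIsFallingBehind, and the fixed byte budget are all invented for this example.

import java.util.List;

// Hypothetical poller; the interface and the throttling heuristic are assumptions for illustration only.
public class WritingBytesPoller {

    interface ShardLike {
        // Delegates to the engine after this commit (see the IndexShard diff above).
        long getWritingBytes();
    }

    // Returns true when the bytes still being moved to disk exceed a configured budget,
    // which a controller could take as a signal that writing is falling behind indexing.
    static boolean writingIsFallingBehind(List<ShardLike> shards, long budgetBytes) {
        long writing = 0;
        for (ShardLike shard : shards) {
            writing += shard.getWritingBytes();
        }
        return writing > budgetBytes;
    }

    public static void main(String[] args) {
        List<ShardLike> shards = List.of(() -> 5_000_000L, () -> 7_000_000L);
        System.out.println(writingIsFallingBehind(shards, 10_000_000L)); // true
    }
}

In the real controller the budget would come from the configured indexing buffer size; here it is just a constant passed in.
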
0 commit comments
