Skip to content

Commit 8bbbbd1

Browse files
committed
Move DocsStats into Engine (#33835)
By moving DocStats into the engine we can easily cache the stats for read-only engines if necessary. It also moves the responsibility out of IndexShard, which already has quite some complexity.
1 parent 277ccd9 commit 8bbbbd1

File tree

4 files changed

+46
-28
lines changed

4 files changed

+46
-28
lines changed

server/src/main/java/org/elasticsearch/index/engine/Engine.java

Lines changed: 36 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -66,6 +66,7 @@
6666
import org.elasticsearch.index.merge.MergeStats;
6767
import org.elasticsearch.index.seqno.SeqNoStats;
6868
import org.elasticsearch.index.seqno.SequenceNumbers;
69+
import org.elasticsearch.index.shard.DocsStats;
6970
import org.elasticsearch.index.shard.ShardId;
7071
import org.elasticsearch.index.store.Store;
7172
import org.elasticsearch.index.translog.Translog;
@@ -175,6 +176,41 @@ public MergeStats getMergeStats() {
175176
/** Returns how many bytes we are currently moving from heap to disk */
176177
public abstract long getWritingBytes();
177178

179+
/**
180+
* Returns the {@link DocsStats} for this engine
181+
*/
182+
public DocsStats docStats() {
183+
// we calculate the doc stats based on the internal reader that is more up-to-date and not subject
184+
// to external refreshes. For instance we don't refresh an external reader if we flush and indices with
185+
// index.refresh_interval=-1 won't see any doc stats updates at all. This change will give more accurate statistics
186+
// when indexing but not refreshing in general. Yet, if a refresh happens the internal reader is refresh as well so we are
187+
// safe here.
188+
try (Engine.Searcher searcher = acquireSearcher("docStats", Engine.SearcherScope.INTERNAL)) {
189+
return docsStats(searcher.reader());
190+
}
191+
}
192+
193+
protected final DocsStats docsStats(IndexReader indexReader) {
194+
long numDocs = 0;
195+
long numDeletedDocs = 0;
196+
long sizeInBytes = 0;
197+
// we don't wait for a pending refreshes here since it's a stats call instead we mark it as accessed only which will cause
198+
// the next scheduled refresh to go through and refresh the stats as well
199+
for (LeafReaderContext readerContext : indexReader.leaves()) {
200+
// we go on the segment level here to get accurate numbers
201+
final SegmentReader segmentReader = Lucene.segmentReader(readerContext.reader());
202+
SegmentCommitInfo info = segmentReader.getSegmentInfo();
203+
numDocs += readerContext.reader().numDocs();
204+
numDeletedDocs += readerContext.reader().numDeletedDocs();
205+
try {
206+
sizeInBytes += info.sizeInBytes();
207+
} catch (IOException e) {
208+
logger.trace(() -> new ParameterizedMessage("failed to get size for [{}]", info.info.name), e);
209+
}
210+
}
211+
return new DocsStats(numDocs, numDeletedDocs, sizeInBytes);
212+
}
213+
178214
/**
179215
* A throttling class that can be activated, causing the
180216
* {@code acquireThrottle} method to block on a lock when throttling

server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -34,6 +34,7 @@
3434
import org.elasticsearch.index.mapper.MapperService;
3535
import org.elasticsearch.index.seqno.SeqNoStats;
3636
import org.elasticsearch.index.seqno.SequenceNumbers;
37+
import org.elasticsearch.index.shard.DocsStats;
3738
import org.elasticsearch.index.store.Store;
3839
import org.elasticsearch.index.translog.Translog;
3940
import org.elasticsearch.index.translog.TranslogStats;
@@ -63,6 +64,7 @@ public final class ReadOnlyEngine extends Engine {
6364
private final SearcherManager searcherManager;
6465
private final IndexCommit indexCommit;
6566
private final Lock indexWriterLock;
67+
private final DocsStats docsStats;
6668

6769
/**
6870
* Creates a new ReadOnlyEngine. This ctor can also be used to open a read-only engine on top of an already opened
@@ -101,6 +103,7 @@ public ReadOnlyEngine(EngineConfig config, SeqNoStats seqNoStats, TranslogStats
101103
this.indexCommit = reader.getIndexCommit();
102104
this.searcherManager = new SearcherManager(reader,
103105
new RamAccountingSearcherFactory(engineConfig.getCircuitBreakerService()));
106+
this.docsStats = docsStats(reader);
104107
this.indexWriterLock = indexWriterLock;
105108
success = true;
106109
} finally {
@@ -365,4 +368,9 @@ public void trimOperationsFromTranslog(long belowTerm, long aboveSeqNo) {
365368
@Override
366369
public void maybePruneDeletes() {
367370
}
371+
372+
@Override
373+
public DocsStats docStats() {
374+
return docsStats;
375+
}
368376
}

server/src/main/java/org/elasticsearch/index/shard/IndexShard.java

Lines changed: 1 addition & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -21,14 +21,10 @@
2121

2222
import com.carrotsearch.hppc.ObjectLongMap;
2323
import org.apache.logging.log4j.Logger;
24-
import org.apache.logging.log4j.message.ParameterizedMessage;
2524
import org.apache.lucene.index.CheckIndex;
2625
import org.apache.lucene.index.IndexCommit;
2726
import org.apache.lucene.index.IndexOptions;
28-
import org.apache.lucene.index.LeafReaderContext;
29-
import org.apache.lucene.index.SegmentCommitInfo;
3027
import org.apache.lucene.index.SegmentInfos;
31-
import org.apache.lucene.index.SegmentReader;
3228
import org.apache.lucene.index.Term;
3329
import org.apache.lucene.search.Query;
3430
import org.apache.lucene.search.QueryCachingPolicy;
@@ -918,29 +914,7 @@ public FlushStats flushStats() {
918914
}
919915

920916
public DocsStats docStats() {
921-
// we calculate the doc stats based on the internal reader that is more up-to-date and not subject
922-
// to external refreshes. For instance we don't refresh an external reader if we flush and indices with
923-
// index.refresh_interval=-1 won't see any doc stats updates at all. This change will give more accurate statistics
924-
// when indexing but not refreshing in general. Yet, if a refresh happens the internal reader is refresh as well so we are
925-
// safe here.
926-
long numDocs = 0;
927-
long numDeletedDocs = 0;
928-
long sizeInBytes = 0;
929-
try (Engine.Searcher searcher = acquireSearcher("docStats", Engine.SearcherScope.INTERNAL)) {
930-
for (LeafReaderContext reader : searcher.reader().leaves()) {
931-
// we go on the segment level here to get accurate numbers
932-
final SegmentReader segmentReader = Lucene.segmentReader(reader.reader());
933-
SegmentCommitInfo info = segmentReader.getSegmentInfo();
934-
numDocs += reader.reader().numDocs();
935-
numDeletedDocs += reader.reader().numDeletedDocs();
936-
try {
937-
sizeInBytes += info.sizeInBytes();
938-
} catch (IOException e) {
939-
logger.trace(() -> new ParameterizedMessage("failed to get size for [{}]", info.info.name), e);
940-
}
941-
}
942-
}
943-
return new DocsStats(numDocs, numDeletedDocs, sizeInBytes);
917+
return getEngine().docStats();
944918
}
945919

946920
/**

server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2437,7 +2437,7 @@ public void testRecoverFromLocalShard() throws IOException {
24372437
closeShards(sourceShard, targetShard);
24382438
}
24392439

2440-
public void testDocStats() throws IOException, InterruptedException {
2440+
public void testDocStats() throws Exception {
24412441
IndexShard indexShard = null;
24422442
try {
24432443
indexShard = newStartedShard(randomBoolean(),

0 commit comments

Comments
 (0)