@@ -8,6 +8,7 @@
 
 package org.elasticsearch.index.engine;
 
+import org.apache.lucene.codecs.StoredFieldsReader;
 import org.apache.lucene.document.LongPoint;
 import org.apache.lucene.index.LeafReader;
 import org.apache.lucene.index.LeafReaderContext;
@@ -23,6 +24,7 @@
 import org.apache.lucene.util.ArrayUtil;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.common.lucene.index.SequentialStoredFieldsLeafReader;
 import org.elasticsearch.common.lucene.search.Queries;
 import org.elasticsearch.core.internal.io.IOUtils;
 import org.elasticsearch.index.fieldvisitor.FieldsVisitor;
@@ -55,6 +57,9 @@ final class LuceneChangesSnapshot implements Translog.Snapshot {
     private final ParallelArray parallelArray;
     private final Closeable onClose;
 
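+    // cached stored-fields reader and the ord of the leaf it was opened for;
+    // reused while consecutive docs are read from the same leaf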
+    private int storedFieldsReaderOrd = -1;
+    private StoredFieldsReader storedFieldsReader = null;
+
     /**
      * Creates a new "translog" snapshot from Lucene for reading operations whose seq# is in the specified range.
      *
@@ -162,9 +167,16 @@ private void fillParallelArray(ScoreDoc[] scoreDocs, ParallelArray parallelArray
             for (int i = 0; i < scoreDocs.length; i++) {
                 scoreDocs[i].shardIndex = i;
             }
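+            // enable the sequential stored-fields reader only for batches of at
+            // least 10 docs with consecutive docIDs; otherwise use random access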
+            parallelArray.useSequentialStoredFieldsReader = scoreDocs.length >= 10 && hasSequentialAccess(scoreDocs);
+            if (parallelArray.useSequentialStoredFieldsReader == false) {
+                storedFieldsReaderOrd = -1;
+                storedFieldsReader = null;
+            }
             // for better loading performance we sort the array by docID and
             // then visit all leaves in order.
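+            // a sequential batch is already in docID order, so the sort can be skipped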
-            ArrayUtil.introSort(scoreDocs, Comparator.comparingInt(i -> i.doc));
+            if (parallelArray.useSequentialStoredFieldsReader == false) {
+                ArrayUtil.introSort(scoreDocs, Comparator.comparingInt(i -> i.doc));
+            }
             int docBase = -1;
             int maxDoc = 0;
             List<LeafReaderContext> leaves = indexSearcher.getIndexReader().leaves();
@@ -190,8 +202,19 @@ private void fillParallelArray(ScoreDoc[] scoreDocs, ParallelArray parallelArray
                 parallelArray.hasRecoverySource[index] = combinedDocValues.hasRecoverySource(segmentDocID);
             }
             // now sort back based on the shardIndex. we use this to store the previous index
-            ArrayUtil.introSort(scoreDocs, Comparator.comparingInt(i -> i.shardIndex));
+            if (parallelArray.useSequentialStoredFieldsReader == false) {
+                ArrayUtil.introSort(scoreDocs, Comparator.comparingInt(i -> i.shardIndex));
+            }
+        }
+    }
+
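+    // true only when the docs form one unbroken docID run (doc, doc + 1, doc + 2, ...)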
+    private static boolean hasSequentialAccess(ScoreDoc[] scoreDocs) {
+        for (int i = 0; i < scoreDocs.length - 1; i++) {
+            if (scoreDocs[i].doc + 1 != scoreDocs[i + 1].doc) {
+                return false;
+            }
         }
+        return true;
     }
 
     private TopDocs searchOperations(ScoreDoc after) throws IOException {
@@ -218,7 +241,25 @@ private Translog.Operation readDocAsOp(int docIndex) throws IOException {
         final String sourceField = parallelArray.hasRecoverySource[docIndex] ? SourceFieldMapper.RECOVERY_SOURCE_NAME :
             SourceFieldMapper.NAME;
         final FieldsVisitor fields = new FieldsVisitor(true, sourceField);
-        leaf.reader().document(segmentDocID, fields);
+
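+        // refresh the cached reader whenever we cross into a new leaf; leaves that
+        // cannot provide a sequential reader fall back to random-access lookups below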
+        if (parallelArray.useSequentialStoredFieldsReader) {
+            if (storedFieldsReaderOrd != leaf.ord) {
+                if (leaf.reader() instanceof SequentialStoredFieldsLeafReader) {
+                    storedFieldsReader = ((SequentialStoredFieldsLeafReader) leaf.reader()).getSequentialStoredFieldsReader();
+                    storedFieldsReaderOrd = leaf.ord;
+                } else {
+                    storedFieldsReader = null;
+                    storedFieldsReaderOrd = -1;
+                }
+            }
+        }
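+        // sequential path: visit the stored fields directly through the cached reader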
+        if (storedFieldsReader != null) {
+            assert parallelArray.useSequentialStoredFieldsReader;
+            assert storedFieldsReaderOrd == leaf.ord : storedFieldsReaderOrd + " != " + leaf.ord;
+            storedFieldsReader.visitDocument(segmentDocID, fields);
+        } else {
+            leaf.reader().document(segmentDocID, fields);
+        }
 
         final Translog.Operation op;
         final boolean isTombstone = parallelArray.isTombStone[docIndex];
@@ -270,6 +311,7 @@ private static final class ParallelArray {
         final long[] primaryTerm;
         final boolean[] isTombStone;
         final boolean[] hasRecoverySource;
+        boolean useSequentialStoredFieldsReader = false;
 
         ParallelArray(int size) {
             version = new long[size];
@@ -281,4 +323,8 @@ private static final class ParallelArray {
         }
     }
 
+    // for testing
+    boolean useSequentialStoredFieldsReader() {
+        return storedFieldsReader != null;
+    }
 }