@@ -8,8 +8,6 @@

 package org.elasticsearch.common.lucene.search;

-import com.carrotsearch.hppc.ObjectHashSet;
-
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.Term;
@@ -27,10 +25,12 @@
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
 import java.util.ListIterator;
 import java.util.Objects;
+import java.util.Set;

 public class MultiPhrasePrefixQuery extends Query {

@@ -146,7 +146,7 @@ public Query rewrite(IndexReader reader) throws IOException {
         }
         Term[] suffixTerms = termArrays.get(sizeMinus1);
         int position = positions.get(sizeMinus1);
-        ObjectHashSet<Term> terms = new ObjectHashSet<>();
+        Set<Term> terms = new HashSet<>();
         for (Term term : suffixTerms) {
             getPrefixTerms(terms, term, reader);
             if (terms.size() > maxExpansions) {
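In the hunk above, the only operations performed on the set are add (inside getPrefixTerms, deduplicating terms across prefixes and segments) and size (to enforce maxExpansions), so a plain java.util.HashSet is a drop-in replacement for hppc's ObjectHashSet. Below is a minimal, self-contained sketch of that expansion pattern; the class name, the string terms, and the inlined matching loop are made up for illustration and are not part of the Elasticsearch code being changed.

import java.util.HashSet;
import java.util.List;
import java.util.Set;

// Sketch only: demonstrates that the expansion cap needs just add() and
// size() from the set, both of which java.util.HashSet provides.
public class PrefixExpansionSketch {
    public static void main(String[] args) {
        List<String> indexTerms = List.of("quick", "quickly", "quiet", "quilt");
        int maxExpansions = 3;

        Set<String> terms = new HashSet<>();
        for (String prefix : new String[] { "qui", "quic" }) {
            for (String t : indexTerms) {
                if (t.startsWith(prefix)) {
                    terms.add(t); // duplicates across prefixes collapse here
                }
            }
            if (terms.size() > maxExpansions) {
                break; // the same cap rewrite() applies via maxExpansions
            }
        }
        System.out.println(terms); // e.g. [quiet, quick, quickly, quilt]
    }
}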
@@ -169,11 +169,11 @@ public Query rewrite(IndexReader reader) throws IOException {
                 )
                 .build();
         }
-        query.add(terms.toArray(Term.class), position);
+        query.add(terms.toArray(new Term[0]), position);
         return query.build();
     }

-    private void getPrefixTerms(ObjectHashSet<Term> terms, final Term prefix, final IndexReader reader) throws IOException {
+    private void getPrefixTerms(Set<Term> terms, final Term prefix, final IndexReader reader) throws IOException {
         // SlowCompositeReaderWrapper could be used... but this would merge all terms from each segment into one terms
         // instance, which is very expensive. Therefore I think it is better to iterate over each leaf individually.
         List<LeafReaderContext> leaves = reader.leaves();
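The toArray call changes with the set type: hppc's ObjectHashSet exposes toArray(Class), while java.util.Set inherits Collection.toArray(T[]), so the call becomes terms.toArray(new Term[0]). A self-contained sketch of the JDK idiom, assuming lucene-core on the classpath; the field and term values are made up for illustration.

import java.util.HashSet;
import java.util.Set;

import org.apache.lucene.index.Term;

public class ToArrayIdiomSketch {
    public static void main(String[] args) {
        Set<Term> terms = new HashSet<>();
        terms.add(new Term("body", "quick"));
        terms.add(new Term("body", "quickly"));

        // JDK idiom: pass a zero-length array; toArray allocates a Term[]
        // of the correct size and fills it from the set.
        Term[] asArray = terms.toArray(new Term[0]);
        System.out.println(asArray.length); // 2
    }
}

Passing a zero-length array is the conventional modern form; the collection allocates a correctly sized array itself, so no presizing is needed.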