@@ -8,6 +8,14 @@

 package org.elasticsearch.search.fetch.subphase.highlight;

+import static org.apache.lucene.search.uhighlight.CustomUnifiedHighlighter.MULTIVAL_SEP_CHAR;
+import static org.hamcrest.CoreMatchers.equalTo;
+
+import java.net.URLEncoder;
+import java.text.BreakIterator;
+import java.util.ArrayList;
+import java.util.Locale;
+
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.standard.StandardAnalyzer;
 import org.apache.lucene.document.Document;
@@ -31,95 +39,96 @@
 import org.apache.lucene.search.uhighlight.CustomUnifiedHighlighter;
 import org.apache.lucene.search.uhighlight.Snippet;
 import org.apache.lucene.search.uhighlight.SplittingBreakIterator;
+import org.apache.lucene.search.uhighlight.UnifiedHighlighter;
 import org.apache.lucene.store.Directory;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.index.mapper.annotatedtext.AnnotatedTextFieldMapper.AnnotatedHighlighterAnalyzer;
 import org.elasticsearch.index.mapper.annotatedtext.AnnotatedTextFieldMapper.AnnotatedText;
 import org.elasticsearch.index.mapper.annotatedtext.AnnotatedTextFieldMapper.AnnotationAnalyzerWrapper;
 import org.elasticsearch.test.ESTestCase;

-import java.net.URLEncoder;
-import java.text.BreakIterator;
-import java.util.ArrayList;
-import java.util.Locale;
+public class AnnotatedTextHighlighterTests extends ESTestCase {

-import static org.apache.lucene.search.uhighlight.CustomUnifiedHighlighter.MULTIVAL_SEP_CHAR;
-import static org.hamcrest.CoreMatchers.equalTo;
+    private void assertHighlightOneDoc(String fieldName, String[] markedUpInputs,
+            Query query, Locale locale, BreakIterator breakIterator,
+            int noMatchSize, String[] expectedPassages) throws Exception {

-public class AnnotatedTextHighlighterTests extends ESTestCase {
+        assertHighlightOneDoc(fieldName, markedUpInputs, query, locale, breakIterator, noMatchSize, expectedPassages,
+            Integer.MAX_VALUE, null);
+    }

     private void assertHighlightOneDoc(String fieldName, String []markedUpInputs,
             Query query, Locale locale, BreakIterator breakIterator,
-            int noMatchSize, String[] expectedPassages) throws Exception {
-
-
-        // Annotated fields wrap the usual analyzer with one that injects extra tokens
-        Analyzer wrapperAnalyzer = new AnnotationAnalyzerWrapper(new StandardAnalyzer());
-        Directory dir = newDirectory();
-        IndexWriterConfig iwc = newIndexWriterConfig(wrapperAnalyzer);
-        iwc.setMergePolicy(newTieredMergePolicy(random()));
-        RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
-        FieldType ft = new FieldType(TextField.TYPE_STORED);
-        if (randomBoolean()) {
-            ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
-        } else {
-            ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS);
-        }
-        ft.freeze();
-        Document doc = new Document();
-        for (String input : markedUpInputs) {
-            Field field = new Field(fieldName, "", ft);
-            field.setStringValue(input);
-            doc.add(field);
-        }
-        iw.addDocument(doc);
-        DirectoryReader reader = iw.getReader();
-        IndexSearcher searcher = newSearcher(reader);
-        iw.close();
-
-        AnnotatedText[] annotations = new AnnotatedText[markedUpInputs.length];
-        for (int i = 0; i < markedUpInputs.length; i++) {
-            annotations[i] = AnnotatedText.parse(markedUpInputs[i]);
-        }
-        AnnotatedHighlighterAnalyzer hiliteAnalyzer = new AnnotatedHighlighterAnalyzer(wrapperAnalyzer);
-        hiliteAnalyzer.setAnnotations(annotations);
-        AnnotatedPassageFormatter passageFormatter = new AnnotatedPassageFormatter(new DefaultEncoder());
-        passageFormatter.setAnnotations(annotations);
-
-        ArrayList<Object> plainTextForHighlighter = new ArrayList<>(annotations.length);
-        for (int i = 0; i < annotations.length; i++) {
-            plainTextForHighlighter.add(annotations[i].textMinusMarkup);
-        }
+            int noMatchSize, String[] expectedPassages,
+            int maxAnalyzedOffset, Integer queryMaxAnalyzedOffset) throws Exception {

-        TopDocs topDocs = searcher.search(new MatchAllDocsQuery(), 1, Sort.INDEXORDER);
-        assertThat(topDocs.totalHits.value, equalTo(1L));
-        String rawValue = Strings.collectionToDelimitedString(plainTextForHighlighter, String.valueOf(MULTIVAL_SEP_CHAR));
-        CustomUnifiedHighlighter highlighter = new CustomUnifiedHighlighter(
-            searcher,
-            hiliteAnalyzer,
-            null,
-            passageFormatter,
-            locale,
-            breakIterator,
-            "index",
-            "text",
-            query,
-            noMatchSize,
-            expectedPassages.length,
-            name -> "text".equals(name),
-            Integer.MAX_VALUE
-        );
-        highlighter.setFieldMatcher((name) -> "text".equals(name));
-        final Snippet[] snippets = highlighter.highlightField(getOnlyLeafReader(reader), topDocs.scoreDocs[0].doc, () -> rawValue);
-        assertEquals(expectedPassages.length, snippets.length);
-        for (int i = 0; i < snippets.length; i++) {
-            assertEquals(expectedPassages[i], snippets[i].getText());
+        try (Directory dir = newDirectory()) {
+            // Annotated fields wrap the usual analyzer with one that injects extra tokens
+            Analyzer wrapperAnalyzer = new AnnotationAnalyzerWrapper(new StandardAnalyzer());
+            IndexWriterConfig iwc = newIndexWriterConfig(wrapperAnalyzer);
+            iwc.setMergePolicy(newTieredMergePolicy(random()));
+            RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+            FieldType ft = new FieldType(TextField.TYPE_STORED);
+            if (randomBoolean()) {
+                ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+            } else {
+                ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS);
+            }
+            ft.freeze();
+            Document doc = new Document();
+            for (String input : markedUpInputs) {
+                Field field = new Field(fieldName, "", ft);
+                field.setStringValue(input);
+                doc.add(field);
+            }
+            iw.addDocument(doc);
+            try (DirectoryReader reader = iw.getReader()) {
+                IndexSearcher searcher = newSearcher(reader);
+                iw.close();
+
+                AnnotatedText[] annotations = new AnnotatedText[markedUpInputs.length];
+                for (int i = 0; i < markedUpInputs.length; i++) {
+                    annotations[i] = AnnotatedText.parse(markedUpInputs[i]);
+                }
+                AnnotatedHighlighterAnalyzer hiliteAnalyzer = new AnnotatedHighlighterAnalyzer(wrapperAnalyzer);
+                hiliteAnalyzer.setAnnotations(annotations);
+                AnnotatedPassageFormatter passageFormatter = new AnnotatedPassageFormatter(new DefaultEncoder());
+                passageFormatter.setAnnotations(annotations);
+
+                ArrayList<Object> plainTextForHighlighter = new ArrayList<>(annotations.length);
+                for (int i = 0; i < annotations.length; i++) {
+                    plainTextForHighlighter.add(annotations[i].textMinusMarkup);
+                }
+
+                TopDocs topDocs = searcher.search(new MatchAllDocsQuery(), 1, Sort.INDEXORDER);
+                assertThat(topDocs.totalHits.value, equalTo(1L));
+                String rawValue = Strings.collectionToDelimitedString(plainTextForHighlighter, String.valueOf(MULTIVAL_SEP_CHAR));
+                CustomUnifiedHighlighter highlighter = new CustomUnifiedHighlighter(
+                    searcher,
+                    hiliteAnalyzer,
+                    UnifiedHighlighter.OffsetSource.ANALYSIS,
+                    passageFormatter,
+                    locale,
+                    breakIterator,
+                    "index",
+                    "text",
+                    query,
+                    noMatchSize,
+                    expectedPassages.length,
+                    name -> "text".equals(name),
+                    maxAnalyzedOffset,
+                    queryMaxAnalyzedOffset
+                );
+                highlighter.setFieldMatcher((name) -> "text".equals(name));
+                final Snippet[] snippets = highlighter.highlightField(getOnlyLeafReader(reader), topDocs.scoreDocs[0].doc, () -> rawValue);
+                assertEquals(expectedPassages.length, snippets.length);
+                for (int i = 0; i < snippets.length; i++) {
+                    assertEquals(expectedPassages[i], snippets[i].getText());
+                }
+            }
         }
-        reader.close();
-        dir.close();
     }

-
     public void testAnnotatedTextStructuredMatch() throws Exception {
         // Check that a structured token eg a URL can be highlighted in a query
         // on marked-up data using an "annotated_text" type field.
@@ -191,4 +200,65 @@ public void testBadAnnotation() throws Exception {
         assertHighlightOneDoc("text", markedUpInputs, query, Locale.ROOT, breakIterator, 0, expectedPassages);
     }

+    public void testExceedMaxAnalyzedOffset() throws Exception {
+        TermQuery query = new TermQuery(new Term("text", "exceeds"));
+        BreakIterator breakIterator = new CustomSeparatorBreakIterator(MULTIVAL_SEP_CHAR);
+        assertHighlightOneDoc("text", new String[] { "[Short Text](Short+Text)" }, query, Locale.ROOT, breakIterator, 0, new String[] {},
+            10, null);
+
+        IllegalArgumentException e = expectThrows(
+            IllegalArgumentException.class,
+            () -> assertHighlightOneDoc(
+                "text",
+                new String[] { "[Long Text exceeds](Long+Text+exceeds) MAX analyzed offset)" },
+                query,
+                Locale.ROOT,
+                breakIterator,
+                0,
+                new String[] {},
+                20,
+                null
+            )
+        );
+        assertEquals(
+            "The length [38] of field [text] in doc[0]/index[index] exceeds the [index.highlight.max_analyzed_offset] limit [20]. "
+                + "To avoid this error, set the query parameter [max_analyzed_offset] to a value less than index setting [20] and this "
+                + "will tolerate long field values by truncating them.",
+            e.getMessage()
+        );
+
+        final Integer queryMaxOffset = randomIntBetween(21, 1000);
+        e = expectThrows(
+            IllegalArgumentException.class,
+            () -> assertHighlightOneDoc(
+                "text",
+                new String[] { "[Long Text exceeds](Long+Text+exceeds) MAX analyzed offset)" },
+                query,
+                Locale.ROOT,
+                breakIterator,
+                0,
+                new String[] {},
+                20,
+                queryMaxOffset
+            )
+        );
+        assertEquals(
+            "The length [38] of field [text] in doc[0]/index[index] exceeds the [index.highlight.max_analyzed_offset] limit [20]. "
+                + "To avoid this error, set the query parameter [max_analyzed_offset] to a value less than index setting [20] and this "
+                + "will tolerate long field values by truncating them.",
+            e.getMessage()
+        );
+
+        assertHighlightOneDoc(
+            "text",
+            new String[] { "[Long Text Exceeds](Long+Text+Exceeds) MAX analyzed offset [Long Text Exceeds](Long+Text+Exceeds)" },
+            query,
+            Locale.ROOT,
+            breakIterator,
+            0,
+            new String[] { "Long Text [Exceeds](_hit_term=exceeds) MAX analyzed offset [Long Text Exceeds](Long+Text+Exceeds)" },
+            20,
+            15
+        );
+    }
 }
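
A note on the test fixtures: the marked-up inputs use the annotated-text syntax `[visible text](url-encoded+annotation)`, and the test harness feeds the highlighter the markup-stripped text (`textMinusMarkup`), not the raw markup. That is why the expected error messages report a length of [38] for the long input. A minimal illustration, reusing `AnnotatedText.parse` and `textMinusMarkup` exactly as the harness above does (this snippet is not part of the diff):

```java
// Illustration only: how annotated-text markup decomposes for highlighting.
AnnotatedText parsed = AnnotatedText.parse("[Long Text exceeds](Long+Text+exceeds) MAX analyzed offset)");

// The highlighter operates on the markup-stripped text...
assertEquals("Long Text exceeds MAX analyzed offset)", parsed.textMinusMarkup);
// ...so the index-level limit of 20 is compared against this length of 38.
assertEquals(38, parsed.textMinusMarkup.length());
```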
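The three cases in `testExceedMaxAnalyzedOffset` pin down how the index-level `index.highlight.max_analyzed_offset` setting interacts with the new query-level `max_analyzed_offset` parameter: with no query limit a too-long field is rejected, a query limit above the index setting is still rejected, and a query limit at or below it truncates analysis instead of failing. A sketch of that rule, condensed from the observable test behaviour (a hypothetical helper, not the actual `CustomUnifiedHighlighter` code):

```java
// Hypothetical condensation of the limit check the test exercises.
static int resolveAnalyzedOffset(int fieldLength, int indexLimit, Integer queryLimit) {
    if (fieldLength <= indexLimit) {
        return fieldLength; // field fits: analyze it in full
    }
    if (queryLimit != null && queryLimit <= indexLimit) {
        return queryLimit; // truncate analysis at the query limit instead of failing
    }
    // No query limit, or one above the index setting: reject the request
    throw new IllegalArgumentException(
        "The length [" + fieldLength + "] of field exceeds the [index.highlight.max_analyzed_offset] limit [" + indexLimit + "]"
    );
}
```

Under that rule the final assertion falls out naturally: with a query limit of 15 against an index limit of 20, analysis is cut off early, so only the first `Exceeds` is reported as a `_hit_term` while the trailing annotation passes through unhighlighted.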