@@ -42,6 +42,7 @@
 import org.apache.lucene.search.uhighlight.CustomUnifiedHighlighter;
 import org.apache.lucene.search.uhighlight.Snippet;
 import org.apache.lucene.search.uhighlight.SplittingBreakIterator;
+import org.apache.lucene.search.uhighlight.UnifiedHighlighter;
 import org.apache.lucene.store.Directory;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.index.mapper.annotatedtext.AnnotatedTextFieldMapper.AnnotatedHighlighterAnalyzer;
@@ -59,78 +60,97 @@
 
 public class AnnotatedTextHighlighterTests extends ESTestCase {
 
+    private void assertHighlightOneDoc(String fieldName, String[] markedUpInputs,
+            Query query, Locale locale, BreakIterator breakIterator,
+            int noMatchSize, String[] expectedPassages) throws Exception {
+
+        assertHighlightOneDoc(fieldName, markedUpInputs, query, locale, breakIterator, noMatchSize, expectedPassages,
+            Integer.MAX_VALUE, false);
+    }
+
     private void assertHighlightOneDoc(String fieldName, String []markedUpInputs,
             Query query, Locale locale, BreakIterator breakIterator,
-            int noMatchSize, String[] expectedPassages) throws Exception {
-
-
-        // Annotated fields wrap the usual analyzer with one that injects extra tokens
-        Analyzer wrapperAnalyzer = new AnnotationAnalyzerWrapper(new StandardAnalyzer());
-        Directory dir = newDirectory();
-        IndexWriterConfig iwc = newIndexWriterConfig(wrapperAnalyzer);
-        iwc.setMergePolicy(newTieredMergePolicy(random()));
-        RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
-        FieldType ft = new FieldType(TextField.TYPE_STORED);
-        if (randomBoolean()) {
-            ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
-        } else {
-            ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS);
-        }
-        ft.freeze();
-        Document doc = new Document();
-        for (String input : markedUpInputs) {
-            Field field = new Field(fieldName, "", ft);
-            field.setStringValue(input);
-            doc.add(field);
+            int noMatchSize, String[] expectedPassages,
+            int maxAnalyzedOffset, boolean limitToMaxAnalyzedOffset) throws Exception {
+
+        Directory dir = null;
+        DirectoryReader reader = null;
+        try {
+            dir = newDirectory();
+
+            // Annotated fields wrap the usual analyzer with one that injects extra tokens
+            Analyzer wrapperAnalyzer = new AnnotationAnalyzerWrapper(new StandardAnalyzer());
+
+            IndexWriterConfig iwc = newIndexWriterConfig(wrapperAnalyzer);
+            iwc.setMergePolicy(newTieredMergePolicy(random()));
+            RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+            FieldType ft = new FieldType(TextField.TYPE_STORED);
+            if (randomBoolean()) {
+                ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+            } else {
+                ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS);
+            }
+            ft.freeze();
+            Document doc = new Document();
+            for (String input : markedUpInputs) {
+                Field field = new Field(fieldName, "", ft);
+                field.setStringValue(input);
+                doc.add(field);
+            }
+            iw.addDocument(doc);
+            reader = iw.getReader();
+            IndexSearcher searcher = newSearcher(reader);
+            iw.close();
+
+            AnnotatedText[] annotations = new AnnotatedText[markedUpInputs.length];
+            for (int i = 0; i < markedUpInputs.length; i++) {
+                annotations[i] = AnnotatedText.parse(markedUpInputs[i]);
+            }
+            AnnotatedHighlighterAnalyzer hiliteAnalyzer = new AnnotatedHighlighterAnalyzer(wrapperAnalyzer);
+            hiliteAnalyzer.setAnnotations(annotations);
+            AnnotatedPassageFormatter passageFormatter = new AnnotatedPassageFormatter(new DefaultEncoder());
+            passageFormatter.setAnnotations(annotations);
+
+            ArrayList<Object> plainTextForHighlighter = new ArrayList<>(annotations.length);
+            for (int i = 0; i < annotations.length; i++) {
+                plainTextForHighlighter.add(annotations[i].textMinusMarkup);
+            }
+
+            TopDocs topDocs = searcher.search(new MatchAllDocsQuery(), 1, Sort.INDEXORDER);
+            assertThat(topDocs.totalHits.value, equalTo(1L));
+            String rawValue = Strings.collectionToDelimitedString(plainTextForHighlighter, String.valueOf(MULTIVAL_SEP_CHAR));
+            CustomUnifiedHighlighter highlighter = new CustomUnifiedHighlighter(
+                searcher,
+                hiliteAnalyzer,
+                UnifiedHighlighter.OffsetSource.ANALYSIS,
+                passageFormatter,
+                locale,
+                breakIterator,
+                "index",
+                "text",
+                query,
+                noMatchSize,
+                expectedPassages.length,
+                name -> "text".equals(name),
+                maxAnalyzedOffset,
+                limitToMaxAnalyzedOffset
+            );
+            highlighter.setFieldMatcher((name) -> "text".equals(name));
+            final Snippet[] snippets = highlighter.highlightField(getOnlyLeafReader(reader), topDocs.scoreDocs[0].doc, () -> rawValue);
+            assertEquals(expectedPassages.length, snippets.length);
+            for (int i = 0; i < snippets.length; i++) {
+                assertEquals(expectedPassages[i], snippets[i].getText());
+            }
+        } finally {
+            if (reader != null) {
+                reader.close();
+            }
+            if (dir != null) {
+                dir.close();
+            }
         }
-        iw.addDocument(doc);
-        DirectoryReader reader = iw.getReader();
-        IndexSearcher searcher = newSearcher(reader);
-        iw.close();
-
-        AnnotatedText[] annotations = new AnnotatedText[markedUpInputs.length];
-        for (int i = 0; i < markedUpInputs.length; i++) {
-            annotations[i] = AnnotatedText.parse(markedUpInputs[i]);
-        }
-        AnnotatedHighlighterAnalyzer hiliteAnalyzer = new AnnotatedHighlighterAnalyzer(wrapperAnalyzer);
-        hiliteAnalyzer.setAnnotations(annotations);
-        AnnotatedPassageFormatter passageFormatter = new AnnotatedPassageFormatter(new DefaultEncoder());
-        passageFormatter.setAnnotations(annotations);
-
-        ArrayList<Object> plainTextForHighlighter = new ArrayList<>(annotations.length);
-        for (int i = 0; i < annotations.length; i++) {
-            plainTextForHighlighter.add(annotations[i].textMinusMarkup);
-        }
-
-        TopDocs topDocs = searcher.search(new MatchAllDocsQuery(), 1, Sort.INDEXORDER);
-        assertThat(topDocs.totalHits.value, equalTo(1L));
-        String rawValue = Strings.collectionToDelimitedString(plainTextForHighlighter, String.valueOf(MULTIVAL_SEP_CHAR));
-        CustomUnifiedHighlighter highlighter = new CustomUnifiedHighlighter(
-            searcher,
-            hiliteAnalyzer,
-            null,
-            passageFormatter,
-            locale,
-            breakIterator,
-            "index",
-            "text",
-            query,
-            noMatchSize,
-            expectedPassages.length,
-            name -> "text".equals(name),
-            Integer.MAX_VALUE
-        );
-        highlighter.setFieldMatcher((name) -> "text".equals(name));
-        final Snippet[] snippets = highlighter.highlightField(getOnlyLeafReader(reader), topDocs.scoreDocs[0].doc, () -> rawValue);
-        assertEquals(expectedPassages.length, snippets.length);
-        for (int i = 0; i < snippets.length; i++) {
-            assertEquals(expectedPassages[i], snippets[i].getText());
-        }
-        reader.close();
-        dir.close();
     }
 
-
     public void testAnnotatedTextStructuredMatch() throws Exception {
         // Check that a structured token eg a URL can be highlighted in a query
         // on marked-up
@@ -202,4 +222,33 @@ public void testBadAnnotation() throws Exception {
         assertHighlightOneDoc("text", markedUpInputs, query, Locale.ROOT, breakIterator, 0, expectedPassages);
     }
 
+    public void testExceedMaxAnalyzedOffset() throws Exception {
+        TermQuery query = new TermQuery(new Term("text", "exceeds"));
+        BreakIterator breakIterator = new CustomSeparatorBreakIterator(MULTIVAL_SEP_CHAR);
+        assertHighlightOneDoc("text", new String[] { "[Short Text](Short+Text)" }, query, Locale.ROOT, breakIterator, 0, new String[] {});
+
+        IllegalArgumentException e = expectThrows(
+            IllegalArgumentException.class,
+            () -> assertHighlightOneDoc(
+                "text",
+                new String[] { "[Long Text exceeds](Long+Text+exceeds) MAX analyzed offset)" },
+                query,
+                Locale.ROOT,
+                breakIterator,
+                0,
+                new String[] {},
+                15,
+                false
+            )
+        );
+        assertEquals(
+            "The length of [text] field of [0] doc of [index] index has exceeded [15] - maximum allowed to be analyzed for "
+                + "highlighting. This maximum can be set by changing the [index.highlight.max_analyzed_offset] index level setting. "
+                + "For large texts, indexing with offsets or term vectors is recommended!",
+            e.getMessage()
+        );
+
+        assertHighlightOneDoc("text", new String[] { "[Long Text exceeds](Long+Text+exceeds) MAX analyzed offset" },
+            query, Locale.ROOT, breakIterator, 0, new String[] {"Long Text [exceeds](_hit_term=exceeds) MAX analyzed offset"}, 15, true);
+    }
 }
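For readers skimming the diff: the new trailing arguments (maxAnalyzedOffset, limitToMaxAnalyzedOffset) control what happens when a field value is longer than the highlighter is allowed to analyze. On the default path the test asserts an IllegalArgumentException; passing true instead restricts analysis to the allowed prefix. The standalone sketch below is only a map of that three-way decision; the class and method names are invented for illustration, and the actual enforcement presumably lives inside CustomUnifiedHighlighter, to which the test passes these arguments.

// MaxAnalyzedOffsetSketch.java -- a minimal sketch, NOT Elasticsearch code.
// Assumption: CustomUnifiedHighlighter applies a check roughly equivalent to
// toAnalyze() below, driven by maxAnalyzedOffset / limitToMaxAnalyzedOffset.
import java.util.Locale;

public class MaxAnalyzedOffsetSketch {

    static String toAnalyze(String fieldValue, String field, int doc, String index,
                            int maxAnalyzedOffset, boolean limitToMaxAnalyzedOffset) {
        if (fieldValue.length() <= maxAnalyzedOffset) {
            return fieldValue; // under the limit: the whole value is analyzed and highlighted
        }
        if (limitToMaxAnalyzedOffset) {
            // opt-in behavior: quietly analyze only the allowed prefix
            return fieldValue.substring(0, maxAnalyzedOffset);
        }
        // default behavior: fail with the message asserted in the test above
        throw new IllegalArgumentException(String.format(Locale.ROOT,
            "The length of [%s] field of [%d] doc of [%s] index has exceeded [%d] - maximum allowed to be analyzed "
                + "for highlighting. This maximum can be set by changing the [index.highlight.max_analyzed_offset] "
                + "index level setting. For large texts, indexing with offsets or term vectors is recommended!",
            field, doc, index, maxAnalyzedOffset));
    }

    public static void main(String[] args) {
        String value = "Long Text exceeds MAX analyzed offset";
        // limitToMaxAnalyzedOffset = true: only the first 15 chars are analyzed
        System.out.println(toAnalyze(value, "text", 0, "index", 15, true));
        // limitToMaxAnalyzedOffset = false: the same oversized input now throws
        try {
            toAnalyze(value, "text", 0, "index", 15, false);
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage());
        }
    }
}

The interplay with annotated text is subtler than this sketch: the final assertion in testExceedMaxAnalyzedOffset still highlights a hit whose end offset straddles the 15-char cutoff, so treat the sketch only as an outline of the three branches, not of offset handling.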