
Commit 28fcf20

Fix testForceMergeWithSoftDeletesRetentionAndRecoverySource (#48766)
This test failure manifests the limitation of the recovery-source merge policy explained in #41628: if we have already merged down to a single segment, subsequent force merges will be noops, even though they could still prune _recovery_source. We need to adjust this test until we have a fix for the merge policy.

Relates #41628
Closes #48735
1 parent df8346f

1 file changed: +19 −11 lines

server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java

@@ -1694,18 +1694,26 @@ public void testForceMergeWithSoftDeletesRetentionAndRecoverySource() throws Exception {
         settings.put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), 0);
         indexSettings.updateIndexMetaData(IndexMetaData.builder(defaultSettings.getIndexMetaData()).settings(settings).build());
         engine.onSettingsChanged();
-        // If the global checkpoint equals to the local checkpoint, the next force-merge will be a noop
-        // because all deleted documents are expunged in the previous force-merge already. We need to flush
-        // a new segment to make merge happen so that we can verify that all _recovery_source are pruned.
-        if (globalCheckpoint.get() == engine.getLocalCheckpoint() && liveDocs.isEmpty() == false) {
-            String deleteId = randomFrom(liveDocs);
-            engine.delete(new Engine.Delete("test", deleteId, newUid(deleteId), primaryTerm.get()));
-            liveDocsWithSource.remove(deleteId);
-            liveDocs.remove(deleteId);
-            engine.flush();
+        // If we already merged down to 1 segment, then the next force-merge will be a noop. We need to add an extra segment to make
+        // merges happen so we can verify that _recovery_source are pruned. See: https://github.com/elastic/elasticsearch/issues/41628.
+        final int numSegments;
+        try (Engine.Searcher searcher = engine.acquireSearcher("test", Engine.SearcherScope.INTERNAL)) {
+            numSegments = searcher.getDirectoryReader().leaves().size();
+        }
+        if (numSegments == 1) {
+            boolean useRecoverySource = randomBoolean() || omitSourceAllTheTime;
+            ParsedDocument doc = testParsedDocument("dummy", null, testDocument(), B_1, null, useRecoverySource);
+            engine.index(indexForDoc(doc));
+            if (useRecoverySource == false) {
+                liveDocsWithSource.add(doc.id());
+            }
+            engine.syncTranslog();
+            globalCheckpoint.set(engine.getLocalCheckpoint());
+            engine.flush(randomBoolean(), true);
+        } else {
+            globalCheckpoint.set(engine.getLocalCheckpoint());
+            engine.syncTranslog();
         }
-        globalCheckpoint.set(engine.getLocalCheckpoint());
-        engine.syncTranslog();
         engine.forceMerge(true, 1, false, false, false);
         assertConsistentHistoryBetweenTranslogAndLuceneIndex(engine, mapperService);
         assertThat(readAllOperationsInLucene(engine, mapperService), hasSize(liveDocsWithSource.size()));
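For context, the noop described in the commit message is plain Lucene behavior rather than anything specific to this test: once an index has been force-merged down to a single segment, another forceMerge(1) finds nothing to merge, so work that only happens inside a merge (such as pruning _recovery_source) never runs. Below is a minimal standalone sketch of that behavior against stock Lucene; the class name, field names, and setup are illustrative assumptions, not code from this commit.

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.ByteBuffersDirectory;

public class ForceMergeNoopDemo {
    public static void main(String[] args) throws Exception {
        try (ByteBuffersDirectory dir = new ByteBuffersDirectory();
             IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()))) {
            // Create several segments: each commit flushes the pending doc into a new one.
            for (int i = 0; i < 5; i++) {
                Document doc = new Document();
                doc.add(new StringField("id", "doc" + i, Field.Store.YES));
                writer.addDocument(doc);
                writer.commit();
            }
            writer.forceMerge(1); // merges the five segments into one
            writer.commit();
            try (DirectoryReader reader = DirectoryReader.open(dir)) {
                // One leaf reader per segment; prints 1 after the merge.
                System.out.println("segments: " + reader.leaves().size());
            }
            // A second forceMerge(1) finds nothing to merge and returns without
            // rewriting the segment, so any cleanup that only a merge performs never runs.
            writer.forceMerge(1);
        }
    }
}

This is exactly why the patched test first counts the leaves of the engine's DirectoryReader and, when only one segment exists, indexes a throwaway document and flushes to create a second segment before force-merging again.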
