  6 |   6 |  */
  7 |   7 |  package org.elasticsearch.snapshots;
  8 |   8 |
    |   9 | +import org.apache.logging.log4j.LogManager;
    |  10 | +import org.apache.logging.log4j.Logger;
    |  11 | +import org.apache.logging.log4j.message.ParameterizedMessage;
    |  12 | +import org.apache.lucene.index.CorruptIndexException;
  9 |  13 |  import org.apache.lucene.index.DirectoryReader;
 10 |  14 |  import org.apache.lucene.index.IndexCommit;
 11 |  15 |  import org.apache.lucene.index.SegmentInfos;

 22 |  26 |  import org.elasticsearch.cluster.metadata.Metadata;
 23 |  27 |  import org.elasticsearch.cluster.metadata.RepositoryMetadata;
 24 |  28 |  import org.elasticsearch.common.Strings;
    |  29 | +import org.elasticsearch.common.lucene.Lucene;
 25 |  30 |  import org.elasticsearch.common.lucene.search.Queries;
 26 |  31 |  import org.elasticsearch.common.settings.Setting;
 27 |  32 |  import org.elasticsearch.common.settings.Settings;

 43 |  48 |  import java.io.Closeable;
 44 |  49 |  import java.io.IOException;
 45 |  50 |  import java.io.UncheckedIOException;
    |  51 | +import java.nio.file.FileAlreadyExistsException;
    |  52 | +import java.nio.file.NoSuchFileException;
 46 |  53 |  import java.nio.file.Path;
 47 |  54 |  import java.util.ArrayList;
 48 |  55 |  import java.util.Collection;

@@ -73,6 +80,8 @@ public final class SourceOnlySnapshotRepository extends FilterRepository {
 73 |  80 |      public static final Setting<Boolean> SOURCE_ONLY = Setting.boolSetting("index.source_only", false, Setting
 74 |  81 |          .Property.IndexScope, Setting.Property.Final, Setting.Property.PrivateIndex);
 75 |  82 |
    |  83 | +    private static final Logger logger = LogManager.getLogger(SourceOnlySnapshotRepository.class);
    |  84 | +
 76 |  85 |      private static final String SNAPSHOT_DIR_NAME = "_snapshot";
 77 |  86 |
 78 |  87 |      SourceOnlySnapshotRepository(Repository in) {

@@ -146,8 +155,16 @@ protected void closeInternal() {
146 | 155 |              }, Store.OnClose.EMPTY);
147 | 156 |              Supplier<Query> querySupplier = mapperService.hasNested() ? Queries::newNestedFilter : null;
148 | 157 |              // SourceOnlySnapshot will take care of soft- and hard-deletes no special casing needed here
149 |     | -            SourceOnlySnapshot snapshot = new SourceOnlySnapshot(overlayDir, querySupplier);
150 |     | -            snapshot.syncSnapshot(snapshotIndexCommit);
    | 158 | +            SourceOnlySnapshot snapshot;
    | 159 | +            snapshot = new SourceOnlySnapshot(overlayDir, querySupplier);
    | 160 | +            try {
    | 161 | +                snapshot.syncSnapshot(snapshotIndexCommit);
    | 162 | +            } catch (NoSuchFileException | CorruptIndexException | FileAlreadyExistsException e) {
    | 163 | +                logger.warn(() -> new ParameterizedMessage(
    | 164 | +                    "Existing staging directory [{}] appears corrupted and will be pruned and recreated.", snapPath), e);
    | 165 | +                Lucene.cleanLuceneIndex(overlayDir);
    | 166 | +                snapshot.syncSnapshot(snapshotIndexCommit);
    | 167 | +            }
151 | 168 |              // we will use the lucene doc ID as the seq ID so we set the local checkpoint to maxDoc with a new index UUID
152 | 169 |              SegmentInfos segmentInfos = tempStore.readLastCommittedSegmentsInfo();
153 | 170 |              final long maxDoc = segmentInfos.totalMaxDoc();
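The new catch block is a prune-and-retry: if syncing into the existing `_snapshot` staging directory fails because files are missing, Lucene's corruption checks trip, or leftovers from an aborted run collide with new files, the directory is wiped with `Lucene.cleanLuceneIndex` and the sync is attempted once more against the current index commit. A minimal standalone sketch of that pattern follows; `PruneAndRetry`, `SyncAction`, `syncWithRetry`, and `stagingDir` are illustrative names, not part of this change.

```java
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.store.Directory;
import org.elasticsearch.common.lucene.Lucene;

import java.io.IOException;
import java.nio.file.FileAlreadyExistsException;
import java.nio.file.NoSuchFileException;

final class PruneAndRetry {

    /** Stand-in for SourceOnlySnapshot#syncSnapshot(IndexCommit). */
    interface SyncAction {
        void run() throws IOException;
    }

    /**
     * Runs the sync once; if the staging directory looks corrupted or half-written,
     * clears it and retries a single time. A second failure propagates to the caller.
     */
    static void syncWithRetry(SyncAction sync, Directory stagingDir) throws IOException {
        try {
            sync.run();
        } catch (NoSuchFileException | CorruptIndexException | FileAlreadyExistsException e) {
            // The staging directory only caches work from earlier snapshot attempts,
            // so it is safe to discard it and rebuild from the current index commit.
            Lucene.cleanLuceneIndex(stagingDir);
            sync.run();
        }
    }
}
```

Presumably only these three exception types are retried because they are what Lucene surfaces for a missing, checksum-corrupted, or partially written staging index; any other IOException still propagates and fails the shard snapshot as before.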