Skip to content

Commit dc1b548

Browse files
committed
Delete shard store files before restoring a snapshot (#27476)
Pull request #20220 added a change where the store files that have the same name but are different from the ones in the snapshot are deleted first before the snapshot is restored. This logic was based on the `Store.RecoveryDiff.different` set of files which works by computing a diff between an existing store and a snapshot. This works well when the files on the filesystem form a valid shard store, i.e. there's a `segments` file and store files are not corrupted. Otherwise, the existing store's snapshot metadata cannot be read (using Store#snapshotStoreMetadata()) and an exception is thrown (CorruptIndexException, IndexFormatTooOldException etc) which is later caught at the beginning of the restore process (see RestoreContext#restore()) and is translated into an empty store metadata (Store.MetadataSnapshot.EMPTY). This will make the deletion of different files introduced in #20220 useless as the set of files will always be empty even when store files exist on the filesystem. And if some files are present within the store directory, then restoring a snapshot with files with the same names will fail with a FileAlreadyExistsException. This is part of the #26865 issue. There are various cases where some files could exist in the store directory before a snapshot is restored. One that Igor identified is a restore attempt that failed on a node and only the first files were restored, then the shard is allocated again to the same node and the restore starts again (but fails because of existing files). Another one is when some files of a closed index are corrupted / deleted and the index is restored. This commit adds a test that uses the infrastructure provided by IndexShardTestCase in order to test that restoring a shard succeeds even when files with same names exist on the filesystem. Related to #26865
1 parent 03990f4 commit dc1b548

File tree

4 files changed

+216
-17
lines changed

4 files changed

+216
-17
lines changed

core/src/main/java/org/elasticsearch/index/store/Store.java

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -716,7 +716,7 @@ public String toString() {
716716

717717
/**
718718
* Represents a snapshot of the current directory build from the latest Lucene commit.
719-
* Only files that are part of the last commit are considered in this datastrucutre.
719+
* Only files that are part of the last commit are considered in this datastructure.
720720
* For backwards compatibility the snapshot might include legacy checksums that
721721
* are derived from a dedicated checksum file written by older elasticsearch version pre 1.3
722722
* <p>

core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java

Lines changed: 27 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -35,7 +35,6 @@
3535
import org.apache.lucene.store.RateLimiter;
3636
import org.apache.lucene.util.BytesRef;
3737
import org.apache.lucene.util.BytesRefBuilder;
38-
import org.apache.lucene.util.IOUtils;
3938
import org.elasticsearch.ElasticsearchParseException;
4039
import org.elasticsearch.ExceptionsHelper;
4140
import org.elasticsearch.ResourceNotFoundException;
@@ -110,6 +109,7 @@
110109
import java.nio.file.FileAlreadyExistsException;
111110
import java.nio.file.NoSuchFileException;
112111
import java.util.ArrayList;
112+
import java.util.Arrays;
113113
import java.util.Collection;
114114
import java.util.Collections;
115115
import java.util.HashMap;
@@ -1610,6 +1610,9 @@ public void restore() throws IOException {
16101610
SnapshotFiles snapshotFiles = new SnapshotFiles(snapshot.snapshot(), snapshot.indexFiles());
16111611
Store.MetadataSnapshot recoveryTargetMetadata;
16121612
try {
1613+
// this will throw an IOException if the store has no segments infos file. The
1614+
// store can still have existing files but they will be deleted just before being
1615+
// restored.
16131616
recoveryTargetMetadata = targetShard.snapshotStoreMetadata();
16141617
} catch (IndexNotFoundException e) {
16151618
// happens when restore to an empty shard, not a big deal
@@ -1637,7 +1640,14 @@ public void restore() throws IOException {
16371640
snapshotMetaData.put(fileInfo.metadata().name(), fileInfo.metadata());
16381641
fileInfos.put(fileInfo.metadata().name(), fileInfo);
16391642
}
1643+
16401644
final Store.MetadataSnapshot sourceMetaData = new Store.MetadataSnapshot(unmodifiableMap(snapshotMetaData), emptyMap(), 0);
1645+
1646+
final StoreFileMetaData restoredSegmentsFile = sourceMetaData.getSegmentsFile();
1647+
if (restoredSegmentsFile == null) {
1648+
throw new IndexShardRestoreFailedException(shardId, "Snapshot has no segments file");
1649+
}
1650+
16411651
final Store.RecoveryDiff diff = sourceMetaData.recoveryDiff(recoveryTargetMetadata);
16421652
for (StoreFileMetaData md : diff.identical) {
16431653
BlobStoreIndexShardSnapshot.FileInfo fileInfo = fileInfos.get(md.name());
@@ -1664,29 +1674,31 @@ public void restore() throws IOException {
16641674
logger.trace("no files to recover, all exists within the local store");
16651675
}
16661676

1667-
if (logger.isTraceEnabled()) {
1668-
logger.trace("[{}] [{}] recovering_files [{}] with total_size [{}], reusing_files [{}] with reused_size [{}]", shardId, snapshotId,
1669-
index.totalRecoverFiles(), new ByteSizeValue(index.totalRecoverBytes()), index.reusedFileCount(), new ByteSizeValue(index.reusedFileCount()));
1670-
}
16711677
try {
1672-
// first, delete pre-existing files in the store that have the same name but are
1673-
// different (i.e. different length/checksum) from those being restored in the snapshot
1674-
for (final StoreFileMetaData storeFileMetaData : diff.different) {
1675-
IOUtils.deleteFiles(store.directory(), storeFileMetaData.name());
1676-
}
1678+
// list of all existing store files
1679+
final List<String> deleteIfExistFiles = Arrays.asList(store.directory().listAll());
1680+
16771681
// restore the files from the snapshot to the Lucene store
16781682
for (final BlobStoreIndexShardSnapshot.FileInfo fileToRecover : filesToRecover) {
1683+
// if a file with a same physical name already exist in the store we need to delete it
1684+
// before restoring it from the snapshot. We could be lenient and try to reuse the existing
1685+
// store files (and compare their names/length/checksum again with the snapshot files) but to
1686+
// avoid extra complexity we simply delete them and restore them again like StoreRecovery
1687+
// does with dangling indices. Any existing store file that is not restored from the snapshot
1688+
// will be clean up by RecoveryTarget.cleanFiles().
1689+
final String physicalName = fileToRecover.physicalName();
1690+
if (deleteIfExistFiles.contains(physicalName)) {
1691+
logger.trace("[{}] [{}] deleting pre-existing file [{}]", shardId, snapshotId, physicalName);
1692+
store.directory().deleteFile(physicalName);
1693+
}
1694+
16791695
logger.trace("[{}] [{}] restoring file [{}]", shardId, snapshotId, fileToRecover.name());
16801696
restoreFile(fileToRecover, store);
16811697
}
16821698
} catch (IOException ex) {
16831699
throw new IndexShardRestoreFailedException(shardId, "Failed to recover index", ex);
16841700
}
1685-
final StoreFileMetaData restoredSegmentsFile = sourceMetaData.getSegmentsFile();
1686-
if (recoveryTargetMetadata == null) {
1687-
throw new IndexShardRestoreFailedException(shardId, "Snapshot has no segments file");
1688-
}
1689-
assert restoredSegmentsFile != null;
1701+
16901702
// read the snapshot data persisted
16911703
final SegmentInfos segmentCommitInfos;
16921704
try {
@@ -1761,5 +1773,4 @@ private void restoreFile(final BlobStoreIndexShardSnapshot.FileInfo fileInfo, fi
17611773
}
17621774
}
17631775
}
1764-
17651776
}
Lines changed: 142 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,142 @@
1+
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.repositories.blobstore;

import org.apache.lucene.store.Directory;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.TestUtil;
import org.elasticsearch.cluster.metadata.RepositoryMetaData;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingHelper;
import org.elasticsearch.common.UUIDs;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.IndexShardState;
import org.elasticsearch.index.shard.IndexShardTestCase;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.store.Store;
import org.elasticsearch.index.store.StoreFileMetaData;
import org.elasticsearch.repositories.IndexId;
import org.elasticsearch.repositories.Repository;
import org.elasticsearch.repositories.fs.FsRepository;
import org.elasticsearch.snapshots.Snapshot;
import org.elasticsearch.snapshots.SnapshotId;

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Arrays;
import java.util.List;

import static org.elasticsearch.cluster.routing.RecoverySource.StoreRecoverySource.EXISTING_STORE_INSTANCE;

/**
 * Tests the behavior of {@link BlobStoreRepository} when it restores a shard
 * from a snapshot while some files with the same names already exist on disk.
 */
public class BlobStoreRepositoryRestoreTests extends IndexShardTestCase {

    /**
     * Restoring a snapshot that contains multiple files must succeed even when
     * some files with identical names are already present in the shard store.
     */
    public void testRestoreSnapshotWithExistingFiles() throws IOException {
        final IndexId indexId = new IndexId(randomAlphaOfLength(10), UUIDs.randomBase64UUID());
        final ShardId shardId = new ShardId(indexId.getName(), indexId.getId(), 0);

        IndexShard shard = newShard(shardId, true);
        try {
            // index some documents, flushing occasionally so the store holds several segments
            final int docCount = scaledRandomIntBetween(1, 500);
            recoveryShardFromStore(shard);
            for (int i = 0; i < docCount; i++) {
                indexDoc(shard, "doc", Integer.toString(i));
                if (rarely()) {
                    flushShard(shard, false);
                }
            }
            assertDocCount(shard, docCount);

            // snapshot the shard into a freshly created repository
            final Repository repository = createRepository();
            final Snapshot snapshot = new Snapshot(repository.getMetadata().name(), new SnapshotId(randomAlphaOfLength(10), "_uuid"));
            snapshotShard(shard, snapshot, repository);

            // remember the files the store contained at snapshot time
            final Store.MetadataSnapshot storeMetadata = shard.snapshotStoreMetadata();
            assertFalse(storeMetadata.asMap().isEmpty());

            closeShards(shard);

            // delete a random strict subset of the store files, leaving the rest on disk
            final List<String> filesToDelete =
                randomSubsetOf(randomIntBetween(1, storeMetadata.size() - 1), storeMetadata.asMap().keySet());
            for (final String fileToDelete : filesToDelete) {
                Files.delete(shard.shardPath().resolveIndex().resolve(fileToDelete));
            }

            // reopen a shard over the same (now partially populated) store directory
            final ShardRouting routing = ShardRoutingHelper.initWithSameId(shard.routingEntry(), EXISTING_STORE_INSTANCE);
            shard = newShard(routing, shard.shardPath(), shard.indexSettings().getIndexMetaData(), null);

            // restoring must succeed despite the leftover files with matching names
            recoverShardFromSnapshot(shard, snapshot, repository);

            // the restored shard must form a consistent Lucene index
            TestUtil.checkIndex(shard.store().directory());

            // every file recorded at snapshot time must be back with its original length
            final Directory directory = shard.store().directory();
            final List<String> restoredFiles = Arrays.asList(directory.listAll());
            for (final StoreFileMetaData storeFile : storeMetadata) {
                final String fileName = storeFile.name();
                assertTrue("File [" + fileName + "] does not exist in store directory", restoredFiles.contains(fileName));
                assertEquals(storeFile.length(), shard.store().directory().fileLength(fileName));
            }
        } finally {
            // close the shard and its store unless the test already closed it
            if (shard != null && shard.state() != IndexShardState.CLOSED) {
                try {
                    shard.close("test", false);
                } finally {
                    IOUtils.close(shard.store());
                }
            }
        }
    }

    /** Create a {@link Repository} with a random name **/
    private Repository createRepository() throws IOException {
        final Settings settings = Settings.builder().put("location", randomAlphaOfLength(10)).build();
        final RepositoryMetaData metaData = new RepositoryMetaData(randomAlphaOfLength(10), FsRepository.TYPE, settings);
        return new FsRepository(metaData, createEnvironment(), xContentRegistry());
    }

    /** Create a {@link Environment} with random path.home and path.repo **/
    private Environment createEnvironment() {
        final Path home = createTempDir();
        return new Environment(Settings.builder()
                .put(Environment.PATH_HOME_SETTING.getKey(), home.toAbsolutePath())
                .put(Environment.PATH_REPO_SETTING.getKey(), home.resolve("repo").toAbsolutePath())
                .build());
    }
}

test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java

Lines changed: 46 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,7 @@
1919
package org.elasticsearch.index.shard;
2020

2121
import org.apache.lucene.document.Document;
22+
import org.apache.lucene.index.IndexCommit;
2223
import org.apache.lucene.index.IndexNotFoundException;
2324
import org.apache.lucene.index.LeafReader;
2425
import org.apache.lucene.index.LeafReaderContext;
@@ -44,6 +45,7 @@
4445
import org.elasticsearch.common.util.BigArrays;
4546
import org.elasticsearch.common.xcontent.XContentType;
4647
import org.elasticsearch.env.NodeEnvironment;
48+
import org.elasticsearch.index.Index;
4749
import org.elasticsearch.index.IndexSettings;
4850
import org.elasticsearch.index.MapperTestUtils;
4951
import org.elasticsearch.index.VersionType;
@@ -57,6 +59,7 @@
5759
import org.elasticsearch.index.mapper.Uid;
5860
import org.elasticsearch.index.mapper.UidFieldMapper;
5961
import org.elasticsearch.index.similarity.SimilarityService;
62+
import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus;
6063
import org.elasticsearch.index.store.DirectoryService;
6164
import org.elasticsearch.index.store.Store;
6265
import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
@@ -67,6 +70,9 @@
6770
import org.elasticsearch.indices.recovery.RecoveryState;
6871
import org.elasticsearch.indices.recovery.RecoveryTarget;
6972
import org.elasticsearch.indices.recovery.StartRecoveryRequest;
73+
import org.elasticsearch.repositories.IndexId;
74+
import org.elasticsearch.repositories.Repository;
75+
import org.elasticsearch.snapshots.Snapshot;
7076
import org.elasticsearch.test.DummyShardLock;
7177
import org.elasticsearch.test.ESTestCase;
7278
import org.elasticsearch.threadpool.TestThreadPool;
@@ -80,6 +86,7 @@
8086
import java.util.concurrent.TimeUnit;
8187
import java.util.function.BiFunction;
8288

89+
import static org.elasticsearch.cluster.routing.TestShardRouting.newShardRouting;
8390
import static org.hamcrest.Matchers.contains;
8491
import static org.hamcrest.Matchers.hasSize;
8592

@@ -481,4 +488,43 @@ protected void flushShard(IndexShard shard) {
481488
protected void flushShard(IndexShard shard, boolean force) {
482489
shard.flush(new FlushRequest(shard.shardId().getIndexName()).force(force));
483490
}
491+
492+
/** Recover a shard from a snapshot using a given repository **/
493+
protected void recoverShardFromSnapshot(final IndexShard shard,
494+
final Snapshot snapshot,
495+
final Repository repository) throws IOException {
496+
final Version version = Version.CURRENT;
497+
final ShardId shardId = shard.shardId();
498+
final String index = shardId.getIndexName();
499+
final IndexId indexId = new IndexId(shardId.getIndex().getName(), shardId.getIndex().getUUID());
500+
final DiscoveryNode node = getFakeDiscoNode(shard.routingEntry().currentNodeId());
501+
final RecoverySource.SnapshotRecoverySource recoverySource = new RecoverySource.SnapshotRecoverySource(snapshot, version, index);
502+
final ShardRouting shardRouting = newShardRouting(shardId, node.getId(), true, ShardRoutingState.INITIALIZING, recoverySource);
503+
504+
shard.markAsRecovering("from snapshot", new RecoveryState(shardRouting, node, null));
505+
repository.restoreShard(shard, snapshot.getSnapshotId(), version, indexId, shard.shardId(), shard.recoveryState());
506+
}
507+
508+
/** Snapshot a shard using a given repository **/
509+
protected void snapshotShard(final IndexShard shard,
510+
final Snapshot snapshot,
511+
final Repository repository) throws IOException {
512+
final IndexShardSnapshotStatus snapshotStatus = new IndexShardSnapshotStatus();
513+
514+
IndexCommit indexCommit = null;
515+
try {
516+
indexCommit = shard.acquireIndexCommit(true);
517+
Index index = shard.shardId().getIndex();
518+
IndexId indexId = new IndexId(index.getName(), index.getUUID());
519+
520+
repository.snapshotShard(shard, snapshot.getSnapshotId(), indexId, indexCommit, snapshotStatus);
521+
} finally {
522+
if (indexCommit != null) {
523+
shard.releaseIndexCommit(indexCommit);
524+
}
525+
}
526+
assertEquals(IndexShardSnapshotStatus.Stage.DONE, snapshotStatus.stage());
527+
assertEquals(shard.snapshotStoreMetadata().size(), snapshotStatus.numberOfFiles());
528+
assertNull(snapshotStatus.failure());
529+
}
484530
}

0 commit comments

Comments
 (0)