@@ -22,5 +22,6 @@
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
+import org.apache.lucene.util.SetOnce;
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.ActionListener;
@@ -143,6 +144,7 @@
 import org.elasticsearch.env.TestEnvironment;
 import org.elasticsearch.gateway.MetaStateService;
 import org.elasticsearch.gateway.TransportNodesListGatewayStartedShards;
+import org.elasticsearch.index.Index;
 import org.elasticsearch.index.analysis.AnalysisRegistry;
 import org.elasticsearch.index.seqno.GlobalCheckpointSyncAction;
 import org.elasticsearch.index.seqno.RetentionLeaseSyncer;
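Why the new `org.elasticsearch.index.Index` import matters here: unlike a bare index name, an `Index` couples the name with the index UUID, and its equality check compares both. That is what lets the final assertion in this diff distinguish the original index from a same-named index recreated mid-snapshot. A simplified, hypothetical stand-in (`IndexId` is not the real class) sketching that equality contract:

```java
import java.util.Objects;

// Hypothetical stand-in for org.elasticsearch.index.Index: a name plus a UUID.
// Deleting and recreating "test" yields a new UUID, so the two values differ.
final class IndexId {
    final String name;
    final String uuid;

    IndexId(String name, String uuid) {
        this.name = name;
        this.uuid = uuid;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o instanceof IndexId == false) return false;
        IndexId other = (IndexId) o;
        // Equality requires both fields, so a recreated namesake does not match.
        return name.equals(other.name) && uuid.equals(other.uuid);
    }

    @Override
    public int hashCode() {
        return Objects.hash(name, uuid);
    }
}
```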
@@ -211,5 +213,6 @@
 import static org.hamcrest.Matchers.empty;
 import static org.hamcrest.Matchers.hasSize;
 import static org.hamcrest.Matchers.instanceOf;
+import static org.hamcrest.Matchers.is;
 import static org.hamcrest.Matchers.lessThanOrEqualTo;
 import static org.mockito.Mockito.mock;
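The new `org.apache.lucene.util.SetOnce` import is a small Lucene utility: a holder whose value can be written exactly once and read from any thread, which is handy for capturing a value inside an asynchronous callback, as the test does below. A minimal sketch of its semantics (the class and exception names are real Lucene API; the demo itself is illustrative):

```java
import org.apache.lucene.util.SetOnce;

public class SetOnceDemo {
    public static void main(String[] args) {
        SetOnce<String> holder = new SetOnce<>();
        holder.set("first");               // first write succeeds
        System.out.println(holder.get());  // prints "first"
        try {
            holder.set("second");          // any further write is rejected
        } catch (SetOnce.AlreadySetException e) {
            System.out.println("second write rejected");
        }
    }
}
```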
@@ -503,7 +506,7 @@ public void testConcurrentSnapshotCreateAndDeleteOther() {
         }
     }
 
-    public void testConcurrentSnapshotDeleteAndDeleteIndex() {
+    public void testConcurrentSnapshotDeleteAndDeleteIndex() throws IOException {
         setupTestCluster(randomFrom(1, 3, 5), randomIntBetween(2, 10));
 
         String repoName = "repo";
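`setupTestCluster(randomFrom(1, 3, 5), randomIntBetween(2, 10))` sizes the simulated cluster randomly: one of 1, 3, or 5 master-eligible nodes, and 2 to 10 data nodes inclusive. The helpers come from the randomized-testing framework behind `ESTestCase`; a rough plain-`java.util.Random` equivalent (illustrative only, the real helpers draw from the reproducible per-test seed):

```java
import java.util.Random;

public class RandomPicksSketch {
    public static void main(String[] args) {
        Random random = new Random();
        // randomFrom(1, 3, 5): pick one element uniformly.
        int[] choices = {1, 3, 5};
        int masterNodes = choices[random.nextInt(choices.length)];
        // randomIntBetween(2, 10): uniform pick with inclusive bounds.
        int dataNodes = 2 + random.nextInt(10 - 2 + 1);
        System.out.println(masterNodes + " master node(s), " + dataNodes + " data node(s)");
    }
}
```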
@@ -514,11 +517,13 @@ public void testConcurrentSnapshotDeleteAndDeleteIndex() {
             testClusterNodes.currentMaster(testClusterNodes.nodes.values().iterator().next().clusterService.state());
 
         final StepListener<Collection<CreateIndexResponse>> createIndicesListener = new StepListener<>();
+        final int indices = randomIntBetween(5, 20);
 
+        final SetOnce<Index> firstIndex = new SetOnce<>();
         continueOrDie(createRepoAndIndex(repoName, index, 1), createIndexResponse -> {
+            firstIndex.set(masterNode.clusterService.state().metaData().index(index).getIndex());
             // create a few more indices to make it more likely that the subsequent index delete operation happens before snapshot
             // finalization
-            final int indices = randomIntBetween(5, 20);
             final GroupedActionListener<CreateIndexResponse> listener = new GroupedActionListener<>(createIndicesListener, indices);
             for (int i = 0; i < indices; ++i) {
                 client().admin().indices().create(new CreateIndexRequest("index-" + i), listener);
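`GroupedActionListener` fans a known number of asynchronous responses into one downstream listener: here it completes `createIndicesListener` only after all `indices` create-index calls have responded. A self-contained sketch of that aggregation idea, using a hypothetical `Listener` interface rather than Elasticsearch's real `ActionListener`:

```java
import java.util.Collection;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicInteger;

// Hypothetical stand-in for org.elasticsearch.action.ActionListener.
interface Listener<T> {
    void onResponse(T response);
    void onFailure(Exception e);
}

// Collects groupSize responses, then completes the delegate exactly once.
final class GroupedListenerSketch<T> implements Listener<T> {
    private final Listener<Collection<T>> delegate;
    private final AtomicInteger pending;
    private final Queue<T> results = new ConcurrentLinkedQueue<>();

    GroupedListenerSketch(Listener<Collection<T>> delegate, int groupSize) {
        this.delegate = delegate;
        this.pending = new AtomicInteger(groupSize);
    }

    @Override
    public void onResponse(T response) {
        results.add(response);
        if (pending.decrementAndGet() == 0) {
            delegate.onResponse(results); // all callers have reported back
        }
    }

    @Override
    public void onFailure(Exception e) {
        delegate.onFailure(e); // simplified: the real class fails the group only once
    }
}
```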
@@ -527,23 +532,54 @@ public void testConcurrentSnapshotDeleteAndDeleteIndex() {
 
         final StepListener<CreateSnapshotResponse> createSnapshotResponseStepListener = new StepListener<>();
 
+        final boolean partialSnapshot = randomBoolean();
+
         continueOrDie(createIndicesListener, createIndexResponses ->
             client().admin().cluster().prepareCreateSnapshot(repoName, snapshotName).setWaitForCompletion(false)
-                .execute(createSnapshotResponseStepListener));
+                .setPartial(partialSnapshot).execute(createSnapshotResponseStepListener));
 
         continueOrDie(createSnapshotResponseStepListener,
-            createSnapshotResponse -> client().admin().indices().delete(new DeleteIndexRequest(index), noopListener()));
+            createSnapshotResponse -> client().admin().indices().delete(new DeleteIndexRequest(index), new ActionListener<>() {
+                @Override
+                public void onResponse(AcknowledgedResponse acknowledgedResponse) {
+                    if (partialSnapshot) {
+                        // Recreate index by the same name to test that we don't snapshot conflicting metadata in this scenario
+                        client().admin().indices().create(new CreateIndexRequest(index), noopListener());
+                    }
+                }
+
+                @Override
+                public void onFailure(Exception e) {
+                    if (partialSnapshot) {
+                        throw new AssertionError("Delete index should always work during partial snapshots", e);
+                    }
+                }
+            }));
 
         deterministicTaskQueue.runAllRunnableTasks();
 
         SnapshotsInProgress finalSnapshotsInProgress = masterNode.clusterService.state().custom(SnapshotsInProgress.TYPE);
         assertFalse(finalSnapshotsInProgress.entries().stream().anyMatch(entry -> entry.state().completed() == false));
         final Repository repository = masterNode.repositoriesService.repository(repoName);
-        Collection<SnapshotId> snapshotIds = getRepositoryData(repository).getSnapshotIds();
+        final RepositoryData repositoryData = getRepositoryData(repository);
+        Collection<SnapshotId> snapshotIds = repositoryData.getSnapshotIds();
         assertThat(snapshotIds, hasSize(1));
 
         final SnapshotInfo snapshotInfo = repository.getSnapshotInfo(snapshotIds.iterator().next());
         assertEquals(SnapshotState.SUCCESS, snapshotInfo.state());
+        if (partialSnapshot) {
+            // Single shard for each index so we either get all indices or all except for the deleted index
+            assertThat(snapshotInfo.successfulShards(), either(is(indices + 1)).or(is(indices)));
+            if (snapshotInfo.successfulShards() == indices + 1) {
+                final IndexMetaData indexMetaData =
+                    repository.getSnapshotIndexMetaData(snapshotInfo.snapshotId(), repositoryData.resolveIndexId(index));
+                // Make sure we snapshotted the metadata of this index and not the recreated version
+                assertEquals(indexMetaData.getIndex(), firstIndex.get());
+            }
+        } else {
+            // Index delete must be blocked for non-partial snapshots and we get a snapshot for every index
+            assertEquals(snapshotInfo.successfulShards(), indices + 1);
+        }
         assertEquals(0, snapshotInfo.failedShards());
     }
 
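The shard arithmetic behind the new assertions: `createRepoAndIndex(repoName, index, 1)` creates the original index and the loop adds `indices` more, each with a single shard. When the snapshot is non-partial, the index delete is blocked, so all `indices + 1` shards must succeed; when it is partial, the delete races snapshot finalization, so either every shard or every shard except the deleted index's one makes it in. A small demo of the Hamcrest `either(...).or(...)` combinator used above (the values are made up):

```java
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.either;
import static org.hamcrest.Matchers.is;

public class EitherMatcherDemo {
    public static void main(String[] args) {
        int indices = 7;            // pretend randomIntBetween(5, 20) returned 7
        int successfulShards = 7;   // the deleted index's shard was skipped
        // Passes for 7 (index deleted mid-snapshot) and for 8 (delete lost the race).
        assertThat(successfulShards, either(is(indices + 1)).or(is(indices)));
        System.out.println("matched");
    }
}
```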