@@ -21,6 +21,7 @@
 
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
+import org.apache.lucene.util.SetOnce;
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.ActionListener;
@@ -143,6 +144,7 @@
 import org.elasticsearch.env.TestEnvironment;
 import org.elasticsearch.gateway.MetaStateService;
 import org.elasticsearch.gateway.TransportNodesListGatewayStartedShards;
+import org.elasticsearch.index.Index;
 import org.elasticsearch.index.analysis.AnalysisRegistry;
 import org.elasticsearch.index.seqno.GlobalCheckpointSyncAction;
 import org.elasticsearch.index.seqno.RetentionLeaseSyncer;
@@ -213,6 +215,7 @@
 import static org.hamcrest.Matchers.empty;
 import static org.hamcrest.Matchers.hasSize;
 import static org.hamcrest.Matchers.instanceOf;
+import static org.hamcrest.Matchers.is;
 import static org.hamcrest.Matchers.lessThanOrEqualTo;
 import static org.mockito.Mockito.mock;
 
@@ -505,7 +508,7 @@ public void testConcurrentSnapshotCreateAndDeleteOther() {
         }
     }
 
-    public void testConcurrentSnapshotDeleteAndDeleteIndex() {
+    public void testConcurrentSnapshotDeleteAndDeleteIndex() throws IOException {
         setupTestCluster(randomFrom(1, 3, 5), randomIntBetween(2, 10));
 
         String repoName = "repo";
@@ -516,11 +519,13 @@ public void testConcurrentSnapshotDeleteAndDeleteIndex() {
             testClusterNodes.currentMaster(testClusterNodes.nodes.values().iterator().next().clusterService.state());
 
         final StepListener<Collection<CreateIndexResponse>> createIndicesListener = new StepListener<>();
+        final int indices = randomIntBetween(5, 20);
 
+        final SetOnce<Index> firstIndex = new SetOnce<>();
         continueOrDie(createRepoAndIndex(repoName, index, 1), createIndexResponse -> {
+            firstIndex.set(masterNode.clusterService.state().metaData().index(index).getIndex());
             // create a few more indices to make it more likely that the subsequent index delete operation happens before snapshot
             // finalization
-            final int indices = randomIntBetween(5, 20);
             final GroupedActionListener<CreateIndexResponse> listener = new GroupedActionListener<>(createIndicesListener, indices);
             for (int i = 0; i < indices; ++i) {
                 client().admin().indices().create(new CreateIndexRequest("index-" + i), listener);
@@ -529,23 +534,55 @@ public void testConcurrentSnapshotDeleteAndDeleteIndex() {
 
         final StepListener<CreateSnapshotResponse> createSnapshotResponseStepListener = new StepListener<>();
 
+        final boolean partialSnapshot = randomBoolean();
+
         continueOrDie(createIndicesListener, createIndexResponses ->
             client().admin().cluster().prepareCreateSnapshot(repoName, snapshotName).setWaitForCompletion(false)
-                .execute(createSnapshotResponseStepListener));
+                .setPartial(partialSnapshot).execute(createSnapshotResponseStepListener));
 
         continueOrDie(createSnapshotResponseStepListener,
-            createSnapshotResponse -> client().admin().indices().delete(new DeleteIndexRequest(index), noopListener()));
+            createSnapshotResponse -> client().admin().indices().delete(new DeleteIndexRequest(index),
+                new ActionListener<AcknowledgedResponse>() {
+                    @Override
+                    public void onResponse(AcknowledgedResponse acknowledgedResponse) {
+                        if (partialSnapshot) {
+                            // Recreate index by the same name to test that we don't snapshot conflicting metadata in this scenario
+                            client().admin().indices().create(new CreateIndexRequest(index), noopListener());
+                        }
+                    }
+
+                    @Override
+                    public void onFailure(Exception e) {
+                        if (partialSnapshot) {
+                            throw new AssertionError("Delete index should always work during partial snapshots", e);
+                        }
+                    }
+                }));
 
         deterministicTaskQueue.runAllRunnableTasks();
 
         SnapshotsInProgress finalSnapshotsInProgress = masterNode.clusterService.state().custom(SnapshotsInProgress.TYPE);
         assertFalse(finalSnapshotsInProgress.entries().stream().anyMatch(entry -> entry.state().completed() == false));
         final Repository repository = masterNode.repositoriesService.repository(repoName);
-        Collection<SnapshotId> snapshotIds = getRepositoryData(repository).getSnapshotIds();
+        final RepositoryData repositoryData = getRepositoryData(repository);
+        Collection<SnapshotId> snapshotIds = repositoryData.getSnapshotIds();
         assertThat(snapshotIds, hasSize(1));
 
         final SnapshotInfo snapshotInfo = repository.getSnapshotInfo(snapshotIds.iterator().next());
         assertEquals(SnapshotState.SUCCESS, snapshotInfo.state());
+        if (partialSnapshot) {
+            // Single shard for each index so we either get all indices or all except for the deleted index
+            assertThat(snapshotInfo.successfulShards(), either(is(indices + 1)).or(is(indices)));
+            if (snapshotInfo.successfulShards() == indices + 1) {
+                final IndexMetaData indexMetaData =
+                    repository.getSnapshotIndexMetaData(snapshotInfo.snapshotId(), repositoryData.resolveIndexId(index));
+                // Make sure we snapshotted the metadata of this index and not the recreated version
+                assertEquals(indexMetaData.getIndex(), firstIndex.get());
+            }
+        } else {
+            // Index delete must be blocked for non-partial snapshots and we get a snapshot for every index
+            assertEquals(snapshotInfo.successfulShards(), indices + 1);
+        }
         assertEquals(0, snapshotInfo.failedShards());
     }
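
The new assertions lean on two small utilities that may be unfamiliar. The standalone sketch below (not part of this commit; the class name and sample values are invented for illustration) shows the behaviour being relied on: Lucene's SetOnce is a write-once holder, which is how the test pins the Index of the original "test" index so a recreated index by the same name can never overwrite it, and Hamcrest's either(...).or(...) builds a matcher that accepts a value matching either alternative, mirroring the "indices + 1 or indices" successful-shard count for partial snapshots. It assumes only lucene-core and hamcrest on the classpath.

    import org.apache.lucene.util.SetOnce;

    import static org.hamcrest.MatcherAssert.assertThat;
    import static org.hamcrest.Matchers.either;
    import static org.hamcrest.Matchers.is;

    public class SetOnceAndEitherSketch {
        public static void main(String[] args) {
            // SetOnce: the first set() wins; a second set() throws
            // SetOnce.AlreadySetException instead of silently overwriting.
            SetOnce<String> firstIndexUuid = new SetOnce<>();
            firstIndexUuid.set("uuid-of-original-index");
            try {
                firstIndexUuid.set("uuid-of-recreated-index");
            } catch (SetOnce.AlreadySetException e) {
                // expected: the holder keeps the original value
            }
            assertThat(firstIndexUuid.get(), is("uuid-of-original-index"));

            // either(...).or(...): passes if the value matches either matcher,
            // e.g. a shard count that may or may not include the deleted index.
            int indices = 5;
            int successfulShards = 6; // could legitimately be 5 or 6
            assertThat(successfulShards, either(is(indices + 1)).or(is(indices)));
        }
    }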