diff --git a/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryCleanupIT.java b/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryCleanupIT.java index 2a660709f487c..ad8f502273611 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryCleanupIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryCleanupIT.java @@ -31,7 +31,6 @@ import java.io.ByteArrayInputStream; import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeUnit; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFutureThrows; import static org.hamcrest.Matchers.is; @@ -49,11 +48,8 @@ public void testMasterFailoverDuringCleanup() throws Exception { ensureStableCluster(nodeCount - 1); logger.info("--> wait for cleanup to finish and disappear from cluster state"); - assertBusy(() -> { - RepositoryCleanupInProgress cleanupInProgress = - client().admin().cluster().prepareState().get().getState().custom(RepositoryCleanupInProgress.TYPE); - assertFalse(cleanupInProgress.hasCleanupInProgress()); - }, 30, TimeUnit.SECONDS); + awaitClusterState(state -> + state.custom(RepositoryCleanupInProgress.TYPE, RepositoryCleanupInProgress.EMPTY).hasCleanupInProgress() == false); } public void testRepeatCleanupsDontRemove() throws Exception { @@ -71,11 +67,8 @@ public void testRepeatCleanupsDontRemove() throws Exception { unblockNode("test-repo", masterNode); logger.info("--> wait for cleanup to finish and disappear from cluster state"); - assertBusy(() -> { - RepositoryCleanupInProgress cleanupInProgress = - client().admin().cluster().prepareState().get().getState().custom(RepositoryCleanupInProgress.TYPE); - assertFalse(cleanupInProgress.hasCleanupInProgress()); - }, 30, TimeUnit.SECONDS); + awaitClusterState(state -> + state.custom(RepositoryCleanupInProgress.TYPE, RepositoryCleanupInProgress.EMPTY).hasCleanupInProgress() == false); } private String startBlockedCleanup(String repoName) throws Exception { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CorruptedBlobStoreRepositoryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CorruptedBlobStoreRepositoryIT.java index 16d6ba09bbd11..8f4db10f1f92b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CorruptedBlobStoreRepositoryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CorruptedBlobStoreRepositoryIT.java @@ -24,10 +24,8 @@ import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.metadata.RepositoriesMetadata; -import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; @@ -149,29 +147,10 @@ public void testFindDanglingLatestGeneration() throws Exception { Files.move(repo.resolve("index-" + beforeMoveGen), repo.resolve("index-" + (beforeMoveGen + 1))); logger.info("--> set next generation as pending in the cluster state"); - final PlainActionFuture<Void> csUpdateFuture = PlainActionFuture.newFuture(); -
internalCluster().getCurrentMasterNodeInstance(ClusterService.class).submitStateUpdateTask("set pending generation", - new ClusterStateUpdateTask() { - @Override - public ClusterState execute(ClusterState currentState) { - return ClusterState.builder(currentState).metadata(Metadata.builder(currentState.getMetadata()) - .putCustom(RepositoriesMetadata.TYPE, - currentState.metadata().<RepositoriesMetadata>custom(RepositoriesMetadata.TYPE).withUpdatedGeneration( - repository.getMetadata().name(), beforeMoveGen, beforeMoveGen + 1)).build()).build(); - } - - @Override - public void onFailure(String source, Exception e) { - csUpdateFuture.onFailure(e); - } - - @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - csUpdateFuture.onResponse(null); - } - } - ); - csUpdateFuture.get(); + updateClusterState(currentState -> ClusterState.builder(currentState).metadata(Metadata.builder(currentState.getMetadata()) + .putCustom(RepositoriesMetadata.TYPE, + currentState.metadata().<RepositoriesMetadata>custom(RepositoriesMetadata.TYPE).withUpdatedGeneration( + repository.getMetadata().name(), beforeMoveGen, beforeMoveGen + 1)).build()).build()); logger.info("--> full cluster restart"); internalCluster().fullRestart(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java index 85b84c7e889b8..19c5e52b82793 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java @@ -33,11 +33,9 @@ import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.client.AdminClient; import org.elasticsearch.client.Client; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.NamedDiff; import org.elasticsearch.cluster.SnapshotsInProgress; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; @@ -46,7 +44,6 @@ import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.CheckedFunction; -import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Priority; import org.elasticsearch.common.Strings; @@ -108,7 +105,6 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; -import java.util.function.Function; import static org.elasticsearch.index.seqno.RetentionLeaseActions.RETAIN_ALL; import static org.elasticsearch.test.NodeRoles.nonMasterNode; @@ -224,8 +220,7 @@ public void testExceptionWhenRestoringPersistentSettings() { createRepository("test-repo", "fs"); createFullSnapshot("test-repo", "test-snap"); - assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").execute().actionGet() - .getSnapshots("test-repo").get(0).state(), equalTo(SnapshotState.SUCCESS)); + assertThat(getSnapshot("test-repo", "test-snap").state(), equalTo(SnapshotState.SUCCESS)); logger.info("--> change the test persistent setting and 
break it"); setSettingValue.accept("new value 2"); @@ -249,7 +244,6 @@ public void testRestoreCustomMetadata() throws Exception { logger.info("--> start node"); internalCluster().startNode(); - Client client = client(); createIndex("test-idx"); logger.info("--> add custom persistent metadata"); updateClusterState(currentState -> { @@ -267,9 +261,7 @@ public void testRestoreCustomMetadata() throws Exception { createRepository("test-repo", "fs", tempDir); createFullSnapshot("test-repo", "test-snap"); - assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").execute().actionGet() - .getSnapshots("test-repo").get(0).state(), - equalTo(SnapshotState.SUCCESS)); + assertThat(getSnapshot("test-repo", "test-snap").state(), equalTo(SnapshotState.SUCCESS)); logger.info("--> change custom persistent metadata"); updateClusterState(currentState -> { @@ -293,20 +285,20 @@ public void testRestoreCustomMetadata() throws Exception { }); logger.info("--> delete repository"); - assertAcked(client.admin().cluster().prepareDeleteRepository("test-repo")); + assertAcked(clusterAdmin().prepareDeleteRepository("test-repo")); createRepository("test-repo-2", "fs", tempDir); logger.info("--> restore snapshot"); - client.admin().cluster().prepareRestoreSnapshot("test-repo-2", "test-snap").setRestoreGlobalState(true).setIndices("-*") + clusterAdmin().prepareRestoreSnapshot("test-repo-2", "test-snap").setRestoreGlobalState(true).setIndices("-*") .setWaitForCompletion(true).execute().actionGet(); logger.info("--> make sure old repository wasn't restored"); - assertRequestBuilderThrows(client.admin().cluster().prepareGetRepositories("test-repo"), RepositoryMissingException.class); - assertThat(client.admin().cluster().prepareGetRepositories("test-repo-2").get().repositories().size(), equalTo(1)); + assertRequestBuilderThrows(clusterAdmin().prepareGetRepositories("test-repo"), RepositoryMissingException.class); + assertThat(clusterAdmin().prepareGetRepositories("test-repo-2").get().repositories().size(), equalTo(1)); logger.info("--> check that custom persistent metadata was restored"); - ClusterState clusterState = client.admin().cluster().prepareState().get().getState(); + ClusterState clusterState = clusterAdmin().prepareState().get().getState(); logger.info("Cluster state: {}", clusterState); Metadata metadata = clusterState.getMetadata(); assertThat(((SnapshottableMetadata) metadata.custom(SnapshottableMetadata.TYPE)).getData(), equalTo("before_snapshot_s")); @@ -321,7 +313,7 @@ public void testRestoreCustomMetadata() throws Exception { ensureYellow(); logger.info("--> check that gateway-persistent custom metadata survived full cluster restart"); - clusterState = client().admin().cluster().prepareState().get().getState(); + clusterState = clusterAdmin().prepareState().get().getState(); logger.info("Cluster state: {}", clusterState); metadata = clusterState.getMetadata(); assertThat(metadata.custom(SnapshottableMetadata.TYPE), nullValue()); @@ -338,32 +330,7 @@ public void testRestoreCustomMetadata() throws Exception { equalTo("before_snapshot_s_gw_noapi")); } - private void updateClusterState(final Function<ClusterState, ClusterState> updater) throws InterruptedException { - final CountDownLatch countDownLatch = new CountDownLatch(1); - final ClusterService clusterService = internalCluster().getInstance(ClusterService.class); - clusterService.submitStateUpdateTask("test", new ClusterStateUpdateTask() { - @Override - public ClusterState execute(ClusterState currentState) { - return 
updater.apply(currentState); - } - - @Override - public void onFailure(String source, @Nullable Exception e) { - countDownLatch.countDown(); - } - - @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - countDownLatch.countDown(); - } - }); - countDownLatch.await(); - } - public void testSnapshotDuringNodeShutdown() throws Exception { - logger.info("--> start 2 nodes"); - Client client = client(); - assertAcked(prepareCreate("test-idx", 2, indexSettingsNoReplicas(2))); ensureGreen(); @@ -378,7 +345,7 @@ public void testSnapshotDuringNodeShutdown() throws Exception { String blockedNode = blockNodeWithIndex("test-repo", "test-idx"); logger.info("--> snapshot"); - client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap") + clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap") .setWaitForCompletion(false) .setIndices("test-idx") .get(); @@ -399,10 +366,7 @@ public void testSnapshotDuringNodeShutdown() throws Exception { public void testSnapshotWithStuckNode() throws Exception { logger.info("--> start 2 nodes"); - ArrayList<String> nodes = new ArrayList<>(); - nodes.add(internalCluster().startNode()); - nodes.add(internalCluster().startNode()); - Client client = client(); + List<String> nodes = internalCluster().startNodes(2); assertAcked(prepareCreate("test-idx", 2, indexSettingsNoReplicas(2))); ensureGreen(); @@ -421,7 +385,7 @@ public void testSnapshotWithStuckNode() throws Exception { assertFileCount(repo, 0); logger.info("--> snapshot"); - client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap") + clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap") .setWaitForCompletion(false) .setIndices("test-idx") .get(); @@ -448,11 +412,11 @@ public void testSnapshotWithStuckNode() throws Exception { logger.info("--> making sure that snapshot no longer exists"); expectThrows(SnapshotMissingException.class, - () -> client().admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap") + () -> clusterAdmin().prepareGetSnapshots("test-repo").setSnapshots("test-snap") .execute().actionGet().getSnapshots("test-repo")); logger.info("--> Go through a loop of creating and deleting a snapshot to trigger repository cleanup"); - client().admin().cluster().prepareCleanupRepository("test-repo").get(); + clusterAdmin().prepareCleanupRepository("test-repo").get(); // Expect two files to remain in the repository: // (1) index-(N+1) @@ -476,7 +440,7 @@ public void testRestoreIndexWithMissingShards() throws Exception { logger.info("--> shutdown one of the nodes"); internalCluster().stopRandomDataNode(); - assertThat(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("1m").setWaitForNodes("<2") + assertThat(clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("1m").setWaitForNodes("<2") .execute().actionGet().isTimedOut(), equalTo(false)); @@ -499,25 +463,25 @@ public void testRestoreIndexWithMissingShards() throws Exception { logger.info("--> start snapshot with default settings without a closed index - should fail"); final SnapshotException sne = expectThrows(SnapshotException.class, - () -> client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-1") + () -> clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap-1") .setIndices("test-idx-all", "test-idx-none", "test-idx-some", "test-idx-closed") .setWaitForCompletion(true).execute().actionGet()); assertThat(sne.getMessage(), containsString("Indices don't have primary shards")); if 
(randomBoolean()) { logger.info("checking snapshot completion using status"); - client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-2") + clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap-2") .setIndices("test-idx-all", "test-idx-none", "test-idx-some", "test-idx-closed") .setWaitForCompletion(false).setPartial(true).execute().actionGet(); assertBusy(() -> { - SnapshotsStatusResponse snapshotsStatusResponse = client().admin().cluster().prepareSnapshotStatus("test-repo") + SnapshotsStatusResponse snapshotsStatusResponse = clusterAdmin().prepareSnapshotStatus("test-repo") .setSnapshots("test-snap-2").get(); List<SnapshotStatus> snapshotStatuses = snapshotsStatusResponse.getSnapshots(); assertEquals(snapshotStatuses.size(), 1); logger.trace("current snapshot status [{}]", snapshotStatuses.get(0)); assertTrue(snapshotStatuses.get(0).getState().completed()); }, 1, TimeUnit.MINUTES); - SnapshotsStatusResponse snapshotsStatusResponse = client().admin().cluster().prepareSnapshotStatus("test-repo") + SnapshotsStatusResponse snapshotsStatusResponse = clusterAdmin().prepareSnapshotStatus("test-repo") .setSnapshots("test-snap-2").get(); List<SnapshotStatus> snapshotStatuses = snapshotsStatusResponse.getSnapshots(); assertThat(snapshotStatuses.size(), equalTo(1)); @@ -530,17 +494,14 @@ public void testRestoreIndexWithMissingShards() throws Exception { // There is slight delay between snapshot being marked as completed in the cluster state and on the file system // After it was marked as completed in the cluster state - we need to check if it's completed on the file system as well assertBusy(() -> { - GetSnapshotsResponse response = client().admin().cluster().prepareGetSnapshots("test-repo") - .setSnapshots("test-snap-2").get(); - assertThat(response.getSnapshots("test-repo").size(), equalTo(1)); - SnapshotInfo snapshotInfo = response.getSnapshots("test-repo").get(0); + SnapshotInfo snapshotInfo = getSnapshot("test-repo", "test-snap-2"); assertTrue(snapshotInfo.state().completed()); assertEquals(SnapshotState.PARTIAL, snapshotInfo.state()); }, 1, TimeUnit.MINUTES); } else { logger.info("checking snapshot completion using wait_for_completion flag"); final CreateSnapshotResponse createSnapshotResponse = - client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-2") + clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap-2") .setIndices("test-idx-all", "test-idx-none", "test-idx-some", "test-idx-closed") .setWaitForCompletion(true).setPartial(true).execute().actionGet(); logger.info("State: [{}], Reason: [{}]", @@ -548,20 +509,19 @@ public void testRestoreIndexWithMissingShards() throws Exception { assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), equalTo(22)); assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), lessThan(16)); assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(10)); - assertThat(client().admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap-2").execute().actionGet() - .getSnapshots("test-repo").get(0).state(), + assertThat(getSnapshot("test-repo", "test-snap-2").state(), equalTo(SnapshotState.PARTIAL)); } assertAcked(client().admin().indices().prepareClose("test-idx-all")); logger.info("--> restore incomplete snapshot - should fail"); - assertFutureThrows(client().admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap-2").setRestoreGlobalState(false) + assertFutureThrows(clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap-2").setRestoreGlobalState(false) 
.setWaitForCompletion(true).execute(), SnapshotRestoreException.class); logger.info("--> restore snapshot for the index that was snapshotted completely"); - RestoreSnapshotResponse restoreSnapshotResponse = client().admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap-2") + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap-2") .setRestoreGlobalState(false).setIndices("test-idx-all").setWaitForCompletion(true).execute().actionGet(); assertThat(restoreSnapshotResponse.getRestoreInfo(), notNullValue()); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(6)); @@ -571,7 +531,7 @@ public void testRestoreIndexWithMissingShards() throws Exception { logger.info("--> restore snapshot for the partial index"); cluster().wipeIndices("test-idx-some"); - restoreSnapshotResponse = client().admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap-2") + restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap-2") .setRestoreGlobalState(false).setIndices("test-idx-some").setPartial(true).setWaitForCompletion(true).get(); assertThat(restoreSnapshotResponse.getRestoreInfo(), notNullValue()); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(6)); @@ -581,7 +541,7 @@ public void testRestoreIndexWithMissingShards() throws Exception { logger.info("--> restore snapshot for the index that didn't have any shards snapshotted successfully"); cluster().wipeIndices("test-idx-none"); - restoreSnapshotResponse = client().admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap-2") + restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap-2") .setRestoreGlobalState(false).setIndices("test-idx-none").setPartial(true).setWaitForCompletion(true).get(); assertThat(restoreSnapshotResponse.getRestoreInfo(), notNullValue()); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(6)); @@ -590,7 +550,7 @@ public void testRestoreIndexWithMissingShards() throws Exception { assertThat(getCountForIndex("test-idx-some"), allOf(greaterThan(0L), lessThan(100L))); logger.info("--> restore snapshot for the closed index that was snapshotted completely"); - restoreSnapshotResponse = client().admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap-2") + restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap-2") .setRestoreGlobalState(false).setIndices("test-idx-closed").setWaitForCompletion(true).execute().actionGet(); assertThat(restoreSnapshotResponse.getRestoreInfo(), notNullValue()); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(4)); @@ -618,7 +578,7 @@ public void testRestoreIndexWithShardsMissingInLocalGateway() throws Exception { indexRandomDocs("test-idx", 100); logger.info("--> start snapshot"); - assertThat(client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-1").setIndices("test-idx") + assertThat(clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap-1").setIndices("test-idx") .setWaitForCompletion(true).get().getSnapshotInfo().state(), equalTo(SnapshotState.SUCCESS)); @@ -633,12 +593,12 @@ public boolean clearData(String nodeName) { } }); - assertThat(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("1m").setWaitForNodes("2") + assertThat(clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("1m").setWaitForNodes("2") .execute().actionGet().isTimedOut(), 
equalTo(false)); logger.info("--> restore index snapshot"); - assertThat(client().admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap-1").setRestoreGlobalState(false) + assertThat(clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap-1").setRestoreGlobalState(false) .setWaitForCompletion(true).get().getRestoreInfo().successfulShards(), equalTo(6)); @@ -665,12 +625,12 @@ public void testRegistrationFailure() { internalCluster().startNode(nonMasterNode()); // Register mock repositories for (int i = 0; i < 5; i++) { - client().admin().cluster().preparePutRepository("test-repo" + i) + clusterAdmin().preparePutRepository("test-repo" + i) .setType("mock").setSettings(Settings.builder() .put("location", randomRepoPath())).setVerify(false).get(); } logger.info("--> make sure that properly setup repository can be registered on all nodes"); - client().admin().cluster().preparePutRepository("test-repo-0") + clusterAdmin().preparePutRepository("test-repo-0") .setType("fs").setSettings(Settings.builder() .put("location", randomRepoPath())).get(); @@ -678,9 +638,8 @@ public void testRegistrationFailure() { public void testThatSensitiveRepositorySettingsAreNotExposed() throws Exception { disableRepoConsistencyCheck("This test does not create any data in the repository"); - Settings nodeSettings = Settings.EMPTY; logger.info("--> start two nodes"); - internalCluster().startNodes(2, nodeSettings); + internalCluster().startNodes(2); createRepository("test-repo", "mock", Settings.builder() .put("location", randomRepoPath()) .put(MockRepository.Plugin.USERNAME_SETTING.getKey(), "notsecretusername") @@ -756,18 +715,10 @@ public void testMasterShutdownDuringSnapshot() throws Exception { logger.info("--> wait until the snapshot is done"); - assertBusy(() -> { - GetSnapshotsResponse snapshotsStatusResponse = client().admin().cluster().prepareGetSnapshots("test-repo") - .setSnapshots("test-snap").get(); - SnapshotInfo snapshotInfo = snapshotsStatusResponse.getSnapshots("test-repo").get(0); - assertTrue(snapshotInfo.state().completed()); - }, 1, TimeUnit.MINUTES); + assertBusy(() -> assertTrue(getSnapshot("test-repo", "test-snap").state().completed()), 1, TimeUnit.MINUTES); logger.info("--> verify that snapshot was successful"); - - GetSnapshotsResponse snapshotsStatusResponse = client().admin().cluster().prepareGetSnapshots("test-repo") - .setSnapshots("test-snap").get(); - SnapshotInfo snapshotInfo = snapshotsStatusResponse.getSnapshots("test-repo").get(0); + SnapshotInfo snapshotInfo = getSnapshot("test-repo", "test-snap"); assertEquals(SnapshotState.SUCCESS, snapshotInfo.state()); assertEquals(snapshotInfo.totalShards(), snapshotInfo.successfulShards()); assertEquals(0, snapshotInfo.failedShards()); @@ -803,18 +754,10 @@ public void testMasterAndDataShutdownDuringSnapshot() throws Exception { logger.info("--> wait until the snapshot is done"); - assertBusy(() -> { - GetSnapshotsResponse snapshotsStatusResponse = client().admin().cluster().prepareGetSnapshots("test-repo") - .setSnapshots("test-snap").get(); - SnapshotInfo snapshotInfo = snapshotsStatusResponse.getSnapshots("test-repo").get(0); - assertTrue(snapshotInfo.state().completed()); - }, 1, TimeUnit.MINUTES); + assertBusy(() -> assertTrue(getSnapshot("test-repo", "test-snap").state().completed()), 1, TimeUnit.MINUTES); logger.info("--> verify that snapshot was partial"); - - GetSnapshotsResponse snapshotsStatusResponse = client().admin().cluster().prepareGetSnapshots("test-repo") - .setSnapshots("test-snap").get(); - SnapshotInfo 
snapshotInfo = snapshotsStatusResponse.getSnapshots("test-repo").get(0); + SnapshotInfo snapshotInfo = getSnapshot("test-repo", "test-snap"); assertEquals(SnapshotState.PARTIAL, snapshotInfo.state()); assertNotEquals(snapshotInfo.totalShards(), snapshotInfo.successfulShards()); assertThat(snapshotInfo.failedShards(), greaterThan(0)); @@ -833,7 +776,6 @@ public void testRestoreShrinkIndex() throws Exception { internalCluster().startMasterOnlyNode(); internalCluster().startDataOnlyNode(); - final Client client = client(); final String repo = "test-repo"; final String snapshot = "test-snap"; final String sourceIdx = "test-idx"; @@ -846,20 +788,20 @@ public void testRestoreShrinkIndex() throws Exception { indexRandomDocs(sourceIdx, randomIntBetween(10, 100)); logger.info("--> shrink the index"); - assertAcked(client.admin().indices().prepareUpdateSettings(sourceIdx) + assertAcked(client().admin().indices().prepareUpdateSettings(sourceIdx) .setSettings(Settings.builder().put("index.blocks.write", true)).get()); - assertAcked(client.admin().indices().prepareResizeIndex(sourceIdx, shrunkIdx).get()); + assertAcked(client().admin().indices().prepareResizeIndex(sourceIdx, shrunkIdx).get()); logger.info("--> snapshot the shrunk index"); - assertSuccessful(client.admin().cluster() + assertSuccessful(clusterAdmin() .prepareCreateSnapshot(repo, snapshot) .setWaitForCompletion(true).setIndices(shrunkIdx).execute()); logger.info("--> delete index and stop the data node"); - assertAcked(client.admin().indices().prepareDelete(sourceIdx).get()); - assertAcked(client.admin().indices().prepareDelete(shrunkIdx).get()); + assertAcked(client().admin().indices().prepareDelete(sourceIdx).get()); + assertAcked(client().admin().indices().prepareDelete(shrunkIdx).get()); internalCluster().stopRandomDataNode(); - client().admin().cluster().prepareHealth().setTimeout("30s").setWaitForNodes("1"); + clusterAdmin().prepareHealth().setTimeout("30s").setWaitForNodes("1"); logger.info("--> start a new data node"); final Settings dataSettings = Settings.builder() @@ -867,10 +809,10 @@ public void testRestoreShrinkIndex() throws Exception { .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) // to get a new node id .build(); internalCluster().startDataOnlyNode(dataSettings); - client().admin().cluster().prepareHealth().setTimeout("30s").setWaitForNodes("2"); + clusterAdmin().prepareHealth().setTimeout("30s").setWaitForNodes("2"); logger.info("--> restore the shrunk index and ensure all shards are allocated"); - RestoreSnapshotResponse restoreResponse = client().admin().cluster() + RestoreSnapshotResponse restoreResponse = clusterAdmin() .prepareRestoreSnapshot(repo, snapshot).setWaitForCompletion(true) .setIndices(shrunkIdx).get(); assertEquals(restoreResponse.getRestoreInfo().totalShards(), @@ -880,13 +822,12 @@ public void testRestoreShrinkIndex() throws Exception { public void testSnapshotWithDateMath() { final String repo = "repo"; - final AdminClient admin = client().admin(); final IndexNameExpressionResolver nameExpressionResolver = new IndexNameExpressionResolver(); final String snapshotName = "<snapshot-{now/d}>"; logger.info("--> creating repository"); - assertAcked(admin.cluster().preparePutRepository(repo).setType("fs") + assertAcked(clusterAdmin().preparePutRepository(repo).setType("fs") .setSettings(Settings.builder().put("location", randomRepoPath()) .put("compress", randomBoolean()))); @@ -896,7 +837,7 @@ public void testSnapshotWithDateMath() { // snapshot could be taken before or after a day rollover final String 
expression2 = nameExpressionResolver.resolveDateMathExpression(snapshotName); - SnapshotsStatusResponse response = admin.cluster().prepareSnapshotStatus(repo) + SnapshotsStatusResponse response = clusterAdmin().prepareSnapshotStatus(repo) .setSnapshots(Sets.newHashSet(expression1, expression2).toArray(Strings.EMPTY_ARRAY)) .setIgnoreUnavailable(true) .get(); @@ -906,7 +847,6 @@ public void testSnapshotWithDateMath() { } public void testSnapshotTotalAndIncrementalSizes() throws Exception { - Client client = client(); final String indexName = "test-blocks-1"; final String repositoryName = "repo-" + indexName; final String snapshot0 = "snapshot-0"; @@ -916,14 +856,14 @@ public void testSnapshotTotalAndIncrementalSizes() throws Exception { int docs = between(10, 100); for (int i = 0; i < docs; i++) { - client.prepareIndex(indexName).setSource("test", "init").execute().actionGet(); + client().prepareIndex(indexName).setSource("test", "init").execute().actionGet(); } final Path repoPath = randomRepoPath(); createRepository(repositoryName, "fs", repoPath); createFullSnapshot(repositoryName, snapshot0); - SnapshotsStatusResponse response = client.admin().cluster().prepareSnapshotStatus(repositoryName) + SnapshotsStatusResponse response = clusterAdmin().prepareSnapshotStatus(repositoryName) .setSnapshots(snapshot0) .get(); @@ -951,7 +891,7 @@ public void testSnapshotTotalAndIncrementalSizes() throws Exception { // add few docs - less than initially docs = between(1, 5); for (int i = 0; i < docs; i++) { - client.prepareIndex(indexName).setSource("test", "test" + i).execute().actionGet(); + client().prepareIndex(indexName).setSource("test", "test" + i).execute().actionGet(); } // create another snapshot @@ -961,9 +901,7 @@ public void testSnapshotTotalAndIncrementalSizes() throws Exception { // drop 1st one to avoid miscalculation as snapshot reuses some files of prev snapshot assertAcked(startDeleteSnapshot(repositoryName, snapshot0).get()); - response = client.admin().cluster().prepareSnapshotStatus(repositoryName) - .setSnapshots(snapshot1) - .get(); + response = clusterAdmin().prepareSnapshotStatus(repositoryName).setSnapshots(snapshot1).get(); final List<Path> snapshot1Files = scanSnapshotFolder(repoPath); final List<Path> snapshot1IndexMetaFiles = findRepoMetaBlobs(repoPath); @@ -1041,7 +979,7 @@ public void testDeduplicateIndexMetadata() throws Exception { final List<Path> snapshot2IndexMetaFiles = findRepoMetaBlobs(repoPath); assertThat(snapshot2IndexMetaFiles, hasSize(2)); // should have created one new metadata blob - assertAcked(client().admin().cluster().prepareDeleteSnapshot(repositoryName, snapshot0, snapshot1).get()); + assertAcked(clusterAdmin().prepareDeleteSnapshot(repositoryName, snapshot0, snapshot1).get()); final List<Path> snapshot3IndexMetaFiles = findRepoMetaBlobs(repoPath); assertThat(snapshot3IndexMetaFiles, hasSize(1)); // should have deleted the metadata blob referenced by the first two snapshots } @@ -1070,14 +1008,14 @@ public void testDataNodeRestartWithBusyMasterDuringSnapshot() throws Exception { logger.info("--> wait for shard snapshots to show as failed"); assertBusy(() -> assertThat( - client().admin().cluster().prepareSnapshotStatus("test-repo").setSnapshots("test-snap").get().getSnapshots() + clusterAdmin().prepareSnapshotStatus("test-repo").setSnapshots("test-snap").get().getSnapshots() .get(0).getShardsStats().getFailedShards(), greaterThanOrEqualTo(1)), 60L, TimeUnit.SECONDS); unblockNode("test-repo", dataNode); disruption.stopDisrupting(); // check that snapshot completes 
assertBusy(() -> { - GetSnapshotsResponse snapshotsStatusResponse = client().admin().cluster() + GetSnapshotsResponse snapshotsStatusResponse = clusterAdmin() .prepareGetSnapshots("test-repo").setSnapshots("test-snap").setIgnoreUnavailable(true).get(); assertEquals(1, snapshotsStatusResponse.getSnapshots("test-repo").size()); SnapshotInfo snapshotInfo = snapshotsStatusResponse.getSnapshots("test-repo").get(0); @@ -1106,7 +1044,7 @@ public void testDataNodeRestartAfterShardSnapshotFailure() throws Exception { logger.info("--> wait for shard snapshot of first primary to show as failed"); assertBusy(() -> assertThat( - client().admin().cluster().prepareSnapshotStatus("test-repo").setSnapshots("test-snap").get().getSnapshots() + clusterAdmin().prepareSnapshotStatus("test-repo").setSnapshots("test-snap").get().getSnapshots() .get(0).getShardsStats().getFailedShards(), is(1)), 60L, TimeUnit.SECONDS); logger.info("--> restarting second data node, which should cause the primary shard on it to be failed"); @@ -1114,7 +1052,7 @@ public void testDataNodeRestartAfterShardSnapshotFailure() throws Exception { // check that snapshot completes with both failed shards being accounted for in the snapshot result assertBusy(() -> { - GetSnapshotsResponse snapshotsStatusResponse = client().admin().cluster() + GetSnapshotsResponse snapshotsStatusResponse = clusterAdmin() .prepareGetSnapshots("test-repo").setSnapshots("test-snap").setIgnoreUnavailable(true).get(); assertEquals(1, snapshotsStatusResponse.getSnapshots("test-repo").size()); SnapshotInfo snapshotInfo = snapshotsStatusResponse.getSnapshots("test-repo").get(0); @@ -1154,7 +1092,7 @@ public void testRetentionLeasesClearedOnRestore() throws Exception { final String snapshotName = "snapshot-retention-leases"; logger.debug("--> create snapshot {}:{}", repoName, snapshotName); - CreateSnapshotResponse createResponse = client().admin().cluster().prepareCreateSnapshot(repoName, snapshotName) + CreateSnapshotResponse createResponse = clusterAdmin().prepareCreateSnapshot(repoName, snapshotName) .setWaitForCompletion(true).setIndices(indexName).get(); assertThat(createResponse.getSnapshotInfo().successfulShards(), equalTo(shardCount)); assertThat(createResponse.getSnapshotInfo().failedShards(), equalTo(0)); @@ -1175,7 +1113,7 @@ public void testRetentionLeasesClearedOnRestore() throws Exception { assertAcked(client().admin().indices().prepareClose(indexName)); logger.debug("--> restore index {} from snapshot", indexName); - RestoreSnapshotResponse restoreResponse = client().admin().cluster().prepareRestoreSnapshot(repoName, snapshotName) + RestoreSnapshotResponse restoreResponse = clusterAdmin().prepareRestoreSnapshot(repoName, snapshotName) .setWaitForCompletion(true).get(); assertThat(restoreResponse.getRestoreInfo().successfulShards(), equalTo(shardCount)); assertThat(restoreResponse.getRestoreInfo().failedShards(), equalTo(0)); @@ -1272,11 +1210,11 @@ public void testSnapshotDeleteRelocatingPrimaryIndex() throws Exception { logger.info("--> wait for relocations to start"); assertBusy(() -> assertThat( - client().admin().cluster().prepareHealth(indexName).execute().actionGet().getRelocatingShards(), greaterThan(0)), + clusterAdmin().prepareHealth(indexName).execute().actionGet().getRelocatingShards(), greaterThan(0)), 1L, TimeUnit.MINUTES); logger.info("--> snapshot"); - client().admin().cluster().prepareCreateSnapshot(repoName, "test-snap") + clusterAdmin().prepareCreateSnapshot(repoName, "test-snap") 
.setWaitForCompletion(false).setPartial(true).setIndices(indexName).get(); assertAcked(client().admin().indices().prepareDelete(indexName)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java index a76587deaf576..199a54a71813e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java @@ -139,7 +139,6 @@ import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; -// The tests in here do a lot of state updates and other writes to disk and are slowed down too much by WindowsFS public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCase { @Override @@ -157,8 +156,6 @@ protected Settings nodeSettings(int nodeOrdinal) { } public void testBasicWorkFlow() throws Exception { - Client client = client(); - createRepository("test-repo", "fs"); createIndexWithRandomDocs("test-idx-1", 100); createIndexWithRandomDocs("test-idx-2", 100); @@ -175,7 +172,7 @@ public void testBasicWorkFlow() throws Exception { if (!indicesToFlush.isEmpty()) { String[] indices = indicesToFlush.toArray(new String[indicesToFlush.size()]); logger.info("--> starting asynchronous flush for indices {}", Arrays.toString(indices)); - flushResponseFuture = client.admin().indices().prepareFlush(indices).execute(); + flushResponseFuture = client().admin().indices().prepareFlush(indices).execute(); } } @@ -195,17 +192,17 @@ public void testBasicWorkFlow() throws Exception { final boolean snapshotClosed = randomBoolean(); if (snapshotClosed) { - assertAcked(client.admin().indices().prepareClose(indicesToSnapshot).setWaitForActiveShards(ActiveShardCount.ALL).get()); + assertAcked(client().admin().indices().prepareClose(indicesToSnapshot).setWaitForActiveShards(ActiveShardCount.ALL).get()); } logger.info("--> snapshot"); - CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap") + CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap") .setWaitForCompletion(true).setIndices(indicesToSnapshot).get(); assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards())); - List<SnapshotInfo> snapshotInfos = client.admin().cluster().prepareGetSnapshots("test-repo") + List<SnapshotInfo> snapshotInfos = clusterAdmin().prepareGetSnapshots("test-repo") .setSnapshots(randomFrom("test-snap", "_all", "*", "*-snap", "test*")).get().getSnapshots("test-repo"); assertThat(snapshotInfos.size(), equalTo(1)); SnapshotInfo snapshotInfo = snapshotInfos.get(0); @@ -213,18 +210,18 @@ public void testBasicWorkFlow() throws Exception { assertThat(snapshotInfo.version(), equalTo(Version.CURRENT)); if (snapshotClosed) { - assertAcked(client.admin().indices().prepareOpen(indicesToSnapshot).setWaitForActiveShards(ActiveShardCount.ALL).get()); + assertAcked(client().admin().indices().prepareOpen(indicesToSnapshot).setWaitForActiveShards(ActiveShardCount.ALL).get()); } logger.info("--> delete some data"); for (int i = 0; i < 50; i++) { - client.prepareDelete("test-idx-1", Integer.toString(i)).get(); + client().prepareDelete("test-idx-1", 
Integer.toString(i)).get(); } for (int i = 50; i < 100; i++) { - client.prepareDelete("test-idx-2", Integer.toString(i)).get(); + client().prepareDelete("test-idx-2", Integer.toString(i)).get(); } for (int i = 0; i < 100; i += 2) { - client.prepareDelete("test-idx-3", Integer.toString(i)).get(); + client().prepareDelete("test-idx-3", Integer.toString(i)).get(); } assertAllSuccessful(refresh()); assertDocCount("test-idx-1", 50L); @@ -232,10 +229,10 @@ public void testBasicWorkFlow() throws Exception { assertDocCount("test-idx-3", 50L); logger.info("--> close indices"); - client.admin().indices().prepareClose("test-idx-1", "test-idx-2").get(); + client().admin().indices().prepareClose("test-idx-1", "test-idx-2").get(); logger.info("--> restore all indices from the snapshot"); - RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap") + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap") .setWaitForCompletion(true).execute().actionGet(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); @@ -244,7 +241,7 @@ public void testBasicWorkFlow() throws Exception { assertDocCount("test-idx-2", 100L); assertDocCount("test-idx-3", 50L); - assertNull(client.admin().indices().prepareGetSettings("test-idx-1").get().getSetting("test-idx-1", + assertNull(client().admin().indices().prepareGetSettings("test-idx-1").get().getSetting("test-idx-1", MetadataIndexStateService.VERIFIED_BEFORE_CLOSE_SETTING.getKey())); for (ShardStats shardStats: client().admin().indices().prepareStats(indicesToSnapshot).clear().get().getShards()) { @@ -258,17 +255,17 @@ public void testBasicWorkFlow() throws Exception { logger.info("--> delete indices"); cluster().wipeIndices("test-idx-1", "test-idx-2"); logger.info("--> restore one index after deletion"); - restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true) + restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true) .setIndices("test-idx-*", "-test-idx-2").execute().actionGet(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); ensureGreen(); assertDocCount("test-idx-1", 100); - ClusterState clusterState = client.admin().cluster().prepareState().get().getState(); + ClusterState clusterState = clusterAdmin().prepareState().get().getState(); assertThat(clusterState.getMetadata().hasIndex("test-idx-1"), equalTo(true)); assertThat(clusterState.getMetadata().hasIndex("test-idx-2"), equalTo(false)); - assertNull(client.admin().indices().prepareGetSettings("test-idx-1").get().getSetting("test-idx-1", + assertNull(client().admin().indices().prepareGetSettings("test-idx-1").get().getSetting("test-idx-1", MetadataIndexStateService.VERIFIED_BEFORE_CLOSE_SETTING.getKey())); for (ShardStats shardStats: client().admin().indices().prepareStats(indicesToSnapshot).clear().get().getShards()) { @@ -293,7 +290,6 @@ public void testSingleGetAfterRestore() throws Exception { String restoredIndexName = indexName + "-restored"; String expectedValue = "expected"; - Client client = client(); // Write a document String docId = Integer.toString(randomInt()); indexDoc(indexName, docId, "value", expectedValue); @@ -301,7 +297,7 @@ public void testSingleGetAfterRestore() throws Exception { createRepository(repoName, "fs", absolutePath); logger.info("--> snapshot"); - 
CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot(repoName, snapshotName) + CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(repoName, snapshotName) .setWaitForCompletion(true) .setIndices(indexName) .get(); @@ -310,19 +306,17 @@ public void testSingleGetAfterRestore() throws Exception { equalTo(createSnapshotResponse.getSnapshotInfo().totalShards())); assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo(SnapshotState.SUCCESS)); - RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot(repoName, snapshotName) + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(repoName, snapshotName) .setWaitForCompletion(true) .setRenamePattern(indexName) .setRenameReplacement(restoredIndexName) .get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); - assertThat(client.prepareGet(restoredIndexName, docId).get().isExists(), equalTo(true)); + assertThat(client().prepareGet(restoredIndexName, docId).get().isExists(), equalTo(true)); } public void testFreshIndexUUID() { - Client client = client(); - createRepository("test-repo", "fs"); createIndex("test"); @@ -331,7 +325,7 @@ public void testFreshIndexUUID() { assertTrue(originalIndexUUID, originalIndexUUID != null); assertFalse(originalIndexUUID, originalIndexUUID.equals(IndexMetadata.INDEX_UUID_NA_VALUE)); ensureGreen(); - CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap") + CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap") .setWaitForCompletion(true).setIndices("test").get(); assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), @@ -348,10 +342,10 @@ public void testFreshIndexUUID() { assertFalse(newIndexUUID, newIndexUUID.equals(IndexMetadata.INDEX_UUID_NA_VALUE)); assertFalse(newIndexUUID, newIndexUUID.equals(originalIndexUUID)); logger.info("--> close index"); - client.admin().indices().prepareClose("test").get(); + client().admin().indices().prepareClose("test").get(); logger.info("--> restore all indices from the snapshot"); - RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap") + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap") .setWaitForCompletion(true).execute().actionGet(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); @@ -362,7 +356,7 @@ public void testFreshIndexUUID() { newIndexUUID.equals(newAfterRestoreIndexUUID)); logger.info("--> restore indices with different names"); - restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap") + restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap") .setRenamePattern("(.+)").setRenameReplacement("$1-copy").setWaitForCompletion(true).execute().actionGet(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); @@ -385,7 +379,7 @@ public void testRestoreWithDifferentMappingsAndSettings() throws Exception { ensureGreen(); logger.info("--> snapshot it"); - CreateSnapshotResponse createSnapshotResponse = client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap") + 
CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap") .setWaitForCompletion(true).setIndices("test-idx").get(); assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), @@ -403,12 +397,12 @@ public void testRestoreWithDifferentMappingsAndSettings() throws Exception { client().admin().indices().prepareClose("test-idx").get(); logger.info("--> restore all indices from the snapshot"); - RestoreSnapshotResponse restoreSnapshotResponse = client().admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap") + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap") .setWaitForCompletion(true).execute().actionGet(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); logger.info("--> assert that old mapping is restored"); - MappingMetadata mappings = client().admin().cluster().prepareState().get().getState().getMetadata() + MappingMetadata mappings = clusterAdmin().prepareState().get().getState().getMetadata() .getIndices().get("test-idx").mapping(); assertThat(mappings.sourceAsMap().toString(), containsString("baz")); assertThat(mappings.sourceAsMap().toString(), not(containsString("foo"))); @@ -426,13 +420,10 @@ public void testEmptySnapshot() throws Exception { assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), equalTo(0)); assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(0)); - assertThat(client().admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap") - .get().getSnapshots("test-repo").get(0).state(), equalTo(SnapshotState.SUCCESS)); + assertThat(getSnapshot("test-repo", "test-snap").state(), equalTo(SnapshotState.SUCCESS)); } public void testRestoreAliases() throws Exception { - Client client = client(); - createRepository("test-repo", "fs"); logger.info("--> create test indices"); @@ -440,66 +431,64 @@ public void testRestoreAliases() throws Exception { ensureGreen(); logger.info("--> create aliases"); - assertAcked(client.admin().indices().prepareAliases() + assertAcked(client().admin().indices().prepareAliases() .addAlias("test-idx-1", "alias-123") .addAlias("test-idx-2", "alias-123") .addAlias("test-idx-3", "alias-123") .addAlias("test-idx-1", "alias-1") .get()); - assertFalse(client.admin().indices().prepareGetAliases("alias-123").get().getAliases().isEmpty()); + assertFalse(client().admin().indices().prepareGetAliases("alias-123").get().getAliases().isEmpty()); logger.info("--> snapshot"); - assertThat(client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap") + assertThat(clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap") .setIndices().setWaitForCompletion(true).get().getSnapshotInfo().state(), equalTo(SnapshotState.SUCCESS)); logger.info("--> delete all indices"); cluster().wipeIndices("test-idx-1", "test-idx-2", "test-idx-3"); - assertTrue(client.admin().indices().prepareGetAliases("alias-123").get().getAliases().isEmpty()); - assertTrue(client.admin().indices().prepareGetAliases("alias-1").get().getAliases().isEmpty()); + assertTrue(client().admin().indices().prepareGetAliases("alias-123").get().getAliases().isEmpty()); + assertTrue(client().admin().indices().prepareGetAliases("alias-1").get().getAliases().isEmpty()); logger.info("--> restore snapshot with aliases"); - RestoreSnapshotResponse restoreSnapshotResponse = 
client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap") + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap") .setWaitForCompletion(true).setRestoreGlobalState(true).execute().actionGet(); // We don't restore any indices here assertThat(restoreSnapshotResponse.getRestoreInfo().successfulShards(), allOf(greaterThan(0), equalTo(restoreSnapshotResponse.getRestoreInfo().totalShards()))); logger.info("--> check that aliases are restored"); - assertFalse(client.admin().indices().prepareGetAliases("alias-123").get().getAliases().isEmpty()); - assertFalse(client.admin().indices().prepareGetAliases("alias-1").get().getAliases().isEmpty()); + assertFalse(client().admin().indices().prepareGetAliases("alias-123").get().getAliases().isEmpty()); + assertFalse(client().admin().indices().prepareGetAliases("alias-1").get().getAliases().isEmpty()); logger.info("--> update aliases"); - assertAcked(client.admin().indices().prepareAliases().removeAlias("test-idx-3", "alias-123")); - assertAcked(client.admin().indices().prepareAliases().addAlias("test-idx-3", "alias-3")); + assertAcked(client().admin().indices().prepareAliases().removeAlias("test-idx-3", "alias-123")); + assertAcked(client().admin().indices().prepareAliases().addAlias("test-idx-3", "alias-3")); logger.info("--> delete and close indices"); cluster().wipeIndices("test-idx-1", "test-idx-2"); - assertAcked(client.admin().indices().prepareClose("test-idx-3")); - assertTrue(client.admin().indices().prepareGetAliases("alias-123").get().getAliases().isEmpty()); - assertTrue(client.admin().indices().prepareGetAliases("alias-1").get().getAliases().isEmpty()); + assertAcked(client().admin().indices().prepareClose("test-idx-3")); + assertTrue(client().admin().indices().prepareGetAliases("alias-123").get().getAliases().isEmpty()); + assertTrue(client().admin().indices().prepareGetAliases("alias-1").get().getAliases().isEmpty()); logger.info("--> restore snapshot without aliases"); - restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true) + restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true) .setRestoreGlobalState(true).setIncludeAliases(false).execute().actionGet(); // We don't restore any indices here assertThat(restoreSnapshotResponse.getRestoreInfo().successfulShards(), allOf(greaterThan(0), equalTo(restoreSnapshotResponse.getRestoreInfo().totalShards()))); logger.info("--> check that aliases are not restored and existing aliases still exist"); - assertTrue(client.admin().indices().prepareGetAliases("alias-123").get().getAliases().isEmpty()); - assertTrue(client.admin().indices().prepareGetAliases("alias-1").get().getAliases().isEmpty()); - assertFalse(client.admin().indices().prepareGetAliases("alias-3").get().getAliases().isEmpty()); + assertTrue(client().admin().indices().prepareGetAliases("alias-123").get().getAliases().isEmpty()); + assertTrue(client().admin().indices().prepareGetAliases("alias-1").get().getAliases().isEmpty()); + assertFalse(client().admin().indices().prepareGetAliases("alias-3").get().getAliases().isEmpty()); } public void testRestoreTemplates() throws Exception { - Client client = client(); - createRepository("test-repo", "fs"); logger.info("--> creating test template"); - assertThat(client.admin().indices() + assertThat(client().admin().indices() .preparePutTemplate("test-template") 
.setPatterns(Collections.singletonList("te*")) .setMapping(XContentFactory.jsonBuilder() @@ -520,20 +509,19 @@ public void testRestoreTemplates() throws Exception { .get().isAcknowledged(), equalTo(true)); logger.info("--> snapshot"); - CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap") - .setIndices().setWaitForCompletion(true).get(); - assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), equalTo(0)); - assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(0)); - assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").get(). - getSnapshots("test-repo").get(0).state(), equalTo(SnapshotState.SUCCESS)); + final SnapshotInfo snapshotInfo = assertSuccessful(clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap") + .setIndices().setWaitForCompletion(true).execute()); + assertThat(snapshotInfo.totalShards(), equalTo(0)); + assertThat(snapshotInfo.successfulShards(), equalTo(0)); + assertThat(getSnapshot("test-repo", "test-snap").state(), equalTo(SnapshotState.SUCCESS)); logger.info("--> delete test template"); - assertThat(client.admin().indices().prepareDeleteTemplate("test-template").get().isAcknowledged(), equalTo(true)); + assertThat(client().admin().indices().prepareDeleteTemplate("test-template").get().isAcknowledged(), equalTo(true)); GetIndexTemplatesResponse getIndexTemplatesResponse = client().admin().indices().prepareGetTemplates().get(); assertIndexTemplateMissing(getIndexTemplatesResponse, "test-template"); logger.info("--> restore cluster state"); - RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap") + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap") .setWaitForCompletion(true).setRestoreGlobalState(true).execute().actionGet(); // We don't restore any indices here assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(0)); @@ -544,8 +532,6 @@ public void testRestoreTemplates() throws Exception { } public void testIncludeGlobalState() throws Exception { - Client client = client(); - createRepository("test-repo", "fs"); boolean testTemplate = randomBoolean(); @@ -554,7 +540,7 @@ public void testIncludeGlobalState() throws Exception { if(testTemplate) { logger.info("--> creating test template"); - assertThat(client.admin().indices() + assertThat(client().admin().indices() .preparePutTemplate("test-template") .setPatterns(Collections.singletonList("te*")) .setMapping(XContentFactory.jsonBuilder() @@ -586,41 +572,37 @@ public void testIncludeGlobalState() throws Exception { .endObject() .endArray() .endObject()); - assertAcked(client().admin().cluster().preparePutPipeline("barbaz", pipelineSource, XContentType.JSON).get()); + assertAcked(clusterAdmin().preparePutPipeline("barbaz", pipelineSource, XContentType.JSON).get()); } if(testScript) { logger.info("--> creating test script"); - assertAcked(client().admin().cluster().preparePutStoredScript() + assertAcked(clusterAdmin().preparePutStoredScript() .setId("foobar") .setContent(new BytesArray( "{\"script\": { \"lang\": \"" + MockScriptEngine.NAME + "\", \"source\": \"1\"} }"), XContentType.JSON)); } logger.info("--> snapshot without global state"); - CreateSnapshotResponse createSnapshotResponse = client.admin().cluster() + CreateSnapshotResponse createSnapshotResponse = clusterAdmin() .prepareCreateSnapshot("test-repo", 
"test-snap-no-global-state").setIndices().setIncludeGlobalState(false) .setWaitForCompletion(true).get(); assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), equalTo(0)); assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(0)); - assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap-no-global-state") - .get().getSnapshots("test-repo").get(0).state(), - equalTo(SnapshotState.SUCCESS)); - SnapshotsStatusResponse snapshotsStatusResponse = client.admin().cluster().prepareSnapshotStatus("test-repo") + assertThat(getSnapshot("test-repo", "test-snap-no-global-state").state(), equalTo(SnapshotState.SUCCESS)); + SnapshotsStatusResponse snapshotsStatusResponse = clusterAdmin().prepareSnapshotStatus("test-repo") .addSnapshots("test-snap-no-global-state").get(); assertThat(snapshotsStatusResponse.getSnapshots().size(), equalTo(1)); SnapshotStatus snapshotStatus = snapshotsStatusResponse.getSnapshots().get(0); assertThat(snapshotStatus.includeGlobalState(), equalTo(false)); logger.info("--> snapshot with global state"); - createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-with-global-state") + createSnapshotResponse = clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap-with-global-state") .setIndices().setIncludeGlobalState(true).setWaitForCompletion(true).get(); assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), equalTo(0)); assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(0)); - assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap-with-global-state") - .get().getSnapshots("test-repo").get(0).state(), - equalTo(SnapshotState.SUCCESS)); - snapshotsStatusResponse = client.admin().cluster().prepareSnapshotStatus("test-repo") + assertThat(getSnapshot("test-repo", "test-snap-with-global-state").state(), equalTo(SnapshotState.SUCCESS)); + snapshotsStatusResponse = clusterAdmin().prepareSnapshotStatus("test-repo") .addSnapshots("test-snap-with-global-state").get(); assertThat(snapshotsStatusResponse.getSnapshots().size(), equalTo(1)); snapshotStatus = snapshotsStatusResponse.getSnapshots().get(0); @@ -635,16 +617,16 @@ public void testIncludeGlobalState() throws Exception { if (testPipeline) { logger.info("--> delete test pipeline"); - assertAcked(client().admin().cluster().deletePipeline(new DeletePipelineRequest("barbaz")).get()); + assertAcked(clusterAdmin().deletePipeline(new DeletePipelineRequest("barbaz")).get()); } if (testScript) { logger.info("--> delete test script"); - assertAcked(client().admin().cluster().prepareDeleteStoredScript("foobar").get()); + assertAcked(clusterAdmin().prepareDeleteStoredScript("foobar").get()); } logger.info("--> try restoring cluster state from snapshot without global state"); - RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster() + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin() .prepareRestoreSnapshot("test-repo", "test-snap-no-global-state") .setWaitForCompletion(true).setRestoreGlobalState(true).execute().actionGet(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(0)); @@ -654,7 +636,7 @@ public void testIncludeGlobalState() throws Exception { assertIndexTemplateMissing(getIndexTemplatesResponse, "test-template"); logger.info("--> restore cluster state"); - restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap-with-global-state") + 
restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap-with-global-state") .setWaitForCompletion(true).setRestoreGlobalState(true).execute().actionGet(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(0)); @@ -666,27 +648,25 @@ public void testIncludeGlobalState() throws Exception { if (testPipeline) { logger.info("--> check that pipeline is restored"); - GetPipelineResponse getPipelineResponse = client().admin().cluster().prepareGetPipeline("barbaz").get(); + GetPipelineResponse getPipelineResponse = clusterAdmin().prepareGetPipeline("barbaz").get(); assertTrue(getPipelineResponse.isFound()); } if (testScript) { logger.info("--> check that script is restored"); - GetStoredScriptResponse getStoredScriptResponse = client().admin().cluster().prepareGetStoredScript("foobar").get(); + GetStoredScriptResponse getStoredScriptResponse = clusterAdmin().prepareGetStoredScript("foobar").get(); assertNotNull(getStoredScriptResponse.getSource()); } createIndexWithRandomDocs("test-idx", 100); logger.info("--> snapshot without global state but with indices"); - createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-no-global-state-with-index") + createSnapshotResponse = clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap-no-global-state-with-index") .setIndices("test-idx").setIncludeGlobalState(false).setWaitForCompletion(true).get(); assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), greaterThan(0)); assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards())); - assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap-no-global-state-with-index") - .get().getSnapshots("test-repo").get(0).state(), - equalTo(SnapshotState.SUCCESS)); + assertThat(getSnapshot("test-repo", "test-snap-no-global-state-with-index").state(), equalTo(SnapshotState.SUCCESS)); logger.info("--> delete global state and index "); cluster().wipeIndices("test-idx"); @@ -694,18 +674,18 @@ public void testIncludeGlobalState() throws Exception { cluster().wipeTemplates("test-template"); } if (testPipeline) { - assertAcked(client().admin().cluster().deletePipeline(new DeletePipelineRequest("barbaz")).get()); + assertAcked(clusterAdmin().deletePipeline(new DeletePipelineRequest("barbaz")).get()); } if (testScript) { - assertAcked(client().admin().cluster().prepareDeleteStoredScript("foobar").get()); + assertAcked(clusterAdmin().prepareDeleteStoredScript("foobar").get()); } getIndexTemplatesResponse = client().admin().indices().prepareGetTemplates().get(); assertIndexTemplateMissing(getIndexTemplatesResponse, "test-template"); logger.info("--> try restoring index and cluster state from snapshot without global state"); - restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap-no-global-state-with-index") + restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap-no-global-state-with-index") .setWaitForCompletion(true).setRestoreGlobalState(true).execute().actionGet(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(0)); @@ -713,17 +693,16 @@ public void testIncludeGlobalState() throws Exception { logger.info("--> check that global state wasn't restored but index was"); getIndexTemplatesResponse = 
client().admin().indices().prepareGetTemplates().get(); assertIndexTemplateMissing(getIndexTemplatesResponse, "test-template"); - assertFalse(client().admin().cluster().prepareGetPipeline("barbaz").get().isFound()); - assertNull(client().admin().cluster().prepareGetStoredScript("foobar").get().getSource()); + assertFalse(clusterAdmin().prepareGetPipeline("barbaz").get().isFound()); + assertNull(clusterAdmin().prepareGetStoredScript("foobar").get().getSource()); assertDocCount("test-idx", 100L); } public void testSnapshotFileFailureDuringSnapshot() throws InterruptedException { disableRepoConsistencyCheck("This test uses a purposely broken repository so it would fail consistency checks"); - Client client = client(); logger.info("--> creating repository"); - assertAcked(client.admin().cluster().preparePutRepository("test-repo") + assertAcked(clusterAdmin().preparePutRepository("test-repo") .setType("mock").setSettings( Settings.builder() .put("location", randomRepoPath()) @@ -735,7 +714,7 @@ public void testSnapshotFileFailureDuringSnapshot() throws InterruptedException logger.info("--> snapshot"); try { - CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap") + CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap") .setWaitForCompletion(true).setIndices("test-idx").get(); if (createSnapshotResponse.getSnapshotInfo().totalShards() == createSnapshotResponse.getSnapshotInfo().successfulShards()) { // If we are here, that means we didn't have any failures, let's check it @@ -748,10 +727,7 @@ public void testSnapshotFileFailureDuringSnapshot() throws InterruptedException assertThat(shardFailure.nodeId(), notNullValue()); assertThat(shardFailure.index(), equalTo("test-idx")); } - GetSnapshotsResponse getSnapshotsResponse = client.admin().cluster().prepareGetSnapshots("test-repo") - .addSnapshots("test-snap").get(); - assertThat(getSnapshotsResponse.getSnapshots("test-repo").size(), equalTo(1)); - SnapshotInfo snapshotInfo = getSnapshotsResponse.getSnapshots("test-repo").get(0); + SnapshotInfo snapshotInfo = getSnapshot("test-repo", "test-snap"); if (snapshotInfo.state() == SnapshotState.SUCCESS) { assertThat(snapshotInfo.shardFailures().size(), greaterThan(0)); assertThat(snapshotInfo.totalShards(), greaterThan(snapshotInfo.successfulShards())); @@ -772,7 +748,6 @@ public void testSnapshotFileFailureDuringSnapshot() throws InterruptedException public void testDataFileFailureDuringSnapshot() throws Exception { disableRepoConsistencyCheck("This test intentionally leaves a broken repository"); - Client client = client(); createRepository("test-repo", "mock", Settings.builder().put("location", randomRepoPath()) .put("random", randomAlphaOfLength(10)).put("random_data_file_io_exception_rate", 0.3)); @@ -780,7 +755,7 @@ public void testDataFileFailureDuringSnapshot() throws Exception { createIndexWithRandomDocs("test-idx", 100); logger.info("--> snapshot"); - CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap") + CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap") .setWaitForCompletion(true).setIndices("test-idx").get(); if (createSnapshotResponse.getSnapshotInfo().totalShards() == createSnapshotResponse.getSnapshotInfo().successfulShards()) { logger.info("--> no failures"); @@ -794,16 +769,13 @@ public void testDataFileFailureDuringSnapshot() throws 
Exception { assertThat(shardFailure.nodeId(), notNullValue()); assertThat(shardFailure.index(), equalTo("test-idx")); } - GetSnapshotsResponse getSnapshotsResponse = client.admin().cluster().prepareGetSnapshots("test-repo") - .addSnapshots("test-snap").get(); - assertThat(getSnapshotsResponse.getSnapshots("test-repo").size(), equalTo(1)); - SnapshotInfo snapshotInfo = getSnapshotsResponse.getSnapshots("test-repo").get(0); + SnapshotInfo snapshotInfo = getSnapshot("test-repo", "test-snap"); assertThat(snapshotInfo.state(), equalTo(SnapshotState.PARTIAL)); assertThat(snapshotInfo.shardFailures().size(), greaterThan(0)); assertThat(snapshotInfo.totalShards(), greaterThan(snapshotInfo.successfulShards())); // Verify that snapshot status also contains the same failures - SnapshotsStatusResponse snapshotsStatusResponse = client.admin().cluster().prepareSnapshotStatus("test-repo") + SnapshotsStatusResponse snapshotsStatusResponse = clusterAdmin().prepareSnapshotStatus("test-repo") .addSnapshots("test-snap").get(); assertThat(snapshotsStatusResponse.getSnapshots().size(), equalTo(1)); SnapshotStatus snapshotStatus = snapshotsStatusResponse.getSnapshots().get(0); @@ -962,7 +934,7 @@ public void testUnrestorableIndexDuringRestore() throws Exception { .setSettings(Settings.builder() .putNull("index.routing.allocation.include._name") .build())); - assertAcked(client().admin().cluster().prepareReroute().setRetryFailed(true)); + assertAcked(clusterAdmin().prepareReroute().setRetryFailed(true)); }; unrestorableUseCase(indexName, Settings.EMPTY, Settings.EMPTY, restoreIndexSettings, @@ -988,7 +960,7 @@ private void unrestorableUseCase(final String indexName, // create a snapshot final NumShards numShards = getNumShards(indexName); - CreateSnapshotResponse snapshotResponse = client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap") + CreateSnapshotResponse snapshotResponse = clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap") .setWaitForCompletion(true) .setIndices(indexName) .get(); @@ -1001,7 +973,7 @@ private void unrestorableUseCase(final String indexName, assertAcked(client().admin().indices().prepareDelete(indexName)); // update the test repository - assertAcked(client().admin().cluster().preparePutRepository("test-repo") + assertAcked(clusterAdmin().preparePutRepository("test-repo") .setType("mock") .setSettings(Settings.builder() .put("location", repositoryLocation) @@ -1009,7 +981,7 @@ private void unrestorableUseCase(final String indexName, .build())); // attempt to restore the snapshot with the given settings - RestoreSnapshotResponse restoreResponse = client().admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap") + RestoreSnapshotResponse restoreResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap") .setIndices(indexName) .setIndexSettings(restoreIndexSettings) .setWaitForCompletion(true) @@ -1019,7 +991,7 @@ private void unrestorableUseCase(final String indexName, assertThat(restoreResponse.getRestoreInfo().totalShards(), equalTo(numShards.numPrimaries)); assertThat(restoreResponse.getRestoreInfo().successfulShards(), equalTo(0)); - ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().setCustoms(true).setRoutingTable(true).get(); + ClusterStateResponse clusterStateResponse = clusterAdmin().prepareState().setCustoms(true).setRoutingTable(true).get(); // check that there is no restore in progress RestoreInProgress restoreInProgress = clusterStateResponse.getState().custom(RestoreInProgress.TYPE); @@ 
-1049,7 +1021,7 @@ private void unrestorableUseCase(final String indexName, // delete the index and restore again assertAcked(client().admin().indices().prepareDelete(indexName)); - restoreResponse = client().admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).get(); + restoreResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).get(); assertThat(restoreResponse.getRestoreInfo().totalShards(), equalTo(numShards.numPrimaries)); assertThat(restoreResponse.getRestoreInfo().successfulShards(), equalTo(numShards.numPrimaries)); @@ -1121,7 +1093,7 @@ public void testUnallocatedShards() { logger.info("--> snapshot"); final SnapshotException sne = expectThrows(SnapshotException.class, - () -> client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap") + () -> clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap") .setWaitForCompletion(true).setIndices("test-idx").get()); assertThat(sne.getMessage(), containsString("Indices don't have primary shards")); assertThat(getRepositoryData("test-repo"), is(RepositoryData.EMPTY)); @@ -1191,7 +1163,7 @@ public void testDeleteSnapshot() throws Exception { public void testGetSnapshotsNoRepos() { ensureGreen(); - GetSnapshotsResponse getSnapshotsResponse = client().admin().cluster() + GetSnapshotsResponse getSnapshotsResponse = clusterAdmin() .prepareGetSnapshots(new String[]{"_all"}) .setSnapshots(randomFrom("_all", "*")) .get(); @@ -1426,20 +1398,20 @@ public void testDeleteSnapshotWithCorruptedGlobalState() throws Exception { } } - List snapshotInfos = client().admin().cluster().prepareGetSnapshots("test-repo").get().getSnapshots("test-repo"); + List snapshotInfos = clusterAdmin().prepareGetSnapshots("test-repo").get().getSnapshots("test-repo"); assertThat(snapshotInfos.size(), equalTo(1)); assertThat(snapshotInfos.get(0).state(), equalTo(SnapshotState.SUCCESS)); assertThat(snapshotInfos.get(0).snapshotId().getName(), equalTo("test-snap")); SnapshotsStatusResponse snapshotStatusResponse = - client().admin().cluster().prepareSnapshotStatus("test-repo").setSnapshots("test-snap").get(); + clusterAdmin().prepareSnapshotStatus("test-repo").setSnapshots("test-snap").get(); assertThat(snapshotStatusResponse.getSnapshots(), hasSize(1)); assertThat(snapshotStatusResponse.getSnapshots().get(0).getSnapshot().getSnapshotId().getName(), equalTo("test-snap")); assertAcked(startDeleteSnapshot("test-repo", "test-snap").get()); - expectThrows(SnapshotMissingException.class, () -> client().admin().cluster() + expectThrows(SnapshotMissingException.class, () -> clusterAdmin() .prepareGetSnapshots("test-repo").addSnapshots("test-snap").get().getSnapshots("test-repo")); - assertRequestBuilderThrows(client().admin().cluster().prepareSnapshotStatus("test-repo").addSnapshots("test-snap"), + assertRequestBuilderThrows(clusterAdmin().prepareSnapshotStatus("test-repo").addSnapshots("test-snap"), SnapshotMissingException.class); createFullSnapshot("test-repo", "test-snap"); @@ -1458,7 +1430,7 @@ public void testSnapshotWithMissingShardLevelIndexFile() throws Exception { client().prepareIndex("test-idx-2").setSource("foo", "bar")); logger.info("--> creating snapshot"); - client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-1") + clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap-1") .setWaitForCompletion(true).setIndices("test-idx-*").get(); logger.info("--> deleting shard level index file"); @@ -1474,7 +1446,7 @@ public void 
testSnapshotWithMissingShardLevelIndexFile() throws Exception { logger.info("--> creating another snapshot"); CreateSnapshotResponse createSnapshotResponse = - client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-2") + clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap-2") .setWaitForCompletion(true).setIndices("test-idx-1").get(); assertEquals(createSnapshotResponse.getSnapshotInfo().successfulShards(), createSnapshotResponse.getSnapshotInfo().totalShards() - 1); @@ -1483,7 +1455,7 @@ public void testSnapshotWithMissingShardLevelIndexFile() throws Exception { "because it uses snap-*.data files and not the index-N to determine what files to restore"); client().admin().indices().prepareDelete("test-idx-1", "test-idx-2").get(); RestoreSnapshotResponse restoreSnapshotResponse = - client().admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap-1").setWaitForCompletion(true).get(); + clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap-1").setWaitForCompletion(true).get(); assertEquals(0, restoreSnapshotResponse.getRestoreInfo().failedShards()); } @@ -1648,16 +1620,13 @@ public void testMoveShardWhileSnapshotting() throws Exception { logger.info("--> unblocking blocked node"); unblockNode("test-repo", blockedNode); logger.info("--> waiting for completion"); - SnapshotInfo snapshotInfo = waitForCompletion("test-repo", "test-snap", TimeValue.timeValueSeconds(600)); - logger.info("Number of failed shards [{}]", snapshotInfo.shardFailures().size()); + logger.info("Number of failed shards [{}]", + waitForCompletion("test-repo", "test-snap", TimeValue.timeValueSeconds(600)).shardFailures().size()); logger.info("--> done"); - List snapshotInfos = client().admin().cluster().prepareGetSnapshots("test-repo") - .setSnapshots("test-snap").get().getSnapshots("test-repo"); - - assertThat(snapshotInfos.size(), equalTo(1)); - assertThat(snapshotInfos.get(0).state(), equalTo(SnapshotState.SUCCESS)); - assertThat(snapshotInfos.get(0).shardFailures().size(), equalTo(0)); + final SnapshotInfo snapshotInfo = getSnapshot("test-repo", "test-snap"); + assertThat(snapshotInfo.state(), equalTo(SnapshotState.SUCCESS)); + assertThat(snapshotInfo.shardFailures(), empty()); logger.info("--> delete index"); cluster().wipeIndices("test-idx"); @@ -1722,16 +1691,13 @@ public void testDeleteRepositoryWhileSnapshotting() throws Exception { logger.info("--> unblocking blocked node"); unblockNode("test-repo", blockedNode); logger.info("--> waiting for completion"); - SnapshotInfo snapshotInfo = waitForCompletion("test-repo", "test-snap", TimeValue.timeValueSeconds(600)); - logger.info("Number of failed shards [{}]", snapshotInfo.shardFailures().size()); + logger.info("Number of failed shards [{}]", + waitForCompletion("test-repo", "test-snap", TimeValue.timeValueSeconds(600)).shardFailures().size()); logger.info("--> done"); - List snapshotInfos = client().admin().cluster().prepareGetSnapshots("test-repo") - .setSnapshots("test-snap").get().getSnapshots("test-repo"); - - assertThat(snapshotInfos.size(), equalTo(1)); - assertThat(snapshotInfos.get(0).state(), equalTo(SnapshotState.SUCCESS)); - assertThat(snapshotInfos.get(0).shardFailures().size(), equalTo(0)); + final SnapshotInfo snapshotInfo = getSnapshot("test-repo", "test-snap"); + assertThat(snapshotInfo.state(), equalTo(SnapshotState.SUCCESS)); + assertThat(snapshotInfo.shardFailures().size(), equalTo(0)); logger.info("--> delete index"); cluster().wipeIndices("test-idx"); @@ -1760,8 +1726,7 @@ public void 
testReadonlyRepository() throws Exception { assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards())); - assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").get() - .getSnapshots("test-repo").get(0).state(), equalTo(SnapshotState.SUCCESS)); + assertThat(getSnapshot("test-repo", "test-snap").state(), equalTo(SnapshotState.SUCCESS)); logger.info("--> delete index"); cluster().wipeIndices("test-idx"); @@ -2017,7 +1982,7 @@ public void testSnapshotRelocatingPrimary() throws Exception { logger.info("--> wait for relocations to start"); assertBusy(() -> assertThat( - client().admin().cluster().prepareHealth("test-idx").execute().actionGet().getRelocatingShards(), greaterThan(0)), + clusterAdmin().prepareHealth("test-idx").execute().actionGet().getRelocatingShards(), greaterThan(0)), 1L, TimeUnit.MINUTES); logger.info("--> snapshot"); @@ -2049,8 +2014,7 @@ public void testSnapshotMoreThanOnce() throws InterruptedException { assertThat(createSnapshotResponseFirst.getSnapshotInfo().successfulShards(), greaterThan(0)); assertThat(createSnapshotResponseFirst.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponseFirst.getSnapshotInfo().totalShards())); - assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test").get() - .getSnapshots("test-repo").get(0).state(), equalTo(SnapshotState.SUCCESS)); + assertThat(getSnapshot("test-repo", "test").state(), equalTo(SnapshotState.SUCCESS)); { SnapshotStatus snapshotStatus = client.admin().cluster().prepareSnapshotStatus("test-repo") .setSnapshots("test").get().getSnapshots().get(0); @@ -2065,8 +2029,7 @@ public void testSnapshotMoreThanOnce() throws InterruptedException { assertThat(createSnapshotResponseSecond.getSnapshotInfo().successfulShards(), greaterThan(0)); assertThat(createSnapshotResponseSecond.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponseSecond.getSnapshotInfo().totalShards())); - assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-1").get() - .getSnapshots("test-repo").get(0).state(), equalTo(SnapshotState.SUCCESS)); + assertThat(getSnapshot("test-repo", "test-1").state(), equalTo(SnapshotState.SUCCESS)); { SnapshotStatus snapshotStatus = client.admin().cluster().prepareSnapshotStatus("test-repo") .setSnapshots("test-1").get().getSnapshots().get(0); @@ -2082,8 +2045,7 @@ public void testSnapshotMoreThanOnce() throws InterruptedException { assertThat(createSnapshotResponseThird.getSnapshotInfo().successfulShards(), greaterThan(0)); assertThat(createSnapshotResponseThird.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponseThird.getSnapshotInfo().totalShards())); - assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-2").get() - .getSnapshots("test-repo").get(0).state(), equalTo(SnapshotState.SUCCESS)); + assertThat(getSnapshot("test-repo", "test-2").state(), equalTo(SnapshotState.SUCCESS)); { SnapshotStatus snapshotStatus = client.admin().cluster().prepareSnapshotStatus("test-repo") .setSnapshots("test-2").get().getSnapshots().get(0); @@ -2301,7 +2263,7 @@ public void testCloseOrDeleteIndexDuringSnapshot() throws Exception { createIndexWithRandomDocs("test-idx-3", 100); logger.info("--> snapshot"); - ActionFuture future = client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap") + ActionFuture future = clusterAdmin().prepareCreateSnapshot("test-repo", 
"test-snap") .setIndices("test-idx-*").setWaitForCompletion(true).setPartial(false).execute(); logger.info("--> wait for block to kick in"); waitForBlockOnAnyDataNode("test-repo", TimeValue.timeValueMinutes(1)); @@ -2422,7 +2384,7 @@ public void testDeleteSnapshotWhileRestoringFails() throws Exception { logger.info("--> try deleting the snapshot while the restore is in progress (should throw an error)"); ConcurrentSnapshotExecutionException e = expectThrows(ConcurrentSnapshotExecutionException.class, () -> - client().admin().cluster().prepareDeleteSnapshot(repoName, snapshotName).get()); + clusterAdmin().prepareDeleteSnapshot(repoName, snapshotName).get()); assertEquals(repoName, e.getRepositoryName()); assertEquals(snapshotName, e.getSnapshotName()); assertThat(e.getMessage(), containsString("cannot delete snapshot during a restore")); @@ -2531,19 +2493,19 @@ public void testRestoreSnapshotWithCorruptedGlobalState() throws Exception { outChan.truncate(randomInt(10)); } - List snapshotInfos = client().admin().cluster().prepareGetSnapshots(repoName).get().getSnapshots(repoName); + List snapshotInfos = clusterAdmin().prepareGetSnapshots(repoName).get().getSnapshots(repoName); assertThat(snapshotInfos.size(), equalTo(1)); assertThat(snapshotInfos.get(0).state(), equalTo(SnapshotState.SUCCESS)); assertThat(snapshotInfos.get(0).snapshotId().getName(), equalTo(snapshotName)); SnapshotsStatusResponse snapshotStatusResponse = - client().admin().cluster().prepareSnapshotStatus(repoName).setSnapshots(snapshotName).get(); + clusterAdmin().prepareSnapshotStatus(repoName).setSnapshots(snapshotName).get(); assertThat(snapshotStatusResponse.getSnapshots(), hasSize(1)); assertThat(snapshotStatusResponse.getSnapshots().get(0).getSnapshot().getSnapshotId().getName(), equalTo(snapshotName)); assertAcked(client().admin().indices().prepareDelete("test-idx-1", "test-idx-2")); - SnapshotException ex = expectThrows(SnapshotException.class, () -> client().admin().cluster() + SnapshotException ex = expectThrows(SnapshotException.class, () -> clusterAdmin() .prepareRestoreSnapshot(repoName, snapshotName) .setRestoreGlobalState(true) .setWaitForCompletion(true) @@ -2552,7 +2514,7 @@ public void testRestoreSnapshotWithCorruptedGlobalState() throws Exception { assertThat(ex.getSnapshotName(), equalTo(snapshotName)); assertThat(ex.getMessage(), containsString("failed to read global metadata")); - RestoreSnapshotResponse restoreSnapshotResponse = client().admin().cluster().prepareRestoreSnapshot(repoName, snapshotName) + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(repoName, snapshotName) .setWaitForCompletion(true) .get(); assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(0)); @@ -2609,7 +2571,7 @@ public void testRestoreSnapshotWithCorruptedIndexMetadata() throws Exception { outChan.truncate(randomInt(10)); } - List snapshotInfos = client().admin().cluster().prepareGetSnapshots("test-repo").get().getSnapshots("test-repo"); + List snapshotInfos = clusterAdmin().prepareGetSnapshots("test-repo").get().getSnapshots("test-repo"); assertThat(snapshotInfos.size(), equalTo(1)); assertThat(snapshotInfos.get(0).state(), equalTo(SnapshotState.SUCCESS)); assertThat(snapshotInfos.get(0).snapshotId().getName(), equalTo("test-snap")); @@ -2618,7 +2580,7 @@ public void testRestoreSnapshotWithCorruptedIndexMetadata() throws Exception { Predicate isRestorableIndex = index -> corruptedIndex.getName().equals(index) == false; - 
client().admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap") + clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap") .setIndices(nbDocsPerIndex.keySet().stream().filter(isRestorableIndex).toArray(String[]::new)) .setRestoreGlobalState(randomBoolean()) .setWaitForCompletion(true) @@ -2674,7 +2636,7 @@ public void testSnapshotWithCorruptedShardIndexFile() throws Exception { } logger.info("--> verifying snapshot state for [{}]", snapshot1); - List snapshotInfos = client().admin().cluster().prepareGetSnapshots("test-repo").get().getSnapshots("test-repo"); + List snapshotInfos = clusterAdmin().prepareGetSnapshots("test-repo").get().getSnapshots("test-repo"); assertThat(snapshotInfos.size(), equalTo(1)); assertThat(snapshotInfos.get(0).state(), equalTo(SnapshotState.SUCCESS)); assertThat(snapshotInfos.get(0).snapshotId().getName(), equalTo(snapshot1)); @@ -2683,7 +2645,7 @@ public void testSnapshotWithCorruptedShardIndexFile() throws Exception { assertAcked(client().admin().indices().prepareDelete(indexName)); logger.info("--> restoring snapshot [{}]", snapshot1); - client().admin().cluster().prepareRestoreSnapshot("test-repo", snapshot1) + clusterAdmin().prepareRestoreSnapshot("test-repo", snapshot1) .setRestoreGlobalState(randomBoolean()) .setWaitForCompletion(true) .get(); @@ -2699,7 +2661,7 @@ public void testSnapshotWithCorruptedShardIndexFile() throws Exception { final String snapshot2 = "test-snap-2"; logger.info("--> creating snapshot [{}]", snapshot2); - final SnapshotInfo snapshotInfo2 = client().admin().cluster().prepareCreateSnapshot("test-repo", snapshot2) + final SnapshotInfo snapshotInfo2 = clusterAdmin().prepareCreateSnapshot("test-repo", snapshot2) .setWaitForCompletion(true) .get() .getSnapshotInfo(); @@ -2923,7 +2885,7 @@ public void testSnapshotCanceledOnRemovedShard() throws Exception { String blockedNode = blockNodeWithIndex(repo, index); logger.info("--> snapshot"); - client().admin().cluster().prepareCreateSnapshot(repo, snapshot) + clusterAdmin().prepareCreateSnapshot(repo, snapshot) .setWaitForCompletion(false) .execute(); @@ -2999,9 +2961,7 @@ public void testSnapshotSucceedsAfterSnapshotFailure() throws Exception { .setIndices("test-idx") .get(); assertEquals(0, createSnapshotResponse.getSnapshotInfo().failedShards()); - GetSnapshotsResponse getSnapshotsResponse = client.admin().cluster().prepareGetSnapshots("test-repo-2") - .addSnapshots("test-snap-2").get(); - assertEquals(SnapshotState.SUCCESS, getSnapshotsResponse.getSnapshots("test-repo-2").get(0).state()); + assertEquals(SnapshotState.SUCCESS, getSnapshot("test-repo-2", "test-snap-2").state()); } public void testSnapshotStatusOnFailedSnapshot() throws Exception { @@ -3016,7 +2976,7 @@ public void testSnapshotStatusOnFailedSnapshot() throws Exception { indexRandomDocs("test-idx-good", randomIntBetween(1, 5)); final SnapshotsStatusResponse snapshotsStatusResponse = - client().admin().cluster().prepareSnapshotStatus(repoName).setSnapshots(snapshot).get(); + clusterAdmin().prepareSnapshotStatus(repoName).setSnapshots(snapshot).get(); assertEquals(1, snapshotsStatusResponse.getSnapshots().size()); assertEquals(State.FAILED, snapshotsStatusResponse.getSnapshots().get(0).getState()); } @@ -3070,7 +3030,7 @@ public void testGetSnapshotsFromIndexBlobOnly() throws Exception { } logger.info("--> verify _all returns snapshot info"); - GetSnapshotsResponse response = client().admin().cluster() + GetSnapshotsResponse response = clusterAdmin() .prepareGetSnapshots("test-repo") .setSnapshots("_all") 
.setVerbose(false) @@ -3079,7 +3039,7 @@ public void testGetSnapshotsFromIndexBlobOnly() throws Exception { verifySnapshotInfo(response, indicesPerSnapshot); logger.info("--> verify wildcard returns snapshot info"); - response = client().admin().cluster() + response = clusterAdmin() .prepareGetSnapshots("test-repo") .setSnapshots("test-snap-*") .setVerbose(false) @@ -3089,7 +3049,7 @@ public void testGetSnapshotsFromIndexBlobOnly() throws Exception { logger.info("--> verify individual requests return snapshot info"); for (int i = 0; i < numSnapshots; i++) { - response = client().admin().cluster() + response = clusterAdmin() .prepareGetSnapshots("test-repo") .setSnapshots("test-snap-" + i) .setVerbose(false) @@ -3284,7 +3244,7 @@ public void testRestoreIncreasesPrimaryTerms() { } } - final IndexMetadata indexMetadata = client().admin().cluster().prepareState().clear().setIndices(indexName) + final IndexMetadata indexMetadata = clusterAdmin().prepareState().clear().setIndices(indexName) .setMetadata(true).get().getState().metadata().index(indexName); assertThat(indexMetadata.getSettings().get(IndexMetadata.SETTING_HISTORY_UUID), nullValue()); final int numPrimaries = getNumShards(indexName).numPrimaries; @@ -3292,19 +3252,19 @@ public void testRestoreIncreasesPrimaryTerms() { .boxed().collect(Collectors.toMap(shardId -> shardId, indexMetadata::primaryTerm)); createRepository("test-repo", "fs"); - final CreateSnapshotResponse createSnapshotResponse = client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap") + final CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap") .setWaitForCompletion(true).setIndices(indexName).get(); assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(numPrimaries)); assertThat(createSnapshotResponse.getSnapshotInfo().failedShards(), equalTo(0)); assertAcked(client().admin().indices().prepareClose(indexName)); - final RestoreSnapshotResponse restoreSnapshotResponse = client().admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap") + final RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap") .setWaitForCompletion(true).get(); assertThat(restoreSnapshotResponse.getRestoreInfo().successfulShards(), equalTo(numPrimaries)); assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(0)); - final IndexMetadata restoredIndexMetadata = client().admin().cluster().prepareState().clear().setIndices(indexName) + final IndexMetadata restoredIndexMetadata = clusterAdmin().prepareState().clear().setIndices(indexName) .setMetadata(true).get().getState().metadata().index(indexName); for (int shardId = 0; shardId < numPrimaries; shardId++) { assertThat(restoredIndexMetadata.primaryTerm(shardId), greaterThan(primaryTerms.get(shardId))); @@ -3346,11 +3306,11 @@ public void testSnapshotDifferentIndicesBySameName() throws InterruptedException assertThat(snapshot2.successfulShards(), is(newShardCount)); logger.info("--> restoring snapshot 1"); - client().admin().cluster().prepareRestoreSnapshot(repoName, "snap-1").setIndices(indexName).setRenamePattern(indexName) + clusterAdmin().prepareRestoreSnapshot(repoName, "snap-1").setIndices(indexName).setRenamePattern(indexName) .setRenameReplacement("restored-1").setWaitForCompletion(true).get(); logger.info("--> restoring snapshot 2"); - client().admin().cluster().prepareRestoreSnapshot(repoName, "snap-2").setIndices(indexName).setRenamePattern(indexName) + 
clusterAdmin().prepareRestoreSnapshot(repoName, "snap-2").setIndices(indexName).setRenamePattern(indexName) .setRenameReplacement("restored-2").setWaitForCompletion(true).get(); logger.info("--> verify doc counts"); @@ -3371,7 +3331,7 @@ public void testSnapshotDifferentIndicesBySameName() throws InterruptedException } assertAcked(startDeleteSnapshot(repoName, snapshotToDelete).get()); logger.info("--> restoring snapshot [{}]", snapshotToRestore); - client().admin().cluster().prepareRestoreSnapshot(repoName, snapshotToRestore).setIndices(indexName).setRenamePattern(indexName) + clusterAdmin().prepareRestoreSnapshot(repoName, snapshotToRestore).setIndices(indexName).setRenamePattern(indexName) .setRenameReplacement("restored-3").setWaitForCompletion(true).get(); logger.info("--> verify doc counts"); @@ -3393,7 +3353,7 @@ public void testBulkDeleteWithOverlappingPatterns() { } refresh(); logger.info("--> snapshot {}", i); - CreateSnapshotResponse createSnapshotResponse = client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-" + i) + CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap-" + i) .setWaitForCompletion(true).get(); assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), @@ -3401,8 +3361,8 @@ public void testBulkDeleteWithOverlappingPatterns() { } logger.info("--> deleting all snapshots"); - client().admin().cluster().prepareDeleteSnapshot("test-repo", "test-snap-*", "*").get(); - final GetSnapshotsResponse getSnapshotsResponse = client().admin().cluster().prepareGetSnapshots("test-repo").get(); + clusterAdmin().prepareDeleteSnapshot("test-repo", "test-snap-*", "*").get(); + final GetSnapshotsResponse getSnapshotsResponse = clusterAdmin().prepareGetSnapshots("test-repo").get(); assertThat(getSnapshotsResponse.getSnapshots("test-repo"), empty()); } @@ -3446,7 +3406,7 @@ public void testHiddenIndicesIncludedInSnapshot() throws InterruptedException { // Verify that hidden indices get restored with a wildcard restore { - RestoreSnapshotResponse restoreSnapshotResponse = client().admin().cluster() + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin() .prepareRestoreSnapshot(repoName, snapName) .setWaitForCompletion(true) .setIndices("*") @@ -3464,7 +3424,7 @@ public void testHiddenIndicesIncludedInSnapshot() throws InterruptedException { // Verify that exclusions work on hidden indices { - RestoreSnapshotResponse restoreSnapshotResponse = client().admin().cluster() + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin() .prepareRestoreSnapshot(repoName, snapName) .setWaitForCompletion(true) .setIndices("*", "-.*") @@ -3482,7 +3442,7 @@ public void testHiddenIndicesIncludedInSnapshot() throws InterruptedException { // Verify that hidden indices can be restored with a non-star pattern { - RestoreSnapshotResponse restoreSnapshotResponse = client().admin().cluster() + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin() .prepareRestoreSnapshot(repoName, snapName) .setWaitForCompletion(true) .setIndices("hid*") @@ -3500,7 +3460,7 @@ public void testHiddenIndicesIncludedInSnapshot() throws InterruptedException { // Verify that hidden indices can be restored by fully specified name { - RestoreSnapshotResponse restoreSnapshotResponse = client().admin().cluster() + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin() .prepareRestoreSnapshot(repoName, snapName) 
.setWaitForCompletion(true) .setIndices(dottedHiddenIndex) @@ -3556,10 +3516,10 @@ public void testForbidDisableSoftDeletesDuringRestore() throws Exception { indexRandomDocs("test-index", between(0, 100)); flush("test-index"); } - client().admin().cluster().prepareCreateSnapshot("test-repo", "snapshot-0") + clusterAdmin().prepareCreateSnapshot("test-repo", "snapshot-0") .setIndices("test-index").setWaitForCompletion(true).get(); final SnapshotRestoreException restoreError = expectThrows(SnapshotRestoreException.class, - () -> client().admin().cluster().prepareRestoreSnapshot("test-repo", "snapshot-0") + () -> clusterAdmin().prepareRestoreSnapshot("test-repo", "snapshot-0") .setIndexSettings(Settings.builder().put(INDEX_SOFT_DELETES_SETTING.getKey(), false)) .setRenamePattern("test-index").setRenameReplacement("new-index") .get()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotShardsServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotShardsServiceIT.java index 65ba594796f47..495b6e6d427d9 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotShardsServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotShardsServiceIT.java @@ -19,7 +19,6 @@ package org.elasticsearch.snapshots; -import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus; import org.elasticsearch.plugins.Plugin; @@ -66,8 +65,7 @@ public void testRetryPostingSnapshotStatusMessages() throws Exception { .get(); waitForBlock(blockedNode, "test-repo", TimeValue.timeValueSeconds(60)); - final SnapshotId snapshotId = client().admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap") - .get().getSnapshots("test-repo").get(0).snapshotId(); + final SnapshotId snapshotId = getSnapshot("test-repo", "test-snap").snapshotId(); logger.info("--> start disrupting cluster"); final NetworkDisruption networkDisruption = isolateMasterDisruption(NetworkDisruption.NetworkDelay.random(random())); @@ -92,10 +90,7 @@ public void testRetryPostingSnapshotStatusMessages() throws Exception { internalCluster().clearDisruptionScheme(true); assertBusy(() -> { - GetSnapshotsResponse snapshotsStatusResponse = client().admin().cluster() - .prepareGetSnapshots("test-repo") - .setSnapshots("test-snap").get(); - SnapshotInfo snapshotInfo = snapshotsStatusResponse.getSnapshots("test-repo").get(0); + SnapshotInfo snapshotInfo = getSnapshot("test-repo", "test-snap"); logger.info("Snapshot status [{}], successfulShards [{}]", snapshotInfo.state(), snapshotInfo.successfulShards()); assertThat(snapshotInfo.state(), equalTo(SnapshotState.SUCCESS)); assertThat(snapshotInfo.successfulShards(), equalTo(shards)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java index 03cea2203b659..978db7deffc12 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java @@ -28,7 +28,6 @@ import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotStats; import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotStatus; import 
org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotsStatusRequest; -import org.elasticsearch.client.Client; import org.elasticsearch.cluster.SnapshotsInProgress; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; @@ -49,8 +48,6 @@ public class SnapshotStatusApisIT extends AbstractSnapshotIntegTestCase { public void testStatusApiConsistency() { - Client client = client(); - createRepository("test-repo", "fs"); createIndex("test-idx-1", "test-idx-2", "test-idx-3"); @@ -66,14 +63,13 @@ public void testStatusApiConsistency() { createFullSnapshot("test-repo", "test-snap"); - List snapshotInfos = - client.admin().cluster().prepareGetSnapshots("test-repo").get().getSnapshots("test-repo"); + List snapshotInfos = clusterAdmin().prepareGetSnapshots("test-repo").get().getSnapshots("test-repo"); assertThat(snapshotInfos.size(), equalTo(1)); SnapshotInfo snapshotInfo = snapshotInfos.get(0); assertThat(snapshotInfo.state(), equalTo(SnapshotState.SUCCESS)); assertThat(snapshotInfo.version(), equalTo(Version.CURRENT)); - final List snapshotStatus = client.admin().cluster().snapshotsStatus( + final List snapshotStatus = clusterAdmin().snapshotsStatus( new SnapshotsStatusRequest("test-repo", new String[]{"test-snap"})).actionGet().getSnapshots(); assertThat(snapshotStatus.size(), equalTo(1)); final SnapshotStatus snStatus = snapshotStatus.get(0); @@ -82,8 +78,6 @@ public void testStatusApiConsistency() { } public void testStatusAPICallInProgressSnapshot() throws Exception { - Client client = client(); - createRepository("test-repo", "mock", Settings.builder().put("location", randomRepoPath()).put("block_on_data", true)); createIndex("test-idx-1"); @@ -96,13 +90,12 @@ public void testStatusAPICallInProgressSnapshot() throws Exception { refresh(); logger.info("--> snapshot"); - ActionFuture createSnapshotResponseActionFuture = - client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).execute(); + ActionFuture createSnapshotResponseActionFuture = startFullSnapshot("test-repo", "test-snap"); logger.info("--> wait for data nodes to get blocked"); waitForBlockOnAnyDataNode("test-repo", TimeValue.timeValueMinutes(1)); awaitNumberOfSnapshotsInProgress(1); - assertEquals(SnapshotsInProgress.State.STARTED, client.admin().cluster().prepareSnapshotStatus("test-repo") + assertEquals(SnapshotsInProgress.State.STARTED, client().admin().cluster().prepareSnapshotStatus("test-repo") .setSnapshots("test-snap").get().getSnapshots().get(0).getState()); logger.info("--> unblock all data nodes"); diff --git a/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java index 3929cf4170686..cf09ca5cebfd6 100644 --- a/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java @@ -28,6 +28,7 @@ import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateObserver; +import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.SnapshotDeletionsInProgress; import org.elasticsearch.cluster.SnapshotsInProgress; import org.elasticsearch.cluster.metadata.IndexMetadata; @@ -82,7 +83,7 @@ import static 
org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.empty; -import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; public abstract class AbstractSnapshotIntegTestCase extends ESIntegTestCase { @@ -122,11 +123,11 @@ public void verifyNoLeakedListeners() throws Exception { @After public void assertRepoConsistency() { if (skipRepoConsistencyCheckReason == null) { - client().admin().cluster().prepareGetRepositories().get().repositories().forEach(repositoryMetadata -> { + clusterAdmin().prepareGetRepositories().get().repositories().forEach(repositoryMetadata -> { final String name = repositoryMetadata.name(); if (repositoryMetadata.settings().getAsBoolean("readonly", false) == false) { - client().admin().cluster().prepareDeleteSnapshot(name, OLD_VERSION_SNAPSHOT_PREFIX + "*").get(); - client().admin().cluster().prepareCleanupRepository(name).get(); + clusterAdmin().prepareDeleteSnapshot(name, OLD_VERSION_SNAPSHOT_PREFIX + "*").get(); + clusterAdmin().prepareCleanupRepository(name).get(); } BlobStoreTestUtil.assertRepoConsistency(internalCluster(), name); }); @@ -202,12 +203,10 @@ public static void waitForBlock(String node, String repository, TimeValue timeou public SnapshotInfo waitForCompletion(String repository, String snapshotName, TimeValue timeout) throws InterruptedException { long start = System.currentTimeMillis(); while (System.currentTimeMillis() - start < timeout.millis()) { - List snapshotInfos = client().admin().cluster().prepareGetSnapshots(repository).setSnapshots(snapshotName) - .get().getSnapshots(repository); - assertThat(snapshotInfos.size(), equalTo(1)); - if (snapshotInfos.get(0).state().completed()) { + final SnapshotInfo snapshotInfo = getSnapshot(repository, snapshotName); + if (snapshotInfo.state().completed()) { // Make sure that snapshot clean up operations are finished - ClusterStateResponse stateResponse = client().admin().cluster().prepareState().get(); + ClusterStateResponse stateResponse = clusterAdmin().prepareState().get(); boolean found = false; for (SnapshotsInProgress.Entry entry : stateResponse.getState().custom(SnapshotsInProgress.TYPE, SnapshotsInProgress.EMPTY).entries()) { @@ -218,7 +217,7 @@ public SnapshotInfo waitForCompletion(String repository, String snapshotName, Ti } } if (found == false) { - return snapshotInfos.get(0); + return snapshotInfo; } } Thread.sleep(100); @@ -307,7 +306,7 @@ public void unblockNode(final String repository, final String node) { protected void createRepository(String repoName, String type, Settings.Builder settings) { logger.info("--> creating repository [{}] [{}]", repoName, type); - assertAcked(client().admin().cluster().preparePutRepository(repoName) + assertAcked(clusterAdmin().preparePutRepository(repoName) .setType(type).setSettings(settings)); } @@ -349,7 +348,7 @@ protected void maybeInitWithOldSnapshotVersion(String repoName, Path repoPath) t protected String initWithSnapshotVersion(String repoName, Path repoPath, Version version) throws IOException { assertThat("This hack only works on an empty repository", getRepositoryData(repoName).getSnapshotIds(), empty()); final String oldVersionSnapshot = OLD_VERSION_SNAPSHOT_PREFIX + version.id; - final CreateSnapshotResponse createSnapshotResponse = client().admin().cluster() + final CreateSnapshotResponse createSnapshotResponse = clusterAdmin() .prepareCreateSnapshot(repoName, oldVersionSnapshot).setIndices("does-not-exist-for-sure-*") 
.setWaitForCompletion(true).get(); assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), is(0)); @@ -372,7 +371,7 @@ protected String initWithSnapshotVersion(String repoName, Path repoPath, Version protected SnapshotInfo createFullSnapshot(String repoName, String snapshotName) { logger.info("--> creating full snapshot [{}] in [{}]", snapshotName, repoName); - CreateSnapshotResponse createSnapshotResponse = client().admin().cluster().prepareCreateSnapshot(repoName, snapshotName) + CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(repoName, snapshotName) .setIncludeGlobalState(true) .setWaitForCompletion(true) .get(); @@ -416,7 +415,7 @@ protected void assertDocCount(String index, long count) { * @param metadata snapshot metadata to write (as returned by {@link SnapshotInfo#userMetadata()}) */ protected void addBwCFailedSnapshot(String repoName, String snapshotName, Map metadata) throws Exception { - final ClusterState state = client().admin().cluster().prepareState().get().getState(); + final ClusterState state = clusterAdmin().prepareState().get().getState(); final RepositoriesMetadata repositoriesMetadata = state.metadata().custom(RepositoriesMetadata.TYPE); assertNotNull(repositoriesMetadata); final RepositoryMetadata initialRepoMetadata = repositoriesMetadata.repository(repoName); @@ -486,7 +485,7 @@ protected ActionFuture startFullSnapshot(String repoName protected ActionFuture startFullSnapshot(String repoName, String snapshotName, boolean partial) { logger.info("--> creating full snapshot [{}] to repo [{}]", snapshotName, repoName); - return client().admin().cluster().prepareCreateSnapshot(repoName, snapshotName).setWaitForCompletion(true) + return clusterAdmin().prepareCreateSnapshot(repoName, snapshotName).setWaitForCompletion(true) .setPartial(partial).execute(); } @@ -517,6 +516,35 @@ protected void createIndexWithContent(String indexName, Settings indexSettings) protected ActionFuture startDeleteSnapshot(String repoName, String snapshotName) { logger.info("--> deleting snapshot [{}] from repo [{}]", snapshotName, repoName); - return client().admin().cluster().prepareDeleteSnapshot(repoName, snapshotName).execute(); + return clusterAdmin().prepareDeleteSnapshot(repoName, snapshotName).execute(); + } + + protected void updateClusterState(final Function updater) throws Exception { + final PlainActionFuture future = PlainActionFuture.newFuture(); + final ClusterService clusterService = internalCluster().getCurrentMasterNodeInstance(ClusterService.class); + clusterService.submitStateUpdateTask("test", new ClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) { + return updater.apply(currentState); + } + + @Override + public void onFailure(String source, Exception e) { + future.onFailure(e); + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + future.onResponse(null); + } + }); + future.get(); + } + + protected SnapshotInfo getSnapshot(String repository, String snapshot) { + final List snapshotInfos = clusterAdmin().prepareGetSnapshots(repository).setSnapshots(snapshot) + .get().getSnapshots(repository); + assertThat(snapshotInfos, hasSize(1)); + return snapshotInfos.get(0); } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index 4209ae1128d88..adcfe120b4d7a 100644 --- 
a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -58,6 +58,7 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.AdminClient; import org.elasticsearch.client.Client; +import org.elasticsearch.client.ClusterAdminClient; import org.elasticsearch.client.Requests; import org.elasticsearch.client.RestClient; import org.elasticsearch.client.RestClientBuilder; @@ -1329,6 +1330,13 @@ protected AdminClient admin() { return client().admin(); } + /** + * Returns a random cluster admin client. This client can be pointing to any of the nodes in the cluster. + */ + protected ClusterAdminClient clusterAdmin() { + return admin().cluster(); + } + /** * Convenience method that forwards to {@link #indexRandom(boolean, List)}. */ diff --git a/x-pack/plugin/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java b/x-pack/plugin/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java index 989c4ec3ec09d..3bf8956fe33c6 100644 --- a/x-pack/plugin/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java +++ b/x-pack/plugin/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java @@ -10,7 +10,6 @@ import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; -import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse; import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; @@ -110,10 +109,7 @@ public void testSnapshotAndRestore() throws Exception { RestStatus status = createSnapshotResponse.getSnapshotInfo().status(); assertEquals(RestStatus.OK, status); - GetSnapshotsResponse snapshot = client.admin().cluster().prepareGetSnapshots(REPO).setSnapshots(SNAPSHOT).get(); - List snap = snapshot.getSnapshots(REPO); - assertEquals(1, snap.size()); - assertEquals(Collections.singletonList(DS_BACKING_INDEX_NAME), snap.get(0).indices()); + assertEquals(Collections.singletonList(DS_BACKING_INDEX_NAME), getSnapshot(REPO, SNAPSHOT).indices()); assertTrue( client.execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request(new String[] { "ds" })) @@ -156,10 +152,7 @@ public void testSnapshotAndRestoreAll() throws Exception { RestStatus status = createSnapshotResponse.getSnapshotInfo().status(); assertEquals(RestStatus.OK, status); - GetSnapshotsResponse snapshot = client.admin().cluster().prepareGetSnapshots(REPO).setSnapshots(SNAPSHOT).get(); - List snap = snapshot.getSnapshots(REPO); - assertEquals(1, snap.size()); - assertEquals(Collections.singletonList(DS_BACKING_INDEX_NAME), snap.get(0).indices()); + assertEquals(Collections.singletonList(DS_BACKING_INDEX_NAME), getSnapshot(REPO, SNAPSHOT).indices()); assertAcked(client.execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request(new String[] { "*" })).get()); assertAcked(client.admin().indices().prepareDelete("*").setIndicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN_CLOSED_HIDDEN));
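Note on the helper methods introduced above: in this flattened rendering of the diff the Java generic type parameters have been dropped (for example `List snapshotInfos`, `PlainActionFuture future`, `Function updater`). A minimal sketch of the new test-framework helpers as they would read with the assumed type parameters restored — mirroring the hunks in AbstractSnapshotIntegTestCase.java and ESIntegTestCase.java, not replacing them — is:

```java
// Members of the integration-test base classes; imports (ClusterAdminClient, PlainActionFuture,
// ClusterStateUpdateTask, ClusterService, SnapshotInfo, hasSize, ...) come from those classes.

/** Shorthand for client().admin().cluster(), added to ESIntegTestCase. */
protected ClusterAdminClient clusterAdmin() {
    return admin().cluster();
}

/** Fetches exactly one SnapshotInfo, replacing the repeated prepareGetSnapshots(...).get(0) chains. */
protected SnapshotInfo getSnapshot(String repository, String snapshot) {
    final List<SnapshotInfo> snapshotInfos = clusterAdmin().prepareGetSnapshots(repository)
        .setSnapshots(snapshot).get().getSnapshots(repository);
    assertThat(snapshotInfos, hasSize(1));                  // fail fast if zero or several snapshots match
    return snapshotInfos.get(0);
}

/** Applies an arbitrary transformation to the cluster state on the master and waits for it to be processed. */
protected void updateClusterState(final Function<ClusterState, ClusterState> updater) throws Exception {
    final PlainActionFuture<Void> future = PlainActionFuture.newFuture();
    final ClusterService clusterService = internalCluster().getCurrentMasterNodeInstance(ClusterService.class);
    clusterService.submitStateUpdateTask("test", new ClusterStateUpdateTask() {
        @Override
        public ClusterState execute(ClusterState currentState) {
            return updater.apply(currentState);             // the test-supplied transformation
        }

        @Override
        public void onFailure(String source, Exception e) {
            future.onFailure(e);                            // surface task failures to the waiting test
        }

        @Override
        public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
            future.onResponse(null);                        // unblock once the new state has been applied
        }
    });
    future.get();
}
```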
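For illustration, a hypothetical test method (names such as `test-repo`, `test-idx` and `test-snap` are placeholders, not taken from a specific test in this change) showing the call-site pattern the refactoring converges on: cluster-level requests go through `clusterAdmin()` and the get-snapshots boilerplate collapses into `getSnapshot(...)`:

```java
public void testSnapshotHelperUsage() throws Exception {
    createRepository("test-repo", "fs");                    // helper from AbstractSnapshotIntegTestCase
    createIndexWithRandomDocs("test-idx", 100);

    createFullSnapshot("test-repo", "test-snap");           // waits for completion internally

    // One call replaces prepareGetSnapshots(...).get().getSnapshots("test-repo").get(0).
    assertThat(getSnapshot("test-repo", "test-snap").state(), equalTo(SnapshotState.SUCCESS));

    // clusterAdmin() replaces the longer client().admin().cluster() chain.
    assertAcked(clusterAdmin().prepareDeleteSnapshot("test-repo", "test-snap").get());
}
```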
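Similarly, a hypothetical use of the new `updateClusterState(...)` helper — the transformation shown here (writing a routing setting straight into the metadata) is only a placeholder to demonstrate the shape of the call, not something this change does:

```java
// Apply a test-local cluster state mutation on the master and block until it is published.
updateClusterState(currentState -> ClusterState.builder(currentState)
    .metadata(Metadata.builder(currentState.metadata())
        .persistentSettings(Settings.builder()
            .put(currentState.metadata().persistentSettings())
            .put("cluster.routing.allocation.enable", "none")  // placeholder setting for illustration
            .build())
        .build())
    .build());
```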