diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index 6a054e4aab317..6da0de77dd533 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -152,3 +152,18 @@ task azureThirdPartyTest(type: Test) { } } tasks.named("check").configure { dependsOn("azureThirdPartyTest") } + +// test jar is exported by the internalClusterTestArtifacts configuration to be used in the encrypted Azure repository test +configurations { + internalClusterTestArtifacts.extendsFrom internalClusterTestImplementation + internalClusterTestArtifacts.extendsFrom internalClusterTestRuntime +} + +def internalClusterTestJar = tasks.register("internalClusterTestJar", Jar) { + appendix 'internalClusterTest' + from sourceSets.internalClusterTest.output +} + +artifacts { + internalClusterTestArtifacts internalClusterTestJar +} diff --git a/plugins/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryTests.java b/plugins/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryTests.java index 3e305c1cb1baf..e22f8eb7fb987 100644 --- a/plugins/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryTests.java +++ b/plugins/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryTests.java @@ -53,12 +53,15 @@ protected String repositoryType() { } @Override - protected Settings repositorySettings() { - return Settings.builder() - .put(super.repositorySettings()) - .put(AzureRepository.Repository.CONTAINER_SETTING.getKey(), "container") - .put(AzureStorageSettings.ACCOUNT_SETTING.getKey(), "test") - .build(); + protected Settings repositorySettings(String repoName) { + Settings.Builder settingsBuilder = Settings.builder() + .put(super.repositorySettings(repoName)) + .put(AzureRepository.Repository.CONTAINER_SETTING.getKey(), 
"container") + .put(AzureStorageSettings.ACCOUNT_SETTING.getKey(), "test"); + if (randomBoolean()) { + settingsBuilder.put(AzureRepository.Repository.BASE_PATH_SETTING.getKey(), randomFrom("test", "test/1")); + } + return settingsBuilder.build(); } @Override diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle index 5ec1ea519a6d7..eb842e9eb543c 100644 --- a/plugins/repository-gcs/build.gradle +++ b/plugins/repository-gcs/build.gradle @@ -333,4 +333,21 @@ def gcsThirdPartyTest = tasks.register("gcsThirdPartyTest", Test) { tasks.named("check").configure { dependsOn(largeBlobYamlRestTest, gcsThirdPartyTest) -} \ No newline at end of file +} + +// test jar is exported by the internalClusterTestArtifacts configuration to be used in the encrypted GCS repository test +configurations { + internalClusterTestArtifacts.extendsFrom internalClusterTestImplementation + internalClusterTestArtifacts.extendsFrom internalClusterTestRuntime +} + +def internalClusterTestJar = tasks.register("internalClusterTestJar", Jar) { + appendix 'internalClusterTest' + from sourceSets.internalClusterTest.output + // for the repositories.gcs.TestUtils class + from sourceSets.test.output +} + +artifacts { + internalClusterTestArtifacts internalClusterTestJar +} diff --git a/plugins/repository-gcs/src/internalClusterTest/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java b/plugins/repository-gcs/src/internalClusterTest/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java index 47918619c18e1..69b8fb669d441 100644 --- a/plugins/repository-gcs/src/internalClusterTest/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java +++ b/plugins/repository-gcs/src/internalClusterTest/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java @@ -67,6 +67,7 @@ import static 
org.elasticsearch.repositories.gcs.GoogleCloudStorageClientSettings.CREDENTIALS_FILE_SETTING; import static org.elasticsearch.repositories.gcs.GoogleCloudStorageClientSettings.ENDPOINT_SETTING; import static org.elasticsearch.repositories.gcs.GoogleCloudStorageClientSettings.TOKEN_URI_SETTING; +import static org.elasticsearch.repositories.gcs.GoogleCloudStorageRepository.BASE_PATH; import static org.elasticsearch.repositories.gcs.GoogleCloudStorageRepository.BUCKET; import static org.elasticsearch.repositories.gcs.GoogleCloudStorageRepository.CLIENT_NAME; @@ -79,12 +80,15 @@ protected String repositoryType() { } @Override - protected Settings repositorySettings() { - return Settings.builder() - .put(super.repositorySettings()) - .put(BUCKET.getKey(), "bucket") - .put(CLIENT_NAME.getKey(), "test") - .build(); + protected Settings repositorySettings(String repoName) { + Settings.Builder settingsBuilder = Settings.builder() + .put(super.repositorySettings(repoName)) + .put(BUCKET.getKey(), "bucket") + .put(CLIENT_NAME.getKey(), "test"); + if (randomBoolean()) { + settingsBuilder.put(BASE_PATH.getKey(), randomFrom("test", "test/1")); + } + return settingsBuilder.build(); } @Override @@ -120,7 +124,7 @@ protected Settings nodeSettings(int nodeOrdinal) { } public void testDeleteSingleItem() { - final String repoName = createRepository(randomName()); + final String repoName = createRepository(randomRepositoryName()); final RepositoriesService repositoriesService = internalCluster().getMasterNodeInstance(RepositoriesService.class); final BlobStoreRepository repository = (BlobStoreRepository) repositoriesService.repository(repoName); PlainActionFuture.get(f -> repository.threadPool().generic().execute(ActionRunnable.run(f, () -> diff --git a/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsBlobStoreRepositoryTests.java b/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsBlobStoreRepositoryTests.java index 
a427cee824209..e87c2ac5c1b29 100644 --- a/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsBlobStoreRepositoryTests.java +++ b/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsBlobStoreRepositoryTests.java @@ -38,7 +38,7 @@ protected String repositoryType() { } @Override - protected Settings repositorySettings() { + protected Settings repositorySettings(String repoName) { return Settings.builder() .put("uri", "hdfs:///") .put("conf.fs.AbstractFileSystem.hdfs.impl", TestingFs.class.getName()) @@ -47,6 +47,12 @@ protected Settings repositorySettings() { .put("compress", randomBoolean()).build(); } + @Override + public void testSnapshotAndRestore() throws Exception { + // the HDFS mockup doesn't preserve the repository contents after removing the repository + testSnapshotAndRestore(false); + } + @Override protected Collection> nodePlugins() { return Collections.singletonList(HdfsPlugin.class); diff --git a/plugins/repository-s3/build.gradle b/plugins/repository-s3/build.gradle index bbbdcc6c5d61f..f13c78e0f339b 100644 --- a/plugins/repository-s3/build.gradle +++ b/plugins/repository-s3/build.gradle @@ -309,4 +309,21 @@ tasks.named("thirdPartyAudit").configure { 'com.amazonaws.services.kms.model.GenerateDataKeyResult', 'javax.activation.DataHandler' ) -} \ No newline at end of file +} + +// test jar is exported by the internalClusterTestArtifacts configuration to be used in the encrypted S3 repository test +configurations { + internalClusterTestArtifacts.extendsFrom internalClusterTestImplementation + internalClusterTestArtifacts.extendsFrom internalClusterTestRuntime +} + +def internalClusterTestJar = tasks.register("internalClusterTestJar", Jar) { + appendix 'internalClusterTest' + from sourceSets.internalClusterTest.output + // for the plugin-security.policy resource + from sourceSets.test.output +} + +artifacts { + internalClusterTestArtifacts internalClusterTestJar +} diff --git 
a/plugins/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java b/plugins/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java index 71c9fe0fd346b..ab439d45560b0 100644 --- a/plugins/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java +++ b/plugins/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java @@ -95,14 +95,17 @@ protected String repositoryType() { } @Override - protected Settings repositorySettings() { - return Settings.builder() - .put(super.repositorySettings()) + protected Settings repositorySettings(String repoName) { + Settings.Builder settingsBuilder = Settings.builder() + .put(super.repositorySettings(repoName)) .put(S3Repository.BUCKET_SETTING.getKey(), "bucket") .put(S3Repository.CLIENT_NAME.getKey(), "test") // Don't cache repository data because some tests manually modify the repository data - .put(BlobStoreRepository.CACHE_REPOSITORY_DATA.getKey(), false) - .build(); + .put(BlobStoreRepository.CACHE_REPOSITORY_DATA.getKey(), false); + if (randomBoolean()) { + settingsBuilder.put(S3Repository.BASE_PATH_SETTING.getKey(), randomFrom("test", "test/1")); + } + return settingsBuilder.build(); } @Override @@ -146,8 +149,9 @@ protected Settings nodeSettings(int nodeOrdinal) { } public void testEnforcedCooldownPeriod() throws IOException { - final String repoName = createRepository(randomName(), Settings.builder().put(repositorySettings()) - .put(S3Repository.COOLDOWN_PERIOD.getKey(), TEST_COOLDOWN_PERIOD).build()); + final String repoName = randomRepositoryName(); + createRepository(repoName, Settings.builder().put(repositorySettings(repoName)) + .put(S3Repository.COOLDOWN_PERIOD.getKey(), TEST_COOLDOWN_PERIOD).build(), true); final SnapshotId fakeOldSnapshot = client().admin().cluster().prepareCreateSnapshot(repoName, 
"snapshot-old") .setWaitForCompletion(true).setIndices().get().getSnapshotInfo().snapshotId(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/repositories/fs/FsBlobStoreRepositoryIntegTests.java b/server/src/internalClusterTest/java/org/elasticsearch/repositories/fs/FsBlobStoreRepositoryIntegTests.java new file mode 100644 index 0000000000000..268f47130601d --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/repositories/fs/FsBlobStoreRepositoryIntegTests.java @@ -0,0 +1,40 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.repositories.fs; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.repositories.blobstore.ESFsBasedRepositoryIntegTestCase; + +public class FsBlobStoreRepositoryIntegTests extends ESFsBasedRepositoryIntegTestCase { + + @Override + protected Settings repositorySettings(String repositoryName) { + final Settings.Builder settings = Settings.builder() + .put("compress", randomBoolean()) + .put("location", randomRepoPath()); + if (randomBoolean()) { + long size = 1 << randomInt(10); + settings.put("chunk_size", new ByteSizeValue(size, ByteSizeUnit.KB)); + } + return settings.build(); + } +} diff --git a/server/src/main/java/org/elasticsearch/common/blobstore/BlobPath.java b/server/src/main/java/org/elasticsearch/common/blobstore/BlobPath.java index 6c6df937584d8..9c635fbe9201c 100644 --- a/server/src/main/java/org/elasticsearch/common/blobstore/BlobPath.java +++ b/server/src/main/java/org/elasticsearch/common/blobstore/BlobPath.java @@ -25,6 +25,7 @@ import java.util.Collections; import java.util.Iterator; import java.util.List; +import java.util.Objects; /** * The list of paths where a blob can reside. The contents of the paths are dependent upon the implementation of {@link BlobContainer}. 
@@ -90,4 +91,17 @@ public String toString() { } return sb.toString(); } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + BlobPath other = (BlobPath) o; + return paths.equals(other.paths); + } + + @Override + public int hashCode() { + return Objects.hash(paths); + } } diff --git a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESBlobStoreRepositoryIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESBlobStoreRepositoryIntegTestCase.java index 057a1c49010a6..0717259d67202 100644 --- a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESBlobStoreRepositoryIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESBlobStoreRepositoryIntegTestCase.java @@ -41,6 +41,7 @@ import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.repositories.Repository; import org.elasticsearch.repositories.RepositoryData; +import org.elasticsearch.repositories.RepositoryMissingException; import org.elasticsearch.snapshots.SnapshotMissingException; import org.elasticsearch.snapshots.SnapshotRestoreException; import org.elasticsearch.test.ESIntegTestCase; @@ -78,17 +79,19 @@ public static RepositoryData getRepositoryData(Repository repository) { protected abstract String repositoryType(); - protected Settings repositorySettings() { + protected Settings repositorySettings(String repoName) { return Settings.builder().put("compress", randomBoolean()).build(); } protected final String createRepository(final String name) { - return createRepository(name, repositorySettings()); + return createRepository(name, true); } - protected final String createRepository(final String name, final Settings settings) { - final boolean verify = randomBoolean(); + protected final String createRepository(final String name, final boolean verify) { + return 
createRepository(name, repositorySettings(name), verify); + } + protected final String createRepository(final String name, final Settings settings, final boolean verify) { logger.debug("--> creating repository [name: {}, verify: {}, settings: {}]", name, verify, settings); assertAcked(client().admin().cluster().preparePutRepository(name) .setType(repositoryType()) @@ -98,7 +101,7 @@ protected final String createRepository(final String name, final Settings settin internalCluster().getDataOrMasterNodeInstances(RepositoriesService.class).forEach(repositories -> { assertThat(repositories.repository(name), notNullValue()); assertThat(repositories.repository(name), instanceOf(BlobStoreRepository.class)); - assertThat(repositories.repository(name).isReadOnly(), is(false)); + assertThat(repositories.repository(name).isReadOnly(), is(settings.getAsBoolean("readonly", false))); BlobStore blobStore = ((BlobStoreRepository) repositories.repository(name)).getBlobStore(); assertThat("blob store has to be lazy initialized", blobStore, verify ? 
is(notNullValue()) : is(nullValue())); }); @@ -106,6 +109,15 @@ protected final String createRepository(final String name, final Settings settin return name; } + protected final void deleteRepository(final String name) { + logger.debug("--> deleting repository [name: {}]", name); + assertAcked(client().admin().cluster().prepareDeleteRepository(name)); + internalCluster().getDataOrMasterNodeInstances(RepositoriesService.class).forEach(repositories -> { + RepositoryMissingException e = expectThrows(RepositoryMissingException.class, () -> repositories.repository(name)); + assertThat(e.repository(), equalTo(name)); + }); + } + public void testReadNonExistingPath() throws IOException { try (BlobStore store = newBlobStore()) { final BlobContainer container = store.blobContainer(new BlobPath()); @@ -176,7 +188,7 @@ public void testList() throws IOException { BlobMetadata blobMetadata = blobs.get(generated.getKey()); assertThat(generated.getKey(), blobMetadata, CoreMatchers.notNullValue()); assertThat(blobMetadata.name(), CoreMatchers.equalTo(generated.getKey())); - assertThat(blobMetadata.length(), CoreMatchers.equalTo(generated.getValue())); + assertThat(blobMetadata.length(), CoreMatchers.equalTo(blobLengthFromContentLength(generated.getValue()))); } assertThat(container.listBlobsByPrefix("foo-").size(), CoreMatchers.equalTo(numberOfFooBlobs)); @@ -263,7 +275,11 @@ protected static void writeBlob(BlobContainer container, String blobName, BytesA } protected BlobStore newBlobStore() { - final String repository = createRepository(randomName()); + final String repository = createRepository(randomRepositoryName()); + return newBlobStore(repository); + } + + protected BlobStore newBlobStore(String repository) { final BlobStoreRepository blobStoreRepository = (BlobStoreRepository) internalCluster().getMasterNodeInstance(RepositoriesService.class).repository(repository); return PlainActionFuture.get( @@ -271,7 +287,13 @@ protected BlobStore newBlobStore() { } public void 
testSnapshotAndRestore() throws Exception { - final String repoName = createRepository(randomName()); + testSnapshotAndRestore(randomBoolean()); + } + + protected void testSnapshotAndRestore(boolean recreateRepositoryBeforeRestore) throws Exception { + final String repoName = randomRepositoryName(); + final Settings repoSettings = repositorySettings(repoName); + createRepository(repoName, repoSettings, randomBoolean()); int indexCount = randomIntBetween(1, 5); int[] docCounts = new int[indexCount]; String[] indexNames = generateRandomNames(indexCount); @@ -319,6 +341,11 @@ public void testSnapshotAndRestore() throws Exception { assertAcked(client().admin().indices().prepareClose(closeIndices.toArray(new String[closeIndices.size()]))); } + if (recreateRepositoryBeforeRestore) { + deleteRepository(repoName); + createRepository(repoName, repoSettings, randomBoolean()); + } + logger.info("--> restore all indices from the snapshot"); assertSuccessfulRestore(client().admin().cluster().prepareRestoreSnapshot(repoName, snapshotName).setWaitForCompletion(true)); @@ -343,7 +370,7 @@ public void testSnapshotAndRestore() throws Exception { } public void testMultipleSnapshotAndRollback() throws Exception { - final String repoName = createRepository(randomName()); + final String repoName = createRepository(randomRepositoryName()); int iterationCount = randomIntBetween(2, 5); int[] docCounts = new int[iterationCount]; String indexName = randomName(); @@ -398,7 +425,7 @@ public void testMultipleSnapshotAndRollback() throws Exception { } public void testIndicesDeletedFromRepository() throws Exception { - final String repoName = createRepository("test-repo"); + final String repoName = createRepository(randomRepositoryName()); Client client = client(); createIndex("test-idx-1", "test-idx-2", "test-idx-3"); ensureGreen(); @@ -495,7 +522,15 @@ private static void assertSuccessfulRestore(RestoreSnapshotResponse response) { assertThat(response.getRestoreInfo().successfulShards(), 
equalTo(response.getRestoreInfo().totalShards())); } - protected static String randomName() { + protected String randomName() { return randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT); } + + protected String randomRepositoryName() { + return randomName(); + } + + protected long blobLengthFromContentLength(long contentLength) { + return contentLength; + } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/repositories/fs/FsBlobStoreRepositoryIT.java b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESFsBasedRepositoryIntegTestCase.java similarity index 69% rename from server/src/internalClusterTest/java/org/elasticsearch/repositories/fs/FsBlobStoreRepositoryIT.java rename to test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESFsBasedRepositoryIntegTestCase.java index 9ad02412f3771..3a8501d65e95a 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/repositories/fs/FsBlobStoreRepositoryIT.java +++ b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESFsBasedRepositoryIntegTestCase.java @@ -7,7 +7,7 @@ * not use this file except in compliance with the License. * You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an @@ -16,18 +16,16 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.elasticsearch.repositories.fs; +package org.elasticsearch.repositories.blobstore; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobPath; -import org.elasticsearch.common.blobstore.fs.FsBlobStore; +import org.elasticsearch.common.blobstore.BlobStore; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.ByteSizeUnit; -import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.core.internal.io.IOUtils; -import org.elasticsearch.repositories.blobstore.ESBlobStoreRepositoryIntegTestCase; +import org.elasticsearch.repositories.fs.FsRepository; import java.io.IOException; import java.nio.file.Files; @@ -39,35 +37,22 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.Matchers.instanceOf; -public class FsBlobStoreRepositoryIT extends ESBlobStoreRepositoryIntegTestCase { +public abstract class ESFsBasedRepositoryIntegTestCase extends ESBlobStoreRepositoryIntegTestCase { @Override protected String repositoryType() { return FsRepository.TYPE; } - @Override - protected Settings repositorySettings() { - final Settings.Builder settings = Settings.builder(); - settings.put(super.repositorySettings()); - settings.put("location", randomRepoPath()); - if (randomBoolean()) { - long size = 1 << randomInt(10); - settings.put("chunk_size", new ByteSizeValue(size, ByteSizeUnit.KB)); - } - return settings.build(); - } - public void testMissingDirectoriesNotCreatedInReadonlyRepository() throws IOException, InterruptedException { - final String repoName = randomName(); + final String repoName = randomRepositoryName(); final Path repoPath = randomRepoPath(); - logger.info("--> creating repository {} at {}", repoName, repoPath); - - 
assertAcked(client().admin().cluster().preparePutRepository(repoName).setType("fs").setSettings(Settings.builder() - .put("location", repoPath) - .put("compress", randomBoolean()) - .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES))); + final Settings repoSettings = Settings.builder() + .put(repositorySettings(repoName)) + .put("location", repoPath) + .build(); + createRepository(repoName, repoSettings, randomBoolean()); final String indexName = randomName(); int docCount = iterations(10, 1000); @@ -91,8 +76,7 @@ public void testMissingDirectoriesNotCreatedInReadonlyRepository() throws IOExce } assertFalse(Files.exists(deletedPath)); - assertAcked(client().admin().cluster().preparePutRepository(repoName).setType("fs").setSettings(Settings.builder() - .put("location", repoPath).put("readonly", true))); + createRepository(repoName, Settings.builder().put(repoSettings).put("readonly", true).build(), randomBoolean()); final ElasticsearchException exception = expectThrows(ElasticsearchException.class, () -> client().admin().cluster().prepareRestoreSnapshot(repoName, snapshotName).setWaitForCompletion(randomBoolean()).get()); @@ -102,25 +86,34 @@ public void testMissingDirectoriesNotCreatedInReadonlyRepository() throws IOExce } public void testReadOnly() throws Exception { - Path tempDir = createTempDir(); - Path path = tempDir.resolve("bar"); - - try (FsBlobStore store = new FsBlobStore(randomIntBetween(1, 8) * 1024, path, true)) { - assertFalse(Files.exists(path)); + final String repoName = randomRepositoryName(); + final Path repoPath = randomRepoPath(); + final Settings repoSettings = Settings.builder() + .put(repositorySettings(repoName)) + .put("readonly", true) + .put(FsRepository.LOCATION_SETTING.getKey(), repoPath) + .put(BlobStoreRepository.BUFFER_SIZE_SETTING.getKey(), String.valueOf(randomIntBetween(1, 8) * 1024) + "kb") + .build(); + createRepository(repoName, repoSettings, false); + + try (BlobStore store = newBlobStore(repoName)) { + 
assertFalse(Files.exists(repoPath)); BlobPath blobPath = BlobPath.cleanPath().add("foo"); store.blobContainer(blobPath); - Path storePath = store.path(); + Path storePath = repoPath; for (String d : blobPath) { storePath = storePath.resolve(d); } assertFalse(Files.exists(storePath)); } - try (FsBlobStore store = new FsBlobStore(randomIntBetween(1, 8) * 1024, path, false)) { - assertTrue(Files.exists(path)); + createRepository(repoName, Settings.builder().put(repoSettings).put("readonly", false).build(), false); + + try (BlobStore store = newBlobStore(repoName)) { + assertTrue(Files.exists(repoPath)); BlobPath blobPath = BlobPath.cleanPath().add("foo"); BlobContainer container = store.blobContainer(blobPath); - Path storePath = store.path(); + Path storePath = repoPath; for (String d : blobPath) { storePath = storePath.resolve(d); } diff --git a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESMockAPIBasedRepositoryIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESMockAPIBasedRepositoryIntegTestCase.java index e174d94d3716a..305faecc1bcf4 100644 --- a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESMockAPIBasedRepositoryIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESMockAPIBasedRepositoryIntegTestCase.java @@ -34,12 +34,15 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.network.InetAddresses; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.mocksocket.MockHttpServer; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.repositories.Repository; import org.elasticsearch.repositories.RepositoryMissingException; import org.elasticsearch.repositories.RepositoryStats; import 
org.elasticsearch.test.BackgroundIndexer; +import org.elasticsearch.threadpool.ThreadPool; import org.junit.After; import org.junit.AfterClass; import org.junit.Before; @@ -55,6 +58,9 @@ import java.util.Map; import java.util.Objects; import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; import java.util.stream.StreamSupport; @@ -83,6 +89,7 @@ protected interface BlobStoreHttpHandler extends HttpHandler { private static final byte[] BUFFER = new byte[1024]; private static HttpServer httpServer; + private static ExecutorService executorService; protected Map handlers; private static final Logger log = LogManager.getLogger(); @@ -90,13 +97,19 @@ protected interface BlobStoreHttpHandler extends HttpHandler { @BeforeClass public static void startHttpServer() throws Exception { httpServer = MockHttpServer.createHttp(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0), 0); + ThreadFactory threadFactory = EsExecutors.daemonThreadFactory("[" + ESMockAPIBasedRepositoryIntegTestCase.class.getName() + "]"); + // the EncryptedRepository can require more than one connection open at one time + executorService = EsExecutors.newScaling(ESMockAPIBasedRepositoryIntegTestCase.class.getName(), 0, 2, 60, + TimeUnit.SECONDS, threadFactory, new ThreadContext(Settings.EMPTY)); httpServer.setExecutor(r -> { - try { - r.run(); - } catch (Throwable t) { - log.error("Error in execution on mock http server IO thread", t); - throw t; - } + executorService.execute(() -> { + try { + r.run(); + } catch (Throwable t) { + log.error("Error in execution on mock http server IO thread", t); + throw t; + } + }); }); httpServer.start(); } @@ -111,6 +124,7 @@ public void setUpHttpServer() { @AfterClass public static void stopHttpServer() { httpServer.stop(0); + 
ThreadPool.terminate(executorService, 10, TimeUnit.SECONDS); httpServer = null; } @@ -124,14 +138,17 @@ public void tearDownHttpServer() { h = ((DelegatingHttpHandler) h).getDelegate(); } if (h instanceof BlobStoreHttpHandler) { - List blobs = ((BlobStoreHttpHandler) h).blobs().keySet().stream() - .filter(blob -> blob.contains("index") == false).collect(Collectors.toList()); - assertThat("Only index blobs should remain in repository but found " + blobs, blobs, hasSize(0)); + assertEmptyRepo(((BlobStoreHttpHandler) h).blobs()); } } } } + protected void assertEmptyRepo(Map blobsMap) { + List blobs = blobsMap.keySet().stream().filter(blob -> blob.contains("index") == false).collect(Collectors.toList()); + assertThat("Only index blobs should remain in repository but found " + blobs, blobs, hasSize(0)); + } + protected abstract Map createHttpHandlers(); protected abstract HttpHandler createErroneousHttpHandler(HttpHandler delegate); @@ -140,7 +157,7 @@ public void tearDownHttpServer() { * Test the snapshot and restore of an index which has large segments files. 
*/ public final void testSnapshotWithLargeSegmentFiles() throws Exception { - final String repository = createRepository(randomName()); + final String repository = createRepository(randomRepositoryName()); final String index = "index-no-merges"; createIndex(index, Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) @@ -171,7 +188,7 @@ public final void testSnapshotWithLargeSegmentFiles() throws Exception { } public void testRequestStats() throws Exception { - final String repository = createRepository(randomName()); + final String repository = createRepository(randomRepositoryName()); final String index = "index-no-merges"; createIndex(index, Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java index d2c7754b59a67..e16fa2b915051 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java @@ -60,6 +60,8 @@ public enum Feature { MONITORING_CLUSTER_ALERTS(OperationMode.STANDARD, true), MONITORING_UPDATE_RETENTION(OperationMode.STANDARD, false), + ENCRYPTED_SNAPSHOT(OperationMode.PLATINUM, true), + CCR(OperationMode.PLATINUM, true), GRAPH(OperationMode.PLATINUM, true), diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java index dd521ce4d9ba3..2dd14e37a6b19 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java @@ -20,6 +20,7 @@ import java.util.stream.Collectors; import static org.elasticsearch.license.License.OperationMode.BASIC; +import static 
org.elasticsearch.license.License.OperationMode.ENTERPRISE; import static org.elasticsearch.license.License.OperationMode.GOLD; import static org.elasticsearch.license.License.OperationMode.MISSING; import static org.elasticsearch.license.License.OperationMode.PLATINUM; @@ -348,6 +349,24 @@ public void testWatcherInactivePlatinumGoldTrial() throws Exception { assertAllowed(STANDARD, false, s -> s.checkFeature(Feature.WATCHER), false); } + public void testEncryptedSnapshotsWithInactiveLicense() { + assertAllowed(BASIC, false, s -> s.checkFeature(Feature.ENCRYPTED_SNAPSHOT), false); + assertAllowed(TRIAL, false, s -> s.checkFeature(Feature.ENCRYPTED_SNAPSHOT), false); + assertAllowed(GOLD, false, s -> s.checkFeature(Feature.ENCRYPTED_SNAPSHOT), false); + assertAllowed(PLATINUM, false, s -> s.checkFeature(Feature.ENCRYPTED_SNAPSHOT), false); + assertAllowed(ENTERPRISE, false, s -> s.checkFeature(Feature.ENCRYPTED_SNAPSHOT), false); + assertAllowed(STANDARD, false, s -> s.checkFeature(Feature.ENCRYPTED_SNAPSHOT), false); + } + + public void testEncryptedSnapshotsWithActiveLicense() { + assertAllowed(BASIC, true, s -> s.checkFeature(Feature.ENCRYPTED_SNAPSHOT), false); + assertAllowed(TRIAL, true, s -> s.checkFeature(Feature.ENCRYPTED_SNAPSHOT), true); + assertAllowed(GOLD, true, s -> s.checkFeature(Feature.ENCRYPTED_SNAPSHOT), false); + assertAllowed(PLATINUM, true, s -> s.checkFeature(Feature.ENCRYPTED_SNAPSHOT), true); + assertAllowed(ENTERPRISE, true, s -> s.checkFeature(Feature.ENCRYPTED_SNAPSHOT), true); + assertAllowed(STANDARD, true, s -> s.checkFeature(Feature.ENCRYPTED_SNAPSHOT), false); + } + public void testGraphPlatinumTrial() throws Exception { assertAllowed(TRIAL, true, s -> s.checkFeature(Feature.GRAPH), true); assertAllowed(PLATINUM, true, s -> s.checkFeature(Feature.GRAPH), true); diff --git a/x-pack/plugin/repository-encrypted/build.gradle b/x-pack/plugin/repository-encrypted/build.gradle index 5a2f82946f711..34b9e2aea320f 100644 --- 
a/x-pack/plugin/repository-encrypted/build.gradle +++ b/x-pack/plugin/repository-encrypted/build.gradle @@ -1,11 +1,25 @@ evaluationDependsOn(xpackModule('core')) apply plugin: 'elasticsearch.esplugin' +apply plugin: 'elasticsearch.internal-cluster-test' esplugin { name 'repository-encrypted' description 'Elasticsearch Expanded Pack Plugin - client-side encrypted repositories.' classname 'org.elasticsearch.repositories.encrypted.EncryptedRepositoryPlugin' extendedPlugins = ['x-pack-core'] } +archivesBaseName = 'x-pack-repository-encrypted' -integTest.enabled = false +dependencies { + // necessary for the license check + compileOnly project(path: xpackModule('core'), configuration: 'default') + testImplementation project(path: xpackModule('core'), configuration: 'testArtifacts') + // required for integ tests of encrypted FS repository + internalClusterTestImplementation project(":test:framework") + // required for integ tests of encrypted cloud repositories + internalClusterTestImplementation project(path: ':plugins:repository-gcs', configuration: 'internalClusterTestArtifacts') + internalClusterTestImplementation project(path: ':plugins:repository-azure', configuration: 'internalClusterTestArtifacts') + internalClusterTestImplementation project(path: ':plugins:repository-s3', configuration: 'internalClusterTestArtifacts') + // for encrypted GCS repository integ tests + internalClusterTestRuntimeOnly 'com.google.guava:guava:26.0-jre' +} \ No newline at end of file diff --git a/x-pack/plugin/repository-encrypted/src/internalClusterTest/java/org/elasticsearch/repositories/encrypted/EncryptedAzureBlobStoreRepositoryIntegTests.java b/x-pack/plugin/repository-encrypted/src/internalClusterTest/java/org/elasticsearch/repositories/encrypted/EncryptedAzureBlobStoreRepositoryIntegTests.java new file mode 100644 index 0000000000000..5aee6f84f1d7d --- /dev/null +++ 
b/x-pack/plugin/repository-encrypted/src/internalClusterTest/java/org/elasticsearch/repositories/encrypted/EncryptedAzureBlobStoreRepositoryIntegTests.java @@ -0,0 +1,101 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.repositories.encrypted; + +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.settings.MockSecureSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.license.License; +import org.elasticsearch.license.LicenseService; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.repositories.azure.AzureBlobStoreRepositoryTests; +import org.junit.BeforeClass; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +import static org.elasticsearch.repositories.encrypted.EncryptedRepository.DEK_ROOT_CONTAINER; +import static org.elasticsearch.repositories.encrypted.EncryptedRepository.getEncryptedBlobByteLength; +import static org.hamcrest.Matchers.hasSize; + +public final class EncryptedAzureBlobStoreRepositoryIntegTests extends AzureBlobStoreRepositoryTests { + private static List repositoryNames; + + @BeforeClass + private static void preGenerateRepositoryNames() { + List names = new ArrayList<>(); + for (int i = 0; i < 32; i++) { + names.add("test-repo-" + i); + } + repositoryNames = Collections.synchronizedList(names); + } + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + Settings.Builder settingsBuilder = Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(LicenseService.SELF_GENERATED_LICENSE_TYPE.getKey(), License.LicenseType.TRIAL.getTypeName()); + MockSecureSettings 
superSecureSettings = (MockSecureSettings) settingsBuilder.getSecureSettings(); + superSecureSettings.merge(nodeSecureSettings()); + return settingsBuilder.build(); + } + + protected MockSecureSettings nodeSecureSettings() { + MockSecureSettings secureSettings = new MockSecureSettings(); + for (String repositoryName : repositoryNames) { + secureSettings.setString( + EncryptedRepositoryPlugin.ENCRYPTION_PASSWORD_SETTING.getConcreteSettingForNamespace(repositoryName).getKey(), + repositoryName + ); + } + return secureSettings; + } + + @Override + protected String randomRepositoryName() { + return repositoryNames.remove(randomIntBetween(0, repositoryNames.size() - 1)); + } + + @Override + protected Collection> nodePlugins() { + return Arrays.asList(LocalStateEncryptedRepositoryPlugin.class, TestAzureRepositoryPlugin.class); + } + + @Override + protected String repositoryType() { + return EncryptedRepositoryPlugin.REPOSITORY_TYPE_NAME; + } + + @Override + protected Settings repositorySettings(String repositoryName) { + return Settings.builder() + .put(super.repositorySettings(repositoryName)) + .put(EncryptedRepositoryPlugin.DELEGATE_TYPE_SETTING.getKey(), "azure") + .put(EncryptedRepositoryPlugin.PASSWORD_NAME_SETTING.getKey(), repositoryName) + .build(); + } + + @Override + protected void assertEmptyRepo(Map blobsMap) { + List blobs = blobsMap.keySet() + .stream() + .filter(blob -> false == blob.contains("index")) + .filter(blob -> false == blob.contains(DEK_ROOT_CONTAINER)) // encryption metadata "leaks" + .collect(Collectors.toList()); + assertThat("Only index blobs should remain in repository but found " + blobs, blobs, hasSize(0)); + } + + @Override + protected long blobLengthFromContentLength(long contentLength) { + return getEncryptedBlobByteLength(contentLength); + } + +} diff --git a/x-pack/plugin/repository-encrypted/src/internalClusterTest/java/org/elasticsearch/repositories/encrypted/EncryptedFSBlobStoreRepositoryIntegTests.java 
b/x-pack/plugin/repository-encrypted/src/internalClusterTest/java/org/elasticsearch/repositories/encrypted/EncryptedFSBlobStoreRepositoryIntegTests.java new file mode 100644 index 0000000000000..274ac459f6625 --- /dev/null +++ b/x-pack/plugin/repository-encrypted/src/internalClusterTest/java/org/elasticsearch/repositories/encrypted/EncryptedFSBlobStoreRepositoryIntegTests.java @@ -0,0 +1,155 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.repositories.encrypted; + +import org.elasticsearch.action.ActionRunnable; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.settings.MockSecureSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.license.License; +import org.elasticsearch.license.LicenseService; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.repositories.RepositoriesService; +import org.elasticsearch.repositories.RepositoryData; +import org.elasticsearch.repositories.RepositoryException; +import org.elasticsearch.repositories.blobstore.BlobStoreRepository; +import org.elasticsearch.repositories.blobstore.ESFsBasedRepositoryIntegTestCase; +import org.elasticsearch.repositories.fs.FsRepository; +import org.junit.BeforeClass; + +import java.io.IOException; +import java.io.UncheckedIOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.stream.Stream; + +import static org.elasticsearch.repositories.encrypted.EncryptedRepository.getEncryptedBlobByteLength; +import static 
org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.containsString; + +public final class EncryptedFSBlobStoreRepositoryIntegTests extends ESFsBasedRepositoryIntegTestCase { + private static int NUMBER_OF_TEST_REPOSITORIES = 32; + + private static List repositoryNames = new ArrayList<>(); + + @BeforeClass + private static void preGenerateRepositoryNames() { + for (int i = 0; i < NUMBER_OF_TEST_REPOSITORIES; i++) { + repositoryNames.add("test-repo-" + i); + } + } + + @Override + protected Settings repositorySettings(String repositoryName) { + final Settings.Builder settings = Settings.builder() + .put("compress", randomBoolean()) + .put("location", randomRepoPath()) + .put("delegate_type", FsRepository.TYPE) + .put("password_name", repositoryName); + if (randomBoolean()) { + long size = 1 << randomInt(10); + settings.put("chunk_size", new ByteSizeValue(size, ByteSizeUnit.KB)); + } + return settings.build(); + } + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(LicenseService.SELF_GENERATED_LICENSE_TYPE.getKey(), License.LicenseType.TRIAL.getTypeName()) + .setSecureSettings(nodeSecureSettings()) + .build(); + } + + protected MockSecureSettings nodeSecureSettings() { + MockSecureSettings secureSettings = new MockSecureSettings(); + for (String repositoryName : repositoryNames) { + secureSettings.setString( + EncryptedRepositoryPlugin.ENCRYPTION_PASSWORD_SETTING.getConcreteSettingForNamespace(repositoryName).getKey(), + repositoryName + ); + } + return secureSettings; + } + + @Override + protected String randomRepositoryName() { + return repositoryNames.remove(randomIntBetween(0, repositoryNames.size() - 1)); + } + + @Override + protected long blobLengthFromContentLength(long contentLength) { + return getEncryptedBlobByteLength(contentLength); + } + + @Override + protected Collection> nodePlugins() { + return 
Arrays.asList(LocalStateEncryptedRepositoryPlugin.class); + } + + @Override + protected String repositoryType() { + return EncryptedRepositoryPlugin.REPOSITORY_TYPE_NAME; + } + + public void testTamperedEncryptionMetadata() throws Exception { + final String repoName = randomRepositoryName(); + final Path repoPath = randomRepoPath(); + final Settings repoSettings = Settings.builder().put(repositorySettings(repoName)).put("location", repoPath).build(); + createRepository(repoName, repoSettings, true); + + final String snapshotName = randomName(); + logger.info("--> create snapshot {}:{}", repoName, snapshotName); + client().admin().cluster().prepareCreateSnapshot(repoName, snapshotName).setWaitForCompletion(true).setIndices("other*").get(); + + assertAcked(client().admin().cluster().prepareDeleteRepository(repoName)); + createRepository(repoName, Settings.builder().put(repoSettings).put("readonly", randomBoolean()).build(), randomBoolean()); + + try (Stream rootContents = Files.list(repoPath.resolve(EncryptedRepository.DEK_ROOT_CONTAINER))) { + // tamper all DEKs + rootContents.filter(Files::isDirectory).forEach(DEKRootPath -> { + try (Stream contents = Files.list(DEKRootPath)) { + contents.filter(Files::isRegularFile).forEach(DEKPath -> { + try { + byte[] originalDEKBytes = Files.readAllBytes(DEKPath); + // tamper DEK + int tamperPos = randomIntBetween(0, originalDEKBytes.length - 1); + originalDEKBytes[tamperPos] ^= 0xFF; + Files.write(DEKPath, originalDEKBytes); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + }); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + }); + final BlobStoreRepository blobStoreRepository = (BlobStoreRepository) internalCluster().getCurrentMasterNodeInstance( + RepositoriesService.class + ).repository(repoName); + RepositoryException e = expectThrows( + RepositoryException.class, + () -> PlainActionFuture.get( + f -> blobStoreRepository.threadPool().generic().execute(ActionRunnable.wrap(f, 
blobStoreRepository::getRepositoryData)) + ) + ); + assertThat(e.getMessage(), containsString("the encryption metadata in the repository has been corrupted")); + e = expectThrows( + RepositoryException.class, + () -> client().admin().cluster().prepareRestoreSnapshot(repoName, snapshotName).setWaitForCompletion(true).get() + ); + assertThat(e.getMessage(), containsString("the encryption metadata in the repository has been corrupted")); + } + } + +} diff --git a/x-pack/plugin/repository-encrypted/src/internalClusterTest/java/org/elasticsearch/repositories/encrypted/EncryptedGCSBlobStoreRepositoryIntegTests.java b/x-pack/plugin/repository-encrypted/src/internalClusterTest/java/org/elasticsearch/repositories/encrypted/EncryptedGCSBlobStoreRepositoryIntegTests.java new file mode 100644 index 0000000000000..36413d59a730c --- /dev/null +++ b/x-pack/plugin/repository-encrypted/src/internalClusterTest/java/org/elasticsearch/repositories/encrypted/EncryptedGCSBlobStoreRepositoryIntegTests.java @@ -0,0 +1,102 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.repositories.encrypted; + +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.settings.MockSecureSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.license.License; +import org.elasticsearch.license.LicenseService; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.repositories.gcs.GoogleCloudStorageBlobStoreRepositoryTests; +import org.junit.BeforeClass; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +import static org.elasticsearch.repositories.encrypted.EncryptedRepository.DEK_ROOT_CONTAINER; +import static org.elasticsearch.repositories.encrypted.EncryptedRepository.getEncryptedBlobByteLength; +import static org.hamcrest.Matchers.hasSize; + +public final class EncryptedGCSBlobStoreRepositoryIntegTests extends GoogleCloudStorageBlobStoreRepositoryTests { + + private static List repositoryNames; + + @BeforeClass + private static void preGenerateRepositoryNames() { + List names = new ArrayList<>(); + for (int i = 0; i < 32; i++) { + names.add("test-repo-" + i); + } + repositoryNames = Collections.synchronizedList(names); + } + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + Settings.Builder settingsBuilder = Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(LicenseService.SELF_GENERATED_LICENSE_TYPE.getKey(), License.LicenseType.TRIAL.getTypeName()); + MockSecureSettings superSecureSettings = (MockSecureSettings) settingsBuilder.getSecureSettings(); + superSecureSettings.merge(nodeSecureSettings()); + return settingsBuilder.build(); + } + + protected MockSecureSettings nodeSecureSettings() { + MockSecureSettings secureSettings = new MockSecureSettings(); + for (String repositoryName : repositoryNames) { + secureSettings.setString( + 
EncryptedRepositoryPlugin.ENCRYPTION_PASSWORD_SETTING.getConcreteSettingForNamespace(repositoryName).getKey(), + repositoryName + ); + } + return secureSettings; + } + + @Override + protected String randomRepositoryName() { + return repositoryNames.remove(randomIntBetween(0, repositoryNames.size() - 1)); + } + + @Override + protected Collection> nodePlugins() { + return Arrays.asList(LocalStateEncryptedRepositoryPlugin.class, TestGoogleCloudStoragePlugin.class); + } + + @Override + protected String repositoryType() { + return EncryptedRepositoryPlugin.REPOSITORY_TYPE_NAME; + } + + @Override + protected Settings repositorySettings(String repositoryName) { + return Settings.builder() + .put(super.repositorySettings(repositoryName)) + .put(EncryptedRepositoryPlugin.DELEGATE_TYPE_SETTING.getKey(), "gcs") + .put(EncryptedRepositoryPlugin.PASSWORD_NAME_SETTING.getKey(), repositoryName) + .build(); + } + + @Override + protected void assertEmptyRepo(Map blobsMap) { + List blobs = blobsMap.keySet() + .stream() + .filter(blob -> false == blob.contains("index")) + .filter(blob -> false == blob.contains(DEK_ROOT_CONTAINER)) // encryption metadata "leaks" + .collect(Collectors.toList()); + assertThat("Only index blobs should remain in repository but found " + blobs, blobs, hasSize(0)); + } + + @Override + protected long blobLengthFromContentLength(long contentLength) { + return getEncryptedBlobByteLength(contentLength); + } + +} diff --git a/x-pack/plugin/repository-encrypted/src/internalClusterTest/java/org/elasticsearch/repositories/encrypted/EncryptedRepositorySecretIntegTests.java b/x-pack/plugin/repository-encrypted/src/internalClusterTest/java/org/elasticsearch/repositories/encrypted/EncryptedRepositorySecretIntegTests.java new file mode 100644 index 0000000000000..3deba6f9b476f --- /dev/null +++ b/x-pack/plugin/repository-encrypted/src/internalClusterTest/java/org/elasticsearch/repositories/encrypted/EncryptedRepositorySecretIntegTests.java @@ -0,0 +1,806 @@ +/* + * 
Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.repositories.encrypted; + +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.action.ActionRunnable; +import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryResponse; +import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; +import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse; +import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; +import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.cluster.SnapshotsInProgress; +import org.elasticsearch.common.settings.MockSecureSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.license.License; +import org.elasticsearch.license.LicenseService; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.repositories.RepositoriesService; +import org.elasticsearch.repositories.RepositoryData; +import org.elasticsearch.repositories.RepositoryException; +import org.elasticsearch.repositories.RepositoryMissingException; +import org.elasticsearch.repositories.RepositoryVerificationException; +import org.elasticsearch.repositories.blobstore.BlobStoreRepository; +import org.elasticsearch.repositories.fs.FsRepository; +import org.elasticsearch.snapshots.Snapshot; +import org.elasticsearch.snapshots.SnapshotInfo; +import org.elasticsearch.snapshots.SnapshotMissingException; +import org.elasticsearch.snapshots.SnapshotState; +import org.elasticsearch.test.ESIntegTestCase; 
+import org.elasticsearch.test.InternalTestCluster; + +import java.util.Arrays; +import java.util.Collection; +import java.util.HashSet; +import java.util.List; +import java.util.Locale; +import java.util.Set; +import java.util.stream.Collectors; + +import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.hasKey; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.notNullValue; +import static org.mockito.Matchers.anyObject; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, autoManageMasterNodes = false) +public final class EncryptedRepositorySecretIntegTests extends ESIntegTestCase { + + @Override + protected Collection> nodePlugins() { + return Arrays.asList(LocalStateEncryptedRepositoryPlugin.class); + } + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(LicenseService.SELF_GENERATED_LICENSE_TYPE.getKey(), License.LicenseType.TRIAL.getTypeName()) + .build(); + } + + public void testRepositoryCreationFailsForMissingPassword() throws Exception { + // if the password is missing on the master node, the repository creation fails + final String repositoryName = randomName(); + MockSecureSettings secureSettingsWithPassword 
= new MockSecureSettings(); + secureSettingsWithPassword.setString( + EncryptedRepositoryPlugin.ENCRYPTION_PASSWORD_SETTING.getConcreteSettingForNamespace(repositoryName).getKey(), + randomAlphaOfLength(20) + ); + logger.info("--> start 3 nodes"); + internalCluster().setBootstrapMasterNodeIndex(0); + final String masterNodeName = internalCluster().startNode(); + logger.info("--> started master node " + masterNodeName); + ensureStableCluster(1); + internalCluster().startNodes(2, Settings.builder().setSecureSettings(secureSettingsWithPassword).build()); + logger.info("--> started two other nodes"); + ensureStableCluster(3); + assertThat(masterNodeName, equalTo(internalCluster().getMasterName())); + + final Settings repositorySettings = repositorySettings(repositoryName); + RepositoryException e = expectThrows( + RepositoryException.class, + () -> client().admin() + .cluster() + .preparePutRepository(repositoryName) + .setType(repositoryType()) + .setVerify(randomBoolean()) + .setSettings(repositorySettings) + .get() + ); + assertThat(e.getMessage(), containsString("failed to create repository")); + expectThrows(RepositoryMissingException.class, () -> client().admin().cluster().prepareGetRepositories(repositoryName).get()); + + if (randomBoolean()) { + // stop the node with the missing password + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(masterNodeName)); + ensureStableCluster(2); + } else { + // restart the node with the missing password + internalCluster().restartNode(masterNodeName, new InternalTestCluster.RestartCallback() { + @Override + public Settings onNodeStopped(String nodeName) throws Exception { + Settings.Builder newSettings = Settings.builder().put(super.onNodeStopped(nodeName)); + newSettings.setSecureSettings(secureSettingsWithPassword); + return newSettings.build(); + } + }); + ensureStableCluster(3); + } + // repository creation now successful + createRepository(repositoryName, repositorySettings, true); + } + + public void 
testRepositoryVerificationFailsForMissingPassword() throws Exception { + // if the password is missing on any non-master node, the repository verification fails + final String repositoryName = randomName(); + MockSecureSettings secureSettingsWithPassword = new MockSecureSettings(); + secureSettingsWithPassword.setString( + EncryptedRepositoryPlugin.ENCRYPTION_PASSWORD_SETTING.getConcreteSettingForNamespace(repositoryName).getKey(), + randomAlphaOfLength(20) + ); + logger.info("--> start 2 nodes"); + internalCluster().setBootstrapMasterNodeIndex(0); + final String masterNodeName = internalCluster().startNode(Settings.builder().setSecureSettings(secureSettingsWithPassword).build()); + logger.info("--> started master node " + masterNodeName); + ensureStableCluster(1); + final String otherNodeName = internalCluster().startNode(); + logger.info("--> started other node " + otherNodeName); + ensureStableCluster(2); + assertThat(masterNodeName, equalTo(internalCluster().getMasterName())); + // repository create fails verification + final Settings repositorySettings = repositorySettings(repositoryName); + expectThrows( + RepositoryVerificationException.class, + () -> client().admin() + .cluster() + .preparePutRepository(repositoryName) + .setType(repositoryType()) + .setVerify(true) + .setSettings(repositorySettings) + .get() + ); + if (randomBoolean()) { + // delete and recreate repo + logger.debug("--> deleting repository [name: {}]", repositoryName); + assertAcked(client().admin().cluster().prepareDeleteRepository(repositoryName)); + assertAcked( + client().admin() + .cluster() + .preparePutRepository(repositoryName) + .setType(repositoryType()) + .setVerify(false) + .setSettings(repositorySettings) + .get() + ); + } + // test verify call fails + expectThrows(RepositoryVerificationException.class, () -> client().admin().cluster().prepareVerifyRepository(repositoryName).get()); + if (randomBoolean()) { + // stop the node with the missing password + 
internalCluster().stopRandomNode(InternalTestCluster.nameFilter(otherNodeName)); + ensureStableCluster(1); + // repository verification now succeeds + VerifyRepositoryResponse verifyRepositoryResponse = client().admin().cluster().prepareVerifyRepository(repositoryName).get(); + List verifiedNodes = verifyRepositoryResponse.getNodes().stream().map(n -> n.getName()).collect(Collectors.toList()); + assertThat(verifiedNodes, contains(masterNodeName)); + } else { + // restart the node with the missing password + internalCluster().restartNode(otherNodeName, new InternalTestCluster.RestartCallback() { + @Override + public Settings onNodeStopped(String nodeName) throws Exception { + Settings.Builder newSettings = Settings.builder().put(super.onNodeStopped(nodeName)); + newSettings.setSecureSettings(secureSettingsWithPassword); + return newSettings.build(); + } + }); + ensureStableCluster(2); + // repository verification now succeeds + VerifyRepositoryResponse verifyRepositoryResponse = client().admin().cluster().prepareVerifyRepository(repositoryName).get(); + List verifiedNodes = verifyRepositoryResponse.getNodes().stream().map(n -> n.getName()).collect(Collectors.toList()); + assertThat(verifiedNodes, containsInAnyOrder(masterNodeName, otherNodeName)); + } + } + + public void testRepositoryVerificationFailsForDifferentPassword() throws Exception { + final String repositoryName = randomName(); + final String repoPass1 = randomAlphaOfLength(20); + final String repoPass2 = randomAlphaOfLength(19); + // put a different repository password + MockSecureSettings secureSettings1 = new MockSecureSettings(); + secureSettings1.setString( + EncryptedRepositoryPlugin.ENCRYPTION_PASSWORD_SETTING.getConcreteSettingForNamespace(repositoryName).getKey(), + repoPass1 + ); + MockSecureSettings secureSettings2 = new MockSecureSettings(); + secureSettings2.setString( + EncryptedRepositoryPlugin.ENCRYPTION_PASSWORD_SETTING.getConcreteSettingForNamespace(repositoryName).getKey(), + repoPass2 + 
); + logger.info("--> start 2 nodes"); + internalCluster().setBootstrapMasterNodeIndex(1); + final String node1 = internalCluster().startNode(Settings.builder().setSecureSettings(secureSettings1).build()); + final String node2 = internalCluster().startNode(Settings.builder().setSecureSettings(secureSettings2).build()); + ensureStableCluster(2); + // repository create fails verification + Settings repositorySettings = repositorySettings(repositoryName); + expectThrows( + RepositoryVerificationException.class, + () -> client().admin() + .cluster() + .preparePutRepository(repositoryName) + .setType(repositoryType()) + .setVerify(true) + .setSettings(repositorySettings) + .get() + ); + if (randomBoolean()) { + // delete and recreate repo + logger.debug("--> deleting repository [name: {}]", repositoryName); + assertAcked(client().admin().cluster().prepareDeleteRepository(repositoryName)); + assertAcked( + client().admin() + .cluster() + .preparePutRepository(repositoryName) + .setType(repositoryType()) + .setVerify(false) + .setSettings(repositorySettings) + .get() + ); + } + // test verify call fails + expectThrows(RepositoryVerificationException.class, () -> client().admin().cluster().prepareVerifyRepository(repositoryName).get()); + // restart one of the nodes to use the same password + if (randomBoolean()) { + secureSettings1.setString( + EncryptedRepositoryPlugin.ENCRYPTION_PASSWORD_SETTING.getConcreteSettingForNamespace(repositoryName).getKey(), + repoPass2 + ); + internalCluster().restartNode(node1, new InternalTestCluster.RestartCallback()); + } else { + secureSettings2.setString( + EncryptedRepositoryPlugin.ENCRYPTION_PASSWORD_SETTING.getConcreteSettingForNamespace(repositoryName).getKey(), + repoPass1 + ); + internalCluster().restartNode(node2, new InternalTestCluster.RestartCallback()); + } + ensureStableCluster(2); + // repository verification now succeeds + VerifyRepositoryResponse verifyRepositoryResponse = 
client().admin().cluster().prepareVerifyRepository(repositoryName).get(); + List verifiedNodes = verifyRepositoryResponse.getNodes().stream().map(n -> n.getName()).collect(Collectors.toList()); + assertThat(verifiedNodes, containsInAnyOrder(node1, node2)); + } + + public void testLicenseComplianceSnapshotAndRestore() throws Exception { + final String repositoryName = randomName(); + MockSecureSettings secureSettingsWithPassword = new MockSecureSettings(); + secureSettingsWithPassword.setString( + EncryptedRepositoryPlugin.ENCRYPTION_PASSWORD_SETTING.getConcreteSettingForNamespace(repositoryName).getKey(), + randomAlphaOfLength(20) + ); + logger.info("--> start 2 nodes"); + internalCluster().setBootstrapMasterNodeIndex(1); + internalCluster().startNodes(2, Settings.builder().setSecureSettings(secureSettingsWithPassword).build()); + ensureStableCluster(2); + + logger.info("--> creating repo " + repositoryName); + createRepository(repositoryName); + final String indexName = randomName(); + logger.info("--> create random index {} with {} records", indexName, 3); + indexRandom( + true, + client().prepareIndex(indexName).setId("1").setSource("field1", "the quick brown fox jumps"), + client().prepareIndex(indexName).setId("2").setSource("field1", "quick brown"), + client().prepareIndex(indexName).setId("3").setSource("field1", "quick") + ); + assertHitCount(client().prepareSearch(indexName).setSize(0).get(), 3); + + final String snapshotName = randomName(); + logger.info("--> create snapshot {}:{}", repositoryName, snapshotName); + assertSuccessfulSnapshot( + client().admin() + .cluster() + .prepareCreateSnapshot(repositoryName, snapshotName) + .setIndices(indexName) + .setWaitForCompletion(true) + .get() + ); + + // make license not accept encrypted snapshots + EncryptedRepository encryptedRepository = (EncryptedRepository) internalCluster().getCurrentMasterNodeInstance( + RepositoriesService.class + ).repository(repositoryName); + 
encryptedRepository.licenseStateSupplier = () -> { + XPackLicenseState mockLicenseState = mock(XPackLicenseState.class); + when(mockLicenseState.isAllowed(anyObject())).thenReturn(false); + return mockLicenseState; + }; + + // now snapshot is not permitted + ElasticsearchSecurityException e = expectThrows( + ElasticsearchSecurityException.class, + () -> client().admin().cluster().prepareCreateSnapshot(repositoryName, snapshotName + "2").setWaitForCompletion(true).get() + ); + assertThat(e.getDetailedMessage(), containsString("current license is non-compliant for [encrypted snapshots]")); + + logger.info("--> delete index {}", indexName); + assertAcked(client().admin().indices().prepareDelete(indexName)); + + // but restore is permitted + logger.info("--> restore index from the snapshot"); + assertSuccessfulRestore( + client().admin().cluster().prepareRestoreSnapshot(repositoryName, snapshotName).setWaitForCompletion(true).get() + ); + ensureGreen(); + assertHitCount(client().prepareSearch(indexName).setSize(0).get(), 3); + // also delete snapshot is permitted + logger.info("--> delete snapshot {}:{}", repositoryName, snapshotName); + assertAcked(client().admin().cluster().prepareDeleteSnapshot(repositoryName, snapshotName).get()); + } + + public void testSnapshotIsPartialForMissingPassword() throws Exception { + final String repositoryName = randomName(); + final Settings repositorySettings = repositorySettings(repositoryName); + MockSecureSettings secureSettingsWithPassword = new MockSecureSettings(); + secureSettingsWithPassword.setString( + EncryptedRepositoryPlugin.ENCRYPTION_PASSWORD_SETTING.getConcreteSettingForNamespace(repositoryName).getKey(), + randomAlphaOfLength(20) + ); + logger.info("--> start 2 nodes"); + internalCluster().setBootstrapMasterNodeIndex(0); + // master has the password + internalCluster().startNode(Settings.builder().setSecureSettings(secureSettingsWithPassword).build()); + ensureStableCluster(1); + final String otherNode = 
internalCluster().startNode(); + ensureStableCluster(2); + logger.debug("--> creating repository [name: {}, verify: {}, settings: {}]", repositoryName, false, repositorySettings); + assertAcked( + client().admin() + .cluster() + .preparePutRepository(repositoryName) + .setType(repositoryType()) + .setVerify(false) + .setSettings(repositorySettings) + ); + // create an index with the shard on the node without a repository password + final String indexName = randomName(); + final Settings indexSettings = Settings.builder() + .put(indexSettings()) + .put("index.routing.allocation.include._name", otherNode) + .put(SETTING_NUMBER_OF_SHARDS, 1) + .build(); + logger.info("--> create random index {}", indexName); + createIndex(indexName, indexSettings); + indexRandom( + true, + client().prepareIndex(indexName).setId("1").setSource("field1", "the quick brown fox jumps"), + client().prepareIndex(indexName).setId("2").setSource("field1", "quick brown"), + client().prepareIndex(indexName).setId("3").setSource("field1", "quick") + ); + assertHitCount(client().prepareSearch(indexName).setSize(0).get(), 3); + + // empty snapshot completes successfully because it does not involve data on the node without a repository password + final String snapshotName = randomName(); + logger.info("--> create snapshot {}:{}", repositoryName, snapshotName); + CreateSnapshotResponse createSnapshotResponse = client().admin() + .cluster() + .prepareCreateSnapshot(repositoryName, snapshotName) + .setIndices(indexName + "other*") + .setWaitForCompletion(true) + .get(); + assertThat( + createSnapshotResponse.getSnapshotInfo().successfulShards(), + equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()) + ); + assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(0)); + assertThat( + createSnapshotResponse.getSnapshotInfo().userMetadata(), + not(hasKey(EncryptedRepository.PASSWORD_HASH_USER_METADATA_KEY)) + ); + assertThat( + 
createSnapshotResponse.getSnapshotInfo().userMetadata(), + not(hasKey(EncryptedRepository.PASSWORD_SALT_USER_METADATA_KEY)) + ); + + // snapshot is PARTIAL because it includes shards on nodes with a missing repository password + final String snapshotName2 = snapshotName + "2"; + CreateSnapshotResponse incompleteSnapshotResponse = client().admin() + .cluster() + .prepareCreateSnapshot(repositoryName, snapshotName2) + .setWaitForCompletion(true) + .setIndices(indexName) + .get(); + assertThat(incompleteSnapshotResponse.getSnapshotInfo().state(), equalTo(SnapshotState.PARTIAL)); + assertTrue( + incompleteSnapshotResponse.getSnapshotInfo() + .shardFailures() + .stream() + .allMatch(shardFailure -> shardFailure.reason().contains("[" + repositoryName + "] missing")) + ); + assertThat( + incompleteSnapshotResponse.getSnapshotInfo().userMetadata(), + not(hasKey(EncryptedRepository.PASSWORD_HASH_USER_METADATA_KEY)) + ); + assertThat( + incompleteSnapshotResponse.getSnapshotInfo().userMetadata(), + not(hasKey(EncryptedRepository.PASSWORD_SALT_USER_METADATA_KEY)) + ); + final Set nodesWithFailures = incompleteSnapshotResponse.getSnapshotInfo() + .shardFailures() + .stream() + .map(sf -> sf.nodeId()) + .collect(Collectors.toSet()); + assertThat(nodesWithFailures.size(), equalTo(1)); + final ClusterStateResponse clusterState = client().admin().cluster().prepareState().clear().setNodes(true).get(); + assertThat(clusterState.getState().nodes().get(nodesWithFailures.iterator().next()).getName(), equalTo(otherNode)); + } + + public void testSnapshotIsPartialForDifferentPassword() throws Exception { + final String repoName = randomName(); + final Settings repoSettings = repositorySettings(repoName); + final String repoPass1 = randomAlphaOfLength(20); + final String repoPass2 = randomAlphaOfLength(19); + MockSecureSettings secureSettingsMaster = new MockSecureSettings(); + secureSettingsMaster.setString( + 
EncryptedRepositoryPlugin.ENCRYPTION_PASSWORD_SETTING.getConcreteSettingForNamespace(repoName).getKey(), + repoPass1 + ); + MockSecureSettings secureSettingsOther = new MockSecureSettings(); + secureSettingsOther.setString( + EncryptedRepositoryPlugin.ENCRYPTION_PASSWORD_SETTING.getConcreteSettingForNamespace(repoName).getKey(), + repoPass2 + ); + final boolean putRepoEarly = randomBoolean(); + logger.info("--> start 2 nodes"); + internalCluster().setBootstrapMasterNodeIndex(0); + final String masterNode = internalCluster().startNode(Settings.builder().setSecureSettings(secureSettingsMaster).build()); + ensureStableCluster(1); + if (putRepoEarly) { + createRepository(repoName, repoSettings, true); + } + final String otherNode = internalCluster().startNode(Settings.builder().setSecureSettings(secureSettingsOther).build()); + ensureStableCluster(2); + if (false == putRepoEarly) { + createRepository(repoName, repoSettings, false); + } + + // create index with shards on both nodes + final String indexName = randomName(); + final Settings indexSettings = Settings.builder().put(indexSettings()).put(SETTING_NUMBER_OF_SHARDS, 5).build(); + logger.info("--> create random index {}", indexName); + createIndex(indexName, indexSettings); + indexRandom( + true, + client().prepareIndex(indexName).setId("1").setSource("field1", "the quick brown fox jumps"), + client().prepareIndex(indexName).setId("2").setSource("field1", "quick brown"), + client().prepareIndex(indexName).setId("3").setSource("field1", "quick"), + client().prepareIndex(indexName).setId("4").setSource("field1", "lazy"), + client().prepareIndex(indexName).setId("5").setSource("field1", "dog") + ); + assertHitCount(client().prepareSearch(indexName).setSize(0).get(), 5); + + // empty snapshot completes successfully for both repos because it does not involve any data + final String snapshotName = randomName(); + logger.info("--> create snapshot {}:{}", repoName, snapshotName); + CreateSnapshotResponse 
createSnapshotResponse = client().admin() + .cluster() + .prepareCreateSnapshot(repoName, snapshotName) + .setIndices(indexName + "other*") + .setWaitForCompletion(true) + .get(); + assertThat( + createSnapshotResponse.getSnapshotInfo().successfulShards(), + equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()) + ); + assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(0)); + assertThat( + createSnapshotResponse.getSnapshotInfo().userMetadata(), + not(hasKey(EncryptedRepository.PASSWORD_HASH_USER_METADATA_KEY)) + ); + assertThat( + createSnapshotResponse.getSnapshotInfo().userMetadata(), + not(hasKey(EncryptedRepository.PASSWORD_SALT_USER_METADATA_KEY)) + ); + + // snapshot is PARTIAL because it includes shards on nodes with a different repository KEK + final String snapshotName2 = snapshotName + "2"; + CreateSnapshotResponse incompleteSnapshotResponse = client().admin() + .cluster() + .prepareCreateSnapshot(repoName, snapshotName2) + .setWaitForCompletion(true) + .setIndices(indexName) + .get(); + assertThat(incompleteSnapshotResponse.getSnapshotInfo().state(), equalTo(SnapshotState.PARTIAL)); + assertTrue( + incompleteSnapshotResponse.getSnapshotInfo() + .shardFailures() + .stream() + .allMatch(shardFailure -> shardFailure.reason().contains("Repository password mismatch")) + ); + final Set nodesWithFailures = incompleteSnapshotResponse.getSnapshotInfo() + .shardFailures() + .stream() + .map(sf -> sf.nodeId()) + .collect(Collectors.toSet()); + assertThat(nodesWithFailures.size(), equalTo(1)); + final ClusterStateResponse clusterState = client().admin().cluster().prepareState().clear().setNodes(true).get(); + assertThat(clusterState.getState().nodes().get(nodesWithFailures.iterator().next()).getName(), equalTo(otherNode)); + } + + public void testWrongRepositoryPassword() throws Exception { + final String repositoryName = randomName(); + final Settings repositorySettings = repositorySettings(repositoryName); + final String 
goodPassword = randomAlphaOfLength(20); + final String wrongPassword = randomAlphaOfLength(19); + MockSecureSettings secureSettingsWithPassword = new MockSecureSettings(); + secureSettingsWithPassword.setString( + EncryptedRepositoryPlugin.ENCRYPTION_PASSWORD_SETTING.getConcreteSettingForNamespace(repositoryName).getKey(), + goodPassword + ); + logger.info("--> start 2 nodes"); + internalCluster().setBootstrapMasterNodeIndex(1); + internalCluster().startNodes(2, Settings.builder().setSecureSettings(secureSettingsWithPassword).build()); + ensureStableCluster(2); + createRepository(repositoryName, repositorySettings, true); + // create empty snapshot + final String snapshotName = randomName(); + logger.info("--> create empty snapshot {}:{}", repositoryName, snapshotName); + CreateSnapshotResponse createSnapshotResponse = client().admin() + .cluster() + .prepareCreateSnapshot(repositoryName, snapshotName) + .setWaitForCompletion(true) + .get(); + assertThat( + createSnapshotResponse.getSnapshotInfo().successfulShards(), + equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()) + ); + assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(0)); + assertThat( + createSnapshotResponse.getSnapshotInfo().userMetadata(), + not(hasKey(EncryptedRepository.PASSWORD_HASH_USER_METADATA_KEY)) + ); + assertThat( + createSnapshotResponse.getSnapshotInfo().userMetadata(), + not(hasKey(EncryptedRepository.PASSWORD_SALT_USER_METADATA_KEY)) + ); + // restart master node and fill in a wrong password + secureSettingsWithPassword.setString( + EncryptedRepositoryPlugin.ENCRYPTION_PASSWORD_SETTING.getConcreteSettingForNamespace(repositoryName).getKey(), + wrongPassword + ); + Set nodesWithWrongPassword = new HashSet<>(); + do { + String masterNodeName = internalCluster().getMasterName(); + logger.info("--> restart master node {}", masterNodeName); + internalCluster().restartNode(masterNodeName, new InternalTestCluster.RestartCallback()); + 
nodesWithWrongPassword.add(masterNodeName); + ensureStableCluster(2); + } while (false == nodesWithWrongPassword.contains(internalCluster().getMasterName())); + // maybe recreate the repository + if (randomBoolean()) { + deleteRepository(repositoryName); + createRepository(repositoryName, repositorySettings, false); + } + // all repository operations return "repository password is incorrect", but the repository does not move to the corrupted state + final BlobStoreRepository blobStoreRepository = (BlobStoreRepository) internalCluster().getCurrentMasterNodeInstance( + RepositoriesService.class + ).repository(repositoryName); + RepositoryException e = expectThrows( + RepositoryException.class, + () -> PlainActionFuture.get( + f -> blobStoreRepository.threadPool().generic().execute(ActionRunnable.wrap(f, blobStoreRepository::getRepositoryData)) + ) + ); + assertThat(e.getCause().getMessage(), containsString("repository password is incorrect")); + e = expectThrows( + RepositoryException.class, + () -> client().admin().cluster().prepareCreateSnapshot(repositoryName, snapshotName + "2").setWaitForCompletion(true).get() + ); + assertThat(e.getCause().getMessage(), containsString("repository password is incorrect")); + GetSnapshotsResponse getSnapshotResponse = client().admin().cluster().prepareGetSnapshots(repositoryName).get(); + assertThat(getSnapshotResponse.getSuccessfulResponses().keySet(), empty()); + assertThat(getSnapshotResponse.getFailedResponses().keySet(), contains(repositoryName)); + assertThat( + getSnapshotResponse.getFailedResponses().get(repositoryName).getCause().getMessage(), + containsString("repository password is incorrect") + ); + e = expectThrows( + RepositoryException.class, + () -> client().admin().cluster().prepareRestoreSnapshot(repositoryName, snapshotName).setWaitForCompletion(true).get() + ); + assertThat(e.getCause().getMessage(), containsString("repository password is incorrect")); + e = expectThrows( + RepositoryException.class, + () -> 
client().admin().cluster().prepareDeleteSnapshot(repositoryName, snapshotName).get() + ); + assertThat(e.getCause().getMessage(), containsString("repository password is incorrect")); + // restart master node and fill in the good password + secureSettingsWithPassword.setString( + EncryptedRepositoryPlugin.ENCRYPTION_PASSWORD_SETTING.getConcreteSettingForNamespace(repositoryName).getKey(), + goodPassword + ); + do { + String masterNodeName = internalCluster().getMasterName(); + logger.info("--> restart master node {}", masterNodeName); + internalCluster().restartNode(masterNodeName, new InternalTestCluster.RestartCallback()); + nodesWithWrongPassword.remove(masterNodeName); + ensureStableCluster(2); + } while (nodesWithWrongPassword.contains(internalCluster().getMasterName())); + // ensure get snapshot works + getSnapshotResponse = client().admin().cluster().prepareGetSnapshots(repositoryName).get(); + assertThat(getSnapshotResponse.getFailedResponses().keySet(), empty()); + assertThat(getSnapshotResponse.getSuccessfulResponses().keySet(), contains(repositoryName)); + } + + public void testSnapshotFailsForMasterFailoverWithWrongPassword() throws Exception { + final String repoName = randomName(); + final Settings repoSettings = repositorySettings(repoName); + final String goodPass = randomAlphaOfLength(20); + final String wrongPass = randomAlphaOfLength(19); + MockSecureSettings secureSettingsWithPassword = new MockSecureSettings(); + secureSettingsWithPassword.setString( + EncryptedRepositoryPlugin.ENCRYPTION_PASSWORD_SETTING.getConcreteSettingForNamespace(repoName).getKey(), + goodPass + ); + logger.info("--> start 4 nodes"); + internalCluster().setBootstrapMasterNodeIndex(0); + final String masterNode = internalCluster().startMasterOnlyNodes( + 1, + Settings.builder().setSecureSettings(secureSettingsWithPassword).build() + ).get(0); + final String otherNode = internalCluster().startDataOnlyNodes( + 1, + 
Settings.builder().setSecureSettings(secureSettingsWithPassword).build() + ).get(0); + ensureStableCluster(2); + secureSettingsWithPassword.setString( + EncryptedRepositoryPlugin.ENCRYPTION_PASSWORD_SETTING.getConcreteSettingForNamespace(repoName).getKey(), + wrongPass + ); + internalCluster().startMasterOnlyNodes(2, Settings.builder().setSecureSettings(secureSettingsWithPassword).build()); + ensureStableCluster(4); + assertThat(internalCluster().getMasterName(), equalTo(masterNode)); + + logger.debug("--> creating repository [name: {}, verify: {}, settings: {}]", repoName, false, repoSettings); + assertAcked( + client().admin().cluster().preparePutRepository(repoName).setType(repositoryType()).setVerify(false).setSettings(repoSettings) + ); + // create index with just one shard on the "other" data node + final String indexName = randomName(); + final Settings indexSettings = Settings.builder() + .put(indexSettings()) + .put("index.routing.allocation.include._name", otherNode) + .put(SETTING_NUMBER_OF_SHARDS, 1) + .build(); + logger.info("--> create random index {}", indexName); + createIndex(indexName, indexSettings); + indexRandom( + true, + client().prepareIndex(indexName).setId("1").setSource("field1", "the quick brown fox jumps"), + client().prepareIndex(indexName).setId("2").setSource("field1", "quick brown"), + client().prepareIndex(indexName).setId("3").setSource("field1", "quick"), + client().prepareIndex(indexName).setId("4").setSource("field1", "lazy"), + client().prepareIndex(indexName).setId("5").setSource("field1", "dog") + ); + assertHitCount(client().prepareSearch(indexName).setSize(0).get(), 5); + + // block shard snapshot on the data node + final LocalStateEncryptedRepositoryPlugin.TestEncryptedRepository otherNodeEncryptedRepo = + (LocalStateEncryptedRepositoryPlugin.TestEncryptedRepository) internalCluster().getInstance( + RepositoriesService.class, + otherNode + ).repository(repoName); + otherNodeEncryptedRepo.blockSnapshotShard(); + + final 
String snapshotName = randomName(); + logger.info("--> create snapshot {}:{}", repoName, snapshotName); + client().admin().cluster().prepareCreateSnapshot(repoName, snapshotName).setIndices(indexName).setWaitForCompletion(false).get(); + + // stop master + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(masterNode)); + ensureStableCluster(3); + + otherNodeEncryptedRepo.unblockSnapshotShard(); + + // the failover master has the wrong password, snapshot fails + logger.info("--> waiting for completion"); + expectThrows(SnapshotMissingException.class, () -> { waitForCompletion(repoName, snapshotName, TimeValue.timeValueSeconds(60)); }); + } + + protected String randomName() { + return randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT); + } + + protected String repositoryType() { + return EncryptedRepositoryPlugin.REPOSITORY_TYPE_NAME; + } + + protected Settings repositorySettings(String repositoryName) { + return Settings.builder() + .put("compress", randomBoolean()) + .put(EncryptedRepositoryPlugin.DELEGATE_TYPE_SETTING.getKey(), FsRepository.TYPE) + .put(EncryptedRepositoryPlugin.PASSWORD_NAME_SETTING.getKey(), repositoryName) + .put("location", randomRepoPath()) + .build(); + } + + protected String createRepository(final String name) { + return createRepository(name, true); + } + + protected String createRepository(final String name, final boolean verify) { + return createRepository(name, repositorySettings(name), verify); + } + + protected String createRepository(final String name, final Settings settings, final boolean verify) { + logger.debug("--> creating repository [name: {}, verify: {}, settings: {}]", name, verify, settings); + assertAcked( + client().admin().cluster().preparePutRepository(name).setType(repositoryType()).setVerify(verify).setSettings(settings) + ); + + internalCluster().getDataOrMasterNodeInstances(RepositoriesService.class).forEach(repositories -> { + assertThat(repositories.repository(name), 
notNullValue()); + assertThat(repositories.repository(name), instanceOf(BlobStoreRepository.class)); + assertThat(repositories.repository(name).isReadOnly(), is(settings.getAsBoolean("readonly", false))); + }); + + return name; + } + + protected void deleteRepository(final String name) { + logger.debug("--> deleting repository [name: {}]", name); + assertAcked(client().admin().cluster().prepareDeleteRepository(name)); + + internalCluster().getDataOrMasterNodeInstances(RepositoriesService.class).forEach(repositories -> { + RepositoryMissingException e = expectThrows(RepositoryMissingException.class, () -> repositories.repository(name)); + assertThat(e.repository(), equalTo(name)); + }); + } + + private void assertSuccessfulRestore(RestoreSnapshotResponse response) { + assertThat(response.getRestoreInfo().successfulShards(), greaterThan(0)); + assertThat(response.getRestoreInfo().successfulShards(), equalTo(response.getRestoreInfo().totalShards())); + } + + private void assertSuccessfulSnapshot(CreateSnapshotResponse response) { + assertThat(response.getSnapshotInfo().successfulShards(), greaterThan(0)); + assertThat(response.getSnapshotInfo().successfulShards(), equalTo(response.getSnapshotInfo().totalShards())); + assertThat(response.getSnapshotInfo().userMetadata(), not(hasKey(EncryptedRepository.PASSWORD_HASH_USER_METADATA_KEY))); + assertThat(response.getSnapshotInfo().userMetadata(), not(hasKey(EncryptedRepository.PASSWORD_SALT_USER_METADATA_KEY))); + } + + public SnapshotInfo waitForCompletion(String repository, String snapshotName, TimeValue timeout) throws InterruptedException { + long start = System.currentTimeMillis(); + while (System.currentTimeMillis() - start < timeout.millis()) { + List snapshotInfos = client().admin() + .cluster() + .prepareGetSnapshots(repository) + .setSnapshots(snapshotName) + .get() + .getSnapshots(repository); + assertThat(snapshotInfos.size(), equalTo(1)); + if (snapshotInfos.get(0).state().completed()) { + // Make sure that 
snapshot clean up operations are finished + ClusterStateResponse stateResponse = client().admin().cluster().prepareState().get(); + SnapshotsInProgress snapshotsInProgress = stateResponse.getState().custom(SnapshotsInProgress.TYPE); + if (snapshotsInProgress == null) { + return snapshotInfos.get(0); + } else { + boolean found = false; + for (SnapshotsInProgress.Entry entry : snapshotsInProgress.entries()) { + final Snapshot curr = entry.snapshot(); + if (curr.getRepository().equals(repository) && curr.getSnapshotId().getName().equals(snapshotName)) { + found = true; + break; + } + } + if (found == false) { + return snapshotInfos.get(0); + } + } + } + Thread.sleep(100); + } + fail("Timeout!!!"); + return null; + } +} diff --git a/x-pack/plugin/repository-encrypted/src/internalClusterTest/java/org/elasticsearch/repositories/encrypted/EncryptedS3BlobStoreRepositoryIntegTests.java b/x-pack/plugin/repository-encrypted/src/internalClusterTest/java/org/elasticsearch/repositories/encrypted/EncryptedS3BlobStoreRepositoryIntegTests.java new file mode 100644 index 0000000000000..689247bbc3186 --- /dev/null +++ b/x-pack/plugin/repository-encrypted/src/internalClusterTest/java/org/elasticsearch/repositories/encrypted/EncryptedS3BlobStoreRepositoryIntegTests.java @@ -0,0 +1,106 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.repositories.encrypted; + +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.settings.MockSecureSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.license.License; +import org.elasticsearch.license.LicenseService; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.repositories.s3.S3BlobStoreRepositoryTests; +import org.junit.BeforeClass; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +import static org.elasticsearch.repositories.encrypted.EncryptedRepository.DEK_ROOT_CONTAINER; +import static org.elasticsearch.repositories.encrypted.EncryptedRepository.getEncryptedBlobByteLength; +import static org.hamcrest.Matchers.hasSize; + +public final class EncryptedS3BlobStoreRepositoryIntegTests extends S3BlobStoreRepositoryTests { + private static List repositoryNames; + + @BeforeClass + private static void preGenerateRepositoryNames() { + List names = new ArrayList<>(); + for (int i = 0; i < 32; i++) { + names.add("test-repo-" + i); + } + repositoryNames = Collections.synchronizedList(names); + } + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + Settings.Builder settingsBuilder = Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(LicenseService.SELF_GENERATED_LICENSE_TYPE.getKey(), License.LicenseType.TRIAL.getTypeName()); + MockSecureSettings superSecureSettings = (MockSecureSettings) settingsBuilder.getSecureSettings(); + superSecureSettings.merge(nodeSecureSettings()); + return settingsBuilder.build(); + } + + protected MockSecureSettings nodeSecureSettings() { + MockSecureSettings secureSettings = new MockSecureSettings(); + for (String repositoryName : repositoryNames) { + secureSettings.setString( + 
EncryptedRepositoryPlugin.ENCRYPTION_PASSWORD_SETTING.getConcreteSettingForNamespace(repositoryName).getKey(), + repositoryName + ); + } + return secureSettings; + } + + @Override + protected String randomRepositoryName() { + return repositoryNames.remove(randomIntBetween(0, repositoryNames.size() - 1)); + } + + @Override + protected Collection> nodePlugins() { + return Arrays.asList(LocalStateEncryptedRepositoryPlugin.class, TestS3RepositoryPlugin.class); + } + + @Override + protected String repositoryType() { + return EncryptedRepositoryPlugin.REPOSITORY_TYPE_NAME; + } + + @Override + protected Settings repositorySettings(String repositoryName) { + return Settings.builder() + .put(super.repositorySettings(repositoryName)) + .put(EncryptedRepositoryPlugin.DELEGATE_TYPE_SETTING.getKey(), "s3") + .put(EncryptedRepositoryPlugin.PASSWORD_NAME_SETTING.getKey(), repositoryName) + .build(); + } + + @Override + protected void assertEmptyRepo(Map blobsMap) { + List blobs = blobsMap.keySet() + .stream() + .filter(blob -> false == blob.contains("index")) + .filter(blob -> false == blob.contains(DEK_ROOT_CONTAINER)) // encryption metadata "leaks" + .collect(Collectors.toList()); + assertThat("Only index blobs should remain in repository but found " + blobs, blobs, hasSize(0)); + } + + @Override + protected long blobLengthFromContentLength(long contentLength) { + return getEncryptedBlobByteLength(contentLength); + } + + @Override + public void testEnforcedCooldownPeriod() { + // this test is not applicable for the encrypted repository because it verifies behavior which pertains to snapshots that must + // be created before the encrypted repository was introduced, hence no such encrypted snapshots can possibly exist + } +} diff --git a/x-pack/plugin/repository-encrypted/src/main/java/org/elasticsearch/repositories/encrypted/AESKeyUtils.java b/x-pack/plugin/repository-encrypted/src/main/java/org/elasticsearch/repositories/encrypted/AESKeyUtils.java new file mode 100644 index 
0000000000000..92a128d93848c --- /dev/null +++ b/x-pack/plugin/repository-encrypted/src/main/java/org/elasticsearch/repositories/encrypted/AESKeyUtils.java @@ -0,0 +1,76 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.repositories.encrypted; + +import org.elasticsearch.common.settings.SecureString; + +import javax.crypto.Cipher; +import javax.crypto.SecretKey; +import javax.crypto.SecretKeyFactory; +import javax.crypto.spec.PBEKeySpec; +import javax.crypto.spec.SecretKeySpec; +import java.nio.charset.StandardCharsets; +import java.security.GeneralSecurityException; +import java.security.Key; +import java.util.Base64; + +public final class AESKeyUtils { + public static final int KEY_LENGTH_IN_BYTES = 32; // 256-bit AES key + public static final int WRAPPED_KEY_LENGTH_IN_BYTES = KEY_LENGTH_IN_BYTES + 8; // https://www.ietf.org/rfc/rfc3394.txt section 2.2 + // parameter for the KDF function, it's a funny and unusual iter count larger than 60k + private static final int KDF_ITER = 61616; + // the KDF algorithm that generates the symmetric key given the password + private static final String KDF_ALGO = "PBKDF2WithHmacSHA512"; + // The Id of any AES SecretKey is the AES-Wrap-ciphertext of this fixed 32 byte wide array. + // Key wrapping encryption is deterministic (same plaintext generates the same ciphertext) + // and the probability that two different keys map the same plaintext to the same ciphertext is very small + // (2^-256, much lower than the UUID collision of 2^-128), assuming AES is indistinguishable from a pseudorandom permutation. 
+ private static final byte[] KEY_ID_PLAINTEXT = "wrapping known text forms key id".getBytes(StandardCharsets.UTF_8); + + public static byte[] wrap(SecretKey wrappingKey, SecretKey keyToWrap) throws GeneralSecurityException { + assert "AES".equals(wrappingKey.getAlgorithm()); + assert "AES".equals(keyToWrap.getAlgorithm()); + Cipher c = Cipher.getInstance("AESWrap"); + c.init(Cipher.WRAP_MODE, wrappingKey); + return c.wrap(keyToWrap); + } + + public static SecretKey unwrap(SecretKey wrappingKey, byte[] keyToUnwrap) throws GeneralSecurityException { + assert "AES".equals(wrappingKey.getAlgorithm()); + assert keyToUnwrap.length == WRAPPED_KEY_LENGTH_IN_BYTES; + Cipher c = Cipher.getInstance("AESWrap"); + c.init(Cipher.UNWRAP_MODE, wrappingKey); + Key unwrappedKey = c.unwrap(keyToUnwrap, "AES", Cipher.SECRET_KEY); + return new SecretKeySpec(unwrappedKey.getEncoded(), "AES"); // make sure unwrapped key is "AES" + } + + /** + * Computes the ID of the given AES {@code SecretKey}. + * The ID can be published as it does not leak any information about the key. + * Different {@code SecretKey}s have different IDs with a very high probability. + *

+ * The ID is the ciphertext of a known plaintext, using the AES Wrap cipher algorithm. + * AES Wrap algorithm is deterministic, i.e. encryption using the same key, of the same plaintext, generates the same ciphertext. + * Moreover, the ciphertext reveals no information on the key, and the probability of collision of ciphertexts given different + * keys is statistically negligible. + */ + public static String computeId(SecretKey secretAESKey) throws GeneralSecurityException { + byte[] ciphertextOfKnownPlaintext = wrap(secretAESKey, new SecretKeySpec(KEY_ID_PLAINTEXT, "AES")); + return new String(Base64.getUrlEncoder().withoutPadding().encode(ciphertextOfKnownPlaintext), StandardCharsets.UTF_8); + } + + public static SecretKey generatePasswordBasedKey(SecureString password, String salt) throws GeneralSecurityException { + return generatePasswordBasedKey(password, salt.getBytes(StandardCharsets.UTF_8)); + } + + public static SecretKey generatePasswordBasedKey(SecureString password, byte[] salt) throws GeneralSecurityException { + PBEKeySpec keySpec = new PBEKeySpec(password.getChars(), salt, KDF_ITER, KEY_LENGTH_IN_BYTES * Byte.SIZE); + SecretKeyFactory keyFactory = SecretKeyFactory.getInstance(KDF_ALGO); + SecretKey secretKey = keyFactory.generateSecret(keySpec); + SecretKeySpec secret = new SecretKeySpec(secretKey.getEncoded(), "AES"); + return secret; + } +} diff --git a/x-pack/plugin/repository-encrypted/src/main/java/org/elasticsearch/repositories/encrypted/ChainingInputStream.java b/x-pack/plugin/repository-encrypted/src/main/java/org/elasticsearch/repositories/encrypted/ChainingInputStream.java index 5e64abb71fd70..cc86f772d53c7 100644 --- a/x-pack/plugin/repository-encrypted/src/main/java/org/elasticsearch/repositories/encrypted/ChainingInputStream.java +++ b/x-pack/plugin/repository-encrypted/src/main/java/org/elasticsearch/repositories/encrypted/ChainingInputStream.java @@ -7,11 +7,14 @@ import java.io.IOException; import java.io.InputStream; +import 
java.io.SequenceInputStream; import java.util.Objects; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.io.Streams; +import org.elasticsearch.core.internal.io.IOUtils; /** * A {@code ChainingInputStream} concatenates multiple component input streams into a @@ -72,6 +75,55 @@ public abstract class ChainingInputStream extends InputStream { */ private boolean closed; + /** + * Returns a new {@link ChainingInputStream} that concatenates the bytes to be read from the first + * input stream with the bytes from the second input stream. The stream arguments must support + * the {@code mark} and {@code reset} operations; otherwise use {@link SequenceInputStream}. + * + * @param first the input stream supplying the first bytes of the returned {@link ChainingInputStream} + * @param second the input stream supplying the bytes after the {@code first} input stream has been exhausted + */ + public static ChainingInputStream chain(InputStream first, InputStream second) { + if (false == Objects.requireNonNull(first).markSupported()) { + throw new IllegalArgumentException("The first component input stream does not support mark"); + } + if (false == Objects.requireNonNull(second).markSupported()) { + throw new IllegalArgumentException("The second component input stream does not support mark"); + } + // components can be reused, and the {@code ChainingInputStream} eagerly closes components after every use + // "first" and "second" are closed when the returned {@code ChainingInputStream} is closed + final InputStream firstComponent = Streams.noCloseStream(first); + final InputStream secondComponent = Streams.noCloseStream(second); + // be sure to remember the start of components because they might be reused + firstComponent.mark(Integer.MAX_VALUE); + secondComponent.mark(Integer.MAX_VALUE); + + return new ChainingInputStream() { + + @Override + InputStream 
nextComponent(InputStream currentComponentIn) throws IOException { + if (currentComponentIn == null) { + // when returning the next component, start from its beginning + firstComponent.reset(); + return firstComponent; + } else if (currentComponentIn == firstComponent) { + // when returning the next component, start from its beginning + secondComponent.reset(); + return secondComponent; + } else if (currentComponentIn == secondComponent) { + return null; + } else { + throw new IllegalStateException("Unexpected component input stream"); + } + } + + @Override + public void close() throws IOException { + IOUtils.close(super::close, first, second); + } + }; + } + /** * This method is responsible for generating the component input streams. * It is passed the current input stream and must return the successive one, diff --git a/x-pack/plugin/repository-encrypted/src/main/java/org/elasticsearch/repositories/encrypted/DecryptionPacketsInputStream.java b/x-pack/plugin/repository-encrypted/src/main/java/org/elasticsearch/repositories/encrypted/DecryptionPacketsInputStream.java index e1f06d6bc6511..77fc979e00094 100644 --- a/x-pack/plugin/repository-encrypted/src/main/java/org/elasticsearch/repositories/encrypted/DecryptionPacketsInputStream.java +++ b/x-pack/plugin/repository-encrypted/src/main/java/org/elasticsearch/repositories/encrypted/DecryptionPacketsInputStream.java @@ -5,6 +5,9 @@ */ package org.elasticsearch.repositories.encrypted; +import org.elasticsearch.common.util.ByteUtils; +import org.elasticsearch.core.internal.io.IOUtils; + import javax.crypto.BadPaddingException; import javax.crypto.Cipher; import javax.crypto.IllegalBlockSizeException; @@ -15,8 +18,6 @@ import java.io.ByteArrayInputStream; import java.io.IOException; import java.io.InputStream; -import java.nio.ByteBuffer; -import java.nio.ByteOrder; import java.security.InvalidAlgorithmParameterException; import java.security.InvalidKeyException; import java.security.NoSuchAlgorithmException; @@ -31,9 
+32,9 @@ * {@link EncryptionPacketsInputStream} generates. No decrypted bytes are returned before * they are authenticated. *

- * The same parameters, namely {@code secretKey}, {@code nonce} and {@code packetLength}, - * that have been used during encryption must also be used for decryption, otherwise - * decryption will fail. + * The same parameters, namely {@code secretKey} and {@code packetLength}, + * which have been used during encryption, must also be used for decryption, + * otherwise decryption will fail. *

* This implementation buffers the encrypted packet in memory. The maximum packet size it can * accommodate is {@link EncryptedRepository#MAX_PACKET_LENGTH_IN_BYTES}. @@ -51,7 +52,6 @@ public final class DecryptionPacketsInputStream extends ChainingInputStream { private final InputStream source; private final SecretKey secretKey; - private final int nonce; private final int packetLength; private final byte[] packetBuffer; @@ -77,10 +77,9 @@ public static long getDecryptionLength(long ciphertextLength, int packetLength) return decryptedSize; } - public DecryptionPacketsInputStream(InputStream source, SecretKey secretKey, int nonce, int packetLength) { + public DecryptionPacketsInputStream(InputStream source, SecretKey secretKey, int packetLength) { this.source = Objects.requireNonNull(source); this.secretKey = Objects.requireNonNull(secretKey); - this.nonce = nonce; if (packetLength <= 0 || packetLength >= EncryptedRepository.MAX_PACKET_LENGTH_IN_BYTES) { throw new IllegalArgumentException("Invalid packet length [" + packetLength + "]"); } @@ -124,19 +123,22 @@ public void reset() throws IOException { throw new IOException("Mark/reset not supported"); } + @Override + public void close() throws IOException { + IOUtils.close(super::close, source); + } + private int decrypt(PrefixInputStream packetInputStream) throws IOException { // read only the IV prefix into the packet buffer int ivLength = packetInputStream.readNBytes(packetBuffer, 0, GCM_IV_LENGTH_IN_BYTES); if (ivLength != GCM_IV_LENGTH_IN_BYTES) { throw new IOException("Packet heading IV error. Unexpected length [" + ivLength + "]."); } - // extract the nonce and the counter from the packet IV - ByteBuffer ivBuffer = ByteBuffer.wrap(packetBuffer, 0, GCM_IV_LENGTH_IN_BYTES).order(ByteOrder.LITTLE_ENDIAN); - int packetIvNonce = ivBuffer.getInt(0); - long packetIvCounter = ivBuffer.getLong(Integer.BYTES); - if (packetIvNonce != nonce) { - throw new IOException("Packet nonce mismatch. 
Expecting [" + nonce + "], but got [" + packetIvNonce + "]."); - } + // extract the counter from the packet IV and validate it (that the packet is in order) + // skips the first 4 bytes in the packet IV, which contain the encryption nonce, which cannot be explicitly validated + // because the nonce is not passed in during decryption, but it is implicitly because it is part of the IV, + // when GCM validates the packet authn tag + long packetIvCounter = ByteUtils.readLongLE(packetBuffer, Integer.BYTES); if (packetIvCounter != counter) { throw new IOException("Packet counter mismatch. Expecting [" + counter + "], but got [" + packetIvCounter + "]."); } diff --git a/x-pack/plugin/repository-encrypted/src/main/java/org/elasticsearch/repositories/encrypted/EncryptedRepository.java b/x-pack/plugin/repository-encrypted/src/main/java/org/elasticsearch/repositories/encrypted/EncryptedRepository.java index a54c992e4ebaa..3ca3189e837aa 100644 --- a/x-pack/plugin/repository-encrypted/src/main/java/org/elasticsearch/repositories/encrypted/EncryptedRepository.java +++ b/x-pack/plugin/repository-encrypted/src/main/java/org/elasticsearch/repositories/encrypted/EncryptedRepository.java @@ -6,11 +6,697 @@ package org.elasticsearch.repositories.encrypted; -public class EncryptedRepository { +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.lucene.index.IndexCommit; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.metadata.RepositoryMetadata; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.CheckedFunction; +import org.elasticsearch.common.CheckedSupplier; +import org.elasticsearch.common.UUIDs; +import org.elasticsearch.common.blobstore.BlobContainer; +import 
org.elasticsearch.common.blobstore.BlobMetadata; +import org.elasticsearch.common.blobstore.BlobPath; +import org.elasticsearch.common.blobstore.BlobStore; +import org.elasticsearch.common.blobstore.DeleteResult; +import org.elasticsearch.common.blobstore.support.AbstractBlobContainer; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.cache.Cache; +import org.elasticsearch.common.cache.CacheBuilder; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.io.Streams; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus; +import org.elasticsearch.index.store.Store; +import org.elasticsearch.indices.recovery.RecoverySettings; +import org.elasticsearch.license.LicenseUtils; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.repositories.IndexId; +import org.elasticsearch.repositories.RepositoryData; +import org.elasticsearch.repositories.RepositoryException; +import org.elasticsearch.repositories.RepositoryStats; +import org.elasticsearch.repositories.ShardGenerations; +import org.elasticsearch.repositories.blobstore.BlobStoreRepository; +import org.elasticsearch.snapshots.SnapshotId; +import org.elasticsearch.snapshots.SnapshotInfo; + +import javax.crypto.KeyGenerator; +import javax.crypto.SecretKey; +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.nio.charset.StandardCharsets; +import java.nio.file.NoSuchFileException; +import java.security.GeneralSecurityException; +import java.security.SecureRandom; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import 
java.util.concurrent.ExecutionException; +import java.util.function.Function; +import java.util.function.Supplier; + +public class EncryptedRepository extends BlobStoreRepository { + static final Logger logger = LogManager.getLogger(EncryptedRepository.class); + // the following constants are fixed by definition static final int GCM_TAG_LENGTH_IN_BYTES = 16; static final int GCM_IV_LENGTH_IN_BYTES = 12; - static final int AES_BLOCK_SIZE_IN_BYTES = 128; + static final int AES_BLOCK_LENGTH_IN_BYTES = 128; + // the following constants require careful thought before changing because they will break backwards compatibility static final String DATA_ENCRYPTION_SCHEME = "AES/GCM/NoPadding"; static final long PACKET_START_COUNTER = Long.MIN_VALUE; - static final int MAX_PACKET_LENGTH_IN_BYTES = 1 << 30; + static final int MAX_PACKET_LENGTH_IN_BYTES = 8 << 20; // 8MB + // this should be smaller than {@code #MAX_PACKET_LENGTH_IN_BYTES} and it's what {@code EncryptionPacketsInputStream} uses + // during encryption and what {@code DecryptionPacketsInputStream} expects during decryption (it is not configurable) + static final int PACKET_LENGTH_IN_BYTES = 64 * (1 << 10); // 64KB + // the path of the blob container holding all the DEKs + // this is relative to the root base path holding the encrypted blobs (i.e. the repository root base path) + static final String DEK_ROOT_CONTAINER = ".encryption-metadata"; // package private for tests + static final int DEK_ID_LENGTH = 22; // {@code org.elasticsearch.common.UUIDS} length + + // the following constants can be changed freely + private static final String RAND_ALGO = "SHA1PRNG"; + + // the snapshot metadata (residing in the cluster state for the lifetime of the snapshot) + // contains the salted hash of the repository password as present on the master node (which starts the snapshot operation). 
+ // The hash is verified on each data node, before initiating the actual shard files snapshot, as well + // as on the master node that finalizes the snapshot (which could be a different master node from the one that started + // the operation if a master failover occurred during the snapshot). + // This ensures that all participating nodes in the snapshot operation agree on the value of the key encryption key, so that + // all the data included in a snapshot is encrypted using the same password. + static final String PASSWORD_HASH_USER_METADATA_KEY = EncryptedRepository.class.getName() + ".repositoryPasswordHash"; + static final String PASSWORD_SALT_USER_METADATA_KEY = EncryptedRepository.class.getName() + ".repositoryPasswordSalt"; + private static final int DEK_CACHE_WEIGHT = 2048; + + // this is the repository instance to which all blob reads and writes are forwarded to (it stores both the encrypted blobs, as well + // as the associated encrypted DEKs) + private final BlobStoreRepository delegatedRepository; + // every data blob is encrypted with its randomly generated AES key (DEK) + private final Supplier> dekGenerator; + // license is checked before every snapshot operations; protected non-final for tests + protected Supplier licenseStateSupplier; + private final SecureString repositoryPassword; + private final String localRepositoryPasswordHash; + private final String localRepositoryPasswordSalt; + private volatile String validatedLocalRepositoryPasswordHash; + private final Cache dekCache; + + /** + * Returns the byte length (i.e. the storage size) of an encrypted blob, given the length of the blob's plaintext contents. 
+ * + * @see EncryptionPacketsInputStream#getEncryptionLength(long, int) + */ + public static long getEncryptedBlobByteLength(long plaintextBlobByteLength) { + return (long) DEK_ID_LENGTH /* UUID byte length */ + + EncryptionPacketsInputStream.getEncryptionLength(plaintextBlobByteLength, PACKET_LENGTH_IN_BYTES); + } + + protected EncryptedRepository( + RepositoryMetadata metadata, + NamedXContentRegistry namedXContentRegistry, + ClusterService clusterService, + BigArrays bigArrays, + RecoverySettings recoverySettings, + BlobStoreRepository delegatedRepository, + Supplier licenseStateSupplier, + SecureString repositoryPassword + ) throws GeneralSecurityException { + super( + metadata, + namedXContentRegistry, + clusterService, + bigArrays, + recoverySettings, + BlobPath.cleanPath() /* the encrypted repository uses a hardcoded empty + base blob path but the base path setting is honored for the delegated repository */ + ); + this.delegatedRepository = delegatedRepository; + this.dekGenerator = createDEKGenerator(); + this.licenseStateSupplier = licenseStateSupplier; + this.repositoryPassword = repositoryPassword; + // the salt used to generate an irreversible "hash"; it is generated randomly but it's fixed for the lifetime of the + // repository solely for efficiency reasons + this.localRepositoryPasswordSalt = UUIDs.randomBase64UUID(); + // the "hash" of the repository password from the local node is not actually a hash but the ciphertext of a + // known-plaintext using a key derived from the repository password using a random salt + this.localRepositoryPasswordHash = AESKeyUtils.computeId( + AESKeyUtils.generatePasswordBasedKey(repositoryPassword, localRepositoryPasswordSalt) + ); + // a "hash" computed locally is also locally trusted (trivially) + this.validatedLocalRepositoryPasswordHash = this.localRepositoryPasswordHash; + // stores decrypted DEKs; DEKs are reused to encrypt/decrypt multiple independent blobs + this.dekCache = 
CacheBuilder.builder().setMaximumWeight(DEK_CACHE_WEIGHT).build(); + if (isReadOnly() != delegatedRepository.isReadOnly()) { + throw new RepositoryException( + metadata.name(), + "Unexpected fatal internal error", + new IllegalStateException("The encrypted repository must be read-only iff the delegate repository is read-only") + ); + } + } + + @Override + public RepositoryStats stats() { + return this.delegatedRepository.stats(); + } + + /** + * The repository hook method which populates the snapshot metadata with the salted password hash of the repository on the (master) + * node that starts of the snapshot operation. All the other actions associated with the same snapshot operation will first verify + * that the local repository password checks with the hash from the snapshot metadata. + *

+ * In addition, if the installed license does not comply with the "encrypted snapshots" feature, this method throws an exception, + * which aborts the snapshot operation. + * + * See {@link org.elasticsearch.repositories.Repository#adaptUserMetadata(Map)}. + * + * @param userMetadata the snapshot metadata as received from the calling user + * @return the snapshot metadata containing the salted password hash of the node initializing the snapshot + */ + @Override + public Map adaptUserMetadata(Map userMetadata) { + // because populating the snapshot metadata must be done before the actual snapshot is first initialized, + // we take the opportunity to validate the license and abort if non-compliant + if (false == licenseStateSupplier.get().isAllowed(XPackLicenseState.Feature.ENCRYPTED_SNAPSHOT)) { + throw LicenseUtils.newComplianceException("encrypted snapshots"); + } + Map snapshotUserMetadata = new HashMap<>(); + if (userMetadata != null) { + snapshotUserMetadata.putAll(userMetadata); + } + // fill in the hash of the repository password, which is then checked before every snapshot operation + // (i.e. 
{@link #snapshotShard} and {@link #finalizeSnapshot}) to ensure that all participating nodes + // in the snapshot operation use the same repository password + snapshotUserMetadata.put(PASSWORD_SALT_USER_METADATA_KEY, localRepositoryPasswordSalt); + snapshotUserMetadata.put(PASSWORD_HASH_USER_METADATA_KEY, localRepositoryPasswordHash); + logger.trace( + "Snapshot metadata for local repository password [{}] and [{}]", + localRepositoryPasswordSalt, + localRepositoryPasswordHash + ); + // do not wrap in Map.of; we have to be able to modify the map (remove the added entries) when finalizing the snapshot + return snapshotUserMetadata; + } + + @Override + public void finalizeSnapshot( + ShardGenerations shardGenerations, + long repositoryStateId, + Metadata clusterMetadata, + SnapshotInfo snapshotInfo, + Version repositoryMetaVersion, + Function stateTransformer, + ActionListener listener + ) { + try { + validateLocalRepositorySecret(snapshotInfo.userMetadata()); + } catch (RepositoryException passwordValidationException) { + listener.onFailure(passwordValidationException); + return; + } finally { + // remove the repository password hash (and salt) from the snapshot metadata so that it is not displayed in the API response + // to the user + snapshotInfo.userMetadata().remove(PASSWORD_HASH_USER_METADATA_KEY); + snapshotInfo.userMetadata().remove(PASSWORD_SALT_USER_METADATA_KEY); + } + super.finalizeSnapshot( + shardGenerations, + repositoryStateId, + clusterMetadata, + snapshotInfo, + repositoryMetaVersion, + stateTransformer, + listener + ); + } + + @Override + public void snapshotShard( + Store store, + MapperService mapperService, + SnapshotId snapshotId, + IndexId indexId, + IndexCommit snapshotIndexCommit, + String shardStateIdentifier, + IndexShardSnapshotStatus snapshotStatus, + Version repositoryMetaVersion, + Map userMetadata, + ActionListener listener + ) { + try { + validateLocalRepositorySecret(userMetadata); + } catch (RepositoryException 
passwordValidationException) { + listener.onFailure(passwordValidationException); + return; + } + super.snapshotShard( + store, + mapperService, + snapshotId, + indexId, + snapshotIndexCommit, + shardStateIdentifier, + snapshotStatus, + repositoryMetaVersion, + userMetadata, + listener + ); + } + + @Override + protected BlobStore createBlobStore() { + final Supplier> blobStoreDEKGenerator; + if (isReadOnly()) { + // make sure that a read-only repository can't encrypt anything + blobStoreDEKGenerator = () -> { + throw new RepositoryException( + metadata.name(), + "Unexpected fatal internal error", + new IllegalStateException("DEKs are required for encryption but this is a read-only repository") + ); + }; + } else { + blobStoreDEKGenerator = this.dekGenerator; + } + return new EncryptedBlobStore( + delegatedRepository.blobStore(), + delegatedRepository.basePath(), + metadata.name(), + this::generateKEK, + blobStoreDEKGenerator, + dekCache + ); + } + + @Override + protected void doStart() { + this.delegatedRepository.start(); + super.doStart(); + } + + @Override + protected void doStop() { + super.doStop(); + this.delegatedRepository.stop(); + } + + @Override + protected void doClose() { + super.doClose(); + this.delegatedRepository.close(); + } + + private Supplier> createDEKGenerator() throws GeneralSecurityException { + // DEK and DEK Ids MUST be generated randomly (with independent random instances) + final SecureRandom dekSecureRandom = SecureRandom.getInstance(RAND_ALGO); + final SecureRandom dekIdSecureRandom = SecureRandom.getInstance(RAND_ALGO); + final KeyGenerator dekGenerator = KeyGenerator.getInstance(DATA_ENCRYPTION_SCHEME.split("/")[0]); + dekGenerator.init(AESKeyUtils.KEY_LENGTH_IN_BYTES * Byte.SIZE, dekSecureRandom); + return () -> { + final BytesReference dekId = new BytesArray(UUIDs.randomBase64UUID(dekIdSecureRandom)); + final SecretKey dek = dekGenerator.generateKey(); + logger.debug("Repository [{}] generated new DEK [{}]", metadata.name(), 
dekId); + return new Tuple<>(dekId, dek); + }; + } + + // pkg-private for tests + Tuple generateKEK(String dekId) { + try { + // we rely on the DEK Id being generated randomly so it can be used as a salt + final SecretKey kek = AESKeyUtils.generatePasswordBasedKey(repositoryPassword, dekId); + final String kekId = AESKeyUtils.computeId(kek); + logger.debug("Repository [{}] computed KEK [{}] for DEK [{}]", metadata.name(), kekId, dekId); + return new Tuple<>(kekId, kek); + } catch (GeneralSecurityException e) { + throw new RepositoryException(metadata.name(), "Failure to generate KEK to wrap the DEK [" + dekId + "]", e); + } + } + + /** + * Called before the shard snapshot and finalize operations, on the data and master nodes. This validates that the repository + * password on the master node that started the snapshot operation is identical to the repository password on the local node. + * + * @param snapshotUserMetadata the snapshot metadata containing the repository password hash to assert + * @throws RepositoryException if the repository password hash on the local node mismatches the master's + */ + private void validateLocalRepositorySecret(Map snapshotUserMetadata) throws RepositoryException { + assert snapshotUserMetadata != null; + assert snapshotUserMetadata.get(PASSWORD_HASH_USER_METADATA_KEY) instanceof String; + final String masterRepositoryPasswordId = (String) snapshotUserMetadata.get(PASSWORD_HASH_USER_METADATA_KEY); + if (false == masterRepositoryPasswordId.equals(validatedLocalRepositoryPasswordHash)) { + assert snapshotUserMetadata.get(PASSWORD_SALT_USER_METADATA_KEY) instanceof String; + final String masterRepositoryPasswordIdSalt = (String) snapshotUserMetadata.get(PASSWORD_SALT_USER_METADATA_KEY); + final String computedRepositoryPasswordId; + try { + computedRepositoryPasswordId = AESKeyUtils.computeId( + AESKeyUtils.generatePasswordBasedKey(repositoryPassword, masterRepositoryPasswordIdSalt) + ); + } catch (Exception e) { + throw new 
RepositoryException(metadata.name(), "Unexpected fatal internal error", e); + } + if (computedRepositoryPasswordId.equals(masterRepositoryPasswordId)) { + this.validatedLocalRepositoryPasswordHash = computedRepositoryPasswordId; + } else { + throw new RepositoryException( + metadata.name(), + "Repository password mismatch. The local node's repository password, from the keystore setting [" + + EncryptedRepositoryPlugin.ENCRYPTION_PASSWORD_SETTING.getConcreteSettingForNamespace( + EncryptedRepositoryPlugin.PASSWORD_NAME_SETTING.get(metadata.settings()) + ).getKey() + + "], is different compared to the elected master node's which started the snapshot operation" + ); + } + } + } + + // pkg-private for tests + static final class EncryptedBlobStore implements BlobStore { + private final BlobStore delegatedBlobStore; + private final BlobPath delegatedBasePath; + private final String repositoryName; + private final Function> getKEKforDEK; + private final Cache dekCache; + private final CheckedSupplier singleUseDEKSupplier; + + EncryptedBlobStore( + BlobStore delegatedBlobStore, + BlobPath delegatedBasePath, + String repositoryName, + Function> getKEKforDEK, + Supplier> dekGenerator, + Cache dekCache + ) { + this.delegatedBlobStore = delegatedBlobStore; + this.delegatedBasePath = delegatedBasePath; + this.repositoryName = repositoryName; + this.getKEKforDEK = getKEKforDEK; + this.dekCache = dekCache; + this.singleUseDEKSupplier = SingleUseKey.createSingleUseKeySupplier(() -> { + Tuple newDEK = dekGenerator.get(); + // store the newly generated DEK before making it available + storeDEK(newDEK.v1().utf8ToString(), newDEK.v2()); + return newDEK; + }); + } + + // pkg-private for tests + SecretKey getDEKById(String dekId) throws IOException { + try { + return dekCache.computeIfAbsent(dekId, ignored -> loadDEK(dekId)); + } catch (ExecutionException e) { + // some exception types are to be expected + if (e.getCause() instanceof IOException) { + throw (IOException) e.getCause(); + 
} else if (e.getCause() instanceof ElasticsearchException) { + throw (ElasticsearchException) e.getCause(); + } else { + throw new RepositoryException(repositoryName, "Unexpected exception retrieving DEK [" + dekId + "]", e); + } + } + } + + private SecretKey loadDEK(String dekId) throws IOException { + final BlobPath dekBlobPath = delegatedBasePath.add(DEK_ROOT_CONTAINER).add(dekId); + logger.debug("Repository [{}] loading wrapped DEK [{}] from blob path {}", repositoryName, dekId, dekBlobPath); + final BlobContainer dekBlobContainer = delegatedBlobStore.blobContainer(dekBlobPath); + final Tuple kekTuple = getKEKforDEK.apply(dekId); + final String kekId = kekTuple.v1(); + final SecretKey kek = kekTuple.v2(); + logger.trace("Repository [{}] using KEK [{}] to unwrap DEK [{}]", repositoryName, kekId, dekId); + final byte[] encryptedDEKBytes = new byte[AESKeyUtils.WRAPPED_KEY_LENGTH_IN_BYTES]; + try (InputStream encryptedDEKInputStream = dekBlobContainer.readBlob(kekId)) { + final int bytesRead = Streams.readFully(encryptedDEKInputStream, encryptedDEKBytes); + if (bytesRead != AESKeyUtils.WRAPPED_KEY_LENGTH_IN_BYTES) { + throw new RepositoryException( + repositoryName, + "Wrapped DEK [" + dekId + "] has smaller length [" + bytesRead + "] than expected" + ); + } + if (encryptedDEKInputStream.read() != -1) { + throw new RepositoryException(repositoryName, "Wrapped DEK [" + dekId + "] is larger than expected"); + } + } catch (NoSuchFileException e) { + // do NOT throw IOException when the DEK does not exist, as this is a decryption problem, and IOExceptions + // can move the repository in the corrupted state + throw new ElasticsearchException( + "Failure to read and decrypt DEK [" + + dekId + + "] from " + + dekBlobContainer.path() + + ". 
Most likely the repository password is incorrect, where previous " + + "snapshots have used a different password.", + e + ); + } + logger.trace("Repository [{}] successfully read DEK [{}] from path {} {}", repositoryName, dekId, dekBlobPath, kekId); + try { + final SecretKey dek = AESKeyUtils.unwrap(kek, encryptedDEKBytes); + logger.debug("Repository [{}] successfully loaded DEK [{}] from path {} {}", repositoryName, dekId, dekBlobPath, kekId); + return dek; + } catch (GeneralSecurityException e) { + throw new RepositoryException( + repositoryName, + "Failure to AES unwrap the DEK [" + + dekId + + "]. " + + "Most likely the encryption metadata in the repository has been corrupted", + e + ); + } + } + + // pkg-private for tests + void storeDEK(String dekId, SecretKey dek) throws IOException { + final BlobPath dekBlobPath = delegatedBasePath.add(DEK_ROOT_CONTAINER).add(dekId); + logger.debug("Repository [{}] storing wrapped DEK [{}] under blob path {}", repositoryName, dekId, dekBlobPath); + final BlobContainer dekBlobContainer = delegatedBlobStore.blobContainer(dekBlobPath); + final Tuple kek = getKEKforDEK.apply(dekId); + logger.trace("Repository [{}] using KEK [{}] to wrap DEK [{}]", repositoryName, kek.v1(), dekId); + final byte[] encryptedDEKBytes; + try { + encryptedDEKBytes = AESKeyUtils.wrap(kek.v2(), dek); + if (encryptedDEKBytes.length != AESKeyUtils.WRAPPED_KEY_LENGTH_IN_BYTES) { + throw new RepositoryException( + repositoryName, + "Wrapped DEK [" + dekId + "] has unexpected length [" + encryptedDEKBytes.length + "]" + ); + } + } catch (GeneralSecurityException e) { + // throw unchecked ElasticsearchException; IOExceptions are interpreted differently and can move the repository in the + // corrupted state + throw new RepositoryException(repositoryName, "Failure to AES wrap the DEK [" + dekId + "]", e); + } + logger.trace("Repository [{}] successfully wrapped DEK [{}]", repositoryName, dekId); + try (InputStream encryptedDEKInputStream = new 
ByteArrayInputStream(encryptedDEKBytes)) { + dekBlobContainer.writeBlobAtomic(kek.v1(), encryptedDEKInputStream, encryptedDEKBytes.length, true); + } + logger.debug("Repository [{}] successfully stored DEK [{}] under path {} {}", repositoryName, dekId, dekBlobPath, kek.v1()); + } + + @Override + public BlobContainer blobContainer(BlobPath path) { + final Iterator pathIterator = path.iterator(); + BlobPath delegatedBlobContainerPath = delegatedBasePath; + while (pathIterator.hasNext()) { + delegatedBlobContainerPath = delegatedBlobContainerPath.add(pathIterator.next()); + } + final BlobContainer delegatedBlobContainer = delegatedBlobStore.blobContainer(delegatedBlobContainerPath); + return new EncryptedBlobContainer(path, repositoryName, delegatedBlobContainer, singleUseDEKSupplier, this::getDEKById); + } + + @Override + public void close() { + // do NOT close delegatedBlobStore; it will be closed when the inner delegatedRepository is closed + } + } + + private static final class EncryptedBlobContainer extends AbstractBlobContainer { + private final String repositoryName; + private final BlobContainer delegatedBlobContainer; + // supplier for the DEK used for encryption (snapshot) + private final CheckedSupplier singleUseDEKSupplier; + // retrieves the DEK required for decryption (restore) + private final CheckedFunction getDEKById; + + EncryptedBlobContainer( + BlobPath path, // this path contains the {@code EncryptedRepository#basePath} which, importantly, is empty + String repositoryName, + BlobContainer delegatedBlobContainer, + CheckedSupplier singleUseDEKSupplier, + CheckedFunction getDEKById + ) { + super(path); + this.repositoryName = repositoryName; + final String rootPathElement = path.iterator().hasNext() ? 
path.iterator().next() : null; + if (DEK_ROOT_CONTAINER.equals(rootPathElement)) { + throw new RepositoryException(repositoryName, "Cannot descend into the DEK blob container " + path); + } + this.delegatedBlobContainer = delegatedBlobContainer; + this.singleUseDEKSupplier = singleUseDEKSupplier; + this.getDEKById = getDEKById; + } + + @Override + public boolean blobExists(String blobName) throws IOException { + return delegatedBlobContainer.blobExists(blobName); + } + + /** + * Returns a new {@link InputStream} for the given {@code blobName} that can be used to read the contents of the blob. + * The returned {@code InputStream} transparently handles the decryption of the blob contents, by first working out + * the blob name of the associated DEK id, reading and decrypting the DEK (given the repository password, unless the DEK is + * already cached because it had been used for other blobs before), and lastly reading and decrypting the data blob, + * in a streaming fashion, by employing the {@link DecryptionPacketsInputStream}. + * The {@code DecryptionPacketsInputStream} does not return un-authenticated data. + * + * @param blobName The name of the blob to get an {@link InputStream} for. + */ + @Override + public InputStream readBlob(String blobName) throws IOException { + // This MIGHT require two concurrent readBlob connections if the DEK is not already in the cache and if the encrypted blob + // is large enough so that the underlying network library keeps the connection open after reading the prepended DEK ID. + // Arguably this is a problem only under lab conditions, when the storage service is saturated only by the first read + // connection of the pair, so that the second read connection (for the DEK) can not be fulfilled. + // In this case the second connection will time-out which will trigger the closing of the first one, therefore + // allowing other pair connections to complete. 
+ // In this situation the restore process should slowly make headway, albeit under read-timeout exceptions + final InputStream encryptedDataInputStream = delegatedBlobContainer.readBlob(blobName); + try { + // read the DEK Id (fixed length) which is prepended to the encrypted blob + final byte[] dekIdBytes = new byte[DEK_ID_LENGTH]; + final int bytesRead = Streams.readFully(encryptedDataInputStream, dekIdBytes); + if (bytesRead != DEK_ID_LENGTH) { + throw new RepositoryException(repositoryName, "The encrypted blob [" + blobName + "] is too small [" + bytesRead + "]"); + } + final String dekId = new String(dekIdBytes, StandardCharsets.UTF_8); + // might open a connection to read and decrypt the DEK, but most likely it will be served from cache + final SecretKey dek = getDEKById.apply(dekId); + // read and decrypt the rest of the blob + return new DecryptionPacketsInputStream(encryptedDataInputStream, dek, PACKET_LENGTH_IN_BYTES); + } catch (Exception e) { + try { + encryptedDataInputStream.close(); + } catch (IOException closeEx) { + e.addSuppressed(closeEx); + } + throw e; + } + } + + @Override + public InputStream readBlob(String blobName, long position, long length) throws IOException { + throw new UnsupportedOperationException("Not yet implemented"); + } + + /** + * Reads the blob content from the input stream and writes it to the container in a new blob with the given name. + * If {@code failIfAlreadyExists} is {@code true} and a blob with the same name already exists, the write operation will fail; + * otherwise, if {@code failIfAlreadyExists} is {@code false} the blob is overwritten. + * The contents are encrypted in a streaming fashion. The DEK (encryption key) is randomly generated and reused for encrypting + * subsequent blobs such that the same IV is not reused together with the same key. + * The DEK encryption key is separately stored in a different blob, which is encrypted with the repository key. 
+ * + * @param blobName + * The name of the blob to write the contents of the input stream to. + * @param inputStream + * The input stream from which to retrieve the bytes to write to the blob. + * @param blobSize + * The size of the blob to be written, in bytes. The actual number of bytes written to the storage service is larger + * because of encryption and authentication overhead. It is implementation dependent whether this value is used + * in writing the blob to the repository. + * @param failIfAlreadyExists + * whether to throw a FileAlreadyExistsException if the given blob already exists + */ + @Override + public void writeBlob(String blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists) throws IOException { + // reuse, but possibly generate and store a new DEK + final SingleUseKey singleUseNonceAndDEK = singleUseDEKSupplier.get(); + final BytesReference dekIdBytes = singleUseNonceAndDEK.getKeyId(); + if (dekIdBytes.length() != DEK_ID_LENGTH) { + throw new RepositoryException( + repositoryName, + "Unexpected fatal internal error", + new IllegalStateException("Unexpected DEK Id length [" + dekIdBytes.length() + "]") + ); + } + final long encryptedBlobSize = getEncryptedBlobByteLength(blobSize); + try ( + InputStream encryptedInputStream = ChainingInputStream.chain( + dekIdBytes.streamInput(), + new EncryptionPacketsInputStream( + inputStream, + singleUseNonceAndDEK.getKey(), + singleUseNonceAndDEK.getNonce(), + PACKET_LENGTH_IN_BYTES + ) + ) + ) { + delegatedBlobContainer.writeBlob(blobName, encryptedInputStream, encryptedBlobSize, failIfAlreadyExists); + } + } + + @Override + public void writeBlobAtomic(String blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists) + throws IOException { + // the encrypted repository does not offer an alternative implementation for atomic writes + // fallback to regular write + writeBlob(blobName, inputStream, blobSize, failIfAlreadyExists); + } + + @Override + public 
DeleteResult delete() throws IOException { + return delegatedBlobContainer.delete(); + } + + @Override + public void deleteBlobsIgnoringIfNotExists(List blobNames) throws IOException { + delegatedBlobContainer.deleteBlobsIgnoringIfNotExists(blobNames); + } + + @Override + public Map listBlobs() throws IOException { + return delegatedBlobContainer.listBlobs(); + } + + @Override + public Map listBlobsByPrefix(String blobNamePrefix) throws IOException { + return delegatedBlobContainer.listBlobsByPrefix(blobNamePrefix); + } + + @Override + public Map children() throws IOException { + final Map childEncryptedBlobContainers = delegatedBlobContainer.children(); + final Map resultBuilder = new HashMap<>(childEncryptedBlobContainers.size()); + for (Map.Entry childBlobContainer : childEncryptedBlobContainers.entrySet()) { + if (childBlobContainer.getKey().equals(DEK_ROOT_CONTAINER) && false == path().iterator().hasNext()) { + // do not descend into the DEK blob container + continue; + } + // get an encrypted blob container for each child + // Note that the encryption metadata blob container might be missing + resultBuilder.put( + childBlobContainer.getKey(), + new EncryptedBlobContainer( + path().add(childBlobContainer.getKey()), + repositoryName, + childBlobContainer.getValue(), + singleUseDEKSupplier, + getDEKById + ) + ); + } + return Map.copyOf(resultBuilder); + } + } } diff --git a/x-pack/plugin/repository-encrypted/src/main/java/org/elasticsearch/repositories/encrypted/EncryptedRepositoryPlugin.java b/x-pack/plugin/repository-encrypted/src/main/java/org/elasticsearch/repositories/encrypted/EncryptedRepositoryPlugin.java index 1d89bf500538e..849914fa72495 100644 --- a/x-pack/plugin/repository-encrypted/src/main/java/org/elasticsearch/repositories/encrypted/EncryptedRepositoryPlugin.java +++ b/x-pack/plugin/repository-encrypted/src/main/java/org/elasticsearch/repositories/encrypted/EncryptedRepositoryPlugin.java @@ -6,25 +6,168 @@ package 
org.elasticsearch.repositories.encrypted; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.cluster.metadata.RepositoryMetadata; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.SecureSetting; +import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.env.Environment; +import org.elasticsearch.indices.recovery.RecoverySettings; +import org.elasticsearch.license.LicenseUtils; +import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.ReloadablePlugin; import org.elasticsearch.plugins.RepositoryPlugin; +import org.elasticsearch.repositories.Repository; +import org.elasticsearch.repositories.blobstore.BlobStoreRepository; +import org.elasticsearch.xpack.core.XPackPlugin; +import java.security.GeneralSecurityException; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; import java.util.List; +import java.util.Map; +import java.util.function.Function; +import java.util.function.Supplier; -public final class EncryptedRepositoryPlugin extends Plugin implements RepositoryPlugin, ReloadablePlugin { +public class EncryptedRepositoryPlugin extends Plugin implements RepositoryPlugin { + static final Logger logger = LogManager.getLogger(EncryptedRepositoryPlugin.class); + static final String REPOSITORY_TYPE_NAME = "encrypted"; + // TODO add at least hdfs, and investigate supporting all `BlobStoreRepository` implementations + static final List SUPPORTED_ENCRYPTED_TYPE_NAMES = Arrays.asList("fs", "gcs", "azure", "s3"); + static 
final Setting.AffixSetting ENCRYPTION_PASSWORD_SETTING = Setting.affixKeySetting( + "repository.encrypted.", + "password", + key -> SecureSetting.secureString(key, null) + ); + static final Setting DELEGATE_TYPE_SETTING = Setting.simpleString("delegate_type", ""); + static final Setting PASSWORD_NAME_SETTING = Setting.simpleString("password_name", ""); - public EncryptedRepositoryPlugin(final Settings settings) {} + // "protected" because it is overloaded for tests + protected XPackLicenseState getLicenseState() { + return XPackPlugin.getSharedLicenseState(); + } @Override public List> getSettings() { - return List.of(); + return List.of(ENCRYPTION_PASSWORD_SETTING); } @Override - public void reload(Settings settings) { - // Secure settings should be readable inside this method. + public Map getRepositories( + Environment env, + NamedXContentRegistry registry, + ClusterService clusterService, + BigArrays bigArrays, + RecoverySettings recoverySettings + ) { + // load all the passwords from the keystore in memory because the keystore is not readable when the repository is created + final Map repositoryPasswordsMapBuilder = new HashMap<>(); + for (String passwordName : ENCRYPTION_PASSWORD_SETTING.getNamespaces(env.settings())) { + Setting passwordSetting = ENCRYPTION_PASSWORD_SETTING.getConcreteSettingForNamespace(passwordName); + repositoryPasswordsMapBuilder.put(passwordName, passwordSetting.get(env.settings())); + logger.debug("Loaded repository password [{}] from the node keystore", passwordName); + } + final Map repositoryPasswordsMap = Map.copyOf(repositoryPasswordsMapBuilder); + + return Collections.singletonMap(REPOSITORY_TYPE_NAME, new Repository.Factory() { + + @Override + public Repository create(RepositoryMetadata metadata) { + throw new UnsupportedOperationException(); + } + + @Override + public Repository create(RepositoryMetadata metadata, Function typeLookup) throws Exception { + final String delegateType = 
DELEGATE_TYPE_SETTING.get(metadata.settings()); + if (Strings.hasLength(delegateType) == false) { + throw new IllegalArgumentException("Repository setting [" + DELEGATE_TYPE_SETTING.getKey() + "] must be set"); + } + if (REPOSITORY_TYPE_NAME.equals(delegateType)) { + throw new IllegalArgumentException( + "Cannot encrypt an already encrypted repository. [" + + DELEGATE_TYPE_SETTING.getKey() + + "] must not be equal to [" + + REPOSITORY_TYPE_NAME + + "]" + ); + } + final Repository.Factory factory = typeLookup.apply(delegateType); + if (null == factory || false == SUPPORTED_ENCRYPTED_TYPE_NAMES.contains(delegateType)) { + throw new IllegalArgumentException( + "Unsupported delegate repository type [" + delegateType + "] for setting [" + DELEGATE_TYPE_SETTING.getKey() + "]" + ); + } + final String repositoryPasswordName = PASSWORD_NAME_SETTING.get(metadata.settings()); + if (Strings.hasLength(repositoryPasswordName) == false) { + throw new IllegalArgumentException("Repository setting [" + PASSWORD_NAME_SETTING.getKey() + "] must be set"); + } + final SecureString repositoryPassword = repositoryPasswordsMap.get(repositoryPasswordName); + if (repositoryPassword == null) { + throw new IllegalArgumentException( + "Secure setting [" + + ENCRYPTION_PASSWORD_SETTING.getConcreteSettingForNamespace(repositoryPasswordName).getKey() + + "] must be set" + ); + } + final Repository delegatedRepository = factory.create( + new RepositoryMetadata(metadata.name(), delegateType, metadata.settings()) + ); + if (false == (delegatedRepository instanceof BlobStoreRepository) || delegatedRepository instanceof EncryptedRepository) { + throw new IllegalArgumentException("Unsupported delegate repository type [" + DELEGATE_TYPE_SETTING.getKey() + "]"); + } + if (false == getLicenseState().isAllowed(XPackLicenseState.Feature.ENCRYPTED_SNAPSHOT)) { + logger.warn( + new ParameterizedMessage( + "Encrypted snapshots are not allowed for the currently installed license [{}]." 
+ + " Snapshots to the [{}] encrypted repository are not permitted." + + " All the other operations, including restore, work without restrictions.", + getLicenseState().getOperationMode().description(), + metadata.name() + ), + LicenseUtils.newComplianceException("encrypted snapshots") + ); + } + return createEncryptedRepository( + metadata, + registry, + clusterService, + bigArrays, + recoverySettings, + (BlobStoreRepository) delegatedRepository, + () -> getLicenseState(), + repositoryPassword + ); + } + }); + } + + // protected for tests + protected EncryptedRepository createEncryptedRepository( + RepositoryMetadata metadata, + NamedXContentRegistry registry, + ClusterService clusterService, + BigArrays bigArrays, + RecoverySettings recoverySettings, + BlobStoreRepository delegatedRepository, + Supplier licenseStateSupplier, + SecureString repoPassword + ) throws GeneralSecurityException { + return new EncryptedRepository( + metadata, + registry, + clusterService, + bigArrays, + recoverySettings, + delegatedRepository, + licenseStateSupplier, + repoPassword + ); } } diff --git a/x-pack/plugin/repository-encrypted/src/main/java/org/elasticsearch/repositories/encrypted/EncryptionPacketsInputStream.java b/x-pack/plugin/repository-encrypted/src/main/java/org/elasticsearch/repositories/encrypted/EncryptionPacketsInputStream.java index 6629db2cec22f..158e5699840a9 100644 --- a/x-pack/plugin/repository-encrypted/src/main/java/org/elasticsearch/repositories/encrypted/EncryptionPacketsInputStream.java +++ b/x-pack/plugin/repository-encrypted/src/main/java/org/elasticsearch/repositories/encrypted/EncryptionPacketsInputStream.java @@ -6,6 +6,8 @@ package org.elasticsearch.repositories.encrypted; +import org.elasticsearch.core.internal.io.IOUtils; + import javax.crypto.Cipher; import javax.crypto.CipherInputStream; import javax.crypto.NoSuchPaddingException; @@ -171,6 +173,11 @@ public void reset() throws IOException { } } + @Override + public void close() throws IOException 
{ + IOUtils.close(super::close, source); + } + private static Cipher getPacketEncryptionCipher(SecretKey secretKey, byte[] packetIv) throws IOException { GCMParameterSpec gcmParameterSpec = new GCMParameterSpec(EncryptedRepository.GCM_TAG_LENGTH_IN_BYTES * Byte.SIZE, packetIv); try { diff --git a/x-pack/plugin/repository-encrypted/src/main/java/org/elasticsearch/repositories/encrypted/SingleUseKey.java b/x-pack/plugin/repository-encrypted/src/main/java/org/elasticsearch/repositories/encrypted/SingleUseKey.java new file mode 100644 index 0000000000000..fe4729e8acec1 --- /dev/null +++ b/x-pack/plugin/repository-encrypted/src/main/java/org/elasticsearch/repositories/encrypted/SingleUseKey.java @@ -0,0 +1,103 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.repositories.encrypted; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.common.CheckedSupplier; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.collect.Tuple; + +import javax.crypto.SecretKey; +import java.util.concurrent.atomic.AtomicReference; + +/** + * Container class for a {@code SecretKey} with a unique identifier, and a 4-byte wide {@code Integer} nonce, that can be used for a + * single encryption operation. Use {@link #createSingleUseKeySupplier(CheckedSupplier)} to obtain a {@code Supplier} that returns + * a new {@link SingleUseKey} instance on every invocation. The number of unique {@code SecretKey}s (and their associated identifiers) + * generated is minimized while, at the same time, ensuring that a given {@code nonce} is not reused with the same key. 
+ */ +final class SingleUseKey { + private static final Logger logger = LogManager.getLogger(SingleUseKey.class); + static final int MIN_NONCE = Integer.MIN_VALUE; + static final int MAX_NONCE = Integer.MAX_VALUE; + private static final int MAX_ATTEMPTS = 9; + private static final SingleUseKey EXPIRED_KEY = new SingleUseKey(null, null, MAX_NONCE); + + private final BytesReference keyId; + private final SecretKey key; + private final int nonce; + + // for tests use only! + SingleUseKey(BytesReference KeyId, SecretKey Key, int nonce) { + this.keyId = KeyId; + this.key = Key; + this.nonce = nonce; + } + + public BytesReference getKeyId() { + return keyId; + } + + public SecretKey getKey() { + return key; + } + + public int getNonce() { + return nonce; + } + + /** + * Returns a {@code CheckedSupplier} of {@code SingleUseKey}s so that no two instances contain the same key and nonce pair. + * The current implementation increments the {@code nonce} while keeping the key constant, until the {@code nonce} space + * is exhausted, at which moment a new key is generated and the {@code nonce} is reset back. + * + * @param keyGenerator supplier for the key and the key id + */ + static CheckedSupplier createSingleUseKeySupplier( + CheckedSupplier, T> keyGenerator + ) { + final AtomicReference keyCurrentlyInUse = new AtomicReference<>(EXPIRED_KEY); + return internalSingleUseKeySupplier(keyGenerator, keyCurrentlyInUse); + } + + // for tests use only, the {@code keyCurrentlyInUse} must not be exposed to caller code + static CheckedSupplier internalSingleUseKeySupplier( + CheckedSupplier, T> keyGenerator, + AtomicReference keyCurrentlyInUse + ) { + final Object lock = new Object(); + return () -> { + for (int attemptNo = 0; attemptNo < MAX_ATTEMPTS; attemptNo++) { + final SingleUseKey nonceAndKey = keyCurrentlyInUse.getAndUpdate( + prev -> prev.nonce < MAX_NONCE ? 
new SingleUseKey(prev.keyId, prev.key, prev.nonce + 1) : EXPIRED_KEY + ); + if (nonceAndKey.nonce < MAX_NONCE) { + // this is the commonly used code path, where just the nonce is incremented + logger.trace( + () -> new ParameterizedMessage("Key with id [{}] reused with nonce [{}]", nonceAndKey.keyId, nonceAndKey.nonce) + ); + return nonceAndKey; + } else { + // this is the infrequent code path, where a new key is generated and the nonce is reset back + logger.trace( + () -> new ParameterizedMessage("Try to generate a new key to replace the key with id [{}]", nonceAndKey.keyId) + ); + synchronized (lock) { + if (keyCurrentlyInUse.get().nonce == MAX_NONCE) { + final Tuple newKey = keyGenerator.get(); + logger.debug(() -> new ParameterizedMessage("New key with id [{}] has been generated", newKey.v1())); + keyCurrentlyInUse.set(new SingleUseKey(newKey.v1(), newKey.v2(), MIN_NONCE)); + } + } + } + } + throw new IllegalStateException("Failure to generate new key"); + }; + } +} diff --git a/x-pack/plugin/repository-encrypted/src/test/java/org/elasticsearch/repositories/encrypted/AESKeyUtilsTests.java b/x-pack/plugin/repository-encrypted/src/test/java/org/elasticsearch/repositories/encrypted/AESKeyUtilsTests.java new file mode 100644 index 0000000000000..d30bc34a0f237 --- /dev/null +++ b/x-pack/plugin/repository-encrypted/src/test/java/org/elasticsearch/repositories/encrypted/AESKeyUtilsTests.java @@ -0,0 +1,54 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.repositories.encrypted; + +import org.elasticsearch.test.ESTestCase; + +import javax.crypto.SecretKey; +import javax.crypto.spec.SecretKeySpec; + +import java.security.InvalidKeyException; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.not; + +public class AESKeyUtilsTests extends ESTestCase { + + public void testWrapUnwrap() throws Exception { + byte[] keyToWrapBytes = randomByteArrayOfLength(AESKeyUtils.KEY_LENGTH_IN_BYTES); + SecretKey keyToWrap = new SecretKeySpec(keyToWrapBytes, "AES"); + byte[] wrappingKeyBytes = randomByteArrayOfLength(AESKeyUtils.KEY_LENGTH_IN_BYTES); + SecretKey wrappingKey = new SecretKeySpec(wrappingKeyBytes, "AES"); + byte[] wrappedKey = AESKeyUtils.wrap(wrappingKey, keyToWrap); + assertThat(wrappedKey.length, equalTo(AESKeyUtils.WRAPPED_KEY_LENGTH_IN_BYTES)); + SecretKey unwrappedKey = AESKeyUtils.unwrap(wrappingKey, wrappedKey); + assertThat(unwrappedKey, equalTo(keyToWrap)); + } + + public void testComputeId() throws Exception { + byte[] key1Bytes = randomByteArrayOfLength(AESKeyUtils.KEY_LENGTH_IN_BYTES); + SecretKey key1 = new SecretKeySpec(key1Bytes, "AES"); + byte[] key2Bytes = randomByteArrayOfLength(AESKeyUtils.KEY_LENGTH_IN_BYTES); + SecretKey key2 = new SecretKeySpec(key2Bytes, "AES"); + assertThat(AESKeyUtils.computeId(key1), not(equalTo(AESKeyUtils.computeId(key2)))); + assertThat(AESKeyUtils.computeId(key1), equalTo(AESKeyUtils.computeId(key1))); + assertThat(AESKeyUtils.computeId(key2), equalTo(AESKeyUtils.computeId(key2))); + } + + public void testFailedWrapUnwrap() throws Exception { + byte[] toWrapBytes = new byte[] { 0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7 }; + SecretKey keyToWrap = new SecretKeySpec(toWrapBytes, "AES"); + byte[] wrapBytes = new byte[] { 0, 0, 0, 0, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 0, 0, 0, 0 }; + SecretKey wrappingKey = new 
SecretKeySpec(wrapBytes, "AES"); + byte[] wrappedKey = AESKeyUtils.wrap(wrappingKey, keyToWrap); + for (int i = 0; i < wrappedKey.length; i++) { + wrappedKey[i] ^= 0xFFFFFFFF; + expectThrows(InvalidKeyException.class, () -> AESKeyUtils.unwrap(wrappingKey, wrappedKey)); + wrappedKey[i] ^= 0xFFFFFFFF; + } + } +} diff --git a/x-pack/plugin/repository-encrypted/src/test/java/org/elasticsearch/repositories/encrypted/ChainingInputStreamTests.java b/x-pack/plugin/repository-encrypted/src/test/java/org/elasticsearch/repositories/encrypted/ChainingInputStreamTests.java index 8694358234555..c664f29ffbbfc 100644 --- a/x-pack/plugin/repository-encrypted/src/test/java/org/elasticsearch/repositories/encrypted/ChainingInputStreamTests.java +++ b/x-pack/plugin/repository-encrypted/src/test/java/org/elasticsearch/repositories/encrypted/ChainingInputStreamTests.java @@ -24,6 +24,7 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.function.Supplier; +import static org.mockito.Matchers.anyInt; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; import static org.mockito.Mockito.times; @@ -32,6 +33,95 @@ public class ChainingInputStreamTests extends ESTestCase { + public void testChainComponentsWhenUsingFactoryMethod() throws Exception { + InputStream input1 = mock(InputStream.class); + when(input1.markSupported()).thenReturn(true); + when(input1.read()).thenReturn(randomIntBetween(0, 255)); + InputStream input2 = mock(InputStream.class); + when(input2.markSupported()).thenReturn(true); + when(input2.read()).thenReturn(randomIntBetween(0, 255)); + + ChainingInputStream chain = ChainingInputStream.chain(input1, input2); + + chain.read(); + verify(input1).read(); + verify(input2, times(0)).read(); + + when(input1.read()).thenReturn(-1); + chain.read(); + verify(input1, times(2)).read(); + verify(input1, times(0)).close(); + verify(input2).read(); + + when(input2.read()).thenReturn(-1); + chain.read(); + verify(input1, times(2)).read(); + 
verify(input2, times(2)).read(); + verify(input1, times(0)).close(); + verify(input2, times(0)).close(); + + chain.close(); + verify(input1).close(); + verify(input2).close(); + } + + public void testMarkAndResetWhenUsingFactoryMethod() throws Exception { + InputStream input1 = mock(InputStream.class); + when(input1.markSupported()).thenReturn(true); + when(input1.read()).thenReturn(randomIntBetween(0, 255)); + InputStream input2 = mock(InputStream.class); + when(input2.markSupported()).thenReturn(true); + when(input2.read()).thenReturn(randomIntBetween(0, 255)); + + ChainingInputStream chain = ChainingInputStream.chain(input1, input2); + verify(input1, times(1)).mark(anyInt()); + verify(input2, times(1)).mark(anyInt()); + + // mark at the beginning + chain.mark(randomIntBetween(1, 32)); + verify(input1, times(1)).mark(anyInt()); + verify(input2, times(1)).mark(anyInt()); + + verify(input1, times(0)).reset(); + chain.read(); + verify(input1, times(1)).reset(); + chain.reset(); + verify(input1, times(0)).close(); + verify(input1, times(1)).reset(); + chain.read(); + verify(input1, times(2)).reset(); + + // mark at the first component + chain.mark(randomIntBetween(1, 32)); + verify(input1, times(2)).mark(anyInt()); + verify(input2, times(1)).mark(anyInt()); + + when(input1.read()).thenReturn(-1); + chain.read(); + verify(input1, times(0)).close(); + chain.reset(); + verify(input1, times(3)).reset(); + + chain.read(); + verify(input2, times(2)).reset(); + + // mark at the second component + chain.mark(randomIntBetween(1, 32)); + verify(input1, times(2)).mark(anyInt()); + verify(input2, times(2)).mark(anyInt()); + + when(input2.read()).thenReturn(-1); + chain.read(); + verify(input1, times(0)).close(); + verify(input2, times(0)).close(); + chain.reset(); + verify(input2, times(3)).reset(); + + chain.close(); + verify(input1, times(1)).close(); + verify(input2, times(1)).close(); + } + public void testSkipWithinComponent() throws Exception { byte[] b1 = 
randomByteArrayOfLength(randomIntBetween(2, 16)); ChainingInputStream test = new ChainingInputStream() { diff --git a/x-pack/plugin/repository-encrypted/src/test/java/org/elasticsearch/repositories/encrypted/DecryptionPacketsInputStreamTests.java b/x-pack/plugin/repository-encrypted/src/test/java/org/elasticsearch/repositories/encrypted/DecryptionPacketsInputStreamTests.java index 66f9f028e0c39..66470ce874007 100644 --- a/x-pack/plugin/repository-encrypted/src/test/java/org/elasticsearch/repositories/encrypted/DecryptionPacketsInputStreamTests.java +++ b/x-pack/plugin/repository-encrypted/src/test/java/org/elasticsearch/repositories/encrypted/DecryptionPacketsInputStreamTests.java @@ -54,37 +54,6 @@ public void testSuccessEncryptAndDecryptTypicalPacketLength() throws Exception { } } - public void testFailureEncryptAndDecryptWrongNonce() throws Exception { - int len = 256 + Randomness.get().nextInt(256); - // 2-3 packets - int packetLen = 1 + Randomness.get().nextInt(len / 2); - byte[] plainBytes = new byte[len]; - Randomness.get().nextBytes(plainBytes); - SecretKey secretKey = generateSecretKey(); - int encryptNonce = Randomness.get().nextInt(); - int decryptNonce = Randomness.get().nextInt(); - while (decryptNonce == encryptNonce) { - decryptNonce = Randomness.get().nextInt(); - } - byte[] encryptedBytes; - try ( - InputStream in = new EncryptionPacketsInputStream( - new ByteArrayInputStream(plainBytes, 0, len), - secretKey, - encryptNonce, - packetLen - ) - ) { - encryptedBytes = in.readAllBytes(); - } - try ( - InputStream in = new DecryptionPacketsInputStream(new ByteArrayInputStream(encryptedBytes), secretKey, decryptNonce, packetLen) - ) { - IOException e = expectThrows(IOException.class, () -> { in.readAllBytes(); }); - assertThat(e.getMessage(), Matchers.startsWith("Packet nonce mismatch.")); - } - } - public void testFailureEncryptAndDecryptWrongKey() throws Exception { int len = 256 + Randomness.get().nextInt(256); // 2-3 packets @@ -105,9 +74,7 @@ public 
void testFailureEncryptAndDecryptWrongKey() throws Exception { ) { encryptedBytes = in.readAllBytes(); } - try ( - InputStream in = new DecryptionPacketsInputStream(new ByteArrayInputStream(encryptedBytes), decryptSecretKey, nonce, packetLen) - ) { + try (InputStream in = new DecryptionPacketsInputStream(new ByteArrayInputStream(encryptedBytes), decryptSecretKey, packetLen)) { IOException e = expectThrows(IOException.class, () -> { in.readAllBytes(); }); assertThat(e.getMessage(), Matchers.is("Exception during packet decryption")); } @@ -131,9 +98,7 @@ public void testFailureEncryptAndDecryptAlteredCiphertext() throws Exception { // flip bit encryptedBytes[i] ^= (1 << j); // fail decryption - try ( - InputStream in = new DecryptionPacketsInputStream(new ByteArrayInputStream(encryptedBytes), secretKey, nonce, packetLen) - ) { + try (InputStream in = new DecryptionPacketsInputStream(new ByteArrayInputStream(encryptedBytes), secretKey, packetLen)) { IOException e = expectThrows(IOException.class, () -> { in.readAllBytes(); }); assertThat(e.getMessage(), Matchers.is("Exception during packet decryption")); } @@ -162,16 +127,11 @@ public void testFailureEncryptAndDecryptAlteredCiphertextIV() throws Exception { // flip bit encryptedBytes[i + j] ^= (1 << k); try ( - InputStream in = new DecryptionPacketsInputStream( - new ByteArrayInputStream(encryptedBytes), - secretKey, - nonce, - packetLen - ) + InputStream in = new DecryptionPacketsInputStream(new ByteArrayInputStream(encryptedBytes), secretKey, packetLen) ) { IOException e = expectThrows(IOException.class, () -> { in.readAllBytes(); }); if (j < Integer.BYTES) { - assertThat(e.getMessage(), Matchers.startsWith("Packet nonce mismatch")); + assertThat(e.getMessage(), Matchers.startsWith("Exception during packet decryption")); } else { assertThat(e.getMessage(), Matchers.startsWith("Packet counter mismatch")); } @@ -197,7 +157,6 @@ private void testEncryptAndDecryptSuccess(byte[] plainBytes, SecretKey secretKey 
InputStream in = new DecryptionPacketsInputStream( new ReadLessFilterInputStream(new ByteArrayInputStream(encryptedBytes)), secretKey, - nonce, packetLen ) ) { diff --git a/x-pack/plugin/repository-encrypted/src/test/java/org/elasticsearch/repositories/encrypted/EncryptedRepositoryTests.java b/x-pack/plugin/repository-encrypted/src/test/java/org/elasticsearch/repositories/encrypted/EncryptedRepositoryTests.java new file mode 100644 index 0000000000000..a41f152c57dce --- /dev/null +++ b/x-pack/plugin/repository-encrypted/src/test/java/org/elasticsearch/repositories/encrypted/EncryptedRepositoryTests.java @@ -0,0 +1,178 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.repositories.encrypted; + +import org.elasticsearch.cluster.metadata.RepositoryMetadata; +import org.elasticsearch.cluster.service.ClusterApplierService; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.blobstore.BlobContainer; +import org.elasticsearch.common.blobstore.BlobPath; +import org.elasticsearch.common.blobstore.BlobStore; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.io.Streams; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.indices.recovery.RecoverySettings; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.repositories.RepositoryException; +import org.elasticsearch.repositories.blobstore.BlobStoreRepository; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import 
org.junit.Before; + +import javax.crypto.SecretKey; +import javax.crypto.spec.SecretKeySpec; +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.nio.charset.StandardCharsets; +import java.util.HashMap; +import java.util.Map; + +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.not; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyBoolean; +import static org.mockito.Matchers.anyLong; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class EncryptedRepositoryTests extends ESTestCase { + + private SecureString repoPassword; + private BlobPath delegatedPath; + private BlobStore delegatedBlobStore; + private BlobStoreRepository delegatedRepository; + private RepositoryMetadata repositoryMetadata; + private EncryptedRepository encryptedRepository; + private EncryptedRepository.EncryptedBlobStore encryptedBlobStore; + private Map blobsMap; + + @Before + public void setUpMocks() throws Exception { + this.repoPassword = new SecureString(randomAlphaOfLength(20).toCharArray()); + this.delegatedPath = randomFrom( + BlobPath.cleanPath(), + BlobPath.cleanPath().add(randomAlphaOfLength(8)), + BlobPath.cleanPath().add(randomAlphaOfLength(4)).add(randomAlphaOfLength(4)) + ); + this.delegatedBlobStore = mock(BlobStore.class); + this.delegatedRepository = mock(BlobStoreRepository.class); + when(delegatedRepository.blobStore()).thenReturn(delegatedBlobStore); + when(delegatedRepository.basePath()).thenReturn(delegatedPath); + this.repositoryMetadata = new RepositoryMetadata( + randomAlphaOfLength(4), + EncryptedRepositoryPlugin.REPOSITORY_TYPE_NAME, + Settings.EMPTY + ); + ClusterApplierService clusterApplierService = mock(ClusterApplierService.class); + 
when(clusterApplierService.threadPool()).thenReturn(mock(ThreadPool.class)); + ClusterService clusterService = mock(ClusterService.class); + when(clusterService.getClusterApplierService()).thenReturn(clusterApplierService); + this.encryptedRepository = new EncryptedRepository( + repositoryMetadata, + mock(NamedXContentRegistry.class), + clusterService, + mock(BigArrays.class), + mock(RecoverySettings.class), + delegatedRepository, + () -> mock(XPackLicenseState.class), + repoPassword + ); + this.encryptedBlobStore = (EncryptedRepository.EncryptedBlobStore) encryptedRepository.createBlobStore(); + this.blobsMap = new HashMap<>(); + doAnswer(invocationOnMockBlobStore -> { + BlobPath blobPath = ((BlobPath) invocationOnMockBlobStore.getArguments()[0]); + BlobContainer blobContainer = mock(BlobContainer.class); + // write atomic + doAnswer(invocationOnMockBlobContainer -> { + String DEKId = ((String) invocationOnMockBlobContainer.getArguments()[0]); + InputStream DEKInputStream = ((InputStream) invocationOnMockBlobContainer.getArguments()[1]); + this.blobsMap.put(blobPath.add(DEKId), BytesReference.toBytes(Streams.readFully(DEKInputStream))); + return null; + }).when(blobContainer).writeBlobAtomic(any(String.class), any(InputStream.class), anyLong(), anyBoolean()); + // read + doAnswer(invocationOnMockBlobContainer -> { + String DEKId = ((String) invocationOnMockBlobContainer.getArguments()[0]); + return new ByteArrayInputStream(blobsMap.get(blobPath.add(DEKId))); + }).when(blobContainer).readBlob(any(String.class)); + return blobContainer; + }).when(this.delegatedBlobStore).blobContainer(any(BlobPath.class)); + } + + public void testStoreDEKSuccess() throws Exception { + String DEKId = randomAlphaOfLengthBetween(2, 20); + SecretKey DEK = new SecretKeySpec(randomByteArrayOfLength(32), "AES"); + + encryptedBlobStore.storeDEK(DEKId, DEK); + + Tuple KEK = encryptedRepository.generateKEK(DEKId); + assertThat(blobsMap.keySet(), 
contains(delegatedPath.add(EncryptedRepository.DEK_ROOT_CONTAINER).add(DEKId).add(KEK.v1()))); + byte[] wrappedKey = blobsMap.values().iterator().next(); + SecretKey unwrappedKey = AESKeyUtils.unwrap(KEK.v2(), wrappedKey); + assertThat(unwrappedKey.getEncoded(), equalTo(DEK.getEncoded())); + } + + public void testGetDEKSuccess() throws Exception { + String DEKId = randomAlphaOfLengthBetween(2, 20); + SecretKey DEK = new SecretKeySpec(randomByteArrayOfLength(32), "AES"); + Tuple KEK = encryptedRepository.generateKEK(DEKId); + + byte[] wrappedDEK = AESKeyUtils.wrap(KEK.v2(), DEK); + blobsMap.put(delegatedPath.add(EncryptedRepository.DEK_ROOT_CONTAINER).add(DEKId).add(KEK.v1()), wrappedDEK); + + SecretKey loadedDEK = encryptedBlobStore.getDEKById(DEKId); + assertThat(loadedDEK.getEncoded(), equalTo(DEK.getEncoded())); + } + + public void testGetTamperedDEKFails() throws Exception { + String DEKId = randomAlphaOfLengthBetween(2, 20); + SecretKey DEK = new SecretKeySpec("01234567890123456789012345678901".getBytes(StandardCharsets.UTF_8), "AES"); + Tuple KEK = encryptedRepository.generateKEK(DEKId); + + byte[] wrappedDEK = AESKeyUtils.wrap(KEK.v2(), DEK); + int tamperPos = randomIntBetween(0, wrappedDEK.length - 1); + wrappedDEK[tamperPos] ^= 0xFF; + blobsMap.put(delegatedPath.add(EncryptedRepository.DEK_ROOT_CONTAINER).add(DEKId).add(KEK.v1()), wrappedDEK); + + RepositoryException e = expectThrows(RepositoryException.class, () -> encryptedBlobStore.getDEKById(DEKId)); + assertThat(e.repository(), equalTo(repositoryMetadata.name())); + assertThat(e.getMessage(), containsString("Failure to AES unwrap the DEK")); + } + + public void testGetDEKIOException() { + doAnswer(invocationOnMockBlobStore -> { + BlobPath blobPath = ((BlobPath) invocationOnMockBlobStore.getArguments()[0]); + BlobContainer blobContainer = mock(BlobContainer.class); + // read + doAnswer(invocationOnMockBlobContainer -> { throw new IOException("Tested IOException"); }).when(blobContainer) + 
.readBlob(any(String.class)); + return blobContainer; + }).when(this.delegatedBlobStore).blobContainer(any(BlobPath.class)); + IOException e = expectThrows(IOException.class, () -> encryptedBlobStore.getDEKById("id")); + assertThat(e.getMessage(), containsString("Tested IOException")); + } + + public void testGenerateKEK() { + String id1 = "fixed identifier 1"; + String id2 = "fixed identifier 2"; + Tuple KEK1 = encryptedRepository.generateKEK(id1); + Tuple KEK2 = encryptedRepository.generateKEK(id2); + assertThat(KEK1.v1(), not(equalTo(KEK2.v1()))); + assertThat(KEK1.v2(), not(equalTo(KEK2.v2()))); + Tuple sameKEK1 = encryptedRepository.generateKEK(id1); + assertThat(KEK1.v1(), equalTo(sameKEK1.v1())); + assertThat(KEK1.v2(), equalTo(sameKEK1.v2())); + } + +} diff --git a/x-pack/plugin/repository-encrypted/src/test/java/org/elasticsearch/repositories/encrypted/EncryptionPacketsInputStreamTests.java b/x-pack/plugin/repository-encrypted/src/test/java/org/elasticsearch/repositories/encrypted/EncryptionPacketsInputStreamTests.java index 91e820af659c0..e2d8a9b0fd64d 100644 --- a/x-pack/plugin/repository-encrypted/src/test/java/org/elasticsearch/repositories/encrypted/EncryptionPacketsInputStreamTests.java +++ b/x-pack/plugin/repository-encrypted/src/test/java/org/elasticsearch/repositories/encrypted/EncryptionPacketsInputStreamTests.java @@ -111,13 +111,13 @@ public void testShortPacketSizes() throws Exception { public void testPacketSizeMultipleOfAESBlockSize() throws Exception { int packetSize = 1 + Randomness.get().nextInt(8); testEncryptPacketWise( - 1 + Randomness.get().nextInt(packetSize * EncryptedRepository.AES_BLOCK_SIZE_IN_BYTES), - packetSize * EncryptedRepository.AES_BLOCK_SIZE_IN_BYTES, + 1 + Randomness.get().nextInt(packetSize * EncryptedRepository.AES_BLOCK_LENGTH_IN_BYTES), + packetSize * EncryptedRepository.AES_BLOCK_LENGTH_IN_BYTES, new DefaultBufferedReadAllStrategy() ); testEncryptPacketWise( - packetSize * 
EncryptedRepository.AES_BLOCK_SIZE_IN_BYTES + Randomness.get().nextInt(8192), - packetSize * EncryptedRepository.AES_BLOCK_SIZE_IN_BYTES, + packetSize * EncryptedRepository.AES_BLOCK_LENGTH_IN_BYTES + Randomness.get().nextInt(8192), + packetSize * EncryptedRepository.AES_BLOCK_LENGTH_IN_BYTES, new DefaultBufferedReadAllStrategy() ); } diff --git a/x-pack/plugin/repository-encrypted/src/test/java/org/elasticsearch/repositories/encrypted/LocalStateEncryptedRepositoryPlugin.java b/x-pack/plugin/repository-encrypted/src/test/java/org/elasticsearch/repositories/encrypted/LocalStateEncryptedRepositoryPlugin.java new file mode 100644 index 0000000000000..7e6080ceac476 --- /dev/null +++ b/x-pack/plugin/repository-encrypted/src/test/java/org/elasticsearch/repositories/encrypted/LocalStateEncryptedRepositoryPlugin.java @@ -0,0 +1,153 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.repositories.encrypted; + +import org.apache.lucene.index.IndexCommit; +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.cluster.metadata.RepositoryMetadata; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus; +import org.elasticsearch.index.store.Store; +import org.elasticsearch.indices.recovery.RecoverySettings; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.repositories.IndexId; +import org.elasticsearch.repositories.blobstore.BlobStoreRepository; +import org.elasticsearch.snapshots.SnapshotId; +import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; + +import java.nio.file.Path; +import java.security.GeneralSecurityException; +import java.util.Map; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.locks.Condition; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; +import java.util.function.Supplier; + +public final class LocalStateEncryptedRepositoryPlugin extends LocalStateCompositeXPackPlugin { + + final EncryptedRepositoryPlugin encryptedRepositoryPlugin; + + public LocalStateEncryptedRepositoryPlugin(final Settings settings, final Path configPath) { + super(settings, configPath); + final LocalStateEncryptedRepositoryPlugin thisVar = this; + + encryptedRepositoryPlugin = new EncryptedRepositoryPlugin() { + + @Override + protected XPackLicenseState getLicenseState() { + return thisVar.getLicenseState(); + } + + @Override + protected EncryptedRepository createEncryptedRepository( + RepositoryMetadata metadata, + 
NamedXContentRegistry registry, + ClusterService clusterService, + BigArrays bigArrays, + RecoverySettings recoverySettings, + BlobStoreRepository delegatedRepository, + Supplier licenseStateSupplier, + SecureString repoPassword + ) throws GeneralSecurityException { + return new TestEncryptedRepository( + metadata, + registry, + clusterService, + bigArrays, + recoverySettings, + delegatedRepository, + licenseStateSupplier, + repoPassword + ); + } + }; + plugins.add(encryptedRepositoryPlugin); + } + + static class TestEncryptedRepository extends EncryptedRepository { + private final Lock snapshotShardLock = new ReentrantLock(); + private final Condition snapshotShardCondition = snapshotShardLock.newCondition(); + private final AtomicBoolean snapshotShardBlock = new AtomicBoolean(false); + + TestEncryptedRepository( + RepositoryMetadata metadata, + NamedXContentRegistry registry, + ClusterService clusterService, + BigArrays bigArrays, + RecoverySettings recoverySettings, + BlobStoreRepository delegatedRepository, + Supplier licenseStateSupplier, + SecureString repoPassword + ) throws GeneralSecurityException { + super(metadata, registry, clusterService, bigArrays, recoverySettings, delegatedRepository, licenseStateSupplier, repoPassword); + } + + @Override + public void snapshotShard( + Store store, + MapperService mapperService, + SnapshotId snapshotId, + IndexId indexId, + IndexCommit snapshotIndexCommit, + String shardStateIdentifier, + IndexShardSnapshotStatus snapshotStatus, + Version repositoryMetaVersion, + Map userMetadata, + ActionListener listener + ) { + snapshotShardLock.lock(); + try { + while (snapshotShardBlock.get()) { + snapshotShardCondition.await(); + } + super.snapshotShard( + store, + mapperService, + snapshotId, + indexId, + snapshotIndexCommit, + shardStateIdentifier, + snapshotStatus, + repositoryMetaVersion, + userMetadata, + listener + ); + } catch (InterruptedException e) { + listener.onFailure(e); + } finally { + 
snapshotShardLock.unlock(); + } + } + + void blockSnapshotShard() { + snapshotShardLock.lock(); + try { + snapshotShardBlock.set(true); + snapshotShardCondition.signalAll(); + } finally { + snapshotShardLock.unlock(); + } + } + + void unblockSnapshotShard() { + snapshotShardLock.lock(); + try { + snapshotShardBlock.set(false); + snapshotShardCondition.signalAll(); + } finally { + snapshotShardLock.unlock(); + } + } + } + +} diff --git a/x-pack/plugin/repository-encrypted/src/test/java/org/elasticsearch/repositories/encrypted/SingleUseKeyTests.java b/x-pack/plugin/repository-encrypted/src/test/java/org/elasticsearch/repositories/encrypted/SingleUseKeyTests.java new file mode 100644 index 0000000000000..034cc41a84888 --- /dev/null +++ b/x-pack/plugin/repository-encrypted/src/test/java/org/elasticsearch/repositories/encrypted/SingleUseKeyTests.java @@ -0,0 +1,156 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.repositories.encrypted; + +import org.elasticsearch.common.CheckedSupplier; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.junit.Before; + +import javax.crypto.SecretKey; +import javax.crypto.spec.SecretKeySpec; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.atomic.AtomicReference; +import java.util.stream.Collectors; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.contains; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verifyZeroInteractions; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class SingleUseKeyTests extends ESTestCase { + + byte[] testKeyPlaintext; + SecretKey testKey; + BytesReference testKeyId; + + @Before + public void setUpMocks() { + testKeyPlaintext = randomByteArrayOfLength(32); + testKey = new SecretKeySpec(testKeyPlaintext, "AES"); + testKeyId = new BytesArray(randomAlphaOfLengthBetween(2, 32)); + } + + public void testNewKeySupplier() throws Exception { + CheckedSupplier singleUseKeySupplier = SingleUseKey.createSingleUseKeySupplier( + () -> new Tuple<>(testKeyId, testKey) + ); + SingleUseKey generatedSingleUseKey = singleUseKeySupplier.get(); + assertThat(generatedSingleUseKey.getKeyId(), equalTo(testKeyId)); + assertThat(generatedSingleUseKey.getNonce(), equalTo(SingleUseKey.MIN_NONCE)); + assertThat(generatedSingleUseKey.getKey().getEncoded(), equalTo(testKeyPlaintext)); + } + + public void testNonceIncrement() throws Exception { + int nonce = 
randomIntBetween(SingleUseKey.MIN_NONCE, SingleUseKey.MAX_NONCE - 2); + SingleUseKey singleUseKey = new SingleUseKey(testKeyId, testKey, nonce); + AtomicReference keyCurrentlyInUse = new AtomicReference<>(singleUseKey); + @SuppressWarnings("unchecked") + CheckedSupplier, IOException> keyGenerator = mock(CheckedSupplier.class); + CheckedSupplier singleUseKeySupplier = SingleUseKey.internalSingleUseKeySupplier( + keyGenerator, + keyCurrentlyInUse + ); + SingleUseKey generatedSingleUseKey = singleUseKeySupplier.get(); + assertThat(generatedSingleUseKey.getKeyId(), equalTo(testKeyId)); + assertThat(generatedSingleUseKey.getNonce(), equalTo(nonce)); + assertThat(generatedSingleUseKey.getKey().getEncoded(), equalTo(testKeyPlaintext)); + SingleUseKey generatedSingleUseKey2 = singleUseKeySupplier.get(); + assertThat(generatedSingleUseKey2.getKeyId(), equalTo(testKeyId)); + assertThat(generatedSingleUseKey2.getNonce(), equalTo(nonce + 1)); + assertThat(generatedSingleUseKey2.getKey().getEncoded(), equalTo(testKeyPlaintext)); + verifyZeroInteractions(keyGenerator); + } + + public void testConcurrentWrapAround() throws Exception { + int nThreads = 3; + TestThreadPool testThreadPool = new TestThreadPool( + "SingleUserKeyTests#testConcurrentWrapAround", + Settings.builder() + .put("thread_pool." + ThreadPool.Names.GENERIC + ".size", nThreads) + .put("thread_pool." 
+ ThreadPool.Names.GENERIC + ".queue_size", 1) + .build() + ); + int nonce = SingleUseKey.MAX_NONCE; + SingleUseKey singleUseKey = new SingleUseKey(null, null, nonce); + + AtomicReference keyCurrentlyInUse = new AtomicReference<>(singleUseKey); + @SuppressWarnings("unchecked") + CheckedSupplier, IOException> keyGenerator = mock(CheckedSupplier.class); + when(keyGenerator.get()).thenReturn(new Tuple<>(testKeyId, testKey)); + CheckedSupplier singleUseKeySupplier = SingleUseKey.internalSingleUseKeySupplier( + keyGenerator, + keyCurrentlyInUse + ); + List generatedKeys = new ArrayList<>(nThreads); + for (int i = 0; i < nThreads; i++) { + generatedKeys.add(null); + } + for (int i = 0; i < nThreads; i++) { + final int resultIdx = i; + testThreadPool.generic().execute(() -> { + try { + generatedKeys.set(resultIdx, singleUseKeySupplier.get()); + } catch (IOException e) { + fail(); + } + }); + } + terminate(testThreadPool); + verify(keyGenerator, times(1)).get(); + assertThat(keyCurrentlyInUse.get().getNonce(), equalTo(SingleUseKey.MIN_NONCE + nThreads)); + assertThat(generatedKeys.stream().map(suk -> suk.getKey()).collect(Collectors.toSet()).size(), equalTo(1)); + assertThat( + generatedKeys.stream().map(suk -> suk.getKey().getEncoded()).collect(Collectors.toSet()).iterator().next(), + equalTo(testKeyPlaintext) + ); + assertThat(generatedKeys.stream().map(suk -> suk.getKeyId()).collect(Collectors.toSet()).iterator().next(), equalTo(testKeyId)); + assertThat(generatedKeys.stream().map(suk -> suk.getNonce()).collect(Collectors.toSet()).size(), equalTo(nThreads)); + assertThat( + generatedKeys.stream().map(suk -> suk.getNonce()).collect(Collectors.toSet()), + contains(SingleUseKey.MIN_NONCE, SingleUseKey.MIN_NONCE + 1, SingleUseKey.MIN_NONCE + 2) + ); + } + + public void testNonceWrapAround() throws Exception { + int nonce = SingleUseKey.MAX_NONCE; + SingleUseKey singleUseKey = new SingleUseKey(testKeyId, testKey, nonce); + AtomicReference keyCurrentlyInUse = new 
AtomicReference<>(singleUseKey); + byte[] newTestKeyPlaintext = randomByteArrayOfLength(32); + SecretKey newTestKey = new SecretKeySpec(newTestKeyPlaintext, "AES"); + BytesReference newTestKeyId = new BytesArray(randomAlphaOfLengthBetween(2, 32)); + CheckedSupplier singleUseKeySupplier = SingleUseKey.internalSingleUseKeySupplier( + () -> new Tuple<>(newTestKeyId, newTestKey), + keyCurrentlyInUse + ); + SingleUseKey generatedSingleUseKey = singleUseKeySupplier.get(); + assertThat(generatedSingleUseKey.getKeyId(), equalTo(newTestKeyId)); + assertThat(generatedSingleUseKey.getNonce(), equalTo(SingleUseKey.MIN_NONCE)); + assertThat(generatedSingleUseKey.getKey().getEncoded(), equalTo(newTestKeyPlaintext)); + } + + public void testGeneratorException() { + int nonce = SingleUseKey.MAX_NONCE; + SingleUseKey singleUseKey = new SingleUseKey(null, null, nonce); + AtomicReference keyCurrentlyInUse = new AtomicReference<>(singleUseKey); + CheckedSupplier singleUseKeySupplier = SingleUseKey.internalSingleUseKeySupplier( + () -> { throw new IOException("expected exception"); }, + keyCurrentlyInUse + ); + expectThrows(IOException.class, () -> singleUseKeySupplier.get()); + } +}