From 3dd9d229876e3bc455b34f23c05af4783d49558c Mon Sep 17 00:00:00 2001 From: Vladimir Dolzhenko Date: Wed, 20 Jun 2018 17:34:46 +0200 Subject: [PATCH 01/13] lazy snapshot repository initialization --- .../repositories/url/URLRepository.java | 38 +++-- .../repositories/url/URLRepositoryTests.java | 22 ++- .../repositories/azure/AzureRepository.java | 62 +++++--- .../azure/AzureRepositorySettingsTests.java | 29 ++-- .../azure/AzureSnapshotRestoreTests.java | 18 ++- .../gcs/GoogleCloudStorageRepository.java | 28 ++-- ...eCloudStorageBlobStoreRepositoryTests.java | 18 ++- .../repositories/hdfs/HdfsRepository.java | 40 ++++-- .../repositories/s3/S3Repository.java | 50 ++++--- .../repositories/s3/S3RepositoryPlugin.java | 3 +- .../s3/RepositoryCredentialsTests.java | 8 +- .../s3/S3BlobStoreRepositoryTests.java | 23 ++- .../repositories/s3/S3RepositoryTests.java | 14 +- .../repositories/RepositoriesService.java | 63 +++++---- .../VerifyNodeRepositoryAction.java | 2 +- .../blobstore/BlobStoreRepository.java | 133 +++++++++++++----- .../repositories/fs/FsRepository.java | 64 +++++---- ...ClusterStateServiceRandomUpdatesTests.java | 2 +- .../BlobStoreRepositoryRestoreTests.java | 9 +- .../blobstore/BlobStoreRepositoryTests.java | 2 + .../fs}/FsBlobStoreRepositoryIT.java | 19 ++- .../SharedClusterSnapshotRestoreIT.java | 1 + .../snapshots/mockstore/MockRepository.java | 13 +- .../ESBlobStoreRepositoryIntegTestCase.java | 25 +++- 24 files changed, 479 insertions(+), 207 deletions(-) rename server/src/test/java/org/elasticsearch/{snapshots => repositories/fs}/FsBlobStoreRepositoryIT.java (66%) diff --git a/modules/repository-url/src/main/java/org/elasticsearch/repositories/url/URLRepository.java b/modules/repository-url/src/main/java/org/elasticsearch/repositories/url/URLRepository.java index c1128fd683a70..7dc67a1f7f56e 100644 --- a/modules/repository-url/src/main/java/org/elasticsearch/repositories/url/URLRepository.java +++ 
b/modules/repository-url/src/main/java/org/elasticsearch/repositories/url/URLRepository.java @@ -20,8 +20,8 @@ package org.elasticsearch.repositories.url; import org.elasticsearch.cluster.metadata.RepositoryMetaData; +import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobPath; -import org.elasticsearch.common.blobstore.BlobStore; import org.elasticsearch.common.blobstore.url.URLBlobStore; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; @@ -31,7 +31,6 @@ import org.elasticsearch.repositories.RepositoryException; import org.elasticsearch.repositories.blobstore.BlobStoreRepository; -import java.io.IOException; import java.net.MalformedURLException; import java.net.URISyntaxException; import java.net.URL; @@ -49,7 +48,7 @@ *
{@code concurrent_streams}
Number of concurrent read/write stream (per repository on each node). Defaults to 5.
* */ -public class URLRepository extends BlobStoreRepository { +public class URLRepository extends BlobStoreRepository { public static final String TYPE = "url"; @@ -71,33 +70,46 @@ public class URLRepository extends BlobStoreRepository { private final Environment environment; - private final URLBlobStore blobStore; - private final BlobPath basePath; /** * Constructs a read-only URL-based repository */ public URLRepository(RepositoryMetaData metadata, Environment environment, - NamedXContentRegistry namedXContentRegistry) throws IOException { + NamedXContentRegistry namedXContentRegistry) { super(metadata, environment.settings(), namedXContentRegistry); if (URL_SETTING.exists(metadata.settings()) == false && REPOSITORIES_URL_SETTING.exists(settings) == false) { throw new RepositoryException(metadata.name(), "missing url"); } + this.environment = environment; supportedProtocols = SUPPORTED_PROTOCOLS_SETTING.get(settings); urlWhiteList = ALLOWED_URLS_SETTING.get(settings).toArray(new URIPattern[]{}); - this.environment = environment; - - URL url = URL_SETTING.exists(metadata.settings()) ? URL_SETTING.get(metadata.settings()) : REPOSITORIES_URL_SETTING.get(settings); - URL normalizedURL = checkURL(url); - blobStore = new URLBlobStore(settings, normalizedURL); basePath = BlobPath.cleanPath(); } + private URL getUrl() { + URL url = URL_SETTING.exists(metadata.settings()) + ? 
URL_SETTING.get(metadata.settings()) : REPOSITORIES_URL_SETTING.get(settings); + return checkURL(url); + } + + @Override + protected URLBlobStore createBlobStore() { + URL normalizedURL = getUrl(); + return new URLBlobStore(settings, normalizedURL); + } + + // only use for testing + @Override + protected BlobContainer snapshotsBlobContainer() { + return super.snapshotsBlobContainer(); + } + + // only use for testing @Override - protected BlobStore blobStore() { - return blobStore; + protected URLBlobStore innerBlobStore() { + return super.innerBlobStore(); } @Override diff --git a/modules/repository-url/src/test/java/org/elasticsearch/repositories/url/URLRepositoryTests.java b/modules/repository-url/src/test/java/org/elasticsearch/repositories/url/URLRepositoryTests.java index 1af4c1eaba9ad..524a6ac086050 100644 --- a/modules/repository-url/src/test/java/org/elasticsearch/repositories/url/URLRepositoryTests.java +++ b/modules/repository-url/src/test/java/org/elasticsearch/repositories/url/URLRepositoryTests.java @@ -31,6 +31,10 @@ import java.nio.file.Path; import java.util.Collections; +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.CoreMatchers.not; +import static org.hamcrest.CoreMatchers.nullValue; + public class URLRepositoryTests extends ESTestCase { public void testWhiteListingRepoURL() throws IOException { @@ -41,8 +45,12 @@ public void testWhiteListingRepoURL() throws IOException { .put(URLRepository.REPOSITORIES_URL_SETTING.getKey(), repoPath) .build(); RepositoryMetaData repositoryMetaData = new RepositoryMetaData("url", URLRepository.TYPE, baseSettings); - new URLRepository(repositoryMetaData, TestEnvironment.newEnvironment(baseSettings), + final URLRepository repository = new URLRepository(repositoryMetaData, TestEnvironment.newEnvironment(baseSettings), new NamedXContentRegistry(Collections.emptyList())); + + assertThat("blob store has to be lazy initialized", repository.innerBlobStore(), is(nullValue())); + 
repository.snapshotsBlobContainer(); + assertThat("snapshotsBlobContainer has to initialize blob store", repository.innerBlobStore(), not(nullValue())); } public void testIfNotWhiteListedMustSetRepoURL() throws IOException { @@ -52,9 +60,11 @@ public void testIfNotWhiteListedMustSetRepoURL() throws IOException { .put(URLRepository.REPOSITORIES_URL_SETTING.getKey(), repoPath) .build(); RepositoryMetaData repositoryMetaData = new RepositoryMetaData("url", URLRepository.TYPE, baseSettings); + URLRepository repository = new URLRepository(repositoryMetaData, TestEnvironment.newEnvironment(baseSettings), + new NamedXContentRegistry(Collections.emptyList())); + assertThat(repository.innerBlobStore(), is(nullValue())); try { - new URLRepository(repositoryMetaData, TestEnvironment.newEnvironment(baseSettings), - new NamedXContentRegistry(Collections.emptyList())); + repository.snapshotsBlobContainer(); fail("RepositoryException should have been thrown."); } catch (RepositoryException e) { String msg = "[url] file url [" + repoPath @@ -73,9 +83,11 @@ public void testMustBeSupportedProtocol() throws IOException { .put(URLRepository.SUPPORTED_PROTOCOLS_SETTING.getKey(), "http,https") .build(); RepositoryMetaData repositoryMetaData = new RepositoryMetaData("url", URLRepository.TYPE, baseSettings); + URLRepository repository = new URLRepository(repositoryMetaData, TestEnvironment.newEnvironment(baseSettings), + new NamedXContentRegistry(Collections.emptyList())); + assertThat(repository.innerBlobStore(), is(nullValue())); try { - new URLRepository(repositoryMetaData, TestEnvironment.newEnvironment(baseSettings), - new NamedXContentRegistry(Collections.emptyList())); + repository.snapshotsBlobContainer(); fail("RepositoryException should have been thrown."); } catch (RepositoryException e) { assertEquals("[url] unsupported url protocol [file] from URL [" + repoPath +"]", e.getMessage()); diff --git 
a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java index 47b398a4c2fd3..63462dbaa9578 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java @@ -27,7 +27,6 @@ import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.Strings; import org.elasticsearch.common.blobstore.BlobPath; -import org.elasticsearch.common.blobstore.BlobStore; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.unit.ByteSizeValue; @@ -38,7 +37,6 @@ import org.elasticsearch.snapshots.SnapshotCreationException; import org.elasticsearch.snapshots.SnapshotId; -import java.io.IOException; import java.net.URISyntaxException; import java.util.List; import java.util.Locale; @@ -58,7 +56,7 @@ *
{@code compress}
If set to true metadata files will be stored compressed. Defaults to false.
* */ -public class AzureRepository extends BlobStoreRepository { +public class AzureRepository extends BlobStoreRepository { public static final String TYPE = "azure"; @@ -78,25 +76,21 @@ public static final class Repository { public static final Setting READONLY_SETTING = Setting.boolSetting("readonly", false, Property.NodeScope); } - private final AzureBlobStore blobStore; private final BlobPath basePath; private final ByteSizeValue chunkSize; private final boolean compress; - private final boolean readonly; + private final Environment environment; + private final AzureStorageService storageService; + private volatile boolean readonly; public AzureRepository(RepositoryMetaData metadata, Environment environment, NamedXContentRegistry namedXContentRegistry, - AzureStorageService storageService) throws IOException, URISyntaxException, StorageException { + AzureStorageService storageService) { super(metadata, environment.settings(), namedXContentRegistry); - this.blobStore = new AzureBlobStore(metadata, environment.settings(), storageService); this.chunkSize = Repository.CHUNK_SIZE_SETTING.get(metadata.settings()); this.compress = Repository.COMPRESS_SETTING.get(metadata.settings()); - // If the user explicitly did not define a readonly value, we set it by ourselves depending on the location mode setting. 
- // For secondary_only setting, the repository should be read only - if (Repository.READONLY_SETTING.exists(metadata.settings())) { - this.readonly = Repository.READONLY_SETTING.get(metadata.settings()); - } else { - this.readonly = this.blobStore.getLocationMode() == LocationMode.SECONDARY_ONLY; - } + this.environment = environment; + this.storageService = storageService; + final String basePath = Strings.trimLeadingCharacter(Repository.BASE_PATH_SETTING.get(metadata.settings()), '/'); if (Strings.hasLength(basePath)) { // Remove starting / if any @@ -108,15 +102,40 @@ public AzureRepository(RepositoryMetaData metadata, Environment environment, Nam } else { this.basePath = BlobPath.cleanPath(); } - logger.debug((org.apache.logging.log4j.util.Supplier) () -> new ParameterizedMessage( - "using container [{}], chunk_size [{}], compress [{}], base_path [{}]", blobStore, chunkSize, compress, basePath)); + } + + // only use for testing + @Override + protected AzureBlobStore blobStore() { + return super.blobStore(); + } + + // only use for testing + @Override + protected AzureBlobStore innerBlobStore() { + return super.innerBlobStore(); } /** * {@inheritDoc} */ @Override - protected BlobStore blobStore() { + protected AzureBlobStore createBlobStore() throws URISyntaxException, StorageException { + final AzureBlobStore blobStore = new AzureBlobStore(metadata, environment.settings(), storageService); + + // If the user explicitly did not define a readonly value, we set it by ourselves depending on the location mode setting. 
+ // For secondary_only setting, the repository should be read only + + if (Repository.READONLY_SETTING.exists(metadata.settings())) { + this.readonly = Repository.READONLY_SETTING.get(metadata.settings()); + } else { + this.readonly = blobStore.getLocationMode() == LocationMode.SECONDARY_ONLY; + } + + logger.debug((org.apache.logging.log4j.util.Supplier) () -> new ParameterizedMessage( + "using container [{}], chunk_size [{}], compress [{}], base_path [{}]", + blobStore, chunkSize, compress, basePath)); + return blobStore; } @@ -144,6 +163,7 @@ protected ByteSizeValue chunkSize() { @Override public void initializeSnapshot(SnapshotId snapshotId, List indices, MetaData clusterMetadata) { try { + final AzureBlobStore blobStore = blobStore(); if (blobStore.containerExist() == false) { throw new IllegalArgumentException("The bucket [" + blobStore + "] does not exist. Please create it before " + " creating an azure snapshot repository backed by it."); @@ -156,6 +176,14 @@ public void initializeSnapshot(SnapshotId snapshotId, List indices, Met @Override public boolean isReadOnly() { + // If the user explicitly did not define a readonly value, we set it by ourselves depending on the location mode setting. 
+ // For secondary_only setting, the repository should be read only + if (Repository.READONLY_SETTING.exists(metadata.settings())) { + this.readonly = Repository.READONLY_SETTING.get(metadata.settings()); + } else { + // otherwise it's possible obtain actual value only if blobStore is created + blobStore(); + } return readonly; } } diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureRepositorySettingsTests.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureRepositorySettingsTests.java index 639905042cf87..2c274128af3ad 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureRepositorySettingsTests.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureRepositorySettingsTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.repositories.azure; import com.microsoft.azure.storage.LocationMode; -import com.microsoft.azure.storage.StorageException; import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; @@ -30,76 +29,76 @@ import org.elasticsearch.env.TestEnvironment; import org.elasticsearch.test.ESTestCase; -import java.io.IOException; -import java.net.URISyntaxException; - import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; import static org.mockito.Mockito.mock; public class AzureRepositorySettingsTests extends ESTestCase { - private AzureRepository azureRepository(Settings settings) throws StorageException, IOException, URISyntaxException { + private AzureRepository azureRepository(Settings settings) { Settings internalSettings = Settings.builder() .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath()) .putList(Environment.PATH_DATA_SETTING.getKey(), tmpPaths()) .put(settings) .build(); - return new AzureRepository(new RepositoryMetaData("foo", 
"azure", internalSettings), + final AzureRepository azureRepository = new AzureRepository(new RepositoryMetaData("foo", "azure", internalSettings), TestEnvironment.newEnvironment(internalSettings), NamedXContentRegistry.EMPTY, mock(AzureStorageService.class)); + assertThat(azureRepository.innerBlobStore(), is(nullValue())); + return azureRepository; } - public void testReadonlyDefault() throws StorageException, IOException, URISyntaxException { + public void testReadonlyDefault() { assertThat(azureRepository(Settings.EMPTY).isReadOnly(), is(false)); } - public void testReadonlyDefaultAndReadonlyOn() throws StorageException, IOException, URISyntaxException { + public void testReadonlyDefaultAndReadonlyOn() { assertThat(azureRepository(Settings.builder() .put("readonly", true) .build()).isReadOnly(), is(true)); } - public void testReadonlyWithPrimaryOnly() throws StorageException, IOException, URISyntaxException { + public void testReadonlyWithPrimaryOnly() { assertThat(azureRepository(Settings.builder() .put(AzureRepository.Repository.LOCATION_MODE_SETTING.getKey(), LocationMode.PRIMARY_ONLY.name()) .build()).isReadOnly(), is(false)); } - public void testReadonlyWithPrimaryOnlyAndReadonlyOn() throws StorageException, IOException, URISyntaxException { + public void testReadonlyWithPrimaryOnlyAndReadonlyOn() { assertThat(azureRepository(Settings.builder() .put(AzureRepository.Repository.LOCATION_MODE_SETTING.getKey(), LocationMode.PRIMARY_ONLY.name()) .put("readonly", true) .build()).isReadOnly(), is(true)); } - public void testReadonlyWithSecondaryOnlyAndReadonlyOn() throws StorageException, IOException, URISyntaxException { + public void testReadonlyWithSecondaryOnlyAndReadonlyOn() { assertThat(azureRepository(Settings.builder() .put(AzureRepository.Repository.LOCATION_MODE_SETTING.getKey(), LocationMode.SECONDARY_ONLY.name()) .put("readonly", true) .build()).isReadOnly(), is(true)); } - public void testReadonlyWithSecondaryOnlyAndReadonlyOff() throws 
StorageException, IOException, URISyntaxException { + public void testReadonlyWithSecondaryOnlyAndReadonlyOff() { assertThat(azureRepository(Settings.builder() .put(AzureRepository.Repository.LOCATION_MODE_SETTING.getKey(), LocationMode.SECONDARY_ONLY.name()) .put("readonly", false) .build()).isReadOnly(), is(false)); } - public void testReadonlyWithPrimaryAndSecondaryOnlyAndReadonlyOn() throws StorageException, IOException, URISyntaxException { + public void testReadonlyWithPrimaryAndSecondaryOnlyAndReadonlyOn() { assertThat(azureRepository(Settings.builder() .put(AzureRepository.Repository.LOCATION_MODE_SETTING.getKey(), LocationMode.PRIMARY_THEN_SECONDARY.name()) .put("readonly", true) .build()).isReadOnly(), is(true)); } - public void testReadonlyWithPrimaryAndSecondaryOnlyAndReadonlyOff() throws StorageException, IOException, URISyntaxException { + public void testReadonlyWithPrimaryAndSecondaryOnlyAndReadonlyOff() { assertThat(azureRepository(Settings.builder() .put(AzureRepository.Repository.LOCATION_MODE_SETTING.getKey(), LocationMode.PRIMARY_THEN_SECONDARY.name()) .put("readonly", false) .build()).isReadOnly(), is(false)); } - public void testChunkSize() throws StorageException, IOException, URISyntaxException { + public void testChunkSize() { // default chunk size AzureRepository azureRepository = azureRepository(Settings.EMPTY); assertEquals(AzureStorageService.MAX_CHUNK_SIZE, azureRepository.chunkSize()); diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreTests.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreTests.java index 10163bb2f31df..1edb5f3532382 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreTests.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreTests.java @@ -57,7 +57,11 @@ import static 
org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.lessThanOrEqualTo; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; /** * Those integration tests need an Azure access and must be run with @@ -408,12 +412,24 @@ public void testGeoRedundantStorage() throws Exception { } @Override - protected void createTestRepository(String name) { + protected void createTestRepository(String name, boolean verify) { assertAcked(client().admin().cluster().preparePutRepository(name) .setType(AzureRepository.TYPE) + .setVerify(verify) .setSettings(Settings.builder() .put(Repository.CONTAINER_SETTING.getKey(), getContainerName()) .put(Repository.BASE_PATH_SETTING.getKey(), getRepositoryPath()) .put(Repository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES))); } + + + @Override + protected void afterCreationCheck(org.elasticsearch.repositories.Repository repository, boolean verify) { + assertThat(repository, instanceOf(AzureRepository.class)); + + AzureRepository azureRepository = (AzureRepository) repository; + + assertThat("azure blob store has to be lazy initialized", + azureRepository.innerBlobStore(), verify ? 
is(notNullValue()) : is(nullValue())); + } } diff --git a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java index 83d48eeda20aa..3a625d5a23fa0 100644 --- a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java +++ b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java @@ -22,7 +22,6 @@ import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.Strings; import org.elasticsearch.common.blobstore.BlobPath; -import org.elasticsearch.common.blobstore.BlobStore; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; @@ -38,7 +37,7 @@ import static org.elasticsearch.common.settings.Setting.byteSizeSetting; import static org.elasticsearch.common.settings.Setting.simpleString; -class GoogleCloudStorageRepository extends BlobStoreRepository { +class GoogleCloudStorageRepository extends BlobStoreRepository { // package private for testing static final ByteSizeValue MIN_CHUNK_SIZE = new ByteSizeValue(1, ByteSizeUnit.BYTES); @@ -59,15 +58,14 @@ class GoogleCloudStorageRepository extends BlobStoreRepository { private final ByteSizeValue chunkSize; private final boolean compress; private final BlobPath basePath; - private final GoogleCloudStorageBlobStore blobStore; + private final GoogleCloudStorageService storageService; GoogleCloudStorageRepository(RepositoryMetaData metadata, Environment environment, NamedXContentRegistry namedXContentRegistry, - GoogleCloudStorageService storageService) throws Exception { + GoogleCloudStorageService storageService){ super(metadata, environment.settings(), namedXContentRegistry); + this.storageService = storageService; - String bucket = 
getSetting(BUCKET, metadata); - String clientName = CLIENT_NAME.get(metadata.settings()); String basePath = BASE_PATH.get(metadata.settings()); if (Strings.hasLength(basePath)) { BlobPath path = new BlobPath(); @@ -81,16 +79,26 @@ class GoogleCloudStorageRepository extends BlobStoreRepository { this.compress = getSetting(COMPRESS, metadata); this.chunkSize = getSetting(CHUNK_SIZE, metadata); + } + @Override + protected GoogleCloudStorageBlobStore createBlobStore() { + String bucket = getSetting(BUCKET, metadata); + String clientName = CLIENT_NAME.get(metadata.settings()); logger.debug("using bucket [{}], base_path [{}], chunk_size [{}], compress [{}]", bucket, basePath, chunkSize, compress); - - this.blobStore = new GoogleCloudStorageBlobStore(settings, bucket, clientName, storageService); + return new GoogleCloudStorageBlobStore(settings, bucket, clientName, storageService); } + // only use for testing + @Override + protected GoogleCloudStorageBlobStore blobStore() { + return super.blobStore(); + } + // only use for testing @Override - protected BlobStore blobStore() { - return blobStore; + protected GoogleCloudStorageBlobStore innerBlobStore() { + return super.innerBlobStore(); } @Override diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java index 3692b26f2bbb7..532d395b67b8c 100644 --- a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java @@ -25,6 +25,7 @@ import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.repositories.Repository; import 
org.elasticsearch.repositories.blobstore.ESBlobStoreRepositoryIntegTestCase; import org.junit.AfterClass; @@ -34,6 +35,10 @@ import java.util.concurrent.ConcurrentMap; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; public class GoogleCloudStorageBlobStoreRepositoryTests extends ESBlobStoreRepositoryIntegTestCase { @@ -49,9 +54,10 @@ protected Collection> nodePlugins() { } @Override - protected void createTestRepository(String name) { + protected void createTestRepository(String name, boolean verify) { assertAcked(client().admin().cluster().preparePutRepository(name) .setType(GoogleCloudStorageRepository.TYPE) + .setVerify(verify) .setSettings(Settings.builder() .put("bucket", BUCKET) .put("base_path", GoogleCloudStorageBlobStoreRepositoryTests.class.getSimpleName()) @@ -59,6 +65,16 @@ protected void createTestRepository(String name) { .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES))); } + @Override + protected void afterCreationCheck(Repository repository, boolean verify) { + assertThat(repository, instanceOf(GoogleCloudStorageRepository.class)); + + GoogleCloudStorageRepository gcsRepository = (GoogleCloudStorageRepository) repository; + + assertThat("gcs blob store has to be lazy initialized", + gcsRepository.innerBlobStore(), verify ? 
is(notNullValue()) : is(nullValue())); + } + @AfterClass public static void wipeRepository() { blobs.clear(); diff --git a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java index 5ef1c7d18d666..350684368567e 100644 --- a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java +++ b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java @@ -42,7 +42,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.blobstore.BlobPath; -import org.elasticsearch.common.blobstore.BlobStore; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; @@ -51,7 +50,7 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.repositories.blobstore.BlobStoreRepository; -public final class HdfsRepository extends BlobStoreRepository { +public final class HdfsRepository extends BlobStoreRepository { private static final Logger LOGGER = Loggers.getLogger(HdfsRepository.class); @@ -62,23 +61,18 @@ public final class HdfsRepository extends BlobStoreRepository { private final boolean compress; private final BlobPath basePath = BlobPath.cleanPath(); - private HdfsBlobStore blobStore; - // buffer size passed to HDFS read/write methods // TODO: why 100KB? 
private static final ByteSizeValue DEFAULT_BUFFER_SIZE = new ByteSizeValue(100, ByteSizeUnit.KB); public HdfsRepository(RepositoryMetaData metadata, Environment environment, - NamedXContentRegistry namedXContentRegistry) throws IOException { + NamedXContentRegistry namedXContentRegistry) { super(metadata, environment.settings(), namedXContentRegistry); this.environment = environment; this.chunkSize = metadata.settings().getAsBytesSize("chunk_size", null); this.compress = metadata.settings().getAsBoolean("compress", false); - } - @Override - protected void doStart() { String uriSetting = getMetadata().settings().get("uri"); if (Strings.hasText(uriSetting) == false) { throw new IllegalArgumentException("No 'uri' defined for hdfs snapshot/restore"); @@ -101,8 +95,6 @@ protected void doStart() { // initialize our blobstore using elevated privileges. SpecialPermission.check(); - blobStore = AccessController.doPrivileged((PrivilegedAction) () -> createBlobstore(uri, pathSetting, getMetadata().settings())); - super.doStart(); } private HdfsBlobStore createBlobstore(URI uri, String path, Settings repositorySettings) { @@ -229,7 +221,33 @@ private static String getHostName() { } @Override - protected BlobStore blobStore() { + protected HdfsBlobStore createBlobStore() { + String uriSetting = getMetadata().settings().get("uri"); + if (Strings.hasText(uriSetting) == false) { + throw new IllegalArgumentException("No 'uri' defined for hdfs snapshot/restore"); + } + URI uri = URI.create(uriSetting); + if ("hdfs".equalsIgnoreCase(uri.getScheme()) == false) { + throw new IllegalArgumentException(String.format(Locale.ROOT, + "Invalid scheme [%s] specified in uri [%s]; only 'hdfs' uri allowed for hdfs snapshot/restore", uri.getScheme(), uriSetting)); + } + if (Strings.hasLength(uri.getPath()) && uri.getPath().equals("/") == false) { + throw new IllegalArgumentException(String.format(Locale.ROOT, + "Use 'path' option to specify a path [%s], not the uri [%s] for hdfs snapshot/restore", 
uri.getPath(), uriSetting)); + } + + String pathSetting = getMetadata().settings().get("path"); + // get configuration + if (pathSetting == null) { + throw new IllegalArgumentException("No 'path' defined for hdfs snapshot/restore"); + } + + // initialize our blobstore using elevated privileges. + SpecialPermission.check(); + + final HdfsBlobStore blobStore = + AccessController.doPrivileged((PrivilegedAction) + () -> createBlobstore(uri, pathSetting, getMetadata().settings())); return blobStore; } diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java index 063e266837bad..6024eeb50d797 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java @@ -24,7 +24,6 @@ import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.Strings; import org.elasticsearch.common.blobstore.BlobPath; -import org.elasticsearch.common.blobstore.BlobStore; import org.elasticsearch.common.settings.SecureSetting; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Setting; @@ -36,7 +35,6 @@ import org.elasticsearch.repositories.RepositoryException; import org.elasticsearch.repositories.blobstore.BlobStoreRepository; -import java.io.IOException; import java.util.Map; import java.util.function.Function; @@ -52,7 +50,7 @@ *
{@code compress}
If set to true metadata files will be stored compressed. Defaults to false.
* */ -class S3Repository extends BlobStoreRepository { +class S3Repository extends BlobStoreRepository { static final String TYPE = "s3"; @@ -145,7 +143,7 @@ class S3Repository extends BlobStoreRepository { */ static final Setting BASE_PATH_SETTING = Setting.simpleString("base_path"); - private final S3BlobStore blobStore; + private final AwsS3Service awsService; private final BlobPath basePath; @@ -157,15 +155,15 @@ class S3Repository extends BlobStoreRepository { * Constructs an s3 backed repository */ S3Repository(RepositoryMetaData metadata, Settings settings, NamedXContentRegistry namedXContentRegistry, - AwsS3Service awsService) throws IOException { + AwsS3Service awsService) { super(metadata, settings, namedXContentRegistry); + this.awsService = awsService; final String bucket = BUCKET_SETTING.get(metadata.settings()); if (bucket == null) { throw new RepositoryException(metadata.name(), "No bucket defined for s3 repository"); } - final boolean serverSideEncryption = SERVER_SIDE_ENCRYPTION_SETTING.get(metadata.settings()); final ByteSizeValue bufferSize = BUFFER_SIZE_SETTING.get(metadata.settings()); this.chunkSize = CHUNK_SIZE_SETTING.get(metadata.settings()); this.compress = COMPRESS_SETTING.get(metadata.settings()); @@ -176,33 +174,51 @@ class S3Repository extends BlobStoreRepository { ") can't be lower than " + BUFFER_SIZE_SETTING.getKey() + " (" + bufferSize + ")."); } + final String basePath = BASE_PATH_SETTING.get(metadata.settings()); + if (Strings.hasLength(basePath)) { + this.basePath = new BlobPath().add(basePath); + } else { + this.basePath = BlobPath.cleanPath(); + } + } + + @Override + protected S3BlobStore createBlobStore() { + final String bucket = BUCKET_SETTING.get(metadata.settings()); + if (bucket == null) { + throw new RepositoryException(metadata.name(), "No bucket defined for s3 repository"); + } + + final boolean serverSideEncryption = SERVER_SIDE_ENCRYPTION_SETTING.get(metadata.settings()); + final ByteSizeValue bufferSize = 
BUFFER_SIZE_SETTING.get(metadata.settings()); + // Parse and validate the user's S3 Storage Class setting final String storageClass = STORAGE_CLASS_SETTING.get(metadata.settings()); final String cannedACL = CANNED_ACL_SETTING.get(metadata.settings()); final String clientName = CLIENT_NAME.get(metadata.settings()); logger.debug("using bucket [{}], chunk_size [{}], server_side_encryption [{}], " + - "buffer_size [{}], cannedACL [{}], storageClass [{}]", + "buffer_size [{}], cannedACL [{}], storageClass [{}]", bucket, chunkSize, serverSideEncryption, bufferSize, cannedACL, storageClass); - // deprecated behavior: override client credentials from the cluster state // (repository settings) if (S3ClientSettings.checkDeprecatedCredentials(metadata.settings())) { overrideCredentialsFromClusterState(awsService); } - blobStore = new S3BlobStore(settings, awsService, clientName, bucket, serverSideEncryption, bufferSize, cannedACL, storageClass); - final String basePath = BASE_PATH_SETTING.get(metadata.settings()); - if (Strings.hasLength(basePath)) { - this.basePath = new BlobPath().add(basePath); - } else { - this.basePath = BlobPath.cleanPath(); - } + return new S3BlobStore(settings, awsService, clientName, bucket, serverSideEncryption, bufferSize, cannedACL, storageClass); + } + + // only use for testing + @Override + protected S3BlobStore blobStore() { + return super.blobStore(); } + // only use for testing @Override - protected BlobStore blobStore() { - return blobStore; + protected S3BlobStore innerBlobStore() { + return super.innerBlobStore(); } @Override diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java index 93561c94d2b9a..3aa84a1f308f0 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java +++ 
b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java @@ -74,8 +74,7 @@ protected S3RepositoryPlugin(AwsS3Service awsS3Service) { } // proxy method for testing - protected S3Repository getS3Repository(RepositoryMetaData metadata, Settings settings, NamedXContentRegistry namedXContentRegistry) - throws IOException { + protected S3Repository getS3Repository(RepositoryMetaData metadata, Settings settings, NamedXContentRegistry namedXContentRegistry) { return new S3Repository(metadata, settings, namedXContentRegistry, awsS3Service); } diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java index f3bd894977999..65a0403b5e070 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java @@ -109,7 +109,7 @@ public void testRepositoryCredentialsOverrideSecureCredentials() throws IOExcept .put(S3Repository.SECRET_KEY_SETTING.getKey(), "insecure_aws_secret").build()); try (S3RepositoryPlugin s3Plugin = new ProxyS3RepositoryPlugin(settings); S3Repository s3repo = s3Plugin.getS3Repository(metadata, Settings.EMPTY, NamedXContentRegistry.EMPTY); - AmazonS3Reference s3Ref = ((S3BlobStore) s3repo.blobStore()).clientReference()) { + AmazonS3Reference s3Ref = s3repo.blobStore().clientReference()) { final AWSCredentials credentials = ((ProxyS3RepositoryPlugin.ClientAndCredentials) s3Ref.client()).credentials.getCredentials(); assertThat(credentials.getAWSAccessKeyId(), is("insecure_aws_key")); assertThat(credentials.getAWSSecretKey(), is("insecure_aws_secret")); @@ -132,7 +132,7 @@ public void testRepositoryCredentialsOnly() throws IOException { .build()); try (S3RepositoryPlugin s3Plugin = new 
ProxyS3RepositoryPlugin(Settings.EMPTY); S3Repository s3repo = s3Plugin.getS3Repository(metadata, Settings.EMPTY, NamedXContentRegistry.EMPTY); - AmazonS3Reference s3Ref = ((S3BlobStore) s3repo.blobStore()).clientReference()) { + AmazonS3Reference s3Ref = s3repo.blobStore().clientReference()) { final AWSCredentials credentials = ((ProxyS3RepositoryPlugin.ClientAndCredentials) s3Ref.client()).credentials.getCredentials(); assertThat(credentials.getAWSAccessKeyId(), is("insecure_aws_key")); assertThat(credentials.getAWSSecretKey(), is("insecure_aws_secret")); @@ -163,7 +163,7 @@ public void testReinitSecureCredentials() throws IOException { final RepositoryMetaData metadata = new RepositoryMetaData("dummy-repo", "mock", builder.build()); try (S3RepositoryPlugin s3Plugin = new ProxyS3RepositoryPlugin(settings); S3Repository s3repo = s3Plugin.getS3Repository(metadata, Settings.EMPTY, NamedXContentRegistry.EMPTY)) { - try (AmazonS3Reference s3Ref = ((S3BlobStore) s3repo.blobStore()).clientReference()) { + try (AmazonS3Reference s3Ref = s3repo.blobStore().clientReference()) { final AWSCredentials credentials = ((ProxyS3RepositoryPlugin.ClientAndCredentials) s3Ref.client()).credentials .getCredentials(); if (repositorySettings) { @@ -190,7 +190,7 @@ public void testReinitSecureCredentials() throws IOException { } } // check credentials have been updated - try (AmazonS3Reference s3Ref = ((S3BlobStore) s3repo.blobStore()).clientReference()) { + try (AmazonS3Reference s3Ref = s3repo.blobStore().clientReference()) { final AWSCredentials newCredentials = ((ProxyS3RepositoryPlugin.ClientAndCredentials) s3Ref.client()).credentials .getCredentials(); assertThat(newCredentials.getAWSAccessKeyId(), is("new_secret_aws_key")); diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java index 2843390f1aa80..076655aca33c5 100644 
--- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java @@ -51,7 +51,11 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; import static org.mockito.Mockito.mock; public class S3BlobStoreRepositoryTests extends ESBlobStoreRepositoryIntegTestCase { @@ -84,8 +88,11 @@ public static void wipeRepository() { } @Override - protected void createTestRepository(final String name) { - assertAcked(client().admin().cluster().preparePutRepository(name).setType(S3Repository.TYPE).setSettings(Settings.builder() + protected void createTestRepository(final String name, boolean verify) { + assertAcked(client().admin().cluster().preparePutRepository(name) + .setType(S3Repository.TYPE) + .setVerify(verify) + .setSettings(Settings.builder() .put(S3Repository.BUCKET_SETTING.getKey(), bucket) .put(S3Repository.CLIENT_NAME.getKey(), client) .put(S3Repository.BUFFER_SIZE_SETTING.getKey(), bufferSize) @@ -96,6 +103,16 @@ protected void createTestRepository(final String name) { .put(S3Repository.SECRET_KEY_SETTING.getKey(), "not_used_but_this_is_a_secret"))); } + @Override + protected void afterCreationCheck(Repository repository, boolean verify) { + assertThat(repository, instanceOf(S3Repository.class)); + + S3Repository s3Repository = (S3Repository) repository; + + assertThat("s3 blob store has to be lazy initialized", + s3Repository.innerBlobStore(), verify ? 
is(notNullValue()) : is(nullValue())); + } + @Override protected Collection> nodePlugins() { return Collections.singletonList(TestS3RepositoryPlugin.class); @@ -125,7 +142,7 @@ void overrideCredentialsFromClusterState(AwsS3Service awsService) { public void testInsecureRepositoryCredentials() throws Exception { final String repositoryName = "testInsecureRepositoryCredentials"; - createTestRepository(repositoryName); + createAndCheckTestRepository(repositoryName); final NodeClient nodeClient = internalCluster().getInstance(NodeClient.class); final RestGetRepositoriesAction getRepoAction = new RestGetRepositoriesAction(Settings.EMPTY, mock(RestController.class), internalCluster().getInstance(SettingsFilter.class)); diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java index 5c0aada66585c..4b1e28bc374f6 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java @@ -30,11 +30,13 @@ import org.elasticsearch.repositories.RepositoryException; import org.elasticsearch.test.ESTestCase; import org.hamcrest.Matchers; -import java.io.IOException; import java.util.Collections; import java.util.Map; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.nullValue; public class S3RepositoryTests extends ESTestCase { @@ -76,7 +78,7 @@ public void close() { } } - public void testInvalidChunkBufferSizeSettings() throws IOException { + public void testInvalidChunkBufferSizeSettings() { // chunk < buffer should fail final Settings s1 = bufferAndChunkSettings(10, 5); final Exception e1 = expectThrows(RepositoryException.class, @@ -112,7 +114,7 @@ private RepositoryMetaData 
getRepositoryMetaData(Settings settings) { return new RepositoryMetaData("dummy-repo", "mock", Settings.builder().put(settings).build()); } - public void testBasePathSetting() throws IOException { + public void testBasePathSetting() { final RepositoryMetaData metadata = new RepositoryMetaData("dummy-repo", "mock", Settings.builder() .put(S3Repository.BASE_PATH_SETTING.getKey(), "foo/bar").build()); try (S3Repository s3repo = new S3Repository(metadata, Settings.EMPTY, NamedXContentRegistry.EMPTY, new DummyS3Service())) { @@ -120,10 +122,12 @@ public void testBasePathSetting() throws IOException { } } - public void testDefaultBufferSize() throws IOException { + public void testDefaultBufferSize() { final RepositoryMetaData metadata = new RepositoryMetaData("dummy-repo", "mock", Settings.EMPTY); try (S3Repository s3repo = new S3Repository(metadata, Settings.EMPTY, NamedXContentRegistry.EMPTY, new DummyS3Service())) { - final long defaultBufferSize = ((S3BlobStore) s3repo.blobStore()).bufferSizeInBytes(); + assertThat(s3repo.innerBlobStore(), is(nullValue())); + final long defaultBufferSize = s3repo.blobStore().bufferSizeInBytes(); + assertThat(s3repo.innerBlobStore(), not(nullValue())); assertThat(defaultBufferSize, Matchers.lessThanOrEqualTo(100L * 1024 * 1024)); assertThat(defaultBufferSize, Matchers.greaterThanOrEqualTo(5L * 1024 * 1024)); } diff --git a/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java b/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java index d5b2a6413e9a9..0bade8cdc8741 100644 --- a/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java +++ b/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java @@ -38,6 +38,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.snapshots.RestoreService; import org.elasticsearch.snapshots.SnapshotsService; +import org.elasticsearch.threadpool.ThreadPool; import 
org.elasticsearch.transport.TransportService; import java.io.IOException; @@ -58,16 +59,20 @@ public class RepositoriesService extends AbstractComponent implements ClusterSta private final ClusterService clusterService; + private final ThreadPool threadPool; + private final VerifyNodeRepositoryAction verifyAction; private volatile Map repositories = Collections.emptyMap(); @Inject public RepositoriesService(Settings settings, ClusterService clusterService, TransportService transportService, - Map typesRegistry) { + Map typesRegistry, + ThreadPool threadPool) { super(settings); this.typesRegistry = typesRegistry; this.clusterService = clusterService; + this.threadPool = threadPool; // Doesn't make sense to maintain repositories on non-master and non-data nodes // Nothing happens there anyway if (DiscoveryNode.isDataNode(settings) || DiscoveryNode.isMasterNode(settings)) { @@ -208,39 +213,47 @@ public boolean mustAck(DiscoveryNode discoveryNode) { public void verifyRepository(final String repositoryName, final ActionListener listener) { final Repository repository = repository(repositoryName); try { - final String verificationToken = repository.startVerification(); - if (verificationToken != null) { + threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(() -> { try { - verifyAction.verify(repositoryName, verificationToken, new ActionListener() { - @Override - public void onResponse(VerifyResponse verifyResponse) { + final String verificationToken = repository.startVerification(); + if (verificationToken != null) { + try { + verifyAction.verify(repositoryName, verificationToken, new ActionListener() { + @Override + public void onResponse(VerifyResponse verifyResponse) { + try { + repository.endVerification(verificationToken); + } catch (Exception e) { + logger.warn(() -> new ParameterizedMessage( + "[{}] failed to finish repository verification", repositoryName), e); + listener.onFailure(e); + return; + } + listener.onResponse(verifyResponse); + } + + @Override + 
public void onFailure(Exception e) { + listener.onFailure(e); + } + }); + } catch (Exception e) { try { repository.endVerification(verificationToken); - } catch (Exception e) { - logger.warn(() -> new ParameterizedMessage("[{}] failed to finish repository verification", repositoryName), e); - listener.onFailure(e); - return; + } catch (Exception inner) { + inner.addSuppressed(e); + logger.warn(() -> new ParameterizedMessage( + "[{}] failed to finish repository verification", repositoryName), inner); } - listener.onResponse(verifyResponse); - } - - @Override - public void onFailure(Exception e) { listener.onFailure(e); } - }); - } catch (Exception e) { - try { - repository.endVerification(verificationToken); - } catch (Exception inner) { - inner.addSuppressed(e); - logger.warn(() -> new ParameterizedMessage("[{}] failed to finish repository verification", repositoryName), inner); + } else { + listener.onResponse(new VerifyResponse(new DiscoveryNode[0], new VerificationFailure[0])); } + } catch (Exception e) { listener.onFailure(e); } - } else { - listener.onResponse(new VerifyResponse(new DiscoveryNode[0], new VerificationFailure[0])); - } + }); } catch (Exception e) { listener.onFailure(e); } diff --git a/server/src/main/java/org/elasticsearch/repositories/VerifyNodeRepositoryAction.java b/server/src/main/java/org/elasticsearch/repositories/VerifyNodeRepositoryAction.java index 380ae97408016..fbaf369912e8a 100644 --- a/server/src/main/java/org/elasticsearch/repositories/VerifyNodeRepositoryAction.java +++ b/server/src/main/java/org/elasticsearch/repositories/VerifyNodeRepositoryAction.java @@ -61,7 +61,7 @@ public VerifyNodeRepositoryAction(Settings settings, TransportService transportS this.transportService = transportService; this.clusterService = clusterService; this.repositoriesService = repositoriesService; - transportService.registerRequestHandler(ACTION_NAME, VerifyNodeRepositoryRequest::new, ThreadPool.Names.SAME, new VerifyNodeRepositoryRequestHandler()); 
+ transportService.registerRequestHandler(ACTION_NAME, VerifyNodeRepositoryRequest::new, ThreadPool.Names.SNAPSHOT, new VerifyNodeRepositoryRequestHandler()); } public void verify(String repository, String verificationToken, final ActionListener listener) { diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index 618dd3b8bc3b9..f13078002789e 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -102,6 +102,7 @@ import org.elasticsearch.snapshots.SnapshotInfo; import org.elasticsearch.snapshots.SnapshotMissingException; import org.elasticsearch.snapshots.SnapshotShardFailure; +import org.elasticsearch.threadpool.ThreadPool; import java.io.FilterInputStream; import java.io.IOException; @@ -126,8 +127,8 @@ /** * BlobStore - based implementation of Snapshot Repository *

- * This repository works with any {@link BlobStore} implementation. The blobStore should be initialized in the derived - * class before {@link #doStart()} is called. + * This repository works with any {@link BlobStore} implementation. The blobStore could be (and preferred) lazy initialized in + * {@link #createBlobStore()}. *

* BlobStoreRepository maintains the following structure in the blob store *

@@ -167,9 +168,7 @@
  * }
  * 
*/ -public abstract class BlobStoreRepository extends AbstractLifecycleComponent implements Repository { - - private BlobContainer snapshotsBlobContainer; +public abstract class BlobStoreRepository extends AbstractLifecycleComponent implements Repository { protected final RepositoryMetaData metadata; @@ -225,6 +224,12 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp private final ChecksumBlobStoreFormat indexShardSnapshotsFormat; + private final Object lock = new Object(); + + private volatile BlobContainer snapshotsBlobContainer; + + private volatile BS blobStore; + /** * Constructs new BlobStoreRepository * @@ -251,7 +256,6 @@ protected BlobStoreRepository(RepositoryMetaData metadata, Settings globalSettin @Override protected void doStart() { - this.snapshotsBlobContainer = blobStore().blobContainer(basePath()); globalMetaDataFormat = new ChecksumBlobStoreFormat<>(METADATA_CODEC, METADATA_NAME_FORMAT, MetaData::fromXContent, namedXContentRegistry, isCompress()); indexMetaDataFormat = new ChecksumBlobStoreFormat<>(INDEX_METADATA_CODEC, METADATA_NAME_FORMAT, @@ -265,17 +269,71 @@ protected void doStop() {} @Override protected void doClose() { - try { - blobStore().close(); - } catch (Exception t) { - logger.warn("cannot close blob store", t); + BlobStore store = blobStore; + if (store != null) { + try { + store.close(); + } catch (Exception t) { + logger.warn("cannot close blob store", t); + } + } + } + + // package private, only use for testing + BlobContainer blobContainer() { + return snapshotsBlobContainer; + } + + /** + * maintains single lazy instance of {@link BlobContainer} + */ + protected BlobContainer snapshotsBlobContainer() { + BlobContainer blobContainer = snapshotsBlobContainer; + if (blobContainer == null) { + synchronized (lock) { + blobContainer = snapshotsBlobContainer; + if (blobContainer == null) { + blobContainer = blobStore().blobContainer(basePath()); + snapshotsBlobContainer = blobContainer; + } + } + } + 
+ return blobContainer; + } + + /** + * maintains single lazy instance of {@link BlobStore} + */ + protected BS blobStore() { + BS store = blobStore; + if (store == null) { + synchronized (lock) { + store = blobStore; + if (store == null) { + try { + store = createBlobStore(); + } catch (RepositoryException e) { + throw e; + } catch (Exception e) { + throw new RepositoryException(metadata.name(), "cannot create blob store" , e); + } + blobStore = store; + } + } } + return store; + } + + // for test purposes only + protected BS innerBlobStore() { + return blobStore; } /** - * Returns the BlobStore to read and write data. + * Creates new BlobStore to read and write data. */ - protected abstract BlobStore blobStore(); + protected abstract BS createBlobStore() throws Exception; /** * Returns base path of the repository @@ -319,12 +377,12 @@ public void initializeSnapshot(SnapshotId snapshotId, List indices, Met if (repositoryData.getAllSnapshotIds().stream().anyMatch(s -> s.getName().equals(snapshotName))) { throw new InvalidSnapshotNameException(metadata.name(), snapshotId.getName(), "snapshot with the same name already exists"); } - if (snapshotFormat.exists(snapshotsBlobContainer, snapshotId.getUUID())) { + if (snapshotFormat.exists(snapshotsBlobContainer(), snapshotId.getUUID())) { throw new InvalidSnapshotNameException(metadata.name(), snapshotId.getName(), "snapshot with the same name already exists"); } // Write Global MetaData - globalMetaDataFormat.write(clusterMetaData, snapshotsBlobContainer, snapshotId.getUUID()); + globalMetaDataFormat.write(clusterMetaData, snapshotsBlobContainer(), snapshotId.getUUID()); // write the index metadata for each index in the snapshot for (IndexId index : indices) { @@ -421,7 +479,7 @@ public void deleteSnapshot(SnapshotId snapshotId, long repositoryStateId) { private void deleteSnapshotBlobIgnoringErrors(final SnapshotInfo snapshotInfo, final String blobId) { try { - snapshotFormat.delete(snapshotsBlobContainer, blobId); + 
snapshotFormat.delete(snapshotsBlobContainer(), blobId); } catch (IOException e) { if (snapshotInfo != null) { logger.warn(() -> new ParameterizedMessage("[{}] Unable to delete snapshot file [{}]", @@ -434,7 +492,7 @@ private void deleteSnapshotBlobIgnoringErrors(final SnapshotInfo snapshotInfo, f private void deleteGlobalMetaDataBlobIgnoringErrors(final SnapshotInfo snapshotInfo, final String blobId) { try { - globalMetaDataFormat.delete(snapshotsBlobContainer, blobId); + globalMetaDataFormat.delete(snapshotsBlobContainer(), blobId); } catch (IOException e) { if (snapshotInfo != null) { logger.warn(() -> new ParameterizedMessage("[{}] Unable to delete global metadata file [{}]", @@ -472,7 +530,7 @@ public SnapshotInfo finalizeSnapshot(final SnapshotId snapshotId, startTime, failure, System.currentTimeMillis(), totalShards, shardFailures, includeGlobalState); try { - snapshotFormat.write(blobStoreSnapshot, snapshotsBlobContainer, snapshotId.getUUID()); + snapshotFormat.write(blobStoreSnapshot, snapshotsBlobContainer(), snapshotId.getUUID()); final RepositoryData repositoryData = getRepositoryData(); writeIndexGen(repositoryData.addSnapshot(snapshotId, blobStoreSnapshot.state(), indices), repositoryStateId); } catch (FileAlreadyExistsException ex) { @@ -490,7 +548,7 @@ public SnapshotInfo finalizeSnapshot(final SnapshotId snapshotId, @Override public SnapshotInfo getSnapshotInfo(final SnapshotId snapshotId) { try { - return snapshotFormat.read(snapshotsBlobContainer, snapshotId.getUUID()); + return snapshotFormat.read(snapshotsBlobContainer(), snapshotId.getUUID()); } catch (NoSuchFileException ex) { throw new SnapshotMissingException(metadata.name(), snapshotId, ex); } catch (IOException | NotXContentException ex) { @@ -501,7 +559,7 @@ public SnapshotInfo getSnapshotInfo(final SnapshotId snapshotId) { @Override public MetaData getSnapshotGlobalMetaData(final SnapshotId snapshotId) { try { - return globalMetaDataFormat.read(snapshotsBlobContainer, 
snapshotId.getUUID()); + return globalMetaDataFormat.read(snapshotsBlobContainer(), snapshotId.getUUID()); } catch (NoSuchFileException ex) { throw new SnapshotMissingException(metadata.name(), snapshotId, ex); } catch (IOException ex) { @@ -543,11 +601,18 @@ public long getRestoreThrottleTimeInNanos() { return restoreRateLimitingTimeInNanos.count(); } + protected void verificationThreadCheck() { + assert Thread.currentThread().getName().contains(ThreadPool.Names.SNAPSHOT) : + "Expected current thread [" + Thread.currentThread() + "] to be the snapshot thread."; + } + @Override public String startVerification() { + verificationThreadCheck(); try { if (isReadOnly()) { - // It's readonly - so there is not much we can do here to verify it + // It's readonly - so there is not much we can do here to verify it apart try to create blobStore() + blobStore(); return null; } else { String seed = UUIDs.randomBase64UUID(); @@ -584,7 +649,7 @@ public RepositoryData getRepositoryData() { final String snapshotsIndexBlobName = INDEX_FILE_PREFIX + Long.toString(indexGen); RepositoryData repositoryData; - try (InputStream blob = snapshotsBlobContainer.readBlob(snapshotsIndexBlobName)) { + try (InputStream blob = snapshotsBlobContainer().readBlob(snapshotsIndexBlobName)) { BytesStreamOutput out = new BytesStreamOutput(); Streams.copy(blob, out); // EMPTY is safe here because RepositoryData#fromXContent calls namedObject @@ -598,7 +663,7 @@ public RepositoryData getRepositoryData() { } // now load the incompatible snapshot ids, if they exist - try (InputStream blob = snapshotsBlobContainer.readBlob(INCOMPATIBLE_SNAPSHOTS_BLOB)) { + try (InputStream blob = snapshotsBlobContainer().readBlob(INCOMPATIBLE_SNAPSHOTS_BLOB)) { BytesStreamOutput out = new BytesStreamOutput(); Streams.copy(blob, out); try (XContentParser parser = XContentHelper.createParser(NamedXContentRegistry.EMPTY, @@ -636,11 +701,6 @@ public boolean isReadOnly() { return readOnly; } - // package private, only use for 
testing - BlobContainer blobContainer() { - return snapshotsBlobContainer; - } - protected void writeIndexGen(final RepositoryData repositoryData, final long repositoryStateId) throws IOException { assert isReadOnly() == false; // can not write to a read only repository final long currentGen = latestIndexBlobId(); @@ -668,8 +728,8 @@ protected void writeIndexGen(final RepositoryData repositoryData, final long rep // delete the N-2 index file if it exists, keep the previous one around as a backup if (isReadOnly() == false && newGen - 2 >= 0) { final String oldSnapshotIndexFile = INDEX_FILE_PREFIX + Long.toString(newGen - 2); - if (snapshotsBlobContainer.blobExists(oldSnapshotIndexFile)) { - snapshotsBlobContainer.deleteBlob(oldSnapshotIndexFile); + if (snapshotsBlobContainer().blobExists(oldSnapshotIndexFile)) { + snapshotsBlobContainer().deleteBlob(oldSnapshotIndexFile); } } @@ -679,8 +739,8 @@ protected void writeIndexGen(final RepositoryData repositoryData, final long rep bStream.writeLong(newGen); genBytes = bStream.bytes(); } - if (snapshotsBlobContainer.blobExists(INDEX_LATEST_BLOB)) { - snapshotsBlobContainer.deleteBlob(INDEX_LATEST_BLOB); + if (snapshotsBlobContainer().blobExists(INDEX_LATEST_BLOB)) { + snapshotsBlobContainer().deleteBlob(INDEX_LATEST_BLOB); } logger.debug("Repository [{}] updating index.latest with generation [{}]", metadata.name(), newGen); writeAtomic(INDEX_LATEST_BLOB, genBytes); @@ -702,8 +762,8 @@ void writeIncompatibleSnapshots(RepositoryData repositoryData) throws IOExceptio } bytes = bStream.bytes(); } - if (snapshotsBlobContainer.blobExists(INCOMPATIBLE_SNAPSHOTS_BLOB)) { - snapshotsBlobContainer.deleteBlob(INCOMPATIBLE_SNAPSHOTS_BLOB); + if (snapshotsBlobContainer().blobExists(INCOMPATIBLE_SNAPSHOTS_BLOB)) { + snapshotsBlobContainer().deleteBlob(INCOMPATIBLE_SNAPSHOTS_BLOB); } // write the incompatible snapshots blob writeAtomic(INCOMPATIBLE_SNAPSHOTS_BLOB, bytes); @@ -744,7 +804,7 @@ long latestIndexBlobId() throws IOException { 
// package private for testing long readSnapshotIndexLatestBlob() throws IOException { - try (InputStream blob = snapshotsBlobContainer.readBlob(INDEX_LATEST_BLOB)) { + try (InputStream blob = snapshotsBlobContainer().readBlob(INDEX_LATEST_BLOB)) { BytesStreamOutput out = new BytesStreamOutput(); Streams.copy(blob, out); return Numbers.bytesToLong(out.bytes().toBytesRef()); @@ -752,7 +812,7 @@ long readSnapshotIndexLatestBlob() throws IOException { } private long listBlobsToGetLatestIndexId() throws IOException { - Map blobs = snapshotsBlobContainer.listBlobsByPrefix(INDEX_FILE_PREFIX); + Map blobs = snapshotsBlobContainer().listBlobsByPrefix(INDEX_FILE_PREFIX); long latest = RepositoryData.EMPTY_REPO_GEN; if (blobs.isEmpty()) { // no snapshot index blobs have been written yet @@ -774,7 +834,7 @@ private long listBlobsToGetLatestIndexId() throws IOException { private void writeAtomic(final String blobName, final BytesReference bytesRef) throws IOException { try (InputStream stream = bytesRef.streamInput()) { - snapshotsBlobContainer.writeBlobAtomic(blobName, stream, bytesRef.length()); + snapshotsBlobContainer().writeBlobAtomic(blobName, stream, bytesRef.length()); } } @@ -814,6 +874,7 @@ public IndexShardSnapshotStatus getShardSnapshotStatus(SnapshotId snapshotId, Ve @Override public void verify(String seed, DiscoveryNode localNode) { + verificationThreadCheck(); BlobContainer testBlobContainer = blobStore().blobContainer(basePath().add(testBlobPrefix(seed))); if (testBlobContainer.blobExists("master.dat")) { try { diff --git a/server/src/main/java/org/elasticsearch/repositories/fs/FsRepository.java b/server/src/main/java/org/elasticsearch/repositories/fs/FsRepository.java index 4d4ab60feef0f..6be1b0882b920 100644 --- a/server/src/main/java/org/elasticsearch/repositories/fs/FsRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/fs/FsRepository.java @@ -31,7 +31,6 @@ import org.elasticsearch.repositories.RepositoryException; import 
org.elasticsearch.repositories.blobstore.BlobStoreRepository; -import java.io.IOException; import java.nio.file.Path; import java.util.function.Function; @@ -46,7 +45,7 @@ *
{@code compress}
If set to true metadata files will be stored compressed. Defaults to false.
* */ -public class FsRepository extends BlobStoreRepository { +public class FsRepository extends BlobStoreRepository { public static final String TYPE = "fs"; @@ -61,8 +60,7 @@ public class FsRepository extends BlobStoreRepository { public static final Setting COMPRESS_SETTING = Setting.boolSetting("compress", false, Property.NodeScope); public static final Setting REPOSITORIES_COMPRESS_SETTING = Setting.boolSetting("repositories.fs.compress", false, Property.NodeScope); - - private final FsBlobStore blobStore; + private final Environment environment; private ByteSizeValue chunkSize; @@ -74,37 +72,55 @@ public class FsRepository extends BlobStoreRepository { * Constructs a shared file system repository. */ public FsRepository(RepositoryMetaData metadata, Environment environment, - NamedXContentRegistry namedXContentRegistry) throws IOException { + NamedXContentRegistry namedXContentRegistry) { super(metadata, environment.settings(), namedXContentRegistry); - String location = REPOSITORIES_LOCATION_SETTING.get(metadata.settings()); - if (location.isEmpty()) { - logger.warn("the repository location is missing, it should point to a shared file system location that is available on all master and data nodes"); - throw new RepositoryException(metadata.name(), "missing location"); - } - Path locationFile = environment.resolveRepoFile(location); - if (locationFile == null) { - if (environment.repoFiles().length > 0) { - logger.warn("The specified location [{}] doesn't start with any repository paths specified by the path.repo setting: [{}] ", location, environment.repoFiles()); - throw new RepositoryException(metadata.name(), "location [" + location + "] doesn't match any of the locations specified by path.repo"); - } else { - logger.warn("The specified location [{}] should start with a repository path specified by the path.repo setting, but the path.repo setting was not set on this node", location); - throw new RepositoryException(metadata.name(), "location [" + location 
+ "] doesn't match any of the locations specified by path.repo because this setting is empty"); - } - } + this.environment = environment; + checkAndGetLocation(metadata, environment); - blobStore = new FsBlobStore(settings, locationFile); if (CHUNK_SIZE_SETTING.exists(metadata.settings())) { this.chunkSize = CHUNK_SIZE_SETTING.get(metadata.settings()); } else { this.chunkSize = REPOSITORIES_CHUNK_SIZE_SETTING.get(settings); } - this.compress = COMPRESS_SETTING.exists(metadata.settings()) ? COMPRESS_SETTING.get(metadata.settings()) : REPOSITORIES_COMPRESS_SETTING.get(settings); + this.compress = COMPRESS_SETTING.exists(metadata.settings()) + ? COMPRESS_SETTING.get(metadata.settings()) : REPOSITORIES_COMPRESS_SETTING.get(settings); this.basePath = BlobPath.cleanPath(); } @Override - protected BlobStore blobStore() { - return blobStore; + protected BlobStore createBlobStore() throws Exception { + final Path locationFile = checkAndGetLocation(metadata, environment); + return new FsBlobStore(settings, locationFile); + } + + // only use for testing + @Override + protected BlobStore innerBlobStore() { + return super.innerBlobStore(); + } + + private Path checkAndGetLocation(RepositoryMetaData metadata, Environment environment) { + String location = REPOSITORIES_LOCATION_SETTING.get(metadata.settings()); + if (location.isEmpty()) { + logger.warn("the repository location is missing, it should point to a shared file system location" + + " that is available on all master and data nodes"); + throw new RepositoryException(metadata.name(), "missing location"); + } + Path locationFile = environment.resolveRepoFile(location); + if (locationFile == null) { + if (environment.repoFiles().length > 0) { + logger.warn("The specified location [{}] doesn't start with any " + + "repository paths specified by the path.repo setting: [{}] ", location, environment.repoFiles()); + throw new RepositoryException(metadata.name(), "location [" + location + + "] doesn't match any of the locations 
specified by path.repo"); + } else { + logger.warn("The specified location [{}] should start with a repository path specified by" + + " the path.repo setting, but the path.repo setting was not set on this node", location); + throw new RepositoryException(metadata.name(), "location [" + location + + "] doesn't match any of the locations specified by path.repo because this setting is empty"); + } + } + return locationFile; } @Override diff --git a/server/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java b/server/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java index 4a496167c80c1..eeb6e0c4dbb92 100644 --- a/server/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java +++ b/server/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java @@ -405,7 +405,7 @@ private IndicesClusterStateService createIndicesClusterStateService(DiscoveryNod Collections.emptySet()); final ClusterService clusterService = mock(ClusterService.class); final RepositoriesService repositoriesService = new RepositoriesService(settings, clusterService, - transportService, null); + transportService, null, threadPool); final PeerRecoveryTargetService recoveryTargetService = new PeerRecoveryTargetService(settings, threadPool, transportService, null, clusterService); final ShardStateAction shardStateAction = mock(ShardStateAction.class); diff --git a/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryRestoreTests.java b/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryRestoreTests.java index 7a1d3a894204f..4924941ce9539 100644 --- a/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryRestoreTests.java +++ b/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryRestoreTests.java @@ -173,10 +173,15 @@ public 
void testSnapshotWithConflictingName() throws IOException { } /** Create a {@link Repository} with a random name **/ - private Repository createRepository() throws IOException { + private Repository createRepository() { Settings settings = Settings.builder().put("location", randomAlphaOfLength(10)).build(); RepositoryMetaData repositoryMetaData = new RepositoryMetaData(randomAlphaOfLength(10), FsRepository.TYPE, settings); - return new FsRepository(repositoryMetaData, createEnvironment(), xContentRegistry()); + return new FsRepository(repositoryMetaData, createEnvironment(), xContentRegistry()) { + @Override + protected void verificationThreadCheck() { + // eliminate thread name check as we create repo manually + } + }; } /** Create a {@link Environment} with random path.home and path.repo **/ diff --git a/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryTests.java b/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryTests.java index 7e4d5cc54a900..591365ad49153 100644 --- a/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryTests.java +++ b/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryTests.java @@ -43,6 +43,7 @@ import static org.elasticsearch.repositories.RepositoryDataTests.generateRandomRepoData; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.nullValue; /** * Tests for the {@link BlobStoreRepository} and its subclasses. 
@@ -217,6 +218,7 @@ private BlobStoreRepository setupRepo() { final RepositoriesService repositoriesService = getInstanceFromNode(RepositoriesService.class); @SuppressWarnings("unchecked") final BlobStoreRepository repository = (BlobStoreRepository) repositoriesService.repository(repositoryName); + assertThat("blobContainer has to be lazy initialized", repository.blobContainer(), nullValue()); return repository; } diff --git a/server/src/test/java/org/elasticsearch/snapshots/FsBlobStoreRepositoryIT.java b/server/src/test/java/org/elasticsearch/repositories/fs/FsBlobStoreRepositoryIT.java similarity index 66% rename from server/src/test/java/org/elasticsearch/snapshots/FsBlobStoreRepositoryIT.java rename to server/src/test/java/org/elasticsearch/repositories/fs/FsBlobStoreRepositoryIT.java index 792b1bdbdddb0..312a04b40b33e 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/FsBlobStoreRepositoryIT.java +++ b/server/src/test/java/org/elasticsearch/repositories/fs/FsBlobStoreRepositoryIT.java @@ -16,22 +16,37 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.elasticsearch.snapshots; +package org.elasticsearch.repositories.fs; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.repositories.Repository; import org.elasticsearch.repositories.blobstore.ESBlobStoreRepositoryIntegTestCase; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; public class FsBlobStoreRepositoryIT extends ESBlobStoreRepositoryIntegTestCase { @Override - protected void createTestRepository(String name) { + protected void createTestRepository(String name, boolean verify) { assertAcked(client().admin().cluster().preparePutRepository(name) + .setVerify(verify) .setType("fs").setSettings(Settings.builder() .put("location", randomRepoPath()) .put("compress", randomBoolean()) .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES))); + } + + @Override + protected void afterCreationCheck(Repository repository, boolean verify) { + assertThat(repository, instanceOf(FsRepository.class)); + + FsRepository fsRepository = (FsRepository) repository; + assertThat("fs blob store has to be lazy initialized", + fsRepository.innerBlobStore(), verify ? 
is(notNullValue()) : is(nullValue())); } } diff --git a/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java b/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java index c9ca1637b1ade..3899bfcbec327 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java @@ -1739,6 +1739,7 @@ public void testDeleteRepositoryWhileSnapshotting() throws Exception { logger.info("--> trying to create a repository with different name"); assertAcked(client.admin().cluster().preparePutRepository("test-repo-2") + .setVerify(false) // do not do verification itself as snapshot threads could be fully blocked .setType("fs").setSettings(Settings.builder().put("location", repositoryLocation.resolve("test")))); logger.info("--> unblocking blocked node"); diff --git a/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java b/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java index d0702acf10373..c1efccc60de29 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java +++ b/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java @@ -92,8 +92,6 @@ public long getFailureCount() { private final long waitAfterUnblock; - private final MockBlobStore mockBlobStore; - private final String randomPrefix; private volatile boolean blockOnInitialization; @@ -128,7 +126,6 @@ public MockRepository(RepositoryMetaData metadata, Environment environment, waitAfterUnblock = metadata.settings().getAsLong("wait_after_unblock", 0L); allowAtomicOperations = metadata.settings().getAsBoolean("allow_atomic_operations", true); logger.info("starting mock repository with random prefix {}", randomPrefix); - mockBlobStore = new MockBlobStore(super.blobStore()); } @Override @@ -163,8 +160,8 @@ protected void doStop() { } @Override - 
protected BlobStore blobStore() { - return mockBlobStore; + protected BlobStore createBlobStore() throws Exception { + return new MockBlobStore(super.createBlobStore()); } public synchronized void unblock() { @@ -195,7 +192,7 @@ public boolean blocked() { } private synchronized boolean blockExecution() { - logger.debug("Blocking execution"); + logger.debug("[{}] Blocking execution", metadata.name()); boolean wasBlocked = false; try { while (blockOnDataFiles || blockOnControlFiles || blockOnInitialization || blockOnWriteIndexFile || @@ -207,7 +204,7 @@ private synchronized boolean blockExecution() { } catch (InterruptedException ex) { Thread.currentThread().interrupt(); } - logger.debug("Unblocking execution"); + logger.debug("[{}] Unblocking execution", metadata.name()); return wasBlocked; } @@ -285,7 +282,7 @@ private void maybeIOExceptionOrBlock(String blobName) throws IOException { } private void blockExecutionAndMaybeWait(final String blobName) { - logger.info("blocking I/O operation for file [{}] at path [{}]", blobName, path()); + logger.info("[{}] blocking I/O operation for file [{}] at path [{}]", metadata.name(), blobName, path()); if (blockExecution() && waitAfterUnblock > 0) { try { // Delay operation after unblocking diff --git a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESBlobStoreRepositoryIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESBlobStoreRepositoryIntegTestCase.java index c79f432278ae8..ca0266df81bb1 100644 --- a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESBlobStoreRepositoryIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESBlobStoreRepositoryIntegTestCase.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.repositories.IndexId; import org.elasticsearch.repositories.RepositoriesService; +import org.elasticsearch.repositories.Repository; import 
org.elasticsearch.repositories.RepositoryData; import org.elasticsearch.test.ESIntegTestCase; @@ -47,12 +48,28 @@ */ public abstract class ESBlobStoreRepositoryIntegTestCase extends ESIntegTestCase { - protected abstract void createTestRepository(String name); + protected abstract void createTestRepository(String name, boolean verify); + + protected void afterCreationCheck(Repository repository, boolean verify) { + + } + + protected void createAndCheckTestRepository(String name) { + final boolean verify = randomBoolean(); + createTestRepository(name, verify); + + final RepositoriesService repositoriesService = + internalCluster().getDataOrMasterNodeInstances(RepositoriesService.class).iterator().next(); + + final Repository repository = repositoriesService.repository(name); + + afterCreationCheck(repository, verify); + } public void testSnapshotAndRestore() throws Exception { String repoName = randomAsciiName(); logger.info("--> creating repository {}", repoName); - createTestRepository(repoName); + createAndCheckTestRepository(repoName); int indexCount = randomIntBetween(1, 5); int[] docCounts = new int[indexCount]; String[] indexNames = generateRandomNames(indexCount); @@ -114,7 +131,7 @@ public void testSnapshotAndRestore() throws Exception { public void testMultipleSnapshotAndRollback() throws Exception { String repoName = randomAsciiName(); logger.info("--> creating repository {}", repoName); - createTestRepository(repoName); + createAndCheckTestRepository(repoName); int iterationCount = randomIntBetween(2, 5); int[] docCounts = new int[iterationCount]; String indexName = randomAsciiName(); @@ -171,7 +188,7 @@ public void testIndicesDeletedFromRepository() throws Exception { logger.info("--> creating repository"); final String repoName = "test-repo"; - createTestRepository(repoName); + createAndCheckTestRepository(repoName); createIndex("test-idx-1", "test-idx-2", "test-idx-3"); ensureGreen(); From 2eaed57e0be6146cf448ca39f5922bb68a8bb599 Mon Sep 17 00:00:00 
2001 From: Vladimir Dolzhenko Date: Thu, 28 Jun 2018 12:04:53 +0200 Subject: [PATCH 02/13] drop unused import --- .../org/elasticsearch/repositories/s3/S3RepositoryTests.java | 1 - 1 file changed, 1 deletion(-) diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java index 69389f7826dcc..18cbd9cc3f535 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java @@ -29,7 +29,6 @@ import org.elasticsearch.test.ESTestCase; import org.hamcrest.Matchers; -import java.io.IOException; import java.util.Collections; import java.util.Map; From c89a103960cd8991fb92bc32bb1a2d676419410c Mon Sep 17 00:00:00 2001 From: Vladimir Dolzhenko Date: Mon, 2 Jul 2018 17:32:43 +0200 Subject: [PATCH 03/13] move checks to ctor; rename methods for consistency --- .../repositories/url/URLRepository.java | 25 ++++---- .../repositories/url/URLRepositoryTests.java | 14 ++-- .../repositories/azure/AzureRepository.java | 15 ++--- .../azure/AzureRepositorySettingsTests.java | 2 +- .../gcs/GoogleCloudStorageRepository.java | 27 ++++---- ...eCloudStorageBlobStoreRepositoryTests.java | 2 +- .../repositories/hdfs/HdfsRepository.java | 22 +------ .../repositories/s3/S3Repository.java | 22 +++---- .../s3/RepositoryCredentialsTests.java | 8 +-- .../s3/S3BlobStoreRepositoryTests.java | 2 +- .../repositories/s3/S3RepositoryTests.java | 6 +- .../blobstore/BlobStoreRepository.java | 64 +++++++++---------- .../repositories/fs/FsRepository.java | 50 +++++++-------- .../blobstore/BlobStoreRepositoryTests.java | 2 +- .../fs/FsBlobStoreRepositoryIT.java | 2 +- 15 files changed, 114 insertions(+), 149 deletions(-) diff --git a/modules/repository-url/src/main/java/org/elasticsearch/repositories/url/URLRepository.java 
b/modules/repository-url/src/main/java/org/elasticsearch/repositories/url/URLRepository.java index 7dc67a1f7f56e..9a59706e961dc 100644 --- a/modules/repository-url/src/main/java/org/elasticsearch/repositories/url/URLRepository.java +++ b/modules/repository-url/src/main/java/org/elasticsearch/repositories/url/URLRepository.java @@ -22,6 +22,7 @@ import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobPath; +import org.elasticsearch.common.blobstore.BlobStore; import org.elasticsearch.common.blobstore.url.URLBlobStore; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; @@ -48,7 +49,7 @@ *
{@code concurrent_streams}
Number of concurrent read/write stream (per repository on each node). Defaults to 5.
* */ -public class URLRepository extends BlobStoreRepository { +public class URLRepository extends BlobStoreRepository { public static final String TYPE = "url"; @@ -72,6 +73,8 @@ public class URLRepository extends BlobStoreRepository { private final BlobPath basePath; + private final URL url; + /** * Constructs a read-only URL-based repository */ @@ -86,30 +89,26 @@ public URLRepository(RepositoryMetaData metadata, Environment environment, supportedProtocols = SUPPORTED_PROTOCOLS_SETTING.get(settings); urlWhiteList = ALLOWED_URLS_SETTING.get(settings).toArray(new URIPattern[]{}); basePath = BlobPath.cleanPath(); - } - - private URL getUrl() { - URL url = URL_SETTING.exists(metadata.settings()) + url = URL_SETTING.exists(metadata.settings()) ? URL_SETTING.get(metadata.settings()) : REPOSITORIES_URL_SETTING.get(settings); - return checkURL(url); + checkURL(url); } @Override - protected URLBlobStore createBlobStore() { - URL normalizedURL = getUrl(); - return new URLBlobStore(settings, normalizedURL); + protected BlobStore createBlobStore() { + return new URLBlobStore(settings, url); } // only use for testing @Override - protected BlobContainer snapshotsBlobContainer() { - return super.snapshotsBlobContainer(); + protected BlobContainer blobContainer() { + return super.blobContainer(); } // only use for testing @Override - protected URLBlobStore innerBlobStore() { - return super.innerBlobStore(); + protected BlobStore getBlobStore() { + return super.getBlobStore(); } @Override diff --git a/modules/repository-url/src/test/java/org/elasticsearch/repositories/url/URLRepositoryTests.java b/modules/repository-url/src/test/java/org/elasticsearch/repositories/url/URLRepositoryTests.java index 524a6ac086050..01a7784e7da74 100644 --- a/modules/repository-url/src/test/java/org/elasticsearch/repositories/url/URLRepositoryTests.java +++ b/modules/repository-url/src/test/java/org/elasticsearch/repositories/url/URLRepositoryTests.java @@ -48,9 +48,9 @@ public void 
testWhiteListingRepoURL() throws IOException { final URLRepository repository = new URLRepository(repositoryMetaData, TestEnvironment.newEnvironment(baseSettings), new NamedXContentRegistry(Collections.emptyList())); - assertThat("blob store has to be lazy initialized", repository.innerBlobStore(), is(nullValue())); - repository.snapshotsBlobContainer(); - assertThat("snapshotsBlobContainer has to initialize blob store", repository.innerBlobStore(), not(nullValue())); + assertThat("blob store has to be lazy initialized", repository.getBlobStore(), is(nullValue())); + repository.blobContainer(); + assertThat("blobContainer has to initialize blob store", repository.getBlobStore(), not(nullValue())); } public void testIfNotWhiteListedMustSetRepoURL() throws IOException { @@ -62,9 +62,9 @@ public void testIfNotWhiteListedMustSetRepoURL() throws IOException { RepositoryMetaData repositoryMetaData = new RepositoryMetaData("url", URLRepository.TYPE, baseSettings); URLRepository repository = new URLRepository(repositoryMetaData, TestEnvironment.newEnvironment(baseSettings), new NamedXContentRegistry(Collections.emptyList())); - assertThat(repository.innerBlobStore(), is(nullValue())); + assertThat(repository.getBlobStore(), is(nullValue())); try { - repository.snapshotsBlobContainer(); + repository.blobContainer(); fail("RepositoryException should have been thrown."); } catch (RepositoryException e) { String msg = "[url] file url [" + repoPath @@ -85,9 +85,9 @@ public void testMustBeSupportedProtocol() throws IOException { RepositoryMetaData repositoryMetaData = new RepositoryMetaData("url", URLRepository.TYPE, baseSettings); URLRepository repository = new URLRepository(repositoryMetaData, TestEnvironment.newEnvironment(baseSettings), new NamedXContentRegistry(Collections.emptyList())); - assertThat(repository.innerBlobStore(), is(nullValue())); + assertThat(repository.getBlobStore(), is(nullValue())); try { - repository.snapshotsBlobContainer(); + 
repository.blobContainer(); fail("RepositoryException should have been thrown."); } catch (RepositoryException e) { assertEquals("[url] unsupported url protocol [file] from URL [" + repoPath +"]", e.getMessage()); diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java index 63462dbaa9578..fcd15ffcbb2d5 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java @@ -27,6 +27,7 @@ import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.Strings; import org.elasticsearch.common.blobstore.BlobPath; +import org.elasticsearch.common.blobstore.BlobStore; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.unit.ByteSizeValue; @@ -56,7 +57,7 @@ *
{@code compress}
If set to true metadata files will be stored compressed. Defaults to false.
* */ -public class AzureRepository extends BlobStoreRepository { +public class AzureRepository extends BlobStoreRepository { public static final String TYPE = "azure"; @@ -106,14 +107,8 @@ public AzureRepository(RepositoryMetaData metadata, Environment environment, Nam // only use for testing @Override - protected AzureBlobStore blobStore() { - return super.blobStore(); - } - - // only use for testing - @Override - protected AzureBlobStore innerBlobStore() { - return super.innerBlobStore(); + protected BlobStore getBlobStore() { + return super.getBlobStore(); } /** @@ -163,7 +158,7 @@ protected ByteSizeValue chunkSize() { @Override public void initializeSnapshot(SnapshotId snapshotId, List indices, MetaData clusterMetadata) { try { - final AzureBlobStore blobStore = blobStore(); + final AzureBlobStore blobStore = (AzureBlobStore) blobStore(); if (blobStore.containerExist() == false) { throw new IllegalArgumentException("The bucket [" + blobStore + "] does not exist. Please create it before " + " creating an azure snapshot repository backed by it."); diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureRepositorySettingsTests.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureRepositorySettingsTests.java index 2c274128af3ad..b4b71577cbcdc 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureRepositorySettingsTests.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureRepositorySettingsTests.java @@ -43,7 +43,7 @@ private AzureRepository azureRepository(Settings settings) { .build(); final AzureRepository azureRepository = new AzureRepository(new RepositoryMetaData("foo", "azure", internalSettings), TestEnvironment.newEnvironment(internalSettings), NamedXContentRegistry.EMPTY, mock(AzureStorageService.class)); - assertThat(azureRepository.innerBlobStore(), is(nullValue())); + 
assertThat(azureRepository.getBlobStore(), is(nullValue())); return azureRepository; } diff --git a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java index 3a625d5a23fa0..4a1268af21755 100644 --- a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java +++ b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java @@ -22,6 +22,7 @@ import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.Strings; import org.elasticsearch.common.blobstore.BlobPath; +import org.elasticsearch.common.blobstore.BlobStore; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; @@ -37,7 +38,7 @@ import static org.elasticsearch.common.settings.Setting.byteSizeSetting; import static org.elasticsearch.common.settings.Setting.simpleString; -class GoogleCloudStorageRepository extends BlobStoreRepository { +class GoogleCloudStorageRepository extends BlobStoreRepository { // package private for testing static final ByteSizeValue MIN_CHUNK_SIZE = new ByteSizeValue(1, ByteSizeUnit.BYTES); @@ -55,10 +56,12 @@ class GoogleCloudStorageRepository extends BlobStoreRepository CLIENT_NAME = new Setting<>("client", "default", Function.identity()); - private final ByteSizeValue chunkSize; - private final boolean compress; - private final BlobPath basePath; private final GoogleCloudStorageService storageService; + private final BlobPath basePath; + private final boolean compress; + private final ByteSizeValue chunkSize; + private final String bucket; + private final String clientName; GoogleCloudStorageRepository(RepositoryMetaData metadata, Environment environment, NamedXContentRegistry namedXContentRegistry, @@ 
-79,26 +82,20 @@ class GoogleCloudStorageRepository extends BlobStoreRepository { +public final class HdfsRepository extends BlobStoreRepository { private static final Logger LOGGER = Loggers.getLogger(HdfsRepository.class); @@ -223,28 +223,8 @@ private static String getHostName() { @Override protected HdfsBlobStore createBlobStore() { String uriSetting = getMetadata().settings().get("uri"); - if (Strings.hasText(uriSetting) == false) { - throw new IllegalArgumentException("No 'uri' defined for hdfs snapshot/restore"); - } URI uri = URI.create(uriSetting); - if ("hdfs".equalsIgnoreCase(uri.getScheme()) == false) { - throw new IllegalArgumentException(String.format(Locale.ROOT, - "Invalid scheme [%s] specified in uri [%s]; only 'hdfs' uri allowed for hdfs snapshot/restore", uri.getScheme(), uriSetting)); - } - if (Strings.hasLength(uri.getPath()) && uri.getPath().equals("/") == false) { - throw new IllegalArgumentException(String.format(Locale.ROOT, - "Use 'path' option to specify a path [%s], not the uri [%s] for hdfs snapshot/restore", uri.getPath(), uriSetting)); - } - String pathSetting = getMetadata().settings().get("path"); - // get configuration - if (pathSetting == null) { - throw new IllegalArgumentException("No 'path' defined for hdfs snapshot/restore"); - } - - // initialize our blobstore using elevated privileges. 
- SpecialPermission.check(); - final HdfsBlobStore blobStore = AccessController.doPrivileged((PrivilegedAction) () -> createBlobstore(uri, pathSetting, getMetadata().settings())); diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java index 2163b6f53c806..86ea48cb5abce 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java @@ -23,6 +23,7 @@ import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.Strings; import org.elasticsearch.common.blobstore.BlobPath; +import org.elasticsearch.common.blobstore.BlobStore; import org.elasticsearch.common.settings.SecureSetting; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Setting; @@ -49,7 +50,7 @@ *
{@code compress}
If set to true metadata files will be stored compressed. Defaults to false.
* */ -class S3Repository extends BlobStoreRepository { +class S3Repository extends BlobStoreRepository { static final String TYPE = "s3"; @@ -181,14 +182,16 @@ class S3Repository extends BlobStoreRepository { } else { this.basePath = BlobPath.cleanPath(); } + + // (repository settings) + if (S3ClientSettings.checkDeprecatedCredentials(metadata.settings())) { + overrideCredentialsFromClusterState(service); + } } @Override protected S3BlobStore createBlobStore() { final String bucket = BUCKET_SETTING.get(metadata.settings()); - if (bucket == null) { - throw new RepositoryException(metadata.name(), "No bucket defined for s3 repository"); - } final boolean serverSideEncryption = SERVER_SIDE_ENCRYPTION_SETTING.get(metadata.settings()); final ByteSizeValue bufferSize = BUFFER_SIZE_SETTING.get(metadata.settings()); @@ -202,24 +205,19 @@ protected S3BlobStore createBlobStore() { "buffer_size [{}], cannedACL [{}], storageClass [{}]", bucket, chunkSize, serverSideEncryption, bufferSize, cannedACL, storageClass); - // (repository settings) - if (S3ClientSettings.checkDeprecatedCredentials(metadata.settings())) { - overrideCredentialsFromClusterState(service); - } - return new S3BlobStore(settings, service, clientName, bucket, serverSideEncryption, bufferSize, cannedACL, storageClass); } // only use for testing @Override - protected S3BlobStore blobStore() { + protected BlobStore blobStore() { return super.blobStore(); } // only use for testing @Override - protected S3BlobStore innerBlobStore() { - return super.innerBlobStore(); + protected BlobStore getBlobStore() { + return super.getBlobStore(); } @Override diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java index 1a0fb06b21f5e..18c5192b86086 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java +++ 
b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java @@ -103,7 +103,7 @@ public void testRepositoryCredentialsOverrideSecureCredentials() throws IOExcept .put(S3Repository.SECRET_KEY_SETTING.getKey(), "insecure_aws_secret").build()); try (S3RepositoryPlugin s3Plugin = new ProxyS3RepositoryPlugin(settings); S3Repository s3repo = s3Plugin.createRepository(metadata, Settings.EMPTY, NamedXContentRegistry.EMPTY); - AmazonS3Reference s3Ref = s3repo.blobStore().clientReference()) { + AmazonS3Reference s3Ref = ((S3BlobStore)s3repo.blobStore()).clientReference()) { final AWSCredentials credentials = ((ProxyS3RepositoryPlugin.ClientAndCredentials) s3Ref.client()).credentials.getCredentials(); assertThat(credentials.getAWSAccessKeyId(), is("insecure_aws_key")); assertThat(credentials.getAWSSecretKey(), is("insecure_aws_secret")); @@ -126,7 +126,7 @@ public void testRepositoryCredentialsOnly() throws IOException { .build()); try (S3RepositoryPlugin s3Plugin = new ProxyS3RepositoryPlugin(Settings.EMPTY); S3Repository s3repo = s3Plugin.createRepository(metadata, Settings.EMPTY, NamedXContentRegistry.EMPTY); - AmazonS3Reference s3Ref = s3repo.blobStore().clientReference()) { + AmazonS3Reference s3Ref = ((S3BlobStore)s3repo.blobStore()).clientReference()) { final AWSCredentials credentials = ((ProxyS3RepositoryPlugin.ClientAndCredentials) s3Ref.client()).credentials.getCredentials(); assertThat(credentials.getAWSAccessKeyId(), is("insecure_aws_key")); assertThat(credentials.getAWSSecretKey(), is("insecure_aws_secret")); @@ -157,7 +157,7 @@ public void testReinitSecureCredentials() throws IOException { final RepositoryMetaData metadata = new RepositoryMetaData("dummy-repo", "mock", builder.build()); try (S3RepositoryPlugin s3Plugin = new ProxyS3RepositoryPlugin(settings); S3Repository s3repo = s3Plugin.createRepository(metadata, Settings.EMPTY, NamedXContentRegistry.EMPTY)) { - try (AmazonS3Reference s3Ref = 
s3repo.blobStore().clientReference()) { + try (AmazonS3Reference s3Ref = ((S3BlobStore)s3repo.blobStore()).clientReference()) { final AWSCredentials credentials = ((ProxyS3RepositoryPlugin.ClientAndCredentials) s3Ref.client()).credentials .getCredentials(); if (repositorySettings) { @@ -184,7 +184,7 @@ public void testReinitSecureCredentials() throws IOException { } } // check credentials have been updated - try (AmazonS3Reference s3Ref = s3repo.blobStore().clientReference()) { + try (AmazonS3Reference s3Ref = ((S3BlobStore)s3repo.blobStore()).clientReference()) { final AWSCredentials newCredentials = ((ProxyS3RepositoryPlugin.ClientAndCredentials) s3Ref.client()).credentials .getCredentials(); assertThat(newCredentials.getAWSAccessKeyId(), is("new_secret_aws_key")); diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java index b10c7d936434c..f5e1886c251d2 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java @@ -110,7 +110,7 @@ protected void afterCreationCheck(Repository repository, boolean verify) { S3Repository s3Repository = (S3Repository) repository; assertThat("s3 blob store has to be lazy initialized", - s3Repository.innerBlobStore(), verify ? is(notNullValue()) : is(nullValue())); + s3Repository.getBlobStore(), verify ? 
is(notNullValue()) : is(nullValue())); } @Override diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java index 18cbd9cc3f535..f1fc2d9d42b06 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java @@ -119,9 +119,9 @@ public void testBasePathSetting() { public void testDefaultBufferSize() { final RepositoryMetaData metadata = new RepositoryMetaData("dummy-repo", "mock", Settings.EMPTY); try (S3Repository s3repo = new S3Repository(metadata, Settings.EMPTY, NamedXContentRegistry.EMPTY, new DummyS3Service())) { - assertThat(s3repo.innerBlobStore(), is(nullValue())); - final long defaultBufferSize = s3repo.blobStore().bufferSizeInBytes(); - assertThat(s3repo.innerBlobStore(), not(nullValue())); + assertThat(s3repo.getBlobStore(), is(nullValue())); + final long defaultBufferSize = ((S3BlobStore)s3repo.blobStore()).bufferSizeInBytes(); + assertThat(s3repo.getBlobStore(), not(nullValue())); assertThat(defaultBufferSize, Matchers.lessThanOrEqualTo(100L * 1024 * 1024)); assertThat(defaultBufferSize, Matchers.greaterThanOrEqualTo(5L * 1024 * 1024)); } diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index e495070db304b..1a7ffb59924e1 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -168,7 +168,7 @@ * } * */ -public abstract class BlobStoreRepository extends AbstractLifecycleComponent implements Repository { +public abstract class BlobStoreRepository extends AbstractLifecycleComponent implements Repository 
{ protected final RepositoryMetaData metadata; @@ -226,9 +226,9 @@ public abstract class BlobStoreRepository extends Abstract private final Object lock = new Object(); - private volatile BlobContainer snapshotsBlobContainer; + private volatile BlobContainer blobContainer; - private volatile BS blobStore; + private volatile BlobStore blobStore; /** * Constructs new BlobStoreRepository @@ -280,21 +280,26 @@ protected void doClose() { } // package private, only use for testing - BlobContainer blobContainer() { - return snapshotsBlobContainer; + BlobContainer getBlobContainer() { + return blobContainer; + } + + // for test purposes only + protected BlobStore getBlobStore() { + return blobStore; } /** * maintains single lazy instance of {@link BlobContainer} */ - protected BlobContainer snapshotsBlobContainer() { - BlobContainer blobContainer = snapshotsBlobContainer; + protected BlobContainer blobContainer() { + BlobContainer blobContainer = this.blobContainer; if (blobContainer == null) { synchronized (lock) { - blobContainer = snapshotsBlobContainer; + blobContainer = this.blobContainer; if (blobContainer == null) { blobContainer = blobStore().blobContainer(basePath()); - snapshotsBlobContainer = blobContainer; + this.blobContainer = blobContainer; } } } @@ -305,8 +310,8 @@ protected BlobContainer snapshotsBlobContainer() { /** * maintains single lazy instance of {@link BlobStore} */ - protected BS blobStore() { - BS store = blobStore; + protected BlobStore blobStore() { + BlobStore store = blobStore; if (store == null) { synchronized (lock) { store = blobStore; @@ -325,15 +330,10 @@ protected BS blobStore() { return store; } - // for test purposes only - protected BS innerBlobStore() { - return blobStore; - } - /** * Creates new BlobStore to read and write data. 
*/ - protected abstract BS createBlobStore() throws Exception; + protected abstract BlobStore createBlobStore() throws Exception; /** * Returns base path of the repository @@ -377,12 +377,12 @@ public void initializeSnapshot(SnapshotId snapshotId, List indices, Met if (repositoryData.getAllSnapshotIds().stream().anyMatch(s -> s.getName().equals(snapshotName))) { throw new InvalidSnapshotNameException(metadata.name(), snapshotId.getName(), "snapshot with the same name already exists"); } - if (snapshotFormat.exists(snapshotsBlobContainer(), snapshotId.getUUID())) { + if (snapshotFormat.exists(blobContainer(), snapshotId.getUUID())) { throw new InvalidSnapshotNameException(metadata.name(), snapshotId.getName(), "snapshot with the same name already exists"); } // Write Global MetaData - globalMetaDataFormat.write(clusterMetaData, snapshotsBlobContainer(), snapshotId.getUUID()); + globalMetaDataFormat.write(clusterMetaData, blobContainer(), snapshotId.getUUID()); // write the index metadata for each index in the snapshot for (IndexId index : indices) { @@ -479,7 +479,7 @@ public void deleteSnapshot(SnapshotId snapshotId, long repositoryStateId) { private void deleteSnapshotBlobIgnoringErrors(final SnapshotInfo snapshotInfo, final String blobId) { try { - snapshotFormat.delete(snapshotsBlobContainer(), blobId); + snapshotFormat.delete(blobContainer(), blobId); } catch (IOException e) { if (snapshotInfo != null) { logger.warn(() -> new ParameterizedMessage("[{}] Unable to delete snapshot file [{}]", @@ -492,7 +492,7 @@ private void deleteSnapshotBlobIgnoringErrors(final SnapshotInfo snapshotInfo, f private void deleteGlobalMetaDataBlobIgnoringErrors(final SnapshotInfo snapshotInfo, final String blobId) { try { - globalMetaDataFormat.delete(snapshotsBlobContainer(), blobId); + globalMetaDataFormat.delete(blobContainer(), blobId); } catch (IOException e) { if (snapshotInfo != null) { logger.warn(() -> new ParameterizedMessage("[{}] Unable to delete global metadata file 
[{}]", @@ -530,7 +530,7 @@ public SnapshotInfo finalizeSnapshot(final SnapshotId snapshotId, startTime, failure, System.currentTimeMillis(), totalShards, shardFailures, includeGlobalState); try { - snapshotFormat.write(blobStoreSnapshot, snapshotsBlobContainer(), snapshotId.getUUID()); + snapshotFormat.write(blobStoreSnapshot, blobContainer(), snapshotId.getUUID()); final RepositoryData repositoryData = getRepositoryData(); writeIndexGen(repositoryData.addSnapshot(snapshotId, blobStoreSnapshot.state(), indices), repositoryStateId); } catch (FileAlreadyExistsException ex) { @@ -548,7 +548,7 @@ public SnapshotInfo finalizeSnapshot(final SnapshotId snapshotId, @Override public SnapshotInfo getSnapshotInfo(final SnapshotId snapshotId) { try { - return snapshotFormat.read(snapshotsBlobContainer(), snapshotId.getUUID()); + return snapshotFormat.read(blobContainer(), snapshotId.getUUID()); } catch (NoSuchFileException ex) { throw new SnapshotMissingException(metadata.name(), snapshotId, ex); } catch (IOException | NotXContentException ex) { @@ -559,7 +559,7 @@ public SnapshotInfo getSnapshotInfo(final SnapshotId snapshotId) { @Override public MetaData getSnapshotGlobalMetaData(final SnapshotId snapshotId) { try { - return globalMetaDataFormat.read(snapshotsBlobContainer(), snapshotId.getUUID()); + return globalMetaDataFormat.read(blobContainer(), snapshotId.getUUID()); } catch (NoSuchFileException ex) { throw new SnapshotMissingException(metadata.name(), snapshotId, ex); } catch (IOException ex) { @@ -649,7 +649,7 @@ public RepositoryData getRepositoryData() { final String snapshotsIndexBlobName = INDEX_FILE_PREFIX + Long.toString(indexGen); RepositoryData repositoryData; - try (InputStream blob = snapshotsBlobContainer().readBlob(snapshotsIndexBlobName)) { + try (InputStream blob = blobContainer().readBlob(snapshotsIndexBlobName)) { BytesStreamOutput out = new BytesStreamOutput(); Streams.copy(blob, out); // EMPTY is safe here because RepositoryData#fromXContent calls 
namedObject @@ -663,7 +663,7 @@ public RepositoryData getRepositoryData() { } // now load the incompatible snapshot ids, if they exist - try (InputStream blob = snapshotsBlobContainer().readBlob(INCOMPATIBLE_SNAPSHOTS_BLOB)) { + try (InputStream blob = blobContainer().readBlob(INCOMPATIBLE_SNAPSHOTS_BLOB)) { BytesStreamOutput out = new BytesStreamOutput(); Streams.copy(blob, out); try (XContentParser parser = XContentHelper.createParser(NamedXContentRegistry.EMPTY, @@ -728,7 +728,7 @@ protected void writeIndexGen(final RepositoryData repositoryData, final long rep // delete the N-2 index file if it exists, keep the previous one around as a backup if (isReadOnly() == false && newGen - 2 >= 0) { final String oldSnapshotIndexFile = INDEX_FILE_PREFIX + Long.toString(newGen - 2); - snapshotsBlobContainer().deleteBlobIgnoringIfNotExists(oldSnapshotIndexFile); + blobContainer().deleteBlobIgnoringIfNotExists(oldSnapshotIndexFile); } // write the current generation to the index-latest file @@ -737,7 +737,7 @@ protected void writeIndexGen(final RepositoryData repositoryData, final long rep bStream.writeLong(newGen); genBytes = bStream.bytes(); } - snapshotsBlobContainer().deleteBlobIgnoringIfNotExists(INDEX_LATEST_BLOB); + blobContainer().deleteBlobIgnoringIfNotExists(INDEX_LATEST_BLOB); logger.debug("Repository [{}] updating index.latest with generation [{}]", metadata.name(), newGen); writeAtomic(INDEX_LATEST_BLOB, genBytes); } @@ -758,7 +758,7 @@ void writeIncompatibleSnapshots(RepositoryData repositoryData) throws IOExceptio } bytes = bStream.bytes(); } - snapshotsBlobContainer().deleteBlobIgnoringIfNotExists(INCOMPATIBLE_SNAPSHOTS_BLOB); + blobContainer().deleteBlobIgnoringIfNotExists(INCOMPATIBLE_SNAPSHOTS_BLOB); // write the incompatible snapshots blob writeAtomic(INCOMPATIBLE_SNAPSHOTS_BLOB, bytes); } @@ -798,7 +798,7 @@ long latestIndexBlobId() throws IOException { // package private for testing long readSnapshotIndexLatestBlob() throws IOException { - try 
(InputStream blob = snapshotsBlobContainer().readBlob(INDEX_LATEST_BLOB)) { + try (InputStream blob = blobContainer().readBlob(INDEX_LATEST_BLOB)) { BytesStreamOutput out = new BytesStreamOutput(); Streams.copy(blob, out); return Numbers.bytesToLong(out.bytes().toBytesRef()); @@ -806,7 +806,7 @@ long readSnapshotIndexLatestBlob() throws IOException { } private long listBlobsToGetLatestIndexId() throws IOException { - Map blobs = snapshotsBlobContainer().listBlobsByPrefix(INDEX_FILE_PREFIX); + Map blobs = blobContainer().listBlobsByPrefix(INDEX_FILE_PREFIX); long latest = RepositoryData.EMPTY_REPO_GEN; if (blobs.isEmpty()) { // no snapshot index blobs have been written yet @@ -828,7 +828,7 @@ private long listBlobsToGetLatestIndexId() throws IOException { private void writeAtomic(final String blobName, final BytesReference bytesRef) throws IOException { try (InputStream stream = bytesRef.streamInput()) { - snapshotsBlobContainer().writeBlobAtomic(blobName, stream, bytesRef.length()); + blobContainer().writeBlobAtomic(blobName, stream, bytesRef.length()); } } diff --git a/server/src/main/java/org/elasticsearch/repositories/fs/FsRepository.java b/server/src/main/java/org/elasticsearch/repositories/fs/FsRepository.java index 6be1b0882b920..b8fd6654bf64c 100644 --- a/server/src/main/java/org/elasticsearch/repositories/fs/FsRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/fs/FsRepository.java @@ -45,7 +45,7 @@ *
{@code compress}
If set to true metadata files will be stored compressed. Defaults to false.
* */ -public class FsRepository extends BlobStoreRepository { +public class FsRepository extends BlobStoreRepository { public static final String TYPE = "fs"; @@ -75,31 +75,6 @@ public FsRepository(RepositoryMetaData metadata, Environment environment, NamedXContentRegistry namedXContentRegistry) { super(metadata, environment.settings(), namedXContentRegistry); this.environment = environment; - checkAndGetLocation(metadata, environment); - - if (CHUNK_SIZE_SETTING.exists(metadata.settings())) { - this.chunkSize = CHUNK_SIZE_SETTING.get(metadata.settings()); - } else { - this.chunkSize = REPOSITORIES_CHUNK_SIZE_SETTING.get(settings); - } - this.compress = COMPRESS_SETTING.exists(metadata.settings()) - ? COMPRESS_SETTING.get(metadata.settings()) : REPOSITORIES_COMPRESS_SETTING.get(settings); - this.basePath = BlobPath.cleanPath(); - } - - @Override - protected BlobStore createBlobStore() throws Exception { - final Path locationFile = checkAndGetLocation(metadata, environment); - return new FsBlobStore(settings, locationFile); - } - - // only use for testing - @Override - protected BlobStore innerBlobStore() { - return super.innerBlobStore(); - } - - private Path checkAndGetLocation(RepositoryMetaData metadata, Environment environment) { String location = REPOSITORIES_LOCATION_SETTING.get(metadata.settings()); if (location.isEmpty()) { logger.warn("the repository location is missing, it should point to a shared file system location" @@ -120,7 +95,28 @@ private Path checkAndGetLocation(RepositoryMetaData metadata, Environment enviro + "] doesn't match any of the locations specified by path.repo because this setting is empty"); } } - return locationFile; + + if (CHUNK_SIZE_SETTING.exists(metadata.settings())) { + this.chunkSize = CHUNK_SIZE_SETTING.get(metadata.settings()); + } else { + this.chunkSize = REPOSITORIES_CHUNK_SIZE_SETTING.get(settings); + } + this.compress = COMPRESS_SETTING.exists(metadata.settings()) + ? 
COMPRESS_SETTING.get(metadata.settings()) : REPOSITORIES_COMPRESS_SETTING.get(settings); + this.basePath = BlobPath.cleanPath(); + } + + @Override + protected BlobStore createBlobStore() throws Exception { + final String location = REPOSITORIES_LOCATION_SETTING.get(metadata.settings()); + final Path locationFile = environment.resolveRepoFile(location); + return new FsBlobStore(settings, locationFile); + } + + // only use for testing + @Override + protected BlobStore getBlobStore() { + return super.getBlobStore(); } @Override diff --git a/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryTests.java b/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryTests.java index 591365ad49153..9c14a9778760f 100644 --- a/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryTests.java +++ b/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryTests.java @@ -218,7 +218,7 @@ private BlobStoreRepository setupRepo() { final RepositoriesService repositoriesService = getInstanceFromNode(RepositoriesService.class); @SuppressWarnings("unchecked") final BlobStoreRepository repository = (BlobStoreRepository) repositoriesService.repository(repositoryName); - assertThat("blobContainer has to be lazy initialized", repository.blobContainer(), nullValue()); + assertThat("getBlobContainer has to be lazy initialized", repository.getBlobContainer(), nullValue()); return repository; } diff --git a/server/src/test/java/org/elasticsearch/repositories/fs/FsBlobStoreRepositoryIT.java b/server/src/test/java/org/elasticsearch/repositories/fs/FsBlobStoreRepositoryIT.java index 312a04b40b33e..2c0d0060ad782 100644 --- a/server/src/test/java/org/elasticsearch/repositories/fs/FsBlobStoreRepositoryIT.java +++ b/server/src/test/java/org/elasticsearch/repositories/fs/FsBlobStoreRepositoryIT.java @@ -47,6 +47,6 @@ protected void afterCreationCheck(Repository repository, boolean verify) { 
FsRepository fsRepository = (FsRepository) repository; assertThat("fs blob store has to be lazy initialized", - fsRepository.innerBlobStore(), verify ? is(notNullValue()) : is(nullValue())); + fsRepository.getBlobStore(), verify ? is(notNullValue()) : is(nullValue())); } } From 3529a7ebebffc7cda2b27a40a27d70c99ca87111 Mon Sep 17 00:00:00 2001 From: Vladimir Dolzhenko Date: Tue, 3 Jul 2018 08:44:01 +0200 Subject: [PATCH 04/13] fix URLRepositoryTests --- .../repositories/url/URLRepositoryTests.java | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/modules/repository-url/src/test/java/org/elasticsearch/repositories/url/URLRepositoryTests.java b/modules/repository-url/src/test/java/org/elasticsearch/repositories/url/URLRepositoryTests.java index 01a7784e7da74..a862e4d5b9c1d 100644 --- a/modules/repository-url/src/test/java/org/elasticsearch/repositories/url/URLRepositoryTests.java +++ b/modules/repository-url/src/test/java/org/elasticsearch/repositories/url/URLRepositoryTests.java @@ -60,11 +60,9 @@ public void testIfNotWhiteListedMustSetRepoURL() throws IOException { .put(URLRepository.REPOSITORIES_URL_SETTING.getKey(), repoPath) .build(); RepositoryMetaData repositoryMetaData = new RepositoryMetaData("url", URLRepository.TYPE, baseSettings); - URLRepository repository = new URLRepository(repositoryMetaData, TestEnvironment.newEnvironment(baseSettings), - new NamedXContentRegistry(Collections.emptyList())); - assertThat(repository.getBlobStore(), is(nullValue())); try { - repository.blobContainer(); + new URLRepository(repositoryMetaData, TestEnvironment.newEnvironment(baseSettings), + new NamedXContentRegistry(Collections.emptyList())); fail("RepositoryException should have been thrown."); } catch (RepositoryException e) { String msg = "[url] file url [" + repoPath @@ -83,11 +81,9 @@ public void testMustBeSupportedProtocol() throws IOException { .put(URLRepository.SUPPORTED_PROTOCOLS_SETTING.getKey(), "http,https") .build(); 
RepositoryMetaData repositoryMetaData = new RepositoryMetaData("url", URLRepository.TYPE, baseSettings); - URLRepository repository = new URLRepository(repositoryMetaData, TestEnvironment.newEnvironment(baseSettings), - new NamedXContentRegistry(Collections.emptyList())); - assertThat(repository.getBlobStore(), is(nullValue())); try { - repository.blobContainer(); + new URLRepository(repositoryMetaData, TestEnvironment.newEnvironment(baseSettings), + new NamedXContentRegistry(Collections.emptyList())); fail("RepositoryException should have been thrown."); } catch (RepositoryException e) { assertEquals("[url] unsupported url protocol [file] from URL [" + repoPath +"]", e.getMessage()); From bc60b22b141302a7eda084f02a36ea3ad2303a02 Mon Sep 17 00:00:00 2001 From: Vladimir Dolzhenko Date: Mon, 9 Jul 2018 13:16:03 +0200 Subject: [PATCH 05/13] changes due to Yannick's review - test code clean up; blob store parameters are moved to ctors; using SetOnce and lifecycle-aware lazy creation --- .../repositories/url/URLRepository.java | 4 +- .../repositories/url/URLRepositoryTests.java | 33 ++++++++++++-- .../repositories/azure/AzureRepository.java | 29 +++++-------- .../gcs/GoogleCloudStorageRepository.java | 9 +--- ...eCloudStorageBlobStoreRepositoryTests.java | 10 +---- .../repositories/hdfs/HdfsRepository.java | 14 +++--- .../repositories/s3/S3Repository.java | 43 +++++++++++-------- .../s3/RepositoryCredentialsTests.java | 8 ++-- .../s3/S3BlobStoreRepositoryTests.java | 10 +---- .../blobstore/BlobStoreRepository.java | 26 ++++++----- .../repositories/fs/FsRepository.java | 6 --- .../BlobStoreRepositoryRestoreTests.java | 4 +- .../fs/FsBlobStoreRepositoryIT.java | 10 +---- .../ESBlobStoreRepositoryIntegTestCase.java | 18 +++++--- 14 files changed, 112 insertions(+), 112 deletions(-) diff --git a/modules/repository-url/src/main/java/org/elasticsearch/repositories/url/URLRepository.java 
b/modules/repository-url/src/main/java/org/elasticsearch/repositories/url/URLRepository.java index 9a59706e961dc..98b8c0a1945a5 100644 --- a/modules/repository-url/src/main/java/org/elasticsearch/repositories/url/URLRepository.java +++ b/modules/repository-url/src/main/java/org/elasticsearch/repositories/url/URLRepository.java @@ -91,12 +91,12 @@ public URLRepository(RepositoryMetaData metadata, Environment environment, basePath = BlobPath.cleanPath(); url = URL_SETTING.exists(metadata.settings()) ? URL_SETTING.get(metadata.settings()) : REPOSITORIES_URL_SETTING.get(settings); - checkURL(url); } @Override protected BlobStore createBlobStore() { - return new URLBlobStore(settings, url); + URL normalizedURL = checkURL(url); + return new URLBlobStore(settings, normalizedURL); } // only use for testing diff --git a/modules/repository-url/src/test/java/org/elasticsearch/repositories/url/URLRepositoryTests.java b/modules/repository-url/src/test/java/org/elasticsearch/repositories/url/URLRepositoryTests.java index a862e4d5b9c1d..d9f1eea67537c 100644 --- a/modules/repository-url/src/test/java/org/elasticsearch/repositories/url/URLRepositoryTests.java +++ b/modules/repository-url/src/test/java/org/elasticsearch/repositories/url/URLRepositoryTests.java @@ -47,6 +47,7 @@ public void testWhiteListingRepoURL() throws IOException { RepositoryMetaData repositoryMetaData = new RepositoryMetaData("url", URLRepository.TYPE, baseSettings); final URLRepository repository = new URLRepository(repositoryMetaData, TestEnvironment.newEnvironment(baseSettings), new NamedXContentRegistry(Collections.emptyList())); + repository.start(); assertThat("blob store has to be lazy initialized", repository.getBlobStore(), is(nullValue())); repository.blobContainer(); @@ -60,9 +61,11 @@ public void testIfNotWhiteListedMustSetRepoURL() throws IOException { .put(URLRepository.REPOSITORIES_URL_SETTING.getKey(), repoPath) .build(); RepositoryMetaData repositoryMetaData = new RepositoryMetaData("url", 
URLRepository.TYPE, baseSettings); + final URLRepository repository = new URLRepository(repositoryMetaData, TestEnvironment.newEnvironment(baseSettings), + new NamedXContentRegistry(Collections.emptyList())); + repository.start(); try { - new URLRepository(repositoryMetaData, TestEnvironment.newEnvironment(baseSettings), - new NamedXContentRegistry(Collections.emptyList())); + repository.blobContainer(); fail("RepositoryException should have been thrown."); } catch (RepositoryException e) { String msg = "[url] file url [" + repoPath @@ -81,13 +84,35 @@ public void testMustBeSupportedProtocol() throws IOException { .put(URLRepository.SUPPORTED_PROTOCOLS_SETTING.getKey(), "http,https") .build(); RepositoryMetaData repositoryMetaData = new RepositoryMetaData("url", URLRepository.TYPE, baseSettings); + final URLRepository repository = new URLRepository(repositoryMetaData, TestEnvironment.newEnvironment(baseSettings), + new NamedXContentRegistry(Collections.emptyList())); + repository.start(); try { - new URLRepository(repositoryMetaData, TestEnvironment.newEnvironment(baseSettings), - new NamedXContentRegistry(Collections.emptyList())); + repository.blobContainer(); fail("RepositoryException should have been thrown."); } catch (RepositoryException e) { assertEquals("[url] unsupported url protocol [file] from URL [" + repoPath +"]", e.getMessage()); } } + public void testNonNormalizedUrl() throws IOException { + Settings baseSettings = Settings.builder() + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) + .put(URLRepository.ALLOWED_URLS_SETTING.getKey(), "file:/tmp/") + .put(URLRepository.REPOSITORIES_URL_SETTING.getKey(), "file:/var/" ) + .build(); + RepositoryMetaData repositoryMetaData = new RepositoryMetaData("url", URLRepository.TYPE, baseSettings); + final URLRepository repository = new URLRepository(repositoryMetaData, TestEnvironment.newEnvironment(baseSettings), + new NamedXContentRegistry(Collections.emptyList())); + 
repository.start(); + try { + repository.blobContainer(); + fail("RepositoryException should have been thrown."); + } catch (RepositoryException e) { + assertEquals("[url] file url [file:/var/] doesn't match any of the locations " + + "specified by path.repo or repositories.url.allowed_urls", + e.getMessage()); + } + } + } diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java index fcd15ffcbb2d5..0797c78af33bb 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java @@ -82,7 +82,7 @@ public static final class Repository { private final boolean compress; private final Environment environment; private final AzureStorageService storageService; - private volatile boolean readonly; + private final boolean readonly; public AzureRepository(RepositoryMetaData metadata, Environment environment, NamedXContentRegistry namedXContentRegistry, AzureStorageService storageService) { @@ -103,6 +103,15 @@ public AzureRepository(RepositoryMetaData metadata, Environment environment, Nam } else { this.basePath = BlobPath.cleanPath(); } + + // If the user explicitly did not define a readonly value, we set it by ourselves depending on the location mode setting. 
+ // For secondary_only setting, the repository should be read only + final LocationMode locationMode = Repository.LOCATION_MODE_SETTING.get(metadata.settings()); + if (Repository.READONLY_SETTING.exists(metadata.settings())) { + this.readonly = Repository.READONLY_SETTING.get(metadata.settings()); + } else { + this.readonly = locationMode == LocationMode.SECONDARY_ONLY; + } } // only use for testing @@ -118,19 +127,9 @@ protected BlobStore getBlobStore() { protected AzureBlobStore createBlobStore() throws URISyntaxException, StorageException { final AzureBlobStore blobStore = new AzureBlobStore(metadata, environment.settings(), storageService); - // If the user explicitly did not define a readonly value, we set it by ourselves depending on the location mode setting. - // For secondary_only setting, the repository should be read only - - if (Repository.READONLY_SETTING.exists(metadata.settings())) { - this.readonly = Repository.READONLY_SETTING.get(metadata.settings()); - } else { - this.readonly = blobStore.getLocationMode() == LocationMode.SECONDARY_ONLY; - } - logger.debug((org.apache.logging.log4j.util.Supplier) () -> new ParameterizedMessage( "using container [{}], chunk_size [{}], compress [{}], base_path [{}]", blobStore, chunkSize, compress, basePath)); - return blobStore; } @@ -171,14 +170,6 @@ public void initializeSnapshot(SnapshotId snapshotId, List indices, Met @Override public boolean isReadOnly() { - // If the user explicitly did not define a readonly value, we set it by ourselves depending on the location mode setting. 
- // For secondary_only setting, the repository should be read only - if (Repository.READONLY_SETTING.exists(metadata.settings())) { - this.readonly = Repository.READONLY_SETTING.get(metadata.settings()); - } else { - // otherwise it's possible obtain actual value only if blobStore is created - blobStore(); - } return readonly; } } diff --git a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java index 4a1268af21755..fe6c8889bd238 100644 --- a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java +++ b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java @@ -22,7 +22,6 @@ import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.Strings; import org.elasticsearch.common.blobstore.BlobPath; -import org.elasticsearch.common.blobstore.BlobStore; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; @@ -65,7 +64,7 @@ class GoogleCloudStorageRepository extends BlobStoreRepository { GoogleCloudStorageRepository(RepositoryMetaData metadata, Environment environment, NamedXContentRegistry namedXContentRegistry, - GoogleCloudStorageService storageService){ + GoogleCloudStorageService storageService) { super(metadata, environment.settings(), namedXContentRegistry); this.storageService = storageService; @@ -92,12 +91,6 @@ protected GoogleCloudStorageBlobStore createBlobStore() { return new GoogleCloudStorageBlobStore(settings, bucket, clientName, storageService); } - // only use for testing - @Override - protected BlobStore getBlobStore() { - return super.getBlobStore(); - } - @Override protected BlobPath basePath() { return basePath; diff --git 
a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java index b077415f89af7..6d5c1bbf85310 100644 --- a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java @@ -36,9 +36,6 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.instanceOf; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.notNullValue; -import static org.hamcrest.Matchers.nullValue; public class GoogleCloudStorageBlobStoreRepositoryTests extends ESBlobStoreRepositoryIntegTestCase { @@ -66,13 +63,8 @@ protected void createTestRepository(String name, boolean verify) { } @Override - protected void afterCreationCheck(Repository repository, boolean verify) { + protected void afterCreationCheck(Repository repository) { assertThat(repository, instanceOf(GoogleCloudStorageRepository.class)); - - GoogleCloudStorageRepository gcsRepository = (GoogleCloudStorageRepository) repository; - - assertThat("gcs blob store has to be lazy initialized", - gcsRepository.getBlobStore(), verify ? 
is(notNullValue()) : is(nullValue())); } @AfterClass diff --git a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java index bc82713c21cc6..97285f9cecb0d 100644 --- a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java +++ b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java @@ -60,6 +60,8 @@ public final class HdfsRepository extends BlobStoreRepository { private final ByteSizeValue chunkSize; private final boolean compress; private final BlobPath basePath = BlobPath.cleanPath(); + private final URI uri; + private final String pathSetting; // buffer size passed to HDFS read/write methods // TODO: why 100KB? @@ -77,7 +79,7 @@ public HdfsRepository(RepositoryMetaData metadata, Environment environment, if (Strings.hasText(uriSetting) == false) { throw new IllegalArgumentException("No 'uri' defined for hdfs snapshot/restore"); } - URI uri = URI.create(uriSetting); + uri = URI.create(uriSetting); if ("hdfs".equalsIgnoreCase(uri.getScheme()) == false) { throw new IllegalArgumentException(String.format(Locale.ROOT, "Invalid scheme [%s] specified in uri [%s]; only 'hdfs' uri allowed for hdfs snapshot/restore", uri.getScheme(), uriSetting)); @@ -87,14 +89,11 @@ public HdfsRepository(RepositoryMetaData metadata, Environment environment, "Use 'path' option to specify a path [%s], not the uri [%s] for hdfs snapshot/restore", uri.getPath(), uriSetting)); } - String pathSetting = getMetadata().settings().get("path"); + pathSetting = getMetadata().settings().get("path"); // get configuration if (pathSetting == null) { throw new IllegalArgumentException("No 'path' defined for hdfs snapshot/restore"); } - - // initialize our blobstore using elevated privileges. 
- SpecialPermission.check(); } private HdfsBlobStore createBlobstore(URI uri, String path, Settings repositorySettings) { @@ -222,9 +221,8 @@ private static String getHostName() { @Override protected HdfsBlobStore createBlobStore() { - String uriSetting = getMetadata().settings().get("uri"); - URI uri = URI.create(uriSetting); - String pathSetting = getMetadata().settings().get("path"); + // initialize our blobstore using elevated privileges. + SpecialPermission.check(); final HdfsBlobStore blobStore = AccessController.doPrivileged((PrivilegedAction) () -> createBlobstore(uri, pathSetting, getMetadata().settings())); diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java index 86ea48cb5abce..ec60536f135b2 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java @@ -145,12 +145,24 @@ class S3Repository extends BlobStoreRepository { private final S3Service service; - private final BlobPath basePath; + private final String bucket; + + private final ByteSizeValue bufferSize; private final ByteSizeValue chunkSize; private final boolean compress; + private final BlobPath basePath; + + private final boolean serverSideEncryption; + + private final String storageClass; + + private final String cannedACL; + + private final String clientName; + /** * Constructs an s3 backed repository */ @@ -161,12 +173,13 @@ class S3Repository extends BlobStoreRepository { super(metadata, settings, namedXContentRegistry); this.service = service; - final String bucket = BUCKET_SETTING.get(metadata.settings()); + // Parse and validate the user's S3 Storage Class setting + this.bucket = BUCKET_SETTING.get(metadata.settings()); if (bucket == null) { throw new RepositoryException(metadata.name(), "No bucket defined for s3 
repository"); } - final ByteSizeValue bufferSize = BUFFER_SIZE_SETTING.get(metadata.settings()); + this.bufferSize = BUFFER_SIZE_SETTING.get(metadata.settings()); this.chunkSize = CHUNK_SIZE_SETTING.get(metadata.settings()); this.compress = COMPRESS_SETTING.get(metadata.settings()); @@ -183,6 +196,16 @@ class S3Repository extends BlobStoreRepository { this.basePath = BlobPath.cleanPath(); } + this.serverSideEncryption = SERVER_SIDE_ENCRYPTION_SETTING.get(metadata.settings()); + + this.storageClass = STORAGE_CLASS_SETTING.get(metadata.settings()); + this.cannedACL = CANNED_ACL_SETTING.get(metadata.settings()); + this.clientName = CLIENT_NAME.get(metadata.settings()); + + logger.debug("using bucket [{}], chunk_size [{}], server_side_encryption [{}], " + + "buffer_size [{}], cannedACL [{}], storageClass [{}]", + bucket, chunkSize, serverSideEncryption, bufferSize, cannedACL, storageClass); + // (repository settings) if (S3ClientSettings.checkDeprecatedCredentials(metadata.settings())) { overrideCredentialsFromClusterState(service); @@ -191,20 +214,6 @@ class S3Repository extends BlobStoreRepository { @Override protected S3BlobStore createBlobStore() { - final String bucket = BUCKET_SETTING.get(metadata.settings()); - - final boolean serverSideEncryption = SERVER_SIDE_ENCRYPTION_SETTING.get(metadata.settings()); - final ByteSizeValue bufferSize = BUFFER_SIZE_SETTING.get(metadata.settings()); - - // Parse and validate the user's S3 Storage Class setting - final String storageClass = STORAGE_CLASS_SETTING.get(metadata.settings()); - final String cannedACL = CANNED_ACL_SETTING.get(metadata.settings()); - final String clientName = CLIENT_NAME.get(metadata.settings()); - - logger.debug("using bucket [{}], chunk_size [{}], server_side_encryption [{}], " + - "buffer_size [{}], cannedACL [{}], storageClass [{}]", - bucket, chunkSize, serverSideEncryption, bufferSize, cannedACL, storageClass); - return new S3BlobStore(settings, service, clientName, bucket, serverSideEncryption, 
bufferSize, cannedACL, storageClass); } diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java index 18c5192b86086..744a27dc48e32 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java @@ -103,7 +103,7 @@ public void testRepositoryCredentialsOverrideSecureCredentials() throws IOExcept .put(S3Repository.SECRET_KEY_SETTING.getKey(), "insecure_aws_secret").build()); try (S3RepositoryPlugin s3Plugin = new ProxyS3RepositoryPlugin(settings); S3Repository s3repo = s3Plugin.createRepository(metadata, Settings.EMPTY, NamedXContentRegistry.EMPTY); - AmazonS3Reference s3Ref = ((S3BlobStore)s3repo.blobStore()).clientReference()) { + AmazonS3Reference s3Ref = ((S3BlobStore) s3repo.blobStore()).clientReference()) { final AWSCredentials credentials = ((ProxyS3RepositoryPlugin.ClientAndCredentials) s3Ref.client()).credentials.getCredentials(); assertThat(credentials.getAWSAccessKeyId(), is("insecure_aws_key")); assertThat(credentials.getAWSSecretKey(), is("insecure_aws_secret")); @@ -126,7 +126,7 @@ public void testRepositoryCredentialsOnly() throws IOException { .build()); try (S3RepositoryPlugin s3Plugin = new ProxyS3RepositoryPlugin(Settings.EMPTY); S3Repository s3repo = s3Plugin.createRepository(metadata, Settings.EMPTY, NamedXContentRegistry.EMPTY); - AmazonS3Reference s3Ref = ((S3BlobStore)s3repo.blobStore()).clientReference()) { + AmazonS3Reference s3Ref = ((S3BlobStore) s3repo.blobStore()).clientReference()) { final AWSCredentials credentials = ((ProxyS3RepositoryPlugin.ClientAndCredentials) s3Ref.client()).credentials.getCredentials(); assertThat(credentials.getAWSAccessKeyId(), is("insecure_aws_key")); assertThat(credentials.getAWSSecretKey(), 
is("insecure_aws_secret")); @@ -157,7 +157,7 @@ public void testReinitSecureCredentials() throws IOException { final RepositoryMetaData metadata = new RepositoryMetaData("dummy-repo", "mock", builder.build()); try (S3RepositoryPlugin s3Plugin = new ProxyS3RepositoryPlugin(settings); S3Repository s3repo = s3Plugin.createRepository(metadata, Settings.EMPTY, NamedXContentRegistry.EMPTY)) { - try (AmazonS3Reference s3Ref = ((S3BlobStore)s3repo.blobStore()).clientReference()) { + try (AmazonS3Reference s3Ref = ((S3BlobStore) s3repo.blobStore()).clientReference()) { final AWSCredentials credentials = ((ProxyS3RepositoryPlugin.ClientAndCredentials) s3Ref.client()).credentials .getCredentials(); if (repositorySettings) { @@ -184,7 +184,7 @@ public void testReinitSecureCredentials() throws IOException { } } // check credentials have been updated - try (AmazonS3Reference s3Ref = ((S3BlobStore)s3repo.blobStore()).clientReference()) { + try (AmazonS3Reference s3Ref = ((S3BlobStore) s3repo.blobStore()).clientReference()) { final AWSCredentials newCredentials = ((ProxyS3RepositoryPlugin.ClientAndCredentials) s3Ref.client()).credentials .getCredentials(); assertThat(newCredentials.getAWSAccessKeyId(), is("new_secret_aws_key")); diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java index f5e1886c251d2..51fc48dfb598c 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java @@ -52,10 +52,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.instanceOf; -import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; 
-import static org.hamcrest.Matchers.notNullValue; -import static org.hamcrest.Matchers.nullValue; import static org.mockito.Mockito.mock; public class S3BlobStoreRepositoryTests extends ESBlobStoreRepositoryIntegTestCase { @@ -104,13 +101,8 @@ protected void createTestRepository(final String name, boolean verify) { } @Override - protected void afterCreationCheck(Repository repository, boolean verify) { + protected void afterCreationCheck(Repository repository) { assertThat(repository, instanceOf(S3Repository.class)); - - S3Repository s3Repository = (S3Repository) repository; - - assertThat("s3 blob store has to be lazy initialized", - s3Repository.getBlobStore(), verify ? is(notNullValue()) : is(nullValue())); } @Override diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index 5cde5adc498ce..724fadda5aa91 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -34,6 +34,7 @@ import org.apache.lucene.store.RateLimiter; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; +import org.apache.lucene.util.SetOnce; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.ResourceNotFoundException; @@ -226,9 +227,9 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp private final Object lock = new Object(); - private volatile BlobContainer blobContainer; + private final SetOnce blobContainer = new SetOnce<>(); - private volatile BlobStore blobStore; + private final SetOnce blobStore = new SetOnce<>(); /** * Constructs new BlobStoreRepository @@ -269,7 +270,7 @@ protected void doStop() {} @Override protected void doClose() { - BlobStore store = blobStore; + BlobStore 
store = blobStore.get(); if (store != null) { try { store.close(); @@ -281,25 +282,25 @@ protected void doClose() { // package private, only use for testing BlobContainer getBlobContainer() { - return blobContainer; + return blobContainer.get(); } // for test purposes only protected BlobStore getBlobStore() { - return blobStore; + return blobStore.get(); } /** * maintains single lazy instance of {@link BlobContainer} */ protected BlobContainer blobContainer() { - BlobContainer blobContainer = this.blobContainer; + BlobContainer blobContainer = this.blobContainer.get(); if (blobContainer == null) { synchronized (lock) { - blobContainer = this.blobContainer; + blobContainer = this.blobContainer.get(); if (blobContainer == null) { blobContainer = blobStore().blobContainer(basePath()); - this.blobContainer = blobContainer; + this.blobContainer.set(blobContainer); } } } @@ -311,11 +312,14 @@ protected BlobContainer blobContainer() { * maintains single lazy instance of {@link BlobStore} */ protected BlobStore blobStore() { - BlobStore store = blobStore; + BlobStore store = blobStore.get(); if (store == null) { synchronized (lock) { - store = blobStore; + store = blobStore.get(); if (store == null) { + if (lifecycle.started() == false) { + throw new RepositoryException(metadata.name(), "repository is not in started state"); + } try { store = createBlobStore(); } catch (RepositoryException e) { @@ -323,7 +327,7 @@ protected BlobStore blobStore() { } catch (Exception e) { throw new RepositoryException(metadata.name(), "cannot create blob store" , e); } - blobStore = store; + blobStore.set(store); } } } diff --git a/server/src/main/java/org/elasticsearch/repositories/fs/FsRepository.java b/server/src/main/java/org/elasticsearch/repositories/fs/FsRepository.java index b8fd6654bf64c..643ff2bc93d3c 100644 --- a/server/src/main/java/org/elasticsearch/repositories/fs/FsRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/fs/FsRepository.java @@ -113,12 +113,6 
@@ protected BlobStore createBlobStore() throws Exception { return new FsBlobStore(settings, locationFile); } - // only use for testing - @Override - protected BlobStore getBlobStore() { - return super.getBlobStore(); - } - @Override protected boolean isCompress() { return compress; diff --git a/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryRestoreTests.java b/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryRestoreTests.java index 4924941ce9539..3d6c908aebe5c 100644 --- a/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryRestoreTests.java +++ b/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryRestoreTests.java @@ -176,12 +176,14 @@ public void testSnapshotWithConflictingName() throws IOException { private Repository createRepository() { Settings settings = Settings.builder().put("location", randomAlphaOfLength(10)).build(); RepositoryMetaData repositoryMetaData = new RepositoryMetaData(randomAlphaOfLength(10), FsRepository.TYPE, settings); - return new FsRepository(repositoryMetaData, createEnvironment(), xContentRegistry()) { + final FsRepository repository = new FsRepository(repositoryMetaData, createEnvironment(), xContentRegistry()) { @Override protected void verificationThreadCheck() { // eliminate thread name check as we create repo manually } }; + repository.start(); + return repository; } /** Create a {@link Environment} with random path.home and path.repo **/ diff --git a/server/src/test/java/org/elasticsearch/repositories/fs/FsBlobStoreRepositoryIT.java b/server/src/test/java/org/elasticsearch/repositories/fs/FsBlobStoreRepositoryIT.java index 2c0d0060ad782..1ed42cb24746b 100644 --- a/server/src/test/java/org/elasticsearch/repositories/fs/FsBlobStoreRepositoryIT.java +++ b/server/src/test/java/org/elasticsearch/repositories/fs/FsBlobStoreRepositoryIT.java @@ -25,9 +25,6 @@ import static 
org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.instanceOf; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.notNullValue; -import static org.hamcrest.Matchers.nullValue; public class FsBlobStoreRepositoryIT extends ESBlobStoreRepositoryIntegTestCase { @Override @@ -41,12 +38,7 @@ protected void createTestRepository(String name, boolean verify) { } @Override - protected void afterCreationCheck(Repository repository, boolean verify) { + protected void afterCreationCheck(Repository repository) { assertThat(repository, instanceOf(FsRepository.class)); - - FsRepository fsRepository = (FsRepository) repository; - - assertThat("fs blob store has to be lazy initialized", - fsRepository.getBlobStore(), verify ? is(notNullValue()) : is(nullValue())); } } diff --git a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESBlobStoreRepositoryIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESBlobStoreRepositoryIntegTestCase.java index cbe76d723fcaf..226ec63845a9f 100644 --- a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESBlobStoreRepositoryIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESBlobStoreRepositoryIntegTestCase.java @@ -44,6 +44,9 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; /** * Basic integration tests for blob-based repository validation. 
@@ -52,7 +55,7 @@ public abstract class ESBlobStoreRepositoryIntegTestCase extends ESIntegTestCase protected abstract void createTestRepository(String name, boolean verify); - protected void afterCreationCheck(Repository repository, boolean verify) { + protected void afterCreationCheck(Repository repository) { } @@ -60,12 +63,17 @@ protected void createAndCheckTestRepository(String name) { final boolean verify = randomBoolean(); createTestRepository(name, verify); - final RepositoriesService repositoriesService = - internalCluster().getDataOrMasterNodeInstances(RepositoriesService.class).iterator().next(); + final Iterable repositoriesServices = + internalCluster().getDataOrMasterNodeInstances(RepositoriesService.class); - final Repository repository = repositoriesService.repository(name); + for (RepositoriesService repositoriesService : repositoriesServices) { + final BlobStoreRepository repository = (BlobStoreRepository) repositoriesService.repository(name); + + afterCreationCheck(repository); + assertThat("blob store has to be lazy initialized", + repository.getBlobStore(), verify ? 
is(notNullValue()) : is(nullValue())); + } - afterCreationCheck(repository, verify); } public void testSnapshotAndRestore() throws Exception { From 3a322aa8e377b16dfa120b02444893d8c98908d0 Mon Sep 17 00:00:00 2001 From: Vladimir Dolzhenko Date: Mon, 9 Jul 2018 13:57:38 +0200 Subject: [PATCH 06/13] extend comment for r/o repo verification --- .../repositories/blobstore/BlobStoreRepository.java | 3 +++ 1 file changed, 3 insertions(+) diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index 724fadda5aa91..74bb9032d7f1c 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -615,7 +615,10 @@ public String startVerification() { verificationThreadCheck(); try { if (isReadOnly()) { + // TODO: add repository verification for read-only repositories + // It's readonly - so there is not much we can do here to verify it apart from trying to create blobStore() + // and check that it is accessible on the master blobStore(); return null; } else { From f84ec61ce184fd06f7bdff3cee54f49e0e69eca7 Mon Sep 17 00:00:00 2001 From: Vladimir Dolzhenko Date: Mon, 9 Jul 2018 15:17:37 +0200 Subject: [PATCH 07/13] added missed start --- .../s3/RepositoryCredentialsTests.java | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java index 744a27dc48e32..7db6671cf39c5 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java @@ -102,8 +102,8
@@ public void testRepositoryCredentialsOverrideSecureCredentials() throws IOExcept .put(S3Repository.ACCESS_KEY_SETTING.getKey(), "insecure_aws_key") .put(S3Repository.SECRET_KEY_SETTING.getKey(), "insecure_aws_secret").build()); try (S3RepositoryPlugin s3Plugin = new ProxyS3RepositoryPlugin(settings); - S3Repository s3repo = s3Plugin.createRepository(metadata, Settings.EMPTY, NamedXContentRegistry.EMPTY); - AmazonS3Reference s3Ref = ((S3BlobStore) s3repo.blobStore()).clientReference()) { + S3Repository s3repo = createAndStartRepository(metadata, s3Plugin); + AmazonS3Reference s3Ref = ((S3BlobStore) s3repo.blobStore()).clientReference()) { final AWSCredentials credentials = ((ProxyS3RepositoryPlugin.ClientAndCredentials) s3Ref.client()).credentials.getCredentials(); assertThat(credentials.getAWSAccessKeyId(), is("insecure_aws_key")); assertThat(credentials.getAWSSecretKey(), is("insecure_aws_secret")); @@ -125,8 +125,8 @@ public void testRepositoryCredentialsOnly() throws IOException { .put(S3Repository.SECRET_KEY_SETTING.getKey(), "insecure_aws_secret") .build()); try (S3RepositoryPlugin s3Plugin = new ProxyS3RepositoryPlugin(Settings.EMPTY); - S3Repository s3repo = s3Plugin.createRepository(metadata, Settings.EMPTY, NamedXContentRegistry.EMPTY); - AmazonS3Reference s3Ref = ((S3BlobStore) s3repo.blobStore()).clientReference()) { + S3Repository s3repo = createAndStartRepository(metadata, s3Plugin); + AmazonS3Reference s3Ref = ((S3BlobStore) s3repo.blobStore()).clientReference()) { final AWSCredentials credentials = ((ProxyS3RepositoryPlugin.ClientAndCredentials) s3Ref.client()).credentials.getCredentials(); assertThat(credentials.getAWSAccessKeyId(), is("insecure_aws_key")); assertThat(credentials.getAWSSecretKey(), is("insecure_aws_secret")); @@ -140,6 +140,12 @@ public void testRepositoryCredentialsOnly() throws IOException { + " See the breaking changes documentation for the next major version."); } + private S3Repository 
createAndStartRepository(RepositoryMetaData metadata, S3RepositoryPlugin s3Plugin) { + final S3Repository repository = s3Plugin.createRepository(metadata, Settings.EMPTY, NamedXContentRegistry.EMPTY); + repository.start(); + return repository; + } + public void testReinitSecureCredentials() throws IOException { final String clientName = randomFrom("default", "some_client"); // initial client node settings @@ -156,7 +162,7 @@ public void testReinitSecureCredentials() throws IOException { } final RepositoryMetaData metadata = new RepositoryMetaData("dummy-repo", "mock", builder.build()); try (S3RepositoryPlugin s3Plugin = new ProxyS3RepositoryPlugin(settings); - S3Repository s3repo = s3Plugin.createRepository(metadata, Settings.EMPTY, NamedXContentRegistry.EMPTY)) { + S3Repository s3repo = createAndStartRepository(metadata, s3Plugin)) { try (AmazonS3Reference s3Ref = ((S3BlobStore) s3repo.blobStore()).clientReference()) { final AWSCredentials credentials = ((ProxyS3RepositoryPlugin.ClientAndCredentials) s3Ref.client()).credentials .getCredentials(); From da5c07f9fdd12711f779f7a6a8fb6d3489e0396e Mon Sep 17 00:00:00 2001 From: Vladimir Dolzhenko Date: Tue, 10 Jul 2018 08:42:34 +0200 Subject: [PATCH 08/13] added missed start --- .../org/elasticsearch/repositories/s3/S3RepositoryTests.java | 1 + 1 file changed, 1 insertion(+) diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java index f1fc2d9d42b06..e023e35deb84d 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java @@ -120,6 +120,7 @@ public void testDefaultBufferSize() { final RepositoryMetaData metadata = new RepositoryMetaData("dummy-repo", "mock", Settings.EMPTY); try (S3Repository s3repo = new S3Repository(metadata, 
Settings.EMPTY, NamedXContentRegistry.EMPTY, new DummyS3Service())) { assertThat(s3repo.getBlobStore(), is(nullValue())); + s3repo.start(); final long defaultBufferSize = ((S3BlobStore)s3repo.blobStore()).bufferSizeInBytes(); assertThat(s3repo.getBlobStore(), not(nullValue())); assertThat(defaultBufferSize, Matchers.lessThanOrEqualTo(100L * 1024 * 1024)); From d25b248d55dc2f4fae209717e21200e934649ff5 Mon Sep 17 00:00:00 2001 From: Vladimir Dolzhenko Date: Tue, 10 Jul 2018 17:38:23 +0200 Subject: [PATCH 09/13] fix test; close blobStore if a concurrent blobStore initialization is started during close --- .../repositories/s3/S3RepositoryTests.java | 22 +++++++++++++------ .../blobstore/BlobStoreRepository.java | 13 +++++++---- 2 files changed, 24 insertions(+), 11 deletions(-) diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java index e023e35deb84d..df057b4ea13a6 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java @@ -76,23 +76,23 @@ public void testInvalidChunkBufferSizeSettings() { // chunk < buffer should fail final Settings s1 = bufferAndChunkSettings(10, 5); final Exception e1 = expectThrows(RepositoryException.class, - () -> new S3Repository(getRepositoryMetaData(s1), Settings.EMPTY, NamedXContentRegistry.EMPTY, new DummyS3Service())); + () -> createS3Repo(getRepositoryMetaData(s1))); assertThat(e1.getMessage(), containsString("chunk_size (5mb) can't be lower than buffer_size (10mb)")); // chunk > buffer should pass final Settings s2 = bufferAndChunkSettings(5, 10); - new S3Repository(getRepositoryMetaData(s2), Settings.EMPTY, NamedXContentRegistry.EMPTY, new DummyS3Service()).close(); + createS3Repo(getRepositoryMetaData(s2)).close(); // chunk = buffer should pass 
final Settings s3 = bufferAndChunkSettings(5, 5); - new S3Repository(getRepositoryMetaData(s3), Settings.EMPTY, NamedXContentRegistry.EMPTY, new DummyS3Service()).close(); + createS3Repo(getRepositoryMetaData(s3)).close(); // buffer < 5mb should fail final Settings s4 = bufferAndChunkSettings(4, 10); final IllegalArgumentException e2 = expectThrows(IllegalArgumentException.class, - () -> new S3Repository(getRepositoryMetaData(s4), Settings.EMPTY, NamedXContentRegistry.EMPTY, new DummyS3Service()) + () -> createS3Repo(getRepositoryMetaData(s4)) .close()); assertThat(e2.getMessage(), containsString("failed to parse value [4mb] for setting [buffer_size], must be >= [5mb]")); final Settings s5 = bufferAndChunkSettings(5, 6000000); final IllegalArgumentException e3 = expectThrows(IllegalArgumentException.class, - () -> new S3Repository(getRepositoryMetaData(s5), Settings.EMPTY, NamedXContentRegistry.EMPTY, new DummyS3Service()) + () -> createS3Repo(getRepositoryMetaData(s5)) .close()); assertThat(e3.getMessage(), containsString("failed to parse value [6000000mb] for setting [chunk_size], must be <= [5tb]")); } @@ -111,14 +111,14 @@ private RepositoryMetaData getRepositoryMetaData(Settings settings) { public void testBasePathSetting() { final RepositoryMetaData metadata = new RepositoryMetaData("dummy-repo", "mock", Settings.builder() .put(S3Repository.BASE_PATH_SETTING.getKey(), "foo/bar").build()); - try (S3Repository s3repo = new S3Repository(metadata, Settings.EMPTY, NamedXContentRegistry.EMPTY, new DummyS3Service())) { + try (S3Repository s3repo = createS3Repo(metadata)) { assertEquals("foo/bar/", s3repo.basePath().buildAsString()); } } public void testDefaultBufferSize() { final RepositoryMetaData metadata = new RepositoryMetaData("dummy-repo", "mock", Settings.EMPTY); - try (S3Repository s3repo = new S3Repository(metadata, Settings.EMPTY, NamedXContentRegistry.EMPTY, new DummyS3Service())) { + try (S3Repository s3repo = createS3Repo(metadata)) { 
assertThat(s3repo.getBlobStore(), is(nullValue())); s3repo.start(); final long defaultBufferSize = ((S3BlobStore)s3repo.blobStore()).bufferSizeInBytes(); @@ -127,4 +127,12 @@ public void testDefaultBufferSize() { assertThat(defaultBufferSize, Matchers.greaterThanOrEqualTo(5L * 1024 * 1024)); } } + + private S3Repository createS3Repo(RepositoryMetaData metadata) { + return new S3Repository(metadata, Settings.EMPTY, NamedXContentRegistry.EMPTY, new DummyS3Service()) { + @Override + protected void verificationThreadCheck() { + } + }; + } } diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index 74bb9032d7f1c..8a764ddf42e4d 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -270,7 +270,11 @@ protected void doStop() {} @Override protected void doClose() { - BlobStore store = blobStore.get(); + BlobStore store; + // to close blobStore if blobStore initialization is started during close + synchronized (lock) { + store = blobStore.get(); + } if (store != null) { try { store.close(); @@ -320,6 +324,7 @@ protected BlobStore blobStore() { if (lifecycle.started() == false) { throw new RepositoryException(metadata.name(), "repository is not in started state"); } + verificationThreadCheck(); try { store = createBlobStore(); } catch (RepositoryException e) { @@ -606,13 +611,13 @@ public long getRestoreThrottleTimeInNanos() { } protected void verificationThreadCheck() { - assert Thread.currentThread().getName().contains(ThreadPool.Names.SNAPSHOT) : - "Expected current thread [" + Thread.currentThread() + "] to be the snapshot thread."; + assert Thread.currentThread().getName().contains(ThreadPool.Names.SNAPSHOT) + || Thread.currentThread().getName().contains(ThreadPool.Names.GENERIC) : + "Expected 
current thread [" + Thread.currentThread() + "] to be the snapshot or generic thread."; } @Override public String startVerification() { - verificationThreadCheck(); try { if (isReadOnly()) { // TODO: add repository verification for read-only repositories From b2ac9d85f35cccbc22fbf11b40ca93a8a4ecd31a Mon Sep 17 00:00:00 2001 From: Vladimir Dolzhenko Date: Wed, 11 Jul 2018 13:14:22 +0200 Subject: [PATCH 10/13] fix thread check for unit test --- .../repositories/url/URLRepositoryTests.java | 21 ++++++++++++------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/modules/repository-url/src/test/java/org/elasticsearch/repositories/url/URLRepositoryTests.java b/modules/repository-url/src/test/java/org/elasticsearch/repositories/url/URLRepositoryTests.java index d9f1eea67537c..fd287dabdd8d6 100644 --- a/modules/repository-url/src/test/java/org/elasticsearch/repositories/url/URLRepositoryTests.java +++ b/modules/repository-url/src/test/java/org/elasticsearch/repositories/url/URLRepositoryTests.java @@ -37,6 +37,15 @@ public class URLRepositoryTests extends ESTestCase { + private URLRepository createRepository(Settings baseSettings, RepositoryMetaData repositoryMetaData) { + return new URLRepository(repositoryMetaData, TestEnvironment.newEnvironment(baseSettings), + new NamedXContentRegistry(Collections.emptyList())) { + @Override + protected void verificationThreadCheck() { + } + }; + } + public void testWhiteListingRepoURL() throws IOException { String repoPath = createTempDir().resolve("repository").toUri().toURL().toString(); Settings baseSettings = Settings.builder() @@ -45,8 +54,7 @@ public void testWhiteListingRepoURL() throws IOException { .put(URLRepository.REPOSITORIES_URL_SETTING.getKey(), repoPath) .build(); RepositoryMetaData repositoryMetaData = new RepositoryMetaData("url", URLRepository.TYPE, baseSettings); - final URLRepository repository = new URLRepository(repositoryMetaData, TestEnvironment.newEnvironment(baseSettings), - new 
NamedXContentRegistry(Collections.emptyList())); + final URLRepository repository = createRepository(baseSettings, repositoryMetaData); repository.start(); assertThat("blob store has to be lazy initialized", repository.getBlobStore(), is(nullValue())); @@ -61,8 +69,7 @@ public void testIfNotWhiteListedMustSetRepoURL() throws IOException { .put(URLRepository.REPOSITORIES_URL_SETTING.getKey(), repoPath) .build(); RepositoryMetaData repositoryMetaData = new RepositoryMetaData("url", URLRepository.TYPE, baseSettings); - final URLRepository repository = new URLRepository(repositoryMetaData, TestEnvironment.newEnvironment(baseSettings), - new NamedXContentRegistry(Collections.emptyList())); + final URLRepository repository = createRepository(baseSettings, repositoryMetaData); repository.start(); try { repository.blobContainer(); @@ -84,8 +91,7 @@ public void testMustBeSupportedProtocol() throws IOException { .put(URLRepository.SUPPORTED_PROTOCOLS_SETTING.getKey(), "http,https") .build(); RepositoryMetaData repositoryMetaData = new RepositoryMetaData("url", URLRepository.TYPE, baseSettings); - final URLRepository repository = new URLRepository(repositoryMetaData, TestEnvironment.newEnvironment(baseSettings), - new NamedXContentRegistry(Collections.emptyList())); + final URLRepository repository = createRepository(baseSettings, repositoryMetaData); repository.start(); try { repository.blobContainer(); @@ -102,8 +108,7 @@ public void testNonNormalizedUrl() throws IOException { .put(URLRepository.REPOSITORIES_URL_SETTING.getKey(), "file:/var/" ) .build(); RepositoryMetaData repositoryMetaData = new RepositoryMetaData("url", URLRepository.TYPE, baseSettings); - final URLRepository repository = new URLRepository(repositoryMetaData, TestEnvironment.newEnvironment(baseSettings), - new NamedXContentRegistry(Collections.emptyList())); + final URLRepository repository = createRepository(baseSettings, repositoryMetaData); repository.start(); try { repository.blobContainer(); From 
c6b836a7a535fb6a53e9475b184ebb362698f23b Mon Sep 17 00:00:00 2001 From: Vladimir Dolzhenko Date: Wed, 11 Jul 2018 15:00:51 +0200 Subject: [PATCH 11/13] fix thread check for unit test --- .../repositories/s3/S3RepositoryPlugin.java | 2 +- .../repositories/s3/RepositoryCredentialsTests.java | 10 ++++++++++ 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java index f07724e765832..57c03ba590e48 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java @@ -61,7 +61,7 @@ public class S3RepositoryPlugin extends Plugin implements RepositoryPlugin, Relo }); } - private final S3Service service; + protected final S3Service service; public S3RepositoryPlugin(final Settings settings) { this(settings, new S3Service(settings)); diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java index 7db6671cf39c5..2863921f3f6c6 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java @@ -80,6 +80,16 @@ AmazonS3 buildClient(AWSCredentialsProvider credentials, ClientConfiguration con ProxyS3RepositoryPlugin(Settings settings) { super(settings, new ProxyS3Service(settings)); } + + @Override + protected S3Repository createRepository(RepositoryMetaData metadata, Settings settings, NamedXContentRegistry registry) { + return new S3Repository(metadata, settings, registry, service){ + @Override + protected void 
verificationThreadCheck() { + + } + }; + } } public void testRepositoryCredentialsOverrideSecureCredentials() throws IOException { From 8dfa60d218accad78501952d9e1e41e92cb3e4d2 Mon Sep 17 00:00:00 2001 From: Vladimir Dolzhenko Date: Thu, 12 Jul 2018 17:20:31 +0200 Subject: [PATCH 12/13] enforcing access to blobStore / blobContainer only to snapshot and generic threads --- .../repositories/RepositoriesService.java | 38 ++++++++++--------- .../blobstore/BlobStoreRepository.java | 5 ++- .../blobstore/BlobStoreRepositoryTests.java | 31 ++++++++++++++- .../SharedClusterSnapshotRestoreIT.java | 22 +++++++++-- .../ESBlobStoreRepositoryIntegTestCase.java | 23 ++++++++--- 5 files changed, 91 insertions(+), 28 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java b/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java index 0bade8cdc8741..c6cbaa50cdf02 100644 --- a/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java +++ b/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java @@ -221,15 +221,17 @@ public void verifyRepository(final String repositoryName, final ActionListener() { @Override public void onResponse(VerifyResponse verifyResponse) { - try { - repository.endVerification(verificationToken); - } catch (Exception e) { - logger.warn(() -> new ParameterizedMessage( - "[{}] failed to finish repository verification", repositoryName), e); - listener.onFailure(e); - return; - } - listener.onResponse(verifyResponse); + threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(() -> { + try { + repository.endVerification(verificationToken); + } catch (Exception e) { + logger.warn(() -> new ParameterizedMessage( + "[{}] failed to finish repository verification", repositoryName), e); + listener.onFailure(e); + return; + } + listener.onResponse(verifyResponse); + }); } @Override @@ -238,14 +240,16 @@ public void onFailure(Exception e) { } }); } catch (Exception e) 
{ - try { - repository.endVerification(verificationToken); - } catch (Exception inner) { - inner.addSuppressed(e); - logger.warn(() -> new ParameterizedMessage( - "[{}] failed to finish repository verification", repositoryName), inner); - } - listener.onFailure(e); + threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(() -> { + try { + repository.endVerification(verificationToken); + } catch (Exception inner) { + inner.addSuppressed(e); + logger.warn(() -> new ParameterizedMessage( + "[{}] failed to finish repository verification", repositoryName), inner); + } + listener.onFailure(e); + }); } } else { listener.onResponse(new VerifyResponse(new DiscoveryNode[0], new VerificationFailure[0])); diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index 8a764ddf42e4d..37f58e9172d4a 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -298,6 +298,8 @@ protected BlobStore getBlobStore() { * maintains single lazy instance of {@link BlobContainer} */ protected BlobContainer blobContainer() { + verificationThreadCheck(); + BlobContainer blobContainer = this.blobContainer.get(); if (blobContainer == null) { synchronized (lock) { @@ -316,6 +318,8 @@ protected BlobContainer blobContainer() { * maintains single lazy instance of {@link BlobStore} */ protected BlobStore blobStore() { + verificationThreadCheck(); + BlobStore store = blobStore.get(); if (store == null) { synchronized (lock) { @@ -324,7 +328,6 @@ protected BlobStore blobStore() { if (lifecycle.started() == false) { throw new RepositoryException(metadata.name(), "repository is not in started state"); } - verificationThreadCheck(); try { store = createBlobStore(); } catch (RepositoryException e) { diff --git 
a/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryTests.java b/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryTests.java index 9c14a9778760f..9f9bf18956e29 100644 --- a/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryTests.java +++ b/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryTests.java @@ -24,10 +24,16 @@ import org.elasticsearch.client.Client; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.env.Environment; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.RepositoryPlugin; import org.elasticsearch.repositories.IndexId; import org.elasticsearch.repositories.RepositoriesService; +import org.elasticsearch.repositories.Repository; import org.elasticsearch.repositories.RepositoryData; import org.elasticsearch.repositories.RepositoryException; +import org.elasticsearch.repositories.fs.FsRepository; import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.snapshots.SnapshotState; import org.elasticsearch.test.ESIntegTestCase; @@ -37,8 +43,10 @@ import java.nio.file.Path; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.List; +import java.util.Map; import java.util.stream.Collectors; import static org.elasticsearch.repositories.RepositoryDataTests.generateRandomRepoData; @@ -50,6 +58,25 @@ */ public class BlobStoreRepositoryTests extends ESSingleNodeTestCase { + static final String REPO_TYPE = "fsLike"; + + protected Collection> getPlugins() { + return Arrays.asList(FsLikeRepoPlugin.class); + } + + public static class FsLikeRepoPlugin extends org.elasticsearch.plugins.Plugin implements RepositoryPlugin { + + @Override + public Map getRepositories(Environment env, NamedXContentRegistry 
namedXContentRegistry) { + return Collections.singletonMap(REPO_TYPE, + (metadata) -> new FsRepository(metadata, env, namedXContentRegistry) { + @Override + protected void verificationThreadCheck() { + } + }); + } + } + public void testRetrieveSnapshots() throws Exception { final Client client = client(); final Path location = ESIntegTestCase.randomRepoPath(node().settings()); @@ -58,7 +85,7 @@ public void testRetrieveSnapshots() throws Exception { logger.info("--> creating repository"); PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository(repositoryName) - .setType("fs") + .setType(REPO_TYPE) .setSettings(Settings.builder().put(node().settings()).put("location", location)) .get(); assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); @@ -210,7 +237,7 @@ private BlobStoreRepository setupRepo() { PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository(repositoryName) - .setType("fs") + .setType(REPO_TYPE) .setSettings(Settings.builder().put(node().settings()).put("location", location)) .get(); assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); diff --git a/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java b/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java index 3899bfcbec327..d2954a4c128ba 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java @@ -19,6 +19,7 @@ package org.elasticsearch.snapshots; +import org.apache.lucene.util.SetOnce; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.action.ActionFuture; @@ -93,6 +94,7 @@ import org.elasticsearch.script.StoredScriptsIT; import org.elasticsearch.snapshots.mockstore.MockRepository; import org.elasticsearch.test.junit.annotations.TestLogging; +import 
org.elasticsearch.threadpool.ThreadPool; import java.io.IOException; import java.nio.channels.SeekableByteChannel; @@ -1262,7 +1264,7 @@ public void testDeleteSnapshotWithMissingIndexAndShardMetadata() throws Exceptio RepositoriesService service = internalCluster().getInstance(RepositoriesService.class, internalCluster().getMasterName()); Repository repository = service.repository("test-repo"); - final Map indexIds = repository.getRepositoryData().getIndices(); + final Map indexIds = getRepositoryData(repository).getIndices(); final Path indicesPath = repo.resolve("indices"); logger.info("--> delete index metadata and shard metadata"); @@ -2564,7 +2566,7 @@ public void testDeleteOrphanSnapshot() throws Exception { logger.info("--> emulate an orphan snapshot"); RepositoriesService repositoriesService = internalCluster().getInstance(RepositoriesService.class, internalCluster().getMasterName()); - final RepositoryData repositoryData = repositoriesService.repository(repositoryName).getRepositoryData(); + final RepositoryData repositoryData = getRepositoryData(repositoriesService.repository(repositoryName)); final IndexId indexId = repositoryData.resolveIndexId(idxName); clusterService.submitStateUpdateTask("orphan snapshot test", new ClusterStateUpdateTask() { @@ -2785,7 +2787,8 @@ public void testRestoreSnapshotWithCorruptedIndexMetadata() throws Exception { RepositoriesService service = internalCluster().getInstance(RepositoriesService.class, internalCluster().getMasterName()); Repository repository = service.repository("test-repo"); - final Map indexIds = repository.getRepositoryData().getIndices(); + final RepositoryData repositoryData = getRepositoryData(repository); + final Map indexIds = repositoryData.getIndices(); assertThat(indexIds.size(), equalTo(nbIndices)); // Choose a random index from the snapshot @@ -3446,6 +3449,19 @@ public void testAbortedSnapshotDuringInitDoesNotStart() throws Exception { } } + private RepositoryData getRepositoryData(Repository 
repository) throws InterruptedException { + ThreadPool threadPool = internalCluster().getInstance(ThreadPool.class, internalCluster().getMasterName()); + final SetOnce repositoryData = new SetOnce<>(); + final CountDownLatch latch = new CountDownLatch(1); + threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(() -> { + repositoryData.set(repository.getRepositoryData()); + latch.countDown(); + }); + + latch.await(); + return repositoryData.get(); + } + private void verifySnapshotInfo(final GetSnapshotsResponse response, final Map> indicesPerSnapshot) { for (SnapshotInfo snapshotInfo : response.getSnapshots()) { final List expected = snapshotInfo.indices(); diff --git a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESBlobStoreRepositoryIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESBlobStoreRepositoryIntegTestCase.java index 226ec63845a9f..439728bac9ea6 100644 --- a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESBlobStoreRepositoryIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESBlobStoreRepositoryIntegTestCase.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.repositories.blobstore; +import org.apache.lucene.util.SetOnce; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotRequestBuilder; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequestBuilder; @@ -32,12 +33,14 @@ import org.elasticsearch.snapshots.SnapshotMissingException; import org.elasticsearch.snapshots.SnapshotRestoreException; import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.threadpool.ThreadPool; import java.util.Arrays; import java.util.HashSet; import java.util.List; import java.util.Locale; import java.util.Set; +import java.util.concurrent.CountDownLatch; import 
java.util.concurrent.ExecutionException; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; @@ -202,7 +205,7 @@ public void testMultipleSnapshotAndRollback() throws Exception { } } - public void testIndicesDeletedFromRepository() { + public void testIndicesDeletedFromRepository() throws Exception { Client client = client(); logger.info("--> creating repository"); @@ -244,12 +247,22 @@ public void testIndicesDeletedFromRepository() { logger.info("--> verify index folder deleted from blob container"); RepositoriesService repositoriesSvc = internalCluster().getInstance(RepositoriesService.class, internalCluster().getMasterName()); + ThreadPool threadPool = internalCluster().getInstance(ThreadPool.class, internalCluster().getMasterName()); @SuppressWarnings("unchecked") BlobStoreRepository repository = (BlobStoreRepository) repositoriesSvc.repository(repoName); - BlobContainer indicesBlobContainer = repository.blobStore().blobContainer(repository.basePath().add("indices")); - RepositoryData repositoryData = repository.getRepositoryData(); - for (IndexId indexId : repositoryData.getIndices().values()) { + + final SetOnce indicesBlobContainer = new SetOnce<>(); + final SetOnce repositoryData = new SetOnce<>(); + final CountDownLatch latch = new CountDownLatch(1); + threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(() -> { + indicesBlobContainer.set(repository.blobStore().blobContainer(repository.basePath().add("indices"))); + repositoryData.set(repository.getRepositoryData()); + latch.countDown(); + }); + + latch.await(); + for (IndexId indexId : repositoryData.get().getIndices().values()) { if (indexId.getName().equals("test-idx-3")) { - assertFalse(indicesBlobContainer.blobExists(indexId.getId())); // deleted index + assertFalse(indicesBlobContainer.get().blobExists(indexId.getId())); // deleted index } } } From ae3ee247e9d4bced811523a28651cf533c658c5f Mon Sep 17 00:00:00 2001 From: Vladimir Dolzhenko Date: Fri, 13 Jul 2018 18:00:42 
+0200 Subject: [PATCH 13/13] verifyBlobInnersAccess renamed to assertSnapshotOrGenericThread; added couple comments for assertSnapshotOrGenericThread and the reason of FsLikeRepoPlugin in BlobStoreRepositoryTests.java --- .../repositories/url/URLRepositoryTests.java | 3 ++- .../repositories/s3/RepositoryCredentialsTests.java | 4 ++-- .../elasticsearch/repositories/s3/S3RepositoryTests.java | 3 ++- .../repositories/blobstore/BlobStoreRepository.java | 8 ++++---- .../blobstore/BlobStoreRepositoryRestoreTests.java | 2 +- .../repositories/blobstore/BlobStoreRepositoryTests.java | 4 +++- 6 files changed, 14 insertions(+), 10 deletions(-) diff --git a/modules/repository-url/src/test/java/org/elasticsearch/repositories/url/URLRepositoryTests.java b/modules/repository-url/src/test/java/org/elasticsearch/repositories/url/URLRepositoryTests.java index fd287dabdd8d6..2de4c132673db 100644 --- a/modules/repository-url/src/test/java/org/elasticsearch/repositories/url/URLRepositoryTests.java +++ b/modules/repository-url/src/test/java/org/elasticsearch/repositories/url/URLRepositoryTests.java @@ -41,7 +41,8 @@ private URLRepository createRepository(Settings baseSettings, RepositoryMetaData return new URLRepository(repositoryMetaData, TestEnvironment.newEnvironment(baseSettings), new NamedXContentRegistry(Collections.emptyList())) { @Override - protected void verificationThreadCheck() { + protected void assertSnapshotOrGenericThread() { + // eliminate thread name check as we create repo manually on test/main threads } }; } diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java index 2863921f3f6c6..7eb603b4b78e5 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java +++ 
b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java @@ -85,8 +85,8 @@ AmazonS3 buildClient(AWSCredentialsProvider credentials, ClientConfiguration con protected S3Repository createRepository(RepositoryMetaData metadata, Settings settings, NamedXContentRegistry registry) { return new S3Repository(metadata, settings, registry, service){ @Override - protected void verificationThreadCheck() { - + protected void assertSnapshotOrGenericThread() { + // eliminate thread name check as we create repo manually on test/main threads } }; } diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java index df057b4ea13a6..dcc46661bef61 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java @@ -131,7 +131,8 @@ public void testDefaultBufferSize() { private S3Repository createS3Repo(RepositoryMetaData metadata) { return new S3Repository(metadata, Settings.EMPTY, NamedXContentRegistry.EMPTY, new DummyS3Service()) { @Override - protected void verificationThreadCheck() { + protected void assertSnapshotOrGenericThread() { + // eliminate thread name check as we create repo manually on test/main threads } }; } diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index 37f58e9172d4a..22743e38839d5 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -298,7 +298,7 @@ protected BlobStore getBlobStore() { * maintains single lazy instance of {@link BlobContainer} */ protected 
BlobContainer blobContainer() { - verificationThreadCheck(); + assertSnapshotOrGenericThread(); BlobContainer blobContainer = this.blobContainer.get(); if (blobContainer == null) { @@ -318,7 +318,7 @@ protected BlobContainer blobContainer() { * maintains single lazy instance of {@link BlobStore} */ protected BlobStore blobStore() { - verificationThreadCheck(); + assertSnapshotOrGenericThread(); BlobStore store = blobStore.get(); if (store == null) { @@ -613,7 +613,7 @@ public long getRestoreThrottleTimeInNanos() { return restoreRateLimitingTimeInNanos.count(); } - protected void verificationThreadCheck() { + protected void assertSnapshotOrGenericThread() { assert Thread.currentThread().getName().contains(ThreadPool.Names.SNAPSHOT) || Thread.currentThread().getName().contains(ThreadPool.Names.GENERIC) : "Expected current thread [" + Thread.currentThread() + "] to be the snapshot or generic thread."; @@ -881,7 +881,7 @@ public IndexShardSnapshotStatus getShardSnapshotStatus(SnapshotId snapshotId, Ve @Override public void verify(String seed, DiscoveryNode localNode) { - verificationThreadCheck(); + assertSnapshotOrGenericThread(); BlobContainer testBlobContainer = blobStore().blobContainer(basePath().add(testBlobPrefix(seed))); if (testBlobContainer.blobExists("master.dat")) { try { diff --git a/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryRestoreTests.java b/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryRestoreTests.java index 3d6c908aebe5c..0eae9a1420068 100644 --- a/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryRestoreTests.java +++ b/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryRestoreTests.java @@ -178,7 +178,7 @@ private Repository createRepository() { RepositoryMetaData repositoryMetaData = new RepositoryMetaData(randomAlphaOfLength(10), FsRepository.TYPE, settings); final FsRepository repository = new 
FsRepository(repositoryMetaData, createEnvironment(), xContentRegistry()) { @Override - protected void verificationThreadCheck() { + protected void assertSnapshotOrGenericThread() { // eliminate thread name check as we create repo manually } }; diff --git a/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryTests.java b/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryTests.java index 9f9bf18956e29..1abdb97f174b6 100644 --- a/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryTests.java +++ b/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryTests.java @@ -64,6 +64,7 @@ protected Collection> getPlugins() { return Arrays.asList(FsLikeRepoPlugin.class); } + // the reason for this plug-in is to drop any assertSnapshotOrGenericThread as mostly all access in this test goes from test threads public static class FsLikeRepoPlugin extends org.elasticsearch.plugins.Plugin implements RepositoryPlugin { @Override @@ -71,7 +72,8 @@ public Map getRepositories(Environment env, NamedXCo return Collections.singletonMap(REPO_TYPE, (metadata) -> new FsRepository(metadata, env, namedXContentRegistry) { @Override - protected void verificationThreadCheck() { + protected void assertSnapshotOrGenericThread() { + // eliminate thread name check as we access blobStore on test/main threads } }); }