 import java.util.stream.Stream;
 
 import static org.elasticsearch.core.Strings.format;
+import static org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot.FileInfo;
 import static org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot.FileInfo.canonicalName;
 
 /**
@@ -376,6 +377,8 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
      */
     private final int maxSnapshotCount;
 
+    private final ShardSnapshotTaskRunner shardSnapshotTaskRunner;
+
     /**
      * Constructs new BlobStoreRepository
      * @param metadata The metadata for this repository including name and settings
@@ -405,6 +408,12 @@ protected BlobStoreRepository(
         this.basePath = basePath;
         this.maxSnapshotCount = MAX_SNAPSHOTS_SETTING.get(metadata.settings());
         this.repoDataDeduplicator = new ResultDeduplicator<>(threadPool.getThreadContext());
+        shardSnapshotTaskRunner = new ShardSnapshotTaskRunner(
+            threadPool.info(ThreadPool.Names.SNAPSHOT).getMax(),
+            threadPool.executor(ThreadPool.Names.SNAPSHOT),
+            this::doSnapshotShard,
+            this::snapshotFile
+        );
     }
 
     @Override
@@ -2629,6 +2638,10 @@ private void writeAtomic(
 
     @Override
     public void snapshotShard(SnapshotShardContext context) {
+        shardSnapshotTaskRunner.enqueueShardSnapshot(context);
+    }
+
+    private void doSnapshotShard(SnapshotShardContext context) {
         if (isReadOnly()) {
             context.onFailure(new RepositoryException(metadata.name(), "cannot snapshot shard on a readonly repository"));
             return;
@@ -2889,45 +2902,19 @@ public void snapshotShard(SnapshotShardContext context) {
                 snapshotStatus.moveToDone(threadPool.absoluteTimeInMillis(), shardSnapshotResult);
                 context.onResponse(shardSnapshotResult);
             }, context::onFailure);
-            if (indexIncrementalFileCount == 0) {
+            if (indexIncrementalFileCount == 0 || filesToSnapshot.isEmpty()) {
                 allFilesUploadedListener.onResponse(Collections.emptyList());
                 return;
             }
-            final Executor executor = threadPool.executor(ThreadPool.Names.SNAPSHOT);
-            // Start as many workers as fit into the snapshot pool at once at the most
-            final int workers = Math.min(threadPool.info(ThreadPool.Names.SNAPSHOT).getMax(), indexIncrementalFileCount);
-            final ActionListener<Void> filesListener = fileQueueListener(filesToSnapshot, workers, allFilesUploadedListener);
-            for (int i = 0; i < workers; ++i) {
-                executeOneFileSnapshot(store, snapshotId, context.indexId(), snapshotStatus, filesToSnapshot, executor, filesListener);
+            final ActionListener<Void> filesListener = fileQueueListener(filesToSnapshot, filesToSnapshot.size(), allFilesUploadedListener);
+            for (FileInfo fileInfo : filesToSnapshot) {
+                shardSnapshotTaskRunner.enqueueFileSnapshot(context, fileInfo, filesListener);
             }
         } catch (Exception e) {
             context.onFailure(e);
         }
     }
 
-    private void executeOneFileSnapshot(
-        Store store,
-        SnapshotId snapshotId,
-        IndexId indexId,
-        IndexShardSnapshotStatus snapshotStatus,
-        BlockingQueue<BlobStoreIndexShardSnapshot.FileInfo> filesToSnapshot,
-        Executor executor,
-        ActionListener<Void> listener
-    ) throws InterruptedException {
-        final ShardId shardId = store.shardId();
-        final BlobStoreIndexShardSnapshot.FileInfo snapshotFileInfo = filesToSnapshot.poll(0L, TimeUnit.MILLISECONDS);
-        if (snapshotFileInfo == null) {
-            listener.onResponse(null);
-        } else {
-            executor.execute(ActionRunnable.wrap(listener, l -> {
-                try (Releasable ignored = incrementStoreRef(store, snapshotStatus, shardId)) {
-                    snapshotFile(snapshotFileInfo, indexId, shardId, snapshotId, snapshotStatus, store);
-                    executeOneFileSnapshot(store, snapshotId, indexId, snapshotStatus, filesToSnapshot, executor, l);
-                }
-            }));
-        }
-    }
-
     private static Releasable incrementStoreRef(Store store, IndexShardSnapshotStatus snapshotStatus, ShardId shardId) {
         if (store.tryIncRef() == false) {
             if (snapshotStatus.isAborted()) {
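Taken together with the constructor change further up, these hunks replace the fixed pool of per-shard worker loops with one shared runner: snapshotShard only enqueues the shard, doSnapshotShard runs as a shard-level task, and every file upload is enqueued as its own file-level task. As a rough mental model only (the real ShardSnapshotTaskRunner introduced by this commit works with SnapshotShardContext, FileInfo and ActionListener rather than bare Runnables), a bounded runner over the SNAPSHOT pool could look like the sketch below; the class name, the Runnable-based API and the file-before-shard preference are illustrative assumptions, not the committed implementation.

import java.util.ArrayDeque;
import java.util.Queue;
import java.util.concurrent.Executor;

// Illustrative sketch only -- not the ShardSnapshotTaskRunner added by this commit.
// It shows the general shape: concurrency is capped at the snapshot pool size, and
// queued file-level tasks are preferred so shards that already started uploading
// can finish before new shard-level work begins (an assumption about the intent).
final class ThrottledSnapshotTaskRunnerSketch {
    private final int maxRunningTasks;                  // e.g. SNAPSHOT pool max
    private final Executor executor;                    // e.g. the SNAPSHOT executor
    private final Queue<Runnable> fileTasks = new ArrayDeque<>();
    private final Queue<Runnable> shardTasks = new ArrayDeque<>();
    private int runningTasks;

    ThrottledSnapshotTaskRunnerSketch(int maxRunningTasks, Executor executor) {
        this.maxRunningTasks = maxRunningTasks;
        this.executor = executor;
    }

    synchronized void enqueueShardSnapshot(Runnable shardTask) {
        shardTasks.add(shardTask);
        pollAndSpawn();
    }

    synchronized void enqueueFileSnapshot(Runnable fileTask) {
        fileTasks.add(fileTask);
        pollAndSpawn();
    }

    // Caller must hold the monitor.
    private void pollAndSpawn() {
        while (runningTasks < maxRunningTasks) {
            Runnable task = fileTasks.poll();           // file tasks first
            if (task == null) {
                task = shardTasks.poll();
            }
            if (task == null) {
                return;                                 // nothing queued
            }
            runningTasks++;
            final Runnable toRun = task;
            executor.execute(() -> {
                try {
                    toRun.run();
                } finally {
                    onTaskCompleted();                  // frees a slot, pulls the next task
                }
            });
        }
    }

    private synchronized void onTaskCompleted() {
        runningTasks--;
        pollAndSpawn();
    }
}

Read against this model, the constructor wiring above amounts to: allow at most threadPool.info(SNAPSHOT).getMax() concurrent tasks, run them on the SNAPSHOT executor, and use doSnapshotShard and snapshotFile as the shard-level and file-level task bodies.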
@@ -3116,10 +3103,10 @@ void ensureNotClosing(final Store store) throws AlreadyClosedException {
 
     private static ActionListener<Void> fileQueueListener(
         BlockingQueue<BlobStoreIndexShardSnapshot.FileInfo> files,
-        int workers,
+        int numberOfFiles,
         ActionListener<Collection<Void>> listener
     ) {
-        return new GroupedActionListener<>(listener, workers).delegateResponse((l, e) -> {
+        return new GroupedActionListener<>(listener, numberOfFiles).delegateResponse((l, e) -> {
             files.clear(); // Stop uploading the remaining files if we run into any exception
             l.onFailure(e);
         });
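With the worker loop gone, the grouped listener has to count one completion per file rather than one per worker, which is why fileQueueListener now takes numberOfFiles. As a minimal stand-in for that counting behaviour (GroupedActionListener is Elasticsearch's real utility; this tiny class only illustrates why the group size changed), one could picture:

import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;

// Illustrative stand-in for "complete after N successes, fail fast once".
// With one task per file, N must be the number of files to snapshot, not the
// number of workers that used to drain the queue.
final class FileUploadBarrier {
    private final AtomicInteger remaining;
    private final AtomicBoolean failed = new AtomicBoolean();
    private final Runnable onAllUploaded;

    FileUploadBarrier(int numberOfFiles, Runnable onAllUploaded) {
        this.remaining = new AtomicInteger(numberOfFiles);
        this.onAllUploaded = onAllUploaded;
    }

    void onFileUploaded() {
        if (remaining.decrementAndGet() == 0 && failed.get() == false) {
            onAllUploaded.run();    // every enqueued file finished
        }
    }

    void onFailure(Exception e) {
        failed.compareAndSet(false, true);
        // the real listener also clears the remaining file queue so queued uploads stop
    }
}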
@@ -3426,19 +3413,20 @@ private Tuple<BlobStoreIndexShardSnapshots, Long> buildBlobStoreIndexShardSnapsh
 
     /**
      * Snapshot individual file
-     * @param fileInfo file to be snapshotted
+     * @param fileInfo file to snapshot
      */
-    private void snapshotFile(
-        BlobStoreIndexShardSnapshot.FileInfo fileInfo,
-        IndexId indexId,
-        ShardId shardId,
-        SnapshotId snapshotId,
-        IndexShardSnapshotStatus snapshotStatus,
-        Store store
-    ) throws IOException {
+    private void snapshotFile(SnapshotShardContext context, FileInfo fileInfo) throws IOException {
+        final IndexId indexId = context.indexId();
+        final Store store = context.store();
+        final ShardId shardId = store.shardId();
+        final IndexShardSnapshotStatus snapshotStatus = context.status();
+        final SnapshotId snapshotId = context.snapshotId();
         final BlobContainer shardContainer = shardContainer(indexId, shardId);
         final String file = fileInfo.physicalName();
-        try (IndexInput indexInput = store.openVerifyingInput(file, IOContext.READONCE, fileInfo.metadata())) {
+        try (
+            Releasable ignored = BlobStoreRepository.incrementStoreRef(store, snapshotStatus, store.shardId());
+            IndexInput indexInput = store.openVerifyingInput(file, IOContext.READONCE, fileInfo.metadata())
+        ) {
             for (int i = 0; i < fileInfo.numberOfParts(); i++) {
                 final long partBytes = fileInfo.partBytes(i);
 
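The other notable change in this hunk is that the store reference is now acquired inside snapshotFile itself, alongside the verifying input, rather than by the worker loop that used to wrap the call. Since each file upload is now an independently scheduled task, it must hold its own reference to the store for exactly the duration of its reads. A hypothetical, reduced illustration of that reference-counting pattern (not Elasticsearch's Store/Releasable code) is:

import java.io.Closeable;
import java.util.concurrent.atomic.AtomicInteger;

// Illustrative only: why taking the store reference inside snapshotFile's
// try-with-resources fits the new per-file scheduling. Each task briefly acquires
// its own reference and releases it on close, instead of relying on a long-lived
// worker loop to keep the store open.
final class RefCountedStoreSketch {
    private final AtomicInteger refs = new AtomicInteger(1);

    Closeable acquire() {
        if (refs.getAndIncrement() <= 0) {      // store already closed
            refs.decrementAndGet();
            throw new IllegalStateException("store is closed");
        }
        return () -> refs.decrementAndGet();    // released when the try block exits
    }

    void close() {
        refs.decrementAndGet();                 // drop the initial reference
    }
}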