package org.elasticsearch.repositories.gcs;

+import com.google.cloud.BatchResult;
import com.google.cloud.ReadChannel;
import com.google.cloud.WriteChannel;
import com.google.cloud.storage.Blob;
import com.google.cloud.storage.Bucket;
import com.google.cloud.storage.Storage;
import com.google.cloud.storage.Storage.BlobListOption;
+import com.google.cloud.storage.StorageBatch;
import com.google.cloud.storage.StorageException;

-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.blobstore.BlobContainer;
import org.elasticsearch.common.blobstore.BlobMetaData;
import java.nio.channels.WritableByteChannel;
import java.nio.file.FileAlreadyExistsException;
import java.nio.file.NoSuchFileException;
+import java.util.ArrayList;
import java.util.Collection;
+import java.util.Collections;
import java.util.List;
import java.util.Map;
+import java.util.concurrent.atomic.AtomicReference;
import java.util.stream.Collectors;

import static java.net.HttpURLConnection.HTTP_NOT_FOUND;
import static java.net.HttpURLConnection.HTTP_PRECON_FAILED;

class GoogleCloudStorageBlobStore implements BlobStore {
-
-    private static final Logger logger = LogManager.getLogger(GoogleCloudStorageBlobStore.class);

    // The recommended maximum size of a blob that should be uploaded in a single
    // request. Larger files should be uploaded over multiple requests (this is
@@ -105,7 +106,7 @@ public void close() {
     * @param bucketName name of the bucket
     * @return true iff the bucket exists
     */
-    boolean doesBucketExist(String bucketName) {
+    private boolean doesBucketExist(String bucketName) {
        try {
            final Bucket bucket = SocketAccess.doPrivilegedIOException(() -> client().get(bucketName));
            return bucket != null;
@@ -295,16 +296,16 @@ void deleteBlob(String blobName) throws IOException {
     *
     * @param prefix prefix of the blobs to delete
     */
-    void deleteBlobsByPrefix(String prefix) throws IOException {
-        deleteBlobs(listBlobsByPrefix("", prefix).keySet());
+    private void deleteBlobsByPrefix(String prefix) throws IOException {
+        deleteBlobsIgnoringIfNotExists(listBlobsByPrefix("", prefix).keySet());
    }

    /**
     * Deletes multiple blobs from the specific bucket using a batch request
     *
     * @param blobNames names of the blobs to delete
     */
-    void deleteBlobs(Collection<String> blobNames) throws IOException {
+    void deleteBlobsIgnoringIfNotExists(Collection<String> blobNames) throws IOException {
        if (blobNames.isEmpty()) {
            return;
        }
@@ -314,17 +315,33 @@ void deleteBlobs(Collection<String> blobNames) throws IOException {
            return;
        }
        final List<BlobId> blobIdsToDelete = blobNames.stream().map(blob -> BlobId.of(bucketName, blob)).collect(Collectors.toList());
-        final List<Boolean> deletedStatuses = SocketAccess.doPrivilegedIOException(() -> client().delete(blobIdsToDelete));
-        assert blobIdsToDelete.size() == deletedStatuses.size();
-        boolean failed = false;
-        for (int i = 0; i < blobIdsToDelete.size(); i++) {
-            if (deletedStatuses.get(i) == false) {
-                logger.error("Failed to delete blob [{}] in bucket [{}]", blobIdsToDelete.get(i).getName(), bucketName);
-                failed = true;
+        final List<BlobId> failedBlobs = Collections.synchronizedList(new ArrayList<>());
+        final StorageException e = SocketAccess.doPrivilegedIOException(() -> {
+            final AtomicReference<StorageException> ioe = new AtomicReference<>();
+            final StorageBatch batch = client().batch();
+            for (BlobId blob : blobIdsToDelete) {
+                batch.delete(blob).notify(
+                    new BatchResult.Callback<Boolean, StorageException>() {
+                        @Override
+                        public void success(Boolean result) {
+                        }
+
+                        @Override
+                        public void error(StorageException exception) {
+                            if (exception.getCode() != HTTP_NOT_FOUND) {
+                                failedBlobs.add(blob);
+                                if (ioe.compareAndSet(null, exception) == false) {
+                                    ioe.get().addSuppressed(exception);
+                                }
+                            }
+                        }
+                    });
            }
-        }
-        if (failed) {
-            throw new IOException("Failed to delete all [" + blobIdsToDelete.size() + "] blobs");
+            batch.submit();
+            return ioe.get();
+        });
+        if (e != null) {
+            throw new IOException("Exception when deleting blobs [" + failedBlobs + "]", e);
        }
    }
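
For readers unfamiliar with the google-cloud-storage batching API used above, the sketch below isolates the delete-with-callback pattern the commit adopts. It is a minimal standalone example, not Elasticsearch code: the class name, bucket, and blob names are hypothetical, credentials are assumed to come from the environment, and the error handling is reduced to a counter instead of the exception aggregation done in the commit.

import com.google.cloud.BatchResult;
import com.google.cloud.storage.BlobId;
import com.google.cloud.storage.Storage;
import com.google.cloud.storage.StorageBatch;
import com.google.cloud.storage.StorageException;
import com.google.cloud.storage.StorageOptions;

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;

import static java.net.HttpURLConnection.HTTP_NOT_FOUND;

public class BatchDeleteSketch {
    public static void main(String[] args) {
        // Hypothetical bucket and blob names, for illustration only.
        final String bucket = "my-bucket";
        final List<BlobId> blobs = Arrays.asList(BlobId.of(bucket, "path/a"), BlobId.of(bucket, "path/b"));

        // Uses application default credentials; assumes the environment is configured.
        final Storage storage = StorageOptions.getDefaultInstance().getService();
        final AtomicInteger failures = new AtomicInteger();

        final StorageBatch batch = storage.batch();
        for (BlobId blob : blobs) {
            // Queue the delete; the callback fires when the batched request is submitted and parsed.
            batch.delete(blob).notify(new BatchResult.Callback<Boolean, StorageException>() {
                @Override
                public void success(Boolean result) {
                    // Blob deleted.
                }

                @Override
                public void error(StorageException exception) {
                    // A 404 means the blob is already gone, which the commit treats as success.
                    if (exception.getCode() != HTTP_NOT_FOUND) {
                        failures.incrementAndGet();
                    }
                }
            });
        }
        batch.submit(); // one HTTP request for all queued deletes

        if (failures.get() > 0) {
            throw new IllegalStateException(failures.get() + " blobs could not be deleted");
        }
    }
}

Batching keeps the round-trip count at one regardless of how many blobs are deleted, and ignoring HTTP 404 responses makes repeated or concurrent deletes of the same blobs effectively idempotent, which appears to be the point of the renamed deleteBlobsIgnoringIfNotExists method.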