@@ -56,6 +56,11 @@ import api = google.firestore.v1;
56
56
*/
57
57
const MAX_BATCH_SIZE = 20 ;
58
58
59
+ /*!
60
+ * The maximum number of writes that can be in a single batch that is being retried.
61
+ */
62
+ export const RETRY_MAX_BATCH_SIZE = 10 ;
63
+
59
64
/*!
60
65
* The starting maximum number of operations per second as allowed by the
61
66
* 500/50/5 rule.
@@ -213,6 +218,13 @@ class BulkCommitBatch extends WriteBatch {
213
218
// been resolved.
214
219
readonly pendingOps : Array < BulkWriterOperation > = [ ] ;
215
220
221
+ readonly maxBatchSize : number ;
222
+
223
+ constructor ( firestore : Firestore , maxBatchSize : number ) {
224
+ super ( firestore ) ;
225
+ this . maxBatchSize = maxBatchSize ;
226
+ }
227
+
216
228
has ( documentRef : firestore . DocumentReference < unknown > ) : boolean {
217
229
return this . docPaths . has ( documentRef . path ) ;
218
230
}
@@ -333,14 +345,17 @@ export class BulkWriter {
333
345
* Visible for testing.
334
346
* @private
335
347
*/
336
- _maxBatchSize = MAX_BATCH_SIZE ;
348
+ private _maxBatchSize = MAX_BATCH_SIZE ;
337
349
338
350
/**
339
351
* The batch that is currently used to schedule operations. Once this batch
340
352
* reaches maximum capacity, a new batch is created.
341
353
* @private
342
354
*/
343
- private _bulkCommitBatch = new BulkCommitBatch ( this . firestore ) ;
355
+ private _bulkCommitBatch = new BulkCommitBatch (
356
+ this . firestore ,
357
+ this . _maxBatchSize
358
+ ) ;
344
359
345
360
/**
346
361
* A pointer to the tail of all active BulkWriter operations. This pointer
@@ -384,6 +399,16 @@ export class BulkWriter {
384
399
return this . _bufferedOperations . length ;
385
400
}
386
401
402
+ // Visible for testing.
403
+ _setMaxBatchSize ( size : number ) : void {
404
+ assert (
405
+ this . _bulkCommitBatch . pendingOps . length === 0 ,
406
+ 'BulkCommitBatch should be empty'
407
+ ) ;
408
+ this . _maxBatchSize = size ;
409
+ this . _bulkCommitBatch = new BulkCommitBatch ( this . firestore , size ) ;
410
+ }
411
+
387
412
/**
388
413
* The maximum number of pending operations that can be enqueued onto this
389
414
* BulkWriter instance. Once this number of writes has been enqueued,
@@ -840,7 +865,6 @@ export class BulkWriter {
840
865
if ( this . _bulkCommitBatch . _opCount === 0 ) return ;
841
866
842
867
const pendingBatch = this . _bulkCommitBatch ;
843
- this . _bulkCommitBatch = new BulkCommitBatch ( this . firestore ) ;
844
868
845
869
// Use the write with the longest backoff duration when determining backoff.
846
870
const highestBackoffDuration = pendingBatch . pendingOps . reduce ( ( prev , cur ) =>
@@ -849,6 +873,13 @@ export class BulkWriter {
849
873
const backoffMsWithJitter = BulkWriter . _applyJitter ( highestBackoffDuration ) ;
850
874
const delayedExecution = new Deferred < void > ( ) ;
851
875
876
+ // A backoff duration greater than 0 implies that this batch is a retry.
877
+ // Retried writes are sent with a batch size of 10 in order to guarantee
878
+ // that the batch is under the 10MiB limit.
879
+ const maxBatchSize =
880
+ highestBackoffDuration > 0 ? RETRY_MAX_BATCH_SIZE : this . _maxBatchSize ;
881
+ this . _bulkCommitBatch = new BulkCommitBatch ( this . firestore , maxBatchSize ) ;
882
+
852
883
if ( backoffMsWithJitter > 0 ) {
853
884
delayExecution ( ( ) => delayedExecution . resolve ( ) , backoffMsWithJitter ) ;
854
885
} else {
@@ -988,7 +1019,7 @@ export class BulkWriter {
988
1019
enqueueOnBatchCallback ( this . _bulkCommitBatch ) ;
989
1020
this . _bulkCommitBatch . processLastOperation ( op ) ;
990
1021
991
- if ( this . _bulkCommitBatch . _opCount === this . _maxBatchSize ) {
1022
+ if ( this . _bulkCommitBatch . _opCount === this . _bulkCommitBatch . maxBatchSize ) {
992
1023
this . _scheduleCurrentBatch ( ) ;
993
1024
} else if ( op . flushed ) {
994
1025
// If flush() was called before this operation was enqueued into a batch,
0 commit comments