// crud.prose.test.ts
import { expect } from 'chai';
import { once } from 'events';
import { type CommandStartedEvent } from '../../../mongodb';
import {
type ClientBulkWriteModel,
type ClientSession,
type Collection,
type Document,
MongoBulkWriteError,
type MongoClient,
MongoClientBulkWriteError,
MongoInvalidArgumentError,
MongoServerError,
type PkFactory
} from '../../mongodb';
import { filterForCommands } from '../shared';
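// `filterForCommands` comes from the shared test helpers. Judging from its use
// below (`client.on('commandStarted', filterForCommands('bulkWrite', commands))`),
// it presumably returns a listener that collects matching CommandStartedEvents
// into the given array, along these lines (a sketch, not the actual helper):
//
//   const filterForCommands =
//     (name: string, out: CommandStartedEvent[]) =>
//     (event: CommandStartedEvent): void => {
//       if (event.commandName === name) out.push(event);
//     };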
describe('CRUD Prose Spec Tests', () => {
let client: MongoClient;
beforeEach(async function () {
client = this.configuration.newClient({ monitorCommands: true });
await client.connect();
});
afterEach(async () => {
if (client) {
await client.close();
client.removeAllListeners();
}
});
// TODO(NODE-3888): Implement this test
it.skip('1. WriteConcernError.details exposes writeConcernError.errInfo', {
/**
* Test that writeConcernError.errInfo in a command response is propagated as WriteConcernError.details (or equivalent) in the driver.
* Using a 4.0+ server, set the following failpoint:
* @example
* ```js
* {
* "configureFailPoint": "failCommand",
* "data": {
* "failCommands": ["insert"],
* "writeConcernError": {
* "code": 100,
* "codeName": "UnsatisfiableWriteConcern",
* "errmsg": "Not enough data-bearing nodes",
* "errInfo": {
* "writeConcern": {
* "w": 2,
* "wtimeout": 0,
* "provenance": "clientSupplied"
* }
* }
* }
* },
* "mode": { "times": 1 }
* }
* ```
*
* Then, perform an insert operation and assert that a WriteConcernError occurs and that
* its details property is both accessible and matches the errInfo object from the failpoint.
*/
metadata: { requires: { mongodb: '>=4.0.0' } },
async test() {
throw new Error('This test is not implemented!');
}
}).skipReason = 'TODO(NODE-3888): Implement this test';
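// A minimal sketch of what the missing test body could look like, assuming the
// failpoint from the description above; whether the driver exposes the server's
// errInfo as `details` (per the test title) is exactly what NODE-3888 tracks,
// so the property access below is an assumption, not verified behavior:
//
//   await client.db('admin').command({
//     configureFailPoint: 'failCommand',
//     mode: { times: 1 },
//     data: {
//       failCommands: ['insert'],
//       writeConcernError: {
//         code: 100,
//         codeName: 'UnsatisfiableWriteConcern',
//         errmsg: 'Not enough data-bearing nodes',
//         errInfo: { writeConcern: { w: 2, wtimeout: 0, provenance: 'clientSupplied' } }
//       }
//     }
//   });
//   const error = await client.db().collection('test').insertOne({ a: 1 }).catch(e => e);
//   expect(error).to.have.nested.property('writeConcernError.details').that.deep.equals({
//     writeConcern: { w: 2, wtimeout: 0, provenance: 'clientSupplied' }
//   });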
describe('2. WriteError.details exposes writeErrors[].errInfo', () => {
/**
* Test that writeErrors[].errInfo in a command response is propagated as WriteError.details (or equivalent) in the driver.
* Using a 5.0+ server, create a collection with document validation like so:
* @example
* ```js
* {
* "create": "test",
* "validator": {
* "x": { $type: "string" }
* }
* }
* ```
* Enable command monitoring to observe CommandSucceededEvents.
* Then, insert an invalid document (e.g. `{x: 1}`)
* and assert that a WriteError occurs, that its code is 121 (i.e. DocumentValidationFailure),
* and that its details property is accessible.
* Additionally, assert that a CommandSucceededEvent was observed
* and that the writeErrors[0].errInfo field in the response document matches the WriteError's details property.
*/
let collection: Collection;
beforeEach(async () => {
try {
await client.db().collection('wc_details').drop();
} catch {
// don't care
}
collection = await client
.db()
.createCollection('wc_details', { validator: { x: { $type: 'string' } } });
});
it('test case: insert MongoServerError', {
metadata: { requires: { mongodb: '>=5.0.0' } },
async test() {
const evCapture = once(client, 'commandSucceeded');
let errInfoFromError;
try {
await collection.insertOne({ x: /not a string/ });
expect.fail('The insert should fail the validation that x must be a string');
} catch (error) {
expect(error).to.be.instanceOf(MongoServerError);
expect(error).to.have.property('code', 121);
expect(error).to.have.property('errInfo').that.is.an('object');
errInfoFromError = error.errInfo;
}
const commandSucceededEvents = await evCapture;
expect(commandSucceededEvents).to.have.lengthOf(1);
const ev = commandSucceededEvents[0];
expect(ev).to.have.nested.property('reply.writeErrors[0].errInfo').that.is.an('object');
const errInfoFromEvent = ev.reply.writeErrors[0].errInfo;
expect(errInfoFromError).to.deep.equal(errInfoFromEvent);
}
});
it('test case: insertMany MongoBulkWriteError', {
metadata: { requires: { mongodb: '>=5.0.0' } },
async test() {
const evCapture = once(client, 'commandSucceeded');
let errInfoFromError;
try {
await collection.insertMany([{ x: /not a string/ }]);
expect.fail('The insert should fail the validation that x must be a string');
} catch (error) {
expect(error).to.be.instanceOf(MongoBulkWriteError);
expect(error).to.have.property('code', 121);
expect(error).to.have.property('writeErrors').that.is.an('array');
expect(error.writeErrors[0]).to.have.property('errInfo').that.is.an('object');
errInfoFromError = error.writeErrors[0].errInfo;
}
const commandSucceededEvents = await evCapture;
expect(commandSucceededEvents).to.have.lengthOf(1);
const ev = commandSucceededEvents[0];
expect(ev).to.have.nested.property('reply.writeErrors[0].errInfo').that.is.an('object');
const errInfoFromEvent = ev.reply.writeErrors[0].errInfo;
expect(errInfoFromError).to.deep.equal(errInfoFromEvent);
}
});
});
describe('3. MongoClient.bulkWrite batch splits a writeModels input with greater than maxWriteBatchSize operations', function () {
// Test that MongoClient.bulkWrite properly handles writeModels inputs containing a number of writes greater than
// maxWriteBatchSize.
// This test must only be run on 8.0+ servers. This test must be skipped on Atlas Serverless.
// Construct a MongoClient (referred to as client) with command monitoring enabled to observe CommandStartedEvents.
// Perform a hello command using client and record the maxWriteBatchSize value contained in the response. Then,
// construct the following write model (referred to as model):
// InsertOne: {
// "namespace": "db.coll",
// "document": { "a": "b" }
// }
// Construct a list of write models (referred to as models) with model repeated maxWriteBatchSize + 1 times. Execute
// bulkWrite on client with models. Assert that the bulk write succeeds and returns a BulkWriteResult with an
// insertedCount value of maxWriteBatchSize + 1.
// Assert that two CommandStartedEvents (referred to as firstEvent and secondEvent) were observed for the bulkWrite
// command. Assert that the length of firstEvent.command.ops is maxWriteBatchSize. Assert that the length of
// secondEvent.command.ops is 1. If the driver exposes operationIds in its CommandStartedEvents, assert that
// firstEvent.operationId is equal to secondEvent.operationId.
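// For reference, the `bulkWrite` server command inspected by the assertions
// below carries the write models in an `ops` array and the target namespaces
// in an `nsInfo` array; the rough shape (inferred from the fields asserted on,
// and shown only as an illustration) is:
//
//   {
//     bulkWrite: 1,
//     ops: [{ insert: 0, document: { a: 'b' } }, /* ...repeated... */],
//     nsInfo: [{ ns: 'db.coll' }]
//   }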
let client: MongoClient;
let maxWriteBatchSize;
let models: ClientBulkWriteModel<Document>[] = [];
const commands: CommandStartedEvent[] = [];
beforeEach(async function () {
client = this.configuration.newClient({}, { monitorCommands: true });
await client.connect();
await client.db('db').collection('coll').drop();
const hello = await client.db('admin').command({ hello: 1 });
maxWriteBatchSize = hello.maxWriteBatchSize;
client.on('commandStarted', filterForCommands('bulkWrite', commands));
commands.length = 0;
models = Array.from({ length: maxWriteBatchSize + 1 }, () => {
return {
namespace: 'db.coll',
name: 'insertOne',
document: { a: 'b' }
};
});
});
afterEach(async function () {
await client.close();
});
it('splits the commands into 2 operations', {
metadata: { requires: { mongodb: '>=8.0.0', serverless: 'forbid' } },
async test() {
const result = await client.bulkWrite(models);
expect(result.insertedCount).to.equal(maxWriteBatchSize + 1);
expect(commands.length).to.equal(2);
expect(commands[0].command.ops.length).to.equal(maxWriteBatchSize);
expect(commands[1].command.ops.length).to.equal(1);
}
});
});
describe('4. MongoClient.bulkWrite batch splits when an ops payload exceeds maxMessageSizeBytes', function () {
// Test that MongoClient.bulkWrite properly handles a writeModels input which constructs an ops array larger
// than maxMessageSizeBytes.
// This test must only be run on 8.0+ servers. This test must be skipped on Atlas Serverless.
// Construct a MongoClient (referred to as client) with command monitoring enabled to observe CommandStartedEvents.
// Perform a hello command using client and record the following values from the response: maxBsonObjectSize
// and maxMessageSizeBytes. Then, construct the following document (referred to as document):
// {
// "a": "b".repeat(maxBsonObjectSize - 500)
// }
// Construct the following write model (referred to as model):
// InsertOne: {
// "namespace": "db.coll",
// "document": document
// }
// Use the following calculation to determine the number of inserts that should be provided to
// MongoClient.bulkWrite: maxMessageSizeBytes / maxBsonObjectSize + 1 (referred to as numModels). This number
// ensures that the inserts provided to MongoClient.bulkWrite will require multiple bulkWrite commands to be
// sent to the server.
// Construct a list of write models (referred to as models) with model repeated numModels times. Then execute
// bulkWrite on client with models. Assert that the bulk write succeeds and returns a BulkWriteResult with
// an insertedCount value of numModels.
// Assert that two CommandStartedEvents (referred to as firstEvent and secondEvent) were observed. Assert
// that the length of firstEvent.command.ops is numModels - 1. Assert that the length of secondEvent.command.ops
// is 1. If the driver exposes operationIds in its CommandStartedEvents, assert that firstEvent.operationId is
// equal to secondEvent.operationId.
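// As a worked example, with the common server defaults of
// maxBsonObjectSize = 16777216 and maxMessageSizeBytes = 48000000 (real
// deployments may differ): numModels = floor(48000000 / 16777216 + 1) = 3, so
// three ~16 MB documents are built; the first bulkWrite command carries
// numModels - 1 = 2 of them and the third spills into a second command.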
let client: MongoClient;
let maxBsonObjectSize;
let maxMessageSizeBytes;
let numModels;
let models: ClientBulkWriteModel<Document>[] = [];
const commands: CommandStartedEvent[] = [];
beforeEach(async function () {
client = this.configuration.newClient({}, { monitorCommands: true });
await client.connect();
await client.db('db').collection('coll').drop();
const hello = await client.db('admin').command({ hello: 1 });
maxBsonObjectSize = hello.maxBsonObjectSize;
maxMessageSizeBytes = hello.maxMessageSizeBytes;
numModels = Math.floor(maxMessageSizeBytes / maxBsonObjectSize + 1);
client.on('commandStarted', filterForCommands('bulkWrite', commands));
commands.length = 0;
models = Array.from({ length: numModels }, () => {
return {
name: 'insertOne',
namespace: 'db.coll',
document: {
a: 'b'.repeat(maxBsonObjectSize - 500)
}
};
});
});
afterEach(async function () {
await client.close();
});
it('splits the commands into 2 operations', {
metadata: { requires: { mongodb: '>=8.0.0', serverless: 'forbid' } },
async test() {
const result = await client.bulkWrite(models);
expect(result.insertedCount).to.equal(numModels);
expect(commands.length).to.equal(2);
expect(commands[0].command.ops.length).to.equal(numModels - 1);
expect(commands[1].command.ops.length).to.equal(1);
}
});
});
describe('5. MongoClient.bulkWrite collects WriteConcernErrors across batches', function () {
// Test that MongoClient.bulkWrite properly collects and reports writeConcernErrors returned in separate batches.
// This test must only be run on 8.0+ servers. This test must be skipped on Atlas Serverless.
// Construct a MongoClient (referred to as client) with retryWrites: false configured and command monitoring
// enabled to observe CommandStartedEvents. Perform a hello command using client and record the maxWriteBatchSize
// value contained in the response. Then, configure the following fail point with client:
// {
// "configureFailPoint": "failCommand",
// "mode": { "times": 2 },
// "data": {
// "failCommands": ["bulkWrite"],
// "writeConcernError": {
// "code": 91,
// "errmsg": "Replication is being shut down"
// }
// }
// }
// Construct the following write model (referred to as model):
// InsertOne: {
// "namespace": "db.coll",
// "document": { "a": "b" }
// }
// Construct a list of write models (referred to as models) with model repeated maxWriteBatchSize + 1 times.
// Execute bulkWrite on client with models. Assert that the bulk write fails and returns a BulkWriteError (referred to as error).
// Assert that error.writeConcernErrors has a length of 2.
// Assert that error.partialResult is populated. Assert that error.partialResult.insertedCount is equal to maxWriteBatchSize + 1.
// Assert that two CommandStartedEvents were observed for the bulkWrite command.
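// Note what is being exercised here: a writeConcernError does not fail the
// writes themselves, so both batches still insert all documents (insertedCount
// is maxWriteBatchSize + 1) while the failpoint attaches one write concern
// error to each of the two bulkWrite commands. retryWrites is disabled so the
// driver does not retry the batches on the retryable code 91 and consume the
// failpoint early.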
let client: MongoClient;
let maxWriteBatchSize;
let models: ClientBulkWriteModel<Document>[] = [];
const commands: CommandStartedEvent[] = [];
beforeEach(async function () {
client = this.configuration.newClient({}, { monitorCommands: true, retryWrites: false });
await client.connect();
await client.db('db').collection('coll').drop();
const hello = await client.db('admin').command({ hello: 1 });
await client.db('admin').command({
configureFailPoint: 'failCommand',
mode: { times: 2 },
data: {
failCommands: ['bulkWrite'],
writeConcernError: {
code: 91,
errmsg: 'Replication is being shut down'
}
}
});
maxWriteBatchSize = hello.maxWriteBatchSize;
client.on('commandStarted', filterForCommands('bulkWrite', commands));
commands.length = 0;
models = Array.from({ length: maxWriteBatchSize + 1 }, () => {
return {
namespace: 'db.coll',
name: 'insertOne',
document: { a: 'b' }
};
});
});
afterEach(async function () {
await client.close();
});
it('splits the commands into 2 operations and handles the errors', {
metadata: { requires: { mongodb: '>=8.0.0', serverless: 'forbid' } },
async test() {
const error = await client.bulkWrite(models).catch(error => error);
expect(error).to.be.instanceOf(MongoClientBulkWriteError);
expect(error.writeConcernErrors.length).to.equal(2);
expect(error.partialResult.insertedCount).to.equal(maxWriteBatchSize + 1);
expect(commands.length).to.equal(2);
}
});
});
describe('6. MongoClient.bulkWrite handles individual WriteErrors across batches', function () {
// Test that MongoClient.bulkWrite handles individual write errors across batches for ordered and unordered bulk writes.
// This test must only be run on 8.0+ servers. This test must be skipped on Atlas Serverless.
// Construct a MongoClient (referred to as client) with command monitoring enabled to observe CommandStartedEvents.
// Perform a hello command using client and record the maxWriteBatchSize value contained in the response.
// Construct a MongoCollection (referred to as collection) with the namespace "db.coll" (referred to as namespace).
// Drop collection. Then, construct the following document (referred to as document):
// {
// "_id": 1
// }
// Insert document into collection.
// Create the following write model (referred to as model):
// InsertOne {
// "namespace": namespace,
// "document": document
// }
// Construct a list of write models (referred to as models) with model repeated maxWriteBatchSize + 1 times.
let client: MongoClient;
let maxWriteBatchSize;
let models: ClientBulkWriteModel<Document>[] = [];
const commands: CommandStartedEvent[] = [];
beforeEach(async function () {
client = this.configuration.newClient({}, { monitorCommands: true, retryWrites: false });
await client.connect();
await client.db('db').collection('coll').drop();
const hello = await client.db('admin').command({ hello: 1 });
await client.db('db').collection<{ _id?: number }>('coll').insertOne({ _id: 1 });
maxWriteBatchSize = hello.maxWriteBatchSize;
client.on('commandStarted', filterForCommands('bulkWrite', commands));
commands.length = 0;
models = Array.from({ length: maxWriteBatchSize + 1 }, () => {
return {
namespace: 'db.coll',
name: 'insertOne',
document: { _id: 1 }
};
});
});
afterEach(async function () {
await client.close();
});
context('when the bulk write is unordered', function () {
// Unordered
// Test that an unordered bulk write collects WriteErrors across batches.
// Execute bulkWrite on client with models and ordered set to false. Assert that the bulk write fails
// and returns a BulkWriteError (referred to as unorderedError).
// Assert that unorderedError.writeErrors has a length of maxWriteBatchSize + 1.
// Assert that two CommandStartedEvents were observed for the bulkWrite command.
it('splits the commands into 2 operations and handles the errors', {
metadata: { requires: { mongodb: '>=8.0.0', serverless: 'forbid' } },
async test() {
const error = await client.bulkWrite(models, { ordered: false }).catch(error => error);
expect(error).to.be.instanceOf(MongoClientBulkWriteError);
expect(error.writeErrors.size).to.equal(maxWriteBatchSize + 1);
expect(commands.length).to.equal(2);
}
});
});
context('when the bulk write is ordered', function () {
// Ordered
// Test that an ordered bulk write does not execute further batches when a WriteError occurs.
// Execute bulkWrite on client with models and ordered set to true. Assert that the bulk write fails
// and returns a BulkWriteError (referred to as orderedError).
// Assert that orderedError.writeErrors has a length of 1.
// Assert that one CommandStartedEvent was observed for the bulkWrite command.
it('splits the commands into 2 operations and halts on first error', {
metadata: { requires: { mongodb: '>=8.0.0', serverless: 'forbid' } },
async test() {
const error = await client.bulkWrite(models, { ordered: true }).catch(error => error);
expect(error).to.be.instanceOf(MongoClientBulkWriteError);
expect(error.writeErrors.size).to.equal(1);
expect(commands.length).to.equal(1);
}
});
});
});
describe('7. MongoClient.bulkWrite handles a cursor requiring a getMore', function () {
// Test that MongoClient.bulkWrite properly iterates the results cursor when getMore is required.
// This test must only be run on 8.0+ servers. This test must be skipped on Atlas Serverless.
// Construct a MongoClient (referred to as client) with command monitoring enabled to observe
// CommandStartedEvents. Perform a hello command using client and record the maxBsonObjectSize value from the response.
// Construct a MongoCollection (referred to as collection) with the namespace "db.coll" (referred to as namespace).
// Drop collection. Then create the following list of write models (referred to as models):
// UpdateOne {
// "namespace": namespace,
// "filter": { "_id": "a".repeat(maxBsonObjectSize / 2) },
// "update": { "$set": { "x": 1 } },
// "upsert": true
// },
// UpdateOne {
// "namespace": namespace,
// "filter": { "_id": "b".repeat(maxBsonObjectSize / 2) },
// "update": { "$set": { "x": 1 } },
// "upsert": true
// },
// Execute bulkWrite on client with models and verboseResults set to true. Assert that the bulk write succeeds and returns a BulkWriteResult (referred to as result).
// Assert that result.upsertedCount is equal to 2.
// Assert that the length of result.updateResults is equal to 2.
// Assert that a CommandStartedEvent was observed for the getMore command.
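// Why a getMore is required: each upsert is keyed on an _id string of roughly
// maxBsonObjectSize / 2 (about 8 MB under the common 16 MB default), and the
// verbose results echo those _ids, so the two result documents cannot fit in a
// single cursor batch and the driver must issue a getMore for the second one.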
let client: MongoClient;
let maxBsonObjectSize;
const models: ClientBulkWriteModel<Document>[] = [];
const commands: CommandStartedEvent[] = [];
beforeEach(async function () {
client = this.configuration.newClient({}, { monitorCommands: true });
await client.connect();
await client.db('db').collection('coll').drop();
const hello = await client.db('admin').command({ hello: 1 });
maxBsonObjectSize = hello.maxBsonObjectSize;
client.on('commandStarted', filterForCommands('getMore', commands));
commands.length = 0;
models.push({
name: 'updateOne',
namespace: 'db.coll',
filter: { _id: 'a'.repeat(maxBsonObjectSize / 2) },
update: { $set: { x: 1 } },
upsert: true
});
models.push({
name: 'updateOne',
namespace: 'db.coll',
filter: { _id: 'b'.repeat(maxBsonObjectSize / 2) },
update: { $set: { x: 1 } },
upsert: true
});
});
afterEach(async function () {
await client.close();
});
it('handles a getMore on the results', {
metadata: { requires: { mongodb: '>=8.0.0', serverless: 'forbid' } },
async test() {
const result = await client.bulkWrite(models, { verboseResults: true });
expect(result.upsertedCount).to.equal(2);
expect(result.updateResults.size).to.equal(2);
expect(commands.length).to.equal(1);
}
});
});
describe('8. MongoClient.bulkWrite handles a cursor requiring getMore within a transaction', function () {
// Test that MongoClient.bulkWrite executed within a transaction properly iterates the results
// cursor when getMore is required.
// This test must only be run on 8.0+ servers. This test must be skipped on Atlas Serverless.
// This test must not be run against standalone servers.
// Construct a MongoClient (referred to as client) with command monitoring enabled to observe
// CommandStartedEvents. Perform a hello command using client and record the maxBsonObjectSize value from the response.
// Construct a MongoCollection (referred to as collection) with the namespace "db.coll" (referred to as namespace). Drop collection.
// Start a session on client (referred to as session). Start a transaction on session.
// Create the following list of write models (referred to as models):
// UpdateOne {
// "namespace": namespace,
// "filter": { "_id": "a".repeat(maxBsonObjectSize / 2) },
// "update": { "$set": { "x": 1 } },
// "upsert": true
// },
// UpdateOne {
// "namespace": namespace,
// "filter": { "_id": "b".repeat(maxBsonObjectSize / 2) },
// "update": { "$set": { "x": 1 } },
// "upsert": true
// },
// Execute bulkWrite on client with models, session, and verboseResults set to true. Assert that the bulk
// write succeeds and returns a BulkWriteResult (referred to as result).
// Assert that result.upsertedCount is equal to 2.
// Assert that the length of result.updateResults is equal to 2.
// Assert that a CommandStartedEvent was observed for the getMore command.
let client: MongoClient;
let session: ClientSession;
let maxBsonObjectSize;
const models: ClientBulkWriteModel<Document>[] = [];
const commands: CommandStartedEvent[] = [];
beforeEach(async function () {
client = this.configuration.newClient({}, { monitorCommands: true });
await client.connect();
await client.db('db').collection('coll').drop();
const hello = await client.db('admin').command({ hello: 1 });
maxBsonObjectSize = hello.maxBsonObjectSize;
client.on('commandStarted', filterForCommands('getMore', commands));
commands.length = 0;
models.push({
name: 'updateOne',
namespace: 'db.coll',
filter: { _id: 'a'.repeat(maxBsonObjectSize / 2) },
update: { $set: { x: 1 } },
upsert: true
});
models.push({
name: 'updateOne',
namespace: 'db.coll',
filter: { _id: 'b'.repeat(maxBsonObjectSize / 2) },
update: { $set: { x: 1 } },
upsert: true
});
session = client.startSession();
session.startTransaction();
});
afterEach(async function () {
await session.endSession();
await client.close();
});
it('handles a getMore on the results in a transaction', {
metadata: { requires: { mongodb: '>=8.0.0', serverless: 'forbid', topology: '!single' } },
async test() {
const result = await client.bulkWrite(models, { verboseResults: true, session });
expect(result.upsertedCount).to.equal(2);
expect(result.updateResults.size).to.equal(2);
expect(commands.length).to.equal(1);
}
});
});
describe('9. MongoClient.bulkWrite handles a getMore error', function () {
// Test that MongoClient.bulkWrite properly handles a failure that occurs when attempting a getMore.
// This test must only be run on 8.0+ servers. This test must be skipped on Atlas Serverless.
// Construct a MongoClient (referred to as client) with command monitoring enabled to observe CommandStartedEvents.
// Perform a hello command using client and record the maxBsonObjectSize value from the response. Then,
// configure the following fail point with client:
// {
// "configureFailPoint": "failCommand",
// "mode": { "times": 1 },
// "data": {
// "failCommands": ["getMore"],
// "errorCode": 8
// }
// }
// Construct a MongoCollection (referred to as collection) with the namespace "db.coll" (referred to as namespace).
// Drop collection. Then create the following list of write models (referred to as models):
// UpdateOne {
// "namespace": namespace,
// "filter": { "_id": "a".repeat(maxBsonObjectSize / 2) },
// "update": { "$set": { "x": 1 } },
// "upsert": true
// },
// UpdateOne {
// "namespace": namespace,
// "filter": { "_id": "b".repeat(maxBsonObjectSize / 2) },
// "update": { "$set": { "x": 1 } },
// "upsert": true
// },
// Execute bulkWrite on client with models and verboseResults set to true. Assert that the bulk write
// fails and returns a BulkWriteError (referred to as bulkWriteError).
// Assert that bulkWriteError.error is populated with an error (referred to as topLevelError). Assert
// that topLevelError.errorCode is equal to 8.
// Assert that bulkWriteError.partialResult is populated with a result (referred to as partialResult).
// Assert that partialResult.upsertedCount is equal to 2. Assert that the length of
// partialResult.updateResults is equal to 1.
// Assert that a CommandStartedEvent was observed for the getMore command.
// Assert that a CommandStartedEvent was observed for the killCursors command.
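// Expected sequence: the first cursor batch yields one verbose update result,
// the failpoint makes the getMore for the second result fail with errorCode 8,
// and the driver then issues killCursors to clean up. Hence upsertedCount is 2
// (both upserts were applied) but only one updateResult reaches partialResult.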
let client: MongoClient;
let maxBsonObjectSize;
const models: ClientBulkWriteModel<Document>[] = [];
const getMoreCommands: CommandStartedEvent[] = [];
const killCursorsCommands: CommandStartedEvent[] = [];
beforeEach(async function () {
client = this.configuration.newClient({}, { monitorCommands: true });
await client.connect();
await client.db('db').collection('coll').drop();
const hello = await client.db('admin').command({ hello: 1 });
maxBsonObjectSize = hello.maxBsonObjectSize;
await client.db('admin').command({
configureFailPoint: 'failCommand',
mode: { times: 1 },
data: {
failCommands: ['getMore'],
errorCode: 8
}
});
client.on('commandStarted', filterForCommands('getMore', getMoreCommands));
client.on('commandStarted', filterForCommands('killCursors', killCursorsCommands));
getMoreCommands.length = 0;
killCursorsCommands.length = 0;
models.push({
name: 'updateOne',
namespace: 'db.coll',
filter: { _id: 'a'.repeat(maxBsonObjectSize / 2) },
update: { $set: { x: 1 } },
upsert: true
});
models.push({
name: 'updateOne',
namespace: 'db.coll',
filter: { _id: 'b'.repeat(maxBsonObjectSize / 2) },
update: { $set: { x: 1 } },
upsert: true
});
});
afterEach(async function () {
await client.close();
});
it('handles a getMore that errors', {
metadata: { requires: { mongodb: '>=8.0.0', serverless: 'forbid' } },
async test() {
const error = await client
.bulkWrite(models, { verboseResults: true })
.catch(error => error);
expect(error).to.be.instanceOf(MongoClientBulkWriteError);
expect(error.cause.code).to.equal(8);
expect(error.partialResult).to.exist;
// TODO: Need to handle batches in cursor one at a time and not call toArray()
expect(error.partialResult.upsertedCount).to.equal(2);
expect(error.partialResult.updateResults.size).to.equal(1);
expect(getMoreCommands.length).to.equal(1);
expect(killCursorsCommands.length).to.equal(1);
}
});
});
describe('10. MongoClient.bulkWrite returns error for unacknowledged too-large insert', function () {
// This test must only be run on 8.0+ servers. This test must be skipped on Atlas Serverless.
// Construct a MongoClient (referred to as client).
// Perform a hello command using client and record the following values from the response: maxBsonObjectSize.
// Then, construct the following document (referred to as document):
// {
// "a": "b".repeat(maxBsonObjectSize)
// }
let client: MongoClient;
let maxBsonObjectSize;
let document: Document;
beforeEach(async function () {
client = this.configuration.newClient({}, { monitorCommands: true });
await client.connect();
await client.db('db').collection('coll').drop();
const hello = await client.db('admin').command({ hello: 1 });
maxBsonObjectSize = hello.maxBsonObjectSize;
document = {
a: 'b'.repeat(maxBsonObjectSize)
};
});
afterEach(async function () {
await client.close();
});
context('when performing inserts', function () {
// With insert
// Construct the following write model (referred to as model):
// InsertOne: {
// "namespace": "db.coll",
// "document": document
// }
// Construct a list of write models (referred to as models) containing the one model.
// Call MongoClient.bulkWrite with models and BulkWriteOptions.writeConcern set to an unacknowledged write concern.
// Expect a client-side error due to the size.
it('throws an error', {
metadata: { requires: { mongodb: '>=8.0.0', serverless: 'forbid' } },
async test() {
const error = await client
.bulkWrite([{ name: 'insertOne', namespace: 'db.coll', document: document }], {
writeConcern: { w: 0 },
ordered: false
})
.catch(error => error);
expect(error.message).to.include('Client bulk write operation ops of length');
}
});
});
context('when performing replacements', function () {
// With replace
// Construct the following write model (referred to as model):
// ReplaceOne: {
// "namespace": "db.coll",
// "filter": {},
// "replacement": document
// }
// Construct a list of write models (referred to as models) containing the one model.
// Call MongoClient.bulkWrite with models and BulkWriteOptions.writeConcern set to an unacknowledged write concern.
// Expect a client-side error due to the size.
it('throws an error', {
metadata: { requires: { mongodb: '>=8.0.0', serverless: 'forbid' } },
async test() {
const error = await client
.bulkWrite(
[{ name: 'replaceOne', namespace: 'db.coll', filter: {}, replacement: document }],
{ writeConcern: { w: 0 }, ordered: false }
)
.catch(error => error);
expect(error.message).to.include('Client bulk write operation ops of length');
}
});
});
});
describe('11. MongoClient.bulkWrite batch splits when the addition of a new namespace exceeds the maximum message size', function () {
// Test that MongoClient.bulkWrite batch splits a bulk write when the addition of a new namespace to nsInfo causes the size
// of the message to exceed maxMessageSizeBytes - 1000.
// This test must only be run on 8.0+ servers. This test must be skipped on Atlas Serverless.
// Repeat the following setup for each test case:
// Setup
// Construct a MongoClient (referred to as client) with command monitoring enabled to observe CommandStartedEvents. Perform
// a hello command using client and record the following values from the response: maxBsonObjectSize and maxMessageSizeBytes.
// Calculate the following values:
// opsBytes = maxMessageSizeBytes - 1122
// numModels = opsBytes / maxBsonObjectSize
// remainderBytes = opsBytes % maxBsonObjectSize
// Construct the following write model (referred to as firstModel):
// InsertOne {
// "namespace": "db.coll",
// "document": { "a": "b".repeat(maxBsonObjectSize - 57) }
// }
// Create a list of write models (referred to as models) with firstModel repeated numModels times.
// If remainderBytes is greater than or equal to 217, add 1 to numModels and append the following write model to models:
// InsertOne {
// "namespace": "db.coll",
// "document": { "a": "b".repeat(remainderBytes - 57) }
// }
// Then perform the following two tests:
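// As a worked example, with the common server defaults of
// maxBsonObjectSize = 16777216 and maxMessageSizeBytes = 48000000:
// opsBytes = 47998878, numModels = floor(47998878 / 16777216) = 2, and
// remainderBytes = 14444446, which is >= 217, so one smaller insert is
// appended and numModels becomes 3. The 1122- and 57-byte constants appear to
// account for the fixed bulkWrite message and per-operation envelope overhead.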
let client: MongoClient;
let maxBsonObjectSize;
let maxMessageSizeBytes;
let opsBytes;
let numModels;
let remainderBytes;
let models: ClientBulkWriteModel<Document>[] = [];
const commands: CommandStartedEvent[] = [];
beforeEach(async function () {
client = this.configuration.newClient({}, { monitorCommands: true });
await client.connect();
await client.db('db').collection('coll').drop();
const hello = await client.db('admin').command({ hello: 1 });
maxBsonObjectSize = hello.maxBsonObjectSize;
maxMessageSizeBytes = hello.maxMessageSizeBytes;
opsBytes = maxMessageSizeBytes - 1122;
numModels = Math.floor(opsBytes / maxBsonObjectSize);
remainderBytes = opsBytes % maxBsonObjectSize;
client.on('commandStarted', filterForCommands('bulkWrite', commands));
commands.length = 0;
models = Array.from({ length: numModels }, () => {
return {
namespace: 'db.coll',
name: 'insertOne',
document: { a: 'b'.repeat(maxBsonObjectSize - 57) }
};
});
if (remainderBytes >= 217) {
numModels++;
models.push({
namespace: 'db.coll',
name: 'insertOne',
document: { a: 'b'.repeat(remainderBytes - 57) }
});
}
});
afterEach(async function () {
await client.close();
});
context('when no batch splitting is required', function () {
// Case 1: No batch-splitting required
// Create the following write model (referred to as sameNamespaceModel):
// InsertOne {
// "namespace": "db.coll",
// "document": { "a": "b" }
// }
// Append sameNamespaceModel to models.
// Execute bulkWrite on client with models. Assert that the bulk write succeeds and returns a BulkWriteResult (referred to as result).
// Assert that result.insertedCount is equal to numModels + 1.
// Assert that one CommandStartedEvent was observed for the bulkWrite command (referred to as event).
// Assert that the length of event.command.ops is numModels + 1. Assert that the length of event.command.nsInfo is 1.
// Assert that the namespace contained in event.command.nsInfo is "db.coll".
it('executes in a single batch', {
metadata: { requires: { mongodb: '>=8.0.0', serverless: 'forbid' } },
async test() {
const sameNamespaceModel: ClientBulkWriteModel<Document> = {
name: 'insertOne',
namespace: 'db.coll',
document: { a: 'b' }
};
const testModels = models.concat([sameNamespaceModel]);
const result = await client.bulkWrite(testModels);
expect(result.insertedCount).to.equal(numModels + 1);
expect(commands.length).to.equal(1);
expect(commands[0].command.ops.length).to.equal(numModels + 1);
expect(commands[0].command.nsInfo.length).to.equal(1);
expect(commands[0].command.nsInfo[0].ns).to.equal('db.coll');
}
});
});
context('when batch splitting is required', function () {
// Case 2: Batch-splitting required
// Construct the following namespace (referred to as namespace):
// "db." + "c".repeat(200)
// Create the following write model (referred to as newNamespaceModel):
// InsertOne {
// "namespace": namespace,
// "document": { "a": "b" }
// }
// Append newNamespaceModel to models.
// Execute bulkWrite on client with models. Assert that the bulk write succeeds and returns a BulkWriteResult (referred to as result).
// Assert that result.insertedCount is equal to numModels + 1.
// Assert that two CommandStartedEvents were observed for the bulkWrite command (referred to as firstEvent and secondEvent).
// Assert that the length of firstEvent.command.ops is equal to numModels. Assert that the length of firstEvent.command.nsInfo
// is equal to 1. Assert that the namespace contained in firstEvent.command.nsInfo is "db.coll".
// Assert that the length of secondEvent.command.ops is equal to 1. Assert that the length of secondEvent.command.nsInfo
// is equal to 1. Assert that the namespace contained in secondEvent.command.nsInfo is namespace.
it('executes in multiple batches', {
metadata: { requires: { mongodb: '>=8.0.0', serverless: 'forbid' } },
async test() {
const namespace = `db.${'c'.repeat(200)}`;
const newNamespaceModel: ClientBulkWriteModel<Document> = {
name: 'insertOne',
namespace: namespace,
document: { a: 'b' }
};
const testModels = models.concat([newNamespaceModel]);
const result = await client.bulkWrite(testModels);
expect(result.insertedCount).to.equal(numModels + 1);
expect(commands.length).to.equal(2);
expect(commands[0].command.ops.length).to.equal(numModels);
expect(commands[0].command.nsInfo.length).to.equal(1);
expect(commands[0].command.nsInfo[0].ns).to.equal('db.coll');
expect(commands[1].command.ops.length).to.equal(1);
expect(commands[1].command.nsInfo.length).to.equal(1);
expect(commands[1].command.nsInfo[0].ns).to.equal(namespace);
}
});
});
});
describe('12. MongoClient.bulkWrite returns an error if no operations can be added to ops', function () {
// Test that MongoClient.bulkWrite returns an error if an operation provided exceeds maxMessageSizeBytes
// such that an empty ops payload would be sent.
// This test must only be run on 8.0+ servers. This test must be skipped on Atlas Serverless.
// This test may be skipped by drivers that are not able to construct arbitrarily large documents.
// Construct a MongoClient (referred to as client). Perform a hello command using client and record
// the maxMessageSizeBytes value contained in the response.
let client: MongoClient;
let maxMessageSizeBytes;
beforeEach(async function () {
client = this.configuration.newClient({});
await client.connect();
await client.db('db').collection('coll').drop();
const hello = await client.db('admin').command({ hello: 1 });
maxMessageSizeBytes = hello.maxMessageSizeBytes;
});
afterEach(async function () {
await client.close();
});
context('when the document is too large', function () {
// Case 1: document too large
// Construct the following write model (referred to as largeDocumentModel):
// InsertOne {
// "namespace": "db.coll",
// "document": { "a": "b".repeat(maxMessageSizeBytes) }
// }
// Execute bulkWrite on client with largeDocumentModel. Assert that an error (referred to as error) is returned.
// Assert that error is a client error.
it('raises a client error', {
metadata: { requires: { mongodb: '>=8.0.0', serverless: 'forbid' } },
async test() {
const model: ClientBulkWriteModel<Document> = {
name: 'insertOne',
namespace: 'db.coll',
document: { a: 'b'.repeat(maxMessageSizeBytes) }
};
const error = await client.bulkWrite([model]).catch(error => error);
expect(error).to.be.instanceOf(MongoInvalidArgumentError);
}
});
});
context('when the namespace is too large', function () {
// Case 2: namespace too large
// Construct the following namespace (referred to as namespace):
// "db." + "c".repeat(maxMessageSizeBytes)
// Construct the following write model (referred to as largeNamespaceModel):
// InsertOne {
// "namespace": namespace,
// "document": { "a": "b" }
// }
// Execute bulkWrite on client with largeNamespaceModel. Assert that an error (referred to as error) is returned.
// Assert that error is a client error.
it('raises a client error', {
metadata: { requires: { mongodb: '>=8.0.0', serverless: 'forbid' } },
async test() {
const namespace = `db.${'c'.repeat(maxMessageSizeBytes)}`;
const model: ClientBulkWriteModel<Document> = {
name: 'insertOne',
namespace: namespace,
document: { a: 'b' }
};
const error = await client.bulkWrite([model]).catch(error => error);
expect(error).to.be.instanceOf(MongoInvalidArgumentError);
}
});
});
});
describe('13. MongoClient.bulkWrite returns an error if auto-encryption is configured', function () {
// This test is expected to be removed when DRIVERS-2888 is resolved.
// Test that MongoClient.bulkWrite returns an error if the client has auto-encryption configured.
// This test must only be run on 8.0+ servers. This test must be skipped on Atlas Serverless.
// Construct a MongoClient (referred to as client) configured with the following AutoEncryptionOpts:
// AutoEncryptionOpts {
// "keyVaultNamespace": "db.coll",
// "kmsProviders": {