server_selection.prose.sharded_retryable_reads.test.ts
import { expect } from 'chai';
import type { CommandFailedEvent, CommandSucceededEvent } from '../../mongodb';
const TEST_METADATA = { requires: { mongodb: '>=4.2.9', topology: 'sharded' } };
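
// Fail point: make the next `find` command fail with error code 6 (HostUnreachable)
// and close the connection, producing a retryable read error.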
const FAIL_COMMAND = {
  configureFailPoint: 'failCommand',
  mode: { times: 1 },
  data: {
    failCommands: ['find'],
    errorCode: 6,
    closeConnection: true
  }
};
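
// The same fail point configured with mode 'off' so it can be disabled during cleanup.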
const DISABLE_FAIL_COMMAND = {
  configureFailPoint: 'failCommand',
  mode: 'off',
  data: {
    failCommands: ['find'],
    errorCode: 6,
    closeConnection: true
  }
};
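
// Server selection prose tests for retryable reads on sharded clusters: the retry
// should prefer a different mongos when one is available, and otherwise reuse the
// same mongos.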
describe('Server Selection Sharded Retryable Reads Prose tests', function () {
  context('Retryable Reads Are Retried on a Different mongos if One is Available', function () {
    const commandFailedEvents: CommandFailedEvent[] = [];
    let client;
    let utilClientOne;
    let utilClientTwo;

    // This test MUST be executed against a sharded cluster that has at least two
    // mongos instances.
    // 1. Ensure that a test is run against a sharded cluster that has at least two
    // mongoses. If there are more than two mongoses in the cluster, pick two to
    // test against.
    beforeEach(async function () {
      const uri = this.configuration.url({
        monitorCommands: true,
        useMultipleMongoses: true
      });

      // 3. Create a client with ``retryReads=true`` that connects to the cluster,
      // providing the two selected mongoses as seeds.
      client = this.configuration.newClient(uri, {
        monitorCommands: true,
        retryReads: true
      });
      client.on('commandFailed', event => {
        commandFailedEvents.push(event);
      });
      await client.connect();
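
      // Pull the two mongos addresses from the connected client's seed list.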
      const seeds = client.topology.s.seedlist.map(address => address.toString());

      // 2. Create a client per mongos using the direct connection, and configure the
      // following fail points on each mongos::
      // {
      //   configureFailPoint: "failCommand",
      //   mode: { times: 1 },
      //   data: {
      //     failCommands: ["find"],
      //     errorCode: 6,
      //     closeConnection: true
      //   }
      // }
      utilClientOne = this.configuration.newClient(`mongodb://${seeds[0]}`, {
        directConnection: true
      });
      utilClientTwo = this.configuration.newClient(`mongodb://${seeds[1]}`, {
        directConnection: true
      });
      await utilClientOne.db('admin').command(FAIL_COMMAND);
      await utilClientTwo.db('admin').command(FAIL_COMMAND);
    });

    afterEach(async function () {
      await client?.close();
      await utilClientOne.db('admin').command(DISABLE_FAIL_COMMAND);
      await utilClientTwo.db('admin').command(DISABLE_FAIL_COMMAND);
      await utilClientOne?.close();
      await utilClientTwo?.close();
    });

    // 4. Enable command monitoring, and execute a ``find`` command that is
    // supposed to fail on both mongoses.
    // 5. Assert that there were failed command events from each mongos.
    // 6. Disable the fail points.
    it('retries on a different mongos', TEST_METADATA, async function () {
      await client
        .db('test')
        .collection('test')
        .find()
        .toArray()
        .catch(() => null);
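
      // Both mongoses have the fail point armed, so the first attempt and the single
      // retry both fail (hence the catch) and a commandFailed event is recorded per mongos.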
      expect(commandFailedEvents[0].address).to.not.equal(commandFailedEvents[1].address);
    });
  });

  // 1. Ensure that a test is run against a sharded cluster. If there are multiple
  // mongoses in the cluster, pick one to test against.
  context('Retryable Reads Are Retried on the Same mongos if No Others are Available', function () {
    const commandFailedEvents: CommandFailedEvent[] = [];
    const commandSucceededEvents: CommandSucceededEvent[] = [];
    let client;
    let utilClient;

    beforeEach(async function () {
      const uri = this.configuration.url({
        monitorCommands: true
      });

      // 3. Create a client with ``retryReads=true`` that connects to the cluster,
      // providing the selected mongos as the seed.
      client = this.configuration.newClient(uri, {
        monitorCommands: true,
        retryReads: true
      });
      client.on('commandFailed', event => {
        commandFailedEvents.push(event);
      });
      client.on('commandSucceeded', event => {
        commandSucceededEvents.push(event);
      });

      // 2. Create a client that connects to the mongos using the direct connection,
      // and configure the following fail point on the mongos::
      // {
      //   configureFailPoint: "failCommand",
      //   mode: { times: 1 },
      //   data: {
      //     failCommands: ["find"],
      //     errorCode: 6,
      //     closeConnection: true
      //   }
      // }
      utilClient = this.configuration.newClient(uri, {
        directConnection: true
      });
      await utilClient.db('admin').command(FAIL_COMMAND);
    });

    afterEach(async function () {
      await client?.close();
      await utilClient?.db('admin').command(DISABLE_FAIL_COMMAND);
      await utilClient?.close();
    });

    // 4. Enable command monitoring, and execute a ``find`` command.
    // 5. Assert that there was a failed command and a successful command event.
    // 6. Disable the fail point.
    it('retries on the same mongos', TEST_METADATA, async function () {
      await client
        .db('test')
        .collection('test')
        .find()
        .toArray()
        .catch(() => null);
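
      // Only one mongos is available, so the failed first attempt and the successful
      // retry should both have been sent to the same mongos address.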
      expect(commandFailedEvents[0].address).to.equal(commandSucceededEvents[0].address);
    });
  });
});