@@ -64,13 +64,13 @@ public void testTranslogHistoryTransferred() throws Exception {
            int docs = shards.indexDocs(10);
            getTranslog(shards.getPrimary()).rollGeneration();
            shards.flush();
-            if (randomBoolean()) {
-                docs += shards.indexDocs(10);
-            }
+            int moreDocs = shards.indexDocs(randomInt(10));
            shards.addReplica();
            shards.startAll();
            final IndexShard replica = shards.getReplicas().get(0);
-            assertThat(getTranslog(replica).totalOperations(), equalTo(docs));
+            boolean softDeletesEnabled = replica.indexSettings().isSoftDeleteEnabled();
+            assertThat(getTranslog(replica).totalOperations(), equalTo(softDeletesEnabled ? moreDocs : docs + moreDocs));
+            shards.assertAllEqual(docs + moreDocs);
        }
    }

@@ -107,7 +107,7 @@ public void testRetentionPolicyChangeDuringRecovery() throws Exception {
        }
    }

-    public void testRecoveryWithOutOfOrderDelete() throws Exception {
+    public void testRecoveryWithOutOfOrderDeleteWithTranslog() throws Exception {
        /*
         * The flow of this test:
         * - delete #1
@@ -117,12 +117,9 @@ public void testRecoveryWithOutOfOrderDelete() throws Exception {
         * - flush (commit point has max_seqno 3, and local checkpoint 1 -> points at gen 2, previous commit point is maintained)
         * - index #2
         * - index #5
-         * - If flush and the translog/lucene retention disabled, delete #1 will be removed while index #0 is still retained and replayed.
+         * - If flush and the translog retention disabled, delete #1 will be removed while index #0 is still retained and replayed.
         */
-        Settings settings = Settings.builder().put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), 10)
-            // If soft-deletes is enabled, delete#1 will be reclaimed because its segment (segment_1) is fully deleted
-            // index#0 will be retained if merge is disabled; otherwise it will be reclaimed because gcp=3 and retained_ops=0
-            .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false).build();
+        Settings settings = Settings.builder().put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), false).build();
        try (ReplicationGroup shards = createGroup(1, settings)) {
            shards.startAll();
            // create out of order delete and index op on replica
@@ -131,7 +128,7 @@ public void testRecoveryWithOutOfOrderDelete() throws Exception {

            // delete #1
            orgReplica.applyDeleteOperationOnReplica(1, 2, "type", "id");
-            orgReplica.flush(new FlushRequest().force(true)); // isolate delete#1 in its own translog generation and lucene segment
+            getTranslog(orgReplica).rollGeneration(); // isolate the delete in its own generation
            // index #0
            orgReplica.applyIndexOperationOnReplica(0, 1, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false,
                SourceToParse.source(indexName, "type", "id", new BytesArray("{}"), XContentType.JSON));
@@ -151,17 +148,16 @@ public void testRecoveryWithOutOfOrderDelete() throws Exception {
            final int translogOps;
            if (randomBoolean()) {
                if (randomBoolean()) {
-                    logger.info("--> flushing shard (translog/soft-deletes will be trimmed)");
+                    logger.info("--> flushing shard (translog will be trimmed)");
                    IndexMetaData.Builder builder = IndexMetaData.builder(orgReplica.indexSettings().getIndexMetaData());
                    builder.settings(Settings.builder().put(orgReplica.indexSettings().getSettings())
                        .put(IndexSettings.INDEX_TRANSLOG_RETENTION_AGE_SETTING.getKey(), "-1")
-                        .put(IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.getKey(), "-1")
-                        .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), 0));
+                        .put(IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.getKey(), "-1"));
                    orgReplica.indexSettings().updateIndexMetaData(builder.build());
                    orgReplica.onSettingsChanged();
                    translogOps = 5; // 4 ops + seqno gaps (delete #1 is removed but index #0 will be replayed).
                } else {
-                    logger.info("--> flushing shard (translog/soft-deletes will be retained)");
+                    logger.info("--> flushing shard (translog will be retained)");
                    translogOps = 6; // 5 ops + seqno gaps
                }
                flushShard(orgReplica);
@@ -180,6 +176,62 @@ public void testRecoveryWithOutOfOrderDelete() throws Exception {
        }
    }

+    public void testRecoveryWithOutOfOrderDeleteWithSoftDeletes() throws Exception {
+        Settings settings = Settings.builder()
+            .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true)
+            .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), 10)
+            // If soft-deletes is enabled, delete#1 will be reclaimed because its segment (segment_1) is fully deleted
+            // index#0 will be retained if merge is disabled; otherwise it will be reclaimed because gcp=3 and retained_ops=0
+            .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false).build();
+        try (ReplicationGroup shards = createGroup(1, settings)) {
+            shards.startAll();
+            // create out of order delete and index op on replica
+            final IndexShard orgReplica = shards.getReplicas().get(0);
+            final String indexName = orgReplica.shardId().getIndexName();
+
+            // delete #1
+            orgReplica.applyDeleteOperationOnReplica(1, 2, "type", "id");
+            orgReplica.flush(new FlushRequest().force(true)); // isolate delete#1 in its own translog generation and lucene segment
+            // index #0
+            orgReplica.applyIndexOperationOnReplica(0, 1, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false,
+                SourceToParse.source(indexName, "type", "id", new BytesArray("{}"), XContentType.JSON));
+            // index #3
+            orgReplica.applyIndexOperationOnReplica(3, 1, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false,
+                SourceToParse.source(indexName, "type", "id-3", new BytesArray("{}"), XContentType.JSON));
+            // Flushing a new commit with local checkpoint=1 allows deleting translog gen #1.
+            orgReplica.flush(new FlushRequest().force(true).waitIfOngoing(true));
+            // index #2
+            orgReplica.applyIndexOperationOnReplica(2, 1, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false,
+                SourceToParse.source(indexName, "type", "id-2", new BytesArray("{}"), XContentType.JSON));
+            orgReplica.updateGlobalCheckpointOnReplica(3L, "test");
+            // index #5 -> force NoOp #4.
+            orgReplica.applyIndexOperationOnReplica(5, 1, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false,
+                SourceToParse.source(indexName, "type", "id-5", new BytesArray("{}"), XContentType.JSON));
+
+            if (randomBoolean()) {
+                if (randomBoolean()) {
+                    logger.info("--> flushing shard (translog/soft-deletes will be trimmed)");
+                    IndexMetaData.Builder builder = IndexMetaData.builder(orgReplica.indexSettings().getIndexMetaData());
+                    builder.settings(Settings.builder().put(orgReplica.indexSettings().getSettings())
+                        .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), 0));
+                    orgReplica.indexSettings().updateIndexMetaData(builder.build());
+                    orgReplica.onSettingsChanged();
+                }
+                flushShard(orgReplica);
+            }
+
+            final IndexShard orgPrimary = shards.getPrimary();
+            shards.promoteReplicaToPrimary(orgReplica).get(); // wait for primary/replica sync to make sure seq# gap is closed.
+
+            IndexShard newReplica = shards.addReplicaWithExistingPath(orgPrimary.shardPath(), orgPrimary.routingEntry().currentNodeId());
+            shards.recoverReplica(newReplica);
+            shards.assertAllEqual(3);
+            try (Translog.Snapshot snapshot = newReplica.getHistoryOperations("test", 0)) {
+                assertThat(snapshot, SnapshotMatchers.size(6));
+            }
+        }
+    }
+
    public void testDifferentHistoryUUIDDisablesOPsRecovery() throws Exception {
        try (ReplicationGroup shards = createGroup(1)) {
            shards.startAll();
@@ -228,7 +280,8 @@ public void testDifferentHistoryUUIDDisablesOPsRecovery() throws Exception {
            shards.recoverReplica(newReplica);
            // file based recovery should be made
            assertThat(newReplica.recoveryState().getIndex().fileDetails(), not(empty()));
-            assertThat(getTranslog(newReplica).totalOperations(), equalTo(numDocs));
+            boolean softDeletesEnabled = replica.indexSettings().isSoftDeleteEnabled();
+            assertThat(getTranslog(newReplica).totalOperations(), equalTo(softDeletesEnabled ? nonFlushedDocs : numDocs));

            // history uuid was restored
            assertThat(newReplica.getHistoryUUID(), equalTo(historyUUID));
@@ -332,7 +385,8 @@ public void testShouldFlushAfterPeerRecovery() throws Exception {
            shards.recoverReplica(replica);
            // Make sure the flushing will eventually be completed (eg. `shouldPeriodicallyFlush` is false)
            assertBusy(() -> assertThat(getEngine(replica).shouldPeriodicallyFlush(), equalTo(false)));
-            assertThat(getTranslog(replica).totalOperations(), equalTo(numDocs));
+            boolean softDeletesEnabled = replica.indexSettings().isSoftDeleteEnabled();
+            assertThat(getTranslog(replica).totalOperations(), equalTo(softDeletesEnabled ? 0 : numDocs));
            shards.assertAllEqual(numDocs);
        }
    }
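
For context, the assertions touched in these hunks converge on one pattern: the expected translog size now depends on whether soft deletes are enabled on the shard. Below is a minimal sketch of that pattern, assuming the same test helpers that appear in the diff (getTranslog, IndexShard.indexSettings().isSoftDeleteEnabled(), Hamcrest's assertThat/equalTo); the helper name assertExpectedTranslogOps is hypothetical and is not part of the commit.

    // Hypothetical helper, not part of the commit above: it restates the assertion pattern
    // the diff applies in several tests. With soft deletes enabled the translog is expected
    // to hold only the operations that were not trimmed (often 0 after a flush); without
    // soft deletes it still holds the full history that peer recovery would replay.
    private static void assertExpectedTranslogOps(IndexShard shard, int allOps, int opsIfSoftDeletes) {
        boolean softDeletesEnabled = shard.indexSettings().isSoftDeleteEnabled();
        int expected = softDeletesEnabled ? opsIfSoftDeletes : allOps;
        assertThat(getTranslog(shard).totalOperations(), equalTo(expected));
    }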