@@ -160,46 +160,54 @@ extension SegmentDestination {
         guard let analytics = self.analytics else { return }
         guard let httpClient = self.httpClient else { return }

-        guard let files = storage.dataStore.fetch()?.dataFiles else { return }
-
-        for url in files {
-            // enter for this url we're going to kick off
-            group.enter()
-            analytics.log(message: "Processing Batch:\n\(url.lastPathComponent)")
+        // Cooperative release of allocated memory by URL instances (dataFiles).
+        autoreleasepool {
+            guard let files = storage.dataStore.fetch()?.dataFiles else { return }

-            // set up the task
-            let uploadTask = httpClient.startBatchUpload(writeKey: analytics.configuration.values.writeKey, batch: url) { [weak self] result in
-                defer {
-                    group.leave()
-                }
-                guard let self else { return }
-                switch result {
-                case .success(_):
-                    storage.remove(data: [url])
-                    cleanupUploads()
+            for url in files {
+                // Use the autorelease pool to ensure that unnecessary memory allocations
+                // are released after each iteration. If there is a large backlog of files
+                // to iterate, the host applications may crash due to OOM issues.
+                autoreleasepool {
+                    // enter for this url we're going to kick off
+                    group.enter()
+                    analytics.log(message: "Processing Batch:\n\(url.lastPathComponent)")

-                // we don't want to retry events in a given batch when a 400
-                // response for malformed JSON is returned
-                case .failure(Segment.HTTPClientErrors.statusCode(code: 400)):
-                    storage.remove(data: [url])
-                    cleanupUploads()
-                default:
-                    break
+                    // set up the task
+                    let uploadTask = httpClient.startBatchUpload(writeKey: analytics.configuration.values.writeKey, batch: url) { [weak self] result in
+                        defer {
+                            group.leave()
+                        }
+                        guard let self else { return }
+                        switch result {
+                        case .success(_):
+                            storage.remove(data: [url])
+                            cleanupUploads()
+
+                        // we don't want to retry events in a given batch when a 400
+                        // response for malformed JSON is returned
+                        case .failure(Segment.HTTPClientErrors.statusCode(code: 400)):
+                            storage.remove(data: [url])
+                            cleanupUploads()
+                        default:
+                            break
+                        }
+
+                        analytics.log(message: "Processed: \(url.lastPathComponent)")
+                        // the upload we have here has just finished.
+                        // make sure it gets removed and it's cleanup() called rather
+                        // than waiting on the next flush to come around.
+                        cleanupUploads()
+                    }
+
+                    // we have a legit upload in progress now, so add it to our list.
+                    if let upload = uploadTask {
+                        add(uploadTask: UploadTaskInfo(url: url, data: nil, task: upload))
+                    } else {
+                        // we couldn't get a task, so we need to leave the group or things will hang.
+                        group.leave()
+                    }
                 }
-
-                analytics.log(message: "Processed: \(url.lastPathComponent)")
-                // the upload we have here has just finished.
-                // make sure it gets removed and it's cleanup() called rather
-                // than waiting on the next flush to come around.
-                cleanupUploads()
-            }
-
-            // we have a legit upload in progress now, so add it to our list.
-            if let upload = uploadTask {
-                add(uploadTask: UploadTaskInfo(url: url, data: nil, task: upload))
-            } else {
-                // we couldn't get a task, so we need to leave the group or things will hang.
-                group.leave()
             }
         }
     }
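For context, the change applies a common Foundation pattern: wrap work that produces autoreleased temporaries (here, the URL and batch-file handling for each upload) in a per-iteration autoreleasepool, so those temporaries are drained after every pass instead of accumulating until the surrounding pool drains. Below is a minimal, self-contained sketch of that pattern; the directory listing and the process(_:) helper are hypothetical illustrations, not part of the SDK.

```swift
import Foundation

// Hypothetical stand-in for per-file batch processing; not part of the Segment SDK.
func process(_ url: URL) {
    // Reading the file creates temporary allocations that may be autoreleased.
    if let data = try? Data(contentsOf: url) {
        print("processed \(url.lastPathComponent): \(data.count) bytes")
    }
}

let directory = FileManager.default.temporaryDirectory
let files = (try? FileManager.default.contentsOfDirectory(at: directory,
                                                          includingPropertiesForKeys: nil)) ?? []

for url in files {
    // Drain autoreleased temporaries at the end of each iteration so a large
    // backlog of files does not accumulate memory and risk an OOM termination.
    autoreleasepool {
        process(url)
    }
}
```

The diff uses the same idea twice: an outer pool around the fetch of dataFiles and an inner pool around each file's upload setup, keeping memory bounded even when there is a large backlog of batch files to flush.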