@@ -166,58 +166,6 @@ export interface StreamOffset {
  * StreamApi
  */
 
-/**
- * @deprecated This function will be removed by 09.2020. Please use doCommitOffsets.
- * After reading data, you should commit the offset of the last message read from each partition so
- * that your application can resume reading new messages from the correct partition in the event that there is a
- * disruption to the subscription, such as an application crash. An offset can also be useful if you delete a subscription
- * then recreate a subscription for the same layer, because the new subscription can start reading data from the offset.
- * To read messages already committed, use the /seek endpoint, then use /partitions.
- * The base path to use is the value of 'nodeBaseURL' returned from /subscribe POST request.
- *
- * @summary Commits offsets of the last message read
- * @param layerId The ID of the stream layer.
- * @param commitOffsets The offsets to commit. It should be same as the offset of the message you wish to commit.
- * Do not pass offset + 1 as mentioned in Kafka Consumer documentation. The service adds one to the offset you specify.
- * @param subscriptionId The subscriptionId received in the response of the /subscribe request (required if mode=parallel).
- * @param mode The subscription mode of this subscriptionId (as provided in /subscribe POST API).
- * @param xCorrelationId The correlation-id (value of Response Header 'X-Correlation-Id') from prior step in process.
- * See the [API Reference](https://developer.here.com/olp/documentation/data-api/api-reference.html) for the `stream` API
- */
-export async function commitOffsets(
-    builder: RequestBuilder,
-    params: {
-        layerId: string;
-        commitOffsets: CommitOffsetsRequest;
-        subscriptionId?: string;
-        mode?: "serial" | "parallel";
-        xCorrelationId?: string;
-    }
-): Promise<any> {
-    const baseUrl = "/layers/{layerId}/offsets".replace(
-        "{layerId}",
-        UrlBuilder.toString(params["layerId"])
-    );
-
-    const urlBuilder = new UrlBuilder(builder.baseUrl + baseUrl);
-    urlBuilder.appendQuery("subscriptionId", params["subscriptionId"]);
-    urlBuilder.appendQuery("mode", params["mode"]);
-
-    const headers: { [header: string]: string } = {};
-    const options: RequestOptions = {
-        method: "PUT",
-        headers
-    };
-    headers["Content-Type"] = "application/json";
-    if (params["commitOffsets"] !== undefined) {
-        options.body = JSON.stringify(params["commitOffsets"]);
-    }
-    if (params["xCorrelationId"] !== undefined) {
-        headers["X-Correlation-Id"] = params["xCorrelationId"] as string;
-    }
-    return builder.request<any>(urlBuilder, options);
-}
-
 /**
  * After reading data, you should commit the offset of the last message read from each partition so
  * that your application can resume reading new messages from the correct partition in the event that there is a
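For callers still using the removed `commitOffsets`, the deprecation note points to `doCommitOffsets`. A minimal migration sketch is below; it assumes `doCommitOffsets` keeps the same `RequestBuilder` + params shape as the removed function, and the import path and literal values are illustrative rather than taken from this diff.

```typescript
// Migration sketch (assumptions: doCommitOffsets mirrors the removed
// commitOffsets signature; the import path and sample values are illustrative).
import { RequestBuilder, StreamApi } from "@here/olp-sdk-dataservice-api";

async function commitLastReadOffset(builder: RequestBuilder): Promise<any> {
    return StreamApi.doCommitOffsets(builder, {
        // The ID of the stream layer being read.
        layerId: "sample-stream-layer",
        // Commit the offset of the last message read, not offset + 1;
        // the service adds one to the value you pass.
        commitOffsets: { offsets: [{ partition: 0, offset: 42 }] },
        // subscriptionId is required only when mode is "parallel".
        mode: "serial"
    });
}
```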