
Commit 1ee6a34

Auto-generated API code (#2716)
1 parent 34bb7f5 commit 1ee6a34

4 files changed: +24 -180 lines changed

Diff for: docs/reference.asciidoc

-40 lines changed

@@ -8038,25 +8038,6 @@ client.inference.get({ ... })
 ** *`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion" | "chat_completion"))*: The task type
 ** *`inference_id` (Optional, string)*: The inference Id

-[discrete]
-==== post_eis_chat_completion
-Perform a chat completion task through the Elastic Inference Service (EIS).
-
-Perform a chat completion inference task with the `elastic` service.
-
-{ref}/post-inference-api.html[Endpoint documentation]
-[source,ts]
-----
-client.inference.postEisChatCompletion({ eis_inference_id })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`eis_inference_id` (string)*: The unique identifier of the inference endpoint.
-** *`chat_completion_request` (Optional, { messages, model, max_completion_tokens, stop, temperature, tool_choice, tools, top_p })*
-
 [discrete]
 ==== put
 Create an inference endpoint.

@@ -8277,27 +8258,6 @@ These settings are specific to the `cohere` service.
 ** *`task_settings` (Optional, { input_type, return_documents, top_n, truncate })*: Settings to configure the inference task.
 These settings are specific to the task type you specified.

-[discrete]
-==== put_eis
-Create an Elastic Inference Service (EIS) inference endpoint.
-
-Create an inference endpoint to perform an inference task through the Elastic Inference Service (EIS).
-
-[source,ts]
-----
-client.inference.putEis({ task_type, eis_inference_id, service, service_settings })
-----
-
-[discrete]
-==== Arguments
-
-* *Request (object):*
-** *`task_type` (Enum("chat_completion"))*: The type of the inference task that the model will perform.
-NOTE: The `chat_completion` task type only supports streaming and only through the _stream API.
-** *`eis_inference_id` (string)*: The unique identifier of the inference endpoint.
-** *`service` (Enum("elastic"))*: The type of service supported for the specified task type. In this case, `elastic`.
-** *`service_settings` ({ model_id, rate_limit })*: Settings used to install the inference model. These settings are specific to the `elastic` service.
-
 [discrete]
 ==== put_elasticsearch
 Create an Elasticsearch inference endpoint.
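
With the EIS-specific helpers removed from the reference, inference endpoints are reached through the generic methods that remain documented above. For context, a minimal sketch of listing endpoints with `client.inference.get` — the node URL, API key, and endpoint id are placeholders, and top-level await assumes an ES module:

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({
  node: 'https://localhost:9200',          // placeholder cluster URL
  auth: { apiKey: 'REPLACE_WITH_API_KEY' } // placeholder credentials
})

// List every configured inference endpoint (all request fields are optional)...
const all = await client.inference.get({})
console.log(all.endpoints.map(e => e.inference_id))

// ...or narrow the lookup by task type and endpoint id.
const chat = await client.inference.get({
  task_type: 'chat_completion',
  inference_id: 'my-chat-endpoint'         // placeholder endpoint id
})
console.log(chat.endpoints)
----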

Diff for: src/api/api/inference.ts

-82 lines changed

@@ -209,43 +209,6 @@ export default class Inference {
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }

-  /**
-   * Perform a chat completion task through the Elastic Inference Service (EIS). Perform a chat completion inference task with the `elastic` service.
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/post-inference-api.html | Elasticsearch API documentation}
-   */
-  async postEisChatCompletion (this: That, params: T.InferencePostEisChatCompletionRequest | TB.InferencePostEisChatCompletionRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePostEisChatCompletionResponse>
-  async postEisChatCompletion (this: That, params: T.InferencePostEisChatCompletionRequest | TB.InferencePostEisChatCompletionRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferencePostEisChatCompletionResponse, unknown>>
-  async postEisChatCompletion (this: That, params: T.InferencePostEisChatCompletionRequest | TB.InferencePostEisChatCompletionRequest, options?: TransportRequestOptions): Promise<T.InferencePostEisChatCompletionResponse>
-  async postEisChatCompletion (this: That, params: T.InferencePostEisChatCompletionRequest | TB.InferencePostEisChatCompletionRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['eis_inference_id']
-    const acceptedBody: string[] = ['chat_completion_request']
-    const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    let body: any = params.body ?? undefined
-
-    for (const key in params) {
-      if (acceptedBody.includes(key)) {
-        // @ts-expect-error
-        body = params[key]
-      } else if (acceptedPath.includes(key)) {
-        continue
-      } else if (key !== 'body') {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      }
-    }
-
-    const method = 'POST'
-    const path = `/_inference/chat_completion/${encodeURIComponent(params.eis_inference_id.toString())}/_stream`
-    const meta: TransportRequestMetadata = {
-      name: 'inference.post_eis_chat_completion',
-      pathParts: {
-        eis_inference_id: params.eis_inference_id
-      }
-    }
-    return await this.transport.request({ path, method, querystring, body, meta }, options)
-  }
-
   /**
    * Create an inference endpoint. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/put-inference-api.html | Elasticsearch API documentation}

@@ -561,51 +524,6 @@ export default class Inference {
     return await this.transport.request({ path, method, querystring, body, meta }, options)
   }

-  /**
-   * Create an Elastic Inference Service (EIS) inference endpoint. Create an inference endpoint to perform an inference task through the Elastic Inference Service (EIS).
-   * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/infer-service-elastic.html | Elasticsearch API documentation}
-   */
-  async putEis (this: That, params: T.InferencePutEisRequest | TB.InferencePutEisRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.InferencePutEisResponse>
-  async putEis (this: That, params: T.InferencePutEisRequest | TB.InferencePutEisRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.InferencePutEisResponse, unknown>>
-  async putEis (this: That, params: T.InferencePutEisRequest | TB.InferencePutEisRequest, options?: TransportRequestOptions): Promise<T.InferencePutEisResponse>
-  async putEis (this: That, params: T.InferencePutEisRequest | TB.InferencePutEisRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['task_type', 'eis_inference_id']
-    const acceptedBody: string[] = ['service', 'service_settings']
-    const querystring: Record<string, any> = {}
-    // @ts-expect-error
-    const userBody: any = params?.body
-    let body: Record<string, any> | string
-    if (typeof userBody === 'string') {
-      body = userBody
-    } else {
-      body = userBody != null ? { ...userBody } : undefined
-    }
-
-    for (const key in params) {
-      if (acceptedBody.includes(key)) {
-        body = body ?? {}
-        // @ts-expect-error
-        body[key] = params[key]
-      } else if (acceptedPath.includes(key)) {
-        continue
-      } else if (key !== 'body') {
-        // @ts-expect-error
-        querystring[key] = params[key]
-      }
-    }
-
-    const method = 'PUT'
-    const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.eis_inference_id.toString())}`
-    const meta: TransportRequestMetadata = {
-      name: 'inference.put_eis',
-      pathParts: {
-        task_type: params.task_type,
-        eis_inference_id: params.eis_inference_id
-      }
-    }
-    return await this.transport.request({ path, method, querystring, body, meta }, options)
-  }
-
   /**
    * Create an Elasticsearch inference endpoint. Create an inference endpoint to perform an inference task with the `elasticsearch` service. > info > Your Elasticsearch deployment contains preconfigured ELSER and E5 inference endpoints, you only need to create the enpoints using the API if you want to customize the settings. If you use the ELSER or the E5 model through the `elasticsearch` service, the API request will automatically download and deploy the model if it isn't downloaded yet. > info > You might see a 502 bad gateway error in the response when using the Kibana Console. This error usually just reflects a timeout, while the model downloads in the background. You can check the download progress in the Machine Learning UI. If using the Python client, you can set the timeout parameter to a higher value. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
    * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.18/infer-service-elasticsearch.html | Elasticsearch API documentation}
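
The surviving doc comments above tell callers to poll the get trained model statistics API until the deployment reports `"state": "fully_allocated"` with matching allocation counts. A rough polling sketch of that advice, not part of this commit — the model id and the exact response field path (`deployment_stats.allocation_status`) are assumptions to verify against your cluster:

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

// Poll trained-model stats until the deployment is fully allocated (assumption: the
// allocation state lives at trained_model_stats[].deployment_stats.allocation_status).
async function waitForAllocation (client: Client, modelId: string): Promise<void> {
  for (let attempt = 0; attempt < 30; attempt++) {
    const stats = await client.ml.getTrainedModelsStats({ model_id: modelId })
    const allocation = stats.trained_model_stats[0]?.deployment_stats?.allocation_status
    if (allocation?.state === 'fully_allocated' &&
        allocation.allocation_count === allocation.target_allocation_count) {
      return
    }
    await new Promise(resolve => setTimeout(resolve, 2000)) // back off 2s between polls
  }
  throw new Error(`model ${modelId} was not fully allocated in time`)
}
----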

Diff for: src/api/types.ts

+12 -27 lines changed

@@ -9182,7 +9182,8 @@ export interface ClusterGetSettingsResponse {
 export interface ClusterHealthHealthResponseBody {
   active_primary_shards: integer
   active_shards: integer
-  active_shards_percent_as_number: Percentage
+  active_shards_percent?: string
+  active_shards_percent_as_number: double
   cluster_name: Name
   delayed_unassigned_shards: integer
   indices?: Record<IndexName, ClusterHealthIndexHealthStats>

@@ -9222,7 +9223,7 @@ export interface ClusterHealthRequest extends RequestBase {
   timeout?: Duration
   wait_for_active_shards?: WaitForActiveShards
   wait_for_events?: WaitForEvents
-  wait_for_nodes?: string | integer
+  wait_for_nodes?: ClusterHealthWaitForNodes
   wait_for_no_initializing_shards?: boolean
   wait_for_no_relocating_shards?: boolean
   wait_for_status?: HealthStatus

@@ -9240,6 +9241,8 @@ export interface ClusterHealthShardHealthStats {
   unassigned_primary_shards: integer
 }

+export type ClusterHealthWaitForNodes = string | integer
+
 export interface ClusterInfoRequest extends RequestBase {
   target: ClusterInfoTargets
 }

@@ -13014,15 +13017,6 @@ export type InferenceDenseByteVector = byte[]

 export type InferenceDenseVector = float[]

-export interface InferenceEisServiceSettings {
-  model_id: string
-  rate_limit?: InferenceRateLimitSetting
-}
-
-export type InferenceEisServiceType = 'elastic'
-
-export type InferenceEisTaskType = 'chat_completion'
-
 export interface InferenceElasticsearchServiceSettings {
   adaptive_allocations?: InferenceAdaptiveAllocations
   deployment_id?: string

@@ -13294,13 +13288,6 @@ export interface InferenceGetResponse {
   endpoints: InferenceInferenceEndpointInfo[]
 }

-export interface InferencePostEisChatCompletionRequest extends RequestBase {
-  eis_inference_id: Id
-  chat_completion_request?: InferenceRequestChatCompletion
-}
-
-export type InferencePostEisChatCompletionResponse = StreamResult
-
 export interface InferencePutRequest extends RequestBase {
   task_type?: InferenceTaskType
   inference_id: Id

@@ -13375,15 +13362,6 @@ export interface InferencePutCohereRequest extends RequestBase {

 export type InferencePutCohereResponse = InferenceInferenceEndpointInfo

-export interface InferencePutEisRequest extends RequestBase {
-  task_type: InferenceEisTaskType
-  eis_inference_id: Id
-  service: InferenceEisServiceType
-  service_settings: InferenceEisServiceSettings
-}
-
-export type InferencePutEisResponse = InferenceInferenceEndpointInfo
-
 export interface InferencePutElasticsearchRequest extends RequestBase {
   task_type: InferenceElasticsearchTaskType
   elasticsearch_inference_id: Id

@@ -13765,6 +13743,8 @@ export interface IngestInferenceProcessor extends IngestProcessorBase {
   target_field?: Field
   field_map?: Record<Field, any>
   inference_config?: IngestInferenceConfig
+  input_output?: IngestInputConfig | IngestInputConfig[]
+  ignore_missing?: boolean
 }

 export interface IngestIngest {

@@ -13773,6 +13753,11 @@ export interface IngestIngest {
   pipeline?: Name
 }

+export interface IngestInputConfig {
+  input_field: string
+  output_field: string
+}
+
 export interface IngestIpLocationProcessor extends IngestProcessorBase {
   database_file?: string
   field: Field
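
The new `ClusterHealthWaitForNodes` alias and the retyped `active_shards_percent_as_number` surface directly in `client.cluster.health` calls. A small usage sketch — the node URL, the `'>=2'` expression, and the 30s timeout are illustrative values:

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200' }) // placeholder cluster URL

const health = await client.cluster.health({
  wait_for_nodes: '>=2',       // ClusterHealthWaitForNodes: a node count or an expression string
  wait_for_status: 'yellow',
  timeout: '30s'
})

// active_shards_percent_as_number is now typed as a plain number (double).
console.log(health.status, health.active_shards_percent_as_number)
----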

Diff for: src/api/typesWithBodyKey.ts

+12 -31 lines changed

@@ -9278,7 +9278,8 @@ export interface ClusterGetSettingsResponse {
 export interface ClusterHealthHealthResponseBody {
   active_primary_shards: integer
   active_shards: integer
-  active_shards_percent_as_number: Percentage
+  active_shards_percent?: string
+  active_shards_percent_as_number: double
   cluster_name: Name
   delayed_unassigned_shards: integer
   indices?: Record<IndexName, ClusterHealthIndexHealthStats>

@@ -9318,7 +9319,7 @@ export interface ClusterHealthRequest extends RequestBase {
   timeout?: Duration
   wait_for_active_shards?: WaitForActiveShards
   wait_for_events?: WaitForEvents
-  wait_for_nodes?: string | integer
+  wait_for_nodes?: ClusterHealthWaitForNodes
   wait_for_no_initializing_shards?: boolean
   wait_for_no_relocating_shards?: boolean
   wait_for_status?: HealthStatus

@@ -9336,6 +9337,8 @@ export interface ClusterHealthShardHealthStats {
   unassigned_primary_shards: integer
 }

+export type ClusterHealthWaitForNodes = string | integer
+
 export interface ClusterInfoRequest extends RequestBase {
   target: ClusterInfoTargets
 }

@@ -13256,15 +13259,6 @@ export type InferenceDenseByteVector = byte[]

 export type InferenceDenseVector = float[]

-export interface InferenceEisServiceSettings {
-  model_id: string
-  rate_limit?: InferenceRateLimitSetting
-}
-
-export type InferenceEisServiceType = 'elastic'
-
-export type InferenceEisTaskType = 'chat_completion'
-
 export interface InferenceElasticsearchServiceSettings {
   adaptive_allocations?: InferenceAdaptiveAllocations
   deployment_id?: string

@@ -13540,14 +13534,6 @@ export interface InferenceGetResponse {
   endpoints: InferenceInferenceEndpointInfo[]
 }

-export interface InferencePostEisChatCompletionRequest extends RequestBase {
-  eis_inference_id: Id
-  /** @deprecated The use of the 'body' key has been deprecated, use 'chat_completion_request' instead. */
-  body?: InferenceRequestChatCompletion
-}
-
-export type InferencePostEisChatCompletionResponse = StreamResult
-
 export interface InferencePutRequest extends RequestBase {
   task_type?: InferenceTaskType
   inference_id: Id

@@ -13641,18 +13627,6 @@ export interface InferencePutCohereRequest extends RequestBase {

 export type InferencePutCohereResponse = InferenceInferenceEndpointInfo

-export interface InferencePutEisRequest extends RequestBase {
-  task_type: InferenceEisTaskType
-  eis_inference_id: Id
-  /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
-  body?: {
-    service: InferenceEisServiceType
-    service_settings: InferenceEisServiceSettings
-  }
-}
-
-export type InferencePutEisResponse = InferenceInferenceEndpointInfo
-
 export interface InferencePutElasticsearchRequest extends RequestBase {
   task_type: InferenceElasticsearchTaskType
   elasticsearch_inference_id: Id

@@ -14077,6 +14051,8 @@ export interface IngestInferenceProcessor extends IngestProcessorBase {
   target_field?: Field
   field_map?: Record<Field, any>
   inference_config?: IngestInferenceConfig
+  input_output?: IngestInputConfig | IngestInputConfig[]
+  ignore_missing?: boolean
 }

 export interface IngestIngest {

@@ -14085,6 +14061,11 @@ export interface IngestIngest {
   pipeline?: Name
 }

+export interface IngestInputConfig {
+  input_field: string
+  output_field: string
+}
+
 export interface IngestIpLocationProcessor extends IngestProcessorBase {
   database_file?: string
   field: Field
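
The added `IngestInputConfig` shape and the `input_output` / `ignore_missing` fields on the ingest inference processor can be exercised when defining a pipeline. A minimal sketch — the pipeline id, inference endpoint id, and field names are placeholders:

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200' }) // placeholder cluster URL

await client.ingest.putPipeline({
  id: 'my-inference-pipeline',               // placeholder pipeline id
  processors: [
    {
      inference: {
        model_id: 'my-elser-endpoint',       // placeholder model / inference endpoint id
        // IngestInputConfig: read body_text, write the model output to body_embedding
        input_output: [
          { input_field: 'body_text', output_field: 'body_embedding' }
        ],
        ignore_missing: true                 // don't fail documents that lack body_text
      }
    }
  ]
})
----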
