Skip to content

Commit a003078

Browse files
Auto-generated API code (#2625)
1 parent 25e8e84 commit a003078

File tree

3 files changed: +21 additions, −5 deletions

docs/reference.asciidoc

Lines changed: 5 additions & 5 deletions
Original file line number | Diff line number | Diff line change
@@ -4047,8 +4047,8 @@ client.ilm.start({ ... })
40474047
==== Arguments
40484048

40494049
* *Request (object):*
4050-
** *`master_timeout` (Optional, string | -1 | 0)*
4051-
** *`timeout` (Optional, string | -1 | 0)*
4050+
** *`master_timeout` (Optional, string | -1 | 0)*: Explicit operation timeout for connection to master node
4051+
** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout
40524052

40534053
[discrete]
40544054
==== stop
@@ -4069,8 +4069,8 @@ client.ilm.stop({ ... })
40694069
==== Arguments
40704070

40714071
* *Request (object):*
4072-
** *`master_timeout` (Optional, string | -1 | 0)*
4073-
** *`timeout` (Optional, string | -1 | 0)*
4072+
** *`master_timeout` (Optional, string | -1 | 0)*: Explicit operation timeout for connection to master node
4073+
** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout
40744074

40754075
[discrete]
40764076
=== indices
@@ -5765,7 +5765,7 @@ client.inference.put({ inference_id })
57655765
* *Request (object):*
57665766
** *`inference_id` (string)*: The inference Id
57675767
** *`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion"))*: The task type
5768-
** *`inference_config` (Optional, { service, service_settings, task_settings })*
5768+
** *`inference_config` (Optional, { chunking_settings, service, service_settings, task_settings })*
57695769

57705770
[discrete]
57715771
==== stream_inference

src/api/types.ts

Lines changed: 8 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -12501,7 +12501,15 @@ export type InferenceDenseByteVector = byte[]
1250112501

1250212502
export type InferenceDenseVector = float[]
1250312503

12504+
export interface InferenceInferenceChunkingSettings extends InferenceInferenceEndpoint {
12505+
max_chunk_size?: integer
12506+
overlap?: integer
12507+
sentence_overlap?: integer
12508+
strategy?: string
12509+
}
12510+
1250412511
export interface InferenceInferenceEndpoint {
12512+
chunking_settings?: InferenceInferenceChunkingSettings
1250512513
service: string
1250612514
service_settings: InferenceServiceSettings
1250712515
task_settings?: InferenceTaskSettings

src/api/typesWithBodyKey.ts

Lines changed: 8 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -12725,7 +12725,15 @@ export type InferenceDenseByteVector = byte[]
1272512725

1272612726
export type InferenceDenseVector = float[]
1272712727

12728+
export interface InferenceInferenceChunkingSettings extends InferenceInferenceEndpoint {
12729+
max_chunk_size?: integer
12730+
overlap?: integer
12731+
sentence_overlap?: integer
12732+
strategy?: string
12733+
}
12734+
1272812735
export interface InferenceInferenceEndpoint {
12736+
chunking_settings?: InferenceInferenceChunkingSettings
1272912737
service: string
1273012738
service_settings: InferenceServiceSettings
1273112739
task_settings?: InferenceTaskSettings

0 commit comments

Comments (0)