Commit 155517e

Inference: remove the hf-only options now that the API is warm-only (#1192)
1 parent f85f5f7 commit 155517e

File tree

8 files changed: +19 −66 lines

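For callers, the practical effect is that the hf-only options (wait_for_model, use_cache, dont_load_model, use_gpu) disappear and tasks are invoked with the input payload alone. A minimal caller-side sketch, assuming @huggingface/inference is installed, an HF_TOKEN environment variable is set, and the image URL is only a placeholder:

import { HfInference } from "@huggingface/inference";

const inference = new HfInference(process.env.HF_TOKEN);

// Previously a second options argument such as { wait_for_model: true } could be passed;
// with the warm-only API the call reduces to the input object.
const image = await fetch("https://example.com/cat.png").then((res) => res.blob());
const caption = await inference.imageToText({ data: image });
console.log(caption.generated_text);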

packages/agents/src/tools/imageToText.ts

Lines changed: 3 additions & 6 deletions

@@ -15,12 +15,9 @@ export const imageToTextTool: Tool = {
     if (typeof data === "string") throw "Input must be a blob.";
 
     return (
-      await inference.imageToText(
-        {
-          data,
-        },
-        { wait_for_model: true }
-      )
+      await inference.imageToText({
+        data,
+      })
     ).generated_text;
   },
 };

packages/agents/src/tools/speechToText.ts

Lines changed: 3 additions & 6 deletions

@@ -15,12 +15,9 @@ export const speechToTextTool: Tool = {
     if (typeof data === "string") throw "Input must be a blob.";
 
     return (
-      await inference.automaticSpeechRecognition(
-        {
-          data,
-        },
-        { wait_for_model: true }
-      )
+      await inference.automaticSpeechRecognition({
+        data,
+      })
     ).text;
   },
 };

packages/agents/src/tools/textToImage.ts

Lines changed: 3 additions & 6 deletions

@@ -19,11 +19,8 @@ export const textToImageTool: Tool = {
     const data = await input;
     if (typeof data !== "string") throw "Input must be a string.";
 
-    return await inference.textToImage(
-      {
-        inputs: data,
-      },
-      { wait_for_model: true }
-    );
+    return await inference.textToImage({
+      inputs: data,
+    });
   },
 };

packages/agents/src/tools/textToSpeech.ts

Lines changed: 4 additions & 7 deletions

@@ -19,12 +19,9 @@ export const textToSpeechTool: Tool = {
     const data = await input;
     if (typeof data !== "string") throw "Input must be a string.";
 
-    return inference.textToSpeech(
-      {
-        inputs: data,
-        model: "espnet/kan-bayashi_ljspeech_vits",
-      },
-      { wait_for_model: true }
-    );
+    return inference.textToSpeech({
+      inputs: data,
+      model: "espnet/kan-bayashi_ljspeech_vits",
+    });
   },
 };

packages/inference/src/lib/makeRequestOptions.ts

Lines changed: 1 addition & 14 deletions

@@ -39,8 +39,7 @@ export async function makeRequestOptions(
   let otherArgs = remainingArgs;
   const provider = maybeProvider ?? "hf-inference";
 
-  const { forceTask, includeCredentials, taskHint, wait_for_model, use_cache, dont_load_model, chatCompletion } =
-    options ?? {};
+  const { forceTask, includeCredentials, taskHint, chatCompletion } = options ?? {};
 
   if (endpointUrl && provider !== "hf-inference") {
     throw new Error(`Cannot use endpointUrl with a third-party provider.`);
@@ -102,18 +101,6 @@
     headers["Content-Type"] = "application/json";
   }
 
-  if (provider === "hf-inference") {
-    if (wait_for_model) {
-      headers["X-Wait-For-Model"] = "true";
-    }
-    if (use_cache === false) {
-      headers["X-Use-Cache"] = "false";
-    }
-    if (dont_load_model) {
-      headers["X-Load-Model"] = "0";
-    }
-  }
-
   if (provider === "replicate") {
     headers["Prefer"] = "wait";
   }

packages/inference/src/tasks/custom/request.ts

Lines changed: 2 additions & 5 deletions

@@ -18,11 +18,8 @@ export async function request<T>(
   const { url, info } = await makeRequestOptions(args, options);
   const response = await (options?.fetch ?? fetch)(url, info);
 
-  if (options?.retry_on_error !== false && response.status === 503 && !options?.wait_for_model) {
-    return request(args, {
-      ...options,
-      wait_for_model: true,
-    });
+  if (options?.retry_on_error !== false && response.status === 503) {
+    return request(args, options);
   }
 
   if (!response.ok) {
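The 503 retry now simply re-issues the request with the same options instead of toggling wait_for_model. A caller that would rather see the 503 immediately can still opt out via retry_on_error; a hedged sketch (the model id is only an example):

import { HfInference } from "@huggingface/inference";

const inference = new HfInference(process.env.HF_TOKEN);

// With retry_on_error: false a 503 is not retried and surfaces through the !response.ok error path.
const image = await inference.textToImage(
  { inputs: "an astronaut riding a horse", model: "stabilityai/stable-diffusion-2" },
  { retry_on_error: false }
);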

packages/inference/src/tasks/custom/streamingRequest.ts

Lines changed: 2 additions & 5 deletions

@@ -20,11 +20,8 @@ export async function* streamingRequest<T>(
   const { url, info } = await makeRequestOptions({ ...args, stream: true }, options);
   const response = await (options?.fetch ?? fetch)(url, info);
 
-  if (options?.retry_on_error !== false && response.status === 503 && !options?.wait_for_model) {
-    return yield* streamingRequest(args, {
-      ...options,
-      wait_for_model: true,
-    });
+  if (options?.retry_on_error !== false && response.status === 503) {
+    return yield* streamingRequest(args, options);
   }
   if (!response.ok) {
     if (response.headers.get("Content-Type")?.startsWith("application/json")) {
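streamingRequest keeps the same caller-facing behaviour: a 503 is retried with unchanged options and the consumer just iterates the async generator. A usage sketch, assuming a Node environment and gpt2 as an example model id:

import { HfInference } from "@huggingface/inference";

const inference = new HfInference(process.env.HF_TOKEN);

// Streaming tasks are built on streamingRequest; the retry is transparent to this loop.
for await (const chunk of inference.textGenerationStream({
  model: "gpt2",
  inputs: "Once upon a time",
  parameters: { max_new_tokens: 20 },
})) {
  process.stdout.write(chunk.token.text);
}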

packages/inference/src/types.ts

Lines changed: 1 addition & 17 deletions

@@ -7,26 +7,10 @@ export type ModelId = string;
 
 export interface Options {
   /**
-   * (Default: true) Boolean. If a request 503s and wait_for_model is set to false, the request will be retried with the same parameters but with wait_for_model set to true.
+   * (Default: true) Boolean. If a request 503s, the request will be retried with the same parameters.
    */
   retry_on_error?: boolean;
-  /**
-   * (Default: true). Boolean. There is a cache layer on Inference API (serverless) to speedup requests we have already seen. Most models can use those results as is as models are deterministic (meaning the results will be the same anyway). However if you use a non deterministic model, you can set this parameter to prevent the caching mechanism from being used resulting in a real new query.
-   */
-  use_cache?: boolean;
-  /**
-   * (Default: false). Boolean. Do not load the model if it's not already available.
-   */
-  dont_load_model?: boolean;
-  /**
-   * (Default: false). Boolean to use GPU instead of CPU for inference (requires Startup plan at least).
-   */
-  use_gpu?: boolean;
 
-  /**
-   * (Default: false) Boolean. If the model is not ready, wait for it instead of receiving 503. It limits the number of requests required to get your inference done. It is advised to only set this flag to true after receiving a 503 error as it will limit hanging in your application to known places.
-   */
-  wait_for_model?: boolean;
   /**
    * Custom fetch function to use instead of the default one, for example to use a proxy or edit headers.
    */
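Of the options shown in this hunk, only retry_on_error and the custom fetch hook survive. A sketch of the custom fetch option, assuming gpt2 as an example model id:

import { HfInference } from "@huggingface/inference";

const inference = new HfInference(process.env.HF_TOKEN);

// A custom fetch can still proxy requests or edit headers, per the surviving doc comment.
const out = await inference.textGeneration(
  { model: "gpt2", inputs: "Hello" },
  {
    fetch: (input, init) => {
      console.log("requesting", String(input));
      return fetch(input, init);
    },
  }
);
console.log(out.generated_text);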
