Inference: remove the hf-only options now that the API is warm-only #1192

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged · 1 commit · Feb 10, 2025
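For context on the diff below, this is roughly what the change means for callers of @huggingface/inference. An illustrative sketch only; the token, image URL, and variable names are placeholders and not part of this PR:

import { HfInference } from "@huggingface/inference";

// Placeholder token and image source, for illustration only.
const inference = new HfInference("hf_xxx");
const imageBlob = await (await fetch("https://example.com/cat.png")).blob();

// Before: hf-only options such as wait_for_model were passed as a second argument.
//   await inference.imageToText({ data: imageBlob }, { wait_for_model: true });
// After: the serverless API is warm-only, so the call takes just its inputs.
const { generated_text } = await inference.imageToText({ data: imageBlob });
console.log(generated_text);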
9 changes: 3 additions & 6 deletions packages/agents/src/tools/imageToText.ts
@@ -15,12 +15,9 @@ export const imageToTextTool: Tool = {
     if (typeof data === "string") throw "Input must be a blob.";

     return (
-      await inference.imageToText(
-        {
-          data,
-        },
-        { wait_for_model: true }
-      )
+      await inference.imageToText({
+        data,
+      })
     ).generated_text;
   },
 };
9 changes: 3 additions & 6 deletions packages/agents/src/tools/speechToText.ts
@@ -15,12 +15,9 @@ export const speechToTextTool: Tool = {
     if (typeof data === "string") throw "Input must be a blob.";

     return (
-      await inference.automaticSpeechRecognition(
-        {
-          data,
-        },
-        { wait_for_model: true }
-      )
+      await inference.automaticSpeechRecognition({
+        data,
+      })
     ).text;
   },
 };
9 changes: 3 additions & 6 deletions packages/agents/src/tools/textToImage.ts
@@ -19,11 +19,8 @@ export const textToImageTool: Tool = {
     const data = await input;
     if (typeof data !== "string") throw "Input must be a string.";

-    return await inference.textToImage(
-      {
-        inputs: data,
-      },
-      { wait_for_model: true }
-    );
+    return await inference.textToImage({
+      inputs: data,
+    });
   },
 };
11 changes: 4 additions & 7 deletions packages/agents/src/tools/textToSpeech.ts
@@ -19,12 +19,9 @@ export const textToSpeechTool: Tool = {
     const data = await input;
     if (typeof data !== "string") throw "Input must be a string.";

-    return inference.textToSpeech(
-      {
-        inputs: data,
-        model: "espnet/kan-bayashi_ljspeech_vits",
-      },
-      { wait_for_model: true }
-    );
+    return inference.textToSpeech({
+      inputs: data,
+      model: "espnet/kan-bayashi_ljspeech_vits",
+    });
   },
 };
15 changes: 1 addition & 14 deletions packages/inference/src/lib/makeRequestOptions.ts
@@ -38,8 +38,7 @@ export async function makeRequestOptions(
   let otherArgs = remainingArgs;
   const provider = maybeProvider ?? "hf-inference";

-  const { forceTask, includeCredentials, taskHint, wait_for_model, use_cache, dont_load_model, chatCompletion } =
-    options ?? {};
+  const { forceTask, includeCredentials, taskHint, chatCompletion } = options ?? {};

   if (endpointUrl && provider !== "hf-inference") {
     throw new Error(`Cannot use endpointUrl with a third-party provider.`);
@@ -101,18 +100,6 @@
     headers["Content-Type"] = "application/json";
   }

-  if (provider === "hf-inference") {
-    if (wait_for_model) {
-      headers["X-Wait-For-Model"] = "true";
-    }
-    if (use_cache === false) {
-      headers["X-Use-Cache"] = "false";
-    }
-    if (dont_load_model) {
-      headers["X-Load-Model"] = "0";
-    }
-  }
-
   if (provider === "replicate") {
     headers["Prefer"] = "wait";
   }
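Since makeRequestOptions no longer sets X-Wait-For-Model, X-Use-Cache, or X-Load-Model, a caller that still wants to send one of those headers can inject it through the documented custom fetch option. A hedged sketch, assuming the server still accepts the header; the token, model, and prompt are placeholders:

import { HfInference } from "@huggingface/inference";

const inference = new HfInference("hf_xxx");

// Wrap fetch to add X-Use-Cache: false to the outgoing request headers.
const result = await inference.textGeneration(
  { model: "gpt2", inputs: "Hello" },
  {
    fetch: (url, init) =>
      fetch(url, {
        ...init,
        headers: { ...(init?.headers as Record<string, string>), "X-Use-Cache": "false" },
      }),
  }
);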
7 changes: 2 additions & 5 deletions packages/inference/src/tasks/custom/request.ts
@@ -18,11 +18,8 @@ export async function request<T>(
   const { url, info } = await makeRequestOptions(args, options);
   const response = await (options?.fetch ?? fetch)(url, info);

-  if (options?.retry_on_error !== false && response.status === 503 && !options?.wait_for_model) {
-    return request(args, {
-      ...options,
-      wait_for_model: true,
-    });
+  if (options?.retry_on_error !== false && response.status === 503) {
+    return request(args, options);
   }

   if (!response.ok) {
7 changes: 2 additions & 5 deletions packages/inference/src/tasks/custom/streamingRequest.ts
@@ -20,11 +20,8 @@ export async function* streamingRequest<T>(
   const { url, info } = await makeRequestOptions({ ...args, stream: true }, options);
   const response = await (options?.fetch ?? fetch)(url, info);

-  if (options?.retry_on_error !== false && response.status === 503 && !options?.wait_for_model) {
-    return yield* streamingRequest(args, {
-      ...options,
-      wait_for_model: true,
-    });
+  if (options?.retry_on_error !== false && response.status === 503) {
+    return yield* streamingRequest(args, options);
   }
   if (!response.ok) {
     if (response.headers.get("Content-Type")?.startsWith("application/json")) {
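With the wait_for_model escalation gone, a 503 is now simply retried with the same options; callers that would rather see the error immediately can still opt out via retry_on_error. A small sketch with a placeholder token and model:

import { textGeneration } from "@huggingface/inference";

const output = await textGeneration(
  { accessToken: "hf_xxx", model: "gpt2", inputs: "Hello" },
  { retry_on_error: false } // surface the 503 instead of retrying
);
console.log(output.generated_text);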
18 changes: 1 addition & 17 deletions packages/inference/src/types.ts
@@ -7,26 +7,10 @@ export type ModelId = string;

 export interface Options {
   /**
-   * (Default: true) Boolean. If a request 503s and wait_for_model is set to false, the request will be retried with the same parameters but with wait_for_model set to true.
+   * (Default: true) Boolean. If a request 503s, the request will be retried with the same parameters.
    */
   retry_on_error?: boolean;
-  /**
-   * (Default: true). Boolean. There is a cache layer on Inference API (serverless) to speedup requests we have already seen. Most models can use those results as is as models are deterministic (meaning the results will be the same anyway). However if you use a non deterministic model, you can set this parameter to prevent the caching mechanism from being used resulting in a real new query.
-   */
-  use_cache?: boolean;
-  /**
-   * (Default: false). Boolean. Do not load the model if it's not already available.
-   */
-  dont_load_model?: boolean;
-  /**
-   * (Default: false). Boolean to use GPU instead of CPU for inference (requires Startup plan at least).
-   */
-  use_gpu?: boolean;

-  /**
-   * (Default: false) Boolean. If the model is not ready, wait for it instead of receiving 503. It limits the number of requests required to get your inference done. It is advised to only set this flag to true after receiving a 503 error as it will limit hanging in your application to known places.
-   */
-  wait_for_model?: boolean;
   /**
    * Custom fetch function to use instead of the default one, for example to use a proxy or edit headers.
    */
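After this diff, Options keeps only the generic fields: retry_on_error, the custom fetch shown above, and whatever follows below the truncated context. A sketch of what existing call sites would need to drop, assuming Options stays re-exported from the package root as it is today:

import type { Options } from "@huggingface/inference";

const opts: Options = {
  retry_on_error: false,
  // fetch: myCustomFetch,   // still supported
  // wait_for_model: true,   // now a type error: property no longer exists
  // use_cache: false,       // now a type error
};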