Commit 8ae4b4d

replace prompts with inquirer

1 parent 860b9d4

16 files changed: +540 −374 lines changed

helpers/providers/anthropic.ts (+24 −19)

@@ -1,6 +1,6 @@
-import prompts from "prompts";
+import inquirer from "inquirer";
 import { ModelConfigParams } from ".";
-import { questionHandlers, toChoice } from "../../questions/utils";
+import { toChoice } from "../../questions/utils";

 const MODELS = [
   "claude-3-opus",
@@ -56,42 +56,47 @@ export async function askAnthropicQuestions({
     },
   };

-  if (!config.apiKey) {
-    const { key } = await prompts(
+  if (!config.apiKey && !process.env.CI) {
+    const { key } = await inquirer.prompt([
       {
-        type: "text",
+        type: "input",
         name: "key",
-        message:
-          "Please provide your Anthropic API key (or leave blank to use ANTHROPIC_API_KEY env variable):",
+        message: askModels
+          ? "Please provide your Anthropic API key (or leave blank to use ANTHROPIC_API_KEY env variable):"
+          : "Please provide your Anthropic API key (leave blank to skip):",
+        validate: (value: string) => {
+          if (askModels && !value) {
+            if (process.env.ANTHROPIC_API_KEY) {
+              return true;
+            }
+            return "ANTHROPIC_API_KEY env variable is not set - key is required";
+          }
+          return true;
+        },
       },
-      questionHandlers,
-    );
+    ]);
     config.apiKey = key || process.env.ANTHROPIC_API_KEY;
   }

   if (askModels) {
-    const { model } = await prompts(
+    const { model } = await inquirer.prompt([
       {
-        type: "select",
+        type: "list",
         name: "model",
         message: "Which LLM model would you like to use?",
         choices: MODELS.map(toChoice),
-        initial: 0,
       },
-      questionHandlers,
-    );
+    ]);
     config.model = model;

-    const { embeddingModel } = await prompts(
+    const { embeddingModel } = await inquirer.prompt([
       {
-        type: "select",
+        type: "list",
         name: "embeddingModel",
         message: "Which embedding model would you like to use?",
         choices: Object.keys(EMBEDDING_MODELS).map(toChoice),
-        initial: 0,
       },
-      questionHandlers,
-    );
+    ]);
     config.embeddingModel = embeddingModel;
     config.dimensions =
       EMBEDDING_MODELS[

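The change above is the template for every provider file in this commit: prompts' `type: "text"` becomes inquirer's `type: "input"`, `select` becomes `list`, the question object is wrapped in an array, and both the `initial: 0` index and the prompts-specific `questionHandlers` second argument are dropped (inquirer selects the first list entry by default). The anthropic file additionally skips the interactive key prompt under CI. A minimal self-contained sketch of that inquirer usage, with a made-up EXAMPLE_API_KEY env variable standing in for the real ones:

import inquirer from "inquirer";

async function main() {
  // inquirer.prompt() takes an array of questions and resolves to an
  // answers object keyed by each question's `name`.
  const { key, model } = await inquirer.prompt([
    {
      type: "input", // prompts used type: "text"
      name: "key",
      message: "Please provide your API key (or leave blank to use EXAMPLE_API_KEY):",
      // validate returns true to accept the answer, or a string that
      // inquirer displays as the error message.
      validate: (value: string) =>
        value || process.env.EXAMPLE_API_KEY // hypothetical env variable
          ? true
          : "EXAMPLE_API_KEY env variable is not set - key is required",
    },
    {
      // prompts used type: "select" plus initial: 0; inquirer's "list"
      // highlights the first choice by default, so no index is needed.
      type: "list",
      name: "model",
      message: "Which LLM model would you like to use?",
      choices: ["claude-3-opus", "claude-3-sonnet"],
    },
  ]);
  console.log({ key: key || process.env.EXAMPLE_API_KEY, model });
}

main();
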
helpers/providers/azure.ts (+37 −14)

@@ -1,6 +1,5 @@
-import prompts from "prompts";
+import inquirer from "inquirer";
 import { ModelConfigParams, ModelConfigQuestionsParams } from ".";
-import { questionHandlers } from "../../questions/utils";

 const ALL_AZURE_OPENAI_CHAT_MODELS: Record<string, { openAIModel: string }> = {
   "gpt-35-turbo": { openAIModel: "gpt-3.5-turbo" },
@@ -66,29 +65,49 @@ export async function askAzureQuestions({
     },
   };

+  if (!config.apiKey) {
+    const { key } = await inquirer.prompt([
+      {
+        type: "input",
+        name: "key",
+        message:
+          "Please provide your Azure OpenAI API key (or leave blank to use AZURE_OPENAI_API_KEY env variable):",
+      },
+    ]);
+    config.apiKey = key || process.env.AZURE_OPENAI_API_KEY;
+  }
+
+  if (!config.endpoint) {
+    const { endpoint } = await inquirer.prompt([
+      {
+        type: "input",
+        name: "endpoint",
+        message:
+          "Please provide your Azure OpenAI endpoint (or leave blank to use AZURE_OPENAI_ENDPOINT env variable):",
+      },
+    ]);
+    config.endpoint = endpoint || process.env.AZURE_OPENAI_ENDPOINT;
+  }
+
   if (askModels) {
-    const { model } = await prompts(
+    const { model } = await inquirer.prompt([
       {
-        type: "select",
+        type: "list",
         name: "model",
         message: "Which LLM model would you like to use?",
-        choices: getAvailableModelChoices(),
-        initial: 0,
+        choices: getAvailableModelChoices().map(toChoice),
       },
-      questionHandlers,
-    );
+    ]);
     config.model = model;

-    const { embeddingModel } = await prompts(
+    const { embeddingModel } = await inquirer.prompt([
       {
-        type: "select",
+        type: "list",
         name: "embeddingModel",
         message: "Which embedding model would you like to use?",
-        choices: getAvailableEmbeddingModelChoices(),
-        initial: 0,
+        choices: getAvailableEmbeddingModelChoices().map(toChoice),
       },
-      questionHandlers,
-    );
+    ]);
     config.embeddingModel = embeddingModel;
     config.dimensions = getDimensions(embeddingModel);
   }
@@ -113,3 +132,7 @@ function getAvailableEmbeddingModelChoices() {
 function getDimensions(modelName: string) {
   return ALL_AZURE_OPENAI_EMBEDDING_MODELS[modelName].dimensions;
 }
+
+function toChoice(item: { title: string; value: string }) {
+  return { name: item.title, value: item.value };
+}

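azure.ts is the one file that gains a local toChoice helper: its getAvailableModelChoices()/getAvailableEmbeddingModelChoices() evidently still return prompts-style { title, value } objects, while inquirer lists expect { name, value }. A small sketch of that shape adaptation, with invented choice entries standing in for the real ALL_AZURE_OPENAI_* tables:

// prompts displayed `title`; inquirer displays `name`. Both resolve to `value`.
type PromptsChoice = { title: string; value: string };

function toChoice(item: PromptsChoice): { name: string; value: string } {
  return { name: item.title, value: item.value };
}

// Invented entries for illustration only.
const promptsStyle: PromptsChoice[] = [
  { title: "gpt-35-turbo", value: "gpt-35-turbo" },
  { title: "gpt-4", value: "gpt-4" },
];

console.log(promptsStyle.map(toChoice));
// [ { name: 'gpt-35-turbo', value: 'gpt-35-turbo' },
//   { name: 'gpt-4', value: 'gpt-4' } ]
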
helpers/providers/gemini.ts (+23 −18)

@@ -1,6 +1,6 @@
-import prompts from "prompts";
+import inquirer from "inquirer";
 import { ModelConfigParams } from ".";
-import { questionHandlers, toChoice } from "../../questions/utils";
+import { toChoice } from "../../questions/utils";

 const MODELS = ["gemini-1.5-pro-latest", "gemini-pro", "gemini-pro-vision"];
 type ModelData = {
@@ -41,41 +41,46 @@ export async function askGeminiQuestions({
   };

   if (!config.apiKey) {
-    const { key } = await prompts(
+    const { key } = await inquirer.prompt([
       {
-        type: "text",
+        type: "input",
         name: "key",
-        message:
-          "Please provide your Google API key (or leave blank to use GOOGLE_API_KEY env variable):",
+        message: askModels
+          ? "Please provide your Google API key (or leave blank to use GOOGLE_API_KEY env variable):"
+          : "Please provide your Google API key (leave blank to skip):",
+        validate: (value: string) => {
+          if (askModels && !value) {
+            if (process.env.GOOGLE_API_KEY) {
+              return true;
+            }
+            return "GOOGLE_API_KEY env variable is not set - key is required";
+          }
+          return true;
+        },
       },
-      questionHandlers,
-    );
+    ]);
     config.apiKey = key || process.env.GOOGLE_API_KEY;
   }

   if (askModels) {
-    const { model } = await prompts(
+    const { model } = await inquirer.prompt([
       {
-        type: "select",
+        type: "list",
         name: "model",
         message: "Which LLM model would you like to use?",
         choices: MODELS.map(toChoice),
-        initial: 0,
       },
-      questionHandlers,
-    );
+    ]);
     config.model = model;

-    const { embeddingModel } = await prompts(
+    const { embeddingModel } = await inquirer.prompt([
       {
-        type: "select",
+        type: "list",
         name: "embeddingModel",
         message: "Which embedding model would you like to use?",
         choices: Object.keys(EMBEDDING_MODELS).map(toChoice),
-        initial: 0,
       },
-      questionHandlers,
-    );
+    ]);
     config.embeddingModel = embeddingModel;
     config.dimensions = EMBEDDING_MODELS[embeddingModel].dimensions;
   }

helpers/providers/groq.ts (+23 −18)

@@ -1,6 +1,6 @@
-import prompts from "prompts";
+import inquirer from "inquirer";
 import { ModelConfigParams } from ".";
-import { questionHandlers, toChoice } from "../../questions/utils";
+import { toChoice } from "../../questions/utils";

 import got from "got";
 import ora from "ora";
@@ -97,43 +97,48 @@ export async function askGroqQuestions({
   };

   if (!config.apiKey) {
-    const { key } = await prompts(
+    const { key } = await inquirer.prompt([
       {
-        type: "text",
+        type: "input",
         name: "key",
-        message:
-          "Please provide your Groq API key (or leave blank to use GROQ_API_KEY env variable):",
+        message: askModels
+          ? "Please provide your Groq API key (or leave blank to use GROQ_API_KEY env variable):"
+          : "Please provide your Groq API key (leave blank to skip):",
+        validate: (value: string) => {
+          if (askModels && !value) {
+            if (process.env.GROQ_API_KEY) {
+              return true;
+            }
+            return "GROQ_API_KEY env variable is not set - key is required";
+          }
+          return true;
+        },
       },
-      questionHandlers,
-    );
+    ]);
     config.apiKey = key || process.env.GROQ_API_KEY;
   }

   if (askModels) {
     const modelChoices = await getAvailableModelChoicesGroq(config.apiKey!);

-    const { model } = await prompts(
+    const { model } = await inquirer.prompt([
       {
-        type: "select",
+        type: "list",
         name: "model",
         message: "Which LLM model would you like to use?",
         choices: modelChoices,
-        initial: 0,
       },
-      questionHandlers,
-    );
+    ]);
     config.model = model;

-    const { embeddingModel } = await prompts(
+    const { embeddingModel } = await inquirer.prompt([
       {
-        type: "select",
+        type: "list",
         name: "embeddingModel",
         message: "Which embedding model would you like to use?",
         choices: Object.keys(EMBEDDING_MODELS).map(toChoice),
-        initial: 0,
       },
-      questionHandlers,
-    );
+    ]);
     config.embeddingModel = embeddingModel;
     config.dimensions =
       EMBEDDING_MODELS[

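groq.ts is the only provider here that builds its LLM choices at runtime, via getAvailableModelChoicesGroq(config.apiKey!), whose body sits outside this hunk. A hedged reconstruction of what such a helper could look like, using the got and ora imports the file already has; the endpoint URL and response shape are assumptions, not shown in this diff:

import got from "got";
import ora from "ora";

// Hypothetical sketch: fetch Groq's model list (assuming the
// OpenAI-compatible /openai/v1/models endpoint) and map each model id
// into an inquirer-style { name, value } choice.
async function getAvailableModelChoicesGroq(apiKey: string) {
  const spinner = ora("Fetching available Groq models").start();
  try {
    const { data } = await got("https://api.groq.com/openai/v1/models", {
      headers: { Authorization: `Bearer ${apiKey}` },
    }).json<{ data: { id: string }[] }>();
    return data.map((model) => ({ name: model.id, value: model.id }));
  } finally {
    spinner.stop();
  }
}
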
helpers/providers/huggingface.ts (+21 −13)

@@ -1,6 +1,6 @@
-import prompts from "prompts";
+import inquirer from "inquirer";
 import { ModelConfigParams } from ".";
-import { questionHandlers, toChoice } from "../../questions/utils";
+import { toChoice } from "../../questions/utils";

 const MODELS = ["HuggingFaceH4/zephyr-7b-alpha"];
 type ModelData = {
@@ -37,29 +37,37 @@ export async function askHuggingfaceQuestions({
     },
   };

+  if (!config.apiKey) {
+    const { key } = await inquirer.prompt([
+      {
+        type: "input",
+        name: "key",
+        message:
+          "Please provide your Huggingface API key (or leave blank to use HF_API_KEY env variable):",
+      },
+    ]);
+    config.apiKey = key || process.env.HF_API_KEY;
+  }
+
   if (askModels) {
-    const { model } = await prompts(
+    const { model } = await inquirer.prompt([
       {
-        type: "select",
+        type: "list",
         name: "model",
-        message: "Which Hugging Face model would you like to use?",
+        message: "Which LLM model would you like to use?",
         choices: MODELS.map(toChoice),
-        initial: 0,
       },
-      questionHandlers,
-    );
+    ]);
     config.model = model;

-    const { embeddingModel } = await prompts(
+    const { embeddingModel } = await inquirer.prompt([
       {
-        type: "select",
+        type: "list",
         name: "embeddingModel",
         message: "Which embedding model would you like to use?",
         choices: Object.keys(EMBEDDING_MODELS).map(toChoice),
-        initial: 0,
       },
-      questionHandlers,
-    );
+    ]);
     config.embeddingModel = embeddingModel;
     config.dimensions = EMBEDDING_MODELS[embeddingModel].dimensions;
   }
