Skip to content

Commit 087a45e

Browse files
committed
Merge remote-tracking branch 'origin' into lee/agent-workflows
2 parents 1e90a6a + ee69ce7 commit 087a45e

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

49 files changed

+476
-370
lines changed

.changeset/kind-mice-repair.md

-5
This file was deleted.

.changeset/mean-fireants-visit.md

+5
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,5 @@
1+
---
2+
"create-llama": patch
3+
---
4+
5+
fix: add trycatch for generating error

.changeset/rare-eyes-protect.md

+5
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,5 @@
1+
---
2+
"create-llama": patch
3+
---
4+
5+
bump: chat-ui and tailwind v4

.changeset/sharp-peas-grow.md

-5
This file was deleted.

.changeset/thin-buses-hunt.md

-5
This file was deleted.

CHANGELOG.md

+12
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,17 @@
11
# create-llama
22

3+
## 0.4.0
4+
5+
### Minor Changes
6+
7+
- 61204a1: chore: bump LITS 0.9
8+
9+
### Patch Changes
10+
11+
- 9e723c3: Standardize the code of the workflow use case (Python)
12+
- d5da55b: feat: add components.json to use CLI
13+
- c1552eb: chore: move wikipedia tool to create-llama
14+
315
## 0.3.28
416

517
### Patch Changes

helpers/typescript.ts

+90-22
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,7 @@ import { assetRelocator, copy } from "../helpers/copy";
66
import { callPackageManager } from "../helpers/install";
77
import { templatesDir } from "./dir";
88
import { PackageManager } from "./get-pkg-manager";
9-
import { InstallTemplateArgs } from "./types";
9+
import { InstallTemplateArgs, ModelProvider, TemplateVectorDB } from "./types";
1010

1111
/**
1212
* Install a LlamaIndex internal template to a given `root` directory.
@@ -27,6 +27,7 @@ export const installTSTemplate = async ({
2727
dataSources,
2828
useLlamaParse,
2929
useCase,
30+
modelConfig,
3031
}: InstallTemplateArgs & { backend: boolean }) => {
3132
console.log(bold(`Using ${packageManager}.`));
3233

@@ -181,6 +182,12 @@ export const installTSTemplate = async ({
181182
cwd: path.join(compPath, "loaders", "typescript", loaderFolder),
182183
});
183184

185+
// copy provider settings
186+
await copy("**", enginePath, {
187+
parents: true,
188+
cwd: path.join(compPath, "providers", "typescript", modelConfig.provider),
189+
});
190+
184191
// Select and copy engine code based on data sources and tools
185192
let engine;
186193
tools = tools ?? [];
@@ -239,6 +246,8 @@ export const installTSTemplate = async ({
239246
ui,
240247
observability,
241248
vectorDb,
249+
backend,
250+
modelConfig,
242251
});
243252

244253
if (
@@ -249,6 +258,68 @@ export const installTSTemplate = async ({
249258
}
250259
};
251260

261+
const providerDependencies: {
262+
[key in ModelProvider]?: Record<string, string>;
263+
} = {
264+
openai: {
265+
"@llamaindex/openai": "^0.1.52",
266+
},
267+
gemini: {
268+
"@llamaindex/google": "^0.0.7",
269+
},
270+
ollama: {
271+
"@llamaindex/ollama": "^0.0.40",
272+
},
273+
mistral: {
274+
"@llamaindex/mistral": "^0.0.5",
275+
},
276+
"azure-openai": {
277+
"@llamaindex/openai": "^0.1.52",
278+
},
279+
groq: {
280+
"@llamaindex/groq": "^0.0.51",
281+
"@llamaindex/huggingface": "^0.0.36", // groq uses huggingface as default embedding model
282+
},
283+
anthropic: {
284+
"@llamaindex/anthropic": "^0.1.0",
285+
"@llamaindex/huggingface": "^0.0.36", // anthropic uses huggingface as default embedding model
286+
},
287+
};
288+
289+
const vectorDbDependencies: Record<TemplateVectorDB, Record<string, string>> = {
290+
astra: {
291+
"@llamaindex/astra": "^0.0.5",
292+
},
293+
chroma: {
294+
"@llamaindex/chroma": "^0.0.5",
295+
},
296+
llamacloud: {},
297+
milvus: {
298+
"@zilliz/milvus2-sdk-node": "^2.4.6",
299+
"@llamaindex/milvus": "^0.1.0",
300+
},
301+
mongo: {
302+
mongodb: "6.7.0",
303+
"@llamaindex/mongodb": "^0.0.5",
304+
},
305+
none: {},
306+
pg: {
307+
pg: "^8.12.0",
308+
pgvector: "^0.2.0",
309+
"@llamaindex/postgres": "^0.0.33",
310+
},
311+
pinecone: {
312+
"@llamaindex/pinecone": "^0.0.5",
313+
},
314+
qdrant: {
315+
"@qdrant/js-client-rest": "^1.11.0",
316+
"@llamaindex/qdrant": "^0.1.0",
317+
},
318+
weaviate: {
319+
"@llamaindex/weaviate": "^0.0.5",
320+
},
321+
};
322+
252323
async function updatePackageJson({
253324
root,
254325
appName,
@@ -258,6 +329,8 @@ async function updatePackageJson({
258329
ui,
259330
observability,
260331
vectorDb,
332+
backend,
333+
modelConfig,
261334
}: Pick<
262335
InstallTemplateArgs,
263336
| "root"
@@ -267,8 +340,10 @@ async function updatePackageJson({
267340
| "ui"
268341
| "observability"
269342
| "vectorDb"
343+
| "modelConfig"
270344
> & {
271345
relativeEngineDestPath: string;
346+
backend: boolean;
272347
}): Promise<any> {
273348
const packageJsonFile = path.join(root, "package.json");
274349
const packageJson: any = JSON.parse(
@@ -308,32 +383,25 @@ async function updatePackageJson({
308383
};
309384
}
310385

311-
if (vectorDb === "pg") {
386+
if (backend) {
312387
packageJson.dependencies = {
313388
...packageJson.dependencies,
314-
pg: "^8.12.0",
315-
pgvector: "^0.2.0",
389+
"@llamaindex/readers": "^2.0.0",
316390
};
317-
}
318391

319-
if (vectorDb === "qdrant") {
320-
packageJson.dependencies = {
321-
...packageJson.dependencies,
322-
"@qdrant/js-client-rest": "^1.11.0",
323-
};
324-
}
325-
if (vectorDb === "mongo") {
326-
packageJson.dependencies = {
327-
...packageJson.dependencies,
328-
mongodb: "^6.7.0",
329-
};
330-
}
392+
if (vectorDb && vectorDb in vectorDbDependencies) {
393+
packageJson.dependencies = {
394+
...packageJson.dependencies,
395+
...vectorDbDependencies[vectorDb],
396+
};
397+
}
331398

332-
if (vectorDb === "milvus") {
333-
packageJson.dependencies = {
334-
...packageJson.dependencies,
335-
"@zilliz/milvus2-sdk-node": "^2.4.6",
336-
};
399+
if (modelConfig.provider && modelConfig.provider in providerDependencies) {
400+
packageJson.dependencies = {
401+
...packageJson.dependencies,
402+
...providerDependencies[modelConfig.provider],
403+
};
404+
}
337405
}
338406

339407
if (observability === "traceloop") {

package.json

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
{
22
"name": "create-llama",
3-
"version": "0.3.28",
3+
"version": "0.4.0",
44
"description": "Create LlamaIndex-powered apps with one command",
55
"keywords": [
66
"rag",

templates/components/loaders/typescript/file/loader.ts

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
import {
22
FILE_EXT_TO_READER,
33
SimpleDirectoryReader,
4-
} from "llamaindex/readers/index";
4+
} from "@llamaindex/readers/directory";
55

66
export const DATA_DIR = "./data";
77

templates/components/loaders/typescript/llama_parse/loader.ts

+2-2
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,8 @@
1-
import { LlamaParseReader } from "llamaindex";
21
import {
32
FILE_EXT_TO_READER,
43
SimpleDirectoryReader,
5-
} from "llamaindex/readers/index";
4+
} from "@llamaindex/readers/directory";
5+
import { LlamaParseReader } from "llamaindex";
66

77
export const DATA_DIR = "./data";
88

Original file line numberDiff line numberDiff line change
@@ -0,0 +1,19 @@
1+
import {
2+
ALL_AVAILABLE_ANTHROPIC_MODELS,
3+
Anthropic,
4+
} from "@llamaindex/anthropic";
5+
import { HuggingFaceEmbedding } from "@llamaindex/huggingface";
6+
import { Settings } from "llamaindex";
7+
8+
export function setupProvider() {
9+
const embedModelMap: Record<string, string> = {
10+
"all-MiniLM-L6-v2": "Xenova/all-MiniLM-L6-v2",
11+
"all-mpnet-base-v2": "Xenova/all-mpnet-base-v2",
12+
};
13+
Settings.llm = new Anthropic({
14+
model: process.env.MODEL as keyof typeof ALL_AVAILABLE_ANTHROPIC_MODELS,
15+
});
16+
Settings.embedModel = new HuggingFaceEmbedding({
17+
modelType: embedModelMap[process.env.EMBEDDING_MODEL!],
18+
});
19+
}
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,49 @@
1+
import { OpenAI, OpenAIEmbedding } from "@llamaindex/openai";
2+
import { Settings } from "llamaindex";
3+
4+
export function setupProvider() {
5+
// Map Azure OpenAI model names to OpenAI model names (only for TS)
6+
const AZURE_OPENAI_MODEL_MAP: Record<string, string> = {
7+
"gpt-35-turbo": "gpt-3.5-turbo",
8+
"gpt-35-turbo-16k": "gpt-3.5-turbo-16k",
9+
"gpt-4o": "gpt-4o",
10+
"gpt-4": "gpt-4",
11+
"gpt-4-32k": "gpt-4-32k",
12+
"gpt-4-turbo": "gpt-4-turbo",
13+
"gpt-4-turbo-2024-04-09": "gpt-4-turbo",
14+
"gpt-4-vision-preview": "gpt-4-vision-preview",
15+
"gpt-4-1106-preview": "gpt-4-1106-preview",
16+
"gpt-4o-2024-05-13": "gpt-4o-2024-05-13",
17+
};
18+
19+
const azureConfig = {
20+
apiKey: process.env.AZURE_OPENAI_KEY,
21+
endpoint: process.env.AZURE_OPENAI_ENDPOINT,
22+
apiVersion:
23+
process.env.AZURE_OPENAI_API_VERSION || process.env.OPENAI_API_VERSION,
24+
};
25+
26+
Settings.llm = new OpenAI({
27+
model:
28+
AZURE_OPENAI_MODEL_MAP[process.env.MODEL ?? "gpt-35-turbo"] ??
29+
"gpt-3.5-turbo",
30+
maxTokens: process.env.LLM_MAX_TOKENS
31+
? Number(process.env.LLM_MAX_TOKENS)
32+
: undefined,
33+
azure: {
34+
...azureConfig,
35+
deployment: process.env.AZURE_OPENAI_LLM_DEPLOYMENT,
36+
},
37+
});
38+
39+
Settings.embedModel = new OpenAIEmbedding({
40+
model: process.env.EMBEDDING_MODEL,
41+
dimensions: process.env.EMBEDDING_DIM
42+
? parseInt(process.env.EMBEDDING_DIM)
43+
: undefined,
44+
azure: {
45+
...azureConfig,
46+
deployment: process.env.AZURE_OPENAI_EMBEDDING_DEPLOYMENT,
47+
},
48+
});
49+
}
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,16 @@
1+
import {
2+
Gemini,
3+
GEMINI_EMBEDDING_MODEL,
4+
GEMINI_MODEL,
5+
GeminiEmbedding,
6+
} from "@llamaindex/google";
7+
import { Settings } from "llamaindex";
8+
9+
export function setupProvider() {
10+
Settings.llm = new Gemini({
11+
model: process.env.MODEL as GEMINI_MODEL,
12+
});
13+
Settings.embedModel = new GeminiEmbedding({
14+
model: process.env.EMBEDDING_MODEL as GEMINI_EMBEDDING_MODEL,
15+
});
16+
}
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,18 @@
1+
import { Groq } from "@llamaindex/groq";
2+
import { HuggingFaceEmbedding } from "@llamaindex/huggingface";
3+
import { Settings } from "llamaindex";
4+
5+
export function setupProvider() {
6+
const embedModelMap: Record<string, string> = {
7+
"all-MiniLM-L6-v2": "Xenova/all-MiniLM-L6-v2",
8+
"all-mpnet-base-v2": "Xenova/all-mpnet-base-v2",
9+
};
10+
11+
Settings.llm = new Groq({
12+
model: process.env.MODEL!,
13+
});
14+
15+
Settings.embedModel = new HuggingFaceEmbedding({
16+
modelType: embedModelMap[process.env.EMBEDDING_MODEL!],
17+
});
18+
}
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,16 @@
1+
import {
2+
ALL_AVAILABLE_MISTRAL_MODELS,
3+
MistralAI,
4+
MistralAIEmbedding,
5+
MistralAIEmbeddingModelType,
6+
} from "@llamaindex/mistral";
7+
import { Settings } from "llamaindex";
8+
9+
export function setupProvider() {
10+
Settings.llm = new MistralAI({
11+
model: process.env.MODEL as keyof typeof ALL_AVAILABLE_MISTRAL_MODELS,
12+
});
13+
Settings.embedModel = new MistralAIEmbedding({
14+
model: process.env.EMBEDDING_MODEL as MistralAIEmbeddingModelType,
15+
});
16+
}
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,16 @@
1+
import { Ollama, OllamaEmbedding } from "@llamaindex/ollama";
2+
import { Settings } from "llamaindex";
3+
4+
export function setupProvider() {
5+
const config = {
6+
host: process.env.OLLAMA_BASE_URL ?? "http://127.0.0.1:11434",
7+
};
8+
Settings.llm = new Ollama({
9+
model: process.env.MODEL ?? "",
10+
config,
11+
});
12+
Settings.embedModel = new OllamaEmbedding({
13+
model: process.env.EMBEDDING_MODEL ?? "",
14+
config,
15+
});
16+
}

0 commit comments

Comments
 (0)