fix typing issue and add typing test for llamaindexserver templates #613

Merged: 11 commits, May 13, 2025
Changes from 4 commits
2 changes: 2 additions & 0 deletions .prettierignore
@@ -7,6 +7,8 @@ build/
 .next/
 out/
 packages/server/server/
+**/playwright-report/
+**/test-results/

 # Python
 python/
87 changes: 68 additions & 19 deletions packages/create-llama/e2e/typescript/resolve_dependencies.spec.ts
@@ -3,14 +3,26 @@ import { exec } from "child_process";
 import fs from "fs";
 import path from "path";
 import util from "util";
-import { TemplateFramework, TemplateVectorDB } from "../../helpers/types";
+import {
+  TemplateFramework,
+  TemplateType,
+  TemplateUseCase,
+  TemplateVectorDB,
+} from "../../helpers/types";
 import { createTestDir, runCreateLlama } from "../utils";

 const execAsync = util.promisify(exec);

 const templateFramework: TemplateFramework = process.env.FRAMEWORK
   ? (process.env.FRAMEWORK as TemplateFramework)
   : "nextjs";
+const templateTypes: TemplateType[] = ["streaming", "llamaindexserver"];
+const useCases: TemplateUseCase[] = [
+  "agentic_rag",
+  "deep_research",
+  "financial_report",
+  "artifacts",
+];
 const dataSource: string = process.env.DATASOURCE
   ? process.env.DATASOURCE
   : "--example-file";
@@ -29,40 +41,77 @@ const vectorDbs: TemplateVectorDB[] = [
 ];

 test.describe("Test resolve TS dependencies", () => {
-  // Test vector DBs without LlamaParse
-  for (const vectorDb of vectorDbs) {
-    const optionDescription = `vectorDb: ${vectorDb}, dataSource: ${dataSource}`;
+  test.describe.configure({ retries: 0 });

-    test(`Vector DB test - ${optionDescription}`, async () => {
-      await runTest(vectorDb, false);
-    });
-  }
-
-  // Test LlamaParse with vectorDB 'none'
-  test(`LlamaParse test - vectorDb: none, dataSource: ${dataSource}, llamaParse: true`, async () => {
-    await runTest("none", true);
-  });
+  for (const templateType of templateTypes) {
+    // Test vector DBs without LlamaParse
+    for (const vectorDb of vectorDbs) {
+      const optionDescription = `templateType: ${templateType}, vectorDb: ${vectorDb}, dataSource: ${dataSource}`;

+      test(`Vector DB test - ${optionDescription}`, async () => {
+        // skip vectordb test for llamaindexserver
+        test.skip(
+          templateType === "llamaindexserver",
+          "skipping vectorDB test for llamaindexserver",
+        );

+        await runTest({
+          templateType: templateType,
+          useLlamaParse: false, // Disable LlamaParse for vectorDB test
+          vectorDb: vectorDb,
+        });
+      });
+    }

+    // No vectorDB, with LlamaParse and useCase
+    for (const useCase of useCases) {
+      const optionDescription = `templateType: ${templateType}, useCase: ${useCase}`;
+      test.describe(`useCase test - ${optionDescription}`, () => {
+        test.skip(
+          templateType === "streaming",
+          "Skipping use case test for streaming template.",
+        );
+        test(`no llamaParse - ${optionDescription}`, async () => {
+          await runTest({
+            templateType: templateType,
+            useLlamaParse: false,
+            useCase: useCase,
+          });
+        });
+        test(`llamaParse - ${optionDescription}`, async () => {
+          await runTest({
+            templateType: templateType,
+            useLlamaParse: true,
+            useCase: useCase,
+          });
+        });
+      });
+    }
+  }
 });

-async function runTest(
-  vectorDb: TemplateVectorDB | "none",
-  useLlamaParse: boolean,
-) {
+async function runTest(options: {
+  templateType: TemplateType;
+  useLlamaParse: boolean;
+  useCase?: TemplateUseCase;
+  vectorDb?: TemplateVectorDB;
+}) {
   const cwd = await createTestDir();

   const result = await runCreateLlama({
     cwd: cwd,
-    templateType: "streaming",
+    templateType: options.templateType,
     templateFramework: templateFramework,
     dataSource: dataSource,
-    vectorDb: vectorDb,
+    vectorDb: options.vectorDb ?? "none",
     port: 3000,
     postInstallAction: "none",
     templateUI: undefined,
     appType: templateFramework === "nextjs" ? "" : "--no-frontend",
     llamaCloudProjectName: undefined,
     llamaCloudIndexName: undefined,
     tools: undefined,
-    useLlamaParse: useLlamaParse,
+    useLlamaParse: options.useLlamaParse,
+    useCase: options.useCase,
   });
   const name = result.projectName;
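For reference, the typed test matrix above leans on the TemplateType and TemplateUseCase unions imported from ../../helpers/types. A minimal sketch of their assumed shape (only the members exercised by this spec are confirmed by the diff; the real definitions may include more):

// Assumed shape of the unions in helpers/types.ts, inferred from the
// typed arrays in this spec; not part of this diff.
export type TemplateType = "streaming" | "llamaindexserver";
export type TemplateUseCase =
  | "agentic_rag"
  | "deep_research"
  | "financial_report"
  | "artifacts";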
6 changes: 3 additions & 3 deletions packages/create-llama/helpers/python.ts
@@ -569,13 +569,13 @@ const installLlamaIndexServerTemplate = async ({

await copy("*.py", path.join(root, "app"), {
parents: true,
cwd: path.join(templatesDir, "components", "workflows", "python", useCase),
cwd: path.join(templatesDir, "components", "use-cases", "python", useCase),
});

// Copy custom UI component code
await copy(`*`, path.join(root, "components"), {
parents: true,
cwd: path.join(templatesDir, "components", "ui", "workflows", useCase),
cwd: path.join(templatesDir, "components", "ui", "use-cases", useCase),
});

if (useLlamaParse) {
@@ -606,7 +606,7 @@
   // Copy README.md
   await copy("README-template.md", path.join(root), {
     parents: true,
-    cwd: path.join(templatesDir, "components", "workflows", "python", useCase),
+    cwd: path.join(templatesDir, "components", "use-cases", "python", useCase),
     rename: assetRelocator,
   });
 };
21 changes: 5 additions & 16 deletions packages/create-llama/helpers/typescript.ts
@@ -31,23 +31,24 @@ const installLlamaIndexServerTemplate = async ({
     process.exit(1);
   }

-  await copy("*.ts", path.join(root, "src", "app"), {
-    parents: true,
+  await copy("**", path.join(root), {
     cwd: path.join(
       templatesDir,
       "components",
-      "workflows",
+      "use-cases",
       "typescript",
       useCase,
     ),
+    rename: assetRelocator,
   });

   // copy workflow UI components to output/components folder
   await copy("*", path.join(root, "components"), {
     parents: true,
-    cwd: path.join(templatesDir, "components", "ui", "workflows", useCase),
+    cwd: path.join(templatesDir, "components", "ui", "use-cases", useCase),
   });

+  // Override generate.ts if workflow use case doesn't use custom UI
   if (vectorDb === "llamacloud") {
     await copy("generate.ts", path.join(root, "src"), {
       parents: true,
@@ -74,18 +75,6 @@ const installLlamaIndexServerTemplate = async ({
       rename: () => "data.ts",
     });
   }
-  // Copy README.md
-  await copy("README-template.md", path.join(root), {
-    parents: true,
-    cwd: path.join(
-      templatesDir,
-      "components",
-      "workflows",
-      "typescript",
-      useCase,
-    ),
-    rename: assetRelocator,
-  });
 };

 const installLegacyTSTemplate = async ({
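Both installers now pass rename: assetRelocator when copying template assets. The helper itself is not part of this diff; a plausible sketch, assuming its job is to map template asset names to their final on-disk names (as the "// Copy README.md" comment above suggests):

// Hypothetical sketch; the real assetRelocator lives elsewhere in
// create-llama's helpers and may handle more cases than shown here.
const assetRelocator = (name: string): string =>
  name === "README-template.md" ? "README.md" : name;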
@@ -0,0 +1,39 @@
+import { SimpleDirectoryReader } from "@llamaindex/readers/directory";
+import "dotenv/config";
+import { storageContextFromDefaults, VectorStoreIndex } from "llamaindex";
+import { initSettings } from "./app/settings";
+
+async function generateDatasource() {
+  console.log(`Generating storage context...`);
+  // Split documents, create embeddings and store them in the storage context
+  const storageContext = await storageContextFromDefaults({
+    persistDir: "storage",
+  });
+  // load documents from current directory into an index
+  const reader = new SimpleDirectoryReader();
+  const documents = await reader.loadData("data");
+
+  await VectorStoreIndex.fromDocuments(documents, {
+    storageContext,
+  });
+  console.log("Storage context successfully generated.");
+}
+
+(async () => {
+  const args = process.argv.slice(2);
+  const command = args[0];
+
+  initSettings();
+
+  if (command === "ui") {
+    console.error("This project doesn't use any custom UI.");
+    return;
+  } else {
+    if (command !== "datasource") {
+      console.error(
+        `Unrecognized command: ${command}. Generating datasource by default.`,
+      );
+    }
+    await generateDatasource();
+  }
+})();
@@ -0,0 +1,39 @@
+import { SimpleDirectoryReader } from "@llamaindex/readers/directory";
+import "dotenv/config";
+import { storageContextFromDefaults, VectorStoreIndex } from "llamaindex";
+import { initSettings } from "./app/settings";
+
+async function generateDatasource() {
+  console.log(`Generating storage context...`);
+  // Split documents, create embeddings and store them in the storage context
+  const storageContext = await storageContextFromDefaults({
+    persistDir: "storage",
+  });
+  // load documents from current directory into an index
+  const reader = new SimpleDirectoryReader();
+  const documents = await reader.loadData("data");
+
+  await VectorStoreIndex.fromDocuments(documents, {
+    storageContext,
+  });
+  console.log("Storage context successfully generated.");
+}
+
+(async () => {
+  const args = process.argv.slice(2);
+  const command = args[0];
+
+  initSettings();
+
+  if (command === "ui") {
+    console.error("This project doesn't use any custom UI.");
+    return;
+  } else {
+    if (command !== "datasource") {
+      console.error(
+        `Unrecognized command: ${command}. Generating datasource by default.`,
+      );
+    }
+    await generateDatasource();
+  }
+})();
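Both copies of the new generate script above call initSettings() from ./app/settings before indexing, so the embedding model is configured first. That settings file ships with the template and is not part of this diff; a minimal sketch of what it presumably does, assuming the OpenAI defaults used elsewhere in these templates:

// Assumed sketch of app/settings.ts: configure the global Settings
// singleton (model names here are illustrative, not taken from this PR).
import { OpenAI, OpenAIEmbedding } from "@llamaindex/openai";
import { Settings } from "llamaindex";

export function initSettings() {
  Settings.llm = new OpenAI({ model: "gpt-4o-mini" });
  Settings.embedModel = new OpenAIEmbedding({ model: "text-embedding-3-small" });
}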
@@ -12,7 +12,7 @@
"@llamaindex/openai": "0.2.0",
"@llamaindex/server": "0.2.0",
"@llamaindex/workflow": "1.1.1",
"@llamaindex/tools": "0.0.4",
"@llamaindex/tools": "0.0.9",
"dotenv": "^16.4.7",
"zod": "^3.23.8",
"llamaindex": "0.10.5"
Expand Down