Skip to content

Commit e7d30b1

Browse files
refactor: test frameworks and datasources via matrix (#211)
1 parent e974c8e commit e7d30b1

File tree

2 files changed

+102
-116
lines changed

2 files changed

+102
-116
lines changed

.github/workflows/e2e.yml

Lines changed: 4 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -18,6 +18,8 @@ jobs:
1818
node-version: [18, 20]
1919
python-version: ["3.11"]
2020
os: [macos-latest, windows-latest, ubuntu-22.04]
21+
frameworks: ["nextjs", "express", "fastapi"]
22+
datasources: ["--no-files", "--example-file"]
2123
defaults:
2224
run:
2325
shell: bash
@@ -63,6 +65,8 @@ jobs:
6365
env:
6466
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
6567
LLAMA_CLOUD_API_KEY: ${{ secrets.LLAMA_CLOUD_API_KEY }}
68+
FRAMEWORK: ${{ matrix.frameworks }}
69+
DATASOURCE: ${{ matrix.datasources }}
6670
working-directory: .
6771

6872
- uses: actions/upload-artifact@v3

e2e/basic.spec.ts

Lines changed: 98 additions & 116 deletions
Original file line number | Diff line number | Diff line change
@@ -11,128 +11,110 @@ import type {
1111
} from "../helpers";
1212
import { createTestDir, runCreateLlama, type AppType } from "./utils";
1313

14-
const templateTypes: TemplateType[] = ["streaming"];
15-
const templateFrameworks: TemplateFramework[] = [
16-
"nextjs",
17-
"express",
18-
"fastapi",
19-
];
20-
const dataSources: string[] = ["--no-files", "--example-file"];
21-
const templateUIs: TemplateUI[] = ["shadcn"];
22-
const templatePostInstallActions: TemplatePostInstallAction[] = [
23-
"none",
24-
"runApp",
25-
];
14+
const templateType: TemplateType = "streaming";
15+
const templateFramework: TemplateFramework = process.env.FRAMEWORK
16+
? (process.env.FRAMEWORK as TemplateFramework)
17+
: "fastapi";
18+
const dataSource: string = process.env.DATASOURCE
19+
? process.env.DATASOURCE
20+
: "--example-file";
21+
const templateUI: TemplateUI = "shadcn";
22+
const templatePostInstallAction: TemplatePostInstallAction = "runApp";
2623

2724
const llamaCloudProjectName = "create-llama";
2825
const llamaCloudIndexName = "e2e-test";
2926

30-
for (const templateType of templateTypes) {
31-
for (const templateFramework of templateFrameworks) {
32-
for (const dataSource of dataSources) {
33-
for (const templateUI of templateUIs) {
34-
for (const templatePostInstallAction of templatePostInstallActions) {
35-
const appType: AppType =
36-
templateFramework === "nextjs" ? "" : "--frontend";
37-
const userMessage =
38-
dataSource !== "--no-files"
39-
? "Physical standard for letters"
40-
: "Hello";
41-
test.describe(`try create-llama ${templateType} ${templateFramework} ${dataSource} ${templateUI} ${appType} ${templatePostInstallAction}`, async () => {
42-
let port: number;
43-
let externalPort: number;
44-
let cwd: string;
45-
let name: string;
46-
let appProcess: ChildProcess;
47-
// Only test without using vector db for now
48-
const vectorDb = "none";
27+
const appType: AppType = templateFramework === "nextjs" ? "" : "--frontend";
28+
const userMessage =
29+
dataSource !== "--no-files" ? "Physical standard for letters" : "Hello";
30+
test.describe(`try create-llama ${templateType} ${templateFramework} ${dataSource} ${templateUI} ${appType} ${templatePostInstallAction}`, async () => {
31+
let port: number;
32+
let externalPort: number;
33+
let cwd: string;
34+
let name: string;
35+
let appProcess: ChildProcess;
36+
// Only test without using vector db for now
37+
const vectorDb = "none";
4938

50-
test.beforeAll(async () => {
51-
port = Math.floor(Math.random() * 10000) + 10000;
52-
externalPort = port + 1;
53-
cwd = await createTestDir();
54-
const result = await runCreateLlama(
55-
cwd,
56-
templateType,
57-
templateFramework,
58-
dataSource,
59-
templateUI,
60-
vectorDb,
61-
appType,
62-
port,
63-
externalPort,
64-
templatePostInstallAction,
65-
llamaCloudProjectName,
66-
llamaCloudIndexName,
67-
);
68-
name = result.projectName;
69-
appProcess = result.appProcess;
70-
});
39+
test.beforeAll(async () => {
40+
port = Math.floor(Math.random() * 10000) + 10000;
41+
externalPort = port + 1;
42+
cwd = await createTestDir();
43+
const result = await runCreateLlama(
44+
cwd,
45+
templateType,
46+
templateFramework,
47+
dataSource,
48+
templateUI,
49+
vectorDb,
50+
appType,
51+
port,
52+
externalPort,
53+
templatePostInstallAction,
54+
llamaCloudProjectName,
55+
llamaCloudIndexName,
56+
);
57+
name = result.projectName;
58+
appProcess = result.appProcess;
59+
});
7160

72-
test("App folder should exist", async () => {
73-
const dirExists = fs.existsSync(path.join(cwd, name));
74-
expect(dirExists).toBeTruthy();
75-
});
76-
test("Frontend should have a title", async ({ page }) => {
77-
test.skip(templatePostInstallAction !== "runApp");
78-
await page.goto(`http://localhost:${port}`);
79-
await expect(page.getByText("Built by LlamaIndex")).toBeVisible();
80-
});
61+
test("App folder should exist", async () => {
62+
const dirExists = fs.existsSync(path.join(cwd, name));
63+
expect(dirExists).toBeTruthy();
64+
});
65+
test("Frontend should have a title", async ({ page }) => {
66+
test.skip(templatePostInstallAction !== "runApp");
67+
await page.goto(`http://localhost:${port}`);
68+
await expect(page.getByText("Built by LlamaIndex")).toBeVisible();
69+
});
8170

82-
test("Frontend should be able to submit a message and receive a response", async ({
83-
page,
84-
}) => {
85-
test.skip(templatePostInstallAction !== "runApp");
86-
await page.goto(`http://localhost:${port}`);
87-
await page.fill("form input", userMessage);
88-
const [response] = await Promise.all([
89-
page.waitForResponse(
90-
(res) => {
91-
return (
92-
res.url().includes("/api/chat") && res.status() === 200
93-
);
94-
},
95-
{
96-
timeout: 1000 * 60,
97-
},
98-
),
99-
page.click("form button[type=submit]"),
100-
]);
101-
const text = await response.text();
102-
console.log("AI response when submitting message: ", text);
103-
expect(response.ok()).toBeTruthy();
104-
});
71+
test("Frontend should be able to submit a message and receive a response", async ({
72+
page,
73+
}) => {
74+
test.skip(templatePostInstallAction !== "runApp");
75+
await page.goto(`http://localhost:${port}`);
76+
await page.fill("form input", userMessage);
77+
const [response] = await Promise.all([
78+
page.waitForResponse(
79+
(res) => {
80+
return res.url().includes("/api/chat") && res.status() === 200;
81+
},
82+
{
83+
timeout: 1000 * 60,
84+
},
85+
),
86+
page.click("form button[type=submit]"),
87+
]);
88+
const text = await response.text();
89+
console.log("AI response when submitting message: ", text);
90+
expect(response.ok()).toBeTruthy();
91+
});
10592

106-
test("Backend frameworks should response when calling non-streaming chat API", async ({
107-
request,
108-
}) => {
109-
test.skip(templatePostInstallAction !== "runApp");
110-
test.skip(templateFramework === "nextjs");
111-
const response = await request.post(
112-
`http://localhost:${externalPort}/api/chat/request`,
113-
{
114-
data: {
115-
messages: [
116-
{
117-
role: "user",
118-
content: userMessage,
119-
},
120-
],
121-
},
122-
},
123-
);
124-
const text = await response.text();
125-
console.log("AI response when calling API: ", text);
126-
expect(response.ok()).toBeTruthy();
127-
});
93+
test("Backend frameworks should response when calling non-streaming chat API", async ({
94+
request,
95+
}) => {
96+
test.skip(templatePostInstallAction !== "runApp");
97+
test.skip(templateFramework === "nextjs");
98+
const response = await request.post(
99+
`http://localhost:${externalPort}/api/chat/request`,
100+
{
101+
data: {
102+
messages: [
103+
{
104+
role: "user",
105+
content: userMessage,
106+
},
107+
],
108+
},
109+
},
110+
);
111+
const text = await response.text();
112+
console.log("AI response when calling API: ", text);
113+
expect(response.ok()).toBeTruthy();
114+
});
128115

129-
// clean processes
130-
test.afterAll(async () => {
131-
appProcess?.kill();
132-
});
133-
});
134-
}
135-
}
136-
}
137-
}
138-
}
116+
// clean processes
117+
test.afterAll(async () => {
118+
appProcess?.kill();
119+
});
120+
});

0 commit comments

Comments (0)