Skip to content

feat: implement interpreter tool #94

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 20 commits into from
May 23, 2024
Merged
Show file tree
Hide file tree
Changes from 12 commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 5 additions & 0 deletions .changeset/gentle-knives-unite.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
---
"create-llama": patch
---

feat: implement interpreter tool
55 changes: 48 additions & 7 deletions helpers/env-variables.ts
Original file line number Diff line number Diff line change
@@ -1,13 +1,14 @@
import fs from "fs/promises";
import path from "path";
import { TOOL_SYSTEM_PROMPT_ENV_VAR, Tool } from "./tools";
import {
ModelConfig,
TemplateDataSource,
TemplateFramework,
TemplateVectorDB,
} from "./types";

type EnvVar = {
export type EnvVar = {
name?: string;
description?: string;
value?: string;
Expand Down Expand Up @@ -260,15 +261,52 @@ const getEngineEnvs = (): EnvVar[] => {
"The number of similar embeddings to return when retrieving documents.",
value: "3",
},
{
name: "SYSTEM_PROMPT",
description: `Custom system prompt.
Example:
SYSTEM_PROMPT="You are a helpful assistant who helps users with their questions."`,
},
];
};

/**
 * Collect the environment variables declared by the selected tools.
 *
 * The per-tool system prompt variable (TOOL_SYSTEM_PROMPT) is deliberately
 * excluded here: those prompts are merged into the single SYSTEM_PROMPT
 * env var by `getSystemPromptEnv` instead.
 *
 * @param tools - tools chosen by the user; may be undefined or empty.
 * @returns the flattened list of tool env vars (empty when no tools).
 */
const getToolEnvs = (tools?: Tool[]): EnvVar[] => {
  if (!tools?.length) return [];
  return tools.flatMap(
    (tool) =>
      tool.envVars?.filter(
        (env) => env.name !== TOOL_SYSTEM_PROMPT_ENV_VAR,
      ) ?? [],
  );
};

const getSystemPromptEnv = (tools?: Tool[]): EnvVar => {
const defaultSystemPrompt =
"You are a helpful assistant who helps users with their questions.";

// build tool system prompt by merging all tool system prompts
let toolSystemPrompt = "";
tools?.forEach((tool) => {
const toolSystemPromptEnv = tool.envVars?.find(
(env) => env.name === TOOL_SYSTEM_PROMPT_ENV_VAR,
);
if (toolSystemPromptEnv) {
toolSystemPrompt += toolSystemPromptEnv.value + "\n";
}
});

const systemPrompt = toolSystemPrompt
? `\"${toolSystemPrompt}\"`
: defaultSystemPrompt;

return {
name: "SYSTEM_PROMPT",
description: "The system prompt for the AI model.",
value: systemPrompt,
};
};

export const createBackendEnvFile = async (
root: string,
opts: {
Expand All @@ -278,6 +316,7 @@ export const createBackendEnvFile = async (
framework?: TemplateFramework;
dataSources?: TemplateDataSource[];
port?: number;
tools?: Tool[];
},
) => {
// Init env values
Expand All @@ -295,6 +334,8 @@ export const createBackendEnvFile = async (
// Add vector database environment variables
...getVectorDBEnvs(opts.vectorDb, opts.framework),
...getFrameworkEnvs(opts.framework, opts.port),
...getToolEnvs(opts.tools),
getSystemPromptEnv(opts.tools),
];
// Render and write env file
const content = renderEnvVar(envVars);
Expand Down
1 change: 1 addition & 0 deletions helpers/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -148,6 +148,7 @@ export const installTemplate = async (
framework: props.framework,
dataSources: props.dataSources,
port: props.externalPort,
tools: props.tools,
});

if (props.dataSources.length > 0) {
Expand Down
55 changes: 55 additions & 0 deletions helpers/tools.ts
Original file line number Diff line number Diff line change
Expand Up @@ -2,9 +2,12 @@ import fs from "fs/promises";
import path from "path";
import { red } from "picocolors";
import yaml from "yaml";
import { EnvVar } from "./env-variables";
import { makeDir } from "./make-dir";
import { TemplateFramework } from "./types";

export const TOOL_SYSTEM_PROMPT_ENV_VAR = "TOOL_SYSTEM_PROMPT";

export enum ToolType {
LLAMAHUB = "llamahub",
LOCAL = "local",
Expand All @@ -17,6 +20,7 @@ export type Tool = {
dependencies?: ToolDependencies[];
supportedFrameworks?: Array<TemplateFramework>;
type: ToolType;
envVars?: EnvVar[];
};

export type ToolDependencies = {
Expand All @@ -42,6 +46,13 @@ export const supportedTools: Tool[] = [
],
supportedFrameworks: ["fastapi"],
type: ToolType.LLAMAHUB,
envVars: [
{
name: TOOL_SYSTEM_PROMPT_ENV_VAR,
description: "System prompt for google search tool.",
value: `You are a Google search agent. You help users to get information from Google search.`,
},
],
},
{
display: "Wikipedia",
Expand All @@ -54,13 +65,57 @@ export const supportedTools: Tool[] = [
],
supportedFrameworks: ["fastapi", "express", "nextjs"],
type: ToolType.LLAMAHUB,
envVars: [
{
name: TOOL_SYSTEM_PROMPT_ENV_VAR,
description: "System prompt for wiki tool.",
value: `You are a Wikipedia agent. You help users to get information from Wikipedia.`,
},
],
},
{
display: "Weather",
name: "weather",
dependencies: [],
supportedFrameworks: ["fastapi", "express", "nextjs"],
type: ToolType.LOCAL,
envVars: [
{
name: TOOL_SYSTEM_PROMPT_ENV_VAR,
description: "System prompt for weather tool.",
value: `You are a weather forecast agent. You help users to get the weather forecast for a given location.`,
},
],
},
{
display: "Code Interpreter",
name: "interpreter",
dependencies: [],
supportedFrameworks: ["fastapi", "express", "nextjs"],
type: ToolType.LOCAL,
envVars: [
{
name: "E2B_API_KEY",
description:
"E2B_API_KEY key is required to run code interpreter tool. Get it here: https://e2b.dev/docs/getting-started/api-key",
},
{
name: "E2B_FILESERVER_URL_PREFIX",
description:
"E2B_FILESERVER_URL_PREFIX is the origin of the filesystem server. It's usually the same as your backend origin. Example: http://localhost:3000",
},
{
name: TOOL_SYSTEM_PROMPT_ENV_VAR,
description: "System prompt for code interpreter tool.",
value: `You are a Python interpreter.
- You are given tasks to complete and you run python code to solve them.
- The python code runs in a Jupyter notebook. Every time you call \`interpreter\` tool, the python code is executed in a separate cell. It's okay to make multiple calls to \`interpreter\`.
- Display visualizations using matplotlib or any other visualization library directly in the notebook. Shouldn't save the visualizations to a file, just return the base64 encoded data.
- You can install any pip package (if it exists) if you need to but the usual packages for data analysis are already preinstalled.
- You can run any python code you want in a secure environment.
- Use absolute url from result to display images or any other media.`,
},
],
},
];

Expand Down
5 changes: 4 additions & 1 deletion templates/components/engines/typescript/agent/chat.ts
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,10 @@ export async function createChatEngine() {
// add tools from LlamaIndexTS (if configured)
const llamaTools = await ToolsFactory.createTools(config.llamahub);
tools.push(...llamaTools);
} catch {}
} catch (e) {
console.error("Error loading tools", e);
throw e;
}

return new OpenAIAgent({
tools,
Expand Down
4 changes: 4 additions & 0 deletions templates/components/engines/typescript/agent/tools/index.ts
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
import { BaseToolWithCall } from "llamaindex";
import { InterpreterTool, InterpreterToolParams } from "./interpreter";
import { WeatherTool, WeatherToolParams } from "./weather";

type ToolCreator = (config: unknown) => BaseToolWithCall;
Expand All @@ -7,6 +8,9 @@ const toolFactory: Record<string, ToolCreator> = {
weather: (config: unknown) => {
return new WeatherTool(config as WeatherToolParams);
},
interpreter: (config: unknown) => {
return new InterpreterTool(config as InterpreterToolParams);
},
};

export function createLocalTools(
Expand Down
Loading
Loading