Skip to content

upload file to sandbox #355

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

Merged
merged 34 commits into from
Oct 16, 2024
Merged
Show file tree
Hide file tree
Changes from 30 commits
Commits
Show all changes
34 commits
Select commit Hold shift + click to select a range
7dcbf2f
tmp
leehuwuj Oct 8, 2024
1e2502a
update private file handler
leehuwuj Oct 11, 2024
e12bd29
enhance code
leehuwuj Oct 11, 2024
1cef23c
reduce complexity
leehuwuj Oct 11, 2024
5bd3591
fix mypy
leehuwuj Oct 11, 2024
c8a9472
fix mypy
leehuwuj Oct 11, 2024
5fd25f6
remove comment
leehuwuj Oct 11, 2024
a4d3d36
support upload file and enhance interpreter tool
leehuwuj Oct 14, 2024
6efadd4
fix blocking stream event
leehuwuj Oct 14, 2024
3e82be7
fix mypy
leehuwuj Oct 14, 2024
393a926
Merge remote-tracking branch 'origin/main' into feat/upload-file-sandbox
leehuwuj Oct 14, 2024
9602c6c
add changeset and fix mypy after merge
leehuwuj Oct 14, 2024
985cb26
fix mypy
leehuwuj Oct 14, 2024
9a4c0a3
enhance code
leehuwuj Oct 14, 2024
2efc727
typing
leehuwuj Oct 14, 2024
249edf5
wording
leehuwuj Oct 15, 2024
22cd958
exclude indexing private csv file if code executor tool is enabled
leehuwuj Oct 15, 2024
30e408b
remove file content and duplicated file id
leehuwuj Oct 15, 2024
94b338a
simpler file upload
leehuwuj Oct 15, 2024
6bb7a30
support for TS
leehuwuj Oct 15, 2024
bbf321f
support file upload for artifact in TS
leehuwuj Oct 15, 2024
852e6ec
enhance file path
leehuwuj Oct 15, 2024
5ae6b57
enhance code
leehuwuj Oct 15, 2024
c64e2ba
revise vercel streaming
leehuwuj Oct 15, 2024
36cdb1e
remove redundant id
leehuwuj Oct 15, 2024
e0921fe
add show file widget to the
leehuwuj Oct 15, 2024
a3c1c55
allow upload file with empty index store
leehuwuj Oct 15, 2024
bae12e6
Merge branch 'main' into feat/upload-file-sandbox
marcusschiesser Oct 15, 2024
7d9dee2
add data scientist use case
marcusschiesser Oct 15, 2024
3b91e7b
use GPT4o model for data scientist and code artifact
marcusschiesser Oct 15, 2024
954113e
update comments
leehuwuj Oct 15, 2024
624aea7
use previewcard to render documents
marcusschiesser Oct 15, 2024
788fab0
fix: UI overlap, key warning, wrong filename and url in markdown
thucpn Oct 16, 2024
0f56092
use div as tag wrapper for message
thucpn Oct 16, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 5 additions & 0 deletions .changeset/poor-knives-smoke.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
---
"create-llama": patch
---

Fix event streaming is blocked
5 changes: 5 additions & 0 deletions .changeset/wet-tips-judge.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
---
"create-llama": patch
---

Add upload file to sandbox (artifact and code interpreter)
2 changes: 1 addition & 1 deletion helpers/tools.ts
Original file line number Diff line number Diff line change
Expand Up @@ -139,7 +139,7 @@ For better results, you can specify the region parameter to get results from a s
dependencies: [
{
name: "e2b_code_interpreter",
version: "0.0.10",
version: "0.0.11b38",
},
],
supportedFrameworks: ["fastapi", "express", "nextjs"],
Expand Down
55 changes: 41 additions & 14 deletions questions/simple.ts
Original file line number Diff line number Diff line change
Expand Up @@ -5,14 +5,19 @@ import { getTools } from "../helpers/tools";
import { ModelConfig, TemplateFramework } from "../helpers/types";
import { PureQuestionArgs, QuestionResults } from "./types";
import { askPostInstallAction, questionHandlers } from "./utils";
type AppType = "rag" | "code_artifact" | "multiagent" | "extractor";

type AppType =
| "rag"
| "code_artifact"
| "multiagent"
| "extractor"
| "data_scientist";

type SimpleAnswers = {
appType: AppType;
language: TemplateFramework;
useLlamaCloud: boolean;
llamaCloudKey?: string;
modelConfig: ModelConfig;
};

export const askSimpleQuestions = async (
Expand All @@ -25,6 +30,7 @@ export const askSimpleQuestions = async (
message: "What app do you want to build?",
choices: [
{ title: "Agentic RAG", value: "rag" },
{ title: "Data Scientist", value: "data_scientist" },
{ title: "Code Artifact Agent", value: "code_artifact" },
{ title: "Multi-Agent Report Gen", value: "multiagent" },
{ title: "Structured extraction", value: "extractor" },
Expand Down Expand Up @@ -80,40 +86,56 @@ export const askSimpleQuestions = async (
}
}

const modelConfig = await askModelConfig({
openAiKey: args.openAiKey,
askModels: args.askModels ?? false,
framework: language,
});

const results = convertAnswers({
const results = await convertAnswers(args, {
appType,
language,
useLlamaCloud,
llamaCloudKey,
modelConfig,
});

results.postInstallAction = await askPostInstallAction(results);
return results;
};

const convertAnswers = (answers: SimpleAnswers): QuestionResults => {
const convertAnswers = async (
args: PureQuestionArgs,
answers: SimpleAnswers,
): Promise<QuestionResults> => {
const MODEL_GPT4o: ModelConfig = {
provider: "openai",
apiKey: args.openAiKey,
model: "gpt-4o",
embeddingModel: "text-embedding-3-large",
dimensions: 1536,
isConfigured(): boolean {
return !!args.openAiKey;
},
};
const lookup: Record<
AppType,
Pick<QuestionResults, "template" | "tools" | "frontend" | "dataSources">
Pick<QuestionResults, "template" | "tools" | "frontend" | "dataSources"> & {
modelConfig?: ModelConfig;
}
> = {
rag: {
template: "streaming",
tools: getTools(["duckduckgo"]),
frontend: true,
dataSources: [EXAMPLE_FILE],
},
data_scientist: {
template: "streaming",
tools: getTools(["interpreter", "document_generator"]),
frontend: true,
dataSources: [],
modelConfig: MODEL_GPT4o,
},
code_artifact: {
template: "streaming",
tools: getTools(["artifact"]),
frontend: true,
dataSources: [],
modelConfig: MODEL_GPT4o,
},
multiagent: {
template: "multiagent",
Expand All @@ -140,11 +162,16 @@ const convertAnswers = (answers: SimpleAnswers): QuestionResults => {
llamaCloudKey: answers.llamaCloudKey,
useLlamaParse: answers.useLlamaCloud,
llamapack: "",
postInstallAction: "none",
vectorDb: answers.useLlamaCloud ? "llamacloud" : "none",
modelConfig: answers.modelConfig,
observability: "none",
...results,
modelConfig:
results.modelConfig ??
(await askModelConfig({
openAiKey: args.openAiKey,
askModels: args.askModels ?? false,
framework: answers.language,
})),
frontend: answers.language === "nextjs" ? false : results.frontend,
};
};
21 changes: 16 additions & 5 deletions templates/components/engines/python/agent/tools/artifact.py
Original file line number Diff line number Diff line change
Expand Up @@ -66,21 +66,29 @@ class CodeGeneratorTool:
def __init__(self):
pass

def artifact(self, query: str, old_code: Optional[str] = None) -> Dict:
"""Generate a code artifact based on the input.
def artifact(
self,
query: str,
sandbox_files: Optional[List[str]] = None,
old_code: Optional[str] = None,
) -> Dict:
"""Generate a code artifact based on the provided input.

Args:
query (str): The description of the application you want to build.
query (str): A description of the application you want to build.
sandbox_files (Optional[List[str]], optional): A list of sandbox file paths. Defaults to None. Include these files if the code requires them.
old_code (Optional[str], optional): The existing code to be modified. Defaults to None.

Returns:
Dict: A dictionary containing the generated artifact information.
Dict: A dictionary containing information about the generated artifact.
"""

if old_code:
user_message = f"{query}\n\nThe existing code is: \n```\n{old_code}\n```"
else:
user_message = query
if sandbox_files:
user_message += f"\n\nThe provided files are: \n{str(sandbox_files)}"

messages: List[ChatMessage] = [
ChatMessage(role="system", content=CODE_GENERATION_PROMPT),
Expand All @@ -90,7 +98,10 @@ def artifact(self, query: str, old_code: Optional[str] = None) -> Dict:
sllm = Settings.llm.as_structured_llm(output_cls=CodeArtifact) # type: ignore
response = sllm.chat(messages)
data: CodeArtifact = response.raw
return data.model_dump()
data_dict = data.model_dump()
if sandbox_files:
data_dict["files"] = sandbox_files
return data_dict
except Exception as e:
logger.error(f"Failed to generate artifact: {str(e)}")
raise e
Expand Down
Loading
Loading