-
Notifications
You must be signed in to change notification settings - Fork 183
feat: use llamacloud for chat #149
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from 10 commits
c65daa3
286245b
52c848c
226f4e4
ba4c04d
0923a38
518931b
0a19bc7
831df3e
3bca368
8328240
8fdf640
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,5 @@ | ||
--- | ||
"create-llama": patch | ||
--- | ||
|
||
use llamacloud for chat |
Original file line number | Diff line number | Diff line change | ||||||||
---|---|---|---|---|---|---|---|---|---|---|
|
@@ -8,14 +8,14 @@ export async function createChatEngine() { | |||||||||
`StorageContext is empty - call 'npm run generate' to generate the storage first`, | ||||||||||
); | ||||||||||
} | ||||||||||
const retriever = index.asRetriever(); | ||||||||||
retriever.similarityTopK = process.env.TOP_K | ||||||||||
? parseInt(process.env.TOP_K) | ||||||||||
: 3; | ||||||||||
const retriever = index.asRetriever({ | ||||||||||
similarityTopK: process.env.TOP_K ? parseInt(process.env.TOP_K) : 3, | ||||||||||
}); | ||||||||||
|
||||||||||
return new ContextChatEngine({ | ||||||||||
chatModel: Settings.llm, | ||||||||||
retriever, | ||||||||||
systemPrompt: process.env.SYSTEM_PROMPT, | ||||||||||
// disable as a custom system prompt disables the generated context | ||||||||||
// systemPrompt: process.env.SYSTEM_PROMPT, | ||||||||||
Comment on lines
+18
to
+19
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Consider removing or clarifying the commented-out code. Commented-out code can lead to confusion if the reason isn't documented. If it should stay, document why: - // systemPrompt: process.env.SYSTEM_PROMPT,
+ // systemPrompt: process.env.SYSTEM_PROMPT, // Temporarily disabled until [condition/reason] Committable suggestion
Suggested change
|
||||||||||
}); | ||||||||||
} |
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,45 @@ | ||
from dotenv import load_dotenv | ||
|
||
load_dotenv() | ||
|
||
import os | ||
import logging | ||
from app.settings import init_settings | ||
from app.engine.loaders import get_documents | ||
from llama_index.indices.managed.llama_cloud import LlamaCloudIndex | ||
marcusschiesser marked this conversation as resolved.
Show resolved
Hide resolved
|
||
|
||
|
||
logging.basicConfig(level=logging.INFO) | ||
logger = logging.getLogger() | ||
|
||
|
||
def generate_datasource(): | ||
init_settings() | ||
logger.info("Generate index for the provided data") | ||
|
||
name = os.getenv("LLAMA_CLOUD_INDEX_NAME") | ||
project_name = os.getenv("LLAMA_CLOUD_PROJECT_NAME") | ||
api_key = os.getenv("LLAMA_CLOUD_API_KEY") | ||
base_url = os.getenv("LLAMA_CLOUD_BASE_URL") | ||
|
||
if not name or not project_name or not api_key: | ||
raise ValueError( | ||
"Please set LLAMA_CLOUD_INDEX_NAME, LLAMA_CLOUD_PROJECT_NAME and LLAMA_CLOUD_API_KEY" | ||
" to your environment variables or config them in .env file" | ||
) | ||
|
||
documents = get_documents() | ||
|
||
LlamaCloudIndex.from_documents( | ||
documents, | ||
name, | ||
project_name=project_name, | ||
api_key=api_key, | ||
base_url=base_url, | ||
) | ||
|
||
logger.info("Finished generating the index") | ||
|
||
|
||
if __name__ == "__main__": | ||
generate_datasource() |
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,32 @@ | ||
from dotenv import load_dotenv | ||
|
||
load_dotenv() | ||
|
||
import logging | ||
import os | ||
from llama_index.indices.managed.llama_cloud import LlamaCloudIndex | ||
marcusschiesser marked this conversation as resolved.
Show resolved
Hide resolved
|
||
|
||
|
||
logger = logging.getLogger("uvicorn") | ||
|
||
|
||
def get_index(): | ||
name = os.getenv("LLAMA_CLOUD_INDEX_NAME") | ||
project_name = os.getenv("LLAMA_CLOUD_PROJECT_NAME") | ||
api_key = os.getenv("LLAMA_CLOUD_API_KEY") | ||
base_url = os.getenv("LLAMA_CLOUD_BASE_URL") | ||
|
||
if not name or not project_name or not api_key: | ||
raise ValueError( | ||
"Please set LLAMA_CLOUD_INDEX_NAME, LLAMA_CLOUD_PROJECT_NAME and LLAMA_CLOUD_API_KEY" | ||
" to your environment variables or config them in .env file" | ||
) | ||
|
||
index = LlamaCloudIndex( | ||
name, | ||
project_name=project_name, | ||
api_key=api_key, | ||
base_url=base_url, | ||
) | ||
|
||
return index |
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,26 @@ | ||
import * as dotenv from "dotenv"; | ||
import { LlamaCloudIndex } from "llamaindex"; | ||
import { getDocuments } from "./loader"; | ||
import { initSettings } from "./settings"; | ||
import { checkRequiredEnvVars } from "./shared"; | ||
|
||
dotenv.config(); | ||
|
||
async function loadAndIndex() { | ||
const documents = await getDocuments(); | ||
await LlamaCloudIndex.fromDocuments({ | ||
documents, | ||
name: process.env.LLAMA_CLOUD_INDEX_NAME!, | ||
projectName: process.env.LLAMA_CLOUD_PROJECT_NAME!, | ||
apiKey: process.env.LLAMA_CLOUD_API_KEY, | ||
baseUrl: process.env.LLAMA_CLOUD_BASE_URL, | ||
}); | ||
console.log(`Successfully created embeddings!`); | ||
} | ||
|
||
(async () => { | ||
checkRequiredEnvVars(); | ||
initSettings(); | ||
await loadAndIndex(); | ||
console.log("Finished generating storage."); | ||
})(); |
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,13 @@ | ||
import { LlamaCloudIndex } from "llamaindex/cloud/LlamaCloudIndex"; | ||
import { checkRequiredEnvVars } from "./shared"; | ||
|
||
export async function getDataSource() { | ||
checkRequiredEnvVars(); | ||
const index = new LlamaCloudIndex({ | ||
name: process.env.LLAMA_CLOUD_INDEX_NAME!, | ||
projectName: process.env.LLAMA_CLOUD_PROJECT_NAME!, | ||
apiKey: process.env.LLAMA_CLOUD_API_KEY, | ||
baseUrl: process.env.LLAMA_CLOUD_BASE_URL, | ||
}); | ||
return index; | ||
} |
Uh oh!
There was an error while loading. Please reload this page.