Skip to content

Commit bedb36c

Browse files
committed
bump llama_index version and add support for using a LlamaCloud index with all vector stores
1 parent 07fcefd commit bedb36c

File tree

3 files changed

+65
-10
lines changed

3 files changed

+65
-10
lines changed

templates/components/vectordbs/python/none/index.py

Lines changed: 27 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -5,10 +5,32 @@
55
from cachetools import cached, TTLCache
66
from llama_index.core.storage import StorageContext
77
from llama_index.core.indices import load_index_from_storage
8+
from llama_index.indices.managed.llama_cloud import LlamaCloudIndex
89

910
logger = logging.getLogger("uvicorn")
1011

1112

13+
def get_llama_cloud_index():
    """Build a LlamaCloudIndex from the LLAMA_CLOUD_* environment variables.

    Reads:
        LLAMA_CLOUD_INDEX_NAME   (required): name of the managed index.
        LLAMA_CLOUD_PROJECT_NAME (required): LlamaCloud project name.
        LLAMA_CLOUD_API_KEY      (required): LlamaCloud API key.
        LLAMA_CLOUD_BASE_URL     (optional): override for the service URL.

    Returns:
        A configured LlamaCloudIndex instance.

    Raises:
        ValueError: if any required variable is unset; the message names
            exactly the variables that are missing.
    """
    required = {
        "LLAMA_CLOUD_INDEX_NAME": os.getenv("LLAMA_CLOUD_INDEX_NAME"),
        "LLAMA_CLOUD_PROJECT_NAME": os.getenv("LLAMA_CLOUD_PROJECT_NAME"),
        "LLAMA_CLOUD_API_KEY": os.getenv("LLAMA_CLOUD_API_KEY"),
    }
    missing = [name for name, value in required.items() if value is None]
    if missing:
        # Tell the user precisely which variables are missing instead of
        # always listing all three.
        raise ValueError(
            f"Please set {', '.join(missing)}"
            " in your environment variables or configure them in the .env file"
        )

    return LlamaCloudIndex(
        name=required["LLAMA_CLOUD_INDEX_NAME"],
        project_name=required["LLAMA_CLOUD_PROJECT_NAME"],
        api_key=required["LLAMA_CLOUD_API_KEY"],
        # Optional: None when LLAMA_CLOUD_BASE_URL is unset.
        base_url=os.getenv("LLAMA_CLOUD_BASE_URL"),
    )
32+
33+
1234
@cached(
1335
TTLCache(maxsize=10, ttl=timedelta(minutes=5).total_seconds()),
1436
key=lambda *args, **kwargs: "global_storage_context",
@@ -18,6 +40,11 @@ def get_storage_context(persist_dir: str) -> StorageContext:
1840

1941

2042
def get_index():
43+
use_llama_cloud = os.getenv("USE_LLAMA_CLOUD", "false").lower() == "true"
44+
if use_llama_cloud:
45+
logger.info("Connecting to LlamaCloud...")
46+
return get_llama_cloud_index()
47+
2148
storage_dir = os.getenv("STORAGE_DIR", "storage")
2249
# check if storage already exists
2350
if not os.path.exists(storage_dir):
Lines changed: 36 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -1,17 +1,45 @@
1+
import os
12
import logging
3+
from llama_index.indices.managed.llama_cloud import LlamaCloudIndex
24
from llama_index.core.indices import VectorStoreIndex
35
from app.engine.vectordb import get_vector_store
46

57

68
logger = logging.getLogger("uvicorn")
79

810

9-
def get_index():
10-
logger.info("Connecting vector store...")
11-
store = get_vector_store()
12-
# Load the index from the vector store
13-
# If you are using a vector store that doesn't store text,
14-
# you must load the index from both the vector store and the document store
15-
index = VectorStoreIndex.from_vector_store(store)
16-
logger.info("Finished load index from vector store.")
11+
def get_llama_cloud_index():
    """Build a LlamaCloudIndex from the LLAMA_CLOUD_* environment variables.

    Reads:
        LLAMA_CLOUD_INDEX_NAME   (required): name of the managed index.
        LLAMA_CLOUD_PROJECT_NAME (required): LlamaCloud project name.
        LLAMA_CLOUD_API_KEY      (required): LlamaCloud API key.
        LLAMA_CLOUD_BASE_URL     (optional): override for the service URL.

    Returns:
        A configured LlamaCloudIndex instance.

    Raises:
        ValueError: if any required variable is unset; the message names
            exactly the variables that are missing.
    """
    required = {
        "LLAMA_CLOUD_INDEX_NAME": os.getenv("LLAMA_CLOUD_INDEX_NAME"),
        "LLAMA_CLOUD_PROJECT_NAME": os.getenv("LLAMA_CLOUD_PROJECT_NAME"),
        "LLAMA_CLOUD_API_KEY": os.getenv("LLAMA_CLOUD_API_KEY"),
    }
    missing = [name for name, value in required.items() if value is None]
    if missing:
        # Tell the user precisely which variables are missing instead of
        # always listing all three.
        raise ValueError(
            f"Please set {', '.join(missing)}"
            " in your environment variables or configure them in the .env file"
        )

    return LlamaCloudIndex(
        name=required["LLAMA_CLOUD_INDEX_NAME"],
        project_name=required["LLAMA_CLOUD_PROJECT_NAME"],
        api_key=required["LLAMA_CLOUD_API_KEY"],
        # Optional: None when LLAMA_CLOUD_BASE_URL is unset.
        base_url=os.getenv("LLAMA_CLOUD_BASE_URL"),
    )
30+
31+
32+
def get_index():
    """Return the query index.

    When the USE_LLAMA_CLOUD environment variable is "true" (case-insensitive),
    connect to a managed LlamaCloud index; otherwise load the index from the
    configured vector store.
    """
    if os.getenv("USE_LLAMA_CLOUD", "false").lower() == "true":
        logger.info("Connecting to LlamaCloud...")
        return get_llama_cloud_index()

    logger.info("Connecting vector store...")
    vector_store = get_vector_store()
    # If the vector store does not store text, the index must be loaded from
    # both the vector store and the document store.
    local_index = VectorStoreIndex.from_vector_store(vector_store)
    logger.info("Finished load index from vector store.")
    return local_index

templates/types/streaming/fastapi/pyproject.toml

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -14,8 +14,8 @@ fastapi = "^0.109.1"
1414
uvicorn = { extras = ["standard"], version = "^0.23.2" }
1515
python-dotenv = "^1.0.0"
1616
aiostream = "^0.5.2"
17-
llama-index = "0.10.50"
18-
llama-index-core = "0.10.50"
17+
llama-index = "0.10.52"
18+
llama-index-core = "0.10.52.post1"
1919
cachetools = "^5.3.3"
2020

2121
[build-system]

0 commit comments

Comments
 (0)