Skip to content

Commit 66dbdae

Browse files
updated based on feedback
1 parent 4cc7ce5 commit 66dbdae

File tree

2 files changed

+4
-7
lines changed

2 files changed

+4
-7
lines changed

python/samples/concepts/memory/new_memory.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -92,7 +92,7 @@ class MyDataModelList:
9292
# function which returns the store.
9393
# Using a function allows for lazy initialization of the store,
9494
# so that settings for unused stores do not cause validation errors.
95-
services: dict[str, Callable[[], VectorStoreRecordCollection]] = {
95+
collections: dict[str, Callable[[], VectorStoreRecordCollection]] = {
9696
"ai_search": lambda: AzureAISearchCollection[MyDataModel](
9797
data_model_type=MyDataModel,
9898
),
@@ -133,7 +133,7 @@ async def main(service: str, use_azure_openai: bool, embedding_model: str):
133133
else:
134134
embedder = OpenAITextEmbedding(service_id=service_id, ai_model_id=embedding_model)
135135
kernel.add_service(embedder)
136-
async with services[service]() as record_collection:
136+
async with collections[service]() as record_collection:
137137
print(f"Creating {service} collection!")
138138
await record_collection.create_collection_if_not_exists()
139139

@@ -184,7 +184,7 @@ async def main(service: str, use_azure_openai: bool, embedding_model: str):
184184
argparse.ArgumentParser()
185185

186186
parser = argparse.ArgumentParser()
187-
parser.add_argument("--service", default="in_memory", choices=services.keys(), help="What store to use.")
187+
parser.add_argument("--service", default="in_memory", choices=collections.keys(), help="What store to use.")
188188
# Option of whether to use OpenAI or Azure OpenAI.
189189
parser.add_argument("--use-azure-openai", action="store_true", help="Use Azure OpenAI instead of OpenAI.")
190190
# Model

python/semantic_kernel/connectors/memory/qdrant/qdrant_collection.py

Lines changed: 1 addition & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -5,8 +5,6 @@
55
from collections.abc import Mapping, Sequence
66
from typing import Any, ClassVar, Generic, TypeVar
77

8-
from semantic_kernel.exceptions.search_exceptions import VectorSearchExecutionException
9-
108
if sys.version_info >= (3, 12):
119
from typing import override # pragma: no cover
1210
else:
@@ -29,6 +27,7 @@
2927
VectorStoreModelValidationError,
3028
)
3129
from semantic_kernel.exceptions.memory_connector_exceptions import MemoryConnectorException
30+
from semantic_kernel.exceptions.search_exceptions import VectorSearchExecutionException
3231
from semantic_kernel.kernel_types import OneOrMany
3332
from semantic_kernel.utils.experimental_decorator import experimental_class
3433
from semantic_kernel.utils.telemetry.user_agent import APP_INFO, prepend_semantic_kernel_to_user_agent
@@ -80,8 +79,6 @@ def __init__(
8079
When nothing is supplied, it defaults to an in-memory qdrant instance.
8180
You can also supply an async qdrant client directly.
8281
83-
If you want to use the vectorizable_text_search you will need to install `qdrant_client[fastembed]`.
84-
8582
Args:
8683
data_model_type (type[TModel]): The type of the data model.
8784
data_model_definition (VectorStoreRecordDefinition): The model fields, optional.

0 commit comments

Comments
 (0)