Commit e70d792

Review comment and format
1 parent b44fefd · commit e70d792

File tree

1 file changed: +19 −31 lines

rag-azure-openai-cosmosdb-notebook.ipynb (+19 −31)
@@ -59,16 +59,16 @@
 },
 {
 "cell_type": "code",
-"execution_count": 1,
+"execution_count": 2,
 "metadata": {},
 "outputs": [
 {
 "data": {
 "text/plain": [
-"True"
+"False"
 ]
 },
-"execution_count": 1,
+"execution_count": 2,
 "metadata": {},
 "output_type": "execute_result"
 }
@@ -105,9 +105,7 @@
 "\n",
 "# Vector search index parameters\n",
 "index_name = \"VectorSearchIndex\"\n",
-"vector_dimensions = (\n",
-"    1536  # text-embedding-ada-002 uses a 1536-dimensional embedding vector\n",
-")\n",
+"vector_dimensions = 1536  # text-embedding-ada-002 uses a 1536-dimensional embedding vector\n",
 "num_lists = 1\n",
 "similarity = \"COS\"  # cosine distance"
 ]
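Stripped of the notebook JSON escaping, the reformatted parameter cell now reads roughly as below. This is only a sketch of the post-format state; the values are taken verbatim from the hunk above, and num_lists and similarity are presumably consumed later when the Cosmos DB vector index is created.

    # Vector search index parameters (as they appear after this commit)
    index_name = "VectorSearchIndex"
    vector_dimensions = 1536  # text-embedding-ada-002 uses a 1536-dimensional embedding vector
    num_lists = 1
    similarity = "COS"  # cosine distance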
@@ -141,16 +139,14 @@
 "from semantic_kernel.memory.memory_store_base import MemoryStoreBase\n",
 "\n",
 "\n",
-"async def upsert_data_to_memory_store(\n",
-"    memory: SemanticTextMemory, store: MemoryStoreBase, data_file_path: str\n",
-") -> None:\n",
+"async def upsert_data_to_memory_store(memory: SemanticTextMemory, store: MemoryStoreBase, data_file_path: str) -> None:\n",
 "    \"\"\"\n",
 "    This asynchronous function takes two memory stores and a data file path as arguments.\n",
 "    It is designed to upsert (update or insert) data into the memory stores from the data file.\n",
 "\n",
 "    Args:\n",
-"        kernel_memory_store (callable): A callable object that represents the kernel memory store where data will be upserted.\n",
-"        memory_store (callable): A callable object that represents the memory store where data will be upserted.\n",
+"        memory (callable): A callable object that represents the semantic kernel memory.\n",
+"        store (callable): A callable object that represents the memory store where data will be upserted.\n",
 "        data_file_path (str): The path to the data file that contains the data to be upserted.\n",
 "\n",
 "    Returns:\n",
@@ -164,11 +160,7 @@
 "        # check if the item already exists in the memory store\n",
 "        # if the id doesn't exist, it throws an exception\n",
 "        try:\n",
-"            already_created = bool(\n",
-"                await store.get(\n",
-"                    collection_name, item[\"id\"], with_embedding=True\n",
-"                )\n",
-"            )\n",
+"            already_created = bool(await store.get(collection_name, item[\"id\"], with_embedding=True))\n",
 "        except Exception:\n",
 "            already_created = False\n",
 "        # if the record doesn't exist, we generate embeddings and save it to the database\n",
@@ -297,9 +289,7 @@
 ],
 "source": [
 "# adding azure openai text embedding service\n",
-"embedding_model_deployment_name = os.environ.get(\n",
-"    \"AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT_NAME\"\n",
-")\n",
+"embedding_model_deployment_name = os.environ.get(\"AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT_NAME\")\n",
 "\n",
 "kernel.add_service(\n",
 "    AzureTextEmbedding(\n",
@@ -515,11 +505,7 @@
 "from semantic_kernel.connectors.ai.open_ai import OpenAITextPromptExecutionSettings\n",
 "\n",
 "execution_settings = OpenAITextPromptExecutionSettings(\n",
-"    service_id=\"chat_completion\",\n",
-"    ai_model_id=chat_model_deployment_name,\n",
-"    max_tokens=500,\n",
-"    temperature=0.0,\n",
-"    top_p=0.5\n",
+"    service_id=\"chat_completion\", ai_model_id=chat_model_deployment_name, max_tokens=500, temperature=0.0, top_p=0.5\n",
 ")"
 ]
 },
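Unescaped, the consolidated settings cell amounts to the following. This is a sketch only: chat_model_deployment_name is defined in an earlier notebook cell that this hunk does not show, so a placeholder value stands in for it here.

    from semantic_kernel.connectors.ai.open_ai import OpenAITextPromptExecutionSettings

    chat_model_deployment_name = "gpt-35-turbo"  # illustrative placeholder; the notebook sets this in an earlier cell
    execution_settings = OpenAITextPromptExecutionSettings(
        service_id="chat_completion", ai_model_id=chat_model_deployment_name, max_tokens=500, temperature=0.0, top_p=0.5
    )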
@@ -540,7 +526,7 @@
 "        InputVariable(name=\"db_record\", description=\"The database record\", is_required=True),\n",
 "        InputVariable(name=\"query_term\", description=\"The user input\", is_required=True),\n",
 "    ],\n",
-"    execution_settings=execution_settings\n",
+"    execution_settings=execution_settings,\n",
 ")"
 ]
 },
@@ -551,9 +537,7 @@
 "outputs": [],
 "source": [
 "chat_function = kernel.create_function_from_prompt(\n",
-"    function_name= \"ChatGPTFunc\",\n",
-"    plugin_name=\"chatGPTPlugin\",\n",
-"    prompt_template_config=chat_prompt_template_config\n",
+"    function_name=\"ChatGPTFunc\", plugin_name=\"chatGPTPlugin\", prompt_template_config=chat_prompt_template_config\n",
 ")"
 ]
 },
@@ -563,7 +547,9 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"completions_result = await kernel.invoke(chat_function, sk.KernelArguments(query_term=query_term, db_record=result[0].additional_metadata))"
+"completions_result = await kernel.invoke(\n",
+"    chat_function, sk.KernelArguments(query_term=query_term, db_record=result[0].additional_metadata)\n",
+")"
 ]
 },
 {
@@ -629,7 +615,9 @@
 "while query_term != \"exit\":\n",
 "    query_term = input(\"Enter a query: \")\n",
 "    result = await memory.search(collection_name, query_term)\n",
-"    completions_result = kernel.invoke_stream(chat_function, sk.KernelArguments(query_term=query_term, db_record=result[0].additional_metadata))\n",
+"    completions_result = kernel.invoke_stream(\n",
+"        chat_function, sk.KernelArguments(query_term=query_term, db_record=result[0].additional_metadata)\n",
+"    )\n",
 "    print(f\"Question:\\n{query_term}\\nResponse:\")\n",
 "    async for completion in completions_result:\n",
 "        print(str(completion[0]), end=\"\")\n",
@@ -654,7 +642,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.11.8"
+"version": "3.12.2"
 }
 },
 "nbformat": 4,
