
Commit 69efe1b

bump semantic kernel
1 parent c1e6bca commit 69efe1b

5 files changed: +32 -29 lines changed


Diff for: rag-azure-openai-cosmosdb-notebook.ipynb

+22-19
@@ -9,11 +9,11 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 1,
    "metadata": {},
    "outputs": [],
    "source": [
-    "%pip install semantic-kernel==0.9.5b1"
+    "%pip install semantic-kernel==0.9.6b1"
    ]
   },
   {
@@ -203,10 +203,10 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "import semantic_kernel as sk\n",
+    "from semantic_kernel import Kernel\n",
     "\n",
     "# Intialize the kernel\n",
-    "kernel = sk.Kernel()"
+    "kernel = Kernel()"
    ]
   },
   {
@@ -364,7 +364,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 9,
+   "execution_count": 10,
    "metadata": {},
    "outputs": [
    {
@@ -380,7 +380,7 @@
    "from semantic_kernel.core_plugins.text_memory_plugin import TextMemoryPlugin\n",
    "\n",
    "memory = SemanticTextMemory(storage=store, embeddings_generator=kernel.get_service(\"text_embedding\"))\n",
-   "kernel.import_plugin_from_object(TextMemoryPlugin(memory), \"TextMemoryPluginACDB\")\n",
+   "kernel.add_plugin(TextMemoryPlugin(memory), \"TextMemoryPluginACDB\")\n",
    "print(\"Registered Azure Cosmos DB Memory Store...\")"
    ]
   },
@@ -411,7 +411,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 10,
+   "execution_count": 11,
    "metadata": {},
    "outputs": [
    {
@@ -444,7 +444,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 11,
+   "execution_count": 12,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -455,7 +455,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 12,
+   "execution_count": 13,
    "metadata": {},
    "outputs": [
    {
@@ -483,7 +483,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 13,
+   "execution_count": 14,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -498,7 +498,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 14,
+   "execution_count": 15,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -511,11 +511,11 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 15,
+   "execution_count": 17,
    "metadata": {},
    "outputs": [],
    "source": [
-    "from semantic_kernel import PromptTemplateConfig\n",
+    "from semantic_kernel.prompt_template import PromptTemplateConfig\n",
     "from semantic_kernel.prompt_template.input_variable import InputVariable\n",
     "\n",
     "chat_prompt_template_config = PromptTemplateConfig(\n",
@@ -532,29 +532,32 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 16,
+   "execution_count": 19,
    "metadata": {},
    "outputs": [],
    "source": [
-    "chat_function = kernel.create_function_from_prompt(\n",
+    "chat_function = kernel.add_function(\n",
     "    function_name=\"ChatGPTFunc\", plugin_name=\"chatGPTPlugin\", prompt_template_config=chat_prompt_template_config\n",
     ")"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 17,
+   "execution_count": 20,
    "metadata": {},
    "outputs": [],
    "source": [
+    "from semantic_kernel.functions import KernelArguments\n",
+    "\n",
+    "\n",
     "completions_result = await kernel.invoke(\n",
-    "    chat_function, sk.KernelArguments(query_term=query_term, db_record=result[0].additional_metadata)\n",
+    "    chat_function, KernelArguments(query_term=query_term, db_record=result[0].additional_metadata)\n",
     ")"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 18,
+   "execution_count": 21,
    "metadata": {},
    "outputs": [
    {
@@ -616,7 +619,7 @@
     "    query_term = input(\"Enter a query: \")\n",
     "    result = await memory.search(collection_name, query_term)\n",
     "    completions_result = kernel.invoke_stream(\n",
-    "        chat_function, sk.KernelArguments(query_term=query_term, db_record=result[0].additional_metadata)\n",
+    "        chat_function, KernelArguments(query_term=query_term, db_record=result[0].additional_metadata)\n",
     "    )\n",
     "    print(f\"Question:\\n{query_term}\\nResponse:\")\n",
     "    async for completion in completions_result:\n",

Diff for: src/pyproject.toml

+1-1
@@ -4,7 +4,7 @@ version = "1.0.0"
 description = "Demo Chat Application to demonstrate retrieval augmented generation using Azure Open AI, Azure Cosmos DB for MongoDB vCore, and semantic kernel."
 dependencies = [
     "Quart",
-    "semantic-kernel==0.9.5b1",
+    "semantic-kernel==0.9.6b1",
     "python-dotenv",
     "Hypercorn",
 ]

Diff for: src/quartapp/rag.py

+7-7
@@ -1,8 +1,7 @@
 import os
 from typing import Any
 
-import semantic_kernel as sk  # type: ignore [import-untyped]
-from semantic_kernel import Kernel, KernelFunction, PromptTemplateConfig
+from semantic_kernel import Kernel
 from semantic_kernel.connectors.ai.open_ai import (  # type: ignore [import-untyped]
     AzureChatCompletion,
     AzureTextEmbedding,
@@ -12,12 +11,14 @@
     AzureCosmosDBMemoryStore,
 )
 from semantic_kernel.core_plugins.text_memory_plugin import TextMemoryPlugin  # type: ignore [import-untyped]
+from semantic_kernel.functions import KernelArguments, KernelFunction
 from semantic_kernel.kernel import FunctionResult  # type: ignore [import-untyped]
 from semantic_kernel.memory.memory_store_base import MemoryStoreBase  # type: ignore [import-untyped]
 from semantic_kernel.memory.semantic_text_memory import (  # type: ignore [import-untyped]
     MemoryQueryResult,
     SemanticTextMemory,
 )
+from semantic_kernel.prompt_template import PromptTemplateConfig
 from semantic_kernel.prompt_template.input_variable import InputVariable  # type: ignore [import-untyped]
 
 # collection name will be used multiple times in the code so we store it in a variable
@@ -47,7 +48,7 @@ async def prompt_with_rag_or_vector(query_term: str, option: str) -> str:
 
 
 def initialize_sk_chat_embedding() -> Kernel:
-    kernel = sk.Kernel()
+    kernel = Kernel()
     # adding azure openai chat service
     chat_model_deployment_name = os.environ.get("AZURE_OPENAI_CHAT_DEPLOYMENT_NAME")
     endpoint = os.environ.get("AZURE_OPENAI_ENDPOINT")
@@ -97,7 +98,7 @@ async def initialize_sk_memory_store(
     )
     print("Finished updating Azure Cosmos DB Memory Store...")
     memory = SemanticTextMemory(storage=store, embeddings_generator=kernel.get_service("text_embedding"))
-    kernel.import_plugin_from_object(TextMemoryPlugin(memory), "TextMemoryPluginACDB")
+    kernel.add_plugin(TextMemoryPlugin(memory), "TextMemoryPluginACDB")
     print("Registered Azure Cosmos DB Memory Store...")
     return memory, store
 
@@ -131,8 +132,7 @@ async def grounded_response(kernel: Kernel) -> KernelFunction:
         execution_settings=execution_settings,
     )
 
-    chat_function: KernelFunction = kernel.create_function_from_prompt(
-        prompt=prompt,
+    chat_function: KernelFunction = kernel.add_function(
         function_name="ChatGPTFunc",
         plugin_name="chatGPTPlugin",
         prompt_template_config=chat_prompt_template_config,
@@ -150,7 +150,7 @@ async def perform_rag_search(
     db_record: str = result[0].additional_metadata if result else "The requested data is not Found."
     return await kernel.invoke(
         chat_function,
-        sk.KernelArguments(query_term=query_term, db_record=db_record),
+        KernelArguments(query_term=query_term, db_record=db_record),
     )
 
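The rag.py hunks make the same migration in the application code and also drop the now-redundant prompt= argument from the chat-function factory, since add_function takes the template through prompt_template_config alone. Below is a condensed sketch of how those two call sites read after the commit; it is not the module's actual code, the prompt text is a placeholder, and the helper names build_chat_function and rag_answer are illustrative, not the functions defined in rag.py.

# Condensed view of the two changed rag.py call sites under semantic-kernel 0.9.6b1.
from semantic_kernel import Kernel
from semantic_kernel.functions import KernelArguments, KernelFunction
from semantic_kernel.memory.semantic_text_memory import SemanticTextMemory
from semantic_kernel.prompt_template import PromptTemplateConfig
from semantic_kernel.prompt_template.input_variable import InputVariable


def build_chat_function(kernel: Kernel) -> KernelFunction:
    # add_function takes the template only via prompt_template_config;
    # the prompt= keyword passed to create_function_from_prompt is gone.
    config = PromptTemplateConfig(
        template="Answer {{$query_term}} using only {{$db_record}}.",  # placeholder prompt
        input_variables=[InputVariable(name="query_term"), InputVariable(name="db_record")],
    )
    return kernel.add_function(
        function_name="ChatGPTFunc",
        plugin_name="chatGPTPlugin",
        prompt_template_config=config,
    )


async def rag_answer(
    kernel: Kernel,
    memory: SemanticTextMemory,
    chat_function: KernelFunction,
    collection_name: str,
    query_term: str,
):
    # KernelArguments now comes from semantic_kernel.functions rather than the sk alias.
    result = await memory.search(collection_name, query_term)
    db_record = result[0].additional_metadata if result else "The requested data is not Found."
    return await kernel.invoke(
        chat_function,
        KernelArguments(query_term=query_term, db_record=db_record),
    )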

Diff for: src/requirements.in

+1-1
@@ -1,4 +1,4 @@
-semantic-kernel==0.9.5b1
+semantic-kernel==0.9.6b1
 python-dotenv
 Quart
 Hypercorn

Diff for: src/requirements.txt

+1-1
@@ -193,7 +193,7 @@ ruamel-yaml-clib==0.2.8
     # via ruamel-yaml
 scipy==1.13.0
     # via semantic-kernel
-semantic-kernel==0.9.5b1
+semantic-kernel==0.9.6b1
     # via -r src/requirements.in
 six==1.16.0
     # via
