Skip to content

Commit a894aeb

Browse files
Python: implement filters (#5681)
### Motivation and Context <!-- Thank you for your contribution to the semantic-kernel repo! Please help reviewers and future users, providing the following information: 1. Why is this change required? 2. What problem does it solve? 3. What scenario does it contribute to? 4. If it fixes an open issue, please link to the issue here. --> This pull request includes significant changes across multiple files, mainly related to the addition of hooks and the modification of function invocations in the `semantic_kernel` module. The changes also include the addition of a new sample and a YAML file, and modifications to the `__init__.py` files. Removals: * [`python/semantic_kernel/events`](diffhunk://#diff-ebda9504832b19ab83239a92c9a6d5f8c744deff9fef86071c13956ec92bb010L1-L11): Removed the previously used events. New Exceptions: * [`python/semantic_kernel/exceptions/kernel_exceptions.py`](diffhunk://#diff-450aaa5595a8b22cd6ee212eb79b7d6b0d4e9c1072063ef32018a3e7d3fdf21dR41-R48): Added new exception classes `OperationCancelledException` and `HookInvalidSignatureError`. [[1]](diffhunk://#diff-450aaa5595a8b22cd6ee212eb79b7d6b0d4e9c1072063ef32018a3e7d3fdf21dR41-R48) [[2]](diffhunk://#diff-450aaa5595a8b22cd6ee212eb79b7d6b0d4e9c1072063ef32018a3e7d3fdf21dR57-R58) Fixes: #3038 Fixes: #6276 ### Contribution Checklist <!-- Before submitting this PR, please make sure: --> - [x] The code builds clean without any errors or warnings - [x] The PR follows the [SK Contribution Guidelines](https://github.com/microsoft/semantic-kernel/blob/main/CONTRIBUTING.md) and the [pre-submission formatting script](https://github.com/microsoft/semantic-kernel/blob/main/CONTRIBUTING.md#development-scripts) raises no violations - [x] All unit tests pass, and I have added new tests where possible - [ ] I didn't break anyone 😄
1 parent 3db321b commit a894aeb

File tree

45 files changed

+1555
-1039
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

45 files changed

+1555
-1039
lines changed

python/samples/concepts/README.md

+1
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,7 @@ This section contains code snippets that demonstrate the usage of Semantic Kerne
66
| -------- | ----------- |
77
| AutoFunctionCalling | Using `Auto Function Calling` to allow function call capable models to invoke Kernel Functions automatically |
88
| ChatCompletion | Using [`ChatCompletion`](https://github.com/microsoft/semantic-kernel/blob/main/python/semantic_kernel/connectors/ai/chat_completion_client_base.py) messaging capable service with models |
9+
| Filtering | Creating and using Filters |
910
| Functions | Invoking [`Method`](https://github.com/microsoft/semantic-kernel/blob/main/python/semantic_kernel/functions/kernel_function_from_method.py) or [`Prompt`](https://github.com/microsoft/semantic-kernel/blob/main/python/semantic_kernel/functions/kernel_function_from_prompt.py) functions with [`Kernel`](https://github.com/microsoft/semantic-kernel/blob/main/python/semantic_kernel/kernel.py) |
1011
| Grounding | An example of how to perform LLM grounding |
1112
| Logging | Showing how to set up logging |

python/samples/concepts/auto_function_calling/chat_gpt_api_function_calling.py

+4-12
Original file line numberDiff line numberDiff line change
@@ -7,10 +7,7 @@
77

88
from semantic_kernel import Kernel
99
from semantic_kernel.connectors.ai.function_call_behavior import FunctionCallBehavior
10-
from semantic_kernel.connectors.ai.open_ai import (
11-
OpenAIChatCompletion,
12-
OpenAIChatPromptExecutionSettings,
13-
)
10+
from semantic_kernel.connectors.ai.open_ai import OpenAIChatCompletion, OpenAIChatPromptExecutionSettings
1411
from semantic_kernel.contents import ChatHistory
1512
from semantic_kernel.contents.chat_message_content import ChatMessageContent
1613
from semantic_kernel.contents.function_call_content import FunctionCallContent
@@ -21,6 +18,7 @@
2118
if TYPE_CHECKING:
2219
from semantic_kernel.functions import KernelFunction
2320

21+
2422
system_message = """
2523
You are a chat bot. Your name is Mosscap and
2624
you have one goal: figure out what people need.
@@ -37,12 +35,7 @@
3735
kernel = Kernel()
3836

3937
# Note: the underlying gpt-35/gpt-4 model version needs to be at least version 0613 to support tools.
40-
kernel.add_service(
41-
OpenAIChatCompletion(
42-
service_id="chat",
43-
ai_model_id="gpt-3.5-turbo-1106",
44-
),
45-
)
38+
kernel.add_service(OpenAIChatCompletion(service_id="chat"))
4639

4740
plugins_directory = os.path.join(__file__, "../../../../../prompt_template_samples/")
4841
# adding plugins to the kernel
@@ -67,7 +60,6 @@
6760
# If configured to be greater than one, this value will be overridden to 1.
6861
execution_settings = OpenAIChatPromptExecutionSettings(
6962
service_id="chat",
70-
ai_model_id="gpt-3.5-turbo-1106",
7163
max_tokens=2000,
7264
temperature=0.7,
7365
top_p=0.8,
@@ -149,7 +141,7 @@ async def chat() -> bool:
149141
arguments["user_input"] = user_input
150142
arguments["chat_history"] = history
151143

152-
stream = False
144+
stream = True
153145
if stream:
154146
await handle_streaming(kernel, chat_function, arguments=arguments)
155147
else:

python/samples/concepts/chat_completion/chat.py renamed to python/samples/concepts/chat_completion/chat_streaming.py

+12-4
Original file line numberDiff line numberDiff line change
@@ -1,10 +1,12 @@
11
# Copyright (c) Microsoft. All rights reserved.
22

33
import asyncio
4+
from functools import reduce
45

56
from semantic_kernel import Kernel
67
from semantic_kernel.connectors.ai.open_ai import OpenAIChatCompletion
78
from semantic_kernel.contents import ChatHistory
9+
from semantic_kernel.contents.streaming_chat_message_content import StreamingChatMessageContent
810
from semantic_kernel.prompt_template import InputVariable, PromptTemplateConfig
911

1012
prompt = """
@@ -71,11 +73,17 @@ async def chat(chat_history: ChatHistory) -> bool:
7173
print("\n\nExiting chat...")
7274
return False
7375

74-
answer = await kernel.invoke(chat_function, user_input=user_input, chat_history=chat_history)
76+
print("ChatBot:> ", end="")
77+
streamed_chunks: list[StreamingChatMessageContent] = []
78+
responses = kernel.invoke_stream(chat_function, user_input=user_input, chat_history=chat_history)
79+
async for message in responses:
80+
streamed_chunks.append(message[0])
81+
print(str(message[0]), end="")
82+
print("")
7583
chat_history.add_user_message(user_input)
76-
chat_history.add_assistant_message(str(answer))
77-
78-
print(f"ChatBot:> {answer}")
84+
if streamed_chunks:
85+
streaming_chat_message = reduce(lambda first, second: first + second, streamed_chunks)
86+
chat_history.add_message(streaming_chat_message)
7987
return True
8088

8189

Original file line numberDiff line numberDiff line change
@@ -0,0 +1,169 @@
# Copyright (c) Microsoft. All rights reserved.

import asyncio
import os

from semantic_kernel import Kernel
from semantic_kernel.connectors.ai.function_call_behavior import FunctionCallBehavior
from semantic_kernel.connectors.ai.open_ai import OpenAIChatCompletion, OpenAIChatPromptExecutionSettings
from semantic_kernel.contents import ChatHistory
from semantic_kernel.contents.chat_message_content import ChatMessageContent
from semantic_kernel.contents.function_call_content import FunctionCallContent
from semantic_kernel.core_plugins import MathPlugin, TimePlugin
from semantic_kernel.filters.auto_function_invocation.auto_function_invocation_context import (
    AutoFunctionInvocationContext,
)
from semantic_kernel.filters.filter_types import FilterTypes
from semantic_kernel.functions import KernelArguments
from semantic_kernel.functions.function_result import FunctionResult

system_message = """
You are a chat bot. Your name is Mosscap and
you have one goal: figure out what people need.
Your full name, should you need to know it, is
Splendid Speckled Mosscap. You communicate
effectively, but you tend to answer with long
flowery prose. You are also a math wizard,
especially for adding and subtracting.
You also excel at joke telling, where your tone is often sarcastic.
Once you have the answer I am looking for,
you will return a full answer to me as soon as possible.
"""

kernel = Kernel()

# Note: the underlying gpt-35/gpt-4 model version needs to be at least version 0613 to support tools.
kernel.add_service(OpenAIChatCompletion(service_id="chat"))

plugins_directory = os.path.join(__file__, "../../../../../prompt_template_samples/")
# adding plugins to the kernel
# the joke plugin in the FunPlugins is a semantic plugin and has the function calling disabled.
# kernel.import_plugin_from_prompt_directory("chat", plugins_directory, "FunPlugin")
# the math plugin is a core plugin and has the function calling enabled.
kernel.add_plugin(MathPlugin(), plugin_name="math")
kernel.add_plugin(TimePlugin(), plugin_name="time")

chat_function = kernel.add_function(
    prompt="{{$chat_history}}{{$user_input}}",
    plugin_name="ChatBot",
    function_name="Chat",
)
# enabling or disabling function calling is done by setting the function_call parameter for the completion.
# when the function_call parameter is set to "auto" the model will decide which function to use, if any.
# if you only want to use a specific function, set the name of that function in this parameter,
# the format for that is 'PluginName-FunctionName', (i.e. 'math-Add').
# if the model or api version do not support this you will get an error.

# Note: the number of responses for auto invoking tool calls is limited to 1.
# If configured to be greater than one, this value will be overridden to 1.
execution_settings = OpenAIChatPromptExecutionSettings(
    service_id="chat",
    max_tokens=2000,
    temperature=0.7,
    top_p=0.8,
    function_call_behavior=FunctionCallBehavior.EnableFunctions(
        auto_invoke=True, filters={"included_plugins": ["math", "time"]}
    ),
)

history = ChatHistory()

history.add_system_message(system_message)
history.add_user_message("Hi there, who are you?")
history.add_assistant_message("I am Mosscap, a chat bot. I'm trying to figure out what people need.")

arguments = KernelArguments(settings=execution_settings)


# A filter is a piece of custom code that runs at certain points in the process
# this sample has a filter that is called during Auto Function Invocation
# this filter will be called for each function call in the response.
# You can name the function itself with arbitrary names, but the signature needs to be:
# `context, next`
# You are then free to run code before the call to the next filter or the function itself.
# if you want to terminate the function calling sequence, set context.terminate to True
@kernel.filter(FilterTypes.AUTO_FUNCTION_INVOCATION)
async def auto_function_invocation_filter(context: AutoFunctionInvocationContext, next):
    """A filter that will be called for each function call in the response.

    Logs the function being invoked and its position in the request/function
    sequence, lets the call proceed via ``next``, then overrides the result and
    terminates the sequence whenever a "math" plugin function was called.
    """
    print("\nAuto function invocation filter")
    print(f"Function: {context.function.name}")
    print(f"Request sequence: {context.request_sequence_index}")
    print(f"Function sequence: {context.function_sequence_index}")

    # as an example
    function_calls = context.chat_history.messages[-1].items
    print(f"Number of function calls: {len(function_calls)}")
    # if we don't call next, it will skip this function, and go to the next one
    await next(context)
    result = context.function_result
    for fc in function_calls:
        if fc.plugin_name == "math":
            # Replace the real result and stop any further function calls.
            context.function_result = FunctionResult(
                function=result.function, value="Stop trying to ask me to do math, I don't like it!"
            )
            context.terminate = True


def print_tool_calls(message: ChatMessageContent) -> None:
    """Pretty-print the tool calls contained in *message*.

    This is only triggered if auto invoke tool calls is disabled.
    """
    items = message.items
    formatted_tool_calls = []
    for i, item in enumerate(items, start=1):
        if isinstance(item, FunctionCallContent):
            tool_call_id = item.id
            function_name = item.name
            function_arguments = item.arguments
            formatted_str = (
                f"tool_call {i} id: {tool_call_id}\n"
                f"tool_call {i} function name: {function_name}\n"
                f"tool_call {i} arguments: {function_arguments}"
            )
            formatted_tool_calls.append(formatted_str)
    print("Tool calls:\n" + "\n\n".join(formatted_tool_calls))


async def chat() -> bool:
    """Run one round of the console chat loop.

    Returns False when the user exits (Ctrl-C, EOF, or typing "exit"),
    True otherwise, so the caller knows whether to keep looping.
    """
    try:
        user_input = input("User:> ")
    except KeyboardInterrupt:
        print("\n\nExiting chat...")
        return False
    except EOFError:
        print("\n\nExiting chat...")
        return False

    if user_input == "exit":
        print("\n\nExiting chat...")
        return False
    arguments["user_input"] = user_input
    arguments["chat_history"] = history

    result = await kernel.invoke(chat_function, arguments=arguments)

    # If tools are used, and auto invoke tool calls is False, the response will be of type
    # ChatMessageContent with information about the tool calls, which need to be sent
    # back to the model to get the final response.
    if isinstance(result.value[0].items[0], FunctionCallContent):
        print_tool_calls(result.value[0])
        return True

    history.add_user_message(user_input)
    history.add_assistant_message(str(result))
    print(f"Mosscap:> {result}")
    return True


async def main() -> None:
    """Drive the chat loop until the user exits."""
    chatting = True
    print(
        "Welcome to the chat bot!\
\n  Type 'exit' to exit.\
\n  Try a math question to see the function calling in action (i.e. what is 3+3?)."
    )
    while chatting:
        chatting = await chat()


if __name__ == "__main__":
    asyncio.run(main())
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,79 @@
# Copyright (c) Microsoft. All rights reserved.

import asyncio
import logging
import os
from typing import Any, Callable, Coroutine

from semantic_kernel.connectors.ai.open_ai.services.azure_chat_completion import AzureChatCompletion
from semantic_kernel.contents.chat_history import ChatHistory
from semantic_kernel.exceptions.kernel_exceptions import OperationCancelledException
from semantic_kernel.filters.filter_types import FilterTypes
from semantic_kernel.filters.functions.function_invocation_context import FunctionInvocationContext
from semantic_kernel.kernel import Kernel

logger = logging.getLogger(__name__)


# A filter is a piece of custom code that runs at certain points in the process
# this sample has a filter that is called during Function Invocation for non-streaming function.
# You can name the function itself with arbitrary names, but the signature needs to be:
# `context, next`
# You are then free to run code before the call to the next filter or the function itself.
# and code afterwards.
async def input_output_filter(
    context: FunctionInvocationContext,
    next: Callable[[FunctionInvocationContext], Coroutine[Any, Any, None]],
) -> None:
    """Collect console input before invocation and print the result afterwards.

    Only applies to functions in the "chat" plugin; everything else is passed
    straight through to the next filter.

    Raises:
        OperationCancelledException: when the user interrupts (Ctrl-C/EOF)
            or types "exit".
    """
    if context.function.plugin_name != "chat":
        await next(context)
        return
    try:
        user_input = input("User:> ")
    except (KeyboardInterrupt, EOFError) as exc:
        raise OperationCancelledException("User stopped the operation") from exc
    if user_input == "exit":
        raise OperationCancelledException("User stopped the operation")
    context.arguments["chat_history"].add_user_message(user_input)

    await next(context)

    if context.result:
        logger.info(f'Usage: {context.result.metadata.get("usage")}')
        context.arguments["chat_history"].add_message(context.result.value[0])
        print(f"Mosscap:> {str(context.result)}")


async def main() -> None:
    """Set up a kernel with two function-invocation filters and run the chat loop."""
    kernel = Kernel()
    kernel.add_service(AzureChatCompletion(service_id="chat-gpt"))
    kernel.add_plugin(
        parent_directory=os.path.join(os.path.dirname(os.path.realpath(__file__)), "resources"), plugin_name="chat"
    )
    history = ChatHistory()

    # here we are adding two filters, one that was created earlier, and can be reused and added to other kernels
    # and one created and added in one go through the decorator
    kernel.add_filter("function_invocation", input_output_filter)

    # you can use both the literal term and the FilterTypes enum
    @kernel.filter(filter_type=FilterTypes.FUNCTION_INVOCATION)
    async def exception_catch_filter(
        # NOTE: `next` is a callable that returns a coroutine, not a coroutine itself;
        # the annotation now matches input_output_filter above.
        context: FunctionInvocationContext, next: Callable[[FunctionInvocationContext], Coroutine[Any, Any, None]]
    ):
        """Swallow and log any exception raised further down the filter chain."""
        try:
            await next(context)
        except Exception as e:
            logger.info(e)

    chatting = True
    while chatting:
        # When the user cancels, the exception filter logs the error and the
        # invocation yields a falsy result, which ends the loop.
        chatting = await kernel.invoke(
            function_name="chat",
            plugin_name="chat",
            chat_history=history,
        )


if __name__ == "__main__":
    asyncio.run(main())

0 commit comments

Comments
 (0)