# Copyright (c) Microsoft. All rights reserved.

import asyncio
import os

from semantic_kernel import Kernel
from semantic_kernel.connectors.ai.function_call_behavior import FunctionCallBehavior
from semantic_kernel.connectors.ai.open_ai import OpenAIChatCompletion, OpenAIChatPromptExecutionSettings
from semantic_kernel.contents import ChatHistory
from semantic_kernel.contents.chat_message_content import ChatMessageContent
from semantic_kernel.contents.function_call_content import FunctionCallContent
from semantic_kernel.core_plugins import MathPlugin, TimePlugin
from semantic_kernel.filters.auto_function_invocation.auto_function_invocation_context import (
    AutoFunctionInvocationContext,
)
from semantic_kernel.filters.filter_types import FilterTypes
from semantic_kernel.functions import KernelArguments
from semantic_kernel.functions.function_result import FunctionResult

system_message = """
You are a chat bot. Your name is Mosscap and
you have one goal: figure out what people need.
Your full name, should you need to know it, is
Splendid Speckled Mosscap. You communicate
effectively, but you tend to answer with long
flowery prose. You are also a math wizard,
especially for adding and subtracting.
You also excel at joke telling, where your tone is often sarcastic.
Once you have the answer I am looking for,
you will return a full answer to me as soon as possible.
"""

kernel = Kernel()

# Note: the underlying gpt-3.5/gpt-4 model version needs to be at least 0613 to support tools.
kernel.add_service(OpenAIChatCompletion(service_id="chat"))

plugins_directory = os.path.join(os.path.dirname(__file__), "../../../../prompt_template_samples/")
# Adding plugins to the kernel:
# The joke function in the FunPlugin is a semantic (prompt-based) plugin, so it has function calling disabled.
# kernel.add_plugin(parent_directory=plugins_directory, plugin_name="FunPlugin")
# The math plugin is a core plugin and has function calling enabled.
kernel.add_plugin(MathPlugin(), plugin_name="math")
kernel.add_plugin(TimePlugin(), plugin_name="time")
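# For reference (exact function lists may vary by semantic-kernel version): MathPlugin
# exposes simple arithmetic such as Add and Subtract, and TimePlugin exposes date/time
# helpers. Both plugins are advertised to the model via the filters below.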

chat_function = kernel.add_function(
    prompt="{{$chat_history}}{{$user_input}}",
    plugin_name="ChatBot",
    function_name="Chat",
)
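# The prompt template above simply renders the running chat history followed by the
# latest user input, so each invocation sees the full conversation.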
# Enabling or disabling function calling is done by setting the function_call_behavior
# parameter on the execution settings. With EnableFunctions(auto_invoke=True), the model
# decides which of the advertised functions to use, if any. If you only want the model to
# use specific functions, restrict them via the filters; fully qualified function names
# use the 'PluginName-FunctionName' format (e.g. 'math-Add').
# If the model or API version does not support tools, you will get an error.
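# As an illustrative sketch (this assumes the filters dict also accepts an
# "included_functions" key with fully qualified names, which may vary by version),
# restricting the model to a single function could look like:
#
# execution_settings = OpenAIChatPromptExecutionSettings(
#     service_id="chat",
#     function_call_behavior=FunctionCallBehavior.EnableFunctions(
#         auto_invoke=True, filters={"included_functions": ["math-Add"]}
#     ),
# )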

# Note: the number of responses for auto invoking tool calls is limited to 1.
# If configured to be greater than 1, this value will be overridden to 1.
execution_settings = OpenAIChatPromptExecutionSettings(
    service_id="chat",
    max_tokens=2000,
    temperature=0.7,
    top_p=0.8,
    function_call_behavior=FunctionCallBehavior.EnableFunctions(
        auto_invoke=True, filters={"included_plugins": ["math", "time"]}
    ),
)

history = ChatHistory()

history.add_system_message(system_message)
history.add_user_message("Hi there, who are you?")
history.add_assistant_message("I am Mosscap, a chat bot. I'm trying to figure out what people need.")

arguments = KernelArguments(settings=execution_settings)


# A filter is a piece of custom code that runs at certain points in the pipeline.
# This sample registers a filter that is called during auto function invocation,
# once for each function call in the response.
# The function itself can have any name, but its signature needs to be `(context, next)`.
# You are then free to run code before calling `next` (the next filter, or the function itself).
# If you want to terminate the function-calling sequence, set `context.terminate` to True.
@kernel.filter(FilterTypes.AUTO_FUNCTION_INVOCATION)
async def auto_function_invocation_filter(context: AutoFunctionInvocationContext, next):
    """A filter that will be called for each function call in the response."""
    print("\nAuto function invocation filter")
    print(f"Function: {context.function.name}")
    print(f"Request sequence: {context.request_sequence_index}")
    print(f"Function sequence: {context.function_sequence_index}")

    # As an example, inspect the function calls contained in the most recent message.
    function_calls = context.chat_history.messages[-1].items
    print(f"Number of function calls: {len(function_calls)}")
    # If we don't call next, this function invocation (and any remaining filters) is
    # skipped, and execution moves on to the next function call.
    await next(context)
    result = context.function_result
    for fc in function_calls:
        if fc.plugin_name == "math":
            # Override the math result and stop the function-calling loop.
            context.function_result = FunctionResult(
                function=result.function, value="Stop trying to ask me to do math, I don't like it!"
            )
            context.terminate = True


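# A hypothetical variant (a sketch, not part of this sample): to skip a call entirely
# instead of overriding its result, a filter could terminate without calling `next`.
# Whether downstream code tolerates the missing function result may depend on the
# semantic-kernel version, so treat this as an illustration only:
#
# @kernel.filter(FilterTypes.AUTO_FUNCTION_INVOCATION)
# async def block_time_plugin(context: AutoFunctionInvocationContext, next):
#     if context.function.plugin_name == "time":
#         context.terminate = True
#         return
#     await next(context)

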
def print_tool_calls(message: ChatMessageContent) -> None:
    """A helper method to pretty print the tool calls from the message.

    This is only triggered if auto invoking tool calls is disabled.
    """
    items = message.items
    formatted_tool_calls = []
    for i, item in enumerate(items, start=1):
        if isinstance(item, FunctionCallContent):
            tool_call_id = item.id
            function_name = item.name
            function_arguments = item.arguments
            formatted_str = (
                f"tool_call {i} id: {tool_call_id}\n"
                f"tool_call {i} function name: {function_name}\n"
                f"tool_call {i} arguments: {function_arguments}"
            )
            formatted_tool_calls.append(formatted_str)
    print("Tool calls:\n" + "\n\n".join(formatted_tool_calls))


async def chat() -> bool:
    try:
        user_input = input("User:> ")
    except (KeyboardInterrupt, EOFError):
        print("\n\nExiting chat...")
        return False

    if user_input == "exit":
        print("\n\nExiting chat...")
        return False
    arguments["user_input"] = user_input
    arguments["chat_history"] = history

    result = await kernel.invoke(chat_function, arguments=arguments)

    # If tools are used and auto invoking tool calls is disabled, the response will be a
    # ChatMessageContent with information about the tool calls, which needs to be sent
    # back to the model to get the final response.
    if isinstance(result.value[0].items[0], FunctionCallContent):
        print_tool_calls(result.value[0])
        return True

    history.add_user_message(user_input)
    history.add_assistant_message(str(result))
    print(f"Mosscap:> {result}")
    return True


async def main() -> None:
    chatting = True
    print(
        "Welcome to the chat bot!\n"
        " Type 'exit' to exit.\n"
        " Try a math question to see function calling in action (e.g. what is 3+3?)."
    )
    while chatting:
        chatting = await chat()


if __name__ == "__main__":
    asyncio.run(main())
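# Note on running this sample (an assumption about this connector version, not stated in
# the file itself): OpenAIChatCompletion typically reads its credentials and model id from
# environment variables such as OPENAI_API_KEY and OPENAI_CHAT_MODEL_ID, or from a .env file.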