# Imports were collapsed onto one line in the original (a syntax error);
# reconstructed and grouped stdlib / third-party per convention.
import asyncio

import nest_asyncio

from semantic_kernel import Kernel
from semantic_kernel.connectors.ai.chat_completion_client_base import ChatCompletionClientBase
from semantic_kernel.connectors.ai.function_choice_behavior import FunctionChoiceBehavior
from semantic_kernel.connectors.ai.ollama import OllamaChatCompletion
from semantic_kernel.connectors.ai.ollama.ollama_prompt_execution_settings import OllamaChatPromptExecutionSettings
from semantic_kernel.contents.chat_history import ChatHistory
from semantic_kernel.functions import kernel_function
from semantic_kernel.functions.kernel_arguments import KernelArguments
from semantic_kernel.utils.logging import setup_logging

# Patch the event-loop policy so asyncio.run() also works inside hosts that
# already run a loop (e.g. Jupyter notebooks).
nest_asyncio.apply()
from typing import Annotated

from semantic_kernel.functions import kernel_function


class LightsPlugin:
    """Semantic Kernel plugin exposing a small in-memory set of lights.

    The model can call ``get_lights`` to inspect the lights and
    ``change_state`` to toggle one of them.
    """

    # Demo state: each light has an id, a display name, and an on/off flag.
    # NOTE(review): class-level attribute, so the state is shared by every
    # instance of the plugin.
    lights = [
        {"id": 1, "name": "台灯", "is_on": False},
        {"id": 2, "name": "玄关灯", "is_on": False},
        {"id": 3, "name": "吊灯", "is_on": True},
    ]

    @kernel_function(
        name="get_lights",
        description="Gets a list of lights and their current state",
    )
    def get_state(
        self,
    ) -> Annotated[str, "the output is a string"]:
        """Gets a list of lights and their current state.

        Returns:
            The list of light dicts. (The ``str`` annotation is kept for
            compatibility with the original sample, but the actual value
            returned is ``self.lights``, a list.)
        """
        return self.lights

    @kernel_function(
        name="change_state",
        description="Changes the state of the light",
    )
    def change_state(
        self,
        id: int,
        is_on: bool,
    ) -> Annotated[str, "the output is a string"]:
        """Changes the state of the light.

        Args:
            id: Identifier of the light to update. (Shadows the builtin
                ``id``, but the name is part of the function-calling schema
                the model sees, so it is kept.)
            is_on: Desired on/off state.

        Returns:
            The updated light dict, or None when no light matches ``id``.
        """
        for light in self.lights:
            if light["id"] == id:
                light["is_on"] = is_on
                return light
        return None
import logging


async def main():
    """Run an interactive chat loop against a local Ollama model, with
    automatic function calling into the Lights plugin."""
    # Initialize the kernel
    kernel = Kernel()

    # Add the Ollama chat-completion service.
    # (The original comment said "Azure OpenAI", but this is Ollama.)
    chat_completion = OllamaChatCompletion(
        ai_model_id="qwen2.5:latest",
        host="http://192.168.3.155:11434",
    )
    kernel.add_service(chat_completion)

    # Set the logging level for the kernel logger to DEBUG.
    setup_logging()
    logging.getLogger("kernel").setLevel(logging.DEBUG)

    # Register the lights plugin so the model can call its functions.
    kernel.add_plugin(
        LightsPlugin(),
        plugin_name="Lights",
    )

    # Enable planning: let the model choose which registered functions
    # to invoke automatically.
    execution_settings = OllamaChatPromptExecutionSettings()
    execution_settings.function_choice_behavior = FunctionChoiceBehavior.Auto()

    # Conversation history accumulated across turns.
    history = ChatHistory()

    # Back-and-forth chat loop; the user types "exit" to quit.
    while True:
        # Collect user input
        user_input = input("User > ")

        # Terminate the loop if the user says "exit"
        if user_input == "exit":
            break

        # Add user input to the history
        history.add_user_message(user_input)

        # Get the response from the AI (this may trigger plugin calls).
        result = await chat_completion.get_chat_message_content(
            chat_history=history,
            settings=execution_settings,
            kernel=kernel,
        )

        # Print the results
        print("Assistant > " + str(result))

        # Keep the assistant reply in the history for the next turn.
        history.add_message(result)


# Run the main function
if __name__ == "__main__":
    # BUG FIX: the original called asyncio.get_running_loop() here, which
    # raises RuntimeError when no loop is running in a plain script.
    # asyncio.run() creates and manages the loop itself (and
    # nest_asyncio.apply() covers hosts that already run one).
    asyncio.run(main())
User > 我是小五,住在广州
Assistant > 很高兴认识你,小五!我叫Qwen,是一名来自阿里云的智能助手。如果你在广州有任何需要帮助的地方,都可以告诉我哦!比如你需要找附近的餐厅、景点推荐或者是天气查询等等。有什么我可以帮到你的吗?
User > 家里的灯都开着吗?
Assistant > 目前你家的灯光状态如下:
- 台灯是关闭的
- 玄关灯是关闭的
- 吊灯是开启的
需要我帮你调整灯光的状态吗?比如关闭开着的灯或者打开关闭的灯。
User > 打开台灯,关闭吊灯
Assistant > 好的,已经调整好了灯光状态:
- 台灯现在是开着的。
- 吊灯现在是关着的。
还有其他需要帮忙的地方吗?比如再调整其他灯或者是查询其他信息。
User > 灯光状态
Assistant > 当前家里的灯光状态如下:
- 台灯:开启
- 玄关灯:关闭
- 吊灯:关闭
如果你还需要进一步的操作或者其他帮助,请告诉我!
User > 我是谁?
Assistant > 你是小五,你住在广州。有什么其他问题或者需要帮助的吗?比如天气查询、生活建议等,都可以告诉我哦!
User > exit
# Get the response from the AI
result = await chat_completion.get_chat_message_content(
    chat_history=history,
    settings=execution_settings,
    kernel=kernel,
)
Semantic Kernel 的关键是 kernel 类,其中包含不同的组件:
AI Service Connectors:Semantic Kernel 提供了一个抽象层,以统一的接口接入不同类型的 AI 服务,如 openai、ollama 等,支持的服务包括聊天、文本生成、文本 <-> 图片,文本 <-> 音频
Vector Store (Memory) Connectors:同样是抽象层,统一的接口接入不同的向量数据库
Functions and Plugins:插件是命名的函数容器,每个插件可以包含一个或多个函数
Prompt Templates:允许开发人员或提示工程师创建一个模板,该模板将 AI 的上下文和指令与用户输入和函数输出混合在一起。