Merge branch 'main' into feature_agent_serialization
crickman authored Nov 8, 2024
2 parents 8b877fd + 05f4589 commit e275908
Showing 4 changed files with 130 additions and 4 deletions.
1 change: 1 addition & 0 deletions python/samples/concepts/README.md
@@ -7,6 +7,7 @@ This section contains code snippets that demonstrate the usage of Semantic Kernel
| Agents | Creating and using [agents](../../semantic_kernel/agents/) in Semantic Kernel |
| AutoFunctionCalling | Using `Auto Function Calling` to allow function call capable models to invoke Kernel Functions automatically |
| ChatCompletion | Using [`ChatCompletion`](https://github.com/microsoft/semantic-kernel/blob/main/python/semantic_kernel/connectors/ai/chat_completion_client_base.py) messaging capable service with models |
| ChatHistory | Using and serializing the [`ChatHistory`](https://github.com/microsoft/semantic-kernel/blob/main/python/semantic_kernel/contents/chat_history.py) |
| Filtering | Creating and using Filters |
| Functions | Invoking [`Method`](https://github.com/microsoft/semantic-kernel/blob/main/python/semantic_kernel/functions/kernel_function_from_method.py) or [`Prompt`](https://github.com/microsoft/semantic-kernel/blob/main/python/semantic_kernel/functions/kernel_function_from_prompt.py) functions with [`Kernel`](https://github.com/microsoft/semantic-kernel/blob/main/python/semantic_kernel/kernel.py) |
| Grounding | An example of how to perform LLM grounding |
95 changes: 95 additions & 0 deletions python/samples/concepts/chat_history/serialize_chat_history.py
@@ -0,0 +1,95 @@
# Copyright (c) Microsoft. All rights reserved.

import asyncio

from semantic_kernel import Kernel
from semantic_kernel.connectors.ai.function_choice_behavior import FunctionChoiceBehavior
from semantic_kernel.connectors.ai.open_ai.prompt_execution_settings.azure_chat_prompt_execution_settings import (
AzureChatPromptExecutionSettings,
)
from semantic_kernel.connectors.ai.open_ai.services.azure_chat_completion import AzureChatCompletion
from semantic_kernel.contents import ChatHistory
from semantic_kernel.core_plugins.math_plugin import MathPlugin
from semantic_kernel.core_plugins.time_plugin import TimePlugin
from semantic_kernel.functions import KernelArguments


system_message = """
You are a chat bot. Your name is Mosscap and
you have one goal: figure out what people need.
Your full name, should you need to know it, is
Splendid Speckled Mosscap. You communicate
effectively, but you tend to answer with long
flowery prose. You are also a math wizard,
especially for adding and subtracting.
You also excel at joke telling, where your tone is often sarcastic.
Once you have the answer I am looking for,
you will return a full answer to me as soon as possible.
"""

kernel = Kernel()

# Note: the underlying gpt-35/gpt-4 model version needs to be at least version 0613 to support tools.
kernel.add_service(AzureChatCompletion(service_id="chat"))

# Add the math and time plugins to the kernel
kernel.add_plugin(MathPlugin(), plugin_name="math")
kernel.add_plugin(TimePlugin(), plugin_name="time")

# Enabling or disabling function calling is done by setting the `function_choice_behavior` attribute for the
# prompt execution settings. When the behavior is set to "auto", the model decides which
# functions to call, if any.
#
# There are two ways to define the `function_choice_behavior` parameter:
# 1. Using the type string as `"auto"`, `"required"`, or `"none"`. For example:
# configure `function_choice_behavior="auto"` parameter directly in the execution settings.
# 2. Using the FunctionChoiceBehavior class. For example:
# `function_choice_behavior=FunctionChoiceBehavior.Auto()`.
# Both of these configure the `auto` tool_choice and all of the available plugins/functions
# registered on the kernel. If you want to limit the available plugins/functions, you must
# configure the `filters` dictionary attribute for each type of function choice behavior.
# For example:
#
# from semantic_kernel.connectors.ai.function_choice_behavior import FunctionChoiceBehavior
#
# function_choice_behavior = FunctionChoiceBehavior.Auto(
#     filters={"included_functions": ["time-date", "time-time", "math-Add"]}
# )
#
# The filters attribute allows you to specify either: `included_functions`, `excluded_functions`,
# `included_plugins`, or `excluded_plugins`.

# Note: the number of responses for auto-invoking tool calls is limited to 1.
# If configured to be greater than 1, this value will be overridden to 1.
execution_settings = AzureChatPromptExecutionSettings(
service_id="chat",
max_tokens=2000,
temperature=0.7,
top_p=0.8,
function_choice_behavior=FunctionChoiceBehavior.Auto(),
)

arguments = KernelArguments(settings=execution_settings)


async def main() -> None:
user_input = "What is the current hour plus 10?"
print(f"User:> {user_input}")

result = await kernel.invoke_prompt(prompt=user_input, arguments=arguments)

print(f"Mosscap:> {result}")

print("\nChat history:")
chat_history: ChatHistory = result.metadata["messages"]
print(chat_history.serialize())


if __name__ == "__main__":
asyncio.run(main())
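
The sample serializes the history but stops short of restoring it. As a companion, here is a minimal round-trip sketch using the `ChatHistory.restore_chat_history` method exercised by the unit tests in this commit; persisting to a file and the `chat_history.json` path are illustrative assumptions, not part of the commit:

# A minimal persistence sketch (not part of this commit). The file name is an
# illustrative assumption; serialize()/restore_chat_history() are the real APIs.
from semantic_kernel.contents import ChatHistory


def save_history(chat_history: ChatHistory, path: str = "chat_history.json") -> None:
    # serialize() returns the history as a JSON string (inner_content is excluded).
    with open(path, "w") as f:
        f.write(chat_history.serialize())


def load_history(path: str = "chat_history.json") -> ChatHistory:
    # restore_chat_history() parses the JSON string back into a ChatHistory.
    with open(path) as f:
        return ChatHistory.restore_chat_history(f.read())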
4 changes: 3 additions & 1 deletion python/semantic_kernel/contents/kernel_content.py
@@ -13,7 +13,9 @@
class KernelContent(KernelBaseModel, ABC):
"""Base class for all kernel contents."""

inner_content: Any | None = None
# NOTE: if you wish to hold on to the inner content, you are responsible
# for saving it before serializing the content/chat history as it won't be included.
inner_content: Any | None = Field(None, exclude=True)
ai_model_id: str | None = None
metadata: dict[str, Any] = Field(default_factory=dict)

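For context on the change above: `exclude=True` is standard Pydantic behavior that drops the field from `model_dump()`/`model_dump_json()` output, which the chat history's JSON serialization builds on. A self-contained sketch of the effect, using a hypothetical stand-in model rather than the real `KernelContent`:

# Hypothetical stand-in for KernelContent, showing the effect of Field(exclude=True).
from typing import Any

from pydantic import BaseModel, Field


class Content(BaseModel):
    inner_content: Any | None = Field(None, exclude=True)
    ai_model_id: str | None = None


content = Content(inner_content={"raw": "provider response"}, ai_model_id="gpt-4")
print(content.model_dump_json())  # prints {"ai_model_id":"gpt-4"}; inner_content is omitted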
34 changes: 31 additions & 3 deletions python/tests/unit/contents/test_chat_history.py
@@ -2,6 +2,8 @@


import pytest
from openai.types.chat.chat_completion import ChatCompletion, Choice
from openai.types.chat.chat_completion_message import ChatCompletionMessage

from semantic_kernel.contents.chat_history import ChatHistory
from semantic_kernel.contents.chat_message_content import ChatMessageContent
@@ -18,6 +20,19 @@
from semantic_kernel.prompt_template.prompt_template_config import PromptTemplateConfig


@pytest.fixture
def mock_chat_completion_response() -> ChatCompletion:
return ChatCompletion(
id="test_id",
choices=[
Choice(index=0, message=ChatCompletionMessage(content="test", role="assistant"), finish_reason="stop")
],
created=0,
model="test",
object="chat.completion",
)


def test_init_with_system_message_only():
system_msg = "test message"
chat_history = ChatHistory(system_message=system_msg)
@@ -264,13 +279,26 @@ def test_serialize():
)


def test_serialize_and_deserialize_to_chat_history():
def test_serialize_and_deserialize_to_chat_history(mock_chat_completion_response: ChatCompletion):
system_msg = "a test system prompt"
msgs = [ChatMessageContent(role=AuthorRole.USER, content=f"Message {i}") for i in range(3)]
msgs = [
ChatMessageContent(
role=AuthorRole.USER,
content=f"Message {i}",
inner_content=mock_chat_completion_response,
)
for i in range(3)
]
chat_history = ChatHistory(messages=msgs, system_message=system_msg)

json_str = chat_history.serialize()
new_chat_history = ChatHistory.restore_chat_history(json_str)
assert new_chat_history == chat_history

assert len(new_chat_history.messages) == len(chat_history.messages)

for original_msg, restored_msg in zip(chat_history.messages, new_chat_history.messages):
assert original_msg.role == restored_msg.role
assert original_msg.content == restored_msg.content


def test_deserialize_invalid_json_raises_exception():
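The reworked assertions follow directly from the `exclude=True` change: once messages carry `inner_content`, a restored history can no longer compare equal to the original, so the test checks roles and content field by field. A minimal illustration of the asymmetry, assuming a `chat_history` built as in the test above:

# Why the plain equality assert was replaced: serialization drops inner_content,
# so the restored messages differ from originals that still hold it.
restored = ChatHistory.restore_chat_history(chat_history.serialize())
assert chat_history.messages[0].inner_content is not None  # still on the original
assert restored.messages[0].inner_content is None  # dropped during serialization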
