.Net: Support function call content classes for AI streaming API (#6449)
This PR is the second and last one in the series of PRs to leverage
`FunctionCallContent` and `FunctionResultContent` classes to represent
function calls and function results in a service/connector/model
agnostic way. The first PR: [.Net: Function call content
types](#5800) added the
classes and updated the **non-streaming** AI API to use them. This PR
updates the **streaming** API to utilize the classes.
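
For illustration, here is a condensed sketch of the consumption pattern this enables (adapted from the `OpenAI_FunctionCalling` sample in this diff; `chatCompletionService`, `settings`, and `kernel` are assumed to be configured as in that sample):

```csharp
// Accumulate streamed function call updates into complete, connector-agnostic
// FunctionCallContent instances using the FunctionCallContentBuilder.
ChatHistory chatHistory = new();
chatHistory.AddUserMessage("Hi, what is the current time?");

var fccBuilder = new FunctionCallContentBuilder();
await foreach (var update in chatCompletionService.GetStreamingChatMessageContentsAsync(chatHistory, settings, kernel))
{
    fccBuilder.Append(update);
}

// Invoke the assembled function calls and add the results to the chat history.
foreach (FunctionCallContent functionCall in fccBuilder.Build())
{
    FunctionResultContent resultContent = await functionCall.InvokeAsync(kernel);
    chatHistory.Add(resultContent.ToChatMessage());
}
```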

Related ADR: [Function call
content](https://github.com/microsoft/semantic-kernel/blob/main/docs/decisions/0041-function-call-content.md)
Related issue: [.Net: Support FunctionCallContent for
streaming](#5818)

Note: More tests and examples will be added within the scope of this PR.

### Contribution Checklist

<!-- Before submitting this PR, please make sure: -->

- [x] The code builds clean without any errors or warnings
- [x] The PR follows the [SK Contribution
Guidelines](https://github.com/microsoft/semantic-kernel/blob/main/CONTRIBUTING.md)
and the [pre-submission formatting
script](https://github.com/microsoft/semantic-kernel/blob/main/CONTRIBUTING.md#development-scripts)
raises no violations
- [x] All unit tests pass, and I have added new tests where possible
- [x] I didn't break anyone 😄
SergeyMenshykh committed Jun 13, 2024
1 parent 890eab1 commit ae2f72b
Showing 15 changed files with 1,491 additions and 133 deletions.
@@ -1,27 +1,36 @@
// Copyright (c) Microsoft. All rights reserved.

using Microsoft.SemanticKernel;
using Microsoft.SemanticKernel.ChatCompletion;
using Microsoft.SemanticKernel.Connectors.OpenAI;

namespace ChatCompletion;

// The following example shows how to use Semantic Kernel with streaming Chat Completion
/// <summary>
/// These examples demonstrate the ways different content types are streamed by the OpenAI LLM via the chat completion service.
/// </summary>
public class OpenAI_ChatCompletionStreaming(ITestOutputHelper output) : BaseTest(output)
{
/// <summary>
/// This example demonstrates chat completion streaming using OpenAI.
/// </summary>
[Fact]
public Task OpenAIChatStreamSampleAsync()
public Task StreamOpenAIChatAsync()
{
Console.WriteLine("======== Open AI - ChatGPT Streaming ========");
Console.WriteLine("======== Open AI Chat Completion Streaming ========");

OpenAIChatCompletionService chatCompletionService = new(TestConfiguration.OpenAI.ChatModelId, TestConfiguration.OpenAI.ApiKey);

return this.StartStreamingChatAsync(chatCompletionService);
}

/// <summary>
/// This example demonstrates chat completion streaming using Azure OpenAI.
/// </summary>
[Fact]
public Task AzureOpenAIChatStreamSampleAsync()
public Task StreamAzureOpenAIChatAsync()
{
Console.WriteLine("======== Azure Open AI - ChatGPT Streaming ========");
Console.WriteLine("======== Azure Open AI Chat Completion Streaming ========");

AzureOpenAIChatCompletionService chatCompletionService = new(
deploymentName: TestConfiguration.AzureOpenAI.ChatDeploymentName,
@@ -32,24 +41,98 @@ public Task AzureOpenAIChatStreamSampleAsync()
return this.StartStreamingChatAsync(chatCompletionService);
}

/// <summary>
/// This example demonstrates how the chat completion service streams text content.
/// It shows how to access the response update via the StreamingChatMessageContent.Content property
/// and alternatively via the StreamingChatMessageContent.Items property.
/// </summary>
[Fact]
public async Task StreamTextContentAsync()
{
Console.WriteLine("======== Stream Text Content ========");

// Create chat completion service
AzureOpenAIChatCompletionService chatCompletionService = new(
deploymentName: TestConfiguration.AzureOpenAI.ChatDeploymentName,
endpoint: TestConfiguration.AzureOpenAI.Endpoint,
apiKey: TestConfiguration.AzureOpenAI.ApiKey,
modelId: TestConfiguration.AzureOpenAI.ChatModelId);

// Create chat history with initial system and user messages
ChatHistory chatHistory = new("You are a librarian, an expert on books.");
chatHistory.AddUserMessage("Hi, I'm looking for book suggestions.");
chatHistory.AddUserMessage("I love history and philosophy. I'd like to learn something new about Greece, any suggestion?");

// Start streaming chat based on the chat history
await foreach (StreamingChatMessageContent chatUpdate in chatCompletionService.GetStreamingChatMessageContentsAsync(chatHistory))
{
// Access the response update via the StreamingChatMessageContent.Content property
Console.Write(chatUpdate.Content);

// Alternatively, the response update can be accessed via the StreamingChatMessageContent.Items property
Console.Write(chatUpdate.Items.OfType<StreamingTextContent>().FirstOrDefault());
}
}

/// <summary>
/// This example demonstrates how the chat completion service streams raw function call content.
/// See <see cref="FunctionCalling.OpenAI_FunctionCalling.RunStreamingChatAPIWithManualFunctionCallingAsync"/> for a sample demonstrating how to simplify
/// building function call content from streamed function call updates by using the <see cref="FunctionCallContentBuilder"/>.
/// </summary>
[Fact]
public async Task StreamFunctionCallContentAsync()
{
Console.WriteLine("======== Stream Function Call Content ========");

// Create chat completion service
OpenAIChatCompletionService chatCompletionService = new(TestConfiguration.OpenAI.ChatModelId, TestConfiguration.OpenAI.ApiKey);

// Create kernel with helper plugin.
Kernel kernel = new();
kernel.ImportPluginFromFunctions("HelperFunctions",
[
kernel.CreateFunctionFromMethod((string longTestString) => DateTime.UtcNow.ToString("R"), "GetCurrentUtcTime", "Retrieves the current time in UTC."),
]);

// Create execution settings with manual function calling
OpenAIPromptExecutionSettings settings = new() { ToolCallBehavior = ToolCallBehavior.EnableKernelFunctions };

// Create chat history with initial user question
ChatHistory chatHistory = new();
chatHistory.AddUserMessage("Hi, what is the current time?");

// Start streaming chat based on the chat history
await foreach (StreamingChatMessageContent chatUpdate in chatCompletionService.GetStreamingChatMessageContentsAsync(chatHistory, settings, kernel))
{
// Getting the list of function call updates requested by the LLM
var streamingFunctionCallUpdates = chatUpdate.Items.OfType<StreamingFunctionCallUpdateContent>();

// Iterating over the function call updates. Please use the FunctionCallContentBuilder to simplify function call content building.
foreach (StreamingFunctionCallUpdateContent update in streamingFunctionCallUpdates)
{
Console.WriteLine($"Function call update: callId={update.CallId}, name={update.Name}, arguments={update.Arguments?.Replace("\n", "\\n")}, functionCallIndex={update.FunctionCallIndex}");
}
}
}

private async Task StartStreamingChatAsync(IChatCompletionService chatCompletionService)
{
Console.WriteLine("Chat content:");
Console.WriteLine("------------------------");

var chatHistory = new ChatHistory("You are a librarian, expert about books");
await MessageOutputAsync(chatHistory);
OutputLastMessage(chatHistory);

// First user message
chatHistory.AddUserMessage("Hi, I'm looking for book suggestions");
await MessageOutputAsync(chatHistory);
OutputLastMessage(chatHistory);

// First bot assistant message
await StreamMessageOutputAsync(chatCompletionService, chatHistory, AuthorRole.Assistant);

// Second user message
chatHistory.AddUserMessage("I love history and philosophy, I'd like to learn something new about Greece, any suggestion?");
await MessageOutputAsync(chatHistory);
OutputLastMessage(chatHistory);

// Second bot assistant message
await StreamMessageOutputAsync(chatCompletionService, chatHistory, AuthorRole.Assistant);
@@ -82,13 +165,11 @@ await foreach (var chatUpdate in chatCompletionService.GetStreamingChatMessageCo
/// <summary>
/// Outputs the last message of the chat history
/// </summary>
private Task MessageOutputAsync(ChatHistory chatHistory)
private void OutputLastMessage(ChatHistory chatHistory)
{
var message = chatHistory.Last();

Console.WriteLine($"{message.Role}: {message.Content}");
Console.WriteLine("------------------------");

return Task.CompletedTask;
}
}
@@ -5,7 +5,7 @@
using Microsoft.SemanticKernel.Connectors.Google;
using xRetry;

namespace AutoFunctionCalling;
namespace FunctionCalling;

/// <summary>
/// These examples demonstrate two ways functions called by the Gemini LLM can be invoked using the SK streaming and non-streaming AI API:
@@ -18,10 +18,10 @@ namespace AutoFunctionCalling;
///
/// 2. Manual Invocation by a Caller:
/// Functions called by the LLM are returned to the AI API caller. The caller controls the invocation phase where
/// they may decide which function to call, when to call them, how to handle exceptions, etc. The caller then
/// adds the function results or exceptions to the chat history and returns it to the LLM, which reasons about it
/// they may decide which function to call, when to call them, how to handle exceptions, call them in parallel or sequentially, etc.
/// The caller then adds the function results or exceptions to the chat history and returns it to the LLM, which reasons about it
/// and generates the final response.
/// This approach is more manual and requires more manual intervention from the caller.
/// This approach is manual and gives the caller more control over the function invocation phase.
/// </summary>
public sealed class Gemini_FunctionCalling(ITestOutputHelper output) : BaseTest(output)
{
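For contrast with the manual mode described above, the automatic-invocation variant (collapsed in this view) boils down to enabling auto-invocation in the execution settings and letting SK invoke the requested functions itself. A minimal sketch using the OpenAI connector, assuming a kernel with the helper plugin already registered:

// Sketch: automatic function invocation; SK invokes the functions requested by the LLM.
OpenAIPromptExecutionSettings settings = new() { ToolCallBehavior = ToolCallBehavior.AutoInvokeKernelFunctions };
Console.WriteLine(await kernel.InvokePromptAsync("Given the current time of day and weather, what is the likely color of the sky in Boston?", new(settings)));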
95 changes: 84 additions & 11 deletions dotnet/samples/Concepts/FunctionCalling/OpenAI_FunctionCalling.cs
@@ -20,10 +20,10 @@ namespace FunctionCalling;
///
/// 2. Manual Invocation by a Caller:
/// Functions called by the LLM are returned to the AI API caller. The caller controls the invocation phase where
/// they may decide which function to call, when to call them, how to handle exceptions, etc. The caller then
/// adds the function results or exceptions to the chat history and returns it to the LLM, which reasons about it
/// they may decide which function to call, when to call them, how to handle exceptions, call them in parallel or sequentially, etc.
/// The caller then adds the function results or exceptions to the chat history and returns it to the LLM, which reasons about it
/// and generates the final response.
/// This approach is more manual and requires more manual intervention from the caller.
/// This approach is manual and gives the caller more control over the function invocation phase.
/// </summary>
public class OpenAI_FunctionCalling(ITestOutputHelper output) : BaseTest(output)
{
@@ -61,54 +61,127 @@ await foreach (StreamingKernelContent update in kernel.InvokePromptStreamingAsyn
}

/// <summary>
/// This example demonstrates manual function calling with a non-streaming prompt.
/// This example demonstrates manual function calling with a non-streaming chat API.
/// </summary>
[Fact]
public async Task RunNonStreamingPromptWithManualFunctionCallingAsync()
public async Task RunNonStreamingChatAPIWithManualFunctionCallingAsync()
{
Console.WriteLine("Manual function calling with a non-streaming prompt.");

// Create kernel and chat service
Kernel kernel = CreateKernel();

IChatCompletionService chat = kernel.GetRequiredService<IChatCompletionService>();

// Configure the chat service to enable manual function calling
OpenAIPromptExecutionSettings settings = new() { ToolCallBehavior = ToolCallBehavior.EnableKernelFunctions };

// Create chat history with the initial user message
ChatHistory chatHistory = new();
chatHistory.AddUserMessage("Given the current time of day and weather, what is the likely color of the sky in Boston?");

while (true)
{
// Start or continue chat based on the chat history
ChatMessageContent result = await chat.GetChatMessageContentAsync(chatHistory, settings, kernel);
if (result.Content is not null)
{
Console.Write(result.Content);
}

// Get function calls from the chat message content and quit the chat loop if no function calls are found.
IEnumerable<FunctionCallContent> functionCalls = FunctionCallContent.GetFunctionCalls(result);
if (!functionCalls.Any())
{
break;
}

chatHistory.Add(result); // Adding LLM response containing function calls (requests) to the chat history as it's required by LLMs.
// Preserving the original chat message content with function calls in the chat history.
chatHistory.Add(result);

// Iterating over the requested function calls and invoking them
foreach (FunctionCallContent functionCall in functionCalls)
{
try
{
FunctionResultContent resultContent = await functionCall.InvokeAsync(kernel); // Executing each function.
// Invoking the function
FunctionResultContent resultContent = await functionCall.InvokeAsync(kernel);

// Adding the function result to the chat history
chatHistory.Add(resultContent.ToChatMessage());
}
catch (Exception ex)
{
chatHistory.Add(new FunctionResultContent(functionCall, ex).ToChatMessage()); // Adding function result to chat history.
// Adding exception to chat history.
// Adding function exception to the chat history.
chatHistory.Add(new FunctionResultContent(functionCall, ex).ToChatMessage());
// or
//string message = "Error details that LLM can reason about.";
//chatHistory.Add(new FunctionResultContent(functionCall, message).ToChatMessageContent()); // Adding function result to chat history.
//chatHistory.Add(new FunctionResultContent(functionCall, "Error details that LLM can reason about.").ToChatMessage());
}
}

Console.WriteLine();
}
}

/// <summary>
/// This example demonstrates manual function calling with a streaming chat API.
/// </summary>
[Fact]
public async Task RunStreamingChatAPIWithManualFunctionCallingAsync()
{
Console.WriteLine("Manual function calling with a streaming prompt.");

// Create kernel and chat service
Kernel kernel = CreateKernel();

IChatCompletionService chat = kernel.GetRequiredService<IChatCompletionService>();

// Configure the chat service to enable manual function calling
OpenAIPromptExecutionSettings settings = new() { ToolCallBehavior = ToolCallBehavior.EnableKernelFunctions };

// Create chat history with the initial user message
ChatHistory chatHistory = new();
chatHistory.AddUserMessage("Given the current time of day and weather, what is the likely color of the sky in Boston?");

while (true)
{
AuthorRole? authorRole = null;
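// The builder accumulates streamed function call updates and assembles them into complete FunctionCallContent instances.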
var fccBuilder = new FunctionCallContentBuilder();

// Start or continue streaming chat based on the chat history
await foreach (var streamingContent in chat.GetStreamingChatMessageContentsAsync(chatHistory, settings, kernel))
{
if (streamingContent.Content is not null)
{
Console.Write(streamingContent.Content);
}
authorRole ??= streamingContent.Role;
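// Accumulate any function call updates carried by this streamed chunk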
fccBuilder.Append(streamingContent);
}

// Build the function calls from the streaming content and quit the chat loop if no function calls are found
var functionCalls = fccBuilder.Build();
if (!functionCalls.Any())
{
break;
}

// Creating and adding chat message content to preserve the original function calls in the chat history.
// The function calls are added to the chat message a few lines below.
var fcContent = new ChatMessageContent(role: authorRole ?? default, content: null);
chatHistory.Add(fcContent);

// Iterating over the requested function calls and invoking them
foreach (var functionCall in functionCalls)
{
// Adding the original function call to the chat message content
fcContent.Items.Add(functionCall);

// Invoking the function
var functionResult = await functionCall.InvokeAsync(kernel);

// Adding the function result to the chat history
chatHistory.Add(functionResult.ToChatMessage());
}

Console.WriteLine();