
Commit b8a47a8

[.Net] fix #3014 by adding local model function call in dotnet website (#3044)
* add instruction in ollama-litellm function call example
* add tutorial
* fix tests
1 parent: 23c1dec · commit: b8a47a8

11 files changed (+199 −153 lines)

Example13_OpenAIAgent_JsonMode.cs
@@ -1,68 +1,3 @@
 // Copyright (c) Microsoft Corporation. All rights reserved.
 // Example13_OpenAIAgent_JsonMode.cs
-
-using System.Text.Json;
-using System.Text.Json.Serialization;
-using AutoGen.Core;
-using AutoGen.OpenAI;
-using AutoGen.OpenAI.Extension;
-using Azure.AI.OpenAI;
-using FluentAssertions;
-
-namespace AutoGen.BasicSample;
-
-public class Example13_OpenAIAgent_JsonMode
-{
-    public static async Task RunAsync()
-    {
-        #region create_agent
-        var config = LLMConfiguration.GetAzureOpenAIGPT3_5_Turbo(deployName: "gpt-35-turbo"); // json mode only works with 0125 and later model.
-        var apiKey = config.ApiKey;
-        var endPoint = new Uri(config.Endpoint);
-
-        var openAIClient = new OpenAIClient(endPoint, new Azure.AzureKeyCredential(apiKey));
-        var openAIClientAgent = new OpenAIChatAgent(
-            openAIClient: openAIClient,
-            name: "assistant",
-            modelName: config.DeploymentName,
-            systemMessage: "You are a helpful assistant designed to output JSON.",
-            seed: 0, // explicitly set a seed to enable deterministic output
-            responseFormat: ChatCompletionsResponseFormat.JsonObject) // set response format to JSON object to enable JSON mode
-            .RegisterMessageConnector()
-            .RegisterPrintMessage();
-        #endregion create_agent
-
-        #region chat_with_agent
-        var reply = await openAIClientAgent.SendAsync("My name is John, I am 25 years old, and I live in Seattle.");
-
-        var person = JsonSerializer.Deserialize<Person>(reply.GetContent());
-        Console.WriteLine($"Name: {person.Name}");
-        Console.WriteLine($"Age: {person.Age}");
-
-        if (!string.IsNullOrEmpty(person.Address))
-        {
-            Console.WriteLine($"Address: {person.Address}");
-        }
-
-        Console.WriteLine("Done.");
-        #endregion chat_with_agent
-
-        person.Name.Should().Be("John");
-        person.Age.Should().Be(25);
-        person.Address.Should().BeNullOrEmpty();
-    }
-}
-
-#region person_class
-public class Person
-{
-    [JsonPropertyName("name")]
-    public string Name { get; set; }
-
-    [JsonPropertyName("age")]
-    public int Age { get; set; }
-
-    [JsonPropertyName("address")]
-    public string Address { get; set; }
-}
-#endregion person_class
+// this example has been moved to https://github.com/microsoft/autogen/blob/main/dotnet/sample/AutoGen.OpenAI.Sample/Use_Json_Mode.cs

Example16_OpenAIChatAgent_ConnectToThirdPartyBackend.cs
@@ -1,62 +1,3 @@
 // Copyright (c) Microsoft Corporation. All rights reserved.
 // Example16_OpenAIChatAgent_ConnectToThirdPartyBackend.cs
-#region using_statement
-using AutoGen.Core;
-using AutoGen.OpenAI;
-using AutoGen.OpenAI.Extension;
-using Azure.AI.OpenAI;
-using Azure.Core.Pipeline;
-#endregion using_statement
-
-namespace AutoGen.BasicSample;
-
-#region CustomHttpClientHandler
-public sealed class CustomHttpClientHandler : HttpClientHandler
-{
-    private string _modelServiceUrl;
-
-    public CustomHttpClientHandler(string modelServiceUrl)
-    {
-        _modelServiceUrl = modelServiceUrl;
-    }
-
-    protected override Task<HttpResponseMessage> SendAsync(HttpRequestMessage request, CancellationToken cancellationToken)
-    {
-        request.RequestUri = new Uri($"{_modelServiceUrl}{request.RequestUri.PathAndQuery}");
-
-        return base.SendAsync(request, cancellationToken);
-    }
-}
-#endregion CustomHttpClientHandler
-
-public class Example16_OpenAIChatAgent_ConnectToThirdPartyBackend
-{
-    public static async Task RunAsync()
-    {
-        #region create_agent
-        using var client = new HttpClient(new CustomHttpClientHandler("http://localhost:11434"));
-        var option = new OpenAIClientOptions(OpenAIClientOptions.ServiceVersion.V2024_04_01_Preview)
-        {
-            Transport = new HttpClientTransport(client),
-        };
-
-        // api-key is not required for local server
-        // so you can use any string here
-        var openAIClient = new OpenAIClient("api-key", option);
-        var model = "llama3";
-
-        var agent = new OpenAIChatAgent(
-            openAIClient: openAIClient,
-            name: "assistant",
-            modelName: model,
-            systemMessage: "You are a helpful assistant designed to output JSON.",
-            seed: 0)
-            .RegisterMessageConnector()
-            .RegisterPrintMessage();
-        #endregion create_agent
-
-        #region send_message
-        await agent.SendAsync("Can you write a piece of C# code to calculate 100th of fibonacci?");
-        #endregion send_message
-    }
-}
+// this example has been moved to https://github.com/microsoft/autogen/blob/main/dotnet/sample/AutoGen.OpenAI.Sample/Connect_To_Ollama.cs

dotnet/sample/AutoGen.OpenAI.Sample/Tool_Call_With_Ollama_And_LiteLLM.cs (+21 −8)
@@ -8,6 +8,7 @@
 
 namespace AutoGen.OpenAI.Sample;
 
+#region Function
 public partial class Function
 {
     [Function]
@@ -16,25 +17,37 @@ public async Task<string> GetWeatherAsync(string city)
         return await Task.FromResult("The weather in " + city + " is 72 degrees and sunny.");
     }
 }
+#endregion Function
+
 public class Tool_Call_With_Ollama_And_LiteLLM
 {
     public static async Task RunAsync()
     {
-        #region Create_Agent
-        var liteLLMUrl = "http://localhost:4000";
-        using var httpClient = new HttpClient(new CustomHttpClientHandler(liteLLMUrl));
-        var option = new OpenAIClientOptions(OpenAIClientOptions.ServiceVersion.V2024_04_01_Preview)
-        {
-            Transport = new HttpClientTransport(httpClient),
-        };
+        // Before running this code, make sure you have
+        // - Ollama:
+        //   - Install dolphincoder:latest in Ollama
+        //   - Ollama running on http://localhost:11434
+        // - LiteLLM
+        //   - Install LiteLLM
+        //   - Start LiteLLM with the following command:
+        //   - litellm --model ollama_chat/dolphincoder --port 4000
 
+        #region Create_tools
         var functions = new Function();
         var functionMiddleware = new FunctionCallMiddleware(
             functions: [functions.GetWeatherAsyncFunctionContract],
             functionMap: new Dictionary<string, Func<string, Task<string>>>
             {
                 { functions.GetWeatherAsyncFunctionContract.Name!, functions.GetWeatherAsyncWrapper },
             });
+        #endregion Create_tools
+        #region Create_Agent
+        var liteLLMUrl = "http://localhost:4000";
+        using var httpClient = new HttpClient(new CustomHttpClientHandler(liteLLMUrl));
+        var option = new OpenAIClientOptions(OpenAIClientOptions.ServiceVersion.V2024_04_01_Preview)
+        {
+            Transport = new HttpClientTransport(httpClient),
+        };
 
         // api-key is not required for local server
         // so you can use any string here
@@ -43,7 +56,7 @@ public static async Task RunAsync()
         var agent = new OpenAIChatAgent(
             openAIClient: openAIClient,
             name: "assistant",
-            modelName: "placeholder",
+            modelName: "dolphincoder:latest",
             systemMessage: "You are a helpful AI assistant")
             .RegisterMessageConnector()
             .RegisterMiddleware(functionMiddleware)

dotnet/sample/AutoGen.OpenAI.Sample/Use_Json_Mode.cs (new file, +67 lines)
@@ -0,0 +1,67 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Example13_OpenAIAgent_JsonMode.cs

using System.Text.Json;
using System.Text.Json.Serialization;
using AutoGen.Core;
using AutoGen.OpenAI;
using AutoGen.OpenAI.Extension;
using Azure.AI.OpenAI;
using FluentAssertions;

namespace AutoGen.BasicSample;

public class Use_Json_Mode
{
    public static async Task RunAsync()
    {
        #region create_agent
        var apiKey = Environment.GetEnvironmentVariable("OPENAI_API_KEY") ?? throw new Exception("Please set OPENAI_API_KEY environment variable.");
        var model = "gpt-3.5-turbo";

        var openAIClient = new OpenAIClient(apiKey);
        var openAIClientAgent = new OpenAIChatAgent(
            openAIClient: openAIClient,
            name: "assistant",
            modelName: model,
            systemMessage: "You are a helpful assistant designed to output JSON.",
            seed: 0, // explicitly set a seed to enable deterministic output
            responseFormat: ChatCompletionsResponseFormat.JsonObject) // set response format to JSON object to enable JSON mode
            .RegisterMessageConnector()
            .RegisterPrintMessage();
        #endregion create_agent

        #region chat_with_agent
        var reply = await openAIClientAgent.SendAsync("My name is John, I am 25 years old, and I live in Seattle.");

        var person = JsonSerializer.Deserialize<Person>(reply.GetContent());
        Console.WriteLine($"Name: {person.Name}");
        Console.WriteLine($"Age: {person.Age}");

        if (!string.IsNullOrEmpty(person.Address))
        {
            Console.WriteLine($"Address: {person.Address}");
        }

        Console.WriteLine("Done.");
        #endregion chat_with_agent

        person.Name.Should().Be("John");
        person.Age.Should().Be(25);
        person.Address.Should().BeNullOrEmpty();
    }
}

#region person_class
public class Person
{
    [JsonPropertyName("name")]
    public string Name { get; set; }

    [JsonPropertyName("age")]
    public int Age { get; set; }

    [JsonPropertyName("address")]
    public string Address { get; set; }
}
#endregion person_class

dotnet/test/AutoGen.OpenAI.Tests/AutoGen.OpenAI.Tests.csproj (+1)
@@ -8,6 +8,7 @@
   </PropertyGroup>
 
   <ItemGroup>
+    <ProjectReference Include="..\..\sample\AutoGen.OpenAI.Sample\AutoGen.OpenAI.Sample.csproj" />
     <ProjectReference Include="..\..\src\AutoGen.SourceGenerator\AutoGen.SourceGenerator.csproj" OutputItemType="Analyzer" ReferenceOutputAssembly="false" />
     <ProjectReference Include="..\..\src\AutoGen\AutoGen.csproj" />
     <ProjectReference Include="..\AutoGen.Tests\AutoGen.Tests.csproj" />

dotnet/test/AutoGen.Tests/BasicSampleTest.cs (−5)
@@ -37,11 +37,6 @@ public async Task AgentFunctionCallTestAsync()
         await Example03_Agent_FunctionCall.RunAsync();
     }
 
-    [ApiKeyFact("AZURE_OPENAI_API_KEY", "AZURE_OPENAI_ENDPOINT", "AZURE_OPENAI_DEPLOY_NAME")]
-    public async Task OpenAIAgent_JsonMode()
-    {
-        await Example13_OpenAIAgent_JsonMode.RunAsync();
-    }
 
     [ApiKeyFact("MISTRAL_API_KEY")]
     public async Task MistralClientAgent_TokenCount()

New file (+93 lines): tutorial page for function calls with Ollama and LiteLLM
@@ -0,0 +1,93 @@

This example shows how to use function calls with local LLM models, using [Ollama](https://ollama.com/) as the local model provider and [LiteLLM](https://docs.litellm.ai/docs/) as a proxy server that exposes an OpenAI-API-compatible interface.

[![](https://img.shields.io/badge/Open%20on%20Github-grey?logo=github)](https://github.com/microsoft/autogen/blob/main/dotnet/sample/AutoGen.OpenAI.Sample/Tool_Call_With_Ollama_And_LiteLLM.cs)

To run this example, the following prerequisites are required:
- Install [Ollama](https://ollama.com/) and [LiteLLM](https://docs.litellm.ai/docs/) on your local machine.
- A local model that supports function calls. This example uses `dolphincoder:latest`.

## Install Ollama and pull the `dolphincoder:latest` model

First, install Ollama by following the instructions on the [Ollama website](https://ollama.com/).

After installing Ollama, pull the `dolphincoder:latest` model by running the following command:

```bash
ollama pull dolphincoder:latest
```

## Install LiteLLM and start the proxy server

You can install LiteLLM by following the instructions on the [LiteLLM website](https://docs.litellm.ai/docs/).

```bash
pip install 'litellm[proxy]'
```

Then, start the proxy server by running the following command:

```bash
litellm --model ollama_chat/dolphincoder --port 4000
```

This starts an OpenAI-API-compatible proxy server at `http://localhost:4000`. You can verify that the server is running by looking for the following output in the terminal:

```bash
#------------------------------------------------------------#
#                                                            #
#       'The worst thing about this product is...'           #
#        https://github.com/BerriAI/litellm/issues/new        #
#                                                            #
#------------------------------------------------------------#

INFO: Application startup complete.
INFO: Uvicorn running on http://0.0.0.0:4000 (Press CTRL+C to quit)
```

## Install AutoGen and AutoGen.SourceGenerator

In your project, install the AutoGen and AutoGen.SourceGenerator packages using the following commands:

```bash
dotnet add package AutoGen
dotnet add package AutoGen.SourceGenerator
```

The `AutoGen.SourceGenerator` package automatically generates type-safe `FunctionContract`s so you don't have to define them manually. For more information, see [Create type-safe function](Create-type-safe-function-call.md).

Then, in your project file, enable structured XML documentation support by setting the `GenerateDocumentationFile` property to `true`:

```xml
<PropertyGroup>
    <!-- This enables structured XML documentation support -->
    <GenerateDocumentationFile>true</GenerateDocumentationFile>
</PropertyGroup>
```

## Define the `GetWeatherAsync` function and create a @AutoGen.Core.FunctionCallMiddleware

Create a `public partial` class to host the methods you want to use in AutoGen agents. Each method must be a `public` instance method whose return type is `Task<string>`. After the methods are defined, mark them with the `AutoGen.Core.FunctionAttribute` attribute.

[!code-csharp[Define GetWeatherAsync function](../../sample/AutoGen.OpenAI.Sample/Tool_Call_With_Ollama_And_LiteLLM.cs?name=Function)]
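
For reference, the region referenced above looks like this (a copy of the sample code shown in this commit's diff, with the `using` directive for the `[Function]` attribute added):

```csharp
using AutoGen.Core; // provides the [Function] attribute

namespace AutoGen.OpenAI.Sample;

// Host class for agent tools; the source generator emits a
// GetWeatherAsyncFunctionContract and GetWeatherAsyncWrapper for this method.
public partial class Function
{
    [Function]
    public async Task<string> GetWeatherAsync(string city)
    {
        return await Task.FromResult("The weather in " + city + " is 72 degrees and sunny.");
    }
}
```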

Then create a @AutoGen.Core.FunctionCallMiddleware and add the `GetWeatherAsync` function to it. The middleware passes the `FunctionContract` to the agent when generating a response, and processes the tool call result when a `ToolCallMessage` is received.

[!code-csharp[Create a FunctionCallMiddleware](../../sample/AutoGen.OpenAI.Sample/Tool_Call_With_Ollama_And_LiteLLM.cs?name=Create_tools)]
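
The middleware wiring in that region looks like this (copied from the sample diff above; `GetWeatherAsyncFunctionContract` and `GetWeatherAsyncWrapper` are emitted by `AutoGen.SourceGenerator`):

```csharp
var functions = new Function();
var functionMiddleware = new FunctionCallMiddleware(
    // contracts advertised to the model as callable tools
    functions: [functions.GetWeatherAsyncFunctionContract],
    // map from tool name to the generated wrapper that executes it
    functionMap: new Dictionary<string, Func<string, Task<string>>>
    {
        { functions.GetWeatherAsyncFunctionContract.Name!, functions.GetWeatherAsyncWrapper },
    });
```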

## Create an @AutoGen.OpenAI.OpenAIChatAgent with the `GetWeatherAsync` tool and chat with it

Because the LiteLLM proxy server is OpenAI-API compatible, we can use @AutoGen.OpenAI.OpenAIChatAgent to connect to it as a third-party OpenAI-API provider. The agent is also registered with the @AutoGen.Core.FunctionCallMiddleware that contains the `GetWeatherAsync` tool, so it can call that tool when generating a response.

[!code-csharp[Create an agent with tools](../../sample/AutoGen.OpenAI.Sample/Tool_Call_With_Ollama_And_LiteLLM.cs?name=Create_Agent)]
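
In essence, that region points an `OpenAIClient` at the LiteLLM endpoint through a URL-rewriting `HttpClientHandler` and registers the middleware. The sketch below condenses the sample diffs above; the `Function` class comes from the first snippet, and the closing `RegisterPrintMessage` call and the exact prompt are assumptions based on the expected output that follows:

```csharp
using AutoGen.Core;
using AutoGen.OpenAI;
using AutoGen.OpenAI.Extension;
using Azure.AI.OpenAI;
using Azure.Core.Pipeline;

// Route an OpenAIClient to the local LiteLLM proxy.
var liteLLMUrl = "http://localhost:4000";
using var httpClient = new HttpClient(new CustomHttpClientHandler(liteLLMUrl));
var option = new OpenAIClientOptions(OpenAIClientOptions.ServiceVersion.V2024_04_01_Preview)
{
    Transport = new HttpClientTransport(httpClient),
};

// api-key is not required for a local server, so any string works here.
var openAIClient = new OpenAIClient("api-key", option);

// Tool wiring from the previous snippet (Function class defined earlier).
var functions = new Function();
var functionMiddleware = new FunctionCallMiddleware(
    functions: [functions.GetWeatherAsyncFunctionContract],
    functionMap: new Dictionary<string, Func<string, Task<string>>>
    {
        { functions.GetWeatherAsyncFunctionContract.Name!, functions.GetWeatherAsyncWrapper },
    });

var agent = new OpenAIChatAgent(
    openAIClient: openAIClient,
    name: "assistant",
    modelName: "dolphincoder:latest",
    systemMessage: "You are a helpful AI assistant")
    .RegisterMessageConnector()
    .RegisterMiddleware(functionMiddleware)
    .RegisterPrintMessage(); // assumed: prints each message, as in other samples

// Assumed prompt, chosen to match the expected output shown below.
await agent.SendAsync("What is the weather in new york?");

// Mirrors the CustomHttpClientHandler defined in the sample project:
// rewrites each request URI to target the local model service.
public sealed class CustomHttpClientHandler : HttpClientHandler
{
    private readonly string _modelServiceUrl;

    public CustomHttpClientHandler(string modelServiceUrl) => _modelServiceUrl = modelServiceUrl;

    protected override Task<HttpResponseMessage> SendAsync(HttpRequestMessage request, CancellationToken cancellationToken)
    {
        request.RequestUri = new Uri($"{_modelServiceUrl}{request.RequestUri!.PathAndQuery}");
        return base.SendAsync(request, cancellationToken);
    }
}
```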

The reply from the agent will be similar to the following:

```bash
AggregateMessage from assistant
--------------------
ToolCallMessage:
ToolCallMessage from assistant
--------------------
- GetWeatherAsync: {"city": "new york"}
--------------------

ToolCallResultMessage:
ToolCallResultMessage from assistant
--------------------
- GetWeatherAsync: The weather in new york is 72 degrees and sunny.
--------------------
```
