diff --git a/.dotnet/CONTRIBUTING.md b/.dotnet/CONTRIBUTING.md new file mode 100644 index 000000000..05eb58570 --- /dev/null +++ b/.dotnet/CONTRIBUTING.md @@ -0,0 +1,16 @@ +# CONTRIBUTING + +## How to run code generation + +1. Run the following command to install the necessary tools: + `npm install` +1. Regenerate the OpenAPI spec by running the following command: + `npx tsp compile .\openai-in-typespec\main.tsp --emit @typespec/openapi3` +1. Regenerate the library by running the following command: + `npx tsp compile .\openai-in-typespec\main.tsp --emit @azure-tools/typespec-csharp --option @azure-tools/typespec-csharp.emitter-output-dir=.\openai-in-typespec\.dotnet` +1. Run the following script: + `.\openai-in-typespec\.dotnet\scripts\Update-ClientModel.ps1` +1. Run the following script: + `.\openai-in-typespec\.dotnet\scripts\ConvertTo-Internal.ps1` +1. Run the following script: + `.\openai-in-typespec\.dotnet\scripts\Add-Customizations.ps1` diff --git a/.dotnet/OpenAI.sln b/.dotnet/OpenAI.sln new file mode 100644 index 000000000..c3e866e93 --- /dev/null +++ b/.dotnet/OpenAI.sln @@ -0,0 +1,36 @@ +Microsoft Visual Studio Solution File, Format Version 12.00 +# Visual Studio Version 17 +VisualStudioVersion = 17.9.34701.34 +MinimumVisualStudioVersion = 10.0.40219.1 +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "OpenAI", "src\OpenAI.csproj", "{28FF4005-4467-4E36-92E7-DEA27DEB1519}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "OpenAI.Tests", "tests\OpenAI.Tests.csproj", "{1F1CD1D4-9932-4B73-99D8-C252A67D4B46}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "System.ClientModel", "..\..\azure-sdk-for-net\sdk\core\System.ClientModel\src\System.ClientModel.csproj", "{297DA5FF-1CD7-4183-8C13-45987286D33F}" +EndProject +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|Any CPU = Debug|Any CPU + Release|Any CPU = Release|Any CPU + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + {28FF4005-4467-4E36-92E7-DEA27DEB1519}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {28FF4005-4467-4E36-92E7-DEA27DEB1519}.Debug|Any CPU.Build.0 = Debug|Any CPU + {28FF4005-4467-4E36-92E7-DEA27DEB1519}.Release|Any CPU.ActiveCfg = Release|Any CPU + {28FF4005-4467-4E36-92E7-DEA27DEB1519}.Release|Any CPU.Build.0 = Release|Any CPU + {1F1CD1D4-9932-4B73-99D8-C252A67D4B46}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {1F1CD1D4-9932-4B73-99D8-C252A67D4B46}.Debug|Any CPU.Build.0 = Debug|Any CPU + {1F1CD1D4-9932-4B73-99D8-C252A67D4B46}.Release|Any CPU.ActiveCfg = Release|Any CPU + {1F1CD1D4-9932-4B73-99D8-C252A67D4B46}.Release|Any CPU.Build.0 = Release|Any CPU + {297DA5FF-1CD7-4183-8C13-45987286D33F}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {297DA5FF-1CD7-4183-8C13-45987286D33F}.Debug|Any CPU.Build.0 = Debug|Any CPU + {297DA5FF-1CD7-4183-8C13-45987286D33F}.Release|Any CPU.ActiveCfg = Release|Any CPU + {297DA5FF-1CD7-4183-8C13-45987286D33F}.Release|Any CPU.Build.0 = Release|Any CPU + EndGlobalSection + GlobalSection(SolutionProperties) = preSolution + HideSolutionNode = FALSE + EndGlobalSection + GlobalSection(ExtensibilityGlobals) = postSolution + SolutionGuid = {A97F4B90-2591-4689-B1F8-5F21FE6D6CAE} + EndGlobalSection +EndGlobal diff --git a/.dotnet/README.md b/.dotnet/README.md new file mode 100644 index 000000000..c6ab493c2 --- /dev/null +++ b/.dotnet/README.md @@ -0,0 +1,584 @@ +# OpenAI client library for .NET + +The OpenAI client library for .NET provides convenient access to the OpenAI REST API from .NET applications. 
+
+## Getting started
+
+### Prerequisites
+
+To call the OpenAI REST API, you will need an API key. To obtain one, first [create a new OpenAI account](https://platform.openai.com/signup) or [log in](https://platform.openai.com/login). Next, navigate to the [API key page](https://platform.openai.com/account/api-keys) and select "Create new secret key", optionally naming the key. Make sure to save your API key somewhere safe and do not share it with anyone.
+
+### Install the NuGet package
+
+Add the client library to your .NET project with [NuGet](https://www.nuget.org/):
+
+```cli
+dotnet add package OpenAI.OpenAI --prerelease
+```
+
+Note that the code samples included below were written using [.NET 8](https://dotnet.microsoft.com/download/dotnet/8.0).
+
+## Using the client library
+
+For convenience, the client library is organized by feature area into nine different namespaces, each with a corresponding client class:
+
+| Namespace | Client class |
+| ----------------------------|--------------------------|
+| `OpenAI.Assistants` | `AssistantClient` |
+| `OpenAI.Audio` | `AudioClient` |
+| `OpenAI.Chat` | `ChatClient` |
+| `OpenAI.Embeddings` | `EmbeddingClient` |
+| `OpenAI.Files` | `FileClient` |
+| `OpenAI.Images` | `ImageClient` |
+| `OpenAI.LegacyCompletions` | `LegacyCompletionClient` |
+| `OpenAI.ModelManagement` | `ModelManagementClient` |
+| `OpenAI.Moderations` | `ModerationsClient` |
+
+To use chat completions, for example, start by adding the corresponding `using` statement and creating an instance of the `ChatClient`, specifying both:
+
+1. The name of the OpenAI model that the client will use in its API calls (e.g., `"gpt-3.5-turbo"`)
+2. The API key that the client will use to authenticate
+
+Then, call its `CompleteChat` method by passing the user message that you would like to generate completions for:
+
+```csharp
+using OpenAI.Chat;
+
+ChatClient client = new("gpt-3.5-turbo", "<OPENAI_API_KEY>");
+
+ChatCompletion chatCompletion = client.CompleteChat("How does AI work? Explain it in simple terms.");
+
+Console.WriteLine($"[ASSISTANT]:");
+Console.WriteLine($"{ chatCompletion.Content }");
+```
+
+For illustrative purposes, the code above prints the `Content` property of the resulting `ChatCompletion` object, yielding something like this:
+
+```text
+[ASSISTANT]:
+AI, or artificial intelligence, is a technology that allows machines to mimic human behaviors and intelligence.
+It works by using algorithms and data to make decisions and perform tasks. These algorithms are designed to analyze
+data, recognize patterns, and learn from past experiences to make predictions and solve problems. AI can be trained
+to perform specific tasks, such as recognizing images or translating languages, by using vast amounts of data to
+make accurate decisions. Essentially, AI works by processing data and using it to make informed decisions and solve
+problems, much like a human brain would.
+```
+
+### Making async API calls
+
+Note that every client method that performs a synchronous API call has an asynchronous variant in the same client class. For instance, the asynchronous variant of the `ChatClient`'s `CompleteChat` method is the `ChatClient`'s `CompleteChatAsync` method. If you wanted to rewrite the sample above as async code, all that you would need to do is modify the client method call like this:
+
+```csharp
+ChatCompletion chatCompletion = await client.CompleteChatAsync("How does AI work? Explain it in simple terms.");
+```
+
+### Using the `OpenAIClient` class
+
+In addition to the nine namespaces mentioned above, there is also the parent `OpenAI` namespace itself:
+
+```csharp
+using OpenAI;
+```
+
+This namespace contains the `OpenAIClient` class, which offers certain conveniences when you need to work with multiple clients. More specifically, you can use an instance of this class to create instances of the other clients that will share the same HTTP pipeline.
+
+You can create an `OpenAIClient` by specifying the API key that all clients will use for authentication:
+
+```csharp
+OpenAIClient client = new("<OPENAI_API_KEY>");
+```
+
+Next, to create an instance of an `AudioClient`, for example, you can call the `OpenAIClient`'s `GetAudioClient` method by passing the OpenAI model that the `AudioClient` will use in its API calls. If necessary, you can create additional clients of the same type to target different models.
+
+```csharp
+AudioClient ttsClient = client.GetAudioClient("tts-1");
+AudioClient whisperClient = client.GetAudioClient("whisper-1");
+```
+
+## How to use chat completions with streaming
+
+When you request a chat completion, the default behavior is for the server to generate it in its entirety before sending it back in a single response. Consequently, long chat completions can require waiting for several seconds before hearing back from the server. To mitigate this, the OpenAI REST API supports the ability to stream partial results back as they are being generated, allowing you to start processing the beginning of the completion before it is finished.
+
+The client library offers a convenient approach to working with streaming chat completions. If you wanted to rewrite the sample from the previous section using streaming, rather than calling the `ChatClient`'s `CompleteChat` method, you would call its `CompleteChatStreaming` method instead:
+
+```csharp
+StreamingClientResult<StreamingChatUpdate> result =
+    client.CompleteChatStreaming("How does AI work? Explain it in simple terms.");
+```
+
+Notice that the returned value is a `StreamingClientResult<StreamingChatUpdate>` object, which can be iterated over to receive the streaming updates as they arrive:
+
+```csharp
+Console.WriteLine("[ASSISTANT]: ");
+await foreach (StreamingChatUpdate chatUpdate in result)
+{
+    Console.Write(chatUpdate.ContentUpdate);
+}
+```
+
+## How to use chat completions with function calling
+
+In this sample, you have two functions. The first function can retrieve a user's current geographic location (e.g., by polling the location service APIs of the user's device), while the second function can query the weather in a given location (e.g., by making an API call to some third-party weather service). You want chat completions to be able to call these functions if the model deems it necessary to have this information in order to respond to a user request. For illustrative purposes, consider the following:
+
+```csharp
+private static string GetCurrentLocation()
+{
+    // Call the location API here.
+    return "San Francisco";
+}
+
+private static string GetCurrentWeather(string location, string unit = "celsius")
+{
+    // Call the weather API here.
+    return $"31 {unit}";
+}
+```
+
+Start by creating two instances of the `ChatFunctionToolDefinition` class to describe each function:
+
+```csharp
+private const string GetCurrentLocationFunctionName = "get_current_location";
+
+private const string GetCurrentWeatherFunctionName = "get_current_weather";
+
+private static readonly ChatFunctionToolDefinition getCurrentLocationFunction = new()
+{
+    Name = GetCurrentLocationFunctionName,
+    Description = "Get the user's current location"
+};
+
+private static readonly ChatFunctionToolDefinition getCurrentWeatherFunction = new()
+{
+    Name = GetCurrentWeatherFunctionName,
+    Description = "Get the current weather in a given location",
+    Parameters = BinaryData.FromString("""
+        {
+            "type": "object",
+            "properties": {
+                "location": {
+                    "type": "string",
+                    "description": "The city and state, e.g. Boston, MA"
+                },
+                "unit": {
+                    "type": "string",
+                    "enum": [ "celsius", "fahrenheit" ],
+                    "description": "The temperature unit to use. Infer this from the specified location."
+                }
+            },
+            "required": [ "location" ]
+        }
+        """),
+};
+```
+
+Next, create a `ChatCompletionOptions` instance and add both function definitions to its `Tools` property. You will pass this instance as an argument in your calls to the `ChatClient`'s `CompleteChat` method.
+
+```csharp
+List<ChatRequestMessage> messages = [
+    new ChatRequestSystemMessage(
+        "Don't make assumptions about what values to plug into functions."
+        + " Ask for clarification if a user request is ambiguous."),
+    new ChatRequestUserMessage("What's the weather like today?"),
+];
+
+ChatCompletionOptions options = new()
+{
+    Tools = { getCurrentLocationFunction, getCurrentWeatherFunction },
+};
+```
+
+When the resulting `ChatCompletion` has a `FinishReason` property equal to `ChatFinishReason.ToolCalls`, it means that the model has determined that one or more tools must be called before the assistant can respond appropriately. In those cases, you must first call the function specified in the `ChatCompletion`'s `ToolCalls` and then call the `ChatClient`'s `CompleteChat` method again while passing the function's result as an additional `ChatRequestToolMessage`. Repeat this process as needed.
+
+```csharp
+bool requiresAction;
+
+do
+{
+    requiresAction = false;
+    ChatCompletion chatCompletion = client.CompleteChat(messages, options);
+
+    switch (chatCompletion.FinishReason)
+    {
+        case ChatFinishReason.Stopped:
+            {
+                // Add the assistant message to the conversation history.
+                messages.Add(new ChatRequestAssistantMessage(chatCompletion));
+                break;
+            }
+
+        case ChatFinishReason.ToolCalls:
+            {
+                // First, add the assistant message with tool calls to the conversation history.
+                messages.Add(new ChatRequestAssistantMessage(chatCompletion));
+
+                // Then, add a new tool message for each tool call that is resolved.
+                foreach (ChatToolCall toolCall in chatCompletion.ToolCalls)
+                {
+                    ChatFunctionToolCall functionToolCall = toolCall as ChatFunctionToolCall;
+
+                    switch (functionToolCall?.Name)
+                    {
+                        case GetCurrentLocationFunctionName:
+                            {
+                                string toolResult = GetCurrentLocation();
+                                messages.Add(new ChatRequestToolMessage(toolCall.Id, toolResult));
+                                break;
+                            }
+
+                        case GetCurrentWeatherFunctionName:
+                            {
+                                // The arguments that the model wants to use to call the function are specified as a
+                                // stringified JSON object based on the schema defined in the tool definition. Note that
+                                // the model may hallucinate arguments too. Consequently, it is important to do the
+                                // appropriate parsing and validation before calling the function.
+                                using JsonDocument argumentsJson = JsonDocument.Parse(functionToolCall.Arguments);
+                                bool hasLocation = argumentsJson.RootElement.TryGetProperty("location", out JsonElement location);
+                                bool hasUnit = argumentsJson.RootElement.TryGetProperty("unit", out JsonElement unit);
+
+                                if (!hasLocation)
+                                {
+                                    throw new ArgumentNullException(nameof(location), "The location argument is required.");
+                                }
+
+                                string toolResult = hasUnit
+                                    ? GetCurrentWeather(location.GetString(), unit.GetString())
+                                    : GetCurrentWeather(location.GetString());
+                                messages.Add(new ChatRequestToolMessage(toolCall.Id, toolResult));
+                                break;
+                            }
+
+                        default:
+                            {
+                                // Handle other or unexpected calls.
+                                throw new NotImplementedException();
+                            }
+                    }
+                }
+
+                requiresAction = true;
+                break;
+            }
+
+        case ChatFinishReason.Length:
+            throw new NotImplementedException("Incomplete model output due to MaxTokens parameter or token limit exceeded.");
+
+        case ChatFinishReason.ContentFilter:
+            throw new NotImplementedException("Omitted content due to a content filter flag.");
+
+        case ChatFinishReason.FunctionCall:
+            throw new NotImplementedException("Deprecated in favor of tool calls.");
+
+        default:
+            throw new NotImplementedException(chatCompletion.FinishReason.ToString());
+    }
+} while (requiresAction);
+```
+
+## How to get text embeddings
+
+In this sample, you want to create a trip-planning website that allows customers to write a prompt describing the kind of hotel that they are looking for and then offers hotel recommendations that closely match this description. To achieve this, you can use text embeddings to measure the relatedness of text strings. In summary, you can get embeddings of the hotel descriptions, store them in a vector database, and use them to build a search index that you can query using the embedding of a given customer's prompt.
+
+To get a text embedding, start by adding the corresponding `using` statement:
+
+```csharp
+using OpenAI.Embeddings;
+```
+
+Next, instantiate the `EmbeddingClient` and call its `GenerateEmbedding` method by passing the text input as an argument:
+
+```csharp
+EmbeddingClient client = new("text-embedding-3-small", "<OPENAI_API_KEY>");
+
+string description =
+    "Best hotel in town if you like luxury hotels. They have an amazing infinity pool, a spa,"
+    + " and a really helpful concierge. The location is perfect -- right downtown, close to all"
+    + " the tourist attractions. We highly recommend this hotel.";
+
+Embedding embedding = client.GenerateEmbedding(description);
+ReadOnlyMemory<float> vector = embedding.Vector;
+```
+
+Notice that the resulting embedding is a list (also called a vector) of floating point numbers represented as an instance of `ReadOnlyMemory<float>`. By default, the length of the embedding vector will be 1536 when using the `text-embedding-3-small` model or 3072 when using the `text-embedding-3-large` model. Generally, larger embeddings perform better, but using them also tends to cost more in terms of compute, memory, and storage. You can reduce the dimensions of the embedding by creating an instance of the `EmbeddingOptions` class, setting the `Dimensions` property, and passing it as an argument in your call to the `GenerateEmbedding` method:
+
+```csharp
+EmbeddingOptions options = new() { Dimensions = 512 };
+
+Embedding embedding = client.GenerateEmbedding(description, options);
+```
+
+## How to generate images
+
+In this sample, you want to build an app to help interior designers prototype new ideas based on the latest design trends. As part of the creative process, an interior designer can use this app to generate images for inspiration simply by describing the scene in their head as a prompt. As expected, high-quality, strikingly dramatic images with finer details deliver the best results for this application.
+
+To generate an image, start by adding the corresponding `using` statement:
+
+```csharp
+using OpenAI.Images;
+```
+
+Next, instantiate the `ImageClient`:
+
+```csharp
+ImageClient client = new("dall-e-3", "<OPENAI_API_KEY>");
+```
+
+To tailor the image generation to your specific needs, create an instance of the `ImageGenerationOptions` class and set the `Quality`, `Size`, and `Style` properties accordingly. Note that you can also set the `ResponseFormat` property of `ImageGenerationOptions` to `ImageResponseFormat.Bytes` in order to receive the resulting PNG as `BinaryData` if this is convenient for your use case.
+
+```csharp
+string prompt = "The concept for a living room that blends Scandinavian simplicity with Japanese minimalism for"
+    + " a serene and cozy atmosphere. It's a space that invites relaxation and mindfulness, with natural light"
+    + " and fresh air. Using neutral tones, including colors like white, beige, gray, and black, that create a"
+    + " sense of harmony. Featuring sleek wood furniture with clean lines and subtle curves to add warmth and"
+    + " elegance. Plants and flowers in ceramic pots adding color and life to a space. They can serve as focal"
+    + " points, creating a connection with nature. Soft textiles and cushions in organic fabrics adding comfort"
+    + " and softness to a space. They can serve as accents, adding contrast and texture.";
+
+ImageGenerationOptions options = new()
+{
+    Quality = ImageQuality.High,
+    Size = ImageSize.Size1792x1024,
+    Style = ImageStyle.Vivid,
+    ResponseFormat = ImageResponseFormat.Bytes
+};
+```
+
+Finally, call the `ImageClient`'s `GenerateImage` method by passing the prompt and the `ImageGenerationOptions` instance as arguments:
+
+```csharp
+GeneratedImage image = client.GenerateImage(prompt, options);
+BinaryData bytes = image.ImageBytes;
+```
+
+For illustrative purposes, you could save the generated image to local storage:
+
+```csharp
+using FileStream stream = File.OpenWrite($"{ Guid.NewGuid() }.png");
+bytes.ToStream().CopyTo(stream);
+```
+
+## How to use assistants with retrieval augmented generation (RAG)
+
+In this sample, you have a JSON document with the monthly sales information of different products, and you want to build an assistant capable of analyzing it and answering questions about it.
+ +Start by adding the following `using` statements: + +```csharp +using OpenAI; +using OpenAI.Assistants; +using OpenAI.Files; +``` + +Create an instance of the `OpenAIClient` class and use it to instantiate a `FileClient` and an `AssistantClient`: + +```csharp +OpenAIClient openAIClient = new(""); +FileClient fileClient = openAIClient.GetFileClient(); +AssistantClient assistantClient = openAIClient.GetAssistantClient(); +``` + +Here is an example of what the JSON document might look like: + +```csharp +BinaryData document = BinaryData.FromString(""" + { + "description": "This document contains the sale history data for Contoso products.", + "sales": [ + { + "month": "January", + "by_product": { + "113043": 15, + "113045": 12, + "113049": 2 + } + }, + { + "month": "February", + "by_product": { + "113045": 22 + } + }, + { + "month": "March", + "by_product": { + "113045": 16, + "113055": 5 + } + } + ] + } + """); +``` + +Upload this document to OpenAI using the `FileClient`'s `UploadFile` method: + +```csharp +OpenAIFileInfo openAIFileInfo = fileClient.UploadFile(document, "MonthlySales.json", OpenAIFilePurpose.Assistants); +``` + +Create an instance of the `AssistantCreationOptions` class and use it to define the assistant that you want to build. Make sure to include: + +1. The ID of the JSON document that you just uploaded in the `FileIds` property +2. An instance of the `RetrievalToolDefinition` class in the `Tools` property + +Optionally, you can also include an instance of the `CodeInterpreterToolDefinition` class in the `Tools` property and instruct the assistant via the `Instructions` property to use it to generate data visualizations when prompted. + +```csharp +AssistantCreationOptions assistantOptions = new() +{ + Name = "Example: Contoso sales RAG", + Instructions = + "You are an assistant that looks up sales data and helps visualize the information based" + + " on user queries. When asked to generate a graph, chart, or other visualization, use" + + " the code interpreter tool to do so.", + FileIds = { openAIFileInfo.Id }, + Tools = + { + new RetrievalToolDefinition(), + new CodeInterpreterToolDefinition(), + } +}; +``` + +Now, create the assistant using the `AssistantClient`'s `CreateAssistant` method: + +```csharp +Assistant assistant = assistantClient.CreateAssistant("gpt-4-1106-preview", assistantOptions); +``` + +Next, create a new thread. For illustrative purposes, you could include an initial user message asking about the sales information of a given product and then use the `AssistantClient`'s `CreateThreadAndRun` method to get it started: + +```csharp +ThreadCreationOptions threadOptions = new() +{ + Messages = + { + new ThreadInitializationMessage( + MessageRole.User, + "How well did product 113045 sell in February? Graph its trend over time."), + } +}; + +ThreadRun threadRun = assistantClient.CreateThreadAndRun(assistant.Id, threadOptions); +``` + +Poll the status of the run until it is no longer queued or in progress: + +```csharp +do +{ + Thread.Sleep(TimeSpan.FromSeconds(1)); + threadRun = assistantClient.GetRun(threadRun.ThreadId, threadRun.Id); +} while (threadRun.Status == RunStatus.Queued || threadRun.Status == RunStatus.InProgress); +``` + +If everything went well, the terminal status of the run will be `RunStatus.CompletedSuccessfully`. 
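+
+If the run instead ends in a different terminal status (for example, because it failed or was cancelled, which the run's timestamps above also account for), the thread will not contain the expected assistant responses. The following is a minimal sketch, assuming only the `Status` property and the `RunStatus` values shown above, of how you could guard against that case before reading the messages:
+
+```csharp
+// Minimal sketch: stop early if the run did not complete successfully.
+// How you surface the failure is up to your application.
+if (threadRun.Status != RunStatus.CompletedSuccessfully)
+{
+    throw new InvalidOperationException($"The run ended with status: {threadRun.Status}");
+}
+```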
+
+Finally, you can use the `AssistantClient`'s `GetMessages` method to retrieve the messages associated with this thread, which now include the responses from the assistant to the initial user message:
+
+```csharp
+ListQueryPage<ThreadMessage> messages = assistantClient.GetMessages(threadRun.ThreadId);
+```
+
+For illustrative purposes, you could print the messages to the console and also save any images produced by the assistant to local storage:
+
+```csharp
+for (int i = messages.Count - 1; i >= 0; i--)
+{
+    ThreadMessage message = messages[i];
+
+    Console.WriteLine($"[{message.Role.ToString().ToUpper()}]:");
+    foreach (MessageContent contentItem in message.ContentItems)
+    {
+        if (contentItem is MessageTextContent textContent)
+        {
+            Console.WriteLine($"{textContent.Text}");
+
+            if (textContent.Annotations.Count > 0)
+            {
+                Console.WriteLine();
+            }
+
+            // Include annotations, if any.
+            foreach (TextContentAnnotation annotation in textContent.Annotations)
+            {
+                if (annotation is TextContentFileCitationAnnotation citationAnnotation)
+                {
+                    Console.WriteLine($"* File citation, file ID: {citationAnnotation.FileId}");
+                }
+                else if (annotation is TextContentFilePathAnnotation pathAnnotation)
+                {
+                    Console.WriteLine($"* File path, file ID: {pathAnnotation.FileId}");
+                }
+            }
+        }
+        else if (contentItem is MessageImageFileContent imageFileContent)
+        {
+            OpenAIFileInfo imageInfo = fileClient.GetFileInfo(imageFileContent.FileId);
+            BinaryData imageBytes = fileClient.DownloadFile(imageFileContent.FileId);
+            using FileStream stream = File.OpenWrite($"{ imageInfo.Filename }.png");
+            imageBytes.ToStream().CopyTo(stream);
+
+            Console.WriteLine($"<{ imageInfo.Filename }.png>");
+        }
+    }
+    Console.WriteLine();
+}
+```
+
+And it would yield something like this:
+
+```text
+[USER]:
+How well did product 113045 sell in February? Graph its trend over time.
+
+[ASSISTANT]:
+
+Product 113045 sold 22 units in February. Here is the trend graph showing its sales over the months of
+January, February, and March.
+```
+
+## Advanced scenarios
+
+### Using protocol methods
+
+The client library includes model types: convenience classes that map to the request and response bodies of the REST API. The client methods that receive and return model types are referred to here as _convenience methods_. In addition to these, the clients also expose overloads that mirror the REST API's request and response bodies directly. These are referred to here as _protocol methods_, as they provide more direct access to the REST protocol.
+
+For example, to use the protocol method variant of the `ChatClient`'s `CompleteChat` method, pass the request body as a `BinaryContent` object:
+
+```csharp
+BinaryData input = BinaryData.FromString("""
+    {
+        "model": "gpt-3.5-turbo",
+        "messages": [
+            {
+                "role": "user",
+                "content": "How does AI work? Explain it in simple terms."
+            }
+        ]
+    }
+    """);
+
+using BinaryContent content = BinaryContent.Create(input);
+ClientResult result = client.CompleteChat(content);
+BinaryData output = result.GetRawResponse().Content;
+
+using JsonDocument outputAsJson = JsonDocument.Parse(output.ToString());
+string message = outputAsJson.RootElement
+    .GetProperty("choices")[0]
+    .GetProperty("message")
+    .GetProperty("content")
+    .GetString();
+```
+
+Notice how you can then call the resulting `ClientResult`'s `GetRawResponse` method and retrieve the response body as `BinaryData` via the `PipelineResponse`'s `Content` property.
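+
+Protocol methods report failures as exceptions rather than in the returned `ClientResult`. The following is a minimal sketch, assuming that `System.ClientModel`'s `ClientResultException` is the exception type thrown for non-success status codes, of how you could handle errors around the call above:
+
+```csharp
+try
+{
+    ClientResult result = client.CompleteChat(content);
+    BinaryData output = result.GetRawResponse().Content;
+    Console.WriteLine(output.ToString());
+}
+catch (ClientResultException e)
+{
+    // The exception message includes details of the failed response.
+    Console.WriteLine($"Request failed: {e.Message}");
+}
+```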
+ +### Automatically retrying errors + +By default, the client classes will automatically retry the following errors up to three additional times using exponential backoff: + +- 408 Request Timeout +- 429 Too Many Requests +- 500 Internal Server Error +- 502 Bad Gateway +- 503 Service Unavailable +- 504 Gateway Timeout diff --git a/.dotnet/nuget.config b/.dotnet/nuget.config new file mode 100644 index 000000000..824446d51 --- /dev/null +++ b/.dotnet/nuget.config @@ -0,0 +1,9 @@ + + + + + + + + + diff --git a/.dotnet/scripts/Add-Customizations.ps1 b/.dotnet/scripts/Add-Customizations.ps1 new file mode 100644 index 000000000..4b4d4cd92 --- /dev/null +++ b/.dotnet/scripts/Add-Customizations.ps1 @@ -0,0 +1,62 @@ +function Update-SystemTextJsonPackage { + $current = Get-Location + $root = Split-Path $PSScriptRoot -Parent + + # Update System.Text.Json package to 8.0.2 in OpenAI.csproj + $directory = Join-Path -Path $root -ChildPath "src" + Set-Location -Path $directory + dotnet remove "OpenAI.csproj" package "System.Text.Json" + dotnet add "OpenAI.csproj" package "System.Text.Json" --version "8.0.2" + + Set-Location -Path $current +} + +function Update-MicrosoftBclAsyncInterfacesPackage { + $current = Get-Location + $root = Split-Path $PSScriptRoot -Parent + + # Update Microsoft.Bcl.AsyncInterfaces package to 8.0.0 in OpenAI.Tests.csproj + $directory = Join-Path -Path $root -ChildPath "tests" + Set-Location -Path $directory + dotnet remove "OpenAI.Tests.csproj" package "Microsoft.Bcl.AsyncInterfaces" + dotnet add "OpenAI.Tests.csproj" package "Microsoft.Bcl.AsyncInterfaces" --version "8.0.0" + + Set-Location -Path $current +} + +function Set-LangVersionToLatest { + $root = Split-Path $PSScriptRoot -Parent + $filePath = Join-Path -Path $root -ChildPath "tests\OpenAI.Tests.csproj" + $xml = [xml](Get-Content -Path $filePath) + + $xml.Project.PropertyGroup.TargetFramework = "net8.0" + + $element = $xml.CreateElement("LangVersion") + $element.InnerText = "latest" + $xml.Project.PropertyGroup.AppendChild($element) | Out-Null + + $xml.Save($filePath) +} + +function Edit-RunObjectSerialization { + $root = Split-Path $PSScriptRoot -Parent + $directory = Join-Path -Path $root -ChildPath "src\Generated\Models" + + $file = Get-ChildItem -Path $directory -Filter "RunObject.Serialization.cs" + $content = Get-Content -Path $file -Raw + + Write-Output "Editing $($file.FullName)" + + $content = $content -creplace "expiresAt = property\.Value\.GetDateTimeOffset\(`"O`"\);", "// BUG: https://github.com/Azure/autorest.csharp/issues/4296`r`n // expiresAt = property.Value.GetDateTimeOffset(`"O`");`r`n expiresAt = DateTimeOffset.FromUnixTimeSeconds(property.Value.GetInt64());" + $content = $content -creplace "startedAt = property\.Value\.GetDateTimeOffset\(`"O`"\);", "// BUG: https://github.com/Azure/autorest.csharp/issues/4296`r`n // startedAt = property.Value.GetDateTimeOffset(`"O`");`r`n startedAt = DateTimeOffset.FromUnixTimeSeconds(property.Value.GetInt64());" + $content = $content -creplace "cancelledAt = property\.Value\.GetDateTimeOffset\(`"O`"\);", "// BUG: https://github.com/Azure/autorest.csharp/issues/4296`r`n // cancelledAt = property.Value.GetDateTimeOffset(`"O`");`r`n cancelledAt = DateTimeOffset.FromUnixTimeSeconds(property.Value.GetInt64());" + $content = $content -creplace "failedAt = property\.Value\.GetDateTimeOffset\(`"O`"\);", "// BUG: https://github.com/Azure/autorest.csharp/issues/4296`r`n // failedAt = property.Value.GetDateTimeOffset(`"O`");`r`n failedAt = 
DateTimeOffset.FromUnixTimeSeconds(property.Value.GetInt64());" + $content = $content -creplace "completedAt = property\.Value\.GetDateTimeOffset\(`"O`"\);", "// BUG: https://github.com/Azure/autorest.csharp/issues/4296`r`n // completedAt = property.Value.GetDateTimeOffset(`"O`");`r`n completedAt = DateTimeOffset.FromUnixTimeSeconds(property.Value.GetInt64());" + + $content | Set-Content -Path $file.FullName -NoNewline +} + +Update-SystemTextJsonPackage +Update-MicrosoftBclAsyncInterfacesPackage +Set-LangVersionToLatest +Edit-RunObjectSerialization \ No newline at end of file diff --git a/.dotnet/scripts/ConvertTo-Internal.ps1 b/.dotnet/scripts/ConvertTo-Internal.ps1 new file mode 100644 index 000000000..2a656ed9e --- /dev/null +++ b/.dotnet/scripts/ConvertTo-Internal.ps1 @@ -0,0 +1,75 @@ +function Edit-GeneratedOpenAIClient { + $root = Split-Path $PSScriptRoot -Parent + + $directory = Join-Path -Path $root -ChildPath "src\Generated" + $file = Get-ChildItem -Path $directory -Filter "OpenAIClient.cs" + + $content = Get-Content -Path $file -Raw + + Write-Output "Editing $($file.FullName)" + + $content = $content -creplace "public partial class", "internal partial class" + $content = $content -creplace "public readonly partial struct", "internal readonly partial struct" + $content = $content -creplace "public static partial class", "internal static partial class" + $content = $content -creplace "namespace OpenAI", "namespace OpenAI.Internal" + $content = $content -creplace "using OpenAI.Models;", "using OpenAI.Internal.Models;" + $content = $content -creplace "private (OpenAI.)?(?\w+) _cached(\w+);", "private OpenAI.Internal.`${var} _cached`${var};" + $content = $content -creplace "public virtual (OpenAI.)?(?\w+) Get(\w+)Client", "public virtual OpenAI.Internal.`${var} Get`${var}Client" + $content = $content -creplace "ref _cached(\w+), new (OpenAI.)?(?\w+)", "ref _cached`${var}, new OpenAI.Internal.`${var}" + + $content | Set-Content -Path $file.FullName -NoNewline +} + +function Edit-GeneratedSubclients { + $root = Split-Path $PSScriptRoot -Parent + + $directory = Join-Path -Path $root -ChildPath "src\Generated" + $files = Get-ChildItem -Path $($directory + "\*") -Include "*.cs" -Exclude "OpenAIClient.cs" + + foreach ($file in $files) { + $content = Get-Content -Path $file -Raw + + Write-Output "Editing $($file.FullName)" + + $content = $content -creplace "public partial class", "internal partial class" + $content = $content -creplace "public readonly partial struct", "internal readonly partial struct" + $content = $content -creplace "public static partial class", "internal static partial class" + $content = $content -creplace "namespace OpenAI", "namespace OpenAI.Internal" + $content = $content -creplace "using OpenAI.Models;", "using OpenAI.Internal.Models;" + + $content | Set-Content -Path $file.FullName -NoNewline + } +} + +function Edit-GeneratedModels { + $root = Split-Path $PSScriptRoot -Parent + + $directory = Join-Path -Path $root -ChildPath "src\Generated\Models" + $files = Get-ChildItem -Path $($directory + "\*") -Include "*.cs" + + foreach ($file in $files) { + $content = Get-Content -Path $file -Raw + + Write-Output "Editing $($file.FullName)" + + $content = $content -creplace "public partial class", "internal partial class" + $content = $content -creplace "public readonly partial struct", "internal readonly partial struct" + $content = $content -creplace "public static partial class", "internal static partial class" + $content = $content -creplace "namespace OpenAI", 
"namespace OpenAI.Internal" + $content = $content -creplace "using OpenAI.Models;", "using OpenAI.Internal.Models;" + + $content | Set-Content -Path $file.FullName -NoNewline + } +} + +function Remove-GeneratedTests { + $root = Split-Path $PSScriptRoot -Parent + + $directory = Join-Path -Path $root -ChildPath "tests\Generated" + Remove-Item -LiteralPath $directory -Recurse -Force +} + +Edit-GeneratedOpenAIClient +Edit-GeneratedSubclients +Edit-GeneratedModels +Remove-GeneratedTests diff --git a/.dotnet/scripts/Update-Client.ps1 b/.dotnet/scripts/Update-Client.ps1 new file mode 100644 index 000000000..8832c2393 --- /dev/null +++ b/.dotnet/scripts/Update-Client.ps1 @@ -0,0 +1,21 @@ +$repoRoot = Join-Path $PSScriptRoot .. .. -Resolve +$dotnetFolder = Join-Path $repoRoot .dotnet + +function Invoke([scriptblock]$script) { + $scriptString = $script | Out-String + Write-Host "--------------------------------------------------------------------------------`n> $scriptString" + & $script +} + +Push-Location $repoRoot +try { + Invoke { npm ci } + Invoke { npm exec --no -- tsp compile main.tsp --emit @typespec/openapi3 } + Invoke { npm exec --no -- tsp compile main.tsp --emit @azure-tools/typespec-csharp --option @azure-tools/typespec-csharp.emitter-output-dir="$dotnetFolder" } + Invoke { .dotnet\scripts\Update-ClientModel.ps1 } + Invoke { .dotnet\scripts\ConvertTo-Internal.ps1 } + Invoke { .dotnet\scripts\Add-Customizations.ps1 } +} +finally { + Pop-Location +} diff --git a/.dotnet/scripts/Update-ClientModel.ps1 b/.dotnet/scripts/Update-ClientModel.ps1 new file mode 100644 index 000000000..698ed7b05 --- /dev/null +++ b/.dotnet/scripts/Update-ClientModel.ps1 @@ -0,0 +1,249 @@ +function Update-SystemClientModelPackage { + $current = Get-Location + $root = Split-Path $PSScriptRoot -Parent + + # Update System.ClientModel package in OpenAI.csproj + $directory = Join-Path -Path $root -ChildPath "src" + Set-Location -Path $directory + dotnet remove "OpenAI.csproj" package "System.ClientModel" + dotnet add "OpenAI.csproj" package "System.ClientModel" --version "1.1.0-alpha.20240319.1" --source "https://pkgs.dev.azure.com/azure-sdk/public/_packaging/azure-sdk-for-net/nuget/v3/index.json" + + # Update System.ClientModel package in OpenAI.Tests.csproj + $directory = Join-Path -Path $root -ChildPath "tests" + Set-Location -Path $directory + dotnet remove "OpenAI.Tests.csproj" package "System.ClientModel" + dotnet add "OpenAI.Tests.csproj" package "System.ClientModel" --version "1.1.0-alpha.20240319.1" --source "https://pkgs.dev.azure.com/azure-sdk/public/_packaging/azure-sdk-for-net/nuget/v3/index.json" + + Set-Location -Path $current +} + +function Update-OpenAIClient { + $root = Split-Path $PSScriptRoot -Parent + $directory = Join-Path -Path $root -ChildPath "src\Generated" + $file = Get-ChildItem -Path $directory -Filter "OpenAIClient.cs" + $content = Get-Content -Path $file -Raw + + Write-Output "Editing $($file.FullName)" + + $content = $content -creplace "\s+#nullable disable", "" + $content = $content -creplace "\s+using System\.ClientModel\.Internal;", "" + $content = $content -creplace "\s+using System\.ClientModel\.Primitives\.Pipeline;", "" + $content = $content -creplace " KeyCredential ", " ApiKeyCredential " + $content = $content -creplace " _keyCredential", " _credential" + $content = $content -creplace " MessagePipeline ", " ClientPipeline " + $content = $content -creplace "\s+\/\/\/ The ClientDiagnostics is used to provide tracing support for the client library. 
", "" + $content = $content -creplace "\s+internal TelemetrySource ClientDiagnostics { get; }", "" + $content = $content -creplace "\(KeyCredential", "(ApiKeyCredential" + $content = $content -creplace "\s+ClientDiagnostics = new TelemetrySource\(options, true\);", "" + $content = $content -creplace "_pipeline = MessagePipeline\.Create\(options, new IPipelinePolicy\[\] \{ new KeyCredentialPolicy\(_keyCredential, AuthorizationHeader, AuthorizationApiKeyPrefix\) \}, Array\.Empty>\(\)\);", "var authenticationPolicy = ApiKeyAuthenticationPolicy.CreateBearerAuthorizationPolicy(_credential);`r`n _pipeline = ClientPipeline.Create(options,`r`n perCallPolicies: ReadOnlySpan.Empty,`r`n perTryPolicies: new PipelinePolicy[] { authenticationPolicy },`r`n beforeTransportPolicies: ReadOnlySpan.Empty);" + $content = $content -creplace "\(ClientDiagnostics, ", "(" + + $content | Set-Content -Path $file.FullName -NoNewline +} + +function Update-OpenAIClientOptions { + $root = Split-Path $PSScriptRoot -Parent + $directory = Join-Path -Path $root -ChildPath "src\Generated" + $file = Get-ChildItem -Path $directory -Filter "OpenAIClientOptions.cs" + $content = Get-Content -Path $file -Raw + + Write-Output "Editing $($file.FullName)" + + $content = $content -creplace "\s+#nullable disable", "" + $content = $content -creplace "using System\.ClientModel;", "using System.ClientModel.Primitives;" + $content = $content -creplace ": RequestOptions", ": ClientPipelineOptions" + + $content | Set-Content -Path $file.FullName -NoNewline +} + +function Update-Subclients { + $root = Split-Path $PSScriptRoot -Parent + $directory = Join-Path -Path $root -ChildPath "src\Generated" + $files = Get-ChildItem -Path $($directory + "\*") -Include "*.cs" -Exclude "OpenAIClient.cs", "OpenAIClientOptions.cs" + + foreach ($file in $files) { + $content = Get-Content -Path $file -Raw + + Write-Output "Editing $($file.FullName)" + + # Delete #nullable + $content = $content -creplace "\s+#nullable disable", "" + + # Fix using statements + $content = $content -creplace "\s+using System.ClientModel.Internal;", "" + $content = $content -creplace "\s+using System.ClientModel.Primitives.Pipeline;", "" + $content = $content -creplace "using System.ClientModel.Primitives;", "using System.ClientModel.Primitives;`r`nusing System.Text;" + + # Delete TelemetrySource + $content = $content -creplace "\s+\/\/\/ The ClientDiagnostics is used to provide tracing support for the client library. ", "" + $content = $content -creplace "\s+internal TelemetrySource ClientDiagnostics { get; }", "" + + # Delete FromCancellationToken + $content = $content -creplace "(?s)\s+internal static RequestOptions FromCancellationToken\(CancellationToken cancellationToken = default\).*?return new RequestOptions\(\) \{ CancellationToken = cancellationToken \};.*?\}", "" + + # Modify constructor + $content = $content -creplace "\s+\/\/\/ The handler for diagnostic messaging in the client. ", "" + $content = $content -creplace "", "" + $content = $content -creplace "internal (?\w+)\(TelemetrySource clientDiagnostics, MessagePipeline pipeline, KeyCredential keyCredential, Uri endpoint\)", "internal `${name}(ClientPipeline pipeline, ApiKeyCredential credential, Uri endpoint)" + $content = $content -creplace "\s+ClientDiagnostics = clientDiagnostics;", "" + + # # Modify convenience methods + $content = $content -creplace "\s+\/\/\/ The cancellation token to use. 
", "" + $content = $content -creplace "\(CancellationToken cancellationToken = default\)", "()" + $content = $content -creplace ", CancellationToken cancellationToken = default\)", ")" + $content = $content -creplace "RequestOptions context = FromCancellationToken\(cancellationToken\);\s+", "" + $content = $content -creplace "using RequestBody content = (?\w+)\.ToRequestBody\(\);", "using BinaryContent content = BinaryContent.Create(`${var});" + $content = $content -creplace "using RequestBody content0 = (?\w+)\.ToRequestBody\(\);", "using BinaryContent content0 = BinaryContent.Create(`${var});" + $content = $content -creplace "Result result = await (?\w+)\(context\)\.ConfigureAwait\(false\);", "ClientResult result = await `${method}(DefaultRequestContext).ConfigureAwait(false);" + $content = $content -creplace "Result result = (?\w+)\(context\);", "ClientResult result = `${method}(DefaultRequestContext);" + $content = $content -creplace "Result result = await (?\w+)\((?[(\w+)(\?.ToString\(\)*)(,\s\w+)]*), context\)\.ConfigureAwait\(false\);", "ClientResult result = await `${method}(`${params}, DefaultRequestContext).ConfigureAwait(false);" + $content = $content -creplace "Result result = (?\w+)\((?[(\w+)(\?.ToString\(\)*)(,\s\w+)]*), context\);", "ClientResult result = `${method}(`${params}, DefaultRequestContext);" + + # Modify protocol methods + $content = $content -creplace "\/\/\/ Please try the simpler \w+)\(CancellationToken\)`"/> convenience overload with strongly typed models first.", "/// Please try the simpler convenience overload with strongly typed models first." + $content = $content -creplace "\/\/\/ Please try the simpler \w+)\((?[(\w+)(\?*)(,\s\w+)]*),CancellationToken\)`"/> convenience overload with strongly typed models first.", "/// Please try the simpler convenience overload with strongly typed models first." + $content = $content -creplace "\/\/\/ The request context, which can override default behaviors of the client pipeline on a per-call basis. ", "/// The request options, which can override default behaviors of the client pipeline on a per-call basis. 
" + $content = $content -creplace "\/\/\/ ", "/// " + $content = $content -creplace " Task ", " Task " + $content = $content -creplace " Result ", " ClientResult " + $content = $content -creplace "\(RequestBody content", "(BinaryContent content" + $content = $content -creplace " RequestBody content", " BinaryContent content" + $content = $content -creplace "\(RequestOptions context", "(RequestOptions options" + $content = $content -creplace " RequestOptions context", " RequestOptions options" + $content = $content -creplace "context\)", "options)" + $content = $content -creplace "using var scope = ClientDiagnostics\.CreateSpan\(`"(?\w+)\.(?\w+)`"\);", "options ??= new RequestOptions();`r`n // using var scope = ClientDiagnostics.CreateSpan(`"`${tag}.`${operationId}`"\);" + $content = $content -creplace "scope\.Start\(\);", "// scope.Start();" + $content = $content -creplace "scope\.Failed\(e\);", "// scope.Failed(e);" + + # Create request + $content = $content -creplace "\(RequestBody content", "(BinaryContent content" + $content = $content -creplace " RequestBody content", " BinaryContent content" + $content = $content -creplace " RequestOptions context", " RequestOptions options" + $content = $content -creplace "var message = _pipeline\.CreateMessage\(context, ResponseErrorClassifier200\);", "PipelineMessage message = _pipeline.CreateMessage();`r`n message.ResponseClassifier = ResponseErrorClassifier200;" + $content = $content -creplace "var request = message\.Request;", "PipelineRequest request = message.Request;" + $content = $content -creplace "request\.SetMethod\(`"(?[\w\/]+)`"\);", "request.Method = `"`${name}`";" + $content = $content -creplace "var uri = new RequestUri\(\);", "UriBuilder uriBuilder = new(_endpoint.ToString());" + $content = $content -creplace "uri\.Reset\(_endpoint\);", "StringBuilder path = new();" + $content = $content -creplace "uri\.AppendPath\((?`"?[\w\/]+`"?), (\w+)\);", "path.Append(`${path});" + $content = $content -creplace "uri\.AppendQuery\(`"(?\w+)`", (?\w+(\.Value)?), (\w+)\);", "if (uriBuilder.Query != null && uriBuilder.Query.Length > 1)`r`n {`r`n uriBuilder.Query += $`"&`${key}={`${value}}`";`r`n }`r`n else`r`n {`r`n uriBuilder.Query = $`"`${key}={`${value}}`";`r`n }" + $content = $content -creplace "request\.Uri = uri\.ToUri\(\);", "uriBuilder.Path += path.ToString();`r`n request.Uri = uriBuilder.Uri;" + $content = $content -creplace "request\.SetHeaderValue", "request.Headers.Set" + $content = $content -creplace "request\.Content = content;", "request.Content = content;`r`n message.Apply(options);" + + # Clean up ApiKeyCredential + $content = $content -creplace " KeyCredential", " ApiKeyCredential" + $content = $content -creplace "_keyCredential", "_credential" + $content = $content -creplace " keyCredential", " credential" + + # Clean up ClientPipeline + $content = $content -creplace " MessagePipeline ", " ClientPipeline " + + # Clean up ClientResult + $content = $content -creplace " Result", " ClientResult" + $content = $content -creplace "Task _responseErrorClassifier200 \?\?= new StatusResponseClassifier\(stackalloc ushort\[\] \{ 200 \}\);", "private static PipelineMessageClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= PipelineMessageClassifier.Create(stackalloc ushort[] { 200 });" + + $content | Set-Content -Path $file.FullName -NoNewline + } +} + +function Update-Models { + $root = Split-Path $PSScriptRoot -Parent + $directory = Join-Path -Path $root -ChildPath "src\Generated\Models" + $files = Get-ChildItem -Path 
$directory -Filter "*.cs" + + foreach ($file in $files) { + $content = Get-Content -Path $file -Raw + + Write-Output "Editing $($file.FullName)" + + $content = $content -creplace "\s+#nullable disable", "" + $content = $content -creplace "using System\.ClientModel\.Internal;", "using OpenAI.ClientShared.Internal;" + $content = $content -creplace "using System\.ClientModel\.Primitives;", "using System.ClientModel;`r`nusing System.ClientModel.Primitives;" + $content = $content -creplace ": IUtf8JsonWriteable,", ":" + $content = $content -creplace "\s+void IUtf8JsonWriteable\.Write\(Utf8JsonWriter writer\) => \(\(IJsonModel<(\w+)>\)this\)\.Write\(writer, new ModelReaderWriterOptions\(`"W`"\)\);`r`n", "" + $content = $content -creplace " RequestBody", " BinaryContent" + + $content | Set-Content -Path $file.FullName -NoNewline + } +} + +function Update-InternalClientPipelineExtensions { + $root = Split-Path $PSScriptRoot -Parent + $directory = Join-Path -Path $root -ChildPath "src\Generated\Internal" + $file = Get-ChildItem -Path $directory -Filter "ClientPipelineExtensions.cs" + $content = Get-Content -Path $file -Raw + + Write-Output "Editing $($file.FullName)" + + $content = $content -creplace "\s+using System\.ClientModel\.Primitives\.Pipeline;", "" + $content = $content -creplace " Pipeline", " ClientPipeline" + $content = $content -creplace "\.ErrorBehavior", ".ErrorOptions" + $content = $content -creplace "ErrorBehavior\.", "ClientErrorBehaviors." + $content = $content -creplace " MessageFailedException", " ClientResultException" + $content = $content -creplace "(?s)\s+public static async ValueTask> ProcessHeadAsBoolMessageAsync.*?\}.*?\}", "" + $content = $content -creplace "(?s)\s+public static NullableResult ProcessHeadAsBoolMessage.*?\}.*?\}", "" + + $content | Set-Content -Path $file.FullName -NoNewline +} + +function Update-InternalErrorResult { + $root = Split-Path $PSScriptRoot -Parent + $directory = Join-Path -Path $root -ChildPath "src\Generated\Internal" + $file = Get-ChildItem -Path $directory -Filter "ErrorResult.cs" + $content = Get-Content -Path $file -Raw + + Write-Output "Editing $($file.FullName)" + + $content = $content -creplace " MessagePipeline", " ClientPipeline" + $content = $content -creplace " Result", " ClientResult" + $content = $content -creplace " MessageFailedException", " ClientResultException" + $content = $content -creplace "\s+public override bool HasValue => false;", "" + $content = $content -creplace "(?s)\s+public override PipelineResponse GetRawResponse\(\)\s+\{\s+return _response;\s+\}", "" + + $content | Set-Content -Path $file.FullName -NoNewline +} + +function Update-InternalUtf8JsonRequestBody { + $root = Split-Path $PSScriptRoot -Parent + $directory = Join-Path -Path $root -ChildPath "src\Generated\Internal" + $file = Get-ChildItem -Path $directory -Filter "Utf8JsonRequestBody.cs" + $content = Get-Content -Path $file -Raw + + Write-Output "Editing $($file.FullName)" + + $content = $content -creplace "using System\.ClientModel\.Primitives;", "using System;`r`nusing System.ClientModel;" + $content = $content -creplace " RequestBody", " BinaryContent" + $content = $content -creplace "_content = CreateFromStream\(_stream\);", "_content = BinaryContent.Create(BinaryData.FromStream(_stream));" + + $content | Set-Content -Path $file.FullName -NoNewline +} + +function Update-Tests { + $root = Split-Path $PSScriptRoot -Parent + $directory = Join-Path -Path $root -ChildPath "tests\Generated\Tests" + $files = Get-ChildItem -Path $directory -Filter "*.cs" 
+ + foreach ($file in $files) { + $content = Get-Content -Path $file -Raw + + Write-Output "Editing $($file.FullName)" + + $content = $content -creplace " KeyCredential", " ApiKeyCredential" + + $content | Set-Content -Path $file.FullName -NoNewline + } +} + +Update-SystemClientModelPackage +Update-OpenAIClient +Update-OpenAIClientOptions +Update-Subclients +Update-Models +Update-InternalClientPipelineExtensions +Update-InternalErrorResult +Update-InternalUtf8JsonRequestBody +Update-Tests \ No newline at end of file diff --git a/.dotnet/src/ClientShared/ModelReaderWriterExtensions.cs b/.dotnet/src/ClientShared/ModelReaderWriterExtensions.cs new file mode 100644 index 000000000..25be6cbb5 --- /dev/null +++ b/.dotnet/src/ClientShared/ModelReaderWriterExtensions.cs @@ -0,0 +1,264 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +#nullable enable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Diagnostics; +using System.Globalization; +using System.Linq; +using System.Text.Json; + +namespace OpenAI.ClientShared.Internal; + +internal static class ModelReaderWriterExtensions +{ + // TODO: These are copied from shared source files. If they become + // public we need to refactor and consolidate to a single place. + + #region JsonElement + + public static object? GetObject(in this JsonElement element) + { + switch (element.ValueKind) + { + case JsonValueKind.String: + return element.GetString(); + case JsonValueKind.Number: + if (element.TryGetInt32(out int intValue)) + { + return intValue; + } + if (element.TryGetInt64(out long longValue)) + { + return longValue; + } + return element.GetDouble(); + case JsonValueKind.True: + return true; + case JsonValueKind.False: + return false; + case JsonValueKind.Undefined: + case JsonValueKind.Null: + return null; + case JsonValueKind.Object: + var dictionary = new Dictionary(); + foreach (JsonProperty jsonProperty in element.EnumerateObject()) + { + dictionary.Add(jsonProperty.Name, jsonProperty.Value.GetObject()); + } + return dictionary; + case JsonValueKind.Array: + var list = new List(); + foreach (JsonElement item in element.EnumerateArray()) + { + list.Add(item.GetObject()); + } + return list.ToArray(); + default: + throw new NotSupportedException("Not supported value kind " + element.ValueKind); + } + } + + public static byte[]? 
GetBytesFromBase64(in this JsonElement element, string format) + { + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + + return format switch + { + "U" => TypeFormatters.FromBase64UrlString(element.GetRequiredString()), + "D" => element.GetBytesFromBase64(), + _ => throw new ArgumentException($"Format is not supported: '{format}'", nameof(format)) + }; + } + + public static DateTimeOffset GetDateTimeOffset(in this JsonElement element, string format) => format switch + { + "U" when element.ValueKind == JsonValueKind.Number => DateTimeOffset.FromUnixTimeSeconds(element.GetInt64()), + // relying on the param check of the inner call to throw ArgumentNullException if GetString() returns null + _ => TypeFormatters.ParseDateTimeOffset(element.GetString()!, format) + }; + + public static TimeSpan GetTimeSpan(in this JsonElement element, string format) => + // relying on the param check of the inner call to throw ArgumentNullException if GetString() returns null + TypeFormatters.ParseTimeSpan(element.GetString()!, format); + + public static char GetChar(this in JsonElement element) + { + if (element.ValueKind == JsonValueKind.String) + { + var text = element.GetString(); + if (text == null || text.Length != 1) + { + throw new NotSupportedException($"Cannot convert \"{text}\" to a Char"); + } + return text[0]; + } + else + { + throw new NotSupportedException($"Cannot convert {element.ValueKind} to a Char"); + } + } + + [Conditional("DEBUG")] + public static void ThrowNonNullablePropertyIsNull(this JsonProperty property) + { + throw new JsonException($"A property '{property.Name}' defined as non-nullable but received as null from the service. " + + $"This exception only happens in DEBUG builds of the library and would be ignored in the release build"); + } + + public static string GetRequiredString(in this JsonElement element) + { + var value = element.GetString(); + if (value == null) + throw new InvalidOperationException($"The requested operation requires an element of type 'String', but the target element has type '{element.ValueKind}'."); + + return value; + } + + #endregion + + #region Utf8JsonWriter + public static void WriteStringValue(this Utf8JsonWriter writer, DateTimeOffset value, string format) => + writer.WriteStringValue(TypeFormatters.ToString(value, format)); + + public static void WriteStringValue(this Utf8JsonWriter writer, DateTime value, string format) => + writer.WriteStringValue(TypeFormatters.ToString(value, format)); + + public static void WriteStringValue(this Utf8JsonWriter writer, TimeSpan value, string format) => + writer.WriteStringValue(TypeFormatters.ToString(value, format)); + + public static void WriteStringValue(this Utf8JsonWriter writer, char value) => + writer.WriteStringValue(value.ToString(CultureInfo.InvariantCulture)); + + public static void WriteNonEmptyArray(this Utf8JsonWriter writer, string name, IReadOnlyList values) + { + if (values.Any()) + { + writer.WriteStartArray(name); + foreach (var s in values) + { + writer.WriteStringValue(s); + } + + writer.WriteEndArray(); + } + } + + public static void WriteBase64StringValue(this Utf8JsonWriter writer, byte[] value, string format) + { + if (value == null) + { + writer.WriteNullValue(); + return; + } + + switch (format) + { + case "U": + writer.WriteStringValue(TypeFormatters.ToBase64UrlString(value)); + break; + case "D": + writer.WriteBase64StringValue(value); + break; + default: + throw new ArgumentException($"Format is not supported: '{format}'", nameof(format)); + } + } + + public 
static void WriteNumberValue(this Utf8JsonWriter writer, DateTimeOffset value, string format) + { + if (format != "U") throw new ArgumentOutOfRangeException(format, "Only 'U' format is supported when writing a DateTimeOffset as a Number."); + + writer.WriteNumberValue(value.ToUnixTimeSeconds()); + } + + public static void WriteObjectValue(this Utf8JsonWriter writer, object? value) + { + switch (value) + { + case null: + writer.WriteNullValue(); + break; + case IJsonModel writeable: + writeable.Write(writer, ModelReaderWriterHelper.WireOptions); + break; + case byte[] bytes: + writer.WriteBase64StringValue(bytes); + break; + case BinaryData bytes: + writer.WriteBase64StringValue(bytes); + break; + case JsonElement json: + json.WriteTo(writer); + break; + case int i: + writer.WriteNumberValue(i); + break; + case decimal d: + writer.WriteNumberValue(d); + break; + case double d: + if (double.IsNaN(d)) + { + writer.WriteStringValue("NaN"); + } + else + { + writer.WriteNumberValue(d); + } + break; + case float f: + writer.WriteNumberValue(f); + break; + case long l: + writer.WriteNumberValue(l); + break; + case string s: + writer.WriteStringValue(s); + break; + case bool b: + writer.WriteBooleanValue(b); + break; + case Guid g: + writer.WriteStringValue(g); + break; + case DateTimeOffset dateTimeOffset: + writer.WriteStringValue(dateTimeOffset, "O"); + break; + case DateTime dateTime: + writer.WriteStringValue(dateTime, "O"); + break; + case IEnumerable> enumerable: + writer.WriteStartObject(); + foreach (KeyValuePair pair in enumerable) + { + writer.WritePropertyName(pair.Key); + writer.WriteObjectValue(pair.Value); + } + writer.WriteEndObject(); + break; + case IEnumerable objectEnumerable: + writer.WriteStartArray(); + foreach (object item in objectEnumerable) + { + writer.WriteObjectValue(item); + } + writer.WriteEndArray(); + break; + case TimeSpan timeSpan: + writer.WriteStringValue(timeSpan, "P"); + break; + + default: + throw new NotSupportedException("Not supported type " + value.GetType()); + } + } + + #endregion +} \ No newline at end of file diff --git a/.dotnet/src/ClientShared/ModelReaderWriterHelper.cs b/.dotnet/src/ClientShared/ModelReaderWriterHelper.cs new file mode 100644 index 000000000..21181d6d1 --- /dev/null +++ b/.dotnet/src/ClientShared/ModelReaderWriterHelper.cs @@ -0,0 +1,31 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +#nullable enable + +using System; +using System.ClientModel.Primitives; +using System.Runtime.CompilerServices; + +namespace OpenAI.ClientShared.Internal; + +internal static class ModelReaderWriterHelper +{ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static void ValidateFormat(IPersistableModel model, string format) + { + bool implementsJson = model is IJsonModel; + bool isValid = (format == "J" && implementsJson) || format == "W"; + if (!isValid) + { + throw new FormatException($"The model {model.GetType().Name} does not support '{format}' format."); + } + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static void ValidateFormat(IPersistableModel model, string format) + => ValidateFormat(model, format); + + private static ModelReaderWriterOptions? 
_wireOptions; + public static ModelReaderWriterOptions WireOptions => _wireOptions ??= new ModelReaderWriterOptions("W"); +} \ No newline at end of file diff --git a/.dotnet/src/ClientShared/TypeFormatters.cs b/.dotnet/src/ClientShared/TypeFormatters.cs new file mode 100644 index 000000000..9a00edaf3 --- /dev/null +++ b/.dotnet/src/ClientShared/TypeFormatters.cs @@ -0,0 +1,158 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +#nullable enable + +using System; +using System.Collections.Generic; +using System.Globalization; +using System.Xml; + +namespace OpenAI.ClientShared.Internal; + +internal class TypeFormatters +{ + private const string RoundtripZFormat = "yyyy-MM-ddTHH:mm:ss.fffffffZ"; + public static string DefaultNumberFormat { get; } = "G"; + + public static string ToString(bool value) => value ? "true" : "false"; + + public static string ToString(DateTime value, string format) => value.Kind switch + { + DateTimeKind.Utc => ToString((DateTimeOffset)value, format), + _ => throw new NotSupportedException($"DateTime {value} has a Kind of {value.Kind}. Azure SDK requires it to be UTC. You can call DateTime.SpecifyKind to change Kind property value to DateTimeKind.Utc.") + }; + + public static string ToString(DateTimeOffset value, string format) => format switch + { + "D" => value.ToString("yyyy-MM-dd", CultureInfo.InvariantCulture), + "U" => value.ToUnixTimeSeconds().ToString(CultureInfo.InvariantCulture), + "O" => value.ToUniversalTime().ToString(RoundtripZFormat, CultureInfo.InvariantCulture), + "o" => value.ToUniversalTime().ToString(RoundtripZFormat, CultureInfo.InvariantCulture), + "R" => value.ToString("r", CultureInfo.InvariantCulture), + _ => value.ToString(format, CultureInfo.InvariantCulture) + }; + + public static string ToString(TimeSpan value, string format) => format switch + { + "P" => XmlConvert.ToString(value), + _ => value.ToString(format, CultureInfo.InvariantCulture) + }; + + public static string ToString(byte[] value, string format) => format switch + { + "U" => ToBase64UrlString(value), + "D" => Convert.ToBase64String(value), + _ => throw new ArgumentException($"Format is not supported: '{format}'", nameof(format)) + }; + + public static string ToBase64UrlString(byte[] value) + { + var numWholeOrPartialInputBlocks = checked(value.Length + 2) / 3; + var size = checked(numWholeOrPartialInputBlocks * 4); + var output = new char[size]; + + var numBase64Chars = Convert.ToBase64CharArray(value, 0, value.Length, output, 0); + + // Fix up '+' -> '-' and '/' -> '_'. Drop padding characters. + int i = 0; + for (; i < numBase64Chars; i++) + { + var ch = output[i]; + if (ch == '+') + { + output[i] = '-'; + } + else if (ch == '/') + { + output[i] = '_'; + } + else if (ch == '=') + { + // We've reached a padding character; truncate the remainder. 
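+ // Base64url (RFC 4648, section 5) replaces '+' with '-' and '/' with '_' and omits trailing '=' padding. + // For example, the bytes { 0xFB, 0xEF } encode to "++8=" in standard Base64 and therefore to "--8" here.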
+ break; + } + } + + return new string(output, 0, i); + } + + public static byte[] FromBase64UrlString(string value) + { + var paddingCharsToAdd = GetNumBase64PaddingCharsToAddForDecode(value.Length); + + var output = new char[value.Length + paddingCharsToAdd]; + + int i; + for (i = 0; i < value.Length; i++) + { + var ch = value[i]; + if (ch == '-') + { + output[i] = '+'; + } + else if (ch == '_') + { + output[i] = '/'; + } + else + { + output[i] = ch; + } + } + + for (; i < output.Length; i++) + { + output[i] = '='; + } + + return Convert.FromBase64CharArray(output, 0, output.Length); + } + + private static int GetNumBase64PaddingCharsToAddForDecode(int inputLength) + { + switch (inputLength % 4) + { + case 0: + return 0; + case 2: + return 2; + case 3: + return 1; + default: + throw new InvalidOperationException("Malformed input"); + } + } + + public static DateTimeOffset ParseDateTimeOffset(string value, string format) + { + return format switch + { + "U" => DateTimeOffset.FromUnixTimeSeconds(long.Parse(value, CultureInfo.InvariantCulture)), + _ => DateTimeOffset.Parse(value, CultureInfo.InvariantCulture, DateTimeStyles.AssumeUniversal) + }; + } + + public static TimeSpan ParseTimeSpan(string value, string format) => format switch + { + "P" => XmlConvert.ToTimeSpan(value), + _ => TimeSpan.ParseExact(value, format, CultureInfo.InvariantCulture) + }; + + public static string ConvertToString(object? value, string? format = null) + => value switch + { + null => "null", + string s => s, + bool b => ToString(b), + int or float or double or long or decimal => ((IFormattable)value).ToString(DefaultNumberFormat, CultureInfo.InvariantCulture), + byte[] b when format != null => ToString(b, format), + IEnumerable<string> s => string.Join(",", s), + DateTimeOffset dateTime when format != null => ToString(dateTime, format), + TimeSpan timeSpan when format != null => ToString(timeSpan, format), + TimeSpan timeSpan => XmlConvert.ToString(timeSpan), + Guid guid => guid.ToString(), + BinaryData binaryData => TypeFormatters.ConvertToString(binaryData.ToArray(), format), + _ => value.ToString()! + }; +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Assistants/Assistant.cs b/.dotnet/src/Custom/Assistants/Assistant.cs new file mode 100644 index 000000000..02dcede78 --- /dev/null +++ b/.dotnet/src/Custom/Assistants/Assistant.cs @@ -0,0 +1,48 @@ +using System; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Assistants; + +public partial class Assistant +{ + public string Id { get; } + public DateTimeOffset CreatedAt { get; } + public string Name { get; } + public string Description { get; } + public string DefaultModel { get; } + public string DefaultInstructions { get; } + public IReadOnlyList<ToolDefinition> DefaultTools { get; } + /// + /// An optional key/value mapping of additional, supplemental data items to attach to the <see cref="Assistant"/>. + /// This information may be useful for storing custom details in a structured format. + /// + /// + /// + /// Keys can be a maximum of 64 characters in length. + /// Values can be a maximum of 512 characters in length. 
+ /// + /// + public IReadOnlyDictionary<string, string> Metadata { get; } + + internal Assistant(Internal.Models.AssistantObject internalAssistant) + { + Id = internalAssistant.Id; + CreatedAt = internalAssistant.CreatedAt; + Name = internalAssistant.Name; + Description = internalAssistant.Description; + DefaultModel = internalAssistant.Model; + DefaultInstructions = internalAssistant.Instructions; + Metadata = internalAssistant.Metadata; + + if (internalAssistant.Tools != null) + { + List<ToolDefinition> tools = []; + foreach (BinaryData unionToolDefinitionData in internalAssistant.Tools) + { + tools.Add(ToolDefinition.DeserializeToolDefinition(JsonDocument.Parse(unionToolDefinitionData).RootElement)); + } + DefaultTools = tools; + } + } +} diff --git a/.dotnet/src/Custom/Assistants/AssistantClient.Protocol.cs b/.dotnet/src/Custom/Assistants/AssistantClient.Protocol.cs new file mode 100644 index 000000000..826d90bf8 --- /dev/null +++ b/.dotnet/src/Custom/Assistants/AssistantClient.Protocol.cs @@ -0,0 +1,488 @@ +using System.ClientModel; +using System.ClientModel.Primitives; +using System.ComponentModel; +using System.Threading.Tasks; + +namespace OpenAI.Assistants; + +public partial class AssistantClient +{ + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult CreateAssistant( + BinaryContent content, + RequestOptions options = null) + => Shim.CreateAssistant(content, options); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual async Task<ClientResult> CreateAssistantAsync( + BinaryContent content, + RequestOptions options = null) + => await Shim.CreateAssistantAsync(content, options).ConfigureAwait(false); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult GetAssistant( + string assistantId, + RequestOptions options) + => Shim.GetAssistant(assistantId, options); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual async Task<ClientResult> GetAssistantAsync( + string assistantId, + RequestOptions options) + => await Shim.GetAssistantAsync(assistantId, options).ConfigureAwait(false); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult GetAssistants( + int? maxResults, + string createdSortOrder, + string previousAssistantId, + string subsequentAssistantId, + RequestOptions options) + => Shim.GetAssistants(maxResults, createdSortOrder, previousAssistantId, subsequentAssistantId, options); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual async Task<ClientResult> GetAssistantsAsync( + int? 
maxResults, + string createdSortOrder, + string previousAssistantId, + string subsequentAssistantId, + RequestOptions options) + => await Shim.GetAssistantsAsync(maxResults, createdSortOrder, previousAssistantId, subsequentAssistantId, options).ConfigureAwait(false); + + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult ModifyAssistant( + string assistantId, + BinaryContent content, + RequestOptions options = null) + => Shim.ModifyAssistant(assistantId, content, options); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual async Task ModifyAssistantAsync( + string assistantId, + BinaryContent content, + RequestOptions options = null) + => await Shim.ModifyAssistantAsync(assistantId, content, options).ConfigureAwait(false); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult DeleteAssistant( + string assistantId, + RequestOptions options) + => Shim.DeleteAssistant(assistantId, options); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual async Task DeleteAssistantAsync( + string assistantId, + RequestOptions options) + => await Shim.DeleteAssistantAsync(assistantId, options).ConfigureAwait(false); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult CreateAssistantFileAssociation( + string assistantId, + BinaryContent content, + RequestOptions options = null) + => Shim.CreateAssistantFile(assistantId, content, options); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual async Task CreateAssistantFileAssociationAsync( + string assistantId, + BinaryContent content, + RequestOptions options = null) + => await Shim.CreateAssistantFileAsync(assistantId, content, options).ConfigureAwait(false); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult GetAssistantFileAssociation( + string assistantId, + string fileId, + RequestOptions options) + => Shim.GetAssistantFile(assistantId, fileId, options); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual async Task GetAssistantFileAssociationAsync( + string assistantId, + string fileId, + RequestOptions options) + => await Shim.GetAssistantFileAsync(assistantId, fileId, options).ConfigureAwait(false); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult GetAssistantFileAssociations( + string assistantId, + int? maxResults, + string createdSortOrder, + string previousId, + string subsequentId, + RequestOptions options) + => Shim.GetAssistantFiles(assistantId, maxResults, createdSortOrder, previousId, subsequentId, options); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual async Task GetAssistantFileAssociationsAsync( + string assistantId, + int? 
maxResults, + string createdSortOrder, + string previousId, + string subsequentId, + RequestOptions options) + => await Shim.GetAssistantFilesAsync(assistantId, maxResults, createdSortOrder, previousId, subsequentId, options).ConfigureAwait(false); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult RemoveAssistantFileAssociation( + string assistantId, + string fileId, + RequestOptions options) + => Shim.DeleteAssistantFile(assistantId, fileId, options); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual async Task RemoveAssistantFileAssociationAsync( + string assistantId, + string fileId, + RequestOptions options) + => await Shim.DeleteAssistantFileAsync(assistantId, fileId, options).ConfigureAwait(false); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult CreateThread( + BinaryContent content, + RequestOptions options = null) + => ThreadShim.CreateThread(content, options); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual async Task CreateThreadAsync( + BinaryContent content, + RequestOptions options = null) + => await ThreadShim.CreateThreadAsync(content, options).ConfigureAwait(false); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult GetThread( + string threadId, + RequestOptions options) + => ThreadShim.GetThread(threadId, options); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual async Task GetThreadAsync( + string threadId, + RequestOptions options) + => await ThreadShim.GetThreadAsync(threadId, options).ConfigureAwait(false); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult ModifyThread( + string threadId, + BinaryContent content, + RequestOptions options = null) + => ThreadShim.ModifyThread(threadId, content, options); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual async Task ModifyThreadAsync( + string threadId, + BinaryContent content, + RequestOptions options = null) + => await ThreadShim.ModifyThreadAsync(threadId, content, options).ConfigureAwait(false); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult DeleteThread( + string threadId, + RequestOptions options) + => ThreadShim.DeleteThread(threadId, options); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual async Task DeleteThreadAsync( + string threadId, + RequestOptions options) + => await ThreadShim.DeleteThreadAsync(threadId, options).ConfigureAwait(false); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult CreateMessage( + string threadId, + BinaryContent content, + RequestOptions options = null) + => MessageShim.CreateMessage(threadId, content, options); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual async Task CreateMessageAsync( + string threadId, + BinaryContent content, + RequestOptions options = null) + => await MessageShim.CreateMessageAsync(threadId, content, options).ConfigureAwait(false); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult GetMessage( + string threadId, + string messageId, + RequestOptions options) + => MessageShim.GetMessage(threadId, messageId, options); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual async Task GetMessageAsync( + string threadId, + string messageId, + RequestOptions options) + => await MessageShim.GetMessageAsync(threadId, messageId, options).ConfigureAwait(false); 
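+ + // Illustrative use of the protocol methods above (a sketch, not part of the library surface; + // the client setup and IDs are hypothetical placeholders): + // + // AssistantClient client = new(new ApiKeyCredential(Environment.GetEnvironmentVariable("OPENAI_API_KEY"))); + // ClientResult result = client.GetMessage("thread_abc123", "msg_abc123", options: null); + // using JsonDocument message = JsonDocument.Parse(result.GetRawResponse().Content); + // Console.WriteLine(message.RootElement.GetProperty("id").GetString());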
+ + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult ModifyMessage( + string threadId, + string messageId, + BinaryContent content, + RequestOptions options) + => MessageShim.ModifyMessage(threadId, messageId, content, options); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual async Task ModifyMessageAsync( + string threadId, + string messageId, + BinaryContent content, + RequestOptions options) + => await MessageShim.ModifyMessageAsync(threadId, messageId, content, options).ConfigureAwait(false); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult GetMessages( + string threadId, + int? maxResults, + string createdSortOrder, + string previousMessageId, + string subsequentMessageId, + RequestOptions options) + => MessageShim.GetMessages(threadId, maxResults, createdSortOrder, previousMessageId, subsequentMessageId, options); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual async Task GetMessagesAsync( + string threadId, + int? maxResults, + string createdSortOrder, + string previousMessageId, + string subsequentMessageId, + RequestOptions options) + => await MessageShim.GetMessagesAsync(threadId, maxResults, createdSortOrder, previousMessageId, subsequentMessageId, options).ConfigureAwait(false); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult GetMessageFileAssociation( + string threadId, + string messageId, + string fileId, + RequestOptions options) + => MessageShim.GetMessageFile(threadId, messageId, fileId, options); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual async Task GetMessageFileAssociationAsync( + string threadId, + string messageId, + string fileId, + RequestOptions options) + => await MessageShim.GetMessageFileAsync(threadId, messageId, fileId, options).ConfigureAwait(false); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult GetMessageFileAssociations( + string threadId, + string messageId, + int? maxResults, + string createdSortOrder, + string previousId , + string subsequentId, + RequestOptions options) + => MessageShim.GetMessageFiles(threadId, messageId, maxResults, createdSortOrder, previousId, subsequentId, options); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual async Task GetMessageFileAssociationsAsync( + string threadId, + string messageId, + int? 
maxResults, + string createdSortOrder, + string previousId, + string subsequentId, + RequestOptions options) + => await MessageShim.GetMessageFilesAsync(threadId, messageId, maxResults, createdSortOrder, previousId, subsequentId, options).ConfigureAwait(false); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult CreateRun( + string threadId, + BinaryContent content, + RequestOptions options = null) + => RunShim.CreateRun(threadId, content, options); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual async Task CreateRunAsync( + string threadId, + BinaryContent content, + RequestOptions options = null) + => await RunShim.CreateRunAsync(threadId, content, options).ConfigureAwait(false); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult CreateThreadAndRun( + BinaryContent content, + RequestOptions options = null) + => RunShim.CreateThreadAndRun(content, options); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual async Task CreateThreadAndRunAsync( + BinaryContent content, + RequestOptions options = null) + => await RunShim.CreateThreadAndRunAsync(content, options).ConfigureAwait(false); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult GetRun( + string threadId, + string runId, + RequestOptions options) + => RunShim.GetRun(threadId, runId, options); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual async Task GetRunAsync( + string threadId, + string runId, + RequestOptions options) + => await RunShim.GetRunAsync(threadId, runId, options).ConfigureAwait(false); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult GetRuns( + string threadId, + int? maxResults, + string createdSortOrder, + string previousRunId, + string subsequentRunId, + RequestOptions options) + => RunShim.GetRuns(threadId, maxResults, createdSortOrder, previousRunId, subsequentRunId, options); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual async Task GetRunsAsync( + string threadId, + int? 
maxResults, + string createdSortOrder, + string previousRunId, + string subsequentRunId, + RequestOptions options) + => await RunShim.GetRunsAsync(threadId, maxResults, createdSortOrder, previousRunId, subsequentRunId, options).ConfigureAwait(false); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult ModifyRun( + string threadId, + string runId, + BinaryContent content, + RequestOptions options = null) + => RunShim.ModifyRun(threadId, runId, content, options); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual async Task ModifyRunAsync( + string threadId, + string runId, + BinaryContent content, + RequestOptions options = null) + => await RunShim.ModifyRunAsync(threadId, runId, content, options).ConfigureAwait(false); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult CancelRun( + string threadId, + string runId, + RequestOptions options) + => RunShim.CancelRun(threadId, runId, options); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual async Task CancelRunAsync( + string threadId, + string runId, + RequestOptions options) + => await RunShim.CancelRunAsync(threadId, runId, options).ConfigureAwait(false); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult SubmitToolOutputs( + string threadId, + string runId, + BinaryContent content, + RequestOptions options = null) + => RunShim.SubmitToolOuputsToRun(threadId, runId, content, options); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual async Task SubmitToolOutputsAsync( + string threadId, + string runId, + BinaryContent content, + RequestOptions options = null) + => await RunShim.SubmitToolOuputsToRunAsync(threadId, runId, content, options).ConfigureAwait(false); + + /// + public virtual ClientResult GetRunStep( + string threadId, + string runId, + string stepId, + RequestOptions options) + => RunShim.GetRunStep(threadId, runId, stepId, options); + + /// + public virtual async Task GetRunStepAsync( + string threadId, + string runId, + string stepId, + RequestOptions options) + => await RunShim.GetRunStepAsync(threadId, runId, stepId, options).ConfigureAwait(false); + + /// + public virtual ClientResult GetRunSteps( + string threadId, + string runId, + int? maxResults, + string createdSortOrder, + string previousStepId, + string subsequentStepId, + RequestOptions options) + => RunShim.GetRunSteps(threadId, runId, maxResults, createdSortOrder, previousStepId, subsequentStepId, options); + + /// + public virtual async Task GetRunStepsAsync( + string threadId, + string runId, + int? maxResults, + string createdSortOrder, + string previousStepId, + string subsequentStepId, + RequestOptions options) + => await RunShim.GetRunStepsAsync(threadId, runId, maxResults, createdSortOrder, previousStepId, subsequentStepId, options).ConfigureAwait(false); +} diff --git a/.dotnet/src/Custom/Assistants/AssistantClient.cs b/.dotnet/src/Custom/Assistants/AssistantClient.cs new file mode 100644 index 000000000..7543c0330 --- /dev/null +++ b/.dotnet/src/Custom/Assistants/AssistantClient.cs @@ -0,0 +1,720 @@ +using System; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Diagnostics.CodeAnalysis; +using System.Threading.Tasks; + +namespace OpenAI.Assistants; + +/// +/// The service client for OpenAI assistants. 
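+/// A minimal usage sketch (assumes the OPENAI_API_KEY environment variable is set; the model name below is illustrative): +/// +/// AssistantClient client = new(); +/// Assistant assistant = client.CreateAssistant("gpt-3.5-turbo").Value; +/// AssistantThread thread = client.CreateThread().Value; +/// ThreadRun run = client.CreateRun(thread.Id, assistant.Id).Value;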
+/// +[Experimental("OPENAI001")] +public partial class AssistantClient +{ + private OpenAIClientConnector _clientConnector; + private Internal.Assistants Shim => _clientConnector.InternalClient.GetAssistantsClient(); + private Internal.Threads ThreadShim => _clientConnector.InternalClient.GetThreadsClient(); + private Internal.Messages MessageShim => _clientConnector.InternalClient.GetMessagesClient(); + private Internal.Runs RunShim => _clientConnector.InternalClient.GetRunsClient(); + + /// + /// Initializes a new instance of <see cref="AssistantClient"/>, used for assistant requests. + /// + /// + /// + /// If an endpoint is not provided, the client will use the OPENAI_ENDPOINT environment variable if it + /// is defined and otherwise use the default OpenAI v1 endpoint. + /// + /// + /// If an authentication credential is not defined, the client will use the OPENAI_API_KEY environment variable + /// if it is defined. + /// + /// + /// The API key used to authenticate with the service endpoint. + /// Additional options to customize the client. + public AssistantClient(ApiKeyCredential credential = default, OpenAIClientOptions options = default) + { + options ??= new(); + options.AddPolicy( + new GenericActionPipelinePolicy((m) => m.Request?.Headers.Set("OpenAI-Beta", "assistants=v1")), + PipelinePosition.PerCall); + _clientConnector = new(model: "none", credential, options); + } + + public virtual ClientResult<Assistant> CreateAssistant( + string modelName, + AssistantCreationOptions options = null) + { + Internal.Models.CreateAssistantRequest request = CreateInternalCreateAssistantRequest(modelName, options); + ClientResult<Internal.Models.AssistantObject> internalResult = Shim.CreateAssistant(request); + return ClientResult.FromValue(new Assistant(internalResult.Value), internalResult.GetRawResponse()); + } + + public virtual async Task<ClientResult<Assistant>> CreateAssistantAsync( + string modelName, + AssistantCreationOptions options = null) + { + Internal.Models.CreateAssistantRequest request = CreateInternalCreateAssistantRequest(modelName, options); + ClientResult<Internal.Models.AssistantObject> internalResult = await Shim.CreateAssistantAsync(request).ConfigureAwait(false); + return ClientResult.FromValue(new Assistant(internalResult.Value), internalResult.GetRawResponse()); + } + + public virtual ClientResult<Assistant> GetAssistant(string assistantId) + { + ClientResult<Internal.Models.AssistantObject> internalResult = Shim.GetAssistant(assistantId); + return ClientResult.FromValue(new Assistant(internalResult.Value), internalResult.GetRawResponse()); + } + + public virtual async Task<ClientResult<Assistant>> GetAssistantAsync( + string assistantId) + { + ClientResult<Internal.Models.AssistantObject> internalResult = await Shim.GetAssistantAsync(assistantId).ConfigureAwait(false); + return ClientResult.FromValue(new Assistant(internalResult.Value), internalResult.GetRawResponse()); + } + + public virtual ClientResult<ListQueryPage<Assistant>> GetAssistants( + int? maxResults = null, + CreatedAtSortOrder? createdSortOrder = null, + string previousAssistantId = null, + string subsequentAssistantId = null) + { + ClientResult<Internal.Models.ListAssistantsResponse> internalFunc() => Shim.GetAssistants( + maxResults, + ToInternalListOrder(createdSortOrder), + previousAssistantId, + subsequentAssistantId); + return GetListQueryPage<Assistant, Internal.Models.ListAssistantsResponse>(internalFunc); + } + + public virtual Task<ClientResult<ListQueryPage<Assistant>>> GetAssistantsAsync( + int? maxResults = null, + CreatedAtSortOrder? 
createdSortOrder = null, + string previousAssistantId = null, + string subsequentAssistantId = null) + { + Task> internalAsyncFunc() => Shim.GetAssistantsAsync( + maxResults, + ToInternalListOrder(createdSortOrder), + previousAssistantId, + subsequentAssistantId); + return GetListQueryPageAsync(internalAsyncFunc); + } + + public virtual ClientResult ModifyAssistant( + string assistantId, + AssistantModificationOptions options) + { + ClientResult internalResult + = Shim.ModifyAssistant(assistantId, CreateInternalModifyAssistantRequest(options)); + return ClientResult.FromValue(new Assistant(internalResult.Value), internalResult.GetRawResponse()); + } + + public virtual async Task> ModifyAssistantAsync( + string assistantId, + AssistantModificationOptions options) + { + Internal.Models.ModifyAssistantRequest request = CreateInternalModifyAssistantRequest(options); + ClientResult internalResult = await Shim.ModifyAssistantAsync(assistantId, request).ConfigureAwait(false); + return ClientResult.FromValue(new Assistant(internalResult.Value), internalResult.GetRawResponse()); + } + + public virtual ClientResult DeleteAssistant( + string assistantId) + { + ClientResult internalResponse = Shim.DeleteAssistant(assistantId); + return ClientResult.FromValue(internalResponse.Value.Deleted, internalResponse.GetRawResponse()); + } + + public virtual async Task> DeleteAssistantAsync( + string assistantId) + { + ClientResult internalResponse = await Shim.DeleteAssistantAsync(assistantId).ConfigureAwait(false); + return ClientResult.FromValue(internalResponse.Value.Deleted, internalResponse.GetRawResponse()); + } + + public virtual ClientResult CreateAssistantFileAssociation( + string assistantId, + string fileId) + { + ClientResult internalResult + = Shim.CreateAssistantFile(assistantId, new(fileId)); + return ClientResult.FromValue(new AssistantFileAssociation(internalResult.Value), internalResult.GetRawResponse()); + } + + public virtual async Task> CreateAssistantFileAssociationAsync( + string assistantId, + string fileId) + { + ClientResult internalResult = await Shim.CreateAssistantFileAsync(assistantId, new(fileId)).ConfigureAwait(false); + return ClientResult.FromValue(new AssistantFileAssociation(internalResult.Value), internalResult.GetRawResponse()); + } + + public virtual ClientResult GetAssistantFileAssociation( + string assistantId, + string fileId) + { + ClientResult internalResult = Shim.GetAssistantFile(assistantId, fileId); + return ClientResult.FromValue(new AssistantFileAssociation(internalResult.Value), internalResult.GetRawResponse()); + } + + public virtual async Task> GetAssistantFileAssociationAsync( + string assistantId, + string fileId) + { + ClientResult internalResult = await Shim.GetAssistantFileAsync(assistantId, fileId).ConfigureAwait(false); + return ClientResult.FromValue(new AssistantFileAssociation(internalResult.Value), internalResult.GetRawResponse()); + } + + public virtual ClientResult> GetAssistantFileAssociations( + string assistantId, + int? maxResults = null, + CreatedAtSortOrder? createdSortOrder = null, + string previousId = null, + string subsequentId = null) + { + ClientResult internalFunc() => Shim.GetAssistantFiles( + assistantId, + maxResults, + ToInternalListOrder(createdSortOrder), + previousId, + subsequentId); + return GetListQueryPage(internalFunc); + } + + public virtual Task>> GetAssistantFileAssociationsAsync( + string assistantId, + int? maxResults = null, + CreatedAtSortOrder? 
createdSortOrder = null, + string previousId = null, + string subsequentId = null) + { + Func>> internalFunc + = () => Shim.GetAssistantFilesAsync( + assistantId, + maxResults, + ToInternalListOrder(createdSortOrder), + previousId, + subsequentId); + return GetListQueryPageAsync(internalFunc); + } + + public virtual ClientResult RemoveAssistantFileAssociation( + string assistantId, + string fileId) + { + ClientResult internalResult + = Shim.DeleteAssistantFile(assistantId, fileId); + return ClientResult.FromValue(internalResult.Value.Deleted, internalResult.GetRawResponse()); + } + + public virtual async Task> RemoveAssistantFileAssociationAsync( + string assistantId, + string fileId) + { + ClientResult internalResult = await Shim.DeleteAssistantFileAsync(assistantId, fileId).ConfigureAwait(false); + return ClientResult.FromValue(internalResult.Value.Deleted, internalResult.GetRawResponse()); + } + + public virtual ClientResult CreateThread( + ThreadCreationOptions options = null) + { + Internal.Models.CreateThreadRequest request = CreateInternalCreateThreadRequest(options); + ClientResult internalResult = ThreadShim.CreateThread(request); + return ClientResult.FromValue(new AssistantThread(internalResult.Value), internalResult.GetRawResponse()); + } + + public virtual async Task> CreateThreadAsync( + ThreadCreationOptions options = null) + { + Internal.Models.CreateThreadRequest request = CreateInternalCreateThreadRequest(options); + ClientResult internalResult = await ThreadShim.CreateThreadAsync(request).ConfigureAwait(false); + return ClientResult.FromValue(new AssistantThread(internalResult.Value), internalResult.GetRawResponse()); + } + + public virtual ClientResult GetThread(string threadId) + { + ClientResult internalResult = ThreadShim.GetThread(threadId); + return ClientResult.FromValue(new AssistantThread(internalResult.Value), internalResult.GetRawResponse()); + } + + public virtual async Task> GetThreadAsync( + string threadId) + { + ClientResult internalResult = await ThreadShim.GetThreadAsync(threadId).ConfigureAwait(false); + return ClientResult.FromValue(new AssistantThread(internalResult.Value), internalResult.GetRawResponse()); + } + + public virtual ClientResult ModifyThread( + string threadId, + ThreadModificationOptions options) + { + Internal.Models.ModifyThreadRequest request = new( + options.Metadata, + serializedAdditionalRawData: null); + ClientResult internalResult = ThreadShim.ModifyThread(threadId, request); + return ClientResult.FromValue(new AssistantThread(internalResult.Value), internalResult.GetRawResponse()); + } + + public virtual async Task> ModifyThreadAsync( + string threadId, + ThreadModificationOptions options) + { + Internal.Models.ModifyThreadRequest request = new( + options.Metadata, + serializedAdditionalRawData: null); + ClientResult internalResult = await ThreadShim.ModifyThreadAsync(threadId, request).ConfigureAwait(false); + return ClientResult.FromValue(new AssistantThread(internalResult.Value), internalResult.GetRawResponse()); + } + + public virtual ClientResult DeleteThread(string threadId) + { + ClientResult internalResult = ThreadShim.DeleteThread(threadId); + return ClientResult.FromValue(internalResult.Value.Deleted, internalResult.GetRawResponse()); + } + + public virtual async Task> DeleteThreadAsync(string threadId) + { + ClientResult internalResult = await ThreadShim.DeleteThreadAsync(threadId).ConfigureAwait(false); + return ClientResult.FromValue(internalResult.Value.Deleted, internalResult.GetRawResponse()); + } + + public 
virtual ClientResult CreateMessage( + string threadId, + MessageRole role, + string content, + MessageCreationOptions options = null) + { + Internal.Models.CreateMessageRequest request = new( + ToInternalRequestRole(role), + content, + options.FileIds, + options.Metadata, + serializedAdditionalRawData: null); + ClientResult internalResult = MessageShim.CreateMessage(threadId, request); + return ClientResult.FromValue(new ThreadMessage(internalResult.Value), internalResult.GetRawResponse()); + } + + public virtual async Task> CreateMessageAsync( + string threadId, + MessageRole role, + string content, + MessageCreationOptions options = null) + { + Internal.Models.CreateMessageRequest request = new( + ToInternalRequestRole(role), + content, + options.FileIds, + options.Metadata, + serializedAdditionalRawData: null); + ClientResult internalResult = await MessageShim.CreateMessageAsync(threadId, request).ConfigureAwait(false); + return ClientResult.FromValue(new ThreadMessage(internalResult.Value), internalResult.GetRawResponse()); + } + + public virtual ClientResult GetMessage( + string threadId, + string messageId) + { + ClientResult internalResult = MessageShim.GetMessage(threadId, messageId); + return ClientResult.FromValue(new ThreadMessage(internalResult.Value), internalResult.GetRawResponse()); + } + + public virtual async Task> GetMessageAsync( + string threadId, + string messageId) + { + ClientResult internalResult = await MessageShim.GetMessageAsync(threadId, messageId).ConfigureAwait(false); + return ClientResult.FromValue(new ThreadMessage(internalResult.Value), internalResult.GetRawResponse()); + } + + public virtual ClientResult ModifyMessage( + string threadId, + string messageId, + MessageModificationOptions options) + { + Internal.Models.ModifyMessageRequest request = new( + options.Metadata, + serializedAdditionalRawData: null); + ClientResult internalResult = MessageShim.ModifyMessage(threadId, messageId, request); + return ClientResult.FromValue(new ThreadMessage(internalResult.Value), internalResult.GetRawResponse()); + } + + public virtual async Task> ModifyMessageAsync( + string threadId, + string messageId, + MessageModificationOptions options) + { + Internal.Models.ModifyMessageRequest request = new( + options.Metadata, + serializedAdditionalRawData: null); + ClientResult internalResult = await MessageShim.ModifyMessageAsync(threadId, messageId, request).ConfigureAwait(false); + return ClientResult.FromValue(new ThreadMessage(internalResult.Value), internalResult.GetRawResponse()); + } + + public virtual ClientResult> GetMessages( + string threadId, + int? maxResults = null, + CreatedAtSortOrder? createdSortOrder = null, + string previousMessageId = null, + string subsequentMessageId = null) + { + ClientResult internalFunc() => MessageShim.GetMessages( + threadId, + maxResults, + ToInternalListOrder(createdSortOrder), + previousMessageId, + subsequentMessageId); + return GetListQueryPage(internalFunc); + } + + public virtual Task>> GetMessagesAsync( + string threadId, + int? maxResults = null, + CreatedAtSortOrder? 
createdSortOrder = null, + string previousMessageId = null, + string subsequentMessageId = null) + { + Func>> internalFunc = () => MessageShim.GetMessagesAsync( + threadId, + maxResults, + ToInternalListOrder(createdSortOrder), + previousMessageId, + subsequentMessageId); + return GetListQueryPageAsync(internalFunc); + } + + public virtual ClientResult GetMessageFileAssociation( + string threadId, + string messageId, + string fileId) + { + ClientResult internalResult + = MessageShim.GetMessageFile(threadId, messageId, fileId); + return ClientResult.FromValue(new MessageFileAssociation(internalResult.Value), internalResult.GetRawResponse()); + } + + public virtual async Task> GetMessageFileAssociationAsync( + string threadId, + string messageId, + string fileId) + { + ClientResult internalResult = await MessageShim.GetMessageFileAsync(threadId, messageId, fileId).ConfigureAwait(false); + return ClientResult.FromValue(new MessageFileAssociation(internalResult.Value), internalResult.GetRawResponse()); + } + + public virtual ClientResult> GetMessageFileAssociations( + string threadId, + string messageId, + int? maxResults = null, + CreatedAtSortOrder? createdSortOrder = null, + string previousId = null, + string subsequentId = null) + { + ClientResult internalFunc() => MessageShim.GetMessageFiles( + threadId, + messageId, + maxResults, + ToInternalListOrder(createdSortOrder), + previousId, + subsequentId); + return GetListQueryPage(internalFunc); + } + + public virtual Task>> GetMessageFileAssociationsAsync( + string threadId, + string messageId, + int? maxResults = null, + CreatedAtSortOrder? createdSortOrder = null, + string previousId = null, + string subsequentId = null) + { + Task> internalFunc() => MessageShim.GetMessageFilesAsync( + threadId, + messageId, + maxResults, + ToInternalListOrder(createdSortOrder), + previousId, + subsequentId); + return GetListQueryPageAsync(internalFunc); + } + + public virtual ClientResult CreateRun( + string threadId, + string assistantId, + RunCreationOptions options = null) + { + Internal.Models.CreateRunRequest request = CreateInternalCreateRunRequest(assistantId, options); + ClientResult internalResult = RunShim.CreateRun(threadId, request); + return ClientResult.FromValue(new ThreadRun(internalResult.Value), internalResult.GetRawResponse()); + } + + public virtual async Task> CreateRunAsync( + string threadId, + string assistantId, + RunCreationOptions options = null) + { + Internal.Models.CreateRunRequest request = CreateInternalCreateRunRequest(assistantId, options); + ClientResult internalResult = await RunShim.CreateRunAsync(threadId, request).ConfigureAwait(false); + return ClientResult.FromValue(new ThreadRun(internalResult.Value), internalResult.GetRawResponse()); + } + + public virtual ClientResult CreateThreadAndRun( + string assistantId, + ThreadCreationOptions threadOptions = null, + RunCreationOptions runOptions = null) + { + Internal.Models.CreateThreadAndRunRequest request + = CreateInternalCreateThreadAndRunRequest(assistantId, threadOptions, runOptions); + ClientResult internalResult = RunShim.CreateThreadAndRun(request); + return ClientResult.FromValue(new ThreadRun(internalResult.Value), internalResult.GetRawResponse()); + } + + public virtual async Task> CreateThreadAndRunAsync( + string assistantId, + ThreadCreationOptions threadOptions = null, + RunCreationOptions runOptions = null) + { + Internal.Models.CreateThreadAndRunRequest request + = CreateInternalCreateThreadAndRunRequest(assistantId, threadOptions, runOptions); + 
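+ // The combined request allows the service to create the thread and start the run in a single + // round trip, rather than requiring separate CreateThread and CreateRun calls.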
ClientResult internalResult = await RunShim.CreateThreadAndRunAsync(request).ConfigureAwait(false); + return ClientResult.FromValue(new ThreadRun(internalResult.Value), internalResult.GetRawResponse()); + } + + public virtual ClientResult GetRun(string threadId, string runId) + { + ClientResult internalResult = RunShim.GetRun(threadId, runId); + return ClientResult.FromValue(new ThreadRun(internalResult.Value), internalResult.GetRawResponse()); + } + + public virtual async Task> GetRunAsync(string threadId, string runId) + { + ClientResult internalResult = await RunShim.GetRunAsync(threadId, runId).ConfigureAwait(false); + return ClientResult.FromValue(new ThreadRun(internalResult.Value), internalResult.GetRawResponse()); + } + + public virtual ClientResult> GetRuns( + string threadId, + int? maxResults = null, + CreatedAtSortOrder? createdSortOrder = null, + string previousRunId = null, + string subsequentRunId = null) + { + ClientResult internalFunc() => RunShim.GetRuns( + threadId, + maxResults, + ToInternalListOrder(createdSortOrder), + previousRunId, + subsequentRunId); + return GetListQueryPage(internalFunc); + } + + public virtual Task>> GetRunsAsync( + string threadId, + int? maxResults = null, + CreatedAtSortOrder? createdSortOrder = null, + string previousRunId = null, + string subsequentRunId = null) + { + Func>> internalFunc = () => RunShim.GetRunsAsync( + threadId, + maxResults, + ToInternalListOrder(createdSortOrder), + previousRunId, + subsequentRunId); + return GetListQueryPageAsync(internalFunc); + } + + public virtual ClientResult ModifyRun(string threadId, string runId, RunModificationOptions options) + { + Internal.Models.ModifyRunRequest request = new(options.Metadata, serializedAdditionalRawData: null); + ClientResult internalResult = RunShim.ModifyRun(threadId, runId, request); + return ClientResult.FromValue(new ThreadRun(internalResult.Value), internalResult.GetRawResponse()); + } + + public virtual async Task> ModifyRunAsync(string threadId, string runId, RunModificationOptions options) + { + Internal.Models.ModifyRunRequest request = new(options.Metadata, serializedAdditionalRawData: null); + ClientResult internalResult = await RunShim.ModifyRunAsync(threadId, runId, request).ConfigureAwait(false); + return ClientResult.FromValue(new ThreadRun(internalResult.Value), internalResult.GetRawResponse()); + } + + public virtual ClientResult CancelRun(string threadId, string runId) + { + ClientResult internalResult = RunShim.CancelRun(threadId, runId); + return ClientResult.FromValue(true, internalResult.GetRawResponse()); + } + + public virtual async Task> CancelRunAsync(string threadId, string runId) + { + ClientResult internalResult = await RunShim.CancelRunAsync(threadId, runId).ConfigureAwait(false); + return ClientResult.FromValue(true, internalResult.GetRawResponse()); + } + + public virtual ClientResult SubmitToolOutputs(string threadId, string runId, IEnumerable toolOutputs) + { + List requestToolOutputs = []; + + foreach (ToolOutput toolOutput in toolOutputs) + { + requestToolOutputs.Add(new(toolOutput.Id, toolOutput.Output, null)); + } + + Internal.Models.SubmitToolOutputsRunRequest request = new(requestToolOutputs, null); + ClientResult internalResult = RunShim.SubmitToolOuputsToRun(threadId, runId, request); + return ClientResult.FromValue(new ThreadRun(internalResult.Value), internalResult.GetRawResponse()); + } + + public virtual async Task> SubmitToolOutputsAsync(string threadId, string runId, IEnumerable toolOutputs) + { + List requestToolOutputs = []; 
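+ + // Each caller-supplied ToolOutput pairs the tool call ID issued by the run's required action + // with the output produced for it, e.g. (hypothetical values) Id = "call_abc123", Output = "{ \"temperature\": 22 }".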
+ + foreach (ToolOutput toolOutput in toolOutputs) + { + requestToolOutputs.Add(new(toolOutput.Id, toolOutput.Output, null)); + } + + Internal.Models.SubmitToolOutputsRunRequest request = new(requestToolOutputs, null); + ClientResult internalResult = await RunShim.SubmitToolOuputsToRunAsync(threadId, runId, request).ConfigureAwait(false); + return ClientResult.FromValue(new ThreadRun(internalResult.Value), internalResult.GetRawResponse()); + } + + internal static Internal.Models.CreateAssistantRequest CreateInternalCreateAssistantRequest( + string modelName, + AssistantCreationOptions options) + { + options ??= new(); + return new Internal.Models.CreateAssistantRequest( + modelName, + options.Name, + options.Description, + options.Instructions, + ToInternalBinaryDataList(options.Tools), + options.FileIds, + options.Metadata, + serializedAdditionalRawData: null); + } + + internal static Internal.Models.ModifyAssistantRequest CreateInternalModifyAssistantRequest( + AssistantModificationOptions options) + { + return new Internal.Models.ModifyAssistantRequest( + options.Model, + options.Name, + options.Description, + options.Instructions, + ToInternalBinaryDataList(options.Tools), + options.FileIds, + options.Metadata, + serializedAdditionalRawData: null); + } + + internal static Internal.Models.CreateThreadRequest CreateInternalCreateThreadRequest( + ThreadCreationOptions options) + { + options ??= new(); + return new Internal.Models.CreateThreadRequest( + ToInternalCreateMessageRequestList(options.Messages), + options.Metadata, + serializedAdditionalRawData: null); + } + + internal static Internal.Models.CreateRunRequest CreateInternalCreateRunRequest( + string assistantId, + RunCreationOptions options = null) + { + options ??= new(); + return new( + assistantId, + options.OverrideModel, + options.OverrideInstructions, + options.AdditionalInstructions, + ToInternalBinaryDataList(options.OverrideTools), + options.Metadata, + serializedAdditionalRawData: null); + } + + internal static Internal.Models.CreateThreadAndRunRequest CreateInternalCreateThreadAndRunRequest( + string assistantId, + ThreadCreationOptions threadOptions, + RunCreationOptions runOptions) + { + threadOptions ??= new(); + runOptions ??= new(); + Internal.Models.CreateThreadRequest internalThreadOptions = CreateInternalCreateThreadRequest(threadOptions); + return new Internal.Models.CreateThreadAndRunRequest( + assistantId, + internalThreadOptions, + runOptions?.OverrideModel, + runOptions.OverrideInstructions, + ToInternalBinaryDataList(runOptions?.OverrideTools), + runOptions?.Metadata, + serializedAdditionalRawData: null); + } + + internal static ChangeTrackingList ToInternalBinaryDataList(IEnumerable values) + where T : IPersistableModel + { + ChangeTrackingList internalList = []; + foreach (T value in values) + { + internalList.Add(ModelReaderWriter.Write(value)); + } + return internalList; + } + + internal static Internal.Models.ListOrder? ToInternalListOrder(CreatedAtSortOrder? 
order) + { + if (order == null) + { + return null; + } + return order switch + { + CreatedAtSortOrder.OldestFirst => Internal.Models.ListOrder.Asc, + CreatedAtSortOrder.NewestFirst => Internal.Models.ListOrder.Desc, + _ => throw new ArgumentOutOfRangeException(nameof(order)), + }; + } + + internal static Internal.Models.CreateMessageRequestRole ToInternalRequestRole(MessageRole role) + => role switch + { + MessageRole.User => Internal.Models.CreateMessageRequestRole.User, + _ => throw new ArgumentOutOfRangeException(nameof(role)), + }; + + internal static ChangeTrackingList<Internal.Models.CreateMessageRequest> ToInternalCreateMessageRequestList( + IEnumerable<ThreadInitializationMessage> messages) + { + ChangeTrackingList<Internal.Models.CreateMessageRequest> internalList = []; + foreach (ThreadInitializationMessage message in messages) + { + internalList.Add(new Internal.Models.CreateMessageRequest( + ToInternalRequestRole(message.Role), + message.Content, + message.FileIds, + message.Metadata, + serializedAdditionalRawData: null)); + } + return internalList; + } + + internal virtual ClientResult<ListQueryPage<T>> GetListQueryPage<T, U>(Func<ClientResult<U>> internalFunc) + where T : class + where U : class + { + ClientResult<U> internalResult = internalFunc.Invoke(); + ListQueryPage<T> convertedValue = ListQueryPage.Create(internalResult.Value) as ListQueryPage<T>; + return ClientResult.FromValue(convertedValue, internalResult.GetRawResponse()); + } + + internal virtual async Task<ClientResult<ListQueryPage<T>>> GetListQueryPageAsync<T, U>(Func<Task<ClientResult<U>>> internalAsyncFunc) + where T : class + where U : class + { + ClientResult<U> internalResult = await internalAsyncFunc.Invoke().ConfigureAwait(false); + ListQueryPage<T> convertedValue = ListQueryPage.Create(internalResult.Value) as ListQueryPage<T>; + return ClientResult.FromValue(convertedValue, internalResult.GetRawResponse()); + } +} diff --git a/.dotnet/src/Custom/Assistants/AssistantCreationOptions.cs b/.dotnet/src/Custom/Assistants/AssistantCreationOptions.cs new file mode 100644 index 000000000..527a1eb77 --- /dev/null +++ b/.dotnet/src/Custom/Assistants/AssistantCreationOptions.cs @@ -0,0 +1,65 @@ +using OpenAI.ClientShared.Internal; +using System.ClientModel.Internal; + +using System.Collections.Generic; + +namespace OpenAI.Assistants; + +/// +/// Represents additional options available when creating a new <see cref="Assistant"/>. +/// +public partial class AssistantCreationOptions +{ + /// + /// An optional display name for the assistant. + /// + public string Name { get; set; } + /// + /// A description to associate with the assistant. + /// + public string Description { get; set; } + + /// + /// Default instructions for the assistant to use when creating messages. + /// + public string Instructions { get; set; } + + /// + /// A collection of default tool definitions to enable for the assistant. Available tools include: + /// + /// + /// + /// code_interpreter - + /// - works with data, math, and computer code + /// + /// + /// retrieval - + /// - dynamically enriches an assistant's context with content from uploaded, indexed files + /// + /// + /// function - + /// - enables caller-provided custom functions for actions and enrichment + /// + /// + /// + /// + public IList<ToolDefinition> Tools { get; } = new ChangeTrackingList<ToolDefinition>(); + + /// + /// A collection of IDs for previously uploaded files that are made accessible to the assistant. These IDs are the + /// basis for the functionality of file-based tools like retrieval. + /// + public IList<string> FileIds { get; } = new ChangeTrackingList<string>(); + + /// + /// An optional key/value mapping of additional, supplemental data items to attach to the <see cref="Assistant"/>. + /// This information may be useful for storing custom details in a structured format. 
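+ /// For example, a caller might store a hypothetical tracking entry: options.Metadata.Add("internalDocumentId", "doc_12345").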
+ /// + /// + /// + /// Keys can be a maximum of 64 characters in length. + /// Values can be a maximum of 512 characters in length. + /// + /// + public IDictionary Metadata { get; } = new ChangeTrackingDictionary(); +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Assistants/AssistantFileAssociation.cs b/.dotnet/src/Custom/Assistants/AssistantFileAssociation.cs new file mode 100644 index 000000000..7a563c06e --- /dev/null +++ b/.dotnet/src/Custom/Assistants/AssistantFileAssociation.cs @@ -0,0 +1,17 @@ +using System; + +namespace OpenAI.Assistants; + +public partial class AssistantFileAssociation +{ + public string AssistantId { get; } + public string FileId { get; } + public DateTimeOffset CreatedAt { get; } + + internal AssistantFileAssociation(Internal.Models.AssistantFileObject internalFile) + { + AssistantId = internalFile.AssistantId; + FileId = internalFile.Id; + CreatedAt = internalFile.CreatedAt; + } +} diff --git a/.dotnet/src/Custom/Assistants/AssistantModificationOptions.cs b/.dotnet/src/Custom/Assistants/AssistantModificationOptions.cs new file mode 100644 index 000000000..c8c3bbe01 --- /dev/null +++ b/.dotnet/src/Custom/Assistants/AssistantModificationOptions.cs @@ -0,0 +1,71 @@ +using OpenAI.ClientShared.Internal; +using System.ClientModel.Internal; + +using System.Collections.Generic; + +namespace OpenAI.Assistants; + +/// +/// Represents additional options available when modifying an existing . +/// +public partial class AssistantModificationOptions +{ + /// + /// The new model that the assistant should use when creating messages. + /// + public string Model { get; } + + /// + /// A new, friendly name for the assistant. Its will remain unchanged. + /// + public string Name { get; } + + /// + /// A new description to associate with the assistant. + /// + public string Description { get; } + + /// + /// New, default instructions for the assistant to use when creating messages. + /// + public string Instructions { get; } + + /// + /// A new collection of default tool definitions to enable for the assistant. Available tools include: + /// + /// + /// + /// code_interpreter - + /// - works with data, math, and computer code + /// + /// + /// retrieval - + /// - dynamically enriches an assistant's context with content from uploaded, indexed files + /// + /// + /// function - + /// - enables caller-provided custom functions for actions and enrichment + /// + /// + /// + /// + public IList Tools { get; } = new ChangeTrackingList(); + + /// + /// A new collection of IDs for previously uploaded files that are made accessible to the assistant. These IDs are + /// the basis for the functionality of file-based tools like retrieval. + /// + public IList FileIds { get; } = new ChangeTrackingList(); + + /// + /// A replacement for the optional key/value mapping of additional, supplemental data items to attach to the + /// . This information may be useful for storing custom details in a structured format. + /// + /// + /// + /// Keys can be a maximum of 64 characters in length. + /// Values can be a maximum of 512 characters in length. 
+ /// + /// + public IDictionary Metadata { get; } = new ChangeTrackingDictionary(); +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Assistants/AssistantThread.cs b/.dotnet/src/Custom/Assistants/AssistantThread.cs new file mode 100644 index 000000000..f6c85c9ad --- /dev/null +++ b/.dotnet/src/Custom/Assistants/AssistantThread.cs @@ -0,0 +1,31 @@ +using System; +using System.Collections.Generic; +namespace OpenAI.Assistants; + +public partial class AssistantThread +{ + public string Id { get; } + + public DateTimeOffset CreatedAt { get; } + + /// + /// An optional key/value mapping of additional, supplemental data items to attach to the . + /// This information may be useful for storing custom details in a structured format. + /// + /// + /// + /// Keys can be a maximum of 64 characters in length. + /// Values can be a maximum of 512 characters in length. + /// + /// + public IReadOnlyDictionary Metadata { get; } + + + internal AssistantThread(Internal.Models.ThreadObject internalThread) + { + Id = internalThread.Id; + Metadata = internalThread.Metadata; + CreatedAt = internalThread.CreatedAt; + } + +} diff --git a/.dotnet/src/Custom/Assistants/CodeInterpreterToolDefinition.cs b/.dotnet/src/Custom/Assistants/CodeInterpreterToolDefinition.cs new file mode 100644 index 000000000..995e9ffb3 --- /dev/null +++ b/.dotnet/src/Custom/Assistants/CodeInterpreterToolDefinition.cs @@ -0,0 +1,38 @@ +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Text.Json; + +namespace OpenAI.Assistants; + +public partial class CodeInterpreterToolDefinition : ToolDefinition +{ + public CodeInterpreterToolDefinition() + { } + + internal static CodeInterpreterToolDefinition DeserializeCodeInterpreterToolDefinition( + JsonElement element, + ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("code_interpreter"u8)) + { + continue; + } + } + + return new CodeInterpreterToolDefinition(); + } + + internal override void WriteDerived(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + writer.WriteString("type"u8, "code_interpreter"u8); + } +} diff --git a/.dotnet/src/Custom/Assistants/CodeInterpreterToolInfo.cs b/.dotnet/src/Custom/Assistants/CodeInterpreterToolInfo.cs new file mode 100644 index 000000000..b464fd2f3 --- /dev/null +++ b/.dotnet/src/Custom/Assistants/CodeInterpreterToolInfo.cs @@ -0,0 +1,33 @@ +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Text.Json; + +namespace OpenAI.Assistants; + +public partial class CodeInterpreterToolInfo : ToolInfo +{ + internal CodeInterpreterToolInfo() + { } + + internal static CodeInterpreterToolInfo DeserializeCodeInterpreterToolInfo( + JsonElement element, + ModelReaderWriterOptions options) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + + foreach (var property in element.EnumerateObject()) + { + } + return new CodeInterpreterToolInfo(); + } + + internal override void WriteDerived(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + writer.WriteString("type"u8, "code_interpreter"u8); + } +} diff --git a/.dotnet/src/Custom/Assistants/CreatedAtSortOrder.cs b/.dotnet/src/Custom/Assistants/CreatedAtSortOrder.cs new file mode 100644 index 000000000..d10e11781 --- /dev/null +++ 
b/.dotnet/src/Custom/Assistants/CreatedAtSortOrder.cs @@ -0,0 +1,7 @@ +namespace OpenAI.Assistants; + +public enum CreatedAtSortOrder +{ + NewestFirst, + OldestFirst, +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Assistants/FunctionToolDefinition.cs b/.dotnet/src/Custom/Assistants/FunctionToolDefinition.cs new file mode 100644 index 000000000..d6687feec --- /dev/null +++ b/.dotnet/src/Custom/Assistants/FunctionToolDefinition.cs @@ -0,0 +1,89 @@ +using System; +using System.ClientModel.Internal; + +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Diagnostics.CodeAnalysis; +using System.Text.Json; +using OpenAI.ClientShared.Internal; + +namespace OpenAI.Assistants; + +public partial class FunctionToolDefinition : ToolDefinition +{ + public required string Name { get; set; } + public string Description { get; set; } + public BinaryData Parameters { get; set; } + + [SetsRequiredMembers] + public FunctionToolDefinition(string name, string description = null, BinaryData parameters = null) + { + Name = name; + Description = description; + Parameters = parameters; + } + + public FunctionToolDefinition() + { } + + internal static FunctionToolDefinition DeserializeFunctionToolDefinition( + JsonElement element, + ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + + string name = null; + string description = null; + BinaryData parameters = null; + + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("function"u8)) + { + foreach (var functionProperty in property.Value.EnumerateObject()) + { + if (functionProperty.NameEquals("name"u8)) + { + name = functionProperty.Value.GetString(); + continue; + } + if (functionProperty.NameEquals("description"u8)) + { + description = functionProperty.Value.GetString(); + continue; + } + if (functionProperty.NameEquals("parameters")) + { + parameters = BinaryData.FromObjectAsJson(functionProperty.Value.GetRawText()); + continue; + } + } + } + } + + return new FunctionToolDefinition(name, description, parameters); + } + + internal override void WriteDerived(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + writer.WriteString("type"u8, "function"u8); + writer.WritePropertyName("function"u8); + writer.WriteStartObject(); + writer.WriteString("name"u8, Name); + if (Optional.IsDefined(Description)) + { + writer.WriteString("description"u8, Description); + } + if (Optional.IsDefined(Parameters)) + { + writer.WritePropertyName("parameters"u8); + writer.WriteRawValue(Parameters.ToString()); + } + writer.WriteEndObject(); + } +} diff --git a/.dotnet/src/Custom/Assistants/FunctionToolInfo.cs b/.dotnet/src/Custom/Assistants/FunctionToolInfo.cs new file mode 100644 index 000000000..b6edc1fc5 --- /dev/null +++ b/.dotnet/src/Custom/Assistants/FunctionToolInfo.cs @@ -0,0 +1,82 @@ +using System; +using System.ClientModel.Internal; + +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Text.Json; +using OpenAI.ClientShared.Internal; + +namespace OpenAI.Assistants; + +public partial class FunctionToolInfo : ToolInfo +{ + public string Name { get; } + public string Description { get; } + public BinaryData Parameters { get; } + + internal FunctionToolInfo(string name, string description, BinaryData parameters) + { + Name = name; + Description = description; + Parameters = parameters; + } + + internal static FunctionToolInfo DeserializeFunctionToolInfo( + 
JsonElement element, + ModelReaderWriterOptions options) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + + string name = null; + string description = null; + BinaryData parameters = null; + + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("function"u8)) + { + foreach (var functionObjectProperty in property.Value.EnumerateObject()) + { + if (functionObjectProperty.NameEquals("name"u8)) + { + name = functionObjectProperty.Value.GetString(); + continue; + } + if (functionObjectProperty.NameEquals("description"u8)) + { + description = functionObjectProperty.Value.GetString(); + continue; + } + if (functionObjectProperty.NameEquals("parameters"u8)) + { + parameters = BinaryData.FromObjectAsJson(functionObjectProperty.Value.GetRawText()); + continue; + } + } + } + } + return new FunctionToolInfo(name, description, parameters); + } + + internal override void WriteDerived(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + writer.WriteString("type"u8, "function"u8); + writer.WritePropertyName("function"u8); + writer.WriteStartObject(); + writer.WriteString("name"u8, Name); + if (Optional.IsDefined(Description)) + { + writer.WriteString("description"u8, Description); + } + if (Optional.IsDefined(Parameters)) + { + writer.WriteRawValue(Parameters.ToString()); + } + writer.WriteEndObject(); + } +} diff --git a/.dotnet/src/Custom/Assistants/ListQueryPage.cs b/.dotnet/src/Custom/Assistants/ListQueryPage.cs new file mode 100644 index 000000000..aa4eaefe9 --- /dev/null +++ b/.dotnet/src/Custom/Assistants/ListQueryPage.cs @@ -0,0 +1,114 @@ +using OpenAI.ClientShared.Internal; +using System; +using System.ClientModel.Internal; + +using System.Collections; +using System.Collections.Generic; +using System.Linq; + +namespace OpenAI.Assistants; + +public abstract partial class ListQueryPage +{ + public string FirstId { get; } + public string LastId { get; } + public bool HasMore { get; } + + internal ListQueryPage(string firstId, string lastId, bool hasMore) + { + FirstId = firstId; + LastId = lastId; + HasMore = hasMore; + } + + internal static ListQueryPage Create(Internal.Models.ListAssistantsResponse internalResponse) + { + ChangeTrackingList assistants = new(); + foreach (Internal.Models.AssistantObject internalAssistant in internalResponse.Data) + { + assistants.Add(new(internalAssistant)); + } + return new(assistants, internalResponse.FirstId, internalResponse.LastId, internalResponse.HasMore); + } + + internal static ListQueryPage Create(Internal.Models.ListAssistantFilesResponse internalResponse) + { + ChangeTrackingList assistantFileAssociations = new(); + foreach (Internal.Models.AssistantFileObject internalFile in internalResponse.Data) + { + assistantFileAssociations.Add(new(internalFile)); + } + return new(assistantFileAssociations, internalResponse.FirstId, internalResponse.LastId, internalResponse.HasMore); + } + + internal static ListQueryPage Create(Internal.Models.ListMessagesResponse internalResponse) + { + ChangeTrackingList messages = new(); + foreach (Internal.Models.MessageObject internalMessage in internalResponse.Data) + { + messages.Add(new(internalMessage)); + } + return new(messages, internalResponse.FirstId, internalResponse.LastId, internalResponse.HasMore); + } + + internal static ListQueryPage Create(Internal.Models.ListMessageFilesResponse internalResponse) + { + ChangeTrackingList messageFileAssociations = new(); + foreach 
(Internal.Models.MessageFileObject internalFile in internalResponse.Data) + { + messageFileAssociations.Add(new(internalFile)); + } + return new(messageFileAssociations, internalResponse.FirstId, internalResponse.LastId, internalResponse.HasMore); + } + + internal static ListQueryPage Create(Internal.Models.ListRunsResponse internalResponse) + { + ChangeTrackingList runs = new(); + foreach (Internal.Models.RunObject internalRun in internalResponse.Data) + { + runs.Add(new(internalRun)); + } + return new(runs, internalResponse.FirstId, internalResponse.LastId, internalResponse.HasMore); + } + + internal static ListQueryPage Create(T internalResponse) + where T : class + { + return internalResponse switch + { + Internal.Models.ListAssistantsResponse internalAssistantsResponse => Create(internalAssistantsResponse), + Internal.Models.ListAssistantFilesResponse internalFilesResponse => Create(internalFilesResponse), + Internal.Models.ListMessagesResponse internalMessagesResponse => Create(internalMessagesResponse), + Internal.Models.ListMessageFilesResponse internalMessageFilesResponse => Create(internalMessageFilesResponse), + Internal.Models.ListRunsResponse internalRunsResponse => Create(internalRunsResponse), + _ => throw new ArgumentException( + $"Unknown type for generic {nameof(ListQueryPage)} conversion: {internalResponse.GetType()}"), + }; + } +} + +public partial class ListQueryPage : ListQueryPage, IReadOnlyList + where T : class +{ + public IReadOnlyList Items { get; } + + /// + public int Count => Items.Count; + + /// + public T this[int index] + { + get => Items[index]; + } + + internal ListQueryPage(IEnumerable items, string firstId, string lastId, bool hasMore) + : base(firstId, lastId, hasMore) + { + Items = items.ToList(); + } + + /// + public IEnumerator GetEnumerator() => Items.GetEnumerator(); + + IEnumerator IEnumerable.GetEnumerator() => Items.GetEnumerator(); +} diff --git a/.dotnet/src/Custom/Assistants/MessageContent.Serialization.cs b/.dotnet/src/Custom/Assistants/MessageContent.Serialization.cs new file mode 100644 index 000000000..4d88129b3 --- /dev/null +++ b/.dotnet/src/Custom/Assistants/MessageContent.Serialization.cs @@ -0,0 +1,94 @@ +using System; +using System.ClientModel.Internal; + +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Text.Json; + +namespace OpenAI.Assistants; + +public abstract partial class MessageContent : IJsonModel +{ + MessageContent IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(MessageContent)} does not support '{format}' format."); + } + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeMessageContent(document.RootElement, options); + } + + MessageContent IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeMessageContent(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(MessageContent)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + writer.WriteStartObject(); + WriteDerived(writer, options); + writer.WriteEndObject(); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(MessageContent)} does not support '{options.Format}' format."); + } + } + + internal abstract void WriteDerived(Utf8JsonWriter writer, ModelReaderWriterOptions options); + + internal static MessageContent DeserializeMessageContent( + JsonElement element, + ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("type"u8)) + { + if (property.Value.ValueEquals("text"u8)) + { + return MessageTextContent.DeserializeMessageTextContent(element, options); + } + else if (property.Value.ValueEquals("image_file"u8)) + { + return MessageImageFileContent.DeserializeMessageImageFileContent(element, options); + } + else + { + throw new ArgumentException(property.Value.GetString()); + } + } + } + throw new ArgumentException(nameof(element)); + } + +} diff --git a/.dotnet/src/Custom/Assistants/MessageContent.cs b/.dotnet/src/Custom/Assistants/MessageContent.cs new file mode 100644 index 000000000..928137621 --- /dev/null +++ b/.dotnet/src/Custom/Assistants/MessageContent.cs @@ -0,0 +1,6 @@ +namespace OpenAI.Assistants; + + +public abstract partial class MessageContent +{ +} diff --git a/.dotnet/src/Custom/Assistants/MessageCreationOptions.cs b/.dotnet/src/Custom/Assistants/MessageCreationOptions.cs new file mode 100644 index 000000000..b11119fa3 --- /dev/null +++ b/.dotnet/src/Custom/Assistants/MessageCreationOptions.cs @@ -0,0 +1,30 @@ +using OpenAI.ClientShared.Internal; +using System.ClientModel.Internal; + +using System.Collections.Generic; + +namespace OpenAI.Assistants; + +/// +/// Represents additional options available when creating a new . +/// +public partial class MessageCreationOptions +{ + /// + /// A collection of IDs for previously uploaded files that are made accessible to the message. These IDs are the + /// basis for the functionality of file-based tools like retrieval. + /// + public IList FileIds { get; } = new ChangeTrackingList(); + + /// + /// An optional key/value mapping of additional, supplemental data items to attach to the . + /// This information may be useful for storing custom details in a structured format. + /// + /// + /// + /// Keys can be a maximum of 64 characters in length. + /// Values can be a maximum of 512 characters in length. 
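The `MessageContent` deserializer above dispatches on the JSON `type` discriminator, so callers always receive one of the concrete derived types. A minimal consumption sketch, assuming `client` is an `AssistantsClient` and `GetMessages` is a hypothetical placeholder for whichever client method returns a page of thread messages:

```csharp
using System;
using OpenAI.Assistants;

// `GetMessages` is a hypothetical method name for illustration only.
ListQueryPage<ThreadMessage> messages = client.GetMessages(threadId);

foreach (ThreadMessage message in messages)
{
    foreach (MessageContent contentItem in message.ContentItems)
    {
        // DeserializeMessageContent produces the derived type based on the
        // JSON "type" discriminator, so a type test recovers the payload.
        if (contentItem is MessageTextContent text)
        {
            Console.WriteLine($"[{message.Role}]: {text.Text}");
        }
        else if (contentItem is MessageImageFileContent image)
        {
            Console.WriteLine($"[{message.Role}]: <image: {image.FileId}>");
        }
    }
}
```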
+ /// + /// + public IDictionary Metadata { get; } = new ChangeTrackingDictionary(); +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Assistants/MessageFileAssociation.cs b/.dotnet/src/Custom/Assistants/MessageFileAssociation.cs new file mode 100644 index 000000000..93e7e3d68 --- /dev/null +++ b/.dotnet/src/Custom/Assistants/MessageFileAssociation.cs @@ -0,0 +1,17 @@ +using System; + +namespace OpenAI.Assistants; + +public partial class MessageFileAssociation +{ + public string MessageId { get; } + public string FileId { get; } + public DateTimeOffset CreatedAt { get; } + + internal MessageFileAssociation(Internal.Models.MessageFileObject internalFile) + { + MessageId = internalFile.MessageId; + FileId = internalFile.Id; + CreatedAt = internalFile.CreatedAt; + } +} diff --git a/.dotnet/src/Custom/Assistants/MessageImageFileContent.cs b/.dotnet/src/Custom/Assistants/MessageImageFileContent.cs new file mode 100644 index 000000000..68997b045 --- /dev/null +++ b/.dotnet/src/Custom/Assistants/MessageImageFileContent.cs @@ -0,0 +1,55 @@ +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Text.Json; + +namespace OpenAI.Assistants; + +public class MessageImageFileContent : MessageContent +{ + public string FileId { get; } + + internal MessageImageFileContent(string fileId) + { + FileId = fileId; + } + + internal override void WriteDerived(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + writer.WriteString("type"u8, "image_file"u8); + writer.WritePropertyName("image_file"u8); + writer.WriteStartObject(); + writer.WriteString("file_id"u8, FileId); + writer.WriteEndObject(); + } + + + internal static MessageContent DeserializeMessageImageFileContent( + JsonElement element, + ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + + string fileId = null; + + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("image_file"u8)) + { + foreach (var textObjectProperty in property.Value.EnumerateObject()) + { + if (textObjectProperty.NameEquals("file_id"u8)) + { + fileId = textObjectProperty.Value.GetString(); + continue; + } + } + } + } + return new MessageImageFileContent(fileId); + } +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Assistants/MessageModificationOptions.cs b/.dotnet/src/Custom/Assistants/MessageModificationOptions.cs new file mode 100644 index 000000000..947eccb6d --- /dev/null +++ b/.dotnet/src/Custom/Assistants/MessageModificationOptions.cs @@ -0,0 +1,21 @@ +using System.Collections.Generic; + +namespace OpenAI.Assistants; + +/// +/// Represents additional options available when modifying an existing . +/// +public partial class MessageModificationOptions +{ + /// + /// A replacement for the optional key/value mapping of additional, supplemental data items to attach to the + /// . This information may be useful for storing custom details in a structured format. + /// + /// + /// + /// Keys can be a maximum of 64 characters in length. + /// Values can be a maximum of 512 characters in length. 
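As a usage sketch for `MessageCreationOptions`: only the options shape is taken from this change; the `CreateMessage` call and its signature are assumptions standing in for the real client method.

```csharp
using OpenAI.Assistants;

MessageCreationOptions options = new();
// Attach a previously uploaded file so file-based tools can use it.
options.FileIds.Add("file-abc123"); // placeholder file ID
// Per the doc comments above: keys <= 64 chars, values <= 512 chars.
options.Metadata["category"] = "demo";

// Hypothetical client call; exact method name/signature is an assumption.
ThreadMessage message = client.CreateMessage(
    threadId, MessageRole.User, "Summarize the attached file.", options);
```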
+ /// + /// + public IDictionary Metadata { get; } = new ChangeTrackingDictionary(); +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Assistants/MessageRole.cs b/.dotnet/src/Custom/Assistants/MessageRole.cs new file mode 100644 index 000000000..c59e5abd2 --- /dev/null +++ b/.dotnet/src/Custom/Assistants/MessageRole.cs @@ -0,0 +1,16 @@ +namespace OpenAI.Assistants; + +/// +/// Represents the role associated with the message which indicates its source and purpose. +/// +public enum MessageRole +{ + /// + /// The user role, associated with caller input into the model. + /// + User, + /// + /// The assistant role, associated with model output in response to inputs from the user and tools. + /// + Assistant, +} diff --git a/.dotnet/src/Custom/Assistants/MessageTextContent.cs b/.dotnet/src/Custom/Assistants/MessageTextContent.cs new file mode 100644 index 000000000..43c0433ad --- /dev/null +++ b/.dotnet/src/Custom/Assistants/MessageTextContent.cs @@ -0,0 +1,72 @@ +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Assistants; + +public class MessageTextContent : MessageContent +{ + /// + /// The content text. The interpretation of this value will depend on which kind of chat message the content is + /// associated with. + /// + public string Text { get; } + + public IReadOnlyList Annotations { get; } + + internal MessageTextContent(string text, IReadOnlyList annotations) + { + Text = text; + Annotations = annotations; + } + + internal override void WriteDerived(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + writer.WriteString("type"u8, "text"u8); + writer.WritePropertyName("text"u8); + writer.WriteStartObject(); + writer.WriteString("value"u8, Text); + writer.WriteEndObject(); + } + + internal static MessageContent DeserializeMessageTextContent( + JsonElement element, + ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + + string text = null; + List annotations = null; + + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("text"u8)) + { + foreach (var textObjectProperty in property.Value.EnumerateObject()) + { + if (textObjectProperty.NameEquals("value"u8)) + { + text = textObjectProperty.Value.GetString(); + continue; + } + if (textObjectProperty.NameEquals("annotations"u8)) + { + annotations ??= []; + foreach (var annotationObject in textObjectProperty.Value.EnumerateArray()) + { + annotations.Add(TextContentAnnotation.DeserializeTextContentAnnotation(annotationObject, options)); + } + continue; + } + } + } + } + return new MessageTextContent(text, annotations); + } +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Assistants/RequiredFunctionToolCall.cs b/.dotnet/src/Custom/Assistants/RequiredFunctionToolCall.cs new file mode 100644 index 000000000..f0807495c --- /dev/null +++ b/.dotnet/src/Custom/Assistants/RequiredFunctionToolCall.cs @@ -0,0 +1,66 @@ +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Text.Json; + +namespace OpenAI.Assistants; + +public partial class RequiredFunctionToolCall : RequiredToolCall +{ + public string Name { get; } + public string Arguments { get; } + + internal RequiredFunctionToolCall(string id, string name, string arguments) + : base(id) + { + Name = name; + Arguments = arguments; + } + + internal static RequiredFunctionToolCall 
DeserializeRequiredFunctionToolCall( + JsonElement element, + ModelReaderWriterOptions options) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + + string id = null; + string name = null; + string arguments = null; + + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("function"u8)) + { + foreach (var functionProperty in property.Value.EnumerateObject()) + { + if (functionProperty.NameEquals("name"u8)) + { + name = functionProperty.Value.GetString(); + continue; + } + if (functionProperty.NameEquals("arguments"u8)) + { + arguments = functionProperty.Value.GetString(); + continue; + } + } + continue; + } + if (property.NameEquals("id"u8)) + { + id = property.Value.GetString(); + continue; + } + } + return new RequiredFunctionToolCall(id, name, arguments); + } + + internal override void WriteDerived(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + writer.WriteString("type"u8, "function"u8); + } +} diff --git a/.dotnet/src/Custom/Assistants/RequiredToolCall.cs b/.dotnet/src/Custom/Assistants/RequiredToolCall.cs new file mode 100644 index 000000000..19e951d2c --- /dev/null +++ b/.dotnet/src/Custom/Assistants/RequiredToolCall.cs @@ -0,0 +1,37 @@ +using System; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Text.Json; + +namespace OpenAI.Assistants; + +public abstract partial class RequiredToolCall : RunRequiredAction +{ + public string Id { get; } + + internal RequiredToolCall(string id) + { + Id = id; + } + + internal static RequiredToolCall DeserializeRequiredToolCall( + JsonElement element, + ModelReaderWriterOptions options) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("function"u8)) + { + return RequiredFunctionToolCall.DeserializeRequiredFunctionToolCall(element, options); + } + } + throw new ArgumentException(nameof(element)); + } +} diff --git a/.dotnet/src/Custom/Assistants/RetrievalToolDefinition.cs b/.dotnet/src/Custom/Assistants/RetrievalToolDefinition.cs new file mode 100644 index 000000000..f18579c87 --- /dev/null +++ b/.dotnet/src/Custom/Assistants/RetrievalToolDefinition.cs @@ -0,0 +1,38 @@ +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Text.Json; + +namespace OpenAI.Assistants; + +public partial class RetrievalToolDefinition : ToolDefinition +{ + public RetrievalToolDefinition() + { } + + internal static RetrievalToolDefinition DeserializeRetrievalToolDefinition( + JsonElement element, + ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("retrieval"u8)) + { + continue; + } + } + + return new RetrievalToolDefinition(); + } + + internal override void WriteDerived(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + writer.WriteString("type"u8, "retrieval"u8); + } +} diff --git a/.dotnet/src/Custom/Assistants/RetrievalToolInfo.cs b/.dotnet/src/Custom/Assistants/RetrievalToolInfo.cs new file mode 100644 index 000000000..1b58f35a4 --- /dev/null +++ b/.dotnet/src/Custom/Assistants/RetrievalToolInfo.cs @@ -0,0 +1,33 @@ +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Text.Json; + +namespace 
OpenAI.Assistants; + +public partial class RetrievalToolInfo : ToolInfo +{ + internal RetrievalToolInfo() + { } + + internal static RetrievalToolInfo DeserializeRetrievalToolInfo( + JsonElement element, + ModelReaderWriterOptions options) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + + foreach (var property in element.EnumerateObject()) + { + } + return new RetrievalToolInfo(); + } + + internal override void WriteDerived(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + writer.WriteString("type"u8, "retrieval"u8); + } +} diff --git a/.dotnet/src/Custom/Assistants/RunCreationOptions.cs b/.dotnet/src/Custom/Assistants/RunCreationOptions.cs new file mode 100644 index 000000000..b584aafa3 --- /dev/null +++ b/.dotnet/src/Custom/Assistants/RunCreationOptions.cs @@ -0,0 +1,67 @@ +using OpenAI.ClientShared.Internal; +using System.ClientModel.Internal; + +using System.Collections.Generic; + +namespace OpenAI.Assistants; + +/// +/// Represents additional options available when creating a new . +/// +public partial class RunCreationOptions +{ + + + /// + /// A run-specific model name that will override the assistant's defined model. If not provided, the assistant's + /// selection will be used. + /// + public string OverrideModel { get; set; } + + /// + /// A run specific replacement for the assistant's default instructions that will override the assistant-level + /// instructions. If not specified, the assistant's instructions will be used. + /// + public string OverrideInstructions { get; set; } + + /// + /// Run-specific additional instructions that will be appended to the assistant-level instructions solely for this + /// run. Unlike , the assistant's instructions are preserved and these additional + /// instructions are concatenated. + /// + public string AdditionalInstructions { get; set; } + + /// + /// A run-specific collection of tool definitions that will override the assistant-level defaults. If not provided, + /// the assistant's defined tools will be used. Available tools include: + /// + /// + /// + /// code_interpreter - + /// - works with data, math, and computer code + /// + /// + /// retrieval - + /// - dynamically enriches an Run's context with content from uploaded, indexed files + /// + /// + /// function - + /// - enables caller-provided custom functions for actions and enrichment + /// + /// + /// + /// + public IList OverrideTools { get; } = new ChangeTrackingList(); + + /// + /// An optional key/value mapping of additional, supplemental data items to attach to the . + /// This information may be useful for storing custom details in a structured format. + /// + /// + /// + /// Keys can be a maximum of 64 characters in length. + /// Values can be a maximum of 512 characters in length. 
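To illustrate how these run-level overrides compose with the tool definitions earlier in this change, here is a hedged sketch; the `CreateRun` method name is an assumption, and the model name and function schema are placeholders.

```csharp
using System;
using OpenAI.Assistants;

RunCreationOptions runOptions = new()
{
    OverrideModel = "gpt-4-turbo", // placeholder model name
    AdditionalInstructions = "Answer in one short paragraph.",
};

// OverrideTools replaces the assistant's default tool set for this run only.
runOptions.OverrideTools.Add(new CodeInterpreterToolDefinition());
runOptions.OverrideTools.Add(new FunctionToolDefinition(
    name: "get_weather",
    description: "Gets current weather for a location.",
    parameters: BinaryData.FromString(
        "{\"type\":\"object\",\"properties\":{\"location\":{\"type\":\"string\"}},\"required\":[\"location\"]}")));

// Hypothetical client call; exact method name/signature is an assumption.
ThreadRun run = client.CreateRun(threadId, assistantId, runOptions);
```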
+ /// + /// + public IDictionary Metadata { get; } = new ChangeTrackingDictionary(); +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Assistants/RunError.cs b/.dotnet/src/Custom/Assistants/RunError.cs new file mode 100644 index 000000000..b53777c3d --- /dev/null +++ b/.dotnet/src/Custom/Assistants/RunError.cs @@ -0,0 +1,24 @@ +using OpenAI.Chat; + +namespace OpenAI.Assistants; + +public partial class RunError +{ + public RunErrorCode ErrorCode { get; } + public string ErrorMessage { get; } + + internal RunError(RunErrorCode errorCode, string errorMessage) + { + ErrorCode = errorCode; + ErrorMessage = errorMessage; + } + + internal RunError(Internal.Models.RunObjectLastError internalError) + { + if (internalError.Code != null) + { + ErrorCode = new(internalError.Code.ToString()); + } + ErrorMessage = internalError.Message; + } +} diff --git a/.dotnet/src/Custom/Assistants/RunErrorCode.cs b/.dotnet/src/Custom/Assistants/RunErrorCode.cs new file mode 100644 index 000000000..d2eb1b65e --- /dev/null +++ b/.dotnet/src/Custom/Assistants/RunErrorCode.cs @@ -0,0 +1,37 @@ +using System; +using System.ComponentModel; + +namespace OpenAI.Assistants; + +public readonly struct RunErrorCode : IEquatable +{ + private readonly string _value; + + public static RunErrorCode ServerError { get; } = new(Internal.Models.RunObjectLastErrorCode.ServerError.ToString()); + public static RunErrorCode RateLimitExceeded { get; } = new(Internal.Models.RunObjectLastErrorCode.RateLimitExceeded.ToString()); + public static RunErrorCode InvalidPrompt { get; } = new("invalid_prompt"); + + public RunErrorCode(string status) + { + _value = status; + } + + /// Determines if two values are the same. + public static bool operator ==(RunErrorCode left, RunErrorCode right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(RunErrorCode left, RunErrorCode right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator RunErrorCode(string value) => new RunErrorCode(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is RunErrorCode other && Equals(other); + /// + public bool Equals(RunErrorCode other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Assistants/RunModificationOptions.cs b/.dotnet/src/Custom/Assistants/RunModificationOptions.cs new file mode 100644 index 000000000..3142669f0 --- /dev/null +++ b/.dotnet/src/Custom/Assistants/RunModificationOptions.cs @@ -0,0 +1,24 @@ +using OpenAI.ClientShared.Internal; +using System.ClientModel.Internal; + +using System.Collections.Generic; + +namespace OpenAI.Assistants; + +/// +/// Represents additional options available when modifying an existing . +/// +public partial class RunModificationOptions +{ + /// + /// A replacement for the optional key/value mapping of additional, supplemental data items to attach to the + /// . This information may be useful for storing custom details in a structured format. + /// + /// + /// + /// Keys can be a maximum of 64 characters in length. + /// Values can be a maximum of 512 characters in length. 
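`RunErrorCode` above follows the extensible-enum pattern: a readonly struct wrapping a string with case-insensitive equality and an implicit conversion from `string`, so known constants and raw service codes compare uniformly. A small sketch of consuming it from a failed run:

```csharp
using System;
using OpenAI.Assistants;

static void ReportFailure(ThreadRun run)
{
    if (run.Status != RunStatus.Failed || run.LastError is null)
    {
        return;
    }

    // Equality is case-insensitive string comparison, so both the predefined
    // constants and raw service strings work interchangeably.
    if (run.LastError.ErrorCode == RunErrorCode.RateLimitExceeded)
    {
        Console.WriteLine("Rate limited; retry later.");
    }
    else if (run.LastError.ErrorCode == "server_error") // implicit conversion
    {
        Console.WriteLine($"Server error: {run.LastError.ErrorMessage}");
    }
}
```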
+ /// + /// + public IDictionary Metadata { get; } = new ChangeTrackingDictionary(); +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Assistants/RunRequiredAction.Serialization.cs b/.dotnet/src/Custom/Assistants/RunRequiredAction.Serialization.cs new file mode 100644 index 000000000..e9ba0d3ac --- /dev/null +++ b/.dotnet/src/Custom/Assistants/RunRequiredAction.Serialization.cs @@ -0,0 +1,107 @@ +using System; +using System.ClientModel.Internal; + +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Assistants; + +public abstract partial class RunRequiredAction : IJsonModel> +{ + IList IJsonModel>.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel>)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(RunRequiredAction)} does not support '{format}' format."); + } + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeRunRequiredActions(document.RootElement, options); + } + + IList IPersistableModel>.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel>)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeRunRequiredActions(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(RunRequiredAction)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel>.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + void IJsonModel>.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + writer.WriteStartObject(); + WriteDerived(writer, options); + writer.WriteEndObject(); + } + + BinaryData IPersistableModel>.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel>)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(RunRequiredAction)} does not support '{options.Format}' format."); + } + } + + internal abstract void WriteDerived(Utf8JsonWriter writer, ModelReaderWriterOptions options); + + internal static IList DeserializeRunRequiredActions( + JsonElement element, + ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + + List actions = null; + + foreach (var topProperty in element.EnumerateObject()) + { + if (topProperty.NameEquals("submit_tool_outputs"u8)) + { + foreach (var submitObjectProperty in topProperty.Value.EnumerateObject()) + { + if (submitObjectProperty.NameEquals("tool_calls"u8)) + { + foreach (var toolCallObject in submitObjectProperty.Value.EnumerateArray()) + { + foreach (var toolCallProperty in toolCallObject.EnumerateObject()) + { + if ((toolCallProperty.NameEquals("type"u8) && toolCallProperty.Value.ValueEquals("function"u8)) + || (toolCallProperty.NameEquals("function"u8))) + { + actions ??= []; + actions.Add(RequiredFunctionToolCall.DeserializeRequiredFunctionToolCall( + toolCallObject, + options)); + continue; + } + } + } + } + } + } + } + + return actions; + } +} diff --git a/.dotnet/src/Custom/Assistants/RunRequiredAction.cs b/.dotnet/src/Custom/Assistants/RunRequiredAction.cs new file mode 100644 index 000000000..be9a12215 --- /dev/null +++ b/.dotnet/src/Custom/Assistants/RunRequiredAction.cs @@ -0,0 +1,8 @@ +using System; + +namespace OpenAI.Assistants; + +public partial class RunRequiredAction +{ + +} diff --git a/.dotnet/src/Custom/Assistants/RunStatus.cs b/.dotnet/src/Custom/Assistants/RunStatus.cs new file mode 100644 index 000000000..25888b8d2 --- /dev/null +++ b/.dotnet/src/Custom/Assistants/RunStatus.cs @@ -0,0 +1,13 @@ +namespace OpenAI.Assistants; + +public enum RunStatus +{ + Queued, + InProgress, + RequiresAction, + Cancelling, + CompletedSuccessfully, + Cancelled, + Failed, + Expired, +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Assistants/RunTokenUsage.cs b/.dotnet/src/Custom/Assistants/RunTokenUsage.cs new file mode 100644 index 000000000..935287e5f --- /dev/null +++ b/.dotnet/src/Custom/Assistants/RunTokenUsage.cs @@ -0,0 +1,20 @@ +namespace OpenAI.Assistants; + +public partial class RunTokenUsage +{ + public int InputTokens { get; } + public int OutputTokens { get; } + public int TotalTokens { get; } + + internal RunTokenUsage(int inputTokens, int outputTokens, int totalTokens) + { + InputTokens = inputTokens; + OutputTokens = outputTokens; + TotalTokens = totalTokens; + } + + internal RunTokenUsage(Internal.Models.RunCompletionUsage internalUsage) + : this((int)internalUsage.PromptTokens, (int)internalUsage.CompletionTokens, (int)internalUsage.TotalTokens) + { + } +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Assistants/TextContentAnnotation.Serialization.cs b/.dotnet/src/Custom/Assistants/TextContentAnnotation.Serialization.cs new file mode 100644 index 000000000..b7ba4eedb --- /dev/null +++ b/.dotnet/src/Custom/Assistants/TextContentAnnotation.Serialization.cs @@ -0,0 +1,93 @@ +using System; +using System.ClientModel.Internal; + +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Text.Json; + +namespace OpenAI.Assistants; + +public abstract partial class 
TextContentAnnotation : IJsonModel +{ + TextContentAnnotation IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(TextContentAnnotation)} does not support '{format}' format."); + } + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeTextContentAnnotation(document.RootElement, options); + } + + TextContentAnnotation IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeTextContentAnnotation(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(TextContentAnnotation)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + writer.WriteStartObject(); + WriteDerived(writer, options); + writer.WriteEndObject(); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(TextContentAnnotation)} does not support '{options.Format}' format."); + } + } + + internal static TextContentAnnotation DeserializeTextContentAnnotation( + JsonElement element, + ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("type"u8)) + { + if (property.Value.ValueEquals("file_citation"u8)) + { + return TextContentFileCitationAnnotation.DeserializeTextContentFileCitationAnnotation(element, options); + } + else if (property.Value.ValueEquals("file_path"u8)) + { + return TextContentFilePathAnnotation.DeserializeTextContentFilePathAnnotation(element, options); + } + else + { + throw new ArgumentException(property.Value.GetString()); + } + } + } + throw new ArgumentException(nameof(element)); + } + + internal abstract void WriteDerived(Utf8JsonWriter writer, ModelReaderWriterOptions options); +} diff --git a/.dotnet/src/Custom/Assistants/TextContentAnnotation.cs b/.dotnet/src/Custom/Assistants/TextContentAnnotation.cs new file mode 100644 index 000000000..bd3f390c0 --- /dev/null +++ b/.dotnet/src/Custom/Assistants/TextContentAnnotation.cs @@ -0,0 +1,6 @@ +namespace OpenAI.Assistants; + + +public abstract partial class TextContentAnnotation +{ +} diff --git a/.dotnet/src/Custom/Assistants/TextContentFileCitationAnnotation.cs b/.dotnet/src/Custom/Assistants/TextContentFileCitationAnnotation.cs new file mode 100644 index 000000000..459b91a94 --- /dev/null +++ b/.dotnet/src/Custom/Assistants/TextContentFileCitationAnnotation.cs @@ -0,0 +1,95 @@ +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Text.Json; + +namespace OpenAI.Assistants; + +public class TextContentFileCitationAnnotation : TextContentAnnotation +{ + 
public string TextToReplace { get; } + + public string FileId { get; } + + public string Quote { get; } + + public int StartIndex { get; } + + public int EndIndex { get; } + + internal TextContentFileCitationAnnotation(string textToReplace, string citationFileId, string citationQuote, int startIndex, int endIndex) + { + TextToReplace = textToReplace; + FileId = citationFileId; + Quote = citationQuote; + StartIndex = startIndex; + EndIndex = endIndex; + } + + internal override void WriteDerived(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + writer.WriteString("type"u8, "file_citation"u8); + writer.WriteString("text"u8, TextToReplace); + writer.WritePropertyName("file_citation"u8); + writer.WriteStartObject(); + writer.WriteString("file_id"u8, FileId); + writer.WriteString("quote"u8, Quote); + writer.WriteEndObject(); + writer.WriteNumber("start_index"u8, StartIndex); + writer.WriteNumber("end_index"u8, EndIndex); + } + + + internal static TextContentFileCitationAnnotation DeserializeTextContentFileCitationAnnotation( + JsonElement element, + ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + + string textToReplace = null; + int startIndex = 0; + int endIndex = 0; + string citationFileId = null; + string citationQuote = null; + + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("text"u8)) + { + textToReplace = property.Value.GetString(); + continue; + } + if (property.NameEquals("start_index"u8)) + { + startIndex = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("end_index"u8)) + { + endIndex = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("file_citation"u8)) + { + foreach (var filePathObjectProperty in property.Value.EnumerateObject()) + { + if (filePathObjectProperty.NameEquals("file_id"u8)) + { + citationFileId = filePathObjectProperty.Value.GetString(); + continue; + } + if (filePathObjectProperty.NameEquals("quote"u8)) + { + citationQuote = filePathObjectProperty.Value.GetString(); + continue; + } + } + } + } + return new TextContentFileCitationAnnotation(textToReplace, citationFileId, citationQuote, startIndex, endIndex); + } +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Assistants/TextContentFilePathAnnotation.cs b/.dotnet/src/Custom/Assistants/TextContentFilePathAnnotation.cs new file mode 100644 index 000000000..7e716bc6c --- /dev/null +++ b/.dotnet/src/Custom/Assistants/TextContentFilePathAnnotation.cs @@ -0,0 +1,85 @@ +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Text.Json; + +namespace OpenAI.Assistants; + +public class TextContentFilePathAnnotation : TextContentAnnotation +{ + public string TextToReplace { get; } + + public string FileId { get; } + + public int StartIndex { get; } + + public int EndIndex { get; } + + internal TextContentFilePathAnnotation(string textToReplace, string createdFileId, int startIndex, int endIndex) + { + TextToReplace = textToReplace; + FileId = createdFileId; + StartIndex = startIndex; + EndIndex = endIndex; + } + + internal override void WriteDerived(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + writer.WriteString("type"u8, "file_path"u8); + writer.WriteString("text"u8, TextToReplace); + writer.WritePropertyName("file_path"u8); + writer.WriteStartObject(); + writer.WriteString("file_id"u8, FileId); + writer.WriteEndObject(); + writer.WriteNumber("start_index"u8, StartIndex); 
+ writer.WriteNumber("end_index"u8, EndIndex); + } + + internal static TextContentFilePathAnnotation DeserializeTextContentFilePathAnnotation( + JsonElement element, + ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + + string textToReplace = null; + int startIndex = 0; + int endIndex = 0; + string createdFileId = null; + + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("text"u8)) + { + textToReplace = property.Value.GetString(); + continue; + } + if (property.NameEquals("start_index"u8)) + { + startIndex = property.Value.GetInt32(); + continue; + } + if (property.NameEquals ("end_index"u8)) + { + endIndex = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("file_path"u8)) + { + foreach (var filePathObjectProperty in property.Value.EnumerateObject()) + { + if (filePathObjectProperty.NameEquals("file_id"u8)) + { + createdFileId = filePathObjectProperty.Value.GetString(); + continue; + } + } + } + } + return new TextContentFilePathAnnotation(textToReplace, createdFileId, startIndex, endIndex); + } + +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Assistants/ThreadCreationOptions.cs b/.dotnet/src/Custom/Assistants/ThreadCreationOptions.cs new file mode 100644 index 000000000..39246b756 --- /dev/null +++ b/.dotnet/src/Custom/Assistants/ThreadCreationOptions.cs @@ -0,0 +1,26 @@ +using OpenAI.ClientShared.Internal; +using System.ClientModel.Internal; + +using System.Collections.Generic; + +namespace OpenAI.Assistants; + +/// +/// Represents additional options available when creating a new . +/// +public partial class ThreadCreationOptions +{ + public IList Messages { get; } = new ChangeTrackingList(); + + /// + /// An optional key/value mapping of additional, supplemental data items to attach to the . + /// This information may be useful for storing custom details in a structured format. + /// + /// + /// + /// Keys can be a maximum of 64 characters in length. + /// Values can be a maximum of 512 characters in length. + /// + /// + public IDictionary Metadata { get; } = new ChangeTrackingDictionary(); +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Assistants/ThreadInitializationMessage.cs b/.dotnet/src/Custom/Assistants/ThreadInitializationMessage.cs new file mode 100644 index 000000000..797789a5e --- /dev/null +++ b/.dotnet/src/Custom/Assistants/ThreadInitializationMessage.cs @@ -0,0 +1,45 @@ +using OpenAI.ClientShared.Internal; +using System.ClientModel.Internal; + +using System.Collections.Generic; +using System.Diagnostics.CodeAnalysis; + +namespace OpenAI.Assistants; + +public partial class ThreadInitializationMessage +{ + public required MessageRole Role { get; set; } + + public required string Content { get; set; } + + /// + /// A list of File IDs that the message should use.There can be a maximum of 10 files attached to a message. Useful + /// for tools like retrieval and code_interpreter that can access and use files. + /// + public IList FileIds { get; } = new ChangeTrackingList(); + + /// + /// An optional key/value mapping of additional, supplemental data items to attach to the . + /// This information may be useful for storing custom details in a structured format. + /// + /// + /// + /// Keys can be a maximum of 64 characters in length. + /// Values can be a maximum of 512 characters in length. 
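The two annotation types above each carry an inline placeholder substring (`TextToReplace`) plus index bounds, which callers can use to rewrite message text into readable markers. A hedged sketch of that post-processing (the marker format is illustrative, not part of this change):

```csharp
using System;
using System.Text;
using OpenAI.Assistants;

static string ResolveAnnotations(MessageTextContent content)
{
    StringBuilder builder = new(content.Text);
    // Annotations may be null when the payload carried none.
    foreach (TextContentAnnotation annotation in
        content.Annotations ?? Array.Empty<TextContentAnnotation>())
    {
        switch (annotation)
        {
            case TextContentFileCitationAnnotation citation:
                // Swap the inline placeholder for a readable citation marker.
                builder.Replace(citation.TextToReplace, $" [cited: {citation.FileId}]");
                break;
            case TextContentFilePathAnnotation filePath:
                builder.Replace(filePath.TextToReplace, $" [generated file: {filePath.FileId}]");
                break;
        }
    }
    return builder.ToString();
}
```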
+ /// + /// + public IDictionary Metadata { get; } = new ChangeTrackingDictionary(); + + [SetsRequiredMembers] + public ThreadInitializationMessage(MessageRole role, string content) + { + Role = role; + Content = content; + } + + public ThreadInitializationMessage() + { } + + public static implicit operator ThreadInitializationMessage(string content) + => new(MessageRole.User, content); +} diff --git a/.dotnet/src/Custom/Assistants/ThreadMessage.cs b/.dotnet/src/Custom/Assistants/ThreadMessage.cs new file mode 100644 index 000000000..01b481b36 --- /dev/null +++ b/.dotnet/src/Custom/Assistants/ThreadMessage.cs @@ -0,0 +1,62 @@ +using System; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Assistants; + +public partial class ThreadMessage +{ + public string Id { get; } + public DateTimeOffset CreatedAt { get; } + public string ThreadId { get; } + public MessageRole Role { get; } + public IReadOnlyList ContentItems { get; } + public string AssistantId { get; } + + public string RunId { get; } + public IReadOnlyList FileIds { get; } + /// + /// An optional key/value mapping of additional, supplemental data items to attach to the . + /// This information may be useful for storing custom details in a structured format. + /// + /// + /// + /// Keys can be a maximum of 64 characters in length. + /// Values can be a maximum of 512 characters in length. + /// + /// + public IReadOnlyDictionary Metadata { get; } + + internal ThreadMessage(Internal.Models.MessageObject internalMessage) + { + MessageRole convertedRole = MessageRole.User; + if (internalMessage.Role.ToString() == "user") + { + convertedRole = MessageRole.User; + } + else if (internalMessage.Role.ToString() == "assistant") + { + convertedRole = MessageRole.Assistant; + } + else + { + throw new ArgumentException(internalMessage.Role.ToString()); + } + + List content = []; + foreach (BinaryData unionContentData in internalMessage.Content) + { + content.Add(MessageContent.DeserializeMessageContent(JsonDocument.Parse(unionContentData).RootElement)); + } + + Id = internalMessage.Id; + AssistantId = internalMessage.AssistantId; + ThreadId = internalMessage.ThreadId; + RunId = internalMessage.RunId; + Metadata = internalMessage.Metadata; + FileIds = internalMessage.FileIds; + CreatedAt = internalMessage.CreatedAt; + Role = convertedRole; + ContentItems = content; + } +} diff --git a/.dotnet/src/Custom/Assistants/ThreadModificationOptions.cs b/.dotnet/src/Custom/Assistants/ThreadModificationOptions.cs new file mode 100644 index 000000000..8e47015d3 --- /dev/null +++ b/.dotnet/src/Custom/Assistants/ThreadModificationOptions.cs @@ -0,0 +1,21 @@ +using System.Collections.Generic; + +namespace OpenAI.Assistants; + +/// +/// Represents additional options available when modifying an existing . +/// +public partial class ThreadModificationOptions +{ + /// + /// A replacement for the optional key/value mapping of additional, supplemental data items to attach to the + /// . This information may be useful for storing custom details in a structured format. + /// + /// + /// + /// Keys can be a maximum of 64 characters in length. + /// Values can be a maximum of 512 characters in length. 
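Because `ThreadInitializationMessage` defines an implicit conversion from `string` (yielding a user-role message), seeding `ThreadCreationOptions.Messages` stays terse. Sketch below; the `CreateThread` method name is an assumption.

```csharp
using OpenAI.Assistants;

ThreadCreationOptions threadOptions = new();
// The implicit string conversion produces a MessageRole.User message.
threadOptions.Messages.Add("Hello! Can you help me plan a trip?");
threadOptions.Messages.Add(
    new ThreadInitializationMessage(MessageRole.User, "I prefer trains over flights."));
threadOptions.Metadata["session"] = "demo-001"; // placeholder metadata

// Hypothetical client call; exact method name/signature is an assumption.
AssistantThread thread = client.CreateThread(threadOptions);
```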
+ /// + /// + public IDictionary Metadata { get; } = new ChangeTrackingDictionary(); +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Assistants/ThreadRun.cs b/.dotnet/src/Custom/Assistants/ThreadRun.cs new file mode 100644 index 000000000..fa65d9c08 --- /dev/null +++ b/.dotnet/src/Custom/Assistants/ThreadRun.cs @@ -0,0 +1,105 @@ +using System; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Assistants; + +public partial class ThreadRun +{ + public string Id { get; } + public string ThreadId { get; } + public string AssistantId { get; } + public DateTimeOffset CreatedAt { get; } + + public RunStatus Status { get; } + + public IReadOnlyList RequiredActions { get; } + + public RunError LastError { get; } + public DateTimeOffset? ExpiresAt { get; } + public DateTimeOffset? StartedAt { get; } + public DateTimeOffset? CancelledAt { get; } + public DateTimeOffset? FailedAt { get; } + public DateTimeOffset? CompletedAt { get; } + public string Model { get; } + public string Instructions { get; } + public IReadOnlyList Tools { get; } + public IReadOnlyList FileIds { get; } + /// + /// An optional key/value mapping of additional, supplemental data items to attach to the . + /// This information may be useful for storing custom details in a structured format. + /// + /// + /// + /// Keys can be a maximum of 64 characters in length. + /// Values can be a maximum of 512 characters in length. + /// + /// + public IReadOnlyDictionary Metadata { get; } + public RunTokenUsage Usage { get; } + + internal ThreadRun(Internal.Models.RunObject internalRun) + { + Id = internalRun.Id; + ThreadId = internalRun.ThreadId; + AssistantId = internalRun.AssistantId; + CreatedAt = internalRun.CreatedAt; + FailedAt = internalRun.FailedAt; + ExpiresAt = internalRun.ExpiresAt; + StartedAt = internalRun.StartedAt; + CancelledAt = internalRun.CancelledAt; + CompletedAt = internalRun.CompletedAt; + Status = internalRun.Status.ToString() switch + { + "queued" => RunStatus.Queued, + "in_progress" => RunStatus.InProgress, + "requires_action" => RunStatus.RequiresAction, + "cancelling" => RunStatus.Cancelling, + "cancelled" => RunStatus.Cancelled, + "failed" => RunStatus.Failed, + "completed" => RunStatus.CompletedSuccessfully, + "expired" => RunStatus.Expired, + _ => throw new ArgumentException(nameof(Status)), + }; + Metadata = internalRun.Metadata; + FileIds = internalRun.FileIds; + Metadata = internalRun.Metadata; + Model = internalRun.Model; + Instructions = internalRun.Instructions; + + if (internalRun.LastError != null) + { + LastError = new(internalRun.LastError); + } + + if (internalRun.Usage != null) + { + Usage = new(internalRun.Usage); + } + + if (internalRun.Tools != null) + { + List tools = []; + foreach (BinaryData unionToolInfo in internalRun.Tools) + { + tools.Add(ToolInfo.DeserializeToolInfo(JsonDocument.Parse(unionToolInfo).RootElement)); + } + Tools = tools; + } + + IReadOnlyList internalFunctionCalls + = internalRun.RequiredAction?.SubmitToolOutputs?.ToolCalls; + if (internalFunctionCalls != null) + { + List actions = []; + foreach (Internal.Models.RunToolCallObject internalToolCall in internalFunctionCalls) + { + actions.Add(new RequiredFunctionToolCall( + internalToolCall.Id, + internalToolCall.Function.Name, + internalToolCall.Function.Arguments)); + } + RequiredActions = actions; + } + } +} diff --git a/.dotnet/src/Custom/Assistants/ToolDefinition.Serialization.cs b/.dotnet/src/Custom/Assistants/ToolDefinition.Serialization.cs new file mode 100644 index 
000000000..a4975c9bb --- /dev/null +++ b/.dotnet/src/Custom/Assistants/ToolDefinition.Serialization.cs @@ -0,0 +1,98 @@ +using System; +using System.ClientModel.Internal; + +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Text.Json; + +namespace OpenAI.Assistants; + +public abstract partial class ToolDefinition : IJsonModel +{ + ToolDefinition IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ToolDefinition)} does not support '{format}' format."); + } + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeToolDefinition(document.RootElement, options); + } + + ToolDefinition IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeToolDefinition(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(ToolDefinition)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + writer.WriteStartObject(); + WriteDerived(writer, options); + writer.WriteEndObject(); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(ToolDefinition)} does not support '{options.Format}' format."); + } + } + + internal abstract void WriteDerived(Utf8JsonWriter writer, ModelReaderWriterOptions options); + + internal static ToolDefinition DeserializeToolDefinition( + JsonElement element, + ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("type"u8)) + { + if (property.Value.ValueEquals("code_interpreter"u8)) + { + return CodeInterpreterToolDefinition.DeserializeCodeInterpreterToolDefinition(element, options); + } + else if (property.Value.ValueEquals("retrieval"u8)) + { + return RetrievalToolDefinition.DeserializeRetrievalToolDefinition(element, options); + } + else if (property.Value.ValueEquals("function"u8)) + { + return FunctionToolDefinition.DeserializeFunctionToolDefinition(element, options); + } + else + { + throw new ArgumentException(property.Value.GetString()); + } + } + } + throw new ArgumentException(nameof(element)); + } + +} diff --git a/.dotnet/src/Custom/Assistants/ToolDefinition.cs b/.dotnet/src/Custom/Assistants/ToolDefinition.cs new file mode 100644 index 000000000..b39de3b21 --- /dev/null +++ b/.dotnet/src/Custom/Assistants/ToolDefinition.cs @@ -0,0 +1,5 @@ +namespace OpenAI.Assistants; + +public abstract partial class ToolDefinition +{ +} diff --git a/.dotnet/src/Custom/Assistants/ToolInfo.Serialization.cs b/.dotnet/src/Custom/Assistants/ToolInfo.Serialization.cs new file mode 
100644 index 000000000..ae8d31b33 --- /dev/null +++ b/.dotnet/src/Custom/Assistants/ToolInfo.Serialization.cs @@ -0,0 +1,97 @@ +using System; +using System.ClientModel.Internal; + +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Text.Json; + +namespace OpenAI.Assistants; + +public abstract partial class ToolInfo : IJsonModel +{ + ToolInfo IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ToolInfo)} does not support '{format}' format."); + } + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeToolInfo(document.RootElement, options); + } + + ToolInfo IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeToolInfo(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(ToolInfo)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + writer.WriteStartObject(); + WriteDerived(writer, options); + writer.WriteEndObject(); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(ToolInfo)} does not support '{options.Format}' format."); + } + } + + internal abstract void WriteDerived(Utf8JsonWriter writer, ModelReaderWriterOptions options); + + internal static ToolInfo DeserializeToolInfo( + JsonElement element, + ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("type"u8)) + { + if (property.Value.ValueEquals("code_interpreter"u8)) + { + return CodeInterpreterToolInfo.DeserializeCodeInterpreterToolInfo(element, options); + } + else if (property.Value.ValueEquals("retrieval"u8)) + { + return RetrievalToolInfo.DeserializeRetrievalToolInfo(element, options); + } + else if (property.Value.ValueEquals("function"u8)) + { + return FunctionToolInfo.DeserializeFunctionToolInfo(element, options); + } + else + { + throw new ArgumentException(property.Value.GetString()); + } + } + } + throw new ArgumentException(nameof(element)); + } +} diff --git a/.dotnet/src/Custom/Assistants/ToolInfo.cs b/.dotnet/src/Custom/Assistants/ToolInfo.cs new file mode 100644 index 000000000..1708f94d2 --- /dev/null +++ b/.dotnet/src/Custom/Assistants/ToolInfo.cs @@ -0,0 +1,5 @@ +namespace OpenAI.Assistants; + +public abstract partial class ToolInfo +{ +} diff --git a/.dotnet/src/Custom/Assistants/ToolOutput.Serialization.cs b/.dotnet/src/Custom/Assistants/ToolOutput.Serialization.cs new file mode 100644 index 000000000..33557cacc --- /dev/null +++ b/.dotnet/src/Custom/Assistants/ToolOutput.Serialization.cs @@ 
-0,0 +1,97 @@ +using System; +using System.ClientModel.Internal; + +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Text.Json; +using OpenAI.ClientShared.Internal; + +namespace OpenAI.Assistants; + +public partial class ToolOutput : IJsonModel +{ + ToolOutput IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ToolOutput)} does not support '{format}' format."); + } + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeToolOutput(document.RootElement, options); + } + + ToolOutput IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeToolOutput(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(ToolOutput)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + writer.WriteStartObject(); + if (Optional.IsDefined(Id)) + { + writer.WriteString("tool_call_id"u8, Id); + } + if (Optional.IsDefined(Output)) + { + writer.WriteString("output"u8, Output); + } + writer.WriteEndObject(); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(ToolOutput)} does not support '{options.Format}' format."); + } + } + + internal static ToolOutput DeserializeToolOutput( + JsonElement element, + ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + string id = null; + string output = null; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("tool_call_id"u8)) + { + id = property.Value.ToString(); + continue; + } + if (property.NameEquals("output"u8)) + { + output = property.Value.ToString(); + continue; + } + } + return new ToolOutput(id, output); + } +} diff --git a/.dotnet/src/Custom/Assistants/ToolOutput.cs b/.dotnet/src/Custom/Assistants/ToolOutput.cs new file mode 100644 index 000000000..762466f89 --- /dev/null +++ b/.dotnet/src/Custom/Assistants/ToolOutput.cs @@ -0,0 +1,27 @@ +using System.Diagnostics.CodeAnalysis; +using System.Text.Json.Serialization; + +namespace OpenAI.Assistants; + +public partial class ToolOutput +{ + [JsonPropertyName("tool_call_id")] + public required string Id { get; set; } + [JsonPropertyName("output")] + public string Output { get; set; } + + public ToolOutput() + { } + + [SetsRequiredMembers] + public ToolOutput(string toolCallId, string output = null) + { + Id = toolCallId; + Output = output; + } + + [SetsRequiredMembers] + public ToolOutput(RequiredToolCall toolCall, string output = null) + : this(toolCall.Id, output) + { } +} diff --git a/.dotnet/src/Custom/Audio/AudioClient.Protocol.cs 
b/.dotnet/src/Custom/Audio/AudioClient.Protocol.cs new file mode 100644 index 000000000..d4998eb39 --- /dev/null +++ b/.dotnet/src/Custom/Audio/AudioClient.Protocol.cs @@ -0,0 +1,197 @@ +using System; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.ComponentModel; +using System.IO; +using System.Threading.Tasks; + +namespace OpenAI.Audio; + +public partial class AudioClient +{ + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult GenerateSpeechFromText(BinaryContent content, RequestOptions options = null) + => Shim.CreateSpeech(content, options); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual async Task GenerateSpeechFromTextAsync(BinaryContent content, RequestOptions options = null) + => await Shim.CreateSpeechAsync(content, options).ConfigureAwait(false); + + /// + /// [Protocol Method] Transcribes audio into the input language. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler or convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The content to send as the body of the request. + /// The content type of the request. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult TranscribeAudio(BinaryContent content, string contentType, RequestOptions options = null) + { + Argument.AssertNotNull(content, nameof(content)); + Argument.AssertNotNullOrEmpty(contentType, nameof(contentType)); + + options ??= new RequestOptions(); + + using PipelineMessage message = CreateCreateTranscriptionRequest(content, contentType, options); + + Shim.Pipeline.Send(message); + + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) + { + throw new ClientResultException(response); + } + + return ClientResult.FromResponse(response); + } + + /// + /// [Protocol Method] Transcribes audio into the input language. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler or convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The content to send as the body of the request. + /// The content type of the request. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. 
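Review note: these transcription and translation protocol methods accept a caller-built multipart body as opaque `BinaryContent`. A minimal sketch of driving the sync transcription method directly; the boundary string, file name, and `whisper-1` model id are illustrative values rather than library defaults, and the payload below elides the actual audio bytes:

```csharp
using System;
using System.ClientModel;
using OpenAI.Audio;

AudioClient client = new("whisper-1",
    new ApiKeyCredential(Environment.GetEnvironmentVariable("OPENAI_API_KEY")!));

// Hand-encoded multipart/form-data payload; the convenience overloads later in
// this change build equivalent content via MultipartFormDataBinaryContent.
string boundary = "example-boundary";
string payload =
    $"--{boundary}\r\n" +
    "Content-Disposition: form-data; name=\"model\"\r\n\r\n" +
    "whisper-1\r\n" +
    $"--{boundary}\r\n" +
    "Content-Disposition: form-data; name=\"file\"; filename=\"audio.wav\"\r\n" +
    "Content-Type: application/octet-stream\r\n\r\n" +
    // Raw audio bytes would be written here in a real request; elided in this sketch.
    $"\r\n--{boundary}--\r\n";

ClientResult result = client.TranscribeAudio(
    BinaryContent.Create(BinaryData.FromString(payload)),
    contentType: $"multipart/form-data; boundary={boundary}");

Console.WriteLine(result.GetRawResponse().Content.ToString());
```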
+ [EditorBrowsable(EditorBrowsableState.Never)] + public virtual async Task TranscribeAudioAsync(BinaryContent content, string contentType, RequestOptions options = null) + { + Argument.AssertNotNull(content, nameof(content)); + Argument.AssertNotNullOrEmpty(contentType, nameof(contentType)); + + options ??= new RequestOptions(); + + using PipelineMessage message = CreateCreateTranscriptionRequest(content, contentType, options); + + Shim.Pipeline.Send(message); + + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) + { + throw await ClientResultException.CreateAsync(response).ConfigureAwait(false); + } + + return ClientResult.FromResponse(response); + } + + /// + /// [Protocol Method] Translates audio into English. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler or convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The content to send as the body of the request. + /// The content type of the request. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult TranslateAudio(BinaryContent content, string contentType, RequestOptions options = null) + { + Argument.AssertNotNull(content, nameof(content)); + Argument.AssertNotNullOrEmpty(contentType, nameof(contentType)); + + options ??= new RequestOptions(); + + using PipelineMessage message = CreateCreateTranslationRequest(content, contentType, options); + + Shim.Pipeline.Send(message); + + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) + { + throw new ClientResultException(response); + } + + return ClientResult.FromResponse(response); + } + + /// + /// [Protocol Method] Translates audio into English. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler or convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The content to send as the body of the request. + /// The content type of the request. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. 
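The `IsError` check in each of these protocol methods defers to `RequestOptions.ErrorOptions`, so a caller can opt out of the throw-on-error default and inspect the raw response instead. A short sketch, reusing the `client`, `content`, and `contentType` values from the previous example:

```csharp
using System;
using System.ClientModel;
using System.ClientModel.Primitives;

RequestOptions noThrow = new() { ErrorOptions = ClientErrorBehaviors.NoThrow };
ClientResult result = client.TranslateAudio(content, contentType, noThrow);

PipelineResponse response = result.GetRawResponse();
if (response.IsError)
{
    // With NoThrow, the non-success response is returned for inspection
    // instead of surfacing as a ClientResultException.
    Console.WriteLine($"Translation request failed with status {response.Status}.");
}
```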
+ [EditorBrowsable(EditorBrowsableState.Never)] + public virtual async Task TranslateAudioAsync(BinaryContent content, string contentType, RequestOptions options = null) + { + Argument.AssertNotNull(content, nameof(content)); + Argument.AssertNotNullOrEmpty(contentType, nameof(contentType)); + + options ??= new RequestOptions(); + + using PipelineMessage message = CreateCreateTranslationRequest(content, contentType, options); + + Shim.Pipeline.Send(message); + + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) + { + throw await ClientResultException.CreateAsync(response).ConfigureAwait(false); + } + + return ClientResult.FromResponse(response); + } +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Audio/AudioClient.cs b/.dotnet/src/Custom/Audio/AudioClient.cs new file mode 100644 index 000000000..3d85f3686 --- /dev/null +++ b/.dotnet/src/Custom/Audio/AudioClient.cs @@ -0,0 +1,333 @@ +using OpenAI.Internal; +using System; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.IO; +using System.Runtime.InteropServices.ComTypes; +using System.Text; +using System.Threading.Tasks; + +namespace OpenAI.Audio; + +/// The service client for OpenAI audio operations. +public partial class AudioClient +{ + private readonly OpenAIClientConnector _clientConnector; + private Internal.Audio Shim => _clientConnector.InternalClient.GetAudioClient(); + + /// + /// Initializes a new instance of , used for audio operation requests. + /// + /// + /// + /// If an endpoint is not provided, the client will use the OPENAI_ENDPOINT environment variable if it + /// defined and otherwise use the default OpenAI v1 endpoint. + /// + /// + /// If an authentication credential is not defined, the client use the OPENAI_API_KEY environment variable + /// if it is defined. + /// + /// + /// The model name for audio operations that the client should use. + /// The API key used to authenticate with the service endpoint. + /// Additional options to customize the client. + public AudioClient(string model, ApiKeyCredential credential = default, OpenAIClientOptions options = default) + { + _clientConnector = new(model, credential, options); + } + + /// + /// Creates text-to-speech audio that reflects the specified voice speaking the provided input text. + /// + /// + /// Unless otherwise specified via , the mp3 format of + /// will be used for the generated audio. + /// + /// The text for the voice to speak. + /// The voice to use. + /// Additional options to control the text-to-speech operation. + /// + /// A result containing generated, spoken audio in the specified output format. + /// Unless otherwise specified via , the mp3 format of + /// will be used for the generated audio. + /// + public virtual ClientResult GenerateSpeechFromText( + string text, + TextToSpeechVoice voice, + TextToSpeechOptions options = null) + { + Internal.Models.CreateSpeechRequest request = CreateInternalTtsRequest(text, voice, options); + return Shim.CreateSpeech(request); + } + + /// + /// Creates text-to-speech audio that reflects the specified voice speaking the provided input text. + /// + /// + /// Unless otherwise specified via , the mp3 format of + /// will be used for the generated audio. + /// + /// The text for the voice to speak. + /// The voice to use. + /// Additional options to control the text-to-speech operation. + /// + /// A result containing generated, spoken audio in the specified output format. 
+ /// Unless otherwise specified via , the mp3 format of + /// will be used for the generated audio. + /// + public virtual Task> GenerateSpeechFromTextAsync( + string text, + TextToSpeechVoice voice, + TextToSpeechOptions options = null) + { + Internal.Models.CreateSpeechRequest request = CreateInternalTtsRequest(text, voice, options); + return Shim.CreateSpeechAsync(request); + } + + // convenience method - sync; Stream overload + // TODO: add refdoc comment + public virtual ClientResult TranscribeAudio(Stream fileStream, string fileName, AudioTranscriptionOptions options = null) + { + Argument.AssertNotNull(fileStream, nameof(fileStream)); + Argument.AssertNotNull(fileName, nameof(fileName)); + + options ??= new(); + + using MultipartFormDataBinaryContent content = options.ToMultipartContent(fileStream, fileName, _clientConnector.Model); + + ClientResult result = TranscribeAudio(content, content.ContentType); + + PipelineResponse response = result.GetRawResponse(); + + AudioTranscription value = AudioTranscription.Deserialize(response.Content!); + + return ClientResult.FromValue(value, response); + } + + // convenience method - sync + // TODO: add refdoc comment + public virtual ClientResult TranscribeAudio(BinaryData audioBytes, string fileName, AudioTranscriptionOptions options = null) + { + Argument.AssertNotNull(audioBytes, nameof(audioBytes)); + Argument.AssertNotNull(fileName, nameof(fileName)); + + options ??= new(); + + using MultipartFormDataBinaryContent content = options.ToMultipartContent(audioBytes, fileName, _clientConnector.Model); + + ClientResult result = TranscribeAudio(content, content.ContentType); + + PipelineResponse response = result.GetRawResponse(); + + AudioTranscription value = AudioTranscription.Deserialize(response.Content!); + + return ClientResult.FromValue(value, response); + } + + // convenience method - async + // TODO: add refdoc comment + public virtual async Task> TranscribeAudioAsync(Stream fileStream, string filename, AudioTranscriptionOptions options = null) + { + Argument.AssertNotNull(fileStream, nameof(fileStream)); + Argument.AssertNotNull(filename, nameof(filename)); + + options ??= new(); + + using MultipartFormDataBinaryContent content = options.ToMultipartContent(fileStream, filename, _clientConnector.Model); + + ClientResult result = await TranscribeAudioAsync(content, content.ContentType).ConfigureAwait(false); + + PipelineResponse response = result.GetRawResponse(); + + AudioTranscription value = AudioTranscription.Deserialize(response.Content!); + + return ClientResult.FromValue(value, response); + } + + // convenience method - async + // TODO: add refdoc comment + public virtual async Task> TranscribeAudioAsync(BinaryData audioBytes, string fileName, AudioTranscriptionOptions options = null) + { + Argument.AssertNotNull(audioBytes, nameof(audioBytes)); + Argument.AssertNotNull(fileName, nameof(fileName)); + + options ??= new(); + + using MultipartFormDataBinaryContent content = options.ToMultipartContent(audioBytes, fileName, _clientConnector.Model); + + ClientResult result = await TranscribeAudioAsync(content, content.ContentType).ConfigureAwait(false); + + PipelineResponse response = result.GetRawResponse(); + + AudioTranscription value = AudioTranscription.Deserialize(response.Content!); + + return ClientResult.FromValue(value, response); + } + + private PipelineMessage CreateCreateTranscriptionRequest(BinaryContent content, string contentType, RequestOptions options) + { + PipelineMessage message = Shim.Pipeline.CreateMessage(); + 
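        // Reviewer annotation, not part of the change: the rest of this factory
        // configures the message by hand. It applies a 200-only response classifier,
        // issues a POST to "/audio/transcriptions" appended to the configured
        // endpoint, sets the caller-supplied multipart Content-Type verbatim, and
        // applies the RequestOptions last so per-call settings win.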
message.ResponseClassifier = ResponseErrorClassifier200; + + PipelineRequest request = message.Request; + request.Method = "POST"; + + UriBuilder uriBuilder = new(_clientConnector.Endpoint.AbsoluteUri); + + StringBuilder path = new(); + path.Append("/audio/transcriptions"); + uriBuilder.Path += path.ToString(); + + request.Uri = uriBuilder.Uri; + + request.Headers.Set("Content-Type", contentType); + + request.Content = content; + + message.Apply(options); + + return message; + } + + // convenience method - sync; Stream overload + // TODO: add refdoc comment + public virtual ClientResult TranslateAudio(Stream fileStream, string fileName, AudioTranslationOptions options = null) + { + Argument.AssertNotNull(fileStream, nameof(fileStream)); + Argument.AssertNotNull(fileName, nameof(fileName)); + + options ??= new(); + + using MultipartFormDataBinaryContent content = options.ToMultipartContent(fileStream, fileName, _clientConnector.Model); + + ClientResult result = TranslateAudio(content, content.ContentType); + + PipelineResponse response = result.GetRawResponse(); + + AudioTranslation value = AudioTranslation.Deserialize(response.Content!); + + return ClientResult.FromValue(value, response); + } + + // convenience method - sync + // TODO: add refdoc comment + public virtual ClientResult TranslateAudio(BinaryData audioBytes, string fileName, AudioTranslationOptions options = null) + { + Argument.AssertNotNull(audioBytes, nameof(audioBytes)); + Argument.AssertNotNull(fileName, nameof(fileName)); + + options ??= new(); + + using MultipartFormDataBinaryContent content = options.ToMultipartContent(audioBytes, fileName, _clientConnector.Model); + + ClientResult result = TranslateAudio(content, content.ContentType); + + PipelineResponse response = result.GetRawResponse(); + + AudioTranslation value = AudioTranslation.Deserialize(response.Content!); + + return ClientResult.FromValue(value, response); + } + + // convenience method - async; Stream overload + // TODO: add refdoc comment + public virtual async Task> TranslateAudioAsync(Stream fileStream, string fileName, AudioTranslationOptions options = null) + { + Argument.AssertNotNull(fileStream, nameof(fileStream)); + Argument.AssertNotNull(fileName, nameof(fileName)); + + options ??= new(); + + using MultipartFormDataBinaryContent content = options.ToMultipartContent(fileStream, fileName, _clientConnector.Model); + + ClientResult result = await TranslateAudioAsync(content, content.ContentType).ConfigureAwait(false); + + PipelineResponse response = result.GetRawResponse(); + + AudioTranslation value = AudioTranslation.Deserialize(response.Content!); + + return ClientResult.FromValue(value, response); + } + + // convenience method - async + // TODO: add refdoc comment + public virtual async Task> TranslateAudioAsync(BinaryData audioBytes, string fileName, AudioTranslationOptions options = null) + { + Argument.AssertNotNull(audioBytes, nameof(audioBytes)); + Argument.AssertNotNull(fileName, nameof(fileName)); + + options ??= new(); + + using MultipartFormDataBinaryContent content = options.ToMultipartContent(audioBytes, fileName, _clientConnector.Model); + + ClientResult result = await TranslateAudioAsync(content, content.ContentType).ConfigureAwait(false); + + PipelineResponse response = result.GetRawResponse(); + + AudioTranslation value = AudioTranslation.Deserialize(response.Content!); + + return ClientResult.FromValue(value, response); + } + + private PipelineMessage CreateCreateTranslationRequest(BinaryContent content, string contentType, 
RequestOptions options) + { + PipelineMessage message = Shim.Pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + + PipelineRequest request = message.Request; + request.Method = "POST"; + + UriBuilder uriBuilder = new(_clientConnector.Endpoint.AbsoluteUri); + + StringBuilder path = new(); + path.Append("/audio/translations"); + uriBuilder.Path += path.ToString(); + + request.Uri = uriBuilder.Uri; + + request.Headers.Set("Content-Type", contentType); + + request.Content = content; + + message.Apply(options); + + return message; + } + + private Internal.Models.CreateSpeechRequest CreateInternalTtsRequest( + string input, + TextToSpeechVoice voice, + TextToSpeechOptions options = null) + { + options ??= new(); + Internal.Models.CreateSpeechRequestResponseFormat? internalResponseFormat = null; + if (options.ResponseFormat != null) + { + internalResponseFormat = options.ResponseFormat switch + { + AudioDataFormat.Aac => "aac", + AudioDataFormat.Flac => "flac", + AudioDataFormat.M4a => "m4a", + AudioDataFormat.Mp3 => "mp3", + AudioDataFormat.Mp4 => "mp4", + AudioDataFormat.Mpeg => "mpeg", + AudioDataFormat.Mpga => "mpga", + AudioDataFormat.Ogg => "ogg", + AudioDataFormat.Opus => "opus", + AudioDataFormat.Wav => "wav", + AudioDataFormat.Webm => "webm", + _ => throw new ArgumentException(nameof(options.ResponseFormat)), + }; + } + return new Internal.Models.CreateSpeechRequest( + _clientConnector.Model, + input, + voice.ToString(), + internalResponseFormat, + options?.SpeedMultiplier, + serializedAdditionalRawData: null); + } + + private static PipelineMessageClassifier _responseErrorClassifier200; + private static PipelineMessageClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= PipelineMessageClassifier.Create(stackalloc ushort[] { 200 }); +} diff --git a/.dotnet/src/Custom/Audio/AudioDataFormat.cs b/.dotnet/src/Custom/Audio/AudioDataFormat.cs new file mode 100644 index 000000000..b8840ca44 --- /dev/null +++ b/.dotnet/src/Custom/Audio/AudioDataFormat.cs @@ -0,0 +1,92 @@ +namespace OpenAI.Audio; + +/// +/// Represents an audio data format available as either input or output into an audio operation. +/// +public enum AudioDataFormat +{ + /// + /// MP3, an all-purpose audio compression format with a moderate tradeoff of quality for data size. + /// + /// mp3 is supported for input into translation and transcription as well as for output from text-to-speech. + /// + /// + Mp3, + /// + /// AAC, an alternative all-purpose format to MP3 preferred by YouTube, Android, and iOS. + /// + /// aac is supported for input into translation and transcription as well as for output from text-to-speech. + /// + /// + Aac, + /// + /// OGG, a balanced, open-source, general use format favored by Spotify. + /// + /// ogg is supported as input into translation and transcription but is not available for + /// text-to-speech output. + /// + /// + Ogg, + /// + /// FLAC, a high-quality, lossless compression format preferred for audio archival and enthusiast use. + /// + /// flac is supported for input into translation and transcription as well as for output from text-to-speech. + /// + /// + Flac, + /// + /// MP4, a multimedia container format that generally features bigger sizes and higher quality relative to MP3. + /// + /// mp4 is supported as input into translation and transcription but is not available for + /// text-to-speech output. 
+ /// + /// + Mp4, + /// + /// MPEG, a multimedia container format that can contain any of several different underlying audio formats. + /// + /// mpeg is supported as input into translation and transcription but is not available for + /// text-to-speech output. + /// + /// + Mpeg, + /// + /// MPGA, effectively an alias for MP3. + /// + /// mpga is supported as input into translation and transcription but is not available for + /// text-to-speech output. + /// + /// + Mpga, + /// + /// M4A, the audio-only counterpart to MP4 that generally features larger data sizes and higher quality than MP3. + /// + /// m4a is supported as input into translation and transcription but is not available for + /// text-to-speech output. + /// + /// + M4a, + /// + /// Opus, a higher-quality compression format that features integrated optimizations for speech. + /// + /// opus is supported for input into translation and transcription as well as for output from text-to-speech. + /// + /// + Opus, + /// + /// WAV, an uncompressed, lossless format with maximum quality, highest file size, and minimal decoding. + /// + /// wav is supported as input into translation and transcription but is not available for + /// text-to-speech output. + /// + /// + Wav, + /// + /// WebM, a multimedia container that generally uses Opus or OGG audio. + /// + /// webm is supported as input into translation and transcription but is not available for + /// text-to-speech output. + /// + /// + Webm, +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Audio/AudioTranscription.cs b/.dotnet/src/Custom/Audio/AudioTranscription.cs new file mode 100644 index 000000000..efd5a0e8d --- /dev/null +++ b/.dotnet/src/Custom/Audio/AudioTranscription.cs @@ -0,0 +1,78 @@ +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Audio; + +public partial class AudioTranscription +{ + public string Language { get; } + public TimeSpan? Duration { get; } + public string Text { get; } + public IReadOnlyList Words { get; } + public IReadOnlyList Segments { get; } + + internal AudioTranscription(string language, TimeSpan? duration, string text, IReadOnlyList words, IReadOnlyList segments) + { + Language = language; + Duration = duration; + Text = text; + Words = words; + Segments = segments; + } + + internal static AudioTranscription Deserialize(BinaryData content) + { + using JsonDocument responseDocument = JsonDocument.Parse(content); + return DeserializeAudioTranscription(responseDocument.RootElement); + } + + internal static AudioTranscription DeserializeAudioTranscription(JsonElement element, ModelReaderWriterOptions options = default) + { + string language = null; + TimeSpan? 
duration = null; + string text = null; + List words = null; + List segments = null; + + foreach (JsonProperty topLevelProperty in element.EnumerateObject()) + { + if (topLevelProperty.NameEquals("language"u8)) + { + language = topLevelProperty.Value.GetString(); + continue; + } + if (topLevelProperty.NameEquals("duration"u8)) + { + duration = TimeSpan.FromSeconds(topLevelProperty.Value.GetSingle()); + continue; + } + if (topLevelProperty.NameEquals("text"u8)) + { + text = topLevelProperty.Value.GetString(); + continue; + } + if (topLevelProperty.NameEquals("words"u8)) + { + words = []; + foreach (JsonElement wordElement in topLevelProperty.Value.EnumerateArray()) + { + words.Add(TranscribedWord.DeserializeTranscribedWord(wordElement, options)); + } + continue; + } + if (topLevelProperty.NameEquals("segments"u8)) + { + segments = []; + foreach (JsonElement segmentElement in topLevelProperty.Value.EnumerateArray()) + { + segments.Add(TranscriptionSegment.DeserializeTranscriptionSegment(segmentElement, options)); + } + continue; + } + } + + return new AudioTranscription(language, duration, text, words, segments); + } +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Audio/AudioTranscriptionFormat.cs b/.dotnet/src/Custom/Audio/AudioTranscriptionFormat.cs new file mode 100644 index 000000000..585099b7a --- /dev/null +++ b/.dotnet/src/Custom/Audio/AudioTranscriptionFormat.cs @@ -0,0 +1,12 @@ +using System; +using System.Collections.Generic; + +namespace OpenAI.Audio; + +public enum AudioTranscriptionFormat +{ + Simple, + Detailed, + Srt, + Vtt, +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Audio/AudioTranscriptionOptions.cs b/.dotnet/src/Custom/Audio/AudioTranscriptionOptions.cs new file mode 100644 index 000000000..6d4c3ab12 --- /dev/null +++ b/.dotnet/src/Custom/Audio/AudioTranscriptionOptions.cs @@ -0,0 +1,89 @@ +using OpenAI.Internal; +using System; +using System.Collections.Generic; +using System.IO; +using System.Text.Json; + +namespace OpenAI.Audio; + +public partial class AudioTranscriptionOptions +{ + public string Language { get; set; } + public string Prompt { get; set; } + public AudioTranscriptionFormat? ResponseFormat { get; set; } + public float? Temperature { get; set; } + public bool? EnableWordTimestamps { get; set; } + public bool? 
EnableSegmentTimestamps { get; set; }
+
+    internal MultipartFormDataBinaryContent ToMultipartContent(Stream fileStream, string fileName, string model)
+    {
+        MultipartFormDataBinaryContent content = new();
+
+        content.Add(fileStream, "file", fileName);
+
+        AddContent(model, content);
+
+        return content;
+    }
+
+    internal MultipartFormDataBinaryContent ToMultipartContent(BinaryData audioBytes, string fileName, string model)
+    {
+        MultipartFormDataBinaryContent content = new();
+
+        content.Add(audioBytes, "file", fileName);
+
+        AddContent(model, content);
+
+        return content;
+    }
+
+    private void AddContent(string model, MultipartFormDataBinaryContent content)
+    {
+        content.Add(model, "model");
+
+        if (Language is not null)
+        {
+            content.Add(Language, "language");
+        }
+
+        if (Prompt is not null)
+        {
+            content.Add(Prompt, "prompt");
+        }
+
+        if (ResponseFormat is not null)
+        {
+            string value = ResponseFormat switch
+            {
+                AudioTranscriptionFormat.Simple => "json",
+                AudioTranscriptionFormat.Detailed => "verbose_json",
+                AudioTranscriptionFormat.Srt => "srt",
+                AudioTranscriptionFormat.Vtt => "vtt",
+                _ => throw new ArgumentException(nameof(ResponseFormat))
+            };
+
+            content.Add(value, "response_format");
+        }
+
+        if (Temperature is not null)
+        {
+            content.Add(Temperature.Value, "temperature");
+        }
+
+        if (EnableWordTimestamps is not null || EnableSegmentTimestamps is not null)
+        {
+            List<string> granularities = [];
+            // Compare against 'true' so that leaving one of the two flags unset
+            // does not dereference a null nullable when only the other was set.
+            if (EnableWordTimestamps == true)
+            {
+                granularities.Add("word");
+            }
+            if (EnableSegmentTimestamps == true)
+            {
+                granularities.Add("segment");
+            }
+
+            byte[] data = JsonSerializer.SerializeToUtf8Bytes(granularities);
+            content.Add(data, "timestamp_granularities");
+        }
+    }
+}
diff --git a/.dotnet/src/Custom/Audio/AudioTranslation.cs b/.dotnet/src/Custom/Audio/AudioTranslation.cs
new file mode 100644
index 000000000..b683161b3
--- /dev/null
+++ b/.dotnet/src/Custom/Audio/AudioTranslation.cs
@@ -0,0 +1,37 @@
+using System;
+using System.ClientModel.Primitives;
+using System.Text.Json;
+
+namespace OpenAI.Audio;
+
+public partial class AudioTranslation
+{
+    public string Text { get; }
+
+    internal AudioTranslation(string text)
+    {
+        Text = text;
+    }
+
+    internal static AudioTranslation Deserialize(BinaryData content)
+    {
+        using JsonDocument responseDocument = JsonDocument.Parse(content);
+        return DeserializeAudioTranslation(responseDocument.RootElement);
+    }
+
+    internal static AudioTranslation DeserializeAudioTranslation(JsonElement element, ModelReaderWriterOptions options = default)
+    {
+        string text = null;
+
+        foreach (JsonProperty property in element.EnumerateObject())
+        {
+            if (property.NameEquals("text"u8))
+            {
+                text = property.Value.GetString();
+                continue;
+            }
+        }
+
+        return new AudioTranslation(text);
+    }
+}
\ No newline at end of file
diff --git a/.dotnet/src/Custom/Audio/AudioTranslationOptions.cs b/.dotnet/src/Custom/Audio/AudioTranslationOptions.cs
new file mode 100644
index 000000000..50f8a89e0
--- /dev/null
+++ b/.dotnet/src/Custom/Audio/AudioTranslationOptions.cs
@@ -0,0 +1,58 @@
+using OpenAI.Internal;
+using System;
+using System.IO;
+
+namespace OpenAI.Audio;
+
+public partial class AudioTranslationOptions
+{
+    public string Prompt { get; set; }
+    public AudioTranscriptionFormat? ResponseFormat { get; set; }
+    public float?
Temperature { get; set; } + + internal MultipartFormDataBinaryContent ToMultipartContent(Stream fileStream, string fileName, string model) + { + MultipartFormDataBinaryContent content = new(); + + content.Add(fileStream, "file", fileName); + + AddContent(model, content); + + return content; + } + + internal MultipartFormDataBinaryContent ToMultipartContent(BinaryData audioBytes, string fileName, string model) + { + MultipartFormDataBinaryContent content = new(); + + content.Add(audioBytes, "file", fileName); + + AddContent(model, content); + + return content; + } + + private void AddContent(string model, MultipartFormDataBinaryContent content) + { + content.Add(model, "model"); + + if (Prompt is not null) + { + content.Add(Prompt, "prompt"); + } + + if (ResponseFormat is not null) + { + string value = ResponseFormat switch + { + AudioTranscriptionFormat.Simple => "json", + AudioTranscriptionFormat.Detailed => "verbose_json", + AudioTranscriptionFormat.Srt => "srt", + AudioTranscriptionFormat.Vtt => "vtt", + _ => throw new ArgumentException(nameof(ResponseFormat)) + }; + + content.Add(value, "response_format"); + } + } +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Audio/TextToSpeechOptions.cs b/.dotnet/src/Custom/Audio/TextToSpeechOptions.cs new file mode 100644 index 000000000..476d564af --- /dev/null +++ b/.dotnet/src/Custom/Audio/TextToSpeechOptions.cs @@ -0,0 +1,29 @@ +namespace OpenAI.Audio; + +/// +/// A representation of additional options available to control the behavior of a text-to-speech audio generation +/// operation. +/// +public partial class TextToSpeechOptions +{ + /// + /// The desired format of the generated text-to-speech audio. If not specified, a default value of mp3 will + /// be used. + /// + /// Supported output formats include: + /// + /// mp3 - + /// opus - + /// aac - + /// flac - + /// + /// + /// + public AudioDataFormat? ResponseFormat { get; set; } + + /// + /// A multiplicative speed factor to apply to the generated audio, with 1.0 being the default and valid + /// values ranging from 0.25 to 4.0. + /// + public float? SpeedMultiplier { get; set; } +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Audio/TextToSpeechVoice.cs b/.dotnet/src/Custom/Audio/TextToSpeechVoice.cs new file mode 100644 index 000000000..55bc324c5 --- /dev/null +++ b/.dotnet/src/Custom/Audio/TextToSpeechVoice.cs @@ -0,0 +1,64 @@ +using System; + +namespace OpenAI.Audio; + +/// +/// Represents the available text-to-speech voices. +/// +public readonly struct TextToSpeechVoice : IEquatable +{ + private readonly Internal.Models.CreateSpeechRequestVoice _internalVoice; + + /// + /// Creates a new instance of . + /// + /// The textual representation of the value to use. + public TextToSpeechVoice(string value) + : this(new Internal.Models.CreateSpeechRequestVoice(value)) + { } + + internal TextToSpeechVoice(Internal.Models.CreateSpeechRequestVoice internalVoice) + { + _internalVoice = internalVoice; + } + + /// + /// The onyx voice. + /// + public static TextToSpeechVoice Onyx { get; } = new TextToSpeechVoice(Internal.Models.CreateSpeechRequestVoice.Onyx); + /// + /// The shimmer voice. + /// + public static TextToSpeechVoice Shimmer { get; } = new TextToSpeechVoice(Internal.Models.CreateSpeechRequestVoice.Shimmer); + /// + /// The alloy voice. + /// + public static TextToSpeechVoice Alloy { get; } = new TextToSpeechVoice(Internal.Models.CreateSpeechRequestVoice.Alloy); + /// + /// The fable voice. 
+ /// + public static TextToSpeechVoice Fable { get; } = new TextToSpeechVoice(Internal.Models.CreateSpeechRequestVoice.Fable); + /// + /// The echo voice. + /// + public static TextToSpeechVoice Echo { get; } = new TextToSpeechVoice(Internal.Models.CreateSpeechRequestVoice.Echo); + + /// + public static bool operator ==(TextToSpeechVoice left, TextToSpeechVoice right) + => left._internalVoice == right._internalVoice; + /// + public static implicit operator TextToSpeechVoice(string value) + => new TextToSpeechVoice(new Internal.Models.CreateSpeechRequestVoice(value)); + /// + public static bool operator !=(TextToSpeechVoice left, TextToSpeechVoice right) + => left._internalVoice != right._internalVoice; + /// + public bool Equals(TextToSpeechVoice other) => _internalVoice.Equals(other._internalVoice); + /// + public override string ToString() => _internalVoice.ToString(); + /// + public override bool Equals(object obj) => + (obj is TextToSpeechVoice voice && this.Equals(voice)) || _internalVoice.Equals(obj); + /// + public override int GetHashCode() => _internalVoice.GetHashCode(); +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Audio/TranscribedWord.cs b/.dotnet/src/Custom/Audio/TranscribedWord.cs new file mode 100644 index 000000000..94e26fde3 --- /dev/null +++ b/.dotnet/src/Custom/Audio/TranscribedWord.cs @@ -0,0 +1,45 @@ +using System; +using System.ClientModel.Primitives; +using System.Text.Json; + +namespace OpenAI.Audio; + +public partial class TranscribedWord +{ + public string Word { get; } + public TimeSpan Start { get; } + public TimeSpan End { get; } + + internal TranscribedWord(string word, TimeSpan start, TimeSpan end) + { + Word = word; + Start = start; + End = end; + } + + internal static TranscribedWord DeserializeTranscribedWord(JsonElement element, ModelReaderWriterOptions options = default) + { + string word = null; + TimeSpan? start = null; + TimeSpan? 
end = null; + foreach (JsonProperty wordProperty in element.EnumerateObject()) + { + if (wordProperty.NameEquals("word"u8)) + { + word = wordProperty.Value.GetString(); + continue; + } + if (wordProperty.NameEquals("start"u8)) + { + start = TimeSpan.FromSeconds(wordProperty.Value.GetSingle()); + continue; + } + if (wordProperty.NameEquals("end"u8)) + { + end = TimeSpan.FromSeconds(wordProperty.Value.GetSingle()); + continue; + } + } + return new TranscribedWord(word, start.Value, end.Value); + } +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Audio/TranscriptionSegment.cs b/.dotnet/src/Custom/Audio/TranscriptionSegment.cs new file mode 100644 index 000000000..c1ee0632e --- /dev/null +++ b/.dotnet/src/Custom/Audio/TranscriptionSegment.cs @@ -0,0 +1,109 @@ +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using System.Threading; + +namespace OpenAI.Audio; + +public partial class TranscriptionSegment +{ + public int Id { get; } + public int SeekOffset { get; } + public TimeSpan Start { get; } + public TimeSpan End { get; } + public string Text { get; } + public IReadOnlyList TokenIds { get; } + public float Temperature { get; } + public float AverageLogProbability { get; } + public float CompressionRatio { get; } + public float NoSpeechProbability { get; } + + internal TranscriptionSegment(int id, int seekOffset, TimeSpan start, TimeSpan end, string text, IReadOnlyList tokenIds, float temperature, float averageLogProbability, float compressionRatio, float noSpeechProbability) + { + Id = id; + SeekOffset = seekOffset; + Start = start; + End = end; + Text = text; + TokenIds = tokenIds; + Temperature = temperature; + AverageLogProbability = averageLogProbability; + CompressionRatio = compressionRatio; + NoSpeechProbability = noSpeechProbability; + } + + internal static TranscriptionSegment DeserializeTranscriptionSegment(JsonElement element, ModelReaderWriterOptions options = default) + { + int id = 0; + int seekOffset = 0; + TimeSpan start = default; + TimeSpan end = default; + string text = null; + List tokenIds = null; + float temperature = 0; + float averageLogProbability = 0; + float compressionRatio = 0; + float noSpeechProbability = 0; + + foreach (JsonProperty topLevelProperty in element.EnumerateObject()) + { + if (topLevelProperty.NameEquals("id"u8)) + { + id = topLevelProperty.Value.GetInt32(); + continue; + } + if (topLevelProperty.NameEquals("seek"u8)) + { + seekOffset = topLevelProperty.Value.GetInt32(); + continue; + } + if (topLevelProperty.NameEquals("start"u8)) + { + start = TimeSpan.FromSeconds(topLevelProperty.Value.GetSingle()); + continue; + } + if (topLevelProperty.NameEquals("end"u8)) + { + end = TimeSpan.FromSeconds(topLevelProperty.Value.GetSingle()); + continue; + } + if (topLevelProperty.NameEquals("text"u8)) + { + text = topLevelProperty.Value.GetString(); + continue; + } + if (topLevelProperty.NameEquals("tokens"u8)) + { + tokenIds = []; + foreach (JsonElement tokenIdElement in topLevelProperty.Value.EnumerateArray()) + { + tokenIds.Add(tokenIdElement.GetInt32()); + } + continue; + } + if (topLevelProperty.NameEquals("temperature"u8)) + { + temperature = topLevelProperty.Value.GetSingle(); + continue; + } + if (topLevelProperty.NameEquals("avg_logprob"u8)) + { + averageLogProbability = topLevelProperty.Value.GetSingle(); + continue; + } + if (topLevelProperty.NameEquals("compression_ratio"u8)) + { + compressionRatio = topLevelProperty.Value.GetSingle(); + continue; + } + if 
(topLevelProperty.NameEquals("no_speech_prob"u8)) + { + noSpeechProbability = topLevelProperty.Value.GetSingle(); + continue; + } + } + + return new TranscriptionSegment(id, seekOffset, start, end, text, tokenIds, temperature, averageLogProbability, compressionRatio, noSpeechProbability); + } +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Chat/ChatClient.Protocol.cs b/.dotnet/src/Custom/Chat/ChatClient.Protocol.cs new file mode 100644 index 000000000..7d6c4c1b9 --- /dev/null +++ b/.dotnet/src/Custom/Chat/ChatClient.Protocol.cs @@ -0,0 +1,20 @@ +using System.ClientModel; +using System.ClientModel.Primitives; +using System.ComponentModel; +using System.Threading.Tasks; + +namespace OpenAI.Chat; + +/// The service client for the OpenAI Chat Completions endpoint. +public partial class ChatClient +{ + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult CompleteChat(BinaryContent content, RequestOptions options = null) + => Shim.CreateChatCompletion(content, options); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual async Task CompleteChatAsync(BinaryContent content, RequestOptions options = null) + => await Shim.CreateChatCompletionAsync(content, options).ConfigureAwait(false); +} diff --git a/.dotnet/src/Custom/Chat/ChatClient.cs b/.dotnet/src/Custom/Chat/ChatClient.cs new file mode 100644 index 000000000..f867f1b02 --- /dev/null +++ b/.dotnet/src/Custom/Chat/ChatClient.cs @@ -0,0 +1,326 @@ +using System; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text; +using System.Threading.Tasks; + +namespace OpenAI.Chat; + +/// The service client for the OpenAI Chat Completions endpoint. +public partial class ChatClient +{ + private readonly OpenAIClientConnector _clientConnector; + private Internal.Chat Shim => _clientConnector.InternalClient.GetChatClient(); + + /// + /// Initializes a new instance of , used for Chat Completion requests. + /// + /// + /// + /// If an endpoint is not provided, the client will use the OPENAI_ENDPOINT environment variable if it + /// defined and otherwise use the default OpenAI v1 endpoint. + /// + /// + /// If an authentication credential is not defined, the client use the OPENAI_API_KEY environment variable + /// if it is defined. + /// + /// + /// The model name for chat completions that the client should use. + /// The API key used to authenticate with the service endpoint. + /// Additional options to customize the client. + public ChatClient(string model, ApiKeyCredential credential = default, OpenAIClientOptions options = null) + { + _clientConnector = new(model, credential, options); + } + + /// + /// Generates a single chat completion result for a single, simple user message. + /// + /// The user message to provide as a prompt for chat completion. + /// Additional options for the chat completion request. + /// A result for a single chat completion. + public virtual ClientResult CompleteChat(string message, ChatCompletionOptions options = null) + => CompleteChat(new List() { new ChatRequestUserMessage(message) }, options); + + /// + /// Generates a single chat completion result for a single, simple user message. + /// + /// The user message to provide as a prompt for chat completion. + /// Additional options for the chat completion request. + /// A result for a single chat completion. 
+ public virtual Task> CompleteChatAsync(string message, ChatCompletionOptions options = null) + => CompleteChatAsync( + new List() { new ChatRequestUserMessage(message) }, options); + + /// + /// Generates a single chat completion result for a provided set of input chat messages. + /// + /// The messages to provide as input and history for chat completion. + /// Additional options for the chat completion request. + /// A result for a single chat completion. + public virtual ClientResult CompleteChat( + IEnumerable messages, + ChatCompletionOptions options = null) + { + Internal.Models.CreateChatCompletionRequest request = CreateInternalRequest(messages, options); + ClientResult response = Shim.CreateChatCompletion(request); + ChatCompletion chatCompletion = new(response.Value, internalChoiceIndex: 0); + return ClientResult.FromValue(chatCompletion, response.GetRawResponse()); + } + + /// + /// Generates a single chat completion result for a provided set of input chat messages. + /// + /// The messages to provide as input and history for chat completion. + /// Additional options for the chat completion request. + /// A result for a single chat completion. + public virtual async Task> CompleteChatAsync( + IEnumerable messages, + ChatCompletionOptions options = null) + { + Internal.Models.CreateChatCompletionRequest request = CreateInternalRequest(messages, options); + ClientResult response = await Shim.CreateChatCompletionAsync(request).ConfigureAwait(false); + ChatCompletion chatCompletion = new(response.Value, internalChoiceIndex: 0); + return ClientResult.FromValue(chatCompletion, response.GetRawResponse()); + } + + /// + /// Generates a collection of chat completion results for a provided set of input chat messages. + /// + /// The messages to provide as input and history for chat completion. + /// + /// The number of independent, alternative response choices that should be generated. + /// + /// Additional options for the chat completion request. + /// The cancellation token for the operation. + /// A result for a single chat completion. + public virtual ClientResult CompleteChat( + IEnumerable messages, + int choiceCount, + ChatCompletionOptions options = null) + { + Internal.Models.CreateChatCompletionRequest request = CreateInternalRequest(messages, options, choiceCount); + ClientResult response = Shim.CreateChatCompletion(request); + List chatCompletions = []; + for (int i = 0; i < response.Value.Choices.Count; i++) + { + chatCompletions.Add(new(response.Value, (int)response.Value.Choices[i].Index)); + } + return ClientResult.FromValue(new ChatCompletionCollection(chatCompletions), response.GetRawResponse()); + } + + /// + /// Generates a collection of chat completion results for a provided set of input chat messages. + /// + /// The messages to provide as input and history for chat completion. + /// + /// The number of independent, alternative response choices that should be generated. + /// + /// Additional options for the chat completion request. + /// A result for a single chat completion. 
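For the multi-choice overloads that follow, each element of the returned `ChatCompletionCollection` corresponds to one generated choice. A short sketch, continuing with the `client` from the previous example:

```csharp
ClientResult<ChatCompletionCollection> result = client.CompleteChat(
    new List<ChatRequestMessage> { new ChatRequestUserMessage("Name a color.") },
    choiceCount: 3);

foreach (ChatCompletion choice in result.Value)
{
    // Index reflects the position the service assigned to each choice.
    Console.WriteLine($"[{choice.Index}] {choice.Content}");
}
```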
+ public virtual async Task> CompleteChatAsync( + IEnumerable messages, + int choiceCount, + ChatCompletionOptions options = null) + { + Internal.Models.CreateChatCompletionRequest request = CreateInternalRequest(messages, options, choiceCount); + ClientResult response = await Shim.CreateChatCompletionAsync(request).ConfigureAwait(false); + List chatCompletions = []; + for (int i = 0; i < response.Value.Choices.Count; i++) + { + chatCompletions.Add(new(response.Value, (int)response.Value.Choices[i].Index)); + } + return ClientResult.FromValue(new ChatCompletionCollection(chatCompletions), response.GetRawResponse()); + } + + /// + /// Begins a streaming response for a chat completion request using a single, simple user message as input. + /// + /// + /// can be enumerated over using the await foreach pattern using the + /// interface. + /// + /// The user message to provide as a prompt for chat completion. + /// + /// The number of independent, alternative choices that the chat completion request should generate. + /// + /// Additional options for the chat completion request. + /// A streaming result with incremental chat completion updates. + public virtual StreamingClientResult CompleteChatStreaming( + string message, + int? choiceCount = null, + ChatCompletionOptions options = null) + => CompleteChatStreaming( + new List { new ChatRequestUserMessage(message) }, + choiceCount, + options); + + /// + /// Begins a streaming response for a chat completion request using a single, simple user message as input. + /// + /// + /// can be enumerated over using the await foreach pattern using the + /// interface. + /// + /// The user message to provide as a prompt for chat completion. + /// + /// The number of independent, alternative choices that the chat completion request should generate. + /// + /// Additional options for the chat completion request. + /// A streaming result with incremental chat completion updates. + public virtual Task> CompleteChatStreamingAsync( + string message, + int? choiceCount = null, + ChatCompletionOptions options = null) + => CompleteChatStreamingAsync( + new List { new ChatRequestUserMessage(message) }, + choiceCount, + options); + + /// + /// Begins a streaming response for a chat completion request using the provided chat messages as input and + /// history. + /// + /// + /// can be enumerated over using the await foreach pattern using the + /// interface. + /// + /// The messages to provide as input for chat completion. + /// + /// The number of independent, alternative choices that the chat completion request should generate. + /// + /// Additional options for the chat completion request. + /// The cancellation token for the operation. + /// A streaming result with incremental chat completion updates. + public virtual StreamingClientResult CompleteChatStreaming( + IEnumerable messages, + int? 
choiceCount = null, + ChatCompletionOptions options = null) + { + PipelineMessage requestMessage = CreateCustomRequestMessage(messages, choiceCount, options); + requestMessage.BufferResponse = false; + Shim.Pipeline.Send(requestMessage); + PipelineResponse response = requestMessage.ExtractResponse(); + + if (response.IsError) + { + throw new ClientResultException(response); + } + + return StreamingEventResult.CreateFromResponse( + response, + (responseForEnumeration) => SseAsyncEnumerator.EnumerateFromSseStream( + responseForEnumeration.ContentStream, + e => StreamingChatUpdate.DeserializeStreamingChatUpdates(e))); + } + + /// + /// Begins a streaming response for a chat completion request using the provided chat messages as input and + /// history. + /// + /// + /// can be enumerated over using the await foreach pattern using the + /// interface. + /// + /// The messages to provide as input for chat completion. + /// + /// The number of independent, alternative choices that the chat completion request should generate. + /// + /// Additional options for the chat completion request. + /// A streaming result with incremental chat completion updates. + public virtual async Task> CompleteChatStreamingAsync( + IEnumerable messages, + int? choiceCount = null, + ChatCompletionOptions options = null) + { + PipelineMessage requestMessage = CreateCustomRequestMessage(messages, choiceCount, options); + requestMessage.BufferResponse = false; + await Shim.Pipeline.SendAsync(requestMessage).ConfigureAwait(false); + PipelineResponse response = requestMessage.ExtractResponse(); + + if (response.IsError) + { + throw new ClientResultException(response); + } + + return StreamingEventResult.CreateFromResponse( + response, + (responseForEnumeration) => SseAsyncEnumerator.EnumerateFromSseStream( + responseForEnumeration.ContentStream, + e => StreamingChatUpdate.DeserializeStreamingChatUpdates(e))); + } + + private Internal.Models.CreateChatCompletionRequest CreateInternalRequest( + IEnumerable messages, + ChatCompletionOptions options = null, + int? choiceCount = null, + bool? stream = null) + { + options ??= new(); + Internal.Models.CreateChatCompletionRequestResponseFormat? internalFormat = null; + if (options.ResponseFormat is not null) + { + internalFormat = new(options.ResponseFormat switch + { + ChatResponseFormat.Text => Internal.Models.CreateChatCompletionRequestResponseFormatType.Text, + ChatResponseFormat.JsonObject => Internal.Models.CreateChatCompletionRequestResponseFormatType.JsonObject, + _ => throw new ArgumentException(nameof(options.ResponseFormat)), + }, null); + } + List messageDataItems = []; + foreach (ChatRequestMessage message in messages) + { + messageDataItems.Add(ModelReaderWriter.Write(message)); + } + Dictionary additionalData = []; + return new Internal.Models.CreateChatCompletionRequest( + messageDataItems, + _clientConnector.Model, + options?.FrequencyPenalty, + options?.GetInternalLogitBias(), + options?.IncludeLogProbabilities, + options?.LogProbabilityCount, + options?.MaxTokens, + choiceCount, + options?.PresencePenalty, + internalFormat, + options?.Seed, + options?.GetInternalStopSequences(), + stream, + options?.Temperature, + options?.NucleusSamplingFactor, + options?.GetInternalTools(), + options?.ToolConstraint?.GetBinaryData(), + options?.User, + options?.FunctionConstraint?.ToBinaryData(), + options?.GetInternalFunctions(), + additionalData + ); + } + + private PipelineMessage CreateCustomRequestMessage(IEnumerable messages, int? 
choiceCount, ChatCompletionOptions options) + { + Internal.Models.CreateChatCompletionRequest internalRequest = CreateInternalRequest(messages, options, choiceCount, stream: true); + BinaryContent content = BinaryContent.Create(internalRequest); + + PipelineMessage message = Shim.Pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + message.BufferResponse = false; + PipelineRequest request = message.Request; + request.Method = "POST"; + UriBuilder uriBuilder = new(_clientConnector.Endpoint.AbsoluteUri); + StringBuilder path = new(); + path.Append("/chat/completions"); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); + request.Headers.Set("Content-Type", "application/json"); + request.Content = content; + + return message; + } + + private static PipelineMessageClassifier _responseErrorClassifier200; + private static PipelineMessageClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= PipelineMessageClassifier.Create(stackalloc ushort[] { 200 }); +} diff --git a/.dotnet/src/Custom/Chat/ChatCompletion.cs b/.dotnet/src/Custom/Chat/ChatCompletion.cs new file mode 100644 index 000000000..08c6a75cd --- /dev/null +++ b/.dotnet/src/Custom/Chat/ChatCompletion.cs @@ -0,0 +1,82 @@ +using OpenAI.ClientShared.Internal; +using System; +using System.Collections.Generic; + +namespace OpenAI.Chat; + +/// +public class ChatCompletion +{ + private Internal.Models.CreateChatCompletionResponse _internalResponse; + private int _internalChoiceIndex; + + /// + public string Id => _internalResponse.Id; + /// + public string SystemFingerprint => _internalResponse.SystemFingerprint; + /// + public DateTimeOffset CreatedAt => _internalResponse.Created; + /// + public ChatTokenUsage Usage { get; } + /// + public ChatFinishReason FinishReason { get; } + /// + public ChatMessageContent Content { get; } + /// + public IReadOnlyList ToolCalls { get; } + /// + public ChatFunctionCall FunctionCall { get; } + /// + public ChatRole Role { get; } + /// + public ChatLogProbabilityCollection LogProbabilities { get; } + /// + public int Index => (int)_internalResponse.Choices[(int)_internalChoiceIndex].Index; + + internal ChatCompletion(Internal.Models.CreateChatCompletionResponse internalResponse, int internalChoiceIndex) + { + Internal.Models.CreateChatCompletionResponseChoice internalChoice = internalResponse.Choices[(int)internalChoiceIndex]; + _internalResponse = internalResponse; + _internalChoiceIndex = internalChoiceIndex; + Role = internalChoice.Message.Role.ToString() switch + { + "system" => ChatRole.System, + "user" => ChatRole.User, + "assistant" => ChatRole.Assistant, + "tool" => ChatRole.Tool, + "function" => ChatRole.Function, + _ => throw new ArgumentException(nameof(internalChoice.Message.Role)), + }; + Usage = new(_internalResponse.Usage); + FinishReason = internalChoice.FinishReason.ToString() switch + { + "stop" => ChatFinishReason.Stopped, + "length" => ChatFinishReason.Length, + "tool_calls" => ChatFinishReason.ToolCalls, + "function_call" => ChatFinishReason.FunctionCall, + "content_filter" => ChatFinishReason.ContentFilter, + _ => throw new ArgumentException(nameof(internalChoice.FinishReason)), + }; + Content = internalChoice.Message.Content; + if (internalChoice.Message.ToolCalls != null) + { + ChangeTrackingList toolCalls = []; + foreach (Internal.Models.ChatCompletionMessageToolCall internalToolCall in internalChoice.Message.ToolCalls) + { + if (internalToolCall.Type == 
"function") + { + toolCalls.Add(new ChatFunctionToolCall(internalToolCall.Id, internalToolCall.Function.Name, internalToolCall.Function.Arguments)); + } + } + ToolCalls = toolCalls; + } + if (internalChoice.Message.FunctionCall != null) + { + FunctionCall = new(internalChoice.Message.FunctionCall.Name, internalChoice.Message.FunctionCall.Arguments); + } + if (internalChoice.Logprobs != null) + { + LogProbabilities = ChatLogProbabilityCollection.FromInternalData(internalChoice.Logprobs); + } + } +} diff --git a/.dotnet/src/Custom/Chat/ChatCompletionCollection.cs b/.dotnet/src/Custom/Chat/ChatCompletionCollection.cs new file mode 100644 index 000000000..d677b65a9 --- /dev/null +++ b/.dotnet/src/Custom/Chat/ChatCompletionCollection.cs @@ -0,0 +1,13 @@ +using System.Collections.Generic; +using System.Collections.ObjectModel; + +namespace OpenAI.Chat; + +/// +/// Represents a chat completions response payload that contains information about multiple requested chat completion +/// choices. +/// +public class ChatCompletionCollection : ReadOnlyCollection +{ + internal ChatCompletionCollection(IList list) : base(list) { } +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Chat/ChatCompletionOptions.cs b/.dotnet/src/Custom/Chat/ChatCompletionOptions.cs new file mode 100644 index 000000000..69dfd7e8f --- /dev/null +++ b/.dotnet/src/Custom/Chat/ChatCompletionOptions.cs @@ -0,0 +1,115 @@ +using OpenAI.ClientShared.Internal; +using System; +using System.ClientModel.Internal; + +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Chat; + +/// +/// Request-level options for chat completion. +/// +public partial class ChatCompletionOptions +{ + /// + public double? FrequencyPenalty { get; set; } + /// + public IDictionary TokenSelectionBiases { get; set; } = new ChangeTrackingDictionary(); + /// + public bool? IncludeLogProbabilities { get; set; } + /// + public int? LogProbabilityCount { get; set; } + /// + public int? MaxTokens { get; set; } + /// + public double? PresencePenalty { get; set; } + /// + public ChatResponseFormat? ResponseFormat { get; set; } + /// + public int? Seed { get; set; } + /// + public IList StopSequences { get; } = new ChangeTrackingList(); + /// + public double? Temperature { get; set; } + /// + public double? NucleusSamplingFactor { get; set; } + /// + public IList Tools { get; } = new ChangeTrackingList(); + /// + public ChatToolConstraint? ToolConstraint { get; set; } + /// + public string User { get; set; } + /// + public IList Functions { get; } = new ChangeTrackingList(); + /// + public ChatFunctionConstraint? 
FunctionConstraint { get; set; } + + internal BinaryData GetInternalStopSequences() + { + if (!Optional.IsCollectionDefined(StopSequences)) + { + return null; + } + return BinaryData.FromObjectAsJson(StopSequences); + } + + internal IDictionary GetInternalLogitBias() + { + ChangeTrackingDictionary packedLogitBias = []; + foreach (KeyValuePair pair in TokenSelectionBiases) + { + packedLogitBias[$"{pair.Key}"] = pair.Value; + } + return packedLogitBias; + } + + internal IList GetInternalTools() + { + ChangeTrackingList internalTools = []; + foreach (ChatToolDefinition tool in Tools) + { + if (tool is ChatFunctionToolDefinition functionTool) + { + Internal.Models.FunctionObject functionObject = new( + functionTool.Description, + functionTool.Name, + CreateInternalFunctionParameters(functionTool.Parameters), + serializedAdditionalRawData: null); + internalTools.Add(new(functionObject)); + } + } + return internalTools; + } + + internal IList GetInternalFunctions() + { + ChangeTrackingList internalFunctions = new(); + foreach (ChatFunctionDefinition function in Functions) + { + Internal.Models.ChatCompletionFunctions internalFunction = new( + function.Description, + function.Name, + CreateInternalFunctionParameters(function.Parameters), + serializedAdditionalRawData: null); + internalFunctions.Add(internalFunction); + } + return internalFunctions; + } + + internal static Internal.Models.FunctionParameters CreateInternalFunctionParameters(BinaryData parameters) + { + if (parameters == null) + { + return null; + } + JsonElement parametersElement = JsonDocument.Parse(parameters.ToString()).RootElement; + Internal.Models.FunctionParameters internalParameters = new(); + foreach (JsonProperty property in parametersElement.EnumerateObject()) + { + BinaryData propertyData = BinaryData.FromString(property.Value.GetRawText()); + internalParameters.AdditionalProperties.Add(property.Name, propertyData); + } + return internalParameters; + } +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Chat/ChatFinishReason.cs b/.dotnet/src/Custom/Chat/ChatFinishReason.cs new file mode 100644 index 000000000..48196d52d --- /dev/null +++ b/.dotnet/src/Custom/Chat/ChatFinishReason.cs @@ -0,0 +1,77 @@ +namespace OpenAI.Chat; + +/// +/// The reason the model stopped generating tokens. This will be: +/// +/// +/// Property +/// REST +/// Condition +/// +/// +/// +/// stop +/// The model encountered a natural stop point or provided stop sequence. +/// +/// +/// +/// length +/// The maximum number of tokens specified in the request was reached. +/// +/// +/// +/// content_filter +/// Content was omitted due to a triggered content filter rule. +/// +/// +/// +/// tool_calls +/// +/// With no explicit tool_choice, the model called one or more tools that were defined in the request. +/// +/// +/// +/// +/// function_call +/// (Deprecated) The model called a function that was defined in the request. +/// +/// +/// +public enum ChatFinishReason +{ + /// + /// Indicates that the model encountered a natural stop point or provided stop sequence. + /// + Stopped, + /// + /// Indicates that the model reached the maximum number of tokens allowed for the request. + /// + Length, + /// + /// Indicates that content was omitted due to a triggered content filter rule. + /// + ContentFilter, + /// + /// Indicates that the model called a function that was defined in the request. 
+ /// + /// + /// To resolve tool calls, append the message associated with the tool calls followed by matching instances of + /// for each tool call, then perform another chat completion with the combined + /// set of messages. + /// + /// Note: is not provided as the finish_reason if the model calls a + /// tool in response to an explicit tool_choice via . + /// In that case, calling the specified tool is assumed and the expected reason is . + /// + /// + ToolCalls, + /// + /// Indicates that the model called a function that was defined in the request. + /// + /// + /// To resolve a function call, append the message associated with the function call followed by a + /// with the appropriate name and arguments, then perform another chat + /// completion with the combined set of messages. + /// + FunctionCall, +} diff --git a/.dotnet/src/Custom/Chat/ChatFunctionCall.Serialization.cs b/.dotnet/src/Custom/Chat/ChatFunctionCall.Serialization.cs new file mode 100644 index 000000000..eb198cfd4 --- /dev/null +++ b/.dotnet/src/Custom/Chat/ChatFunctionCall.Serialization.cs @@ -0,0 +1,37 @@ +using System; +using System.ClientModel.Internal; + +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Diagnostics.CodeAnalysis; +using System.Text.Json; + +namespace OpenAI.Chat; + +public partial class ChatFunctionCall : IJsonModel +{ + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + writer.WriteStartObject(); + writer.WriteString("name"u8, Name); + writer.WriteString("arguments"u8, Arguments); + writer.WriteEndObject(); + } + + ChatFunctionCall IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + throw new NotImplementedException(); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + throw new NotImplementedException(); + } + + ChatFunctionCall IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + throw new NotImplementedException(); + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; +} diff --git a/.dotnet/src/Custom/Chat/ChatFunctionCall.cs b/.dotnet/src/Custom/Chat/ChatFunctionCall.cs new file mode 100644 index 000000000..822f1577f --- /dev/null +++ b/.dotnet/src/Custom/Chat/ChatFunctionCall.cs @@ -0,0 +1,47 @@ +using System.Diagnostics.CodeAnalysis; + +namespace OpenAI.Chat; + +/// +/// Represents an assistant call against a supplied that is needed by the +/// model to continue the logical conversation. +/// +/// +/// +/// Note that functions are deprecated in favor of tools and using +/// instances with will enable the use of tool_calls via +/// instead of this type. +/// +/// +/// The model makes a function_call in response to evaluation of supplied name and +/// description information in functions and is resolved by providing a new +/// with matching function output on a subsequent chat completion +/// request. +/// +/// +public partial class ChatFunctionCall +{ + /// + /// The name of the function being called by the model. + /// + public required string Name { get; set; } + /// + /// The arguments to the function being called by the model. + /// + public required string Arguments { get; set; } + /// + /// Creates a new instance of . + /// + public ChatFunctionCall() { } + /// + /// Creates a new instance of . + /// + /// The name of the function that was called by the model. + /// The arguments to the function that was called by the model.
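To make the function_call round trip described above concrete, a minimal sketch (the `InvokeLocalFunction` helper and the message-list `CompleteChat` overload are hypothetical):

```csharp
// After a completion finishes with ChatFinishReason.FunctionCall:
ChatFunctionCall functionCall = completion.FunctionCall;
string output = InvokeLocalFunction(functionCall.Name, functionCall.Arguments); // hypothetical caller-side dispatch

// Echo the assistant's function call into the history, then resolve it with a function message.
messages.Add(new ChatRequestAssistantMessage(functionCall));
messages.Add(new ChatRequestFunctionMessage(functionCall.Name, output));
ChatCompletion followUp = client.CompleteChat(messages, options); // assumed overload
```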
+ [SetsRequiredMembers] + public ChatFunctionCall(string functionName, string arguments) + { + Name = functionName; + Arguments = arguments; + } +} diff --git a/.dotnet/src/Custom/Chat/ChatFunctionConstraint.cs b/.dotnet/src/Custom/Chat/ChatFunctionConstraint.cs new file mode 100644 index 000000000..fe9e7bf66 --- /dev/null +++ b/.dotnet/src/Custom/Chat/ChatFunctionConstraint.cs @@ -0,0 +1,73 @@ +using System; + +namespace OpenAI.Chat; + +/// +/// Represents a desired manner in which the model should use the functions defined in a chat completion request. +/// +public readonly partial struct ChatFunctionConstraint : IEquatable +{ + private readonly string _value; + private readonly bool _isPredefined; + + /// + /// auto specifies that the model should freely call any or none of the provided functions. + /// This is the implied default when not otherwise specified. + /// + public static ChatFunctionConstraint Auto { get; } = new("auto", isPredefined: true); + /// + /// none specifies that the model should not call any of the provided functions. Note that the definition + /// of the functions may still influence the chat completion content even when not called. + /// + public static ChatFunctionConstraint None { get; } = new("none", isPredefined: true); + + /// + /// Creates a new instance of that specifies that the model should invoke a + /// specific, named function. + /// + /// The name of the function that the model should call. + public ChatFunctionConstraint(string functionName) + : this(functionName, isPredefined: false) + { + } + + internal ChatFunctionConstraint(string functionNameOrPredefinedLabel, bool isPredefined) + { + _value = functionNameOrPredefinedLabel; + _isPredefined = isPredefined; + } + + /// + public static bool operator ==(ChatFunctionConstraint left, ChatFunctionConstraint right) + => left._isPredefined == right._isPredefined && left._value == right._value; + /// + public static implicit operator ChatFunctionConstraint(string value) => new(value); + /// + public static bool operator !=(ChatFunctionConstraint left, ChatFunctionConstraint right) + => left._isPredefined != right._isPredefined || left._value != right._value; + /// + public bool Equals(ChatFunctionConstraint other) + => other._isPredefined.Equals(_isPredefined) && other._value.Equals(_value); + /// + public override string ToString() => ToBinaryData().ToString(); + /// + public override bool Equals(object obj) + => obj is ChatFunctionConstraint constraint && constraint.Equals(this); + /// + public override int GetHashCode() => $"{_value}-{_isPredefined}".GetHashCode(); + + internal BinaryData ToBinaryData() + { + if (_isPredefined) + { + return BinaryData.FromString(_value); + } + else + { + return BinaryData.FromObjectAsJson(new + { + name = _value, + }); + } + } +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Chat/ChatFunctionDefinition.cs b/.dotnet/src/Custom/Chat/ChatFunctionDefinition.cs new file mode 100644 index 000000000..ddcbb6ffa --- /dev/null +++ b/.dotnet/src/Custom/Chat/ChatFunctionDefinition.cs @@ -0,0 +1,60 @@ +using System; +using System.Diagnostics.CodeAnalysis; + +namespace OpenAI.Chat; + +/// +/// Represents the definition of a function that the model may call, as supplied in a chat completion request. +/// +public class ChatFunctionDefinition +{ + /// + /// The name of the function. + /// + public required string Name { get; set; } + /// + /// A friendly description of the function. This supplements in informing the model about when + /// it should call the function. 
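For illustration, the constraint type above can be applied to the request options in any of three ways (a sketch; the function name is a placeholder):

```csharp
options.FunctionConstraint = ChatFunctionConstraint.Auto;                        // model decides (implied default)
options.FunctionConstraint = ChatFunctionConstraint.None;                        // never call a function
options.FunctionConstraint = new ChatFunctionConstraint("get_current_weather");  // force this named function
```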
+ /// + public string Description { get; set; } + /// + /// The parameter information for the function, provided in JSON Schema format. + /// + /// + /// The method provides + /// an easy definition interface using the dynamic type: + /// + /// Parameters = BinaryData.FromObjectAsJson(new + /// { + /// type = "object", + /// properties = new + /// { + /// your_function_argument = new + /// { + /// type = "string", + /// description = "the description of your function argument" + /// } + /// }, + /// required = new[] { "your_function_argument" } + /// }) + /// + /// + public BinaryData Parameters { get; set; } + /// + /// Creates a new instance of . + /// + public ChatFunctionDefinition() { } + /// + /// Creates a new instance of . + /// + /// The name of the function. + /// A description of the function's behavior or purpose. + /// The parameter information for the function, in JSON Schema format. + [SetsRequiredMembers] + public ChatFunctionDefinition(string name, string description = null, BinaryData parameters = null) + { + Name = name; + Description = description; + Parameters = parameters; + } +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Chat/ChatFunctionToolCall.cs b/.dotnet/src/Custom/Chat/ChatFunctionToolCall.cs new file mode 100644 index 000000000..5a6a823b6 --- /dev/null +++ b/.dotnet/src/Custom/Chat/ChatFunctionToolCall.cs @@ -0,0 +1,69 @@ +using System; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Diagnostics.CodeAnalysis; +using System.Text.Json; + +namespace OpenAI.Chat; + +/// +/// Represents a call made by the model to a function tool that was defined in a chat completion request. +/// +public class ChatFunctionToolCall : ChatToolCall +{ + internal Internal.Models.ChatCompletionMessageToolCallFunction InternalToolCall { get; } + + /// + /// Gets the name of the function. + /// + public required string Name + { + get => InternalToolCall.Name; + set => InternalToolCall.Name = value; + } + + /// + /// Gets the arguments to the function. + /// + public required string Arguments + { + get => InternalToolCall.Arguments; + set => InternalToolCall.Arguments = value; + + } + /// + /// Creates a new instance of . + /// + public ChatFunctionToolCall() + { + InternalToolCall = new(); + } + + /// + /// Creates a new instance of . + /// + /// + /// The ID of the tool call, used when resolving the tool call with a future + /// . + /// + /// The name of the function. + /// The arguments to the function. + [SetsRequiredMembers] + public ChatFunctionToolCall(string toolCallId, string functionName, string arguments) + : this() + { + Id = toolCallId; + Name = functionName; + Arguments = arguments; + } + + internal override void WriteDerivedAdditions(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + writer.WriteString("type"u8, "function"u8); + writer.WritePropertyName("function"u8); + writer.WriteStartObject(); + writer.WriteString("name"u8, Name); + writer.WriteString("arguments"u8, Arguments); + writer.WriteEndObject(); + } +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Chat/ChatFunctionToolDefinition.cs b/.dotnet/src/Custom/Chat/ChatFunctionToolDefinition.cs new file mode 100644 index 000000000..313b76aec --- /dev/null +++ b/.dotnet/src/Custom/Chat/ChatFunctionToolDefinition.cs @@ -0,0 +1,60 @@ +using System; +using System.Diagnostics.CodeAnalysis; + +namespace OpenAI.Chat; + +/// +/// Represents the definition of a function tool that is callable by the model for a chat completion request. 
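A sketch of declaring such a tool, following the JSON Schema shape shown in the Parameters remarks (the tool name and schema are illustrative):

```csharp
ChatFunctionToolDefinition getWeatherTool = new(
    name: "get_current_weather",
    description: "Gets the current weather for a given location.",
    parameters: BinaryData.FromObjectAsJson(new
    {
        type = "object",
        properties = new
        {
            location = new { type = "string", description = "The city to look up." },
        },
        required = new[] { "location" },
    }));

options.Tools.Add(getWeatherTool); // Tools is a get-only, mutable list
```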
+/// +public class ChatFunctionToolDefinition : ChatToolDefinition +{ + /// + /// The name of the function that the tool represents. + /// + public required string Name { get; set; } + /// + /// A friendly description of the function. This supplements in informing the model about when + /// it should call the function. + /// + public string Description { get; set; } + /// + /// The parameter information for the function, provided in JSON Schema format. + /// + /// + /// The method provides + /// an easy definition interface using the dynamic type: + /// + /// Parameters = BinaryData.FromObjectAsJson(new + /// { + /// type = "object", + /// properties = new + /// { + /// your_function_argument = new + /// { + /// type = "string", + /// description = "the description of your function argument" + /// } + /// }, + /// required = new[] { "your_function_argument" } + /// }) + /// + /// + public BinaryData Parameters { get; set; } + /// + /// Creates a new instance of . + /// + public ChatFunctionToolDefinition() { } + /// + /// Creates a new instance of . + /// + /// The name of the function. + /// The description of the function. + /// The parameters into the function, in JSON Schema format. + [SetsRequiredMembers] + public ChatFunctionToolDefinition(string name, string description = null, BinaryData parameters = null) + { + Name = name; + Description = description; + Parameters = parameters; + } +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Chat/ChatLogProbabilityCollection.cs b/.dotnet/src/Custom/Chat/ChatLogProbabilityCollection.cs new file mode 100644 index 000000000..01accec6c --- /dev/null +++ b/.dotnet/src/Custom/Chat/ChatLogProbabilityCollection.cs @@ -0,0 +1,46 @@ +using System.Collections.Generic; +using System.Collections.ObjectModel; +using System.Linq; + +namespace OpenAI.Chat; + +/// +/// Represents a collection of log probability result information as requested via +/// . 
+/// +public class ChatLogProbabilityCollection : ReadOnlyCollection +{ + internal ChatLogProbabilityCollection(IList list) : base(list) { } + internal static ChatLogProbabilityCollection FromInternalData( + Internal.Models.CreateChatCompletionResponseChoiceLogprobs internalLogprobs) + { + if (internalLogprobs == null) + { + return null; + } + List logProbabilities = []; + foreach (Internal.Models.ChatCompletionTokenLogprob internalLogprob in internalLogprobs.Content) + { + List alternateLogProbabilities = null; + if (internalLogprob.TopLogprobs != null) + { + alternateLogProbabilities = []; + foreach (Internal.Models.ChatCompletionTokenLogprobTopLogprob internalTopLogprob in internalLogprob.TopLogprobs) + { + List convertedByteValues = internalTopLogprob.Bytes.Select(longByteValue => (int)longByteValue).ToList(); + alternateLogProbabilities.Add(new( + internalTopLogprob.Token, + internalTopLogprob.Logprob, + convertedByteValues)); + } + } + List convertedResultByteValues = internalLogprob.Bytes.Select(longByteValue => (int)longByteValue).ToList(); + logProbabilities.Add(new( + internalLogprob.Token, + internalLogprob.Logprob, + convertedResultByteValues, + alternateLogProbabilities)); + } + return new ChatLogProbabilityCollection(logProbabilities); + } +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Chat/ChatLogProbabilityResult.cs b/.dotnet/src/Custom/Chat/ChatLogProbabilityResult.cs new file mode 100644 index 000000000..619040910 --- /dev/null +++ b/.dotnet/src/Custom/Chat/ChatLogProbabilityResult.cs @@ -0,0 +1,43 @@ +namespace OpenAI.Chat; + +using System.Collections.Generic; +using System.Linq; + +/// +/// Represents a single token's log probability information, as requested via +/// . +/// +public class ChatLogProbabilityResult +{ + /// + /// The token for which this log probability information applies. + /// + public string Token { get; } + /// + /// The logprob for the token. + /// + public double LogProbability { get; } + /// + /// A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where + /// characters are represented by multiple tokens and their byte representations must be combined to generate + /// the correct text representation. Can be null if there is no bytes representation for the token. + /// + public IReadOnlyList Utf8ByteValues { get; } + /// + /// List of the most likely tokens and their log probability at this token position. In rare cases, + /// there may be fewer than the number of requested top_logprobs returned, as supplied via + /// . + /// + public IReadOnlyList AlternateLogProbabilities { get; } + internal ChatLogProbabilityResult( + string token, + double logProbability, + IEnumerable byteValues, + IEnumerable alternateLogProbabilities) + { + Token = token; + LogProbability = logProbability; + Utf8ByteValues = byteValues.ToList(); + AlternateLogProbabilities = alternateLogProbabilities?.ToList(); + } +} diff --git a/.dotnet/src/Custom/Chat/ChatLogProbabilityResultItem.cs b/.dotnet/src/Custom/Chat/ChatLogProbabilityResultItem.cs new file mode 100644 index 000000000..906d2891c --- /dev/null +++ b/.dotnet/src/Custom/Chat/ChatLogProbabilityResultItem.cs @@ -0,0 +1,42 @@ +using System.Collections.Generic; + +namespace OpenAI.Chat; + +/// +/// Represents a single item of log probability information as requested via +/// and +/// . +/// +public class ChatLogProbabilityResultItem +{ + /// + /// The token for which this log probability information applies.
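A sketch of requesting and reading these values (assumes a `completion` produced by a `CompleteChat` call made with these options; the null check reflects that alternates are only populated when requested):

```csharp
ChatCompletionOptions options = new()
{
    IncludeLogProbabilities = true,
    LogProbabilityCount = 2, // top alternates per token position
};

foreach (ChatLogProbabilityResult result in completion.LogProbabilities)
{
    Console.WriteLine($"{result.Token}: {result.LogProbability}");
    if (result.AlternateLogProbabilities is not null)
    {
        foreach (ChatLogProbabilityResultItem alternate in result.AlternateLogProbabilities)
        {
            Console.WriteLine($"  alt {alternate.Token}: {alternate.LogProbability}");
        }
    }
}
```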
+ /// + public string Token { get; } + /// + /// The logprob for the token. + /// + public double LogProbability { get; } + /// + /// A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where + /// characters are represented by multiple tokens and their byte representations must be combined to generate + /// the correct text representation. Can be null if there is no bytes representation for the token. + /// + public IReadOnlyList Utf8ByteValues { get; } + /// + /// Creates a new instance of . + /// + protected ChatLogProbabilityResultItem() { } + /// + /// Creates a new instance of . + /// + /// The token represented by this item. + /// The logprob for the token. + /// The UTF8 byte value sequence representation for the token. + internal ChatLogProbabilityResultItem(string token, double logProbability, IEnumerable byteValues) + { + Token = token; + LogProbability = logProbability; + Utf8ByteValues = new List(byteValues); + } +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Chat/ChatMessageContent.cs b/.dotnet/src/Custom/Chat/ChatMessageContent.cs new file mode 100644 index 000000000..a39fbf184 --- /dev/null +++ b/.dotnet/src/Custom/Chat/ChatMessageContent.cs @@ -0,0 +1,112 @@ +using System; + +namespace OpenAI.Chat; + +/// +/// Represents the common base type for a piece of message content used for chat completions. +/// +public partial class ChatMessageContent +{ + /// + /// The type of message content data, e.g. text or image, that this instance + /// represents. + /// + public ChatMessageContentKind ContentKind { get; } + + private object _contentValue; + private string _contentMediaTypeName; + + internal ChatMessageContent(object value, ChatMessageContentKind kind, string contentMediaTypeName = null) + { + _contentValue = value; + ContentKind = kind; + _contentMediaTypeName = contentMediaTypeName; + } + + /// + /// Creates a new instance of that encapsulates text content. + /// + /// The content for the new instance. + /// A new instance of . + public static ChatMessageContent CreateText(string text) => new(text, ChatMessageContentKind.Text); + + /// + /// Creates a new instance of that encapsulates image content obtained from + /// an internet location that will be accessible to the model when evaluating a message with this content. + /// + /// + /// An internet location pointing to an image. This must be accessible to the model. + /// + /// A new instance of . + public static ChatMessageContent CreateImage(Uri imageUri) => new(imageUri, ChatMessageContentKind.Image); + + /// + /// Creates a new instance of that encapsulates binary image content. + /// + /// The binary representation of the image content. + /// The media type name, e.g. image/png, for the image. + /// A new instance of . + public static ChatMessageContent CreateImage(BinaryData imageBytes, string mediaType) + => new(imageBytes, ChatMessageContentKind.Image, mediaType); + + /// + /// Provides the associated with a content item using + /// . + /// + /// + /// will infer from the content type and `ChatMessageContent` known to be text can typically + /// be treated like a string without calling this explicitly. + /// + /// The content string for the text content item. + /// The content does not support a text representation. 
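The factory methods above compose as follows (the URL and file path are placeholders; multi-part content currently targets gpt-4-vision-preview):

```csharp
using System;
using System.IO;
using OpenAI.Chat;

ChatMessageContent question = ChatMessageContent.CreateText("What's shown in this image?");
ChatMessageContent linkedImage = ChatMessageContent.CreateImage(new Uri("https://example.com/photo.png"));
ChatMessageContent inlineImage = ChatMessageContent.CreateImage(
    BinaryData.FromBytes(File.ReadAllBytes("photo.png")),
    "image/png");

ChatRequestUserMessage visionMessage = new(question, linkedImage, inlineImage);
```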
+ public string ToText() + => ContentKind switch + { + ChatMessageContentKind.Text => _contentValue?.ToString(), + _ => throw new InvalidOperationException( + $"{nameof(ToText)} conversion not supported for content kind: {ContentKind}"), + }; + + /// + /// Provides a associated with a content item. These URIs can refer to an internet location + /// accessible to the target model or can be base64-encoded data URIs. + /// + /// A URI representation of the content item. + /// The content does not support a URI representation. + public Uri ToUri() + => ContentKind switch + { + ChatMessageContentKind.Image => _contentValue switch + { + Uri imageUri => imageUri, + BinaryData imageData => new Uri($"data:{_contentMediaTypeName};base64,{Convert.ToBase64String(imageData.ToArray())}"), + _ => throw new InvalidOperationException( + $"Cannot convert underlying image data type '{_contentValue?.GetType()}' to a {nameof(Uri)}"), + }, + _ => throw new InvalidOperationException( + $"{nameof(ToUri)} conversion not supported for content kind: {ContentKind}"), + }; + + /// + /// The implicit conversion operator that infers an equivalent instance from + /// a plain . + /// + /// The text for the message content. + public static implicit operator ChatMessageContent(string value) => CreateText(value); + + /// + /// An implicit operator allowing a content item to be treated as a string. + /// + /// + public static implicit operator string(ChatMessageContent content) => content.ToText(); + + /// + public override string ToString() + { + if (ContentKind == ChatMessageContentKind.Text) + { + return ToText(); + } + return base.ToString(); + } +} diff --git a/.dotnet/src/Custom/Chat/ChatMessageContentKind.cs b/.dotnet/src/Custom/Chat/ChatMessageContentKind.cs new file mode 100644 index 000000000..7fafc8889 --- /dev/null +++ b/.dotnet/src/Custom/Chat/ChatMessageContentKind.cs @@ -0,0 +1,19 @@ +namespace OpenAI.Chat; + +/// +/// Represents the possible kinds of underlying data for a chat message's content property. +/// +public enum ChatMessageContentKind +{ + /// + /// Plain text content, represented as a . + /// + Text, + /// + /// Image content, as used exclusively by gpt-4-vision-preview when providing an array of content items + /// into a chat completion request. + /// + Image, + // Audio, + // Video, +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Chat/ChatRequestAssistantMessage.cs b/.dotnet/src/Custom/Chat/ChatRequestAssistantMessage.cs new file mode 100644 index 000000000..eb80d71ea --- /dev/null +++ b/.dotnet/src/Custom/Chat/ChatRequestAssistantMessage.cs @@ -0,0 +1,128 @@ +using OpenAI.ClientShared.Internal; +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Chat; + +/// +/// Represents a chat message of the assistant role as supplied to a chat completion request. As assistant +/// messages are originated by the model on responses, instances typically +/// represent chat history or example interactions to guide model behavior. +/// +public class ChatRequestAssistantMessage : ChatRequestMessage +{ + /// + /// An optional name associated with the assistant message. This is typically defined with a system + /// message and is used to differentiate between multiple participants of the same role. + /// + public string Name { get; set; } + + /// + /// The tool_calls furnished by the model that are needed to continue the logical conversation across chat + /// completion requests.
A instance corresponds to a supplied + /// instance and is resolved by providing a + /// that correlates via id to the item in tool_calls. + /// + public IReadOnlyList ToolCalls { get; } = new ChangeTrackingList(); + + /// + /// Deprecated in favor of tool_calls. + /// + /// The function_call furnished by the model that is needed to continue the logical conversation + /// across chat completion requests. A instance corresponds to a supplied + /// instance and is resolved by providing a + /// that correlates via name to the function_call. + /// + /// + public ChatFunctionCall FunctionCall { get; } + + // Assistant messages may present ONE OF: + // - Ordinary text content without tools or a function, in which case the content is required; + // - A list of tool calls, together with optional text content + // - A function call, together with optional text content + + /// + /// Creates a new instance of that represents ordinary text content and + /// does not feature tool or function calls. + /// + /// The text content of the message. + public ChatRequestAssistantMessage(string content) + : base(ChatRole.Assistant, content) + { } + + /// + /// Creates a new instance of that represents tool_calls that + /// were provided by the model. + /// + /// The tool_calls made by the model. + /// Optional text content associated with the message. + public ChatRequestAssistantMessage(IEnumerable toolCalls, string content = null) + : base(ChatRole.Assistant, content) + { + ToolCalls = new List(toolCalls); + } + + /// + /// Creates a new instance of that represents a function_call + /// (deprecated in favor of tool_calls) that was made by the model. + /// + /// The function_call made by the model. + /// Optional text content associated with the message. + public ChatRequestAssistantMessage(ChatFunctionCall functionCall, string content = null) + : base(ChatRole.Assistant, content) + { + FunctionCall = functionCall; + } + + /// + /// Creates a new instance of from a with + /// an assistant role response. + /// + /// + /// This constructor will copy the content, tool_calls, and function_call from a chat + /// completion response into a new assistant role request message. + /// + /// + /// The from which the conversation history request message should be created. + /// + /// + /// The role of the provided chat completion response was not . 
+ /// + public ChatRequestAssistantMessage(ChatCompletion chatCompletion) + : base(ChatRole.Assistant, chatCompletion?.Content) + { + if (chatCompletion?.Role != ChatRole.Assistant) + { + throw new ArgumentException( + $"Can't instantiate a {nameof(ChatRequestAssistantMessage)} from a chat completion" + + $" with role: {chatCompletion?.Role}"); + } + ToolCalls = chatCompletion.ToolCalls; + FunctionCall = chatCompletion.FunctionCall; + } + + internal override void WriteDerivedAdditions(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + if (Optional.IsDefined(Name)) + { + writer.WriteString("name"u8, Name); + } + if (Optional.IsCollectionDefined(ToolCalls)) + { + writer.WritePropertyName("tool_calls"u8); + writer.WriteStartArray(); + foreach (ChatToolCall toolCall in ToolCalls) + { + (toolCall as IJsonModel).Write(writer, options); + } + writer.WriteEndArray(); + } + if (Optional.IsDefined(FunctionCall)) + { + writer.WritePropertyName("function_call"u8); + (FunctionCall as IJsonModel).Write(writer, options); + } + } +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Chat/ChatRequestFunctionMessage.cs b/.dotnet/src/Custom/Chat/ChatRequestFunctionMessage.cs new file mode 100644 index 000000000..f1190771e --- /dev/null +++ b/.dotnet/src/Custom/Chat/ChatRequestFunctionMessage.cs @@ -0,0 +1,40 @@ +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Text.Json; + +namespace OpenAI.Chat; + +/// +/// Represents a chat message of the function role as provided to a chat completion request. A function message +/// resolves a prior function_call received from the model and correlates to both a supplied +/// instance as well as a made by the model on an +/// assistant response message. +/// +public class ChatRequestFunctionMessage : ChatRequestMessage +{ + /// + /// The name of the called function that this message provides information from. + /// + public string FunctionName { get; set; } // JSON "name" + + /// + /// Creates a new instance of . + /// + /// + /// The name of the called function that this message provides information from. + /// + /// + /// The textual content that represents the output or result from the called function. There is no format + /// restriction (e.g. JSON) imposed on this content.
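Putting the assistant- and tool-message pieces together, a hedged sketch of the tool-call round trip (the `RunTool` dispatch helper and the message-list `CompleteChat` overload are hypothetical):

```csharp
if (completion.FinishReason == ChatFinishReason.ToolCalls)
{
    // Echo the assistant turn, including its tool_calls, back into the history.
    messages.Add(new ChatRequestAssistantMessage(completion));

    foreach (ChatToolCall toolCall in completion.ToolCalls)
    {
        if (toolCall is ChatFunctionToolCall functionToolCall)
        {
            string toolOutput = RunTool(functionToolCall.Name, functionToolCall.Arguments); // hypothetical dispatch
            messages.Add(new ChatRequestToolMessage(functionToolCall.Id, toolOutput));
        }
    }

    ChatCompletion followUp = client.CompleteChat(messages, options); // assumed overload
}
```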
+ /// + public ChatRequestFunctionMessage(string functionName, string content) + : base(ChatRole.Function, content) + { + FunctionName = functionName; + } + + internal override void WriteDerivedAdditions(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + writer.WriteString("name"u8, FunctionName); + } +} diff --git a/.dotnet/src/Custom/Chat/ChatRequestMessage.Serialization.cs b/.dotnet/src/Custom/Chat/ChatRequestMessage.Serialization.cs new file mode 100644 index 000000000..96cc4991f --- /dev/null +++ b/.dotnet/src/Custom/Chat/ChatRequestMessage.Serialization.cs @@ -0,0 +1,94 @@ +using System; +using System.ClientModel.Internal; + +using System.ClientModel; +using System.ClientModel.Primitives; +using System.ComponentModel.Design; +using System.Text.Json; +using OpenAI.ClientShared.Internal; + +namespace OpenAI.Chat; + +public abstract partial class ChatRequestMessage : IJsonModel +{ + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + writer.WriteStartObject(); + writer.WriteString("role"u8, Role switch + { + ChatRole.System => "system", + ChatRole.User => "user", + ChatRole.Assistant => "assistant", + ChatRole.Tool => "tool", + ChatRole.Function => "function", + _ => throw new ArgumentException(nameof(Role)) + }); + if (Optional.IsDefined(Content)) + { + writer.WritePropertyName("content"u8); + if (Content.Span.Length == 0) + { + writer.WriteNullValue(); + } + else if (Content.Span.Length == 1) + { + if (Content.Span[0].ContentKind == ChatMessageContentKind.Text) + { + writer.WriteStringValue(Content.Span[0].ToText()); + } + else + { + throw new InvalidOperationException(); + } + } + else if (Content.Span.Length > 1) + { + writer.WriteStartArray(); + foreach (ChatMessageContent contentItem in Content.Span) + { + writer.WriteStartObject(); + if (contentItem.ContentKind == ChatMessageContentKind.Text) + { + writer.WriteString("type"u8, "text"u8); + writer.WriteString("text"u8, contentItem.ToText()); + } + else if (contentItem.ContentKind == ChatMessageContentKind.Image) + { + writer.WriteString("type"u8, "image_url"u8); + writer.WritePropertyName("image_url"u8); + writer.WriteStartObject(); + writer.WriteString("url"u8, contentItem.ToUri().AbsoluteUri); + writer.WriteEndObject(); + } + else + { + throw new InvalidOperationException(); + } + writer.WriteEndObject(); + } + writer.WriteEndArray(); + } + } + WriteDerivedAdditions(writer, options); + writer.WriteEndObject(); + } + + ChatRequestMessage IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + throw new NotImplementedException(); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + throw new NotImplementedException(); + } + + ChatRequestMessage IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + throw new NotImplementedException(); + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + internal abstract void WriteDerivedAdditions(Utf8JsonWriter writer, ModelReaderWriterOptions options); +} diff --git a/.dotnet/src/Custom/Chat/ChatRequestMessage.cs b/.dotnet/src/Custom/Chat/ChatRequestMessage.cs new file mode 100644 index 000000000..38444eb4e --- /dev/null +++ b/.dotnet/src/Custom/Chat/ChatRequestMessage.cs @@ -0,0 +1,108 @@ +using System; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections; +using System.Collections.Generic; + +namespace OpenAI.Chat; + +/// +/// A common, base representation of a message provided as 
input into a chat completion request. +/// +/// +/// +/// +/// Type - +/// Role - +/// Description +/// +/// +/// - +/// system - +/// Instructions to the model that guide the behavior of future assistant messages. +/// +/// +/// - +/// user - +/// Input messages from the caller, typically paired with assistant messages in a conversation. +/// +/// +/// - +/// assistant - +/// +/// Output messages from the model with responses to the user or calls to tools or functions that are +/// needed to continue the logical conversation. +/// +/// +/// +/// - +/// tool - +/// +/// Resolution information for a in an earlier +/// that was made against a supplied +/// . +/// +/// +/// +/// - +/// function - +/// +/// Resolution information for a in an earlier +/// that was made against a supplied +/// . Note that functions are deprecated in favor of +/// tool_calls. +/// +/// +/// +/// +public abstract partial class ChatRequestMessage +{ + /// + /// The role associated with the message. + /// + public ChatRole Role { get; } + + /// + /// The content associated with the message. The interpretation of this content will vary depending on the message type. + /// + public ReadOnlyMemory Content => _contentItems.AsMemory(); + private readonly ChatMessageContent[] _contentItems; + + internal ChatRequestMessage(ChatRole role, ChatMessageContent content) + : this(role, [content]) + { } + + internal ChatRequestMessage(ChatRole role, ChatMessageContent[] content) + { + Role = role; + _contentItems = content; + } + + /// + public static ChatRequestSystemMessage CreateSystemMessage(string content) + => new ChatRequestSystemMessage(content); + + /// + public static ChatRequestUserMessage CreateUserMessage(string content) + => new ChatRequestUserMessage(content); + + /// + public static ChatRequestUserMessage CreateUserMessage(IEnumerable contentItems) + => new ChatRequestUserMessage(contentItems); + + /// + public static ChatRequestUserMessage CreateUserMessage(params ChatMessageContent[] contentItems) + => new ChatRequestUserMessage(contentItems); + + /// + public static ChatRequestAssistantMessage CreateAssistantMessage(string content) + => new ChatRequestAssistantMessage(content); + + /// + public static ChatRequestToolMessage CreateToolMessage(string toolCallId, string content) + => new ChatRequestToolMessage(toolCallId, content); + + /// + public static ChatRequestFunctionMessage CreateFunctionMessage(string functionName, string content) + => new ChatRequestFunctionMessage(functionName, content); +} diff --git a/.dotnet/src/Custom/Chat/ChatRequestSystemMessage.cs b/.dotnet/src/Custom/Chat/ChatRequestSystemMessage.cs new file mode 100644 index 000000000..6081118d0 --- /dev/null +++ b/.dotnet/src/Custom/Chat/ChatRequestSystemMessage.cs @@ -0,0 +1,39 @@ +using System; +using System.ClientModel.Internal; + +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Dynamic; +using System.Runtime.InteropServices; +using System.Text.Json; +using OpenAI.ClientShared.Internal; + +namespace OpenAI.Chat; + +/// +/// Represents a chat message of the system role as supplied to a chat completion request. A system message is +/// generally supplied as the first message to a chat completion request and guides the model's behavior across future +/// assistant role response messages. These messages may help control behavior, style, tone, and +/// restrictions for a model-based assistant. +/// +public class ChatRequestSystemMessage : ChatRequestMessage +{ + /// + /// An optional name for the participant.
+ /// + public string Name { get; set; } // JSON "name" + + /// + /// Creates a new instance of . + /// + /// The system message text that guides the model's behavior. + public ChatRequestSystemMessage(string content) : base(ChatRole.System, content) { } + + internal override void WriteDerivedAdditions(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + if (Optional.IsDefined(Name)) + { + writer.WriteString("name"u8, Name); + } + } +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Chat/ChatRequestToolMessage.cs b/.dotnet/src/Custom/Chat/ChatRequestToolMessage.cs new file mode 100644 index 000000000..7ec17eea5 --- /dev/null +++ b/.dotnet/src/Custom/Chat/ChatRequestToolMessage.cs @@ -0,0 +1,53 @@ +using System; +using System.ClientModel.Internal; + +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Text.Json; + +namespace OpenAI.Chat; + +/// +/// Represents a chat message of the tool role as supplied to a chat completion request. A tool message +/// encapsulates a resolution of a made by the model. The typical interaction flow featuring +/// tool messages is: +/// +/// A provides a on a request; +/// +/// Based on the name and description information of provided tools, the model responds with one or +/// more instances that need to be resolved to continue the logical conversation; +/// +/// +/// For each , the matching tool is invoked and its output is supplied back to the model +/// via a to resolve the tool call and allow the logical conversation to +/// continue. +/// +/// +/// +public class ChatRequestToolMessage : ChatRequestMessage +{ + /// + /// The id correlating to the prior made by the model. + /// + public string ToolCallId { get; set; } + + /// + /// Creates a new instance of . + /// + /// The id correlating to a made by the model. + /// + /// The textual content, produced by the defined tool in response to the correlated , + /// that resolves the tool call and allows the logical conversation to continue. No format restrictions (e.g. + /// JSON) are imposed on the content emitted by tools. + /// + public ChatRequestToolMessage(string toolCallId, string content) + : base(ChatRole.Tool, content) + { + ToolCallId = toolCallId; + } + + internal override void WriteDerivedAdditions(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + writer.WriteString("tool_call_id"u8, ToolCallId); + } +} diff --git a/.dotnet/src/Custom/Chat/ChatRequestUserMessage.cs b/.dotnet/src/Custom/Chat/ChatRequestUserMessage.cs new file mode 100644 index 000000000..95a80567a --- /dev/null +++ b/.dotnet/src/Custom/Chat/ChatRequestUserMessage.cs @@ -0,0 +1,65 @@ +using System.ClientModel.Internal; + +using System; +using System.Collections.Generic; +using System.Dynamic; +using System.Text.Json; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Linq; +using OpenAI.ClientShared.Internal; + +namespace OpenAI.Chat; + +/// +/// Represents a chat message of the user role as supplied to a chat completion request. A user message contains +/// information originating from the caller and serves as a prompt for the model to complete. User messages may result +/// in either direct assistant message responses or in calls to supplied tools or functions. +/// +public class ChatRequestUserMessage : ChatRequestMessage +{ + /// + /// An optional name for the participant. + /// + public string Name { get; set; } + + /// + /// Creates a new instance of with ordinary text content. + /// + /// The textual content associated with the message. 
+ public ChatRequestUserMessage(string content) + : base(ChatRole.User, ChatMessageContent.CreateText(content)) + { } + + /// + /// Creates a new instance of using a collection of content items that can + /// include text and image information. This content format is currently only applicable to the + /// gpt-4-vision-preview model and will not be accepted by other models. + /// + /// + /// The collection of text and image content items associated with the message. + /// + public ChatRequestUserMessage(IEnumerable contentItems) + : base(ChatRole.User, contentItems.ToArray()) + { } + + /// + /// Creates a new instance of using a collection of content items that can + /// include text and image information. This content format is currently only applicable to the + /// gpt-4-vision-preview model and will not be accepted by other models. + /// + /// + /// The collection of text and image content items associated with the message. + /// + public ChatRequestUserMessage(params ChatMessageContent[] contentItems) + : this(contentItems as IEnumerable) + { } + + internal override void WriteDerivedAdditions(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + if (Optional.IsDefined(Name)) + { + writer.WriteString("name"u8, Name); + } + } +} diff --git a/.dotnet/src/Custom/Chat/ChatResponseFormat.cs b/.dotnet/src/Custom/Chat/ChatResponseFormat.cs new file mode 100644 index 000000000..b7d539655 --- /dev/null +++ b/.dotnet/src/Custom/Chat/ChatResponseFormat.cs @@ -0,0 +1,44 @@ +using System; + +namespace OpenAI.Chat; + +/// +/// Represents a requested response_format for the model to use, enabling "JSON mode" for guaranteed valid output. +/// +/// +/// Important: when using JSON mode, the model must also be instructed to produce JSON via a +/// system or user message. +/// +/// Without this paired, message-based accompaniment, the model may generate an unending stream of whitespace until the +/// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. +/// +/// +/// Also note that the message content may be partially cut off if finish_reason is length, which +/// indicates that the generation exceeded max_tokens or the conversation exceeded the max context length for +/// the model. +/// +/// +public enum ChatResponseFormat +{ + /// + /// Specifies that the model should provide plain, textual output. + /// + Text, + /// + /// Specifies that the model should enable "JSON mode" and better guarantee the emission of valid JSON. + /// + /// + /// Important: when using JSON mode, the model must also be instructed to produce JSON via a + /// system or user message. + /// + /// Without this paired, message-based accompaniment, the model may generate an unending stream of whitespace until the + /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. + /// + /// + /// Also note that the message content may be partially cut off if finish_reason is length, which + /// indicates that the generation exceeded max_tokens or the conversation exceeded the max context length for + /// the model. + /// + /// + JsonObject, +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Chat/ChatRole.cs b/.dotnet/src/Custom/Chat/ChatRole.cs new file mode 100644 index 000000000..724baa74f --- /dev/null +++ b/.dotnet/src/Custom/Chat/ChatRole.cs @@ -0,0 +1,85 @@ +namespace OpenAI.Chat; + +/// +/// Represents the role of a chat completion message. 
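A sketch pairing JSON mode with the required instruction message, using the factory helpers on ChatRequestMessage:

```csharp
using System.Collections.Generic;
using OpenAI.Chat;

List<ChatRequestMessage> messages =
[
    ChatRequestMessage.CreateSystemMessage("You are a helpful assistant that responds only with a JSON object."),
    ChatRequestMessage.CreateUserMessage("List three primary colors."),
];

ChatCompletionOptions options = new()
{
    ResponseFormat = ChatResponseFormat.JsonObject, // pair with the instruction above to avoid a "stuck" request
};
```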
+/// +/// +/// +/// +/// Type - +/// Role - +/// Description +/// +/// +/// - +/// system - +/// Instructions to the model that guide the behavior of future assistant messages. +/// +/// +/// - +/// user - +/// Input messages from the caller, typically paired with assistant messages in a conversation. +/// +/// +/// - +/// assistant - +/// +/// Output messages from the model with responses to the user or calls to tools or functions that are +/// needed to continue the logical conversation. +/// +/// +/// +/// - +/// tool - +/// +/// Resolution information for a in an earlier +/// that was made against a supplied +/// . +/// +/// +/// +/// - +/// function - +/// +/// Resolution information for a in an earlier +/// that was made against a supplied +/// . Note that functions are deprecated in favor of +/// tool_calls. +/// +/// +/// +/// +public enum ChatRole +{ + /// + /// The system role, which provides instructions to the model that guide the behavior of future + /// assistant messages + /// + System, + /// + /// The assistant role that provides output from the model that either issues completions in response to + /// user messages or calls provided tools or functions. + /// + Assistant, + /// + /// The user role that provides input from the caller as a prompt for model responses. + /// + User, + /// + /// The tool role that provides resolving information to prior tool_calls made by the model against + /// supplied tools. + /// + Tool, + /// + /// + /// The function role that provides resolving information to a prior function_call made by the model + /// against a definition supplied in functions. + /// + /// + /// + /// functions are deprecated in favor of tools and supplying tools will result in + /// tool_calls that must be resolved via the tool role rather than a function_call resolved + /// by a function role message. + /// + Function, +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Chat/ChatTokenUsage.cs b/.dotnet/src/Custom/Chat/ChatTokenUsage.cs new file mode 100644 index 000000000..8daf79814 --- /dev/null +++ b/.dotnet/src/Custom/Chat/ChatTokenUsage.cs @@ -0,0 +1,21 @@ +namespace OpenAI.Chat; + +/// +/// Represents computed token consumption statistics for a chat completion request. 
+/// +public class ChatTokenUsage +{ + /// + public int InputTokens { get; } + /// + public int OutputTokens { get; } + /// + public int TotalTokens { get; } + + internal ChatTokenUsage(Internal.Models.CompletionUsage internalUsage) + { + InputTokens = (int)internalUsage.PromptTokens; + OutputTokens = (int)internalUsage.CompletionTokens; + TotalTokens = (int)internalUsage.TotalTokens; + } +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Chat/ChatToolCall.Serialization.cs b/.dotnet/src/Custom/Chat/ChatToolCall.Serialization.cs new file mode 100644 index 000000000..eff6eec3c --- /dev/null +++ b/.dotnet/src/Custom/Chat/ChatToolCall.Serialization.cs @@ -0,0 +1,38 @@ +using System; +using System.ClientModel.Internal; + +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Text.Json; + +namespace OpenAI.Chat; + +public abstract partial class ChatToolCall : IJsonModel +{ + ChatToolCall IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + throw new NotImplementedException(); + } + + ChatToolCall IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + throw new NotImplementedException(); + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + writer.WriteStartObject(); + writer.WriteString("id"u8, Id); + WriteDerivedAdditions(writer, options); + writer.WriteEndObject(); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + throw new NotImplementedException(); + } + + internal abstract void WriteDerivedAdditions(Utf8JsonWriter writer, ModelReaderWriterOptions options); +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Chat/ChatToolCall.cs b/.dotnet/src/Custom/Chat/ChatToolCall.cs new file mode 100644 index 000000000..9203f4b01 --- /dev/null +++ b/.dotnet/src/Custom/Chat/ChatToolCall.cs @@ -0,0 +1,15 @@ +namespace OpenAI.Chat; + +/// +/// A base representation of an item in an assistant role response's tool_calls that specifies +/// parameterized resolution against a previously defined tool that is needed for the model to continue the logical +/// conversation. +/// +public abstract partial class ChatToolCall +{ + /// + /// A unique identifier associated with the tool call, used in a subsequent to + /// resolve the tool call and continue the logical conversation. + /// + public required string Id { get; set; } +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Chat/ChatToolConstraint.cs b/.dotnet/src/Custom/Chat/ChatToolConstraint.cs new file mode 100644 index 000000000..15622b5be --- /dev/null +++ b/.dotnet/src/Custom/Chat/ChatToolConstraint.cs @@ -0,0 +1,87 @@ +using System; + +namespace OpenAI.Chat; + +/// +/// Represents tool_choice, the desired manner in which the model should use the tools defined in a +/// chat completion request. +/// +public readonly struct ChatToolConstraint : IEquatable +{ + private enum ToolConstraintKind + { + Predefined, + Function, + } + + private readonly ToolConstraintKind _constraintKind; + private readonly BinaryData _serializableData; + + /// + /// Creates a new instance of which requests that the model restricts its behavior + /// to calling the specified tool. + /// + /// The definition of the tool that the model should call. 
+ /// + /// tool_choice uses the name of a tool of the function type as the correlation field, so + /// instantiating a new instance of with the desired name is + /// sufficient if the matching instance is not available. + /// + public ChatToolConstraint(ChatToolDefinition toolDefinition) + { + if (toolDefinition is ChatFunctionToolDefinition functionToolDefinition) + { + _constraintKind = ToolConstraintKind.Function; + _serializableData = BinaryData.FromObjectAsJson(new + { + type = "function", + function = new + { + name = functionToolDefinition.Name, + } + }); + } + else + { + throw new ArgumentException( + $"Unsupported {nameof(toolDefinition)} type for 'tool_choice' constraint: {toolDefinition.GetType()}"); + } + } + + internal ChatToolConstraint(string predefinedLabel) + { + _constraintKind = ToolConstraintKind.Predefined; + _serializableData = BinaryData.FromString($@"""{predefinedLabel}"""); + } + + /// + /// auto specifies that the model should freely call any combination of the provided tools, including + /// the option to not invoke any tools and issue an ordinary response. + /// + public static ChatToolConstraint Auto { get; } = new("auto"); + /// + /// none specifies that the model should not invoke any of the provided tools and instead force an + /// ordinary assistant response. Note that provided tool definitions may still influence the behavior of + /// chat completions even when tools are not called. + /// + public static ChatToolConstraint None { get; } = new("none"); + /// + public static bool operator ==(ChatToolConstraint left, ChatToolConstraint right) + => left._serializableData?.ToString() == right._serializableData?.ToString(); + /// + public static bool operator !=(ChatToolConstraint left, ChatToolConstraint right) + => left._serializableData?.ToString() != right._serializableData?.ToString(); + /// + public bool Equals(ChatToolConstraint other) + => _serializableData?.ToString() == other._serializableData?.ToString(); + /// + public override string ToString() => _serializableData?.ToString(); + /// + public override bool Equals(object obj) + => obj is ChatToolConstraint constraint && constraint.Equals(this); + /// + public override int GetHashCode() => $"{_serializableData?.ToString()}".GetHashCode(); + + internal BinaryData GetBinaryData() => _serializableData; +} diff --git a/.dotnet/src/Custom/Chat/ChatToolDefinition.cs b/.dotnet/src/Custom/Chat/ChatToolDefinition.cs new file mode 100644 index 000000000..d116ac460 --- /dev/null +++ b/.dotnet/src/Custom/Chat/ChatToolDefinition.cs @@ -0,0 +1,11 @@ +namespace OpenAI.Chat; + +/// +/// A base representation of a tool supplied to a chat completion request. Tools inform the model about additional, +/// caller-provided behaviors that can be invoked to provide prompt enrichment or custom actions. +/// +/// +/// Chat completion currently supports function tools via . +/// +public abstract class ChatToolDefinition +{ } diff --git a/.dotnet/src/Custom/Chat/StreamingChatUpdate.cs b/.dotnet/src/Custom/Chat/StreamingChatUpdate.cs new file mode 100644 index 000000000..c1540897b --- /dev/null +++ b/.dotnet/src/Custom/Chat/StreamingChatUpdate.cs @@ -0,0 +1,336 @@ +namespace OpenAI.Chat; + +using System; +using System.Collections.Generic; +using System.Text.Json; + +/// +/// Represents an incremental item of new data in a streaming response to a chat completion request.
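For example, the ChatToolConstraint options above can be applied like this (a sketch; per the remarks, only the tool's name is used for correlation, so a minimal definition suffices):

```csharp
options.ToolConstraint = ChatToolConstraint.Auto; // model decides (default behavior)
options.ToolConstraint = ChatToolConstraint.None; // never invoke tools
options.ToolConstraint = new ChatToolConstraint(new ChatFunctionToolDefinition("get_current_weather"));
```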
+/// +public partial class StreamingChatUpdate +{ + /// + /// Gets a unique identifier associated with this streamed Chat Completions response. + /// + /// + /// + /// Corresponds to $.id in the underlying REST schema. + /// + /// When using Azure OpenAI, note that the values of and may not be + /// populated until the first containing role, content, or + /// function information. + /// + public string Id { get; } + + /// + /// Gets the first timestamp associated with generation activity for this completions response, + /// represented as seconds since the beginning of the Unix epoch of 00:00 on 1 Jan 1970. + /// + /// + /// + /// Corresponds to $.created in the underlying REST schema. + /// + /// When using Azure OpenAI, note that the values of and may not be + /// populated until the first containing role, content, or + /// function information. + /// + public DateTimeOffset? Created { get; } + + /// + /// Gets the associated with this update. + /// + /// + /// + /// Corresponds to e.g. $.choices[0].delta.role in the underlying REST schema. + /// + /// assignment typically occurs in a single update across a streamed Chat Completions + /// choice and the value should be considered to persist for all subsequent updates without a + /// that bear the same . + /// + public ChatRole? Role { get; } + + /// + /// Gets the content fragment associated with this update. + /// + /// + /// + /// Corresponds to e.g. $.choices[0].delta.content in the underlying REST schema. + /// + /// Each update contains only a small number of tokens. When presenting or reconstituting a full, streamed + /// response, all values for the same should be + /// combined. + /// + public string ContentUpdate { get; } + + /// + /// Gets the name of a function to be called. + /// + /// + /// Corresponds to e.g. $.choices[0].delta.function_call.name in the underlying REST schema. + /// + public string FunctionName { get; } + + /// + /// Gets a function arguments fragment associated with this update. + /// + /// + /// + /// Corresponds to e.g. $.choices[0].delta.function_call.arguments in the underlying REST schema. + /// + /// + /// + /// Each update contains only a small number of tokens. When presenting or reconstituting a full, streamed + /// arguments body, all values for the same + /// should be combined. + /// + /// + /// + /// As is the case for non-streaming , the content provided for function + /// arguments is not guaranteed to be well-formed JSON or to contain expected data. Callers should validate + /// function arguments before using them. + /// + /// + public string FunctionArgumentsUpdate { get; } + + /// + /// An incremental update payload for a tool call that is part of this response. + /// + /// + /// + /// Corresponds to e.g. $.choices[0].delta.tool_calls[0].index in the REST API schema. + /// + /// + /// To differentiate between parallel streaming tool calls within a single streaming choice, use the value of the + /// property. + /// + /// + /// Please note is the base class. According to the scenario, a derived class + /// of the base class might need to be assigned here, or this property needs to be cast to one of the possible + /// derived classes. + /// The available derived classes include: . + /// + /// + public StreamingToolCallUpdate ToolCallUpdate { get; } + + /// + /// Gets the associated with this update. + /// + /// + /// + /// Corresponds to e.g. $.choices[0].finish_reason in the underlying REST schema.
+ /// + /// + /// assignment typically appears in the final streamed update message associated + /// with a choice. + /// + /// + public ChatFinishReason? FinishReason { get; } + + /// + /// Gets the choice index associated with this streamed update. + /// + /// + /// + /// Corresponds to e.g. $.choices[0].index in the underlying REST schema. + /// + /// + /// Unless a value greater than 1 was provided as the choiceCount to + /// , + /// only one choice will be generated. In that case, this value will always be 0 and may not need to be considered. + /// + /// + /// When a value greater than 1 to that choiceCount is provided, this index represents + /// which logical choice the information is associated with. In the event + /// that a single underlying server-sent event contains multiple choices, multiple instances of + /// will be created. + /// + /// + public int? ChoiceIndex { get; } + + /// + public string SystemFingerprint { get; } + + /// + /// The log probability information for choices in the chat completion response, as requested via + /// . + /// + public ChatLogProbabilityCollection LogProbabilities { get; } + + internal StreamingChatUpdate( + string id, + DateTimeOffset created, + string systemFingerprint = null, + int? choiceIndex = null, + ChatRole? role = null, + string contentUpdate = null, + ChatFinishReason? finishReason = null, + string functionName = null, + string functionArgumentsUpdate = null, + StreamingToolCallUpdate toolCallUpdate = null, + ChatLogProbabilityCollection logProbabilities = null) + { + Id = id; + Created = created; + SystemFingerprint = systemFingerprint; + ChoiceIndex = choiceIndex; + Role = role; + ContentUpdate = contentUpdate; + FinishReason = finishReason; + FunctionName = functionName; + FunctionArgumentsUpdate = functionArgumentsUpdate; + ToolCallUpdate = toolCallUpdate; + LogProbabilities = logProbabilities; + } + + internal static List DeserializeStreamingChatUpdates(JsonElement element) + { + List results = []; + if (element.ValueKind == JsonValueKind.Null) + { + return results; + } + string id = default; + DateTimeOffset created = default; + string systemFingerprint = null; + foreach (JsonProperty property in element.EnumerateObject()) + { + if (property.NameEquals("id"u8)) + { + id = property.Value.GetString(); + continue; + } + if (property.NameEquals("created"u8)) + { + created = DateTimeOffset.FromUnixTimeSeconds(property.Value.GetInt64()); + continue; + } + if (property.NameEquals("system_fingerprint")) + { + systemFingerprint = property.Value.GetString(); + continue; + } + if (property.NameEquals("choices"u8)) + { + foreach (JsonElement choiceElement in property.Value.EnumerateArray()) + { + ChatRole? role = null; + string contentUpdate = null; + string functionName = null; + string functionArgumentsUpdate = null; + int choiceIndex = 0; + ChatFinishReason? 
finishReason = null; + List toolCallUpdates = []; + ChatLogProbabilityCollection logProbabilities = null; + + foreach (JsonProperty choiceProperty in choiceElement.EnumerateObject()) + { + if (choiceProperty.NameEquals("index"u8)) + { + choiceIndex = choiceProperty.Value.GetInt32(); + continue; + } + if (choiceProperty.NameEquals("finish_reason"u8)) + { + if (choiceProperty.Value.ValueKind == JsonValueKind.Null) + { + finishReason = null; + continue; + } + finishReason = choiceProperty.Value.GetString() switch + { + "stop" => ChatFinishReason.Stopped, + "length" => ChatFinishReason.Length, + "tool_calls" => ChatFinishReason.ToolCalls, + "function_call" => ChatFinishReason.FunctionCall, + "content_filter" => ChatFinishReason.ContentFilter, + _ => throw new ArgumentException(nameof(finishReason)), + }; + continue; + } + if (choiceProperty.NameEquals("delta"u8)) + { + foreach (JsonProperty deltaProperty in choiceProperty.Value.EnumerateObject()) + { + if (deltaProperty.NameEquals("role"u8)) + { + role = deltaProperty.Value.GetString() switch + { + "system" => ChatRole.System, + "user" => ChatRole.User, + "assistant" => ChatRole.Assistant, + "tool" => ChatRole.Tool, + "function" => ChatRole.Function, + _ => throw new ArgumentException(nameof(role)), + }; + continue; + } + if (deltaProperty.NameEquals("content"u8)) + { + contentUpdate = deltaProperty.Value.GetString(); + continue; + } + if (deltaProperty.NameEquals("function_call"u8)) + { + foreach (JsonProperty functionProperty in deltaProperty.Value.EnumerateObject()) + { + if (functionProperty.NameEquals("name"u8)) + { + functionName = functionProperty.Value.GetString(); + continue; + } + if (functionProperty.NameEquals("arguments"u8)) + { + functionArgumentsUpdate = functionProperty.Value.GetString(); + } + } + } + if (deltaProperty.NameEquals("tool_calls")) + { + foreach (JsonElement toolCallElement in deltaProperty.Value.EnumerateArray()) + { + toolCallUpdates.Add( + StreamingToolCallUpdate.DeserializeStreamingToolCallUpdate(toolCallElement)); + } + } + } + } + if (choiceProperty.NameEquals("logprobs"u8)) + { + Internal.Models.CreateChatCompletionResponseChoiceLogprobs internalLogprobs + = Internal.Models.CreateChatCompletionResponseChoiceLogprobs.DeserializeCreateChatCompletionResponseChoiceLogprobs( + choiceProperty.Value); + logProbabilities = ChatLogProbabilityCollection.FromInternalData(internalLogprobs); + } + } + // In the unlikely event that more than one tool call arrives on a single chunk, we'll generate + // separate updates just like for choices. Adding a "null" if empty lets us avoid a separate loop. 
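+                    // For example, a single chunk carrying deltas for tool_calls[0] and tool_calls[1] yields two
+                    // StreamingChatUpdate instances that share the same id, created timestamp, and choice index but
+                    // carry different ToolCallUpdate values.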
+ if (toolCallUpdates.Count == 0) + { + toolCallUpdates.Add(null); + } + foreach (StreamingToolCallUpdate toolCallUpdate in toolCallUpdates) + { + results.Add(new StreamingChatUpdate( + id, + created, + systemFingerprint, + choiceIndex, + role, + contentUpdate, + finishReason, + functionName, + functionArgumentsUpdate, + toolCallUpdate, + logProbabilities)); + } + } + continue; + } + } + if (results.Count == 0) + { + results.Add(new StreamingChatUpdate(id, created, systemFingerprint)); + } + return results; + } +} diff --git a/.dotnet/src/Custom/Chat/StreamingFunctionToolCallUpdate.cs b/.dotnet/src/Custom/Chat/StreamingFunctionToolCallUpdate.cs new file mode 100644 index 000000000..bbae5e5c4 --- /dev/null +++ b/.dotnet/src/Custom/Chat/StreamingFunctionToolCallUpdate.cs @@ -0,0 +1,90 @@ +namespace OpenAI.Chat; +using System.Text.Json; + +/// +/// Represents an incremental update to a streaming function tool call that is part of a streaming chat completions +/// choice. +/// +public partial class StreamingFunctionToolCallUpdate : StreamingToolCallUpdate +{ + /// + /// The name of the function requested by the tool call. + /// + /// + /// + /// Corresponds to e.g. $.choices[0].delta.tool_calls[0].function.name in the REST API schema. + /// + /// + /// For a streaming function tool call, this name will appear in a single streaming update payload, typically the + /// first. Use the property to differentiate between multiple, + /// parallel tool calls when streaming. + /// + /// + public string Name { get; } + + /// + /// The next new segment of the function arguments for the function tool called by a streaming tool call. + /// These must be accumulated for the complete contents of the function arguments. + /// + /// + /// + /// Corresponds to e.g. $.choices[0].delta.tool_calls[0].function.arguments in the REST API schema. + /// + /// Note that the model does not always generate valid JSON and may hallucinate parameters + /// not defined by your function schema. Validate the arguments in your code before calling + /// your function. 
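+    /// For example, a complete payload such as {"location": "Boston"} may arrive as several small fragments
+    /// across successive updates; concatenate the fragments in arrival order to reconstruct the JSON before
+    /// parsing it.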
+    /// 
+    public string ArgumentsUpdate { get; }
+
+    internal StreamingFunctionToolCallUpdate(
+        string id,
+        int toolCallIndex,
+        string functionName,
+        string functionArgumentsUpdate)
+        : base("function", id, toolCallIndex)
+    {
+        Name = functionName;
+        ArgumentsUpdate = functionArgumentsUpdate;
+    }
+
+    internal static StreamingFunctionToolCallUpdate DeserializeStreamingFunctionToolCallUpdate(JsonElement element)
+    {
+        if (element.ValueKind == JsonValueKind.Null)
+        {
+            return null;
+        }
+
+        string id = null;
+        int toolCallIndex = 0;
+        string functionName = null;
+        string functionArgumentsUpdate = null;
+
+        foreach (JsonProperty property in element.EnumerateObject())
+        {
+            if (property.NameEquals("id"u8))
+            {
+                id = property.Value.GetString();
+            }
+            if (property.NameEquals("index"u8))
+            {
+                toolCallIndex = property.Value.GetInt32();
+            }
+            if (property.NameEquals("function"u8))
+            {
+                foreach (JsonProperty functionProperty in property.Value.EnumerateObject())
+                {
+                    if (functionProperty.NameEquals("name"u8))
+                    {
+                        functionName = functionProperty.Value.GetString();
+                    }
+                    if (functionProperty.NameEquals("arguments"u8))
+                    {
+                        functionArgumentsUpdate = functionProperty.Value.GetString();
+                    }
+                }
+            }
+        }
+
+        return new StreamingFunctionToolCallUpdate(id, toolCallIndex, functionName, functionArgumentsUpdate);
+    }
+}
diff --git a/.dotnet/src/Custom/Chat/StreamingToolCallUpdate.cs b/.dotnet/src/Custom/Chat/StreamingToolCallUpdate.cs
new file mode 100644
index 000000000..ce9fc9b56
--- /dev/null
+++ b/.dotnet/src/Custom/Chat/StreamingToolCallUpdate.cs
@@ -0,0 +1,97 @@
+namespace OpenAI.Chat;
+using System.Text.Json;
+
+/// 
+/// A base representation of an incremental update to a streaming tool call that is part of a streaming chat
+/// completions choice.
+/// 
+/// 
+/// 
+/// This type encapsulates the payload located in e.g. $.choices[0].delta.tool_calls[] in the REST API schema.
+/// 
+/// 
+/// To differentiate between parallel streaming tool calls within a single streaming choice, use the value of the
+/// ToolCallIndex property.
+/// 
+/// 
+/// StreamingToolCallUpdate is the streaming, base class counterpart to a complete tool call.
+/// Currently, chat completion supports function tools, and the derived
+/// StreamingFunctionToolCallUpdate type will provide the required information about the matching function
+/// tool call.
+/// 
+/// 
+public abstract partial class StreamingToolCallUpdate
+{
+    /// 
+    /// Gets the ID associated with the streaming tool call.
+    /// 
+    /// 
+    /// 
+    /// Corresponds to e.g. $.choices[0].delta.tool_calls[0].id in the REST API schema.
+    /// 
+    /// 
+    /// This value appears once for each streaming tool call, typically on the first update message for each
+    /// ToolCallIndex. Callers should retain the value when it arrives to accumulate the complete tool
+    /// call information.
+    /// 
+    /// 
+    /// Tool call IDs must be provided in the tool messages that respond to tool calls.
+    /// 
+    /// 
+    public string Id { get; }
+
+    /// 
+    /// Gets the tool call index associated with this update.
+    /// 
+    /// 
+    /// 
+    /// Corresponds to e.g. $.choices[0].delta.tool_calls[0].index in the REST API schema.
+    /// 
+    /// 
+    /// This value appears on every streaming tool call update. When multiple tool calls occur within the same
+    /// streaming chat choice, this index specifies which tool call this update contains new information for.
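+    /// For example, two parallel function calls in one choice stream with indices 0 and 1; accumulate the name,
+    /// ID, and argument fragments for each index separately.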
+ /// + /// + public int ToolCallIndex { get; } + + internal string Type { get; } + + internal StreamingToolCallUpdate(string type, string id, int toolCallIndex) + { + Type = type; + Id = id; + ToolCallIndex = toolCallIndex; + } + + internal static StreamingToolCallUpdate DeserializeStreamingToolCallUpdate(JsonElement element) + { + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + foreach (JsonProperty property in element.EnumerateObject()) + { + // CUSTOM CODE NOTE: + // "type" is superficially the JSON discriminator for possible tool call categories, but it does not + // appear on every streamed delta message. To account for this without maintaining state, we instead + // allow the deserialization to infer the type based on the presence of the named/typed key. This is + // consistent across all existing patterns of the form: + // { + // "type": "" + // "": { ... } + // } + if (property.NameEquals("type"u8)) + { + if (property.Value.GetString() == "function") + { + return StreamingFunctionToolCallUpdate.DeserializeStreamingFunctionToolCallUpdate(element); + } + } + else if (property.NameEquals("function"u8)) + { + return StreamingFunctionToolCallUpdate.DeserializeStreamingFunctionToolCallUpdate(element); + } + } + return null; + } +} diff --git a/.dotnet/src/Custom/Embeddings/Embedding.cs b/.dotnet/src/Custom/Embeddings/Embedding.cs new file mode 100644 index 000000000..219fb99cc --- /dev/null +++ b/.dotnet/src/Custom/Embeddings/Embedding.cs @@ -0,0 +1,44 @@ +using System; + +namespace OpenAI.Embeddings; + +/// +/// Represents an embedding vector returned by embedding endpoint. +/// +public partial class Embedding +{ + /// + /// The embedding vector, which is a list of floats. + /// + public ReadOnlyMemory Vector { get; } + /// + public int Index { get; } + /// + public string Model { get; } + /// + public EmbeddingTokenUsage Usage { get; } + + internal Embedding(ReadOnlyMemory vector, int index, EmbeddingTokenUsage usage) + { + Vector = vector; + Index = index; + Usage = usage; + } + + internal Embedding( + Internal.Models.CreateEmbeddingResponse internalResponse, + int internalDataIndex, + EmbeddingTokenUsage usage = null) + { + Internal.Models.Embedding dataItem = internalResponse.Data[(int)internalDataIndex]; + string dataItemBase64 = dataItem.EmbeddingProperty.ToString(); + dataItemBase64 = dataItemBase64.Substring(1, dataItemBase64.Length - 2); + byte[] bytes = Convert.FromBase64String(dataItemBase64); + float[] vector = new float[bytes.Length / sizeof(float)]; + Buffer.BlockCopy(bytes, 0, vector, 0, bytes.Length); + Vector = new ReadOnlyMemory(vector); + Index = (int)dataItem.Index; + Usage = usage ?? 
new(internalResponse.Usage); + Model = internalResponse.Model; + } +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Embeddings/EmbeddingClient.Protocol.cs b/.dotnet/src/Custom/Embeddings/EmbeddingClient.Protocol.cs new file mode 100644 index 000000000..b9fd3c746 --- /dev/null +++ b/.dotnet/src/Custom/Embeddings/EmbeddingClient.Protocol.cs @@ -0,0 +1,19 @@ +using System.ClientModel; +using System.ClientModel.Primitives; +using System.ComponentModel; +using System.Threading.Tasks; + +namespace OpenAI.Embeddings; + +public partial class EmbeddingClient +{ + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult GenerateEmbeddings(BinaryContent content, RequestOptions options = null) + => Shim.CreateEmbedding(content, options); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual async Task GenerateEmbeddingsAsync(BinaryContent content, RequestOptions options = null) + => await Shim.CreateEmbeddingAsync(content, options).ConfigureAwait(false); +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Embeddings/EmbeddingClient.cs b/.dotnet/src/Custom/Embeddings/EmbeddingClient.cs new file mode 100644 index 000000000..49a2e7b42 --- /dev/null +++ b/.dotnet/src/Custom/Embeddings/EmbeddingClient.cs @@ -0,0 +1,94 @@ +using System; +using System.ClientModel; +using System.Collections.Generic; +using System.Threading.Tasks; + +namespace OpenAI.Embeddings; + +/// The service client for the OpenAI Embeddings endpoint. +public partial class EmbeddingClient +{ + private readonly OpenAIClientConnector _clientConnector; + private Internal.Embeddings Shim => _clientConnector.InternalClient.GetEmbeddingsClient(); + + public EmbeddingClient(string model, ApiKeyCredential credential = default, OpenAIClientOptions options = default) + { + _clientConnector = new(model, credential, options); + } + + public virtual ClientResult GenerateEmbedding(string input, EmbeddingOptions options = null) + { + Internal.Models.CreateEmbeddingRequest request = CreateInternalRequest(input, options); + ClientResult response = Shim.CreateEmbedding(request); + Embedding embeddingResult = new(response.Value, internalDataIndex: 0); + return ClientResult.FromValue(embeddingResult, response.GetRawResponse()); + } + + public virtual async Task> GenerateEmbeddingAsync(string input, EmbeddingOptions options = null) + { + Internal.Models.CreateEmbeddingRequest request = CreateInternalRequest(input, options); + ClientResult response = await Shim.CreateEmbeddingAsync(request); + Embedding embeddingResult = new(response.Value, internalDataIndex: 0); + return ClientResult.FromValue(embeddingResult, response.GetRawResponse()); + } + + public virtual ClientResult GenerateEmbedding(IEnumerable input, EmbeddingOptions options = null) + { + Internal.Models.CreateEmbeddingRequest request = CreateInternalRequest(input, options); + ClientResult response = Shim.CreateEmbedding(request); + Embedding embeddingResult = new(response.Value, internalDataIndex: 0); + return ClientResult.FromValue(embeddingResult, response.GetRawResponse()); + } + + public virtual async Task> GenerateEmbeddingAsync(IEnumerable input, EmbeddingOptions options = null) + { + Internal.Models.CreateEmbeddingRequest request = CreateInternalRequest(input, options); + ClientResult response = await Shim.CreateEmbeddingAsync(request); + Embedding embeddingResult = new(response.Value, internalDataIndex: 0); + return ClientResult.FromValue(embeddingResult, response.GetRawResponse()); + } + + public virtual ClientResult 
GenerateEmbeddings(IEnumerable inputs, EmbeddingOptions options = null) + { + Internal.Models.CreateEmbeddingRequest request = CreateInternalRequest(inputs, options); + ClientResult response = Shim.CreateEmbedding(request); + EmbeddingCollection resultCollection = EmbeddingCollection.CreateFromInternalResponse(response.Value); + return ClientResult.FromValue(resultCollection, response.GetRawResponse()); + } + + public virtual async Task> GenerateEmbeddingsAsync(IEnumerable inputs, EmbeddingOptions options = null) + { + Internal.Models.CreateEmbeddingRequest request = CreateInternalRequest(inputs, options); + ClientResult response = await Shim.CreateEmbeddingAsync(request); + EmbeddingCollection resultCollection = EmbeddingCollection.CreateFromInternalResponse(response.Value); + return ClientResult.FromValue(resultCollection, response.GetRawResponse()); + } + + public virtual ClientResult GenerateEmbeddings(IEnumerable> inputs, EmbeddingOptions options = null) + { + Internal.Models.CreateEmbeddingRequest request = CreateInternalRequest(inputs, options); + ClientResult response = Shim.CreateEmbedding(request); + EmbeddingCollection resultCollection = EmbeddingCollection.CreateFromInternalResponse(response.Value); + return ClientResult.FromValue(resultCollection, response.GetRawResponse()); + } + + public virtual async Task> GenerateEmbeddingsAsync(IEnumerable> inputs, EmbeddingOptions options = null) + { + Internal.Models.CreateEmbeddingRequest request = CreateInternalRequest(inputs, options); + ClientResult response = await Shim.CreateEmbeddingAsync(request); + EmbeddingCollection resultCollection = EmbeddingCollection.CreateFromInternalResponse(response.Value); + return ClientResult.FromValue(resultCollection, response.GetRawResponse()); + } + + private Internal.Models.CreateEmbeddingRequest CreateInternalRequest(object inputObject, EmbeddingOptions options) + { + options ??= new(); + return new Internal.Models.CreateEmbeddingRequest( + BinaryData.FromObjectAsJson(inputObject), + new(_clientConnector.Model), + Internal.Models.CreateEmbeddingRequestEncodingFormat.Base64, + options?.Dimensions, + options?.User, + serializedAdditionalRawData: null); + } +} diff --git a/.dotnet/src/Custom/Embeddings/EmbeddingCollection.cs b/.dotnet/src/Custom/Embeddings/EmbeddingCollection.cs new file mode 100644 index 000000000..b87076584 --- /dev/null +++ b/.dotnet/src/Custom/Embeddings/EmbeddingCollection.cs @@ -0,0 +1,19 @@ +using System.Collections.Generic; +using System.Collections.ObjectModel; + +namespace OpenAI.Embeddings; + +public class EmbeddingCollection : ReadOnlyCollection +{ + internal EmbeddingCollection(IList list) : base(list) { } + internal static EmbeddingCollection CreateFromInternalResponse(Internal.Models.CreateEmbeddingResponse response) + { + EmbeddingTokenUsage usage = new(response.Usage); + List items = []; + for (int i = 0; i < response.Data.Count; i++) + { + items.Add(new(response, i, usage)); + } + return new EmbeddingCollection(items); + } +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Embeddings/EmbeddingOptions.cs b/.dotnet/src/Custom/Embeddings/EmbeddingOptions.cs new file mode 100644 index 000000000..da22117b1 --- /dev/null +++ b/.dotnet/src/Custom/Embeddings/EmbeddingOptions.cs @@ -0,0 +1,8 @@ +namespace OpenAI.Embeddings; + +public class EmbeddingOptions +{ + public string User { get; set; } + + public int? 
Dimensions { get; set; } +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Embeddings/EmbeddingTokenUsage.cs b/.dotnet/src/Custom/Embeddings/EmbeddingTokenUsage.cs new file mode 100644 index 000000000..92ba47bd4 --- /dev/null +++ b/.dotnet/src/Custom/Embeddings/EmbeddingTokenUsage.cs @@ -0,0 +1,16 @@ +namespace OpenAI.Embeddings; + +public partial class EmbeddingTokenUsage +{ + private Internal.Models.EmbeddingUsage _internalUsage; + + /// + public int InputTokens => (int)_internalUsage.PromptTokens; + /// + public int TotalTokens => (int)_internalUsage.TotalTokens; + + internal EmbeddingTokenUsage(Internal.Models.EmbeddingUsage internalUsage) + { + _internalUsage = internalUsage; + } +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Files/FileClient.Protocol.cs b/.dotnet/src/Custom/Files/FileClient.Protocol.cs new file mode 100644 index 000000000..d7298b1dc --- /dev/null +++ b/.dotnet/src/Custom/Files/FileClient.Protocol.cs @@ -0,0 +1,152 @@ +using System; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.ComponentModel; +using System.IO; +using System.Threading.Tasks; + +namespace OpenAI.Files; + +public partial class FileClient +{ + /// + /// [Protocol Method] Upload a file that can be used across various endpoints. The size of all the files uploaded by + /// one organization can be up to 100 GB. + /// + /// The size of individual files can be a maximum of 512 MB or 2 million tokens for Assistants. See + /// the [Assistants Tools guide](/docs/assistants/tools) to learn more about the types of files + /// supported. The Fine-tuning API only supports `.jsonl` files. + /// + /// Please [contact us](https://help.openai.com/) if you need to increase these storage limits. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler or convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The content to send as the body of the request. + /// The content type of the request. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult UploadFile(BinaryContent content, string contentType, RequestOptions options = null) + { + Argument.AssertNotNull(content, nameof(content)); + Argument.AssertNotNullOrEmpty(contentType, nameof(contentType)); + + options ??= new RequestOptions(); + + using PipelineMessage message = CreateUploadFileRequest(content, contentType, options); + + Shim.Pipeline.Send(message); + + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) + { + throw new ClientResultException(response); + } + + return ClientResult.FromResponse(response); + } + + /// + /// [Protocol Method] Upload a file that can be used across various endpoints. The size of all the files uploaded by + /// one organization can be up to 100 GB. + /// + /// The size of individual files can be a maximum of 512 MB or 2 million tokens for Assistants. See + /// the [Assistants Tools guide](/docs/assistants/tools) to learn more about the types of files + /// supported. The Fine-tuning API only supports `.jsonl` files. 
+ /// + /// Please [contact us](https://help.openai.com/) if you need to increase these storage limits. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler or convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The content to send as the body of the request. + /// The content type of the request. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual async Task UploadFileAsync(BinaryContent content, string contentType, RequestOptions options = null) + { + Argument.AssertNotNull(content, nameof(content)); + Argument.AssertNotNullOrEmpty(contentType, nameof(contentType)); + + options ??= new RequestOptions(); + + using PipelineMessage message = CreateUploadFileRequest(content, contentType, options); + + Shim.Pipeline.Send(message); + + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) + { + throw await ClientResultException.CreateAsync(response).ConfigureAwait(false); + } + + return ClientResult.FromResponse(response); + } + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult GetFileInfo(string fileId, RequestOptions options) + => Shim.RetrieveFile(fileId, options); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual async Task GetFileInfoAsync(string fileId, RequestOptions options) + => await Shim.RetrieveFileAsync(fileId, options).ConfigureAwait(false); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult GetFileInfoList(string purpose, RequestOptions options) + => Shim.GetFiles(purpose, options); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual async Task GetFileInfoListAsync(string purpose, RequestOptions options) + => await Shim.GetFilesAsync(purpose, options).ConfigureAwait(false); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult DownloadFile(string fileId, RequestOptions options) + => Shim.DownloadFile(fileId, options); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual async Task DownloadFileAsync(string fileId, RequestOptions options) + => await Shim.DownloadFileAsync(fileId, options).ConfigureAwait(false); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult DeleteFile(string fileId, RequestOptions options) + => Shim.DeleteFile(fileId, options); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual async Task DeleteFileAsync(string fileId, RequestOptions options) + => await Shim.DeleteFileAsync(fileId, options).ConfigureAwait(false); +} diff --git a/.dotnet/src/Custom/Files/FileClient.cs b/.dotnet/src/Custom/Files/FileClient.cs new file mode 100644 index 000000000..42ef17eca --- /dev/null +++ b/.dotnet/src/Custom/Files/FileClient.cs @@ -0,0 +1,261 @@ +using OpenAI.Internal; +using System; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.IO; +using System.Text; +using System.Threading.Tasks; + +namespace OpenAI.Files; + +/// +/// The service client for OpenAI file operations. 
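+/// Supports uploading files, retrieving file metadata, listing files, downloading file content, and deleting files.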
+/// +public partial class FileClient +{ + private readonly OpenAIClientConnector _clientConnector; + private Internal.Files Shim => _clientConnector.InternalClient.GetFilesClient(); + + /// + /// Initializes a new instance of , used for file operation requests. + /// + /// + /// + /// If an endpoint is not provided, the client will use the OPENAI_ENDPOINT environment variable if it + /// defined and otherwise use the default OpenAI v1 endpoint. + /// + /// + /// If an authentication credential is not defined, the client use the OPENAI_API_KEY environment variable + /// if it is defined. + /// + /// + /// The API key used to authenticate with the service endpoint. + /// Additional options to customize the client. + public FileClient(ApiKeyCredential credential = default, OpenAIClientOptions options = null) + { + _clientConnector = new(model: null, credential, options); + } + + // convenience method - sync; Stream overload + // TODO: add refdoc comment + public virtual ClientResult UploadFile(Stream fileStream, string fileName, OpenAIFilePurpose purpose) + { + Argument.AssertNotNull(fileStream, nameof(fileStream)); + Argument.AssertNotNull(fileName, nameof(fileName)); + + using MultipartFormDataBinaryContent content = UploadFileOptions.ToMultipartContent(fileStream, fileName, purpose); + + ClientResult result = UploadFile(content, content.ContentType); + + PipelineResponse response = result.GetRawResponse(); + + Internal.Models.OpenAIFile internalFile = Internal.Models.OpenAIFile.FromResponse(response); + OpenAIFileInfo fileInfo = new(internalFile); + + return ClientResult.FromValue(fileInfo, response); + } + + // convenience method - sync + // TODO: add refdoc comment + public virtual ClientResult UploadFile(BinaryData file, string fileName, OpenAIFilePurpose purpose) + { + Argument.AssertNotNull(file, nameof(file)); + Argument.AssertNotNull(fileName, nameof(fileName)); + + using MultipartFormDataBinaryContent content = UploadFileOptions.ToMultipartContent(file, fileName, purpose); + + ClientResult result = UploadFile(content, content.ContentType); + + PipelineResponse response = result.GetRawResponse(); + + Internal.Models.OpenAIFile internalFile = Internal.Models.OpenAIFile.FromResponse(response); + OpenAIFileInfo fileInfo = new(internalFile); + + return ClientResult.FromValue(fileInfo, response); + } + + // convenience method - async; Stream overload + // TODO: add refdoc comment + public virtual async Task> UploadFileAsync(Stream file, string fileName, OpenAIFilePurpose purpose) + { + Argument.AssertNotNull(file, nameof(file)); + Argument.AssertNotNull(fileName, nameof(fileName)); + + using MultipartFormDataBinaryContent content = UploadFileOptions.ToMultipartContent(file, fileName, purpose); + + ClientResult result = await UploadFileAsync(content, content.ContentType).ConfigureAwait(false); + + PipelineResponse response = result.GetRawResponse(); + + Internal.Models.OpenAIFile internalFile = Internal.Models.OpenAIFile.FromResponse(response); + OpenAIFileInfo fileInfo = new(internalFile); + + return ClientResult.FromValue(fileInfo, response); + } + + // convenience method - async + // TODO: add refdoc comment + public virtual async Task> UploadFileAsync(BinaryData file, string fileName, OpenAIFilePurpose purpose) + { + Argument.AssertNotNull(file, nameof(file)); + Argument.AssertNotNull(fileName, nameof(fileName)); + + using MultipartFormDataBinaryContent content = UploadFileOptions.ToMultipartContent(file, fileName, purpose); + + ClientResult result = await UploadFileAsync(content, 
content.ContentType).ConfigureAwait(false); + + PipelineResponse response = result.GetRawResponse(); + + Internal.Models.OpenAIFile internalFile = Internal.Models.OpenAIFile.FromResponse(response); + OpenAIFileInfo fileInfo = new(internalFile); + + return ClientResult.FromValue(fileInfo, response); + } + + public virtual ClientResult GetFileInfo(string fileId) + { + ClientResult internalResult = Shim.RetrieveFile(fileId); + return ClientResult.FromValue(new OpenAIFileInfo(internalResult.Value), internalResult.GetRawResponse()); + } + + public virtual async Task> GetFileInfoAsync(string fileId) + { + ClientResult internalResult = await Shim.RetrieveFileAsync(fileId); + return ClientResult.FromValue(new OpenAIFileInfo(internalResult.Value), internalResult.GetRawResponse()); + } + + public virtual ClientResult GetFileInfoList(OpenAIFilePurpose? purpose = null) + { + Internal.Models.OpenAIFilePurpose? internalPurpose = ToInternalFilePurpose(purpose); + string internalPurposeText = null; + if (internalPurpose != null) + { + internalPurposeText = internalPurpose.ToString(); + } + ClientResult result = Shim.GetFiles(internalPurposeText); + List infoItems = []; + foreach (Internal.Models.OpenAIFile internalFile in result.Value.Data) + { + infoItems.Add(new(internalFile)); + } + return ClientResult.FromValue(new OpenAIFileInfoCollection(infoItems), result.GetRawResponse()); + } + + public virtual async Task> GetFileInfoListAsync(OpenAIFilePurpose? purpose = null) + { + Internal.Models.OpenAIFilePurpose? internalPurpose = ToInternalFilePurpose(purpose); + string internalPurposeText = null; + if (internalPurpose != null) + { + internalPurposeText = internalPurpose.ToString(); + } + ClientResult result = await Shim.GetFilesAsync(internalPurposeText).ConfigureAwait(false); + List infoItems = []; + foreach (Internal.Models.OpenAIFile internalFile in result.Value.Data) + { + infoItems.Add(new(internalFile)); + } + return ClientResult.FromValue(new OpenAIFileInfoCollection(infoItems), result.GetRawResponse()); + } + + public virtual ClientResult DownloadFile(string fileId) + { + PipelineMessage message = Shim.Pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "GET"; + UriBuilder uriBuilder = new(_clientConnector.Endpoint.AbsoluteUri); + StringBuilder path = new(); + path.Append($"/files/{fileId}/content"); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("content-type", "multipart/form-data"); + Shim.Pipeline.Send(message); + + if (message.Response.IsError) + { + throw new ClientResultException(message.Response); + } + + return ClientResult.FromValue(message.Response.Content, message.Response); + } + + public virtual async Task> DownloadFileAsync(string fileId) + { + PipelineMessage message = Shim.Pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "GET"; + UriBuilder uriBuilder = new(_clientConnector.Endpoint.AbsoluteUri); + StringBuilder path = new(); + path.Append($"/files/{fileId}/content"); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("content-type", "multipart/form-data"); + + await Shim.Pipeline.SendAsync(message).ConfigureAwait(false); + + if (message.Response.IsError) + { + throw new ClientResultException(message.Response); + } + + return ClientResult.FromValue(message.Response.Content, message.Response); + } + + 
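+    // Illustrative usage of the download pattern above; a non-normative sketch in which the file ID and
+    // output path are hypothetical placeholders:
+    //
+    //   FileClient client = new(new ApiKeyCredential(Environment.GetEnvironmentVariable("OPENAI_API_KEY")));
+    //   ClientResult<BinaryData> download = client.DownloadFile("file-abc123");
+    //   File.WriteAllBytes("training-data.jsonl", download.Value.ToArray());
+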
public virtual void DeleteFile(string fileId)
+    {
+        _ = Shim.DeleteFile(fileId);
+    }
+
+    public virtual async Task DeleteFileAsync(string fileId)
+    {
+        // Await the shim call so the delete request completes before this method's task does.
+        _ = await Shim.DeleteFileAsync(fileId).ConfigureAwait(false);
+    }
+
+    private PipelineMessage CreateUploadFileRequest(BinaryContent content, string contentType, RequestOptions options)
+    {
+        PipelineMessage message = Shim.Pipeline.CreateMessage();
+        message.ResponseClassifier = ResponseErrorClassifier200;
+
+        PipelineRequest request = message.Request;
+        request.Method = "POST";
+
+        UriBuilder uriBuilder = new(_clientConnector.Endpoint.AbsoluteUri);
+
+        StringBuilder path = new();
+        path.Append("/files");
+        uriBuilder.Path += path.ToString();
+
+        request.Uri = uriBuilder.Uri;
+
+        request.Headers.Set("Accept", "application/json");
+        request.Headers.Set("Content-Type", contentType);
+
+        request.Content = content;
+
+        message.Apply(options);
+
+        return message;
+    }
+
+    private static Internal.Models.OpenAIFilePurpose? ToInternalFilePurpose(OpenAIFilePurpose? purpose)
+    {
+        if (purpose == null)
+        {
+            return null;
+        }
+        return purpose switch
+        {
+            OpenAIFilePurpose.FineTuning => Internal.Models.OpenAIFilePurpose.FineTune,
+            OpenAIFilePurpose.FineTuningResults => Internal.Models.OpenAIFilePurpose.FineTuneResults,
+            OpenAIFilePurpose.Assistants => Internal.Models.OpenAIFilePurpose.Assistants,
+            OpenAIFilePurpose.AssistantOutputs => Internal.Models.OpenAIFilePurpose.AssistantsOutput,
+            _ => throw new ArgumentException($"Unsupported file purpose: {purpose}"),
+        };
+    }
+
+    private static PipelineMessageClassifier _responseErrorClassifier200;
+    private static PipelineMessageClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= PipelineMessageClassifier.Create(stackalloc ushort[] { 200 });
+}
diff --git a/.dotnet/src/Custom/Files/OpenAIFileInfo.cs b/.dotnet/src/Custom/Files/OpenAIFileInfo.cs
new file mode 100644
index 000000000..3fb799ca4
--- /dev/null
+++ b/.dotnet/src/Custom/Files/OpenAIFileInfo.cs
@@ -0,0 +1,36 @@
+using System;
+
+namespace OpenAI.Files;
+
+public partial class OpenAIFileInfo
+{
+    public string Id { get; }
+    public OpenAIFilePurpose Purpose { get; }
+    public string Filename { get; }
+    public long?
Size { get; } + public DateTimeOffset CreatedAt { get; } + + internal OpenAIFileInfo(Internal.Models.OpenAIFile internalFile) + { + Id = internalFile.Id; + Purpose = internalFile.Purpose.ToString() switch + { + "fine-tune" => OpenAIFilePurpose.FineTuning, + "fine-tune-results" => OpenAIFilePurpose.FineTuningResults, + "assistants" => OpenAIFilePurpose.Assistants, + "assistants_output" => OpenAIFilePurpose.AssistantOutputs, + _ => throw new ArgumentException(nameof(internalFile)), + }; + Filename = internalFile.Filename; + Size = internalFile.Bytes; + CreatedAt = internalFile.CreatedAt; + } +} + +public enum OpenAIFilePurpose +{ + FineTuning, + FineTuningResults, + Assistants, + AssistantOutputs, +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Files/OpenAIFileInfoCollection.cs b/.dotnet/src/Custom/Files/OpenAIFileInfoCollection.cs new file mode 100644 index 000000000..3c40e56a5 --- /dev/null +++ b/.dotnet/src/Custom/Files/OpenAIFileInfoCollection.cs @@ -0,0 +1,11 @@ +using System.Collections.Generic; +using System.Collections.ObjectModel; + +namespace OpenAI.Files; + +public partial class OpenAIFileInfoCollection : ReadOnlyCollection +{ + internal OpenAIFileInfoCollection(IList list) : base(list) + { + } +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Files/UploadFileOptions.cs b/.dotnet/src/Custom/Files/UploadFileOptions.cs new file mode 100644 index 000000000..4b3b6c2d1 --- /dev/null +++ b/.dotnet/src/Custom/Files/UploadFileOptions.cs @@ -0,0 +1,42 @@ +using OpenAI.Internal; +using System; +using System.IO; + +namespace OpenAI.Files; + +internal class UploadFileOptions +{ + internal static MultipartFormDataBinaryContent ToMultipartContent(Stream fileStream, string fileName, OpenAIFilePurpose purpose) + { + MultipartFormDataBinaryContent content = new(); + + content.Add(fileStream, "file", fileName); + + AddContent(purpose, content); + + return content; + } + + internal static MultipartFormDataBinaryContent ToMultipartContent(BinaryData fileData, string fileName, OpenAIFilePurpose purpose) + { + MultipartFormDataBinaryContent content = new(); + + content.Add(fileData, "file", fileName); + + AddContent(purpose, content); + + return content; + } + + private static void AddContent(OpenAIFilePurpose purpose, MultipartFormDataBinaryContent content) + { + string purposeValue = purpose switch + { + OpenAIFilePurpose.FineTuning => "fine-tune", + OpenAIFilePurpose.Assistants => "assistants", + _ => throw new ArgumentException($"Unsupported purpose for file upload: {purpose}"), + }; + + content.Add(purposeValue, "\"purpose\""); + } +} diff --git a/.dotnet/src/Custom/FineTuning/FineTuningManagementClient.Protocol.cs b/.dotnet/src/Custom/FineTuning/FineTuningManagementClient.Protocol.cs new file mode 100644 index 000000000..d5450df4e --- /dev/null +++ b/.dotnet/src/Custom/FineTuning/FineTuningManagementClient.Protocol.cs @@ -0,0 +1,74 @@ +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Threading.Tasks; + +namespace OpenAI.FineTuningManagement; + +public partial class FineTuningManagementClient +{ + /// + public virtual ClientResult CreateFineTuningJob( + BinaryContent content, + RequestOptions options = null) + => FineTuningShim.CreateFineTuningJob(content, options); + + /// + public virtual async Task CreateFineTuningJobAsync( + BinaryContent content, + RequestOptions options = null) + => await FineTuningShim.CreateFineTuningJobAsync(content, options).ConfigureAwait(false); + + /// + public virtual ClientResult GetFineTuningJob( + string 
jobId, + RequestOptions options) + => FineTuningShim.RetrieveFineTuningJob(jobId, options); + + /// + public virtual async Task GetFineTuningJobAsync( + string jobId, + RequestOptions options) + => await FineTuningShim.RetrieveFineTuningJobAsync(jobId, options).ConfigureAwait(false); + + /// + public virtual ClientResult GetFineTuningJobs( + string previousJobId, + int? maxResults, + RequestOptions options) + => FineTuningShim.GetPaginatedFineTuningJobs(previousJobId, maxResults, options); + + /// + public virtual async Task GetFineTuningJobsAsync( + string previousJobId, + int? maxResults, + RequestOptions options) + => await FineTuningShim.GetPaginatedFineTuningJobsAsync(previousJobId, maxResults, options).ConfigureAwait(false); + + /// + public virtual ClientResult CancelFineTuningJob( + string jobId, + RequestOptions options) + => FineTuningShim.CancelFineTuningJob(jobId, options); + + /// + public virtual async Task CancelFineTuningJobAsync( + string jobId, + RequestOptions options) + => await FineTuningShim.CancelFineTuningJobAsync(jobId, options).ConfigureAwait(false); + + /// + public virtual ClientResult GetFineTuningJobEvents( + string jobId, + string previousEventId, + int? maxResults, + RequestOptions options) + => FineTuningShim.GetFineTuningEvents(jobId, previousEventId, maxResults, options); + + /// + public virtual async Task GetFineTuningJobEventsAsync( + string jobId, + string previousEventId, + int? maxResults, + RequestOptions options) + => await FineTuningShim.GetFineTuningEventsAsync(jobId, previousEventId, maxResults, options).ConfigureAwait(false); +} diff --git a/.dotnet/src/Custom/FineTuning/FineTuningManagementClient.cs b/.dotnet/src/Custom/FineTuning/FineTuningManagementClient.cs new file mode 100644 index 000000000..ad12cd2e5 --- /dev/null +++ b/.dotnet/src/Custom/FineTuning/FineTuningManagementClient.cs @@ -0,0 +1,33 @@ +using System; +using System.ClientModel; + +namespace OpenAI.FineTuningManagement; + +/// +/// The service client for OpenAI fine-tuning operations. +/// +public partial class FineTuningManagementClient +{ + private readonly OpenAIClientConnector _clientConnector; + private Internal.FineTuning FineTuningShim => _clientConnector.InternalClient.GetFineTuningClient(); + + /// + /// Initializes a new instance of , used for fine-tuning operation requests. + /// + /// + /// + /// If an endpoint is not provided, the client will use the OPENAI_ENDPOINT environment variable if it + /// defined and otherwise use the default OpenAI v1 endpoint. + /// + /// + /// If an authentication credential is not defined, the client use the OPENAI_API_KEY environment variable + /// if it is defined. + /// + /// + /// The API key used to authenticate with the service endpoint. + /// Additional options to customize the client. + public FineTuningManagementClient(ApiKeyCredential credential = default, OpenAIClientOptions options = default) + { + _clientConnector = new(model: null, credential, options); + } +} diff --git a/.dotnet/src/Custom/Images/GeneratedImage.cs b/.dotnet/src/Custom/Images/GeneratedImage.cs new file mode 100644 index 000000000..4643ffc24 --- /dev/null +++ b/.dotnet/src/Custom/Images/GeneratedImage.cs @@ -0,0 +1,48 @@ +using System; + +namespace OpenAI.Images; + +/// +/// Represents the result data for an image generation request. +/// +public class GeneratedImage +{ + /// + /// The binary image data received from the response, provided when + /// is set to . 
+ /// + /// + /// This property is mutually exclusive with and will be null when the other + /// is present. + /// + public BinaryData ImageBytes { get; } + /// + /// A temporary internet location for an image, provided by default or when + /// is set to . + /// + /// + /// This property is mutually exclusive with and will be null when the other + /// is present. + /// + public Uri ImageUri { get; } + /// + /// The final, revised prompt that was used to generate the result image, populated if the model performed any + /// such revisions to the prompt. + /// + /// + /// Revisions are automatically performed to enrich image prompts and improve output quality and consistency. + /// + public string RevisedPrompt { get; } + /// + /// The timestamp at which the result image was generated. + /// + public DateTimeOffset CreatedAt { get; } + + internal GeneratedImage(Internal.Models.ImagesResponse internalResponse, int internalDataIndex) + { + CreatedAt = internalResponse.Created; + ImageBytes = internalResponse.Data[(int)internalDataIndex].B64Json; + RevisedPrompt = internalResponse.Data[(int)internalDataIndex].RevisedPrompt; + ImageUri = internalResponse.Data[(int)internalDataIndex].Url; + } +} diff --git a/.dotnet/src/Custom/Images/GeneratedImageCollection.cs b/.dotnet/src/Custom/Images/GeneratedImageCollection.cs new file mode 100644 index 000000000..9f286e260 --- /dev/null +++ b/.dotnet/src/Custom/Images/GeneratedImageCollection.cs @@ -0,0 +1,33 @@ +using System; +using System.Collections.Generic; +using System.Collections.ObjectModel; +using System.Text.Json; + +namespace OpenAI.Images; + +/// +/// Represents an image generation response payload that contains information for multiple generated images. +/// +public class GeneratedImageCollection : ReadOnlyCollection +{ + internal GeneratedImageCollection(IList list) : base(list) { } + + internal static GeneratedImageCollection Deserialize(BinaryData content) + { + using JsonDocument responseDocument = JsonDocument.Parse(content); + return Deserialize(responseDocument.RootElement); + } + + internal static GeneratedImageCollection Deserialize(JsonElement element) + { + Internal.Models.ImagesResponse response = Internal.Models.ImagesResponse.DeserializeImagesResponse(element); + + List images = []; + for (int i = 0; i < response.Data.Count; i++) + { + images.Add(new GeneratedImage(response, i)); + } + + return new GeneratedImageCollection(images); + } +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Images/ImageClient.Protocol.cs b/.dotnet/src/Custom/Images/ImageClient.Protocol.cs new file mode 100644 index 000000000..61fc82eb1 --- /dev/null +++ b/.dotnet/src/Custom/Images/ImageClient.Protocol.cs @@ -0,0 +1,197 @@ +using System; +using System.ClientModel.Primitives; +using System.ClientModel; +using System.ComponentModel; +using System.IO; +using System.Threading.Tasks; + +namespace OpenAI.Images; + +public partial class ImageClient +{ + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult GenerateImage(BinaryContent content, RequestOptions options = null) + => Shim.CreateImage(content, options); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual async Task GenerateImageAsync(BinaryContent content, RequestOptions options = null) + => await Shim.CreateImageAsync(content, options).ConfigureAwait(false); + + /// + /// [Protocol Method] Creates an edited or extended image given an original image and a prompt. 
+ /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler or convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The content to send as the body of the request. + /// The content type of the request. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult GenerateImageEdits(BinaryContent content, string contentType, RequestOptions options = null) + { + Argument.AssertNotNull(content, nameof(content)); + Argument.AssertNotNullOrEmpty(contentType, nameof(contentType)); + + options ??= new RequestOptions(); + + using PipelineMessage message = CreateCreateImageEditsRequest(content, contentType, options); + + Shim.Pipeline.Send(message); + + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) + { + throw new ClientResultException(response); + } + + return ClientResult.FromResponse(response); + } + + /// + /// [Protocol Method] Creates an edited or extended image given an original image and a prompt. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler or convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The content to send as the body of the request. + /// The content type of the request. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual async Task GenerateImageEditsAsync(BinaryContent content, string contentType, RequestOptions options = null) + { + Argument.AssertNotNull(content, nameof(content)); + Argument.AssertNotNullOrEmpty(contentType, nameof(contentType)); + + options ??= new RequestOptions(); + + using PipelineMessage message = CreateCreateImageEditsRequest(content, contentType, options); + + Shim.Pipeline.Send(message); + + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) + { + throw await ClientResultException.CreateAsync(response).ConfigureAwait(false); + } + + return ClientResult.FromResponse(response); + } + + /// + /// [Protocol Method] Creates an edited or extended image given an original image and a prompt. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler or convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The content to send as the body of the request. + /// The content type of the request. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. 
+ /// The response returned from the service. + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual ClientResult GenerateImageVariations(BinaryContent content, string contentType, RequestOptions options = null) + { + Argument.AssertNotNull(content, nameof(content)); + Argument.AssertNotNullOrEmpty(contentType, nameof(contentType)); + + options ??= new RequestOptions(); + + using PipelineMessage message = CreateImageVariationsRequest(content, contentType, options); + + Shim.Pipeline.Send(message); + + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) + { + throw new ClientResultException(response); + } + + return ClientResult.FromResponse(response); + } + + /// + /// [Protocol Method] Creates an edited or extended image given an original image and a prompt. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler or convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The content to send as the body of the request. + /// The content type of the request. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + [EditorBrowsable(EditorBrowsableState.Never)] + public virtual async Task GenerateImageVariationsAsync(BinaryContent content, string contentType, RequestOptions options = null) + { + Argument.AssertNotNull(content, nameof(content)); + Argument.AssertNotNullOrEmpty(contentType, nameof(contentType)); + + options ??= new RequestOptions(); + + using PipelineMessage message = CreateImageVariationsRequest(content, contentType, options); + + Shim.Pipeline.Send(message); + + PipelineResponse response = message.Response!; + + if (response.IsError && options.ErrorOptions == ClientErrorBehaviors.Default) + { + throw await ClientResultException.CreateAsync(response).ConfigureAwait(false); + } + + return ClientResult.FromResponse(response); + } +} diff --git a/.dotnet/src/Custom/Images/ImageClient.cs b/.dotnet/src/Custom/Images/ImageClient.cs new file mode 100644 index 000000000..4f9ca8f92 --- /dev/null +++ b/.dotnet/src/Custom/Images/ImageClient.cs @@ -0,0 +1,461 @@ +using OpenAI.Internal; +using System; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.IO; +using System.Runtime.InteropServices.ComTypes; +using System.Text; +using System.Threading.Tasks; + +namespace OpenAI.Images; + +/// The service client for OpenAI image operations. +public partial class ImageClient +{ + private readonly OpenAIClientConnector _clientConnector; + private Internal.Images Shim => _clientConnector.InternalClient.GetImagesClient(); + + /// + /// Initializes a new instance of , used for image operation requests. + /// + /// + /// + /// If an endpoint is not provided, the client will use the OPENAI_ENDPOINT environment variable if it + /// defined and otherwise use the default OpenAI v1 endpoint. + /// + /// + /// If an authentication credential is not defined, the client use the OPENAI_API_KEY environment variable + /// if it is defined. + /// + /// + /// The model name for image operations that the client should use. 
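+    /// For example, "dall-e-3".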
+ /// The API key used to authenticate with the service endpoint. + /// Additional options to customize the client. + public ImageClient(string model, ApiKeyCredential credential = default, OpenAIClientOptions options = null) + { + _clientConnector = new(model, credential, options); + } + + /// + /// Generates a single image for a provided prompt. + /// + /// The description and instructions for the image. + /// Additional options for the image generation request. + /// The cancellation token for the operation. + /// A result for a single image generation. + public virtual ClientResult GenerateImage( + string prompt, + ImageGenerationOptions options = null) + { + ClientResult multiResult = GenerateImages(prompt, imageCount: null, options); + return ClientResult.FromValue(multiResult.Value[0], multiResult.GetRawResponse()); + } + + /// + /// Generates a single image for a provided prompt. + /// + /// The description and instructions for the image. + /// Additional options for the image generation request. + /// The cancellation token for the operation. + /// A result for a single image generation. + public virtual async Task> GenerateImageAsync( + string prompt, + ImageGenerationOptions options = null) + { + ClientResult multiResult = await GenerateImagesAsync(prompt, imageCount: null, options).ConfigureAwait(false); + return ClientResult.FromValue(multiResult.Value[0], multiResult.GetRawResponse()); + } + + /// + /// Generates a collection of image alternatives for a provided prompt. + /// + /// The description and instructions for the image. + /// + /// The number of alternative images to generate for the prompt. + /// + /// Additional options for the image generation request. + /// The cancellation token for the operation. + /// A result for a single image generation. + public virtual ClientResult GenerateImages( + string prompt, + int? imageCount = null, + ImageGenerationOptions options = null) + { + Internal.Models.CreateImageRequest request = CreateInternalImageRequest(prompt, imageCount, options); + ClientResult response = Shim.CreateImage(request); + + List images = []; + for (int i = 0; i < response.Value.Data.Count; i++) + { + images.Add(new GeneratedImage(response.Value, i)); + } + + return ClientResult.FromValue(new GeneratedImageCollection(images), response.GetRawResponse()); + } + + /// + /// Generates a collection of image alternatives for a provided prompt. + /// + /// The description and instructions for the image. + /// + /// The number of alternative images to generate for the prompt. + /// + /// Additional options for the image generation request. + /// The cancellation token for the operation. + /// A result for a single image generation. + public virtual async Task> GenerateImagesAsync( + string prompt, + int? imageCount = null, + ImageGenerationOptions options = null) + { + Internal.Models.CreateImageRequest request = CreateInternalImageRequest(prompt, imageCount, options); + ClientResult response = await Shim.CreateImageAsync(request).ConfigureAwait(false); + + List images = []; + for (int i = 0; i < response.Value.Data.Count; i++) + { + images.Add(new GeneratedImage(response.Value, i)); + } + + return ClientResult.FromValue(new GeneratedImageCollection(images), response.GetRawResponse()); + } + + // convenience method - sync; Stream overload + // TODO: add refdoc comment + public virtual ClientResult GenerateImageEdits( + Stream fileStream, + string fileName, + string prompt, + int? 
imageCount = null, + ImageEditOptions options = null) + { + Argument.AssertNotNull(fileStream, nameof(fileStream)); + Argument.AssertNotNull(fileName, nameof(fileName)); + Argument.AssertNotNull(prompt, nameof(prompt)); + + if (options?.MaskBytes is not null) + { + Argument.AssertNotNull(options.MaskFileName, nameof(options.MaskFileName)); + } + + options ??= new(); + + using MultipartFormDataBinaryContent content = options.ToMultipartContent(fileStream, fileName, prompt, _clientConnector.Model, imageCount); + + ClientResult result = GenerateImageEdits(content, content.ContentType); + + PipelineResponse response = result.GetRawResponse(); + + GeneratedImageCollection value = GeneratedImageCollection.Deserialize(response.Content!); + + return ClientResult.FromValue(value, response); + } + + // convenience method - sync + // TODO: add refdoc comment + public virtual ClientResult GenerateImageEdits( + BinaryData imageBytes, + string fileName, + string prompt, + int? imageCount = null, + ImageEditOptions options = null) + { + Argument.AssertNotNull(imageBytes, nameof(imageBytes)); + Argument.AssertNotNull(fileName, nameof(fileName)); + Argument.AssertNotNull(prompt, nameof(prompt)); + + if (options?.MaskBytes is not null) + { + Argument.AssertNotNull(options.MaskFileName, nameof(options.MaskFileName)); + } + + options ??= new(); + + using MultipartFormDataBinaryContent content = options.ToMultipartContent(imageBytes, fileName, prompt, _clientConnector.Model, imageCount); + + ClientResult result = GenerateImageEdits(content, content.ContentType); + + PipelineResponse response = result.GetRawResponse(); + + GeneratedImageCollection value = GeneratedImageCollection.Deserialize(response.Content!); + + return ClientResult.FromValue(value, response); + } + + // convenience method - async; Stream overload + // TODO: add refdoc comment + public virtual async Task> GenerateImageEditsAsync( + Stream fileStream, + string fileName, + string prompt, + int? imageCount = null, + ImageEditOptions options = null) + { + Argument.AssertNotNull(fileStream, nameof(fileStream)); + Argument.AssertNotNull(fileName, nameof(fileName)); + Argument.AssertNotNull(prompt, nameof(prompt)); + + if (options?.MaskBytes is not null) + { + Argument.AssertNotNull(options.MaskFileName, nameof(options.MaskFileName)); + } + + options ??= new(); + + using MultipartFormDataBinaryContent content = options.ToMultipartContent(fileStream, fileName, prompt, _clientConnector.Model, imageCount); + + ClientResult result = await GenerateImageEditsAsync(content, content.ContentType).ConfigureAwait(false); + + PipelineResponse response = result.GetRawResponse(); + + GeneratedImageCollection value = GeneratedImageCollection.Deserialize(response.Content!); + + return ClientResult.FromValue(value, response); + } + + // convenience method - async + // TODO: add refdoc comment + public virtual async Task> GenerateImageEditsAsync( + BinaryData imageBytes, + string fileName, + string prompt, + int? 
+        ImageEditOptions options = null)
+    {
+        Argument.AssertNotNull(imageBytes, nameof(imageBytes));
+        Argument.AssertNotNull(fileName, nameof(fileName));
+        Argument.AssertNotNull(prompt, nameof(prompt));
+
+        if (options?.MaskBytes is not null)
+        {
+            Argument.AssertNotNull(options.MaskFileName, nameof(options.MaskFileName));
+        }
+
+        options ??= new();
+
+        using MultipartFormDataBinaryContent content = options.ToMultipartContent(imageBytes, fileName, prompt, _clientConnector.Model, imageCount);
+
+        ClientResult result = await GenerateImageEditsAsync(content, content.ContentType).ConfigureAwait(false);
+
+        PipelineResponse response = result.GetRawResponse();
+
+        GeneratedImageCollection value = GeneratedImageCollection.Deserialize(response.Content!);
+
+        return ClientResult.FromValue(value, response);
+    }
+
+    // convenience method - sync; Stream overload
+    // TODO: add refdoc comment
+    public virtual ClientResult<GeneratedImageCollection> GenerateImageVariations(
+        Stream fileStream,
+        string fileName,
+        int? imageCount = null,
+        ImageVariationOptions options = null)
+    {
+        Argument.AssertNotNull(fileStream, nameof(fileStream));
+        Argument.AssertNotNull(fileName, nameof(fileName));
+
+        options ??= new();
+
+        using MultipartFormDataBinaryContent content = options.ToMultipartContent(fileStream, fileName, _clientConnector.Model, imageCount);
+
+        ClientResult result = GenerateImageVariations(content, content.ContentType);
+
+        PipelineResponse response = result.GetRawResponse();
+
+        GeneratedImageCollection value = GeneratedImageCollection.Deserialize(response.Content!);
+
+        return ClientResult.FromValue(value, response);
+    }
+
+    // convenience method - sync
+    // TODO: add refdoc comment
+    public virtual ClientResult<GeneratedImageCollection> GenerateImageVariations(
+        BinaryData imageBytes,
+        string fileName,
+        int? imageCount = null,
+        ImageVariationOptions options = null)
+    {
+        Argument.AssertNotNull(imageBytes, nameof(imageBytes));
+        Argument.AssertNotNull(fileName, nameof(fileName));
+
+        options ??= new();
+
+        using MultipartFormDataBinaryContent content = options.ToMultipartContent(imageBytes, fileName, _clientConnector.Model, imageCount);
+
+        ClientResult result = GenerateImageVariations(content, content.ContentType);
+
+        PipelineResponse response = result.GetRawResponse();
+
+        GeneratedImageCollection value = GeneratedImageCollection.Deserialize(response.Content!);
+
+        return ClientResult.FromValue(value, response);
+    }
+
+    // convenience method - async; Stream overload
+    // TODO: add refdoc comment
+    public virtual async Task<ClientResult<GeneratedImageCollection>> GenerateImageVariationsAsync(
+        Stream fileStream,
+        string fileName,
+        int? imageCount = null,
+        ImageVariationOptions options = null)
+    {
+        Argument.AssertNotNull(fileStream, nameof(fileStream));
+        Argument.AssertNotNull(fileName, nameof(fileName));
+
+        options ??= new();
+
+        using MultipartFormDataBinaryContent content = options.ToMultipartContent(fileStream, fileName, _clientConnector.Model, imageCount);
+
+        ClientResult result = await GenerateImageVariationsAsync(content, content.ContentType).ConfigureAwait(false);
+
+        PipelineResponse response = result.GetRawResponse();
+
+        GeneratedImageCollection value = GeneratedImageCollection.Deserialize(response.Content!);
+
+        return ClientResult.FromValue(value, response);
+    }
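+
+    // Example (illustrative sketch): requesting variations of an existing image.
+    // Variations take no prompt; only the source image, an optional count, and options.
+    // The file name below is hypothetical.
+    //
+    //   BinaryData source = BinaryData.FromBytes(File.ReadAllBytes("logo.png"));
+    //   ClientResult<GeneratedImageCollection> result =
+    //       client.GenerateImageVariations(source, "logo.png", imageCount: 3);
+
+    // convenience method - async
+    // TODO: add refdoc comment
+    public virtual async Task<ClientResult<GeneratedImageCollection>> GenerateImageVariationsAsync(
+        BinaryData imageBytes,
+        string fileName,
+        int?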
imageCount = null, + ImageVariationOptions options = null) + { + Argument.AssertNotNull(imageBytes, nameof(imageBytes)); + Argument.AssertNotNull(fileName, nameof(fileName)); + + options ??= new(); + + using MultipartFormDataBinaryContent content = options.ToMultipartContent(imageBytes, fileName, _clientConnector.Model, imageCount); + + ClientResult result = await GenerateImageVariationsAsync(content, content.ContentType).ConfigureAwait(false); + + PipelineResponse response = result.GetRawResponse(); + + GeneratedImageCollection value = GeneratedImageCollection.Deserialize(response.Content!); + + return ClientResult.FromValue(value, response); + } + + private Internal.Models.CreateImageRequest CreateInternalImageRequest( + string prompt, + int? imageCount = null, + ImageGenerationOptions options = null) + { + options ??= new(); + Internal.Models.CreateImageRequestQuality? internalQuality = null; + if (options.Quality != null) + { + internalQuality = options.Quality switch + { + ImageQuality.Standard => Internal.Models.CreateImageRequestQuality.Standard, + ImageQuality.High => Internal.Models.CreateImageRequestQuality.Hd, + _ => throw new ArgumentException(nameof(options.Quality)), + }; + } + + Internal.Models.CreateImageRequestResponseFormat? internalFormat = null; + if (options.ResponseFormat != null) + { + internalFormat = options.ResponseFormat switch + { + ImageResponseFormat.Bytes => Internal.Models.CreateImageRequestResponseFormat.B64Json, + ImageResponseFormat.Uri => Internal.Models.CreateImageRequestResponseFormat.Url, + _ => throw new ArgumentException(nameof(options.ResponseFormat)), + }; + } + + Internal.Models.CreateImageRequestSize? internalSize = null; + if (options.Size != null) + { + internalSize = options.Size switch + { + ImageSize.Size256x256 => Internal.Models.CreateImageRequestSize._256x256, + ImageSize.Size512x512 => Internal.Models.CreateImageRequestSize._512x512, + ImageSize.Size1024x1024 => Internal.Models.CreateImageRequestSize._1024x1024, + ImageSize.Size1024x1792 => Internal.Models.CreateImageRequestSize._1024x1792, + ImageSize.Size1792x1024 => Internal.Models.CreateImageRequestSize._1792x1024, + _ => throw new ArgumentException(nameof(options.Size)), + }; + } + + Internal.Models.CreateImageRequestStyle? 
internalStyle = null;
+        if (options.Style != null)
+        {
+            internalStyle = options.Style switch
+            {
+                ImageStyle.Vivid => Internal.Models.CreateImageRequestStyle.Vivid,
+                ImageStyle.Natural => Internal.Models.CreateImageRequestStyle.Natural,
+                _ => throw new ArgumentException(nameof(options.Style)),
+            };
+        }
+
+        return new Internal.Models.CreateImageRequest(
+            prompt,
+            _clientConnector.Model,
+            imageCount,
+            quality: internalQuality,
+            responseFormat: internalFormat,
+            size: internalSize,
+            style: internalStyle,
+            options.User,
+            serializedAdditionalRawData: null);
+    }
+
+    private PipelineMessage CreateCreateImageEditsRequest(BinaryContent content, string contentType, RequestOptions options)
+    {
+        PipelineMessage message = Shim.Pipeline.CreateMessage();
+        message.ResponseClassifier = ResponseErrorClassifier200;
+
+        PipelineRequest request = message.Request;
+        request.Method = "POST";
+
+        UriBuilder uriBuilder = new(_clientConnector.Endpoint.AbsoluteUri);
+
+        StringBuilder path = new();
+        path.Append("/images/edits");
+        uriBuilder.Path += path.ToString();
+
+        request.Uri = uriBuilder.Uri;
+
+        request.Headers.Set("Content-Type", contentType);
+
+        request.Content = content;
+
+        message.Apply(options);
+
+        return message;
+    }
+
+    private PipelineMessage CreateImageVariationsRequest(BinaryContent content, string contentType, RequestOptions options)
+    {
+        PipelineMessage message = Shim.Pipeline.CreateMessage();
+        message.ResponseClassifier = ResponseErrorClassifier200;
+
+        PipelineRequest request = message.Request;
+        request.Method = "POST";
+
+        UriBuilder uriBuilder = new(_clientConnector.Endpoint.AbsoluteUri);
+
+        StringBuilder path = new();
+        path.Append("/images/variations");
+        uriBuilder.Path += path.ToString();
+
+        request.Uri = uriBuilder.Uri;
+
+        request.Headers.Set("Content-Type", contentType);
+
+        request.Content = content;
+
+        message.Apply(options);
+
+        return message;
+    }
+
+    private static PipelineMessageClassifier _responseErrorClassifier200;
+    private static PipelineMessageClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= PipelineMessageClassifier.Create(stackalloc ushort[] { 200 });
+}
diff --git a/.dotnet/src/Custom/Images/ImageEditOptions.cs b/.dotnet/src/Custom/Images/ImageEditOptions.cs
new file mode 100644
index 000000000..2a816428f
--- /dev/null
+++ b/.dotnet/src/Custom/Images/ImageEditOptions.cs
@@ -0,0 +1,109 @@
+using OpenAI.Internal;
+using System;
+using System.IO;
+
+namespace OpenAI.Images;
+
+///
+/// Represents additional options available to control the behavior of an image edit operation.
+///
+public partial class ImageEditOptions
+{
+    ///
+    public BinaryData MaskBytes { get; set; }
+
+    // The generator will need to add file-name to models for properties that
+    // represent files in order to enable setting the header.
+    ///
+    /// TODO
+    ///
+    public string MaskFileName { get; set; }
+
+    ///
+    public ImageResponseFormat? ResponseFormat { get; set; }
+
+    ///
+    public ImageSize? Size { get; set; }
+
+    ///
+    public string User { get; set; }
+
+    internal MultipartFormDataBinaryContent ToMultipartContent(Stream fileStream,
+        string fileName,
+        string prompt,
+        string model,
+        int?
imageCount) + { + MultipartFormDataBinaryContent content = new(); + + content.Add(imageBytes, "image", fileName); + + AddContent(model, prompt, imageCount, content); + + return content; + } + + private void AddContent(string model, string prompt, int? imageCount, MultipartFormDataBinaryContent content) + { + content.Add(prompt, "prompt"); + content.Add(model, "model"); + + if (MaskBytes is not null) + { + content.Add(MaskBytes.ToArray(), "mask", MaskFileName); + } + + if (imageCount is not null) + { + content.Add(imageCount.Value, "n"); + } + + if (ResponseFormat is not null) + { + string format = ResponseFormat switch + { + ImageResponseFormat.Uri => "url", + ImageResponseFormat.Bytes => "b64_json", + _ => throw new ArgumentException(nameof(ResponseFormat)), + }; + + content.Add(format, "response_format"); + } + + if (Size is not null) + { + string imageSize = Size switch + { + ImageSize.Size256x256 => "256x256", + ImageSize.Size512x512 => "512x512", + ImageSize.Size1024x1024 => "1024x1024", + // TODO: 1024x1792 and 1792x1024 are currently not supported in image edits. + ImageSize.Size1024x1792 => "1024x1792", + ImageSize.Size1792x1024 => "1792x1024", + _ => throw new ArgumentException(nameof(imageSize)) + }; + + content.Add(imageSize, "size"); + } + + if (User is not null) + { + content.Add(User, "user"); + } + } +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Images/ImageGenerationOptions.cs b/.dotnet/src/Custom/Images/ImageGenerationOptions.cs new file mode 100644 index 000000000..37cdb95af --- /dev/null +++ b/.dotnet/src/Custom/Images/ImageGenerationOptions.cs @@ -0,0 +1,75 @@ +namespace OpenAI.Images; + +/// +/// Represents additional options available to control the behavior of an image generation operation. +/// +public partial class ImageGenerationOptions +{ + /// + /// Specifies the quality level of the image that will be generated. This setting is only available when using + /// the dall-e-3 model. + /// + /// + /// hd - - Finer details, greater consistency, slower, more intensive. + /// + /// + /// standard - - The default quality level that's faster and less + /// intensive but may also be less detailed and consistent than hd. + /// + /// + /// + public ImageQuality? Quality { get; set; } + /// + /// Specifies the desired output representation of the generated image. + /// + /// + /// url - - Default, provides a temporary internet location that + /// the generated image can be retrieved from. + /// + /// + /// b64_json - - Provides the full image data on the response, + /// encoded in the result as a base64 string. This offers the fastest round trip time but can drastically + /// increase the size of response payloads. + /// + /// + /// + public ImageResponseFormat? ResponseFormat { get; set; } + /// + /// Specifies the dimensions of the generated image. Larger images take longer to create. + /// + /// Available for dall-e-2: + /// + /// 1024x1024 - - default + /// 256x256 - - small + /// 512x512 - - medium + /// + /// + /// + /// Available for dall-e-3: + /// + /// 1024x1024 - - default + /// 1024x1792 - - extra tall + /// 1792x1024 - - extra wide + /// + /// + /// + public ImageSize? Size { get; set; } + /// + /// The style kind to guide the generation of the image. + /// + /// + /// vivid - - default, a style that tends towards more realistic, + /// dramatic images. + /// + /// + /// natural - - a more subdued style with less tendency towards + /// realism and striking imagery. + /// + /// + /// + public ImageStyle? 
Style { get; set; }
+    ///
+    /// An optional identifier for the end user that can help OpenAI monitor for and detect abuse.
+    ///
+    public string User { get; set; }
+}
\ No newline at end of file
diff --git a/.dotnet/src/Custom/Images/ImageQuality.cs b/.dotnet/src/Custom/Images/ImageQuality.cs
new file mode 100644
index 000000000..41f13b0ff
--- /dev/null
+++ b/.dotnet/src/Custom/Images/ImageQuality.cs
@@ -0,0 +1,24 @@
+namespace OpenAI.Images;
+
+///
+/// A representation of the quality setting for image operations that controls the level of work that the model will
+/// perform.
+///
+///
+/// Available qualities consist of:
+///
+/// - standard - The default setting that balances speed, detail, and consistency.
+/// - hd - Better consistency and finer details, but may be slower.
+///
+///
+public enum ImageQuality
+{
+    ///
+    /// The hd image quality that provides finer details and greater consistency but may be slower.
+    ///
+    High,
+    ///
+    /// The standard image quality that provides a balanced mix of detailing, consistency, and speed.
+    ///
+    Standard,
+}
\ No newline at end of file
diff --git a/.dotnet/src/Custom/Images/ImageResponseFormat.cs b/.dotnet/src/Custom/Images/ImageResponseFormat.cs
new file mode 100644
index 000000000..8403482c5
--- /dev/null
+++ b/.dotnet/src/Custom/Images/ImageResponseFormat.cs
@@ -0,0 +1,32 @@
+using System;
+
+namespace OpenAI.Images;
+
+///
+/// Represents the available output methods for generated images.
+///
+/// url - - Default, provides a temporary internet location that
+/// the generated image can be retrieved from.
+///
+///
+/// b64_json - - Provides the full image data on the response,
+/// encoded in the result as a base64 string. This offers the fastest round trip time but can drastically
+/// increase the size of response payloads.
+///
+///
+///
+public enum ImageResponseFormat
+{
+    ///
+    /// Instructs the request to return image data directly on the response, encoded as a base64 string in the response
+    /// JSON. This minimizes availability time but drastically increases the size of responses, required bandwidth, and
+    /// immediate memory needs. This is equivalent to b64_json in the REST API.
+    ///
+    Bytes,
+    ///
+    /// The default setting that instructs the request to return a temporary internet location from which the image can
+    /// be retrieved.
+    ///
+    Uri,
+}
\ No newline at end of file
diff --git a/.dotnet/src/Custom/Images/ImageSize.cs b/.dotnet/src/Custom/Images/ImageSize.cs
new file mode 100644
index 000000000..c857509d1
--- /dev/null
+++ b/.dotnet/src/Custom/Images/ImageSize.cs
@@ -0,0 +1,43 @@
+namespace OpenAI.Images;
+
+///
+/// Represents the available output dimensions for generated images.
+///
+public enum ImageSize
+{
+    ///
+    /// A small, square image with 256 pixels of both width and height.
+    ///
+    /// Supported only for the older dall-e-2 model.
+    ///
+    ///
+    Size256x256,
+    ///
+    /// A medium-small, square image with 512 pixels of both width and height.
+    ///
+    /// Supported only for the older dall-e-2 model.
+    ///
+    ///
+    Size512x512,
+    ///
+    /// A square image with 1024 pixels of both width and height.
+    ///
+    /// Supported and default for both dall-e-2 and dall-e-3 models.
+    ///
+    ///
+    Size1024x1024,
+    ///
+    /// An extra tall image, 1024 pixels wide by 1792 pixels high.
+    ///
+    /// Supported only for the dall-e-3 model.
+    ///
+    ///
+    Size1024x1792,
+    ///
+    /// An extra wide image, 1792 pixels wide by 1024 pixels high.
+    ///
+    /// Supported only for the dall-e-3 model.
+    ///
+    ///
+    Size1792x1024,
+}
\ No newline at end of file
diff --git a/.dotnet/src/Custom/Images/ImageStyle.cs b/.dotnet/src/Custom/Images/ImageStyle.cs
new file mode 100644
index 000000000..30616eccd
--- /dev/null
+++ b/.dotnet/src/Custom/Images/ImageStyle.cs
@@ -0,0 +1,18 @@
+namespace OpenAI.Images;
+
+///
+/// The style of the generated images. Must be one of vivid or natural. Vivid causes the model to lean towards
+/// generating hyper-real and dramatic images. Natural causes the model to produce more natural, less hyper-real
+/// looking images. This param is only supported for dall-e-3.
+///
+public enum ImageStyle
+{
+    ///
+    /// The vivid style, with which the model will tend towards hyper-realistic, dramatic imagery.
+    ///
+    Vivid,
+    ///
+    /// The natural style, with which the model will not tend towards hyper-realistic, dramatic imagery.
+    ///
+    Natural,
+}
\ No newline at end of file
diff --git a/.dotnet/src/Custom/Images/ImageVariationOptions.cs b/.dotnet/src/Custom/Images/ImageVariationOptions.cs
new file mode 100644
index 000000000..cb72dfcc7
--- /dev/null
+++ b/.dotnet/src/Custom/Images/ImageVariationOptions.cs
@@ -0,0 +1,85 @@
+using OpenAI.Internal;
+using System;
+using System.IO;
+
+namespace OpenAI.Images;
+
+///
+/// Represents additional options available to control the behavior of an image variation operation.
+///
+public partial class ImageVariationOptions
+{
+    ///
+    public ImageSize? Size { get; set; }
+
+    ///
+    public ImageResponseFormat? ResponseFormat { get; set; }
+
+    ///
+    public string User { get; set; }
+
+    internal MultipartFormDataBinaryContent ToMultipartContent(Stream fileStream, string fileName, string model, int? imageCount)
+    {
+        MultipartFormDataBinaryContent content = new();
+
+        content.Add(fileStream, "image", fileName);
+
+        AddContent(model, imageCount, content);
+
+        return content;
+    }
+
+    internal MultipartFormDataBinaryContent ToMultipartContent(BinaryData imageBytes, string fileName, string model, int? imageCount)
+    {
+        MultipartFormDataBinaryContent content = new();
+
+        content.Add(imageBytes, "image", fileName);
+
+        AddContent(model, imageCount, content);
+
+        return content;
+    }
+
+    private void AddContent(string model, int? imageCount, MultipartFormDataBinaryContent content)
+    {
+        content.Add(model, "model");
+
+        if (imageCount is not null)
+        {
+            content.Add(imageCount.Value, "n");
+        }
+
+        if (ResponseFormat is not null)
+        {
+            string format = ResponseFormat switch
+            {
+                ImageResponseFormat.Uri => "url",
+                ImageResponseFormat.Bytes => "b64_json",
+                _ => throw new ArgumentException(nameof(ResponseFormat)),
+            };
+
+            content.Add(format, "response_format");
+        }
+
+        if (Size is not null)
+        {
+            string imageSize = Size switch
+            {
+                ImageSize.Size256x256 => "256x256",
+                ImageSize.Size512x512 => "512x512",
+                ImageSize.Size1024x1024 => "1024x1024",
+                // TODO: 1024x1792 and 1792x1024 are currently not supported in image variations.
+                ImageSize.Size1024x1792 => "1024x1792",
+                ImageSize.Size1792x1024 => "1792x1024",
+                _ => throw new ArgumentException(nameof(imageSize))
+            };
+
+            content.Add(imageSize, "size");
+        }
+
+        if (User is not null)
+        {
+            content.Add(User, "user");
+        }
+    }
+}
\ No newline at end of file
diff --git a/.dotnet/src/Custom/LegacyCompletions/LegacyCompletionClient.Protocol.cs b/.dotnet/src/Custom/LegacyCompletions/LegacyCompletionClient.Protocol.cs
new file mode 100644
index 000000000..f4b576ce9
--- /dev/null
+++ b/.dotnet/src/Custom/LegacyCompletions/LegacyCompletionClient.Protocol.cs
@@ -0,0 +1,16 @@
+using System.ClientModel;
+using System.ClientModel.Primitives;
+using System.Threading.Tasks;
+
+namespace OpenAI.LegacyCompletions;
+
+public partial class LegacyCompletionClient
+{
+    ///
+    public virtual ClientResult GenerateLegacyCompletions(BinaryContent content, RequestOptions options = null)
+        => Shim.CreateCompletion(content, options);
+
+    ///
+    public virtual async Task<ClientResult> GenerateLegacyCompletionsAsync(BinaryContent content, RequestOptions options = null)
+        => await Shim.CreateCompletionAsync(content, options).ConfigureAwait(false);
+}
diff --git a/.dotnet/src/Custom/LegacyCompletions/LegacyCompletionClient.cs b/.dotnet/src/Custom/LegacyCompletions/LegacyCompletionClient.cs
new file mode 100644
index 000000000..fd2b2b75a
--- /dev/null
+++ b/.dotnet/src/Custom/LegacyCompletions/LegacyCompletionClient.cs
@@ -0,0 +1,37 @@
+using System;
+using System.ClientModel;
+
+namespace OpenAI.LegacyCompletions;
+
+///
+/// The basic, protocol-level service client for OpenAI legacy completion operations.
+///
+/// Note: pre-chat completions are a legacy feature. New solutions should consider using chat
+/// completions or assistants instead.
+///
+///
+public partial class LegacyCompletionClient
+{
+    private readonly OpenAIClientConnector _clientConnector;
+    private Internal.Completions Shim => _clientConnector.InternalClient.GetCompletionsClient();
+
+    ///
+    /// Initializes a new instance of , used for legacy completion requests.
+    ///
+    ///
+    ///
+    /// If an endpoint is not provided, the client will use the OPENAI_ENDPOINT environment variable if it
+    /// is defined and otherwise use the default OpenAI v1 endpoint.
+    ///
+    ///
+    /// If an authentication credential is not defined, the client will use the OPENAI_API_KEY environment variable
+    /// if it is defined.
+    ///
+    ///
+    /// The API key used to authenticate with the service endpoint.
+    /// Additional options to customize the client.
+    public LegacyCompletionClient(ApiKeyCredential credential = default, OpenAIClientOptions options = default)
+    {
+        _clientConnector = new(model: null, credential, options);
+    }
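+
+    // Example (illustrative sketch): this client is protocol-only, so request bodies
+    // are raw JSON sent via BinaryContent. The model name below is hypothetical.
+    //
+    //   LegacyCompletionClient client = new();
+    //   using BinaryContent body = BinaryContent.Create(BinaryData.FromString(
+    //       """{ "model": "gpt-3.5-turbo-instruct", "prompt": "Say hello.", "max_tokens": 16 }"""));
+    //   ClientResult result = client.GenerateLegacyCompletions(body);
+    //   Console.WriteLine(result.GetRawResponse().Content.ToString());
+}
diff --git a/.dotnet/src/Custom/Models/ModelDetailCollection.cs b/.dotnet/src/Custom/Models/ModelDetailCollection.cs
new file mode 100644
index 000000000..8e89d769e
--- /dev/null
+++ b/.dotnet/src/Custom/Models/ModelDetailCollection.cs
@@ -0,0 +1,13 @@
+using System.Collections.Generic;
+using System.Collections.ObjectModel;
+
+namespace OpenAI.ModelManagement;
+
+///
+/// Represents a collection of entries for available models.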
+///
+public partial class ModelDetailCollection : ReadOnlyCollection<ModelDetails>
+{
+    internal ModelDetailCollection(IList<ModelDetails> list) : base(list)
+    {}
+}
diff --git a/.dotnet/src/Custom/Models/ModelDetails.cs b/.dotnet/src/Custom/Models/ModelDetails.cs
new file mode 100644
index 000000000..30d9ec8b5
--- /dev/null
+++ b/.dotnet/src/Custom/Models/ModelDetails.cs
@@ -0,0 +1,29 @@
+using System;
+
+namespace OpenAI.ModelManagement;
+
+///
+/// Represents information about a single available model entry.
+///
+public partial class ModelDetails
+{
+    ///
+    /// The ID of the model as used when calling the service. An example is 'gpt-3.5-turbo'.
+    ///
+    public string Id { get; }
+    ///
+    /// The timestamp when the current model entry became available.
+    ///
+    public DateTimeOffset CreatedAt { get; }
+    ///
+    /// The name of the organization that owns the model.
+    ///
+    public string OwnerOrganization { get; }
+
+    internal ModelDetails(Internal.Models.Model internalModel)
+    {
+        Id = internalModel.Id;
+        CreatedAt = internalModel.Created;
+        OwnerOrganization = internalModel.OwnedBy;
+    }
+}
diff --git a/.dotnet/src/Custom/Models/ModelManagementClient.Protocol.cs b/.dotnet/src/Custom/Models/ModelManagementClient.Protocol.cs
new file mode 100644
index 000000000..1e4074647
--- /dev/null
+++ b/.dotnet/src/Custom/Models/ModelManagementClient.Protocol.cs
@@ -0,0 +1,39 @@
+using System.ClientModel;
+using System.ClientModel.Primitives;
+using System.ComponentModel;
+using System.Threading.Tasks;
+
+namespace OpenAI.ModelManagement;
+
+public partial class ModelManagementClient
+{
+    ///
+    [EditorBrowsable(EditorBrowsableState.Never)]
+    public virtual ClientResult GetModelInfo(string modelId, RequestOptions options)
+        => Shim.Retrieve(modelId, options);
+
+    ///
+    [EditorBrowsable(EditorBrowsableState.Never)]
+    public virtual async Task<ClientResult> GetModelInfoAsync(string modelId, RequestOptions options)
+        => await Shim.RetrieveAsync(modelId, options).ConfigureAwait(false);
+
+    ///
+    [EditorBrowsable(EditorBrowsableState.Never)]
+    public virtual ClientResult GetModels(RequestOptions options)
+        => Shim.GetModels(options);
+
+    ///
+    [EditorBrowsable(EditorBrowsableState.Never)]
+    public virtual async Task<ClientResult> GetModelsAsync(RequestOptions options)
+        => await Shim.GetModelsAsync(options).ConfigureAwait(false);
+
+    ///
+    [EditorBrowsable(EditorBrowsableState.Never)]
+    public virtual ClientResult DeleteModel(string modelId, RequestOptions options)
+        => Shim.Delete(modelId, options);
+
+    ///
+    [EditorBrowsable(EditorBrowsableState.Never)]
+    public virtual async Task<ClientResult> DeleteModelAsync(string modelId, RequestOptions options)
+        => await Shim.DeleteAsync(modelId, options).ConfigureAwait(false);
+}
diff --git a/.dotnet/src/Custom/Models/ModelManagementClient.cs b/.dotnet/src/Custom/Models/ModelManagementClient.cs
new file mode 100644
index 000000000..fad3d3615
--- /dev/null
+++ b/.dotnet/src/Custom/Models/ModelManagementClient.cs
@@ -0,0 +1,83 @@
+using OpenAI.ClientShared.Internal;
+using System;
+using System.ClientModel;
+using System.Threading.Tasks;
+
+namespace OpenAI.ModelManagement;
+
+///
+/// The service client for OpenAI model operations.
+///
+public partial class ModelManagementClient
+{
+    private readonly OpenAIClientConnector _clientConnector;
+    private Internal.ModelsOps Shim => _clientConnector.InternalClient.GetModelsOpsClient();
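+
+    // Example (illustrative sketch): listing the available models and printing their
+    // IDs and owners via the strongly typed convenience methods defined below.
+    //
+    //   ModelManagementClient client = new();
+    //   ClientResult<ModelDetailCollection> models = client.GetModels();
+    //   foreach (ModelDetails model in models.Value)
+    //   {
+    //       Console.WriteLine($"{model.Id} (owned by {model.OwnerOrganization})");
+    //   }
+
+    ///
+    /// Initializes a new instance of , used for model operation requests.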
+    ///
+    ///
+    ///
+    /// If an endpoint is not provided, the client will use the OPENAI_ENDPOINT environment variable if it
+    /// is defined and otherwise use the default OpenAI v1 endpoint.
+    ///
+    ///
+    /// If an authentication credential is not defined, the client will use the OPENAI_API_KEY environment variable
+    /// if it is defined.
+    ///
+    ///
+    /// The API key used to authenticate with the service endpoint.
+    /// Additional options to customize the client.
+    public ModelManagementClient(ApiKeyCredential credential = default, OpenAIClientOptions options = default)
+    {
+        _clientConnector = new(model: "none", credential, options);
+    }
+
+    public virtual ClientResult<ModelDetails> GetModelInfo(string modelId)
+    {
+        ClientResult<Internal.Models.Model> internalResult = Shim.Retrieve(modelId);
+        return ClientResult.FromValue(new ModelDetails(internalResult.Value), internalResult.GetRawResponse());
+    }
+
+    public virtual async Task<ClientResult<ModelDetails>> GetModelInfoAsync(string modelId)
+    {
+        ClientResult<Internal.Models.Model> internalResult = await Shim.RetrieveAsync(modelId).ConfigureAwait(false);
+        return ClientResult.FromValue(new ModelDetails(internalResult.Value), internalResult.GetRawResponse());
+    }
+
+    public virtual ClientResult<ModelDetailCollection> GetModels()
+    {
+        ClientResult<Internal.Models.ListModelsResponse> internalResult = Shim.GetModels();
+        ChangeTrackingList<ModelDetails> modelEntries = [];
+        foreach (Internal.Models.Model internalModel in internalResult.Value.Data)
+        {
+            modelEntries.Add(new(internalModel));
+        }
+        return ClientResult.FromValue(new ModelDetailCollection(modelEntries), internalResult.GetRawResponse());
+    }
+
+    public virtual async Task<ClientResult<ModelDetailCollection>> GetModelsAsync()
+    {
+        ClientResult<Internal.Models.ListModelsResponse> internalResult
+            = await Shim.GetModelsAsync().ConfigureAwait(false);
+        ChangeTrackingList<ModelDetails> modelEntries = [];
+        foreach (Internal.Models.Model internalModel in internalResult.Value.Data)
+        {
+            modelEntries.Add(new(internalModel));
+        }
+        return ClientResult.FromValue(new ModelDetailCollection(modelEntries), internalResult.GetRawResponse());
+    }
+
+    public virtual ClientResult<bool> DeleteModel(string modelId)
+    {
+        ClientResult<Internal.Models.DeleteModelResponse> internalResult = Shim.Delete(modelId);
+        return ClientResult.FromValue(internalResult.Value.Deleted, internalResult.GetRawResponse());
+    }
+
+    public virtual async Task<ClientResult<bool>> DeleteModelAsync(string modelId)
+    {
+        ClientResult<Internal.Models.DeleteModelResponse> internalResult
+            = await Shim.DeleteAsync(modelId).ConfigureAwait(false);
+        return ClientResult.FromValue(internalResult.Value.Deleted, internalResult.GetRawResponse());
+    }
+}
diff --git a/.dotnet/src/Custom/Moderations/ModerationClient.Protocol.cs b/.dotnet/src/Custom/Moderations/ModerationClient.Protocol.cs
new file mode 100644
index 000000000..ac1858316
--- /dev/null
+++ b/.dotnet/src/Custom/Moderations/ModerationClient.Protocol.cs
@@ -0,0 +1,16 @@
+using System.ClientModel;
+using System.ClientModel.Primitives;
+using System.Threading.Tasks;
+
+namespace OpenAI.Moderations;
+
+public partial class ModerationClient
+{
+    ///
+    public virtual ClientResult ClassifyText(BinaryContent content, RequestOptions options = null)
+        => Shim.CreateModeration(content, options);
+
+    ///
+    public virtual async Task<ClientResult> ClassifyTextAsync(BinaryContent content, RequestOptions options = null)
+        => await Shim.CreateModerationAsync(content, options).ConfigureAwait(false);
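+
+    // Example (illustrative sketch): the protocol-level moderation call takes raw
+    // JSON. The input text below is arbitrary.
+    //
+    //   ModerationClient client = new();
+    //   using BinaryContent body = BinaryContent.Create(BinaryData.FromString(
+    //       """{ "input": "Some text to classify." }"""));
+    //   ClientResult result = client.ClassifyText(body);
+    //   Console.WriteLine(result.GetRawResponse().Content.ToString());
+}
diff --git a/.dotnet/src/Custom/Moderations/ModerationClient.cs b/.dotnet/src/Custom/Moderations/ModerationClient.cs
new file mode 100644
index 000000000..bfda1e411
--- /dev/null
+++ b/.dotnet/src/Custom/Moderations/ModerationClient.cs
@@ -0,0 +1,32 @@
+using System.ClientModel;
+
+namespace OpenAI.Moderations;
+
+///
+/// The service client for OpenAI moderation operations.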
+///
+public partial class ModerationClient
+{
+    private readonly OpenAIClientConnector _clientConnector;
+    private Internal.Moderations Shim => _clientConnector.InternalClient.GetModerationsClient();
+
+    ///
+    /// Initializes a new instance of , used for moderation operation requests.
+    ///
+    ///
+    ///
+    /// If an endpoint is not provided, the client will use the OPENAI_ENDPOINT environment variable if it
+    /// is defined and otherwise use the default OpenAI v1 endpoint.
+    ///
+    ///
+    /// If an authentication credential is not defined, the client will use the OPENAI_API_KEY environment variable
+    /// if it is defined.
+    ///
+    ///
+    /// The API key used to authenticate with the service endpoint.
+    /// Additional options to customize the client.
+    public ModerationClient(ApiKeyCredential credential = default, OpenAIClientOptions options = default)
+    {
+        _clientConnector = new(model: null, credential, options);
+    }
+}
diff --git a/.dotnet/src/Custom/OpenAIClient.cs b/.dotnet/src/Custom/OpenAIClient.cs
new file mode 100644
index 000000000..929158d5f
--- /dev/null
+++ b/.dotnet/src/Custom/OpenAIClient.cs
@@ -0,0 +1,153 @@
+using OpenAI.Assistants;
+using OpenAI.Audio;
+using OpenAI.Chat;
+using OpenAI.Embeddings;
+using OpenAI.Files;
+using OpenAI.FineTuningManagement;
+using OpenAI.Images;
+using OpenAI.Internal.Models;
+using OpenAI.LegacyCompletions;
+using OpenAI.ModelManagement;
+using OpenAI.Moderations;
+using System;
+using System.ClientModel;
+using System.Diagnostics.CodeAnalysis;
+
+namespace OpenAI;
+
+///
+/// A top-level client factory that enables convenient creation of scenario-specific sub-clients while reusing shared
+/// configuration details like endpoint, authentication, and pipeline customization.
+///
+public partial class OpenAIClient
+{
+    private readonly ApiKeyCredential _cachedCredential = null;
+    private readonly OpenAIClientOptions _cachedOptions = null;
+
+    ///
+    /// Creates a new instance of OpenAIClient that will store common client configuration details to permit
+    /// easy reuse and propagation to multiple, scenario-specific subclients.
+    ///
+    ///
+    /// This client does not provide any model functionality directly and is purely a helper to facilitate the creation
+    /// of the scenario-specific subclients like ChatClient.
+    ///
+    /// An explicitly defined credential that all clients created by this OpenAIClient should use.
+    /// A common client options definition that all clients created by this OpenAIClient should use.
+    public OpenAIClient(ApiKeyCredential credential = default, OpenAIClientOptions clientOptions = default)
+    {
+        _cachedCredential = credential;
+        _cachedOptions = clientOptions;
+    }
+
+    ///
+    /// Gets a new instance of that reuses the client configuration details provided to
+    /// the instance.
+    ///
+    ///
+    /// This method is functionally equivalent to using the constructor directly with
+    /// the same configuration details.
+    ///
+    /// A new .
+    [Experimental("OPENAI001")]
+    public AssistantClient GetAssistantClient() => new(_cachedCredential, _cachedOptions);
+
+    ///
+    /// Gets a new instance of that reuses the client configuration details provided to
+    /// the instance.
+    ///
+    ///
+    /// This method is functionally equivalent to using the constructor directly with
+    /// the same configuration details.
+    ///
+    /// A new .
+    public AudioClient GetAudioClient(string model) => new(model, _cachedCredential, _cachedOptions);
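+
+    // Example (illustrative sketch): one top-level OpenAIClient can hand out multiple
+    // scenario clients that share the same credential and options. The model names
+    // shown are hypothetical.
+    //
+    //   OpenAIClient openAIClient = new();
+    //   ChatClient chatClient = openAIClient.GetChatClient("gpt-3.5-turbo");
+    //   AudioClient audioClient = openAIClient.GetAudioClient("whisper-1");
+
+    ///
+    /// Gets a new instance of that reuses the client configuration details provided to
+    /// the instance.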
+ /// + /// + /// This method is functionally equivalent to using the constructor directly with + /// the same configuration details. + /// + /// A new . + public ChatClient GetChatClient(string model) => new(model, _cachedCredential, _cachedOptions); + + /// + /// Gets a new instance of that reuses the client configuration details provided to + /// the instance. + /// + /// + /// This method is functionally equivalent to using the constructor directly with + /// the same configuration details. + /// + /// A new . + public EmbeddingClient GetEmbeddingClient(string model) => new(model, _cachedCredential, _cachedOptions); + + /// + /// Gets a new instance of that reuses the client configuration details provided to + /// the instance. + /// + /// + /// This method is functionally equivalent to using the constructor directly with + /// the same configuration details. + /// + /// A new . + public FileClient GetFileClient() => new(_cachedCredential, _cachedOptions); + + /// + /// Gets a new instance of that reuses the client configuration details provided to + /// the instance. + /// + /// + /// This method is functionally equivalent to using the constructor directly with + /// the same configuration details. + /// + /// A new . + public FineTuningManagementClient GetFineTuningManagementClient() => new(_cachedCredential, _cachedOptions); + + /// + /// Gets a new instance of that reuses the client configuration details provided to + /// the instance. + /// + /// + /// This method is functionally equivalent to using the constructor directly with + /// the same configuration details. + /// + /// A new . + public ImageClient GetImageClient(string model) => new(model, _cachedCredential, _cachedOptions); + + /// + /// Gets a new instance of that reuses the client configuration details provided to + /// the instance. + /// + /// + /// This method is functionally equivalent to using the constructor directly with + /// the same configuration details. + /// + /// A new . + public LegacyCompletionClient GetLegacyCompletionClient() => new(_cachedCredential, _cachedOptions); + + /// + /// Gets a new instance of that reuses the client configuration details provided to + /// the instance. + /// + /// + /// This method is functionally equivalent to using the constructor directly with + /// the same configuration details. + /// + /// A new . + public ModelManagementClient GetModelManagementClient() => new(_cachedCredential, _cachedOptions); + + /// + /// Gets a new instance of that reuses the client configuration details provided to + /// the instance. + /// + /// + /// This method is functionally equivalent to using the constructor directly with + /// the same configuration details. + /// + /// A new . + public ModerationClient GetModerationClient() => new(_cachedCredential, _cachedOptions); +} diff --git a/.dotnet/src/Custom/OpenAIClientConnector.cs b/.dotnet/src/Custom/OpenAIClientConnector.cs new file mode 100644 index 000000000..8ee47b1ee --- /dev/null +++ b/.dotnet/src/Custom/OpenAIClientConnector.cs @@ -0,0 +1,32 @@ +using System; +using System.ClientModel; +using System.ClientModel.Internal; + + +namespace OpenAI; + +// This internal type facilitates composition rather than inheritance for scenario clients. 
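+// As an illustrative sketch (not additional library surface), a scenario client
+// composes a connector roughly like this, with a hypothetical model name supplied
+// by the caller:
+//
+//   public partial class ChatClient
+//   {
+//       private readonly OpenAIClientConnector _clientConnector;
+//
+//       public ChatClient(string model, ApiKeyCredential credential = null, OpenAIClientOptions options = null)
+//       {
+//           _clientConnector = new(model, credential, options);
+//       }
+//   }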
+
+internal partial class OpenAIClientConnector
+{
+    private static readonly string s_OpenAIEndpointEnvironmentVariable = "OPENAI_ENDPOINT";
+    private static readonly string s_OpenAIApiKeyEnvironmentVariable = "OPENAI_API_KEY";
+    private static readonly string s_defaultOpenAIV1Endpoint = "https://api.openai.com/v1";
+
+    internal Internal.OpenAIClient InternalClient { get; }
+    internal string Model { get; }
+    internal Uri Endpoint { get; }
+
+    internal OpenAIClientConnector(
+        string model,
+        ApiKeyCredential credential = null,
+        OpenAIClientOptions options = null)
+    {
+        if (model is null) throw new ArgumentNullException(nameof(model));
+        Model = model;
+        Endpoint = options?.Endpoint ?? new(Environment.GetEnvironmentVariable(s_OpenAIEndpointEnvironmentVariable) ?? s_defaultOpenAIV1Endpoint);
+        credential ??= new(Environment.GetEnvironmentVariable(s_OpenAIApiKeyEnvironmentVariable) ?? string.Empty);
+        options ??= new();
+        InternalClient = new(Endpoint, credential, options.InternalOptions);
+    }
+}
diff --git a/.dotnet/src/Custom/OpenAIClientOptions.cs b/.dotnet/src/Custom/OpenAIClientOptions.cs
new file mode 100644
index 000000000..8234691b5
--- /dev/null
+++ b/.dotnet/src/Custom/OpenAIClientOptions.cs
@@ -0,0 +1,38 @@
+using System;
+using System.ClientModel;
+using System.ClientModel.Primitives;
+using System.Threading;
+
+namespace OpenAI;
+
+///
+/// Client-level options for the OpenAI service.
+///
+public partial class OpenAIClientOptions : RequestOptions
+{
+    ///
+    /// Gets or sets a non-default base endpoint that clients should use when connecting.
+    ///
+    public Uri Endpoint { get; set; }
+
+    // Note: this type currently proxies RequestOptions properties manually via the matching internal type. This is a
+    // temporary extra step pending richer integration with code generation.
+
+    internal Internal.OpenAIClientOptions InternalOptions { get; }
+
+    public new void AddPolicy(PipelinePolicy policy, PipelinePosition position)
+    {
+        InternalOptions.AddPolicy(policy, position);
+    }
+
+    public OpenAIClientOptions()
+        : this(internalOptions: null)
+    { }
+
+    internal OpenAIClientOptions(Internal.OpenAIClientOptions internalOptions = null)
+    {
+        internalOptions ??= new();
+        InternalOptions = internalOptions;
+    }
+}
diff --git a/.dotnet/src/Directory.Build.props b/.dotnet/src/Directory.Build.props
new file mode 100644
index 000000000..21f972465
--- /dev/null
+++ b/.dotnet/src/Directory.Build.props
@@ -0,0 +1,8 @@
+
+  
+    0.1.0
+    beta.1
+    true
+    OpenAI.snk
+  
+
\ No newline at end of file
diff --git a/.dotnet/src/Generated/Assistants.cs b/.dotnet/src/Generated/Assistants.cs
new file mode 100644
index 000000000..2da58ef6e
--- /dev/null
+++ b/.dotnet/src/Generated/Assistants.cs
@@ -0,0 +1,1394 @@
+//
+
+using System;
+using System.ClientModel;
+using System.ClientModel.Primitives;
+using System.Text;
+using System.Threading;
+using System.Threading.Tasks;
+using OpenAI.Internal.Models;
+
+namespace OpenAI.Internal
+{
+    // Data plane generated sub-client.
+    /// The Assistants sub-client.
+    internal partial class Assistants
+    {
+        private const string AuthorizationHeader = "Authorization";
+        private readonly ApiKeyCredential _credential;
+        private const string AuthorizationApiKeyPrefix = "Bearer";
+        private readonly ClientPipeline _pipeline;
+        private readonly Uri _endpoint;
+
+        /// The HTTP pipeline for sending and receiving REST requests and responses.
+        public virtual ClientPipeline Pipeline => _pipeline;
+
+        /// Initializes a new instance of Assistants for mocking.
+ protected Assistants() + { + } + + /// Initializes a new instance of Assistants. + /// The HTTP pipeline for sending and receiving REST requests and responses. + /// The key credential to copy. + /// OpenAI Endpoint. + internal Assistants(ClientPipeline pipeline, ApiKeyCredential credential, Uri endpoint) + { + _pipeline = pipeline; + _credential = credential; + _endpoint = endpoint; + } + + /// Create an assistant with a model and instructions. + /// The to use. + /// is null. + public virtual async Task> CreateAssistantAsync(CreateAssistantRequest assistant) + { + Argument.AssertNotNull(assistant, nameof(assistant)); + + using BinaryContent content = BinaryContent.Create(assistant); + ClientResult result = await CreateAssistantAsync(content, DefaultRequestContext).ConfigureAwait(false); + return ClientResult.FromValue(AssistantObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Create an assistant with a model and instructions. + /// The to use. + /// is null. + public virtual ClientResult CreateAssistant(CreateAssistantRequest assistant) + { + Argument.AssertNotNull(assistant, nameof(assistant)); + + using BinaryContent content = BinaryContent.Create(assistant); + ClientResult result = CreateAssistant(content, DefaultRequestContext); + return ClientResult.FromValue(AssistantObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Create an assistant with a model and instructions. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The content to send as the body of the request. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task CreateAssistantAsync(BinaryContent content, RequestOptions options = null) + { + Argument.AssertNotNull(content, nameof(content)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("Assistants.CreateAssistant"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateCreateAssistantRequest(content, options); + return ClientResult.FromResponse(await _pipeline.ProcessMessageAsync(message, options).ConfigureAwait(false)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Create an assistant with a model and instructions. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The content to send as the body of the request. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// Service returned a non-success status code. + /// The response returned from the service. 
+ public virtual ClientResult CreateAssistant(BinaryContent content, RequestOptions options = null) + { + Argument.AssertNotNull(content, nameof(content)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("Assistants.CreateAssistant"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateCreateAssistantRequest(content, options); + return ClientResult.FromResponse(_pipeline.ProcessMessage(message, options)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// Returns a list of assistants. + /// + /// A limit on the number of objects to be returned. Limit can range between 1 and 100, and the + /// default is 20. + /// + /// + /// Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` + /// for descending order. + /// + /// + /// A cursor for use in pagination. `after` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include after=obj_foo in order to fetch the next page of the list. + /// + /// + /// A cursor for use in pagination. `before` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include before=obj_foo in order to fetch the previous page of the list. + /// + public virtual async Task> GetAssistantsAsync(int? limit = null, ListOrder? order = null, string after = null, string before = null) + { + ClientResult result = await GetAssistantsAsync(limit, order?.ToString(), after, before, DefaultRequestContext).ConfigureAwait(false); + return ClientResult.FromValue(ListAssistantsResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Returns a list of assistants. + /// + /// A limit on the number of objects to be returned. Limit can range between 1 and 100, and the + /// default is 20. + /// + /// + /// Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` + /// for descending order. + /// + /// + /// A cursor for use in pagination. `after` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include after=obj_foo in order to fetch the next page of the list. + /// + /// + /// A cursor for use in pagination. `before` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include before=obj_foo in order to fetch the previous page of the list. + /// + public virtual ClientResult GetAssistants(int? limit = null, ListOrder? order = null, string after = null, string before = null) + { + ClientResult result = GetAssistants(limit, order?.ToString(), after, before, DefaultRequestContext); + return ClientResult.FromValue(ListAssistantsResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Returns a list of assistants. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// + /// A limit on the number of objects to be returned. 
Limit can range between 1 and 100, and the + /// default is 20. + /// + /// + /// Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` + /// for descending order. Allowed values: "asc" | "desc" + /// + /// + /// A cursor for use in pagination. `after` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include after=obj_foo in order to fetch the next page of the list. + /// + /// + /// A cursor for use in pagination. `before` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include before=obj_foo in order to fetch the previous page of the list. + /// + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task GetAssistantsAsync(int? limit, string order, string after, string before, RequestOptions options) + { + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("Assistants.GetAssistants"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateGetAssistantsRequest(limit, order, after, before, options); + return ClientResult.FromResponse(await _pipeline.ProcessMessageAsync(message, options).ConfigureAwait(false)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Returns a list of assistants. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// + /// A limit on the number of objects to be returned. Limit can range between 1 and 100, and the + /// default is 20. + /// + /// + /// Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` + /// for descending order. Allowed values: "asc" | "desc" + /// + /// + /// A cursor for use in pagination. `after` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include after=obj_foo in order to fetch the next page of the list. + /// + /// + /// A cursor for use in pagination. `before` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include before=obj_foo in order to fetch the previous page of the list. + /// + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual ClientResult GetAssistants(int? 
limit, string order, string after, string before, RequestOptions options) + { + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("Assistants.GetAssistants"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateGetAssistantsRequest(limit, order, after, before, options); + return ClientResult.FromResponse(_pipeline.ProcessMessage(message, options)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// Retrieves an assistant. + /// The ID of the assistant to retrieve. + /// is null. + /// is an empty string, and was expected to be non-empty. + public virtual async Task> GetAssistantAsync(string assistantId) + { + Argument.AssertNotNullOrEmpty(assistantId, nameof(assistantId)); + + ClientResult result = await GetAssistantAsync(assistantId, DefaultRequestContext).ConfigureAwait(false); + return ClientResult.FromValue(AssistantObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Retrieves an assistant. + /// The ID of the assistant to retrieve. + /// is null. + /// is an empty string, and was expected to be non-empty. + public virtual ClientResult GetAssistant(string assistantId) + { + Argument.AssertNotNullOrEmpty(assistantId, nameof(assistantId)); + + ClientResult result = GetAssistant(assistantId, DefaultRequestContext); + return ClientResult.FromValue(AssistantObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Retrieves an assistant. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the assistant to retrieve. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task GetAssistantAsync(string assistantId, RequestOptions options) + { + Argument.AssertNotNullOrEmpty(assistantId, nameof(assistantId)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("Assistants.GetAssistant"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateGetAssistantRequest(assistantId, options); + return ClientResult.FromResponse(await _pipeline.ProcessMessageAsync(message, options).ConfigureAwait(false)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Retrieves an assistant. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the assistant to retrieve. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. 
+ public virtual ClientResult GetAssistant(string assistantId, RequestOptions options) + { + Argument.AssertNotNullOrEmpty(assistantId, nameof(assistantId)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("Assistants.GetAssistant"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateGetAssistantRequest(assistantId, options); + return ClientResult.FromResponse(_pipeline.ProcessMessage(message, options)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// Modifies an assistant. + /// The ID of the assistant to modify. + /// The to use. + /// or is null. + /// is an empty string, and was expected to be non-empty. + public virtual async Task> ModifyAssistantAsync(string assistantId, ModifyAssistantRequest assistant) + { + Argument.AssertNotNullOrEmpty(assistantId, nameof(assistantId)); + Argument.AssertNotNull(assistant, nameof(assistant)); + + using BinaryContent content = BinaryContent.Create(assistant); + ClientResult result = await ModifyAssistantAsync(assistantId, content, DefaultRequestContext).ConfigureAwait(false); + return ClientResult.FromValue(AssistantObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Modifies an assistant. + /// The ID of the assistant to modify. + /// The to use. + /// or is null. + /// is an empty string, and was expected to be non-empty. + public virtual ClientResult ModifyAssistant(string assistantId, ModifyAssistantRequest assistant) + { + Argument.AssertNotNullOrEmpty(assistantId, nameof(assistantId)); + Argument.AssertNotNull(assistant, nameof(assistant)); + + using BinaryContent content = BinaryContent.Create(assistant); + ClientResult result = ModifyAssistant(assistantId, content, DefaultRequestContext); + return ClientResult.FromValue(AssistantObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Modifies an assistant. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the assistant to modify. + /// The content to send as the body of the request. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task ModifyAssistantAsync(string assistantId, BinaryContent content, RequestOptions options = null) + { + Argument.AssertNotNullOrEmpty(assistantId, nameof(assistantId)); + Argument.AssertNotNull(content, nameof(content)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("Assistants.ModifyAssistant"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateModifyAssistantRequest(assistantId, content, options); + return ClientResult.FromResponse(await _pipeline.ProcessMessageAsync(message, options).ConfigureAwait(false)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Modifies an assistant. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. 
+ /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the assistant to modify. + /// The content to send as the body of the request. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual ClientResult ModifyAssistant(string assistantId, BinaryContent content, RequestOptions options = null) + { + Argument.AssertNotNullOrEmpty(assistantId, nameof(assistantId)); + Argument.AssertNotNull(content, nameof(content)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("Assistants.ModifyAssistant"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateModifyAssistantRequest(assistantId, content, options); + return ClientResult.FromResponse(_pipeline.ProcessMessage(message, options)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// Delete an assistant. + /// The ID of the assistant to delete. + /// is null. + /// is an empty string, and was expected to be non-empty. + public virtual async Task> DeleteAssistantAsync(string assistantId) + { + Argument.AssertNotNullOrEmpty(assistantId, nameof(assistantId)); + + ClientResult result = await DeleteAssistantAsync(assistantId, DefaultRequestContext).ConfigureAwait(false); + return ClientResult.FromValue(DeleteAssistantResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Delete an assistant. + /// The ID of the assistant to delete. + /// is null. + /// is an empty string, and was expected to be non-empty. + public virtual ClientResult DeleteAssistant(string assistantId) + { + Argument.AssertNotNullOrEmpty(assistantId, nameof(assistantId)); + + ClientResult result = DeleteAssistant(assistantId, DefaultRequestContext); + return ClientResult.FromValue(DeleteAssistantResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Delete an assistant. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the assistant to delete. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task DeleteAssistantAsync(string assistantId, RequestOptions options) + { + Argument.AssertNotNullOrEmpty(assistantId, nameof(assistantId)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("Assistants.DeleteAssistant"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateDeleteAssistantRequest(assistantId, options); + return ClientResult.FromResponse(await _pipeline.ProcessMessageAsync(message, options).ConfigureAwait(false)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Delete an assistant. 
+ ///
+ ///
+ ///
+ /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios.
+ ///
+ ///
+ ///
+ ///
+ /// Please try the simpler convenience overload with strongly typed models first.
+ ///
+ ///
+ ///
+ ///
+ /// The ID of the assistant to delete.
+ /// The request options, which can override default behaviors of the client pipeline on a per-call basis.
+ /// is null.
+ /// is an empty string, and was expected to be non-empty.
+ /// Service returned a non-success status code.
+ /// The response returned from the service.
+ public virtual ClientResult DeleteAssistant(string assistantId, RequestOptions options)
+ {
+ Argument.AssertNotNullOrEmpty(assistantId, nameof(assistantId));
+
+ options ??= new RequestOptions();
+ // using var scope = ClientDiagnostics.CreateSpan("Assistants.DeleteAssistant");
+ // scope.Start();
+ try
+ {
+ using PipelineMessage message = CreateDeleteAssistantRequest(assistantId, options);
+ return ClientResult.FromResponse(_pipeline.ProcessMessage(message, options));
+ }
+ catch (Exception e)
+ {
+ // scope.Failed(e);
+ throw;
+ }
+ }
+
+ ///
+ /// Create an assistant file by attaching a [File](/docs/api-reference/files) to an
+ /// [assistant](/docs/api-reference/assistants).
+ ///
+ /// The ID of the assistant for which to create a file.
+ /// The to use.
+ /// or is null.
+ /// is an empty string, and was expected to be non-empty.
+ public virtual async Task> CreateAssistantFileAsync(string assistantId, CreateAssistantFileRequest file)
+ {
+ Argument.AssertNotNullOrEmpty(assistantId, nameof(assistantId));
+ Argument.AssertNotNull(file, nameof(file));
+
+ using BinaryContent content = BinaryContent.Create(file);
+ ClientResult result = await CreateAssistantFileAsync(assistantId, content, DefaultRequestContext).ConfigureAwait(false);
+ return ClientResult.FromValue(AssistantFileObject.FromResponse(result.GetRawResponse()), result.GetRawResponse());
+ }
+
+ ///
+ /// Create an assistant file by attaching a [File](/docs/api-reference/files) to an
+ /// [assistant](/docs/api-reference/assistants).
+ ///
+ /// The ID of the assistant for which to create a file.
+ /// The to use.
+ /// or is null.
+ /// is an empty string, and was expected to be non-empty.
+ public virtual ClientResult CreateAssistantFile(string assistantId, CreateAssistantFileRequest file)
+ {
+ Argument.AssertNotNullOrEmpty(assistantId, nameof(assistantId));
+ Argument.AssertNotNull(file, nameof(file));
+
+ using BinaryContent content = BinaryContent.Create(file);
+ ClientResult result = CreateAssistantFile(assistantId, content, DefaultRequestContext);
+ return ClientResult.FromValue(AssistantFileObject.FromResponse(result.GetRawResponse()), result.GetRawResponse());
+ }
+
+ ///
+ /// [Protocol Method] Create an assistant file by attaching a [File](/docs/api-reference/files) to an
+ /// [assistant](/docs/api-reference/assistants).
+ ///
+ ///
+ ///
+ /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios.
+ ///
+ ///
+ ///
+ ///
+ /// Please try the simpler convenience overload with strongly typed models first.
+ ///
+ ///
+ ///
+ ///
+ /// The ID of the assistant for which to create a file.
+ /// The content to send as the body of the request.
+ /// The request options, which can override default behaviors of the client pipeline on a per-call basis.
+ /// or is null.
+ /// is an empty string, and was expected to be non-empty.
+ /// Service returned a non-success status code.
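Every convenience overload in this file follows the same shape: serialize the model with `BinaryContent.Create`, call the protocol overload, then project the raw response into a typed `ClientResult`. The same projection spelled out by hand for an assistant file, where `assistants` and the already-serialized `body` are assumptions for illustration:

```csharp
using System.ClientModel;

// The projection the generated convenience overloads perform internally.
ClientResult raw = await assistants.CreateAssistantFileAsync("asst_abc123", body);
AssistantFileObject file = AssistantFileObject.FromResponse(raw.GetRawResponse());
ClientResult<AssistantFileObject> typed = ClientResult.FromValue(file, raw.GetRawResponse());
```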
+ /// The response returned from the service.
+ public virtual async Task CreateAssistantFileAsync(string assistantId, BinaryContent content, RequestOptions options = null)
+ {
+ Argument.AssertNotNullOrEmpty(assistantId, nameof(assistantId));
+ Argument.AssertNotNull(content, nameof(content));
+
+ options ??= new RequestOptions();
+ // using var scope = ClientDiagnostics.CreateSpan("Assistants.CreateAssistantFile");
+ // scope.Start();
+ try
+ {
+ using PipelineMessage message = CreateCreateAssistantFileRequest(assistantId, content, options);
+ return ClientResult.FromResponse(await _pipeline.ProcessMessageAsync(message, options).ConfigureAwait(false));
+ }
+ catch (Exception e)
+ {
+ // scope.Failed(e);
+ throw;
+ }
+ }
+
+ ///
+ /// [Protocol Method] Create an assistant file by attaching a [File](/docs/api-reference/files) to an
+ /// [assistant](/docs/api-reference/assistants).
+ ///
+ ///
+ ///
+ /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios.
+ ///
+ ///
+ ///
+ ///
+ /// Please try the simpler convenience overload with strongly typed models first.
+ ///
+ ///
+ ///
+ ///
+ /// The ID of the assistant for which to create a file.
+ /// The content to send as the body of the request.
+ /// The request options, which can override default behaviors of the client pipeline on a per-call basis.
+ /// or is null.
+ /// is an empty string, and was expected to be non-empty.
+ /// Service returned a non-success status code.
+ /// The response returned from the service.
+ public virtual ClientResult CreateAssistantFile(string assistantId, BinaryContent content, RequestOptions options = null)
+ {
+ Argument.AssertNotNullOrEmpty(assistantId, nameof(assistantId));
+ Argument.AssertNotNull(content, nameof(content));
+
+ options ??= new RequestOptions();
+ // using var scope = ClientDiagnostics.CreateSpan("Assistants.CreateAssistantFile");
+ // scope.Start();
+ try
+ {
+ using PipelineMessage message = CreateCreateAssistantFileRequest(assistantId, content, options);
+ return ClientResult.FromResponse(_pipeline.ProcessMessage(message, options));
+ }
+ catch (Exception e)
+ {
+ // scope.Failed(e);
+ throw;
+ }
+ }
+
+ /// Returns a list of assistant files.
+ /// The ID of the assistant the file belongs to.
+ ///
+ /// A limit on the number of objects to be returned. Limit can range between 1 and 100, and the
+ /// default is 20.
+ ///
+ ///
+ /// Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc`
+ /// for descending order.
+ ///
+ ///
+ /// A cursor for use in pagination. `after` is an object ID that defines your place in the list.
+ /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your
+ /// subsequent call can include after=obj_foo in order to fetch the next page of the list.
+ ///
+ ///
+ /// A cursor for use in pagination. `before` is an object ID that defines your place in the list.
+ /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your
+ /// subsequent call can include before=obj_foo in order to fetch the previous page of the list.
+ ///
+ /// is null.
+ /// is an empty string, and was expected to be non-empty.
+ public virtual async Task> GetAssistantFilesAsync(string assistantId, int? limit = null, ListOrder? order = null, string after = null, string before = null)
+ {
+ Argument.AssertNotNullOrEmpty(assistantId, nameof(assistantId));
+
+ ClientResult result = await GetAssistantFilesAsync(assistantId, limit, order?.ToString(), after, before, DefaultRequestContext).ConfigureAwait(false);
+ return ClientResult.FromValue(ListAssistantFilesResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse());
+ }
+
+ /// Returns a list of assistant files.
+ /// The ID of the assistant the file belongs to.
+ ///
+ /// A limit on the number of objects to be returned. Limit can range between 1 and 100, and the
+ /// default is 20.
+ ///
+ ///
+ /// Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc`
+ /// for descending order.
+ ///
+ ///
+ /// A cursor for use in pagination. `after` is an object ID that defines your place in the list.
+ /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your
+ /// subsequent call can include after=obj_foo in order to fetch the next page of the list.
+ ///
+ ///
+ /// A cursor for use in pagination. `before` is an object ID that defines your place in the list.
+ /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your
+ /// subsequent call can include before=obj_foo in order to fetch the previous page of the list.
+ ///
+ /// is null.
+ /// is an empty string, and was expected to be non-empty.
+ public virtual ClientResult GetAssistantFiles(string assistantId, int? limit = null, ListOrder? order = null, string after = null, string before = null)
+ {
+ Argument.AssertNotNullOrEmpty(assistantId, nameof(assistantId));
+
+ ClientResult result = GetAssistantFiles(assistantId, limit, order?.ToString(), after, before, DefaultRequestContext);
+ return ClientResult.FromValue(ListAssistantFilesResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse());
+ }
+
+ ///
+ /// [Protocol Method] Returns a list of assistant files.
+ ///
+ ///
+ ///
+ /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios.
+ ///
+ ///
+ ///
+ ///
+ /// Please try the simpler convenience overload with strongly typed models first.
+ ///
+ ///
+ ///
+ ///
+ /// The ID of the assistant the file belongs to.
+ ///
+ /// A limit on the number of objects to be returned. Limit can range between 1 and 100, and the
+ /// default is 20.
+ ///
+ ///
+ /// Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc`
+ /// for descending order. Allowed values: "asc" | "desc"
+ ///
+ ///
+ /// A cursor for use in pagination. `after` is an object ID that defines your place in the list.
+ /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your
+ /// subsequent call can include after=obj_foo in order to fetch the next page of the list.
+ ///
+ ///
+ /// A cursor for use in pagination. `before` is an object ID that defines your place in the list.
+ /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your
+ /// subsequent call can include before=obj_foo in order to fetch the previous page of the list.
+ ///
+ /// The request options, which can override default behaviors of the client pipeline on a per-call basis.
+ /// is null.
+ /// is an empty string, and was expected to be non-empty.
+ /// Service returned a non-success status code.
+ /// The response returned from the service.
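The `limit`/`order`/`after`/`before` parameters implement cursor pagination: to walk the whole list, feed the ID of the last item of each page back in as `after`. A sketch, with the caveat that `Data`, `HasMore`, and `LastId` are assumed names for properties on the generated `ListAssistantFilesResponse` model:

```csharp
using System.ClientModel;

// Cursor-pagination sketch; model property names are assumptions.
string after = null;
bool hasMore = true;
while (hasMore)
{
    ClientResult<ListAssistantFilesResponse> page =
        await assistants.GetAssistantFilesAsync("asst_abc123", limit: 20, after: after);
    foreach (AssistantFileObject file in page.Value.Data)
    {
        Console.WriteLine(file.Id);
    }
    hasMore = page.Value.HasMore; // assumed property
    after = page.Value.LastId;    // assumed property
}
```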
+ public virtual async Task GetAssistantFilesAsync(string assistantId, int? limit, string order, string after, string before, RequestOptions options)
+ {
+ Argument.AssertNotNullOrEmpty(assistantId, nameof(assistantId));
+
+ options ??= new RequestOptions();
+ // using var scope = ClientDiagnostics.CreateSpan("Assistants.GetAssistantFiles");
+ // scope.Start();
+ try
+ {
+ using PipelineMessage message = CreateGetAssistantFilesRequest(assistantId, limit, order, after, before, options);
+ return ClientResult.FromResponse(await _pipeline.ProcessMessageAsync(message, options).ConfigureAwait(false));
+ }
+ catch (Exception e)
+ {
+ // scope.Failed(e);
+ throw;
+ }
+ }
+
+ ///
+ /// [Protocol Method] Returns a list of assistant files.
+ ///
+ ///
+ ///
+ /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios.
+ ///
+ ///
+ ///
+ ///
+ /// Please try the simpler convenience overload with strongly typed models first.
+ ///
+ ///
+ ///
+ ///
+ /// The ID of the assistant the file belongs to.
+ ///
+ /// A limit on the number of objects to be returned. Limit can range between 1 and 100, and the
+ /// default is 20.
+ ///
+ ///
+ /// Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc`
+ /// for descending order. Allowed values: "asc" | "desc"
+ ///
+ ///
+ /// A cursor for use in pagination. `after` is an object ID that defines your place in the list.
+ /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your
+ /// subsequent call can include after=obj_foo in order to fetch the next page of the list.
+ ///
+ ///
+ /// A cursor for use in pagination. `before` is an object ID that defines your place in the list.
+ /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your
+ /// subsequent call can include before=obj_foo in order to fetch the previous page of the list.
+ ///
+ /// The request options, which can override default behaviors of the client pipeline on a per-call basis.
+ /// is null.
+ /// is an empty string, and was expected to be non-empty.
+ /// Service returned a non-success status code.
+ /// The response returned from the service.
+ public virtual ClientResult GetAssistantFiles(string assistantId, int? limit, string order, string after, string before, RequestOptions options)
+ {
+ Argument.AssertNotNullOrEmpty(assistantId, nameof(assistantId));
+
+ options ??= new RequestOptions();
+ // using var scope = ClientDiagnostics.CreateSpan("Assistants.GetAssistantFiles");
+ // scope.Start();
+ try
+ {
+ using PipelineMessage message = CreateGetAssistantFilesRequest(assistantId, limit, order, after, before, options);
+ return ClientResult.FromResponse(_pipeline.ProcessMessage(message, options));
+ }
+ catch (Exception e)
+ {
+ // scope.Failed(e);
+ throw;
+ }
+ }
+
+ /// Retrieves an assistant file.
+ /// The ID of the assistant the file belongs to.
+ /// The ID of the file we're getting.
+ /// or is null.
+ /// or is an empty string, and was expected to be non-empty.
+ public virtual async Task> GetAssistantFileAsync(string assistantId, string fileId) + { + Argument.AssertNotNullOrEmpty(assistantId, nameof(assistantId)); + Argument.AssertNotNullOrEmpty(fileId, nameof(fileId)); + + ClientResult result = await GetAssistantFileAsync(assistantId, fileId, DefaultRequestContext).ConfigureAwait(false); + return ClientResult.FromValue(AssistantFileObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Retrieves an assistant file. + /// The ID of the assistant the file belongs to. + /// The ID of the file we're getting. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + public virtual ClientResult GetAssistantFile(string assistantId, string fileId) + { + Argument.AssertNotNullOrEmpty(assistantId, nameof(assistantId)); + Argument.AssertNotNullOrEmpty(fileId, nameof(fileId)); + + ClientResult result = GetAssistantFile(assistantId, fileId, DefaultRequestContext); + return ClientResult.FromValue(AssistantFileObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Retrieves an assistant file. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the assistant the file belongs to. + /// The ID of the file we're getting. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task GetAssistantFileAsync(string assistantId, string fileId, RequestOptions options) + { + Argument.AssertNotNullOrEmpty(assistantId, nameof(assistantId)); + Argument.AssertNotNullOrEmpty(fileId, nameof(fileId)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("Assistants.GetAssistantFile"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateGetAssistantFileRequest(assistantId, fileId, options); + return ClientResult.FromResponse(await _pipeline.ProcessMessageAsync(message, options).ConfigureAwait(false)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Retrieves an assistant file. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the assistant the file belongs to. + /// The ID of the file we're getting. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. 
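Typed results keep the underlying `PipelineResponse` reachable through `GetRawResponse()`, so HTTP details stay available next to the deserialized value. A small sketch; the `Id` property on `AssistantFileObject` is an assumption about the generated model:

```csharp
using System.ClientModel;
using System.ClientModel.Primitives;

ClientResult<AssistantFileObject> result =
    await assistants.GetAssistantFileAsync("asst_abc123", "file-abc123");
PipelineResponse raw = result.GetRawResponse();
Console.WriteLine($"HTTP {raw.Status}: retrieved {result.Value.Id}"); // Id is assumed
```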
+ public virtual ClientResult GetAssistantFile(string assistantId, string fileId, RequestOptions options) + { + Argument.AssertNotNullOrEmpty(assistantId, nameof(assistantId)); + Argument.AssertNotNullOrEmpty(fileId, nameof(fileId)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("Assistants.GetAssistantFile"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateGetAssistantFileRequest(assistantId, fileId, options); + return ClientResult.FromResponse(_pipeline.ProcessMessage(message, options)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// Delete an assistant file. + /// The ID of the assistant the file belongs to. + /// The ID of the file to delete. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + public virtual async Task> DeleteAssistantFileAsync(string assistantId, string fileId) + { + Argument.AssertNotNullOrEmpty(assistantId, nameof(assistantId)); + Argument.AssertNotNullOrEmpty(fileId, nameof(fileId)); + + ClientResult result = await DeleteAssistantFileAsync(assistantId, fileId, DefaultRequestContext).ConfigureAwait(false); + return ClientResult.FromValue(DeleteAssistantFileResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Delete an assistant file. + /// The ID of the assistant the file belongs to. + /// The ID of the file to delete. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + public virtual ClientResult DeleteAssistantFile(string assistantId, string fileId) + { + Argument.AssertNotNullOrEmpty(assistantId, nameof(assistantId)); + Argument.AssertNotNullOrEmpty(fileId, nameof(fileId)); + + ClientResult result = DeleteAssistantFile(assistantId, fileId, DefaultRequestContext); + return ClientResult.FromValue(DeleteAssistantFileResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Delete an assistant file. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the assistant the file belongs to. + /// The ID of the file to delete. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task DeleteAssistantFileAsync(string assistantId, string fileId, RequestOptions options) + { + Argument.AssertNotNullOrEmpty(assistantId, nameof(assistantId)); + Argument.AssertNotNullOrEmpty(fileId, nameof(fileId)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("Assistants.DeleteAssistantFile"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateDeleteAssistantFileRequest(assistantId, fileId, options); + return ClientResult.FromResponse(await _pipeline.ProcessMessageAsync(message, options).ConfigureAwait(false)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Delete an assistant file. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. 
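The request factories later in this file append query parameters with a repeated if/else over `UriBuilder.Query`. An equivalent helper is sketched below; it is an illustrative refactor rather than generated code, and it adds the URI escaping the generated pattern omits (the `Query` getter includes the leading `?`, hence the trim):

```csharp
// Illustrative refactor of the query-append pattern used by the
// CreateGet*Request factories below; not part of the generated code.
static void AppendQuery(UriBuilder builder, string name, string value)
{
    if (value is null) return;
    string pair = $"{name}={Uri.EscapeDataString(value)}";
    builder.Query = builder.Query is { Length: > 1 } q
        ? $"{q.Substring(1)}&{pair}" // getter returns "?existing"; drop the "?"
        : pair;                      // setter re-adds the leading "?"
}
```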
+ /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the assistant the file belongs to. + /// The ID of the file to delete. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual ClientResult DeleteAssistantFile(string assistantId, string fileId, RequestOptions options) + { + Argument.AssertNotNullOrEmpty(assistantId, nameof(assistantId)); + Argument.AssertNotNullOrEmpty(fileId, nameof(fileId)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("Assistants.DeleteAssistantFile"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateDeleteAssistantFileRequest(assistantId, fileId, options); + return ClientResult.FromResponse(_pipeline.ProcessMessage(message, options)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + internal PipelineMessage CreateCreateAssistantRequest(BinaryContent content, RequestOptions options) + { + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "POST"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/assistants"); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); + request.Headers.Set("Content-Type", "application/json"); + request.Content = content; + message.Apply(options); + return message; + } + + internal PipelineMessage CreateGetAssistantsRequest(int? 
limit, string order, string after, string before, RequestOptions options) + { + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "GET"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/assistants"); + if (limit != null) + { + if (uriBuilder.Query != null && uriBuilder.Query.Length > 1) + { + uriBuilder.Query += $"&limit={limit.Value}"; + } + else + { + uriBuilder.Query = $"limit={limit.Value}"; + } + } + if (order != null) + { + if (uriBuilder.Query != null && uriBuilder.Query.Length > 1) + { + uriBuilder.Query += $"&order={order}"; + } + else + { + uriBuilder.Query = $"order={order}"; + } + } + if (after != null) + { + if (uriBuilder.Query != null && uriBuilder.Query.Length > 1) + { + uriBuilder.Query += $"&after={after}"; + } + else + { + uriBuilder.Query = $"after={after}"; + } + } + if (before != null) + { + if (uriBuilder.Query != null && uriBuilder.Query.Length > 1) + { + uriBuilder.Query += $"&before={before}"; + } + else + { + uriBuilder.Query = $"before={before}"; + } + } + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); + return message; + } + + internal PipelineMessage CreateGetAssistantRequest(string assistantId, RequestOptions options) + { + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "GET"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/assistants/"); + path.Append(assistantId); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); + return message; + } + + internal PipelineMessage CreateModifyAssistantRequest(string assistantId, BinaryContent content, RequestOptions options) + { + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "POST"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/assistants/"); + path.Append(assistantId); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); + request.Headers.Set("Content-Type", "application/json"); + request.Content = content; + message.Apply(options); + return message; + } + + internal PipelineMessage CreateDeleteAssistantRequest(string assistantId, RequestOptions options) + { + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "DELETE"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/assistants/"); + path.Append(assistantId); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); + return message; + } + + internal PipelineMessage CreateCreateAssistantFileRequest(string assistantId, BinaryContent content, RequestOptions options) + { + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "POST"; + UriBuilder uriBuilder = 
new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/assistants/"); + path.Append(assistantId); + path.Append("/files"); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); + request.Headers.Set("Content-Type", "application/json"); + request.Content = content; + message.Apply(options); + return message; + } + + internal PipelineMessage CreateGetAssistantFilesRequest(string assistantId, int? limit, string order, string after, string before, RequestOptions options) + { + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "GET"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/assistants/"); + path.Append(assistantId); + path.Append("/files"); + if (limit != null) + { + if (uriBuilder.Query != null && uriBuilder.Query.Length > 1) + { + uriBuilder.Query += $"&limit={limit.Value}"; + } + else + { + uriBuilder.Query = $"limit={limit.Value}"; + } + } + if (order != null) + { + if (uriBuilder.Query != null && uriBuilder.Query.Length > 1) + { + uriBuilder.Query += $"&order={order}"; + } + else + { + uriBuilder.Query = $"order={order}"; + } + } + if (after != null) + { + if (uriBuilder.Query != null && uriBuilder.Query.Length > 1) + { + uriBuilder.Query += $"&after={after}"; + } + else + { + uriBuilder.Query = $"after={after}"; + } + } + if (before != null) + { + if (uriBuilder.Query != null && uriBuilder.Query.Length > 1) + { + uriBuilder.Query += $"&before={before}"; + } + else + { + uriBuilder.Query = $"before={before}"; + } + } + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); + return message; + } + + internal PipelineMessage CreateGetAssistantFileRequest(string assistantId, string fileId, RequestOptions options) + { + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "GET"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/assistants/"); + path.Append(assistantId); + path.Append("/files/"); + path.Append(fileId); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); + return message; + } + + internal PipelineMessage CreateDeleteAssistantFileRequest(string assistantId, string fileId, RequestOptions options) + { + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "DELETE"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/assistants/"); + path.Append(assistantId); + path.Append("/files/"); + path.Append(fileId); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); + return message; + } + + private static RequestOptions DefaultRequestContext = new RequestOptions(); + + private static PipelineMessageClassifier _responseErrorClassifier200; + private static PipelineMessageClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= PipelineMessageClassifier.Create(stackalloc ushort[] { 200 }); + } +} diff --git a/.dotnet/src/Generated/Audio.cs b/.dotnet/src/Generated/Audio.cs 
new file mode 100644 index 000000000..2b73c9657 --- /dev/null +++ b/.dotnet/src/Generated/Audio.cs @@ -0,0 +1,407 @@ +// + +using System; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Text; +using System.Threading; +using System.Threading.Tasks; +using OpenAI.Internal.Models; + +namespace OpenAI.Internal +{ + // Data plane generated sub-client. + /// The Audio sub-client. + internal partial class Audio + { + private const string AuthorizationHeader = "Authorization"; + private readonly ApiKeyCredential _credential; + private const string AuthorizationApiKeyPrefix = "Bearer"; + private readonly ClientPipeline _pipeline; + private readonly Uri _endpoint; + + /// The HTTP pipeline for sending and receiving REST requests and responses. + public virtual ClientPipeline Pipeline => _pipeline; + + /// Initializes a new instance of Audio for mocking. + protected Audio() + { + } + + /// Initializes a new instance of Audio. + /// The HTTP pipeline for sending and receiving REST requests and responses. + /// The key credential to copy. + /// OpenAI Endpoint. + internal Audio(ClientPipeline pipeline, ApiKeyCredential credential, Uri endpoint) + { + _pipeline = pipeline; + _credential = credential; + _endpoint = endpoint; + } + + /// Generates audio from the input text. + /// The to use. + /// is null. + public virtual async Task> CreateSpeechAsync(CreateSpeechRequest speech) + { + Argument.AssertNotNull(speech, nameof(speech)); + + using BinaryContent content = BinaryContent.Create(speech); + ClientResult result = await CreateSpeechAsync(content, DefaultRequestContext).ConfigureAwait(false); + return ClientResult.FromValue(result.GetRawResponse().Content, result.GetRawResponse()); + } + + /// Generates audio from the input text. + /// The to use. + /// is null. + public virtual ClientResult CreateSpeech(CreateSpeechRequest speech) + { + Argument.AssertNotNull(speech, nameof(speech)); + + using BinaryContent content = BinaryContent.Create(speech); + ClientResult result = CreateSpeech(content, DefaultRequestContext); + return ClientResult.FromValue(result.GetRawResponse().Content, result.GetRawResponse()); + } + + /// + /// [Protocol Method] Generates audio from the input text. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The content to send as the body of the request. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task CreateSpeechAsync(BinaryContent content, RequestOptions options = null) + { + Argument.AssertNotNull(content, nameof(content)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("Audio.CreateSpeech"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateCreateSpeechRequest(content, options); + return ClientResult.FromResponse(await _pipeline.ProcessMessageAsync(message, options).ConfigureAwait(false)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Generates audio from the input text. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. 
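Because `CreateCreateSpeechRequest` sends `Accept: application/octet-stream`, the speech convenience overload returns the raw audio bytes as `BinaryData`, which can be written straight to disk. A sketch, assuming an `Audio` instance named `audio` and a populated `CreateSpeechRequest` named `speechRequest`:

```csharp
using System.ClientModel;
using System.IO;

// `audio` and `speechRequest` are assumptions for illustration.
ClientResult<BinaryData> speech = await audio.CreateSpeechAsync(speechRequest);
using FileStream output = File.Create("speech.mp3");
speech.Value.ToStream().CopyTo(output);
```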
+ /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The content to send as the body of the request. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual ClientResult CreateSpeech(BinaryContent content, RequestOptions options = null) + { + Argument.AssertNotNull(content, nameof(content)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("Audio.CreateSpeech"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateCreateSpeechRequest(content, options); + return ClientResult.FromResponse(_pipeline.ProcessMessage(message, options)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// Transcribes audio into the input language. + /// The to use. + /// is null. + public virtual async Task> CreateTranscriptionAsync(CreateTranscriptionRequest audio) + { + Argument.AssertNotNull(audio, nameof(audio)); + + using BinaryContent content = BinaryContent.Create(audio); + ClientResult result = await CreateTranscriptionAsync(content, DefaultRequestContext).ConfigureAwait(false); + return ClientResult.FromValue(CreateTranscriptionResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Transcribes audio into the input language. + /// The to use. + /// is null. + public virtual ClientResult CreateTranscription(CreateTranscriptionRequest audio) + { + Argument.AssertNotNull(audio, nameof(audio)); + + using BinaryContent content = BinaryContent.Create(audio); + ClientResult result = CreateTranscription(content, DefaultRequestContext); + return ClientResult.FromValue(CreateTranscriptionResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Transcribes audio into the input language. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The content to send as the body of the request. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task CreateTranscriptionAsync(BinaryContent content, RequestOptions options = null) + { + Argument.AssertNotNull(content, nameof(content)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("Audio.CreateTranscription"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateCreateTranscriptionRequest(content, options); + return ClientResult.FromResponse(await _pipeline.ProcessMessageAsync(message, options).ConfigureAwait(false)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Transcribes audio into the input language. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The content to send as the body of the request. 
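`CreateCreateTranscriptionRequest` sets a bare `multipart/form-data` content type, so callers of the protocol overload must pass an already-encoded multipart body (and note the boundary parameter is not conveyed by that header as generated). A sketch using `System.Net.Http` types, with an illustrative file path and model name:

```csharp
using System.ClientModel;
using System.IO;
using System.Net.Http;

// Field names follow the REST API ("file", "model"); the local path is illustrative.
using var form = new MultipartFormDataContent();
form.Add(new ByteArrayContent(File.ReadAllBytes("speech.mp3")), "file", "speech.mp3");
form.Add(new StringContent("whisper-1"), "model");

BinaryData encoded = BinaryData.FromBytes(await form.ReadAsByteArrayAsync());
using BinaryContent body = BinaryContent.Create(encoded);
ClientResult result = await audio.CreateTranscriptionAsync(body);
```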
+ /// The request options, which can override default behaviors of the client pipeline on a per-call basis.
+ /// is null.
+ /// Service returned a non-success status code.
+ /// The response returned from the service.
+ public virtual ClientResult CreateTranscription(BinaryContent content, RequestOptions options = null)
+ {
+ Argument.AssertNotNull(content, nameof(content));
+
+ options ??= new RequestOptions();
+ // using var scope = ClientDiagnostics.CreateSpan("Audio.CreateTranscription");
+ // scope.Start();
+ try
+ {
+ using PipelineMessage message = CreateCreateTranscriptionRequest(content, options);
+ return ClientResult.FromResponse(_pipeline.ProcessMessage(message, options));
+ }
+ catch (Exception e)
+ {
+ // scope.Failed(e);
+ throw;
+ }
+ }
+
+ /// Translates audio into English.
+ /// The to use.
+ /// is null.
+ public virtual async Task> CreateTranslationAsync(CreateTranslationRequest audio)
+ {
+ Argument.AssertNotNull(audio, nameof(audio));
+
+ using BinaryContent content = BinaryContent.Create(audio);
+ ClientResult result = await CreateTranslationAsync(content, DefaultRequestContext).ConfigureAwait(false);
+ return ClientResult.FromValue(CreateTranslationResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse());
+ }
+
+ /// Translates audio into English.
+ /// The to use.
+ /// is null.
+ public virtual ClientResult CreateTranslation(CreateTranslationRequest audio)
+ {
+ Argument.AssertNotNull(audio, nameof(audio));
+
+ using BinaryContent content = BinaryContent.Create(audio);
+ ClientResult result = CreateTranslation(content, DefaultRequestContext);
+ return ClientResult.FromValue(CreateTranslationResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse());
+ }
+
+ ///
+ /// [Protocol Method] Translates audio into English.
+ ///
+ ///
+ ///
+ /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios.
+ ///
+ ///
+ ///
+ ///
+ /// Please try the simpler convenience overload with strongly typed models first.
+ ///
+ ///
+ ///
+ ///
+ /// The content to send as the body of the request.
+ /// The request options, which can override default behaviors of the client pipeline on a per-call basis.
+ /// is null.
+ /// Service returned a non-success status code.
+ /// The response returned from the service.
+ public virtual async Task CreateTranslationAsync(BinaryContent content, RequestOptions options = null)
+ {
+ Argument.AssertNotNull(content, nameof(content));
+
+ options ??= new RequestOptions();
+ // using var scope = ClientDiagnostics.CreateSpan("Audio.CreateTranslation");
+ // scope.Start();
+ try
+ {
+ using PipelineMessage message = CreateCreateTranslationRequest(content, options);
+ return ClientResult.FromResponse(await _pipeline.ProcessMessageAsync(message, options).ConfigureAwait(false));
+ }
+ catch (Exception e)
+ {
+ // scope.Failed(e);
+ throw;
+ }
+ }
+
+ ///
+ /// [Protocol Method] Translates audio into English.
+ ///
+ ///
+ ///
+ /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios.
+ ///
+ ///
+ ///
+ ///
+ /// Please try the simpler convenience overload with strongly typed models first.
+ ///
+ ///
+ ///
+ ///
+ /// The content to send as the body of the request.
+ /// The request options, which can override default behaviors of the client pipeline on a per-call basis.
+ /// is null.
+ /// Service returned a non-success status code.
+ /// The response returned from the service.
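The translation endpoint accepts the same multipart shape as transcription but always produces English output. A short sketch, where `translationBody` is assumed to be multipart content built as in the transcription sketch above:

```csharp
using System.ClientModel;

// `translationBody` is an assumption, built like the transcription body above.
ClientResult translation = await audio.CreateTranslationAsync(translationBody);
Console.WriteLine(translation.GetRawResponse().Content.ToString());
```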
+ public virtual ClientResult CreateTranslation(BinaryContent content, RequestOptions options = null) + { + Argument.AssertNotNull(content, nameof(content)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("Audio.CreateTranslation"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateCreateTranslationRequest(content, options); + return ClientResult.FromResponse(_pipeline.ProcessMessage(message, options)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + internal PipelineMessage CreateCreateSpeechRequest(BinaryContent content, RequestOptions options) + { + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "POST"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/audio/speech"); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/octet-stream"); + request.Headers.Set("Content-Type", "application/json"); + request.Content = content; + message.Apply(options); + return message; + } + + internal PipelineMessage CreateCreateTranscriptionRequest(BinaryContent content, RequestOptions options) + { + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "POST"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/audio/transcriptions"); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); + request.Headers.Set("content-type", "multipart/form-data"); + request.Content = content; + message.Apply(options); + return message; + } + + internal PipelineMessage CreateCreateTranslationRequest(BinaryContent content, RequestOptions options) + { + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "POST"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/audio/translations"); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); + request.Headers.Set("content-type", "multipart/form-data"); + request.Content = content; + message.Apply(options); + return message; + } + + private static RequestOptions DefaultRequestContext = new RequestOptions(); + + private static PipelineMessageClassifier _responseErrorClassifier200; + private static PipelineMessageClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= PipelineMessageClassifier.Create(stackalloc ushort[] { 200 }); + } +} diff --git a/.dotnet/src/Generated/Chat.cs b/.dotnet/src/Generated/Chat.cs new file mode 100644 index 000000000..d164d2a43 --- /dev/null +++ b/.dotnet/src/Generated/Chat.cs @@ -0,0 +1,167 @@ +// + +using System; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Text; +using System.Threading; +using System.Threading.Tasks; +using OpenAI.Internal.Models; + +namespace OpenAI.Internal +{ + // Data plane generated sub-client. + /// The Chat sub-client. 
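Each sub-client lazily builds a single `PipelineMessageClassifier` that treats only HTTP 200 as success and assigns it to `message.ResponseClassifier` in its request factories. The same API can describe a wider success set; an illustrative, non-generated variant:

```csharp
using System.ClientModel.Primitives;

// Illustrative only: a classifier that would also treat 404 as non-error,
// e.g. for idempotent deletes. The generated clients use { 200 } exclusively.
PipelineMessageClassifier okOrNotFound =
    PipelineMessageClassifier.Create(stackalloc ushort[] { 200, 404 });
```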
+ internal partial class Chat + { + private const string AuthorizationHeader = "Authorization"; + private readonly ApiKeyCredential _credential; + private const string AuthorizationApiKeyPrefix = "Bearer"; + private readonly ClientPipeline _pipeline; + private readonly Uri _endpoint; + + /// The HTTP pipeline for sending and receiving REST requests and responses. + public virtual ClientPipeline Pipeline => _pipeline; + + /// Initializes a new instance of Chat for mocking. + protected Chat() + { + } + + /// Initializes a new instance of Chat. + /// The HTTP pipeline for sending and receiving REST requests and responses. + /// The key credential to copy. + /// OpenAI Endpoint. + internal Chat(ClientPipeline pipeline, ApiKeyCredential credential, Uri endpoint) + { + _pipeline = pipeline; + _credential = credential; + _endpoint = endpoint; + } + + /// Creates a model response for the given chat conversation. + /// The to use. + /// is null. + public virtual async Task> CreateChatCompletionAsync(CreateChatCompletionRequest createChatCompletionRequest) + { + Argument.AssertNotNull(createChatCompletionRequest, nameof(createChatCompletionRequest)); + + using BinaryContent content = BinaryContent.Create(createChatCompletionRequest); + ClientResult result = await CreateChatCompletionAsync(content, DefaultRequestContext).ConfigureAwait(false); + return ClientResult.FromValue(CreateChatCompletionResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Creates a model response for the given chat conversation. + /// The to use. + /// is null. + public virtual ClientResult CreateChatCompletion(CreateChatCompletionRequest createChatCompletionRequest) + { + Argument.AssertNotNull(createChatCompletionRequest, nameof(createChatCompletionRequest)); + + using BinaryContent content = BinaryContent.Create(createChatCompletionRequest); + ClientResult result = CreateChatCompletion(content, DefaultRequestContext); + return ClientResult.FromValue(CreateChatCompletionResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Creates a model response for the given chat conversation. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The content to send as the body of the request. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task CreateChatCompletionAsync(BinaryContent content, RequestOptions options = null) + { + Argument.AssertNotNull(content, nameof(content)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("Chat.CreateChatCompletion"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateCreateChatCompletionRequest(content, options); + return ClientResult.FromResponse(await _pipeline.ProcessMessageAsync(message, options).ConfigureAwait(false)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Creates a model response for the given chat conversation. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. 
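A raw-JSON sketch for the chat protocol overload, assuming a `Chat` instance named `chat` (the public `ChatClient` presumably layers over this internal sub-client):

```csharp
using System.ClientModel;

BinaryData json = BinaryData.FromString("""
    {
      "model": "gpt-3.5-turbo",
      "messages": [ { "role": "user", "content": "Say hello." } ]
    }
    """);
using BinaryContent body = BinaryContent.Create(json);

ClientResult result = await chat.CreateChatCompletionAsync(body);
Console.WriteLine(result.GetRawResponse().Content.ToString());
```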
+ /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The content to send as the body of the request. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual ClientResult CreateChatCompletion(BinaryContent content, RequestOptions options = null) + { + Argument.AssertNotNull(content, nameof(content)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("Chat.CreateChatCompletion"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateCreateChatCompletionRequest(content, options); + return ClientResult.FromResponse(_pipeline.ProcessMessage(message, options)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + internal PipelineMessage CreateCreateChatCompletionRequest(BinaryContent content, RequestOptions options) + { + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "POST"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/chat/completions"); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); + request.Headers.Set("Content-Type", "application/json"); + request.Content = content; + message.Apply(options); + return message; + } + + private static RequestOptions DefaultRequestContext = new RequestOptions(); + + private static PipelineMessageClassifier _responseErrorClassifier200; + private static PipelineMessageClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= PipelineMessageClassifier.Create(stackalloc ushort[] { 200 }); + } +} diff --git a/.dotnet/src/Generated/Completions.cs b/.dotnet/src/Generated/Completions.cs new file mode 100644 index 000000000..a8350ff68 --- /dev/null +++ b/.dotnet/src/Generated/Completions.cs @@ -0,0 +1,167 @@ +// + +using System; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Text; +using System.Threading; +using System.Threading.Tasks; +using OpenAI.Internal.Models; + +namespace OpenAI.Internal +{ + // Data plane generated sub-client. + /// The Completions sub-client. + internal partial class Completions + { + private const string AuthorizationHeader = "Authorization"; + private readonly ApiKeyCredential _credential; + private const string AuthorizationApiKeyPrefix = "Bearer"; + private readonly ClientPipeline _pipeline; + private readonly Uri _endpoint; + + /// The HTTP pipeline for sending and receiving REST requests and responses. + public virtual ClientPipeline Pipeline => _pipeline; + + /// Initializes a new instance of Completions for mocking. + protected Completions() + { + } + + /// Initializes a new instance of Completions. + /// The HTTP pipeline for sending and receiving REST requests and responses. + /// The key credential to copy. + /// OpenAI Endpoint. + internal Completions(ClientPipeline pipeline, ApiKeyCredential credential, Uri endpoint) + { + _pipeline = pipeline; + _credential = credential; + _endpoint = endpoint; + } + + /// Creates a completion for the provided prompt and parameters. + /// The to use. + /// is null. 
+ public virtual async Task> CreateCompletionAsync(CreateCompletionRequest createCompletionRequest) + { + Argument.AssertNotNull(createCompletionRequest, nameof(createCompletionRequest)); + + using BinaryContent content = BinaryContent.Create(createCompletionRequest); + ClientResult result = await CreateCompletionAsync(content, DefaultRequestContext).ConfigureAwait(false); + return ClientResult.FromValue(CreateCompletionResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Creates a completion for the provided prompt and parameters. + /// The to use. + /// is null. + public virtual ClientResult CreateCompletion(CreateCompletionRequest createCompletionRequest) + { + Argument.AssertNotNull(createCompletionRequest, nameof(createCompletionRequest)); + + using BinaryContent content = BinaryContent.Create(createCompletionRequest); + ClientResult result = CreateCompletion(content, DefaultRequestContext); + return ClientResult.FromValue(CreateCompletionResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Creates a completion for the provided prompt and parameters. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The content to send as the body of the request. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task CreateCompletionAsync(BinaryContent content, RequestOptions options = null) + { + Argument.AssertNotNull(content, nameof(content)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("Completions.CreateCompletion"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateCreateCompletionRequest(content, options); + return ClientResult.FromResponse(await _pipeline.ProcessMessageAsync(message, options).ConfigureAwait(false)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Creates a completion for the provided prompt and parameters. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The content to send as the body of the request. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// Service returned a non-success status code. + /// The response returned from the service. 
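`RequestOptions` threads per-call policy, such as a `CancellationToken`, through any protocol overload; when callers pass nothing, the methods fall back to the shared `DefaultRequestContext`. A sketch with an illustrative timeout, assuming a `Completions` instance named `completions`:

```csharp
using System.ClientModel;
using System.ClientModel.Primitives;
using System.Threading;

BinaryData json = BinaryData.FromString("""
    { "model": "gpt-3.5-turbo-instruct", "prompt": "Say hello.", "max_tokens": 16 }
    """);
using BinaryContent body = BinaryContent.Create(json);

using var cts = new CancellationTokenSource(TimeSpan.FromSeconds(30));
RequestOptions options = new() { CancellationToken = cts.Token };
ClientResult result = await completions.CreateCompletionAsync(body, options);
```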
+ public virtual ClientResult CreateCompletion(BinaryContent content, RequestOptions options = null) + { + Argument.AssertNotNull(content, nameof(content)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("Completions.CreateCompletion"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateCreateCompletionRequest(content, options); + return ClientResult.FromResponse(_pipeline.ProcessMessage(message, options)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + internal PipelineMessage CreateCreateCompletionRequest(BinaryContent content, RequestOptions options) + { + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "POST"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/completions"); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); + request.Headers.Set("Content-Type", "application/json"); + request.Content = content; + message.Apply(options); + return message; + } + + private static RequestOptions DefaultRequestContext = new RequestOptions(); + + private static PipelineMessageClassifier _responseErrorClassifier200; + private static PipelineMessageClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= PipelineMessageClassifier.Create(stackalloc ushort[] { 200 }); + } +} diff --git a/.dotnet/src/Generated/Embeddings.cs b/.dotnet/src/Generated/Embeddings.cs new file mode 100644 index 000000000..9b4e99e82 --- /dev/null +++ b/.dotnet/src/Generated/Embeddings.cs @@ -0,0 +1,167 @@ +// + +using System; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Text; +using System.Threading; +using System.Threading.Tasks; +using OpenAI.Internal.Models; + +namespace OpenAI.Internal +{ + // Data plane generated sub-client. + /// The Embeddings sub-client. + internal partial class Embeddings + { + private const string AuthorizationHeader = "Authorization"; + private readonly ApiKeyCredential _credential; + private const string AuthorizationApiKeyPrefix = "Bearer"; + private readonly ClientPipeline _pipeline; + private readonly Uri _endpoint; + + /// The HTTP pipeline for sending and receiving REST requests and responses. + public virtual ClientPipeline Pipeline => _pipeline; + + /// Initializes a new instance of Embeddings for mocking. + protected Embeddings() + { + } + + /// Initializes a new instance of Embeddings. + /// The HTTP pipeline for sending and receiving REST requests and responses. + /// The key credential to copy. + /// OpenAI Endpoint. + internal Embeddings(ClientPipeline pipeline, ApiKeyCredential credential, Uri endpoint) + { + _pipeline = pipeline; + _credential = credential; + _endpoint = endpoint; + } + + /// Creates an embedding vector representing the input text. + /// The to use. + /// is null. + public virtual async Task> CreateEmbeddingAsync(CreateEmbeddingRequest embedding) + { + Argument.AssertNotNull(embedding, nameof(embedding)); + + using BinaryContent content = BinaryContent.Create(embedding); + ClientResult result = await CreateEmbeddingAsync(content, DefaultRequestContext).ConfigureAwait(false); + return ClientResult.FromValue(CreateEmbeddingResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Creates an embedding vector representing the input text. + /// The to use. 
+ /// is null. + public virtual ClientResult CreateEmbedding(CreateEmbeddingRequest embedding) + { + Argument.AssertNotNull(embedding, nameof(embedding)); + + using BinaryContent content = BinaryContent.Create(embedding); + ClientResult result = CreateEmbedding(content, DefaultRequestContext); + return ClientResult.FromValue(CreateEmbeddingResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Creates an embedding vector representing the input text. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The content to send as the body of the request. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task CreateEmbeddingAsync(BinaryContent content, RequestOptions options = null) + { + Argument.AssertNotNull(content, nameof(content)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("Embeddings.CreateEmbedding"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateCreateEmbeddingRequest(content, options); + return ClientResult.FromResponse(await _pipeline.ProcessMessageAsync(message, options).ConfigureAwait(false)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Creates an embedding vector representing the input text. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The content to send as the body of the request. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// Service returned a non-success status code. + /// The response returned from the service. 
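A raw-JSON sketch for the embeddings protocol overload; the model name and input are illustrative, and `embeddings` is an assumed sub-client instance:

```csharp
using System.ClientModel;

BinaryData json = BinaryData.FromString("""
    { "model": "text-embedding-ada-002", "input": "Hello, world!" }
    """);
using BinaryContent body = BinaryContent.Create(json);

ClientResult result = await embeddings.CreateEmbeddingAsync(body);
Console.WriteLine(result.GetRawResponse().Content.ToString());
```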
+        /// <summary>
+        /// [Protocol Method] Creates an embedding vector representing the input text.
+        ///
+        /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios.
+        /// Please try the simpler convenience overload with strongly typed models first.
+        /// </summary>
+        /// <param name="content"> The content to send as the body of the request. </param>
+        /// <param name="options"> The request options, which can override default behaviors of the client pipeline on a per-call basis. </param>
+        /// <exception cref="ArgumentNullException"> <paramref name="content"/> is null. </exception>
+        /// <exception cref="ClientResultException"> Service returned a non-success status code. </exception>
+        /// <returns> The response returned from the service. </returns>
+        public virtual ClientResult CreateEmbedding(BinaryContent content, RequestOptions options = null)
+        {
+            Argument.AssertNotNull(content, nameof(content));
+
+            options ??= new RequestOptions();
+            // using var scope = ClientDiagnostics.CreateSpan("Embeddings.CreateEmbedding");
+            // scope.Start();
+            try
+            {
+                using PipelineMessage message = CreateCreateEmbeddingRequest(content, options);
+                return ClientResult.FromResponse(_pipeline.ProcessMessage(message, options));
+            }
+            catch (Exception e)
+            {
+                // scope.Failed(e);
+                throw;
+            }
+        }
+
+        internal PipelineMessage CreateCreateEmbeddingRequest(BinaryContent content, RequestOptions options)
+        {
+            PipelineMessage message = _pipeline.CreateMessage();
+            message.ResponseClassifier = ResponseErrorClassifier200;
+            PipelineRequest request = message.Request;
+            request.Method = "POST";
+            UriBuilder uriBuilder = new(_endpoint.ToString());
+            StringBuilder path = new();
+            path.Append("/embeddings");
+            uriBuilder.Path += path.ToString();
+            request.Uri = uriBuilder.Uri;
+            request.Headers.Set("Accept", "application/json");
+            request.Headers.Set("Content-Type", "application/json");
+            request.Content = content;
+            message.Apply(options);
+            return message;
+        }
+
+        private static RequestOptions DefaultRequestContext = new RequestOptions();
+
+        private static PipelineMessageClassifier _responseErrorClassifier200;
+        private static PipelineMessageClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= PipelineMessageClassifier.Create(stackalloc ushort[] { 200 });
+    }
+}
diff --git a/.dotnet/src/Generated/Files.cs b/.dotnet/src/Generated/Files.cs
new file mode 100644
index 000000000..f54d4b2ce
--- /dev/null
+++ b/.dotnet/src/Generated/Files.cs
@@ -0,0 +1,674 @@
+// <auto-generated/>
+
+using System;
+using System.ClientModel;
+using System.ClientModel.Primitives;
+using System.Text;
+using System.Threading;
+using System.Threading.Tasks;
+using OpenAI.Internal.Models;
+
+namespace OpenAI.Internal
+{
+    // Data plane generated sub-client.
+    /// <summary> The Files sub-client. </summary>
+    internal partial class Files
+    {
+        private const string AuthorizationHeader = "Authorization";
+        private readonly ApiKeyCredential _credential;
+        private const string AuthorizationApiKeyPrefix = "Bearer";
+        private readonly ClientPipeline _pipeline;
+        private readonly Uri _endpoint;
+
+        /// <summary> The HTTP pipeline for sending and receiving REST requests and responses. </summary>
+        public virtual ClientPipeline Pipeline => _pipeline;
+
+        /// <summary> Initializes a new instance of Files for mocking. </summary>
+        protected Files()
+        {
+        }
+
+        /// <summary> Initializes a new instance of Files. </summary>
+        /// <param name="pipeline"> The HTTP pipeline for sending and receiving REST requests and responses. </param>
+        /// <param name="credential"> The key credential to copy. </param>
+        /// <param name="endpoint"> OpenAI Endpoint. </param>
+        internal Files(ClientPipeline pipeline, ApiKeyCredential credential, Uri endpoint)
+        {
+            _pipeline = pipeline;
+            _credential = credential;
+            _endpoint = endpoint;
+        }
+
+        /// <summary>
+        /// Upload a file that can be used across various endpoints. The size of all the files uploaded by
+        /// one organization can be up to 100 GB.
+        ///
+        /// The size of individual files can be a maximum of 512 MB or 2 million tokens for Assistants. See
+        /// the [Assistants Tools guide](/docs/assistants/tools) to learn more about the types of files
+        /// supported. The Fine-tuning API only supports `.jsonl` files.
+        ///
+        /// Please [contact us](https://help.openai.com/) if you need to increase these storage limits.
+        /// </summary>
+        /// <param name="file"> The <see cref="CreateFileRequest"/> to use. </param>
+        /// <exception cref="ArgumentNullException"> <paramref name="file"/> is null. </exception>
+ public virtual async Task> CreateFileAsync(CreateFileRequest file) + { + Argument.AssertNotNull(file, nameof(file)); + + using BinaryContent content = BinaryContent.Create(file); + ClientResult result = await CreateFileAsync(content, DefaultRequestContext).ConfigureAwait(false); + return ClientResult.FromValue(OpenAIFile.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// Upload a file that can be used across various endpoints. The size of all the files uploaded by + /// one organization can be up to 100 GB. + /// + /// The size of individual files can be a maximum of 512 MB or 2 million tokens for Assistants. See + /// the [Assistants Tools guide](/docs/assistants/tools) to learn more about the types of files + /// supported. The Fine-tuning API only supports `.jsonl` files. + /// + /// Please [contact us](https://help.openai.com/) if you need to increase these storage limits. + /// + /// The to use. + /// is null. + public virtual ClientResult CreateFile(CreateFileRequest file) + { + Argument.AssertNotNull(file, nameof(file)); + + using BinaryContent content = BinaryContent.Create(file); + ClientResult result = CreateFile(content, DefaultRequestContext); + return ClientResult.FromValue(OpenAIFile.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Upload a file that can be used across various endpoints. The size of all the files uploaded by + /// one organization can be up to 100 GB. + /// + /// The size of individual files can be a maximum of 512 MB or 2 million tokens for Assistants. See + /// the [Assistants Tools guide](/docs/assistants/tools) to learn more about the types of files + /// supported. The Fine-tuning API only supports `.jsonl` files. + /// + /// Please [contact us](https://help.openai.com/) if you need to increase these storage limits. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The content to send as the body of the request. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task CreateFileAsync(BinaryContent content, RequestOptions options = null) + { + Argument.AssertNotNull(content, nameof(content)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("Files.CreateFile"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateCreateFileRequest(content, options); + return ClientResult.FromResponse(await _pipeline.ProcessMessageAsync(message, options).ConfigureAwait(false)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Upload a file that can be used across various endpoints. The size of all the files uploaded by + /// one organization can be up to 100 GB. + /// + /// The size of individual files can be a maximum of 512 MB or 2 million tokens for Assistants. See + /// the [Assistants Tools guide](/docs/assistants/tools) to learn more about the types of files + /// supported. The Fine-tuning API only supports `.jsonl` files. + /// + /// Please [contact us](https://help.openai.com/) if you need to increase these storage limits. 
+ /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The content to send as the body of the request. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual ClientResult CreateFile(BinaryContent content, RequestOptions options = null) + { + Argument.AssertNotNull(content, nameof(content)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("Files.CreateFile"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateCreateFileRequest(content, options); + return ClientResult.FromResponse(_pipeline.ProcessMessage(message, options)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// Returns a list of files that belong to the user's organization. + /// Only return files with the given purpose. + public virtual async Task> GetFilesAsync(string purpose = null) + { + ClientResult result = await GetFilesAsync(purpose, DefaultRequestContext).ConfigureAwait(false); + return ClientResult.FromValue(ListFilesResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Returns a list of files that belong to the user's organization. + /// Only return files with the given purpose. + public virtual ClientResult GetFiles(string purpose = null) + { + ClientResult result = GetFiles(purpose, DefaultRequestContext); + return ClientResult.FromValue(ListFilesResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Returns a list of files that belong to the user's organization. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// Only return files with the given purpose. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task GetFilesAsync(string purpose, RequestOptions options) + { + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("Files.GetFiles"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateGetFilesRequest(purpose, options); + return ClientResult.FromResponse(await _pipeline.ProcessMessageAsync(message, options).ConfigureAwait(false)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Returns a list of files that belong to the user's organization. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// Only return files with the given purpose. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// Service returned a non-success status code. 
+ /// The response returned from the service. + public virtual ClientResult GetFiles(string purpose, RequestOptions options) + { + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("Files.GetFiles"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateGetFilesRequest(purpose, options); + return ClientResult.FromResponse(_pipeline.ProcessMessage(message, options)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// Returns information about a specific file. + /// The ID of the file to use for this request. + /// is null. + /// is an empty string, and was expected to be non-empty. + public virtual async Task> RetrieveFileAsync(string fileId) + { + Argument.AssertNotNullOrEmpty(fileId, nameof(fileId)); + + ClientResult result = await RetrieveFileAsync(fileId, DefaultRequestContext).ConfigureAwait(false); + return ClientResult.FromValue(OpenAIFile.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Returns information about a specific file. + /// The ID of the file to use for this request. + /// is null. + /// is an empty string, and was expected to be non-empty. + public virtual ClientResult RetrieveFile(string fileId) + { + Argument.AssertNotNullOrEmpty(fileId, nameof(fileId)); + + ClientResult result = RetrieveFile(fileId, DefaultRequestContext); + return ClientResult.FromValue(OpenAIFile.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Returns information about a specific file. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the file to use for this request. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task RetrieveFileAsync(string fileId, RequestOptions options) + { + Argument.AssertNotNullOrEmpty(fileId, nameof(fileId)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("Files.RetrieveFile"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateRetrieveFileRequest(fileId, options); + return ClientResult.FromResponse(await _pipeline.ProcessMessageAsync(message, options).ConfigureAwait(false)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Returns information about a specific file. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the file to use for this request. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. 
+ public virtual ClientResult RetrieveFile(string fileId, RequestOptions options) + { + Argument.AssertNotNullOrEmpty(fileId, nameof(fileId)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("Files.RetrieveFile"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateRetrieveFileRequest(fileId, options); + return ClientResult.FromResponse(_pipeline.ProcessMessage(message, options)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// Delete a file. + /// The ID of the file to use for this request. + /// is null. + /// is an empty string, and was expected to be non-empty. + public virtual async Task> DeleteFileAsync(string fileId) + { + Argument.AssertNotNullOrEmpty(fileId, nameof(fileId)); + + ClientResult result = await DeleteFileAsync(fileId, DefaultRequestContext).ConfigureAwait(false); + return ClientResult.FromValue(DeleteFileResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Delete a file. + /// The ID of the file to use for this request. + /// is null. + /// is an empty string, and was expected to be non-empty. + public virtual ClientResult DeleteFile(string fileId) + { + Argument.AssertNotNullOrEmpty(fileId, nameof(fileId)); + + ClientResult result = DeleteFile(fileId, DefaultRequestContext); + return ClientResult.FromValue(DeleteFileResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Delete a file + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the file to use for this request. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task DeleteFileAsync(string fileId, RequestOptions options) + { + Argument.AssertNotNullOrEmpty(fileId, nameof(fileId)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("Files.DeleteFile"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateDeleteFileRequest(fileId, options); + return ClientResult.FromResponse(await _pipeline.ProcessMessageAsync(message, options).ConfigureAwait(false)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Delete a file + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the file to use for this request. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. 
+ public virtual ClientResult DeleteFile(string fileId, RequestOptions options) + { + Argument.AssertNotNullOrEmpty(fileId, nameof(fileId)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("Files.DeleteFile"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateDeleteFileRequest(fileId, options); + return ClientResult.FromResponse(_pipeline.ProcessMessage(message, options)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// Returns the contents of the specified file. + /// The ID of the file to use for this request. + /// is null. + /// is an empty string, and was expected to be non-empty. + public virtual async Task> DownloadFileAsync(string fileId) + { + Argument.AssertNotNullOrEmpty(fileId, nameof(fileId)); + + ClientResult result = await DownloadFileAsync(fileId, DefaultRequestContext).ConfigureAwait(false); + return ClientResult.FromValue(result.GetRawResponse().Content.ToObjectFromJson(), result.GetRawResponse()); + } + + /// Returns the contents of the specified file. + /// The ID of the file to use for this request. + /// is null. + /// is an empty string, and was expected to be non-empty. + public virtual ClientResult DownloadFile(string fileId) + { + Argument.AssertNotNullOrEmpty(fileId, nameof(fileId)); + + ClientResult result = DownloadFile(fileId, DefaultRequestContext); + return ClientResult.FromValue(result.GetRawResponse().Content.ToObjectFromJson(), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Returns the contents of the specified file. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the file to use for this request. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task DownloadFileAsync(string fileId, RequestOptions options) + { + Argument.AssertNotNullOrEmpty(fileId, nameof(fileId)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("Files.DownloadFile"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateDownloadFileRequest(fileId, options); + return ClientResult.FromResponse(await _pipeline.ProcessMessageAsync(message, options).ConfigureAwait(false)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Returns the contents of the specified file. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the file to use for this request. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. 
+        public virtual ClientResult DownloadFile(string fileId, RequestOptions options)
+        {
+            Argument.AssertNotNullOrEmpty(fileId, nameof(fileId));
+
+            options ??= new RequestOptions();
+            // using var scope = ClientDiagnostics.CreateSpan("Files.DownloadFile");
+            // scope.Start();
+            try
+            {
+                using PipelineMessage message = CreateDownloadFileRequest(fileId, options);
+                return ClientResult.FromResponse(_pipeline.ProcessMessage(message, options));
+            }
+            catch (Exception e)
+            {
+                // scope.Failed(e);
+                throw;
+            }
+        }
+
+        internal PipelineMessage CreateCreateFileRequest(BinaryContent content, RequestOptions options)
+        {
+            PipelineMessage message = _pipeline.CreateMessage();
+            message.ResponseClassifier = ResponseErrorClassifier200;
+            PipelineRequest request = message.Request;
+            request.Method = "POST";
+            UriBuilder uriBuilder = new(_endpoint.ToString());
+            StringBuilder path = new();
+            path.Append("/files");
+            uriBuilder.Path += path.ToString();
+            request.Uri = uriBuilder.Uri;
+            request.Headers.Set("Accept", "application/json");
+            request.Headers.Set("content-type", "multipart/form-data");
+            request.Content = content;
+            message.Apply(options);
+            return message;
+        }
+
+        internal PipelineMessage CreateGetFilesRequest(string purpose, RequestOptions options)
+        {
+            PipelineMessage message = _pipeline.CreateMessage();
+            message.ResponseClassifier = ResponseErrorClassifier200;
+            PipelineRequest request = message.Request;
+            request.Method = "GET";
+            UriBuilder uriBuilder = new(_endpoint.ToString());
+            StringBuilder path = new();
+            path.Append("/files");
+            if (purpose != null)
+            {
+                if (uriBuilder.Query != null && uriBuilder.Query.Length > 1)
+                {
+                    uriBuilder.Query += $"&purpose={purpose}";
+                }
+                else
+                {
+                    uriBuilder.Query = $"purpose={purpose}";
+                }
+            }
+            uriBuilder.Path += path.ToString();
+            request.Uri = uriBuilder.Uri;
+            request.Headers.Set("Accept", "application/json");
+            return message;
+        }
+
+        internal PipelineMessage CreateRetrieveFileRequest(string fileId, RequestOptions options)
+        {
+            PipelineMessage message = _pipeline.CreateMessage();
+            message.ResponseClassifier = ResponseErrorClassifier200;
+            PipelineRequest request = message.Request;
+            request.Method = "GET";
+            UriBuilder uriBuilder = new(_endpoint.ToString());
+            StringBuilder path = new();
+            path.Append("/files/");
+            path.Append(fileId);
+            uriBuilder.Path += path.ToString();
+            request.Uri = uriBuilder.Uri;
+            request.Headers.Set("Accept", "application/json");
+            return message;
+        }
+
+        internal PipelineMessage CreateDeleteFileRequest(string fileId, RequestOptions options)
+        {
+            PipelineMessage message = _pipeline.CreateMessage();
+            message.ResponseClassifier = ResponseErrorClassifier200;
+            PipelineRequest request = message.Request;
+            request.Method = "DELETE";
+            UriBuilder uriBuilder = new(_endpoint.ToString());
+            StringBuilder path = new();
+            path.Append("/files/");
+            path.Append(fileId);
+            uriBuilder.Path += path.ToString();
+            request.Uri = uriBuilder.Uri;
+            request.Headers.Set("Accept", "application/json");
+            return message;
+        }
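+        // Note on the builders above: query parameters are appended by hand. UriBuilder's
+        // Query getter includes a leading '?' once set, so the generated code checks
+        // Query.Length > 1 to decide between appending with "&" and a fresh assignment.
+        // An equivalent helper (an illustrative assumption, not part of the generated
+        // surface) would be:
+        //
+        //   static void AppendQuery(UriBuilder builder, string name, string value) =>
+        //       builder.Query = builder.Query is { Length: > 1 } q
+        //           ? $"{q.TrimStart('?')}&{name}={value}"
+        //           : $"{name}={value}";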
request.Headers.Set("Accept", "application/json"); + return message; + } + + private static RequestOptions DefaultRequestContext = new RequestOptions(); + + private static PipelineMessageClassifier _responseErrorClassifier200; + private static PipelineMessageClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= PipelineMessageClassifier.Create(stackalloc ushort[] { 200 }); + } +} diff --git a/.dotnet/src/Generated/FineTuning.cs b/.dotnet/src/Generated/FineTuning.cs new file mode 100644 index 000000000..fd95bb9ae --- /dev/null +++ b/.dotnet/src/Generated/FineTuning.cs @@ -0,0 +1,720 @@ +// + +using System; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Text; +using System.Threading; +using System.Threading.Tasks; +using OpenAI.Internal.Models; + +namespace OpenAI.Internal +{ + // Data plane generated sub-client. + /// The FineTuning sub-client. + internal partial class FineTuning + { + private const string AuthorizationHeader = "Authorization"; + private readonly ApiKeyCredential _credential; + private const string AuthorizationApiKeyPrefix = "Bearer"; + private readonly ClientPipeline _pipeline; + private readonly Uri _endpoint; + + /// The HTTP pipeline for sending and receiving REST requests and responses. + public virtual ClientPipeline Pipeline => _pipeline; + + /// Initializes a new instance of FineTuning for mocking. + protected FineTuning() + { + } + + /// Initializes a new instance of FineTuning. + /// The HTTP pipeline for sending and receiving REST requests and responses. + /// The key credential to copy. + /// OpenAI Endpoint. + internal FineTuning(ClientPipeline pipeline, ApiKeyCredential credential, Uri endpoint) + { + _pipeline = pipeline; + _credential = credential; + _endpoint = endpoint; + } + + /// + /// Creates a fine-tuning job which begins the process of creating a new model from a given dataset. + /// + /// Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. + /// + /// [Learn more about fine-tuning](/docs/guides/fine-tuning) + /// + /// The to use. + /// is null. + public virtual async Task> CreateFineTuningJobAsync(CreateFineTuningJobRequest job) + { + Argument.AssertNotNull(job, nameof(job)); + + using BinaryContent content = BinaryContent.Create(job); + ClientResult result = await CreateFineTuningJobAsync(content, DefaultRequestContext).ConfigureAwait(false); + return ClientResult.FromValue(FineTuningJob.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// Creates a fine-tuning job which begins the process of creating a new model from a given dataset. + /// + /// Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. + /// + /// [Learn more about fine-tuning](/docs/guides/fine-tuning) + /// + /// The to use. + /// is null. + public virtual ClientResult CreateFineTuningJob(CreateFineTuningJobRequest job) + { + Argument.AssertNotNull(job, nameof(job)); + + using BinaryContent content = BinaryContent.Create(job); + ClientResult result = CreateFineTuningJob(content, DefaultRequestContext); + return ClientResult.FromValue(FineTuningJob.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Creates a fine-tuning job which begins the process of creating a new model from a given dataset. + /// + /// Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. 
+        /// <summary>
+        /// [Protocol Method] Creates a fine-tuning job which begins the process of creating a new model from a given dataset.
+        ///
+        /// Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete.
+        ///
+        /// [Learn more about fine-tuning](/docs/guides/fine-tuning)
+        ///
+        /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios.
+        /// Please try the simpler convenience overload with strongly typed models first.
+        /// </summary>
+        /// <param name="content"> The content to send as the body of the request. </param>
+        /// <param name="options"> The request options, which can override default behaviors of the client pipeline on a per-call basis. </param>
+        /// <exception cref="ArgumentNullException"> <paramref name="content"/> is null. </exception>
+        /// <exception cref="ClientResultException"> Service returned a non-success status code. </exception>
+        /// <returns> The response returned from the service. </returns>
+        public virtual async Task<ClientResult> CreateFineTuningJobAsync(BinaryContent content, RequestOptions options = null)
+        {
+            Argument.AssertNotNull(content, nameof(content));
+
+            options ??= new RequestOptions();
+            // using var scope = ClientDiagnostics.CreateSpan("FineTuning.CreateFineTuningJob");
+            // scope.Start();
+            try
+            {
+                using PipelineMessage message = CreateCreateFineTuningJobRequest(content, options);
+                return ClientResult.FromResponse(await _pipeline.ProcessMessageAsync(message, options).ConfigureAwait(false));
+            }
+            catch (Exception e)
+            {
+                // scope.Failed(e);
+                throw;
+            }
+        }
+
+        /// <summary>
+        /// [Protocol Method] Creates a fine-tuning job which begins the process of creating a new model from a given dataset.
+        ///
+        /// Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete.
+        ///
+        /// [Learn more about fine-tuning](/docs/guides/fine-tuning)
+        ///
+        /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios.
+        /// Please try the simpler convenience overload with strongly typed models first.
+        /// </summary>
+        /// <param name="content"> The content to send as the body of the request. </param>
+        /// <param name="options"> The request options, which can override default behaviors of the client pipeline on a per-call basis. </param>
+        /// <exception cref="ArgumentNullException"> <paramref name="content"/> is null. </exception>
+        /// <exception cref="ClientResultException"> Service returned a non-success status code. </exception>
+        /// <returns> The response returned from the service. </returns>
+        public virtual ClientResult CreateFineTuningJob(BinaryContent content, RequestOptions options = null)
+        {
+            Argument.AssertNotNull(content, nameof(content));
+
+            options ??= new RequestOptions();
+            // using var scope = ClientDiagnostics.CreateSpan("FineTuning.CreateFineTuningJob");
+            // scope.Start();
+            try
+            {
+                using PipelineMessage message = CreateCreateFineTuningJobRequest(content, options);
+                return ClientResult.FromResponse(_pipeline.ProcessMessage(message, options));
+            }
+            catch (Exception e)
+            {
+                // scope.Failed(e);
+                throw;
+            }
+        }
+
+        /// <summary> List your organization's fine-tuning jobs. </summary>
+        /// <param name="after"> Identifier for the last job from the previous pagination request. </param>
+        /// <param name="limit"> Number of fine-tuning jobs to retrieve. </param>
+        public virtual async Task<ClientResult<ListPaginatedFineTuningJobsResponse>> GetPaginatedFineTuningJobsAsync(string after = null, int? limit = null)
+        {
+            ClientResult result = await GetPaginatedFineTuningJobsAsync(after, limit, DefaultRequestContext).ConfigureAwait(false);
+            return ClientResult.FromValue(ListPaginatedFineTuningJobsResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse());
+        }
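+        // Example (illustrative sketch): paging through jobs with the convenience overload
+        // above; `fineTuning` is an assumed instance. The id of the last job in one page is
+        // passed as `after` to request the next page.
+        //
+        //   ClientResult<ListPaginatedFineTuningJobsResponse> page =
+        //       await fineTuning.GetPaginatedFineTuningJobsAsync(after: null, limit: 20);
+        //   // then: GetPaginatedFineTuningJobsAsync(after: <last job id>, limit: 20)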
+        /// <summary> List your organization's fine-tuning jobs. </summary>
+        /// <param name="after"> Identifier for the last job from the previous pagination request. </param>
+        /// <param name="limit"> Number of fine-tuning jobs to retrieve. </param>
+        public virtual ClientResult<ListPaginatedFineTuningJobsResponse> GetPaginatedFineTuningJobs(string after = null, int? limit = null)
+        {
+            ClientResult result = GetPaginatedFineTuningJobs(after, limit, DefaultRequestContext);
+            return ClientResult.FromValue(ListPaginatedFineTuningJobsResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse());
+        }
+
+        /// <summary>
+        /// [Protocol Method] List your organization's fine-tuning jobs.
+        ///
+        /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios.
+        /// Please try the simpler convenience overload with strongly typed models first.
+        /// </summary>
+        /// <param name="after"> Identifier for the last job from the previous pagination request. </param>
+        /// <param name="limit"> Number of fine-tuning jobs to retrieve. </param>
+        /// <param name="options"> The request options, which can override default behaviors of the client pipeline on a per-call basis. </param>
+        /// <exception cref="ClientResultException"> Service returned a non-success status code. </exception>
+        /// <returns> The response returned from the service. </returns>
+        public virtual async Task<ClientResult> GetPaginatedFineTuningJobsAsync(string after, int? limit, RequestOptions options)
+        {
+            options ??= new RequestOptions();
+            // using var scope = ClientDiagnostics.CreateSpan("FineTuning.GetPaginatedFineTuningJobs");
+            // scope.Start();
+            try
+            {
+                using PipelineMessage message = CreateGetPaginatedFineTuningJobsRequest(after, limit, options);
+                return ClientResult.FromResponse(await _pipeline.ProcessMessageAsync(message, options).ConfigureAwait(false));
+            }
+            catch (Exception e)
+            {
+                // scope.Failed(e);
+                throw;
+            }
+        }
+
+        /// <summary>
+        /// [Protocol Method] List your organization's fine-tuning jobs.
+        ///
+        /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios.
+        /// Please try the simpler convenience overload with strongly typed models first.
+        /// </summary>
+        /// <param name="after"> Identifier for the last job from the previous pagination request. </param>
+        /// <param name="limit"> Number of fine-tuning jobs to retrieve. </param>
+        /// <param name="options"> The request options, which can override default behaviors of the client pipeline on a per-call basis. </param>
+        /// <exception cref="ClientResultException"> Service returned a non-success status code. </exception>
+        /// <returns> The response returned from the service. </returns>
+        public virtual ClientResult GetPaginatedFineTuningJobs(string after, int? limit, RequestOptions options)
+        {
+            options ??= new RequestOptions();
+            // using var scope = ClientDiagnostics.CreateSpan("FineTuning.GetPaginatedFineTuningJobs");
+            // scope.Start();
+            try
+            {
+                using PipelineMessage message = CreateGetPaginatedFineTuningJobsRequest(after, limit, options);
+                return ClientResult.FromResponse(_pipeline.ProcessMessage(message, options));
+            }
+            catch (Exception e)
+            {
+                // scope.Failed(e);
+                throw;
+            }
+        }
+
+        /// <summary>
+        /// Get info about a fine-tuning job.
+        ///
+        /// [Learn more about fine-tuning](/docs/guides/fine-tuning)
+        /// </summary>
+        /// <param name="fineTuningJobId"> The ID of the fine-tuning job. </param>
+        /// <exception cref="ArgumentNullException"> <paramref name="fineTuningJobId"/> is null. </exception>
+        /// <exception cref="ArgumentException"> <paramref name="fineTuningJobId"/> is an empty string, and was expected to be non-empty. </exception>
+        public virtual async Task<ClientResult<FineTuningJob>> RetrieveFineTuningJobAsync(string fineTuningJobId)
+        {
+            Argument.AssertNotNullOrEmpty(fineTuningJobId, nameof(fineTuningJobId));
+
+            ClientResult result = await RetrieveFineTuningJobAsync(fineTuningJobId, DefaultRequestContext).ConfigureAwait(false);
+            return ClientResult.FromValue(FineTuningJob.FromResponse(result.GetRawResponse()), result.GetRawResponse());
+        }
+
+        /// <summary>
+        /// Get info about a fine-tuning job.
+        ///
+        /// [Learn more about fine-tuning](/docs/guides/fine-tuning)
+        /// </summary>
+        /// <param name="fineTuningJobId"> The ID of the fine-tuning job. </param>
+        /// <exception cref="ArgumentNullException"> <paramref name="fineTuningJobId"/> is null. </exception>
+        /// <exception cref="ArgumentException"> <paramref name="fineTuningJobId"/> is an empty string, and was expected to be non-empty. </exception>
+ public virtual ClientResult RetrieveFineTuningJob(string fineTuningJobId) + { + Argument.AssertNotNullOrEmpty(fineTuningJobId, nameof(fineTuningJobId)); + + ClientResult result = RetrieveFineTuningJob(fineTuningJobId, DefaultRequestContext); + return ClientResult.FromValue(FineTuningJob.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Get info about a fine-tuning job. + /// + /// [Learn more about fine-tuning](/docs/guides/fine-tuning) + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the fine-tuning job. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task RetrieveFineTuningJobAsync(string fineTuningJobId, RequestOptions options) + { + Argument.AssertNotNullOrEmpty(fineTuningJobId, nameof(fineTuningJobId)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("FineTuning.RetrieveFineTuningJob"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateRetrieveFineTuningJobRequest(fineTuningJobId, options); + return ClientResult.FromResponse(await _pipeline.ProcessMessageAsync(message, options).ConfigureAwait(false)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Get info about a fine-tuning job. + /// + /// [Learn more about fine-tuning](/docs/guides/fine-tuning) + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the fine-tuning job. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual ClientResult RetrieveFineTuningJob(string fineTuningJobId, RequestOptions options) + { + Argument.AssertNotNullOrEmpty(fineTuningJobId, nameof(fineTuningJobId)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("FineTuning.RetrieveFineTuningJob"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateRetrieveFineTuningJobRequest(fineTuningJobId, options); + return ClientResult.FromResponse(_pipeline.ProcessMessage(message, options)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// Immediately cancel a fine-tune job. + /// The ID of the fine-tuning job to cancel. + /// is null. + /// is an empty string, and was expected to be non-empty. 
+ public virtual async Task> CancelFineTuningJobAsync(string fineTuningJobId) + { + Argument.AssertNotNullOrEmpty(fineTuningJobId, nameof(fineTuningJobId)); + + ClientResult result = await CancelFineTuningJobAsync(fineTuningJobId, DefaultRequestContext).ConfigureAwait(false); + return ClientResult.FromValue(FineTuningJob.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Immediately cancel a fine-tune job. + /// The ID of the fine-tuning job to cancel. + /// is null. + /// is an empty string, and was expected to be non-empty. + public virtual ClientResult CancelFineTuningJob(string fineTuningJobId) + { + Argument.AssertNotNullOrEmpty(fineTuningJobId, nameof(fineTuningJobId)); + + ClientResult result = CancelFineTuningJob(fineTuningJobId, DefaultRequestContext); + return ClientResult.FromValue(FineTuningJob.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Immediately cancel a fine-tune job. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the fine-tuning job to cancel. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task CancelFineTuningJobAsync(string fineTuningJobId, RequestOptions options) + { + Argument.AssertNotNullOrEmpty(fineTuningJobId, nameof(fineTuningJobId)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("FineTuning.CancelFineTuningJob"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateCancelFineTuningJobRequest(fineTuningJobId, options); + return ClientResult.FromResponse(await _pipeline.ProcessMessageAsync(message, options).ConfigureAwait(false)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Immediately cancel a fine-tune job. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the fine-tuning job to cancel. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual ClientResult CancelFineTuningJob(string fineTuningJobId, RequestOptions options) + { + Argument.AssertNotNullOrEmpty(fineTuningJobId, nameof(fineTuningJobId)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("FineTuning.CancelFineTuningJob"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateCancelFineTuningJobRequest(fineTuningJobId, options); + return ClientResult.FromResponse(_pipeline.ProcessMessage(message, options)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// Get status updates for a fine-tuning job. + /// The ID of the fine-tuning job to get events for. 
+ /// Identifier for the last event from the previous pagination request. + /// Number of events to retrieve. + /// is null. + /// is an empty string, and was expected to be non-empty. + public virtual async Task> GetFineTuningEventsAsync(string fineTuningJobId, string after = null, int? limit = null) + { + Argument.AssertNotNullOrEmpty(fineTuningJobId, nameof(fineTuningJobId)); + + ClientResult result = await GetFineTuningEventsAsync(fineTuningJobId, after, limit, DefaultRequestContext).ConfigureAwait(false); + return ClientResult.FromValue(ListFineTuningJobEventsResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Get status updates for a fine-tuning job. + /// The ID of the fine-tuning job to get events for. + /// Identifier for the last event from the previous pagination request. + /// Number of events to retrieve. + /// is null. + /// is an empty string, and was expected to be non-empty. + public virtual ClientResult GetFineTuningEvents(string fineTuningJobId, string after = null, int? limit = null) + { + Argument.AssertNotNullOrEmpty(fineTuningJobId, nameof(fineTuningJobId)); + + ClientResult result = GetFineTuningEvents(fineTuningJobId, after, limit, DefaultRequestContext); + return ClientResult.FromValue(ListFineTuningJobEventsResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Get status updates for a fine-tuning job. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the fine-tuning job to get events for. + /// Identifier for the last event from the previous pagination request. + /// Number of events to retrieve. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task GetFineTuningEventsAsync(string fineTuningJobId, string after, int? limit, RequestOptions options) + { + Argument.AssertNotNullOrEmpty(fineTuningJobId, nameof(fineTuningJobId)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("FineTuning.GetFineTuningEvents"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateGetFineTuningEventsRequest(fineTuningJobId, after, limit, options); + return ClientResult.FromResponse(await _pipeline.ProcessMessageAsync(message, options).ConfigureAwait(false)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Get status updates for a fine-tuning job. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the fine-tuning job to get events for. + /// Identifier for the last event from the previous pagination request. + /// Number of events to retrieve. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. 
+ /// The response returned from the service. + public virtual ClientResult GetFineTuningEvents(string fineTuningJobId, string after, int? limit, RequestOptions options) + { + Argument.AssertNotNullOrEmpty(fineTuningJobId, nameof(fineTuningJobId)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("FineTuning.GetFineTuningEvents"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateGetFineTuningEventsRequest(fineTuningJobId, after, limit, options); + return ClientResult.FromResponse(_pipeline.ProcessMessage(message, options)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + internal PipelineMessage CreateCreateFineTuningJobRequest(BinaryContent content, RequestOptions options) + { + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "POST"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/fine_tuning/jobs"); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); + request.Headers.Set("Content-Type", "application/json"); + request.Content = content; + message.Apply(options); + return message; + } + + internal PipelineMessage CreateGetPaginatedFineTuningJobsRequest(string after, int? limit, RequestOptions options) + { + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "GET"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/fine_tuning/jobs"); + if (after != null) + { + if (uriBuilder.Query != null && uriBuilder.Query.Length > 1) + { + uriBuilder.Query += $"&after={after}"; + } + else + { + uriBuilder.Query = $"after={after}"; + } + } + if (limit != null) + { + if (uriBuilder.Query != null && uriBuilder.Query.Length > 1) + { + uriBuilder.Query += $"&limit={limit.Value}"; + } + else + { + uriBuilder.Query = $"limit={limit.Value}"; + } + } + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); + return message; + } + + internal PipelineMessage CreateRetrieveFineTuningJobRequest(string fineTuningJobId, RequestOptions options) + { + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "GET"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/fine_tuning/jobs/"); + path.Append(fineTuningJobId); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); + return message; + } + + internal PipelineMessage CreateCancelFineTuningJobRequest(string fineTuningJobId, RequestOptions options) + { + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "POST"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/fine_tuning/jobs/"); + path.Append(fineTuningJobId); + path.Append("/cancel"); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); + return message; + } + + internal 
PipelineMessage CreateGetFineTuningEventsRequest(string fineTuningJobId, string after, int? limit, RequestOptions options) + { + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "GET"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/fine_tuning/jobs/"); + path.Append(fineTuningJobId); + path.Append("/events"); + if (after != null) + { + if (uriBuilder.Query != null && uriBuilder.Query.Length > 1) + { + uriBuilder.Query += $"&after={after}"; + } + else + { + uriBuilder.Query = $"after={after}"; + } + } + if (limit != null) + { + if (uriBuilder.Query != null && uriBuilder.Query.Length > 1) + { + uriBuilder.Query += $"&limit={limit.Value}"; + } + else + { + uriBuilder.Query = $"limit={limit.Value}"; + } + } + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); + return message; + } + + private static RequestOptions DefaultRequestContext = new RequestOptions(); + + private static PipelineMessageClassifier _responseErrorClassifier200; + private static PipelineMessageClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= PipelineMessageClassifier.Create(stackalloc ushort[] { 200 }); + } +} diff --git a/.dotnet/src/Generated/Images.cs b/.dotnet/src/Generated/Images.cs new file mode 100644 index 000000000..52e5ba6f1 --- /dev/null +++ b/.dotnet/src/Generated/Images.cs @@ -0,0 +1,407 @@ +// + +using System; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Text; +using System.Threading; +using System.Threading.Tasks; +using OpenAI.Internal.Models; + +namespace OpenAI.Internal +{ + // Data plane generated sub-client. + /// The Images sub-client. + internal partial class Images + { + private const string AuthorizationHeader = "Authorization"; + private readonly ApiKeyCredential _credential; + private const string AuthorizationApiKeyPrefix = "Bearer"; + private readonly ClientPipeline _pipeline; + private readonly Uri _endpoint; + + /// The HTTP pipeline for sending and receiving REST requests and responses. + public virtual ClientPipeline Pipeline => _pipeline; + + /// Initializes a new instance of Images for mocking. + protected Images() + { + } + + /// Initializes a new instance of Images. + /// The HTTP pipeline for sending and receiving REST requests and responses. + /// The key credential to copy. + /// OpenAI Endpoint. + internal Images(ClientPipeline pipeline, ApiKeyCredential credential, Uri endpoint) + { + _pipeline = pipeline; + _credential = credential; + _endpoint = endpoint; + } + + /// Creates an image given a prompt. + /// The to use. + /// is null. + public virtual async Task> CreateImageAsync(CreateImageRequest image) + { + Argument.AssertNotNull(image, nameof(image)); + + using BinaryContent content = BinaryContent.Create(image); + ClientResult result = await CreateImageAsync(content, DefaultRequestContext).ConfigureAwait(false); + return ClientResult.FromValue(ImagesResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Creates an image given a prompt. + /// The to use. + /// is null. 
+ public virtual ClientResult CreateImage(CreateImageRequest image) + { + Argument.AssertNotNull(image, nameof(image)); + + using BinaryContent content = BinaryContent.Create(image); + ClientResult result = CreateImage(content, DefaultRequestContext); + return ClientResult.FromValue(ImagesResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Creates an image given a prompt + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The content to send as the body of the request. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task CreateImageAsync(BinaryContent content, RequestOptions options = null) + { + Argument.AssertNotNull(content, nameof(content)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("Images.CreateImage"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateCreateImageRequest(content, options); + return ClientResult.FromResponse(await _pipeline.ProcessMessageAsync(message, options).ConfigureAwait(false)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Creates an image given a prompt + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The content to send as the body of the request. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual ClientResult CreateImage(BinaryContent content, RequestOptions options = null) + { + Argument.AssertNotNull(content, nameof(content)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("Images.CreateImage"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateCreateImageRequest(content, options); + return ClientResult.FromResponse(_pipeline.ProcessMessage(message, options)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// Creates an edited or extended image given an original image and a prompt. + /// The to use. + /// is null. + public virtual async Task> CreateImageEditAsync(CreateImageEditRequest image) + { + Argument.AssertNotNull(image, nameof(image)); + + using BinaryContent content = BinaryContent.Create(image); + ClientResult result = await CreateImageEditAsync(content, DefaultRequestContext).ConfigureAwait(false); + return ClientResult.FromValue(ImagesResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Creates an edited or extended image given an original image and a prompt. + /// The to use. + /// is null. 
+ public virtual ClientResult CreateImageEdit(CreateImageEditRequest image) + { + Argument.AssertNotNull(image, nameof(image)); + + using BinaryContent content = BinaryContent.Create(image); + ClientResult result = CreateImageEdit(content, DefaultRequestContext); + return ClientResult.FromValue(ImagesResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Creates an edited or extended image given an original image and a prompt. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The content to send as the body of the request. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task CreateImageEditAsync(BinaryContent content, RequestOptions options = null) + { + Argument.AssertNotNull(content, nameof(content)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("Images.CreateImageEdit"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateCreateImageEditRequest(content, options); + return ClientResult.FromResponse(await _pipeline.ProcessMessageAsync(message, options).ConfigureAwait(false)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Creates an edited or extended image given an original image and a prompt. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The content to send as the body of the request. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual ClientResult CreateImageEdit(BinaryContent content, RequestOptions options = null) + { + Argument.AssertNotNull(content, nameof(content)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("Images.CreateImageEdit"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateCreateImageEditRequest(content, options); + return ClientResult.FromResponse(_pipeline.ProcessMessage(message, options)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// Creates an edited or extended image given an original image and a prompt. + /// The to use. + /// is null. + public virtual async Task> CreateImageVariationAsync(CreateImageVariationRequest image) + { + Argument.AssertNotNull(image, nameof(image)); + + using BinaryContent content = BinaryContent.Create(image); + ClientResult result = await CreateImageVariationAsync(content, DefaultRequestContext).ConfigureAwait(false); + return ClientResult.FromValue(ImagesResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Creates an edited or extended image given an original image and a prompt. + /// The to use. + /// is null. 
+ public virtual ClientResult CreateImageVariation(CreateImageVariationRequest image) + { + Argument.AssertNotNull(image, nameof(image)); + + using BinaryContent content = BinaryContent.Create(image); + ClientResult result = CreateImageVariation(content, DefaultRequestContext); + return ClientResult.FromValue(ImagesResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Creates an edited or extended image given an original image and a prompt. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The content to send as the body of the request. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task CreateImageVariationAsync(BinaryContent content, RequestOptions options = null) + { + Argument.AssertNotNull(content, nameof(content)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("Images.CreateImageVariation"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateCreateImageVariationRequest(content, options); + return ClientResult.FromResponse(await _pipeline.ProcessMessageAsync(message, options).ConfigureAwait(false)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Creates an edited or extended image given an original image and a prompt. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The content to send as the body of the request. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// Service returned a non-success status code. + /// The response returned from the service. 
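+        // RequestOptions can suppress throwing on error status codes: the pipeline helper
+        // honors ClientErrorBehaviors.NoThrow (see ClientPipelineExtensions later in this
+        // change). A minimal sketch, with `imagesClient` and `content` assumed in scope:
+        //
+        //   RequestOptions options = new() { ErrorOptions = ClientErrorBehaviors.NoThrow };
+        //   ClientResult result = imagesClient.CreateImageVariation(content, options);
+        //   if (result.GetRawResponse().IsError) { /* inspect the payload instead of catching */ }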
+ public virtual ClientResult CreateImageVariation(BinaryContent content, RequestOptions options = null) + { + Argument.AssertNotNull(content, nameof(content)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("Images.CreateImageVariation"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateCreateImageVariationRequest(content, options); + return ClientResult.FromResponse(_pipeline.ProcessMessage(message, options)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + internal PipelineMessage CreateCreateImageRequest(BinaryContent content, RequestOptions options) + { + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "POST"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/images/generations"); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); + request.Headers.Set("Content-Type", "application/json"); + request.Content = content; + message.Apply(options); + return message; + } + + internal PipelineMessage CreateCreateImageEditRequest(BinaryContent content, RequestOptions options) + { + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "POST"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/images/edits"); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); + request.Headers.Set("content-type", "multipart/form-data"); + request.Content = content; + message.Apply(options); + return message; + } + + internal PipelineMessage CreateCreateImageVariationRequest(BinaryContent content, RequestOptions options) + { + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "POST"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/images/variations"); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); + request.Headers.Set("content-type", "multipart/form-data"); + request.Content = content; + message.Apply(options); + return message; + } + + private static RequestOptions DefaultRequestContext = new RequestOptions(); + + private static PipelineMessageClassifier _responseErrorClassifier200; + private static PipelineMessageClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= PipelineMessageClassifier.Create(stackalloc ushort[] { 200 }); + } +} diff --git a/.dotnet/src/Generated/Internal/Argument.cs b/.dotnet/src/Generated/Internal/Argument.cs new file mode 100644 index 000000000..29cb5ac18 --- /dev/null +++ b/.dotnet/src/Generated/Internal/Argument.cs @@ -0,0 +1,126 @@ +// + +#nullable disable + +using System; +using System.Collections; +using System.Collections.Generic; + +namespace OpenAI +{ + internal static class Argument + { + public static void AssertNotNull(T value, string name) + { + if (value is null) + { + throw new ArgumentNullException(name); + } + } + + public static void AssertNotNull(T? 
value, string name)
+            where T : struct
+        {
+            if (!value.HasValue)
+            {
+                throw new ArgumentNullException(name);
+            }
+        }
+
+        public static void AssertNotNullOrEmpty<T>(IEnumerable<T> value, string name)
+        {
+            if (value is null)
+            {
+                throw new ArgumentNullException(name);
+            }
+            if (value is ICollection<T> collectionOfT && collectionOfT.Count == 0)
+            {
+                throw new ArgumentException("Value cannot be an empty collection.", name);
+            }
+            if (value is ICollection collection && collection.Count == 0)
+            {
+                throw new ArgumentException("Value cannot be an empty collection.", name);
+            }
+            using IEnumerator<T> e = value.GetEnumerator();
+            if (!e.MoveNext())
+            {
+                throw new ArgumentException("Value cannot be an empty collection.", name);
+            }
+        }
+
+        public static void AssertNotNullOrEmpty(string value, string name)
+        {
+            if (value is null)
+            {
+                throw new ArgumentNullException(name);
+            }
+            if (value.Length == 0)
+            {
+                throw new ArgumentException("Value cannot be an empty string.", name);
+            }
+        }
+
+        public static void AssertNotNullOrWhiteSpace(string value, string name)
+        {
+            if (value is null)
+            {
+                throw new ArgumentNullException(name);
+            }
+            if (string.IsNullOrWhiteSpace(value))
+            {
+                throw new ArgumentException("Value cannot be empty or contain only white-space characters.", name);
+            }
+        }
+
+        public static void AssertNotDefault<T>(ref T value, string name)
+            where T : struct, IEquatable<T>
+        {
+            if (value.Equals(default))
+            {
+                throw new ArgumentException("Value cannot be empty.", name);
+            }
+        }
+
+        public static void AssertInRange<T>(T value, T minimum, T maximum, string name)
+            where T : notnull, IComparable<T>
+        {
+            if (minimum.CompareTo(value) > 0)
+            {
+                throw new ArgumentOutOfRangeException(name, "Value is less than the minimum allowed.");
+            }
+            if (maximum.CompareTo(value) < 0)
+            {
+                throw new ArgumentOutOfRangeException(name, "Value is greater than the maximum allowed.");
+            }
+        }
+
+        public static void AssertEnumDefined(Type enumType, object value, string name)
+        {
+            if (!Enum.IsDefined(enumType, value))
+            {
+                throw new ArgumentException($"Value not defined for {enumType.FullName}.", name);
+            }
+        }
+
+        public static T CheckNotNull<T>(T value, string name)
+            where T : class
+        {
+            AssertNotNull(value, name);
+            return value;
+        }
+
+        public static string CheckNotNullOrEmpty(string value, string name)
+        {
+            AssertNotNullOrEmpty(value, name);
+            return value;
+        }
+
+        public static void AssertNull<T>(T value, string name, string message = null)
+        {
+            if (value != null)
+            {
+                throw new ArgumentException(message ??
"Value must be null.", name); + } + } + } +} diff --git a/.dotnet/src/Generated/Internal/ChangeTrackingDictionary.cs b/.dotnet/src/Generated/Internal/ChangeTrackingDictionary.cs new file mode 100644 index 000000000..2eb05d04a --- /dev/null +++ b/.dotnet/src/Generated/Internal/ChangeTrackingDictionary.cs @@ -0,0 +1,164 @@ +// + +#nullable disable + +using System; +using System.Collections; +using System.Collections.Generic; + +namespace OpenAI +{ + internal class ChangeTrackingDictionary : IDictionary, IReadOnlyDictionary where TKey : notnull + { + private IDictionary _innerDictionary; + + public ChangeTrackingDictionary() + { + } + + public ChangeTrackingDictionary(IDictionary dictionary) + { + if (dictionary == null) + { + return; + } + _innerDictionary = new Dictionary(dictionary); + } + + public ChangeTrackingDictionary(IReadOnlyDictionary dictionary) + { + if (dictionary == null) + { + return; + } + _innerDictionary = new Dictionary(); + foreach (var pair in dictionary) + { + _innerDictionary.Add(pair); + } + } + + public bool IsUndefined => _innerDictionary == null; + + public int Count => IsUndefined ? 0 : EnsureDictionary().Count; + + public bool IsReadOnly => IsUndefined ? false : EnsureDictionary().IsReadOnly; + + public ICollection Keys => IsUndefined ? Array.Empty() : EnsureDictionary().Keys; + + public ICollection Values => IsUndefined ? Array.Empty() : EnsureDictionary().Values; + + public TValue this[TKey key] + { + get + { + if (IsUndefined) + { + throw new KeyNotFoundException(nameof(key)); + } + return EnsureDictionary()[key]; + } + set + { + EnsureDictionary()[key] = value; + } + } + + IEnumerable IReadOnlyDictionary.Keys => Keys; + + IEnumerable IReadOnlyDictionary.Values => Values; + + public IEnumerator> GetEnumerator() + { + if (IsUndefined) + { + IEnumerator> enumerateEmpty() + { + yield break; + } + return enumerateEmpty(); + } + return EnsureDictionary().GetEnumerator(); + } + + IEnumerator IEnumerable.GetEnumerator() + { + return GetEnumerator(); + } + + public void Add(KeyValuePair item) + { + EnsureDictionary().Add(item); + } + + public void Clear() + { + EnsureDictionary().Clear(); + } + + public bool Contains(KeyValuePair item) + { + if (IsUndefined) + { + return false; + } + return EnsureDictionary().Contains(item); + } + + public void CopyTo(KeyValuePair[] array, int index) + { + if (IsUndefined) + { + return; + } + EnsureDictionary().CopyTo(array, index); + } + + public bool Remove(KeyValuePair item) + { + if (IsUndefined) + { + return false; + } + return EnsureDictionary().Remove(item); + } + + public void Add(TKey key, TValue value) + { + EnsureDictionary().Add(key, value); + } + + public bool ContainsKey(TKey key) + { + if (IsUndefined) + { + return false; + } + return EnsureDictionary().ContainsKey(key); + } + + public bool Remove(TKey key) + { + if (IsUndefined) + { + return false; + } + return EnsureDictionary().Remove(key); + } + + public bool TryGetValue(TKey key, out TValue value) + { + if (IsUndefined) + { + value = default; + return false; + } + return EnsureDictionary().TryGetValue(key, out value); + } + + public IDictionary EnsureDictionary() + { + return _innerDictionary ??= new Dictionary(); + } + } +} diff --git a/.dotnet/src/Generated/Internal/ChangeTrackingList.cs b/.dotnet/src/Generated/Internal/ChangeTrackingList.cs new file mode 100644 index 000000000..c64afc504 --- /dev/null +++ b/.dotnet/src/Generated/Internal/ChangeTrackingList.cs @@ -0,0 +1,150 @@ +// + +#nullable disable + +using System; +using System.Collections; +using 
System.Collections.Generic; +using System.Linq; + +namespace OpenAI +{ + internal class ChangeTrackingList : IList, IReadOnlyList + { + private IList _innerList; + + public ChangeTrackingList() + { + } + + public ChangeTrackingList(IList innerList) + { + if (innerList != null) + { + _innerList = innerList; + } + } + + public ChangeTrackingList(IReadOnlyList innerList) + { + if (innerList != null) + { + _innerList = innerList.ToList(); + } + } + + public bool IsUndefined => _innerList == null; + + public int Count => IsUndefined ? 0 : EnsureList().Count; + + public bool IsReadOnly => IsUndefined ? false : EnsureList().IsReadOnly; + + public T this[int index] + { + get + { + if (IsUndefined) + { + throw new ArgumentOutOfRangeException(nameof(index)); + } + return EnsureList()[index]; + } + set + { + if (IsUndefined) + { + throw new ArgumentOutOfRangeException(nameof(index)); + } + EnsureList()[index] = value; + } + } + + public void Reset() + { + _innerList = null; + } + + public IEnumerator GetEnumerator() + { + if (IsUndefined) + { + IEnumerator enumerateEmpty() + { + yield break; + } + return enumerateEmpty(); + } + return EnsureList().GetEnumerator(); + } + + IEnumerator IEnumerable.GetEnumerator() + { + return GetEnumerator(); + } + + public void Add(T item) + { + EnsureList().Add(item); + } + + public void Clear() + { + EnsureList().Clear(); + } + + public bool Contains(T item) + { + if (IsUndefined) + { + return false; + } + return EnsureList().Contains(item); + } + + public void CopyTo(T[] array, int arrayIndex) + { + if (IsUndefined) + { + return; + } + EnsureList().CopyTo(array, arrayIndex); + } + + public bool Remove(T item) + { + if (IsUndefined) + { + return false; + } + return EnsureList().Remove(item); + } + + public int IndexOf(T item) + { + if (IsUndefined) + { + return -1; + } + return EnsureList().IndexOf(item); + } + + public void Insert(int index, T item) + { + EnsureList().Insert(index, item); + } + + public void RemoveAt(int index) + { + if (IsUndefined) + { + throw new ArgumentOutOfRangeException(nameof(index)); + } + EnsureList().RemoveAt(index); + } + + public IList EnsureList() + { + return _innerList ??= new List(); + } + } +} diff --git a/.dotnet/src/Generated/Internal/ClientPipelineExtensions.cs b/.dotnet/src/Generated/Internal/ClientPipelineExtensions.cs new file mode 100644 index 000000000..c34101d11 --- /dev/null +++ b/.dotnet/src/Generated/Internal/ClientPipelineExtensions.cs @@ -0,0 +1,49 @@ +// + +#nullable disable + +using System; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Threading; +using System.Threading.Tasks; + +namespace OpenAI +{ + internal static class ClientPipelineExtensions + { + public static async ValueTask ProcessMessageAsync(this ClientPipeline pipeline, PipelineMessage message, RequestOptions requestContext, CancellationToken cancellationToken = default) + { + await pipeline.SendAsync(message).ConfigureAwait(false); + + if (message.Response == null) + { + throw new InvalidOperationException("Failed to receive Result."); + } + + if (!message.Response.IsError || requestContext.ErrorOptions == ClientErrorBehaviors.NoThrow) + { + return message.Response; + } + + throw new ClientResultException(message.Response); + } + + public static PipelineResponse ProcessMessage(this ClientPipeline pipeline, PipelineMessage message, RequestOptions requestContext, CancellationToken cancellationToken = default) + { + pipeline.Send(message); + + if (message.Response == null) + { + throw new InvalidOperationException("Failed 
to receive Result.");
+            }
+
+            if (!message.Response.IsError || requestContext.ErrorOptions == ClientErrorBehaviors.NoThrow)
+            {
+                return message.Response;
+            }
+
+            throw new ClientResultException(message.Response);
+        }
+    }
+}
diff --git a/.dotnet/src/Generated/Internal/ErrorResult.cs b/.dotnet/src/Generated/Internal/ErrorResult.cs
new file mode 100644
index 000000000..42938626f
--- /dev/null
+++ b/.dotnet/src/Generated/Internal/ErrorResult.cs
@@ -0,0 +1,23 @@
+// <auto-generated/>
+
+#nullable disable
+
+using System.ClientModel;
+using System.ClientModel.Primitives;
+
+namespace OpenAI
+{
+    internal class ErrorResult<T> : ClientResult<T>
+    {
+        private readonly PipelineResponse _response;
+        private readonly ClientResultException _exception;
+
+        public ErrorResult(PipelineResponse response, ClientResultException exception) : base(default, response)
+        {
+            _response = response;
+            _exception = exception;
+        }
+
+        public override T Value => throw _exception;
+    }
+}
diff --git a/.dotnet/src/Generated/Internal/Optional.cs b/.dotnet/src/Generated/Internal/Optional.cs
new file mode 100644
index 000000000..7b3fe4806
--- /dev/null
+++ b/.dotnet/src/Generated/Internal/Optional.cs
@@ -0,0 +1,48 @@
+// <auto-generated/>
+
+#nullable disable
+
+using System.Collections.Generic;
+using System.Text.Json;
+
+namespace OpenAI
+{
+    internal static class Optional
+    {
+        public static bool IsCollectionDefined<T>(IEnumerable<T> collection)
+        {
+            return !(collection is ChangeTrackingList<T> changeTrackingList && changeTrackingList.IsUndefined);
+        }
+
+        public static bool IsCollectionDefined<T>(IDictionary<string, T> collection)
+        {
+            return !(collection is ChangeTrackingDictionary<string, T> changeTrackingDictionary && changeTrackingDictionary.IsUndefined);
+        }
+
+        public static bool IsCollectionDefined<T>(IReadOnlyDictionary<string, T> collection)
+        {
+            return !(collection is ChangeTrackingDictionary<string, T> changeTrackingDictionary && changeTrackingDictionary.IsUndefined);
+        }
+
+        public static bool IsDefined<T>(T?
value) + where T : struct + { + return value.HasValue; + } + + public static bool IsDefined(object value) + { + return value != null; + } + + public static bool IsDefined(JsonElement value) + { + return value.ValueKind != JsonValueKind.Undefined; + } + + public static bool IsDefined(string value) + { + return value != null; + } + } +} diff --git a/.dotnet/src/Generated/Internal/Utf8JsonRequestBody.cs b/.dotnet/src/Generated/Internal/Utf8JsonRequestBody.cs new file mode 100644 index 000000000..c86d98827 --- /dev/null +++ b/.dotnet/src/Generated/Internal/Utf8JsonRequestBody.cs @@ -0,0 +1,53 @@ +// + +#nullable disable + +using System; +using System.ClientModel; +using System.IO; +using System.Text.Json; +using System.Threading; +using System.Threading.Tasks; + +namespace OpenAI +{ + internal class Utf8JsonRequestBody : BinaryContent + { + private readonly MemoryStream _stream; + private readonly BinaryContent _content; + + public Utf8JsonRequestBody() + { + _stream = new MemoryStream(); + _content = BinaryContent.Create(BinaryData.FromStream(_stream)); + JsonWriter = new Utf8JsonWriter(_stream); + } + + public Utf8JsonWriter JsonWriter { get; } + + public override async Task WriteToAsync(Stream stream, CancellationToken cancellationToken = default) + { + await JsonWriter.FlushAsync().ConfigureAwait(false); + await _content.WriteToAsync(stream, cancellationToken).ConfigureAwait(false); + } + + public override void WriteTo(Stream stream, CancellationToken cancellationToken = default) + { + JsonWriter.Flush(); + _content.WriteTo(stream, cancellationToken); + } + + public override bool TryComputeLength(out long length) + { + length = JsonWriter.BytesCommitted + JsonWriter.BytesPending; + return true; + } + + public override void Dispose() + { + JsonWriter.Dispose(); + _content.Dispose(); + _stream.Dispose(); + } + } +} diff --git a/.dotnet/src/Generated/Messages.cs b/.dotnet/src/Generated/Messages.cs new file mode 100644 index 000000000..22557309c --- /dev/null +++ b/.dotnet/src/Generated/Messages.cs @@ -0,0 +1,1078 @@ +// + +using System; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Text; +using System.Threading; +using System.Threading.Tasks; +using OpenAI.Internal.Models; + +namespace OpenAI.Internal +{ + // Data plane generated sub-client. + /// The Messages sub-client. + internal partial class Messages + { + private const string AuthorizationHeader = "Authorization"; + private readonly ApiKeyCredential _credential; + private const string AuthorizationApiKeyPrefix = "Bearer"; + private readonly ClientPipeline _pipeline; + private readonly Uri _endpoint; + + /// The HTTP pipeline for sending and receiving REST requests and responses. + public virtual ClientPipeline Pipeline => _pipeline; + + /// Initializes a new instance of Messages for mocking. + protected Messages() + { + } + + /// Initializes a new instance of Messages. + /// The HTTP pipeline for sending and receiving REST requests and responses. + /// The key credential to copy. + /// OpenAI Endpoint. + internal Messages(ClientPipeline pipeline, ApiKeyCredential credential, Uri endpoint) + { + _pipeline = pipeline; + _credential = credential; + _endpoint = endpoint; + } + + /// Create a message. + /// The ID of the [thread](/docs/api-reference/threads) to create a message for. + /// The to use. + /// or is null. + /// is an empty string, and was expected to be non-empty. 
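+        // Convenience-layer sketch for this sub-client; the thread ID is illustrative and
+        // the CreateMessageRequest construction is defined elsewhere in the library:
+        //
+        //   ClientResult<MessageObject> created =
+        //       await messages.CreateMessageAsync("thread_abc123", request);
+        //   MessageObject createdMessage = created.Value;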
+ public virtual async Task> CreateMessageAsync(string threadId, CreateMessageRequest message) + { + Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); + Argument.AssertNotNull(message, nameof(message)); + + using BinaryContent content = BinaryContent.Create(message); + ClientResult result = await CreateMessageAsync(threadId, content, DefaultRequestContext).ConfigureAwait(false); + return ClientResult.FromValue(MessageObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Create a message. + /// The ID of the [thread](/docs/api-reference/threads) to create a message for. + /// The to use. + /// or is null. + /// is an empty string, and was expected to be non-empty. + public virtual ClientResult CreateMessage(string threadId, CreateMessageRequest message) + { + Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); + Argument.AssertNotNull(message, nameof(message)); + + using BinaryContent content = BinaryContent.Create(message); + ClientResult result = CreateMessage(threadId, content, DefaultRequestContext); + return ClientResult.FromValue(MessageObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Create a message. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the [thread](/docs/api-reference/threads) to create a message for. + /// The content to send as the body of the request. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task CreateMessageAsync(string threadId, BinaryContent content, RequestOptions options = null) + { + Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); + Argument.AssertNotNull(content, nameof(content)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("Messages.CreateMessage"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateCreateMessageRequest(threadId, content, options); + return ClientResult.FromResponse(await _pipeline.ProcessMessageAsync(message, options).ConfigureAwait(false)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Create a message. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the [thread](/docs/api-reference/threads) to create a message for. + /// The content to send as the body of the request. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. 
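+        // Protocol-layer sketch with a hand-built JSON body. The property names are
+        // assumed from the /threads/{thread_id}/messages REST contract rather than taken
+        // from this file:
+        //
+        //   using BinaryContent body = BinaryContent.Create(
+        //       BinaryData.FromString("{\"role\":\"user\",\"content\":\"Hello\"}"));
+        //   ClientResult result = messages.CreateMessage(threadId, body);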
+ public virtual ClientResult CreateMessage(string threadId, BinaryContent content, RequestOptions options = null) + { + Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); + Argument.AssertNotNull(content, nameof(content)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("Messages.CreateMessage"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateCreateMessageRequest(threadId, content, options); + return ClientResult.FromResponse(_pipeline.ProcessMessage(message, options)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// Returns a list of messages for a given thread. + /// The ID of the [thread](/docs/api-reference/threads) the messages belong to. + /// + /// A limit on the number of objects to be returned. Limit can range between 1 and 100, and the + /// default is 20. + /// + /// + /// Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` + /// for descending order. + /// + /// + /// A cursor for use in pagination. `after` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include after=obj_foo in order to fetch the next page of the list. + /// + /// + /// A cursor for use in pagination. `before` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include before=obj_foo in order to fetch the previous page of the list. + /// + /// is null. + /// is an empty string, and was expected to be non-empty. + public virtual async Task> GetMessagesAsync(string threadId, int? limit = null, ListOrder? order = null, string after = null, string before = null) + { + Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); + + ClientResult result = await GetMessagesAsync(threadId, limit, order?.ToString(), after, before, DefaultRequestContext).ConfigureAwait(false); + return ClientResult.FromValue(ListMessagesResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Returns a list of messages for a given thread. + /// The ID of the [thread](/docs/api-reference/threads) the messages belong to. + /// + /// A limit on the number of objects to be returned. Limit can range between 1 and 100, and the + /// default is 20. + /// + /// + /// Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` + /// for descending order. + /// + /// + /// A cursor for use in pagination. `after` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include after=obj_foo in order to fetch the next page of the list. + /// + /// + /// A cursor for use in pagination. `before` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include before=obj_foo in order to fetch the previous page of the list. + /// + /// is null. + /// is an empty string, and was expected to be non-empty. + public virtual ClientResult GetMessages(string threadId, int? limit = null, ListOrder? 
order = null, string after = null, string before = null) + { + Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); + + ClientResult result = GetMessages(threadId, limit, order?.ToString(), after, before, DefaultRequestContext); + return ClientResult.FromValue(ListMessagesResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Returns a list of messages for a given thread. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the [thread](/docs/api-reference/threads) the messages belong to. + /// + /// A limit on the number of objects to be returned. Limit can range between 1 and 100, and the + /// default is 20. + /// + /// + /// Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` + /// for descending order. Allowed values: "asc" | "desc" + /// + /// + /// A cursor for use in pagination. `after` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include after=obj_foo in order to fetch the next page of the list. + /// + /// + /// A cursor for use in pagination. `before` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include before=obj_foo in order to fetch the previous page of the list. + /// + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task GetMessagesAsync(string threadId, int? limit, string order, string after, string before, RequestOptions options) + { + Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("Messages.GetMessages"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateGetMessagesRequest(threadId, limit, order, after, before, options); + return ClientResult.FromResponse(await _pipeline.ProcessMessageAsync(message, options).ConfigureAwait(false)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Returns a list of messages for a given thread. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the [thread](/docs/api-reference/threads) the messages belong to. + /// + /// A limit on the number of objects to be returned. Limit can range between 1 and 100, and the + /// default is 20. + /// + /// + /// Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` + /// for descending order. Allowed values: "asc" | "desc" + /// + /// + /// A cursor for use in pagination. `after` is an object ID that defines your place in the list. 
+ /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include after=obj_foo in order to fetch the next page of the list. + /// + /// + /// A cursor for use in pagination. `before` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include before=obj_foo in order to fetch the previous page of the list. + /// + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual ClientResult GetMessages(string threadId, int? limit, string order, string after, string before, RequestOptions options) + { + Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("Messages.GetMessages"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateGetMessagesRequest(threadId, limit, order, after, before, options); + return ClientResult.FromResponse(_pipeline.ProcessMessage(message, options)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// Retrieve a message. + /// The ID of the [thread](/docs/api-reference/threads) to which this message belongs. + /// The ID of the message to retrieve. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + public virtual async Task> GetMessageAsync(string threadId, string messageId) + { + Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); + Argument.AssertNotNullOrEmpty(messageId, nameof(messageId)); + + ClientResult result = await GetMessageAsync(threadId, messageId, DefaultRequestContext).ConfigureAwait(false); + return ClientResult.FromValue(MessageObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Retrieve a message. + /// The ID of the [thread](/docs/api-reference/threads) to which this message belongs. + /// The ID of the message to retrieve. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + public virtual ClientResult GetMessage(string threadId, string messageId) + { + Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); + Argument.AssertNotNullOrEmpty(messageId, nameof(messageId)); + + ClientResult result = GetMessage(threadId, messageId, DefaultRequestContext); + return ClientResult.FromValue(MessageObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Retrieve a message. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the [thread](/docs/api-reference/threads) to which this message belongs. + /// The ID of the message to retrieve. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. 
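+        // Cursor pagination for the GetMessages overloads above: feed the ID of the last
+        // object you received back in as `after` to fetch the next page (the IDs shown
+        // are illustrative):
+        //
+        //   var firstPage = await messages.GetMessagesAsync(threadId, limit: 20);
+        //   var nextPage = await messages.GetMessagesAsync(threadId, limit: 20, after: "msg_last_seen");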
+ public virtual async Task GetMessageAsync(string threadId, string messageId, RequestOptions options) + { + Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); + Argument.AssertNotNullOrEmpty(messageId, nameof(messageId)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("Messages.GetMessage"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateGetMessageRequest(threadId, messageId, options); + return ClientResult.FromResponse(await _pipeline.ProcessMessageAsync(message, options).ConfigureAwait(false)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Retrieve a message. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the [thread](/docs/api-reference/threads) to which this message belongs. + /// The ID of the message to retrieve. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual ClientResult GetMessage(string threadId, string messageId, RequestOptions options) + { + Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); + Argument.AssertNotNullOrEmpty(messageId, nameof(messageId)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("Messages.GetMessage"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateGetMessageRequest(threadId, messageId, options); + return ClientResult.FromResponse(_pipeline.ProcessMessage(message, options)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// Modifies a message. + /// The ID of the thread to which this message belongs. + /// The ID of the message to modify. + /// The to use. + /// , or is null. + /// or is an empty string, and was expected to be non-empty. + public virtual async Task> ModifyMessageAsync(string threadId, string messageId, ModifyMessageRequest message) + { + Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); + Argument.AssertNotNullOrEmpty(messageId, nameof(messageId)); + Argument.AssertNotNull(message, nameof(message)); + + using BinaryContent content = BinaryContent.Create(message); + ClientResult result = await ModifyMessageAsync(threadId, messageId, content, DefaultRequestContext).ConfigureAwait(false); + return ClientResult.FromValue(MessageObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Modifies a message. + /// The ID of the thread to which this message belongs. + /// The ID of the message to modify. + /// The to use. + /// , or is null. + /// or is an empty string, and was expected to be non-empty. 
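+        // Per the REST API this wraps, modification is limited to the message's metadata
+        // map; the ModifyMessageRequest body sketch below is assumed, not taken from this
+        // file:
+        //
+        //   { "metadata": { "reviewed": "true" } }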
+ public virtual ClientResult ModifyMessage(string threadId, string messageId, ModifyMessageRequest message) + { + Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); + Argument.AssertNotNullOrEmpty(messageId, nameof(messageId)); + Argument.AssertNotNull(message, nameof(message)); + + using BinaryContent content = BinaryContent.Create(message); + ClientResult result = ModifyMessage(threadId, messageId, content, DefaultRequestContext); + return ClientResult.FromValue(MessageObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Modifies a message. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the thread to which this message belongs. + /// The ID of the message to modify. + /// The content to send as the body of the request. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// , or is null. + /// or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task ModifyMessageAsync(string threadId, string messageId, BinaryContent content, RequestOptions options = null) + { + Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); + Argument.AssertNotNullOrEmpty(messageId, nameof(messageId)); + Argument.AssertNotNull(content, nameof(content)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("Messages.ModifyMessage"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateModifyMessageRequest(threadId, messageId, content, options); + return ClientResult.FromResponse(await _pipeline.ProcessMessageAsync(message, options).ConfigureAwait(false)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Modifies a message. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the thread to which this message belongs. + /// The ID of the message to modify. + /// The content to send as the body of the request. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// , or is null. + /// or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. 
+ public virtual ClientResult ModifyMessage(string threadId, string messageId, BinaryContent content, RequestOptions options = null) + { + Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); + Argument.AssertNotNullOrEmpty(messageId, nameof(messageId)); + Argument.AssertNotNull(content, nameof(content)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("Messages.ModifyMessage"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateModifyMessageRequest(threadId, messageId, content, options); + return ClientResult.FromResponse(_pipeline.ProcessMessage(message, options)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// Returns a list of message files. + /// The ID of the thread that the message and files belong to. + /// The ID of the message that the files belongs to. + /// + /// A limit on the number of objects to be returned. Limit can range between 1 and 100, and the + /// default is 20. + /// + /// + /// Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` + /// for descending order. + /// + /// + /// A cursor for use in pagination. `after` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include after=obj_foo in order to fetch the next page of the list. + /// + /// + /// A cursor for use in pagination. `before` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include before=obj_foo in order to fetch the previous page of the list. + /// + /// or is null. + /// or is an empty string, and was expected to be non-empty. + public virtual async Task> GetMessageFilesAsync(string threadId, string messageId, int? limit = null, ListOrder? order = null, string after = null, string before = null) + { + Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); + Argument.AssertNotNullOrEmpty(messageId, nameof(messageId)); + + ClientResult result = await GetMessageFilesAsync(threadId, messageId, limit, order?.ToString(), after, before, DefaultRequestContext).ConfigureAwait(false); + return ClientResult.FromValue(ListMessageFilesResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Returns a list of message files. + /// The ID of the thread that the message and files belong to. + /// The ID of the message that the files belongs to. + /// + /// A limit on the number of objects to be returned. Limit can range between 1 and 100, and the + /// default is 20. + /// + /// + /// Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` + /// for descending order. + /// + /// + /// A cursor for use in pagination. `after` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include after=obj_foo in order to fetch the next page of the list. + /// + /// + /// A cursor for use in pagination. `before` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include before=obj_foo in order to fetch the previous page of the list. + /// + /// or is null. + /// or is an empty string, and was expected to be non-empty. 
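+        // Note: the commented-out ClientDiagnostics.CreateSpan / scope.Failed calls found
+        // throughout these methods are disabled tracing placeholders left by the code
+        // generator; every catch block currently rethrows without recording telemetry.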
+ public virtual ClientResult GetMessageFiles(string threadId, string messageId, int? limit = null, ListOrder? order = null, string after = null, string before = null) + { + Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); + Argument.AssertNotNullOrEmpty(messageId, nameof(messageId)); + + ClientResult result = GetMessageFiles(threadId, messageId, limit, order?.ToString(), after, before, DefaultRequestContext); + return ClientResult.FromValue(ListMessageFilesResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Returns a list of message files. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the thread that the message and files belong to. + /// The ID of the message that the files belongs to. + /// + /// A limit on the number of objects to be returned. Limit can range between 1 and 100, and the + /// default is 20. + /// + /// + /// Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` + /// for descending order. Allowed values: "asc" | "desc" + /// + /// + /// A cursor for use in pagination. `after` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include after=obj_foo in order to fetch the next page of the list. + /// + /// + /// A cursor for use in pagination. `before` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include before=obj_foo in order to fetch the previous page of the list. + /// + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task GetMessageFilesAsync(string threadId, string messageId, int? limit, string order, string after, string before, RequestOptions options) + { + Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); + Argument.AssertNotNullOrEmpty(messageId, nameof(messageId)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("Messages.GetMessageFiles"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateGetMessageFilesRequest(threadId, messageId, limit, order, after, before, options); + return ClientResult.FromResponse(await _pipeline.ProcessMessageAsync(message, options).ConfigureAwait(false)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Returns a list of message files. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the thread that the message and files belong to. + /// The ID of the message that the files belongs to. + /// + /// A limit on the number of objects to be returned. Limit can range between 1 and 100, and the + /// default is 20. 
+ /// + /// + /// Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` + /// for descending order. Allowed values: "asc" | "desc" + /// + /// + /// A cursor for use in pagination. `after` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include after=obj_foo in order to fetch the next page of the list. + /// + /// + /// A cursor for use in pagination. `before` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include before=obj_foo in order to fetch the previous page of the list. + /// + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual ClientResult GetMessageFiles(string threadId, string messageId, int? limit, string order, string after, string before, RequestOptions options) + { + Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); + Argument.AssertNotNullOrEmpty(messageId, nameof(messageId)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("Messages.GetMessageFiles"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateGetMessageFilesRequest(threadId, messageId, limit, order, after, before, options); + return ClientResult.FromResponse(_pipeline.ProcessMessage(message, options)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// Retrieves a message file. + /// The ID of the thread to which the message and File belong. + /// The ID of the message the file belongs to. + /// The ID of the file being retrieved. + /// , or is null. + /// , or is an empty string, and was expected to be non-empty. + public virtual async Task> GetMessageFileAsync(string threadId, string messageId, string fileId) + { + Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); + Argument.AssertNotNullOrEmpty(messageId, nameof(messageId)); + Argument.AssertNotNullOrEmpty(fileId, nameof(fileId)); + + ClientResult result = await GetMessageFileAsync(threadId, messageId, fileId, DefaultRequestContext).ConfigureAwait(false); + return ClientResult.FromValue(MessageFileObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Retrieves a message file. + /// The ID of the thread to which the message and File belong. + /// The ID of the message the file belongs to. + /// The ID of the file being retrieved. + /// , or is null. + /// , or is an empty string, and was expected to be non-empty. + public virtual ClientResult GetMessageFile(string threadId, string messageId, string fileId) + { + Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); + Argument.AssertNotNullOrEmpty(messageId, nameof(messageId)); + Argument.AssertNotNullOrEmpty(fileId, nameof(fileId)); + + ClientResult result = GetMessageFile(threadId, messageId, fileId, DefaultRequestContext); + return ClientResult.FromValue(MessageFileObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Retrieves a message file. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. 
+ /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the thread to which the message and File belong. + /// The ID of the message the file belongs to. + /// The ID of the file being retrieved. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// , or is null. + /// , or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task GetMessageFileAsync(string threadId, string messageId, string fileId, RequestOptions options) + { + Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); + Argument.AssertNotNullOrEmpty(messageId, nameof(messageId)); + Argument.AssertNotNullOrEmpty(fileId, nameof(fileId)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("Messages.GetMessageFile"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateGetMessageFileRequest(threadId, messageId, fileId, options); + return ClientResult.FromResponse(await _pipeline.ProcessMessageAsync(message, options).ConfigureAwait(false)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Retrieves a message file. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the thread to which the message and File belong. + /// The ID of the message the file belongs to. + /// The ID of the file being retrieved. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// , or is null. + /// , or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. 
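+        // The Create*Request factories that follow share one recipe: copy the endpoint
+        // into a UriBuilder, append the literal route segments and IDs via StringBuilder,
+        // add optional query parameters one by one, and classify only HTTP 200 as success
+        // through ResponseErrorClassifier200. Note that unlike the POST builders, the GET
+        // builders below never call message.Apply(options), so per-call request options
+        // are not applied to those messages.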
+ public virtual ClientResult GetMessageFile(string threadId, string messageId, string fileId, RequestOptions options) + { + Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); + Argument.AssertNotNullOrEmpty(messageId, nameof(messageId)); + Argument.AssertNotNullOrEmpty(fileId, nameof(fileId)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("Messages.GetMessageFile"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateGetMessageFileRequest(threadId, messageId, fileId, options); + return ClientResult.FromResponse(_pipeline.ProcessMessage(message, options)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + internal PipelineMessage CreateCreateMessageRequest(string threadId, BinaryContent content, RequestOptions options) + { + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "POST"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/threads/"); + path.Append(threadId); + path.Append("/messages"); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); + request.Headers.Set("Content-Type", "application/json"); + request.Content = content; + message.Apply(options); + return message; + } + + internal PipelineMessage CreateGetMessagesRequest(string threadId, int? limit, string order, string after, string before, RequestOptions options) + { + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "GET"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/threads/"); + path.Append(threadId); + path.Append("/messages"); + if (limit != null) + { + if (uriBuilder.Query != null && uriBuilder.Query.Length > 1) + { + uriBuilder.Query += $"&limit={limit.Value}"; + } + else + { + uriBuilder.Query = $"limit={limit.Value}"; + } + } + if (order != null) + { + if (uriBuilder.Query != null && uriBuilder.Query.Length > 1) + { + uriBuilder.Query += $"&order={order}"; + } + else + { + uriBuilder.Query = $"order={order}"; + } + } + if (after != null) + { + if (uriBuilder.Query != null && uriBuilder.Query.Length > 1) + { + uriBuilder.Query += $"&after={after}"; + } + else + { + uriBuilder.Query = $"after={after}"; + } + } + if (before != null) + { + if (uriBuilder.Query != null && uriBuilder.Query.Length > 1) + { + uriBuilder.Query += $"&before={before}"; + } + else + { + uriBuilder.Query = $"before={before}"; + } + } + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); + return message; + } + + internal PipelineMessage CreateGetMessageRequest(string threadId, string messageId, RequestOptions options) + { + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "GET"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/threads/"); + path.Append(threadId); + path.Append("/messages/"); + path.Append(messageId); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); + return message; + } + + internal PipelineMessage 
CreateModifyMessageRequest(string threadId, string messageId, BinaryContent content, RequestOptions options) + { + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "POST"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/threads/"); + path.Append(threadId); + path.Append("/messages/"); + path.Append(messageId); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); + request.Headers.Set("Content-Type", "application/json"); + request.Content = content; + message.Apply(options); + return message; + } + + internal PipelineMessage CreateGetMessageFilesRequest(string threadId, string messageId, int? limit, string order, string after, string before, RequestOptions options) + { + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "GET"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/threads/"); + path.Append(threadId); + path.Append("/messages/"); + path.Append(messageId); + path.Append("/files"); + if (limit != null) + { + if (uriBuilder.Query != null && uriBuilder.Query.Length > 1) + { + uriBuilder.Query += $"&limit={limit.Value}"; + } + else + { + uriBuilder.Query = $"limit={limit.Value}"; + } + } + if (order != null) + { + if (uriBuilder.Query != null && uriBuilder.Query.Length > 1) + { + uriBuilder.Query += $"&order={order}"; + } + else + { + uriBuilder.Query = $"order={order}"; + } + } + if (after != null) + { + if (uriBuilder.Query != null && uriBuilder.Query.Length > 1) + { + uriBuilder.Query += $"&after={after}"; + } + else + { + uriBuilder.Query = $"after={after}"; + } + } + if (before != null) + { + if (uriBuilder.Query != null && uriBuilder.Query.Length > 1) + { + uriBuilder.Query += $"&before={before}"; + } + else + { + uriBuilder.Query = $"before={before}"; + } + } + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); + return message; + } + + internal PipelineMessage CreateGetMessageFileRequest(string threadId, string messageId, string fileId, RequestOptions options) + { + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "GET"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/threads/"); + path.Append(threadId); + path.Append("/messages/"); + path.Append(messageId); + path.Append("/files/"); + path.Append(fileId); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); + return message; + } + + private static RequestOptions DefaultRequestContext = new RequestOptions(); + + private static PipelineMessageClassifier _responseErrorClassifier200; + private static PipelineMessageClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= PipelineMessageClassifier.Create(stackalloc ushort[] { 200 }); + } +} diff --git a/.dotnet/src/Generated/Models/AssistantFileObject.Serialization.cs b/.dotnet/src/Generated/Models/AssistantFileObject.Serialization.cs new file mode 100644 index 000000000..452f90c5c --- /dev/null +++ 
b/.dotnet/src/Generated/Models/AssistantFileObject.Serialization.cs @@ -0,0 +1,154 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class AssistantFileObject : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(AssistantFileObject)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("id"u8); + writer.WriteStringValue(Id); + writer.WritePropertyName("object"u8); + writer.WriteStringValue(Object.ToString()); + writer.WritePropertyName("created_at"u8); + writer.WriteNumberValue(CreatedAt, "U"); + writer.WritePropertyName("assistant_id"u8); + writer.WriteStringValue(AssistantId); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + AssistantFileObject IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(AssistantFileObject)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeAssistantFileObject(document.RootElement, options); + } + + internal static AssistantFileObject DeserializeAssistantFileObject(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string id = default; + AssistantFileObjectObject @object = default; + DateTimeOffset createdAt = default; + string assistantId = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("id"u8)) + { + id = property.Value.GetString(); + continue; + } + if (property.NameEquals("object"u8)) + { + @object = new AssistantFileObjectObject(property.Value.GetString()); + continue; + } + if (property.NameEquals("created_at"u8)) + { + createdAt = DateTimeOffset.FromUnixTimeSeconds(property.Value.GetInt64()); + continue; + } + if (property.NameEquals("assistant_id"u8)) + { + assistantId = property.Value.GetString(); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new AssistantFileObject(id, @object, createdAt, assistantId, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(AssistantFileObject)} does not support '{options.Format}' format."); + } + } + + AssistantFileObject IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeAssistantFileObject(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(AssistantFileObject)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static AssistantFileObject FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeAssistantFileObject(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/AssistantFileObject.cs b/.dotnet/src/Generated/Models/AssistantFileObject.cs new file mode 100644 index 000000000..ff43db447 --- /dev/null +++ b/.dotnet/src/Generated/Models/AssistantFileObject.cs @@ -0,0 +1,89 @@ +// + +using System; +using System.Collections.Generic; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// A list of [Files](/docs/api-reference/files) attached to an `assistant`. + internal partial class AssistantFileObject + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The identifier, which can be referenced in API endpoints. + /// The Unix timestamp (in seconds) for when the assistant file was created. + /// The assistant ID that the file is attached to. + /// or is null. + internal AssistantFileObject(string id, DateTimeOffset createdAt, string assistantId) + { + Argument.AssertNotNull(id, nameof(id)); + Argument.AssertNotNull(assistantId, nameof(assistantId)); + + Id = id; + CreatedAt = createdAt; + AssistantId = assistantId; + } + + /// Initializes a new instance of . + /// The identifier, which can be referenced in API endpoints. + /// The object type, which is always `assistant.file`. + /// The Unix timestamp (in seconds) for when the assistant file was created. + /// The assistant ID that the file is attached to. + /// Keeps track of any properties unknown to the library. 
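+ // Deserialization path: JSON properties the library does not recognize arrive here via serializedAdditionalRawData and are replayed verbatim by the IJsonModel write path, so unknown service fields survive a read/write round trip.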
+ internal AssistantFileObject(string id, AssistantFileObjectObject @object, DateTimeOffset createdAt, string assistantId, IDictionary serializedAdditionalRawData) + { + Id = id; + Object = @object; + CreatedAt = createdAt; + AssistantId = assistantId; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal AssistantFileObject() + { + } + + /// The identifier, which can be referenced in API endpoints. + public string Id { get; } + /// The object type, which is always `assistant.file`. + public AssistantFileObjectObject Object { get; } = AssistantFileObjectObject.AssistantFile; + + /// The Unix timestamp (in seconds) for when the assistant file was created. + public DateTimeOffset CreatedAt { get; } + /// The assistant ID that the file is attached to. + public string AssistantId { get; } + } +} diff --git a/.dotnet/src/Generated/Models/AssistantFileObjectObject.cs b/.dotnet/src/Generated/Models/AssistantFileObjectObject.cs new file mode 100644 index 000000000..08816d6a5 --- /dev/null +++ b/.dotnet/src/Generated/Models/AssistantFileObjectObject.cs @@ -0,0 +1,43 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Internal.Models +{ + /// The AssistantFileObject_object. + internal readonly partial struct AssistantFileObjectObject : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public AssistantFileObjectObject(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string AssistantFileValue = "assistant.file"; + + /// assistant.file. + public static AssistantFileObjectObject AssistantFile { get; } = new AssistantFileObjectObject(AssistantFileValue); + /// Determines if two values are the same. + public static bool operator ==(AssistantFileObjectObject left, AssistantFileObjectObject right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(AssistantFileObjectObject left, AssistantFileObjectObject right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator AssistantFileObjectObject(string value) => new AssistantFileObjectObject(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is AssistantFileObjectObject other && Equals(other); + /// + public bool Equals(AssistantFileObjectObject other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/AssistantObject.Serialization.cs b/.dotnet/src/Generated/Models/AssistantObject.Serialization.cs new file mode 100644 index 000000000..43e63517d --- /dev/null +++ b/.dotnet/src/Generated/Models/AssistantObject.Serialization.cs @@ -0,0 +1,311 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class AssistantObject : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(AssistantObject)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("id"u8); + writer.WriteStringValue(Id); + writer.WritePropertyName("object"u8); + writer.WriteStringValue(Object.ToString()); + writer.WritePropertyName("created_at"u8); + writer.WriteNumberValue(CreatedAt, "U"); + if (Name != null) + { + writer.WritePropertyName("name"u8); + writer.WriteStringValue(Name); + } + else + { + writer.WriteNull("name"); + } + if (Description != null) + { + writer.WritePropertyName("description"u8); + writer.WriteStringValue(Description); + } + else + { + writer.WriteNull("description"); + } + writer.WritePropertyName("model"u8); + writer.WriteStringValue(Model); + if (Instructions != null) + { + writer.WritePropertyName("instructions"u8); + writer.WriteStringValue(Instructions); + } + else + { + writer.WriteNull("instructions"); + } + writer.WritePropertyName("tools"u8); + writer.WriteStartArray(); + foreach (var item in Tools) + { + if (item == null) + { + writer.WriteNullValue(); + continue; + } +#if NET6_0_OR_GREATER + writer.WriteRawValue(item); +#else + using (JsonDocument document = JsonDocument.Parse(item)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + writer.WriteEndArray(); + writer.WritePropertyName("file_ids"u8); + writer.WriteStartArray(); + foreach (var item in FileIds) + { + writer.WriteStringValue(item); + } + writer.WriteEndArray(); + if (Metadata != null && Optional.IsCollectionDefined(Metadata)) + { + writer.WritePropertyName("metadata"u8); + writer.WriteStartObject(); + foreach (var item in Metadata) + { + writer.WritePropertyName(item.Key); + writer.WriteStringValue(item.Value); + } + writer.WriteEndObject(); + } + else + { + writer.WriteNull("metadata"); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + AssistantObject IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(AssistantObject)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeAssistantObject(document.RootElement, options); + } + + internal static AssistantObject DeserializeAssistantObject(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string id = default; + AssistantObjectObject @object = default; + DateTimeOffset createdAt = default; + string name = default; + string description = default; + string model = default; + string instructions = default; + IReadOnlyList tools = default; + IReadOnlyList fileIds = default; + IReadOnlyDictionary metadata = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("id"u8)) + { + id = property.Value.GetString(); + continue; + } + if (property.NameEquals("object"u8)) + { + @object = new AssistantObjectObject(property.Value.GetString()); + continue; + } + if (property.NameEquals("created_at"u8)) + { + createdAt = DateTimeOffset.FromUnixTimeSeconds(property.Value.GetInt64()); + continue; + } + if (property.NameEquals("name"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + name = null; + continue; + } + name = property.Value.GetString(); + continue; + } + if (property.NameEquals("description"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + description = null; + continue; + } + description = property.Value.GetString(); + continue; + } + if (property.NameEquals("model"u8)) + { + model = property.Value.GetString(); + continue; + } + if (property.NameEquals("instructions"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + instructions = null; + continue; + } + instructions = property.Value.GetString(); + continue; + } + if (property.NameEquals("tools"u8)) + { + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + if (item.ValueKind == JsonValueKind.Null) + { + array.Add(null); + } + else + { + array.Add(BinaryData.FromString(item.GetRawText())); + } + } + tools = array; + continue; + } + if (property.NameEquals("file_ids"u8)) + { + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(item.GetString()); + } + fileIds = array; + continue; + } + if (property.NameEquals("metadata"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + metadata = new ChangeTrackingDictionary(); + continue; + } + Dictionary dictionary = new Dictionary(); + foreach (var property0 in property.Value.EnumerateObject()) + { + dictionary.Add(property0.Name, property0.Value.GetString()); + } + metadata = dictionary; + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new AssistantObject( + id, + @object, + createdAt, + name, + description, + model, + instructions, + tools, + fileIds, + metadata, + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(AssistantObject)} does not support '{options.Format}' format."); + } + } + + AssistantObject IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeAssistantObject(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(AssistantObject)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static AssistantObject FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeAssistantObject(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/AssistantObject.cs b/.dotnet/src/Generated/Models/AssistantObject.cs new file mode 100644 index 000000000..e5488b199 --- /dev/null +++ b/.dotnet/src/Generated/Models/AssistantObject.cs @@ -0,0 +1,200 @@ +// + +using System; +using System.Collections.Generic; +using System.Linq; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// Represents an `assistant` that can call the model and use tools. + internal partial class AssistantObject + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The identifier, which can be referenced in API endpoints. + /// The Unix timestamp (in seconds) for when the assistant was created. + /// The name of the assistant. The maximum length is 256 characters. + /// The description of the assistant. The maximum length is 512 characters. + /// + /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to + /// see all of your available models, or see our [Model overview](/docs/models/overview) for + /// descriptions of them. + /// + /// The system instructions that the assistant uses. The maximum length is 32768 characters. + /// + /// A list of tools enabled on the assistant. There can be a maximum of 128 tools per assistant. + /// Tools can be of types `code_interpreter`, `retrieval`, or `function`.
+ /// + /// + /// A list of [file](/docs/api-reference/files) IDs attached to this assistant. There can be a + /// maximum of 20 files attached to the assistant. Files are ordered by their creation date in + /// ascending order. + /// + /// + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + /// additional information about the object in a structured format. Keys can be a maximum of 64 + /// characters long and values can be a maximum of 512 characters long. + /// + /// id, model, tools or fileIds is null. + internal AssistantObject(string id, DateTimeOffset createdAt, string name, string description, string model, string instructions, IEnumerable tools, IEnumerable fileIds, IReadOnlyDictionary metadata) + { + Argument.AssertNotNull(id, nameof(id)); + Argument.AssertNotNull(model, nameof(model)); + Argument.AssertNotNull(tools, nameof(tools)); + Argument.AssertNotNull(fileIds, nameof(fileIds)); + + Id = id; + CreatedAt = createdAt; + Name = name; + Description = description; + Model = model; + Instructions = instructions; + Tools = tools.ToList(); + FileIds = fileIds.ToList(); + Metadata = metadata; + } + + /// Initializes a new instance of . + /// The identifier, which can be referenced in API endpoints. + /// The object type, which is always `assistant`. + /// The Unix timestamp (in seconds) for when the assistant was created. + /// The name of the assistant. The maximum length is 256 characters. + /// The description of the assistant. The maximum length is 512 characters. + /// + /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to + /// see all of your available models, or see our [Model overview](/docs/models/overview) for + /// descriptions of them. + /// + /// The system instructions that the assistant uses. The maximum length is 32768 characters. + /// + /// A list of tools enabled on the assistant. There can be a maximum of 128 tools per assistant. + /// Tools can be of types `code_interpreter`, `retrieval`, or `function`. + /// + /// + /// A list of [file](/docs/api-reference/files) IDs attached to this assistant. There can be a + /// maximum of 20 files attached to the assistant. Files are ordered by their creation date in + /// ascending order. + /// + /// + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + /// additional information about the object in a structured format. Keys can be a maximum of 64 + /// characters long and values can be a maximum of 512 characters long. + /// + /// Keeps track of any properties unknown to the library. + internal AssistantObject(string id, AssistantObjectObject @object, DateTimeOffset createdAt, string name, string description, string model, string instructions, IReadOnlyList tools, IReadOnlyList fileIds, IReadOnlyDictionary metadata, IDictionary serializedAdditionalRawData) + { + Id = id; + Object = @object; + CreatedAt = createdAt; + Name = name; + Description = description; + Model = model; + Instructions = instructions; + Tools = tools; + FileIds = fileIds; + Metadata = metadata; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal AssistantObject() + { + } + + /// The identifier, which can be referenced in API endpoints. + public string Id { get; } + /// The object type, which is always `assistant`.
+ public AssistantObjectObject Object { get; } = AssistantObjectObject.Assistant; + + /// The Unix timestamp (in seconds) for when the assistant was created. + public DateTimeOffset CreatedAt { get; } + /// The name of the assistant. The maximum length is 256 characters. + public string Name { get; } + /// The description of the assistant. The maximum length is 512 characters. + public string Description { get; } + /// + /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to + /// see all of your available models, or see our [Model overview](/docs/models/overview) for + /// descriptions of them. + /// + public string Model { get; } + /// The system instructions that the assistant uses. The maximum length is 32768 characters. + public string Instructions { get; } + /// + /// A list of tools enabled on the assistant. There can be a maximum of 128 tools per assistant. + /// Tools can be of types `code_interpreter`, `retrieval`, or `function`. + /// + /// To assign an object to the element of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + public IReadOnlyList Tools { get; } + /// + /// A list of [file](/docs/api-reference/files) IDs attached to this assistant. There can be a + /// maximum of 20 files attached to the assistant. Files are ordered by their creation date in + /// ascending order. + /// + public IReadOnlyList FileIds { get; } + /// + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + /// additional information about the object in a structured format. Keys can be a maximum of 64 + /// characters long and values can be a maximum of 512 characters long. + /// + public IReadOnlyDictionary Metadata { get; } + } +} diff --git a/.dotnet/src/Generated/Models/AssistantObjectObject.cs b/.dotnet/src/Generated/Models/AssistantObjectObject.cs new file mode 100644 index 000000000..64e27426c --- /dev/null +++ b/.dotnet/src/Generated/Models/AssistantObjectObject.cs @@ -0,0 +1,43 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Internal.Models +{ + /// The AssistantObject_object. + internal readonly partial struct AssistantObjectObject : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public AssistantObjectObject(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string AssistantValue = "assistant"; + + /// assistant. + public static AssistantObjectObject Assistant { get; } = new AssistantObjectObject(AssistantValue); + /// Determines if two values are the same. + public static bool operator ==(AssistantObjectObject left, AssistantObjectObject right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(AssistantObjectObject left, AssistantObjectObject right) => !left.Equals(right); + /// Converts a string to a .
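+ // The implicit string conversion below, together with the case-insensitive Equals, makes this an "extensible enum": values the service introduces later still parse and compare instead of throwing.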
+ public static implicit operator AssistantObjectObject(string value) => new AssistantObjectObject(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is AssistantObjectObject other && Equals(other); + /// + public bool Equals(AssistantObjectObject other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/AudioSegment.Serialization.cs b/.dotnet/src/Generated/Models/AudioSegment.Serialization.cs new file mode 100644 index 000000000..a677ceb5d --- /dev/null +++ b/.dotnet/src/Generated/Models/AudioSegment.Serialization.cs @@ -0,0 +1,223 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class AudioSegment : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(AudioSegment)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("id"u8); + writer.WriteNumberValue(Id); + writer.WritePropertyName("seek"u8); + writer.WriteNumberValue(Seek); + writer.WritePropertyName("start"u8); + writer.WriteNumberValue(Convert.ToInt32(Start.ToString("%s"))); + writer.WritePropertyName("end"u8); + writer.WriteNumberValue(Convert.ToInt32(End.ToString("%s"))); + writer.WritePropertyName("text"u8); + writer.WriteStringValue(Text); + writer.WritePropertyName("tokens"u8); + writer.WriteStartArray(); + foreach (var item in Tokens) + { + writer.WriteNumberValue(item); + } + writer.WriteEndArray(); + writer.WritePropertyName("temperature"u8); + writer.WriteNumberValue(Temperature); + writer.WritePropertyName("avg_logprob"u8); + writer.WriteNumberValue(AvgLogprob); + writer.WritePropertyName("compression_ratio"u8); + writer.WriteNumberValue(CompressionRatio); + writer.WritePropertyName("no_speech_prob"u8); + writer.WriteNumberValue(NoSpeechProb); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + AudioSegment IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(AudioSegment)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeAudioSegment(document.RootElement, options); + } + + internal static AudioSegment DeserializeAudioSegment(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + long id = default; + long seek = default; + TimeSpan start = default; + TimeSpan end = default; + string text = default; + IReadOnlyList tokens = default; + double temperature = default; + double avgLogprob = default; + double compressionRatio = default; + double noSpeechProb = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("id"u8)) + { + id = property.Value.GetInt64(); + continue; + } + if (property.NameEquals("seek"u8)) + { + seek = property.Value.GetInt64(); + continue; + } + if (property.NameEquals("start"u8)) + { + start = TimeSpan.FromSeconds(property.Value.GetInt32()); + continue; + } + if (property.NameEquals("end"u8)) + { + end = TimeSpan.FromSeconds(property.Value.GetInt32()); + continue; + } + if (property.NameEquals("text"u8)) + { + text = property.Value.GetString(); + continue; + } + if (property.NameEquals("tokens"u8)) + { + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(item.GetInt64()); + } + tokens = array; + continue; + } + if (property.NameEquals("temperature"u8)) + { + temperature = property.Value.GetDouble(); + continue; + } + if (property.NameEquals("avg_logprob"u8)) + { + avgLogprob = property.Value.GetDouble(); + continue; + } + if (property.NameEquals("compression_ratio"u8)) + { + compressionRatio = property.Value.GetDouble(); + continue; + } + if (property.NameEquals("no_speech_prob"u8)) + { + noSpeechProb = property.Value.GetDouble(); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new AudioSegment( + id, + seek, + start, + end, + text, + tokens, + temperature, + avgLogprob, + compressionRatio, + noSpeechProb, + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(AudioSegment)} does not support '{options.Format}' format."); + } + } + + AudioSegment IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeAudioSegment(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(AudioSegment)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static AudioSegment FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeAudioSegment(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/AudioSegment.cs b/.dotnet/src/Generated/Models/AudioSegment.cs new file mode 100644 index 000000000..ac48fd377 --- /dev/null +++ b/.dotnet/src/Generated/Models/AudioSegment.cs @@ -0,0 +1,145 @@ +// + +using System; +using System.Collections.Generic; +using System.Linq; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// The AudioSegment. + internal partial class AudioSegment + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The zero-based index of this segment. + /// + /// The seek position associated with the processing of this audio segment. Seek positions are + /// expressed as hundredths of seconds. The model may process several segments from a single seek + /// position, so while the seek position will never represent a later time than the segment's + /// start, the segment's start may represent a significantly later time than the segment's + /// associated seek position. + /// + /// The time at which this segment started relative to the beginning of the audio. + /// The time at which this segment ended relative to the beginning of the audio. + /// The text that was part of this audio segment. + /// The token IDs matching the text in this audio segment. + /// The temperature score associated with this audio segment. + /// The average log probability associated with this audio segment. + /// The compression ratio of this audio segment. + /// The probability of no speech detection within this audio segment. + /// or is null. 
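+ // Worked example of the units above: a seek value of 1600 is 16.00 seconds (hundredths of seconds), whereas start and end are handled as whole seconds and surfaced as TimeSpan values.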
+ internal AudioSegment(long id, long seek, TimeSpan start, TimeSpan end, string text, IEnumerable tokens, double temperature, double avgLogprob, double compressionRatio, double noSpeechProb) + { + Argument.AssertNotNull(text, nameof(text)); + Argument.AssertNotNull(tokens, nameof(tokens)); + + Id = id; + Seek = seek; + Start = start; + End = end; + Text = text; + Tokens = tokens.ToList(); + Temperature = temperature; + AvgLogprob = avgLogprob; + CompressionRatio = compressionRatio; + NoSpeechProb = noSpeechProb; + } + + /// Initializes a new instance of . + /// The zero-based index of this segment. + /// + /// The seek position associated with the processing of this audio segment. Seek positions are + /// expressed as hundredths of seconds. The model may process several segments from a single seek + /// position, so while the seek position will never represent a later time than the segment's + /// start, the segment's start may represent a significantly later time than the segment's + /// associated seek position. + /// + /// The time at which this segment started relative to the beginning of the audio. + /// The time at which this segment ended relative to the beginning of the audio. + /// The text that was part of this audio segment. + /// The token IDs matching the text in this audio segment. + /// The temperature score associated with this audio segment. + /// The average log probability associated with this audio segment. + /// The compression ratio of this audio segment. + /// The probability of no speech detection within this audio segment. + /// Keeps track of any properties unknown to the library. + internal AudioSegment(long id, long seek, TimeSpan start, TimeSpan end, string text, IReadOnlyList tokens, double temperature, double avgLogprob, double compressionRatio, double noSpeechProb, IDictionary serializedAdditionalRawData) + { + Id = id; + Seek = seek; + Start = start; + End = end; + Text = text; + Tokens = tokens; + Temperature = temperature; + AvgLogprob = avgLogprob; + CompressionRatio = compressionRatio; + NoSpeechProb = noSpeechProb; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal AudioSegment() + { + } + + /// The zero-based index of this segment. + public long Id { get; } + /// + /// The seek position associated with the processing of this audio segment. Seek positions are + /// expressed as hundredths of seconds. The model may process several segments from a single seek + /// position, so while the seek position will never represent a later time than the segment's + /// start, the segment's start may represent a significantly later time than the segment's + /// associated seek position. + /// + public long Seek { get; } + /// The time at which this segment started relative to the beginning of the audio. + public TimeSpan Start { get; } + /// The time at which this segment ended relative to the beginning of the audio. + public TimeSpan End { get; } + /// The text that was part of this audio segment. + public string Text { get; } + /// The token IDs matching the text in this audio segment. + public IReadOnlyList Tokens { get; } + /// The temperature score associated with this audio segment. + public double Temperature { get; } + /// The average log probability associated with this audio segment. + public double AvgLogprob { get; } + /// The compression ratio of this audio segment. 
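+ // Note: the schema does not define how this ratio is computed; in Whisper it is typically the gzip compression ratio of the segment text, where unusually high values suggest repetitive output.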
+ public double CompressionRatio { get; } + /// The probability of no speech detection within this audio segment. + public double NoSpeechProb { get; } + } +} diff --git a/.dotnet/src/Generated/Models/ChatCompletionFunctionCallOption.Serialization.cs b/.dotnet/src/Generated/Models/ChatCompletionFunctionCallOption.Serialization.cs new file mode 100644 index 000000000..4ca6b989f --- /dev/null +++ b/.dotnet/src/Generated/Models/ChatCompletionFunctionCallOption.Serialization.cs @@ -0,0 +1,130 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class ChatCompletionFunctionCallOption : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ChatCompletionFunctionCallOption)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("name"u8); + writer.WriteStringValue(Name); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + ChatCompletionFunctionCallOption IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ChatCompletionFunctionCallOption)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeChatCompletionFunctionCallOption(document.RootElement, options); + } + + internal static ChatCompletionFunctionCallOption DeserializeChatCompletionFunctionCallOption(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string name = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("name"u8)) + { + name = property.Value.GetString(); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new ChatCompletionFunctionCallOption(name, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(ChatCompletionFunctionCallOption)} does not support '{options.Format}' format."); + } + } + + ChatCompletionFunctionCallOption IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeChatCompletionFunctionCallOption(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(ChatCompletionFunctionCallOption)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static ChatCompletionFunctionCallOption FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeChatCompletionFunctionCallOption(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/ChatCompletionFunctionCallOption.cs b/.dotnet/src/Generated/Models/ChatCompletionFunctionCallOption.cs new file mode 100644 index 000000000..cda44802a --- /dev/null +++ b/.dotnet/src/Generated/Models/ChatCompletionFunctionCallOption.cs @@ -0,0 +1,74 @@ +// + +using System; +using System.Collections.Generic; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// + /// Specifying a particular function via `{"name": "my_function"}` forces the model to call that + /// function. + /// + internal partial class ChatCompletionFunctionCallOption + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The name of the function to call. + /// is null. + public ChatCompletionFunctionCallOption(string name) + { + Argument.AssertNotNull(name, nameof(name)); + + Name = name; + } + + /// Initializes a new instance of . + /// The name of the function to call. + /// Keeps track of any properties unknown to the library. + internal ChatCompletionFunctionCallOption(string name, IDictionary serializedAdditionalRawData) + { + Name = name; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal ChatCompletionFunctionCallOption() + { + } + + /// The name of the function to call. 
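+ // Per the class summary, serializing this model produces a {"name": "my_function"}-style payload, which is what forces the model to call that specific function.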
+ public string Name { get; } + } +} diff --git a/.dotnet/src/Generated/Models/ChatCompletionFunctions.Serialization.cs b/.dotnet/src/Generated/Models/ChatCompletionFunctions.Serialization.cs new file mode 100644 index 000000000..f638fd9cf --- /dev/null +++ b/.dotnet/src/Generated/Models/ChatCompletionFunctions.Serialization.cs @@ -0,0 +1,156 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class ChatCompletionFunctions : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ChatCompletionFunctions)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsDefined(Description)) + { + writer.WritePropertyName("description"u8); + writer.WriteStringValue(Description); + } + writer.WritePropertyName("name"u8); + writer.WriteStringValue(Name); + if (Optional.IsDefined(Parameters)) + { + writer.WritePropertyName("parameters"u8); + writer.WriteObjectValue(Parameters); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + ChatCompletionFunctions IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ChatCompletionFunctions)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeChatCompletionFunctions(document.RootElement, options); + } + + internal static ChatCompletionFunctions DeserializeChatCompletionFunctions(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string description = default; + string name = default; + FunctionParameters parameters = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("description"u8)) + { + description = property.Value.GetString(); + continue; + } + if (property.NameEquals("name"u8)) + { + name = property.Value.GetString(); + continue; + } + if (property.NameEquals("parameters"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + parameters = FunctionParameters.DeserializeFunctionParameters(property.Value, options); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new ChatCompletionFunctions(description, name, parameters, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(ChatCompletionFunctions)} does not support '{options.Format}' format."); + } + } + + ChatCompletionFunctions IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeChatCompletionFunctions(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(ChatCompletionFunctions)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static ChatCompletionFunctions FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeChatCompletionFunctions(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. 
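+ // ToRequestBody (below) serializes the model into a Utf8JsonRequestBody, a BinaryContent the client pipeline can attach directly as an HTTP request body.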
+ internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/ChatCompletionFunctions.cs b/.dotnet/src/Generated/Models/ChatCompletionFunctions.cs new file mode 100644 index 000000000..b955e356d --- /dev/null +++ b/.dotnet/src/Generated/Models/ChatCompletionFunctions.cs @@ -0,0 +1,95 @@ +// + +using System; +using System.Collections.Generic; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// The ChatCompletionFunctions. + [Obsolete("deprecated")] + internal partial class ChatCompletionFunctions + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and + /// dashes, with a maximum length of 64. + /// + /// is null. + public ChatCompletionFunctions(string name) + { + Argument.AssertNotNull(name, nameof(name)); + + Name = name; + } + + /// Initializes a new instance of . + /// + /// A description of what the function does, used by the model to choose when and how to call the + /// function. + /// + /// + /// The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and + /// dashes, with a maximum length of 64. + /// + /// + /// Keeps track of any properties unknown to the library. + internal ChatCompletionFunctions(string description, string name, FunctionParameters parameters, IDictionary serializedAdditionalRawData) + { + Description = description; + Name = name; + Parameters = parameters; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal ChatCompletionFunctions() + { + } + + /// + /// A description of what the function does, used by the model to choose when and how to call the + /// function. + /// + public string Description { get; set; } + /// + /// The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and + /// dashes, with a maximum length of 64. + /// + public string Name { get; } + /// Gets or sets the parameters. 
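+ // Parameters is optional: the write path emits "parameters" only when it has been set, so an unset value is omitted from the serialized function definition rather than sent as null.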
+ public FunctionParameters Parameters { get; set; } + } +} diff --git a/.dotnet/src/Generated/Models/ChatCompletionMessageToolCall.Serialization.cs b/.dotnet/src/Generated/Models/ChatCompletionMessageToolCall.Serialization.cs new file mode 100644 index 000000000..ec80b0cb1 --- /dev/null +++ b/.dotnet/src/Generated/Models/ChatCompletionMessageToolCall.Serialization.cs @@ -0,0 +1,146 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class ChatCompletionMessageToolCall : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ChatCompletionMessageToolCall)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("id"u8); + writer.WriteStringValue(Id); + writer.WritePropertyName("type"u8); + writer.WriteStringValue(Type.ToString()); + writer.WritePropertyName("function"u8); + writer.WriteObjectValue(Function); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + ChatCompletionMessageToolCall IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ChatCompletionMessageToolCall)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeChatCompletionMessageToolCall(document.RootElement, options); + } + + internal static ChatCompletionMessageToolCall DeserializeChatCompletionMessageToolCall(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string id = default; + ChatCompletionMessageToolCallType type = default; + ChatCompletionMessageToolCallFunction function = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("id"u8)) + { + id = property.Value.GetString(); + continue; + } + if (property.NameEquals("type"u8)) + { + type = new ChatCompletionMessageToolCallType(property.Value.GetString()); + continue; + } + if (property.NameEquals("function"u8)) + { + function = ChatCompletionMessageToolCallFunction.DeserializeChatCompletionMessageToolCallFunction(property.Value, options); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new ChatCompletionMessageToolCall(id, type, function, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(ChatCompletionMessageToolCall)} does not support '{options.Format}' format."); + } + } + + ChatCompletionMessageToolCall IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeChatCompletionMessageToolCall(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(ChatCompletionMessageToolCall)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static ChatCompletionMessageToolCall FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeChatCompletionMessageToolCall(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. 
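+ // The serialized shape is {"id": ..., "type": "function", "function": {...}}; ids such as "call_abc123" are typical of the service, though that format is a convention rather than a guarantee.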
+ internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/ChatCompletionMessageToolCall.cs b/.dotnet/src/Generated/Models/ChatCompletionMessageToolCall.cs new file mode 100644 index 000000000..b2757dca1 --- /dev/null +++ b/.dotnet/src/Generated/Models/ChatCompletionMessageToolCall.cs @@ -0,0 +1,83 @@ +// + +using System; +using System.Collections.Generic; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// The ChatCompletionMessageToolCall. + internal partial class ChatCompletionMessageToolCall + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The ID of the tool call. + /// The function that the model called. + /// or is null. + public ChatCompletionMessageToolCall(string id, ChatCompletionMessageToolCallFunction function) + { + Argument.AssertNotNull(id, nameof(id)); + Argument.AssertNotNull(function, nameof(function)); + + Id = id; + Function = function; + } + + /// Initializes a new instance of . + /// The ID of the tool call. + /// The type of the tool. Currently, only `function` is supported. + /// The function that the model called. + /// Keeps track of any properties unknown to the library. + internal ChatCompletionMessageToolCall(string id, ChatCompletionMessageToolCallType type, ChatCompletionMessageToolCallFunction function, IDictionary serializedAdditionalRawData) + { + Id = id; + Type = type; + Function = function; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal ChatCompletionMessageToolCall() + { + } + + /// The ID of the tool call. + public string Id { get; set; } + /// The type of the tool. Currently, only `function` is supported. + public ChatCompletionMessageToolCallType Type { get; } = ChatCompletionMessageToolCallType.Function; + + /// The function that the model called. 
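+        // Example (illustrative; "call_abc123" and the function payload are made-up values). The public
+        // constructor only takes an id and a function: Type is read-only and pre-set to
+        // ChatCompletionMessageToolCallType.Function, the only tool type currently supported:
+        //
+        //     var call = new ChatCompletionMessageToolCall("call_abc123",
+        //         new ChatCompletionMessageToolCallFunction("get_weather", "{\"location\":\"Boston\"}"));
+        //     Console.WriteLine(call.Type);   // prints "function"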
+ public ChatCompletionMessageToolCallFunction Function { get; set; } + } +} diff --git a/.dotnet/src/Generated/Models/ChatCompletionMessageToolCallFunction.Serialization.cs b/.dotnet/src/Generated/Models/ChatCompletionMessageToolCallFunction.Serialization.cs new file mode 100644 index 000000000..8ffbd2090 --- /dev/null +++ b/.dotnet/src/Generated/Models/ChatCompletionMessageToolCallFunction.Serialization.cs @@ -0,0 +1,138 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class ChatCompletionMessageToolCallFunction : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ChatCompletionMessageToolCallFunction)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("name"u8); + writer.WriteStringValue(Name); + writer.WritePropertyName("arguments"u8); + writer.WriteStringValue(Arguments); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + ChatCompletionMessageToolCallFunction IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ChatCompletionMessageToolCallFunction)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeChatCompletionMessageToolCallFunction(document.RootElement, options); + } + + internal static ChatCompletionMessageToolCallFunction DeserializeChatCompletionMessageToolCallFunction(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string name = default; + string arguments = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("name"u8)) + { + name = property.Value.GetString(); + continue; + } + if (property.NameEquals("arguments"u8)) + { + arguments = property.Value.GetString(); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new ChatCompletionMessageToolCallFunction(name, arguments, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(ChatCompletionMessageToolCallFunction)} does not support '{options.Format}' format."); + } + } + + ChatCompletionMessageToolCallFunction IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeChatCompletionMessageToolCallFunction(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(ChatCompletionMessageToolCallFunction)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static ChatCompletionMessageToolCallFunction FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeChatCompletionMessageToolCallFunction(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/ChatCompletionMessageToolCallFunction.cs b/.dotnet/src/Generated/Models/ChatCompletionMessageToolCallFunction.cs new file mode 100644 index 000000000..926277164 --- /dev/null +++ b/.dotnet/src/Generated/Models/ChatCompletionMessageToolCallFunction.cs @@ -0,0 +1,90 @@ +// + +using System; +using System.Collections.Generic; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// The ChatCompletionMessageToolCallFunction. + internal partial class ChatCompletionMessageToolCallFunction + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The name of the function to call. + /// + /// The arguments to call the function with, as generated by the model in JSON format. Note that + /// the model does not always generate valid JSON, and may hallucinate parameters not defined by + /// your function schema. Validate the arguments in your code before calling your function. + /// + /// or is null. + public ChatCompletionMessageToolCallFunction(string name, string arguments) + { + Argument.AssertNotNull(name, nameof(name)); + Argument.AssertNotNull(arguments, nameof(arguments)); + + Name = name; + Arguments = arguments; + } + + /// Initializes a new instance of . + /// The name of the function to call. 
+        /// <param name="arguments">
+        /// The arguments to call the function with, as generated by the model in JSON format. Note that
+        /// the model does not always generate valid JSON, and may hallucinate parameters not defined by
+        /// your function schema. Validate the arguments in your code before calling your function.
+        /// </param>
+        /// <param name="serializedAdditionalRawData"> Keeps track of any properties unknown to the library. </param>
+        internal ChatCompletionMessageToolCallFunction(string name, string arguments, IDictionary<string, BinaryData> serializedAdditionalRawData)
+        {
+            Name = name;
+            Arguments = arguments;
+            _serializedAdditionalRawData = serializedAdditionalRawData;
+        }
+
+        /// <summary> Initializes a new instance of <see cref="ChatCompletionMessageToolCallFunction"/> for deserialization. </summary>
+        internal ChatCompletionMessageToolCallFunction()
+        {
+        }
+
+        /// <summary> The name of the function to call. </summary>
+        public string Name { get; set; }
+        /// <summary>
+        /// The arguments to call the function with, as generated by the model in JSON format. Note that
+        /// the model does not always generate valid JSON, and may hallucinate parameters not defined by
+        /// your function schema. Validate the arguments in your code before calling your function.
+        /// </summary>
+        public string Arguments { get; set; }
+    }
+}
diff --git a/.dotnet/src/Generated/Models/ChatCompletionMessageToolCallType.cs b/.dotnet/src/Generated/Models/ChatCompletionMessageToolCallType.cs
new file mode 100644
index 000000000..3da609cee
--- /dev/null
+++ b/.dotnet/src/Generated/Models/ChatCompletionMessageToolCallType.cs
@@ -0,0 +1,43 @@
+// <auto-generated/>
+
+using System;
+using System.ComponentModel;
+
+namespace OpenAI.Internal.Models
+{
+    /// <summary> The ChatCompletionMessageToolCall_type. </summary>
+    internal readonly partial struct ChatCompletionMessageToolCallType : IEquatable<ChatCompletionMessageToolCallType>
+    {
+        private readonly string _value;
+
+        /// <summary> Initializes a new instance of <see cref="ChatCompletionMessageToolCallType"/>. </summary>
+        /// <exception cref="ArgumentNullException"> <paramref name="value"/> is null. </exception>
+        public ChatCompletionMessageToolCallType(string value)
+        {
+            _value = value ?? throw new ArgumentNullException(nameof(value));
+        }
+
+        private const string FunctionValue = "function";
+
+        /// <summary> function. </summary>
+        public static ChatCompletionMessageToolCallType Function { get; } = new ChatCompletionMessageToolCallType(FunctionValue);
+        /// <summary> Determines if two <see cref="ChatCompletionMessageToolCallType"/> values are the same. </summary>
+        public static bool operator ==(ChatCompletionMessageToolCallType left, ChatCompletionMessageToolCallType right) => left.Equals(right);
+        /// <summary> Determines if two <see cref="ChatCompletionMessageToolCallType"/> values are not the same. </summary>
+        public static bool operator !=(ChatCompletionMessageToolCallType left, ChatCompletionMessageToolCallType right) => !left.Equals(right);
+        /// <summary> Converts a string to a <see cref="ChatCompletionMessageToolCallType"/>. </summary>
+        public static implicit operator ChatCompletionMessageToolCallType(string value) => new ChatCompletionMessageToolCallType(value);
+
+        /// <inheritdoc />
+        [EditorBrowsable(EditorBrowsableState.Never)]
+        public override bool Equals(object obj) => obj is ChatCompletionMessageToolCallType other && Equals(other);
+        /// <inheritdoc />
+        public bool Equals(ChatCompletionMessageToolCallType other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase);
+
+        /// <inheritdoc />
+        [EditorBrowsable(EditorBrowsableState.Never)]
+        public override int GetHashCode() => _value?.GetHashCode() ?? 
0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/ChatCompletionNamedToolChoice.Serialization.cs b/.dotnet/src/Generated/Models/ChatCompletionNamedToolChoice.Serialization.cs new file mode 100644 index 000000000..765cdef78 --- /dev/null +++ b/.dotnet/src/Generated/Models/ChatCompletionNamedToolChoice.Serialization.cs @@ -0,0 +1,138 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class ChatCompletionNamedToolChoice : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ChatCompletionNamedToolChoice)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("type"u8); + writer.WriteStringValue(Type.ToString()); + writer.WritePropertyName("function"u8); + writer.WriteObjectValue(Function); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + ChatCompletionNamedToolChoice IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ChatCompletionNamedToolChoice)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeChatCompletionNamedToolChoice(document.RootElement, options); + } + + internal static ChatCompletionNamedToolChoice DeserializeChatCompletionNamedToolChoice(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + ChatCompletionNamedToolChoiceType type = default; + ChatCompletionNamedToolChoiceFunction function = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("type"u8)) + { + type = new ChatCompletionNamedToolChoiceType(property.Value.GetString()); + continue; + } + if (property.NameEquals("function"u8)) + { + function = ChatCompletionNamedToolChoiceFunction.DeserializeChatCompletionNamedToolChoiceFunction(property.Value, options); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new ChatCompletionNamedToolChoice(type, function, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(ChatCompletionNamedToolChoice)} does not support '{options.Format}' format."); + } + } + + ChatCompletionNamedToolChoice IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeChatCompletionNamedToolChoice(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(ChatCompletionNamedToolChoice)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static ChatCompletionNamedToolChoice FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeChatCompletionNamedToolChoice(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/ChatCompletionNamedToolChoice.cs b/.dotnet/src/Generated/Models/ChatCompletionNamedToolChoice.cs new file mode 100644 index 000000000..bbc4a9013 --- /dev/null +++ b/.dotnet/src/Generated/Models/ChatCompletionNamedToolChoice.cs @@ -0,0 +1,76 @@ +// + +using System; +using System.Collections.Generic; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// Specifies a tool the model should use. Use to force the model to call a specific function. + internal partial class ChatCompletionNamedToolChoice + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// is null. + public ChatCompletionNamedToolChoice(ChatCompletionNamedToolChoiceFunction function) + { + Argument.AssertNotNull(function, nameof(function)); + + Function = function; + } + + /// Initializes a new instance of . + /// The type of the tool. Currently, only `function` is supported. + /// + /// Keeps track of any properties unknown to the library. + internal ChatCompletionNamedToolChoice(ChatCompletionNamedToolChoiceType type, ChatCompletionNamedToolChoiceFunction function, IDictionary serializedAdditionalRawData) + { + Type = type; + Function = function; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. 
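+        // Example (illustrative; "get_weather" is a made-up function name). Per the class summary above, a
+        // named tool choice forces the model to call one specific function rather than letting it choose:
+        //
+        //     var choice = new ChatCompletionNamedToolChoice(new ChatCompletionNamedToolChoiceFunction("get_weather"));
+        //     // serializes (via the companion .Serialization.cs file) as:
+        //     //     {"type":"function","function":{"name":"get_weather"}}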
+ internal ChatCompletionNamedToolChoice() + { + } + + /// The type of the tool. Currently, only `function` is supported. + public ChatCompletionNamedToolChoiceType Type { get; } = ChatCompletionNamedToolChoiceType.Function; + + /// Gets the function. + public ChatCompletionNamedToolChoiceFunction Function { get; } + } +} diff --git a/.dotnet/src/Generated/Models/ChatCompletionNamedToolChoiceFunction.Serialization.cs b/.dotnet/src/Generated/Models/ChatCompletionNamedToolChoiceFunction.Serialization.cs new file mode 100644 index 000000000..9773625c1 --- /dev/null +++ b/.dotnet/src/Generated/Models/ChatCompletionNamedToolChoiceFunction.Serialization.cs @@ -0,0 +1,130 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class ChatCompletionNamedToolChoiceFunction : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ChatCompletionNamedToolChoiceFunction)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("name"u8); + writer.WriteStringValue(Name); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + ChatCompletionNamedToolChoiceFunction IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ChatCompletionNamedToolChoiceFunction)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeChatCompletionNamedToolChoiceFunction(document.RootElement, options); + } + + internal static ChatCompletionNamedToolChoiceFunction DeserializeChatCompletionNamedToolChoiceFunction(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string name = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("name"u8)) + { + name = property.Value.GetString(); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new ChatCompletionNamedToolChoiceFunction(name, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(ChatCompletionNamedToolChoiceFunction)} does not support '{options.Format}' format."); + } + } + + ChatCompletionNamedToolChoiceFunction IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeChatCompletionNamedToolChoiceFunction(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(ChatCompletionNamedToolChoiceFunction)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static ChatCompletionNamedToolChoiceFunction FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeChatCompletionNamedToolChoiceFunction(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/ChatCompletionNamedToolChoiceFunction.cs b/.dotnet/src/Generated/Models/ChatCompletionNamedToolChoiceFunction.cs new file mode 100644 index 000000000..d040031c5 --- /dev/null +++ b/.dotnet/src/Generated/Models/ChatCompletionNamedToolChoiceFunction.cs @@ -0,0 +1,71 @@ +// + +using System; +using System.Collections.Generic; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// The ChatCompletionNamedToolChoiceFunction. + internal partial class ChatCompletionNamedToolChoiceFunction + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The name of the function to call. + /// is null. + public ChatCompletionNamedToolChoiceFunction(string name) + { + Argument.AssertNotNull(name, nameof(name)); + + Name = name; + } + + /// Initializes a new instance of . + /// The name of the function to call. + /// Keeps track of any properties unknown to the library. + internal ChatCompletionNamedToolChoiceFunction(string name, IDictionary serializedAdditionalRawData) + { + Name = name; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal ChatCompletionNamedToolChoiceFunction() + { + } + + /// The name of the function to call. 
+ public string Name { get; } + } +} diff --git a/.dotnet/src/Generated/Models/ChatCompletionNamedToolChoiceType.cs b/.dotnet/src/Generated/Models/ChatCompletionNamedToolChoiceType.cs new file mode 100644 index 000000000..0765fe138 --- /dev/null +++ b/.dotnet/src/Generated/Models/ChatCompletionNamedToolChoiceType.cs @@ -0,0 +1,43 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Internal.Models +{ + /// The ChatCompletionNamedToolChoice_type. + internal readonly partial struct ChatCompletionNamedToolChoiceType : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public ChatCompletionNamedToolChoiceType(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string FunctionValue = "function"; + + /// function. + public static ChatCompletionNamedToolChoiceType Function { get; } = new ChatCompletionNamedToolChoiceType(FunctionValue); + /// Determines if two values are the same. + public static bool operator ==(ChatCompletionNamedToolChoiceType left, ChatCompletionNamedToolChoiceType right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(ChatCompletionNamedToolChoiceType left, ChatCompletionNamedToolChoiceType right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator ChatCompletionNamedToolChoiceType(string value) => new ChatCompletionNamedToolChoiceType(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is ChatCompletionNamedToolChoiceType other && Equals(other); + /// + public bool Equals(ChatCompletionNamedToolChoiceType other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/ChatCompletionResponseMessage.Serialization.cs b/.dotnet/src/Generated/Models/ChatCompletionResponseMessage.Serialization.cs new file mode 100644 index 000000000..79e67b36a --- /dev/null +++ b/.dotnet/src/Generated/Models/ChatCompletionResponseMessage.Serialization.cs @@ -0,0 +1,190 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class ChatCompletionResponseMessage : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ChatCompletionResponseMessage)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + if (Content != null) + { + writer.WritePropertyName("content"u8); + writer.WriteStringValue(Content); + } + else + { + writer.WriteNull("content"); + } + if (Optional.IsCollectionDefined(ToolCalls)) + { + writer.WritePropertyName("tool_calls"u8); + writer.WriteStartArray(); + foreach (var item in ToolCalls) + { + writer.WriteObjectValue(item); + } + writer.WriteEndArray(); + } + writer.WritePropertyName("role"u8); + writer.WriteStringValue(Role.ToString()); + if (Optional.IsDefined(FunctionCall)) + { + writer.WritePropertyName("function_call"u8); + writer.WriteObjectValue(FunctionCall); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + ChatCompletionResponseMessage IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ChatCompletionResponseMessage)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeChatCompletionResponseMessage(document.RootElement, options); + } + + internal static ChatCompletionResponseMessage DeserializeChatCompletionResponseMessage(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string content = default; + IReadOnlyList toolCalls = default; + ChatCompletionResponseMessageRole role = default; + ChatCompletionResponseMessageFunctionCall functionCall = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("content"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + content = null; + continue; + } + content = property.Value.GetString(); + continue; + } + if (property.NameEquals("tool_calls"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(ChatCompletionMessageToolCall.DeserializeChatCompletionMessageToolCall(item, options)); + } + toolCalls = array; + continue; + } + if (property.NameEquals("role"u8)) + { + role = new ChatCompletionResponseMessageRole(property.Value.GetString()); + continue; + } + if (property.NameEquals("function_call"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + functionCall = ChatCompletionResponseMessageFunctionCall.DeserializeChatCompletionResponseMessageFunctionCall(property.Value, options); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + 
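+            // Any JSON properties not matched above were collected into additionalPropertiesDictionary
+            // (skipped for the "W" wire format), so response fields unknown to this library version still
+            // survive a deserialize/serialize round trip via _serializedAdditionalRawData.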
serializedAdditionalRawData = additionalPropertiesDictionary; + return new ChatCompletionResponseMessage(content, toolCalls ?? new ChangeTrackingList(), role, functionCall, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(ChatCompletionResponseMessage)} does not support '{options.Format}' format."); + } + } + + ChatCompletionResponseMessage IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeChatCompletionResponseMessage(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(ChatCompletionResponseMessage)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static ChatCompletionResponseMessage FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeChatCompletionResponseMessage(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/ChatCompletionResponseMessage.cs b/.dotnet/src/Generated/Models/ChatCompletionResponseMessage.cs new file mode 100644 index 000000000..f67c54a22 --- /dev/null +++ b/.dotnet/src/Generated/Models/ChatCompletionResponseMessage.cs @@ -0,0 +1,82 @@ +// + +using System; +using System.Collections.Generic; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// The ChatCompletionResponseMessage. + internal partial class ChatCompletionResponseMessage + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The contents of the message. + internal ChatCompletionResponseMessage(string content) + { + Content = content; + ToolCalls = new ChangeTrackingList(); + } + + /// Initializes a new instance of . + /// The contents of the message. + /// + /// The role of the author of this message. + /// Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model. 
+ /// Keeps track of any properties unknown to the library. + internal ChatCompletionResponseMessage(string content, IReadOnlyList toolCalls, ChatCompletionResponseMessageRole role, ChatCompletionResponseMessageFunctionCall functionCall, IDictionary serializedAdditionalRawData) + { + Content = content; + ToolCalls = toolCalls; + Role = role; + FunctionCall = functionCall; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal ChatCompletionResponseMessage() + { + } + + /// The contents of the message. + public string Content { get; } + /// Gets the tool calls. + public IReadOnlyList ToolCalls { get; } + /// The role of the author of this message. + public ChatCompletionResponseMessageRole Role { get; } = ChatCompletionResponseMessageRole.Assistant; + + /// Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model. + public ChatCompletionResponseMessageFunctionCall FunctionCall { get; } + } +} diff --git a/.dotnet/src/Generated/Models/ChatCompletionResponseMessageFunctionCall.Serialization.cs b/.dotnet/src/Generated/Models/ChatCompletionResponseMessageFunctionCall.Serialization.cs new file mode 100644 index 000000000..41fdad1bb --- /dev/null +++ b/.dotnet/src/Generated/Models/ChatCompletionResponseMessageFunctionCall.Serialization.cs @@ -0,0 +1,138 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class ChatCompletionResponseMessageFunctionCall : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ChatCompletionResponseMessageFunctionCall)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("arguments"u8); + writer.WriteStringValue(Arguments); + writer.WritePropertyName("name"u8); + writer.WriteStringValue(Name); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + ChatCompletionResponseMessageFunctionCall IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ChatCompletionResponseMessageFunctionCall)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeChatCompletionResponseMessageFunctionCall(document.RootElement, options); + } + + internal static ChatCompletionResponseMessageFunctionCall DeserializeChatCompletionResponseMessageFunctionCall(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string arguments = default; + string name = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("arguments"u8)) + { + arguments = property.Value.GetString(); + continue; + } + if (property.NameEquals("name"u8)) + { + name = property.Value.GetString(); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new ChatCompletionResponseMessageFunctionCall(arguments, name, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(ChatCompletionResponseMessageFunctionCall)} does not support '{options.Format}' format."); + } + } + + ChatCompletionResponseMessageFunctionCall IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeChatCompletionResponseMessageFunctionCall(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(ChatCompletionResponseMessageFunctionCall)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static ChatCompletionResponseMessageFunctionCall FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeChatCompletionResponseMessageFunctionCall(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. 
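+        // Note (illustrative values): the Write method above emits "arguments" first and then "name",
+        // matching the deprecated function_call wire shape, e.g.:
+        //
+        //     {"arguments":"{\"location\":\"Boston\"}","name":"get_weather"}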
+ internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/ChatCompletionResponseMessageFunctionCall.cs b/.dotnet/src/Generated/Models/ChatCompletionResponseMessageFunctionCall.cs new file mode 100644 index 000000000..5e645efab --- /dev/null +++ b/.dotnet/src/Generated/Models/ChatCompletionResponseMessageFunctionCall.cs @@ -0,0 +1,90 @@ +// + +using System; +using System.Collections.Generic; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// The ChatCompletionResponseMessageFunctionCall. + internal partial class ChatCompletionResponseMessageFunctionCall + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// The arguments to call the function with, as generated by the model in JSON format. Note that + /// the model does not always generate valid JSON, and may hallucinate parameters not defined by + /// your function schema. Validate the arguments in your code before calling your function. + /// + /// The name of the function to call. + /// or is null. + internal ChatCompletionResponseMessageFunctionCall(string arguments, string name) + { + Argument.AssertNotNull(arguments, nameof(arguments)); + Argument.AssertNotNull(name, nameof(name)); + + Arguments = arguments; + Name = name; + } + + /// Initializes a new instance of . + /// + /// The arguments to call the function with, as generated by the model in JSON format. Note that + /// the model does not always generate valid JSON, and may hallucinate parameters not defined by + /// your function schema. Validate the arguments in your code before calling your function. + /// + /// The name of the function to call. + /// Keeps track of any properties unknown to the library. + internal ChatCompletionResponseMessageFunctionCall(string arguments, string name, IDictionary serializedAdditionalRawData) + { + Arguments = arguments; + Name = name; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal ChatCompletionResponseMessageFunctionCall() + { + } + + /// + /// The arguments to call the function with, as generated by the model in JSON format. Note that + /// the model does not always generate valid JSON, and may hallucinate parameters not defined by + /// your function schema. Validate the arguments in your code before calling your function. + /// + public string Arguments { get; } + /// The name of the function to call. 
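+        // Example (illustrative): per the Arguments documentation in this class, the model's JSON is not
+        // guaranteed to be valid, so callers should parse defensively before invoking the target function.
+        // A minimal sketch:
+        //
+        //     try
+        //     {
+        //         using JsonDocument args = JsonDocument.Parse(functionCall.Arguments);
+        //         // ... read the expected parameters from args.RootElement ...
+        //     }
+        //     catch (JsonException)
+        //     {
+        //         // not valid JSON; reject the call or re-prompt the model
+        //     }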
+ public string Name { get; } + } +} diff --git a/.dotnet/src/Generated/Models/ChatCompletionResponseMessageRole.cs b/.dotnet/src/Generated/Models/ChatCompletionResponseMessageRole.cs new file mode 100644 index 000000000..5ff79558a --- /dev/null +++ b/.dotnet/src/Generated/Models/ChatCompletionResponseMessageRole.cs @@ -0,0 +1,43 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Internal.Models +{ + /// The ChatCompletionResponseMessage_role. + internal readonly partial struct ChatCompletionResponseMessageRole : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public ChatCompletionResponseMessageRole(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string AssistantValue = "assistant"; + + /// assistant. + public static ChatCompletionResponseMessageRole Assistant { get; } = new ChatCompletionResponseMessageRole(AssistantValue); + /// Determines if two values are the same. + public static bool operator ==(ChatCompletionResponseMessageRole left, ChatCompletionResponseMessageRole right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(ChatCompletionResponseMessageRole left, ChatCompletionResponseMessageRole right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator ChatCompletionResponseMessageRole(string value) => new ChatCompletionResponseMessageRole(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is ChatCompletionResponseMessageRole other && Equals(other); + /// + public bool Equals(ChatCompletionResponseMessageRole other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/ChatCompletionTokenLogprob.Serialization.cs b/.dotnet/src/Generated/Models/ChatCompletionTokenLogprob.Serialization.cs new file mode 100644 index 000000000..10b2919c1 --- /dev/null +++ b/.dotnet/src/Generated/Models/ChatCompletionTokenLogprob.Serialization.cs @@ -0,0 +1,186 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class ChatCompletionTokenLogprob : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ChatCompletionTokenLogprob)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("token"u8); + writer.WriteStringValue(Token); + writer.WritePropertyName("logprob"u8); + writer.WriteNumberValue(Logprob); + if (Bytes != null && Optional.IsCollectionDefined(Bytes)) + { + writer.WritePropertyName("bytes"u8); + writer.WriteStartArray(); + foreach (var item in Bytes) + { + writer.WriteNumberValue(item); + } + writer.WriteEndArray(); + } + else + { + writer.WriteNull("bytes"); + } + writer.WritePropertyName("top_logprobs"u8); + writer.WriteStartArray(); + foreach (var item in TopLogprobs) + { + writer.WriteObjectValue(item); + } + writer.WriteEndArray(); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + ChatCompletionTokenLogprob IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ChatCompletionTokenLogprob)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeChatCompletionTokenLogprob(document.RootElement, options); + } + + internal static ChatCompletionTokenLogprob DeserializeChatCompletionTokenLogprob(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string token = default; + double logprob = default; + IReadOnlyList bytes = default; + IReadOnlyList topLogprobs = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("token"u8)) + { + token = property.Value.GetString(); + continue; + } + if (property.NameEquals("logprob"u8)) + { + logprob = property.Value.GetDouble(); + continue; + } + if (property.NameEquals("bytes"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + bytes = new ChangeTrackingList(); + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(item.GetInt64()); + } + bytes = array; + continue; + } + if (property.NameEquals("top_logprobs"u8)) + { + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(ChatCompletionTokenLogprobTopLogprob.DeserializeChatCompletionTokenLogprobTopLogprob(item, options)); + } + topLogprobs = array; + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new ChatCompletionTokenLogprob(token, logprob, bytes, topLogprobs, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + 
{ + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(ChatCompletionTokenLogprob)} does not support '{options.Format}' format."); + } + } + + ChatCompletionTokenLogprob IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeChatCompletionTokenLogprob(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(ChatCompletionTokenLogprob)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static ChatCompletionTokenLogprob FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeChatCompletionTokenLogprob(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/ChatCompletionTokenLogprob.cs b/.dotnet/src/Generated/Models/ChatCompletionTokenLogprob.cs new file mode 100644 index 000000000..08702fac6 --- /dev/null +++ b/.dotnet/src/Generated/Models/ChatCompletionTokenLogprob.cs @@ -0,0 +1,115 @@ +// + +using System; +using System.Collections.Generic; +using System.Linq; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// The ChatCompletionTokenLogprob. + internal partial class ChatCompletionTokenLogprob + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The token. + /// The log probability of this token. + /// + /// A list of integers representing the UTF-8 bytes representation of the token. Useful in + /// instances where characters are represented by multiple tokens and their byte representations + /// must be combined to generate the correct text representation. Can be `null` if there is no + /// bytes representation for the token. + /// + /// + /// List of the most likely tokens and their log probability, at this token position. In rare + /// cases, there may be fewer than the number of requested `top_logprobs` returned. + /// + /// or is null. 
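+        // Example (illustrative; assumes System.Text and System.Linq are in scope). Logprob is a (natural)
+        // log probability, and Bytes, when present, holds the token's UTF-8 bytes as described above:
+        //
+        //     double probability = Math.Exp(tokenLogprob.Logprob);
+        //     string text = tokenLogprob.Bytes is null
+        //         ? tokenLogprob.Token
+        //         : Encoding.UTF8.GetString(tokenLogprob.Bytes.Select(b => (byte)b).ToArray());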
+ internal ChatCompletionTokenLogprob(string token, double logprob, IEnumerable bytes, IEnumerable topLogprobs) + { + Argument.AssertNotNull(token, nameof(token)); + Argument.AssertNotNull(topLogprobs, nameof(topLogprobs)); + + Token = token; + Logprob = logprob; + Bytes = bytes?.ToList(); + TopLogprobs = topLogprobs.ToList(); + } + + /// Initializes a new instance of . + /// The token. + /// The log probability of this token. + /// + /// A list of integers representing the UTF-8 bytes representation of the token. Useful in + /// instances where characters are represented by multiple tokens and their byte representations + /// must be combined to generate the correct text representation. Can be `null` if there is no + /// bytes representation for the token. + /// + /// + /// List of the most likely tokens and their log probability, at this token position. In rare + /// cases, there may be fewer than the number of requested `top_logprobs` returned. + /// + /// Keeps track of any properties unknown to the library. + internal ChatCompletionTokenLogprob(string token, double logprob, IReadOnlyList bytes, IReadOnlyList topLogprobs, IDictionary serializedAdditionalRawData) + { + Token = token; + Logprob = logprob; + Bytes = bytes; + TopLogprobs = topLogprobs; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal ChatCompletionTokenLogprob() + { + } + + /// The token. + public string Token { get; } + /// The log probability of this token. + public double Logprob { get; } + /// + /// A list of integers representing the UTF-8 bytes representation of the token. Useful in + /// instances where characters are represented by multiple tokens and their byte representations + /// must be combined to generate the correct text representation. Can be `null` if there is no + /// bytes representation for the token. + /// + public IReadOnlyList Bytes { get; } + /// + /// List of the most likely tokens and their log probability, at this token position. In rare + /// cases, there may be fewer than the number of requested `top_logprobs` returned. + /// + public IReadOnlyList TopLogprobs { get; } + } +} diff --git a/.dotnet/src/Generated/Models/ChatCompletionTokenLogprobTopLogprob.Serialization.cs b/.dotnet/src/Generated/Models/ChatCompletionTokenLogprobTopLogprob.Serialization.cs new file mode 100644 index 000000000..f8182e28c --- /dev/null +++ b/.dotnet/src/Generated/Models/ChatCompletionTokenLogprobTopLogprob.Serialization.cs @@ -0,0 +1,168 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class ChatCompletionTokenLogprobTopLogprob : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ChatCompletionTokenLogprobTopLogprob)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("token"u8); + writer.WriteStringValue(Token); + writer.WritePropertyName("logprob"u8); + writer.WriteNumberValue(Logprob); + if (Bytes != null && Optional.IsCollectionDefined(Bytes)) + { + writer.WritePropertyName("bytes"u8); + writer.WriteStartArray(); + foreach (var item in Bytes) + { + writer.WriteNumberValue(item); + } + writer.WriteEndArray(); + } + else + { + writer.WriteNull("bytes"); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + ChatCompletionTokenLogprobTopLogprob IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ChatCompletionTokenLogprobTopLogprob)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeChatCompletionTokenLogprobTopLogprob(document.RootElement, options); + } + + internal static ChatCompletionTokenLogprobTopLogprob DeserializeChatCompletionTokenLogprobTopLogprob(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string token = default; + double logprob = default; + IReadOnlyList bytes = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("token"u8)) + { + token = property.Value.GetString(); + continue; + } + if (property.NameEquals("logprob"u8)) + { + logprob = property.Value.GetDouble(); + continue; + } + if (property.NameEquals("bytes"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + bytes = new ChangeTrackingList(); + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(item.GetInt64()); + } + bytes = array; + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new ChatCompletionTokenLogprobTopLogprob(token, logprob, bytes, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(ChatCompletionTokenLogprobTopLogprob)} does not support '{options.Format}' format."); + } + } + + ChatCompletionTokenLogprobTopLogprob IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeChatCompletionTokenLogprobTopLogprob(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(ChatCompletionTokenLogprobTopLogprob)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static ChatCompletionTokenLogprobTopLogprob FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeChatCompletionTokenLogprobTopLogprob(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/ChatCompletionTokenLogprobTopLogprob.cs b/.dotnet/src/Generated/Models/ChatCompletionTokenLogprobTopLogprob.cs new file mode 100644 index 000000000..2fbfbdfa1 --- /dev/null +++ b/.dotnet/src/Generated/Models/ChatCompletionTokenLogprobTopLogprob.cs @@ -0,0 +1,99 @@ +// + +using System; +using System.Collections.Generic; +using System.Linq; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// The ChatCompletionTokenLogprobTopLogprob. + internal partial class ChatCompletionTokenLogprobTopLogprob + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The token. + /// The log probability of this token. + /// + /// A list of integers representing the UTF-8 bytes representation of the token. Useful in + /// instances where characters are represented by multiple tokens and their byte representations + /// must be combined to generate the correct text representation. Can be `null` if there is no + /// bytes representation for the token. + /// + /// is null. + internal ChatCompletionTokenLogprobTopLogprob(string token, double logprob, IEnumerable bytes) + { + Argument.AssertNotNull(token, nameof(token)); + + Token = token; + Logprob = logprob; + Bytes = bytes?.ToList(); + } + + /// Initializes a new instance of . + /// The token. 
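
A hedged sketch of the JSON shape the deserializer above accepts, assuming access to the internal type; the payload values are invented for illustration.

```csharp
using System.Text.Json;
using OpenAI.Internal.Models;

// A top_logprobs entry as it appears on the wire; "bytes" may also be null.
string sample = """{"token":"Hello","logprob":-0.31,"bytes":[72,101,108,108,111]}""";
using JsonDocument doc = JsonDocument.Parse(sample);
ChatCompletionTokenLogprobTopLogprob top =
    ChatCompletionTokenLogprobTopLogprob.DeserializeChatCompletionTokenLogprobTopLogprob(doc.RootElement);
// top.Token == "Hello"; top.Bytes has five elements.
```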
+ /// The log probability of this token. + /// + /// A list of integers representing the UTF-8 bytes representation of the token. Useful in + /// instances where characters are represented by multiple tokens and their byte representations + /// must be combined to generate the correct text representation. Can be `null` if there is no + /// bytes representation for the token. + /// + /// Keeps track of any properties unknown to the library. + internal ChatCompletionTokenLogprobTopLogprob(string token, double logprob, IReadOnlyList bytes, IDictionary serializedAdditionalRawData) + { + Token = token; + Logprob = logprob; + Bytes = bytes; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal ChatCompletionTokenLogprobTopLogprob() + { + } + + /// The token. + public string Token { get; } + /// The log probability of this token. + public double Logprob { get; } + /// + /// A list of integers representing the UTF-8 bytes representation of the token. Useful in + /// instances where characters are represented by multiple tokens and their byte representations + /// must be combined to generate the correct text representation. Can be `null` if there is no + /// bytes representation for the token. + /// + public IReadOnlyList Bytes { get; } + } +} diff --git a/.dotnet/src/Generated/Models/ChatCompletionTool.Serialization.cs b/.dotnet/src/Generated/Models/ChatCompletionTool.Serialization.cs new file mode 100644 index 000000000..7a0a7cd27 --- /dev/null +++ b/.dotnet/src/Generated/Models/ChatCompletionTool.Serialization.cs @@ -0,0 +1,138 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class ChatCompletionTool : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ChatCompletionTool)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("type"u8); + writer.WriteStringValue(Type.ToString()); + writer.WritePropertyName("function"u8); + writer.WriteObjectValue(Function); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + ChatCompletionTool IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ChatCompletionTool)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeChatCompletionTool(document.RootElement, options); + } + + internal static ChatCompletionTool DeserializeChatCompletionTool(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + ChatCompletionToolType type = default; + FunctionObject function = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("type"u8)) + { + type = new ChatCompletionToolType(property.Value.GetString()); + continue; + } + if (property.NameEquals("function"u8)) + { + function = FunctionObject.DeserializeFunctionObject(property.Value, options); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new ChatCompletionTool(type, function, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(ChatCompletionTool)} does not support '{options.Format}' format."); + } + } + + ChatCompletionTool IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeChatCompletionTool(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(ChatCompletionTool)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static ChatCompletionTool FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeChatCompletionTool(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/ChatCompletionTool.cs b/.dotnet/src/Generated/Models/ChatCompletionTool.cs new file mode 100644 index 000000000..e8c98d082 --- /dev/null +++ b/.dotnet/src/Generated/Models/ChatCompletionTool.cs @@ -0,0 +1,76 @@ +// + +using System; +using System.Collections.Generic; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// The ChatCompletionTool. + internal partial class ChatCompletionTool + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . 
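
Spelled out as runnable statements, the four `BinaryData` payload examples this doc block lists are:

```csharp
using System;

BinaryData a = BinaryData.FromObjectAsJson("foo");                 // "foo"
BinaryData b = BinaryData.FromString("\"foo\"");                   // "foo"
BinaryData c = BinaryData.FromObjectAsJson(new { key = "value" }); // {"key":"value"}
BinaryData d = BinaryData.FromString("{\"key\": \"value\"}");      // {"key":"value"}
```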
+ /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// is null. + public ChatCompletionTool(FunctionObject function) + { + Argument.AssertNotNull(function, nameof(function)); + + Function = function; + } + + /// Initializes a new instance of . + /// The type of the tool. Currently, only `function` is supported. + /// + /// Keeps track of any properties unknown to the library. + internal ChatCompletionTool(ChatCompletionToolType type, FunctionObject function, IDictionary serializedAdditionalRawData) + { + Type = type; + Function = function; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal ChatCompletionTool() + { + } + + /// The type of the tool. Currently, only `function` is supported. + public ChatCompletionToolType Type { get; } = ChatCompletionToolType.Function; + + /// Gets the function. + public FunctionObject Function { get; } + } +} diff --git a/.dotnet/src/Generated/Models/ChatCompletionToolType.cs b/.dotnet/src/Generated/Models/ChatCompletionToolType.cs new file mode 100644 index 000000000..af31e3c02 --- /dev/null +++ b/.dotnet/src/Generated/Models/ChatCompletionToolType.cs @@ -0,0 +1,43 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Internal.Models +{ + /// The ChatCompletionTool_type. + internal readonly partial struct ChatCompletionToolType : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public ChatCompletionToolType(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string FunctionValue = "function"; + + /// function. + public static ChatCompletionToolType Function { get; } = new ChatCompletionToolType(FunctionValue); + /// Determines if two values are the same. + public static bool operator ==(ChatCompletionToolType left, ChatCompletionToolType right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(ChatCompletionToolType left, ChatCompletionToolType right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator ChatCompletionToolType(string value) => new ChatCompletionToolType(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is ChatCompletionToolType other && Equals(other); + /// + public bool Equals(ChatCompletionToolType other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 
0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/CompletionUsage.Serialization.cs b/.dotnet/src/Generated/Models/CompletionUsage.Serialization.cs new file mode 100644 index 000000000..9c61687cb --- /dev/null +++ b/.dotnet/src/Generated/Models/CompletionUsage.Serialization.cs @@ -0,0 +1,146 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class CompletionUsage : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CompletionUsage)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("prompt_tokens"u8); + writer.WriteNumberValue(PromptTokens); + writer.WritePropertyName("completion_tokens"u8); + writer.WriteNumberValue(CompletionTokens); + writer.WritePropertyName("total_tokens"u8); + writer.WriteNumberValue(TotalTokens); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + CompletionUsage IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CompletionUsage)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeCompletionUsage(document.RootElement, options); + } + + internal static CompletionUsage DeserializeCompletionUsage(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + long promptTokens = default; + long completionTokens = default; + long totalTokens = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("prompt_tokens"u8)) + { + promptTokens = property.Value.GetInt64(); + continue; + } + if (property.NameEquals("completion_tokens"u8)) + { + completionTokens = property.Value.GetInt64(); + continue; + } + if (property.NameEquals("total_tokens"u8)) + { + totalTokens = property.Value.GetInt64(); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new CompletionUsage(promptTokens, completionTokens, totalTokens, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(CompletionUsage)} does not support '{options.Format}' format."); + } + } + + CompletionUsage IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeCompletionUsage(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(CompletionUsage)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static CompletionUsage FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeCompletionUsage(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/CompletionUsage.cs b/.dotnet/src/Generated/Models/CompletionUsage.cs new file mode 100644 index 000000000..64c5aa6ec --- /dev/null +++ b/.dotnet/src/Generated/Models/CompletionUsage.cs @@ -0,0 +1,79 @@ +// + +using System; +using System.Collections.Generic; + +namespace OpenAI.Internal.Models +{ + /// Usage statistics for the completion request. + internal partial class CompletionUsage + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// Number of tokens in the prompt. + /// Number of tokens in the generated completion. + /// Total number of tokens used in the request (prompt + completion). + internal CompletionUsage(long promptTokens, long completionTokens, long totalTokens) + { + PromptTokens = promptTokens; + CompletionTokens = completionTokens; + TotalTokens = totalTokens; + } + + /// Initializes a new instance of . + /// Number of tokens in the prompt. + /// Number of tokens in the generated completion. + /// Total number of tokens used in the request (prompt + completion). + /// Keeps track of any properties unknown to the library. 
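
A minimal sketch of the `IJsonModel`/`IPersistableModel` contract implemented above: `"J"` selects the JSON path, `"W"` defers to `GetFormatFromOptions`, and `System.ClientModel`'s `ModelReaderWriter` round-trips the model through it. Token counts are invented, and the generated types are internal to the library.

```csharp
using System;
using System.ClientModel.Primitives;
using OpenAI.Internal.Models;

// Note the invariant the CompletionUsage docs describe: total = prompt + completion.
BinaryData json = BinaryData.FromString(
    """{"prompt_tokens":9,"completion_tokens":12,"total_tokens":21}""");
CompletionUsage usage = ModelReaderWriter.Read<CompletionUsage>(json);
BinaryData written = ModelReaderWriter.Write(usage); // equivalent JSON back out
```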
+ internal CompletionUsage(long promptTokens, long completionTokens, long totalTokens, IDictionary serializedAdditionalRawData) + { + PromptTokens = promptTokens; + CompletionTokens = completionTokens; + TotalTokens = totalTokens; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal CompletionUsage() + { + } + + /// Number of tokens in the prompt. + public long PromptTokens { get; } + /// Number of tokens in the generated completion. + public long CompletionTokens { get; } + /// Total number of tokens used in the request (prompt + completion). + public long TotalTokens { get; } + } +} diff --git a/.dotnet/src/Generated/Models/CreateAssistantFileRequest.Serialization.cs b/.dotnet/src/Generated/Models/CreateAssistantFileRequest.Serialization.cs new file mode 100644 index 000000000..a407b2f62 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateAssistantFileRequest.Serialization.cs @@ -0,0 +1,130 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class CreateAssistantFileRequest : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateAssistantFileRequest)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("file_id"u8); + writer.WriteStringValue(FileId); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + CreateAssistantFileRequest IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateAssistantFileRequest)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeCreateAssistantFileRequest(document.RootElement, options); + } + + internal static CreateAssistantFileRequest DeserializeCreateAssistantFileRequest(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string fileId = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("file_id"u8)) + { + fileId = property.Value.GetString(); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new CreateAssistantFileRequest(fileId, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(CreateAssistantFileRequest)} does not support '{options.Format}' format."); + } + } + + CreateAssistantFileRequest IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeCreateAssistantFileRequest(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(CreateAssistantFileRequest)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static CreateAssistantFileRequest FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeCreateAssistantFileRequest(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/CreateAssistantFileRequest.cs b/.dotnet/src/Generated/Models/CreateAssistantFileRequest.cs new file mode 100644 index 000000000..ce3571023 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateAssistantFileRequest.cs @@ -0,0 +1,80 @@ +// + +using System; +using System.Collections.Generic; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// The CreateAssistantFileRequest. + internal partial class CreateAssistantFileRequest + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . 
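
A sketch of the single-property body this request model produces (the file ID is a placeholder, and the type is internal to the library):

```csharp
using System;
using System.ClientModel.Primitives;
using OpenAI.Internal.Models;

var request = new CreateAssistantFileRequest("file-abc123"); // placeholder ID
BinaryData body = ModelReaderWriter.Write(request);
// body.ToString() == {"file_id":"file-abc123"}
```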
+ /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// A [File](/docs/api-reference/files) ID (with `purpose="assistants"`) that the assistant should + /// use. Useful for tools like `retrieval` and `code_interpreter` that can access files. + /// + /// is null. + public CreateAssistantFileRequest(string fileId) + { + Argument.AssertNotNull(fileId, nameof(fileId)); + + FileId = fileId; + } + + /// Initializes a new instance of . + /// + /// A [File](/docs/api-reference/files) ID (with `purpose="assistants"`) that the assistant should + /// use. Useful for tools like `retrieval` and `code_interpreter` that can access files. + /// + /// Keeps track of any properties unknown to the library. + internal CreateAssistantFileRequest(string fileId, IDictionary serializedAdditionalRawData) + { + FileId = fileId; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal CreateAssistantFileRequest() + { + } + + /// + /// A [File](/docs/api-reference/files) ID (with `purpose="assistants"`) that the assistant should + /// use. Useful for tools like `retrieval` and `code_interpreter` that can access files. + /// + public string FileId { get; } + } +} diff --git a/.dotnet/src/Generated/Models/CreateAssistantRequest.Serialization.cs b/.dotnet/src/Generated/Models/CreateAssistantRequest.Serialization.cs new file mode 100644 index 000000000..960844426 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateAssistantRequest.Serialization.cs @@ -0,0 +1,309 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class CreateAssistantRequest : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateAssistantRequest)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("model"u8); + writer.WriteStringValue(Model); + if (Optional.IsDefined(Name)) + { + if (Name != null) + { + writer.WritePropertyName("name"u8); + writer.WriteStringValue(Name); + } + else + { + writer.WriteNull("name"); + } + } + if (Optional.IsDefined(Description)) + { + if (Description != null) + { + writer.WritePropertyName("description"u8); + writer.WriteStringValue(Description); + } + else + { + writer.WriteNull("description"); + } + } + if (Optional.IsDefined(Instructions)) + { + if (Instructions != null) + { + writer.WritePropertyName("instructions"u8); + writer.WriteStringValue(Instructions); + } + else + { + writer.WriteNull("instructions"); + } + } + if (Optional.IsCollectionDefined(Tools)) + { + writer.WritePropertyName("tools"u8); + writer.WriteStartArray(); + foreach (var item in Tools) + { + if (item == null) + { + writer.WriteNullValue(); + continue; + } +#if NET6_0_OR_GREATER + writer.WriteRawValue(item); +#else + using (JsonDocument document = JsonDocument.Parse(item)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + writer.WriteEndArray(); + } + if (Optional.IsCollectionDefined(FileIds)) + { + writer.WritePropertyName("file_ids"u8); + writer.WriteStartArray(); + foreach (var item in FileIds) + { + writer.WriteStringValue(item); + } + writer.WriteEndArray(); + } + if (Optional.IsCollectionDefined(Metadata)) + { + if (Metadata != null) + { + writer.WritePropertyName("metadata"u8); + writer.WriteStartObject(); + foreach (var item in Metadata) + { + writer.WritePropertyName(item.Key); + writer.WriteStringValue(item.Value); + } + writer.WriteEndObject(); + } + else + { + writer.WriteNull("metadata"); + } + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + CreateAssistantRequest IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateAssistantRequest)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeCreateAssistantRequest(document.RootElement, options); + } + + internal static CreateAssistantRequest DeserializeCreateAssistantRequest(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string model = default; + string name = default; + string description = default; + string instructions = default; + IList tools = default; + IList fileIds = default; + IDictionary metadata = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("model"u8)) + { + model = property.Value.GetString(); + continue; + } + if (property.NameEquals("name"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + name = null; + continue; + } + name = property.Value.GetString(); + continue; + } + if (property.NameEquals("description"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + description = null; + continue; + } + description = property.Value.GetString(); + continue; + } + if (property.NameEquals("instructions"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + instructions = null; + continue; + } + instructions = property.Value.GetString(); + continue; + } + if (property.NameEquals("tools"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + if (item.ValueKind == JsonValueKind.Null) + { + array.Add(null); + } + else + { + array.Add(BinaryData.FromString(item.GetRawText())); + } + } + tools = array; + continue; + } + if (property.NameEquals("file_ids"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(item.GetString()); + } + fileIds = array; + continue; + } + if (property.NameEquals("metadata"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + Dictionary dictionary = new Dictionary(); + foreach (var property0 in property.Value.EnumerateObject()) + { + dictionary.Add(property0.Name, property0.Value.GetString()); + } + metadata = dictionary; + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new CreateAssistantRequest( + model, + name, + description, + instructions, + tools ?? new ChangeTrackingList(), + fileIds ?? new ChangeTrackingList(), + metadata ?? new ChangeTrackingDictionary(), + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(CreateAssistantRequest)} does not support '{options.Format}' format."); + } + } + + CreateAssistantRequest IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeCreateAssistantRequest(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(CreateAssistantRequest)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static CreateAssistantRequest FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeCreateAssistantRequest(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/CreateAssistantRequest.cs b/.dotnet/src/Generated/Models/CreateAssistantRequest.cs new file mode 100644 index 000000000..750f6db64 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateAssistantRequest.cs @@ -0,0 +1,159 @@ +// + +using System; +using System.Collections.Generic; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// The CreateAssistantRequest. + internal partial class CreateAssistantRequest + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to + /// see all of your available models, or see our [Model overview](/docs/models/overview) for + /// descriptions of them. + /// + /// is null. + public CreateAssistantRequest(string model) + { + Argument.AssertNotNull(model, nameof(model)); + + Model = model; + Tools = new ChangeTrackingList(); + FileIds = new ChangeTrackingList(); + Metadata = new ChangeTrackingDictionary(); + } + + /// Initializes a new instance of . + /// + /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to + /// see all of your available models, or see our [Model overview](/docs/models/overview) for + /// descriptions of them. + /// + /// The name of the assistant. The maximum length is 256 characters. + /// The description of the assistant. 
The maximum length is 512 characters. + /// The system instructions that the assistant uses. The maximum length is 32768 characters. + /// + /// A list of tools enabled on the assistant. There can be a maximum of 128 tools per assistant. + /// Tools can be of types `code_interpreter`, `retrieval`, or `function`. + /// + /// + /// A list of [file](/docs/api-reference/files) IDs attached to this assistant. There can be a + /// maximum of 20 files attached to the assistant. Files are ordered by their creation date in + /// ascending order. + /// + /// + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + /// additional information about the object in a structured format. Keys can be a maximum of 64 + /// characters long and values can be a maximum of 512 characters long. + /// + /// Keeps track of any properties unknown to the library. + internal CreateAssistantRequest(string model, string name, string description, string instructions, IList tools, IList fileIds, IDictionary metadata, IDictionary serializedAdditionalRawData) + { + Model = model; + Name = name; + Description = description; + Instructions = instructions; + Tools = tools; + FileIds = fileIds; + Metadata = metadata; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal CreateAssistantRequest() + { + } + + /// + /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to + /// see all of your available models, or see our [Model overview](/docs/models/overview) for + /// descriptions of them. + /// + public string Model { get; } + /// The name of the assistant. The maximum length is 256 characters. + public string Name { get; set; } + /// The description of the assistant. The maximum length is 512 characters. + public string Description { get; set; } + /// The system instructions that the assistant uses. The maximum length is 32768 characters. + public string Instructions { get; set; } + /// + /// A list of tools enabled on the assistant. There can be a maximum of 128 tools per assistant. + /// Tools can be of types `code_interpreter`, `retrieval`, or `function`. + /// + /// To assign an object to the element of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + public IList Tools { get; } + /// + /// A list of [file](/docs/api-reference/files) IDs attached to this assistant. There can be a + /// maximum of 20 files attached to the assistant. Files are ordered by their creation date in + /// ascending order. + /// + public IList FileIds { get; } + /// + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + /// additional information about the object in a structured format. Keys can be a maximum of 64 + /// characters long and values can be a maximum of 512 characters long.
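
A hedged sketch of populating the model above; the model name and values are placeholders, and the type is internal to the library.

```csharp
using OpenAI.Internal.Models;

var create = new CreateAssistantRequest("gpt-4-turbo-preview") // placeholder model ID
{
    Name = "Math Tutor",
    Description = "Step-by-step math help.",
    Instructions = "You answer math questions step by step.",
};
create.FileIds.Add("file-abc123");  // up to 20 attached files
create.Metadata["team"] = "docs";   // up to 16 key-value pairs
```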
+ /// + public IDictionary Metadata { get; set; } + } +} diff --git a/.dotnet/src/Generated/Models/CreateChatCompletionRequest.Serialization.cs b/.dotnet/src/Generated/Models/CreateChatCompletionRequest.Serialization.cs new file mode 100644 index 000000000..749771a7c --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateChatCompletionRequest.Serialization.cs @@ -0,0 +1,601 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class CreateChatCompletionRequest : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateChatCompletionRequest)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("messages"u8); + writer.WriteStartArray(); + foreach (var item in Messages) + { + if (item == null) + { + writer.WriteNullValue(); + continue; + } +#if NET6_0_OR_GREATER + writer.WriteRawValue(item); +#else + using (JsonDocument document = JsonDocument.Parse(item)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + writer.WriteEndArray(); + writer.WritePropertyName("model"u8); + writer.WriteStringValue(Model.ToString()); + if (Optional.IsDefined(FrequencyPenalty)) + { + if (FrequencyPenalty != null) + { + writer.WritePropertyName("frequency_penalty"u8); + writer.WriteNumberValue(FrequencyPenalty.Value); + } + else + { + writer.WriteNull("frequency_penalty"); + } + } + if (Optional.IsCollectionDefined(LogitBias)) + { + if (LogitBias != null) + { + writer.WritePropertyName("logit_bias"u8); + writer.WriteStartObject(); + foreach (var item in LogitBias) + { + writer.WritePropertyName(item.Key); + writer.WriteNumberValue(item.Value); + } + writer.WriteEndObject(); + } + else + { + writer.WriteNull("logit_bias"); + } + } + if (Optional.IsDefined(Logprobs)) + { + if (Logprobs != null) + { + writer.WritePropertyName("logprobs"u8); + writer.WriteBooleanValue(Logprobs.Value); + } + else + { + writer.WriteNull("logprobs"); + } + } + if (Optional.IsDefined(TopLogprobs)) + { + if (TopLogprobs != null) + { + writer.WritePropertyName("top_logprobs"u8); + writer.WriteNumberValue(TopLogprobs.Value); + } + else + { + writer.WriteNull("top_logprobs"); + } + } + if (Optional.IsDefined(MaxTokens)) + { + if (MaxTokens != null) + { + writer.WritePropertyName("max_tokens"u8); + writer.WriteNumberValue(MaxTokens.Value); + } + else + { + writer.WriteNull("max_tokens"); + } + } + if (Optional.IsDefined(N)) + { + if (N != null) + { + writer.WritePropertyName("n"u8); + writer.WriteNumberValue(N.Value); + } + else + { + writer.WriteNull("n"); + } + } + if (Optional.IsDefined(PresencePenalty)) + { + if (PresencePenalty != null) + { + writer.WritePropertyName("presence_penalty"u8); + writer.WriteNumberValue(PresencePenalty.Value); + } + else + { + writer.WriteNull("presence_penalty"); + } + } + if (Optional.IsDefined(ResponseFormat)) + { + writer.WritePropertyName("response_format"u8); + writer.WriteObjectValue(ResponseFormat); + } + if (Optional.IsDefined(Seed)) + { + if (Seed != null) + { + writer.WritePropertyName("seed"u8); + writer.WriteNumberValue(Seed.Value); + } + else + { + writer.WriteNull("seed"); + } + } + if 
(Optional.IsDefined(Stop)) + { + if (Stop != null) + { + writer.WritePropertyName("stop"u8); +#if NET6_0_OR_GREATER + writer.WriteRawValue(Stop); +#else + using (JsonDocument document = JsonDocument.Parse(Stop)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + else + { + writer.WriteNull("stop"); + } + } + if (Optional.IsDefined(Stream)) + { + if (Stream != null) + { + writer.WritePropertyName("stream"u8); + writer.WriteBooleanValue(Stream.Value); + } + else + { + writer.WriteNull("stream"); + } + } + if (Optional.IsDefined(Temperature)) + { + if (Temperature != null) + { + writer.WritePropertyName("temperature"u8); + writer.WriteNumberValue(Temperature.Value); + } + else + { + writer.WriteNull("temperature"); + } + } + if (Optional.IsDefined(TopP)) + { + if (TopP != null) + { + writer.WritePropertyName("top_p"u8); + writer.WriteNumberValue(TopP.Value); + } + else + { + writer.WriteNull("top_p"); + } + } + if (Optional.IsCollectionDefined(Tools)) + { + writer.WritePropertyName("tools"u8); + writer.WriteStartArray(); + foreach (var item in Tools) + { + writer.WriteObjectValue(item); + } + writer.WriteEndArray(); + } + if (Optional.IsDefined(ToolChoice)) + { + writer.WritePropertyName("tool_choice"u8); +#if NET6_0_OR_GREATER + writer.WriteRawValue(ToolChoice); +#else + using (JsonDocument document = JsonDocument.Parse(ToolChoice)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + if (Optional.IsDefined(User)) + { + writer.WritePropertyName("user"u8); + writer.WriteStringValue(User); + } + if (Optional.IsDefined(FunctionCall)) + { + writer.WritePropertyName("function_call"u8); +#if NET6_0_OR_GREATER + writer.WriteRawValue(FunctionCall); +#else + using (JsonDocument document = JsonDocument.Parse(FunctionCall)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + if (Optional.IsCollectionDefined(Functions)) + { + writer.WritePropertyName("functions"u8); + writer.WriteStartArray(); + foreach (var item in Functions) + { + writer.WriteObjectValue(item); + } + writer.WriteEndArray(); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + CreateChatCompletionRequest IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateChatCompletionRequest)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeCreateChatCompletionRequest(document.RootElement, options); + } + + internal static CreateChatCompletionRequest DeserializeCreateChatCompletionRequest(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + IList messages = default; + CreateChatCompletionRequestModel model = default; + double? frequencyPenalty = default; + IDictionary logitBias = default; + bool? logprobs = default; + long? topLogprobs = default; + long? 
maxTokens = default; + long? n = default; + double? presencePenalty = default; + CreateChatCompletionRequestResponseFormat responseFormat = default; + long? seed = default; + BinaryData stop = default; + bool? stream = default; + double? temperature = default; + double? topP = default; + IList tools = default; + BinaryData toolChoice = default; + string user = default; + BinaryData functionCall = default; + IList functions = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("messages"u8)) + { + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + if (item.ValueKind == JsonValueKind.Null) + { + array.Add(null); + } + else + { + array.Add(BinaryData.FromString(item.GetRawText())); + } + } + messages = array; + continue; + } + if (property.NameEquals("model"u8)) + { + model = new CreateChatCompletionRequestModel(property.Value.GetString()); + continue; + } + if (property.NameEquals("frequency_penalty"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + frequencyPenalty = null; + continue; + } + frequencyPenalty = property.Value.GetDouble(); + continue; + } + if (property.NameEquals("logit_bias"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + Dictionary dictionary = new Dictionary(); + foreach (var property0 in property.Value.EnumerateObject()) + { + dictionary.Add(property0.Name, property0.Value.GetInt64()); + } + logitBias = dictionary; + continue; + } + if (property.NameEquals("logprobs"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + logprobs = null; + continue; + } + logprobs = property.Value.GetBoolean(); + continue; + } + if (property.NameEquals("top_logprobs"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + topLogprobs = null; + continue; + } + topLogprobs = property.Value.GetInt64(); + continue; + } + if (property.NameEquals("max_tokens"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + maxTokens = null; + continue; + } + maxTokens = property.Value.GetInt64(); + continue; + } + if (property.NameEquals("n"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + n = null; + continue; + } + n = property.Value.GetInt64(); + continue; + } + if (property.NameEquals("presence_penalty"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + presencePenalty = null; + continue; + } + presencePenalty = property.Value.GetDouble(); + continue; + } + if (property.NameEquals("response_format"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + responseFormat = CreateChatCompletionRequestResponseFormat.DeserializeCreateChatCompletionRequestResponseFormat(property.Value, options); + continue; + } + if (property.NameEquals("seed"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + seed = null; + continue; + } + seed = property.Value.GetInt64(); + continue; + } + if (property.NameEquals("stop"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + stop = null; + continue; + } + stop = BinaryData.FromString(property.Value.GetRawText()); + continue; + } + if (property.NameEquals("stream"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + stream = null; + continue; + } + stream = property.Value.GetBoolean(); + continue; + } + if (property.NameEquals("temperature"u8)) + { + if (property.Value.ValueKind == 
JsonValueKind.Null) + { + temperature = null; + continue; + } + temperature = property.Value.GetDouble(); + continue; + } + if (property.NameEquals("top_p"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + topP = null; + continue; + } + topP = property.Value.GetDouble(); + continue; + } + if (property.NameEquals("tools"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(ChatCompletionTool.DeserializeChatCompletionTool(item, options)); + } + tools = array; + continue; + } + if (property.NameEquals("tool_choice"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + toolChoice = BinaryData.FromString(property.Value.GetRawText()); + continue; + } + if (property.NameEquals("user"u8)) + { + user = property.Value.GetString(); + continue; + } + if (property.NameEquals("function_call"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + functionCall = BinaryData.FromString(property.Value.GetRawText()); + continue; + } + if (property.NameEquals("functions"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(ChatCompletionFunctions.DeserializeChatCompletionFunctions(item, options)); + } + functions = array; + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new CreateChatCompletionRequest( + messages, + model, + frequencyPenalty, + logitBias ?? new ChangeTrackingDictionary(), + logprobs, + topLogprobs, + maxTokens, + n, + presencePenalty, + responseFormat, + seed, + stop, + stream, + temperature, + topP, + tools ?? new ChangeTrackingList(), + toolChoice, + user, + functionCall, + functions ?? new ChangeTrackingList(), + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(CreateChatCompletionRequest)} does not support '{options.Format}' format."); + } + } + + CreateChatCompletionRequest IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeCreateChatCompletionRequest(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(CreateChatCompletionRequest)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static CreateChatCompletionRequest FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeCreateChatCompletionRequest(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. 
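
A sketch of the null-handling convention in the deserializer above, assuming `Temperature` and `Tools` surface as the nullable scalar and collection properties the serializer references: JSON `null` clears nullable scalars, while `null` collections are left as empty change-tracking placeholders rather than `null`. The payload is illustrative.

```csharp
using System.Diagnostics;
using System.Text.Json;
using OpenAI.Internal.Models;

using JsonDocument doc = JsonDocument.Parse(
    """{"messages":[],"model":"gpt-3.5-turbo","temperature":null,"tools":null}""");
CreateChatCompletionRequest req =
    CreateChatCompletionRequest.DeserializeCreateChatCompletionRequest(doc.RootElement);
Debug.Assert(req.Temperature is null); // scalar: JSON null round-trips as null
Debug.Assert(req.Tools.Count == 0);    // collection: JSON null becomes an empty list
```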
+ internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/CreateChatCompletionRequest.cs b/.dotnet/src/Generated/Models/CreateChatCompletionRequest.cs new file mode 100644 index 000000000..0cab891f8 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateChatCompletionRequest.cs @@ -0,0 +1,510 @@ +// + +using System; +using System.Collections.Generic; +using System.Linq; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// The CreateChatCompletionRequest. + internal partial class CreateChatCompletionRequest + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// A list of messages comprising the conversation so far. + /// [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb). + /// + /// + /// ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) + /// table for details on which models work with the Chat API. + /// + /// is null. + public CreateChatCompletionRequest(IEnumerable messages, CreateChatCompletionRequestModel model) + { + Argument.AssertNotNull(messages, nameof(messages)); + + Messages = messages.ToList(); + Model = model; + LogitBias = new ChangeTrackingDictionary(); + Tools = new ChangeTrackingList(); + Functions = new ChangeTrackingList(); + } + + /// Initializes a new instance of . + /// + /// A list of messages comprising the conversation so far. + /// [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb). + /// + /// + /// ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) + /// table for details on which models work with the Chat API. + /// + /// + /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing + /// frequency in the text so far, decreasing the model's likelihood to repeat the same line + /// verbatim. + /// + /// [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) + /// + /// + /// Modify the likelihood of specified tokens appearing in the completion. + /// + /// Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an + /// associated bias value from -100 to 100. Mathematically, the bias is added to the logits + /// generated by the model prior to sampling. The exact effect will vary per model, but values + /// between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 + /// should result in a ban or exclusive selection of the relevant token. 
+ /// + /// + /// Whether to return log probabilities of the output tokens or not. If true, returns the log + /// probabilities of each output token returned in the `content` of `message`. This option is + /// currently not available on the `gpt-4-vision-preview` model. + /// + /// + /// An integer between 0 and 5 specifying the number of most likely tokens to return at each token + /// position, each with an associated log probability. `logprobs` must be set to `true` if this + /// parameter is used. + /// + /// + /// The maximum number of [tokens](/tokenizer) that can be generated in the chat completion. + /// + /// The total length of input tokens and generated tokens is limited by the model's context length. + /// [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) + /// for counting tokens. + /// + /// + /// How many chat completion choices to generate for each input message. Note that you will be + /// charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to + /// minimize costs. + /// + /// + /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear + /// in the text so far, increasing the model's likelihood to talk about new topics. + /// + /// [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) + /// + /// + /// An object specifying the format that the model must output. Compatible with + /// [GPT-4 Turbo](/docs/models/gpt-4-and-gpt-4-turbo) and `gpt-3.5-turbo-1106`. + /// + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the + /// model generates is valid JSON. + /// + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON + /// yourself via a system or user message. Without this, the model may generate an unending stream + /// of whitespace until the generation reaches the token limit, resulting in a long-running and + /// seemingly "stuck" request. Also note that the message content may be partially cut off if + /// `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the + /// conversation exceeded the max context length. + /// + /// + /// This feature is in Beta. + /// + /// If specified, our system will make a best effort to sample deterministically, such that + /// repeated requests with the same `seed` and parameters should return the same result. + /// + /// Determinism is not guaranteed, and you should refer to the `system_fingerprint` response + /// parameter to monitor changes in the backend. + /// + /// Up to 4 sequences where the API will stop generating further tokens. + /// + /// If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only + /// [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) + /// as they become available, with the stream terminated by a `data: [DONE]` message. + /// [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + /// + /// + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output + /// more random, while lower values like 0.2 will make it more focused and deterministic. + /// + /// We generally recommend altering this or `top_p` but not both. 
+ /// + /// + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers + /// the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising + /// the top 10% probability mass are considered. + /// + /// We generally recommend altering this or `temperature` but not both. + /// + /// + /// A list of tools the model may call. Currently, only functions are supported as a tool. Use this + /// to provide a list of functions the model may generate JSON inputs for. + /// + /// + /// + /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect + /// abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). + /// + /// + /// Deprecated in favor of `tool_choice`. + /// + /// Controls which (if any) function is called by the model. `none` means the model will not call a + /// function and instead generates a message. `auto` means the model can pick between generating a + /// message or calling a function. Specifying a particular function via `{"name": "my_function"}` + /// forces the model to call that function. + /// + /// `none` is the default when no functions are present. `auto` is the default if functions are + /// present. + /// + /// + /// Deprecated in favor of `tools`. + /// + /// A list of functions the model may generate JSON inputs for. + /// + /// Keeps track of any properties unknown to the library. + internal CreateChatCompletionRequest(IList messages, CreateChatCompletionRequestModel model, double? frequencyPenalty, IDictionary logitBias, bool? logprobs, long? topLogprobs, long? maxTokens, long? n, double? presencePenalty, CreateChatCompletionRequestResponseFormat responseFormat, long? seed, BinaryData stop, bool? stream, double? temperature, double? topP, IList tools, BinaryData toolChoice, string user, BinaryData functionCall, IList functions, IDictionary serializedAdditionalRawData) + { + Messages = messages; + Model = model; + FrequencyPenalty = frequencyPenalty; + LogitBias = logitBias; + Logprobs = logprobs; + TopLogprobs = topLogprobs; + MaxTokens = maxTokens; + N = n; + PresencePenalty = presencePenalty; + ResponseFormat = responseFormat; + Seed = seed; + Stop = stop; + Stream = stream; + Temperature = temperature; + TopP = topP; + Tools = tools; + ToolChoice = toolChoice; + User = user; + FunctionCall = functionCall; + Functions = functions; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal CreateChatCompletionRequest() + { + } + + /// + /// A list of messages comprising the conversation so far. + /// [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb). + /// + /// To assign an object to the element of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + public IList Messages { get; } + /// + /// ID of the model to use. 
See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) + /// table for details on which models work with the Chat API. + /// + public CreateChatCompletionRequestModel Model { get; } + /// + /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing + /// frequency in the text so far, decreasing the model's likelihood to repeat the same line + /// verbatim. + /// + /// [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) + /// + public double? FrequencyPenalty { get; set; } + /// + /// Modify the likelihood of specified tokens appearing in the completion. + /// + /// Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an + /// associated bias value from -100 to 100. Mathematically, the bias is added to the logits + /// generated by the model prior to sampling. The exact effect will vary per model, but values + /// between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 + /// should result in a ban or exclusive selection of the relevant token. + /// + public IDictionary LogitBias { get; set; } + /// + /// Whether to return log probabilities of the output tokens or not. If true, returns the log + /// probabilities of each output token returned in the `content` of `message`. This option is + /// currently not available on the `gpt-4-vision-preview` model. + /// + public bool? Logprobs { get; set; } + /// + /// An integer between 0 and 5 specifying the number of most likely tokens to return at each token + /// position, each with an associated log probability. `logprobs` must be set to `true` if this + /// parameter is used. + /// + public long? TopLogprobs { get; set; } + /// + /// The maximum number of [tokens](/tokenizer) that can be generated in the chat completion. + /// + /// The total length of input tokens and generated tokens is limited by the model's context length. + /// [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) + /// for counting tokens. + /// + public long? MaxTokens { get; set; } + /// + /// How many chat completion choices to generate for each input message. Note that you will be + /// charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to + /// minimize costs. + /// + public long? N { get; set; } + /// + /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear + /// in the text so far, increasing the model's likelihood to talk about new topics. + /// + /// [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) + /// + public double? PresencePenalty { get; set; } + /// + /// An object specifying the format that the model must output. Compatible with + /// [GPT-4 Turbo](/docs/models/gpt-4-and-gpt-4-turbo) and `gpt-3.5-turbo-1106`. + /// + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the + /// model generates is valid JSON. + /// + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON + /// yourself via a system or user message. Without this, the model may generate an unending stream + /// of whitespace until the generation reaches the token limit, resulting in a long-running and + /// seemingly "stuck" request. 
Also note that the message content may be partially cut off if + /// `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the + /// conversation exceeded the max context length. + /// + public CreateChatCompletionRequestResponseFormat ResponseFormat { get; set; } + /// + /// This feature is in Beta. + /// + /// If specified, our system will make a best effort to sample deterministically, such that + /// repeated requests with the same `seed` and parameters should return the same result. + /// + /// Determinism is not guaranteed, and you should refer to the `system_fingerprint` response + /// parameter to monitor changes in the backend. + /// + public long? Seed { get; set; } + /// + /// Up to 4 sequences where the API will stop generating further tokens. + /// + /// To assign an object to this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// + /// Supported types: + /// + /// + /// + /// + /// + /// where T is of type + /// + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + public BinaryData Stop { get; set; } + /// + /// If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only + /// [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) + /// as they become available, with the stream terminated by a `data: [DONE]` message. + /// [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + /// + public bool? Stream { get; set; } + /// + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output + /// more random, while lower values like 0.2 will make it more focused and deterministic. + /// + /// We generally recommend altering this or `top_p` but not both. + /// + public double? Temperature { get; set; } + /// + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers + /// the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising + /// the top 10% probability mass are considered. + /// + /// We generally recommend altering this or `temperature` but not both. + /// + public double? TopP { get; set; } + /// + /// A list of tools the model may call. Currently, only functions are supported as a tool. Use this + /// to provide a list of functions the model may generate JSON inputs for. + /// + public IList Tools { get; } + /// + /// Gets or sets the tool choice + /// + /// To assign an object to this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// + /// Supported types: + /// + /// + /// "none" + /// + /// + /// "auto" + /// + /// + /// + /// + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. 
+ /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + public BinaryData ToolChoice { get; set; } + /// + /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect + /// abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). + /// + public string User { get; set; } + /// + /// Deprecated in favor of `tool_choice`. + /// + /// Controls which (if any) function is called by the model. `none` means the model will not call a + /// function and instead generates a message. `auto` means the model can pick between generating a + /// message or calling a function. Specifying a particular function via `{"name": "my_function"}` + /// forces the model to call that function. + /// + /// `none` is the default when no functions are present. `auto` is the default if functions are + /// present. + /// + /// To assign an object to this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// + /// Supported types: + /// + /// + /// "none" + /// + /// + /// "auto" + /// + /// + /// + /// + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + public BinaryData FunctionCall { get; set; } + /// + /// Deprecated in favor of `tools`. + /// + /// A list of functions the model may generate JSON inputs for. + /// + public IList Functions { get; } + } +} diff --git a/.dotnet/src/Generated/Models/CreateChatCompletionRequestModel.cs b/.dotnet/src/Generated/Models/CreateChatCompletionRequestModel.cs new file mode 100644 index 000000000..a646ff7a2 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateChatCompletionRequestModel.cs @@ -0,0 +1,88 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Internal.Models +{ + /// The CreateChatCompletionRequestModel. + internal readonly partial struct CreateChatCompletionRequestModel : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public CreateChatCompletionRequestModel(string value) + { + _value = value ?? 
throw new ArgumentNullException(nameof(value)); + } + + private const string Gpt40125PreviewValue = "gpt-4-0125-preview"; + private const string Gpt4TurboPreviewValue = "gpt-4-turbo-preview"; + private const string Gpt41106PreviewValue = "gpt-4-1106-preview"; + private const string Gpt4VisionPreviewValue = "gpt-4-vision-preview"; + private const string Gpt4Value = "gpt-4"; + private const string Gpt40314Value = "gpt-4-0314"; + private const string Gpt40613Value = "gpt-4-0613"; + private const string Gpt432kValue = "gpt-4-32k"; + private const string Gpt432k0314Value = "gpt-4-32k-0314"; + private const string Gpt432k0613Value = "gpt-4-32k-0613"; + private const string Gpt35TurboValue = "gpt-3.5-turbo"; + private const string Gpt35Turbo16kValue = "gpt-3.5-turbo-16k"; + private const string Gpt35Turbo0301Value = "gpt-3.5-turbo-0301"; + private const string Gpt35Turbo0613Value = "gpt-3.5-turbo-0613"; + private const string Gpt35Turbo1106Value = "gpt-3.5-turbo-1106"; + private const string Gpt35Turbo16k0613Value = "gpt-3.5-turbo-16k-0613"; + + /// gpt-4-0125-preview. + public static CreateChatCompletionRequestModel Gpt40125Preview { get; } = new CreateChatCompletionRequestModel(Gpt40125PreviewValue); + /// gpt-4-turbo-preview. + public static CreateChatCompletionRequestModel Gpt4TurboPreview { get; } = new CreateChatCompletionRequestModel(Gpt4TurboPreviewValue); + /// gpt-4-1106-preview. + public static CreateChatCompletionRequestModel Gpt41106Preview { get; } = new CreateChatCompletionRequestModel(Gpt41106PreviewValue); + /// gpt-4-vision-preview. + public static CreateChatCompletionRequestModel Gpt4VisionPreview { get; } = new CreateChatCompletionRequestModel(Gpt4VisionPreviewValue); + /// gpt-4. + public static CreateChatCompletionRequestModel Gpt4 { get; } = new CreateChatCompletionRequestModel(Gpt4Value); + /// gpt-4-0314. + public static CreateChatCompletionRequestModel Gpt40314 { get; } = new CreateChatCompletionRequestModel(Gpt40314Value); + /// gpt-4-0613. + public static CreateChatCompletionRequestModel Gpt40613 { get; } = new CreateChatCompletionRequestModel(Gpt40613Value); + /// gpt-4-32k. + public static CreateChatCompletionRequestModel Gpt432k { get; } = new CreateChatCompletionRequestModel(Gpt432kValue); + /// gpt-4-32k-0314. + public static CreateChatCompletionRequestModel Gpt432k0314 { get; } = new CreateChatCompletionRequestModel(Gpt432k0314Value); + /// gpt-4-32k-0613. + public static CreateChatCompletionRequestModel Gpt432k0613 { get; } = new CreateChatCompletionRequestModel(Gpt432k0613Value); + /// gpt-3.5-turbo. + public static CreateChatCompletionRequestModel Gpt35Turbo { get; } = new CreateChatCompletionRequestModel(Gpt35TurboValue); + /// gpt-3.5-turbo-16k. + public static CreateChatCompletionRequestModel Gpt35Turbo16k { get; } = new CreateChatCompletionRequestModel(Gpt35Turbo16kValue); + /// gpt-3.5-turbo-0301. + public static CreateChatCompletionRequestModel Gpt35Turbo0301 { get; } = new CreateChatCompletionRequestModel(Gpt35Turbo0301Value); + /// gpt-3.5-turbo-0613. + public static CreateChatCompletionRequestModel Gpt35Turbo0613 { get; } = new CreateChatCompletionRequestModel(Gpt35Turbo0613Value); + /// gpt-3.5-turbo-1106. + public static CreateChatCompletionRequestModel Gpt35Turbo1106 { get; } = new CreateChatCompletionRequestModel(Gpt35Turbo1106Value); + /// gpt-3.5-turbo-16k-0613. + public static CreateChatCompletionRequestModel Gpt35Turbo16k0613 { get; } = new CreateChatCompletionRequestModel(Gpt35Turbo16k0613Value); + /// Determines if two values are the same. 
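`CreateChatCompletionRequestModel` follows the extensible-enum pattern used across the generated models: the statics above are just canned instances of the struct, and the implicit string conversion declared below lets any model id through, so a newly released model works without a library update. A hedged usage sketch (the struct is `internal`, so this is illustrative only):

```csharp
// A predefined value and an arbitrary string are interchangeable.
CreateChatCompletionRequestModel known = CreateChatCompletionRequestModel.Gpt35Turbo;
CreateChatCompletionRequestModel fromString = "GPT-3.5-TURBO"; // implicit conversion

bool same = known == fromString; // true: Equals uses InvariantCultureIgnoreCase

// Model ids the library has never heard of are still valid values.
CreateChatCompletionRequestModel future = new("gpt-hypothetical-next");
```

One caveat visible in the code itself: `Equals` compares case-insensitively while `GetHashCode` hashes the raw string, so two values that compare equal can produce different hash codes; avoid keying hash-based collections on this type with mixed-case ids.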
+ public static bool operator ==(CreateChatCompletionRequestModel left, CreateChatCompletionRequestModel right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(CreateChatCompletionRequestModel left, CreateChatCompletionRequestModel right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator CreateChatCompletionRequestModel(string value) => new CreateChatCompletionRequestModel(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is CreateChatCompletionRequestModel other && Equals(other); + /// + public bool Equals(CreateChatCompletionRequestModel other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/CreateChatCompletionRequestResponseFormat.Serialization.cs b/.dotnet/src/Generated/Models/CreateChatCompletionRequestResponseFormat.Serialization.cs new file mode 100644 index 000000000..d33e61058 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateChatCompletionRequestResponseFormat.Serialization.cs @@ -0,0 +1,137 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class CreateChatCompletionRequestResponseFormat : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateChatCompletionRequestResponseFormat)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsDefined(Type)) + { + writer.WritePropertyName("type"u8); + writer.WriteStringValue(Type.Value.ToString()); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + CreateChatCompletionRequestResponseFormat IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateChatCompletionRequestResponseFormat)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeCreateChatCompletionRequestResponseFormat(document.RootElement, options); + } + + internal static CreateChatCompletionRequestResponseFormat DeserializeCreateChatCompletionRequestResponseFormat(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + CreateChatCompletionRequestResponseFormatType? 
type = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("type"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + type = new CreateChatCompletionRequestResponseFormatType(property.Value.GetString()); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new CreateChatCompletionRequestResponseFormat(type, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(CreateChatCompletionRequestResponseFormat)} does not support '{options.Format}' format."); + } + } + + CreateChatCompletionRequestResponseFormat IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeCreateChatCompletionRequestResponseFormat(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(CreateChatCompletionRequestResponseFormat)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static CreateChatCompletionRequestResponseFormat FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeCreateChatCompletionRequestResponseFormat(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/CreateChatCompletionRequestResponseFormat.cs b/.dotnet/src/Generated/Models/CreateChatCompletionRequestResponseFormat.cs new file mode 100644 index 000000000..82322b0d6 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateChatCompletionRequestResponseFormat.cs @@ -0,0 +1,60 @@ +// + +using System; +using System.Collections.Generic; + +namespace OpenAI.Internal.Models +{ + /// The CreateChatCompletionRequestResponseFormat. + internal partial class CreateChatCompletionRequestResponseFormat + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. 
+ /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + public CreateChatCompletionRequestResponseFormat() + { + } + + /// Initializes a new instance of . + /// Must be one of `text` or `json_object`. + /// Keeps track of any properties unknown to the library. + internal CreateChatCompletionRequestResponseFormat(CreateChatCompletionRequestResponseFormatType? type, IDictionary serializedAdditionalRawData) + { + Type = type; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Must be one of `text` or `json_object`. + public CreateChatCompletionRequestResponseFormatType? Type { get; set; } + } +} diff --git a/.dotnet/src/Generated/Models/CreateChatCompletionRequestResponseFormatType.cs b/.dotnet/src/Generated/Models/CreateChatCompletionRequestResponseFormatType.cs new file mode 100644 index 000000000..8d305ab28 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateChatCompletionRequestResponseFormatType.cs @@ -0,0 +1,46 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Internal.Models +{ + /// Enum for type in CreateChatCompletionRequestResponseFormat. + internal readonly partial struct CreateChatCompletionRequestResponseFormatType : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public CreateChatCompletionRequestResponseFormatType(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string TextValue = "text"; + private const string JsonObjectValue = "json_object"; + + /// text. + public static CreateChatCompletionRequestResponseFormatType Text { get; } = new CreateChatCompletionRequestResponseFormatType(TextValue); + /// json_object. + public static CreateChatCompletionRequestResponseFormatType JsonObject { get; } = new CreateChatCompletionRequestResponseFormatType(JsonObjectValue); + /// Determines if two values are the same. + public static bool operator ==(CreateChatCompletionRequestResponseFormatType left, CreateChatCompletionRequestResponseFormatType right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(CreateChatCompletionRequestResponseFormatType left, CreateChatCompletionRequestResponseFormatType right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator CreateChatCompletionRequestResponseFormatType(string value) => new CreateChatCompletionRequestResponseFormatType(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is CreateChatCompletionRequestResponseFormatType other && Equals(other); + /// + public bool Equals(CreateChatCompletionRequestResponseFormatType other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 
0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/CreateChatCompletionResponse.Serialization.cs b/.dotnet/src/Generated/Models/CreateChatCompletionResponse.Serialization.cs new file mode 100644 index 000000000..3ec1f6602 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateChatCompletionResponse.Serialization.cs @@ -0,0 +1,206 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class CreateChatCompletionResponse : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateChatCompletionResponse)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("id"u8); + writer.WriteStringValue(Id); + writer.WritePropertyName("choices"u8); + writer.WriteStartArray(); + foreach (var item in Choices) + { + writer.WriteObjectValue(item); + } + writer.WriteEndArray(); + writer.WritePropertyName("created"u8); + writer.WriteNumberValue(Created, "U"); + writer.WritePropertyName("model"u8); + writer.WriteStringValue(Model); + if (Optional.IsDefined(SystemFingerprint)) + { + writer.WritePropertyName("system_fingerprint"u8); + writer.WriteStringValue(SystemFingerprint); + } + writer.WritePropertyName("object"u8); + writer.WriteStringValue(Object.ToString()); + if (Optional.IsDefined(Usage)) + { + writer.WritePropertyName("usage"u8); + writer.WriteObjectValue(Usage); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + CreateChatCompletionResponse IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateChatCompletionResponse)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeCreateChatCompletionResponse(document.RootElement, options); + } + + internal static CreateChatCompletionResponse DeserializeCreateChatCompletionResponse(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string id = default; + IReadOnlyList choices = default; + DateTimeOffset created = default; + string model = default; + string systemFingerprint = default; + CreateChatCompletionResponseObject @object = default; + CompletionUsage usage = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("id"u8)) + { + id = property.Value.GetString(); + continue; + } + if (property.NameEquals("choices"u8)) + { + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(CreateChatCompletionResponseChoice.DeserializeCreateChatCompletionResponseChoice(item, options)); + } + choices = array; + continue; + } + if (property.NameEquals("created"u8)) + { + created = DateTimeOffset.FromUnixTimeSeconds(property.Value.GetInt64()); + continue; + } + if (property.NameEquals("model"u8)) + { + model = property.Value.GetString(); + continue; + } + if (property.NameEquals("system_fingerprint"u8)) + { + systemFingerprint = property.Value.GetString(); + continue; + } + if (property.NameEquals("object"u8)) + { + @object = new CreateChatCompletionResponseObject(property.Value.GetString()); + continue; + } + if (property.NameEquals("usage"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + usage = CompletionUsage.DeserializeCompletionUsage(property.Value, options); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new CreateChatCompletionResponse( + id, + choices, + created, + model, + systemFingerprint, + @object, + usage, + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(CreateChatCompletionResponse)} does not support '{options.Format}' format."); + } + } + + CreateChatCompletionResponse IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeCreateChatCompletionResponse(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(CreateChatCompletionResponse)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static CreateChatCompletionResponse FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeCreateChatCompletionResponse(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/CreateChatCompletionResponse.cs b/.dotnet/src/Generated/Models/CreateChatCompletionResponse.cs new file mode 100644 index 000000000..6317c1996 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateChatCompletionResponse.cs @@ -0,0 +1,115 @@ +// + +using System; +using System.Collections.Generic; +using System.Linq; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// Represents a chat completion response returned by model, based on the provided input. + internal partial class CreateChatCompletionResponse + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// A unique identifier for the chat completion. + /// A list of chat completion choices. Can be more than one if `n` is greater than 1. + /// The Unix timestamp (in seconds) of when the chat completion was created. + /// The model used for the chat completion. + /// , or is null. + internal CreateChatCompletionResponse(string id, IEnumerable choices, DateTimeOffset created, string model) + { + Argument.AssertNotNull(id, nameof(id)); + Argument.AssertNotNull(choices, nameof(choices)); + Argument.AssertNotNull(model, nameof(model)); + + Id = id; + Choices = choices.ToList(); + Created = created; + Model = model; + } + + /// Initializes a new instance of . + /// A unique identifier for the chat completion. + /// A list of chat completion choices. Can be more than one if `n` is greater than 1. + /// The Unix timestamp (in seconds) of when the chat completion was created. + /// The model used for the chat completion. + /// + /// This fingerprint represents the backend configuration that the model runs with. + /// + /// Can be used in conjunction with the `seed` request parameter to understand when backend changes + /// have been made that might impact determinism. 
+ /// + /// The object type, which is always `chat.completion`. + /// + /// Keeps track of any properties unknown to the library. + internal CreateChatCompletionResponse(string id, IReadOnlyList choices, DateTimeOffset created, string model, string systemFingerprint, CreateChatCompletionResponseObject @object, CompletionUsage usage, IDictionary serializedAdditionalRawData) + { + Id = id; + Choices = choices; + Created = created; + Model = model; + SystemFingerprint = systemFingerprint; + Object = @object; + Usage = usage; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal CreateChatCompletionResponse() + { + } + + /// A unique identifier for the chat completion. + public string Id { get; } + /// A list of chat completion choices. Can be more than one if `n` is greater than 1. + public IReadOnlyList Choices { get; } + /// The Unix timestamp (in seconds) of when the chat completion was created. + public DateTimeOffset Created { get; } + /// The model used for the chat completion. + public string Model { get; } + /// + /// This fingerprint represents the backend configuration that the model runs with. + /// + /// Can be used in conjunction with the `seed` request parameter to understand when backend changes + /// have been made that might impact determinism. + /// + public string SystemFingerprint { get; } + /// The object type, which is always `chat.completion`. + public CreateChatCompletionResponseObject Object { get; } = CreateChatCompletionResponseObject.ChatCompletion; + + /// Gets the usage. + public CompletionUsage Usage { get; } + } +} diff --git a/.dotnet/src/Generated/Models/CreateChatCompletionResponseChoice.Serialization.cs b/.dotnet/src/Generated/Models/CreateChatCompletionResponseChoice.Serialization.cs new file mode 100644 index 000000000..8aff6522f --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateChatCompletionResponseChoice.Serialization.cs @@ -0,0 +1,166 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class CreateChatCompletionResponseChoice : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateChatCompletionResponseChoice)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("finish_reason"u8); + writer.WriteStringValue(FinishReason.ToString()); + writer.WritePropertyName("index"u8); + writer.WriteNumberValue(Index); + writer.WritePropertyName("message"u8); + writer.WriteObjectValue(Message); + if (Logprobs != null) + { + writer.WritePropertyName("logprobs"u8); + writer.WriteObjectValue(Logprobs); + } + else + { + writer.WriteNull("logprobs"); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + CreateChatCompletionResponseChoice IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateChatCompletionResponseChoice)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeCreateChatCompletionResponseChoice(document.RootElement, options); + } + + internal static CreateChatCompletionResponseChoice DeserializeCreateChatCompletionResponseChoice(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + CreateChatCompletionResponseChoiceFinishReason finishReason = default; + long index = default; + ChatCompletionResponseMessage message = default; + CreateChatCompletionResponseChoiceLogprobs logprobs = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("finish_reason"u8)) + { + finishReason = new CreateChatCompletionResponseChoiceFinishReason(property.Value.GetString()); + continue; + } + if (property.NameEquals("index"u8)) + { + index = property.Value.GetInt64(); + continue; + } + if (property.NameEquals("message"u8)) + { + message = ChatCompletionResponseMessage.DeserializeChatCompletionResponseMessage(property.Value, options); + continue; + } + if (property.NameEquals("logprobs"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + logprobs = null; + continue; + } + logprobs = CreateChatCompletionResponseChoiceLogprobs.DeserializeCreateChatCompletionResponseChoiceLogprobs(property.Value, options); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new CreateChatCompletionResponseChoice(finishReason, index, message, logprobs, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(CreateChatCompletionResponseChoice)} does not support '{options.Format}' format."); + } + } + + CreateChatCompletionResponseChoice IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeCreateChatCompletionResponseChoice(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(CreateChatCompletionResponseChoice)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static CreateChatCompletionResponseChoice FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeCreateChatCompletionResponseChoice(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/CreateChatCompletionResponseChoice.cs b/.dotnet/src/Generated/Models/CreateChatCompletionResponseChoice.cs new file mode 100644 index 000000000..374be9a9e --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateChatCompletionResponseChoice.cs @@ -0,0 +1,107 @@ +// + +using System; +using System.Collections.Generic; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// The CreateChatCompletionResponseChoice. + internal partial class CreateChatCompletionResponseChoice + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// The reason the model stopped generating tokens. This will be `stop` if the model hit a + /// natural stop point or a provided stop sequence, `length` if the maximum number of tokens + /// specified in the request was reached, `content_filter` if content was omitted due to a flag + /// from our content filters, `tool_calls` if the model called a tool, or `function_call` + /// (deprecated) if the model called a function. + /// + /// The index of the choice in the list of choices. + /// + /// Log probability information for the choice. + /// is null. 
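As the doc comments above spell out, `finish_reason` is the main signal a caller branches on when consuming a choice. A minimal sketch, assuming a hypothetical `response` variable holding an already-deserialized `CreateChatCompletionResponse` (these types are `internal`, so this is illustrative only):

```csharp
CreateChatCompletionResponseChoice choice = response.Choices[0];

if (choice.FinishReason == CreateChatCompletionResponseChoiceFinishReason.Length)
{
    // max_tokens or the context window cut generation short;
    // choice.Message content may be incomplete.
}
else if (choice.FinishReason == CreateChatCompletionResponseChoiceFinishReason.ToolCalls)
{
    // The model requested tool invocations instead of replying directly.
}
else if (choice.FinishReason == CreateChatCompletionResponseChoiceFinishReason.Stop)
{
    var reply = choice.Message; // natural stop point; the assistant reply is complete
}

// Logprobs stays null unless log probabilities were requested.
CreateChatCompletionResponseChoiceLogprobs logprobs = choice.Logprobs;
```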
+ internal CreateChatCompletionResponseChoice(CreateChatCompletionResponseChoiceFinishReason finishReason, long index, ChatCompletionResponseMessage message, CreateChatCompletionResponseChoiceLogprobs logprobs) + { + Argument.AssertNotNull(message, nameof(message)); + + FinishReason = finishReason; + Index = index; + Message = message; + Logprobs = logprobs; + } + + /// Initializes a new instance of . + /// + /// The reason the model stopped generating tokens. This will be `stop` if the model hit a + /// natural stop point or a provided stop sequence, `length` if the maximum number of tokens + /// specified in the request was reached, `content_filter` if content was omitted due to a flag + /// from our content filters, `tool_calls` if the model called a tool, or `function_call` + /// (deprecated) if the model called a function. + /// + /// The index of the choice in the list of choices. + /// + /// Log probability information for the choice. + /// Keeps track of any properties unknown to the library. + internal CreateChatCompletionResponseChoice(CreateChatCompletionResponseChoiceFinishReason finishReason, long index, ChatCompletionResponseMessage message, CreateChatCompletionResponseChoiceLogprobs logprobs, IDictionary serializedAdditionalRawData) + { + FinishReason = finishReason; + Index = index; + Message = message; + Logprobs = logprobs; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal CreateChatCompletionResponseChoice() + { + } + + /// + /// The reason the model stopped generating tokens. This will be `stop` if the model hit a + /// natural stop point or a provided stop sequence, `length` if the maximum number of tokens + /// specified in the request was reached, `content_filter` if content was omitted due to a flag + /// from our content filters, `tool_calls` if the model called a tool, or `function_call` + /// (deprecated) if the model called a function. + /// + public CreateChatCompletionResponseChoiceFinishReason FinishReason { get; } + /// The index of the choice in the list of choices. + public long Index { get; } + /// Gets the message. + public ChatCompletionResponseMessage Message { get; } + /// Log probability information for the choice. + public CreateChatCompletionResponseChoiceLogprobs Logprobs { get; } + } +} diff --git a/.dotnet/src/Generated/Models/CreateChatCompletionResponseChoiceFinishReason.cs b/.dotnet/src/Generated/Models/CreateChatCompletionResponseChoiceFinishReason.cs new file mode 100644 index 000000000..817df9578 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateChatCompletionResponseChoiceFinishReason.cs @@ -0,0 +1,55 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Internal.Models +{ + /// Enum for finish_reason in CreateChatCompletionResponseChoice. + internal readonly partial struct CreateChatCompletionResponseChoiceFinishReason : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public CreateChatCompletionResponseChoiceFinishReason(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string StopValue = "stop"; + private const string LengthValue = "length"; + private const string ToolCallsValue = "tool_calls"; + private const string ContentFilterValue = "content_filter"; + private const string FunctionCallValue = "function_call"; + + /// stop. 
+ public static CreateChatCompletionResponseChoiceFinishReason Stop { get; } = new CreateChatCompletionResponseChoiceFinishReason(StopValue); + /// length. + public static CreateChatCompletionResponseChoiceFinishReason Length { get; } = new CreateChatCompletionResponseChoiceFinishReason(LengthValue); + /// tool_calls. + public static CreateChatCompletionResponseChoiceFinishReason ToolCalls { get; } = new CreateChatCompletionResponseChoiceFinishReason(ToolCallsValue); + /// content_filter. + public static CreateChatCompletionResponseChoiceFinishReason ContentFilter { get; } = new CreateChatCompletionResponseChoiceFinishReason(ContentFilterValue); + /// function_call. + public static CreateChatCompletionResponseChoiceFinishReason FunctionCall { get; } = new CreateChatCompletionResponseChoiceFinishReason(FunctionCallValue); + /// Determines if two values are the same. + public static bool operator ==(CreateChatCompletionResponseChoiceFinishReason left, CreateChatCompletionResponseChoiceFinishReason right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(CreateChatCompletionResponseChoiceFinishReason left, CreateChatCompletionResponseChoiceFinishReason right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator CreateChatCompletionResponseChoiceFinishReason(string value) => new CreateChatCompletionResponseChoiceFinishReason(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is CreateChatCompletionResponseChoiceFinishReason other && Equals(other); + /// + public bool Equals(CreateChatCompletionResponseChoiceFinishReason other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/CreateChatCompletionResponseChoiceLogprobs.Serialization.cs b/.dotnet/src/Generated/Models/CreateChatCompletionResponseChoiceLogprobs.Serialization.cs new file mode 100644 index 000000000..210e712c6 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateChatCompletionResponseChoiceLogprobs.Serialization.cs @@ -0,0 +1,152 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class CreateChatCompletionResponseChoiceLogprobs : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateChatCompletionResponseChoiceLogprobs)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + if (Content != null && Optional.IsCollectionDefined(Content)) + { + writer.WritePropertyName("content"u8); + writer.WriteStartArray(); + foreach (var item in Content) + { + writer.WriteObjectValue(item); + } + writer.WriteEndArray(); + } + else + { + writer.WriteNull("content"); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + CreateChatCompletionResponseChoiceLogprobs IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateChatCompletionResponseChoiceLogprobs)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeCreateChatCompletionResponseChoiceLogprobs(document.RootElement, options); + } + + internal static CreateChatCompletionResponseChoiceLogprobs DeserializeCreateChatCompletionResponseChoiceLogprobs(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + IReadOnlyList content = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("content"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + content = new ChangeTrackingList(); + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(ChatCompletionTokenLogprob.DeserializeChatCompletionTokenLogprob(item, options)); + } + content = array; + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new CreateChatCompletionResponseChoiceLogprobs(content, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(CreateChatCompletionResponseChoiceLogprobs)} does not support '{options.Format}' format."); + } + } + + CreateChatCompletionResponseChoiceLogprobs IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel<CreateChatCompletionResponseChoiceLogprobs>)this).GetFormatFromOptions(options) : options.Format;
+
+            switch (format)
+            {
+                case "J":
+                    {
+                        using JsonDocument document = JsonDocument.Parse(data);
+                        return DeserializeCreateChatCompletionResponseChoiceLogprobs(document.RootElement, options);
+                    }
+                default:
+                    throw new FormatException($"The model {nameof(CreateChatCompletionResponseChoiceLogprobs)} does not support '{options.Format}' format.");
+            }
+        }
+
+        string IPersistableModel<CreateChatCompletionResponseChoiceLogprobs>.GetFormatFromOptions(ModelReaderWriterOptions options) => "J";
+
+        /// <summary> Deserializes the model from a raw response. </summary>
+        /// <param name="response"> The result to deserialize the model from. </param>
+        internal static CreateChatCompletionResponseChoiceLogprobs FromResponse(PipelineResponse response)
+        {
+            using var document = JsonDocument.Parse(response.Content);
+            return DeserializeCreateChatCompletionResponseChoiceLogprobs(document.RootElement);
+        }
+
+        /// <summary> Convert into a Utf8JsonRequestBody. </summary>
+        internal virtual BinaryContent ToRequestBody()
+        {
+            var content = new Utf8JsonRequestBody();
+            content.JsonWriter.WriteObjectValue(this);
+            return content;
+        }
+    }
+}
diff --git a/.dotnet/src/Generated/Models/CreateChatCompletionResponseObject.cs b/.dotnet/src/Generated/Models/CreateChatCompletionResponseObject.cs
new file mode 100644
index 000000000..622b7b56a
--- /dev/null
+++ b/.dotnet/src/Generated/Models/CreateChatCompletionResponseObject.cs
@@ -0,0 +1,43 @@
+// <auto-generated/>
+
+using System;
+using System.ComponentModel;
+
+namespace OpenAI.Internal.Models
+{
+    /// <summary> The CreateChatCompletionResponse_object. </summary>
+ internal readonly partial struct CreateChatCompletionResponseObject : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public CreateChatCompletionResponseObject(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string ChatCompletionValue = "chat.completion"; + + /// chat.completion. + public static CreateChatCompletionResponseObject ChatCompletion { get; } = new CreateChatCompletionResponseObject(ChatCompletionValue); + /// Determines if two values are the same. + public static bool operator ==(CreateChatCompletionResponseObject left, CreateChatCompletionResponseObject right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(CreateChatCompletionResponseObject left, CreateChatCompletionResponseObject right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator CreateChatCompletionResponseObject(string value) => new CreateChatCompletionResponseObject(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is CreateChatCompletionResponseObject other && Equals(other); + /// + public bool Equals(CreateChatCompletionResponseObject other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/CreateCompletionRequest.Serialization.cs b/.dotnet/src/Generated/Models/CreateCompletionRequest.Serialization.cs new file mode 100644 index 000000000..9b1c224b8 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateCompletionRequest.Serialization.cs @@ -0,0 +1,525 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class CreateCompletionRequest : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateCompletionRequest)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("model"u8); + writer.WriteStringValue(Model.ToString()); + if (Prompt != null) + { + writer.WritePropertyName("prompt"u8); +#if NET6_0_OR_GREATER + writer.WriteRawValue(Prompt); +#else + using (JsonDocument document = JsonDocument.Parse(Prompt)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + else + { + writer.WriteNull("prompt"); + } + if (Optional.IsDefined(BestOf)) + { + if (BestOf != null) + { + writer.WritePropertyName("best_of"u8); + writer.WriteNumberValue(BestOf.Value); + } + else + { + writer.WriteNull("best_of"); + } + } + if (Optional.IsDefined(Echo)) + { + if (Echo != null) + { + writer.WritePropertyName("echo"u8); + writer.WriteBooleanValue(Echo.Value); + } + else + { + writer.WriteNull("echo"); + } + } + if (Optional.IsDefined(FrequencyPenalty)) + { + if (FrequencyPenalty != null) + { + writer.WritePropertyName("frequency_penalty"u8); + writer.WriteNumberValue(FrequencyPenalty.Value); + } + else + { + writer.WriteNull("frequency_penalty"); + } + } + if (Optional.IsCollectionDefined(LogitBias)) + { + if (LogitBias != null) + { + writer.WritePropertyName("logit_bias"u8); + writer.WriteStartObject(); + foreach (var item in LogitBias) + { + writer.WritePropertyName(item.Key); + writer.WriteNumberValue(item.Value); + } + writer.WriteEndObject(); + } + else + { + writer.WriteNull("logit_bias"); + } + } + if (Optional.IsDefined(Logprobs)) + { + if (Logprobs != null) + { + writer.WritePropertyName("logprobs"u8); + writer.WriteNumberValue(Logprobs.Value); + } + else + { + writer.WriteNull("logprobs"); + } + } + if (Optional.IsDefined(MaxTokens)) + { + if (MaxTokens != null) + { + writer.WritePropertyName("max_tokens"u8); + writer.WriteNumberValue(MaxTokens.Value); + } + else + { + writer.WriteNull("max_tokens"); + } + } + if (Optional.IsDefined(N)) + { + if (N != null) + { + writer.WritePropertyName("n"u8); + writer.WriteNumberValue(N.Value); + } + else + { + writer.WriteNull("n"); + } + } + if (Optional.IsDefined(PresencePenalty)) + { + if (PresencePenalty != null) + { + writer.WritePropertyName("presence_penalty"u8); + writer.WriteNumberValue(PresencePenalty.Value); + } + else + { + writer.WriteNull("presence_penalty"); + } + } + if (Optional.IsDefined(Seed)) + { + if (Seed != null) + { + writer.WritePropertyName("seed"u8); + writer.WriteNumberValue(Seed.Value); + } + else + { + writer.WriteNull("seed"); + } + } + if (Optional.IsDefined(Stop)) + { + if (Stop != null) + { + writer.WritePropertyName("stop"u8); +#if NET6_0_OR_GREATER + writer.WriteRawValue(Stop); +#else + using (JsonDocument document = JsonDocument.Parse(Stop)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + else + { + writer.WriteNull("stop"); + } + } + if (Optional.IsDefined(Stream)) + { + if (Stream != null) + { + writer.WritePropertyName("stream"u8); + writer.WriteBooleanValue(Stream.Value); + } + else + { + writer.WriteNull("stream"); + } + } + if (Optional.IsDefined(Suffix)) + { + if (Suffix != null) + { + writer.WritePropertyName("suffix"u8); + writer.WriteStringValue(Suffix); + } + else + { + writer.WriteNull("suffix"); + } + } + if (Optional.IsDefined(Temperature)) + { + if (Temperature != null) + { + writer.WritePropertyName("temperature"u8); + 
writer.WriteNumberValue(Temperature.Value); + } + else + { + writer.WriteNull("temperature"); + } + } + if (Optional.IsDefined(TopP)) + { + if (TopP != null) + { + writer.WritePropertyName("top_p"u8); + writer.WriteNumberValue(TopP.Value); + } + else + { + writer.WriteNull("top_p"); + } + } + if (Optional.IsDefined(User)) + { + writer.WritePropertyName("user"u8); + writer.WriteStringValue(User); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + CreateCompletionRequest IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateCompletionRequest)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeCreateCompletionRequest(document.RootElement, options); + } + + internal static CreateCompletionRequest DeserializeCreateCompletionRequest(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + CreateCompletionRequestModel model = default; + BinaryData prompt = default; + long? bestOf = default; + bool? echo = default; + double? frequencyPenalty = default; + IDictionary logitBias = default; + long? logprobs = default; + long? maxTokens = default; + long? n = default; + double? presencePenalty = default; + long? seed = default; + BinaryData stop = default; + bool? stream = default; + string suffix = default; + double? temperature = default; + double? 
topP = default; + string user = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("model"u8)) + { + model = new CreateCompletionRequestModel(property.Value.GetString()); + continue; + } + if (property.NameEquals("prompt"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + prompt = null; + continue; + } + prompt = BinaryData.FromString(property.Value.GetRawText()); + continue; + } + if (property.NameEquals("best_of"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + bestOf = null; + continue; + } + bestOf = property.Value.GetInt64(); + continue; + } + if (property.NameEquals("echo"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + echo = null; + continue; + } + echo = property.Value.GetBoolean(); + continue; + } + if (property.NameEquals("frequency_penalty"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + frequencyPenalty = null; + continue; + } + frequencyPenalty = property.Value.GetDouble(); + continue; + } + if (property.NameEquals("logit_bias"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + Dictionary dictionary = new Dictionary(); + foreach (var property0 in property.Value.EnumerateObject()) + { + dictionary.Add(property0.Name, property0.Value.GetInt64()); + } + logitBias = dictionary; + continue; + } + if (property.NameEquals("logprobs"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + logprobs = null; + continue; + } + logprobs = property.Value.GetInt64(); + continue; + } + if (property.NameEquals("max_tokens"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + maxTokens = null; + continue; + } + maxTokens = property.Value.GetInt64(); + continue; + } + if (property.NameEquals("n"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + n = null; + continue; + } + n = property.Value.GetInt64(); + continue; + } + if (property.NameEquals("presence_penalty"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + presencePenalty = null; + continue; + } + presencePenalty = property.Value.GetDouble(); + continue; + } + if (property.NameEquals("seed"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + seed = null; + continue; + } + seed = property.Value.GetInt64(); + continue; + } + if (property.NameEquals("stop"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + stop = null; + continue; + } + stop = BinaryData.FromString(property.Value.GetRawText()); + continue; + } + if (property.NameEquals("stream"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + stream = null; + continue; + } + stream = property.Value.GetBoolean(); + continue; + } + if (property.NameEquals("suffix"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + suffix = null; + continue; + } + suffix = property.Value.GetString(); + continue; + } + if (property.NameEquals("temperature"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + temperature = null; + continue; + } + temperature = property.Value.GetDouble(); + continue; + } + if (property.NameEquals("top_p"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + topP = null; + continue; + } + topP = property.Value.GetDouble(); + continue; + } + if (property.NameEquals("user"u8)) + { + user = property.Value.GetString(); + continue; + } + if (options.Format != "W") + 
{ + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new CreateCompletionRequest( + model, + prompt, + bestOf, + echo, + frequencyPenalty, + logitBias ?? new ChangeTrackingDictionary(), + logprobs, + maxTokens, + n, + presencePenalty, + seed, + stop, + stream, + suffix, + temperature, + topP, + user, + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(CreateCompletionRequest)} does not support '{options.Format}' format."); + } + } + + CreateCompletionRequest IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeCreateCompletionRequest(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(CreateCompletionRequest)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static CreateCompletionRequest FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeCreateCompletionRequest(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/CreateCompletionRequest.cs b/.dotnet/src/Generated/Models/CreateCompletionRequest.cs new file mode 100644 index 000000000..9230b6acd --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateCompletionRequest.cs @@ -0,0 +1,399 @@ +// + +using System; +using System.Collections.Generic; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// The CreateCompletionRequest. + internal partial class CreateCompletionRequest + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to + /// see all of your available models, or see our [Model overview](/docs/models/overview) for + /// descriptions of them. 
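The serialization code above implements `System.ClientModel`'s `IJsonModel<T>`/`IPersistableModel<T>` contract, with `"J"` (JSON) as the only supported wire format. A hedged sketch of round-tripping the model through `ModelReaderWriter`, assuming the standard `System.ClientModel.Primitives` entry points and code inside the assembly (the type is internal):

```csharp
using System;
using System.ClientModel.Primitives;

CreateCompletionRequest request = new(
    CreateCompletionRequestModel.Gpt35TurboInstruct,
    BinaryData.FromObjectAsJson("Say hello."));

// Write to JSON and read back; both paths funnel through the
// IJsonModel<CreateCompletionRequest> implementation shown above.
BinaryData json = ModelReaderWriter.Write(request, ModelReaderWriterOptions.Json);
CreateCompletionRequest roundTripped =
    ModelReaderWriter.Read<CreateCompletionRequest>(json, ModelReaderWriterOptions.Json);
```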
+ /// + /// + /// The prompt(s) to generate completions for, encoded as a string, array of strings, array of + /// tokens, or array of token arrays. + /// + /// Note that <|endoftext|> is the document separator that the model sees during training, so if a + /// prompt is not specified the model will generate as if from the beginning of a new document. + /// + public CreateCompletionRequest(CreateCompletionRequestModel model, BinaryData prompt) + { + Model = model; + Prompt = prompt; + LogitBias = new ChangeTrackingDictionary(); + } + + /// Initializes a new instance of . + /// + /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to + /// see all of your available models, or see our [Model overview](/docs/models/overview) for + /// descriptions of them. + /// + /// + /// The prompt(s) to generate completions for, encoded as a string, array of strings, array of + /// tokens, or array of token arrays. + /// + /// Note that <|endoftext|> is the document separator that the model sees during training, so if a + /// prompt is not specified the model will generate as if from the beginning of a new document. + /// + /// + /// Generates `best_of` completions server-side and returns the "best" (the one with the highest + /// log probability per token). Results cannot be streamed. + /// + /// When used with `n`, `best_of` controls the number of candidate completions and `n` specifies + /// how many to return – `best_of` must be greater than `n`. + /// + /// **Note:** Because this parameter generates many completions, it can quickly consume your token + /// quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. + /// + /// Echo back the prompt in addition to the completion. + /// + /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing + /// frequency in the text so far, decreasing the model's likelihood to repeat the same line + /// verbatim. + /// + /// [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) + /// + /// + /// Modify the likelihood of specified tokens appearing in the completion. + /// + /// Accepts a JSON object that maps tokens (specified by their token ID in the GPT tokenizer) to an + /// associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) + /// to convert text to token IDs. Mathematically, the bias is added to the logits generated by the + /// model prior to sampling. The exact effect will vary per model, but values between -1 and 1 + /// should decrease or increase likelihood of selection; values like -100 or 100 should result in a + /// ban or exclusive selection of the relevant token. + /// + /// As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token from being + /// generated. + /// + /// + /// Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. + /// For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The + /// API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` + /// elements in the response. + /// + /// The maximum value for `logprobs` is 5. + /// + /// + /// The maximum number of [tokens](/tokenizer) to generate in the completion. + /// + /// The token count of your prompt plus `max_tokens` cannot exceed the model's context length. 
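Because the REST `prompt` field accepts a string, an array of strings, an array of tokens, or an array of token arrays, the generated model surfaces it as `BinaryData`. A sketch of the documented encodings, using the same `BinaryData` helpers the doc comments reference (the token IDs below are arbitrary examples, not real tokenizer output):

```csharp
using System;

// A single prompt string.
BinaryData single = BinaryData.FromObjectAsJson("Once upon a time");

// A batch of prompt strings.
BinaryData batch = BinaryData.FromObjectAsJson(new[] { "First prompt", "Second prompt" });

// An already-tokenized prompt (array of token IDs; values here are arbitrary).
BinaryData tokens = BinaryData.FromObjectAsJson(new[] { 464, 2068, 7586 });
```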
+ /// [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) + /// for counting tokens. + /// + /// + /// How many completions to generate for each prompt. + /// + /// **Note:** Because this parameter generates many completions, it can quickly consume your token + /// quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. + /// + /// + /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear + /// in the text so far, increasing the model's likelihood to talk about new topics. + /// + /// [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) + /// + /// + /// If specified, our system will make a best effort to sample deterministically, such that + /// repeated requests with the same `seed` and parameters should return the same result. + /// + /// Determinism is not guaranteed, and you should refer to the `system_fingerprint` response + /// parameter to monitor changes in the backend. + /// + /// Up to 4 sequences where the API will stop generating further tokens. + /// + /// If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only + /// [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) + /// as they become available, with the stream terminated by a `data: [DONE]` message. + /// [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb). + /// + /// The suffix that comes after a completion of inserted text. + /// + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output + /// more random, while lower values like 0.2 will make it more focused and deterministic. + /// + /// We generally recommend altering this or `top_p` but not both. + /// + /// + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers + /// the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising + /// the top 10% probability mass are considered. + /// + /// We generally recommend altering this or `temperature` but not both. + /// + /// + /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect + /// abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). + /// + /// Keeps track of any properties unknown to the library. + internal CreateCompletionRequest(CreateCompletionRequestModel model, BinaryData prompt, long? bestOf, bool? echo, double? frequencyPenalty, IDictionary logitBias, long? logprobs, long? maxTokens, long? n, double? presencePenalty, long? seed, BinaryData stop, bool? stream, string suffix, double? temperature, double? topP, string user, IDictionary serializedAdditionalRawData) + { + Model = model; + Prompt = prompt; + BestOf = bestOf; + Echo = echo; + FrequencyPenalty = frequencyPenalty; + LogitBias = logitBias; + Logprobs = logprobs; + MaxTokens = maxTokens; + N = n; + PresencePenalty = presencePenalty; + Seed = seed; + Stop = stop; + Stream = stream; + Suffix = suffix; + Temperature = temperature; + TopP = topP; + User = user; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal CreateCompletionRequest() + { + } + + /// + /// ID of the model to use. 
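Most of the parameters documented above surface as nullable settable properties, so only values the caller explicitly sets are written to the wire. A hypothetical configuration sketch tying a few of them together (again assuming in-assembly access to the internal types):

```csharp
using System;

var request = new CreateCompletionRequest(
    CreateCompletionRequestModel.Davinci002,
    BinaryData.FromObjectAsJson("Write a haiku about tokens."));

// Ban <|endoftext|>, as the logit_bias documentation suggests.
request.LogitBias["50256"] = -100;

// `stop` is another string-or-array union, hence BinaryData as well.
request.Stop = BinaryData.FromObjectAsJson(new[] { "\n\n", "END" });

request.MaxTokens = 64;
request.Temperature = 0.2; // lower values give more focused, deterministic output
```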
You can use the [List models](/docs/api-reference/models/list) API to + /// see all of your available models, or see our [Model overview](/docs/models/overview) for + /// descriptions of them. + /// + public CreateCompletionRequestModel Model { get; } + /// + /// The prompt(s) to generate completions for, encoded as a string, array of strings, array of + /// tokens, or array of token arrays. + /// + /// Note that <|endoftext|> is the document separator that the model sees during training, so if a + /// prompt is not specified the model will generate as if from the beginning of a new document. + /// + /// To assign an object to this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// + /// Supported types: + /// + /// + /// + /// + /// + /// where T is of type + /// + /// + /// where T is of type + /// + /// + /// where T is of type IList{long} + /// + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + public BinaryData Prompt { get; } + /// + /// Generates `best_of` completions server-side and returns the "best" (the one with the highest + /// log probability per token). Results cannot be streamed. + /// + /// When used with `n`, `best_of` controls the number of candidate completions and `n` specifies + /// how many to return – `best_of` must be greater than `n`. + /// + /// **Note:** Because this parameter generates many completions, it can quickly consume your token + /// quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. + /// + public long? BestOf { get; set; } + /// Echo back the prompt in addition to the completion. + public bool? Echo { get; set; } + /// + /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing + /// frequency in the text so far, decreasing the model's likelihood to repeat the same line + /// verbatim. + /// + /// [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) + /// + public double? FrequencyPenalty { get; set; } + /// + /// Modify the likelihood of specified tokens appearing in the completion. + /// + /// Accepts a JSON object that maps tokens (specified by their token ID in the GPT tokenizer) to an + /// associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) + /// to convert text to token IDs. Mathematically, the bias is added to the logits generated by the + /// model prior to sampling. The exact effect will vary per model, but values between -1 and 1 + /// should decrease or increase likelihood of selection; values like -100 or 100 should result in a + /// ban or exclusive selection of the relevant token. + /// + /// As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token from being + /// generated. + /// + public IDictionary LogitBias { get; set; } + /// + /// Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. + /// For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. 
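For instance, under the `best_of`/`n` contract described above, the following hypothetical settings ask the server to sample five candidates, return the two best, and attach log probabilities (`best_of` must exceed `n`):

```csharp
using System;

var request = new CreateCompletionRequest(
    CreateCompletionRequestModel.Babbage002,
    BinaryData.FromObjectAsJson("Translate 'hello' into French:"));

request.BestOf = 5;   // candidates generated server-side
request.N = 2;        // completions actually returned; must be less than best_of
request.Logprobs = 5; // the documented maximum; each position may report logprobs + 1 entries
```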
The + /// API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` + /// elements in the response. + /// + /// The maximum value for `logprobs` is 5. + /// + public long? Logprobs { get; set; } + /// + /// The maximum number of [tokens](/tokenizer) to generate in the completion. + /// + /// The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + /// [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) + /// for counting tokens. + /// + public long? MaxTokens { get; set; } + /// + /// How many completions to generate for each prompt. + /// + /// **Note:** Because this parameter generates many completions, it can quickly consume your token + /// quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. + /// + public long? N { get; set; } + /// + /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear + /// in the text so far, increasing the model's likelihood to talk about new topics. + /// + /// [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) + /// + public double? PresencePenalty { get; set; } + /// + /// If specified, our system will make a best effort to sample deterministically, such that + /// repeated requests with the same `seed` and parameters should return the same result. + /// + /// Determinism is not guaranteed, and you should refer to the `system_fingerprint` response + /// parameter to monitor changes in the backend. + /// + public long? Seed { get; set; } + /// + /// Up to 4 sequences where the API will stop generating further tokens. + /// + /// To assign an object to this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// + /// Supported types: + /// + /// + /// + /// + /// + /// where T is of type + /// + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + public BinaryData Stop { get; set; } + /// + /// If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only + /// [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) + /// as they become available, with the stream terminated by a `data: [DONE]` message. + /// [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb). + /// + public bool? Stream { get; set; } + /// The suffix that comes after a completion of inserted text. + public string Suffix { get; set; } + /// + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output + /// more random, while lower values like 0.2 will make it more focused and deterministic. + /// + /// We generally recommend altering this or `top_p` but not both. + /// + public double? 
Temperature { get; set; } + /// + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers + /// the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising + /// the top 10% probability mass are considered. + /// + /// We generally recommend altering this or `temperature` but not both. + /// + public double? TopP { get; set; } + /// + /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect + /// abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). + /// + public string User { get; set; } + } +} diff --git a/.dotnet/src/Generated/Models/CreateCompletionRequestModel.cs b/.dotnet/src/Generated/Models/CreateCompletionRequestModel.cs new file mode 100644 index 000000000..80fa984b7 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateCompletionRequestModel.cs @@ -0,0 +1,49 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Internal.Models +{ + /// The CreateCompletionRequestModel. + internal readonly partial struct CreateCompletionRequestModel : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public CreateCompletionRequestModel(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string Gpt35TurboInstructValue = "gpt-3.5-turbo-instruct"; + private const string Davinci002Value = "davinci-002"; + private const string Babbage002Value = "babbage-002"; + + /// gpt-3.5-turbo-instruct. + public static CreateCompletionRequestModel Gpt35TurboInstruct { get; } = new CreateCompletionRequestModel(Gpt35TurboInstructValue); + /// davinci-002. + public static CreateCompletionRequestModel Davinci002 { get; } = new CreateCompletionRequestModel(Davinci002Value); + /// babbage-002. + public static CreateCompletionRequestModel Babbage002 { get; } = new CreateCompletionRequestModel(Babbage002Value); + /// Determines if two values are the same. + public static bool operator ==(CreateCompletionRequestModel left, CreateCompletionRequestModel right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(CreateCompletionRequestModel left, CreateCompletionRequestModel right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator CreateCompletionRequestModel(string value) => new CreateCompletionRequestModel(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is CreateCompletionRequestModel other && Equals(other); + /// + public bool Equals(CreateCompletionRequestModel other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 
0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/CreateCompletionResponse.Serialization.cs b/.dotnet/src/Generated/Models/CreateCompletionResponse.Serialization.cs new file mode 100644 index 000000000..3cd9d4097 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateCompletionResponse.Serialization.cs @@ -0,0 +1,206 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class CreateCompletionResponse : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateCompletionResponse)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("id"u8); + writer.WriteStringValue(Id); + writer.WritePropertyName("choices"u8); + writer.WriteStartArray(); + foreach (var item in Choices) + { + writer.WriteObjectValue(item); + } + writer.WriteEndArray(); + writer.WritePropertyName("created"u8); + writer.WriteNumberValue(Created, "U"); + writer.WritePropertyName("model"u8); + writer.WriteStringValue(Model); + if (Optional.IsDefined(SystemFingerprint)) + { + writer.WritePropertyName("system_fingerprint"u8); + writer.WriteStringValue(SystemFingerprint); + } + writer.WritePropertyName("object"u8); + writer.WriteStringValue(Object.ToString()); + if (Optional.IsDefined(Usage)) + { + writer.WritePropertyName("usage"u8); + writer.WriteObjectValue(Usage); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + CreateCompletionResponse IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateCompletionResponse)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeCreateCompletionResponse(document.RootElement, options); + } + + internal static CreateCompletionResponse DeserializeCreateCompletionResponse(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string id = default; + IReadOnlyList choices = default; + DateTimeOffset created = default; + string model = default; + string systemFingerprint = default; + CreateCompletionResponseObject @object = default; + CompletionUsage usage = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("id"u8)) + { + id = property.Value.GetString(); + continue; + } + if (property.NameEquals("choices"u8)) + { + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(CreateCompletionResponseChoice.DeserializeCreateCompletionResponseChoice(item, options)); + } + choices = array; + continue; + } + if (property.NameEquals("created"u8)) + { + created = DateTimeOffset.FromUnixTimeSeconds(property.Value.GetInt64()); + continue; + } + if (property.NameEquals("model"u8)) + { + model = property.Value.GetString(); + continue; + } + if (property.NameEquals("system_fingerprint"u8)) + { + systemFingerprint = property.Value.GetString(); + continue; + } + if (property.NameEquals("object"u8)) + { + @object = new CreateCompletionResponseObject(property.Value.GetString()); + continue; + } + if (property.NameEquals("usage"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + usage = CompletionUsage.DeserializeCompletionUsage(property.Value, options); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new CreateCompletionResponse( + id, + choices, + created, + model, + systemFingerprint, + @object, + usage, + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(CreateCompletionResponse)} does not support '{options.Format}' format."); + } + } + + CreateCompletionResponse IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeCreateCompletionResponse(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(CreateCompletionResponse)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static CreateCompletionResponse FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeCreateCompletionResponse(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/CreateCompletionResponse.cs b/.dotnet/src/Generated/Models/CreateCompletionResponse.cs new file mode 100644 index 000000000..859c43a17 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateCompletionResponse.cs @@ -0,0 +1,118 @@ +// + +using System; +using System.Collections.Generic; +using System.Linq; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// + /// Represents a completion response from the API. Note: both the streamed and non-streamed response + /// objects share the same shape (unlike the chat endpoint). + /// + internal partial class CreateCompletionResponse + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// A unique identifier for the completion. + /// The list of completion choices the model generated for the input. + /// The Unix timestamp (in seconds) of when the completion was created. + /// The model used for the completion. + /// , or is null. + internal CreateCompletionResponse(string id, IEnumerable choices, DateTimeOffset created, string model) + { + Argument.AssertNotNull(id, nameof(id)); + Argument.AssertNotNull(choices, nameof(choices)); + Argument.AssertNotNull(model, nameof(model)); + + Id = id; + Choices = choices.ToList(); + Created = created; + Model = model; + } + + /// Initializes a new instance of . + /// A unique identifier for the completion. + /// The list of completion choices the model generated for the input. + /// The Unix timestamp (in seconds) of when the completion was created. + /// The model used for the completion. + /// + /// This fingerprint represents the backend configuration that the model runs with. + /// + /// Can be used in conjunction with the `seed` request parameter to understand when backend changes + /// have been made that might impact determinism. 
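As the `system_fingerprint` remarks note, seeded requests are only comparable while the backend configuration stays stable. A hypothetical helper (not part of the generated code) that watches for fingerprint drift across responses:

```csharp
using System;

// Hypothetical drift check across seeded completion calls.
string lastFingerprint = null;

void CheckFingerprint(CreateCompletionResponse response)
{
    if (lastFingerprint != null && lastFingerprint != response.SystemFingerprint)
    {
        Console.WriteLine("Backend configuration changed; seeded results may no longer match.");
    }
    lastFingerprint = response.SystemFingerprint;
}
```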
+ /// + /// The object type, which is always `text_completion`. + /// Usage statistics for the completion request. + /// Keeps track of any properties unknown to the library. + internal CreateCompletionResponse(string id, IReadOnlyList choices, DateTimeOffset created, string model, string systemFingerprint, CreateCompletionResponseObject @object, CompletionUsage usage, IDictionary serializedAdditionalRawData) + { + Id = id; + Choices = choices; + Created = created; + Model = model; + SystemFingerprint = systemFingerprint; + Object = @object; + Usage = usage; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal CreateCompletionResponse() + { + } + + /// A unique identifier for the completion. + public string Id { get; } + /// The list of completion choices the model generated for the input. + public IReadOnlyList Choices { get; } + /// The Unix timestamp (in seconds) of when the completion was created. + public DateTimeOffset Created { get; } + /// The model used for the completion. + public string Model { get; } + /// + /// This fingerprint represents the backend configuration that the model runs with. + /// + /// Can be used in conjunction with the `seed` request parameter to understand when backend changes + /// have been made that might impact determinism. + /// + public string SystemFingerprint { get; } + /// The object type, which is always `text_completion`. + public CreateCompletionResponseObject Object { get; } = CreateCompletionResponseObject.TextCompletion; + + /// Usage statistics for the completion request. + public CompletionUsage Usage { get; } + } +} diff --git a/.dotnet/src/Generated/Models/CreateCompletionResponseChoice.Serialization.cs b/.dotnet/src/Generated/Models/CreateCompletionResponseChoice.Serialization.cs new file mode 100644 index 000000000..285a557b4 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateCompletionResponseChoice.Serialization.cs @@ -0,0 +1,166 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class CreateCompletionResponseChoice : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateCompletionResponseChoice)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("index"u8); + writer.WriteNumberValue(Index); + writer.WritePropertyName("text"u8); + writer.WriteStringValue(Text); + if (Logprobs != null) + { + writer.WritePropertyName("logprobs"u8); + writer.WriteObjectValue(Logprobs); + } + else + { + writer.WriteNull("logprobs"); + } + writer.WritePropertyName("finish_reason"u8); + writer.WriteStringValue(FinishReason.ToString()); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + CreateCompletionResponseChoice IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateCompletionResponseChoice)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeCreateCompletionResponseChoice(document.RootElement, options); + } + + internal static CreateCompletionResponseChoice DeserializeCreateCompletionResponseChoice(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + long index = default; + string text = default; + CreateCompletionResponseChoiceLogprobs logprobs = default; + CreateCompletionResponseChoiceFinishReason finishReason = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("index"u8)) + { + index = property.Value.GetInt64(); + continue; + } + if (property.NameEquals("text"u8)) + { + text = property.Value.GetString(); + continue; + } + if (property.NameEquals("logprobs"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + logprobs = null; + continue; + } + logprobs = CreateCompletionResponseChoiceLogprobs.DeserializeCreateCompletionResponseChoiceLogprobs(property.Value, options); + continue; + } + if (property.NameEquals("finish_reason"u8)) + { + finishReason = new CreateCompletionResponseChoiceFinishReason(property.Value.GetString()); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new CreateCompletionResponseChoice(index, text, logprobs, finishReason, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(CreateCompletionResponseChoice)} does not support '{options.Format}' format."); + } + } + + CreateCompletionResponseChoice IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeCreateCompletionResponseChoice(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(CreateCompletionResponseChoice)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static CreateCompletionResponseChoice FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeCreateCompletionResponseChoice(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/CreateCompletionResponseChoice.cs b/.dotnet/src/Generated/Models/CreateCompletionResponseChoice.cs new file mode 100644 index 000000000..6eac09bfc --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateCompletionResponseChoice.cs @@ -0,0 +1,107 @@ +// + +using System; +using System.Collections.Generic; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// The CreateCompletionResponseChoice. + internal partial class CreateCompletionResponseChoice + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// + /// + /// + /// The reason the model stopped generating tokens. This will be `stop` if the model hit a + /// natural stop point or a provided stop sequence, or `content_filter` if content was omitted + /// due to a flag from our content filters, `length` if the maximum number of tokens specified + /// in the request was reached, or `content_filter` if content was omitted due to a flag from our + /// content filters. + /// + /// is null. 
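Putting the response pieces together, code inside the library could surface a deserialized completion like this (a sketch; `pipelineResponse` is an assumed `PipelineResponse` already in scope):

```csharp
using System;

CreateCompletionResponse response = CreateCompletionResponse.FromResponse(pipelineResponse);

foreach (CreateCompletionResponseChoice choice in response.Choices)
{
    Console.WriteLine($"[{choice.Index}] {choice.Text}");

    if (choice.FinishReason == CreateCompletionResponseChoiceFinishReason.Length)
    {
        Console.WriteLine("Truncated by max_tokens; consider raising the limit.");
    }
}

// `Created` was parsed from a Unix timestamp via DateTimeOffset.FromUnixTimeSeconds.
Console.WriteLine($"Created {response.Created:u} by model {response.Model}");
```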
+        internal CreateCompletionResponseChoice(long index, string text, CreateCompletionResponseChoiceLogprobs logprobs, CreateCompletionResponseChoiceFinishReason finishReason)
+        {
+            Argument.AssertNotNull(text, nameof(text));
+
+            Index = index;
+            Text = text;
+            Logprobs = logprobs;
+            FinishReason = finishReason;
+        }
+
+        /// <summary> Initializes a new instance of <see cref="CreateCompletionResponseChoice"/>. </summary>
+        /// <param name="index"></param>
+        /// <param name="text"></param>
+        /// <param name="logprobs"></param>
+        /// <param name="finishReason">
+        /// The reason the model stopped generating tokens. This will be `stop` if the model hit a
+        /// natural stop point or a provided stop sequence, `length` if the maximum number of tokens
+        /// specified in the request was reached, or `content_filter` if content was omitted due to a
+        /// flag from our content filters.
+        /// </param>
+        /// <param name="serializedAdditionalRawData"> Keeps track of any properties unknown to the library. </param>
+        internal CreateCompletionResponseChoice(long index, string text, CreateCompletionResponseChoiceLogprobs logprobs, CreateCompletionResponseChoiceFinishReason finishReason, IDictionary<string, BinaryData> serializedAdditionalRawData)
+        {
+            Index = index;
+            Text = text;
+            Logprobs = logprobs;
+            FinishReason = finishReason;
+            _serializedAdditionalRawData = serializedAdditionalRawData;
+        }
+
+        /// <summary> Initializes a new instance of <see cref="CreateCompletionResponseChoice"/> for deserialization. </summary>
+        internal CreateCompletionResponseChoice()
+        {
+        }
+
+        /// <summary> Gets the index. </summary>
+        public long Index { get; }
+        /// <summary> Gets the text. </summary>
+        public string Text { get; }
+        /// <summary> Gets the logprobs. </summary>
+        public CreateCompletionResponseChoiceLogprobs Logprobs { get; }
+        /// <summary>
+        /// The reason the model stopped generating tokens. This will be `stop` if the model hit a
+        /// natural stop point or a provided stop sequence, `length` if the maximum number of tokens
+        /// specified in the request was reached, or `content_filter` if content was omitted due to a
+        /// flag from our content filters.
+        /// </summary>
+        public CreateCompletionResponseChoiceFinishReason FinishReason { get; }
+    }
+}
diff --git a/.dotnet/src/Generated/Models/CreateCompletionResponseChoiceFinishReason.cs b/.dotnet/src/Generated/Models/CreateCompletionResponseChoiceFinishReason.cs
new file mode 100644
index 000000000..fc4a928ce
--- /dev/null
+++ b/.dotnet/src/Generated/Models/CreateCompletionResponseChoiceFinishReason.cs
@@ -0,0 +1,49 @@
+// <auto-generated/>
+
+using System;
+using System.ComponentModel;
+
+namespace OpenAI.Internal.Models
+{
+    /// <summary> Enum for finish_reason in CreateCompletionResponseChoice. </summary>
+    internal readonly partial struct CreateCompletionResponseChoiceFinishReason : IEquatable<CreateCompletionResponseChoiceFinishReason>
+    {
+        private readonly string _value;
+
+        /// <summary> Initializes a new instance of <see cref="CreateCompletionResponseChoiceFinishReason"/>. </summary>
+        /// <exception cref="ArgumentNullException"> <paramref name="value"/> is null. </exception>
+        public CreateCompletionResponseChoiceFinishReason(string value)
+        {
+            _value = value ?? throw new ArgumentNullException(nameof(value));
+        }
+
+        private const string StopValue = "stop";
+        private const string LengthValue = "length";
+        private const string ContentFilterValue = "content_filter";
+
+        /// <summary> stop. </summary>
+        public static CreateCompletionResponseChoiceFinishReason Stop { get; } = new CreateCompletionResponseChoiceFinishReason(StopValue);
+        /// <summary> length. </summary>
+        public static CreateCompletionResponseChoiceFinishReason Length { get; } = new CreateCompletionResponseChoiceFinishReason(LengthValue);
+        /// <summary> content_filter. </summary>
+        public static CreateCompletionResponseChoiceFinishReason ContentFilter { get; } = new CreateCompletionResponseChoiceFinishReason(ContentFilterValue);
+        /// <summary> Determines if two <see cref="CreateCompletionResponseChoiceFinishReason"/> values are the same. </summary>
+        /// <summary> Determines if two <see cref="CreateCompletionResponseChoiceFinishReason"/> values are the same. </summary>
+        public static bool operator ==(CreateCompletionResponseChoiceFinishReason left, CreateCompletionResponseChoiceFinishReason right) => left.Equals(right);
+        /// <summary> Determines if two <see cref="CreateCompletionResponseChoiceFinishReason"/> values are not the same. </summary>
+        public static bool operator !=(CreateCompletionResponseChoiceFinishReason left, CreateCompletionResponseChoiceFinishReason right) => !left.Equals(right);
+        /// <summary> Converts a string to a <see cref="CreateCompletionResponseChoiceFinishReason"/>. </summary>
+        public static implicit operator CreateCompletionResponseChoiceFinishReason(string value) => new CreateCompletionResponseChoiceFinishReason(value);
+
+        /// <inheritdoc />
+        [EditorBrowsable(EditorBrowsableState.Never)]
+        public override bool Equals(object obj) => obj is CreateCompletionResponseChoiceFinishReason other && Equals(other);
+        /// <inheritdoc />
+        public bool Equals(CreateCompletionResponseChoiceFinishReason other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase);
+
+        /// <inheritdoc />
+        [EditorBrowsable(EditorBrowsableState.Never)]
+        public override int GetHashCode() => _value?.GetHashCode() ?? 0;
+        /// <inheritdoc />
+        public override string ToString() => _value;
+    }
+}
diff --git a/.dotnet/src/Generated/Models/CreateCompletionResponseChoiceLogprobs.Serialization.cs b/.dotnet/src/Generated/Models/CreateCompletionResponseChoiceLogprobs.Serialization.cs
new file mode 100644
index 000000000..7529ee39c
--- /dev/null
+++ b/.dotnet/src/Generated/Models/CreateCompletionResponseChoiceLogprobs.Serialization.cs
@@ -0,0 +1,217 @@
+// <auto-generated/>
+
+using System;
+using OpenAI.ClientShared.Internal;
+using System.ClientModel;
+using System.ClientModel.Primitives;
+using System.Collections.Generic;
+using System.Text.Json;
+using OpenAI;
+
+namespace OpenAI.Internal.Models
+{
+    internal partial class CreateCompletionResponseChoiceLogprobs : IJsonModel<CreateCompletionResponseChoiceLogprobs>
+    {
+        void IJsonModel<CreateCompletionResponseChoiceLogprobs>.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options)
+        {
+            var format = options.Format == "W" ?
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateCompletionResponseChoiceLogprobs)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("tokens"u8); + writer.WriteStartArray(); + foreach (var item in Tokens) + { + writer.WriteStringValue(item); + } + writer.WriteEndArray(); + writer.WritePropertyName("token_logprobs"u8); + writer.WriteStartArray(); + foreach (var item in TokenLogprobs) + { + writer.WriteNumberValue(item); + } + writer.WriteEndArray(); + writer.WritePropertyName("top_logprobs"u8); + writer.WriteStartArray(); + foreach (var item in TopLogprobs) + { + if (item == null) + { + writer.WriteNullValue(); + continue; + } + writer.WriteStartObject(); + foreach (var item0 in item) + { + writer.WritePropertyName(item0.Key); + writer.WriteNumberValue(item0.Value); + } + writer.WriteEndObject(); + } + writer.WriteEndArray(); + writer.WritePropertyName("text_offset"u8); + writer.WriteStartArray(); + foreach (var item in TextOffset) + { + writer.WriteNumberValue(item); + } + writer.WriteEndArray(); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + CreateCompletionResponseChoiceLogprobs IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateCompletionResponseChoiceLogprobs)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeCreateCompletionResponseChoiceLogprobs(document.RootElement, options); + } + + internal static CreateCompletionResponseChoiceLogprobs DeserializeCreateCompletionResponseChoiceLogprobs(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + IReadOnlyList tokens = default; + IReadOnlyList tokenLogprobs = default; + IReadOnlyList> topLogprobs = default; + IReadOnlyList textOffset = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("tokens"u8)) + { + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(item.GetString()); + } + tokens = array; + continue; + } + if (property.NameEquals("token_logprobs"u8)) + { + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(item.GetDouble()); + } + tokenLogprobs = array; + continue; + } + if (property.NameEquals("top_logprobs"u8)) + { + List> array = new List>(); + foreach (var item in property.Value.EnumerateArray()) + { + if (item.ValueKind == JsonValueKind.Null) + { + array.Add(null); + } + else + { + Dictionary dictionary = new Dictionary(); + foreach (var property0 in item.EnumerateObject()) + { + dictionary.Add(property0.Name, 
property0.Value.GetInt64()); + } + array.Add(dictionary); + } + } + topLogprobs = array; + continue; + } + if (property.NameEquals("text_offset"u8)) + { + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(item.GetInt64()); + } + textOffset = array; + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new CreateCompletionResponseChoiceLogprobs(tokens, tokenLogprobs, topLogprobs, textOffset, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(CreateCompletionResponseChoiceLogprobs)} does not support '{options.Format}' format."); + } + } + + CreateCompletionResponseChoiceLogprobs IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeCreateCompletionResponseChoiceLogprobs(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(CreateCompletionResponseChoiceLogprobs)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static CreateCompletionResponseChoiceLogprobs FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeCreateCompletionResponseChoiceLogprobs(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/CreateCompletionResponseChoiceLogprobs.cs b/.dotnet/src/Generated/Models/CreateCompletionResponseChoiceLogprobs.cs new file mode 100644 index 000000000..7d1986025 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateCompletionResponseChoiceLogprobs.cs @@ -0,0 +1,93 @@ +// + +using System; +using System.Collections.Generic; +using System.Linq; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// The CreateCompletionResponseChoiceLogprobs. + internal partial class CreateCompletionResponseChoiceLogprobs + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. 
+ /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// + /// + /// + /// , , or is null. + internal CreateCompletionResponseChoiceLogprobs(IEnumerable tokens, IEnumerable tokenLogprobs, IEnumerable> topLogprobs, IEnumerable textOffset) + { + Argument.AssertNotNull(tokens, nameof(tokens)); + Argument.AssertNotNull(tokenLogprobs, nameof(tokenLogprobs)); + Argument.AssertNotNull(topLogprobs, nameof(topLogprobs)); + Argument.AssertNotNull(textOffset, nameof(textOffset)); + + Tokens = tokens.ToList(); + TokenLogprobs = tokenLogprobs.ToList(); + TopLogprobs = topLogprobs.ToList(); + TextOffset = textOffset.ToList(); + } + + /// Initializes a new instance of . + /// + /// + /// + /// + /// Keeps track of any properties unknown to the library. + internal CreateCompletionResponseChoiceLogprobs(IReadOnlyList tokens, IReadOnlyList tokenLogprobs, IReadOnlyList> topLogprobs, IReadOnlyList textOffset, IDictionary serializedAdditionalRawData) + { + Tokens = tokens; + TokenLogprobs = tokenLogprobs; + TopLogprobs = topLogprobs; + TextOffset = textOffset; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal CreateCompletionResponseChoiceLogprobs() + { + } + + /// Gets the tokens. + public IReadOnlyList Tokens { get; } + /// Gets the token logprobs. + public IReadOnlyList TokenLogprobs { get; } + /// Gets the top logprobs. + public IReadOnlyList> TopLogprobs { get; } + /// Gets the text offset. + public IReadOnlyList TextOffset { get; } + } +} diff --git a/.dotnet/src/Generated/Models/CreateCompletionResponseObject.cs b/.dotnet/src/Generated/Models/CreateCompletionResponseObject.cs new file mode 100644 index 000000000..00ce256fc --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateCompletionResponseObject.cs @@ -0,0 +1,43 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Internal.Models +{ + /// The CreateCompletionResponse_object. + internal readonly partial struct CreateCompletionResponseObject : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public CreateCompletionResponseObject(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string TextCompletionValue = "text_completion"; + + /// text_completion. + public static CreateCompletionResponseObject TextCompletion { get; } = new CreateCompletionResponseObject(TextCompletionValue); + /// Determines if two values are the same. + public static bool operator ==(CreateCompletionResponseObject left, CreateCompletionResponseObject right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(CreateCompletionResponseObject left, CreateCompletionResponseObject right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator CreateCompletionResponseObject(string value) => new CreateCompletionResponseObject(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is CreateCompletionResponseObject other && Equals(other); + /// + public bool Equals(CreateCompletionResponseObject other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 
0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/CreateEmbeddingRequest.Serialization.cs b/.dotnet/src/Generated/Models/CreateEmbeddingRequest.Serialization.cs new file mode 100644 index 000000000..3147e8bb6 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateEmbeddingRequest.Serialization.cs @@ -0,0 +1,192 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class CreateEmbeddingRequest : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateEmbeddingRequest)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("input"u8); +#if NET6_0_OR_GREATER + writer.WriteRawValue(Input); +#else + using (JsonDocument document = JsonDocument.Parse(Input)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + writer.WritePropertyName("model"u8); + writer.WriteStringValue(Model.ToString()); + if (Optional.IsDefined(EncodingFormat)) + { + writer.WritePropertyName("encoding_format"u8); + writer.WriteStringValue(EncodingFormat.Value.ToString()); + } + if (Optional.IsDefined(Dimensions)) + { + writer.WritePropertyName("dimensions"u8); + writer.WriteNumberValue(Dimensions.Value); + } + if (Optional.IsDefined(User)) + { + writer.WritePropertyName("user"u8); + writer.WriteStringValue(User); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + CreateEmbeddingRequest IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateEmbeddingRequest)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeCreateEmbeddingRequest(document.RootElement, options); + } + + internal static CreateEmbeddingRequest DeserializeCreateEmbeddingRequest(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + BinaryData input = default; + CreateEmbeddingRequestModel model = default; + CreateEmbeddingRequestEncodingFormat? encodingFormat = default; + long? 
dimensions = default; + string user = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("input"u8)) + { + input = BinaryData.FromString(property.Value.GetRawText()); + continue; + } + if (property.NameEquals("model"u8)) + { + model = new CreateEmbeddingRequestModel(property.Value.GetString()); + continue; + } + if (property.NameEquals("encoding_format"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + encodingFormat = new CreateEmbeddingRequestEncodingFormat(property.Value.GetString()); + continue; + } + if (property.NameEquals("dimensions"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + dimensions = property.Value.GetInt64(); + continue; + } + if (property.NameEquals("user"u8)) + { + user = property.Value.GetString(); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new CreateEmbeddingRequest( + input, + model, + encodingFormat, + dimensions, + user, + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(CreateEmbeddingRequest)} does not support '{options.Format}' format."); + } + } + + CreateEmbeddingRequest IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeCreateEmbeddingRequest(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(CreateEmbeddingRequest)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static CreateEmbeddingRequest FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeCreateEmbeddingRequest(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/CreateEmbeddingRequest.cs b/.dotnet/src/Generated/Models/CreateEmbeddingRequest.cs new file mode 100644 index 000000000..67ee0828d --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateEmbeddingRequest.cs @@ -0,0 +1,184 @@ +// + +using System; +using System.Collections.Generic; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// The CreateEmbeddingRequest. + internal partial class CreateEmbeddingRequest + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . 
+ /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a + /// single request, pass an array of strings or array of token arrays. Each input must not exceed + /// the max input tokens for the model (8191 tokens for `text-embedding-ada-002`) and cannot be an + /// empty string. + /// [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) + /// for counting tokens. + /// + /// + /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to + /// see all of your available models, or see our [Model overview](/docs/models/overview) for + /// descriptions of them. + /// + /// is null. + public CreateEmbeddingRequest(BinaryData input, CreateEmbeddingRequestModel model) + { + Argument.AssertNotNull(input, nameof(input)); + + Input = input; + Model = model; + } + + /// Initializes a new instance of . + /// + /// Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a + /// single request, pass an array of strings or array of token arrays. Each input must not exceed + /// the max input tokens for the model (8191 tokens for `text-embedding-ada-002`) and cannot be an + /// empty string. + /// [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) + /// for counting tokens. + /// + /// + /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to + /// see all of your available models, or see our [Model overview](/docs/models/overview) for + /// descriptions of them. + /// + /// + /// The format to return the embeddings in. Can be either `float` or + /// [`base64`](https://pypi.org/project/pybase64/). + /// + /// + /// The number of dimensions the resulting output embeddings should have. Only supported in + /// `text-embedding-3` and later models. + /// + /// + /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect + /// abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). + /// + /// Keeps track of any properties unknown to the library. + internal CreateEmbeddingRequest(BinaryData input, CreateEmbeddingRequestModel model, CreateEmbeddingRequestEncodingFormat? encodingFormat, long? dimensions, string user, IDictionary serializedAdditionalRawData) + { + Input = input; + Model = model; + EncodingFormat = encodingFormat; + Dimensions = dimensions; + User = user; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal CreateEmbeddingRequest() + { + } + + /// + /// Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a + /// single request, pass an array of strings or array of token arrays. 
Each input must not exceed + /// the max input tokens for the model (8191 tokens for `text-embedding-ada-002`) and cannot be an + /// empty string. + /// [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) + /// for counting tokens. + /// + /// To assign an object to this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// + /// Supported types: + /// + /// + /// + /// + /// + /// where T is of type + /// + /// + /// where T is of type + /// + /// + /// where T is of type IList{long} + /// + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + public BinaryData Input { get; } + /// + /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to + /// see all of your available models, or see our [Model overview](/docs/models/overview) for + /// descriptions of them. + /// + public CreateEmbeddingRequestModel Model { get; } + /// + /// The format to return the embeddings in. Can be either `float` or + /// [`base64`](https://pypi.org/project/pybase64/). + /// + public CreateEmbeddingRequestEncodingFormat? EncodingFormat { get; set; } + /// + /// The number of dimensions the resulting output embeddings should have. Only supported in + /// `text-embedding-3` and later models. + /// + public long? Dimensions { get; set; } + /// + /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect + /// abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). + /// + public string User { get; set; } + } +} diff --git a/.dotnet/src/Generated/Models/CreateEmbeddingRequestEncodingFormat.cs b/.dotnet/src/Generated/Models/CreateEmbeddingRequestEncodingFormat.cs new file mode 100644 index 000000000..18264f565 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateEmbeddingRequestEncodingFormat.cs @@ -0,0 +1,46 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Internal.Models +{ + /// Enum for encoding_format in CreateEmbeddingRequest. + internal readonly partial struct CreateEmbeddingRequestEncodingFormat : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public CreateEmbeddingRequestEncodingFormat(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string FloatValue = "float"; + private const string Base64Value = "base64"; + + /// float. + public static CreateEmbeddingRequestEncodingFormat Float { get; } = new CreateEmbeddingRequestEncodingFormat(FloatValue); + /// base64. + public static CreateEmbeddingRequestEncodingFormat Base64 { get; } = new CreateEmbeddingRequestEncodingFormat(Base64Value); + /// Determines if two values are the same. + public static bool operator ==(CreateEmbeddingRequestEncodingFormat left, CreateEmbeddingRequestEncodingFormat right) => left.Equals(right); + /// Determines if two values are not the same. 
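A sketch of how this request model is assembled (library-internal code; the input strings are placeholders): the `input` union is carried as `BinaryData`, so a single string and an array of strings are both valid payloads, while the remaining properties are optional setters.

```csharp
using System;
using OpenAI.Internal.Models;

// Single input: serialized as a JSON string.
var single = new CreateEmbeddingRequest(
    BinaryData.FromObjectAsJson("The food was delicious."),
    CreateEmbeddingRequestModel.TextEmbedding3Small);

// Batched input: serialized as a JSON array of strings, with optional knobs.
var batch = new CreateEmbeddingRequest(
    BinaryData.FromObjectAsJson(new[] { "first input", "second input" }),
    CreateEmbeddingRequestModel.TextEmbedding3Small)
{
    EncodingFormat = CreateEmbeddingRequestEncodingFormat.Float,
    Dimensions = 256, // only honored by text-embedding-3 and later models
};
```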
+ public static bool operator !=(CreateEmbeddingRequestEncodingFormat left, CreateEmbeddingRequestEncodingFormat right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator CreateEmbeddingRequestEncodingFormat(string value) => new CreateEmbeddingRequestEncodingFormat(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is CreateEmbeddingRequestEncodingFormat other && Equals(other); + /// + public bool Equals(CreateEmbeddingRequestEncodingFormat other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/CreateEmbeddingRequestModel.cs b/.dotnet/src/Generated/Models/CreateEmbeddingRequestModel.cs new file mode 100644 index 000000000..98290654a --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateEmbeddingRequestModel.cs @@ -0,0 +1,49 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Internal.Models +{ + /// The CreateEmbeddingRequestModel. + internal readonly partial struct CreateEmbeddingRequestModel : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public CreateEmbeddingRequestModel(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string TextEmbeddingAda002Value = "text-embedding-ada-002"; + private const string TextEmbedding3SmallValue = "text-embedding-3-small"; + private const string TextEmbedding3LargeValue = "text-embedding-3-large"; + + /// text-embedding-ada-002. + public static CreateEmbeddingRequestModel TextEmbeddingAda002 { get; } = new CreateEmbeddingRequestModel(TextEmbeddingAda002Value); + /// text-embedding-3-small. + public static CreateEmbeddingRequestModel TextEmbedding3Small { get; } = new CreateEmbeddingRequestModel(TextEmbedding3SmallValue); + /// text-embedding-3-large. + public static CreateEmbeddingRequestModel TextEmbedding3Large { get; } = new CreateEmbeddingRequestModel(TextEmbedding3LargeValue); + /// Determines if two values are the same. + public static bool operator ==(CreateEmbeddingRequestModel left, CreateEmbeddingRequestModel right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(CreateEmbeddingRequestModel left, CreateEmbeddingRequestModel right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator CreateEmbeddingRequestModel(string value) => new CreateEmbeddingRequestModel(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is CreateEmbeddingRequestModel other && Equals(other); + /// + public bool Equals(CreateEmbeddingRequestModel other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 
0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/CreateEmbeddingResponse.Serialization.cs b/.dotnet/src/Generated/Models/CreateEmbeddingResponse.Serialization.cs new file mode 100644 index 000000000..acdea6442 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateEmbeddingResponse.Serialization.cs @@ -0,0 +1,164 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class CreateEmbeddingResponse : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateEmbeddingResponse)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("data"u8); + writer.WriteStartArray(); + foreach (var item in Data) + { + writer.WriteObjectValue(item); + } + writer.WriteEndArray(); + writer.WritePropertyName("model"u8); + writer.WriteStringValue(Model); + writer.WritePropertyName("object"u8); + writer.WriteStringValue(Object.ToString()); + writer.WritePropertyName("usage"u8); + writer.WriteObjectValue(Usage); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + CreateEmbeddingResponse IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateEmbeddingResponse)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeCreateEmbeddingResponse(document.RootElement, options); + } + + internal static CreateEmbeddingResponse DeserializeCreateEmbeddingResponse(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + IReadOnlyList data = default; + string model = default; + CreateEmbeddingResponseObject @object = default; + EmbeddingUsage usage = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("data"u8)) + { + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(Embedding.DeserializeEmbedding(item, options)); + } + data = array; + continue; + } + if (property.NameEquals("model"u8)) + { + model = property.Value.GetString(); + continue; + } + if (property.NameEquals("object"u8)) + { + @object = new CreateEmbeddingResponseObject(property.Value.GetString()); + continue; + } + if (property.NameEquals("usage"u8)) + { + usage = EmbeddingUsage.DeserializeEmbeddingUsage(property.Value, options); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new CreateEmbeddingResponse(data, model, @object, usage, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(CreateEmbeddingResponse)} does not support '{options.Format}' format."); + } + } + + CreateEmbeddingResponse IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeCreateEmbeddingResponse(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(CreateEmbeddingResponse)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static CreateEmbeddingResponse FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeCreateEmbeddingResponse(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. 
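A sketch of exercising this plumbing end to end, assuming library-internal access; the hand-written payload follows the documented embeddings wire shape, and the exact fields accepted by `Embedding.DeserializeEmbedding` and `EmbeddingUsage.DeserializeEmbeddingUsage` are assumptions here:

```csharp
using System;
using System.ClientModel.Primitives;
using OpenAI.Internal.Models;

BinaryData wire = BinaryData.FromString("""
{
  "object": "list",
  "data": [{ "object": "embedding", "index": 0, "embedding": [0.012, -0.034] }],
  "model": "text-embedding-3-small",
  "usage": { "prompt_tokens": 5, "total_tokens": 5 }
}
""");

// "J" selects the JSON format these models implement; Read dispatches to
// IPersistableModel<T>.Create, which calls DeserializeCreateEmbeddingResponse.
var options = new ModelReaderWriterOptions("J");
var response = ModelReaderWriter.Read<CreateEmbeddingResponse>(wire, options);
Console.WriteLine($"{response.Data.Count} embedding(s) from {response.Model}");

// Writing goes back out through IJsonModel<T>.Write.
BinaryData json = ModelReaderWriter.Write(response, options);
```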
+ internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/CreateEmbeddingResponse.cs b/.dotnet/src/Generated/Models/CreateEmbeddingResponse.cs new file mode 100644 index 000000000..f79cba4b7 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateEmbeddingResponse.cs @@ -0,0 +1,91 @@ +// + +using System; +using System.Collections.Generic; +using System.Linq; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// The CreateEmbeddingResponse. + internal partial class CreateEmbeddingResponse + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The list of embeddings generated by the model. + /// The name of the model used to generate the embedding. + /// The usage information for the request. + /// , or is null. + internal CreateEmbeddingResponse(IEnumerable data, string model, EmbeddingUsage usage) + { + Argument.AssertNotNull(data, nameof(data)); + Argument.AssertNotNull(model, nameof(model)); + Argument.AssertNotNull(usage, nameof(usage)); + + Data = data.ToList(); + Model = model; + Usage = usage; + } + + /// Initializes a new instance of . + /// The list of embeddings generated by the model. + /// The name of the model used to generate the embedding. + /// The object type, which is always "list". + /// The usage information for the request. + /// Keeps track of any properties unknown to the library. + internal CreateEmbeddingResponse(IReadOnlyList data, string model, CreateEmbeddingResponseObject @object, EmbeddingUsage usage, IDictionary serializedAdditionalRawData) + { + Data = data; + Model = model; + Object = @object; + Usage = usage; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal CreateEmbeddingResponse() + { + } + + /// The list of embeddings generated by the model. + public IReadOnlyList Data { get; } + /// The name of the model used to generate the embedding. + public string Model { get; } + /// The object type, which is always "list". + public CreateEmbeddingResponseObject Object { get; } = CreateEmbeddingResponseObject.List; + + /// The usage information for the request. + public EmbeddingUsage Usage { get; } + } +} diff --git a/.dotnet/src/Generated/Models/CreateEmbeddingResponseObject.cs b/.dotnet/src/Generated/Models/CreateEmbeddingResponseObject.cs new file mode 100644 index 000000000..1d7bc7a1a --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateEmbeddingResponseObject.cs @@ -0,0 +1,43 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Internal.Models +{ + /// The CreateEmbeddingResponse_object. 
+ internal readonly partial struct CreateEmbeddingResponseObject : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public CreateEmbeddingResponseObject(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string ListValue = "list"; + + /// list. + public static CreateEmbeddingResponseObject List { get; } = new CreateEmbeddingResponseObject(ListValue); + /// Determines if two values are the same. + public static bool operator ==(CreateEmbeddingResponseObject left, CreateEmbeddingResponseObject right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(CreateEmbeddingResponseObject left, CreateEmbeddingResponseObject right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator CreateEmbeddingResponseObject(string value) => new CreateEmbeddingResponseObject(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is CreateEmbeddingResponseObject other && Equals(other); + /// + public bool Equals(CreateEmbeddingResponseObject other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/CreateFileRequest.Serialization.cs b/.dotnet/src/Generated/Models/CreateFileRequest.Serialization.cs new file mode 100644 index 000000000..70700f22a --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateFileRequest.Serialization.cs @@ -0,0 +1,138 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class CreateFileRequest : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateFileRequest)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("file"u8); + writer.WriteBase64StringValue(File.ToArray(), "D"); + writer.WritePropertyName("purpose"u8); + writer.WriteStringValue(Purpose.ToString()); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + CreateFileRequest IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateFileRequest)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeCreateFileRequest(document.RootElement, options); + } + + internal static CreateFileRequest DeserializeCreateFileRequest(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + BinaryData file = default; + CreateFileRequestPurpose purpose = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("file"u8)) + { + file = BinaryData.FromBytes(property.Value.GetBytesFromBase64("D")); + continue; + } + if (property.NameEquals("purpose"u8)) + { + purpose = new CreateFileRequestPurpose(property.Value.GetString()); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new CreateFileRequest(file, purpose, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(CreateFileRequest)} does not support '{options.Format}' format."); + } + } + + CreateFileRequest IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeCreateFileRequest(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(CreateFileRequest)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static CreateFileRequest FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeCreateFileRequest(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/CreateFileRequest.cs b/.dotnet/src/Generated/Models/CreateFileRequest.cs new file mode 100644 index 000000000..a11d75f03 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateFileRequest.cs @@ -0,0 +1,107 @@ +// + +using System; +using System.Collections.Generic; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// The CreateFileRequest. + internal partial class CreateFileRequest + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . 
+ /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The file object (not file name) to be uploaded. + /// + /// The intended purpose of the uploaded file. Use "fine-tune" for + /// [Fine-tuning](/docs/api-reference/fine-tuning) and "assistants" for + /// [Assistants](/docs/api-reference/assistants) and [Messages](/docs/api-reference/messages). This + /// allows us to validate the format of the uploaded file is correct for fine-tuning. + /// + /// is null. + public CreateFileRequest(BinaryData file, CreateFileRequestPurpose purpose) + { + Argument.AssertNotNull(file, nameof(file)); + + File = file; + Purpose = purpose; + } + + /// Initializes a new instance of . + /// The file object (not file name) to be uploaded. + /// + /// The intended purpose of the uploaded file. Use "fine-tune" for + /// [Fine-tuning](/docs/api-reference/fine-tuning) and "assistants" for + /// [Assistants](/docs/api-reference/assistants) and [Messages](/docs/api-reference/messages). This + /// allows us to validate the format of the uploaded file is correct for fine-tuning. + /// + /// Keeps track of any properties unknown to the library. + internal CreateFileRequest(BinaryData file, CreateFileRequestPurpose purpose, IDictionary serializedAdditionalRawData) + { + File = file; + Purpose = purpose; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal CreateFileRequest() + { + } + + /// + /// The file object (not file name) to be uploaded. + /// + /// To assign a byte[] to this property use . + /// The byte[] will be serialized to a Base64 encoded string. + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromBytes(new byte[] { 1, 2, 3 }) + /// Creates a payload of "AQID". + /// + /// + /// + /// + public BinaryData File { get; } + /// + /// The intended purpose of the uploaded file. Use "fine-tune" for + /// [Fine-tuning](/docs/api-reference/fine-tuning) and "assistants" for + /// [Assistants](/docs/api-reference/assistants) and [Messages](/docs/api-reference/messages). This + /// allows us to validate the format of the uploaded file is correct for fine-tuning. + /// + public CreateFileRequestPurpose Purpose { get; } + } +} diff --git a/.dotnet/src/Generated/Models/CreateFileRequestPurpose.cs b/.dotnet/src/Generated/Models/CreateFileRequestPurpose.cs new file mode 100644 index 000000000..8dc7f4cd7 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateFileRequestPurpose.cs @@ -0,0 +1,46 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Internal.Models +{ + /// Enum for purpose in CreateFileRequest. + internal readonly partial struct CreateFileRequestPurpose : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public CreateFileRequestPurpose(string value) + { + _value = value ?? 
throw new ArgumentNullException(nameof(value)); + } + + private const string FineTuneValue = "fine-tune"; + private const string AssistantsValue = "assistants"; + + /// fine-tune. + public static CreateFileRequestPurpose FineTune { get; } = new CreateFileRequestPurpose(FineTuneValue); + /// assistants. + public static CreateFileRequestPurpose Assistants { get; } = new CreateFileRequestPurpose(AssistantsValue); + /// Determines if two values are the same. + public static bool operator ==(CreateFileRequestPurpose left, CreateFileRequestPurpose right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(CreateFileRequestPurpose left, CreateFileRequestPurpose right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator CreateFileRequestPurpose(string value) => new CreateFileRequestPurpose(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is CreateFileRequestPurpose other && Equals(other); + /// + public bool Equals(CreateFileRequestPurpose other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/CreateFineTuningJobRequest.Serialization.cs b/.dotnet/src/Generated/Models/CreateFineTuningJobRequest.Serialization.cs new file mode 100644 index 000000000..ddfa6fb31 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateFineTuningJobRequest.Serialization.cs @@ -0,0 +1,205 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class CreateFineTuningJobRequest : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateFineTuningJobRequest)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("model"u8); + writer.WriteStringValue(Model.ToString()); + writer.WritePropertyName("training_file"u8); + writer.WriteStringValue(TrainingFile); + if (Optional.IsDefined(Hyperparameters)) + { + writer.WritePropertyName("hyperparameters"u8); + writer.WriteObjectValue(Hyperparameters); + } + if (Optional.IsDefined(Suffix)) + { + if (Suffix != null) + { + writer.WritePropertyName("suffix"u8); + writer.WriteStringValue(Suffix); + } + else + { + writer.WriteNull("suffix"); + } + } + if (Optional.IsDefined(ValidationFile)) + { + if (ValidationFile != null) + { + writer.WritePropertyName("validation_file"u8); + writer.WriteStringValue(ValidationFile); + } + else + { + writer.WriteNull("validation_file"); + } + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + CreateFineTuningJobRequest IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateFineTuningJobRequest)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeCreateFineTuningJobRequest(document.RootElement, options); + } + + internal static CreateFineTuningJobRequest DeserializeCreateFineTuningJobRequest(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + CreateFineTuningJobRequestModel model = default; + string trainingFile = default; + CreateFineTuningJobRequestHyperparameters hyperparameters = default; + string suffix = default; + string validationFile = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("model"u8)) + { + model = new CreateFineTuningJobRequestModel(property.Value.GetString()); + continue; + } + if (property.NameEquals("training_file"u8)) + { + trainingFile = property.Value.GetString(); + continue; + } + if (property.NameEquals("hyperparameters"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + hyperparameters = CreateFineTuningJobRequestHyperparameters.DeserializeCreateFineTuningJobRequestHyperparameters(property.Value, options); + continue; + } + if (property.NameEquals("suffix"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + suffix = null; + continue; + } + suffix = property.Value.GetString(); + continue; + } + if (property.NameEquals("validation_file"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + validationFile = null; + continue; + } + validationFile = property.Value.GetString(); + continue; + } + if (options.Format != "W") + 
{ + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new CreateFineTuningJobRequest( + model, + trainingFile, + hyperparameters, + suffix, + validationFile, + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(CreateFineTuningJobRequest)} does not support '{options.Format}' format."); + } + } + + CreateFineTuningJobRequest IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeCreateFineTuningJobRequest(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(CreateFineTuningJobRequest)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static CreateFineTuningJobRequest FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeCreateFineTuningJobRequest(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/CreateFineTuningJobRequest.cs b/.dotnet/src/Generated/Models/CreateFineTuningJobRequest.cs new file mode 100644 index 000000000..b7534d1b9 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateFineTuningJobRequest.cs @@ -0,0 +1,157 @@ +// + +using System; +using System.Collections.Generic; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// The CreateFineTuningJobRequest. + internal partial class CreateFineTuningJobRequest + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// The name of the model to fine-tune. You can select one of the + /// [supported models](/docs/guides/fine-tuning/what-models-can-be-fine-tuned). + /// + /// + /// The ID of an uploaded file that contains training data. + /// + /// See [upload file](/docs/api-reference/files/upload) for how to upload a file. + /// + /// Your dataset must be formatted as a JSONL file. 
Additionally, you must upload your file with + /// the purpose `fine-tune`. + /// + /// See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. + /// + /// is null. + public CreateFineTuningJobRequest(CreateFineTuningJobRequestModel model, string trainingFile) + { + Argument.AssertNotNull(trainingFile, nameof(trainingFile)); + + Model = model; + TrainingFile = trainingFile; + } + + /// Initializes a new instance of . + /// + /// The name of the model to fine-tune. You can select one of the + /// [supported models](/docs/guides/fine-tuning/what-models-can-be-fine-tuned). + /// + /// + /// The ID of an uploaded file that contains training data. + /// + /// See [upload file](/docs/api-reference/files/upload) for how to upload a file. + /// + /// Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with + /// the purpose `fine-tune`. + /// + /// See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. + /// + /// The hyperparameters used for the fine-tuning job. + /// + /// A string of up to 18 characters that will be added to your fine-tuned model name. + /// + /// For example, a `suffix` of "custom-model-name" would produce a model name like + /// `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`. + /// + /// + /// The ID of an uploaded file that contains validation data. + /// + /// If you provide this file, the data is used to generate validation metrics periodically during + /// fine-tuning. These metrics can be viewed in the fine-tuning results file. The same data should + /// not be present in both train and validation files. + /// + /// Your dataset must be formatted as a JSONL file. You must upload your file with the purpose + /// `fine-tune`. + /// + /// See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. + /// + /// Keeps track of any properties unknown to the library. + internal CreateFineTuningJobRequest(CreateFineTuningJobRequestModel model, string trainingFile, CreateFineTuningJobRequestHyperparameters hyperparameters, string suffix, string validationFile, IDictionary serializedAdditionalRawData) + { + Model = model; + TrainingFile = trainingFile; + Hyperparameters = hyperparameters; + Suffix = suffix; + ValidationFile = validationFile; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal CreateFineTuningJobRequest() + { + } + + /// + /// The name of the model to fine-tune. You can select one of the + /// [supported models](/docs/guides/fine-tuning/what-models-can-be-fine-tuned). + /// + public CreateFineTuningJobRequestModel Model { get; } + /// + /// The ID of an uploaded file that contains training data. + /// + /// See [upload file](/docs/api-reference/files/upload) for how to upload a file. + /// + /// Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with + /// the purpose `fine-tune`. + /// + /// See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. + /// + public string TrainingFile { get; } + /// The hyperparameters used for the fine-tuning job. + public CreateFineTuningJobRequestHyperparameters Hyperparameters { get; set; } + /// + /// A string of up to 18 characters that will be added to your fine-tuned model name. + /// + /// For example, a `suffix` of "custom-model-name" would produce a model name like + /// `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`. 
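+        /// For instance (with <c>request</c> as a hypothetical variable of this type):
+        /// <code>
+        /// request.Suffix = "custom-model-name";
+        /// </code>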
+ /// + public string Suffix { get; set; } + /// + /// The ID of an uploaded file that contains validation data. + /// + /// If you provide this file, the data is used to generate validation metrics periodically during + /// fine-tuning. These metrics can be viewed in the fine-tuning results file. The same data should + /// not be present in both train and validation files. + /// + /// Your dataset must be formatted as a JSONL file. You must upload your file with the purpose + /// `fine-tune`. + /// + /// See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. + /// + public string ValidationFile { get; set; } + } +} diff --git a/.dotnet/src/Generated/Models/CreateFineTuningJobRequestHyperparameters.Serialization.cs b/.dotnet/src/Generated/Models/CreateFineTuningJobRequestHyperparameters.Serialization.cs new file mode 100644 index 000000000..ecf71a8e8 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateFineTuningJobRequestHyperparameters.Serialization.cs @@ -0,0 +1,188 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class CreateFineTuningJobRequestHyperparameters : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateFineTuningJobRequestHyperparameters)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsDefined(BatchSize)) + { + writer.WritePropertyName("batch_size"u8); +#if NET6_0_OR_GREATER + writer.WriteRawValue(BatchSize); +#else + using (JsonDocument document = JsonDocument.Parse(BatchSize)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + if (Optional.IsDefined(LearningRateMultiplier)) + { + writer.WritePropertyName("learning_rate_multiplier"u8); +#if NET6_0_OR_GREATER + writer.WriteRawValue(LearningRateMultiplier); +#else + using (JsonDocument document = JsonDocument.Parse(LearningRateMultiplier)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + if (Optional.IsDefined(NEpochs)) + { + writer.WritePropertyName("n_epochs"u8); +#if NET6_0_OR_GREATER + writer.WriteRawValue(NEpochs); +#else + using (JsonDocument document = JsonDocument.Parse(NEpochs)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + CreateFineTuningJobRequestHyperparameters IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
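+            // "W" means "use the model's own wire format"; GetFormatFromOptions
+            // always answers "J" for this JSON-only model, so both branches of the
+            // conditional resolve to JSON.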
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateFineTuningJobRequestHyperparameters)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeCreateFineTuningJobRequestHyperparameters(document.RootElement, options); + } + + internal static CreateFineTuningJobRequestHyperparameters DeserializeCreateFineTuningJobRequestHyperparameters(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + BinaryData batchSize = default; + BinaryData learningRateMultiplier = default; + BinaryData nEpochs = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("batch_size"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + batchSize = BinaryData.FromString(property.Value.GetRawText()); + continue; + } + if (property.NameEquals("learning_rate_multiplier"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + learningRateMultiplier = BinaryData.FromString(property.Value.GetRawText()); + continue; + } + if (property.NameEquals("n_epochs"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + nEpochs = BinaryData.FromString(property.Value.GetRawText()); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new CreateFineTuningJobRequestHyperparameters(batchSize, learningRateMultiplier, nEpochs, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(CreateFineTuningJobRequestHyperparameters)} does not support '{options.Format}' format."); + } + } + + CreateFineTuningJobRequestHyperparameters IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeCreateFineTuningJobRequestHyperparameters(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(CreateFineTuningJobRequestHyperparameters)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static CreateFineTuningJobRequestHyperparameters FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeCreateFineTuningJobRequestHyperparameters(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. 
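+        /// <remarks>
+        /// A minimal sketch of intended use (the variable names are illustrative,
+        /// not part of the generated surface):
+        /// <code>
+        /// var hyperparameters = new CreateFineTuningJobRequestHyperparameters();
+        /// hyperparameters.NEpochs = BinaryData.FromObjectAsJson(3);
+        /// BinaryContent body = hyperparameters.ToRequestBody();
+        /// </code>
+        /// </remarks>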
+ internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/CreateFineTuningJobRequestHyperparameters.cs b/.dotnet/src/Generated/Models/CreateFineTuningJobRequestHyperparameters.cs new file mode 100644 index 000000000..eafff5310 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateFineTuningJobRequestHyperparameters.cs @@ -0,0 +1,200 @@ +// + +using System; +using System.Collections.Generic; + +namespace OpenAI.Internal.Models +{ + /// The CreateFineTuningJobRequestHyperparameters. + internal partial class CreateFineTuningJobRequestHyperparameters + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + public CreateFineTuningJobRequestHyperparameters() + { + } + + /// Initializes a new instance of . + /// + /// Number of examples in each batch. A larger batch size means that model parameters are + /// updated less frequently, but with lower variance. + /// + /// + /// Scaling factor for the learning rate. A smaller learning rate may be useful to avoid + /// overfitting. + /// + /// + /// The number of epochs to train the model for. An epoch refers to one full cycle through the + /// training dataset. + /// + /// Keeps track of any properties unknown to the library. + internal CreateFineTuningJobRequestHyperparameters(BinaryData batchSize, BinaryData learningRateMultiplier, BinaryData nEpochs, IDictionary serializedAdditionalRawData) + { + BatchSize = batchSize; + LearningRateMultiplier = learningRateMultiplier; + NEpochs = nEpochs; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// + /// Number of examples in each batch. A larger batch size means that model parameters are + /// updated less frequently, but with lower variance. + /// + /// To assign an object to this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// + /// Supported types: + /// + /// + /// "auto" + /// + /// + /// + /// + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + public BinaryData BatchSize { get; set; } + /// + /// Scaling factor for the learning rate. A smaller learning rate may be useful to avoid + /// overfitting. + /// + /// To assign an object to this property use . + /// + /// + /// To assign an already formatted json string to this property use . 
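+        /// An illustrative assignment (assuming, per the supported-types list
+        /// below, that the service accepts either the literal string "auto" or a
+        /// number; <c>hyperparameters</c> is a hypothetical variable of the
+        /// containing type):
+        /// <code>
+        /// hyperparameters.LearningRateMultiplier = BinaryData.FromObjectAsJson("auto");
+        /// hyperparameters.LearningRateMultiplier = BinaryData.FromObjectAsJson(1.8);
+        /// </code>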
+ /// + /// + /// + /// Supported types: + /// + /// + /// "auto" + /// + /// + /// + /// + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + public BinaryData LearningRateMultiplier { get; set; } + /// + /// The number of epochs to train the model for. An epoch refers to one full cycle through the + /// training dataset. + /// + /// To assign an object to this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// + /// Supported types: + /// + /// + /// "auto" + /// + /// + /// + /// + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + public BinaryData NEpochs { get; set; } + } +} diff --git a/.dotnet/src/Generated/Models/CreateFineTuningJobRequestModel.cs b/.dotnet/src/Generated/Models/CreateFineTuningJobRequestModel.cs new file mode 100644 index 000000000..1845aa01d --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateFineTuningJobRequestModel.cs @@ -0,0 +1,49 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Internal.Models +{ + /// The CreateFineTuningJobRequestModel. + internal readonly partial struct CreateFineTuningJobRequestModel : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public CreateFineTuningJobRequestModel(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string Babbage002Value = "babbage-002"; + private const string Davinci002Value = "davinci-002"; + private const string Gpt35TurboValue = "gpt-3.5-turbo"; + + /// babbage-002. + public static CreateFineTuningJobRequestModel Babbage002 { get; } = new CreateFineTuningJobRequestModel(Babbage002Value); + /// davinci-002. + public static CreateFineTuningJobRequestModel Davinci002 { get; } = new CreateFineTuningJobRequestModel(Davinci002Value); + /// gpt-3.5-turbo. + public static CreateFineTuningJobRequestModel Gpt35Turbo { get; } = new CreateFineTuningJobRequestModel(Gpt35TurboValue); + /// Determines if two values are the same. + public static bool operator ==(CreateFineTuningJobRequestModel left, CreateFineTuningJobRequestModel right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(CreateFineTuningJobRequestModel left, CreateFineTuningJobRequestModel right) => !left.Equals(right); + /// Converts a string to a . 
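+        /// <remarks>
+        /// This struct is an "extensible enum": because of this implicit
+        /// conversion, any model name string is accepted, so newer models keep
+        /// working without a library update. For example:
+        /// <code>
+        /// CreateFineTuningJobRequestModel model = "gpt-3.5-turbo";
+        /// </code>
+        /// </remarks>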
+ public static implicit operator CreateFineTuningJobRequestModel(string value) => new CreateFineTuningJobRequestModel(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is CreateFineTuningJobRequestModel other && Equals(other); + /// + public bool Equals(CreateFineTuningJobRequestModel other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/CreateImageEditRequest.Serialization.cs b/.dotnet/src/Generated/Models/CreateImageEditRequest.Serialization.cs new file mode 100644 index 000000000..f08ba1064 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateImageEditRequest.Serialization.cs @@ -0,0 +1,241 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class CreateImageEditRequest : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateImageEditRequest)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("image"u8); + writer.WriteBase64StringValue(Image.ToArray(), "D"); + writer.WritePropertyName("prompt"u8); + writer.WriteStringValue(Prompt); + if (Optional.IsDefined(Mask)) + { + writer.WritePropertyName("mask"u8); + writer.WriteBase64StringValue(Mask.ToArray(), "D"); + } + if (Optional.IsDefined(Model)) + { + writer.WritePropertyName("model"u8); + writer.WriteStringValue(Model.Value.ToString()); + } + if (Optional.IsDefined(N)) + { + if (N != null) + { + writer.WritePropertyName("n"u8); + writer.WriteNumberValue(N.Value); + } + else + { + writer.WriteNull("n"); + } + } + if (Optional.IsDefined(Size)) + { + writer.WritePropertyName("size"u8); + writer.WriteStringValue(Size.Value.ToString()); + } + if (Optional.IsDefined(ResponseFormat)) + { + writer.WritePropertyName("response_format"u8); + writer.WriteStringValue(ResponseFormat.Value.ToString()); + } + if (Optional.IsDefined(User)) + { + writer.WritePropertyName("user"u8); + writer.WriteStringValue(User); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + CreateImageEditRequest IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateImageEditRequest)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeCreateImageEditRequest(document.RootElement, options); + } + + internal static CreateImageEditRequest DeserializeCreateImageEditRequest(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + BinaryData image = default; + string prompt = default; + BinaryData mask = default; + CreateImageEditRequestModel? model = default; + long? n = default; + CreateImageEditRequestSize? size = default; + CreateImageEditRequestResponseFormat? responseFormat = default; + string user = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("image"u8)) + { + image = BinaryData.FromBytes(property.Value.GetBytesFromBase64("D")); + continue; + } + if (property.NameEquals("prompt"u8)) + { + prompt = property.Value.GetString(); + continue; + } + if (property.NameEquals("mask"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + mask = BinaryData.FromBytes(property.Value.GetBytesFromBase64("D")); + continue; + } + if (property.NameEquals("model"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + model = new CreateImageEditRequestModel(property.Value.GetString()); + continue; + } + if (property.NameEquals("n"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + n = null; + continue; + } + n = property.Value.GetInt64(); + continue; + } + if (property.NameEquals("size"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + size = new CreateImageEditRequestSize(property.Value.GetString()); + continue; + } + if (property.NameEquals("response_format"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + responseFormat = new CreateImageEditRequestResponseFormat(property.Value.GetString()); + continue; + } + if (property.NameEquals("user"u8)) + { + user = property.Value.GetString(); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new CreateImageEditRequest( + image, + prompt, + mask, + model, + n, + size, + responseFormat, + user, + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(CreateImageEditRequest)} does not support '{options.Format}' format."); + } + } + + CreateImageEditRequest IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeCreateImageEditRequest(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(CreateImageEditRequest)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static CreateImageEditRequest FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeCreateImageEditRequest(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/CreateImageEditRequest.cs b/.dotnet/src/Generated/Models/CreateImageEditRequest.cs new file mode 100644 index 000000000..529c451fb --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateImageEditRequest.cs @@ -0,0 +1,151 @@ +// + +using System; +using System.Collections.Generic; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// The CreateImageEditRequest. + internal partial class CreateImageEditRequest + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not + /// provided, image must have transparency, which will be used as the mask. + /// + /// A text description of the desired image(s). The maximum length is 1000 characters. + /// or is null. + public CreateImageEditRequest(BinaryData image, string prompt) + { + Argument.AssertNotNull(image, nameof(image)); + Argument.AssertNotNull(prompt, nameof(prompt)); + + Image = image; + Prompt = prompt; + } + + /// Initializes a new instance of . + /// + /// The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not + /// provided, image must have transparency, which will be used as the mask. + /// + /// A text description of the desired image(s). The maximum length is 1000 characters. + /// + /// An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where + /// `image` should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions + /// as `image`. + /// + /// The model to use for image generation. Only `dall-e-2` is supported at this time. + /// The number of images to generate. Must be between 1 and 10. + /// The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. 
+ /// The format in which the generated images are returned. Must be one of `url` or `b64_json`. + /// + /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect + /// abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). + /// + /// Keeps track of any properties unknown to the library. + internal CreateImageEditRequest(BinaryData image, string prompt, BinaryData mask, CreateImageEditRequestModel? model, long? n, CreateImageEditRequestSize? size, CreateImageEditRequestResponseFormat? responseFormat, string user, IDictionary serializedAdditionalRawData) + { + Image = image; + Prompt = prompt; + Mask = mask; + Model = model; + N = n; + Size = size; + ResponseFormat = responseFormat; + User = user; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal CreateImageEditRequest() + { + } + + /// + /// The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not + /// provided, image must have transparency, which will be used as the mask. + /// + /// To assign a byte[] to this property use . + /// The byte[] will be serialized to a Base64 encoded string. + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromBytes(new byte[] { 1, 2, 3 }) + /// Creates a payload of "AQID". + /// + /// + /// + /// + public BinaryData Image { get; } + /// A text description of the desired image(s). The maximum length is 1000 characters. + public string Prompt { get; } + /// + /// An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where + /// `image` should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions + /// as `image`. + /// + /// To assign a byte[] to this property use . + /// The byte[] will be serialized to a Base64 encoded string. + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromBytes(new byte[] { 1, 2, 3 }) + /// Creates a payload of "AQID". + /// + /// + /// + /// + public BinaryData Mask { get; set; } + /// The model to use for image generation. Only `dall-e-2` is supported at this time. + public CreateImageEditRequestModel? Model { get; set; } + /// The number of images to generate. Must be between 1 and 10. + public long? N { get; set; } + /// The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. + public CreateImageEditRequestSize? Size { get; set; } + /// The format in which the generated images are returned. Must be one of `url` or `b64_json`. + public CreateImageEditRequestResponseFormat? ResponseFormat { get; set; } + /// + /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect + /// abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). + /// + public string User { get; set; } + } +} diff --git a/.dotnet/src/Generated/Models/CreateImageEditRequestModel.cs b/.dotnet/src/Generated/Models/CreateImageEditRequestModel.cs new file mode 100644 index 000000000..0f06a6f24 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateImageEditRequestModel.cs @@ -0,0 +1,43 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Internal.Models +{ + /// The CreateImageEditRequestModel. + internal readonly partial struct CreateImageEditRequestModel : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public CreateImageEditRequestModel(string value) + { + _value = value ?? 
throw new ArgumentNullException(nameof(value)); + } + + private const string DallE2Value = "dall-e-2"; + + /// dall-e-2. + public static CreateImageEditRequestModel DallE2 { get; } = new CreateImageEditRequestModel(DallE2Value); + /// Determines if two values are the same. + public static bool operator ==(CreateImageEditRequestModel left, CreateImageEditRequestModel right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(CreateImageEditRequestModel left, CreateImageEditRequestModel right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator CreateImageEditRequestModel(string value) => new CreateImageEditRequestModel(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is CreateImageEditRequestModel other && Equals(other); + /// + public bool Equals(CreateImageEditRequestModel other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/CreateImageEditRequestResponseFormat.cs b/.dotnet/src/Generated/Models/CreateImageEditRequestResponseFormat.cs new file mode 100644 index 000000000..374dccec4 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateImageEditRequestResponseFormat.cs @@ -0,0 +1,46 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Internal.Models +{ + /// Enum for response_format in CreateImageEditRequest. + internal readonly partial struct CreateImageEditRequestResponseFormat : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public CreateImageEditRequestResponseFormat(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string UrlValue = "url"; + private const string B64JsonValue = "b64_json"; + + /// url. + public static CreateImageEditRequestResponseFormat Url { get; } = new CreateImageEditRequestResponseFormat(UrlValue); + /// b64_json. + public static CreateImageEditRequestResponseFormat B64Json { get; } = new CreateImageEditRequestResponseFormat(B64JsonValue); + /// Determines if two values are the same. + public static bool operator ==(CreateImageEditRequestResponseFormat left, CreateImageEditRequestResponseFormat right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(CreateImageEditRequestResponseFormat left, CreateImageEditRequestResponseFormat right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator CreateImageEditRequestResponseFormat(string value) => new CreateImageEditRequestResponseFormat(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is CreateImageEditRequestResponseFormat other && Equals(other); + /// + public bool Equals(CreateImageEditRequestResponseFormat other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 
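+            // Worth noting: Equals above compares case-insensitively, while this
+            // hash is the ordinal, case-sensitive string hash, so two values that
+            // compare equal may still hash differently. Avoid mixed-case values as
+            // dictionary or set keys.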
0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/CreateImageEditRequestSize.cs b/.dotnet/src/Generated/Models/CreateImageEditRequestSize.cs new file mode 100644 index 000000000..63269c1f2 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateImageEditRequestSize.cs @@ -0,0 +1,49 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Internal.Models +{ + /// Enum for size in CreateImageEditRequest. + internal readonly partial struct CreateImageEditRequestSize : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public CreateImageEditRequestSize(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string _256x256Value = "256x256"; + private const string _512x512Value = "512x512"; + private const string _1024x1024Value = "1024x1024"; + + /// 256x256. + public static CreateImageEditRequestSize _256x256 { get; } = new CreateImageEditRequestSize(_256x256Value); + /// 512x512. + public static CreateImageEditRequestSize _512x512 { get; } = new CreateImageEditRequestSize(_512x512Value); + /// 1024x1024. + public static CreateImageEditRequestSize _1024x1024 { get; } = new CreateImageEditRequestSize(_1024x1024Value); + /// Determines if two values are the same. + public static bool operator ==(CreateImageEditRequestSize left, CreateImageEditRequestSize right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(CreateImageEditRequestSize left, CreateImageEditRequestSize right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator CreateImageEditRequestSize(string value) => new CreateImageEditRequestSize(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is CreateImageEditRequestSize other && Equals(other); + /// + public bool Equals(CreateImageEditRequestSize other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/CreateImageRequest.Serialization.cs b/.dotnet/src/Generated/Models/CreateImageRequest.Serialization.cs new file mode 100644 index 000000000..0450242da --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateImageRequest.Serialization.cs @@ -0,0 +1,248 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class CreateImageRequest : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateImageRequest)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("prompt"u8); + writer.WriteStringValue(Prompt); + if (Optional.IsDefined(Model)) + { + writer.WritePropertyName("model"u8); + writer.WriteStringValue(Model.Value.ToString()); + } + if (Optional.IsDefined(N)) + { + if (N != null) + { + writer.WritePropertyName("n"u8); + writer.WriteNumberValue(N.Value); + } + else + { + writer.WriteNull("n"); + } + } + if (Optional.IsDefined(Quality)) + { + writer.WritePropertyName("quality"u8); + writer.WriteStringValue(Quality.Value.ToString()); + } + if (Optional.IsDefined(ResponseFormat)) + { + writer.WritePropertyName("response_format"u8); + writer.WriteStringValue(ResponseFormat.Value.ToString()); + } + if (Optional.IsDefined(Size)) + { + writer.WritePropertyName("size"u8); + writer.WriteStringValue(Size.Value.ToString()); + } + if (Optional.IsDefined(Style)) + { + writer.WritePropertyName("style"u8); + writer.WriteStringValue(Style.Value.ToString()); + } + if (Optional.IsDefined(User)) + { + writer.WritePropertyName("user"u8); + writer.WriteStringValue(User); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + CreateImageRequest IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateImageRequest)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeCreateImageRequest(document.RootElement, options); + } + + internal static CreateImageRequest DeserializeCreateImageRequest(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string prompt = default; + CreateImageRequestModel? model = default; + long? n = default; + CreateImageRequestQuality? quality = default; + CreateImageRequestResponseFormat? responseFormat = default; + CreateImageRequestSize? size = default; + CreateImageRequestStyle? 
style = default; + string user = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("prompt"u8)) + { + prompt = property.Value.GetString(); + continue; + } + if (property.NameEquals("model"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + model = new CreateImageRequestModel(property.Value.GetString()); + continue; + } + if (property.NameEquals("n"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + n = null; + continue; + } + n = property.Value.GetInt64(); + continue; + } + if (property.NameEquals("quality"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + quality = new CreateImageRequestQuality(property.Value.GetString()); + continue; + } + if (property.NameEquals("response_format"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + responseFormat = new CreateImageRequestResponseFormat(property.Value.GetString()); + continue; + } + if (property.NameEquals("size"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + size = new CreateImageRequestSize(property.Value.GetString()); + continue; + } + if (property.NameEquals("style"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + style = new CreateImageRequestStyle(property.Value.GetString()); + continue; + } + if (property.NameEquals("user"u8)) + { + user = property.Value.GetString(); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new CreateImageRequest( + prompt, + model, + n, + quality, + responseFormat, + size, + style, + user, + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(CreateImageRequest)} does not support '{options.Format}' format."); + } + } + + CreateImageRequest IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeCreateImageRequest(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(CreateImageRequest)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static CreateImageRequest FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeCreateImageRequest(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. 
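+        /// <remarks>
+        /// For illustration, a caller inside this assembly might write:
+        /// <code>
+        /// var request = new CreateImageRequest("a watercolor fox");
+        /// BinaryContent body = request.ToRequestBody();
+        /// </code>
+        /// </remarks>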
+ internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/CreateImageRequest.cs b/.dotnet/src/Generated/Models/CreateImageRequest.cs new file mode 100644 index 000000000..d0e1f19b6 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateImageRequest.cs @@ -0,0 +1,140 @@ +// + +using System; +using System.Collections.Generic; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// The CreateImageRequest. + internal partial class CreateImageRequest + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// A text description of the desired image(s). The maximum length is 1000 characters for + /// `dall-e-2` and 4000 characters for `dall-e-3`. + /// + /// is null. + public CreateImageRequest(string prompt) + { + Argument.AssertNotNull(prompt, nameof(prompt)); + + Prompt = prompt; + } + + /// Initializes a new instance of . + /// + /// A text description of the desired image(s). The maximum length is 1000 characters for + /// `dall-e-2` and 4000 characters for `dall-e-3`. + /// + /// The model to use for image generation. + /// + /// The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only `n=1` is + /// supported. + /// + /// + /// The quality of the image that will be generated. `hd` creates images with finer details and + /// greater consistency across the image. This param is only supported for `dall-e-3`. + /// + /// The format in which the generated images are returned. Must be one of `url` or `b64_json`. + /// + /// The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024` for + /// `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3` models. + /// + /// + /// The style of the generated images. Must be one of `vivid` or `natural`. Vivid causes the model + /// to lean towards generating hyper-real and dramatic images. Natural causes the model to produce + /// more natural, less hyper-real looking images. This param is only supported for `dall-e-3`. + /// + /// + /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect + /// abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). + /// + /// Keeps track of any properties unknown to the library. + internal CreateImageRequest(string prompt, CreateImageRequestModel? model, long? n, CreateImageRequestQuality? quality, CreateImageRequestResponseFormat? responseFormat, CreateImageRequestSize? size, CreateImageRequestStyle? 
style, string user, IDictionary serializedAdditionalRawData) + { + Prompt = prompt; + Model = model; + N = n; + Quality = quality; + ResponseFormat = responseFormat; + Size = size; + Style = style; + User = user; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal CreateImageRequest() + { + } + + /// + /// A text description of the desired image(s). The maximum length is 1000 characters for + /// `dall-e-2` and 4000 characters for `dall-e-3`. + /// + public string Prompt { get; } + /// The model to use for image generation. + public CreateImageRequestModel? Model { get; set; } + /// + /// The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only `n=1` is + /// supported. + /// + public long? N { get; set; } + /// + /// The quality of the image that will be generated. `hd` creates images with finer details and + /// greater consistency across the image. This param is only supported for `dall-e-3`. + /// + public CreateImageRequestQuality? Quality { get; set; } + /// The format in which the generated images are returned. Must be one of `url` or `b64_json`. + public CreateImageRequestResponseFormat? ResponseFormat { get; set; } + /// + /// The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024` for + /// `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3` models. + /// + public CreateImageRequestSize? Size { get; set; } + /// + /// The style of the generated images. Must be one of `vivid` or `natural`. Vivid causes the model + /// to lean towards generating hyper-real and dramatic images. Natural causes the model to produce + /// more natural, less hyper-real looking images. This param is only supported for `dall-e-3`. + /// + public CreateImageRequestStyle? Style { get; set; } + /// + /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect + /// abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). + /// + public string User { get; set; } + } +} diff --git a/.dotnet/src/Generated/Models/CreateImageRequestModel.cs b/.dotnet/src/Generated/Models/CreateImageRequestModel.cs new file mode 100644 index 000000000..f5226fa5b --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateImageRequestModel.cs @@ -0,0 +1,46 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Internal.Models +{ + /// The CreateImageRequestModel. + internal readonly partial struct CreateImageRequestModel : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public CreateImageRequestModel(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string DallE2Value = "dall-e-2"; + private const string DallE3Value = "dall-e-3"; + + /// dall-e-2. + public static CreateImageRequestModel DallE2 { get; } = new CreateImageRequestModel(DallE2Value); + /// dall-e-3. + public static CreateImageRequestModel DallE3 { get; } = new CreateImageRequestModel(DallE3Value); + /// Determines if two values are the same. + public static bool operator ==(CreateImageRequestModel left, CreateImageRequestModel right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(CreateImageRequestModel left, CreateImageRequestModel right) => !left.Equals(right); + /// Converts a string to a . 
+ public static implicit operator CreateImageRequestModel(string value) => new CreateImageRequestModel(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is CreateImageRequestModel other && Equals(other); + /// + public bool Equals(CreateImageRequestModel other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/CreateImageRequestQuality.cs b/.dotnet/src/Generated/Models/CreateImageRequestQuality.cs new file mode 100644 index 000000000..01060a04f --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateImageRequestQuality.cs @@ -0,0 +1,46 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Internal.Models +{ + /// Enum for quality in CreateImageRequest. + internal readonly partial struct CreateImageRequestQuality : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public CreateImageRequestQuality(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string StandardValue = "standard"; + private const string HdValue = "hd"; + + /// standard. + public static CreateImageRequestQuality Standard { get; } = new CreateImageRequestQuality(StandardValue); + /// hd. + public static CreateImageRequestQuality Hd { get; } = new CreateImageRequestQuality(HdValue); + /// Determines if two values are the same. + public static bool operator ==(CreateImageRequestQuality left, CreateImageRequestQuality right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(CreateImageRequestQuality left, CreateImageRequestQuality right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator CreateImageRequestQuality(string value) => new CreateImageRequestQuality(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is CreateImageRequestQuality other && Equals(other); + /// + public bool Equals(CreateImageRequestQuality other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/CreateImageRequestResponseFormat.cs b/.dotnet/src/Generated/Models/CreateImageRequestResponseFormat.cs new file mode 100644 index 000000000..22d92d612 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateImageRequestResponseFormat.cs @@ -0,0 +1,46 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Internal.Models +{ + /// Enum for response_format in CreateImageRequest. + internal readonly partial struct CreateImageRequestResponseFormat : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public CreateImageRequestResponseFormat(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string UrlValue = "url"; + private const string B64JsonValue = "b64_json"; + + /// url. 
+ public static CreateImageRequestResponseFormat Url { get; } = new CreateImageRequestResponseFormat(UrlValue); + /// b64_json. + public static CreateImageRequestResponseFormat B64Json { get; } = new CreateImageRequestResponseFormat(B64JsonValue); + /// Determines if two values are the same. + public static bool operator ==(CreateImageRequestResponseFormat left, CreateImageRequestResponseFormat right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(CreateImageRequestResponseFormat left, CreateImageRequestResponseFormat right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator CreateImageRequestResponseFormat(string value) => new CreateImageRequestResponseFormat(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is CreateImageRequestResponseFormat other && Equals(other); + /// + public bool Equals(CreateImageRequestResponseFormat other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/CreateImageRequestSize.cs b/.dotnet/src/Generated/Models/CreateImageRequestSize.cs new file mode 100644 index 000000000..9bff12b78 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateImageRequestSize.cs @@ -0,0 +1,55 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Internal.Models +{ + /// Enum for size in CreateImageRequest. + internal readonly partial struct CreateImageRequestSize : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public CreateImageRequestSize(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string _256x256Value = "256x256"; + private const string _512x512Value = "512x512"; + private const string _1024x1024Value = "1024x1024"; + private const string _1792x1024Value = "1792x1024"; + private const string _1024x1792Value = "1024x1792"; + + /// 256x256. + public static CreateImageRequestSize _256x256 { get; } = new CreateImageRequestSize(_256x256Value); + /// 512x512. + public static CreateImageRequestSize _512x512 { get; } = new CreateImageRequestSize(_512x512Value); + /// 1024x1024. + public static CreateImageRequestSize _1024x1024 { get; } = new CreateImageRequestSize(_1024x1024Value); + /// 1792x1024. + public static CreateImageRequestSize _1792x1024 { get; } = new CreateImageRequestSize(_1792x1024Value); + /// 1024x1792. + public static CreateImageRequestSize _1024x1792 { get; } = new CreateImageRequestSize(_1024x1792Value); + /// Determines if two values are the same. + public static bool operator ==(CreateImageRequestSize left, CreateImageRequestSize right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(CreateImageRequestSize left, CreateImageRequestSize right) => !left.Equals(right); + /// Converts a string to a . 
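+        /// <remarks>
+        /// The predefined values are prefixed with an underscore because C#
+        /// identifiers cannot start with a digit; plain strings convert too:
+        /// <code>
+        /// CreateImageRequestSize size = CreateImageRequestSize._1792x1024;
+        /// CreateImageRequestSize same = "1792x1024";
+        /// </code>
+        /// </remarks>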
+ public static implicit operator CreateImageRequestSize(string value) => new CreateImageRequestSize(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is CreateImageRequestSize other && Equals(other); + /// + public bool Equals(CreateImageRequestSize other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/CreateImageRequestStyle.cs b/.dotnet/src/Generated/Models/CreateImageRequestStyle.cs new file mode 100644 index 000000000..200565a5f --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateImageRequestStyle.cs @@ -0,0 +1,46 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Internal.Models +{ + /// Enum for style in CreateImageRequest. + internal readonly partial struct CreateImageRequestStyle : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public CreateImageRequestStyle(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string VividValue = "vivid"; + private const string NaturalValue = "natural"; + + /// vivid. + public static CreateImageRequestStyle Vivid { get; } = new CreateImageRequestStyle(VividValue); + /// natural. + public static CreateImageRequestStyle Natural { get; } = new CreateImageRequestStyle(NaturalValue); + /// Determines if two values are the same. + public static bool operator ==(CreateImageRequestStyle left, CreateImageRequestStyle right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(CreateImageRequestStyle left, CreateImageRequestStyle right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator CreateImageRequestStyle(string value) => new CreateImageRequestStyle(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is CreateImageRequestStyle other && Equals(other); + /// + public bool Equals(CreateImageRequestStyle other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/CreateImageVariationRequest.Serialization.cs b/.dotnet/src/Generated/Models/CreateImageVariationRequest.Serialization.cs new file mode 100644 index 000000000..50ff69ef6 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateImageVariationRequest.Serialization.cs @@ -0,0 +1,216 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class CreateImageVariationRequest : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateImageVariationRequest)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("image"u8); + writer.WriteBase64StringValue(Image.ToArray(), "D"); + if (Optional.IsDefined(Model)) + { + writer.WritePropertyName("model"u8); + writer.WriteStringValue(Model.Value.ToString()); + } + if (Optional.IsDefined(N)) + { + if (N != null) + { + writer.WritePropertyName("n"u8); + writer.WriteNumberValue(N.Value); + } + else + { + writer.WriteNull("n"); + } + } + if (Optional.IsDefined(ResponseFormat)) + { + writer.WritePropertyName("response_format"u8); + writer.WriteStringValue(ResponseFormat.Value.ToString()); + } + if (Optional.IsDefined(Size)) + { + writer.WritePropertyName("size"u8); + writer.WriteStringValue(Size.Value.ToString()); + } + if (Optional.IsDefined(User)) + { + writer.WritePropertyName("user"u8); + writer.WriteStringValue(User); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + CreateImageVariationRequest IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateImageVariationRequest)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeCreateImageVariationRequest(document.RootElement, options); + } + + internal static CreateImageVariationRequest DeserializeCreateImageVariationRequest(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + BinaryData image = default; + CreateImageVariationRequestModel? model = default; + long? n = default; + CreateImageVariationRequestResponseFormat? responseFormat = default; + CreateImageVariationRequestSize? 
size = default; + string user = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("image"u8)) + { + image = BinaryData.FromBytes(property.Value.GetBytesFromBase64("D")); + continue; + } + if (property.NameEquals("model"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + model = new CreateImageVariationRequestModel(property.Value.GetString()); + continue; + } + if (property.NameEquals("n"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + n = null; + continue; + } + n = property.Value.GetInt64(); + continue; + } + if (property.NameEquals("response_format"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + responseFormat = new CreateImageVariationRequestResponseFormat(property.Value.GetString()); + continue; + } + if (property.NameEquals("size"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + size = new CreateImageVariationRequestSize(property.Value.GetString()); + continue; + } + if (property.NameEquals("user"u8)) + { + user = property.Value.GetString(); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new CreateImageVariationRequest( + image, + model, + n, + responseFormat, + size, + user, + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(CreateImageVariationRequest)} does not support '{options.Format}' format."); + } + } + + CreateImageVariationRequest IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeCreateImageVariationRequest(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(CreateImageVariationRequest)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static CreateImageVariationRequest FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeCreateImageVariationRequest(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. 
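The `IJsonModel<T>`/`IPersistableModel<T>` plumbing above follows the standard System.ClientModel convention: a format of `"J"` requests JSON, while `"W"` means "wire format" and lets the model choose its default. A hedged sketch of how such a model can be round-tripped through `ModelReaderWriter` (illustrative only, since the generated types here are internal to the library; `TModel` is a placeholder):

```csharp
using System;
using System.ClientModel.Primitives;

internal static class ModelRoundTrip
{
    // TModel stands in for any type implementing IJsonModel<TModel>,
    // as CreateImageVariationRequest does above.
    public static TModel RoundTrip<TModel>(TModel model) where TModel : IJsonModel<TModel>
    {
        BinaryData json = ModelReaderWriter.Write(model);  // dispatches to IJsonModel<TModel>.Write
        return ModelReaderWriter.Read<TModel>(json);       // dispatches to IJsonModel<TModel>.Create
    }
}
```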
+ internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/CreateImageVariationRequest.cs b/.dotnet/src/Generated/Models/CreateImageVariationRequest.cs new file mode 100644 index 000000000..f96ae71aa --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateImageVariationRequest.cs @@ -0,0 +1,119 @@ +// + +using System; +using System.Collections.Generic; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// The CreateImageVariationRequest. + internal partial class CreateImageVariationRequest + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, + /// and square. + /// + /// is null. + public CreateImageVariationRequest(BinaryData image) + { + Argument.AssertNotNull(image, nameof(image)); + + Image = image; + } + + /// Initializes a new instance of . + /// + /// The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, + /// and square. + /// + /// The model to use for image generation. Only `dall-e-2` is supported at this time. + /// The number of images to generate. Must be between 1 and 10. + /// The format in which the generated images are returned. Must be one of `url` or `b64_json`. + /// The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. + /// + /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect + /// abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). + /// + /// Keeps track of any properties unknown to the library. + internal CreateImageVariationRequest(BinaryData image, CreateImageVariationRequestModel? model, long? n, CreateImageVariationRequestResponseFormat? responseFormat, CreateImageVariationRequestSize? size, string user, IDictionary serializedAdditionalRawData) + { + Image = image; + Model = model; + N = n; + ResponseFormat = responseFormat; + Size = size; + User = user; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal CreateImageVariationRequest() + { + } + + /// + /// The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, + /// and square. + /// + /// To assign a byte[] to this property use . + /// The byte[] will be serialized to a Base64 encoded string. + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromBytes(new byte[] { 1, 2, 3 }) + /// Creates a payload of "AQID". + /// + /// + /// + /// + public BinaryData Image { get; } + /// The model to use for image generation. Only `dall-e-2` is supported at this time. + public CreateImageVariationRequestModel? 
Model { get; set; } + /// The number of images to generate. Must be between 1 and 10. + public long? N { get; set; } + /// The format in which the generated images are returned. Must be one of `url` or `b64_json`. + public CreateImageVariationRequestResponseFormat? ResponseFormat { get; set; } + /// The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. + public CreateImageVariationRequestSize? Size { get; set; } + /// + /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect + /// abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). + /// + public string User { get; set; } + } +} diff --git a/.dotnet/src/Generated/Models/CreateImageVariationRequestModel.cs b/.dotnet/src/Generated/Models/CreateImageVariationRequestModel.cs new file mode 100644 index 000000000..e7a09b2c7 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateImageVariationRequestModel.cs @@ -0,0 +1,43 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Internal.Models +{ + /// The CreateImageVariationRequestModel. + internal readonly partial struct CreateImageVariationRequestModel : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public CreateImageVariationRequestModel(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string DallE2Value = "dall-e-2"; + + /// dall-e-2. + public static CreateImageVariationRequestModel DallE2 { get; } = new CreateImageVariationRequestModel(DallE2Value); + /// Determines if two values are the same. + public static bool operator ==(CreateImageVariationRequestModel left, CreateImageVariationRequestModel right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(CreateImageVariationRequestModel left, CreateImageVariationRequestModel right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator CreateImageVariationRequestModel(string value) => new CreateImageVariationRequestModel(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is CreateImageVariationRequestModel other && Equals(other); + /// + public bool Equals(CreateImageVariationRequestModel other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/CreateImageVariationRequestResponseFormat.cs b/.dotnet/src/Generated/Models/CreateImageVariationRequestResponseFormat.cs new file mode 100644 index 000000000..5f99e6bcf --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateImageVariationRequestResponseFormat.cs @@ -0,0 +1,46 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Internal.Models +{ + /// Enum for response_format in CreateImageVariationRequest. + internal readonly partial struct CreateImageVariationRequestResponseFormat : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public CreateImageVariationRequestResponseFormat(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string UrlValue = "url"; + private const string B64JsonValue = "b64_json"; + + /// url. 
+ public static CreateImageVariationRequestResponseFormat Url { get; } = new CreateImageVariationRequestResponseFormat(UrlValue); + /// b64_json. + public static CreateImageVariationRequestResponseFormat B64Json { get; } = new CreateImageVariationRequestResponseFormat(B64JsonValue); + /// Determines if two values are the same. + public static bool operator ==(CreateImageVariationRequestResponseFormat left, CreateImageVariationRequestResponseFormat right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(CreateImageVariationRequestResponseFormat left, CreateImageVariationRequestResponseFormat right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator CreateImageVariationRequestResponseFormat(string value) => new CreateImageVariationRequestResponseFormat(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is CreateImageVariationRequestResponseFormat other && Equals(other); + /// + public bool Equals(CreateImageVariationRequestResponseFormat other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/CreateImageVariationRequestSize.cs b/.dotnet/src/Generated/Models/CreateImageVariationRequestSize.cs new file mode 100644 index 000000000..f10e28466 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateImageVariationRequestSize.cs @@ -0,0 +1,49 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Internal.Models +{ + /// Enum for size in CreateImageVariationRequest. + internal readonly partial struct CreateImageVariationRequestSize : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public CreateImageVariationRequestSize(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string _256x256Value = "256x256"; + private const string _512x512Value = "512x512"; + private const string _1024x1024Value = "1024x1024"; + + /// 256x256. + public static CreateImageVariationRequestSize _256x256 { get; } = new CreateImageVariationRequestSize(_256x256Value); + /// 512x512. + public static CreateImageVariationRequestSize _512x512 { get; } = new CreateImageVariationRequestSize(_512x512Value); + /// 1024x1024. + public static CreateImageVariationRequestSize _1024x1024 { get; } = new CreateImageVariationRequestSize(_1024x1024Value); + /// Determines if two values are the same. + public static bool operator ==(CreateImageVariationRequestSize left, CreateImageVariationRequestSize right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(CreateImageVariationRequestSize left, CreateImageVariationRequestSize right) => !left.Equals(right); + /// Converts a string to a . 
+ public static implicit operator CreateImageVariationRequestSize(string value) => new CreateImageVariationRequestSize(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is CreateImageVariationRequestSize other && Equals(other); + /// + public bool Equals(CreateImageVariationRequestSize other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/CreateMessageRequest.Serialization.cs b/.dotnet/src/Generated/Models/CreateMessageRequest.Serialization.cs new file mode 100644 index 000000000..447fa0a9c --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateMessageRequest.Serialization.cs @@ -0,0 +1,196 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class CreateMessageRequest : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateMessageRequest)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("role"u8); + writer.WriteStringValue(Role.ToString()); + writer.WritePropertyName("content"u8); + writer.WriteStringValue(Content); + if (Optional.IsCollectionDefined(FileIds)) + { + writer.WritePropertyName("file_ids"u8); + writer.WriteStartArray(); + foreach (var item in FileIds) + { + writer.WriteStringValue(item); + } + writer.WriteEndArray(); + } + if (Optional.IsCollectionDefined(Metadata)) + { + if (Metadata != null) + { + writer.WritePropertyName("metadata"u8); + writer.WriteStartObject(); + foreach (var item in Metadata) + { + writer.WritePropertyName(item.Key); + writer.WriteStringValue(item.Value); + } + writer.WriteEndObject(); + } + else + { + writer.WriteNull("metadata"); + } + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + CreateMessageRequest IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateMessageRequest)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeCreateMessageRequest(document.RootElement, options); + } + + internal static CreateMessageRequest DeserializeCreateMessageRequest(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + CreateMessageRequestRole role = default; + string content = default; + IList fileIds = default; + IDictionary metadata = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("role"u8)) + { + role = new CreateMessageRequestRole(property.Value.GetString()); + continue; + } + if (property.NameEquals("content"u8)) + { + content = property.Value.GetString(); + continue; + } + if (property.NameEquals("file_ids"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(item.GetString()); + } + fileIds = array; + continue; + } + if (property.NameEquals("metadata"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + Dictionary dictionary = new Dictionary(); + foreach (var property0 in property.Value.EnumerateObject()) + { + dictionary.Add(property0.Name, property0.Value.GetString()); + } + metadata = dictionary; + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new CreateMessageRequest(role, content, fileIds ?? new ChangeTrackingList(), metadata ?? new ChangeTrackingDictionary(), serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(CreateMessageRequest)} does not support '{options.Format}' format."); + } + } + + CreateMessageRequest IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeCreateMessageRequest(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(CreateMessageRequest)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static CreateMessageRequest FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeCreateMessageRequest(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. 
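For reference, the `Write` implementation above produces a payload of the following shape. This is a short sketch using System.Text.Json directly; the file ID and metadata values are illustrative, not taken from this diff:

```csharp
using System;
using System.Text.Json;

// Illustrative wire shape matching the CreateMessageRequest serialization above.
var payload = new
{
    role = "user",
    content = "What does this file contain?",
    file_ids = new[] { "file-abc123" },   // optional, omitted when empty
    metadata = new { purpose = "demo" },  // optional, up to 16 key-value pairs
};
Console.WriteLine(JsonSerializer.Serialize(payload));
// {"role":"user","content":"...","file_ids":["file-abc123"],"metadata":{"purpose":"demo"}}
```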
+ internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/CreateMessageRequest.cs b/.dotnet/src/Generated/Models/CreateMessageRequest.cs new file mode 100644 index 000000000..b08262e6b --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateMessageRequest.cs @@ -0,0 +1,102 @@ +// + +using System; +using System.Collections.Generic; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// The CreateMessageRequest. + internal partial class CreateMessageRequest + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The content of the message. + /// is null. + public CreateMessageRequest(string content) + { + Argument.AssertNotNull(content, nameof(content)); + + Content = content; + FileIds = new ChangeTrackingList(); + Metadata = new ChangeTrackingDictionary(); + } + + /// Initializes a new instance of . + /// The role of the entity that is creating the message. Currently only `user` is supported. + /// The content of the message. + /// + /// A list of [File](/docs/api-reference/files) IDs that the message should use. There can be a + /// maximum of 10 files attached to a message. Useful for tools like `retrieval` and + /// `code_interpreter` that can access and use files. + /// + /// + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + /// additional information about the object in a structured format. Keys can be a maximum of 64 + /// characters long and values can be a maximum of 512 characters long. + /// + /// Keeps track of any properties unknown to the library. + internal CreateMessageRequest(CreateMessageRequestRole role, string content, IList fileIds, IDictionary metadata, IDictionary serializedAdditionalRawData) + { + Role = role; + Content = content; + FileIds = fileIds; + Metadata = metadata; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal CreateMessageRequest() + { + } + + /// The role of the entity that is creating the message. Currently only `user` is supported. + public CreateMessageRequestRole Role { get; } = CreateMessageRequestRole.User; + + /// The content of the message. + public string Content { get; } + /// + /// A list of [File](/docs/api-reference/files) IDs that the message should use. There can be a + /// maximum of 10 files attached to a message. Useful for tools like `retrieval` and + /// `code_interpreter` that can access and use files. + /// + public IList FileIds { get; } + /// + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + /// additional information about the object in a structured format.
Keys can be a maximum of 64 + /// characters long and values can be a maximum of 512 characters long. + /// + public IDictionary Metadata { get; set; } + } +} diff --git a/.dotnet/src/Generated/Models/CreateMessageRequestRole.cs b/.dotnet/src/Generated/Models/CreateMessageRequestRole.cs new file mode 100644 index 000000000..d0118f1cb --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateMessageRequestRole.cs @@ -0,0 +1,43 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Internal.Models +{ + /// The CreateMessageRequest_role. + internal readonly partial struct CreateMessageRequestRole : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public CreateMessageRequestRole(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string UserValue = "user"; + + /// user. + public static CreateMessageRequestRole User { get; } = new CreateMessageRequestRole(UserValue); + /// Determines if two values are the same. + public static bool operator ==(CreateMessageRequestRole left, CreateMessageRequestRole right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(CreateMessageRequestRole left, CreateMessageRequestRole right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator CreateMessageRequestRole(string value) => new CreateMessageRequestRole(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is CreateMessageRequestRole other && Equals(other); + /// + public bool Equals(CreateMessageRequestRole other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/CreateModerationRequest.Serialization.cs b/.dotnet/src/Generated/Models/CreateModerationRequest.Serialization.cs new file mode 100644 index 000000000..2379ab7a5 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateModerationRequest.Serialization.cs @@ -0,0 +1,152 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class CreateModerationRequest : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ?
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateModerationRequest)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("input"u8); +#if NET6_0_OR_GREATER + writer.WriteRawValue(Input); +#else + using (JsonDocument document = JsonDocument.Parse(Input)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + if (Optional.IsDefined(Model)) + { + writer.WritePropertyName("model"u8); + writer.WriteStringValue(Model.Value.ToString()); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + CreateModerationRequest IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateModerationRequest)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeCreateModerationRequest(document.RootElement, options); + } + + internal static CreateModerationRequest DeserializeCreateModerationRequest(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + BinaryData input = default; + CreateModerationRequestModel? model = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("input"u8)) + { + input = BinaryData.FromString(property.Value.GetRawText()); + continue; + } + if (property.NameEquals("model"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + model = new CreateModerationRequestModel(property.Value.GetString()); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new CreateModerationRequest(input, model, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(CreateModerationRequest)} does not support '{options.Format}' format."); + } + } + + CreateModerationRequest IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeCreateModerationRequest(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(CreateModerationRequest)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static CreateModerationRequest FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeCreateModerationRequest(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/CreateModerationRequest.cs b/.dotnet/src/Generated/Models/CreateModerationRequest.cs new file mode 100644 index 000000000..e1ed62cf0 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateModerationRequest.cs @@ -0,0 +1,127 @@ +// + +using System; +using System.Collections.Generic; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// The CreateModerationRequest. + internal partial class CreateModerationRequest + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The input text to classify. + /// is null. + public CreateModerationRequest(BinaryData input) + { + Argument.AssertNotNull(input, nameof(input)); + + Input = input; + } + + /// Initializes a new instance of . + /// The input text to classify. + /// + /// Two content moderation models are available: `text-moderation-stable` and + /// `text-moderation-latest`. The default is `text-moderation-latest`, which will be automatically + /// upgraded over time. This ensures you are always using our most accurate model. If you use + /// `text-moderation-stable`, we will provide advance notice before updating the model. Accuracy + /// of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`. + /// + /// Keeps track of any properties unknown to the library. + internal CreateModerationRequest(BinaryData input, CreateModerationRequestModel? model, IDictionary serializedAdditionalRawData) + { + Input = input; + Model = model; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal CreateModerationRequest() + { + } + + /// + /// The input text to classify. + /// + /// To assign an object to this property use . + /// + /// + /// To assign an already formatted json string to this property use .
+ /// + /// + /// Supported types: + /// + /// + /// + /// + /// where T is of type + /// + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + public BinaryData Input { get; } + /// + /// Two content moderation models are available: `text-moderation-stable` and + /// `text-moderation-latest`. The default is `text-moderation-latest`, which will be automatically + /// upgraded over time. This ensures you are always using our most accurate model. If you use + /// `text-moderation-stable`, we will provide advance notice before updating the model. Accuracy + /// of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`. + /// + public CreateModerationRequestModel? Model { get; set; } + } +} diff --git a/.dotnet/src/Generated/Models/CreateModerationRequestModel.cs b/.dotnet/src/Generated/Models/CreateModerationRequestModel.cs new file mode 100644 index 000000000..12a283230 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateModerationRequestModel.cs @@ -0,0 +1,46 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Internal.Models +{ + /// The CreateModerationRequestModel. + internal readonly partial struct CreateModerationRequestModel : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public CreateModerationRequestModel(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string TextModerationLatestValue = "text-moderation-latest"; + private const string TextModerationStableValue = "text-moderation-stable"; + + /// text-moderation-latest. + public static CreateModerationRequestModel TextModerationLatest { get; } = new CreateModerationRequestModel(TextModerationLatestValue); + /// text-moderation-stable. + public static CreateModerationRequestModel TextModerationStable { get; } = new CreateModerationRequestModel(TextModerationStableValue); + /// Determines if two values are the same. + public static bool operator ==(CreateModerationRequestModel left, CreateModerationRequestModel right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(CreateModerationRequestModel left, CreateModerationRequestModel right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator CreateModerationRequestModel(string value) => new CreateModerationRequestModel(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is CreateModerationRequestModel other && Equals(other); + /// + public bool Equals(CreateModerationRequestModel other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ??
0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/CreateModerationResponse.Serialization.cs b/.dotnet/src/Generated/Models/CreateModerationResponse.Serialization.cs new file mode 100644 index 000000000..c88e8b991 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateModerationResponse.Serialization.cs @@ -0,0 +1,156 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class CreateModerationResponse : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateModerationResponse)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("id"u8); + writer.WriteStringValue(Id); + writer.WritePropertyName("model"u8); + writer.WriteStringValue(Model); + writer.WritePropertyName("results"u8); + writer.WriteStartArray(); + foreach (var item in Results) + { + writer.WriteObjectValue(item); + } + writer.WriteEndArray(); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + CreateModerationResponse IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateModerationResponse)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeCreateModerationResponse(document.RootElement, options); + } + + internal static CreateModerationResponse DeserializeCreateModerationResponse(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string id = default; + string model = default; + IReadOnlyList results = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("id"u8)) + { + id = property.Value.GetString(); + continue; + } + if (property.NameEquals("model"u8)) + { + model = property.Value.GetString(); + continue; + } + if (property.NameEquals("results"u8)) + { + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(CreateModerationResponseResult.DeserializeCreateModerationResponseResult(item, options)); + } + results = array; + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new CreateModerationResponse(id, model, results, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(CreateModerationResponse)} does not support '{options.Format}' format."); + } + } + + CreateModerationResponse IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeCreateModerationResponse(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(CreateModerationResponse)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static CreateModerationResponse FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeCreateModerationResponse(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. 
+ internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/CreateModerationResponse.cs b/.dotnet/src/Generated/Models/CreateModerationResponse.cs new file mode 100644 index 000000000..aaacf1180 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateModerationResponse.cs @@ -0,0 +1,86 @@ +// + +using System; +using System.Collections.Generic; +using System.Linq; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// Represents a policy compliance report by OpenAI's content moderation model against a given input. + internal partial class CreateModerationResponse + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The unique identifier for the moderation request. + /// The model used to generate the moderation results. + /// A list of moderation objects. + /// , or is null. + internal CreateModerationResponse(string id, string model, IEnumerable results) + { + Argument.AssertNotNull(id, nameof(id)); + Argument.AssertNotNull(model, nameof(model)); + Argument.AssertNotNull(results, nameof(results)); + + Id = id; + Model = model; + Results = results.ToList(); + } + + /// Initializes a new instance of . + /// The unique identifier for the moderation request. + /// The model used to generate the moderation results. + /// A list of moderation objects. + /// Keeps track of any properties unknown to the library. + internal CreateModerationResponse(string id, string model, IReadOnlyList results, IDictionary serializedAdditionalRawData) + { + Id = id; + Model = model; + Results = results; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal CreateModerationResponse() + { + } + + /// The unique identifier for the moderation request. + public string Id { get; } + /// The model used to generate the moderation results. + public string Model { get; } + /// A list of moderation objects.
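Once deserialized, the response is consumed through the read-only properties whose definitions continue below; a minimal hypothetical sketch (the helper is illustrative and assumes access to these internal models, with the response produced by `FromResponse` above):

```csharp
using System;

// Hypothetical helper inside the library, not part of this diff.
internal static class ModerationReporting
{
    public static void LogFlags(CreateModerationResponse response)
    {
        foreach (CreateModerationResponseResult result in response.Results)
        {
            if (result.Flagged)
            {
                Console.WriteLine($"Request {response.Id} was flagged by model {response.Model}.");
            }
        }
    }
}
```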
+ public IReadOnlyList Results { get; } + } +} diff --git a/.dotnet/src/Generated/Models/CreateModerationResponseResult.Serialization.cs b/.dotnet/src/Generated/Models/CreateModerationResponseResult.Serialization.cs new file mode 100644 index 000000000..d55831a2e --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateModerationResponseResult.Serialization.cs @@ -0,0 +1,146 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class CreateModerationResponseResult : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateModerationResponseResult)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("flagged"u8); + writer.WriteBooleanValue(Flagged); + writer.WritePropertyName("categories"u8); + writer.WriteObjectValue(Categories); + writer.WritePropertyName("category_scores"u8); + writer.WriteObjectValue(CategoryScores); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + CreateModerationResponseResult IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateModerationResponseResult)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeCreateModerationResponseResult(document.RootElement, options); + } + + internal static CreateModerationResponseResult DeserializeCreateModerationResponseResult(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + bool flagged = default; + CreateModerationResponseResultCategories categories = default; + CreateModerationResponseResultCategoryScores categoryScores = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("flagged"u8)) + { + flagged = property.Value.GetBoolean(); + continue; + } + if (property.NameEquals("categories"u8)) + { + categories = CreateModerationResponseResultCategories.DeserializeCreateModerationResponseResultCategories(property.Value, options); + continue; + } + if (property.NameEquals("category_scores"u8)) + { + categoryScores = CreateModerationResponseResultCategoryScores.DeserializeCreateModerationResponseResultCategoryScores(property.Value, options); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new CreateModerationResponseResult(flagged, categories, categoryScores, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(CreateModerationResponseResult)} does not support '{options.Format}' format."); + } + } + + CreateModerationResponseResult IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeCreateModerationResponseResult(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(CreateModerationResponseResult)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static CreateModerationResponseResult FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeCreateModerationResponseResult(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. 
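For context, a sketch of the per-result JSON this deserializer accepts, abbreviated for readability (the real `categories` and `category_scores` objects carry the full set of keys written out in the `Write` method above, and the values shown are invented):

```csharp
using System;
using System.Text.Json;

// Abbreviated, illustrative input for DeserializeCreateModerationResponseResult.
string json = """
    {
      "flagged": true,
      "categories": { "hate": false, "violence": true },
      "category_scores": { "hate": 0.0002, "violence": 0.91 }
    }
    """;
using JsonDocument document = JsonDocument.Parse(json);
Console.WriteLine(document.RootElement.GetProperty("flagged").GetBoolean()); // True
```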
+ internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/CreateModerationResponseResult.cs b/.dotnet/src/Generated/Models/CreateModerationResponseResult.cs new file mode 100644 index 000000000..e9ccb59c9 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateModerationResponseResult.cs @@ -0,0 +1,84 @@ +// + +using System; +using System.Collections.Generic; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// The CreateModerationResponseResult. + internal partial class CreateModerationResponseResult + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// Whether the content violates [OpenAI's usage policies](/policies/usage-policies). + /// A list of the categories, and whether they are flagged or not. + /// A list of the categories along with their scores as predicted by the model. + /// or is null. + internal CreateModerationResponseResult(bool flagged, CreateModerationResponseResultCategories categories, CreateModerationResponseResultCategoryScores categoryScores) + { + Argument.AssertNotNull(categories, nameof(categories)); + Argument.AssertNotNull(categoryScores, nameof(categoryScores)); + + Flagged = flagged; + Categories = categories; + CategoryScores = categoryScores; + } + + /// Initializes a new instance of . + /// Whether the content violates [OpenAI's usage policies](/policies/usage-policies). + /// A list of the categories, and whether they are flagged or not. + /// A list of the categories along with their scores as predicted by the model. + /// Keeps track of any properties unknown to the library. + internal CreateModerationResponseResult(bool flagged, CreateModerationResponseResultCategories categories, CreateModerationResponseResultCategoryScores categoryScores, IDictionary serializedAdditionalRawData) + { + Flagged = flagged; + Categories = categories; + CategoryScores = categoryScores; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal CreateModerationResponseResult() + { + } + + /// Whether the content violates [OpenAI's usage policies](/policies/usage-policies). + public bool Flagged { get; } + /// A list of the categories, and whether they are flagged or not. + public CreateModerationResponseResultCategories Categories { get; } + /// A list of the categories along with their scores as predicted by the model.
+ public CreateModerationResponseResultCategoryScores CategoryScores { get; } + } +} diff --git a/.dotnet/src/Generated/Models/CreateModerationResponseResultCategories.Serialization.cs b/.dotnet/src/Generated/Models/CreateModerationResponseResultCategories.Serialization.cs new file mode 100644 index 000000000..62fa766f1 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateModerationResponseResultCategories.Serialization.cs @@ -0,0 +1,222 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class CreateModerationResponseResultCategories : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateModerationResponseResultCategories)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("hate"u8); + writer.WriteBooleanValue(Hate); + writer.WritePropertyName("hate/threatening"u8); + writer.WriteBooleanValue(HateThreatening); + writer.WritePropertyName("harassment"u8); + writer.WriteBooleanValue(Harassment); + writer.WritePropertyName("harassment/threatening"u8); + writer.WriteBooleanValue(HarassmentThreatening); + writer.WritePropertyName("self-harm"u8); + writer.WriteBooleanValue(SelfHarm); + writer.WritePropertyName("self-harm/intent"u8); + writer.WriteBooleanValue(SelfHarmIntent); + writer.WritePropertyName("self-harm/instructions"u8); + writer.WriteBooleanValue(SelfHarmInstructions); + writer.WritePropertyName("sexual"u8); + writer.WriteBooleanValue(Sexual); + writer.WritePropertyName("sexual/minors"u8); + writer.WriteBooleanValue(SexualMinors); + writer.WritePropertyName("violence"u8); + writer.WriteBooleanValue(Violence); + writer.WritePropertyName("violence/graphic"u8); + writer.WriteBooleanValue(ViolenceGraphic); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + CreateModerationResponseResultCategories IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateModerationResponseResultCategories)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeCreateModerationResponseResultCategories(document.RootElement, options); + } + + internal static CreateModerationResponseResultCategories DeserializeCreateModerationResponseResultCategories(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + bool hate = default; + bool hateThreatening = default; + bool harassment = default; + bool harassmentThreatening = default; + bool selfHarm = default; + bool selfHarmIntent = default; + bool selfHarmInstructions = default; + bool sexual = default; + bool sexualMinors = default; + bool violence = default; + bool violenceGraphic = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("hate"u8)) + { + hate = property.Value.GetBoolean(); + continue; + } + if (property.NameEquals("hate/threatening"u8)) + { + hateThreatening = property.Value.GetBoolean(); + continue; + } + if (property.NameEquals("harassment"u8)) + { + harassment = property.Value.GetBoolean(); + continue; + } + if (property.NameEquals("harassment/threatening"u8)) + { + harassmentThreatening = property.Value.GetBoolean(); + continue; + } + if (property.NameEquals("self-harm"u8)) + { + selfHarm = property.Value.GetBoolean(); + continue; + } + if (property.NameEquals("self-harm/intent"u8)) + { + selfHarmIntent = property.Value.GetBoolean(); + continue; + } + if (property.NameEquals("self-harm/instructions"u8)) + { + selfHarmInstructions = property.Value.GetBoolean(); + continue; + } + if (property.NameEquals("sexual"u8)) + { + sexual = property.Value.GetBoolean(); + continue; + } + if (property.NameEquals("sexual/minors"u8)) + { + sexualMinors = property.Value.GetBoolean(); + continue; + } + if (property.NameEquals("violence"u8)) + { + violence = property.Value.GetBoolean(); + continue; + } + if (property.NameEquals("violence/graphic"u8)) + { + violenceGraphic = property.Value.GetBoolean(); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new CreateModerationResponseResultCategories( + hate, + hateThreatening, + harassment, + harassmentThreatening, + selfHarm, + selfHarmIntent, + selfHarmInstructions, + sexual, + sexualMinors, + violence, + violenceGraphic, + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(CreateModerationResponseResultCategories)} does not support '{options.Format}' format."); + } + } + + CreateModerationResponseResultCategories IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeCreateModerationResponseResultCategories(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(CreateModerationResponseResultCategories)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static CreateModerationResponseResultCategories FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeCreateModerationResponseResultCategories(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/CreateModerationResponseResultCategories.cs b/.dotnet/src/Generated/Models/CreateModerationResponseResultCategories.cs new file mode 100644 index 000000000..2bdc02449 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateModerationResponseResultCategories.cs @@ -0,0 +1,187 @@ +// + +using System; +using System.Collections.Generic; + +namespace OpenAI.Internal.Models +{ + /// The CreateModerationResponseResultCategories. + internal partial class CreateModerationResponseResultCategories + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// Content that expresses, incites, or promotes hate based on race, gender, ethnicity, + /// religion, nationality, sexual orientation, disability status, or caste. Hateful content + /// aimed at non-protected groups (e.g., chess players) is harassment. + /// + /// + /// Hateful content that also includes violence or serious harm towards the targeted group + /// based on race, gender, ethnicity, religion, nationality, sexual orientation, disability + /// status, or caste. + /// + /// Content that expresses, incites, or promotes harassing language towards any target. + /// Harassment content that also includes violence or serious harm towards any target. + /// + /// Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting, + /// and eating disorders. + /// + /// + /// Content where the speaker expresses that they are engaging or intend to engage in acts of + /// self-harm, such as suicide, cutting, and eating disorders. + /// + /// + /// Content that encourages performing acts of self-harm, such as suicide, cutting, and eating + /// disorders, or that gives instructions or advice on how to commit such acts.
+ /// + /// + /// Content meant to arouse sexual excitement, such as the description of sexual activity, or + /// that promotes sexual services (excluding sex education and wellness). + /// + /// Sexual content that includes an individual who is under 18 years old. + /// Content that depicts death, violence, or physical injury. + /// Content that depicts death, violence, or physical injury in graphic detail. + internal CreateModerationResponseResultCategories(bool hate, bool hateThreatening, bool harassment, bool harassmentThreatening, bool selfHarm, bool selfHarmIntent, bool selfHarmInstructions, bool sexual, bool sexualMinors, bool violence, bool violenceGraphic) + { + Hate = hate; + HateThreatening = hateThreatening; + Harassment = harassment; + HarassmentThreatening = harassmentThreatening; + SelfHarm = selfHarm; + SelfHarmIntent = selfHarmIntent; + SelfHarmInstructions = selfHarmInstructions; + Sexual = sexual; + SexualMinors = sexualMinors; + Violence = violence; + ViolenceGraphic = violenceGraphic; + } + + /// Initializes a new instance of . + /// + /// Content that expresses, incites, or promotes hate based on race, gender, ethnicity, + /// religion, nationality, sexual orientation, disability status, or caste. Hateful content + /// aimed at non-protected groups (e.g., chess players) is harassment. + /// + /// + /// Hateful content that also includes violence or serious harm towards the targeted group + /// based on race, gender, ethnicity, religion, nationality, sexual orientation, disability + /// status, or caste. + /// + /// Content that expresses, incites, or promotes harassing language towards any target. + /// Harassment content that also includes violence or serious harm towards any target. + /// + /// Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting, + /// and eating disorders. + /// + /// + /// Content where the speaker expresses that they are engaging or intend to engage in acts of + /// self-harm, such as suicide, cutting, and eating disorders. + /// + /// + /// Content that encourages performing acts of self-harm, such as suicide, cutting, and eating + /// disorders, or that gives instructions or advice on how to commit such acts. + /// + /// + /// Content meant to arouse sexual excitement, such as the description of sexual activity, or + /// that promotes sexual services (excluding sex education and wellness). + /// + /// Sexual content that includes an individual who is under 18 years old. + /// Content that depicts death, violence, or physical injury. + /// Content that depicts death, violence, or physical injury in graphic detail. + /// Keeps track of any properties unknown to the library. + internal CreateModerationResponseResultCategories(bool hate, bool hateThreatening, bool harassment, bool harassmentThreatening, bool selfHarm, bool selfHarmIntent, bool selfHarmInstructions, bool sexual, bool sexualMinors, bool violence, bool violenceGraphic, IDictionary serializedAdditionalRawData) + { + Hate = hate; + HateThreatening = hateThreatening; + Harassment = harassment; + HarassmentThreatening = harassmentThreatening; + SelfHarm = selfHarm; + SelfHarmIntent = selfHarmIntent; + SelfHarmInstructions = selfHarmInstructions; + Sexual = sexual; + SexualMinors = sexualMinors; + Violence = violence; + ViolenceGraphic = violenceGraphic; + _serializedAdditionalRawData = serializedAdditionalRawData; + }
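The `_serializedAdditionalRawData` dictionary is what makes these generated models round-trip losslessly: with the plain JSON format (`"J"`), properties the library does not recognize are captured on read and written back on serialization, while the wire format (`"W"`) used by client calls skips that capture, as the deserializer above shows. A small sketch, assuming in-assembly access to the internal type (the extra JSON key is hypothetical):

```csharp
using System;
using System.ClientModel.Primitives;
using OpenAI.Internal.Models;

BinaryData json = BinaryData.FromString(
    "{\"hate\":false,\"violence\":true,\"brand_new_category\":true}"); // extra key is hypothetical

// The default options use the "J" format, so the unknown key is captured...
CreateModerationResponseResultCategories categories =
    ModelReaderWriter.Read<CreateModerationResponseResultCategories>(json);

// ...and written back out, so "brand_new_category" survives the round trip.
BinaryData roundTripped = ModelReaderWriter.Write(categories);
```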
+ + /// Initializes a new instance of for deserialization. + internal CreateModerationResponseResultCategories() + { + } + + /// + /// Content that expresses, incites, or promotes hate based on race, gender, ethnicity, + /// religion, nationality, sexual orientation, disability status, or caste. Hateful content + /// aimed at non-protected groups (e.g., chess players) is harassment. + /// + public bool Hate { get; } + /// + /// Hateful content that also includes violence or serious harm towards the targeted group + /// based on race, gender, ethnicity, religion, nationality, sexual orientation, disability + /// status, or caste. + /// + public bool HateThreatening { get; } + /// Content that expresses, incites, or promotes harassing language towards any target. + public bool Harassment { get; } + /// Harassment content that also includes violence or serious harm towards any target. + public bool HarassmentThreatening { get; } + /// + /// Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting, + /// and eating disorders. + /// + public bool SelfHarm { get; } + /// + /// Content where the speaker expresses that they are engaging or intend to engage in acts of + /// self-harm, such as suicide, cutting, and eating disorders. + /// + public bool SelfHarmIntent { get; } + /// + /// Content that encourages performing acts of self-harm, such as suicide, cutting, and eating + /// disorders, or that gives instructions or advice on how to commit such acts. + /// + public bool SelfHarmInstructions { get; } + /// + /// Content meant to arouse sexual excitement, such as the description of sexual activity, or + /// that promotes sexual services (excluding sex education and wellness). + /// + public bool Sexual { get; } + /// Sexual content that includes an individual who is under 18 years old. + public bool SexualMinors { get; } + /// Content that depicts death, violence, or physical injury. + public bool Violence { get; } + /// Content that depicts death, violence, or physical injury in graphic detail. + public bool ViolenceGraphic { get; } + } +} diff --git a/.dotnet/src/Generated/Models/CreateModerationResponseResultCategoryScores.Serialization.cs b/.dotnet/src/Generated/Models/CreateModerationResponseResultCategoryScores.Serialization.cs new file mode 100644 index 000000000..3ea9f0a1f --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateModerationResponseResultCategoryScores.Serialization.cs @@ -0,0 +1,222 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class CreateModerationResponseResultCategoryScores : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ?
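// A note on the format codes used throughout these generated serializers: "J" is plain JSON,
// while "W" is the wire format that client pipeline calls pass in. GetFormatFromOptions returns
// "J" for these models, so the ternary below resolves "W" to "J" and both paths emit the same
// JSON; the only practical difference is that unknown-property capture is skipped for "W".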
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateModerationResponseResultCategoryScores)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("hate"u8); + writer.WriteNumberValue(Hate); + writer.WritePropertyName("hate/threatening"u8); + writer.WriteNumberValue(HateThreatening); + writer.WritePropertyName("harassment"u8); + writer.WriteNumberValue(Harassment); + writer.WritePropertyName("harassment/threatening"u8); + writer.WriteNumberValue(HarassmentThreatening); + writer.WritePropertyName("self-harm"u8); + writer.WriteNumberValue(SelfHarm); + writer.WritePropertyName("self-harm/intent"u8); + writer.WriteNumberValue(SelfHarmIntent); + writer.WritePropertyName("self-harm/instructions"u8); + writer.WriteNumberValue(SelfHarmInstructions); + writer.WritePropertyName("sexual"u8); + writer.WriteNumberValue(Sexual); + writer.WritePropertyName("sexual/minors"u8); + writer.WriteNumberValue(SexualMinors); + writer.WritePropertyName("violence"u8); + writer.WriteNumberValue(Violence); + writer.WritePropertyName("violence/graphic"u8); + writer.WriteNumberValue(ViolenceGraphic); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + CreateModerationResponseResultCategoryScores IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateModerationResponseResultCategoryScores)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeCreateModerationResponseResultCategoryScores(document.RootElement, options); + } + + internal static CreateModerationResponseResultCategoryScores DeserializeCreateModerationResponseResultCategoryScores(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + double hate = default; + double hateThreatening = default; + double harassment = default; + double harassmentThreatening = default; + double selfHarm = default; + double selfHarmIntent = default; + double selfHarmInstructions = default; + double sexual = default; + double sexualMinors = default; + double violence = default; + double violenceGraphic = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("hate"u8)) + { + hate = property.Value.GetDouble(); + continue; + } + if (property.NameEquals("hate/threatening"u8)) + { + hateThreatening = property.Value.GetDouble(); + continue; + } + if (property.NameEquals("harassment"u8)) + { + harassment = property.Value.GetDouble(); + continue; + } + if (property.NameEquals("harassment/threatening"u8)) + { + harassmentThreatening = property.Value.GetDouble(); + continue; + } + if (property.NameEquals("self-harm"u8)) + { + selfHarm = property.Value.GetDouble(); + continue; + } + if (property.NameEquals("self-harm/intent"u8)) + { + selfHarmIntent = property.Value.GetDouble(); + continue; + } + if (property.NameEquals("self-harm/instructions"u8)) + { + selfHarmInstructions = property.Value.GetDouble(); + continue; + } + if (property.NameEquals("sexual"u8)) + { + sexual = property.Value.GetDouble(); + continue; + } + if (property.NameEquals("sexual/minors"u8)) + { + sexualMinors = property.Value.GetDouble(); + continue; + } + if (property.NameEquals("violence"u8)) + { + violence = property.Value.GetDouble(); + continue; + } + if (property.NameEquals("violence/graphic"u8)) + { + violenceGraphic = property.Value.GetDouble(); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new CreateModerationResponseResultCategoryScores( + hate, + hateThreatening, + harassment, + harassmentThreatening, + selfHarm, + selfHarmIntent, + selfHarmInstructions, + sexual, + sexualMinors, + violence, + violenceGraphic, + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(CreateModerationResponseResultCategoryScores)} does not support '{options.Format}' format."); + } + } + + CreateModerationResponseResultCategoryScores IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeCreateModerationResponseResultCategoryScores(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(CreateModerationResponseResultCategoryScores)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static CreateModerationResponseResultCategoryScores FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeCreateModerationResponseResultCategoryScores(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/CreateModerationResponseResultCategoryScores.cs b/.dotnet/src/Generated/Models/CreateModerationResponseResultCategoryScores.cs new file mode 100644 index 000000000..17d49937e --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateModerationResponseResultCategoryScores.cs @@ -0,0 +1,127 @@ +// + +using System; +using System.Collections.Generic; + +namespace OpenAI.Internal.Models +{ + /// The CreateModerationResponseResultCategoryScores. + internal partial class CreateModerationResponseResultCategoryScores + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The score for the category 'hate'. + /// The score for the category 'hate/threatening'. + /// The score for the category 'harassment'. + /// The score for the category 'harassment/threatening'. + /// The score for the category 'self-harm'. + /// The score for the category 'self-harm/intent'. + /// The score for the category 'self-harm/instructions'. + /// The score for the category 'sexual'. + /// The score for the category 'sexual/minors'. + /// The score for the category 'violence'. + /// The score for the category 'violence/graphic'.
+ internal CreateModerationResponseResultCategoryScores(double hate, double hateThreatening, double harassment, double harassmentThreatening, double selfHarm, double selfHarmIntent, double selfHarmInstructions, double sexual, double sexualMinors, double violence, double violenceGraphic) + { + Hate = hate; + HateThreatening = hateThreatening; + Harassment = harassment; + HarassmentThreatening = harassmentThreatening; + SelfHarm = selfHarm; + SelfHarmIntent = selfHarmIntent; + SelfHarmInstructions = selfHarmInstructions; + Sexual = sexual; + SexualMinors = sexualMinors; + Violence = violence; + ViolenceGraphic = violenceGraphic; + } + + /// Initializes a new instance of . + /// The score for the category 'hate'. + /// The score for the category 'hate/threatening'. + /// The score for the category 'harassment'. + /// The score for the category 'harassment/threatening'. + /// The score for the category 'self-harm'. + /// The score for the category 'self-harm/intent'. + /// The score for the category 'self-harm/instructions'. + /// The score for the category 'sexual'. + /// The score for the category 'sexual/minors'. + /// The score for the category 'violence'. + /// The score for the category 'violence/graphic'. + /// Keeps track of any properties unknown to the library. + internal CreateModerationResponseResultCategoryScores(double hate, double hateThreatening, double harassment, double harassmentThreatening, double selfHarm, double selfHarmIntent, double selfHarmInstructions, double sexual, double sexualMinors, double violence, double violenceGraphic, IDictionary serializedAdditionalRawData) + { + Hate = hate; + HateThreatening = hateThreatening; + Harassment = harassment; + HarassmentThreatening = harassmentThreatening; + SelfHarm = selfHarm; + SelfHarmIntent = selfHarmIntent; + SelfHarmInstructions = selfHarmInstructions; + Sexual = sexual; + SexualMinors = sexualMinors; + Violence = violence; + ViolenceGraphic = violenceGraphic; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal CreateModerationResponseResultCategoryScores() + { + } + + /// The score for the category 'hate'. + public double Hate { get; } + /// The score for the category 'hate/threatening'. + public double HateThreatening { get; } + /// The score for the category 'harassment'. + public double Harassment { get; } + /// The score for the category 'harassment/threatening'. + public double HarassmentThreatening { get; } + /// The score for the category 'self-harm'. + public double SelfHarm { get; } + /// The score for the category 'self-harm/intent'. + public double SelfHarmIntent { get; } + /// The score for the category 'self-harm/instructions'. + public double SelfHarmInstructions { get; } + /// The score for the category 'sexual'. + public double Sexual { get; } + /// The score for the category 'sexual/minors'. + public double SexualMinors { get; } + /// The score for the category 'violence'. + public double Violence { get; } + /// The score for the category 'violence/graphic'.
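Because the boolean flags in the categories model reflect the service's own thresholds, a consumer that wants stricter moderation can re-threshold these raw scores itself. A hypothetical sketch (the `0.5` cutoff is an illustrative choice, not an API-defined value, and `result` is a deserialized `CreateModerationResponseResult` as in the earlier example):

```csharp
CreateModerationResponseResultCategoryScores scores = result.CategoryScores;

const double cutoff = 0.5; // illustrative threshold, not part of the API
bool escalate = scores.Violence >= cutoff || scores.ViolenceGraphic >= cutoff;
```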
+ public double ViolenceGraphic { get; } + } +} diff --git a/.dotnet/src/Generated/Models/CreateRunRequest.Serialization.cs b/.dotnet/src/Generated/Models/CreateRunRequest.Serialization.cs new file mode 100644 index 000000000..15ade4fd0 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateRunRequest.Serialization.cs @@ -0,0 +1,290 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class CreateRunRequest : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateRunRequest)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("assistant_id"u8); + writer.WriteStringValue(AssistantId); + if (Optional.IsDefined(Model)) + { + if (Model != null) + { + writer.WritePropertyName("model"u8); + writer.WriteStringValue(Model); + } + else + { + writer.WriteNull("model"); + } + } + if (Optional.IsDefined(Instructions)) + { + if (Instructions != null) + { + writer.WritePropertyName("instructions"u8); + writer.WriteStringValue(Instructions); + } + else + { + writer.WriteNull("instructions"); + } + } + if (Optional.IsDefined(AdditionalInstructions)) + { + if (AdditionalInstructions != null) + { + writer.WritePropertyName("additional_instructions"u8); + writer.WriteStringValue(AdditionalInstructions); + } + else + { + writer.WriteNull("additional_instructions"); + } + } + if (Optional.IsCollectionDefined(Tools)) + { + if (Tools != null) + { + writer.WritePropertyName("tools"u8); + writer.WriteStartArray(); + foreach (var item in Tools) + { + if (item == null) + { + writer.WriteNullValue(); + continue; + } +#if NET6_0_OR_GREATER + writer.WriteRawValue(item); +#else + using (JsonDocument document = JsonDocument.Parse(item)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + writer.WriteEndArray(); + } + else + { + writer.WriteNull("tools"); + } + } + if (Optional.IsCollectionDefined(Metadata)) + { + if (Metadata != null) + { + writer.WritePropertyName("metadata"u8); + writer.WriteStartObject(); + foreach (var item in Metadata) + { + writer.WritePropertyName(item.Key); + writer.WriteStringValue(item.Value); + } + writer.WriteEndObject(); + } + else + { + writer.WriteNull("metadata"); + } + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + CreateRunRequest IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateRunRequest)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeCreateRunRequest(document.RootElement, options); + } + + internal static CreateRunRequest DeserializeCreateRunRequest(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string assistantId = default; + string model = default; + string instructions = default; + string additionalInstructions = default; + IList tools = default; + IDictionary metadata = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("assistant_id"u8)) + { + assistantId = property.Value.GetString(); + continue; + } + if (property.NameEquals("model"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + model = null; + continue; + } + model = property.Value.GetString(); + continue; + } + if (property.NameEquals("instructions"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + instructions = null; + continue; + } + instructions = property.Value.GetString(); + continue; + } + if (property.NameEquals("additional_instructions"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + additionalInstructions = null; + continue; + } + additionalInstructions = property.Value.GetString(); + continue; + } + if (property.NameEquals("tools"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + if (item.ValueKind == JsonValueKind.Null) + { + array.Add(null); + } + else + { + array.Add(BinaryData.FromString(item.GetRawText())); + } + } + tools = array; + continue; + } + if (property.NameEquals("metadata"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + Dictionary dictionary = new Dictionary(); + foreach (var property0 in property.Value.EnumerateObject()) + { + dictionary.Add(property0.Name, property0.Value.GetString()); + } + metadata = dictionary; + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new CreateRunRequest( + assistantId, + model, + instructions, + additionalInstructions, + tools ?? new ChangeTrackingList(), + metadata ?? new ChangeTrackingDictionary(), + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(CreateRunRequest)} does not support '{options.Format}' format."); + } + } + + CreateRunRequest IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeCreateRunRequest(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(CreateRunRequest)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static CreateRunRequest FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeCreateRunRequest(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/CreateRunRequest.cs b/.dotnet/src/Generated/Models/CreateRunRequest.cs new file mode 100644 index 000000000..db7f2f182 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateRunRequest.cs @@ -0,0 +1,154 @@ +// + +using System; +using System.Collections.Generic; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// The CreateRunRequest. + internal partial class CreateRunRequest + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run. + /// is null. + public CreateRunRequest(string assistantId) + { + Argument.AssertNotNull(assistantId, nameof(assistantId)); + + AssistantId = assistantId; + Tools = new ChangeTrackingList(); + Metadata = new ChangeTrackingDictionary(); + } + + /// Initializes a new instance of . + /// The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run. + /// + /// The ID of the [Model](/docs/api-reference/models) to be used to execute this run. If a value + /// is provided here, it will override the model associated with the assistant. If not, the model + /// associated with the assistant will be used. + /// + /// + /// Overrides the [instructions](/docs/api-reference/assistants/createAssistant) of the assistant. + /// This is useful for modifying the behavior on a per-run basis. + /// + /// + /// Appends additional instructions at the end of the instructions for the run. This is useful for + /// modifying the behavior on a per-run basis without overriding other instructions. + /// + /// + /// Override the tools the assistant can use for this run. This is useful for modifying the + /// behavior on a per-run basis. + /// + /// + /// Set of 16 key-value pairs that can be attached to an object. 
This can be useful for storing + /// additional information about the object in a structured format. Keys can be a maximum of 64 + /// characters long and values can be a maximum of 512 characters long. + /// + /// Keeps track of any properties unknown to the library. + internal CreateRunRequest(string assistantId, string model, string instructions, string additionalInstructions, IList tools, IDictionary metadata, IDictionary serializedAdditionalRawData) + { + AssistantId = assistantId; + Model = model; + Instructions = instructions; + AdditionalInstructions = additionalInstructions; + Tools = tools; + Metadata = metadata; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal CreateRunRequest() + { + } + + /// The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run. + public string AssistantId { get; } + /// + /// The ID of the [Model](/docs/api-reference/models) to be used to execute this run. If a value + /// is provided here, it will override the model associated with the assistant. If not, the model + /// associated with the assistant will be used. + /// + public string Model { get; set; } + /// + /// Overrides the [instructions](/docs/api-reference/assistants/createAssistant) of the assistant. + /// This is useful for modifying the behavior on a per-run basis. + /// + public string Instructions { get; set; } + /// + /// Appends additional instructions at the end of the instructions for the run. This is useful for + /// modifying the behavior on a per-run basis without overriding other instructions. + /// + public string AdditionalInstructions { get; set; } + /// + /// Override the tools the assistant can use for this run. This is useful for modifying the + /// behavior on a per-run basis. + /// + /// To assign an object to the element of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + public IList Tools { get; set; } + /// + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + /// additional information about the object in a structured format. Keys can be a maximum of 64 + /// characters long and values can be a maximum of 512 characters long. + /// + public IDictionary Metadata { get; set; } + } +} diff --git a/.dotnet/src/Generated/Models/CreateSpeechRequest.Serialization.cs b/.dotnet/src/Generated/Models/CreateSpeechRequest.Serialization.cs new file mode 100644 index 000000000..d33a4ed9c --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateSpeechRequest.Serialization.cs @@ -0,0 +1,182 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class CreateSpeechRequest : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ?
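// A hypothetical usage sketch for CreateRunRequest (the assistant ID is a placeholder; the
// metadata limits restate the doc comments above):
//
//     var run = new CreateRunRequest("asst_placeholder") { Model = "gpt-4" };
//     run.Metadata.Add("ticket", "12345"); // at most 16 pairs; keys up to 64 chars, values up to 512
//     BinaryContent body = run.ToRequestBody();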
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateSpeechRequest)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("model"u8); + writer.WriteStringValue(Model.ToString()); + writer.WritePropertyName("input"u8); + writer.WriteStringValue(Input); + writer.WritePropertyName("voice"u8); + writer.WriteStringValue(Voice.ToString()); + if (Optional.IsDefined(ResponseFormat)) + { + writer.WritePropertyName("response_format"u8); + writer.WriteStringValue(ResponseFormat.Value.ToString()); + } + if (Optional.IsDefined(Speed)) + { + writer.WritePropertyName("speed"u8); + writer.WriteNumberValue(Speed.Value); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + CreateSpeechRequest IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateSpeechRequest)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeCreateSpeechRequest(document.RootElement, options); + } + + internal static CreateSpeechRequest DeserializeCreateSpeechRequest(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + CreateSpeechRequestModel model = default; + string input = default; + CreateSpeechRequestVoice voice = default; + CreateSpeechRequestResponseFormat? responseFormat = default; + double? speed = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("model"u8)) + { + model = new CreateSpeechRequestModel(property.Value.GetString()); + continue; + } + if (property.NameEquals("input"u8)) + { + input = property.Value.GetString(); + continue; + } + if (property.NameEquals("voice"u8)) + { + voice = new CreateSpeechRequestVoice(property.Value.GetString()); + continue; + } + if (property.NameEquals("response_format"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + responseFormat = new CreateSpeechRequestResponseFormat(property.Value.GetString()); + continue; + } + if (property.NameEquals("speed"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + speed = property.Value.GetDouble(); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new CreateSpeechRequest( + model, + input, + voice, + responseFormat, + speed, + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(CreateSpeechRequest)} does not support '{options.Format}' format."); + } + } + + CreateSpeechRequest IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeCreateSpeechRequest(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(CreateSpeechRequest)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static CreateSpeechRequest FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeCreateSpeechRequest(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/CreateSpeechRequest.cs b/.dotnet/src/Generated/Models/CreateSpeechRequest.cs new file mode 100644 index 000000000..591623c69 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateSpeechRequest.cs @@ -0,0 +1,103 @@ +// + +using System; +using System.Collections.Generic; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// The CreateSpeechRequest. + internal partial class CreateSpeechRequest + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// One of the available [TTS models](/docs/models/tts): `tts-1` or `tts-1-hd`. + /// The text to generate audio for. The maximum length is 4096 characters. + /// + /// The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, + /// `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the + /// [Text to speech guide](/docs/guides/text-to-speech/voice-options). + /// + /// is null. + public CreateSpeechRequest(CreateSpeechRequestModel model, string input, CreateSpeechRequestVoice voice) + { + Argument.AssertNotNull(input, nameof(input)); + + Model = model; + Input = input; + Voice = voice; + } + + /// Initializes a new instance of . + /// One of the available [TTS models](/docs/models/tts): `tts-1` or `tts-1-hd`. + /// The text to generate audio for. The maximum length is 4096 characters. + /// + /// The voice to use when generating the audio. 
Supported voices are `alloy`, `echo`, `fable`, + /// `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the + /// [Text to speech guide](/docs/guides/text-to-speech/voice-options). + /// + /// The format to return audio in. Supported formats are `mp3`, `opus`, `aac`, and `flac`. + /// The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is the default. + /// Keeps track of any properties unknown to the library. + internal CreateSpeechRequest(CreateSpeechRequestModel model, string input, CreateSpeechRequestVoice voice, CreateSpeechRequestResponseFormat? responseFormat, double? speed, IDictionary serializedAdditionalRawData) + { + Model = model; + Input = input; + Voice = voice; + ResponseFormat = responseFormat; + Speed = speed; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal CreateSpeechRequest() + { + } + + /// One of the available [TTS models](/docs/models/tts): `tts-1` or `tts-1-hd`. + public CreateSpeechRequestModel Model { get; } + /// The text to generate audio for. The maximum length is 4096 characters. + public string Input { get; } + /// + /// The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, + /// `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the + /// [Text to speech guide](/docs/guides/text-to-speech/voice-options). + /// + public CreateSpeechRequestVoice Voice { get; } + /// The format to return audio in. Supported formats are `mp3`, `opus`, `aac`, and `flac`. + public CreateSpeechRequestResponseFormat? ResponseFormat { get; set; } + /// The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is the default. + public double? Speed { get; set; } + } +} diff --git a/.dotnet/src/Generated/Models/CreateSpeechRequestModel.cs b/.dotnet/src/Generated/Models/CreateSpeechRequestModel.cs new file mode 100644 index 000000000..576c7c624 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateSpeechRequestModel.cs @@ -0,0 +1,46 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Internal.Models +{ + /// The CreateSpeechRequestModel. + internal readonly partial struct CreateSpeechRequestModel : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public CreateSpeechRequestModel(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string Tts1Value = "tts-1"; + private const string Tts1HdValue = "tts-1-hd"; + + /// tts-1. + public static CreateSpeechRequestModel Tts1 { get; } = new CreateSpeechRequestModel(Tts1Value); + /// tts-1-hd. + public static CreateSpeechRequestModel Tts1Hd { get; } = new CreateSpeechRequestModel(Tts1HdValue); + /// Determines if two values are the same. + public static bool operator ==(CreateSpeechRequestModel left, CreateSpeechRequestModel right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(CreateSpeechRequestModel left, CreateSpeechRequestModel right) => !left.Equals(right); + /// Converts a string to a .
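The struct above follows the extensible-enum pattern used throughout these generated models: known values are exposed as static properties, but any string can be wrapped, so newer service values do not break older client builds. A small sketch (the `tts-2` value is hypothetical):

```csharp
CreateSpeechRequestModel hd = CreateSpeechRequestModel.Tts1Hd;           // the known "tts-1-hd" value
CreateSpeechRequestModel future = new CreateSpeechRequestModel("tts-2"); // hypothetical future value
bool equal = hd == new CreateSpeechRequestModel("TTS-1-HD");             // true: Equals ignores case
```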
+ public static implicit operator CreateSpeechRequestModel(string value) => new CreateSpeechRequestModel(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is CreateSpeechRequestModel other && Equals(other); + /// + public bool Equals(CreateSpeechRequestModel other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/CreateSpeechRequestResponseFormat.cs b/.dotnet/src/Generated/Models/CreateSpeechRequestResponseFormat.cs new file mode 100644 index 000000000..186f59cc2 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateSpeechRequestResponseFormat.cs @@ -0,0 +1,52 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Internal.Models +{ + /// Enum for response_format in CreateSpeechRequest. + internal readonly partial struct CreateSpeechRequestResponseFormat : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public CreateSpeechRequestResponseFormat(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string Mp3Value = "mp3"; + private const string OpusValue = "opus"; + private const string AacValue = "aac"; + private const string FlacValue = "flac"; + + /// mp3. + public static CreateSpeechRequestResponseFormat Mp3 { get; } = new CreateSpeechRequestResponseFormat(Mp3Value); + /// opus. + public static CreateSpeechRequestResponseFormat Opus { get; } = new CreateSpeechRequestResponseFormat(OpusValue); + /// aac. + public static CreateSpeechRequestResponseFormat Aac { get; } = new CreateSpeechRequestResponseFormat(AacValue); + /// flac. + public static CreateSpeechRequestResponseFormat Flac { get; } = new CreateSpeechRequestResponseFormat(FlacValue); + /// Determines if two values are the same. + public static bool operator ==(CreateSpeechRequestResponseFormat left, CreateSpeechRequestResponseFormat right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(CreateSpeechRequestResponseFormat left, CreateSpeechRequestResponseFormat right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator CreateSpeechRequestResponseFormat(string value) => new CreateSpeechRequestResponseFormat(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is CreateSpeechRequestResponseFormat other && Equals(other); + /// + public bool Equals(CreateSpeechRequestResponseFormat other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/CreateSpeechRequestVoice.cs b/.dotnet/src/Generated/Models/CreateSpeechRequestVoice.cs new file mode 100644 index 000000000..be68af49d --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateSpeechRequestVoice.cs @@ -0,0 +1,58 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Internal.Models +{ + /// Enum for voice in CreateSpeechRequest. 
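Putting the speech request pieces together, a rough sketch of how these internal types might be assembled and serialized (this approximates what a public audio client method could do; it is not code taken from the library):

```csharp
var request = new CreateSpeechRequest(
    CreateSpeechRequestModel.Tts1,
    "The quick brown fox jumped over the lazy dog.", // input text, up to 4096 characters
    CreateSpeechRequestVoice.Alloy)
{
    ResponseFormat = CreateSpeechRequestResponseFormat.Mp3,
    Speed = 1.0, // valid range per the docs above: 0.25 to 4.0
};

BinaryContent body = request.ToRequestBody(); // JSON payload for the text-to-speech endpoint
```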
+ internal readonly partial struct CreateSpeechRequestVoice : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public CreateSpeechRequestVoice(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string AlloyValue = "alloy"; + private const string EchoValue = "echo"; + private const string FableValue = "fable"; + private const string OnyxValue = "onyx"; + private const string NovaValue = "nova"; + private const string ShimmerValue = "shimmer"; + + /// alloy. + public static CreateSpeechRequestVoice Alloy { get; } = new CreateSpeechRequestVoice(AlloyValue); + /// echo. + public static CreateSpeechRequestVoice Echo { get; } = new CreateSpeechRequestVoice(EchoValue); + /// fable. + public static CreateSpeechRequestVoice Fable { get; } = new CreateSpeechRequestVoice(FableValue); + /// onyx. + public static CreateSpeechRequestVoice Onyx { get; } = new CreateSpeechRequestVoice(OnyxValue); + /// nova. + public static CreateSpeechRequestVoice Nova { get; } = new CreateSpeechRequestVoice(NovaValue); + /// shimmer. + public static CreateSpeechRequestVoice Shimmer { get; } = new CreateSpeechRequestVoice(ShimmerValue); + /// Determines if two values are the same. + public static bool operator ==(CreateSpeechRequestVoice left, CreateSpeechRequestVoice right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(CreateSpeechRequestVoice left, CreateSpeechRequestVoice right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator CreateSpeechRequestVoice(string value) => new CreateSpeechRequestVoice(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is CreateSpeechRequestVoice other && Equals(other); + /// + public bool Equals(CreateSpeechRequestVoice other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/CreateThreadAndRunRequest.Serialization.cs b/.dotnet/src/Generated/Models/CreateThreadAndRunRequest.Serialization.cs new file mode 100644 index 000000000..1613f3680 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateThreadAndRunRequest.Serialization.cs @@ -0,0 +1,282 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class CreateThreadAndRunRequest : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateThreadAndRunRequest)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("assistant_id"u8); + writer.WriteStringValue(AssistantId); + if (Optional.IsDefined(Thread)) + { + writer.WritePropertyName("thread"u8); + writer.WriteObjectValue(Thread); + } + if (Optional.IsDefined(Model)) + { + if (Model != null) + { + writer.WritePropertyName("model"u8); + writer.WriteStringValue(Model); + } + else + { + writer.WriteNull("model"); + } + } + if (Optional.IsDefined(Instructions)) + { + if (Instructions != null) + { + writer.WritePropertyName("instructions"u8); + writer.WriteStringValue(Instructions); + } + else + { + writer.WriteNull("instructions"); + } + } + if (Optional.IsCollectionDefined(Tools)) + { + if (Tools != null) + { + writer.WritePropertyName("tools"u8); + writer.WriteStartArray(); + foreach (var item in Tools) + { + if (item == null) + { + writer.WriteNullValue(); + continue; + } +#if NET6_0_OR_GREATER + writer.WriteRawValue(item); +#else + using (JsonDocument document = JsonDocument.Parse(item)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + writer.WriteEndArray(); + } + else + { + writer.WriteNull("tools"); + } + } + if (Optional.IsCollectionDefined(Metadata)) + { + if (Metadata != null) + { + writer.WritePropertyName("metadata"u8); + writer.WriteStartObject(); + foreach (var item in Metadata) + { + writer.WritePropertyName(item.Key); + writer.WriteStringValue(item.Value); + } + writer.WriteEndObject(); + } + else + { + writer.WriteNull("metadata"); + } + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + CreateThreadAndRunRequest IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateThreadAndRunRequest)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeCreateThreadAndRunRequest(document.RootElement, options); + } + + internal static CreateThreadAndRunRequest DeserializeCreateThreadAndRunRequest(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string assistantId = default; + CreateThreadRequest thread = default; + string model = default; + string instructions = default; + IList tools = default; + IDictionary metadata = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("assistant_id"u8)) + { + assistantId = property.Value.GetString(); + continue; + } + if (property.NameEquals("thread"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + thread = CreateThreadRequest.DeserializeCreateThreadRequest(property.Value, options); + continue; + } + if (property.NameEquals("model"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + model = null; + continue; + } + model = property.Value.GetString(); + continue; + } + if (property.NameEquals("instructions"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + instructions = null; + continue; + } + instructions = property.Value.GetString(); + continue; + } + if (property.NameEquals("tools"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + if (item.ValueKind == JsonValueKind.Null) + { + array.Add(null); + } + else + { + array.Add(BinaryData.FromString(item.GetRawText())); + } + } + tools = array; + continue; + } + if (property.NameEquals("metadata"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + Dictionary dictionary = new Dictionary(); + foreach (var property0 in property.Value.EnumerateObject()) + { + dictionary.Add(property0.Name, property0.Value.GetString()); + } + metadata = dictionary; + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new CreateThreadAndRunRequest( + assistantId, + thread, + model, + instructions, + tools ?? new ChangeTrackingList(), + metadata ?? new ChangeTrackingDictionary(), + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(CreateThreadAndRunRequest)} does not support '{options.Format}' format."); + } + } + + CreateThreadAndRunRequest IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeCreateThreadAndRunRequest(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(CreateThreadAndRunRequest)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static CreateThreadAndRunRequest FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeCreateThreadAndRunRequest(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/CreateThreadAndRunRequest.cs b/.dotnet/src/Generated/Models/CreateThreadAndRunRequest.cs new file mode 100644 index 000000000..eb2b0b0ba --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateThreadAndRunRequest.cs @@ -0,0 +1,148 @@ +// + +using System; +using System.Collections.Generic; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// The CreateThreadAndRunRequest. + internal partial class CreateThreadAndRunRequest + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run. + /// is null. + public CreateThreadAndRunRequest(string assistantId) + { + Argument.AssertNotNull(assistantId, nameof(assistantId)); + + AssistantId = assistantId; + Tools = new ChangeTrackingList(); + Metadata = new ChangeTrackingDictionary(); + } + + /// Initializes a new instance of . + /// The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run. + /// If no thread is provided, an empty thread will be created. + /// + /// The ID of the [Model](/docs/api-reference/models) to be used to execute this run. If a value is + /// provided here, it will override the model associated with the assistant. If not, the model + /// associated with the assistant will be used. + /// + /// + /// Override the default system message of the assistant. This is useful for modifying the behavior + /// on a per-run basis. + /// + /// + /// Override the tools the assistant can use for this run. This is useful for modifying the + /// behavior on a per-run basis. + /// + /// + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + /// additional information about the object in a structured format. 
Keys can be a maximum of 64
+        /// characters long and values can be a maximum of 512 characters long.
+        ///
+        /// Keeps track of any properties unknown to the library.
+        internal CreateThreadAndRunRequest(string assistantId, CreateThreadRequest thread, string model, string instructions, IList tools, IDictionary metadata, IDictionary serializedAdditionalRawData)
+        {
+            AssistantId = assistantId;
+            Thread = thread;
+            Model = model;
+            Instructions = instructions;
+            Tools = tools;
+            Metadata = metadata;
+            _serializedAdditionalRawData = serializedAdditionalRawData;
+        }
+
+        /// Initializes a new instance of for deserialization.
+        internal CreateThreadAndRunRequest()
+        {
+        }
+
+        /// The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run.
+        public string AssistantId { get; }
+        /// If no thread is provided, an empty thread will be created.
+        public CreateThreadRequest Thread { get; set; }
+        ///
+        /// The ID of the [Model](/docs/api-reference/models) to be used to execute this run. If a value is
+        /// provided here, it will override the model associated with the assistant. If not, the model
+        /// associated with the assistant will be used.
+        ///
+        public string Model { get; set; }
+        ///
+        /// Override the default system message of the assistant. This is useful for modifying the behavior
+        /// on a per-run basis.
+        ///
+        public string Instructions { get; set; }
+        ///
+        /// Override the tools the assistant can use for this run. This is useful for modifying the
+        /// behavior on a per-run basis.
+        ///
+        /// To assign an object to the element of this property use .
+        ///
+        ///
+        /// To assign an already formatted json string to this property use .
+        ///
+        ///
+        /// Examples:
+        ///
+        ///
+        /// BinaryData.FromObjectAsJson("foo")
+        /// Creates a payload of "foo".
+        ///
+        ///
+        /// BinaryData.FromString("\"foo\"")
+        /// Creates a payload of "foo".
+        ///
+        ///
+        /// BinaryData.FromObjectAsJson(new { key = "value" })
+        /// Creates a payload of { "key": "value" }.
+        ///
+        ///
+        /// BinaryData.FromString("{\"key\": \"value\"}")
+        /// Creates a payload of { "key": "value" }.
+        ///
+        ///
+        ///
+        public IList Tools { get; set; }
+        ///
+        /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing
+        /// additional information about the object in a structured format. Keys can be a maximum of 64
+        /// characters long and values can be a maximum of 512 characters long.
+        ///
+        public IDictionary Metadata { get; set; }
+    }
+}
diff --git a/.dotnet/src/Generated/Models/CreateThreadRequest.Serialization.cs b/.dotnet/src/Generated/Models/CreateThreadRequest.Serialization.cs
new file mode 100644
index 000000000..c763c3fe2
--- /dev/null
+++ b/.dotnet/src/Generated/Models/CreateThreadRequest.Serialization.cs
@@ -0,0 +1,180 @@
+//
+
+using System;
+using OpenAI.ClientShared.Internal;
+using System.ClientModel;
+using System.ClientModel.Primitives;
+using System.Collections.Generic;
+using System.Text.Json;
+using OpenAI;
+
+namespace OpenAI.Internal.Models
+{
+    internal partial class CreateThreadRequest : IJsonModel
+    {
+        void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options)
+        {
+            var format = options.Format == "W" ?
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateThreadRequest)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsCollectionDefined(Messages)) + { + writer.WritePropertyName("messages"u8); + writer.WriteStartArray(); + foreach (var item in Messages) + { + writer.WriteObjectValue(item); + } + writer.WriteEndArray(); + } + if (Optional.IsCollectionDefined(Metadata)) + { + if (Metadata != null) + { + writer.WritePropertyName("metadata"u8); + writer.WriteStartObject(); + foreach (var item in Metadata) + { + writer.WritePropertyName(item.Key); + writer.WriteStringValue(item.Value); + } + writer.WriteEndObject(); + } + else + { + writer.WriteNull("metadata"); + } + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + CreateThreadRequest IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateThreadRequest)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeCreateThreadRequest(document.RootElement, options); + } + + internal static CreateThreadRequest DeserializeCreateThreadRequest(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + IList messages = default; + IDictionary metadata = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("messages"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(CreateMessageRequest.DeserializeCreateMessageRequest(item, options)); + } + messages = array; + continue; + } + if (property.NameEquals("metadata"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + Dictionary dictionary = new Dictionary(); + foreach (var property0 in property.Value.EnumerateObject()) + { + dictionary.Add(property0.Name, property0.Value.GetString()); + } + metadata = dictionary; + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new CreateThreadRequest(messages ?? new ChangeTrackingList(), metadata ?? new ChangeTrackingDictionary(), serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format;
+
+            switch (format)
+            {
+                case "J":
+                    return ModelReaderWriter.Write(this, options);
+                default:
+                    throw new FormatException($"The model {nameof(CreateThreadRequest)} does not support '{options.Format}' format.");
+            }
+        }
+
+        CreateThreadRequest IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options)
+        {
+            var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format;
+
+            switch (format)
+            {
+                case "J":
+                    {
+                        using JsonDocument document = JsonDocument.Parse(data);
+                        return DeserializeCreateThreadRequest(document.RootElement, options);
+                    }
+                default:
+                    throw new FormatException($"The model {nameof(CreateThreadRequest)} does not support '{options.Format}' format.");
+            }
+        }
+
+        string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J";
+
+        /// Deserializes the model from a raw response.
+        /// The result to deserialize the model from.
+        internal static CreateThreadRequest FromResponse(PipelineResponse response)
+        {
+            using var document = JsonDocument.Parse(response.Content);
+            return DeserializeCreateThreadRequest(document.RootElement);
+        }
+
+        /// Convert into a Utf8JsonRequestBody.
+        internal virtual BinaryContent ToRequestBody()
+        {
+            var content = new Utf8JsonRequestBody();
+            content.JsonWriter.WriteObjectValue(this);
+            return content;
+        }
+    }
+}
diff --git a/.dotnet/src/Generated/Models/CreateThreadRequest.cs b/.dotnet/src/Generated/Models/CreateThreadRequest.cs
new file mode 100644
index 000000000..b48c1e449
--- /dev/null
+++ b/.dotnet/src/Generated/Models/CreateThreadRequest.cs
@@ -0,0 +1,75 @@
+//
+
+using System;
+using System.Collections.Generic;
+using OpenAI;
+
+namespace OpenAI.Internal.Models
+{
+    /// The CreateThreadRequest.
+    internal partial class CreateThreadRequest
+    {
+        ///
+        /// Keeps track of any properties unknown to the library.
+        ///
+        /// To assign an object to the value of this property use .
+        ///
+        ///
+        /// To assign an already formatted json string to this property use .
+        ///
+        ///
+        /// Examples:
+        ///
+        ///
+        /// BinaryData.FromObjectAsJson("foo")
+        /// Creates a payload of "foo".
+        ///
+        ///
+        /// BinaryData.FromString("\"foo\"")
+        /// Creates a payload of "foo".
+        ///
+        ///
+        /// BinaryData.FromObjectAsJson(new { key = "value" })
+        /// Creates a payload of { "key": "value" }.
+        ///
+        ///
+        /// BinaryData.FromString("{\"key\": \"value\"}")
+        /// Creates a payload of { "key": "value" }.
+        ///
+        ///
+        ///
+        private IDictionary _serializedAdditionalRawData;
+
+        /// Initializes a new instance of .
+        public CreateThreadRequest()
+        {
+            Messages = new ChangeTrackingList();
+            Metadata = new ChangeTrackingDictionary();
+        }
+
+        /// Initializes a new instance of .
+        /// A list of [messages](/docs/api-reference/messages) to start the thread with.
+        ///
+        /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing
+        /// additional information about the object in a structured format. Keys can be a maximum of 64
+        /// characters long and values can be a maximum of 512 characters long.
+        ///
+        /// Keeps track of any properties unknown to the library.
+        internal CreateThreadRequest(IList messages, IDictionary metadata, IDictionary serializedAdditionalRawData)
+        {
+            Messages = messages;
+            Metadata = metadata;
+            _serializedAdditionalRawData = serializedAdditionalRawData;
+        }
+
+        /// A list of [messages](/docs/api-reference/messages) to start the thread with.
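+        // Illustrative sketch (editorial, not generated code): Messages is get-only, so callers
+        // populate the list in place; Metadata accepts up to 16 pairs (keys up to 64 characters,
+        // values up to 512). Assumes a CreateMessageRequest instance named `message` built elsewhere:
+        //     var thread = new CreateThreadRequest();
+        //     thread.Messages.Add(message);
+        //     thread.Metadata.Add("purpose", "demo");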
+        public IList Messages { get; }
+        ///
+        /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing
+        /// additional information about the object in a structured format. Keys can be a maximum of 64
+        /// characters long and values can be a maximum of 512 characters long.
+        ///
+        public IDictionary Metadata { get; set; }
+    }
+}
diff --git a/.dotnet/src/Generated/Models/CreateTranscriptionRequest.Serialization.cs b/.dotnet/src/Generated/Models/CreateTranscriptionRequest.Serialization.cs
new file mode 100644
index 000000000..bd02c679e
--- /dev/null
+++ b/.dotnet/src/Generated/Models/CreateTranscriptionRequest.Serialization.cs
@@ -0,0 +1,197 @@
+//
+
+using System;
+using OpenAI.ClientShared.Internal;
+using System.ClientModel;
+using System.ClientModel.Primitives;
+using System.Collections.Generic;
+using System.Text.Json;
+using OpenAI;
+
+namespace OpenAI.Internal.Models
+{
+    internal partial class CreateTranscriptionRequest : IJsonModel
+    {
+        void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options)
+        {
+            var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format;
+            if (format != "J")
+            {
+                throw new FormatException($"The model {nameof(CreateTranscriptionRequest)} does not support '{format}' format.");
+            }
+
+            writer.WriteStartObject();
+            writer.WritePropertyName("file"u8);
+            writer.WriteBase64StringValue(File.ToArray(), "D");
+            writer.WritePropertyName("model"u8);
+            writer.WriteStringValue(Model.ToString());
+            if (Optional.IsDefined(Language))
+            {
+                writer.WritePropertyName("language"u8);
+                writer.WriteStringValue(Language);
+            }
+            if (Optional.IsDefined(Prompt))
+            {
+                writer.WritePropertyName("prompt"u8);
+                writer.WriteStringValue(Prompt);
+            }
+            if (Optional.IsDefined(ResponseFormat))
+            {
+                writer.WritePropertyName("response_format"u8);
+                writer.WriteStringValue(ResponseFormat.Value.ToString());
+            }
+            if (Optional.IsDefined(Temperature))
+            {
+                writer.WritePropertyName("temperature"u8);
+                writer.WriteNumberValue(Temperature.Value);
+            }
+            if (options.Format != "W" && _serializedAdditionalRawData != null)
+            {
+                foreach (var item in _serializedAdditionalRawData)
+                {
+                    writer.WritePropertyName(item.Key);
+#if NET6_0_OR_GREATER
+                    writer.WriteRawValue(item.Value);
+#else
+                    using (JsonDocument document = JsonDocument.Parse(item.Value))
+                    {
+                        JsonSerializer.Serialize(writer, document.RootElement);
+                    }
+#endif
+                }
+            }
+            writer.WriteEndObject();
+        }
+
+        CreateTranscriptionRequest IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options)
+        {
+            var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format;
+            if (format != "J")
+            {
+                throw new FormatException($"The model {nameof(CreateTranscriptionRequest)} does not support '{format}' format.");
+            }
+
+            using JsonDocument document = JsonDocument.ParseValue(ref reader);
+            return DeserializeCreateTranscriptionRequest(document.RootElement, options);
+        }
+
+        internal static CreateTranscriptionRequest DeserializeCreateTranscriptionRequest(JsonElement element, ModelReaderWriterOptions options = null)
+        {
+            options ??= new ModelReaderWriterOptions("W");
+
+            if (element.ValueKind == JsonValueKind.Null)
+            {
+                return null;
+            }
+            BinaryData file = default;
+            CreateTranscriptionRequestModel model = default;
+            string language = default;
+            string prompt = default;
+            CreateTranscriptionRequestResponseFormat? responseFormat = default;
+            double?
temperature = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("file"u8)) + { + file = BinaryData.FromBytes(property.Value.GetBytesFromBase64("D")); + continue; + } + if (property.NameEquals("model"u8)) + { + model = new CreateTranscriptionRequestModel(property.Value.GetString()); + continue; + } + if (property.NameEquals("language"u8)) + { + language = property.Value.GetString(); + continue; + } + if (property.NameEquals("prompt"u8)) + { + prompt = property.Value.GetString(); + continue; + } + if (property.NameEquals("response_format"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + responseFormat = new CreateTranscriptionRequestResponseFormat(property.Value.GetString()); + continue; + } + if (property.NameEquals("temperature"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + temperature = property.Value.GetDouble(); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new CreateTranscriptionRequest( + file, + model, + language, + prompt, + responseFormat, + temperature, + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(CreateTranscriptionRequest)} does not support '{options.Format}' format."); + } + } + + CreateTranscriptionRequest IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeCreateTranscriptionRequest(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(CreateTranscriptionRequest)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static CreateTranscriptionRequest FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeCreateTranscriptionRequest(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/CreateTranscriptionRequest.cs b/.dotnet/src/Generated/Models/CreateTranscriptionRequest.cs new file mode 100644 index 000000000..0b52b69b5 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateTranscriptionRequest.cs @@ -0,0 +1,145 @@ +// + +using System; +using System.Collections.Generic; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// The CreateTranscriptionRequest. 
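+    // Illustrative usage sketch (editorial, not generated code): the request carries the raw
+    // audio as BinaryData plus the extensible model struct, with the optional knobs set via an
+    // object initializer. The file path below is hypothetical:
+    //     var request = new CreateTranscriptionRequest(
+    //         BinaryData.FromBytes(System.IO.File.ReadAllBytes("speech.mp3")),
+    //         CreateTranscriptionRequestModel.Whisper1)
+    //     {
+    //         Language = "en",
+    //         ResponseFormat = CreateTranscriptionRequestResponseFormat.VerboseJson,
+    //         Temperature = 0,
+    //     };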
+ internal partial class CreateTranscriptionRequest + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, + /// mpeg, mpga, m4a, ogg, wav, or webm. + /// + /// ID of the model to use. Only `whisper-1` is currently available. + /// is null. + public CreateTranscriptionRequest(BinaryData file, CreateTranscriptionRequestModel model) + { + Argument.AssertNotNull(file, nameof(file)); + + File = file; + Model = model; + } + + /// Initializes a new instance of . + /// + /// The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, + /// mpeg, mpga, m4a, ogg, wav, or webm. + /// + /// ID of the model to use. Only `whisper-1` is currently available. + /// + /// The language of the input audio. Supplying the input language in + /// [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy + /// and latency. + /// + /// + /// An optional text to guide the model's style or continue a previous audio segment. The + /// [prompt](/docs/guides/speech-to-text/prompting) should match the audio language. + /// + /// + /// The format of the transcript output, in one of these options: json, text, srt, verbose_json, or + /// vtt. + /// + /// + /// The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more + /// random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, + /// the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to + /// automatically increase the temperature until certain thresholds are hit. + /// + /// Keeps track of any properties unknown to the library. + internal CreateTranscriptionRequest(BinaryData file, CreateTranscriptionRequestModel model, string language, string prompt, CreateTranscriptionRequestResponseFormat? responseFormat, double? temperature, IDictionary serializedAdditionalRawData) + { + File = file; + Model = model; + Language = language; + Prompt = prompt; + ResponseFormat = responseFormat; + Temperature = temperature; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal CreateTranscriptionRequest() + { + } + + /// + /// The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, + /// mpeg, mpga, m4a, ogg, wav, or webm. + /// + /// To assign a byte[] to this property use . + /// The byte[] will be serialized to a Base64 encoded string. + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromBytes(new byte[] { 1, 2, 3 }) + /// Creates a payload of "AQID". + /// + /// + /// + /// + public BinaryData File { get; } + /// ID of the model to use. Only `whisper-1` is currently available. 
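+        // Editorial note: CreateTranscriptionRequestModel converts implicitly from string and
+        // compares case-insensitively, so the following are equivalent (illustrative):
+        //     CreateTranscriptionRequestModel model = "whisper-1";
+        //     bool same = model == CreateTranscriptionRequestModel.Whisper1;   // true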
+ public CreateTranscriptionRequestModel Model { get; } + /// + /// The language of the input audio. Supplying the input language in + /// [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy + /// and latency. + /// + public string Language { get; set; } + /// + /// An optional text to guide the model's style or continue a previous audio segment. The + /// [prompt](/docs/guides/speech-to-text/prompting) should match the audio language. + /// + public string Prompt { get; set; } + /// + /// The format of the transcript output, in one of these options: json, text, srt, verbose_json, or + /// vtt. + /// + public CreateTranscriptionRequestResponseFormat? ResponseFormat { get; set; } + /// + /// The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more + /// random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, + /// the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to + /// automatically increase the temperature until certain thresholds are hit. + /// + public double? Temperature { get; set; } + } +} diff --git a/.dotnet/src/Generated/Models/CreateTranscriptionRequestModel.cs b/.dotnet/src/Generated/Models/CreateTranscriptionRequestModel.cs new file mode 100644 index 000000000..a4890440f --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateTranscriptionRequestModel.cs @@ -0,0 +1,43 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Internal.Models +{ + /// The CreateTranscriptionRequestModel. + internal readonly partial struct CreateTranscriptionRequestModel : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public CreateTranscriptionRequestModel(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string Whisper1Value = "whisper-1"; + + /// whisper-1. + public static CreateTranscriptionRequestModel Whisper1 { get; } = new CreateTranscriptionRequestModel(Whisper1Value); + /// Determines if two values are the same. + public static bool operator ==(CreateTranscriptionRequestModel left, CreateTranscriptionRequestModel right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(CreateTranscriptionRequestModel left, CreateTranscriptionRequestModel right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator CreateTranscriptionRequestModel(string value) => new CreateTranscriptionRequestModel(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is CreateTranscriptionRequestModel other && Equals(other); + /// + public bool Equals(CreateTranscriptionRequestModel other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 
0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/CreateTranscriptionRequestResponseFormat.cs b/.dotnet/src/Generated/Models/CreateTranscriptionRequestResponseFormat.cs new file mode 100644 index 000000000..e8211eeef --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateTranscriptionRequestResponseFormat.cs @@ -0,0 +1,55 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Internal.Models +{ + /// Enum for response_format in CreateTranscriptionRequest. + internal readonly partial struct CreateTranscriptionRequestResponseFormat : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public CreateTranscriptionRequestResponseFormat(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string JsonValue = "json"; + private const string TextValue = "text"; + private const string SrtValue = "srt"; + private const string VerboseJsonValue = "verbose_json"; + private const string VttValue = "vtt"; + + /// json. + public static CreateTranscriptionRequestResponseFormat Json { get; } = new CreateTranscriptionRequestResponseFormat(JsonValue); + /// text. + public static CreateTranscriptionRequestResponseFormat Text { get; } = new CreateTranscriptionRequestResponseFormat(TextValue); + /// srt. + public static CreateTranscriptionRequestResponseFormat Srt { get; } = new CreateTranscriptionRequestResponseFormat(SrtValue); + /// verbose_json. + public static CreateTranscriptionRequestResponseFormat VerboseJson { get; } = new CreateTranscriptionRequestResponseFormat(VerboseJsonValue); + /// vtt. + public static CreateTranscriptionRequestResponseFormat Vtt { get; } = new CreateTranscriptionRequestResponseFormat(VttValue); + /// Determines if two values are the same. + public static bool operator ==(CreateTranscriptionRequestResponseFormat left, CreateTranscriptionRequestResponseFormat right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(CreateTranscriptionRequestResponseFormat left, CreateTranscriptionRequestResponseFormat right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator CreateTranscriptionRequestResponseFormat(string value) => new CreateTranscriptionRequestResponseFormat(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is CreateTranscriptionRequestResponseFormat other && Equals(other); + /// + public bool Equals(CreateTranscriptionRequestResponseFormat other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 
0;
+        ///
+        public override string ToString() => _value;
+    }
+}
diff --git a/.dotnet/src/Generated/Models/CreateTranscriptionResponse.Serialization.cs b/.dotnet/src/Generated/Models/CreateTranscriptionResponse.Serialization.cs
new file mode 100644
index 000000000..6bf802a18
--- /dev/null
+++ b/.dotnet/src/Generated/Models/CreateTranscriptionResponse.Serialization.cs
@@ -0,0 +1,202 @@
+//
+
+using System;
+using OpenAI.ClientShared.Internal;
+using System.ClientModel;
+using System.ClientModel.Primitives;
+using System.Collections.Generic;
+using System.Text.Json;
+using OpenAI;
+
+namespace OpenAI.Internal.Models
+{
+    internal partial class CreateTranscriptionResponse : IJsonModel
+    {
+        void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options)
+        {
+            var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format;
+            if (format != "J")
+            {
+                throw new FormatException($"The model {nameof(CreateTranscriptionResponse)} does not support '{format}' format.");
+            }
+
+            writer.WriteStartObject();
+            writer.WritePropertyName("text"u8);
+            writer.WriteStringValue(Text);
+            if (Optional.IsDefined(Task))
+            {
+                writer.WritePropertyName("task"u8);
+                writer.WriteStringValue(Task.Value.ToString());
+            }
+            if (Optional.IsDefined(Language))
+            {
+                writer.WritePropertyName("language"u8);
+                writer.WriteStringValue(Language);
+            }
+            if (Optional.IsDefined(Duration))
+            {
+                writer.WritePropertyName("duration"u8);
+                // Write the total duration in whole seconds; ToString("%s") would emit only the
+                // 0-59 seconds component and corrupt durations of a minute or longer.
+                writer.WriteNumberValue(Convert.ToInt32(Duration.Value.TotalSeconds));
+            }
+            if (Optional.IsCollectionDefined(Segments))
+            {
+                writer.WritePropertyName("segments"u8);
+                writer.WriteStartArray();
+                foreach (var item in Segments)
+                {
+                    writer.WriteObjectValue(item);
+                }
+                writer.WriteEndArray();
+            }
+            if (options.Format != "W" && _serializedAdditionalRawData != null)
+            {
+                foreach (var item in _serializedAdditionalRawData)
+                {
+                    writer.WritePropertyName(item.Key);
+#if NET6_0_OR_GREATER
+                    writer.WriteRawValue(item.Value);
+#else
+                    using (JsonDocument document = JsonDocument.Parse(item.Value))
+                    {
+                        JsonSerializer.Serialize(writer, document.RootElement);
+                    }
+#endif
+                }
+            }
+            writer.WriteEndObject();
+        }
+
+        CreateTranscriptionResponse IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options)
+        {
+            var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format;
+            if (format != "J")
+            {
+                throw new FormatException($"The model {nameof(CreateTranscriptionResponse)} does not support '{format}' format.");
+            }
+
+            using JsonDocument document = JsonDocument.ParseValue(ref reader);
+            return DeserializeCreateTranscriptionResponse(document.RootElement, options);
+        }
+
+        internal static CreateTranscriptionResponse DeserializeCreateTranscriptionResponse(JsonElement element, ModelReaderWriterOptions options = null)
+        {
+            options ??= new ModelReaderWriterOptions("W");
+
+            if (element.ValueKind == JsonValueKind.Null)
+            {
+                return null;
+            }
+            string text = default;
+            CreateTranscriptionResponseTask? task = default;
+            string language = default;
+            TimeSpan?
duration = default; + IReadOnlyList segments = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("text"u8)) + { + text = property.Value.GetString(); + continue; + } + if (property.NameEquals("task"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + task = new CreateTranscriptionResponseTask(property.Value.GetString()); + continue; + } + if (property.NameEquals("language"u8)) + { + language = property.Value.GetString(); + continue; + } + if (property.NameEquals("duration"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + duration = TimeSpan.FromSeconds(property.Value.GetInt32()); + continue; + } + if (property.NameEquals("segments"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(AudioSegment.DeserializeAudioSegment(item, options)); + } + segments = array; + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new CreateTranscriptionResponse( + text, + task, + language, + duration, + segments ?? new ChangeTrackingList(), + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(CreateTranscriptionResponse)} does not support '{options.Format}' format."); + } + } + + CreateTranscriptionResponse IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeCreateTranscriptionResponse(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(CreateTranscriptionResponse)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static CreateTranscriptionResponse FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeCreateTranscriptionResponse(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/CreateTranscriptionResponse.cs b/.dotnet/src/Generated/Models/CreateTranscriptionResponse.cs new file mode 100644 index 000000000..0fb8162d3 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateTranscriptionResponse.cs @@ -0,0 +1,94 @@ +// + +using System; +using System.Collections.Generic; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// The CreateTranscriptionResponse. 
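+    // Editorial note: on the wire "duration" is a number of whole seconds; the model surfaces it
+    // as a TimeSpan?, so a payload containing "duration": 42 round-trips as TimeSpan.FromSeconds(42).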
+ internal partial class CreateTranscriptionResponse + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The transcribed text for the provided audio data. + /// is null. + internal CreateTranscriptionResponse(string text) + { + Argument.AssertNotNull(text, nameof(text)); + + Text = text; + Segments = new ChangeTrackingList(); + } + + /// Initializes a new instance of . + /// The transcribed text for the provided audio data. + /// The label that describes which operation type generated the accompanying response data. + /// The spoken language that was detected in the audio data. + /// The total duration of the audio processed to produce accompanying transcription information. + /// + /// A collection of information about the timing, probabilities, and other detail of each processed + /// audio segment. + /// + /// Keeps track of any properties unknown to the library. + internal CreateTranscriptionResponse(string text, CreateTranscriptionResponseTask? task, string language, TimeSpan? duration, IReadOnlyList segments, IDictionary serializedAdditionalRawData) + { + Text = text; + Task = task; + Language = language; + Duration = duration; + Segments = segments; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal CreateTranscriptionResponse() + { + } + + /// The transcribed text for the provided audio data. + public string Text { get; } + /// The label that describes which operation type generated the accompanying response data. + public CreateTranscriptionResponseTask? Task { get; } + /// The spoken language that was detected in the audio data. + public string Language { get; } + /// The total duration of the audio processed to produce accompanying transcription information. + public TimeSpan? Duration { get; } + /// + /// A collection of information about the timing, probabilities, and other detail of each processed + /// audio segment. + /// + public IReadOnlyList Segments { get; } + } +} diff --git a/.dotnet/src/Generated/Models/CreateTranscriptionResponseTask.cs b/.dotnet/src/Generated/Models/CreateTranscriptionResponseTask.cs new file mode 100644 index 000000000..167892816 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateTranscriptionResponseTask.cs @@ -0,0 +1,43 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Internal.Models +{ + /// The CreateTranscriptionResponse_task. + internal readonly partial struct CreateTranscriptionResponseTask : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public CreateTranscriptionResponseTask(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string TranscribeValue = "transcribe"; + + /// transcribe. 
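+        // Editorial note: this struct is an extensible enum, so values the service adds later
+        // still deserialize without error (illustrative):
+        //     CreateTranscriptionResponseTask unknown = "some-future-task";   // preserved as-is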
+ public static CreateTranscriptionResponseTask Transcribe { get; } = new CreateTranscriptionResponseTask(TranscribeValue); + /// Determines if two values are the same. + public static bool operator ==(CreateTranscriptionResponseTask left, CreateTranscriptionResponseTask right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(CreateTranscriptionResponseTask left, CreateTranscriptionResponseTask right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator CreateTranscriptionResponseTask(string value) => new CreateTranscriptionResponseTask(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is CreateTranscriptionResponseTask other && Equals(other); + /// + public bool Equals(CreateTranscriptionResponseTask other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/CreateTranslationRequest.Serialization.cs b/.dotnet/src/Generated/Models/CreateTranslationRequest.Serialization.cs new file mode 100644 index 000000000..aa71298f1 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateTranslationRequest.Serialization.cs @@ -0,0 +1,185 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class CreateTranslationRequest : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateTranslationRequest)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("file"u8); + writer.WriteBase64StringValue(File.ToArray(), "D"); + writer.WritePropertyName("model"u8); + writer.WriteStringValue(Model.ToString()); + if (Optional.IsDefined(Prompt)) + { + writer.WritePropertyName("prompt"u8); + writer.WriteStringValue(Prompt); + } + if (Optional.IsDefined(ResponseFormat)) + { + writer.WritePropertyName("response_format"u8); + writer.WriteStringValue(ResponseFormat.Value.ToString()); + } + if (Optional.IsDefined(Temperature)) + { + writer.WritePropertyName("temperature"u8); + writer.WriteNumberValue(Temperature.Value); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + CreateTranslationRequest IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CreateTranslationRequest)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeCreateTranslationRequest(document.RootElement, options); + } + + internal static CreateTranslationRequest DeserializeCreateTranslationRequest(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + BinaryData file = default; + CreateTranslationRequestModel model = default; + string prompt = default; + CreateTranslationRequestResponseFormat? responseFormat = default; + double? temperature = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("file"u8)) + { + file = BinaryData.FromBytes(property.Value.GetBytesFromBase64("D")); + continue; + } + if (property.NameEquals("model"u8)) + { + model = new CreateTranslationRequestModel(property.Value.GetString()); + continue; + } + if (property.NameEquals("prompt"u8)) + { + prompt = property.Value.GetString(); + continue; + } + if (property.NameEquals("response_format"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + responseFormat = new CreateTranslationRequestResponseFormat(property.Value.GetString()); + continue; + } + if (property.NameEquals("temperature"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + temperature = property.Value.GetDouble(); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new CreateTranslationRequest( + file, + model, + prompt, + responseFormat, + temperature, + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(CreateTranslationRequest)} does not support '{options.Format}' format."); + } + } + + CreateTranslationRequest IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeCreateTranslationRequest(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(CreateTranslationRequest)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static CreateTranslationRequest FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeCreateTranslationRequest(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. 
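+        // Illustrative sketch (editorial, not generated code) of how a generated client is
+        // expected to attach the body; the PipelineMessage/PipelineRequest names follow
+        // System.ClientModel and are assumptions here:
+        //     using BinaryContent body = request.ToRequestBody();
+        //     message.Request.Content = body;   // message: a PipelineMessage created by the client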
+ internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/CreateTranslationRequest.cs b/.dotnet/src/Generated/Models/CreateTranslationRequest.cs new file mode 100644 index 000000000..848333e45 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateTranslationRequest.cs @@ -0,0 +1,133 @@ +// + +using System; +using System.Collections.Generic; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// The CreateTranslationRequest. + internal partial class CreateTranslationRequest + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// The audio file object (not file name) to translate, in one of these formats: flac, mp3, mp4, + /// mpeg, mpga, m4a, ogg, wav, or webm. + /// + /// ID of the model to use. Only `whisper-1` is currently available. + /// is null. + public CreateTranslationRequest(BinaryData file, CreateTranslationRequestModel model) + { + Argument.AssertNotNull(file, nameof(file)); + + File = file; + Model = model; + } + + /// Initializes a new instance of . + /// + /// The audio file object (not file name) to translate, in one of these formats: flac, mp3, mp4, + /// mpeg, mpga, m4a, ogg, wav, or webm. + /// + /// ID of the model to use. Only `whisper-1` is currently available. + /// + /// An optional text to guide the model's style or continue a previous audio segment. The + /// [prompt](/docs/guides/speech-to-text/prompting) should match the audio language. + /// + /// + /// The format of the transcript output, in one of these options: json, text, srt, verbose_json, or + /// vtt. + /// + /// + /// The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more + /// random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, + /// the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to + /// automatically increase the temperature until certain thresholds are hit. + /// + /// Keeps track of any properties unknown to the library. + internal CreateTranslationRequest(BinaryData file, CreateTranslationRequestModel model, string prompt, CreateTranslationRequestResponseFormat? responseFormat, double? temperature, IDictionary serializedAdditionalRawData) + { + File = file; + Model = model; + Prompt = prompt; + ResponseFormat = responseFormat; + Temperature = temperature; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal CreateTranslationRequest() + { + } + + /// + /// The audio file object (not file name) to translate, in one of these formats: flac, mp3, mp4, + /// mpeg, mpga, m4a, ogg, wav, or webm. 
+ /// + /// To assign a byte[] to this property use . + /// The byte[] will be serialized to a Base64 encoded string. + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromBytes(new byte[] { 1, 2, 3 }) + /// Creates a payload of "AQID". + /// + /// + /// + /// + public BinaryData File { get; } + /// ID of the model to use. Only `whisper-1` is currently available. + public CreateTranslationRequestModel Model { get; } + /// + /// An optional text to guide the model's style or continue a previous audio segment. The + /// [prompt](/docs/guides/speech-to-text/prompting) should match the audio language. + /// + public string Prompt { get; set; } + /// + /// The format of the transcript output, in one of these options: json, text, srt, verbose_json, or + /// vtt. + /// + public CreateTranslationRequestResponseFormat? ResponseFormat { get; set; } + /// + /// The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more + /// random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, + /// the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to + /// automatically increase the temperature until certain thresholds are hit. + /// + public double? Temperature { get; set; } + } +} diff --git a/.dotnet/src/Generated/Models/CreateTranslationRequestModel.cs b/.dotnet/src/Generated/Models/CreateTranslationRequestModel.cs new file mode 100644 index 000000000..2c58aab50 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateTranslationRequestModel.cs @@ -0,0 +1,43 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Internal.Models +{ + /// The CreateTranslationRequestModel. + internal readonly partial struct CreateTranslationRequestModel : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public CreateTranslationRequestModel(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string Whisper1Value = "whisper-1"; + + /// whisper-1. + public static CreateTranslationRequestModel Whisper1 { get; } = new CreateTranslationRequestModel(Whisper1Value); + /// Determines if two values are the same. + public static bool operator ==(CreateTranslationRequestModel left, CreateTranslationRequestModel right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(CreateTranslationRequestModel left, CreateTranslationRequestModel right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator CreateTranslationRequestModel(string value) => new CreateTranslationRequestModel(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is CreateTranslationRequestModel other && Equals(other); + /// + public bool Equals(CreateTranslationRequestModel other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 
diff --git a/.dotnet/src/Generated/Models/CreateTranslationRequestResponseFormat.cs b/.dotnet/src/Generated/Models/CreateTranslationRequestResponseFormat.cs
new file mode 100644
index 000000000..39ef19095
--- /dev/null
+++ b/.dotnet/src/Generated/Models/CreateTranslationRequestResponseFormat.cs
@@ -0,0 +1,55 @@
+// <auto-generated/>
+
+using System;
+using System.ComponentModel;
+
+namespace OpenAI.Internal.Models
+{
+    /// <summary> Enum for response_format in CreateTranslationRequest. </summary>
+    internal readonly partial struct CreateTranslationRequestResponseFormat : IEquatable<CreateTranslationRequestResponseFormat>
+    {
+        private readonly string _value;
+
+        /// <summary> Initializes a new instance of <see cref="CreateTranslationRequestResponseFormat"/>. </summary>
+        /// <exception cref="ArgumentNullException"> <paramref name="value"/> is null. </exception>
+        public CreateTranslationRequestResponseFormat(string value)
+        {
+            _value = value ?? throw new ArgumentNullException(nameof(value));
+        }
+
+        private const string JsonValue = "json";
+        private const string TextValue = "text";
+        private const string SrtValue = "srt";
+        private const string VerboseJsonValue = "verbose_json";
+        private const string VttValue = "vtt";
+
+        /// <summary> json. </summary>
+        public static CreateTranslationRequestResponseFormat Json { get; } = new CreateTranslationRequestResponseFormat(JsonValue);
+        /// <summary> text. </summary>
+        public static CreateTranslationRequestResponseFormat Text { get; } = new CreateTranslationRequestResponseFormat(TextValue);
+        /// <summary> srt. </summary>
+        public static CreateTranslationRequestResponseFormat Srt { get; } = new CreateTranslationRequestResponseFormat(SrtValue);
+        /// <summary> verbose_json. </summary>
+        public static CreateTranslationRequestResponseFormat VerboseJson { get; } = new CreateTranslationRequestResponseFormat(VerboseJsonValue);
+        /// <summary> vtt. </summary>
+        public static CreateTranslationRequestResponseFormat Vtt { get; } = new CreateTranslationRequestResponseFormat(VttValue);
+        /// <summary> Determines if two <see cref="CreateTranslationRequestResponseFormat"/> values are the same. </summary>
+        public static bool operator ==(CreateTranslationRequestResponseFormat left, CreateTranslationRequestResponseFormat right) => left.Equals(right);
+        /// <summary> Determines if two <see cref="CreateTranslationRequestResponseFormat"/> values are not the same. </summary>
+        public static bool operator !=(CreateTranslationRequestResponseFormat left, CreateTranslationRequestResponseFormat right) => !left.Equals(right);
+        /// <summary> Converts a string to a <see cref="CreateTranslationRequestResponseFormat"/>. </summary>
+        public static implicit operator CreateTranslationRequestResponseFormat(string value) => new CreateTranslationRequestResponseFormat(value);
+
+        /// <inheritdoc />
+        [EditorBrowsable(EditorBrowsableState.Never)]
+        public override bool Equals(object obj) => obj is CreateTranslationRequestResponseFormat other && Equals(other);
+        /// <inheritdoc />
+        public bool Equals(CreateTranslationRequestResponseFormat other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase);
+
+        /// <inheritdoc />
+        [EditorBrowsable(EditorBrowsableState.Never)]
+        public override int GetHashCode() => _value?.GetHashCode() ?? 0;
+        /// <inheritdoc />
+        public override string ToString() => _value;
+    }
+}
diff --git a/.dotnet/src/Generated/Models/CreateTranslationResponse.Serialization.cs b/.dotnet/src/Generated/Models/CreateTranslationResponse.Serialization.cs
new file mode 100644
index 000000000..d59ef520b
--- /dev/null
+++ b/.dotnet/src/Generated/Models/CreateTranslationResponse.Serialization.cs
@@ -0,0 +1,202 @@
+// <auto-generated/>
+
+using System;
+using OpenAI.ClientShared.Internal;
+using System.ClientModel;
+using System.ClientModel.Primitives;
+using System.Collections.Generic;
+using System.Text.Json;
+using OpenAI;
+
+namespace OpenAI.Internal.Models
+{
+    internal partial class CreateTranslationResponse : IJsonModel<CreateTranslationResponse>
+    {
+        void IJsonModel<CreateTranslationResponse>.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options)
+        {
+            var format = options.Format == "W" ? ((IPersistableModel<CreateTranslationResponse>)this).GetFormatFromOptions(options) : options.Format;
+            if (format != "J")
+            {
+                throw new FormatException($"The model {nameof(CreateTranslationResponse)} does not support '{format}' format.");
+            }
+
+            writer.WriteStartObject();
+            writer.WritePropertyName("text"u8);
+            writer.WriteStringValue(Text);
+            if (Optional.IsDefined(Task))
+            {
+                writer.WritePropertyName("task"u8);
+                writer.WriteStringValue(Task.Value.ToString());
+            }
+            if (Optional.IsDefined(Language))
+            {
+                writer.WritePropertyName("language"u8);
+                writer.WriteStringValue(Language);
+            }
+            if (Optional.IsDefined(Duration))
+            {
+                writer.WritePropertyName("duration"u8);
+                writer.WriteNumberValue(Convert.ToInt32(Duration.Value.ToString("%s")));
+            }
+            if (Optional.IsCollectionDefined(Segments))
+            {
+                writer.WritePropertyName("segments"u8);
+                writer.WriteStartArray();
+                foreach (var item in Segments)
+                {
+                    writer.WriteObjectValue(item);
+                }
+                writer.WriteEndArray();
+            }
+            if (options.Format != "W" && _serializedAdditionalRawData != null)
+            {
+                foreach (var item in _serializedAdditionalRawData)
+                {
+                    writer.WritePropertyName(item.Key);
+#if NET6_0_OR_GREATER
+                    writer.WriteRawValue(item.Value);
+#else
+                    using (JsonDocument document = JsonDocument.Parse(item.Value))
+                    {
+                        JsonSerializer.Serialize(writer, document.RootElement);
+                    }
+#endif
+                }
+            }
+            writer.WriteEndObject();
+        }
+
+        CreateTranslationResponse IJsonModel<CreateTranslationResponse>.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options)
+        {
+            var format = options.Format == "W" ? ((IPersistableModel<CreateTranslationResponse>)this).GetFormatFromOptions(options) : options.Format;
+            if (format != "J")
+            {
+                throw new FormatException($"The model {nameof(CreateTranslationResponse)} does not support '{format}' format.");
+            }
+
+            using JsonDocument document = JsonDocument.ParseValue(ref reader);
+            return DeserializeCreateTranslationResponse(document.RootElement, options);
+        }
+
+        internal static CreateTranslationResponse DeserializeCreateTranslationResponse(JsonElement element, ModelReaderWriterOptions options = null)
+        {
+            options ??= new ModelReaderWriterOptions("W");
+
+            if (element.ValueKind == JsonValueKind.Null)
+            {
+                return null;
+            }
+            string text = default;
+            CreateTranslationResponseTask? task = default;
+            string language = default;
+            TimeSpan? duration = default;
+            IReadOnlyList<AudioSegment> segments = default;
+            IDictionary<string, BinaryData> serializedAdditionalRawData = default;
+            Dictionary<string, BinaryData> additionalPropertiesDictionary = new Dictionary<string, BinaryData>();
+            foreach (var property in element.EnumerateObject())
+            {
+                if (property.NameEquals("text"u8))
+                {
+                    text = property.Value.GetString();
+                    continue;
+                }
+                if (property.NameEquals("task"u8))
+                {
+                    if (property.Value.ValueKind == JsonValueKind.Null)
+                    {
+                        continue;
+                    }
+                    task = new CreateTranslationResponseTask(property.Value.GetString());
+                    continue;
+                }
+                if (property.NameEquals("language"u8))
+                {
+                    language = property.Value.GetString();
+                    continue;
+                }
+                if (property.NameEquals("duration"u8))
+                {
+                    if (property.Value.ValueKind == JsonValueKind.Null)
+                    {
+                        continue;
+                    }
+                    duration = TimeSpan.FromSeconds(property.Value.GetInt32());
+                    continue;
+                }
+                if (property.NameEquals("segments"u8))
+                {
+                    if (property.Value.ValueKind == JsonValueKind.Null)
+                    {
+                        continue;
+                    }
+                    List<AudioSegment> array = new List<AudioSegment>();
+                    foreach (var item in property.Value.EnumerateArray())
+                    {
+                        array.Add(AudioSegment.DeserializeAudioSegment(item, options));
+                    }
+                    segments = array;
+                    continue;
+                }
+                if (options.Format != "W")
+                {
+                    additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText()));
+                }
+            }
+            serializedAdditionalRawData = additionalPropertiesDictionary;
+            return new CreateTranslationResponse(
+                text,
+                task,
+                language,
+                duration,
+                segments ?? new ChangeTrackingList<AudioSegment>(),
+                serializedAdditionalRawData);
+        }
+
+        BinaryData IPersistableModel<CreateTranslationResponse>.Write(ModelReaderWriterOptions options)
+        {
+            var format = options.Format == "W" ? ((IPersistableModel<CreateTranslationResponse>)this).GetFormatFromOptions(options) : options.Format;
+
+            switch (format)
+            {
+                case "J":
+                    return ModelReaderWriter.Write(this, options);
+                default:
+                    throw new FormatException($"The model {nameof(CreateTranslationResponse)} does not support '{options.Format}' format.");
+            }
+        }
+
+        CreateTranslationResponse IPersistableModel<CreateTranslationResponse>.Create(BinaryData data, ModelReaderWriterOptions options)
+        {
+            var format = options.Format == "W" ? ((IPersistableModel<CreateTranslationResponse>)this).GetFormatFromOptions(options) : options.Format;
+
+            switch (format)
+            {
+                case "J":
+                    {
+                        using JsonDocument document = JsonDocument.Parse(data);
+                        return DeserializeCreateTranslationResponse(document.RootElement, options);
+                    }
+                default:
+                    throw new FormatException($"The model {nameof(CreateTranslationResponse)} does not support '{options.Format}' format.");
+            }
+        }
+
+        string IPersistableModel<CreateTranslationResponse>.GetFormatFromOptions(ModelReaderWriterOptions options) => "J";
+
+        /// <summary> Deserializes the model from a raw response. </summary>
+        /// <param name="response"> The result to deserialize the model from. </param>
+        internal static CreateTranslationResponse FromResponse(PipelineResponse response)
+        {
+            using var document = JsonDocument.Parse(response.Content);
+            return DeserializeCreateTranslationResponse(document.RootElement);
+        }
+
+        /// <summary> Convert into a Utf8JsonRequestBody. </summary>
+        internal virtual BinaryContent ToRequestBody()
+        {
+            var content = new Utf8JsonRequestBody();
+            content.JsonWriter.WriteObjectValue(this);
+            return content;
+        }
+    }
+}
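The file above is the standard `IJsonModel<T>`/`IPersistableModel<T>` plumbing that lets `System.ClientModel`'s `ModelReaderWriter` round-trip the model: the format token `"J"` means plain JSON, while `"W"` (the wire default) defers to `GetFormatFromOptions`, which here also resolves to JSON. A minimal round-trip sketch, assuming internal access and an existing `translationResponse` instance:

```csharp
using System.ClientModel.Primitives;

ModelReaderWriterOptions json = new("J");

// Serializes through the IJsonModel<T>.Write implementation above...
BinaryData payload = ModelReaderWriter.Write(translationResponse, json);

// ...and rehydrates through DeserializeCreateTranslationResponse.
CreateTranslationResponse roundTripped =
    ModelReaderWriter.Read<CreateTranslationResponse>(payload, json);
```

One quirk worth noting in the generated code: `duration` is written via `Duration.Value.ToString("%s")`, which is the seconds *component* of the `TimeSpan`, and read back with `TimeSpan.FromSeconds`, so only durations expressible as a whole number of seconds under a minute survive a round trip unchanged.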
diff --git a/.dotnet/src/Generated/Models/CreateTranslationResponse.cs b/.dotnet/src/Generated/Models/CreateTranslationResponse.cs
new file mode 100644
index 000000000..1cc325c68
--- /dev/null
+++ b/.dotnet/src/Generated/Models/CreateTranslationResponse.cs
@@ -0,0 +1,94 @@
+// <auto-generated/>
+
+using System;
+using System.Collections.Generic;
+using OpenAI;
+
+namespace OpenAI.Internal.Models
+{
+    /// <summary> The CreateTranslationResponse. </summary>
+ internal partial class CreateTranslationResponse + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The translated text for the provided audio data. + /// is null. + internal CreateTranslationResponse(string text) + { + Argument.AssertNotNull(text, nameof(text)); + + Text = text; + Segments = new ChangeTrackingList(); + } + + /// Initializes a new instance of . + /// The translated text for the provided audio data. + /// The label that describes which operation type generated the accompanying response data. + /// The spoken language that was detected in the audio data. + /// The total duration of the audio processed to produce accompanying translation information. + /// + /// A collection of information about the timing, probabilities, and other detail of each processed + /// audio segment. + /// + /// Keeps track of any properties unknown to the library. + internal CreateTranslationResponse(string text, CreateTranslationResponseTask? task, string language, TimeSpan? duration, IReadOnlyList segments, IDictionary serializedAdditionalRawData) + { + Text = text; + Task = task; + Language = language; + Duration = duration; + Segments = segments; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal CreateTranslationResponse() + { + } + + /// The translated text for the provided audio data. + public string Text { get; } + /// The label that describes which operation type generated the accompanying response data. + public CreateTranslationResponseTask? Task { get; } + /// The spoken language that was detected in the audio data. + public string Language { get; } + /// The total duration of the audio processed to produce accompanying translation information. + public TimeSpan? Duration { get; } + /// + /// A collection of information about the timing, probabilities, and other detail of each processed + /// audio segment. + /// + public IReadOnlyList Segments { get; } + } +} diff --git a/.dotnet/src/Generated/Models/CreateTranslationResponseTask.cs b/.dotnet/src/Generated/Models/CreateTranslationResponseTask.cs new file mode 100644 index 000000000..db1eaf0f3 --- /dev/null +++ b/.dotnet/src/Generated/Models/CreateTranslationResponseTask.cs @@ -0,0 +1,43 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Internal.Models +{ + /// The CreateTranslationResponse_task. + internal readonly partial struct CreateTranslationResponseTask : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public CreateTranslationResponseTask(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string TranslateValue = "translate"; + + /// translate. 
+ public static CreateTranslationResponseTask Translate { get; } = new CreateTranslationResponseTask(TranslateValue); + /// Determines if two values are the same. + public static bool operator ==(CreateTranslationResponseTask left, CreateTranslationResponseTask right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(CreateTranslationResponseTask left, CreateTranslationResponseTask right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator CreateTranslationResponseTask(string value) => new CreateTranslationResponseTask(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is CreateTranslationResponseTask other && Equals(other); + /// + public bool Equals(CreateTranslationResponseTask other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/DeleteAssistantFileResponse.Serialization.cs b/.dotnet/src/Generated/Models/DeleteAssistantFileResponse.Serialization.cs new file mode 100644 index 000000000..95e3075a0 --- /dev/null +++ b/.dotnet/src/Generated/Models/DeleteAssistantFileResponse.Serialization.cs @@ -0,0 +1,146 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class DeleteAssistantFileResponse : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(DeleteAssistantFileResponse)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("id"u8); + writer.WriteStringValue(Id); + writer.WritePropertyName("deleted"u8); + writer.WriteBooleanValue(Deleted); + writer.WritePropertyName("object"u8); + writer.WriteStringValue(Object.ToString()); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + DeleteAssistantFileResponse IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(DeleteAssistantFileResponse)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeDeleteAssistantFileResponse(document.RootElement, options); + } + + internal static DeleteAssistantFileResponse DeserializeDeleteAssistantFileResponse(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string id = default; + bool deleted = default; + DeleteAssistantFileResponseObject @object = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("id"u8)) + { + id = property.Value.GetString(); + continue; + } + if (property.NameEquals("deleted"u8)) + { + deleted = property.Value.GetBoolean(); + continue; + } + if (property.NameEquals("object"u8)) + { + @object = new DeleteAssistantFileResponseObject(property.Value.GetString()); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new DeleteAssistantFileResponse(id, deleted, @object, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(DeleteAssistantFileResponse)} does not support '{options.Format}' format."); + } + } + + DeleteAssistantFileResponse IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeDeleteAssistantFileResponse(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(DeleteAssistantFileResponse)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static DeleteAssistantFileResponse FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeDeleteAssistantFileResponse(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. 
+ internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/DeleteAssistantFileResponse.cs b/.dotnet/src/Generated/Models/DeleteAssistantFileResponse.cs new file mode 100644 index 000000000..c55f79685 --- /dev/null +++ b/.dotnet/src/Generated/Models/DeleteAssistantFileResponse.cs @@ -0,0 +1,84 @@ +// + +using System; +using System.Collections.Generic; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// + /// Deletes the association between the assistant and the file, but does not delete the + /// [File](/docs/api-reference/files) object itself. + /// + internal partial class DeleteAssistantFileResponse + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// + /// is null. + internal DeleteAssistantFileResponse(string id, bool deleted) + { + Argument.AssertNotNull(id, nameof(id)); + + Id = id; + Deleted = deleted; + } + + /// Initializes a new instance of . + /// + /// + /// + /// Keeps track of any properties unknown to the library. + internal DeleteAssistantFileResponse(string id, bool deleted, DeleteAssistantFileResponseObject @object, IDictionary serializedAdditionalRawData) + { + Id = id; + Deleted = deleted; + Object = @object; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal DeleteAssistantFileResponse() + { + } + + /// Gets the id. + public string Id { get; } + /// Gets the deleted. + public bool Deleted { get; } + /// Gets the object. + public DeleteAssistantFileResponseObject Object { get; } = DeleteAssistantFileResponseObject.AssistantFileDeleted; + } +} diff --git a/.dotnet/src/Generated/Models/DeleteAssistantFileResponseObject.cs b/.dotnet/src/Generated/Models/DeleteAssistantFileResponseObject.cs new file mode 100644 index 000000000..d40f946af --- /dev/null +++ b/.dotnet/src/Generated/Models/DeleteAssistantFileResponseObject.cs @@ -0,0 +1,43 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Internal.Models +{ + /// The DeleteAssistantFileResponse_object. + internal readonly partial struct DeleteAssistantFileResponseObject : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public DeleteAssistantFileResponseObject(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string AssistantFileDeletedValue = "assistant.file.deleted"; + + /// assistant.file.deleted. + public static DeleteAssistantFileResponseObject AssistantFileDeleted { get; } = new DeleteAssistantFileResponseObject(AssistantFileDeletedValue); + /// Determines if two values are the same. 
+ public static bool operator ==(DeleteAssistantFileResponseObject left, DeleteAssistantFileResponseObject right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(DeleteAssistantFileResponseObject left, DeleteAssistantFileResponseObject right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator DeleteAssistantFileResponseObject(string value) => new DeleteAssistantFileResponseObject(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is DeleteAssistantFileResponseObject other && Equals(other); + /// + public bool Equals(DeleteAssistantFileResponseObject other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/DeleteAssistantResponse.Serialization.cs b/.dotnet/src/Generated/Models/DeleteAssistantResponse.Serialization.cs new file mode 100644 index 000000000..3cb6f9c8a --- /dev/null +++ b/.dotnet/src/Generated/Models/DeleteAssistantResponse.Serialization.cs @@ -0,0 +1,146 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class DeleteAssistantResponse : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(DeleteAssistantResponse)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("id"u8); + writer.WriteStringValue(Id); + writer.WritePropertyName("deleted"u8); + writer.WriteBooleanValue(Deleted); + writer.WritePropertyName("object"u8); + writer.WriteStringValue(Object.ToString()); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + DeleteAssistantResponse IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(DeleteAssistantResponse)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeDeleteAssistantResponse(document.RootElement, options); + } + + internal static DeleteAssistantResponse DeserializeDeleteAssistantResponse(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string id = default; + bool deleted = default; + DeleteAssistantResponseObject @object = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("id"u8)) + { + id = property.Value.GetString(); + continue; + } + if (property.NameEquals("deleted"u8)) + { + deleted = property.Value.GetBoolean(); + continue; + } + if (property.NameEquals("object"u8)) + { + @object = new DeleteAssistantResponseObject(property.Value.GetString()); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new DeleteAssistantResponse(id, deleted, @object, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(DeleteAssistantResponse)} does not support '{options.Format}' format."); + } + } + + DeleteAssistantResponse IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeDeleteAssistantResponse(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(DeleteAssistantResponse)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static DeleteAssistantResponse FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeDeleteAssistantResponse(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/DeleteAssistantResponse.cs b/.dotnet/src/Generated/Models/DeleteAssistantResponse.cs new file mode 100644 index 000000000..0648ca31e --- /dev/null +++ b/.dotnet/src/Generated/Models/DeleteAssistantResponse.cs @@ -0,0 +1,81 @@ +// + +using System; +using System.Collections.Generic; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// The DeleteAssistantResponse. 
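Every one of these generated models carries a `_serializedAdditionalRawData` dictionary: under any format other than `"W"`, JSON properties the library does not model are captured on read and re-emitted on write. A sketch of that passthrough, using the `DeleteAssistantResponse` model declared just below (the `asst_123` ID and the `extra` property are hypothetical):

```csharp
using System.ClientModel.Primitives;

BinaryData data = BinaryData.FromString(
    "{\"id\":\"asst_123\",\"deleted\":true,\"object\":\"assistant.deleted\",\"extra\":42}");

ModelReaderWriterOptions json = new("J");
DeleteAssistantResponse response =
    ModelReaderWriter.Read<DeleteAssistantResponse>(data, json);

// Under "J" the unmodeled "extra" member was captured into
// _serializedAdditionalRawData, so writing the model back re-emits it.
BinaryData echoed = ModelReaderWriter.Write(response, json);
```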
+ internal partial class DeleteAssistantResponse + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// + /// is null. + internal DeleteAssistantResponse(string id, bool deleted) + { + Argument.AssertNotNull(id, nameof(id)); + + Id = id; + Deleted = deleted; + } + + /// Initializes a new instance of . + /// + /// + /// + /// Keeps track of any properties unknown to the library. + internal DeleteAssistantResponse(string id, bool deleted, DeleteAssistantResponseObject @object, IDictionary serializedAdditionalRawData) + { + Id = id; + Deleted = deleted; + Object = @object; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal DeleteAssistantResponse() + { + } + + /// Gets the id. + public string Id { get; } + /// Gets the deleted. + public bool Deleted { get; } + /// Gets the object. + public DeleteAssistantResponseObject Object { get; } = DeleteAssistantResponseObject.AssistantDeleted; + } +} diff --git a/.dotnet/src/Generated/Models/DeleteAssistantResponseObject.cs b/.dotnet/src/Generated/Models/DeleteAssistantResponseObject.cs new file mode 100644 index 000000000..acc386d38 --- /dev/null +++ b/.dotnet/src/Generated/Models/DeleteAssistantResponseObject.cs @@ -0,0 +1,43 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Internal.Models +{ + /// The DeleteAssistantResponse_object. + internal readonly partial struct DeleteAssistantResponseObject : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public DeleteAssistantResponseObject(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string AssistantDeletedValue = "assistant.deleted"; + + /// assistant.deleted. + public static DeleteAssistantResponseObject AssistantDeleted { get; } = new DeleteAssistantResponseObject(AssistantDeletedValue); + /// Determines if two values are the same. + public static bool operator ==(DeleteAssistantResponseObject left, DeleteAssistantResponseObject right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(DeleteAssistantResponseObject left, DeleteAssistantResponseObject right) => !left.Equals(right); + /// Converts a string to a . 
+ public static implicit operator DeleteAssistantResponseObject(string value) => new DeleteAssistantResponseObject(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is DeleteAssistantResponseObject other && Equals(other); + /// + public bool Equals(DeleteAssistantResponseObject other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/DeleteFileResponse.Serialization.cs b/.dotnet/src/Generated/Models/DeleteFileResponse.Serialization.cs new file mode 100644 index 000000000..dab447883 --- /dev/null +++ b/.dotnet/src/Generated/Models/DeleteFileResponse.Serialization.cs @@ -0,0 +1,146 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class DeleteFileResponse : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(DeleteFileResponse)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("id"u8); + writer.WriteStringValue(Id); + writer.WritePropertyName("object"u8); + writer.WriteStringValue(Object.ToString()); + writer.WritePropertyName("deleted"u8); + writer.WriteBooleanValue(Deleted); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + DeleteFileResponse IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(DeleteFileResponse)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeDeleteFileResponse(document.RootElement, options); + } + + internal static DeleteFileResponse DeserializeDeleteFileResponse(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string id = default; + DeleteFileResponseObject @object = default; + bool deleted = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("id"u8)) + { + id = property.Value.GetString(); + continue; + } + if (property.NameEquals("object"u8)) + { + @object = new DeleteFileResponseObject(property.Value.GetString()); + continue; + } + if (property.NameEquals("deleted"u8)) + { + deleted = property.Value.GetBoolean(); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new DeleteFileResponse(id, @object, deleted, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(DeleteFileResponse)} does not support '{options.Format}' format."); + } + } + + DeleteFileResponse IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeDeleteFileResponse(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(DeleteFileResponse)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static DeleteFileResponse FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeDeleteFileResponse(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/DeleteFileResponse.cs b/.dotnet/src/Generated/Models/DeleteFileResponse.cs new file mode 100644 index 000000000..28dd2df8c --- /dev/null +++ b/.dotnet/src/Generated/Models/DeleteFileResponse.cs @@ -0,0 +1,82 @@ +// + +using System; +using System.Collections.Generic; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// The DeleteFileResponse. 
+ internal partial class DeleteFileResponse + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// + /// is null. + internal DeleteFileResponse(string id, bool deleted) + { + Argument.AssertNotNull(id, nameof(id)); + + Id = id; + Deleted = deleted; + } + + /// Initializes a new instance of . + /// + /// + /// + /// Keeps track of any properties unknown to the library. + internal DeleteFileResponse(string id, DeleteFileResponseObject @object, bool deleted, IDictionary serializedAdditionalRawData) + { + Id = id; + Object = @object; + Deleted = deleted; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal DeleteFileResponse() + { + } + + /// Gets the id. + public string Id { get; } + /// Gets the object. + public DeleteFileResponseObject Object { get; } = DeleteFileResponseObject.File; + + /// Gets the deleted. + public bool Deleted { get; } + } +} diff --git a/.dotnet/src/Generated/Models/DeleteFileResponseObject.cs b/.dotnet/src/Generated/Models/DeleteFileResponseObject.cs new file mode 100644 index 000000000..777211698 --- /dev/null +++ b/.dotnet/src/Generated/Models/DeleteFileResponseObject.cs @@ -0,0 +1,43 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Internal.Models +{ + /// The DeleteFileResponse_object. + internal readonly partial struct DeleteFileResponseObject : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public DeleteFileResponseObject(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string FileValue = "file"; + + /// file. + public static DeleteFileResponseObject File { get; } = new DeleteFileResponseObject(FileValue); + /// Determines if two values are the same. + public static bool operator ==(DeleteFileResponseObject left, DeleteFileResponseObject right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(DeleteFileResponseObject left, DeleteFileResponseObject right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator DeleteFileResponseObject(string value) => new DeleteFileResponseObject(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is DeleteFileResponseObject other && Equals(other); + /// + public bool Equals(DeleteFileResponseObject other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 
0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/DeleteModelResponse.Serialization.cs b/.dotnet/src/Generated/Models/DeleteModelResponse.Serialization.cs new file mode 100644 index 000000000..a5c786b8d --- /dev/null +++ b/.dotnet/src/Generated/Models/DeleteModelResponse.Serialization.cs @@ -0,0 +1,146 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class DeleteModelResponse : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(DeleteModelResponse)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("id"u8); + writer.WriteStringValue(Id); + writer.WritePropertyName("deleted"u8); + writer.WriteBooleanValue(Deleted); + writer.WritePropertyName("object"u8); + writer.WriteStringValue(Object.ToString()); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + DeleteModelResponse IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(DeleteModelResponse)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeDeleteModelResponse(document.RootElement, options); + } + + internal static DeleteModelResponse DeserializeDeleteModelResponse(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string id = default; + bool deleted = default; + DeleteModelResponseObject @object = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("id"u8)) + { + id = property.Value.GetString(); + continue; + } + if (property.NameEquals("deleted"u8)) + { + deleted = property.Value.GetBoolean(); + continue; + } + if (property.NameEquals("object"u8)) + { + @object = new DeleteModelResponseObject(property.Value.GetString()); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new DeleteModelResponse(id, deleted, @object, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(DeleteModelResponse)} does not support '{options.Format}' format."); + } + } + + DeleteModelResponse IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeDeleteModelResponse(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(DeleteModelResponse)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static DeleteModelResponse FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeDeleteModelResponse(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/DeleteModelResponse.cs b/.dotnet/src/Generated/Models/DeleteModelResponse.cs new file mode 100644 index 000000000..1caef0aa6 --- /dev/null +++ b/.dotnet/src/Generated/Models/DeleteModelResponse.cs @@ -0,0 +1,81 @@ +// + +using System; +using System.Collections.Generic; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// The DeleteModelResponse. + internal partial class DeleteModelResponse + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// + /// is null. + internal DeleteModelResponse(string id, bool deleted) + { + Argument.AssertNotNull(id, nameof(id)); + + Id = id; + Deleted = deleted; + } + + /// Initializes a new instance of . + /// + /// + /// + /// Keeps track of any properties unknown to the library. + internal DeleteModelResponse(string id, bool deleted, DeleteModelResponseObject @object, IDictionary serializedAdditionalRawData) + { + Id = id; + Deleted = deleted; + Object = @object; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal DeleteModelResponse() + { + } + + /// Gets the id. + public string Id { get; } + /// Gets the deleted. + public bool Deleted { get; } + /// Gets the object. 
+ public DeleteModelResponseObject Object { get; } = DeleteModelResponseObject.Model; + } +} diff --git a/.dotnet/src/Generated/Models/DeleteModelResponseObject.cs b/.dotnet/src/Generated/Models/DeleteModelResponseObject.cs new file mode 100644 index 000000000..9a0e654a6 --- /dev/null +++ b/.dotnet/src/Generated/Models/DeleteModelResponseObject.cs @@ -0,0 +1,43 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Internal.Models +{ + /// The DeleteModelResponse_object. + internal readonly partial struct DeleteModelResponseObject : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public DeleteModelResponseObject(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string ModelValue = "model"; + + /// model. + public static DeleteModelResponseObject Model { get; } = new DeleteModelResponseObject(ModelValue); + /// Determines if two values are the same. + public static bool operator ==(DeleteModelResponseObject left, DeleteModelResponseObject right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(DeleteModelResponseObject left, DeleteModelResponseObject right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator DeleteModelResponseObject(string value) => new DeleteModelResponseObject(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is DeleteModelResponseObject other && Equals(other); + /// + public bool Equals(DeleteModelResponseObject other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/DeleteThreadResponse.Serialization.cs b/.dotnet/src/Generated/Models/DeleteThreadResponse.Serialization.cs new file mode 100644 index 000000000..164a4f8bc --- /dev/null +++ b/.dotnet/src/Generated/Models/DeleteThreadResponse.Serialization.cs @@ -0,0 +1,146 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class DeleteThreadResponse : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(DeleteThreadResponse)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("id"u8); + writer.WriteStringValue(Id); + writer.WritePropertyName("deleted"u8); + writer.WriteBooleanValue(Deleted); + writer.WritePropertyName("object"u8); + writer.WriteStringValue(Object.ToString()); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + DeleteThreadResponse IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(DeleteThreadResponse)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeDeleteThreadResponse(document.RootElement, options); + } + + internal static DeleteThreadResponse DeserializeDeleteThreadResponse(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string id = default; + bool deleted = default; + DeleteThreadResponseObject @object = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("id"u8)) + { + id = property.Value.GetString(); + continue; + } + if (property.NameEquals("deleted"u8)) + { + deleted = property.Value.GetBoolean(); + continue; + } + if (property.NameEquals("object"u8)) + { + @object = new DeleteThreadResponseObject(property.Value.GetString()); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new DeleteThreadResponse(id, deleted, @object, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(DeleteThreadResponse)} does not support '{options.Format}' format."); + } + } + + DeleteThreadResponse IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeDeleteThreadResponse(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(DeleteThreadResponse)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static DeleteThreadResponse FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeDeleteThreadResponse(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/DeleteThreadResponse.cs b/.dotnet/src/Generated/Models/DeleteThreadResponse.cs new file mode 100644 index 000000000..934684af3 --- /dev/null +++ b/.dotnet/src/Generated/Models/DeleteThreadResponse.cs @@ -0,0 +1,81 @@ +// + +using System; +using System.Collections.Generic; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// The DeleteThreadResponse. + internal partial class DeleteThreadResponse + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// + /// is null. + internal DeleteThreadResponse(string id, bool deleted) + { + Argument.AssertNotNull(id, nameof(id)); + + Id = id; + Deleted = deleted; + } + + /// Initializes a new instance of . + /// + /// + /// + /// Keeps track of any properties unknown to the library. + internal DeleteThreadResponse(string id, bool deleted, DeleteThreadResponseObject @object, IDictionary serializedAdditionalRawData) + { + Id = id; + Deleted = deleted; + Object = @object; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal DeleteThreadResponse() + { + } + + /// Gets the id. + public string Id { get; } + /// Gets the deleted. + public bool Deleted { get; } + /// Gets the object. + public DeleteThreadResponseObject Object { get; } = DeleteThreadResponseObject.ThreadDeleted; + } +} diff --git a/.dotnet/src/Generated/Models/DeleteThreadResponseObject.cs b/.dotnet/src/Generated/Models/DeleteThreadResponseObject.cs new file mode 100644 index 000000000..5dcbdb507 --- /dev/null +++ b/.dotnet/src/Generated/Models/DeleteThreadResponseObject.cs @@ -0,0 +1,43 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Internal.Models +{ + /// The DeleteThreadResponse_object. 
+ internal readonly partial struct DeleteThreadResponseObject : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public DeleteThreadResponseObject(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string ThreadDeletedValue = "thread.deleted"; + + /// thread.deleted. + public static DeleteThreadResponseObject ThreadDeleted { get; } = new DeleteThreadResponseObject(ThreadDeletedValue); + /// Determines if two values are the same. + public static bool operator ==(DeleteThreadResponseObject left, DeleteThreadResponseObject right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(DeleteThreadResponseObject left, DeleteThreadResponseObject right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator DeleteThreadResponseObject(string value) => new DeleteThreadResponseObject(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is DeleteThreadResponseObject other && Equals(other); + /// + public bool Equals(DeleteThreadResponseObject other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/Embedding.Serialization.cs b/.dotnet/src/Generated/Models/Embedding.Serialization.cs new file mode 100644 index 000000000..92ebec50c --- /dev/null +++ b/.dotnet/src/Generated/Models/Embedding.Serialization.cs @@ -0,0 +1,153 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class Embedding : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(Embedding)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("index"u8); + writer.WriteNumberValue(Index); + writer.WritePropertyName("embedding"u8); +#if NET6_0_OR_GREATER + writer.WriteRawValue(EmbeddingProperty); +#else + using (JsonDocument document = JsonDocument.Parse(EmbeddingProperty)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + writer.WritePropertyName("object"u8); + writer.WriteStringValue(Object.ToString()); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + Embedding IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
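+ // Note on the #if NET6_0_OR_GREATER branches in Write above: WriteRawValue was
+ // added to Utf8JsonWriter in System.Text.Json 6, so older targets fall back to
+ // parsing the buffered JSON and re-serializing the parsed JsonDocument.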
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(Embedding)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeEmbedding(document.RootElement, options); + } + + internal static Embedding DeserializeEmbedding(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + long index = default; + BinaryData embedding = default; + EmbeddingObject @object = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("index"u8)) + { + index = property.Value.GetInt64(); + continue; + } + if (property.NameEquals("embedding"u8)) + { + embedding = BinaryData.FromString(property.Value.GetRawText()); + continue; + } + if (property.NameEquals("object"u8)) + { + @object = new EmbeddingObject(property.Value.GetString()); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new Embedding(index, embedding, @object, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(Embedding)} does not support '{options.Format}' format."); + } + } + + Embedding IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeEmbedding(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(Embedding)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static Embedding FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeEmbedding(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/Embedding.cs b/.dotnet/src/Generated/Models/Embedding.cs new file mode 100644 index 000000000..508a7cd39 --- /dev/null +++ b/.dotnet/src/Generated/Models/Embedding.cs @@ -0,0 +1,128 @@ +// + +using System; +using System.Collections.Generic; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// Represents an embedding vector returned by embedding endpoint. + internal partial class Embedding + { + /// + /// Keeps track of any properties unknown to the library. 
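+ /// During deserialization (outside the "W" wire format), unrecognized JSON
+ /// properties are captured into this dictionary and re-emitted verbatim when the
+ /// model is written again, so round-tripping does not drop service-side additions.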
+ /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The index of the embedding in the list of embeddings. + /// + /// The embedding vector, which is a list of floats. The length of vector depends on the model as + /// listed in the [embedding guide](/docs/guides/embeddings). + /// + /// is null. + internal Embedding(long index, BinaryData embeddingProperty) + { + Argument.AssertNotNull(embeddingProperty, nameof(embeddingProperty)); + + Index = index; + EmbeddingProperty = embeddingProperty; + } + + /// Initializes a new instance of . + /// The index of the embedding in the list of embeddings. + /// + /// The embedding vector, which is a list of floats. The length of vector depends on the model as + /// listed in the [embedding guide](/docs/guides/embeddings). + /// + /// The object type, which is always "embedding". + /// Keeps track of any properties unknown to the library. + internal Embedding(long index, BinaryData embeddingProperty, EmbeddingObject @object, IDictionary serializedAdditionalRawData) + { + Index = index; + EmbeddingProperty = embeddingProperty; + Object = @object; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal Embedding() + { + } + + /// The index of the embedding in the list of embeddings. + public long Index { get; } + /// + /// The embedding vector, which is a list of floats. The length of vector depends on the model as + /// listed in the [embedding guide](/docs/guides/embeddings). + /// + /// To assign an object to this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// + /// Supported types: + /// + /// + /// where T is of type + /// + /// + /// + /// + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + public BinaryData EmbeddingProperty { get; } + /// The object type, which is always "embedding". + public EmbeddingObject Object { get; } = EmbeddingObject.Embedding; + } +} diff --git a/.dotnet/src/Generated/Models/EmbeddingObject.cs b/.dotnet/src/Generated/Models/EmbeddingObject.cs new file mode 100644 index 000000000..6f4bac9bd --- /dev/null +++ b/.dotnet/src/Generated/Models/EmbeddingObject.cs @@ -0,0 +1,43 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Internal.Models +{ + /// The Embedding_object. + internal readonly partial struct EmbeddingObject : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. 
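+ // Related note on Embedding.EmbeddingProperty (declared above): the vector is
+ // surfaced as BinaryData because the service can return it either as a JSON array
+ // of floats or as a base64-encoded string. A minimal sketch for the JSON-array
+ // case (assumes that response shape):
+ //
+ //   float[] vector = embedding.EmbeddingProperty.ToObjectFromJson<float[]>();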
+ public EmbeddingObject(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string EmbeddingValue = "embedding"; + + /// embedding. + public static EmbeddingObject Embedding { get; } = new EmbeddingObject(EmbeddingValue); + /// Determines if two values are the same. + public static bool operator ==(EmbeddingObject left, EmbeddingObject right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(EmbeddingObject left, EmbeddingObject right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator EmbeddingObject(string value) => new EmbeddingObject(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is EmbeddingObject other && Equals(other); + /// + public bool Equals(EmbeddingObject other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/EmbeddingUsage.Serialization.cs b/.dotnet/src/Generated/Models/EmbeddingUsage.Serialization.cs new file mode 100644 index 000000000..e47db418f --- /dev/null +++ b/.dotnet/src/Generated/Models/EmbeddingUsage.Serialization.cs @@ -0,0 +1,138 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class EmbeddingUsage : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(EmbeddingUsage)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("prompt_tokens"u8); + writer.WriteNumberValue(PromptTokens); + writer.WritePropertyName("total_tokens"u8); + writer.WriteNumberValue(TotalTokens); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + EmbeddingUsage IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(EmbeddingUsage)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeEmbeddingUsage(document.RootElement, options); + } + + internal static EmbeddingUsage DeserializeEmbeddingUsage(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + long promptTokens = default; + long totalTokens = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("prompt_tokens"u8)) + { + promptTokens = property.Value.GetInt64(); + continue; + } + if (property.NameEquals("total_tokens"u8)) + { + totalTokens = property.Value.GetInt64(); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new EmbeddingUsage(promptTokens, totalTokens, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(EmbeddingUsage)} does not support '{options.Format}' format."); + } + } + + EmbeddingUsage IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeEmbeddingUsage(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(EmbeddingUsage)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static EmbeddingUsage FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeEmbeddingUsage(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/EmbeddingUsage.cs b/.dotnet/src/Generated/Models/EmbeddingUsage.cs new file mode 100644 index 000000000..a3c1bf356 --- /dev/null +++ b/.dotnet/src/Generated/Models/EmbeddingUsage.cs @@ -0,0 +1,73 @@ +// + +using System; +using System.Collections.Generic; + +namespace OpenAI.Internal.Models +{ + /// The EmbeddingUsage. + internal partial class EmbeddingUsage + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . 
+ /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The number of tokens used by the prompt. + /// The total number of tokens used by the request. + internal EmbeddingUsage(long promptTokens, long totalTokens) + { + PromptTokens = promptTokens; + TotalTokens = totalTokens; + } + + /// Initializes a new instance of . + /// The number of tokens used by the prompt. + /// The total number of tokens used by the request. + /// Keeps track of any properties unknown to the library. + internal EmbeddingUsage(long promptTokens, long totalTokens, IDictionary serializedAdditionalRawData) + { + PromptTokens = promptTokens; + TotalTokens = totalTokens; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal EmbeddingUsage() + { + } + + /// The number of tokens used by the prompt. + public long PromptTokens { get; } + /// The total number of tokens used by the request. + public long TotalTokens { get; } + } +} diff --git a/.dotnet/src/Generated/Models/FineTuningJob.Serialization.cs b/.dotnet/src/Generated/Models/FineTuningJob.Serialization.cs new file mode 100644 index 000000000..59e0a3592 --- /dev/null +++ b/.dotnet/src/Generated/Models/FineTuningJob.Serialization.cs @@ -0,0 +1,319 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class FineTuningJob : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
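+ // In this writer, nullable schema members (error, fine_tuned_model, finished_at,
+ // trained_tokens, validation_file) are emitted as explicit JSON nulls rather than
+ // omitted, mirroring the fine-tuning job wire schema.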
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(FineTuningJob)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("id"u8); + writer.WriteStringValue(Id); + writer.WritePropertyName("created_at"u8); + writer.WriteNumberValue(CreatedAt, "U"); + if (Error != null) + { + writer.WritePropertyName("error"u8); + writer.WriteObjectValue(Error); + } + else + { + writer.WriteNull("error"); + } + if (FineTunedModel != null) + { + writer.WritePropertyName("fine_tuned_model"u8); + writer.WriteStringValue(FineTunedModel); + } + else + { + writer.WriteNull("fine_tuned_model"); + } + if (FinishedAt != null) + { + writer.WritePropertyName("finished_at"u8); + writer.WriteStringValue(FinishedAt.Value, "O"); + } + else + { + writer.WriteNull("finished_at"); + } + writer.WritePropertyName("hyperparameters"u8); + writer.WriteObjectValue(Hyperparameters); + writer.WritePropertyName("model"u8); + writer.WriteStringValue(Model); + writer.WritePropertyName("object"u8); + writer.WriteStringValue(Object.ToString()); + writer.WritePropertyName("organization_id"u8); + writer.WriteStringValue(OrganizationId); + writer.WritePropertyName("result_files"u8); + writer.WriteStartArray(); + foreach (var item in ResultFiles) + { + writer.WriteStringValue(item); + } + writer.WriteEndArray(); + writer.WritePropertyName("status"u8); + writer.WriteStringValue(Status.ToString()); + if (TrainedTokens != null) + { + writer.WritePropertyName("trained_tokens"u8); + writer.WriteNumberValue(TrainedTokens.Value); + } + else + { + writer.WriteNull("trained_tokens"); + } + writer.WritePropertyName("training_file"u8); + writer.WriteStringValue(TrainingFile); + if (ValidationFile != null) + { + writer.WritePropertyName("validation_file"u8); + writer.WriteStringValue(ValidationFile); + } + else + { + writer.WriteNull("validation_file"); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + FineTuningJob IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(FineTuningJob)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeFineTuningJob(document.RootElement, options); + } + + internal static FineTuningJob DeserializeFineTuningJob(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string id = default; + DateTimeOffset createdAt = default; + FineTuningJobError error = default; + string fineTunedModel = default; + DateTimeOffset? finishedAt = default; + FineTuningJobHyperparameters hyperparameters = default; + string model = default; + FineTuningJobObject @object = default; + string organizationId = default; + IReadOnlyList resultFiles = default; + FineTuningJobStatus status = default; + long? 
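+ // (locals default to null/default and are assigned only when the matching JSON
+ // property is present; created_at round-trips as Unix seconds while finished_at
+ // uses the ISO-8601 "O" format in this generated code)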
trainedTokens = default; + string trainingFile = default; + string validationFile = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("id"u8)) + { + id = property.Value.GetString(); + continue; + } + if (property.NameEquals("created_at"u8)) + { + createdAt = DateTimeOffset.FromUnixTimeSeconds(property.Value.GetInt64()); + continue; + } + if (property.NameEquals("error"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + error = null; + continue; + } + error = FineTuningJobError.DeserializeFineTuningJobError(property.Value, options); + continue; + } + if (property.NameEquals("fine_tuned_model"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + fineTunedModel = null; + continue; + } + fineTunedModel = property.Value.GetString(); + continue; + } + if (property.NameEquals("finished_at"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + finishedAt = null; + continue; + } + finishedAt = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("hyperparameters"u8)) + { + hyperparameters = FineTuningJobHyperparameters.DeserializeFineTuningJobHyperparameters(property.Value, options); + continue; + } + if (property.NameEquals("model"u8)) + { + model = property.Value.GetString(); + continue; + } + if (property.NameEquals("object"u8)) + { + @object = new FineTuningJobObject(property.Value.GetString()); + continue; + } + if (property.NameEquals("organization_id"u8)) + { + organizationId = property.Value.GetString(); + continue; + } + if (property.NameEquals("result_files"u8)) + { + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(item.GetString()); + } + resultFiles = array; + continue; + } + if (property.NameEquals("status"u8)) + { + status = new FineTuningJobStatus(property.Value.GetString()); + continue; + } + if (property.NameEquals("trained_tokens"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + trainedTokens = null; + continue; + } + trainedTokens = property.Value.GetInt64(); + continue; + } + if (property.NameEquals("training_file"u8)) + { + trainingFile = property.Value.GetString(); + continue; + } + if (property.NameEquals("validation_file"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + validationFile = null; + continue; + } + validationFile = property.Value.GetString(); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new FineTuningJob( + id, + createdAt, + error, + fineTunedModel, + finishedAt, + hyperparameters, + model, + @object, + organizationId, + resultFiles, + status, + trainedTokens, + trainingFile, + validationFile, + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(FineTuningJob)} does not support '{options.Format}' format."); + } + } + + FineTuningJob IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeFineTuningJob(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(FineTuningJob)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static FineTuningJob FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeFineTuningJob(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/FineTuningJob.cs b/.dotnet/src/Generated/Models/FineTuningJob.cs new file mode 100644 index 000000000..d0b6dc99c --- /dev/null +++ b/.dotnet/src/Generated/Models/FineTuningJob.cs @@ -0,0 +1,235 @@ +// + +using System; +using System.Collections.Generic; +using System.Linq; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// The FineTuningJob. + internal partial class FineTuningJob + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The object identifier, which can be referenced in the API endpoints. + /// The Unix timestamp (in seconds) for when the fine-tuning job was created. + /// + /// For fine-tuning jobs that have `failed`, this will contain more information on the cause of the + /// failure. + /// + /// + /// The name of the fine-tuned model that is being created. The value will be null if the + /// fine-tuning job is still running. + /// + /// + /// The Unix timestamp (in seconds) for when the fine-tuning job was finished. The value will be + /// null if the fine-tuning job is still running. + /// + /// + /// The hyperparameters used for the fine-tuning job. See the + /// [fine-tuning guide](/docs/guides/fine-tuning) for more details. + /// + /// The base model that is being fine-tuned. + /// The organization that owns the fine-tuning job. + /// + /// The compiled results file ID(s) for the fine-tuning job. You can retrieve the results with the + /// [Files API](/docs/api-reference/files/retrieve-contents). + /// + /// + /// The current status of the fine-tuning job, which can be either `validating_files`, `queued`, + /// `running`, `succeeded`, `failed`, or `cancelled`. + /// + /// + /// The total number of billable tokens processed by this fine-tuning job. The value will be null + /// if the fine-tuning job is still running. 
+ /// + /// + /// The file ID used for training. You can retrieve the training data with the + /// [Files API](/docs/api-reference/files/retrieve-contents). + /// + /// + /// The file ID used for validation. You can retrieve the validation results with the + /// [Files API](/docs/api-reference/files/retrieve-contents). + /// + /// , , , , or is null. + internal FineTuningJob(string id, DateTimeOffset createdAt, FineTuningJobError error, string fineTunedModel, DateTimeOffset? finishedAt, FineTuningJobHyperparameters hyperparameters, string model, string organizationId, IEnumerable resultFiles, FineTuningJobStatus status, long? trainedTokens, string trainingFile, string validationFile) + { + Argument.AssertNotNull(id, nameof(id)); + Argument.AssertNotNull(hyperparameters, nameof(hyperparameters)); + Argument.AssertNotNull(model, nameof(model)); + Argument.AssertNotNull(organizationId, nameof(organizationId)); + Argument.AssertNotNull(resultFiles, nameof(resultFiles)); + Argument.AssertNotNull(trainingFile, nameof(trainingFile)); + + Id = id; + CreatedAt = createdAt; + Error = error; + FineTunedModel = fineTunedModel; + FinishedAt = finishedAt; + Hyperparameters = hyperparameters; + Model = model; + OrganizationId = organizationId; + ResultFiles = resultFiles.ToList(); + Status = status; + TrainedTokens = trainedTokens; + TrainingFile = trainingFile; + ValidationFile = validationFile; + } + + /// Initializes a new instance of . + /// The object identifier, which can be referenced in the API endpoints. + /// The Unix timestamp (in seconds) for when the fine-tuning job was created. + /// + /// For fine-tuning jobs that have `failed`, this will contain more information on the cause of the + /// failure. + /// + /// + /// The name of the fine-tuned model that is being created. The value will be null if the + /// fine-tuning job is still running. + /// + /// + /// The Unix timestamp (in seconds) for when the fine-tuning job was finished. The value will be + /// null if the fine-tuning job is still running. + /// + /// + /// The hyperparameters used for the fine-tuning job. See the + /// [fine-tuning guide](/docs/guides/fine-tuning) for more details. + /// + /// The base model that is being fine-tuned. + /// The object type, which is always "fine_tuning.job". + /// The organization that owns the fine-tuning job. + /// + /// The compiled results file ID(s) for the fine-tuning job. You can retrieve the results with the + /// [Files API](/docs/api-reference/files/retrieve-contents). + /// + /// + /// The current status of the fine-tuning job, which can be either `validating_files`, `queued`, + /// `running`, `succeeded`, `failed`, or `cancelled`. + /// + /// + /// The total number of billable tokens processed by this fine-tuning job. The value will be null + /// if the fine-tuning job is still running. + /// + /// + /// The file ID used for training. You can retrieve the training data with the + /// [Files API](/docs/api-reference/files/retrieve-contents). + /// + /// + /// The file ID used for validation. You can retrieve the validation results with the + /// [Files API](/docs/api-reference/files/retrieve-contents). + /// + /// Keeps track of any properties unknown to the library. + internal FineTuningJob(string id, DateTimeOffset createdAt, FineTuningJobError error, string fineTunedModel, DateTimeOffset? finishedAt, FineTuningJobHyperparameters hyperparameters, string model, FineTuningJobObject @object, string organizationId, IReadOnlyList resultFiles, FineTuningJobStatus status, long? 
trainedTokens, string trainingFile, string validationFile, IDictionary serializedAdditionalRawData) + { + Id = id; + CreatedAt = createdAt; + Error = error; + FineTunedModel = fineTunedModel; + FinishedAt = finishedAt; + Hyperparameters = hyperparameters; + Model = model; + Object = @object; + OrganizationId = organizationId; + ResultFiles = resultFiles; + Status = status; + TrainedTokens = trainedTokens; + TrainingFile = trainingFile; + ValidationFile = validationFile; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal FineTuningJob() + { + } + + /// The object identifier, which can be referenced in the API endpoints. + public string Id { get; } + /// The Unix timestamp (in seconds) for when the fine-tuning job was created. + public DateTimeOffset CreatedAt { get; } + /// + /// For fine-tuning jobs that have `failed`, this will contain more information on the cause of the + /// failure. + /// + public FineTuningJobError Error { get; } + /// + /// The name of the fine-tuned model that is being created. The value will be null if the + /// fine-tuning job is still running. + /// + public string FineTunedModel { get; } + /// + /// The Unix timestamp (in seconds) for when the fine-tuning job was finished. The value will be + /// null if the fine-tuning job is still running. + /// + public DateTimeOffset? FinishedAt { get; } + /// + /// The hyperparameters used for the fine-tuning job. See the + /// [fine-tuning guide](/docs/guides/fine-tuning) for more details. + /// + public FineTuningJobHyperparameters Hyperparameters { get; } + /// The base model that is being fine-tuned. + public string Model { get; } + /// The object type, which is always "fine_tuning.job". + public FineTuningJobObject Object { get; } = FineTuningJobObject.FineTuningJob; + + /// The organization that owns the fine-tuning job. + public string OrganizationId { get; } + /// + /// The compiled results file ID(s) for the fine-tuning job. You can retrieve the results with the + /// [Files API](/docs/api-reference/files/retrieve-contents). + /// + public IReadOnlyList ResultFiles { get; } + /// + /// The current status of the fine-tuning job, which can be either `validating_files`, `queued`, + /// `running`, `succeeded`, `failed`, or `cancelled`. + /// + public FineTuningJobStatus Status { get; } + /// + /// The total number of billable tokens processed by this fine-tuning job. The value will be null + /// if the fine-tuning job is still running. + /// + public long? TrainedTokens { get; } + /// + /// The file ID used for training. You can retrieve the training data with the + /// [Files API](/docs/api-reference/files/retrieve-contents). + /// + public string TrainingFile { get; } + /// + /// The file ID used for validation. You can retrieve the validation results with the + /// [Files API](/docs/api-reference/files/retrieve-contents). 
+ /// + public string ValidationFile { get; } + } +} diff --git a/.dotnet/src/Generated/Models/FineTuningJobError.Serialization.cs b/.dotnet/src/Generated/Models/FineTuningJobError.Serialization.cs new file mode 100644 index 000000000..abf697421 --- /dev/null +++ b/.dotnet/src/Generated/Models/FineTuningJobError.Serialization.cs @@ -0,0 +1,158 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class FineTuningJobError : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(FineTuningJobError)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("code"u8); + writer.WriteStringValue(Code); + writer.WritePropertyName("message"u8); + writer.WriteStringValue(Message); + if (Param != null) + { + writer.WritePropertyName("param"u8); + writer.WriteStringValue(Param); + } + else + { + writer.WriteNull("param"); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + FineTuningJobError IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(FineTuningJobError)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeFineTuningJobError(document.RootElement, options); + } + + internal static FineTuningJobError DeserializeFineTuningJobError(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string code = default; + string message = default; + string param = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("code"u8)) + { + code = property.Value.GetString(); + continue; + } + if (property.NameEquals("message"u8)) + { + message = property.Value.GetString(); + continue; + } + if (property.NameEquals("param"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + param = null; + continue; + } + param = property.Value.GetString(); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new FineTuningJobError(code, message, param, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(FineTuningJobError)} does not support '{options.Format}' format."); + } + } + + FineTuningJobError IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeFineTuningJobError(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(FineTuningJobError)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static FineTuningJobError FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeFineTuningJobError(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/FineTuningJobError.cs b/.dotnet/src/Generated/Models/FineTuningJobError.cs new file mode 100644 index 000000000..d4ee65889 --- /dev/null +++ b/.dotnet/src/Generated/Models/FineTuningJobError.cs @@ -0,0 +1,93 @@ +// + +using System; +using System.Collections.Generic; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// The FineTuningJobError. + internal partial class FineTuningJobError + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// A machine-readable error code. + /// A human-readable error message. + /// + /// The parameter that was invalid, usually `training_file` or `validation_file`. This field will + /// be null if the failure was not parameter-specific. + /// + /// or is null. + internal FineTuningJobError(string code, string message, string param) + { + Argument.AssertNotNull(code, nameof(code)); + Argument.AssertNotNull(message, nameof(message)); + + Code = code; + Message = message; + Param = param; + } + + /// Initializes a new instance of . + /// A machine-readable error code. + /// A human-readable error message. + /// + /// The parameter that was invalid, usually `training_file` or `validation_file`. This field will + /// be null if the failure was not parameter-specific. + /// + /// Keeps track of any properties unknown to the library. 
+ internal FineTuningJobError(string code, string message, string param, IDictionary serializedAdditionalRawData) + { + Code = code; + Message = message; + Param = param; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal FineTuningJobError() + { + } + + /// A machine-readable error code. + public string Code { get; } + /// A human-readable error message. + public string Message { get; } + /// + /// The parameter that was invalid, usually `training_file` or `validation_file`. This field will + /// be null if the failure was not parameter-specific. + /// + public string Param { get; } + } +} diff --git a/.dotnet/src/Generated/Models/FineTuningJobEvent.Serialization.cs b/.dotnet/src/Generated/Models/FineTuningJobEvent.Serialization.cs new file mode 100644 index 000000000..097064e59 --- /dev/null +++ b/.dotnet/src/Generated/Models/FineTuningJobEvent.Serialization.cs @@ -0,0 +1,168 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class FineTuningJobEvent : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(FineTuningJobEvent)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("id"u8); + writer.WriteStringValue(Id); + writer.WritePropertyName("created_at"u8); + writer.WriteNumberValue(CreatedAt, "U"); + writer.WritePropertyName("level"u8); + writer.WriteStringValue(Level.ToString()); + writer.WritePropertyName("message"u8); + writer.WriteStringValue(Message); + writer.WritePropertyName("object"u8); + writer.WriteStringValue(Object.ToString()); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + FineTuningJobEvent IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(FineTuningJobEvent)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeFineTuningJobEvent(document.RootElement, options); + } + + internal static FineTuningJobEvent DeserializeFineTuningJobEvent(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string id = default; + DateTimeOffset createdAt = default; + FineTuningJobEventLevel level = default; + string message = default; + FineTuningJobEventObject @object = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("id"u8)) + { + id = property.Value.GetString(); + continue; + } + if (property.NameEquals("created_at"u8)) + { + createdAt = DateTimeOffset.FromUnixTimeSeconds(property.Value.GetInt64()); + continue; + } + if (property.NameEquals("level"u8)) + { + level = new FineTuningJobEventLevel(property.Value.GetString()); + continue; + } + if (property.NameEquals("message"u8)) + { + message = property.Value.GetString(); + continue; + } + if (property.NameEquals("object"u8)) + { + @object = new FineTuningJobEventObject(property.Value.GetString()); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new FineTuningJobEvent( + id, + createdAt, + level, + message, + @object, + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(FineTuningJobEvent)} does not support '{options.Format}' format."); + } + } + + FineTuningJobEvent IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeFineTuningJobEvent(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(FineTuningJobEvent)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static FineTuningJobEvent FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeFineTuningJobEvent(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. 
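+ /// Utf8JsonRequestBody is an internal BinaryContent helper whose JsonWriter the
+ /// model writes itself into; request-creation code can then assign the result
+ /// directly to PipelineRequest.Content.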
+ internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/FineTuningJobEvent.cs b/.dotnet/src/Generated/Models/FineTuningJobEvent.cs new file mode 100644 index 000000000..2ae386632 --- /dev/null +++ b/.dotnet/src/Generated/Models/FineTuningJobEvent.cs @@ -0,0 +1,94 @@ +// + +using System; +using System.Collections.Generic; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// Fine-tuning job event object. + internal partial class FineTuningJobEvent + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// + /// + /// + /// or is null. + internal FineTuningJobEvent(string id, DateTimeOffset createdAt, FineTuningJobEventLevel level, string message) + { + Argument.AssertNotNull(id, nameof(id)); + Argument.AssertNotNull(message, nameof(message)); + + Id = id; + CreatedAt = createdAt; + Level = level; + Message = message; + } + + /// Initializes a new instance of . + /// + /// + /// + /// + /// + /// Keeps track of any properties unknown to the library. + internal FineTuningJobEvent(string id, DateTimeOffset createdAt, FineTuningJobEventLevel level, string message, FineTuningJobEventObject @object, IDictionary serializedAdditionalRawData) + { + Id = id; + CreatedAt = createdAt; + Level = level; + Message = message; + Object = @object; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal FineTuningJobEvent() + { + } + + /// Gets the id. + public string Id { get; } + /// Gets the created at. + public DateTimeOffset CreatedAt { get; } + /// Gets the level. + public FineTuningJobEventLevel Level { get; } + /// Gets the message. + public string Message { get; } + /// Gets the object. + public FineTuningJobEventObject Object { get; } = FineTuningJobEventObject.FineTuningJobEvent; + } +} diff --git a/.dotnet/src/Generated/Models/FineTuningJobEventLevel.cs b/.dotnet/src/Generated/Models/FineTuningJobEventLevel.cs new file mode 100644 index 000000000..cc9aa24a5 --- /dev/null +++ b/.dotnet/src/Generated/Models/FineTuningJobEventLevel.cs @@ -0,0 +1,49 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Internal.Models +{ + /// Enum for level in FineTuningJobEvent. + internal readonly partial struct FineTuningJobEventLevel : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public FineTuningJobEventLevel(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string InfoValue = "info"; + private const string WarnValue = "warn"; + private const string ErrorValue = "error"; + + /// info. 
+ public static FineTuningJobEventLevel Info { get; } = new FineTuningJobEventLevel(InfoValue); + /// warn. + public static FineTuningJobEventLevel Warn { get; } = new FineTuningJobEventLevel(WarnValue); + /// error. + public static FineTuningJobEventLevel Error { get; } = new FineTuningJobEventLevel(ErrorValue); + /// Determines if two values are the same. + public static bool operator ==(FineTuningJobEventLevel left, FineTuningJobEventLevel right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(FineTuningJobEventLevel left, FineTuningJobEventLevel right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator FineTuningJobEventLevel(string value) => new FineTuningJobEventLevel(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is FineTuningJobEventLevel other && Equals(other); + /// + public bool Equals(FineTuningJobEventLevel other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/FineTuningJobEventObject.cs b/.dotnet/src/Generated/Models/FineTuningJobEventObject.cs new file mode 100644 index 000000000..f73cd4fc1 --- /dev/null +++ b/.dotnet/src/Generated/Models/FineTuningJobEventObject.cs @@ -0,0 +1,43 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Internal.Models +{ + /// The FineTuningJobEvent_object. + internal readonly partial struct FineTuningJobEventObject : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public FineTuningJobEventObject(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string FineTuningJobEventValue = "fine_tuning.job.event"; + + /// fine_tuning.job.event. + public static FineTuningJobEventObject FineTuningJobEvent { get; } = new FineTuningJobEventObject(FineTuningJobEventValue); + /// Determines if two values are the same. + public static bool operator ==(FineTuningJobEventObject left, FineTuningJobEventObject right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(FineTuningJobEventObject left, FineTuningJobEventObject right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator FineTuningJobEventObject(string value) => new FineTuningJobEventObject(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is FineTuningJobEventObject other && Equals(other); + /// + public bool Equals(FineTuningJobEventObject other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 
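+ // note: Equals above compares case-insensitively, while this hash is the default
+ // case-sensitive string hash, so equal-but-differently-cased values can land in
+ // different hash buckets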
0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/FineTuningJobHyperparameters.Serialization.cs b/.dotnet/src/Generated/Models/FineTuningJobHyperparameters.Serialization.cs new file mode 100644 index 000000000..81e7cbb45 --- /dev/null +++ b/.dotnet/src/Generated/Models/FineTuningJobHyperparameters.Serialization.cs @@ -0,0 +1,137 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class FineTuningJobHyperparameters : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(FineTuningJobHyperparameters)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("n_epochs"u8); +#if NET6_0_OR_GREATER + writer.WriteRawValue(NEpochs); +#else + using (JsonDocument document = JsonDocument.Parse(NEpochs)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + FineTuningJobHyperparameters IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(FineTuningJobHyperparameters)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeFineTuningJobHyperparameters(document.RootElement, options); + } + + internal static FineTuningJobHyperparameters DeserializeFineTuningJobHyperparameters(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + BinaryData nEpochs = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("n_epochs"u8)) + { + nEpochs = BinaryData.FromString(property.Value.GetRawText()); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new FineTuningJobHyperparameters(nEpochs, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(FineTuningJobHyperparameters)} does not support '{options.Format}' format."); + } + } + + FineTuningJobHyperparameters IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeFineTuningJobHyperparameters(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(FineTuningJobHyperparameters)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static FineTuningJobHyperparameters FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeFineTuningJobHyperparameters(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/FineTuningJobHyperparameters.cs b/.dotnet/src/Generated/Models/FineTuningJobHyperparameters.cs new file mode 100644 index 000000000..1e4314c88 --- /dev/null +++ b/.dotnet/src/Generated/Models/FineTuningJobHyperparameters.cs @@ -0,0 +1,127 @@ +// + +using System; +using System.Collections.Generic; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// The FineTuningJobHyperparameters. + internal partial class FineTuningJobHyperparameters + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// The number of epochs to train the model for. An epoch refers to one full cycle through the + /// training dataset. + /// + /// "auto" decides the optimal number of epochs based on the size of the dataset. If setting the + /// number manually, we support any number between 1 and 50 epochs. + /// + /// is null. + internal FineTuningJobHyperparameters(BinaryData nEpochs) + { + Argument.AssertNotNull(nEpochs, nameof(nEpochs)); + + NEpochs = nEpochs; + } + + /// Initializes a new instance of . + /// + /// The number of epochs to train the model for. An epoch refers to one full cycle through the + /// training dataset. + /// + /// "auto" decides the optimal number of epochs based on the size of the dataset. If setting the + /// number manually, we support any number between 1 and 50 epochs. 
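Because `n_epochs` can be either the literal string `"auto"` or an integer on the wire, the generated model surfaces it as `BinaryData` rather than a fixed CLR type. A minimal sketch of producing both payload shapes, using only the `System.BinaryData` helpers the doc comments above reference (illustrative only; these internal models are not part of the public surface):

```csharp
using System;

// Illustrative sketch: the two JSON shapes FineTuningJobHyperparameters.NEpochs can carry.
// BinaryData.FromString keeps the text verbatim, so a quoted JSON string stays a string.
BinaryData autoEpochs = BinaryData.FromString("\"auto\""); // serializes as "auto"
BinaryData threeEpochs = BinaryData.FromObjectAsJson(3);   // serializes as 3

Console.WriteLine(autoEpochs.ToString());  // "auto"
Console.WriteLine(threeEpochs.ToString()); // 3
```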
+ /// + /// Keeps track of any properties unknown to the library. + internal FineTuningJobHyperparameters(BinaryData nEpochs, IDictionary serializedAdditionalRawData) + { + NEpochs = nEpochs; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal FineTuningJobHyperparameters() + { + } + + /// + /// The number of epochs to train the model for. An epoch refers to one full cycle through the + /// training dataset. + /// + /// "auto" decides the optimal number of epochs based on the size of the dataset. If setting the + /// number manually, we support any number between 1 and 50 epochs. + /// + /// To assign an object to this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// + /// Supported types: + /// + /// + /// "auto" + /// + /// + /// + /// + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + public BinaryData NEpochs { get; } + } +} diff --git a/.dotnet/src/Generated/Models/FineTuningJobObject.cs b/.dotnet/src/Generated/Models/FineTuningJobObject.cs new file mode 100644 index 000000000..9e718560f --- /dev/null +++ b/.dotnet/src/Generated/Models/FineTuningJobObject.cs @@ -0,0 +1,43 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Internal.Models +{ + /// The FineTuningJob_object. + internal readonly partial struct FineTuningJobObject : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public FineTuningJobObject(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string FineTuningJobValue = "fine_tuning.job"; + + /// fine_tuning.job. + public static FineTuningJobObject FineTuningJob { get; } = new FineTuningJobObject(FineTuningJobValue); + /// Determines if two values are the same. + public static bool operator ==(FineTuningJobObject left, FineTuningJobObject right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(FineTuningJobObject left, FineTuningJobObject right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator FineTuningJobObject(string value) => new FineTuningJobObject(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is FineTuningJobObject other && Equals(other); + /// + public bool Equals(FineTuningJobObject other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 
0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/FineTuningJobStatus.cs b/.dotnet/src/Generated/Models/FineTuningJobStatus.cs new file mode 100644 index 000000000..660c4d469 --- /dev/null +++ b/.dotnet/src/Generated/Models/FineTuningJobStatus.cs @@ -0,0 +1,58 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Internal.Models +{ + /// Enum for status in FineTuningJob. + internal readonly partial struct FineTuningJobStatus : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public FineTuningJobStatus(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string ValidatingFilesValue = "validating_files"; + private const string QueuedValue = "queued"; + private const string RunningValue = "running"; + private const string SucceededValue = "succeeded"; + private const string FailedValue = "failed"; + private const string CancelledValue = "cancelled"; + + /// validating_files. + public static FineTuningJobStatus ValidatingFiles { get; } = new FineTuningJobStatus(ValidatingFilesValue); + /// queued. + public static FineTuningJobStatus Queued { get; } = new FineTuningJobStatus(QueuedValue); + /// running. + public static FineTuningJobStatus Running { get; } = new FineTuningJobStatus(RunningValue); + /// succeeded. + public static FineTuningJobStatus Succeeded { get; } = new FineTuningJobStatus(SucceededValue); + /// failed. + public static FineTuningJobStatus Failed { get; } = new FineTuningJobStatus(FailedValue); + /// cancelled. + public static FineTuningJobStatus Cancelled { get; } = new FineTuningJobStatus(CancelledValue); + /// Determines if two values are the same. + public static bool operator ==(FineTuningJobStatus left, FineTuningJobStatus right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(FineTuningJobStatus left, FineTuningJobStatus right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator FineTuningJobStatus(string value) => new FineTuningJobStatus(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is FineTuningJobStatus other && Equals(other); + /// + public bool Equals(FineTuningJobStatus other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/FunctionObject.Serialization.cs b/.dotnet/src/Generated/Models/FunctionObject.Serialization.cs new file mode 100644 index 000000000..6b0e5e7e5 --- /dev/null +++ b/.dotnet/src/Generated/Models/FunctionObject.Serialization.cs @@ -0,0 +1,156 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class FunctionObject : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(FunctionObject)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsDefined(Description)) + { + writer.WritePropertyName("description"u8); + writer.WriteStringValue(Description); + } + writer.WritePropertyName("name"u8); + writer.WriteStringValue(Name); + if (Optional.IsDefined(Parameters)) + { + writer.WritePropertyName("parameters"u8); + writer.WriteObjectValue(Parameters); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + FunctionObject IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(FunctionObject)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeFunctionObject(document.RootElement, options); + } + + internal static FunctionObject DeserializeFunctionObject(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string description = default; + string name = default; + FunctionParameters parameters = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("description"u8)) + { + description = property.Value.GetString(); + continue; + } + if (property.NameEquals("name"u8)) + { + name = property.Value.GetString(); + continue; + } + if (property.NameEquals("parameters"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + parameters = FunctionParameters.DeserializeFunctionParameters(property.Value, options); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new FunctionObject(description, name, parameters, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(FunctionObject)} does not support '{options.Format}' format."); + } + } + + FunctionObject IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeFunctionObject(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(FunctionObject)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static FunctionObject FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeFunctionObject(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/FunctionObject.cs b/.dotnet/src/Generated/Models/FunctionObject.cs new file mode 100644 index 000000000..f4ecaf182 --- /dev/null +++ b/.dotnet/src/Generated/Models/FunctionObject.cs @@ -0,0 +1,94 @@ +// + +using System; +using System.Collections.Generic; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// The FunctionObject. + internal partial class FunctionObject + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and + /// dashes, with a maximum length of 64. + /// + /// is null. + public FunctionObject(string name) + { + Argument.AssertNotNull(name, nameof(name)); + + Name = name; + } + + /// Initializes a new instance of . + /// + /// A description of what the function does, used by the model to choose when and how to call the + /// function. + /// + /// + /// The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and + /// dashes, with a maximum length of 64. + /// + /// + /// Keeps track of any properties unknown to the library. + internal FunctionObject(string description, string name, FunctionParameters parameters, IDictionary serializedAdditionalRawData) + { + Description = description; + Name = name; + Parameters = parameters; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal FunctionObject() + { + } + + /// + /// A description of what the function does, used by the model to choose when and how to call the + /// function. + /// + public string Description { get; set; } + /// + /// The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and + /// dashes, with a maximum length of 64. 
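Taken together with the `FunctionParameters` type defined later in this diff, `FunctionObject` describes a callable function to the model. A hedged sketch of composing one (both types are internal generated models, so application code would normally reach them through a public convenience layer; the weather function here is purely illustrative):

```csharp
using System;

// Sketch only: compose a function definition the way the generated models allow.
var parameters = new FunctionParameters();
parameters.AdditionalProperties["type"] = BinaryData.FromString("\"object\"");
parameters.AdditionalProperties["properties"] =
    BinaryData.FromString("{\"location\":{\"type\":\"string\"}}");

var function = new FunctionObject("get_current_weather") // a-z, A-Z, 0-9, _ and -, max 64 chars
{
    Description = "Gets the current weather for a location.",
    Parameters = parameters,
};
```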
+ /// + public string Name { get; set; } + /// Gets or sets the parameters. + public FunctionParameters Parameters { get; set; } + } +} diff --git a/.dotnet/src/Generated/Models/FunctionParameters.Serialization.cs b/.dotnet/src/Generated/Models/FunctionParameters.Serialization.cs new file mode 100644 index 000000000..54a182600 --- /dev/null +++ b/.dotnet/src/Generated/Models/FunctionParameters.Serialization.cs @@ -0,0 +1,116 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class FunctionParameters : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(FunctionParameters)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + foreach (var item in AdditionalProperties) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + writer.WriteEndObject(); + } + + FunctionParameters IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(FunctionParameters)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeFunctionParameters(document.RootElement, options); + } + + internal static FunctionParameters DeserializeFunctionParameters(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + IDictionary additionalProperties = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + additionalProperties = additionalPropertiesDictionary; + return new FunctionParameters(additionalProperties); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(FunctionParameters)} does not support '{options.Format}' format."); + } + } + + FunctionParameters IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel<FunctionParameters>)this).GetFormatFromOptions(options) : options.Format;
+
+            switch (format)
+            {
+                case "J":
+                    {
+                        using JsonDocument document = JsonDocument.Parse(data);
+                        return DeserializeFunctionParameters(document.RootElement, options);
+                    }
+                default:
+                    throw new FormatException($"The model {nameof(FunctionParameters)} does not support '{options.Format}' format.");
+            }
+        }
+
+        string IPersistableModel<FunctionParameters>.GetFormatFromOptions(ModelReaderWriterOptions options) => "J";
+
+        /// <summary> Deserializes the model from a raw response. </summary>
+        /// <param name="response"> The result to deserialize the model from. </param>
+        internal static FunctionParameters FromResponse(PipelineResponse response)
+        {
+            using var document = JsonDocument.Parse(response.Content);
+            return DeserializeFunctionParameters(document.RootElement);
+        }
+
+        /// <summary> Convert into a Utf8JsonRequestBody. </summary>
+        internal virtual BinaryContent ToRequestBody()
+        {
+            var content = new Utf8JsonRequestBody();
+            content.JsonWriter.WriteObjectValue(this);
+            return content;
+        }
+    }
+}
diff --git a/.dotnet/src/Generated/Models/FunctionParameters.cs b/.dotnet/src/Generated/Models/FunctionParameters.cs
new file mode 100644
index 000000000..56a1024fb
--- /dev/null
+++ b/.dotnet/src/Generated/Models/FunctionParameters.cs
@@ -0,0 +1,63 @@
+// <auto-generated/>
+
+using System;
+using System.Collections.Generic;
+using OpenAI;
+
+namespace OpenAI.Internal.Models
+{
+    /// <summary>
+    /// The parameters the function accepts, described as a JSON Schema object. See the
+    /// [guide](/docs/guides/gpt/function-calling) for examples, and the
+    /// [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation
+    /// about the format.\n\nTo describe a function that accepts no parameters, provide the value
+    /// `{\"type\": \"object\", \"properties\": {}}`.
+    /// </summary>
+    internal partial class FunctionParameters
+    {
+        /// <summary> Initializes a new instance of <see cref="FunctionParameters"/>. </summary>
+        public FunctionParameters()
+        {
+            AdditionalProperties = new ChangeTrackingDictionary<string, BinaryData>();
+        }
+
+        /// <summary> Initializes a new instance of <see cref="FunctionParameters"/>. </summary>
+        /// <param name="additionalProperties"> Additional Properties. </param>
+        internal FunctionParameters(IDictionary<string, BinaryData> additionalProperties)
+        {
+            AdditionalProperties = additionalProperties;
+        }
+
+        /// <summary>
+        /// Additional Properties.
+        /// <para>
+        /// To assign an object to the value of this property use <see cref="BinaryData.FromObjectAsJson{T}(T, System.Text.Json.JsonSerializerOptions)"/>.
+        /// </para>
+        /// <para>
+        /// To assign an already formatted json string to this property use <see cref="BinaryData.FromString(string)"/>.
+        /// </para>
+        /// <para>
+        /// Examples:
+        /// <list type="bullet">
+        /// <item>
+        /// <term>BinaryData.FromObjectAsJson("foo")</term>
+        /// <description>Creates a payload of "foo".</description>
+        /// </item>
+        /// <item>
+        /// <term>BinaryData.FromString("\"foo\"")</term>
+        /// <description>Creates a payload of "foo".</description>
+        /// </item>
+        /// <item>
+        /// <term>BinaryData.FromObjectAsJson(new { key = "value" })</term>
+        /// <description>Creates a payload of { "key": "value" }.</description>
+        /// </item>
+        /// <item>
+        /// <term>BinaryData.FromString("{\"key\": \"value\"}")</term>
+        /// <description>Creates a payload of { "key": "value" }.</description>
+        /// </item>
+        /// </list>
+        /// </para>
+        /// </summary>
+        public IDictionary<string, BinaryData> AdditionalProperties { get; }
+    }
+}
diff --git a/.dotnet/src/Generated/Models/Image.Serialization.cs b/.dotnet/src/Generated/Models/Image.Serialization.cs
new file mode 100644
index 000000000..7e221fa2c
--- /dev/null
+++ b/.dotnet/src/Generated/Models/Image.Serialization.cs
@@ -0,0 +1,163 @@
+// <auto-generated/>
+
+using System;
+using OpenAI.ClientShared.Internal;
+using System.ClientModel;
+using System.ClientModel.Primitives;
+using System.Collections.Generic;
+using System.Text.Json;
+using OpenAI;
+
+namespace OpenAI.Internal.Models
+{
+    internal partial class Image : IJsonModel<Image>
+    {
+        void IJsonModel<Image>.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options)
+        {
+            var format = options.Format == "W" ?
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(Image)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsDefined(B64Json)) + { + writer.WritePropertyName("b64_json"u8); + writer.WriteBase64StringValue(B64Json.ToArray(), "D"); + } + if (Optional.IsDefined(Url)) + { + writer.WritePropertyName("url"u8); + writer.WriteStringValue(Url.AbsoluteUri); + } + if (Optional.IsDefined(RevisedPrompt)) + { + writer.WritePropertyName("revised_prompt"u8); + writer.WriteStringValue(RevisedPrompt); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + Image IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(Image)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeImage(document.RootElement, options); + } + + internal static Image DeserializeImage(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + BinaryData b64Json = default; + Uri url = default; + string revisedPrompt = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("b64_json"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + b64Json = BinaryData.FromBytes(property.Value.GetBytesFromBase64("D")); + continue; + } + if (property.NameEquals("url"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + url = new Uri(property.Value.GetString()); + continue; + } + if (property.NameEquals("revised_prompt"u8)) + { + revisedPrompt = property.Value.GetString(); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new Image(b64Json, url, revisedPrompt, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(Image)} does not support '{options.Format}' format."); + } + } + + Image IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeImage(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(Image)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static Image FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeImage(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/Image.cs b/.dotnet/src/Generated/Models/Image.cs new file mode 100644 index 000000000..ca021414d --- /dev/null +++ b/.dotnet/src/Generated/Models/Image.cs @@ -0,0 +1,83 @@ +// + +using System; +using System.Collections.Generic; + +namespace OpenAI.Internal.Models +{ + /// Represents the url or the content of an image generated by the OpenAI API. + internal partial class Image + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + internal Image() + { + } + + /// Initializes a new instance of . + /// The base64-encoded JSON of the generated image, if `response_format` is `b64_json`. + /// The URL of the generated image, if `response_format` is `url` (default). + /// The prompt that was used to generate the image, if there was any revision to the prompt. + /// Keeps track of any properties unknown to the library. + internal Image(BinaryData b64Json, Uri url, string revisedPrompt, IDictionary serializedAdditionalRawData) + { + B64Json = b64Json; + Url = url; + RevisedPrompt = revisedPrompt; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// + /// The base64-encoded JSON of the generated image, if `response_format` is `b64_json`. + /// + /// To assign a byte[] to this property use . + /// The byte[] will be serialized to a Base64 encoded string. + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromBytes(new byte[] { 1, 2, 3 }) + /// Creates a payload of "AQID". + /// + /// + /// + /// + public BinaryData B64Json { get; } + /// The URL of the generated image, if `response_format` is `url` (default). + public Uri Url { get; } + /// The prompt that was used to generate the image, if there was any revision to the prompt. 
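Since a generated image arrives either as a URL or as inline base64 bytes depending on the request's `response_format`, a consumer has to branch on which property is populated. A minimal sketch (the `Image` model is internal, so this is illustrative rather than public API; `Save` and its path parameter are hypothetical):

```csharp
using System;
using System.IO;

// Sketch only: persist an Image regardless of which response_format produced it.
static void Save(OpenAI.Internal.Models.Image image, string path)
{
    if (image.B64Json is not null)
    {
        File.WriteAllBytes(path, image.B64Json.ToArray()); // b64_json decodes to raw bytes
    }
    else if (image.Url is not null)
    {
        Console.WriteLine($"Image hosted at: {image.Url.AbsoluteUri}"); // url variant
    }
}
```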
+ public string RevisedPrompt { get; } + } +} diff --git a/.dotnet/src/Generated/Models/ImagesResponse.Serialization.cs b/.dotnet/src/Generated/Models/ImagesResponse.Serialization.cs new file mode 100644 index 000000000..2b30c5157 --- /dev/null +++ b/.dotnet/src/Generated/Models/ImagesResponse.Serialization.cs @@ -0,0 +1,148 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class ImagesResponse : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ImagesResponse)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("created"u8); + writer.WriteNumberValue(Created, "U"); + writer.WritePropertyName("data"u8); + writer.WriteStartArray(); + foreach (var item in Data) + { + writer.WriteObjectValue(item); + } + writer.WriteEndArray(); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + ImagesResponse IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ImagesResponse)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeImagesResponse(document.RootElement, options); + } + + internal static ImagesResponse DeserializeImagesResponse(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + DateTimeOffset created = default; + IReadOnlyList data = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("created"u8)) + { + created = DateTimeOffset.FromUnixTimeSeconds(property.Value.GetInt64()); + continue; + } + if (property.NameEquals("data"u8)) + { + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(Image.DeserializeImage(item, options)); + } + data = array; + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new ImagesResponse(created, data, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(ImagesResponse)} does not support '{options.Format}' format."); + } + } + + ImagesResponse IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeImagesResponse(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(ImagesResponse)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static ImagesResponse FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeImagesResponse(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/ImagesResponse.cs b/.dotnet/src/Generated/Models/ImagesResponse.cs new file mode 100644 index 000000000..bcd67116c --- /dev/null +++ b/.dotnet/src/Generated/Models/ImagesResponse.cs @@ -0,0 +1,78 @@ +// + +using System; +using System.Collections.Generic; +using System.Linq; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// The ImagesResponse. + internal partial class ImagesResponse + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// + /// is null. + internal ImagesResponse(DateTimeOffset created, IEnumerable data) + { + Argument.AssertNotNull(data, nameof(data)); + + Created = created; + Data = data.ToList(); + } + + /// Initializes a new instance of . + /// + /// + /// Keeps track of any properties unknown to the library. + internal ImagesResponse(DateTimeOffset created, IReadOnlyList data, IDictionary serializedAdditionalRawData) + { + Created = created; + Data = data; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal ImagesResponse() + { + } + + /// Gets the created. + public DateTimeOffset Created { get; } + /// Gets the data. 
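The envelope pairs a Unix `created` timestamp (surfaced as `DateTimeOffset` by the deserializer above) with the list of images. A short consumption sketch, again illustrative since the type is internal:

```csharp
using System;

// Sketch only: enumerate an ImagesResponse envelope.
static void Print(ImagesResponse response)
{
    Console.WriteLine($"Generated at {response.Created:u}"); // parsed from Unix seconds
    foreach (var image in response.Data)
    {
        Console.WriteLine(image.Url?.AbsoluteUri ?? "<inline b64_json image>");
    }
}
```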
+ public IReadOnlyList Data { get; } + } +} diff --git a/.dotnet/src/Generated/Models/ListAssistantFilesResponse.Serialization.cs b/.dotnet/src/Generated/Models/ListAssistantFilesResponse.Serialization.cs new file mode 100644 index 000000000..141222ad6 --- /dev/null +++ b/.dotnet/src/Generated/Models/ListAssistantFilesResponse.Serialization.cs @@ -0,0 +1,178 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class ListAssistantFilesResponse : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ListAssistantFilesResponse)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("object"u8); + writer.WriteStringValue(Object.ToString()); + writer.WritePropertyName("data"u8); + writer.WriteStartArray(); + foreach (var item in Data) + { + writer.WriteObjectValue(item); + } + writer.WriteEndArray(); + writer.WritePropertyName("first_id"u8); + writer.WriteStringValue(FirstId); + writer.WritePropertyName("last_id"u8); + writer.WriteStringValue(LastId); + writer.WritePropertyName("has_more"u8); + writer.WriteBooleanValue(HasMore); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + ListAssistantFilesResponse IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ListAssistantFilesResponse)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeListAssistantFilesResponse(document.RootElement, options); + } + + internal static ListAssistantFilesResponse DeserializeListAssistantFilesResponse(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + ListAssistantFilesResponseObject @object = default; + IReadOnlyList data = default; + string firstId = default; + string lastId = default; + bool hasMore = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("object"u8)) + { + @object = new ListAssistantFilesResponseObject(property.Value.GetString()); + continue; + } + if (property.NameEquals("data"u8)) + { + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(AssistantFileObject.DeserializeAssistantFileObject(item, options)); + } + data = array; + continue; + } + if (property.NameEquals("first_id"u8)) + { + firstId = property.Value.GetString(); + continue; + } + if (property.NameEquals("last_id"u8)) + { + lastId = property.Value.GetString(); + continue; + } + if (property.NameEquals("has_more"u8)) + { + hasMore = property.Value.GetBoolean(); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new ListAssistantFilesResponse( + @object, + data, + firstId, + lastId, + hasMore, + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(ListAssistantFilesResponse)} does not support '{options.Format}' format."); + } + } + + ListAssistantFilesResponse IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeListAssistantFilesResponse(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(ListAssistantFilesResponse)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static ListAssistantFilesResponse FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeListAssistantFilesResponse(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. 
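All of these generated models implement `IJsonModel<T>`/`IPersistableModel<T>`, so the same System.ClientModel round-trip works uniformly for each of them. A hedged sketch using `ModelReaderWriter` (internal types shown for illustration only):

```csharp
using System;
using System.ClientModel.Primitives;

// Sketch only: "J" (JSON) is the one format these models support, so
// ModelReaderWriter can serialize and rehydrate any of them the same way.
static ListAssistantFilesResponse RoundTrip(ListAssistantFilesResponse response)
{
    BinaryData json = ModelReaderWriter.Write(response);            // via IJsonModel<T>.Write
    return ModelReaderWriter.Read<ListAssistantFilesResponse>(json); // via IJsonModel<T>.Create
}
```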
+ internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/ListAssistantFilesResponse.cs b/.dotnet/src/Generated/Models/ListAssistantFilesResponse.cs new file mode 100644 index 000000000..54d7af52c --- /dev/null +++ b/.dotnet/src/Generated/Models/ListAssistantFilesResponse.cs @@ -0,0 +1,97 @@ +// + +using System; +using System.Collections.Generic; +using System.Linq; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// The ListAssistantFilesResponse. + internal partial class ListAssistantFilesResponse + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// + /// + /// + /// , or is null. + internal ListAssistantFilesResponse(IEnumerable data, string firstId, string lastId, bool hasMore) + { + Argument.AssertNotNull(data, nameof(data)); + Argument.AssertNotNull(firstId, nameof(firstId)); + Argument.AssertNotNull(lastId, nameof(lastId)); + + Data = data.ToList(); + FirstId = firstId; + LastId = lastId; + HasMore = hasMore; + } + + /// Initializes a new instance of . + /// + /// + /// + /// + /// + /// Keeps track of any properties unknown to the library. + internal ListAssistantFilesResponse(ListAssistantFilesResponseObject @object, IReadOnlyList data, string firstId, string lastId, bool hasMore, IDictionary serializedAdditionalRawData) + { + Object = @object; + Data = data; + FirstId = firstId; + LastId = lastId; + HasMore = hasMore; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal ListAssistantFilesResponse() + { + } + + /// Gets the object. + public ListAssistantFilesResponseObject Object { get; } = ListAssistantFilesResponseObject.List; + + /// Gets the data. + public IReadOnlyList Data { get; } + /// Gets the first id. + public string FirstId { get; } + /// Gets the last id. + public string LastId { get; } + /// Gets the has more. + public bool HasMore { get; } + } +} diff --git a/.dotnet/src/Generated/Models/ListAssistantFilesResponseObject.cs b/.dotnet/src/Generated/Models/ListAssistantFilesResponseObject.cs new file mode 100644 index 000000000..bfb6cd5f6 --- /dev/null +++ b/.dotnet/src/Generated/Models/ListAssistantFilesResponseObject.cs @@ -0,0 +1,43 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Internal.Models +{ + /// The ListAssistantFilesResponse_object. + internal readonly partial struct ListAssistantFilesResponseObject : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public ListAssistantFilesResponseObject(string value) + { + _value = value ?? 
throw new ArgumentNullException(nameof(value)); + } + + private const string ListValue = "list"; + + /// list. + public static ListAssistantFilesResponseObject List { get; } = new ListAssistantFilesResponseObject(ListValue); + /// Determines if two values are the same. + public static bool operator ==(ListAssistantFilesResponseObject left, ListAssistantFilesResponseObject right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(ListAssistantFilesResponseObject left, ListAssistantFilesResponseObject right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator ListAssistantFilesResponseObject(string value) => new ListAssistantFilesResponseObject(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is ListAssistantFilesResponseObject other && Equals(other); + /// + public bool Equals(ListAssistantFilesResponseObject other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/ListAssistantsResponse.Serialization.cs b/.dotnet/src/Generated/Models/ListAssistantsResponse.Serialization.cs new file mode 100644 index 000000000..6377a1c9f --- /dev/null +++ b/.dotnet/src/Generated/Models/ListAssistantsResponse.Serialization.cs @@ -0,0 +1,178 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class ListAssistantsResponse : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ListAssistantsResponse)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("object"u8); + writer.WriteStringValue(Object.ToString()); + writer.WritePropertyName("data"u8); + writer.WriteStartArray(); + foreach (var item in Data) + { + writer.WriteObjectValue(item); + } + writer.WriteEndArray(); + writer.WritePropertyName("first_id"u8); + writer.WriteStringValue(FirstId); + writer.WritePropertyName("last_id"u8); + writer.WriteStringValue(LastId); + writer.WritePropertyName("has_more"u8); + writer.WriteBooleanValue(HasMore); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + ListAssistantsResponse IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ListAssistantsResponse)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeListAssistantsResponse(document.RootElement, options); + } + + internal static ListAssistantsResponse DeserializeListAssistantsResponse(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + ListAssistantsResponseObject @object = default; + IReadOnlyList data = default; + string firstId = default; + string lastId = default; + bool hasMore = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("object"u8)) + { + @object = new ListAssistantsResponseObject(property.Value.GetString()); + continue; + } + if (property.NameEquals("data"u8)) + { + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(AssistantObject.DeserializeAssistantObject(item, options)); + } + data = array; + continue; + } + if (property.NameEquals("first_id"u8)) + { + firstId = property.Value.GetString(); + continue; + } + if (property.NameEquals("last_id"u8)) + { + lastId = property.Value.GetString(); + continue; + } + if (property.NameEquals("has_more"u8)) + { + hasMore = property.Value.GetBoolean(); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new ListAssistantsResponse( + @object, + data, + firstId, + lastId, + hasMore, + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(ListAssistantsResponse)} does not support '{options.Format}' format."); + } + } + + ListAssistantsResponse IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeListAssistantsResponse(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(ListAssistantsResponse)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static ListAssistantsResponse FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeListAssistantsResponse(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. 
+ internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/ListAssistantsResponse.cs b/.dotnet/src/Generated/Models/ListAssistantsResponse.cs new file mode 100644 index 000000000..7a8a094c5 --- /dev/null +++ b/.dotnet/src/Generated/Models/ListAssistantsResponse.cs @@ -0,0 +1,97 @@ +// + +using System; +using System.Collections.Generic; +using System.Linq; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// The ListAssistantsResponse. + internal partial class ListAssistantsResponse + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// + /// + /// + /// , or is null. + internal ListAssistantsResponse(IEnumerable data, string firstId, string lastId, bool hasMore) + { + Argument.AssertNotNull(data, nameof(data)); + Argument.AssertNotNull(firstId, nameof(firstId)); + Argument.AssertNotNull(lastId, nameof(lastId)); + + Data = data.ToList(); + FirstId = firstId; + LastId = lastId; + HasMore = hasMore; + } + + /// Initializes a new instance of . + /// + /// + /// + /// + /// + /// Keeps track of any properties unknown to the library. + internal ListAssistantsResponse(ListAssistantsResponseObject @object, IReadOnlyList data, string firstId, string lastId, bool hasMore, IDictionary serializedAdditionalRawData) + { + Object = @object; + Data = data; + FirstId = firstId; + LastId = lastId; + HasMore = hasMore; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal ListAssistantsResponse() + { + } + + /// Gets the object. + public ListAssistantsResponseObject Object { get; } = ListAssistantsResponseObject.List; + + /// Gets the data. + public IReadOnlyList Data { get; } + /// Gets the first id. + public string FirstId { get; } + /// Gets the last id. + public string LastId { get; } + /// Gets the has more. + public bool HasMore { get; } + } +} diff --git a/.dotnet/src/Generated/Models/ListAssistantsResponseObject.cs b/.dotnet/src/Generated/Models/ListAssistantsResponseObject.cs new file mode 100644 index 000000000..8c9ff2360 --- /dev/null +++ b/.dotnet/src/Generated/Models/ListAssistantsResponseObject.cs @@ -0,0 +1,43 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Internal.Models +{ + /// The ListAssistantsResponse_object. + internal readonly partial struct ListAssistantsResponseObject : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public ListAssistantsResponseObject(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string ListValue = "list"; + + /// list. 
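The `object`/`data`/`first_id`/`last_id`/`has_more` shape above is a standard cursor-pagination envelope; a caller would typically feed `LastId` back as the next request's `after` cursor while `HasMore` is true (an assumption based on the OpenAI list endpoints, not something this generated model enforces). A sketch:

```csharp
using System;

// Sketch only: walking a cursor-paginated list envelope.
static string PrintPage(ListAssistantsResponse page)
{
    foreach (var assistant in page.Data)
    {
        // ... consume each AssistantObject ...
    }
    // When HasMore is true, LastId would serve as the "after" cursor for the next page.
    return page.HasMore ? page.LastId : null;
}
```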
+ public static ListAssistantsResponseObject List { get; } = new ListAssistantsResponseObject(ListValue); + /// Determines if two values are the same. + public static bool operator ==(ListAssistantsResponseObject left, ListAssistantsResponseObject right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(ListAssistantsResponseObject left, ListAssistantsResponseObject right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator ListAssistantsResponseObject(string value) => new ListAssistantsResponseObject(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is ListAssistantsResponseObject other && Equals(other); + /// + public bool Equals(ListAssistantsResponseObject other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/ListFilesResponse.Serialization.cs b/.dotnet/src/Generated/Models/ListFilesResponse.Serialization.cs new file mode 100644 index 000000000..f40f8faac --- /dev/null +++ b/.dotnet/src/Generated/Models/ListFilesResponse.Serialization.cs @@ -0,0 +1,148 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class ListFilesResponse : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ListFilesResponse)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("data"u8); + writer.WriteStartArray(); + foreach (var item in Data) + { + writer.WriteObjectValue(item); + } + writer.WriteEndArray(); + writer.WritePropertyName("object"u8); + writer.WriteStringValue(Object.ToString()); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + ListFilesResponse IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ListFilesResponse)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeListFilesResponse(document.RootElement, options); + } + + internal static ListFilesResponse DeserializeListFilesResponse(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + IReadOnlyList data = default; + ListFilesResponseObject @object = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("data"u8)) + { + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(OpenAIFile.DeserializeOpenAIFile(item, options)); + } + data = array; + continue; + } + if (property.NameEquals("object"u8)) + { + @object = new ListFilesResponseObject(property.Value.GetString()); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new ListFilesResponse(data, @object, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(ListFilesResponse)} does not support '{options.Format}' format."); + } + } + + ListFilesResponse IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeListFilesResponse(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(ListFilesResponse)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static ListFilesResponse FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeListFilesResponse(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/ListFilesResponse.cs b/.dotnet/src/Generated/Models/ListFilesResponse.cs new file mode 100644 index 000000000..caff96df8 --- /dev/null +++ b/.dotnet/src/Generated/Models/ListFilesResponse.cs @@ -0,0 +1,76 @@ +// + +using System; +using System.Collections.Generic; +using System.Linq; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// The ListFilesResponse. 
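+    // Editor's note (inferred from the serializer above, not generated code):
+    // unknown JSON properties are captured into _serializedAdditionalRawData and
+    // re-emitted verbatim on write, but only in the "J" (JSON persistence) format;
+    // the wire format "W" deliberately skips them on both read and write. A sketch:
+    //
+    //     BinaryData json = ModelReaderWriter.Write(model);            // "J": unknown keys re-emitted
+    //     var copy = ModelReaderWriter.Read<ListFilesResponse>(json);  // "J": unknown keys retained
+    //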
+    internal partial class ListFilesResponse
+    {
+        /// <summary>
+        /// Keeps track of any properties unknown to the library.
+        /// <para>
+        /// To assign an object to the value of this property use <see cref="BinaryData.FromObjectAsJson{T}(T, System.Text.Json.JsonSerializerOptions)"/>.
+        /// </para>
+        /// <para>
+        /// To assign an already formatted json string to this property use <see cref="BinaryData.FromString(string)"/>.
+        /// </para>
+        /// <para>
+        /// Examples:
+        /// <list type="bullet">
+        /// <item>
+        /// <term>BinaryData.FromObjectAsJson("foo")</term>
+        /// <description>Creates a payload of "foo".</description>
+        /// </item>
+        /// <item>
+        /// <term>BinaryData.FromString("\"foo\"")</term>
+        /// <description>Creates a payload of "foo".</description>
+        /// </item>
+        /// <item>
+        /// <term>BinaryData.FromObjectAsJson(new { key = "value" })</term>
+        /// <description>Creates a payload of { "key": "value" }.</description>
+        /// </item>
+        /// <item>
+        /// <term>BinaryData.FromString("{\"key\": \"value\"}")</term>
+        /// <description>Creates a payload of { "key": "value" }.</description>
+        /// </item>
+        /// </list>
+        /// </para>
+        /// </summary>
+        private IDictionary<string, BinaryData> _serializedAdditionalRawData;
+
+        /// <summary> Initializes a new instance of <see cref="ListFilesResponse"/>. </summary>
+        /// <param name="data"></param>
+        /// <exception cref="ArgumentNullException"> <paramref name="data"/> is null. </exception>
+        internal ListFilesResponse(IEnumerable<OpenAIFile> data)
+        {
+            Argument.AssertNotNull(data, nameof(data));
+
+            Data = data.ToList();
+        }
+
+        /// <summary> Initializes a new instance of <see cref="ListFilesResponse"/>. </summary>
+        /// <param name="data"></param>
+        /// <param name="object"></param>
+        /// <param name="serializedAdditionalRawData"> Keeps track of any properties unknown to the library. </param>
+        internal ListFilesResponse(IReadOnlyList<OpenAIFile> data, ListFilesResponseObject @object, IDictionary<string, BinaryData> serializedAdditionalRawData)
+        {
+            Data = data;
+            Object = @object;
+            _serializedAdditionalRawData = serializedAdditionalRawData;
+        }
+
+        /// <summary> Initializes a new instance of <see cref="ListFilesResponse"/> for deserialization. </summary>
+        internal ListFilesResponse()
+        {
+        }
+
+        /// <summary> Gets the data. </summary>
+        public IReadOnlyList<OpenAIFile> Data { get; }
+        /// <summary> Gets the object. </summary>
+        public ListFilesResponseObject Object { get; } = ListFilesResponseObject.List;
+    }
+}
diff --git a/.dotnet/src/Generated/Models/ListFilesResponseObject.cs b/.dotnet/src/Generated/Models/ListFilesResponseObject.cs
new file mode 100644
index 000000000..c2a5e6e52
--- /dev/null
+++ b/.dotnet/src/Generated/Models/ListFilesResponseObject.cs
@@ -0,0 +1,43 @@
+// <auto-generated/>
+
+using System;
+using System.ComponentModel;
+
+namespace OpenAI.Internal.Models
+{
+    /// <summary> The ListFilesResponse_object. </summary>
+    internal readonly partial struct ListFilesResponseObject : IEquatable<ListFilesResponseObject>
+    {
+        private readonly string _value;
+
+        /// <summary> Initializes a new instance of <see cref="ListFilesResponseObject"/>. </summary>
+        /// <exception cref="ArgumentNullException"> <paramref name="value"/> is null. </exception>
+        public ListFilesResponseObject(string value)
+        {
+            _value = value ?? throw new ArgumentNullException(nameof(value));
+        }
+
+        private const string ListValue = "list";
+
+        /// <summary> list. </summary>
+        public static ListFilesResponseObject List { get; } = new ListFilesResponseObject(ListValue);
+        /// <summary> Determines if two <see cref="ListFilesResponseObject"/> values are the same. </summary>
+        public static bool operator ==(ListFilesResponseObject left, ListFilesResponseObject right) => left.Equals(right);
+        /// <summary> Determines if two <see cref="ListFilesResponseObject"/> values are not the same. </summary>
+        public static bool operator !=(ListFilesResponseObject left, ListFilesResponseObject right) => !left.Equals(right);
+        /// <summary> Converts a string to a <see cref="ListFilesResponseObject"/>. </summary>
+        public static implicit operator ListFilesResponseObject(string value) => new ListFilesResponseObject(value);
+
+        /// <inheritdoc />
+        [EditorBrowsable(EditorBrowsableState.Never)]
+        public override bool Equals(object obj) => obj is ListFilesResponseObject other && Equals(other);
+        /// <inheritdoc />
+        public bool Equals(ListFilesResponseObject other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase);
+
+        /// <inheritdoc />
+        [EditorBrowsable(EditorBrowsableState.Never)]
+        public override int GetHashCode() => _value?.GetHashCode() ??
0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/ListFineTuningJobEventsResponse.Serialization.cs b/.dotnet/src/Generated/Models/ListFineTuningJobEventsResponse.Serialization.cs new file mode 100644 index 000000000..b8673d973 --- /dev/null +++ b/.dotnet/src/Generated/Models/ListFineTuningJobEventsResponse.Serialization.cs @@ -0,0 +1,148 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class ListFineTuningJobEventsResponse : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ListFineTuningJobEventsResponse)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("data"u8); + writer.WriteStartArray(); + foreach (var item in Data) + { + writer.WriteObjectValue(item); + } + writer.WriteEndArray(); + writer.WritePropertyName("object"u8); + writer.WriteStringValue(Object.ToString()); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + ListFineTuningJobEventsResponse IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ListFineTuningJobEventsResponse)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeListFineTuningJobEventsResponse(document.RootElement, options); + } + + internal static ListFineTuningJobEventsResponse DeserializeListFineTuningJobEventsResponse(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + IReadOnlyList data = default; + ListFineTuningJobEventsResponseObject @object = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("data"u8)) + { + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(FineTuningJobEvent.DeserializeFineTuningJobEvent(item, options)); + } + data = array; + continue; + } + if (property.NameEquals("object"u8)) + { + @object = new ListFineTuningJobEventsResponseObject(property.Value.GetString()); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new ListFineTuningJobEventsResponse(data, @object, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(ListFineTuningJobEventsResponse)} does not support '{options.Format}' format."); + } + } + + ListFineTuningJobEventsResponse IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeListFineTuningJobEventsResponse(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(ListFineTuningJobEventsResponse)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static ListFineTuningJobEventsResponse FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeListFineTuningJobEventsResponse(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. 
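+        // Editor's illustrative sketch (assumed usage, not generated code): the
+        // IJsonModel/IPersistableModel plumbing implemented above is what
+        // System.ClientModel's ModelReaderWriter drives for persistence:
+        //
+        //     BinaryData json = ModelReaderWriter.Write(model);                          // uses "J" by default
+        //     var copy = ModelReaderWriter.Read<ListFineTuningJobEventsResponse>(json);  // full round trip
+        //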
+ internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/ListFineTuningJobEventsResponse.cs b/.dotnet/src/Generated/Models/ListFineTuningJobEventsResponse.cs new file mode 100644 index 000000000..838a80317 --- /dev/null +++ b/.dotnet/src/Generated/Models/ListFineTuningJobEventsResponse.cs @@ -0,0 +1,76 @@ +// + +using System; +using System.Collections.Generic; +using System.Linq; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// The ListFineTuningJobEventsResponse. + internal partial class ListFineTuningJobEventsResponse + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// is null. + internal ListFineTuningJobEventsResponse(IEnumerable data) + { + Argument.AssertNotNull(data, nameof(data)); + + Data = data.ToList(); + } + + /// Initializes a new instance of . + /// + /// + /// Keeps track of any properties unknown to the library. + internal ListFineTuningJobEventsResponse(IReadOnlyList data, ListFineTuningJobEventsResponseObject @object, IDictionary serializedAdditionalRawData) + { + Data = data; + Object = @object; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal ListFineTuningJobEventsResponse() + { + } + + /// Gets the data. + public IReadOnlyList Data { get; } + /// Gets the object. + public ListFineTuningJobEventsResponseObject Object { get; } = ListFineTuningJobEventsResponseObject.List; + } +} diff --git a/.dotnet/src/Generated/Models/ListFineTuningJobEventsResponseObject.cs b/.dotnet/src/Generated/Models/ListFineTuningJobEventsResponseObject.cs new file mode 100644 index 000000000..adbb30c46 --- /dev/null +++ b/.dotnet/src/Generated/Models/ListFineTuningJobEventsResponseObject.cs @@ -0,0 +1,43 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Internal.Models +{ + /// The ListFineTuningJobEventsResponse_object. + internal readonly partial struct ListFineTuningJobEventsResponseObject : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public ListFineTuningJobEventsResponseObject(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string ListValue = "list"; + + /// list. + public static ListFineTuningJobEventsResponseObject List { get; } = new ListFineTuningJobEventsResponseObject(ListValue); + /// Determines if two values are the same. + public static bool operator ==(ListFineTuningJobEventsResponseObject left, ListFineTuningJobEventsResponseObject right) => left.Equals(right); + /// Determines if two values are not the same. 
+ public static bool operator !=(ListFineTuningJobEventsResponseObject left, ListFineTuningJobEventsResponseObject right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator ListFineTuningJobEventsResponseObject(string value) => new ListFineTuningJobEventsResponseObject(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is ListFineTuningJobEventsResponseObject other && Equals(other); + /// + public bool Equals(ListFineTuningJobEventsResponseObject other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/ListMessageFilesResponse.Serialization.cs b/.dotnet/src/Generated/Models/ListMessageFilesResponse.Serialization.cs new file mode 100644 index 000000000..1f412eb8f --- /dev/null +++ b/.dotnet/src/Generated/Models/ListMessageFilesResponse.Serialization.cs @@ -0,0 +1,178 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class ListMessageFilesResponse : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ListMessageFilesResponse)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("object"u8); + writer.WriteStringValue(Object.ToString()); + writer.WritePropertyName("data"u8); + writer.WriteStartArray(); + foreach (var item in Data) + { + writer.WriteObjectValue(item); + } + writer.WriteEndArray(); + writer.WritePropertyName("first_id"u8); + writer.WriteStringValue(FirstId); + writer.WritePropertyName("last_id"u8); + writer.WriteStringValue(LastId); + writer.WritePropertyName("has_more"u8); + writer.WriteBooleanValue(HasMore); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + ListMessageFilesResponse IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ListMessageFilesResponse)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeListMessageFilesResponse(document.RootElement, options); + } + + internal static ListMessageFilesResponse DeserializeListMessageFilesResponse(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + ListMessageFilesResponseObject @object = default; + IReadOnlyList data = default; + string firstId = default; + string lastId = default; + bool hasMore = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("object"u8)) + { + @object = new ListMessageFilesResponseObject(property.Value.GetString()); + continue; + } + if (property.NameEquals("data"u8)) + { + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(MessageFileObject.DeserializeMessageFileObject(item, options)); + } + data = array; + continue; + } + if (property.NameEquals("first_id"u8)) + { + firstId = property.Value.GetString(); + continue; + } + if (property.NameEquals("last_id"u8)) + { + lastId = property.Value.GetString(); + continue; + } + if (property.NameEquals("has_more"u8)) + { + hasMore = property.Value.GetBoolean(); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new ListMessageFilesResponse( + @object, + data, + firstId, + lastId, + hasMore, + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(ListMessageFilesResponse)} does not support '{options.Format}' format."); + } + } + + ListMessageFilesResponse IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeListMessageFilesResponse(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(ListMessageFilesResponse)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static ListMessageFilesResponse FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeListMessageFilesResponse(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. 
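+        // Editor's note: FromResponse and ToRequestBody are the seams between this
+        // model and the HTTP pipeline. A hypothetical call site (names assumed, not
+        // part of this diff) would look roughly like:
+        //
+        //     PipelineResponse raw = /* obtained from the client pipeline */;
+        //     var page = ListMessageFilesResponse.FromResponse(raw);  // wire ("W") deserialization
+        //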
+ internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/ListMessageFilesResponse.cs b/.dotnet/src/Generated/Models/ListMessageFilesResponse.cs new file mode 100644 index 000000000..4884dad96 --- /dev/null +++ b/.dotnet/src/Generated/Models/ListMessageFilesResponse.cs @@ -0,0 +1,97 @@ +// + +using System; +using System.Collections.Generic; +using System.Linq; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// The ListMessageFilesResponse. + internal partial class ListMessageFilesResponse + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// + /// + /// + /// , or is null. + internal ListMessageFilesResponse(IEnumerable data, string firstId, string lastId, bool hasMore) + { + Argument.AssertNotNull(data, nameof(data)); + Argument.AssertNotNull(firstId, nameof(firstId)); + Argument.AssertNotNull(lastId, nameof(lastId)); + + Data = data.ToList(); + FirstId = firstId; + LastId = lastId; + HasMore = hasMore; + } + + /// Initializes a new instance of . + /// + /// + /// + /// + /// + /// Keeps track of any properties unknown to the library. + internal ListMessageFilesResponse(ListMessageFilesResponseObject @object, IReadOnlyList data, string firstId, string lastId, bool hasMore, IDictionary serializedAdditionalRawData) + { + Object = @object; + Data = data; + FirstId = firstId; + LastId = lastId; + HasMore = hasMore; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal ListMessageFilesResponse() + { + } + + /// Gets the object. + public ListMessageFilesResponseObject Object { get; } = ListMessageFilesResponseObject.List; + + /// Gets the data. + public IReadOnlyList Data { get; } + /// Gets the first id. + public string FirstId { get; } + /// Gets the last id. + public string LastId { get; } + /// Gets the has more. + public bool HasMore { get; } + } +} diff --git a/.dotnet/src/Generated/Models/ListMessageFilesResponseObject.cs b/.dotnet/src/Generated/Models/ListMessageFilesResponseObject.cs new file mode 100644 index 000000000..b1eae36d8 --- /dev/null +++ b/.dotnet/src/Generated/Models/ListMessageFilesResponseObject.cs @@ -0,0 +1,43 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Internal.Models +{ + /// The ListMessageFilesResponse_object. + internal readonly partial struct ListMessageFilesResponseObject : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public ListMessageFilesResponseObject(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string ListValue = "list"; + + /// list. 
+ public static ListMessageFilesResponseObject List { get; } = new ListMessageFilesResponseObject(ListValue); + /// Determines if two values are the same. + public static bool operator ==(ListMessageFilesResponseObject left, ListMessageFilesResponseObject right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(ListMessageFilesResponseObject left, ListMessageFilesResponseObject right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator ListMessageFilesResponseObject(string value) => new ListMessageFilesResponseObject(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is ListMessageFilesResponseObject other && Equals(other); + /// + public bool Equals(ListMessageFilesResponseObject other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/ListMessagesResponse.Serialization.cs b/.dotnet/src/Generated/Models/ListMessagesResponse.Serialization.cs new file mode 100644 index 000000000..dcfc3f1c0 --- /dev/null +++ b/.dotnet/src/Generated/Models/ListMessagesResponse.Serialization.cs @@ -0,0 +1,178 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class ListMessagesResponse : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ListMessagesResponse)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("object"u8); + writer.WriteStringValue(Object.ToString()); + writer.WritePropertyName("data"u8); + writer.WriteStartArray(); + foreach (var item in Data) + { + writer.WriteObjectValue(item); + } + writer.WriteEndArray(); + writer.WritePropertyName("first_id"u8); + writer.WriteStringValue(FirstId); + writer.WritePropertyName("last_id"u8); + writer.WriteStringValue(LastId); + writer.WritePropertyName("has_more"u8); + writer.WriteBooleanValue(HasMore); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + ListMessagesResponse IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ListMessagesResponse)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeListMessagesResponse(document.RootElement, options); + } + + internal static ListMessagesResponse DeserializeListMessagesResponse(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + ListMessagesResponseObject @object = default; + IReadOnlyList data = default; + string firstId = default; + string lastId = default; + bool hasMore = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("object"u8)) + { + @object = new ListMessagesResponseObject(property.Value.GetString()); + continue; + } + if (property.NameEquals("data"u8)) + { + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(MessageObject.DeserializeMessageObject(item, options)); + } + data = array; + continue; + } + if (property.NameEquals("first_id"u8)) + { + firstId = property.Value.GetString(); + continue; + } + if (property.NameEquals("last_id"u8)) + { + lastId = property.Value.GetString(); + continue; + } + if (property.NameEquals("has_more"u8)) + { + hasMore = property.Value.GetBoolean(); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new ListMessagesResponse( + @object, + data, + firstId, + lastId, + hasMore, + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(ListMessagesResponse)} does not support '{options.Format}' format."); + } + } + + ListMessagesResponse IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeListMessagesResponse(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(ListMessagesResponse)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static ListMessagesResponse FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeListMessagesResponse(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. 
+ internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/ListMessagesResponse.cs b/.dotnet/src/Generated/Models/ListMessagesResponse.cs new file mode 100644 index 000000000..514c43c95 --- /dev/null +++ b/.dotnet/src/Generated/Models/ListMessagesResponse.cs @@ -0,0 +1,97 @@ +// + +using System; +using System.Collections.Generic; +using System.Linq; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// The ListMessagesResponse. + internal partial class ListMessagesResponse + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// + /// + /// + /// , or is null. + internal ListMessagesResponse(IEnumerable data, string firstId, string lastId, bool hasMore) + { + Argument.AssertNotNull(data, nameof(data)); + Argument.AssertNotNull(firstId, nameof(firstId)); + Argument.AssertNotNull(lastId, nameof(lastId)); + + Data = data.ToList(); + FirstId = firstId; + LastId = lastId; + HasMore = hasMore; + } + + /// Initializes a new instance of . + /// + /// + /// + /// + /// + /// Keeps track of any properties unknown to the library. + internal ListMessagesResponse(ListMessagesResponseObject @object, IReadOnlyList data, string firstId, string lastId, bool hasMore, IDictionary serializedAdditionalRawData) + { + Object = @object; + Data = data; + FirstId = firstId; + LastId = lastId; + HasMore = hasMore; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal ListMessagesResponse() + { + } + + /// Gets the object. + public ListMessagesResponseObject Object { get; } = ListMessagesResponseObject.List; + + /// Gets the data. + public IReadOnlyList Data { get; } + /// Gets the first id. + public string FirstId { get; } + /// Gets the last id. + public string LastId { get; } + /// Gets the has more. + public bool HasMore { get; } + } +} diff --git a/.dotnet/src/Generated/Models/ListMessagesResponseObject.cs b/.dotnet/src/Generated/Models/ListMessagesResponseObject.cs new file mode 100644 index 000000000..a3d4be1eb --- /dev/null +++ b/.dotnet/src/Generated/Models/ListMessagesResponseObject.cs @@ -0,0 +1,43 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Internal.Models +{ + /// The ListMessagesResponse_object. + internal readonly partial struct ListMessagesResponseObject : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public ListMessagesResponseObject(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string ListValue = "list"; + + /// list. 
+ public static ListMessagesResponseObject List { get; } = new ListMessagesResponseObject(ListValue); + /// Determines if two values are the same. + public static bool operator ==(ListMessagesResponseObject left, ListMessagesResponseObject right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(ListMessagesResponseObject left, ListMessagesResponseObject right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator ListMessagesResponseObject(string value) => new ListMessagesResponseObject(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is ListMessagesResponseObject other && Equals(other); + /// + public bool Equals(ListMessagesResponseObject other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/ListModelsResponse.Serialization.cs b/.dotnet/src/Generated/Models/ListModelsResponse.Serialization.cs new file mode 100644 index 000000000..6a5ba3d11 --- /dev/null +++ b/.dotnet/src/Generated/Models/ListModelsResponse.Serialization.cs @@ -0,0 +1,148 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class ListModelsResponse : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ListModelsResponse)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("object"u8); + writer.WriteStringValue(Object.ToString()); + writer.WritePropertyName("data"u8); + writer.WriteStartArray(); + foreach (var item in Data) + { + writer.WriteObjectValue(item); + } + writer.WriteEndArray(); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + ListModelsResponse IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ListModelsResponse)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeListModelsResponse(document.RootElement, options); + } + + internal static ListModelsResponse DeserializeListModelsResponse(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + ListModelsResponseObject @object = default; + IReadOnlyList data = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("object"u8)) + { + @object = new ListModelsResponseObject(property.Value.GetString()); + continue; + } + if (property.NameEquals("data"u8)) + { + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(Model.DeserializeModel(item, options)); + } + data = array; + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new ListModelsResponse(@object, data, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(ListModelsResponse)} does not support '{options.Format}' format."); + } + } + + ListModelsResponse IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeListModelsResponse(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(ListModelsResponse)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static ListModelsResponse FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeListModelsResponse(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/ListModelsResponse.cs b/.dotnet/src/Generated/Models/ListModelsResponse.cs new file mode 100644 index 000000000..6e3ee41af --- /dev/null +++ b/.dotnet/src/Generated/Models/ListModelsResponse.cs @@ -0,0 +1,77 @@ +// + +using System; +using System.Collections.Generic; +using System.Linq; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// The ListModelsResponse. 
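+    // Editor's note: per the serializer above, this model maps a payload shaped as
+    //     { "object": "list", "data": [ { ...model... }, ... ] }
+    // with any other top-level properties preserved as raw JSON in the "J" format.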
+ internal partial class ListModelsResponse + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// is null. + internal ListModelsResponse(IEnumerable data) + { + Argument.AssertNotNull(data, nameof(data)); + + Data = data.ToList(); + } + + /// Initializes a new instance of . + /// + /// + /// Keeps track of any properties unknown to the library. + internal ListModelsResponse(ListModelsResponseObject @object, IReadOnlyList data, IDictionary serializedAdditionalRawData) + { + Object = @object; + Data = data; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal ListModelsResponse() + { + } + + /// Gets the object. + public ListModelsResponseObject Object { get; } = ListModelsResponseObject.List; + + /// Gets the data. + public IReadOnlyList Data { get; } + } +} diff --git a/.dotnet/src/Generated/Models/ListModelsResponseObject.cs b/.dotnet/src/Generated/Models/ListModelsResponseObject.cs new file mode 100644 index 000000000..b9e471c75 --- /dev/null +++ b/.dotnet/src/Generated/Models/ListModelsResponseObject.cs @@ -0,0 +1,43 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Internal.Models +{ + /// The ListModelsResponse_object. + internal readonly partial struct ListModelsResponseObject : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public ListModelsResponseObject(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string ListValue = "list"; + + /// list. + public static ListModelsResponseObject List { get; } = new ListModelsResponseObject(ListValue); + /// Determines if two values are the same. + public static bool operator ==(ListModelsResponseObject left, ListModelsResponseObject right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(ListModelsResponseObject left, ListModelsResponseObject right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator ListModelsResponseObject(string value) => new ListModelsResponseObject(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is ListModelsResponseObject other && Equals(other); + /// + public bool Equals(ListModelsResponseObject other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 
0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/ListOrder.cs b/.dotnet/src/Generated/Models/ListOrder.cs new file mode 100644 index 000000000..9eb4a9780 --- /dev/null +++ b/.dotnet/src/Generated/Models/ListOrder.cs @@ -0,0 +1,46 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Internal.Models +{ + /// The ListOrder. + internal readonly partial struct ListOrder : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public ListOrder(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string AscValue = "asc"; + private const string DescValue = "desc"; + + /// asc. + public static ListOrder Asc { get; } = new ListOrder(AscValue); + /// desc. + public static ListOrder Desc { get; } = new ListOrder(DescValue); + /// Determines if two values are the same. + public static bool operator ==(ListOrder left, ListOrder right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(ListOrder left, ListOrder right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator ListOrder(string value) => new ListOrder(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is ListOrder other && Equals(other); + /// + public bool Equals(ListOrder other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/ListPaginatedFineTuningJobsResponse.Serialization.cs b/.dotnet/src/Generated/Models/ListPaginatedFineTuningJobsResponse.Serialization.cs new file mode 100644 index 000000000..5603812f6 --- /dev/null +++ b/.dotnet/src/Generated/Models/ListPaginatedFineTuningJobsResponse.Serialization.cs @@ -0,0 +1,156 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class ListPaginatedFineTuningJobsResponse : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ListPaginatedFineTuningJobsResponse)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("data"u8); + writer.WriteStartArray(); + foreach (var item in Data) + { + writer.WriteObjectValue(item); + } + writer.WriteEndArray(); + writer.WritePropertyName("has_more"u8); + writer.WriteBooleanValue(HasMore); + writer.WritePropertyName("object"u8); + writer.WriteStringValue(Object.ToString()); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + ListPaginatedFineTuningJobsResponse IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ListPaginatedFineTuningJobsResponse)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeListPaginatedFineTuningJobsResponse(document.RootElement, options); + } + + internal static ListPaginatedFineTuningJobsResponse DeserializeListPaginatedFineTuningJobsResponse(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + IReadOnlyList data = default; + bool hasMore = default; + ListPaginatedFineTuningJobsResponseObject @object = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("data"u8)) + { + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(FineTuningJob.DeserializeFineTuningJob(item, options)); + } + data = array; + continue; + } + if (property.NameEquals("has_more"u8)) + { + hasMore = property.Value.GetBoolean(); + continue; + } + if (property.NameEquals("object"u8)) + { + @object = new ListPaginatedFineTuningJobsResponseObject(property.Value.GetString()); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new ListPaginatedFineTuningJobsResponse(data, hasMore, @object, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(ListPaginatedFineTuningJobsResponse)} does not support '{options.Format}' format."); + } + } + + ListPaginatedFineTuningJobsResponse IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeListPaginatedFineTuningJobsResponse(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(ListPaginatedFineTuningJobsResponse)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static ListPaginatedFineTuningJobsResponse FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeListPaginatedFineTuningJobsResponse(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/ListPaginatedFineTuningJobsResponse.cs b/.dotnet/src/Generated/Models/ListPaginatedFineTuningJobsResponse.cs new file mode 100644 index 000000000..d258099cd --- /dev/null +++ b/.dotnet/src/Generated/Models/ListPaginatedFineTuningJobsResponse.cs @@ -0,0 +1,82 @@ +// + +using System; +using System.Collections.Generic; +using System.Linq; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// The ListPaginatedFineTuningJobsResponse. + internal partial class ListPaginatedFineTuningJobsResponse + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// + /// is null. + internal ListPaginatedFineTuningJobsResponse(IEnumerable data, bool hasMore) + { + Argument.AssertNotNull(data, nameof(data)); + + Data = data.ToList(); + HasMore = hasMore; + } + + /// Initializes a new instance of . + /// + /// + /// + /// Keeps track of any properties unknown to the library. + internal ListPaginatedFineTuningJobsResponse(IReadOnlyList data, bool hasMore, ListPaginatedFineTuningJobsResponseObject @object, IDictionary serializedAdditionalRawData) + { + Data = data; + HasMore = hasMore; + Object = @object; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal ListPaginatedFineTuningJobsResponse() + { + } + + /// Gets the data. + public IReadOnlyList Data { get; } + /// Gets the has more. + public bool HasMore { get; } + /// Gets the object. 
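+        // Editor's note (assumed semantics, not stated in this diff): HasMore
+        // signals that further fine-tuning jobs exist beyond this page; a caller
+        // would re-issue the list request with a continuation cursor (parameter
+        // name assumed) until HasMore is false.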
+ public ListPaginatedFineTuningJobsResponseObject Object { get; } = ListPaginatedFineTuningJobsResponseObject.List; + } +} diff --git a/.dotnet/src/Generated/Models/ListPaginatedFineTuningJobsResponseObject.cs b/.dotnet/src/Generated/Models/ListPaginatedFineTuningJobsResponseObject.cs new file mode 100644 index 000000000..faa11b860 --- /dev/null +++ b/.dotnet/src/Generated/Models/ListPaginatedFineTuningJobsResponseObject.cs @@ -0,0 +1,43 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Internal.Models +{ + /// The ListPaginatedFineTuningJobsResponse_object. + internal readonly partial struct ListPaginatedFineTuningJobsResponseObject : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public ListPaginatedFineTuningJobsResponseObject(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string ListValue = "list"; + + /// list. + public static ListPaginatedFineTuningJobsResponseObject List { get; } = new ListPaginatedFineTuningJobsResponseObject(ListValue); + /// Determines if two values are the same. + public static bool operator ==(ListPaginatedFineTuningJobsResponseObject left, ListPaginatedFineTuningJobsResponseObject right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(ListPaginatedFineTuningJobsResponseObject left, ListPaginatedFineTuningJobsResponseObject right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator ListPaginatedFineTuningJobsResponseObject(string value) => new ListPaginatedFineTuningJobsResponseObject(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is ListPaginatedFineTuningJobsResponseObject other && Equals(other); + /// + public bool Equals(ListPaginatedFineTuningJobsResponseObject other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/ListRunStepsResponse.Serialization.cs b/.dotnet/src/Generated/Models/ListRunStepsResponse.Serialization.cs new file mode 100644 index 000000000..9f429e0fd --- /dev/null +++ b/.dotnet/src/Generated/Models/ListRunStepsResponse.Serialization.cs @@ -0,0 +1,178 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class ListRunStepsResponse : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ListRunStepsResponse)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("object"u8); + writer.WriteStringValue(Object.ToString()); + writer.WritePropertyName("data"u8); + writer.WriteStartArray(); + foreach (var item in Data) + { + writer.WriteObjectValue(item); + } + writer.WriteEndArray(); + writer.WritePropertyName("first_id"u8); + writer.WriteStringValue(FirstId); + writer.WritePropertyName("last_id"u8); + writer.WriteStringValue(LastId); + writer.WritePropertyName("has_more"u8); + writer.WriteBooleanValue(HasMore); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + ListRunStepsResponse IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ListRunStepsResponse)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeListRunStepsResponse(document.RootElement, options); + } + + internal static ListRunStepsResponse DeserializeListRunStepsResponse(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + ListRunStepsResponseObject @object = default; + IReadOnlyList data = default; + string firstId = default; + string lastId = default; + bool hasMore = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("object"u8)) + { + @object = new ListRunStepsResponseObject(property.Value.GetString()); + continue; + } + if (property.NameEquals("data"u8)) + { + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(RunStepObject.DeserializeRunStepObject(item, options)); + } + data = array; + continue; + } + if (property.NameEquals("first_id"u8)) + { + firstId = property.Value.GetString(); + continue; + } + if (property.NameEquals("last_id"u8)) + { + lastId = property.Value.GetString(); + continue; + } + if (property.NameEquals("has_more"u8)) + { + hasMore = property.Value.GetBoolean(); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new ListRunStepsResponse( + @object, + data, + firstId, + lastId, + hasMore, + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(ListRunStepsResponse)} does not support '{options.Format}' format."); + } + } + + ListRunStepsResponse IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeListRunStepsResponse(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(ListRunStepsResponse)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static ListRunStepsResponse FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeListRunStepsResponse(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/ListRunStepsResponse.cs b/.dotnet/src/Generated/Models/ListRunStepsResponse.cs new file mode 100644 index 000000000..393a84b03 --- /dev/null +++ b/.dotnet/src/Generated/Models/ListRunStepsResponse.cs @@ -0,0 +1,97 @@ +// + +using System; +using System.Collections.Generic; +using System.Linq; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// The ListRunStepsResponse. + internal partial class ListRunStepsResponse + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// + /// + /// + /// , or is null. + internal ListRunStepsResponse(IEnumerable data, string firstId, string lastId, bool hasMore) + { + Argument.AssertNotNull(data, nameof(data)); + Argument.AssertNotNull(firstId, nameof(firstId)); + Argument.AssertNotNull(lastId, nameof(lastId)); + + Data = data.ToList(); + FirstId = firstId; + LastId = lastId; + HasMore = hasMore; + } + + /// Initializes a new instance of . + /// + /// + /// + /// + /// + /// Keeps track of any properties unknown to the library. 
+ internal ListRunStepsResponse(ListRunStepsResponseObject @object, IReadOnlyList data, string firstId, string lastId, bool hasMore, IDictionary serializedAdditionalRawData) + { + Object = @object; + Data = data; + FirstId = firstId; + LastId = lastId; + HasMore = hasMore; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal ListRunStepsResponse() + { + } + + /// Gets the object. + public ListRunStepsResponseObject Object { get; } = ListRunStepsResponseObject.List; + + /// Gets the data. + public IReadOnlyList Data { get; } + /// Gets the first id. + public string FirstId { get; } + /// Gets the last id. + public string LastId { get; } + /// Gets the has more. + public bool HasMore { get; } + } +} diff --git a/.dotnet/src/Generated/Models/ListRunStepsResponseObject.cs b/.dotnet/src/Generated/Models/ListRunStepsResponseObject.cs new file mode 100644 index 000000000..98dd2d37c --- /dev/null +++ b/.dotnet/src/Generated/Models/ListRunStepsResponseObject.cs @@ -0,0 +1,43 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Internal.Models +{ + /// The ListRunStepsResponse_object. + internal readonly partial struct ListRunStepsResponseObject : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public ListRunStepsResponseObject(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string ListValue = "list"; + + /// list. + public static ListRunStepsResponseObject List { get; } = new ListRunStepsResponseObject(ListValue); + /// Determines if two values are the same. + public static bool operator ==(ListRunStepsResponseObject left, ListRunStepsResponseObject right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(ListRunStepsResponseObject left, ListRunStepsResponseObject right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator ListRunStepsResponseObject(string value) => new ListRunStepsResponseObject(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is ListRunStepsResponseObject other && Equals(other); + /// + public bool Equals(ListRunStepsResponseObject other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/ListRunsResponse.Serialization.cs b/.dotnet/src/Generated/Models/ListRunsResponse.Serialization.cs new file mode 100644 index 000000000..47d162aba --- /dev/null +++ b/.dotnet/src/Generated/Models/ListRunsResponse.Serialization.cs @@ -0,0 +1,178 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class ListRunsResponse : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ListRunsResponse)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("object"u8); + writer.WriteStringValue(Object.ToString()); + writer.WritePropertyName("data"u8); + writer.WriteStartArray(); + foreach (var item in Data) + { + writer.WriteObjectValue(item); + } + writer.WriteEndArray(); + writer.WritePropertyName("first_id"u8); + writer.WriteStringValue(FirstId); + writer.WritePropertyName("last_id"u8); + writer.WriteStringValue(LastId); + writer.WritePropertyName("has_more"u8); + writer.WriteBooleanValue(HasMore); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + ListRunsResponse IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ListRunsResponse)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeListRunsResponse(document.RootElement, options); + } + + internal static ListRunsResponse DeserializeListRunsResponse(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + ListRunsResponseObject @object = default; + IReadOnlyList data = default; + string firstId = default; + string lastId = default; + bool hasMore = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("object"u8)) + { + @object = new ListRunsResponseObject(property.Value.GetString()); + continue; + } + if (property.NameEquals("data"u8)) + { + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(RunObject.DeserializeRunObject(item, options)); + } + data = array; + continue; + } + if (property.NameEquals("first_id"u8)) + { + firstId = property.Value.GetString(); + continue; + } + if (property.NameEquals("last_id"u8)) + { + lastId = property.Value.GetString(); + continue; + } + if (property.NameEquals("has_more"u8)) + { + hasMore = property.Value.GetBoolean(); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new ListRunsResponse( + @object, + data, + firstId, + lastId, + hasMore, + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(ListRunsResponse)} does not support '{options.Format}' format."); + } + } + + ListRunsResponse IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeListRunsResponse(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(ListRunsResponse)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static ListRunsResponse FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeListRunsResponse(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/ListRunsResponse.cs b/.dotnet/src/Generated/Models/ListRunsResponse.cs new file mode 100644 index 000000000..deb6ee3ba --- /dev/null +++ b/.dotnet/src/Generated/Models/ListRunsResponse.cs @@ -0,0 +1,97 @@ +// + +using System; +using System.Collections.Generic; +using System.Linq; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// The ListRunsResponse. + internal partial class ListRunsResponse + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// + /// + /// + /// , or is null. + internal ListRunsResponse(IEnumerable data, string firstId, string lastId, bool hasMore) + { + Argument.AssertNotNull(data, nameof(data)); + Argument.AssertNotNull(firstId, nameof(firstId)); + Argument.AssertNotNull(lastId, nameof(lastId)); + + Data = data.ToList(); + FirstId = firstId; + LastId = lastId; + HasMore = hasMore; + } + + /// Initializes a new instance of . + /// + /// + /// + /// + /// + /// Keeps track of any properties unknown to the library. + internal ListRunsResponse(ListRunsResponseObject @object, IReadOnlyList data, string firstId, string lastId, bool hasMore, IDictionary serializedAdditionalRawData) + { + Object = @object; + Data = data; + FirstId = firstId; + LastId = lastId; + HasMore = hasMore; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. 
+ internal ListRunsResponse() + { + } + + /// Gets the object. + public ListRunsResponseObject Object { get; } = ListRunsResponseObject.List; + + /// Gets the data. + public IReadOnlyList Data { get; } + /// Gets the first id. + public string FirstId { get; } + /// Gets the last id. + public string LastId { get; } + /// Gets the has more. + public bool HasMore { get; } + } +} diff --git a/.dotnet/src/Generated/Models/ListRunsResponseObject.cs b/.dotnet/src/Generated/Models/ListRunsResponseObject.cs new file mode 100644 index 000000000..f3f0e1a1e --- /dev/null +++ b/.dotnet/src/Generated/Models/ListRunsResponseObject.cs @@ -0,0 +1,43 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Internal.Models +{ + /// The ListRunsResponse_object. + internal readonly partial struct ListRunsResponseObject : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public ListRunsResponseObject(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string ListValue = "list"; + + /// list. + public static ListRunsResponseObject List { get; } = new ListRunsResponseObject(ListValue); + /// Determines if two values are the same. + public static bool operator ==(ListRunsResponseObject left, ListRunsResponseObject right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(ListRunsResponseObject left, ListRunsResponseObject right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator ListRunsResponseObject(string value) => new ListRunsResponseObject(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is ListRunsResponseObject other && Equals(other); + /// + public bool Equals(ListRunsResponseObject other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/MessageFileObject.Serialization.cs b/.dotnet/src/Generated/Models/MessageFileObject.Serialization.cs new file mode 100644 index 000000000..37dd97d7f --- /dev/null +++ b/.dotnet/src/Generated/Models/MessageFileObject.Serialization.cs @@ -0,0 +1,154 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class MessageFileObject : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(MessageFileObject)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("id"u8); + writer.WriteStringValue(Id); + writer.WritePropertyName("object"u8); + writer.WriteStringValue(Object.ToString()); + writer.WritePropertyName("created_at"u8); + writer.WriteNumberValue(CreatedAt, "U"); + writer.WritePropertyName("message_id"u8); + writer.WriteStringValue(MessageId); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + MessageFileObject IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(MessageFileObject)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeMessageFileObject(document.RootElement, options); + } + + internal static MessageFileObject DeserializeMessageFileObject(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string id = default; + MessageFileObjectObject @object = default; + DateTimeOffset createdAt = default; + string messageId = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("id"u8)) + { + id = property.Value.GetString(); + continue; + } + if (property.NameEquals("object"u8)) + { + @object = new MessageFileObjectObject(property.Value.GetString()); + continue; + } + if (property.NameEquals("created_at"u8)) + { + createdAt = DateTimeOffset.FromUnixTimeSeconds(property.Value.GetInt64()); + continue; + } + if (property.NameEquals("message_id"u8)) + { + messageId = property.Value.GetString(); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new MessageFileObject(id, @object, createdAt, messageId, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(MessageFileObject)} does not support '{options.Format}' format."); + } + } + + MessageFileObject IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeMessageFileObject(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(MessageFileObject)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static MessageFileObject FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeMessageFileObject(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/MessageFileObject.cs b/.dotnet/src/Generated/Models/MessageFileObject.cs new file mode 100644 index 000000000..7927dfff5 --- /dev/null +++ b/.dotnet/src/Generated/Models/MessageFileObject.cs @@ -0,0 +1,89 @@ +// + +using System; +using System.Collections.Generic; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// A list of files attached to a `message`. + internal partial class MessageFileObject + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The identifier, which can be referenced in API endpoints. + /// The Unix timestamp (in seconds) for when the message file was created. + /// The ID of the [message](/docs/api-reference/messages) that the [File](/docs/api-reference/files) is attached to. + /// or is null. + internal MessageFileObject(string id, DateTimeOffset createdAt, string messageId) + { + Argument.AssertNotNull(id, nameof(id)); + Argument.AssertNotNull(messageId, nameof(messageId)); + + Id = id; + CreatedAt = createdAt; + MessageId = messageId; + } + + /// Initializes a new instance of . + /// The identifier, which can be referenced in API endpoints. + /// The object type, which is always `thread.message.file`. + /// The Unix timestamp (in seconds) for when the message file was created. + /// The ID of the [message](/docs/api-reference/messages) that the [File](/docs/api-reference/files) is attached to. + /// Keeps track of any properties unknown to the library. + internal MessageFileObject(string id, MessageFileObjectObject @object, DateTimeOffset createdAt, string messageId, IDictionary serializedAdditionalRawData) + { + Id = id; + Object = @object; + CreatedAt = createdAt; + MessageId = messageId; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization.
+ internal MessageFileObject() + { + } + + /// The identifier, which can be referenced in API endpoints. + public string Id { get; } + /// The object type, which is always `thread.message.file`. + public MessageFileObjectObject Object { get; } = MessageFileObjectObject.ThreadMessageFile; + + /// The Unix timestamp (in seconds) for when the message file was created. + public DateTimeOffset CreatedAt { get; } + /// The ID of the [message](/docs/api-reference/messages) that the [File](/docs/api-reference/files) is attached to. + public string MessageId { get; } + } +} diff --git a/.dotnet/src/Generated/Models/MessageFileObjectObject.cs b/.dotnet/src/Generated/Models/MessageFileObjectObject.cs new file mode 100644 index 000000000..19c46e334 --- /dev/null +++ b/.dotnet/src/Generated/Models/MessageFileObjectObject.cs @@ -0,0 +1,43 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Internal.Models +{ + /// The MessageFileObject_object. + internal readonly partial struct MessageFileObjectObject : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public MessageFileObjectObject(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string ThreadMessageFileValue = "thread.message.file"; + + /// thread.message.file. + public static MessageFileObjectObject ThreadMessageFile { get; } = new MessageFileObjectObject(ThreadMessageFileValue); + /// Determines if two values are the same. + public static bool operator ==(MessageFileObjectObject left, MessageFileObjectObject right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(MessageFileObjectObject left, MessageFileObjectObject right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator MessageFileObjectObject(string value) => new MessageFileObjectObject(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is MessageFileObjectObject other && Equals(other); + /// + public bool Equals(MessageFileObjectObject other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/MessageObject.Serialization.cs b/.dotnet/src/Generated/Models/MessageObject.Serialization.cs new file mode 100644 index 000000000..5c0cb8c53 --- /dev/null +++ b/.dotnet/src/Generated/Models/MessageObject.Serialization.cs @@ -0,0 +1,299 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class MessageObject : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(MessageObject)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("id"u8); + writer.WriteStringValue(Id); + writer.WritePropertyName("object"u8); + writer.WriteStringValue(Object.ToString()); + writer.WritePropertyName("created_at"u8); + writer.WriteNumberValue(CreatedAt, "U"); + writer.WritePropertyName("thread_id"u8); + writer.WriteStringValue(ThreadId); + writer.WritePropertyName("role"u8); + writer.WriteStringValue(Role.ToString()); + writer.WritePropertyName("content"u8); + writer.WriteStartArray(); + foreach (var item in Content) + { + if (item == null) + { + writer.WriteNullValue(); + continue; + } +#if NET6_0_OR_GREATER + writer.WriteRawValue(item); +#else + using (JsonDocument document = JsonDocument.Parse(item)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + writer.WriteEndArray(); + if (AssistantId != null) + { + writer.WritePropertyName("assistant_id"u8); + writer.WriteStringValue(AssistantId); + } + else + { + writer.WriteNull("assistant_id"); + } + if (RunId != null) + { + writer.WritePropertyName("run_id"u8); + writer.WriteStringValue(RunId); + } + else + { + writer.WriteNull("run_id"); + } + writer.WritePropertyName("file_ids"u8); + writer.WriteStartArray(); + foreach (var item in FileIds) + { + writer.WriteStringValue(item); + } + writer.WriteEndArray(); + if (Metadata != null && Optional.IsCollectionDefined(Metadata)) + { + writer.WritePropertyName("metadata"u8); + writer.WriteStartObject(); + foreach (var item in Metadata) + { + writer.WritePropertyName(item.Key); + writer.WriteStringValue(item.Value); + } + writer.WriteEndObject(); + } + else + { + writer.WriteNull("metadata"); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + MessageObject IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(MessageObject)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeMessageObject(document.RootElement, options); + } + + internal static MessageObject DeserializeMessageObject(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string id = default; + MessageObjectObject @object = default; + DateTimeOffset createdAt = default; + string threadId = default; + MessageObjectRole role = default; + IReadOnlyList content = default; + string assistantId = default; + string runId = default; + IReadOnlyList fileIds = default; + IReadOnlyDictionary metadata = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("id"u8)) + { + id = property.Value.GetString(); + continue; + } + if (property.NameEquals("object"u8)) + { + @object = new MessageObjectObject(property.Value.GetString()); + continue; + } + if (property.NameEquals("created_at"u8)) + { + createdAt = DateTimeOffset.FromUnixTimeSeconds(property.Value.GetInt64()); + continue; + } + if (property.NameEquals("thread_id"u8)) + { + threadId = property.Value.GetString(); + continue; + } + if (property.NameEquals("role"u8)) + { + role = new MessageObjectRole(property.Value.GetString()); + continue; + } + if (property.NameEquals("content"u8)) + { + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + if (item.ValueKind == JsonValueKind.Null) + { + array.Add(null); + } + else + { + array.Add(BinaryData.FromString(item.GetRawText())); + } + } + content = array; + continue; + } + if (property.NameEquals("assistant_id"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + assistantId = null; + continue; + } + assistantId = property.Value.GetString(); + continue; + } + if (property.NameEquals("run_id"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + runId = null; + continue; + } + runId = property.Value.GetString(); + continue; + } + if (property.NameEquals("file_ids"u8)) + { + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(item.GetString()); + } + fileIds = array; + continue; + } + if (property.NameEquals("metadata"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + metadata = new ChangeTrackingDictionary(); + continue; + } + Dictionary dictionary = new Dictionary(); + foreach (var property0 in property.Value.EnumerateObject()) + { + dictionary.Add(property0.Name, property0.Value.GetString()); + } + metadata = dictionary; + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new MessageObject( + id, + @object, + createdAt, + threadId, + role, + content, + assistantId, + runId, + fileIds, + metadata, + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(MessageObject)} does not support '{options.Format}' format."); + } + } + + MessageObject IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeMessageObject(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(MessageObject)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static MessageObject FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeMessageObject(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/MessageObject.cs b/.dotnet/src/Generated/Models/MessageObject.cs new file mode 100644 index 000000000..e314eaf06 --- /dev/null +++ b/.dotnet/src/Generated/Models/MessageObject.cs @@ -0,0 +1,199 @@ +// + +using System; +using System.Collections.Generic; +using System.Linq; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// The MessageObject. + internal partial class MessageObject + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The identifier, which can be referenced in API endpoints. + /// The Unix timestamp (in seconds) for when the message was created. + /// The [thread](/docs/api-reference/threads) ID that this message belongs to. + /// The entity that produced the message. One of `user` or `assistant`. + /// The content of the message as an array of text and/or images. + /// + /// If applicable, the ID of the [assistant](/docs/api-reference/assistants) that authored this + /// message. + /// + /// + /// If applicable, the ID of the [run](/docs/api-reference/runs) associated with the authoring of + /// this message. + /// + /// + /// A list of [file](/docs/api-reference/files) IDs that the assistant should use. Useful for + /// tools like retrieval and code_interpreter that can access files. A maximum of 10 files can be + /// attached to a message. + /// + /// + /// Set of 16 key-value pairs that can be attached to an object. 
This can be useful for storing + /// additional information about the object in a structured format. Keys can be a maximum of 64 + /// characters long and values can be a maximum of 512 characters long. + /// + /// , , or is null. + internal MessageObject(string id, DateTimeOffset createdAt, string threadId, MessageObjectRole role, IEnumerable content, string assistantId, string runId, IEnumerable fileIds, IReadOnlyDictionary metadata) + { + Argument.AssertNotNull(id, nameof(id)); + Argument.AssertNotNull(threadId, nameof(threadId)); + Argument.AssertNotNull(content, nameof(content)); + Argument.AssertNotNull(fileIds, nameof(fileIds)); + + Id = id; + CreatedAt = createdAt; + ThreadId = threadId; + Role = role; + Content = content.ToList(); + AssistantId = assistantId; + RunId = runId; + FileIds = fileIds.ToList(); + Metadata = metadata; + } + + /// Initializes a new instance of . + /// The identifier, which can be referenced in API endpoints. + /// The object type, which is always `thread.message`. + /// The Unix timestamp (in seconds) for when the message was created. + /// The [thread](/docs/api-reference/threads) ID that this message belongs to. + /// The entity that produced the message. One of `user` or `assistant`. + /// The content of the message as an array of text and/or images. + /// + /// If applicable, the ID of the [assistant](/docs/api-reference/assistants) that authored this + /// message. + /// + /// + /// If applicable, the ID of the [run](/docs/api-reference/runs) associated with the authoring of + /// this message. + /// + /// + /// A list of [file](/docs/api-reference/files) IDs that the assistant should use. Useful for + /// tools like retrieval and code_interpreter that can access files. A maximum of 10 files can be + /// attached to a message. + /// + /// + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + /// additional information about the object in a structured format. Keys can be a maximum of 64 + /// characters long and values can be a maximum of 512 characters long. + /// + /// Keeps track of any properties unknown to the library. + internal MessageObject(string id, MessageObjectObject @object, DateTimeOffset createdAt, string threadId, MessageObjectRole role, IReadOnlyList content, string assistantId, string runId, IReadOnlyList fileIds, IReadOnlyDictionary metadata, IDictionary serializedAdditionalRawData) + { + Id = id; + Object = @object; + CreatedAt = createdAt; + ThreadId = threadId; + Role = role; + Content = content; + AssistantId = assistantId; + RunId = runId; + FileIds = fileIds; + Metadata = metadata; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal MessageObject() + { + } + + /// The identifier, which can be referenced in API endpoints. + public string Id { get; } + /// The object type, which is always `thread.message`. + public MessageObjectObject Object { get; } = MessageObjectObject.ThreadMessage; + + /// The Unix timestamp (in seconds) for when the message was created. + public DateTimeOffset CreatedAt { get; } + /// The [thread](/docs/api-reference/threads) ID that this message belongs to. + public string ThreadId { get; } + /// The entity that produced the message. One of `user` or `assistant`. + public MessageObjectRole Role { get; } + /// + /// The content of the message as an array of text and/or images. + /// + /// To assign an object to the element of this property use . 
+ /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + public IReadOnlyList Content { get; } + /// + /// If applicable, the ID of the [assistant](/docs/api-reference/assistants) that authored this + /// message. + /// + public string AssistantId { get; } + /// + /// If applicable, the ID of the [run](/docs/api-reference/runs) associated with the authoring of + /// this message. + /// + public string RunId { get; } + /// + /// A list of [file](/docs/api-reference/files) IDs that the assistant should use. Useful for + /// tools like retrieval and code_interpreter that can access files. A maximum of 10 files can be + /// attached to a message. + /// + public IReadOnlyList FileIds { get; } + /// + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + /// additional information about the object in a structured format. Keys can be a maximum of 64 + /// characters long and values can be a maximum of 512 characters long. + /// + public IReadOnlyDictionary Metadata { get; } + } +} diff --git a/.dotnet/src/Generated/Models/MessageObjectObject.cs b/.dotnet/src/Generated/Models/MessageObjectObject.cs new file mode 100644 index 000000000..120249ab6 --- /dev/null +++ b/.dotnet/src/Generated/Models/MessageObjectObject.cs @@ -0,0 +1,43 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Internal.Models +{ + /// The MessageObject_object. + internal readonly partial struct MessageObjectObject : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public MessageObjectObject(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string ThreadMessageValue = "thread.message"; + + /// thread.message. + public static MessageObjectObject ThreadMessage { get; } = new MessageObjectObject(ThreadMessageValue); + /// Determines if two values are the same. + public static bool operator ==(MessageObjectObject left, MessageObjectObject right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(MessageObjectObject left, MessageObjectObject right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator MessageObjectObject(string value) => new MessageObjectObject(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is MessageObjectObject other && Equals(other); + /// + public bool Equals(MessageObjectObject other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 
0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/MessageObjectRole.cs b/.dotnet/src/Generated/Models/MessageObjectRole.cs new file mode 100644 index 000000000..1b637beb6 --- /dev/null +++ b/.dotnet/src/Generated/Models/MessageObjectRole.cs @@ -0,0 +1,46 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Internal.Models +{ + /// Enum for role in MessageObject. + internal readonly partial struct MessageObjectRole : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public MessageObjectRole(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string UserValue = "user"; + private const string AssistantValue = "assistant"; + + /// user. + public static MessageObjectRole User { get; } = new MessageObjectRole(UserValue); + /// assistant. + public static MessageObjectRole Assistant { get; } = new MessageObjectRole(AssistantValue); + /// Determines if two values are the same. + public static bool operator ==(MessageObjectRole left, MessageObjectRole right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(MessageObjectRole left, MessageObjectRole right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator MessageObjectRole(string value) => new MessageObjectRole(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is MessageObjectRole other && Equals(other); + /// + public bool Equals(MessageObjectRole other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/Model.Serialization.cs b/.dotnet/src/Generated/Models/Model.Serialization.cs new file mode 100644 index 000000000..5efab5846 --- /dev/null +++ b/.dotnet/src/Generated/Models/Model.Serialization.cs @@ -0,0 +1,154 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class Model : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(Model)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("id"u8); + writer.WriteStringValue(Id); + writer.WritePropertyName("created"u8); + writer.WriteNumberValue(Created, "U"); + writer.WritePropertyName("object"u8); + writer.WriteStringValue(Object.ToString()); + writer.WritePropertyName("owned_by"u8); + writer.WriteStringValue(OwnedBy); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + Model IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(Model)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeModel(document.RootElement, options); + } + + internal static Model DeserializeModel(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string id = default; + DateTimeOffset created = default; + ModelObject @object = default; + string ownedBy = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("id"u8)) + { + id = property.Value.GetString(); + continue; + } + if (property.NameEquals("created"u8)) + { + created = DateTimeOffset.FromUnixTimeSeconds(property.Value.GetInt64()); + continue; + } + if (property.NameEquals("object"u8)) + { + @object = new ModelObject(property.Value.GetString()); + continue; + } + if (property.NameEquals("owned_by"u8)) + { + ownedBy = property.Value.GetString(); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new Model(id, created, @object, ownedBy, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(Model)} does not support '{options.Format}' format."); + } + } + + Model IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeModel(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(Model)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static Model FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeModel(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/Model.cs b/.dotnet/src/Generated/Models/Model.cs new file mode 100644 index 000000000..a15763f6c --- /dev/null +++ b/.dotnet/src/Generated/Models/Model.cs @@ -0,0 +1,89 @@ +// + +using System; +using System.Collections.Generic; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// Describes an OpenAI model offering that can be used with the API. + internal partial class Model + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The model identifier, which can be referenced in the API endpoints. + /// The Unix timestamp (in seconds) when the model was created. + /// The organization that owns the model. + /// or is null. + internal Model(string id, DateTimeOffset created, string ownedBy) + { + Argument.AssertNotNull(id, nameof(id)); + Argument.AssertNotNull(ownedBy, nameof(ownedBy)); + + Id = id; + Created = created; + OwnedBy = ownedBy; + } + + /// Initializes a new instance of . + /// The model identifier, which can be referenced in the API endpoints. + /// The Unix timestamp (in seconds) when the model was created. + /// The object type, which is always "model". + /// The organization that owns the model. + /// Keeps track of any properties unknown to the library. + internal Model(string id, DateTimeOffset created, ModelObject @object, string ownedBy, IDictionary serializedAdditionalRawData) + { + Id = id; + Created = created; + Object = @object; + OwnedBy = ownedBy; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal Model() + { + } + + /// The model identifier, which can be referenced in the API endpoints. + public string Id { get; } + /// The Unix timestamp (in seconds) when the model was created. + public DateTimeOffset Created { get; } + /// The object type, which is always "model". 
+ public ModelObject Object { get; } = ModelObject.Model; + + /// The organization that owns the model. + public string OwnedBy { get; } + } +} diff --git a/.dotnet/src/Generated/Models/ModelObject.cs b/.dotnet/src/Generated/Models/ModelObject.cs new file mode 100644 index 000000000..5971c3917 --- /dev/null +++ b/.dotnet/src/Generated/Models/ModelObject.cs @@ -0,0 +1,43 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Internal.Models +{ + /// The Model_object. + internal readonly partial struct ModelObject : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public ModelObject(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string ModelValue = "model"; + + /// model. + public static ModelObject Model { get; } = new ModelObject(ModelValue); + /// Determines if two values are the same. + public static bool operator ==(ModelObject left, ModelObject right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(ModelObject left, ModelObject right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator ModelObject(string value) => new ModelObject(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is ModelObject other && Equals(other); + /// + public bool Equals(ModelObject other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/ModifyAssistantRequest.Serialization.cs b/.dotnet/src/Generated/Models/ModifyAssistantRequest.Serialization.cs new file mode 100644 index 000000000..9e6843435 --- /dev/null +++ b/.dotnet/src/Generated/Models/ModifyAssistantRequest.Serialization.cs @@ -0,0 +1,312 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class ModifyAssistantRequest : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ModifyAssistantRequest)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsDefined(Model)) + { + writer.WritePropertyName("model"u8); + writer.WriteStringValue(Model); + } + if (Optional.IsDefined(Name)) + { + if (Name != null) + { + writer.WritePropertyName("name"u8); + writer.WriteStringValue(Name); + } + else + { + writer.WriteNull("name"); + } + } + if (Optional.IsDefined(Description)) + { + if (Description != null) + { + writer.WritePropertyName("description"u8); + writer.WriteStringValue(Description); + } + else + { + writer.WriteNull("description"); + } + } + if (Optional.IsDefined(Instructions)) + { + if (Instructions != null) + { + writer.WritePropertyName("instructions"u8); + writer.WriteStringValue(Instructions); + } + else + { + writer.WriteNull("instructions"); + } + } + if (Optional.IsCollectionDefined(Tools)) + { + writer.WritePropertyName("tools"u8); + writer.WriteStartArray(); + foreach (var item in Tools) + { + if (item == null) + { + writer.WriteNullValue(); + continue; + } +#if NET6_0_OR_GREATER + writer.WriteRawValue(item); +#else + using (JsonDocument document = JsonDocument.Parse(item)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + writer.WriteEndArray(); + } + if (Optional.IsCollectionDefined(FileIds)) + { + writer.WritePropertyName("file_ids"u8); + writer.WriteStartArray(); + foreach (var item in FileIds) + { + writer.WriteStringValue(item); + } + writer.WriteEndArray(); + } + if (Optional.IsCollectionDefined(Metadata)) + { + if (Metadata != null) + { + writer.WritePropertyName("metadata"u8); + writer.WriteStartObject(); + foreach (var item in Metadata) + { + writer.WritePropertyName(item.Key); + writer.WriteStringValue(item.Value); + } + writer.WriteEndObject(); + } + else + { + writer.WriteNull("metadata"); + } + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + ModifyAssistantRequest IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ModifyAssistantRequest)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeModifyAssistantRequest(document.RootElement, options); + } + + internal static ModifyAssistantRequest DeserializeModifyAssistantRequest(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string model = default; + string name = default; + string description = default; + string instructions = default; + IList tools = default; + IList fileIds = default; + IDictionary metadata = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("model"u8)) + { + model = property.Value.GetString(); + continue; + } + if (property.NameEquals("name"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + name = null; + continue; + } + name = property.Value.GetString(); + continue; + } + if (property.NameEquals("description"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + description = null; + continue; + } + description = property.Value.GetString(); + continue; + } + if (property.NameEquals("instructions"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + instructions = null; + continue; + } + instructions = property.Value.GetString(); + continue; + } + if (property.NameEquals("tools"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + if (item.ValueKind == JsonValueKind.Null) + { + array.Add(null); + } + else + { + array.Add(BinaryData.FromString(item.GetRawText())); + } + } + tools = array; + continue; + } + if (property.NameEquals("file_ids"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(item.GetString()); + } + fileIds = array; + continue; + } + if (property.NameEquals("metadata"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + Dictionary dictionary = new Dictionary(); + foreach (var property0 in property.Value.EnumerateObject()) + { + dictionary.Add(property0.Name, property0.Value.GetString()); + } + metadata = dictionary; + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new ModifyAssistantRequest( + model, + name, + description, + instructions, + tools ?? new ChangeTrackingList(), + fileIds ?? new ChangeTrackingList(), + metadata ?? new ChangeTrackingDictionary(), + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(ModifyAssistantRequest)} does not support '{options.Format}' format."); + } + } + + ModifyAssistantRequest IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeModifyAssistantRequest(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(ModifyAssistantRequest)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static ModifyAssistantRequest FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeModifyAssistantRequest(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/ModifyAssistantRequest.cs b/.dotnet/src/Generated/Models/ModifyAssistantRequest.cs new file mode 100644 index 000000000..a6e7b82a2 --- /dev/null +++ b/.dotnet/src/Generated/Models/ModifyAssistantRequest.cs @@ -0,0 +1,145 @@ +// + +using System; +using System.Collections.Generic; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// The ModifyAssistantRequest. + internal partial class ModifyAssistantRequest + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + public ModifyAssistantRequest() + { + Tools = new ChangeTrackingList(); + FileIds = new ChangeTrackingList(); + Metadata = new ChangeTrackingDictionary(); + } + + /// Initializes a new instance of . + /// + /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to + /// see all of your available models, or see our [Model overview](/docs/models/overview) for + /// descriptions of them. + /// + /// The name of the assistant. The maximum length is 256 characters. + /// The description of the assistant. The maximum length is 512 characters. + /// The system instructions that the assistant uses. The maximum length is 32768 characters. + /// + /// A list of tools enabled on the assistant. There can be a maximum of 128 tools per assistant. + /// Tools can be of types `code_interpreter`, `retrieval`, or `function`.
+ /// + /// + /// A list of [file](/docs/api-reference/files) IDs attached to this assistant. There can be a + /// maximum of 20 files attached to the assistant. Files are ordered by their creation date in + /// ascending order. + /// + /// + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + /// additional information about the object in a structured format. Keys can be a maximum of 64 + /// characters long and values can be a maximum of 512 characters long. + /// + /// Keeps track of any properties unknown to the library. + internal ModifyAssistantRequest(string model, string name, string description, string instructions, IList tools, IList fileIds, IDictionary metadata, IDictionary serializedAdditionalRawData) + { + Model = model; + Name = name; + Description = description; + Instructions = instructions; + Tools = tools; + FileIds = fileIds; + Metadata = metadata; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// + /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to + /// see all of your available models, or see our [Model overview](/docs/models/overview) for + /// descriptions of them. + /// + public string Model { get; set; } + /// The name of the assistant. The maximum length is 256 characters. + public string Name { get; set; } + /// The description of the assistant. The maximum length is 512 characters. + public string Description { get; set; } + /// The system instructions that the assistant uses. The maximum length is 32768 characters. + public string Instructions { get; set; } + /// + /// A list of tools enabled on the assistant. There can be a maximum of 128 tools per assistant. + /// Tools can be of types `code_interpreter`, `retrieval`, or `function`. + /// + /// To assign an object to the element of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + public IList Tools { get; } + /// + /// A list of [file](/docs/api-reference/files) IDs attached to this assistant. There can be a + /// maximum of 20 files attached to the assistant. Files are ordered by their creation date in + /// ascending order. + /// + public IList FileIds { get; } + /// + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + /// additional information about the object in a structured format. Keys can be a maximum of 64 + /// characters long and values can be a maximum of 512 characters long.
+ /// + public IDictionary Metadata { get; set; } + } +} diff --git a/.dotnet/src/Generated/Models/ModifyMessageRequest.Serialization.cs b/.dotnet/src/Generated/Models/ModifyMessageRequest.Serialization.cs new file mode 100644 index 000000000..22d68d73a --- /dev/null +++ b/.dotnet/src/Generated/Models/ModifyMessageRequest.Serialization.cs @@ -0,0 +1,155 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class ModifyMessageRequest : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ModifyMessageRequest)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsCollectionDefined(Metadata)) + { + if (Metadata != null) + { + writer.WritePropertyName("metadata"u8); + writer.WriteStartObject(); + foreach (var item in Metadata) + { + writer.WritePropertyName(item.Key); + writer.WriteStringValue(item.Value); + } + writer.WriteEndObject(); + } + else + { + writer.WriteNull("metadata"); + } + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + ModifyMessageRequest IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ModifyMessageRequest)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeModifyMessageRequest(document.RootElement, options); + } + + internal static ModifyMessageRequest DeserializeModifyMessageRequest(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + IDictionary metadata = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("metadata"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + Dictionary dictionary = new Dictionary(); + foreach (var property0 in property.Value.EnumerateObject()) + { + dictionary.Add(property0.Name, property0.Value.GetString()); + } + metadata = dictionary; + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new ModifyMessageRequest(metadata ?? new ChangeTrackingDictionary(), serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
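+            /* ModifyMessageRequest carries only "metadata" on the wire, so a
+             * complete serialized payload can be as small as:
+             *
+             *   { "metadata": { "user_id": "abc123" } }
+             *
+             * The key and value here are illustrative; per the model's docs below,
+             * keys may be up to 64 characters and values up to 512. */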
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(ModifyMessageRequest)} does not support '{options.Format}' format."); + } + } + + ModifyMessageRequest IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeModifyMessageRequest(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(ModifyMessageRequest)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static ModifyMessageRequest FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeModifyMessageRequest(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/ModifyMessageRequest.cs b/.dotnet/src/Generated/Models/ModifyMessageRequest.cs new file mode 100644 index 000000000..9fac04db8 --- /dev/null +++ b/.dotnet/src/Generated/Models/ModifyMessageRequest.cs @@ -0,0 +1,70 @@ +// + +using System; +using System.Collections.Generic; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// The ModifyMessageRequest. + internal partial class ModifyMessageRequest + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + public ModifyMessageRequest() + { + Metadata = new ChangeTrackingDictionary(); + } + + /// Initializes a new instance of . + /// + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + /// additional information about the object in a structured format. Keys can be a maximum of 64 + /// characters long and values can be a maximum of 512 characters long. + /// + /// Keeps track of any properties unknown to the library. + internal ModifyMessageRequest(IDictionary metadata, IDictionary serializedAdditionalRawData) + { + Metadata = metadata; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + /// additional information about the object in a structured format.
Keys can be a maximum of 64 + /// characters long and values can be a maximum of 512 characters long. + /// + public IDictionary Metadata { get; set; } + } +} diff --git a/.dotnet/src/Generated/Models/ModifyRunRequest.Serialization.cs b/.dotnet/src/Generated/Models/ModifyRunRequest.Serialization.cs new file mode 100644 index 000000000..c984d804a --- /dev/null +++ b/.dotnet/src/Generated/Models/ModifyRunRequest.Serialization.cs @@ -0,0 +1,155 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class ModifyRunRequest : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ModifyRunRequest)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsCollectionDefined(Metadata)) + { + if (Metadata != null) + { + writer.WritePropertyName("metadata"u8); + writer.WriteStartObject(); + foreach (var item in Metadata) + { + writer.WritePropertyName(item.Key); + writer.WriteStringValue(item.Value); + } + writer.WriteEndObject(); + } + else + { + writer.WriteNull("metadata"); + } + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + ModifyRunRequest IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ModifyRunRequest)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeModifyRunRequest(document.RootElement, options); + } + + internal static ModifyRunRequest DeserializeModifyRunRequest(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + IDictionary metadata = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("metadata"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + Dictionary dictionary = new Dictionary(); + foreach (var property0 in property.Value.EnumerateObject()) + { + dictionary.Add(property0.Name, property0.Value.GetString()); + } + metadata = dictionary; + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new ModifyRunRequest(metadata ??
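+            /* The ?? fallback that completes below substitutes an empty
+             * ChangeTrackingDictionary rather than null, so a deserialized model
+             * never exposes a null collection. The same type doubles as an
+             * "undefined" sentinel on the write side: assuming
+             * ChangeTrackingDictionary records whether it was ever populated,
+             * Optional.IsCollectionDefined(Metadata) returns false for an untouched
+             * instance and Write omits "metadata" entirely instead of sending {}. */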
new ChangeTrackingDictionary(), serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(ModifyRunRequest)} does not support '{options.Format}' format."); + } + } + + ModifyRunRequest IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeModifyRunRequest(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(ModifyRunRequest)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static ModifyRunRequest FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeModifyRunRequest(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/ModifyRunRequest.cs b/.dotnet/src/Generated/Models/ModifyRunRequest.cs new file mode 100644 index 000000000..9cbe9b5fa --- /dev/null +++ b/.dotnet/src/Generated/Models/ModifyRunRequest.cs @@ -0,0 +1,70 @@ +// + +using System; +using System.Collections.Generic; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// The ModifyRunRequest. + internal partial class ModifyRunRequest + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + public ModifyRunRequest() + { + Metadata = new ChangeTrackingDictionary(); + } + + /// Initializes a new instance of . + /// + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + /// additional information about the object in a structured format. Keys can be a maximum of 64 + /// characters long and values can be a maximum of 512 characters long. + /// + /// Keeps track of any properties unknown to the library. + internal ModifyRunRequest(IDictionary metadata, IDictionary serializedAdditionalRawData) + { + Metadata = metadata; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// + /// Set of 16 key-value pairs that can be attached to an object.
This can be useful for storing + /// additional information about the object in a structured format. Keys can be a maximum of 64 + /// characters long and values can be a maximum of 512 characters long. + /// + public IDictionary Metadata { get; set; } + } +} diff --git a/.dotnet/src/Generated/Models/ModifyThreadRequest.Serialization.cs b/.dotnet/src/Generated/Models/ModifyThreadRequest.Serialization.cs new file mode 100644 index 000000000..cd64524a1 --- /dev/null +++ b/.dotnet/src/Generated/Models/ModifyThreadRequest.Serialization.cs @@ -0,0 +1,155 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class ModifyThreadRequest : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ModifyThreadRequest)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsCollectionDefined(Metadata)) + { + if (Metadata != null) + { + writer.WritePropertyName("metadata"u8); + writer.WriteStartObject(); + foreach (var item in Metadata) + { + writer.WritePropertyName(item.Key); + writer.WriteStringValue(item.Value); + } + writer.WriteEndObject(); + } + else + { + writer.WriteNull("metadata"); + } + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + ModifyThreadRequest IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ModifyThreadRequest)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeModifyThreadRequest(document.RootElement, options); + } + + internal static ModifyThreadRequest DeserializeModifyThreadRequest(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + IDictionary metadata = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("metadata"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + Dictionary dictionary = new Dictionary(); + foreach (var property0 in property.Value.EnumerateObject()) + { + dictionary.Add(property0.Name, property0.Value.GetString()); + } + metadata = dictionary; + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new ModifyThreadRequest(metadata ??
new ChangeTrackingDictionary(), serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(ModifyThreadRequest)} does not support '{options.Format}' format."); + } + } + + ModifyThreadRequest IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeModifyThreadRequest(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(ModifyThreadRequest)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static ModifyThreadRequest FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeModifyThreadRequest(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/ModifyThreadRequest.cs b/.dotnet/src/Generated/Models/ModifyThreadRequest.cs new file mode 100644 index 000000000..346f01a71 --- /dev/null +++ b/.dotnet/src/Generated/Models/ModifyThreadRequest.cs @@ -0,0 +1,70 @@ +// + +using System; +using System.Collections.Generic; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// The ModifyThreadRequest. + internal partial class ModifyThreadRequest + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + public ModifyThreadRequest() + { + Metadata = new ChangeTrackingDictionary(); + } + + /// Initializes a new instance of . + /// + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + /// additional information about the object in a structured format. Keys can be a maximum of 64 + /// characters long and values can be a maximum of 512 characters long. + /// + /// Keeps track of any properties unknown to the library. + internal ModifyThreadRequest(IDictionary metadata, IDictionary serializedAdditionalRawData) + { + Metadata = metadata; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// + /// Set of 16 key-value pairs that can be attached to an object.
This can be useful for storing + /// additional information about the object in a structured format. Keys can be a maximum of 64 + /// characters long and values can be a maximum of 512 characters long. + /// + public IDictionary Metadata { get; set; } + } +} diff --git a/.dotnet/src/Generated/Models/OpenAIFile.Serialization.cs b/.dotnet/src/Generated/Models/OpenAIFile.Serialization.cs new file mode 100644 index 000000000..bd066a711 --- /dev/null +++ b/.dotnet/src/Generated/Models/OpenAIFile.Serialization.cs @@ -0,0 +1,210 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class OpenAIFile : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(OpenAIFile)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("id"u8); + writer.WriteStringValue(Id); + if (Bytes != null) + { + writer.WritePropertyName("bytes"u8); + writer.WriteNumberValue(Bytes.Value); + } + else + { + writer.WriteNull("bytes"); + } + writer.WritePropertyName("created_at"u8); + writer.WriteNumberValue(CreatedAt, "U"); + writer.WritePropertyName("filename"u8); + writer.WriteStringValue(Filename); + writer.WritePropertyName("object"u8); + writer.WriteStringValue(Object.ToString()); + writer.WritePropertyName("purpose"u8); + writer.WriteStringValue(Purpose.ToString()); + writer.WritePropertyName("status"u8); + writer.WriteStringValue(Status.ToString()); + if (Optional.IsDefined(StatusDetails)) + { + writer.WritePropertyName("status_details"u8); + writer.WriteStringValue(StatusDetails); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + OpenAIFile IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(OpenAIFile)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeOpenAIFile(document.RootElement, options); + } + + internal static OpenAIFile DeserializeOpenAIFile(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string id = default; + long?
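+            /* "bytes" is nullable because the service can send an explicit JSON
+             * null (and Write above mirrors that with writer.WriteNull("bytes")).
+             * A sketch of a payload this deserializer accepts, with illustrative
+             * values and the property names taken from the checks below:
+             *
+             *   { "id": "file-abc123", "bytes": null, "created_at": 1700000000,
+             *     "filename": "training.jsonl", "object": "file",
+             *     "purpose": "fine-tune", "status": "processed" }
+             */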
bytes = default; + DateTimeOffset createdAt = default; + string filename = default; + OpenAIFileObject @object = default; + OpenAIFilePurpose purpose = default; + OpenAIFileStatus status = default; + string statusDetails = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("id"u8)) + { + id = property.Value.GetString(); + continue; + } + if (property.NameEquals("bytes"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + bytes = null; + continue; + } + bytes = property.Value.GetInt64(); + continue; + } + if (property.NameEquals("created_at"u8)) + { + createdAt = DateTimeOffset.FromUnixTimeSeconds(property.Value.GetInt64()); + continue; + } + if (property.NameEquals("filename"u8)) + { + filename = property.Value.GetString(); + continue; + } + if (property.NameEquals("object"u8)) + { + @object = new OpenAIFileObject(property.Value.GetString()); + continue; + } + if (property.NameEquals("purpose"u8)) + { + purpose = new OpenAIFilePurpose(property.Value.GetString()); + continue; + } + if (property.NameEquals("status"u8)) + { + status = new OpenAIFileStatus(property.Value.GetString()); + continue; + } + if (property.NameEquals("status_details"u8)) + { + statusDetails = property.Value.GetString(); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new OpenAIFile( + id, + bytes, + createdAt, + filename, + @object, + purpose, + status, + statusDetails, + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(OpenAIFile)} does not support '{options.Format}' format."); + } + } + + OpenAIFile IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeOpenAIFile(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(OpenAIFile)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static OpenAIFile FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeOpenAIFile(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. 
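+        /* A sketch of how request content is typically produced from one of these
+         * models, assuming a System.ClientModel pipeline message whose request
+         * accepts BinaryContent (the caller-side wiring is hypothetical):
+         *
+         *   BinaryContent body = model.ToRequestBody();
+         *   message.Request.Content = body;
+         */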
+ internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/OpenAIFile.cs b/.dotnet/src/Generated/Models/OpenAIFile.cs new file mode 100644 index 000000000..e310ef98a --- /dev/null +++ b/.dotnet/src/Generated/Models/OpenAIFile.cs @@ -0,0 +1,135 @@ +// + +using System; +using System.Collections.Generic; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// The `File` object represents a document that has been uploaded to OpenAI. + internal partial class OpenAIFile + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The file identifier, which can be referenced in the API endpoints. + /// The size of the file, in bytes. + /// The Unix timestamp (in seconds) for when the file was created. + /// The name of the file. + /// + /// The intended purpose of the file. Supported values are `fine-tune`, `fine-tune-results`, + /// `assistants`, and `assistants_output`. + /// + /// + /// Deprecated. The current status of the file, which can be either `uploaded`, `processed`, or + /// `error`. + /// + /// or is null. + internal OpenAIFile(string id, long? bytes, DateTimeOffset createdAt, string filename, OpenAIFilePurpose purpose, OpenAIFileStatus status) + { + Argument.AssertNotNull(id, nameof(id)); + Argument.AssertNotNull(filename, nameof(filename)); + + Id = id; + Bytes = bytes; + CreatedAt = createdAt; + Filename = filename; + Purpose = purpose; + Status = status; + } + + /// Initializes a new instance of . + /// The file identifier, which can be referenced in the API endpoints. + /// The size of the file, in bytes. + /// The Unix timestamp (in seconds) for when the file was created. + /// The name of the file. + /// The object type, which is always "file". + /// + /// The intended purpose of the file. Supported values are `fine-tune`, `fine-tune-results`, + /// `assistants`, and `assistants_output`. + /// + /// + /// Deprecated. The current status of the file, which can be either `uploaded`, `processed`, or + /// `error`. + /// + /// + /// Deprecated. For details on why a fine-tuning training file failed validation, see the `error` + /// field on `fine_tuning.job`. + /// + /// Keeps track of any properties unknown to the library. + internal OpenAIFile(string id, long? 
bytes, DateTimeOffset createdAt, string filename, OpenAIFileObject @object, OpenAIFilePurpose purpose, OpenAIFileStatus status, string statusDetails, IDictionary serializedAdditionalRawData) + { + Id = id; + Bytes = bytes; + CreatedAt = createdAt; + Filename = filename; + Object = @object; + Purpose = purpose; + Status = status; + StatusDetails = statusDetails; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal OpenAIFile() + { + } + + /// The file identifier, which can be referenced in the API endpoints. + public string Id { get; } + /// The size of the file, in bytes. + public long? Bytes { get; } + /// The Unix timestamp (in seconds) for when the file was created. + public DateTimeOffset CreatedAt { get; } + /// The name of the file. + public string Filename { get; } + /// The object type, which is always "file". + public OpenAIFileObject Object { get; } = OpenAIFileObject.File; + + /// + /// The intended purpose of the file. Supported values are `fine-tune`, `fine-tune-results`, + /// `assistants`, and `assistants_output`. + /// + public OpenAIFilePurpose Purpose { get; } + /// + /// Deprecated. The current status of the file, which can be either `uploaded`, `processed`, or + /// `error`. + /// + public OpenAIFileStatus Status { get; } + /// + /// Deprecated. For details on why a fine-tuning training file failed validation, see the `error` + /// field on `fine_tuning.job`. + /// + public string StatusDetails { get; } + } +} diff --git a/.dotnet/src/Generated/Models/OpenAIFileObject.cs b/.dotnet/src/Generated/Models/OpenAIFileObject.cs new file mode 100644 index 000000000..db29f0450 --- /dev/null +++ b/.dotnet/src/Generated/Models/OpenAIFileObject.cs @@ -0,0 +1,43 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Internal.Models +{ + /// The OpenAIFile_object. + internal readonly partial struct OpenAIFileObject : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public OpenAIFileObject(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string FileValue = "file"; + + /// file. + public static OpenAIFileObject File { get; } = new OpenAIFileObject(FileValue); + /// Determines if two values are the same. + public static bool operator ==(OpenAIFileObject left, OpenAIFileObject right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(OpenAIFileObject left, OpenAIFileObject right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator OpenAIFileObject(string value) => new OpenAIFileObject(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is OpenAIFileObject other && Equals(other); + /// + public bool Equals(OpenAIFileObject other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 
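+        /* OpenAIFileObject follows the "extensible enum" pattern: a readonly
+         * struct wrapping a string, so values unknown to the library still
+         * deserialize without data loss. Usage sketch:
+         *
+         *   OpenAIFileObject kind = "file";              // implicit conversion
+         *   bool isFile = kind == OpenAIFileObject.File; // true; equality ignores case
+         */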
0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/OpenAIFilePurpose.cs b/.dotnet/src/Generated/Models/OpenAIFilePurpose.cs new file mode 100644 index 000000000..97d4f29b4 --- /dev/null +++ b/.dotnet/src/Generated/Models/OpenAIFilePurpose.cs @@ -0,0 +1,52 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Internal.Models +{ + /// Enum for purpose in OpenAIFile. + internal readonly partial struct OpenAIFilePurpose : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public OpenAIFilePurpose(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string FineTuneValue = "fine-tune"; + private const string FineTuneResultsValue = "fine-tune-results"; + private const string AssistantsValue = "assistants"; + private const string AssistantsOutputValue = "assistants_output"; + + /// fine-tune. + public static OpenAIFilePurpose FineTune { get; } = new OpenAIFilePurpose(FineTuneValue); + /// fine-tune-results. + public static OpenAIFilePurpose FineTuneResults { get; } = new OpenAIFilePurpose(FineTuneResultsValue); + /// assistants. + public static OpenAIFilePurpose Assistants { get; } = new OpenAIFilePurpose(AssistantsValue); + /// assistants_output. + public static OpenAIFilePurpose AssistantsOutput { get; } = new OpenAIFilePurpose(AssistantsOutputValue); + /// Determines if two values are the same. + public static bool operator ==(OpenAIFilePurpose left, OpenAIFilePurpose right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(OpenAIFilePurpose left, OpenAIFilePurpose right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator OpenAIFilePurpose(string value) => new OpenAIFilePurpose(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is OpenAIFilePurpose other && Equals(other); + /// + public bool Equals(OpenAIFilePurpose other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/OpenAIFileStatus.cs b/.dotnet/src/Generated/Models/OpenAIFileStatus.cs new file mode 100644 index 000000000..bd8c906d5 --- /dev/null +++ b/.dotnet/src/Generated/Models/OpenAIFileStatus.cs @@ -0,0 +1,49 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Internal.Models +{ + /// Enum for status in OpenAIFile. + internal readonly partial struct OpenAIFileStatus : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public OpenAIFileStatus(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string UploadedValue = "uploaded"; + private const string ProcessedValue = "processed"; + private const string ErrorValue = "error"; + + /// uploaded. + public static OpenAIFileStatus Uploaded { get; } = new OpenAIFileStatus(UploadedValue); + /// processed. + public static OpenAIFileStatus Processed { get; } = new OpenAIFileStatus(ProcessedValue); + /// error. + public static OpenAIFileStatus Error { get; } = new OpenAIFileStatus(ErrorValue); + /// Determines if two values are the same. 
+ public static bool operator ==(OpenAIFileStatus left, OpenAIFileStatus right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(OpenAIFileStatus left, OpenAIFileStatus right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator OpenAIFileStatus(string value) => new OpenAIFileStatus(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is OpenAIFileStatus other && Equals(other); + /// + public bool Equals(OpenAIFileStatus other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/RunCompletionUsage.Serialization.cs b/.dotnet/src/Generated/Models/RunCompletionUsage.Serialization.cs new file mode 100644 index 000000000..6c064dc0a --- /dev/null +++ b/.dotnet/src/Generated/Models/RunCompletionUsage.Serialization.cs @@ -0,0 +1,146 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class RunCompletionUsage : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(RunCompletionUsage)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("completion_tokens"u8); + writer.WriteNumberValue(CompletionTokens); + writer.WritePropertyName("prompt_tokens"u8); + writer.WriteNumberValue(PromptTokens); + writer.WritePropertyName("total_tokens"u8); + writer.WriteNumberValue(TotalTokens); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + RunCompletionUsage IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
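+            /* The usage payload this model represents is three counters, e.g.:
+             *
+             *   { "completion_tokens": 557, "prompt_tokens": 25, "total_tokens": 582 }
+             *
+             * The numbers are illustrative; per the docs below, total_tokens is the
+             * sum of the prompt and completion counts. */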
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(RunCompletionUsage)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeRunCompletionUsage(document.RootElement, options); + } + + internal static RunCompletionUsage DeserializeRunCompletionUsage(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + long completionTokens = default; + long promptTokens = default; + long totalTokens = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("completion_tokens"u8)) + { + completionTokens = property.Value.GetInt64(); + continue; + } + if (property.NameEquals("prompt_tokens"u8)) + { + promptTokens = property.Value.GetInt64(); + continue; + } + if (property.NameEquals("total_tokens"u8)) + { + totalTokens = property.Value.GetInt64(); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new RunCompletionUsage(completionTokens, promptTokens, totalTokens, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(RunCompletionUsage)} does not support '{options.Format}' format."); + } + } + + RunCompletionUsage IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeRunCompletionUsage(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(RunCompletionUsage)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static RunCompletionUsage FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeRunCompletionUsage(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/RunCompletionUsage.cs b/.dotnet/src/Generated/Models/RunCompletionUsage.cs new file mode 100644 index 000000000..8e804dce9 --- /dev/null +++ b/.dotnet/src/Generated/Models/RunCompletionUsage.cs @@ -0,0 +1,82 @@ +// + +using System; +using System.Collections.Generic; + +namespace OpenAI.Internal.Models +{ + /// + /// Usage statistics related to the run. 
This value will be `null` if the run is not in a terminal + /// state (i.e. `in_progress`, `queued`, etc.). + /// + internal partial class RunCompletionUsage + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// Number of completion tokens used over the course of the run. + /// Number of prompt tokens used over the course of the run. + /// Total number of tokens used (prompt + completion). + internal RunCompletionUsage(long completionTokens, long promptTokens, long totalTokens) + { + CompletionTokens = completionTokens; + PromptTokens = promptTokens; + TotalTokens = totalTokens; + } + + /// Initializes a new instance of . + /// Number of completion tokens used over the course of the run. + /// Number of prompt tokens used over the course of the run. + /// Total number of tokens used (prompt + completion). + /// Keeps track of any properties unknown to the library. + internal RunCompletionUsage(long completionTokens, long promptTokens, long totalTokens, IDictionary serializedAdditionalRawData) + { + CompletionTokens = completionTokens; + PromptTokens = promptTokens; + TotalTokens = totalTokens; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal RunCompletionUsage() + { + } + + /// Number of completion tokens used over the course of the run. + public long CompletionTokens { get; } + /// Number of prompt tokens used over the course of the run. + public long PromptTokens { get; } + /// Total number of tokens used (prompt + completion). + public long TotalTokens { get; } + } +} diff --git a/.dotnet/src/Generated/Models/RunObject.Serialization.cs b/.dotnet/src/Generated/Models/RunObject.Serialization.cs new file mode 100644 index 000000000..08d10f0d4 --- /dev/null +++ b/.dotnet/src/Generated/Models/RunObject.Serialization.cs @@ -0,0 +1,462 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class RunObject : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
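+            /* RunObject's wire contract distinguishes "absent" from "null":
+             * required nullable fields such as required_action, last_error, the
+             * *_at timestamps, metadata, and usage are always present, written as
+             * explicit JSON null when unset (see the WriteNull calls below), while
+             * unknown extra properties pass through _serializedAdditionalRawData. */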
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(RunObject)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("id"u8); + writer.WriteStringValue(Id); + writer.WritePropertyName("object"u8); + writer.WriteStringValue(Object.ToString()); + writer.WritePropertyName("created_at"u8); + writer.WriteNumberValue(CreatedAt, "U"); + writer.WritePropertyName("thread_id"u8); + writer.WriteStringValue(ThreadId); + writer.WritePropertyName("assistant_id"u8); + writer.WriteStringValue(AssistantId); + writer.WritePropertyName("status"u8); + writer.WriteStringValue(Status.ToString()); + if (RequiredAction != null) + { + writer.WritePropertyName("required_action"u8); + writer.WriteObjectValue(RequiredAction); + } + else + { + writer.WriteNull("required_action"); + } + if (LastError != null) + { + writer.WritePropertyName("last_error"u8); + writer.WriteObjectValue(LastError); + } + else + { + writer.WriteNull("last_error"); + } + if (ExpiresAt != null) + { + writer.WritePropertyName("expires_at"u8); + // Write Unix seconds, matching created_at above and the Unix-seconds reads in DeserializeRunObject below. + writer.WriteNumberValue(ExpiresAt.Value, "U"); + } + else + { + writer.WriteNull("expires_at"); + } + if (StartedAt != null) + { + writer.WritePropertyName("started_at"u8); + writer.WriteNumberValue(StartedAt.Value, "U"); + } + else + { + writer.WriteNull("started_at"); + } + if (CancelledAt != null) + { + writer.WritePropertyName("cancelled_at"u8); + writer.WriteNumberValue(CancelledAt.Value, "U"); + } + else + { + writer.WriteNull("cancelled_at"); + } + if (FailedAt != null) + { + writer.WritePropertyName("failed_at"u8); + writer.WriteNumberValue(FailedAt.Value, "U"); + } + else + { + writer.WriteNull("failed_at"); + } + if (CompletedAt != null) + { + writer.WritePropertyName("completed_at"u8); + writer.WriteNumberValue(CompletedAt.Value, "U"); + } + else + { + writer.WriteNull("completed_at"); + } + writer.WritePropertyName("model"u8); + writer.WriteStringValue(Model); + writer.WritePropertyName("instructions"u8); + writer.WriteStringValue(Instructions); + writer.WritePropertyName("tools"u8); + writer.WriteStartArray(); + foreach (var item in Tools) + { + if (item == null) + { + writer.WriteNullValue(); + continue; + } +#if NET6_0_OR_GREATER + writer.WriteRawValue(item); +#else + using (JsonDocument document = JsonDocument.Parse(item)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + writer.WriteEndArray(); + writer.WritePropertyName("file_ids"u8); + writer.WriteStartArray(); + foreach (var item in FileIds) + { + writer.WriteStringValue(item); + } + writer.WriteEndArray(); + if (Metadata != null && Optional.IsCollectionDefined(Metadata)) + { + writer.WritePropertyName("metadata"u8); + writer.WriteStartObject(); + foreach (var item in Metadata) + { + writer.WritePropertyName(item.Key); + writer.WriteStringValue(item.Value); + } + writer.WriteEndObject(); + } + else + { + writer.WriteNull("metadata"); + } + if (Usage != null) + { + writer.WritePropertyName("usage"u8); + writer.WriteObjectValue(Usage); + } + else + { + writer.WriteNull("usage"); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } +
writer.WriteEndObject(); + } + + RunObject IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(RunObject)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeRunObject(document.RootElement, options); + } + + internal static RunObject DeserializeRunObject(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string id = default; + RunObjectObject @object = default; + DateTimeOffset createdAt = default; + string threadId = default; + string assistantId = default; + RunObjectStatus status = default; + RunObjectRequiredAction requiredAction = default; + RunObjectLastError lastError = default; + DateTimeOffset? expiresAt = default; + DateTimeOffset? startedAt = default; + DateTimeOffset? cancelledAt = default; + DateTimeOffset? failedAt = default; + DateTimeOffset? completedAt = default; + string model = default; + string instructions = default; + IReadOnlyList tools = default; + IReadOnlyList fileIds = default; + IReadOnlyDictionary metadata = default; + RunCompletionUsage usage = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("id"u8)) + { + id = property.Value.GetString(); + continue; + } + if (property.NameEquals("object"u8)) + { + @object = new RunObjectObject(property.Value.GetString()); + continue; + } + if (property.NameEquals("created_at"u8)) + { + createdAt = DateTimeOffset.FromUnixTimeSeconds(property.Value.GetInt64()); + continue; + } + if (property.NameEquals("thread_id"u8)) + { + threadId = property.Value.GetString(); + continue; + } + if (property.NameEquals("assistant_id"u8)) + { + assistantId = property.Value.GetString(); + continue; + } + if (property.NameEquals("status"u8)) + { + status = new RunObjectStatus(property.Value.GetString()); + continue; + } + if (property.NameEquals("required_action"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + requiredAction = null; + continue; + } + requiredAction = RunObjectRequiredAction.DeserializeRunObjectRequiredAction(property.Value, options); + continue; + } + if (property.NameEquals("last_error"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + lastError = null; + continue; + } + lastError = RunObjectLastError.DeserializeRunObjectLastError(property.Value, options); + continue; + } + if (property.NameEquals("expires_at"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + expiresAt = null; + continue; + } + // BUG: https://github.com/Azure/autorest.csharp/issues/4296 + // expiresAt = property.Value.GetDateTimeOffset("O"); + expiresAt = DateTimeOffset.FromUnixTimeSeconds(property.Value.GetInt64()); + continue; + } + if (property.NameEquals("started_at"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + startedAt = null; + continue; + } + // BUG: https://github.com/Azure/autorest.csharp/issues/4296 + // startedAt = property.Value.GetDateTimeOffset("O"); + startedAt = DateTimeOffset.FromUnixTimeSeconds(property.Value.GetInt64()); + continue; + } + if 
(property.NameEquals("cancelled_at"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + cancelledAt = null; + continue; + } + // BUG: https://github.com/Azure/autorest.csharp/issues/4296 + // cancelledAt = property.Value.GetDateTimeOffset("O"); + cancelledAt = DateTimeOffset.FromUnixTimeSeconds(property.Value.GetInt64()); + continue; + } + if (property.NameEquals("failed_at"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + failedAt = null; + continue; + } + // BUG: https://github.com/Azure/autorest.csharp/issues/4296 + // failedAt = property.Value.GetDateTimeOffset("O"); + failedAt = DateTimeOffset.FromUnixTimeSeconds(property.Value.GetInt64()); + continue; + } + if (property.NameEquals("completed_at"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + completedAt = null; + continue; + } + // BUG: https://github.com/Azure/autorest.csharp/issues/4296 + // completedAt = property.Value.GetDateTimeOffset("O"); + completedAt = DateTimeOffset.FromUnixTimeSeconds(property.Value.GetInt64()); + continue; + } + if (property.NameEquals("model"u8)) + { + model = property.Value.GetString(); + continue; + } + if (property.NameEquals("instructions"u8)) + { + instructions = property.Value.GetString(); + continue; + } + if (property.NameEquals("tools"u8)) + { + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + if (item.ValueKind == JsonValueKind.Null) + { + array.Add(null); + } + else + { + array.Add(BinaryData.FromString(item.GetRawText())); + } + } + tools = array; + continue; + } + if (property.NameEquals("file_ids"u8)) + { + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(item.GetString()); + } + fileIds = array; + continue; + } + if (property.NameEquals("metadata"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + metadata = new ChangeTrackingDictionary(); + continue; + } + Dictionary dictionary = new Dictionary(); + foreach (var property0 in property.Value.EnumerateObject()) + { + dictionary.Add(property0.Name, property0.Value.GetString()); + } + metadata = dictionary; + continue; + } + if (property.NameEquals("usage"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + usage = null; + continue; + } + usage = RunCompletionUsage.DeserializeRunCompletionUsage(property.Value, options); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new RunObject( + id, + @object, + createdAt, + threadId, + assistantId, + status, + requiredAction, + lastError, + expiresAt, + startedAt, + cancelledAt, + failedAt, + completedAt, + model, + instructions, + tools, + fileIds, + metadata, + usage, + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(RunObject)} does not support '{options.Format}' format."); + } + } + + RunObject IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeRunObject(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(RunObject)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static RunObject FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeRunObject(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/RunObject.cs b/.dotnet/src/Generated/Models/RunObject.cs new file mode 100644 index 000000000..dfa4a9104 --- /dev/null +++ b/.dotnet/src/Generated/Models/RunObject.cs @@ -0,0 +1,262 @@ +// + +using System; +using System.Collections.Generic; +using System.Linq; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// Represents an execution run on a [thread](/docs/api-reference/threads). + internal partial class RunObject + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The identifier, which can be referenced in API endpoints. + /// The Unix timestamp (in seconds) for when the run was created. + /// + /// The ID of the [thread](/docs/api-reference/threads) that was executed on as a part of this + /// run. + /// + /// The ID of the [assistant](/docs/api-reference/assistants) used for execution of this run. + /// + /// The status of the run, which can be either `queued`, `in_progress`, `requires_action`, + /// `cancelling`, `cancelled`, `failed`, `completed`, or `expired`. + /// + /// + /// Details on the action required to continue the run. Will be `null` if no action is + /// required. + /// + /// The last error associated with this run. Will be `null` if there are no errors. + /// The Unix timestamp (in seconds) for when the run will expire. + /// The Unix timestamp (in seconds) for when the run was started. + /// The Unix timestamp (in seconds) for when the run was cancelled. + /// The Unix timestamp (in seconds) for when the run failed. + /// The Unix timestamp (in seconds) for when the run was completed. + /// The model that the [assistant](/docs/api-reference/assistants) used for this run. + /// The instructions that the [assistant](/docs/api-reference/assistants) used for this run. + /// The list of tools that the [assistant](/docs/api-reference/assistants) used for this run. 
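Because `tools` is surfaced as raw `BinaryData` rather than a typed union (the `fileIds` and `metadata` parameters continue below), elements would be constructed along these lines; the tool shapes here are illustrative examples, not types defined in this diff:

```csharp
using System;

// Illustrative payloads for the weakly typed `tools` list.
BinaryData codeInterpreter = BinaryData.FromObjectAsJson(new { type = "code_interpreter" });
BinaryData retrieval = BinaryData.FromString("{\"type\": \"retrieval\"}");

Console.WriteLine(codeInterpreter); // {"type":"code_interpreter"}
```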
+        ///
+        /// The list of [File](/docs/api-reference/files) IDs the
+        /// [assistant](/docs/api-reference/assistants) used for this run.
+        ///
+        ///
+        /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing
+        /// additional information about the object in a structured format. Keys can be a maximum of 64
+        /// characters long and values can be a maximum of 512 characters long.
+        ///
+        ///
+        /// id, threadId, assistantId, model, instructions, tools or fileIds is null.
+        internal RunObject(string id, DateTimeOffset createdAt, string threadId, string assistantId, RunObjectStatus status, RunObjectRequiredAction requiredAction, RunObjectLastError lastError, DateTimeOffset? expiresAt, DateTimeOffset? startedAt, DateTimeOffset? cancelledAt, DateTimeOffset? failedAt, DateTimeOffset? completedAt, string model, string instructions, IEnumerable<BinaryData> tools, IEnumerable<string> fileIds, IReadOnlyDictionary<string, string> metadata, RunCompletionUsage usage)
+        {
+            Argument.AssertNotNull(id, nameof(id));
+            Argument.AssertNotNull(threadId, nameof(threadId));
+            Argument.AssertNotNull(assistantId, nameof(assistantId));
+            Argument.AssertNotNull(model, nameof(model));
+            Argument.AssertNotNull(instructions, nameof(instructions));
+            Argument.AssertNotNull(tools, nameof(tools));
+            Argument.AssertNotNull(fileIds, nameof(fileIds));
+
+            Id = id;
+            CreatedAt = createdAt;
+            ThreadId = threadId;
+            AssistantId = assistantId;
+            Status = status;
+            RequiredAction = requiredAction;
+            LastError = lastError;
+            ExpiresAt = expiresAt;
+            StartedAt = startedAt;
+            CancelledAt = cancelledAt;
+            FailedAt = failedAt;
+            CompletedAt = completedAt;
+            Model = model;
+            Instructions = instructions;
+            Tools = tools.ToList();
+            FileIds = fileIds.ToList();
+            Metadata = metadata;
+            Usage = usage;
+        }
+
+        /// Initializes a new instance of RunObject.
+        /// The identifier, which can be referenced in API endpoints.
+        /// The object type, which is always `thread.run`.
+        /// The Unix timestamp (in seconds) for when the run was created.
+        ///
+        /// The ID of the [thread](/docs/api-reference/threads) that was executed on as a part of this
+        /// run.
+        ///
+        /// The ID of the [assistant](/docs/api-reference/assistants) used for execution of this run.
+        ///
+        /// The status of the run, which can be either `queued`, `in_progress`, `requires_action`,
+        /// `cancelling`, `cancelled`, `failed`, `completed`, or `expired`.
+        ///
+        ///
+        /// Details on the action required to continue the run. Will be `null` if no action is
+        /// required.
+        ///
+        /// The last error associated with this run. Will be `null` if there are no errors.
+        /// The Unix timestamp (in seconds) for when the run will expire.
+        /// The Unix timestamp (in seconds) for when the run was started.
+        /// The Unix timestamp (in seconds) for when the run was cancelled.
+        /// The Unix timestamp (in seconds) for when the run failed.
+        /// The Unix timestamp (in seconds) for when the run was completed.
+        /// The model that the [assistant](/docs/api-reference/assistants) used for this run.
+        /// The instructions that the [assistant](/docs/api-reference/assistants) used for this run.
+        /// The list of tools that the [assistant](/docs/api-reference/assistants) used for this run.
+        ///
+        /// The list of [File](/docs/api-reference/files) IDs the
+        /// [assistant](/docs/api-reference/assistants) used for this run.
+        ///
+        ///
+        /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing
+        /// additional information about the object in a structured format. Keys can be a maximum of 64
+        /// characters long and values can be a maximum of 512 characters long.
+        ///
+        ///
+        /// Keeps track of any properties unknown to the library.
+        internal RunObject(string id, RunObjectObject @object, DateTimeOffset createdAt, string threadId, string assistantId, RunObjectStatus status, RunObjectRequiredAction requiredAction, RunObjectLastError lastError, DateTimeOffset? expiresAt, DateTimeOffset? startedAt, DateTimeOffset? cancelledAt, DateTimeOffset? failedAt, DateTimeOffset? completedAt, string model, string instructions, IReadOnlyList<BinaryData> tools, IReadOnlyList<string> fileIds, IReadOnlyDictionary<string, string> metadata, RunCompletionUsage usage, IDictionary<string, BinaryData> serializedAdditionalRawData)
+        {
+            Id = id;
+            Object = @object;
+            CreatedAt = createdAt;
+            ThreadId = threadId;
+            AssistantId = assistantId;
+            Status = status;
+            RequiredAction = requiredAction;
+            LastError = lastError;
+            ExpiresAt = expiresAt;
+            StartedAt = startedAt;
+            CancelledAt = cancelledAt;
+            FailedAt = failedAt;
+            CompletedAt = completedAt;
+            Model = model;
+            Instructions = instructions;
+            Tools = tools;
+            FileIds = fileIds;
+            Metadata = metadata;
+            Usage = usage;
+            _serializedAdditionalRawData = serializedAdditionalRawData;
+        }
+
+        /// Initializes a new instance of RunObject for deserialization.
+        internal RunObject()
+        {
+        }
+
+        /// The identifier, which can be referenced in API endpoints.
+        public string Id { get; }
+        /// The object type, which is always `thread.run`.
+        public RunObjectObject Object { get; } = RunObjectObject.ThreadRun;
+
+        /// The Unix timestamp (in seconds) for when the run was created.
+        public DateTimeOffset CreatedAt { get; }
+        ///
+        /// The ID of the [thread](/docs/api-reference/threads) that was executed on as a part of this
+        /// run.
+        ///
+        public string ThreadId { get; }
+        /// The ID of the [assistant](/docs/api-reference/assistants) used for execution of this run.
+        public string AssistantId { get; }
+        ///
+        /// The status of the run, which can be either `queued`, `in_progress`, `requires_action`,
+        /// `cancelling`, `cancelled`, `failed`, `completed`, or `expired`.
+        ///
+        public RunObjectStatus Status { get; }
+        ///
+        /// Details on the action required to continue the run. Will be `null` if no action is
+        /// required.
+        ///
+        public RunObjectRequiredAction RequiredAction { get; }
+        /// The last error associated with this run. Will be `null` if there are no errors.
+        public RunObjectLastError LastError { get; }
+        /// The Unix timestamp (in seconds) for when the run will expire.
+        public DateTimeOffset? ExpiresAt { get; }
+        /// The Unix timestamp (in seconds) for when the run was started.
+        public DateTimeOffset? StartedAt { get; }
+        /// The Unix timestamp (in seconds) for when the run was cancelled.
+        public DateTimeOffset? CancelledAt { get; }
+        /// The Unix timestamp (in seconds) for when the run failed.
+        public DateTimeOffset? FailedAt { get; }
+        /// The Unix timestamp (in seconds) for when the run was completed.
+        public DateTimeOffset? CompletedAt { get; }
+        /// The model that the [assistant](/docs/api-reference/assistants) used for this run.
+        public string Model { get; }
+        /// The instructions that the [assistant](/docs/api-reference/assistants) used for this run.
+        public string Instructions { get; }
+        ///
+        /// The list of tools that the [assistant](/docs/api-reference/assistants) used for this run.
+        ///
+        /// To assign an object to an element of this property use BinaryData.FromObjectAsJson.
+        ///
+        ///
+        /// To assign an already formatted json string to this property use BinaryData.FromString.
+        ///
+        ///
+        /// Examples:
+        ///
+        ///
+        /// BinaryData.FromObjectAsJson("foo")
+        /// Creates a payload of "foo".
+        ///
+        ///
+        /// BinaryData.FromString("\"foo\"")
+        /// Creates a payload of "foo".
+        ///
+        ///
+        /// BinaryData.FromObjectAsJson(new { key = "value" })
+        /// Creates a payload of { "key": "value" }.
+        ///
+        ///
+        /// BinaryData.FromString("{\"key\": \"value\"}")
+        /// Creates a payload of { "key": "value" }.
+        ///
+        ///
+        ///
+        public IReadOnlyList<BinaryData> Tools { get; }
+        ///
+        /// The list of [File](/docs/api-reference/files) IDs the
+        /// [assistant](/docs/api-reference/assistants) used for this run.
+        ///
+        public IReadOnlyList<string> FileIds { get; }
+        ///
+        /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing
+        /// additional information about the object in a structured format. Keys can be a maximum of 64
+        /// characters long and values can be a maximum of 512 characters long.
+        ///
+        public IReadOnlyDictionary<string, string> Metadata { get; }
+        /// Gets the usage.
+        public RunCompletionUsage Usage { get; }
+    }
+}
diff --git a/.dotnet/src/Generated/Models/RunObjectLastError.Serialization.cs b/.dotnet/src/Generated/Models/RunObjectLastError.Serialization.cs
new file mode 100644
index 000000000..99ff3ab89
--- /dev/null
+++ b/.dotnet/src/Generated/Models/RunObjectLastError.Serialization.cs
@@ -0,0 +1,138 @@
+// <auto-generated/>
+
+using System;
+using OpenAI.ClientShared.Internal;
+using System.ClientModel;
+using System.ClientModel.Primitives;
+using System.Collections.Generic;
+using System.Text.Json;
+using OpenAI;
+
+namespace OpenAI.Internal.Models
+{
+    internal partial class RunObjectLastError : IJsonModel<RunObjectLastError>
+    {
+        void IJsonModel<RunObjectLastError>.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options)
+        {
+            var format = options.Format == "W" ? ((IPersistableModel<RunObjectLastError>)this).GetFormatFromOptions(options) : options.Format;
+            if (format != "J")
+            {
+                throw new FormatException($"The model {nameof(RunObjectLastError)} does not support '{format}' format.");
+            }
+
+            writer.WriteStartObject();
+            writer.WritePropertyName("code"u8);
+            writer.WriteStringValue(Code.ToString());
+            writer.WritePropertyName("message"u8);
+            writer.WriteStringValue(Message);
+            if (options.Format != "W" && _serializedAdditionalRawData != null)
+            {
+                foreach (var item in _serializedAdditionalRawData)
+                {
+                    writer.WritePropertyName(item.Key);
+#if NET6_0_OR_GREATER
+                    writer.WriteRawValue(item.Value);
+#else
+                    using (JsonDocument document = JsonDocument.Parse(item.Value))
+                    {
+                        JsonSerializer.Serialize(writer, document.RootElement);
+                    }
+#endif
+                }
+            }
+            writer.WriteEndObject();
+        }
+
+        RunObjectLastError IJsonModel<RunObjectLastError>.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options)
+        {
+            var format = options.Format == "W" ?
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(RunObjectLastError)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeRunObjectLastError(document.RootElement, options); + } + + internal static RunObjectLastError DeserializeRunObjectLastError(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + RunObjectLastErrorCode code = default; + string message = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("code"u8)) + { + code = new RunObjectLastErrorCode(property.Value.GetString()); + continue; + } + if (property.NameEquals("message"u8)) + { + message = property.Value.GetString(); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new RunObjectLastError(code, message, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(RunObjectLastError)} does not support '{options.Format}' format."); + } + } + + RunObjectLastError IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeRunObjectLastError(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(RunObjectLastError)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static RunObjectLastError FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeRunObjectLastError(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/RunObjectLastError.cs b/.dotnet/src/Generated/Models/RunObjectLastError.cs new file mode 100644 index 000000000..185f4fcfa --- /dev/null +++ b/.dotnet/src/Generated/Models/RunObjectLastError.cs @@ -0,0 +1,77 @@ +// + +using System; +using System.Collections.Generic; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// The RunObjectLastError. + internal partial class RunObjectLastError + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . 
+ /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// One of `server_error` or `rate_limit_exceeded`. + /// A human-readable description of the error. + /// is null. + internal RunObjectLastError(RunObjectLastErrorCode code, string message) + { + Argument.AssertNotNull(message, nameof(message)); + + Code = code; + Message = message; + } + + /// Initializes a new instance of . + /// One of `server_error` or `rate_limit_exceeded`. + /// A human-readable description of the error. + /// Keeps track of any properties unknown to the library. + internal RunObjectLastError(RunObjectLastErrorCode code, string message, IDictionary serializedAdditionalRawData) + { + Code = code; + Message = message; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal RunObjectLastError() + { + } + + /// One of `server_error` or `rate_limit_exceeded`. + public RunObjectLastErrorCode Code { get; } + /// A human-readable description of the error. + public string Message { get; } + } +} diff --git a/.dotnet/src/Generated/Models/RunObjectLastErrorCode.cs b/.dotnet/src/Generated/Models/RunObjectLastErrorCode.cs new file mode 100644 index 000000000..500d3a25a --- /dev/null +++ b/.dotnet/src/Generated/Models/RunObjectLastErrorCode.cs @@ -0,0 +1,46 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Internal.Models +{ + /// Enum for code in RunObjectLastError. + internal readonly partial struct RunObjectLastErrorCode : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public RunObjectLastErrorCode(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string ServerErrorValue = "server_error"; + private const string RateLimitExceededValue = "rate_limit_exceeded"; + + /// server_error. + public static RunObjectLastErrorCode ServerError { get; } = new RunObjectLastErrorCode(ServerErrorValue); + /// rate_limit_exceeded. + public static RunObjectLastErrorCode RateLimitExceeded { get; } = new RunObjectLastErrorCode(RateLimitExceededValue); + /// Determines if two values are the same. + public static bool operator ==(RunObjectLastErrorCode left, RunObjectLastErrorCode right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(RunObjectLastErrorCode left, RunObjectLastErrorCode right) => !left.Equals(right); + /// Converts a string to a . 
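The implicit string conversion documented here (declared on the next line) is what makes these extensible enums ergonomic: known values are static properties, and values the library has never seen still round-trip instead of throwing. A rough usage sketch; since the types are internal, this would only compile inside the library or its tests:

```csharp
using System;

RunObjectLastErrorCode code = "rate_limit_exceeded"; // implicit conversion from string

if (code == RunObjectLastErrorCode.RateLimitExceeded) // case-insensitive equality
{
    Console.WriteLine($"Run failed with: {code}");
}

RunObjectLastErrorCode unknown = "some_future_code"; // representable without throwing
```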
+ public static implicit operator RunObjectLastErrorCode(string value) => new RunObjectLastErrorCode(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is RunObjectLastErrorCode other && Equals(other); + /// + public bool Equals(RunObjectLastErrorCode other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/RunObjectObject.cs b/.dotnet/src/Generated/Models/RunObjectObject.cs new file mode 100644 index 000000000..f03e8122b --- /dev/null +++ b/.dotnet/src/Generated/Models/RunObjectObject.cs @@ -0,0 +1,43 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Internal.Models +{ + /// The RunObject_object. + internal readonly partial struct RunObjectObject : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public RunObjectObject(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string ThreadRunValue = "thread.run"; + + /// thread.run. + public static RunObjectObject ThreadRun { get; } = new RunObjectObject(ThreadRunValue); + /// Determines if two values are the same. + public static bool operator ==(RunObjectObject left, RunObjectObject right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(RunObjectObject left, RunObjectObject right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator RunObjectObject(string value) => new RunObjectObject(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is RunObjectObject other && Equals(other); + /// + public bool Equals(RunObjectObject other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/RunObjectRequiredAction.Serialization.cs b/.dotnet/src/Generated/Models/RunObjectRequiredAction.Serialization.cs new file mode 100644 index 000000000..b89e61f9b --- /dev/null +++ b/.dotnet/src/Generated/Models/RunObjectRequiredAction.Serialization.cs @@ -0,0 +1,138 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class RunObjectRequiredAction : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(RunObjectRequiredAction)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("type"u8); + writer.WriteStringValue(Type.ToString()); + writer.WritePropertyName("submit_tool_outputs"u8); + writer.WriteObjectValue(SubmitToolOutputs); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + RunObjectRequiredAction IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(RunObjectRequiredAction)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeRunObjectRequiredAction(document.RootElement, options); + } + + internal static RunObjectRequiredAction DeserializeRunObjectRequiredAction(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + RunObjectRequiredActionType type = default; + RunObjectRequiredActionSubmitToolOutputs submitToolOutputs = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("type"u8)) + { + type = new RunObjectRequiredActionType(property.Value.GetString()); + continue; + } + if (property.NameEquals("submit_tool_outputs"u8)) + { + submitToolOutputs = RunObjectRequiredActionSubmitToolOutputs.DeserializeRunObjectRequiredActionSubmitToolOutputs(property.Value, options); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new RunObjectRequiredAction(type, submitToolOutputs, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(RunObjectRequiredAction)} does not support '{options.Format}' format."); + } + } + + RunObjectRequiredAction IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeRunObjectRequiredAction(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(RunObjectRequiredAction)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static RunObjectRequiredAction FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeRunObjectRequiredAction(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/RunObjectRequiredAction.cs b/.dotnet/src/Generated/Models/RunObjectRequiredAction.cs new file mode 100644 index 000000000..67b3a12d2 --- /dev/null +++ b/.dotnet/src/Generated/Models/RunObjectRequiredAction.cs @@ -0,0 +1,76 @@ +// + +using System; +using System.Collections.Generic; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// The RunObjectRequiredAction. + internal partial class RunObjectRequiredAction + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// Details on the tool outputs needed for this run to continue. + /// is null. + internal RunObjectRequiredAction(RunObjectRequiredActionSubmitToolOutputs submitToolOutputs) + { + Argument.AssertNotNull(submitToolOutputs, nameof(submitToolOutputs)); + + SubmitToolOutputs = submitToolOutputs; + } + + /// Initializes a new instance of . + /// For now, this is always `submit_tool_outputs`. + /// Details on the tool outputs needed for this run to continue. + /// Keeps track of any properties unknown to the library. + internal RunObjectRequiredAction(RunObjectRequiredActionType type, RunObjectRequiredActionSubmitToolOutputs submitToolOutputs, IDictionary serializedAdditionalRawData) + { + Type = type; + SubmitToolOutputs = submitToolOutputs; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal RunObjectRequiredAction() + { + } + + /// For now, this is always `submit_tool_outputs`. + public RunObjectRequiredActionType Type { get; } = RunObjectRequiredActionType.SubmitToolOutputs; + + /// Details on the tool outputs needed for this run to continue. 
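When a run's status is `requires_action`, the `SubmitToolOutputs` property declared just below carries the pending tool calls. Consuming it might look roughly like this (a sketch over the internal types, assuming an already-deserialized `RunObject run`):

```csharp
using System;

// Sketch: inspect a requires_action run and enumerate its pending tool calls.
if (run.Status == RunObjectStatus.RequiresAction &&
    run.RequiredAction?.Type == RunObjectRequiredActionType.SubmitToolOutputs)
{
    foreach (RunToolCallObject toolCall in run.RequiredAction.SubmitToolOutputs.ToolCalls)
    {
        // Execute the tool call, then submit its output back to the run.
        Console.WriteLine("Pending tool call to execute");
    }
}
```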
+ public RunObjectRequiredActionSubmitToolOutputs SubmitToolOutputs { get; } + } +} diff --git a/.dotnet/src/Generated/Models/RunObjectRequiredActionSubmitToolOutputs.Serialization.cs b/.dotnet/src/Generated/Models/RunObjectRequiredActionSubmitToolOutputs.Serialization.cs new file mode 100644 index 000000000..796357c84 --- /dev/null +++ b/.dotnet/src/Generated/Models/RunObjectRequiredActionSubmitToolOutputs.Serialization.cs @@ -0,0 +1,140 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class RunObjectRequiredActionSubmitToolOutputs : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(RunObjectRequiredActionSubmitToolOutputs)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("tool_calls"u8); + writer.WriteStartArray(); + foreach (var item in ToolCalls) + { + writer.WriteObjectValue(item); + } + writer.WriteEndArray(); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + RunObjectRequiredActionSubmitToolOutputs IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(RunObjectRequiredActionSubmitToolOutputs)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeRunObjectRequiredActionSubmitToolOutputs(document.RootElement, options); + } + + internal static RunObjectRequiredActionSubmitToolOutputs DeserializeRunObjectRequiredActionSubmitToolOutputs(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + IReadOnlyList toolCalls = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("tool_calls"u8)) + { + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(RunToolCallObject.DeserializeRunToolCallObject(item, options)); + } + toolCalls = array; + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new RunObjectRequiredActionSubmitToolOutputs(toolCalls, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(RunObjectRequiredActionSubmitToolOutputs)} does not support '{options.Format}' format."); + } + } + + RunObjectRequiredActionSubmitToolOutputs IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeRunObjectRequiredActionSubmitToolOutputs(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(RunObjectRequiredActionSubmitToolOutputs)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static RunObjectRequiredActionSubmitToolOutputs FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeRunObjectRequiredActionSubmitToolOutputs(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/RunObjectRequiredActionSubmitToolOutputs.cs b/.dotnet/src/Generated/Models/RunObjectRequiredActionSubmitToolOutputs.cs new file mode 100644 index 000000000..a97134507 --- /dev/null +++ b/.dotnet/src/Generated/Models/RunObjectRequiredActionSubmitToolOutputs.cs @@ -0,0 +1,72 @@ +// + +using System; +using System.Collections.Generic; +using System.Linq; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// The RunObjectRequiredActionSubmitToolOutputs. + internal partial class RunObjectRequiredActionSubmitToolOutputs + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// A list of the relevant tool calls. + /// is null. + internal RunObjectRequiredActionSubmitToolOutputs(IEnumerable toolCalls) + { + Argument.AssertNotNull(toolCalls, nameof(toolCalls)); + + ToolCalls = toolCalls.ToList(); + } + + /// Initializes a new instance of . + /// A list of the relevant tool calls. + /// Keeps track of any properties unknown to the library. + internal RunObjectRequiredActionSubmitToolOutputs(IReadOnlyList toolCalls, IDictionary serializedAdditionalRawData) + { + ToolCalls = toolCalls; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. 
+ internal RunObjectRequiredActionSubmitToolOutputs() + { + } + + /// A list of the relevant tool calls. + public IReadOnlyList ToolCalls { get; } + } +} diff --git a/.dotnet/src/Generated/Models/RunObjectRequiredActionType.cs b/.dotnet/src/Generated/Models/RunObjectRequiredActionType.cs new file mode 100644 index 000000000..c1ce2fd12 --- /dev/null +++ b/.dotnet/src/Generated/Models/RunObjectRequiredActionType.cs @@ -0,0 +1,43 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Internal.Models +{ + /// The RunObjectRequiredAction_type. + internal readonly partial struct RunObjectRequiredActionType : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public RunObjectRequiredActionType(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string SubmitToolOutputsValue = "submit_tool_outputs"; + + /// submit_tool_outputs. + public static RunObjectRequiredActionType SubmitToolOutputs { get; } = new RunObjectRequiredActionType(SubmitToolOutputsValue); + /// Determines if two values are the same. + public static bool operator ==(RunObjectRequiredActionType left, RunObjectRequiredActionType right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(RunObjectRequiredActionType left, RunObjectRequiredActionType right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator RunObjectRequiredActionType(string value) => new RunObjectRequiredActionType(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is RunObjectRequiredActionType other && Equals(other); + /// + public bool Equals(RunObjectRequiredActionType other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/RunObjectStatus.cs b/.dotnet/src/Generated/Models/RunObjectStatus.cs new file mode 100644 index 000000000..c369bec80 --- /dev/null +++ b/.dotnet/src/Generated/Models/RunObjectStatus.cs @@ -0,0 +1,64 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Internal.Models +{ + /// Enum for status in RunObject. + internal readonly partial struct RunObjectStatus : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public RunObjectStatus(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string QueuedValue = "queued"; + private const string InProgressValue = "in_progress"; + private const string RequiresActionValue = "requires_action"; + private const string CancellingValue = "cancelling"; + private const string CancelledValue = "cancelled"; + private const string FailedValue = "failed"; + private const string CompletedValue = "completed"; + private const string ExpiredValue = "expired"; + + /// queued. + public static RunObjectStatus Queued { get; } = new RunObjectStatus(QueuedValue); + /// in_progress. + public static RunObjectStatus InProgress { get; } = new RunObjectStatus(InProgressValue); + /// requires_action. + public static RunObjectStatus RequiresAction { get; } = new RunObjectStatus(RequiresActionValue); + /// cancelling. 
+ public static RunObjectStatus Cancelling { get; } = new RunObjectStatus(CancellingValue); + /// cancelled. + public static RunObjectStatus Cancelled { get; } = new RunObjectStatus(CancelledValue); + /// failed. + public static RunObjectStatus Failed { get; } = new RunObjectStatus(FailedValue); + /// completed. + public static RunObjectStatus Completed { get; } = new RunObjectStatus(CompletedValue); + /// expired. + public static RunObjectStatus Expired { get; } = new RunObjectStatus(ExpiredValue); + /// Determines if two values are the same. + public static bool operator ==(RunObjectStatus left, RunObjectStatus right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(RunObjectStatus left, RunObjectStatus right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator RunObjectStatus(string value) => new RunObjectStatus(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is RunObjectStatus other && Equals(other); + /// + public bool Equals(RunObjectStatus other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/RunStepDetailsMessageCreationObject.Serialization.cs b/.dotnet/src/Generated/Models/RunStepDetailsMessageCreationObject.Serialization.cs new file mode 100644 index 000000000..470744c95 --- /dev/null +++ b/.dotnet/src/Generated/Models/RunStepDetailsMessageCreationObject.Serialization.cs @@ -0,0 +1,138 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class RunStepDetailsMessageCreationObject : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(RunStepDetailsMessageCreationObject)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("type"u8); + writer.WriteStringValue(Type.ToString()); + writer.WritePropertyName("message_creation"u8); + writer.WriteObjectValue(MessageCreation); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + RunStepDetailsMessageCreationObject IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(RunStepDetailsMessageCreationObject)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeRunStepDetailsMessageCreationObject(document.RootElement, options); + } + + internal static RunStepDetailsMessageCreationObject DeserializeRunStepDetailsMessageCreationObject(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + RunStepDetailsMessageCreationObjectType type = default; + RunStepDetailsMessageCreationObjectMessageCreation messageCreation = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("type"u8)) + { + type = new RunStepDetailsMessageCreationObjectType(property.Value.GetString()); + continue; + } + if (property.NameEquals("message_creation"u8)) + { + messageCreation = RunStepDetailsMessageCreationObjectMessageCreation.DeserializeRunStepDetailsMessageCreationObjectMessageCreation(property.Value, options); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new RunStepDetailsMessageCreationObject(type, messageCreation, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(RunStepDetailsMessageCreationObject)} does not support '{options.Format}' format."); + } + } + + RunStepDetailsMessageCreationObject IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeRunStepDetailsMessageCreationObject(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(RunStepDetailsMessageCreationObject)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static RunStepDetailsMessageCreationObject FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeRunStepDetailsMessageCreationObject(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. 
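The `ToRequestBody` helper declared below leans on the internal `Utf8JsonRequestBody`; the same conversion can be sketched with public `System.ClientModel` APIs only, where `someModel` stands in for any of the generated `IJsonModel<T>` implementations:

```csharp
using System;
using System.ClientModel;
using System.ClientModel.Primitives;

// Public-API equivalent of the FromResponse/ToRequestBody bridging:
// "J" (JSON) is ModelReaderWriter's default format, matching GetFormatFromOptions.
BinaryData json = ModelReaderWriter.Write(someModel);
BinaryContent body = BinaryContent.Create(json);
```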
+ internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/RunStepDetailsMessageCreationObject.cs b/.dotnet/src/Generated/Models/RunStepDetailsMessageCreationObject.cs new file mode 100644 index 000000000..14f926190 --- /dev/null +++ b/.dotnet/src/Generated/Models/RunStepDetailsMessageCreationObject.cs @@ -0,0 +1,76 @@ +// + +using System; +using System.Collections.Generic; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// Details of the message creation by the run step. + internal partial class RunStepDetailsMessageCreationObject + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// is null. + internal RunStepDetailsMessageCreationObject(RunStepDetailsMessageCreationObjectMessageCreation messageCreation) + { + Argument.AssertNotNull(messageCreation, nameof(messageCreation)); + + MessageCreation = messageCreation; + } + + /// Initializes a new instance of . + /// Details of the message creation by the run step. + /// + /// Keeps track of any properties unknown to the library. + internal RunStepDetailsMessageCreationObject(RunStepDetailsMessageCreationObjectType type, RunStepDetailsMessageCreationObjectMessageCreation messageCreation, IDictionary serializedAdditionalRawData) + { + Type = type; + MessageCreation = messageCreation; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal RunStepDetailsMessageCreationObject() + { + } + + /// Details of the message creation by the run step. + public RunStepDetailsMessageCreationObjectType Type { get; } = RunStepDetailsMessageCreationObjectType.MessageCreation; + + /// Gets the message creation. + public RunStepDetailsMessageCreationObjectMessageCreation MessageCreation { get; } + } +} diff --git a/.dotnet/src/Generated/Models/RunStepDetailsMessageCreationObjectMessageCreation.Serialization.cs b/.dotnet/src/Generated/Models/RunStepDetailsMessageCreationObjectMessageCreation.Serialization.cs new file mode 100644 index 000000000..67a374c66 --- /dev/null +++ b/.dotnet/src/Generated/Models/RunStepDetailsMessageCreationObjectMessageCreation.Serialization.cs @@ -0,0 +1,130 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class RunStepDetailsMessageCreationObjectMessageCreation : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(RunStepDetailsMessageCreationObjectMessageCreation)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("message_id"u8); + writer.WriteStringValue(MessageId); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + RunStepDetailsMessageCreationObjectMessageCreation IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(RunStepDetailsMessageCreationObjectMessageCreation)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeRunStepDetailsMessageCreationObjectMessageCreation(document.RootElement, options); + } + + internal static RunStepDetailsMessageCreationObjectMessageCreation DeserializeRunStepDetailsMessageCreationObjectMessageCreation(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string messageId = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("message_id"u8)) + { + messageId = property.Value.GetString(); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new RunStepDetailsMessageCreationObjectMessageCreation(messageId, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(RunStepDetailsMessageCreationObjectMessageCreation)} does not support '{options.Format}' format."); + } + } + + RunStepDetailsMessageCreationObjectMessageCreation IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeRunStepDetailsMessageCreationObjectMessageCreation(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(RunStepDetailsMessageCreationObjectMessageCreation)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. 
+ /// The result to deserialize the model from. + internal static RunStepDetailsMessageCreationObjectMessageCreation FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeRunStepDetailsMessageCreationObjectMessageCreation(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/RunStepDetailsMessageCreationObjectMessageCreation.cs b/.dotnet/src/Generated/Models/RunStepDetailsMessageCreationObjectMessageCreation.cs new file mode 100644 index 000000000..c0915a420 --- /dev/null +++ b/.dotnet/src/Generated/Models/RunStepDetailsMessageCreationObjectMessageCreation.cs @@ -0,0 +1,71 @@ +// + +using System; +using System.Collections.Generic; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// The RunStepDetailsMessageCreationObjectMessageCreation. + internal partial class RunStepDetailsMessageCreationObjectMessageCreation + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The ID of the message that was created by this run step. + /// is null. + internal RunStepDetailsMessageCreationObjectMessageCreation(string messageId) + { + Argument.AssertNotNull(messageId, nameof(messageId)); + + MessageId = messageId; + } + + /// Initializes a new instance of . + /// The ID of the message that was created by this run step. + /// Keeps track of any properties unknown to the library. + internal RunStepDetailsMessageCreationObjectMessageCreation(string messageId, IDictionary serializedAdditionalRawData) + { + MessageId = messageId; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal RunStepDetailsMessageCreationObjectMessageCreation() + { + } + + /// The ID of the message that was created by this run step. + public string MessageId { get; } + } +} diff --git a/.dotnet/src/Generated/Models/RunStepDetailsMessageCreationObjectType.cs b/.dotnet/src/Generated/Models/RunStepDetailsMessageCreationObjectType.cs new file mode 100644 index 000000000..e5d49a8d3 --- /dev/null +++ b/.dotnet/src/Generated/Models/RunStepDetailsMessageCreationObjectType.cs @@ -0,0 +1,43 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Internal.Models +{ + /// The RunStepDetailsMessageCreationObject_type. + internal readonly partial struct RunStepDetailsMessageCreationObjectType : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public RunStepDetailsMessageCreationObjectType(string value) + { + _value = value ?? 
throw new ArgumentNullException(nameof(value)); + } + + private const string MessageCreationValue = "message_creation"; + + /// message_creation. + public static RunStepDetailsMessageCreationObjectType MessageCreation { get; } = new RunStepDetailsMessageCreationObjectType(MessageCreationValue); + /// Determines if two values are the same. + public static bool operator ==(RunStepDetailsMessageCreationObjectType left, RunStepDetailsMessageCreationObjectType right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(RunStepDetailsMessageCreationObjectType left, RunStepDetailsMessageCreationObjectType right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator RunStepDetailsMessageCreationObjectType(string value) => new RunStepDetailsMessageCreationObjectType(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is RunStepDetailsMessageCreationObjectType other && Equals(other); + /// + public bool Equals(RunStepDetailsMessageCreationObjectType other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/RunStepDetailsToolCallsObject.Serialization.cs b/.dotnet/src/Generated/Models/RunStepDetailsToolCallsObject.Serialization.cs new file mode 100644 index 000000000..6ad4f9fa3 --- /dev/null +++ b/.dotnet/src/Generated/Models/RunStepDetailsToolCallsObject.Serialization.cs @@ -0,0 +1,167 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class RunStepDetailsToolCallsObject : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(RunStepDetailsToolCallsObject)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("type"u8); + writer.WriteStringValue(Type.ToString()); + writer.WritePropertyName("tool_calls"u8); + writer.WriteStartArray(); + foreach (var item in ToolCalls) + { + if (item == null) + { + writer.WriteNullValue(); + continue; + } +#if NET6_0_OR_GREATER + writer.WriteRawValue(item); +#else + using (JsonDocument document = JsonDocument.Parse(item)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + writer.WriteEndArray(); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + RunStepDetailsToolCallsObject IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(RunStepDetailsToolCallsObject)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeRunStepDetailsToolCallsObject(document.RootElement, options); + } + + internal static RunStepDetailsToolCallsObject DeserializeRunStepDetailsToolCallsObject(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + RunStepDetailsToolCallsObjectType type = default; + IReadOnlyList toolCalls = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("type"u8)) + { + type = new RunStepDetailsToolCallsObjectType(property.Value.GetString()); + continue; + } + if (property.NameEquals("tool_calls"u8)) + { + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + if (item.ValueKind == JsonValueKind.Null) + { + array.Add(null); + } + else + { + array.Add(BinaryData.FromString(item.GetRawText())); + } + } + toolCalls = array; + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new RunStepDetailsToolCallsObject(type, toolCalls, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(RunStepDetailsToolCallsObject)} does not support '{options.Format}' format."); + } + } + + RunStepDetailsToolCallsObject IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeRunStepDetailsToolCallsObject(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(RunStepDetailsToolCallsObject)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static RunStepDetailsToolCallsObject FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeRunStepDetailsToolCallsObject(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. 
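Because each element of `tool_calls` is deserialized as raw JSON (`BinaryData`) rather than a typed object, inspecting one means parsing it again. A sketch with a hypothetical payload:

```csharp
using System;
using System.Text.Json;

// Hypothetical "tool_calls" array element, kept as raw JSON by the model above.
BinaryData toolCall = BinaryData.FromString("{\"id\":\"call_abc123\",\"type\":\"code_interpreter\"}");
using JsonDocument document = JsonDocument.Parse(toolCall);
string type = document.RootElement.GetProperty("type").GetString(); // "code_interpreter"
```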
+ internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/RunStepDetailsToolCallsObject.cs b/.dotnet/src/Generated/Models/RunStepDetailsToolCallsObject.cs new file mode 100644 index 000000000..00a388c37 --- /dev/null +++ b/.dotnet/src/Generated/Models/RunStepDetailsToolCallsObject.cs @@ -0,0 +1,113 @@ +// + +using System; +using System.Collections.Generic; +using System.Linq; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// Details of the tool call. + internal partial class RunStepDetailsToolCallsObject + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// An array of tool calls the run step was involved in. These can be associated with one of three + /// types of tools: `code_interpreter`, `retrieval`, or `function`. + /// + /// is null. + internal RunStepDetailsToolCallsObject(IEnumerable toolCalls) + { + Argument.AssertNotNull(toolCalls, nameof(toolCalls)); + + ToolCalls = toolCalls.ToList(); + } + + /// Initializes a new instance of . + /// Always `tool_calls`. + /// + /// An array of tool calls the run step was involved in. These can be associated with one of three + /// types of tools: `code_interpreter`, `retrieval`, or `function`. + /// + /// Keeps track of any properties unknown to the library. + internal RunStepDetailsToolCallsObject(RunStepDetailsToolCallsObjectType type, IReadOnlyList toolCalls, IDictionary serializedAdditionalRawData) + { + Type = type; + ToolCalls = toolCalls; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal RunStepDetailsToolCallsObject() + { + } + + /// Always `tool_calls`. + public RunStepDetailsToolCallsObjectType Type { get; } = RunStepDetailsToolCallsObjectType.ToolCalls; + + /// + /// An array of tool calls the run step was involved in. These can be associated with one of three + /// types of tools: `code_interpreter`, `retrieval`, or `function`. + /// + /// To assign an object to the element of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. 
+ /// + /// + /// + /// + public IReadOnlyList ToolCalls { get; } + } +} diff --git a/.dotnet/src/Generated/Models/RunStepDetailsToolCallsObjectType.cs b/.dotnet/src/Generated/Models/RunStepDetailsToolCallsObjectType.cs new file mode 100644 index 000000000..5593d5a5b --- /dev/null +++ b/.dotnet/src/Generated/Models/RunStepDetailsToolCallsObjectType.cs @@ -0,0 +1,43 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Internal.Models +{ + /// The RunStepDetailsToolCallsObject_type. + internal readonly partial struct RunStepDetailsToolCallsObjectType : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public RunStepDetailsToolCallsObjectType(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string ToolCallsValue = "tool_calls"; + + /// tool_calls. + public static RunStepDetailsToolCallsObjectType ToolCalls { get; } = new RunStepDetailsToolCallsObjectType(ToolCallsValue); + /// Determines if two values are the same. + public static bool operator ==(RunStepDetailsToolCallsObjectType left, RunStepDetailsToolCallsObjectType right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(RunStepDetailsToolCallsObjectType left, RunStepDetailsToolCallsObjectType right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator RunStepDetailsToolCallsObjectType(string value) => new RunStepDetailsToolCallsObjectType(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is RunStepDetailsToolCallsObjectType other && Equals(other); + /// + public bool Equals(RunStepDetailsToolCallsObjectType other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/RunStepObject.Serialization.cs b/.dotnet/src/Generated/Models/RunStepObject.Serialization.cs new file mode 100644 index 000000000..b9656d7d0 --- /dev/null +++ b/.dotnet/src/Generated/Models/RunStepObject.Serialization.cs @@ -0,0 +1,369 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class RunStepObject : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(RunStepObject)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("id"u8); + writer.WriteStringValue(Id); + writer.WritePropertyName("object"u8); + writer.WriteStringValue(Object.ToString()); + writer.WritePropertyName("created_at"u8); + writer.WriteNumberValue(CreatedAt, "U"); + writer.WritePropertyName("assistant_id"u8); + writer.WriteStringValue(AssistantId); + writer.WritePropertyName("thread_id"u8); + writer.WriteStringValue(ThreadId); + writer.WritePropertyName("run_id"u8); + writer.WriteStringValue(RunId); + writer.WritePropertyName("type"u8); + writer.WriteStringValue(Type.ToString()); + writer.WritePropertyName("status"u8); + writer.WriteStringValue(Status.ToString()); + writer.WritePropertyName("step_details"u8); +#if NET6_0_OR_GREATER + writer.WriteRawValue(StepDetails); +#else + using (JsonDocument document = JsonDocument.Parse(StepDetails)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + if (LastError != null) + { + writer.WritePropertyName("last_error"u8); + writer.WriteObjectValue(LastError); + } + else + { + writer.WriteNull("last_error"); + } + if (ExpiresAt != null) + { + writer.WritePropertyName("expires_at"u8); + writer.WriteStringValue(ExpiresAt.Value, "O"); + } + else + { + writer.WriteNull("expires_at"); + } + if (CancelledAt != null) + { + writer.WritePropertyName("cancelled_at"u8); + writer.WriteStringValue(CancelledAt.Value, "O"); + } + else + { + writer.WriteNull("cancelled_at"); + } + if (FailedAt != null) + { + writer.WritePropertyName("failed_at"u8); + writer.WriteStringValue(FailedAt.Value, "O"); + } + else + { + writer.WriteNull("failed_at"); + } + if (CompletedAt != null) + { + writer.WritePropertyName("completed_at"u8); + writer.WriteStringValue(CompletedAt.Value, "O"); + } + else + { + writer.WriteNull("completed_at"); + } + if (Metadata != null && Optional.IsCollectionDefined(Metadata)) + { + writer.WritePropertyName("metadata"u8); + writer.WriteStartObject(); + foreach (var item in Metadata) + { + writer.WritePropertyName(item.Key); + writer.WriteStringValue(item.Value); + } + writer.WriteEndObject(); + } + else + { + writer.WriteNull("metadata"); + } + if (Usage != null) + { + writer.WritePropertyName("usage"u8); + writer.WriteObjectValue(Usage); + } + else + { + writer.WriteNull("usage"); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + RunStepObject IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(RunStepObject)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeRunStepObject(document.RootElement, options); + } + + internal static RunStepObject DeserializeRunStepObject(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string id = default; + RunStepObjectObject @object = default; + DateTimeOffset createdAt = default; + string assistantId = default; + string threadId = default; + string runId = default; + RunStepObjectType type = default; + RunStepObjectStatus status = default; + BinaryData stepDetails = default; + RunStepObjectLastError lastError = default; + DateTimeOffset? expiresAt = default; + DateTimeOffset? cancelledAt = default; + DateTimeOffset? failedAt = default; + DateTimeOffset? completedAt = default; + IReadOnlyDictionary metadata = default; + RunCompletionUsage usage = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("id"u8)) + { + id = property.Value.GetString(); + continue; + } + if (property.NameEquals("object"u8)) + { + @object = new RunStepObjectObject(property.Value.GetString()); + continue; + } + if (property.NameEquals("created_at"u8)) + { + createdAt = DateTimeOffset.FromUnixTimeSeconds(property.Value.GetInt64()); + continue; + } + if (property.NameEquals("assistant_id"u8)) + { + assistantId = property.Value.GetString(); + continue; + } + if (property.NameEquals("thread_id"u8)) + { + threadId = property.Value.GetString(); + continue; + } + if (property.NameEquals("run_id"u8)) + { + runId = property.Value.GetString(); + continue; + } + if (property.NameEquals("type"u8)) + { + type = new RunStepObjectType(property.Value.GetString()); + continue; + } + if (property.NameEquals("status"u8)) + { + status = new RunStepObjectStatus(property.Value.GetString()); + continue; + } + if (property.NameEquals("step_details"u8)) + { + stepDetails = BinaryData.FromString(property.Value.GetRawText()); + continue; + } + if (property.NameEquals("last_error"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + lastError = null; + continue; + } + lastError = RunStepObjectLastError.DeserializeRunStepObjectLastError(property.Value, options); + continue; + } + if (property.NameEquals("expires_at"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + expiresAt = null; + continue; + } + expiresAt = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("cancelled_at"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + cancelledAt = null; + continue; + } + cancelledAt = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("failed_at"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + failedAt = null; + continue; + } + failedAt = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("completed_at"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + completedAt = null; + continue; + } + completedAt = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("metadata"u8)) + { + if 
(property.Value.ValueKind == JsonValueKind.Null) + { + metadata = new ChangeTrackingDictionary(); + continue; + } + Dictionary dictionary = new Dictionary(); + foreach (var property0 in property.Value.EnumerateObject()) + { + dictionary.Add(property0.Name, property0.Value.GetString()); + } + metadata = dictionary; + continue; + } + if (property.NameEquals("usage"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + usage = null; + continue; + } + usage = RunCompletionUsage.DeserializeRunCompletionUsage(property.Value, options); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new RunStepObject( + id, + @object, + createdAt, + assistantId, + threadId, + runId, + type, + status, + stepDetails, + lastError, + expiresAt, + cancelledAt, + failedAt, + completedAt, + metadata, + usage, + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(RunStepObject)} does not support '{options.Format}' format."); + } + } + + RunStepObject IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeRunStepObject(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(RunStepObject)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static RunStepObject FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeRunStepObject(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/RunStepObject.cs b/.dotnet/src/Generated/Models/RunStepObject.cs new file mode 100644 index 000000000..2827c4f8d --- /dev/null +++ b/.dotnet/src/Generated/Models/RunStepObject.cs @@ -0,0 +1,234 @@ +// + +using System; +using System.Collections.Generic; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// Represents a step in execution of a run. + internal partial class RunStepObject + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. 
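Note the timestamp convention in the deserializer above: `created_at` arrives as Unix seconds and is converted with `DateTimeOffset.FromUnixTimeSeconds`. The conversion is lossless in both directions:

```csharp
using System;

long unixSeconds = 1_700_000_000;                  // what the wire carries
DateTimeOffset createdAt = DateTimeOffset.FromUnixTimeSeconds(unixSeconds);
// createdAt is 2023-11-14 22:13:20 +00:00
long roundTripped = createdAt.ToUnixTimeSeconds(); // 1700000000 again
```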
+ /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The identifier of the run step, which can be referenced in API endpoints. + /// The Unix timestamp (in seconds) for when the run step was created. + /// The ID of the [assistant](/docs/api-reference/assistants) associated with the run step. + /// The ID of the [thread](/docs/api-reference/threads) that was run. + /// The ID of the [run](/docs/api-reference/runs) that this run step is a part of. + /// The type of run step, which can be either `message_creation` or `tool_calls`. + /// + /// The status of the run step, which can be either `in_progress`, `cancelled`, `failed`, + /// `completed`, or `expired`. + /// + /// The details of the run step. + /// The last error associated with this run step. Will be `null` if there are no errors. + /// + /// The Unix timestamp (in seconds) for when the run step expired. A step is considered expired + /// if the parent run is expired. + /// + /// The Unix timestamp (in seconds) for when the run step was cancelled. + /// The Unix timestamp (in seconds) for when the run step failed. + /// The Unix timestamp (in seconds) for when the run step completed. + /// + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + /// additional information about the object in a structured format. Keys can be a maximum of 64 + /// characters long and values can be a maximum of 512 characters long. + /// + /// + /// , , , or is null. + internal RunStepObject(string id, DateTimeOffset createdAt, string assistantId, string threadId, string runId, RunStepObjectType type, RunStepObjectStatus status, BinaryData stepDetails, RunStepObjectLastError lastError, DateTimeOffset? expiresAt, DateTimeOffset? cancelledAt, DateTimeOffset? failedAt, DateTimeOffset? completedAt, IReadOnlyDictionary metadata, RunCompletionUsage usage) + { + Argument.AssertNotNull(id, nameof(id)); + Argument.AssertNotNull(assistantId, nameof(assistantId)); + Argument.AssertNotNull(threadId, nameof(threadId)); + Argument.AssertNotNull(runId, nameof(runId)); + Argument.AssertNotNull(stepDetails, nameof(stepDetails)); + + Id = id; + CreatedAt = createdAt; + AssistantId = assistantId; + ThreadId = threadId; + RunId = runId; + Type = type; + Status = status; + StepDetails = stepDetails; + LastError = lastError; + ExpiresAt = expiresAt; + CancelledAt = cancelledAt; + FailedAt = failedAt; + CompletedAt = completedAt; + Metadata = metadata; + Usage = usage; + } + + /// Initializes a new instance of . + /// The identifier of the run step, which can be referenced in API endpoints. + /// The object type, which is always `thread.run.step`. + /// The Unix timestamp (in seconds) for when the run step was created. + /// The ID of the [assistant](/docs/api-reference/assistants) associated with the run step. + /// The ID of the [thread](/docs/api-reference/threads) that was run. + /// The ID of the [run](/docs/api-reference/runs) that this run step is a part of. + /// The type of run step, which can be either `message_creation` or `tool_calls`. + /// + /// The status of the run step, which can be either `in_progress`, `cancelled`, `failed`, + /// `completed`, or `expired`. + /// + /// The details of the run step. + /// The last error associated with this run step.
+ /// + /// The Unix timestamp (in seconds) for when the run step expired. A step is considered expired + /// if the parent run is expired. + /// + /// The Unix timestamp (in seconds) for when the run step was cancelled. + /// The Unix timestamp (in seconds) for when the run step failed. + /// The Unix timestamp (in seconds) for when the run step completed. + /// + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + /// additional information about the object in a structured format. Keys can be a maximum of 64 + /// characters long and values can be a maximum of 512 characters long. + /// + /// + /// Keeps track of any properties unknown to the library. + internal RunStepObject(string id, RunStepObjectObject @object, DateTimeOffset createdAt, string assistantId, string threadId, string runId, RunStepObjectType type, RunStepObjectStatus status, BinaryData stepDetails, RunStepObjectLastError lastError, DateTimeOffset? expiresAt, DateTimeOffset? cancelledAt, DateTimeOffset? failedAt, DateTimeOffset? completedAt, IReadOnlyDictionary metadata, RunCompletionUsage usage, IDictionary serializedAdditionalRawData) + { + Id = id; + Object = @object; + CreatedAt = createdAt; + AssistantId = assistantId; + ThreadId = threadId; + RunId = runId; + Type = type; + Status = status; + StepDetails = stepDetails; + LastError = lastError; + ExpiresAt = expiresAt; + CancelledAt = cancelledAt; + FailedAt = failedAt; + CompletedAt = completedAt; + Metadata = metadata; + Usage = usage; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal RunStepObject() + { + } + + /// The identifier of the run step, which can be referenced in API endpoints. + public string Id { get; } + /// The object type, which is always `thread.run.step`. + public RunStepObjectObject Object { get; } = RunStepObjectObject.ThreadRunStep; + + /// The Unix timestamp (in seconds) for when the run step was created. + public DateTimeOffset CreatedAt { get; } + /// The ID of the [assistant](/docs/api-reference/assistants) associated with the run step. + public string AssistantId { get; } + /// The ID of the [thread](/docs/api-reference/threads) that was run. + public string ThreadId { get; } + /// The ID of the [run](/docs/api-reference/runs) that this run step is a part of. + public string RunId { get; } + /// The type of run step, which can be either `message_creation` or `tool_calls`. + public RunStepObjectType Type { get; } + /// + /// The status of the run step, which can be either `in_progress`, `cancelled`, `failed`, + /// `completed`, or `expired`. + /// + public RunStepObjectStatus Status { get; } + /// + /// The details of the run step. + /// + /// To assign an object to this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// + /// Supported types: + /// + /// + /// + /// + /// + /// + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + public BinaryData StepDetails { get; } + /// The last error associated with this run step.
Will be `null` if there are no errors. + public RunStepObjectLastError LastError { get; } + /// + /// The Unix timestamp (in seconds) for when the run step expired. A step is considered expired + /// if the parent run is expired. + /// + public DateTimeOffset? ExpiresAt { get; } + /// The Unix timestamp (in seconds) for when the run step was cancelled. + public DateTimeOffset? CancelledAt { get; } + /// The Unix timestamp (in seconds) for when the run step failed. + public DateTimeOffset? FailedAt { get; } + /// The Unix timestamp (in seconds) for when the run step completed. + public DateTimeOffset? CompletedAt { get; } + /// + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + /// additional information about the object in a structured format. Keys can be a maximum of 64 + /// characters long and values can be a maximum of 512 characters long. + /// + public IReadOnlyDictionary Metadata { get; } + /// Gets the usage. + public RunCompletionUsage Usage { get; } + } +} diff --git a/.dotnet/src/Generated/Models/RunStepObjectLastError.Serialization.cs b/.dotnet/src/Generated/Models/RunStepObjectLastError.Serialization.cs new file mode 100644 index 000000000..832446fd7 --- /dev/null +++ b/.dotnet/src/Generated/Models/RunStepObjectLastError.Serialization.cs @@ -0,0 +1,138 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class RunStepObjectLastError : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(RunStepObjectLastError)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("code"u8); + writer.WriteStringValue(Code.ToString()); + writer.WritePropertyName("message"u8); + writer.WriteStringValue(Message); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + RunStepObjectLastError IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(RunStepObjectLastError)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeRunStepObjectLastError(document.RootElement, options); + } + + internal static RunStepObjectLastError DeserializeRunStepObjectLastError(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + RunStepObjectLastErrorCode code = default; + string message = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("code"u8)) + { + code = new RunStepObjectLastErrorCode(property.Value.GetString()); + continue; + } + if (property.NameEquals("message"u8)) + { + message = property.Value.GetString(); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new RunStepObjectLastError(code, message, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(RunStepObjectLastError)} does not support '{options.Format}' format."); + } + } + + RunStepObjectLastError IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeRunStepObjectLastError(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(RunStepObjectLastError)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static RunStepObjectLastError FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeRunStepObjectLastError(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/RunStepObjectLastError.cs b/.dotnet/src/Generated/Models/RunStepObjectLastError.cs new file mode 100644 index 000000000..ea325cd55 --- /dev/null +++ b/.dotnet/src/Generated/Models/RunStepObjectLastError.cs @@ -0,0 +1,77 @@ +// + +using System; +using System.Collections.Generic; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// The RunStepObjectLastError. + internal partial class RunStepObjectLastError + { + /// + /// Keeps track of any properties unknown to the library. 
+ /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// One of `server_error` or `rate_limit_exceeded`. + /// A human-readable description of the error. + /// is null. + internal RunStepObjectLastError(RunStepObjectLastErrorCode code, string message) + { + Argument.AssertNotNull(message, nameof(message)); + + Code = code; + Message = message; + } + + /// Initializes a new instance of . + /// One of `server_error` or `rate_limit_exceeded`. + /// A human-readable description of the error. + /// Keeps track of any properties unknown to the library. + internal RunStepObjectLastError(RunStepObjectLastErrorCode code, string message, IDictionary serializedAdditionalRawData) + { + Code = code; + Message = message; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal RunStepObjectLastError() + { + } + + /// One of `server_error` or `rate_limit_exceeded`. + public RunStepObjectLastErrorCode Code { get; } + /// A human-readable description of the error. + public string Message { get; } + } +} diff --git a/.dotnet/src/Generated/Models/RunStepObjectLastErrorCode.cs b/.dotnet/src/Generated/Models/RunStepObjectLastErrorCode.cs new file mode 100644 index 000000000..2d8832e99 --- /dev/null +++ b/.dotnet/src/Generated/Models/RunStepObjectLastErrorCode.cs @@ -0,0 +1,46 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Internal.Models +{ + /// Enum for code in RunStepObjectLastError. + internal readonly partial struct RunStepObjectLastErrorCode : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public RunStepObjectLastErrorCode(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string ServerErrorValue = "server_error"; + private const string RateLimitExceededValue = "rate_limit_exceeded"; + + /// server_error. + public static RunStepObjectLastErrorCode ServerError { get; } = new RunStepObjectLastErrorCode(ServerErrorValue); + /// rate_limit_exceeded. + public static RunStepObjectLastErrorCode RateLimitExceeded { get; } = new RunStepObjectLastErrorCode(RateLimitExceededValue); + /// Determines if two values are the same. + public static bool operator ==(RunStepObjectLastErrorCode left, RunStepObjectLastErrorCode right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(RunStepObjectLastErrorCode left, RunStepObjectLastErrorCode right) => !left.Equals(right); + /// Converts a string to a . 
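`RunStepObjectLastErrorCode`, like the other string-backed enum structs in this change, accepts values the library has never seen and compares them case-insensitively (per the `Equals` overload below). A sketch of that behavior; `quota_exceeded` is an invented future code:

```csharp
using OpenAI.Internal.Models; // internal namespace; usable from the library or its tests

RunStepObjectLastErrorCode known = "SERVER_ERROR";             // implicit conversion from string
bool same = known == RunStepObjectLastErrorCode.ServerError;   // true: comparison ignores case
var future = new RunStepObjectLastErrorCode("quota_exceeded"); // unknown values still round-trip
string wire = future.ToString();                               // "quota_exceeded"
```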
+ public static implicit operator RunStepObjectLastErrorCode(string value) => new RunStepObjectLastErrorCode(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is RunStepObjectLastErrorCode other && Equals(other); + /// + public bool Equals(RunStepObjectLastErrorCode other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/RunStepObjectObject.cs b/.dotnet/src/Generated/Models/RunStepObjectObject.cs new file mode 100644 index 000000000..376e9e3cc --- /dev/null +++ b/.dotnet/src/Generated/Models/RunStepObjectObject.cs @@ -0,0 +1,43 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Internal.Models +{ + /// The RunStepObject_object. + internal readonly partial struct RunStepObjectObject : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public RunStepObjectObject(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string ThreadRunStepValue = "thread.run.step"; + + /// thread.run.step. + public static RunStepObjectObject ThreadRunStep { get; } = new RunStepObjectObject(ThreadRunStepValue); + /// Determines if two values are the same. + public static bool operator ==(RunStepObjectObject left, RunStepObjectObject right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(RunStepObjectObject left, RunStepObjectObject right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator RunStepObjectObject(string value) => new RunStepObjectObject(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is RunStepObjectObject other && Equals(other); + /// + public bool Equals(RunStepObjectObject other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/RunStepObjectStatus.cs b/.dotnet/src/Generated/Models/RunStepObjectStatus.cs new file mode 100644 index 000000000..f46f639d7 --- /dev/null +++ b/.dotnet/src/Generated/Models/RunStepObjectStatus.cs @@ -0,0 +1,55 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Internal.Models +{ + /// Enum for status in RunStepObject. + internal readonly partial struct RunStepObjectStatus : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public RunStepObjectStatus(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string InProgressValue = "in_progress"; + private const string CancelledValue = "cancelled"; + private const string FailedValue = "failed"; + private const string CompletedValue = "completed"; + private const string ExpiredValue = "expired"; + + /// in_progress. + public static RunStepObjectStatus InProgress { get; } = new RunStepObjectStatus(InProgressValue); + /// cancelled. 
+ public static RunStepObjectStatus Cancelled { get; } = new RunStepObjectStatus(CancelledValue); + /// failed. + public static RunStepObjectStatus Failed { get; } = new RunStepObjectStatus(FailedValue); + /// completed. + public static RunStepObjectStatus Completed { get; } = new RunStepObjectStatus(CompletedValue); + /// expired. + public static RunStepObjectStatus Expired { get; } = new RunStepObjectStatus(ExpiredValue); + /// Determines if two values are the same. + public static bool operator ==(RunStepObjectStatus left, RunStepObjectStatus right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(RunStepObjectStatus left, RunStepObjectStatus right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator RunStepObjectStatus(string value) => new RunStepObjectStatus(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is RunStepObjectStatus other && Equals(other); + /// + public bool Equals(RunStepObjectStatus other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/RunStepObjectType.cs b/.dotnet/src/Generated/Models/RunStepObjectType.cs new file mode 100644 index 000000000..30257f499 --- /dev/null +++ b/.dotnet/src/Generated/Models/RunStepObjectType.cs @@ -0,0 +1,46 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Internal.Models +{ + /// Enum for type in RunStepObject. + internal readonly partial struct RunStepObjectType : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public RunStepObjectType(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string MessageCreationValue = "message_creation"; + private const string ToolCallsValue = "tool_calls"; + + /// message_creation. + public static RunStepObjectType MessageCreation { get; } = new RunStepObjectType(MessageCreationValue); + /// tool_calls. + public static RunStepObjectType ToolCalls { get; } = new RunStepObjectType(ToolCallsValue); + /// Determines if two values are the same. + public static bool operator ==(RunStepObjectType left, RunStepObjectType right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(RunStepObjectType left, RunStepObjectType right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator RunStepObjectType(string value) => new RunStepObjectType(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is RunStepObjectType other && Equals(other); + /// + public bool Equals(RunStepObjectType other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 
0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/RunToolCallObject.Serialization.cs b/.dotnet/src/Generated/Models/RunToolCallObject.Serialization.cs new file mode 100644 index 000000000..ae09f2a8a --- /dev/null +++ b/.dotnet/src/Generated/Models/RunToolCallObject.Serialization.cs @@ -0,0 +1,146 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class RunToolCallObject : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(RunToolCallObject)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("id"u8); + writer.WriteStringValue(Id); + writer.WritePropertyName("type"u8); + writer.WriteStringValue(Type.ToString()); + writer.WritePropertyName("function"u8); + writer.WriteObjectValue(Function); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + RunToolCallObject IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(RunToolCallObject)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeRunToolCallObject(document.RootElement, options); + } + + internal static RunToolCallObject DeserializeRunToolCallObject(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string id = default; + RunToolCallObjectType type = default; + RunToolCallObjectFunction function = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("id"u8)) + { + id = property.Value.GetString(); + continue; + } + if (property.NameEquals("type"u8)) + { + type = new RunToolCallObjectType(property.Value.GetString()); + continue; + } + if (property.NameEquals("function"u8)) + { + function = RunToolCallObjectFunction.DeserializeRunToolCallObjectFunction(property.Value, options); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new RunToolCallObject(id, type, function, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(RunToolCallObject)} does not support '{options.Format}' format."); + } + } + + RunToolCallObject IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeRunToolCallObject(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(RunToolCallObject)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static RunToolCallObject FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeRunToolCallObject(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/RunToolCallObject.cs b/.dotnet/src/Generated/Models/RunToolCallObject.cs new file mode 100644 index 000000000..dd632156c --- /dev/null +++ b/.dotnet/src/Generated/Models/RunToolCallObject.cs @@ -0,0 +1,92 @@ +// + +using System; +using System.Collections.Generic; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// Tool call objects. + internal partial class RunToolCallObject + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// + /// The ID of the tool call. This ID must be referenced when you submit the tool outputs using + /// the [Submit tool outputs to run](/docs/api-reference/runs/submitToolOutputs) endpoint. + /// + /// The function definition. + /// or is null. + internal RunToolCallObject(string id, RunToolCallObjectFunction function) + { + Argument.AssertNotNull(id, nameof(id)); + Argument.AssertNotNull(function, nameof(function)); + + Id = id; + Function = function; + } + + /// Initializes a new instance of . + /// + /// The ID of the tool call. This ID must be referenced when you submit the tool outputs using + /// the [Submit tool outputs to run](/docs/api-reference/runs/submitToolOutputs) endpoint. + /// + /// The type of tool call the output is required for. For now, this is always `function`. + /// The function definition. + /// Keeps track of any properties unknown to the library. 
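A hedged sketch of consuming this model from inside the library: the payload below is hypothetical, and `get_weather` is an invented function name.

```csharp
using System;
using System.ClientModel.Primitives;
using OpenAI.Internal.Models; // internal namespace; usable from the library or its tests

BinaryData payload = BinaryData.FromString(
    "{\"id\":\"call_abc123\",\"type\":\"function\"," +
    "\"function\":{\"name\":\"get_weather\",\"arguments\":\"{\\\"city\\\":\\\"Paris\\\"}\"}}");

var toolCall = ModelReaderWriter.Read<RunToolCallObject>(payload, new ModelReaderWriterOptions("J"));
// toolCall.Id is the value to echo back when submitting tool outputs;
// toolCall.Function.Arguments is itself a JSON string (see the next sketch).
```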
+ internal RunToolCallObject(string id, RunToolCallObjectType type, RunToolCallObjectFunction function, IDictionary serializedAdditionalRawData) + { + Id = id; + Type = type; + Function = function; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal RunToolCallObject() + { + } + + /// + /// The ID of the tool call. This ID must be referenced when you submit the tool outputs using + /// the [Submit tool outputs to run](/docs/api-reference/runs/submitToolOutputs) endpoint. + /// + public string Id { get; } + /// The type of tool call the output is required for. For now, this is always `function`. + public RunToolCallObjectType Type { get; } = RunToolCallObjectType.Function; + + /// The function definition. + public RunToolCallObjectFunction Function { get; } + } +} diff --git a/.dotnet/src/Generated/Models/RunToolCallObjectFunction.Serialization.cs b/.dotnet/src/Generated/Models/RunToolCallObjectFunction.Serialization.cs new file mode 100644 index 000000000..3837aaa2e --- /dev/null +++ b/.dotnet/src/Generated/Models/RunToolCallObjectFunction.Serialization.cs @@ -0,0 +1,138 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class RunToolCallObjectFunction : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(RunToolCallObjectFunction)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("name"u8); + writer.WriteStringValue(Name); + writer.WritePropertyName("arguments"u8); + writer.WriteStringValue(Arguments); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + RunToolCallObjectFunction IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(RunToolCallObjectFunction)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeRunToolCallObjectFunction(document.RootElement, options); + } + + internal static RunToolCallObjectFunction DeserializeRunToolCallObjectFunction(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string name = default; + string arguments = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("name"u8)) + { + name = property.Value.GetString(); + continue; + } + if (property.NameEquals("arguments"u8)) + { + arguments = property.Value.GetString(); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new RunToolCallObjectFunction(name, arguments, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(RunToolCallObjectFunction)} does not support '{options.Format}' format."); + } + } + + RunToolCallObjectFunction IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeRunToolCallObjectFunction(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(RunToolCallObjectFunction)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static RunToolCallObjectFunction FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeRunToolCallObjectFunction(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/RunToolCallObjectFunction.cs b/.dotnet/src/Generated/Models/RunToolCallObjectFunction.cs new file mode 100644 index 000000000..0a867e576 --- /dev/null +++ b/.dotnet/src/Generated/Models/RunToolCallObjectFunction.cs @@ -0,0 +1,78 @@ +// + +using System; +using System.Collections.Generic; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// The RunToolCallObjectFunction. + internal partial class RunToolCallObjectFunction + { + /// + /// Keeps track of any properties unknown to the library. 
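`Arguments` is serialized as a plain string even though it holds a JSON document, so callers parse it separately; a sketch with hypothetical arguments:

```csharp
using System.Text.Json;

string arguments = "{\"city\":\"Paris\",\"unit\":\"celsius\"}"; // hypothetical model output
using JsonDocument document = JsonDocument.Parse(arguments);
string city = document.RootElement.GetProperty("city").GetString(); // "Paris"
```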
+ /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The name of the function. + /// The arguments that the model expects you to pass to the function. + /// or is null. + internal RunToolCallObjectFunction(string name, string arguments) + { + Argument.AssertNotNull(name, nameof(name)); + Argument.AssertNotNull(arguments, nameof(arguments)); + + Name = name; + Arguments = arguments; + } + + /// Initializes a new instance of . + /// The name of the function. + /// The arguments that the model expects you to pass to the function. + /// Keeps track of any properties unknown to the library. + internal RunToolCallObjectFunction(string name, string arguments, IDictionary serializedAdditionalRawData) + { + Name = name; + Arguments = arguments; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal RunToolCallObjectFunction() + { + } + + /// The name of the function. + public string Name { get; } + /// The arguments that the model expects you to pass to the function. + public string Arguments { get; } + } +} diff --git a/.dotnet/src/Generated/Models/RunToolCallObjectType.cs b/.dotnet/src/Generated/Models/RunToolCallObjectType.cs new file mode 100644 index 000000000..b036b215c --- /dev/null +++ b/.dotnet/src/Generated/Models/RunToolCallObjectType.cs @@ -0,0 +1,43 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Internal.Models +{ + /// The RunToolCallObject_type. + internal readonly partial struct RunToolCallObjectType : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public RunToolCallObjectType(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string FunctionValue = "function"; + + /// function. + public static RunToolCallObjectType Function { get; } = new RunToolCallObjectType(FunctionValue); + /// Determines if two values are the same. + public static bool operator ==(RunToolCallObjectType left, RunToolCallObjectType right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(RunToolCallObjectType left, RunToolCallObjectType right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator RunToolCallObjectType(string value) => new RunToolCallObjectType(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is RunToolCallObjectType other && Equals(other); + /// + public bool Equals(RunToolCallObjectType other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 
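+ // Note: Equals above compares with InvariantCultureIgnoreCase while this hash is computed
+ // from the raw string, so values differing only in casing can compare equal yet hash
+ // differently; avoid using these extensible enums as hash-based keys when casing may vary.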
0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/SubmitToolOutputsRunRequest.Serialization.cs b/.dotnet/src/Generated/Models/SubmitToolOutputsRunRequest.Serialization.cs new file mode 100644 index 000000000..d38502d9b --- /dev/null +++ b/.dotnet/src/Generated/Models/SubmitToolOutputsRunRequest.Serialization.cs @@ -0,0 +1,140 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class SubmitToolOutputsRunRequest : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(SubmitToolOutputsRunRequest)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("tool_outputs"u8); + writer.WriteStartArray(); + foreach (var item in ToolOutputs) + { + writer.WriteObjectValue(item); + } + writer.WriteEndArray(); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + SubmitToolOutputsRunRequest IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(SubmitToolOutputsRunRequest)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeSubmitToolOutputsRunRequest(document.RootElement, options); + } + + internal static SubmitToolOutputsRunRequest DeserializeSubmitToolOutputsRunRequest(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + IList toolOutputs = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("tool_outputs"u8)) + { + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(SubmitToolOutputsRunRequestToolOutput.DeserializeSubmitToolOutputsRunRequestToolOutput(item, options)); + } + toolOutputs = array; + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new SubmitToolOutputsRunRequest(toolOutputs, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(SubmitToolOutputsRunRequest)} does not support '{options.Format}' format."); + } + } + + SubmitToolOutputsRunRequest IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeSubmitToolOutputsRunRequest(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(SubmitToolOutputsRunRequest)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static SubmitToolOutputsRunRequest FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeSubmitToolOutputsRunRequest(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/SubmitToolOutputsRunRequest.cs b/.dotnet/src/Generated/Models/SubmitToolOutputsRunRequest.cs new file mode 100644 index 000000000..2bed85b35 --- /dev/null +++ b/.dotnet/src/Generated/Models/SubmitToolOutputsRunRequest.cs @@ -0,0 +1,72 @@ +// + +using System; +using System.Collections.Generic; +using System.Linq; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// The SubmitToolOutputsRunRequest. + internal partial class SubmitToolOutputsRunRequest + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// A list of tools for which the outputs are being submitted. + /// is null. + public SubmitToolOutputsRunRequest(IEnumerable toolOutputs) + { + Argument.AssertNotNull(toolOutputs, nameof(toolOutputs)); + + ToolOutputs = toolOutputs.ToList(); + } + + /// Initializes a new instance of . + /// A list of tools for which the outputs are being submitted. + /// Keeps track of any properties unknown to the library. + internal SubmitToolOutputsRunRequest(IList toolOutputs, IDictionary serializedAdditionalRawData) + { + ToolOutputs = toolOutputs; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal SubmitToolOutputsRunRequest() + { + } + + /// A list of tools for which the outputs are being submitted. 
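+ /// <example>
+ /// A minimal construction sketch (the id and output values are illustrative only):
+ /// <code>
+ /// var request = new SubmitToolOutputsRunRequest(new[]
+ /// {
+ ///     new SubmitToolOutputsRunRequestToolOutput { ToolCallId = "call_abc123", Output = "70F" },
+ /// });
+ /// </code>
+ /// </example>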
+ public IList ToolOutputs { get; } + } +} diff --git a/.dotnet/src/Generated/Models/SubmitToolOutputsRunRequestToolOutput.Serialization.cs b/.dotnet/src/Generated/Models/SubmitToolOutputsRunRequestToolOutput.Serialization.cs new file mode 100644 index 000000000..c18a1644b --- /dev/null +++ b/.dotnet/src/Generated/Models/SubmitToolOutputsRunRequestToolOutput.Serialization.cs @@ -0,0 +1,144 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class SubmitToolOutputsRunRequestToolOutput : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(SubmitToolOutputsRunRequestToolOutput)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsDefined(ToolCallId)) + { + writer.WritePropertyName("tool_call_id"u8); + writer.WriteStringValue(ToolCallId); + } + if (Optional.IsDefined(Output)) + { + writer.WritePropertyName("output"u8); + writer.WriteStringValue(Output); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + SubmitToolOutputsRunRequestToolOutput IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(SubmitToolOutputsRunRequestToolOutput)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeSubmitToolOutputsRunRequestToolOutput(document.RootElement, options); + } + + internal static SubmitToolOutputsRunRequestToolOutput DeserializeSubmitToolOutputsRunRequestToolOutput(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string toolCallId = default; + string output = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("tool_call_id"u8)) + { + toolCallId = property.Value.GetString(); + continue; + } + if (property.NameEquals("output"u8)) + { + output = property.Value.GetString(); + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new SubmitToolOutputsRunRequestToolOutput(toolCallId, output, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(SubmitToolOutputsRunRequestToolOutput)} does not support '{options.Format}' format."); + } + } + + SubmitToolOutputsRunRequestToolOutput IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeSubmitToolOutputsRunRequestToolOutput(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(SubmitToolOutputsRunRequestToolOutput)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static SubmitToolOutputsRunRequestToolOutput FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeSubmitToolOutputsRunRequestToolOutput(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. + internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/SubmitToolOutputsRunRequestToolOutput.cs b/.dotnet/src/Generated/Models/SubmitToolOutputsRunRequestToolOutput.cs new file mode 100644 index 000000000..78c4c374f --- /dev/null +++ b/.dotnet/src/Generated/Models/SubmitToolOutputsRunRequestToolOutput.cs @@ -0,0 +1,70 @@ +// + +using System; +using System.Collections.Generic; + +namespace OpenAI.Internal.Models +{ + /// The SubmitToolOutputsRunRequestToolOutput. + internal partial class SubmitToolOutputsRunRequestToolOutput + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + public SubmitToolOutputsRunRequestToolOutput() + { + } + + /// Initializes a new instance of . + /// + /// The ID of the tool call in the `required_action` object within the run object the output is + /// being submitted for. + /// + /// The output of the tool call to be submitted to continue the run. + /// Keeps track of any properties unknown to the library. 
+ internal SubmitToolOutputsRunRequestToolOutput(string toolCallId, string output, IDictionary serializedAdditionalRawData) + { + ToolCallId = toolCallId; + Output = output; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// + /// The ID of the tool call in the `required_action` object within the run object the output is + /// being submitted for. + /// + public string ToolCallId { get; set; } + /// The output of the tool call to be submitted to continue the run. + public string Output { get; set; } + } +} diff --git a/.dotnet/src/Generated/Models/ThreadObject.Serialization.cs b/.dotnet/src/Generated/Models/ThreadObject.Serialization.cs new file mode 100644 index 000000000..cb9c70752 --- /dev/null +++ b/.dotnet/src/Generated/Models/ThreadObject.Serialization.cs @@ -0,0 +1,177 @@ +// + +using System; +using OpenAI.ClientShared.Internal; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + internal partial class ThreadObject : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ThreadObject)} does not support '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("id"u8); + writer.WriteStringValue(Id); + writer.WritePropertyName("object"u8); + writer.WriteStringValue(Object.ToString()); + writer.WritePropertyName("created_at"u8); + writer.WriteNumberValue(CreatedAt, "U"); + if (Metadata != null && Optional.IsCollectionDefined(Metadata)) + { + writer.WritePropertyName("metadata"u8); + writer.WriteStartObject(); + foreach (var item in Metadata) + { + writer.WritePropertyName(item.Key); + writer.WriteStringValue(item.Value); + } + writer.WriteEndObject(); + } + else + { + writer.WriteNull("metadata"); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + ThreadObject IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
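+ // Illustrative note on the deserializer below: `created_at` is read back with
+ // DateTimeOffset.FromUnixTimeSeconds (the inverse of the "U" format used when writing),
+ // and a wire value of `"metadata": null` becomes an empty ChangeTrackingDictionary.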
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ThreadObject)} does not support '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeThreadObject(document.RootElement, options); + } + + internal static ThreadObject DeserializeThreadObject(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= new ModelReaderWriterOptions("W"); + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string id = default; + ThreadObjectObject @object = default; + DateTimeOffset createdAt = default; + IReadOnlyDictionary metadata = default; + IDictionary serializedAdditionalRawData = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("id"u8)) + { + id = property.Value.GetString(); + continue; + } + if (property.NameEquals("object"u8)) + { + @object = new ThreadObjectObject(property.Value.GetString()); + continue; + } + if (property.NameEquals("created_at"u8)) + { + createdAt = DateTimeOffset.FromUnixTimeSeconds(property.Value.GetInt64()); + continue; + } + if (property.NameEquals("metadata"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + metadata = new ChangeTrackingDictionary(); + continue; + } + Dictionary dictionary = new Dictionary(); + foreach (var property0 in property.Value.EnumerateObject()) + { + dictionary.Add(property0.Name, property0.Value.GetString()); + } + metadata = dictionary; + continue; + } + if (options.Format != "W") + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = additionalPropertiesDictionary; + return new ThreadObject(id, @object, createdAt, metadata, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(ThreadObject)} does not support '{options.Format}' format."); + } + } + + ThreadObject IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeThreadObject(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(ThreadObject)} does not support '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The result to deserialize the model from. + internal static ThreadObject FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeThreadObject(document.RootElement); + } + + /// Convert into a Utf8JsonRequestBody. 
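+ /// <remarks>
+ /// Used by the protocol layer to attach the serialized model to an outgoing request.
+ /// Utf8JsonRequestBody is an internal helper; external callers would presumably use
+ /// BinaryContent.Create instead.
+ /// </remarks>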
+ internal virtual BinaryContent ToRequestBody() + { + var content = new Utf8JsonRequestBody(); + content.JsonWriter.WriteObjectValue(this); + return content; + } + } +} diff --git a/.dotnet/src/Generated/Models/ThreadObject.cs b/.dotnet/src/Generated/Models/ThreadObject.cs new file mode 100644 index 000000000..4176811ee --- /dev/null +++ b/.dotnet/src/Generated/Models/ThreadObject.cs @@ -0,0 +1,100 @@ +// + +using System; +using System.Collections.Generic; +using OpenAI; + +namespace OpenAI.Internal.Models +{ + /// Represents a thread that contains [messages](/docs/api-reference/messages). + internal partial class ThreadObject + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The identifier, which can be referenced in API endpoints. + /// The Unix timestamp (in seconds) for when the thread was created. + /// + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + /// additional information about the object in a structured format. Keys can be a maximum of 64 + /// characters long and values can be a maximum of 512 characters long. + /// + /// is null. + internal ThreadObject(string id, DateTimeOffset createdAt, IReadOnlyDictionary metadata) + { + Argument.AssertNotNull(id, nameof(id)); + + Id = id; + CreatedAt = createdAt; + Metadata = metadata; + } + + /// Initializes a new instance of . + /// The identifier, which can be referenced in API endpoints. + /// The object type, which is always `thread`. + /// The Unix timestamp (in seconds) for when the thread was created. + /// + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + /// additional information about the object in a structured format. Keys can be a maximum of 64 + /// characters long and values can be a maximum of 512 characters long. + /// + /// Keeps track of any properties unknown to the library. + internal ThreadObject(string id, ThreadObjectObject @object, DateTimeOffset createdAt, IReadOnlyDictionary metadata, IDictionary serializedAdditionalRawData) + { + Id = id; + Object = @object; + CreatedAt = createdAt; + Metadata = metadata; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal ThreadObject() + { + } + + /// The identifier, which can be referenced in API endpoints. + public string Id { get; } + /// The object type, which is always `thread`. + public ThreadObjectObject Object { get; } = ThreadObjectObject.Thread; + + /// The Unix timestamp (in seconds) for when the thread was created. + public DateTimeOffset CreatedAt { get; } + /// + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + /// additional information about the object in a structured format. 
Keys can be a maximum of 64 + /// characters long and values can be a maximum of 512 characters long. + /// + public IReadOnlyDictionary Metadata { get; } + } +} diff --git a/.dotnet/src/Generated/Models/ThreadObjectObject.cs b/.dotnet/src/Generated/Models/ThreadObjectObject.cs new file mode 100644 index 000000000..0b10ab32c --- /dev/null +++ b/.dotnet/src/Generated/Models/ThreadObjectObject.cs @@ -0,0 +1,43 @@ +// + +using System; +using System.ComponentModel; + +namespace OpenAI.Internal.Models +{ + /// The ThreadObject_object. + internal readonly partial struct ThreadObjectObject : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public ThreadObjectObject(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string ThreadValue = "thread"; + + /// thread. + public static ThreadObjectObject Thread { get; } = new ThreadObjectObject(ThreadValue); + /// Determines if two values are the same. + public static bool operator ==(ThreadObjectObject left, ThreadObjectObject right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(ThreadObjectObject left, ThreadObjectObject right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator ThreadObjectObject(string value) => new ThreadObjectObject(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is ThreadObjectObject other && Equals(other); + /// + public bool Equals(ThreadObjectObject other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/ModelsOps.cs b/.dotnet/src/Generated/ModelsOps.cs new file mode 100644 index 000000000..6ae259a43 --- /dev/null +++ b/.dotnet/src/Generated/ModelsOps.cs @@ -0,0 +1,402 @@ +// + +using System; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Text; +using System.Threading; +using System.Threading.Tasks; +using OpenAI.Internal.Models; + +namespace OpenAI.Internal +{ + // Data plane generated sub-client. + /// The ModelsOps sub-client. + internal partial class ModelsOps + { + private const string AuthorizationHeader = "Authorization"; + private readonly ApiKeyCredential _credential; + private const string AuthorizationApiKeyPrefix = "Bearer"; + private readonly ClientPipeline _pipeline; + private readonly Uri _endpoint; + + /// The HTTP pipeline for sending and receiving REST requests and responses. + public virtual ClientPipeline Pipeline => _pipeline; + + /// Initializes a new instance of ModelsOps for mocking. + protected ModelsOps() + { + } + + /// Initializes a new instance of ModelsOps. + /// The HTTP pipeline for sending and receiving REST requests and responses. + /// The key credential to copy. + /// OpenAI Endpoint. + internal ModelsOps(ClientPipeline pipeline, ApiKeyCredential credential, Uri endpoint) + { + _pipeline = pipeline; + _credential = credential; + _endpoint = endpoint; + } + + /// + /// Lists the currently available models, and provides basic information about each one such as the + /// owner and availability. 
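+ /// <example>
+ /// A minimal usage sketch, assuming access to this internal sub-client through a variable
+ /// named <c>modelsOps</c> (illustrative only):
+ /// <code>
+ /// ClientResult&lt;ListModelsResponse&gt; models = await modelsOps.GetModelsAsync();
+ /// </code>
+ /// </example>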
+ /// + public virtual async Task> GetModelsAsync() + { + ClientResult result = await GetModelsAsync(DefaultRequestContext).ConfigureAwait(false); + return ClientResult.FromValue(ListModelsResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// Lists the currently available models, and provides basic information about each one such as the + /// owner and availability. + /// + public virtual ClientResult GetModels() + { + ClientResult result = GetModels(DefaultRequestContext); + return ClientResult.FromValue(ListModelsResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Lists the currently available models, and provides basic information about each one such as the + /// owner and availability. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task GetModelsAsync(RequestOptions options) + { + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("ModelsOps.GetModels"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateGetModelsRequest(options); + return ClientResult.FromResponse(await _pipeline.ProcessMessageAsync(message, options).ConfigureAwait(false)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Lists the currently available models, and provides basic information about each one such as the + /// owner and availability. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual ClientResult GetModels(RequestOptions options) + { + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("ModelsOps.GetModels"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateGetModelsRequest(options); + return ClientResult.FromResponse(_pipeline.ProcessMessage(message, options)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// + /// Retrieves a model instance, providing basic information about the model such as the owner and + /// permissioning. + /// + /// The ID of the model to use for this request. + /// is null. + /// is an empty string, and was expected to be non-empty. + public virtual async Task> RetrieveAsync(string model) + { + Argument.AssertNotNullOrEmpty(model, nameof(model)); + + ClientResult result = await RetrieveAsync(model, DefaultRequestContext).ConfigureAwait(false); + return ClientResult.FromValue(Model.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// Retrieves a model instance, providing basic information about the model such as the owner and + /// permissioning. 
+ /// + /// The ID of the model to use for this request. + /// is null. + /// is an empty string, and was expected to be non-empty. + public virtual ClientResult Retrieve(string model) + { + Argument.AssertNotNullOrEmpty(model, nameof(model)); + + ClientResult result = Retrieve(model, DefaultRequestContext); + return ClientResult.FromValue(Model.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Retrieves a model instance, providing basic information about the model such as the owner and + /// permissioning. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the model to use for this request. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task RetrieveAsync(string model, RequestOptions options) + { + Argument.AssertNotNullOrEmpty(model, nameof(model)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("ModelsOps.Retrieve"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateRetrieveRequest(model, options); + return ClientResult.FromResponse(await _pipeline.ProcessMessageAsync(message, options).ConfigureAwait(false)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Retrieves a model instance, providing basic information about the model such as the owner and + /// permissioning. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the model to use for this request. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual ClientResult Retrieve(string model, RequestOptions options) + { + Argument.AssertNotNullOrEmpty(model, nameof(model)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("ModelsOps.Retrieve"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateRetrieveRequest(model, options); + return ClientResult.FromResponse(_pipeline.ProcessMessage(message, options)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// Delete a fine-tuned model. You must have the Owner role in your organization to delete a model. + /// The model to delete. + /// is null. + /// is an empty string, and was expected to be non-empty. + public virtual async Task> DeleteAsync(string model) + { + Argument.AssertNotNullOrEmpty(model, nameof(model)); + + ClientResult result = await DeleteAsync(model, DefaultRequestContext).ConfigureAwait(false); + return ClientResult.FromValue(DeleteModelResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Delete a fine-tuned model. 
You must have the Owner role in your organization to delete a model. + /// The model to delete. + /// is null. + /// is an empty string, and was expected to be non-empty. + public virtual ClientResult Delete(string model) + { + Argument.AssertNotNullOrEmpty(model, nameof(model)); + + ClientResult result = Delete(model, DefaultRequestContext); + return ClientResult.FromValue(DeleteModelResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Delete a fine-tuned model. You must have the Owner role in your organization to delete a model. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The model to delete. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task DeleteAsync(string model, RequestOptions options) + { + Argument.AssertNotNullOrEmpty(model, nameof(model)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("ModelsOps.Delete"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateDeleteRequest(model, options); + return ClientResult.FromResponse(await _pipeline.ProcessMessageAsync(message, options).ConfigureAwait(false)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Delete a fine-tuned model. You must have the Owner role in your organization to delete a model. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The model to delete. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. 
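+ /// <example>
+ /// A minimal protocol-level sketch (the fine-tuned model id is illustrative only):
+ /// <code>
+ /// ClientResult result = modelsOps.Delete("ft:gpt-3.5-turbo:acme::abc123", new RequestOptions());
+ /// PipelineResponse response = result.GetRawResponse();
+ /// </code>
+ /// </example>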
+ public virtual ClientResult Delete(string model, RequestOptions options) + { + Argument.AssertNotNullOrEmpty(model, nameof(model)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("ModelsOps.Delete"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateDeleteRequest(model, options); + return ClientResult.FromResponse(_pipeline.ProcessMessage(message, options)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + internal PipelineMessage CreateGetModelsRequest(RequestOptions options) + { + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "GET"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/models"); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); + return message; + } + + internal PipelineMessage CreateRetrieveRequest(string model, RequestOptions options) + { + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "GET"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/models/"); + path.Append(model); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); + return message; + } + + internal PipelineMessage CreateDeleteRequest(string model, RequestOptions options) + { + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "DELETE"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/models/"); + path.Append(model); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); + return message; + } + + private static RequestOptions DefaultRequestContext = new RequestOptions(); + + private static PipelineMessageClassifier _responseErrorClassifier200; + private static PipelineMessageClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= PipelineMessageClassifier.Create(stackalloc ushort[] { 200 }); + } +} diff --git a/.dotnet/src/Generated/Moderations.cs b/.dotnet/src/Generated/Moderations.cs new file mode 100644 index 000000000..0e76f8341 --- /dev/null +++ b/.dotnet/src/Generated/Moderations.cs @@ -0,0 +1,167 @@ +// + +using System; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Text; +using System.Threading; +using System.Threading.Tasks; +using OpenAI.Internal.Models; + +namespace OpenAI.Internal +{ + // Data plane generated sub-client. + /// The Moderations sub-client. + internal partial class Moderations + { + private const string AuthorizationHeader = "Authorization"; + private readonly ApiKeyCredential _credential; + private const string AuthorizationApiKeyPrefix = "Bearer"; + private readonly ClientPipeline _pipeline; + private readonly Uri _endpoint; + + /// The HTTP pipeline for sending and receiving REST requests and responses. + public virtual ClientPipeline Pipeline => _pipeline; + + /// Initializes a new instance of Moderations for mocking. + protected Moderations() + { + } + + /// Initializes a new instance of Moderations. 
+ /// The HTTP pipeline for sending and receiving REST requests and responses. + /// The key credential to copy. + /// OpenAI Endpoint. + internal Moderations(ClientPipeline pipeline, ApiKeyCredential credential, Uri endpoint) + { + _pipeline = pipeline; + _credential = credential; + _endpoint = endpoint; + } + + /// Classifies if text violates OpenAI's Content Policy. + /// The to use. + /// is null. + public virtual async Task> CreateModerationAsync(CreateModerationRequest content) + { + Argument.AssertNotNull(content, nameof(content)); + + using BinaryContent content0 = BinaryContent.Create(content); + ClientResult result = await CreateModerationAsync(content0, DefaultRequestContext).ConfigureAwait(false); + return ClientResult.FromValue(CreateModerationResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Classifies if text violates OpenAI's Content Policy. + /// The to use. + /// is null. + public virtual ClientResult CreateModeration(CreateModerationRequest content) + { + Argument.AssertNotNull(content, nameof(content)); + + using BinaryContent content0 = BinaryContent.Create(content); + ClientResult result = CreateModeration(content0, DefaultRequestContext); + return ClientResult.FromValue(CreateModerationResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Classifies if text violates OpenAI's Content Policy + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The content to send as the body of the request. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task CreateModerationAsync(BinaryContent content, RequestOptions options = null) + { + Argument.AssertNotNull(content, nameof(content)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("Moderations.CreateModeration"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateCreateModerationRequest(content, options); + return ClientResult.FromResponse(await _pipeline.ProcessMessageAsync(message, options).ConfigureAwait(false)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Classifies if text violates OpenAI's Content Policy + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The content to send as the body of the request. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// Service returned a non-success status code. + /// The response returned from the service. 
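+ /// <example>
+ /// A minimal sketch of this protocol overload with a hand-built JSON body (input text
+ /// illustrative only):
+ /// <code>
+ /// BinaryContent body = BinaryContent.Create(BinaryData.FromString("{\"input\": \"text to classify\"}"));
+ /// ClientResult result = moderations.CreateModeration(body);
+ /// </code>
+ /// </example>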
+ public virtual ClientResult CreateModeration(BinaryContent content, RequestOptions options = null) + { + Argument.AssertNotNull(content, nameof(content)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("Moderations.CreateModeration"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateCreateModerationRequest(content, options); + return ClientResult.FromResponse(_pipeline.ProcessMessage(message, options)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + internal PipelineMessage CreateCreateModerationRequest(BinaryContent content, RequestOptions options) + { + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "POST"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/moderations"); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); + request.Headers.Set("Content-Type", "application/json"); + request.Content = content; + message.Apply(options); + return message; + } + + private static RequestOptions DefaultRequestContext = new RequestOptions(); + + private static PipelineMessageClassifier _responseErrorClassifier200; + private static PipelineMessageClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= PipelineMessageClassifier.Create(stackalloc ushort[] { 200 }); + } +} diff --git a/.dotnet/src/Generated/OpenAIClient.cs b/.dotnet/src/Generated/OpenAIClient.cs new file mode 100644 index 000000000..7fe8c431f --- /dev/null +++ b/.dotnet/src/Generated/OpenAIClient.cs @@ -0,0 +1,146 @@ +// + +using System; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Threading; + +namespace OpenAI.Internal +{ + // Data plane generated client. + /// The OpenAI service client. + internal partial class OpenAIClient + { + private const string AuthorizationHeader = "Authorization"; + private readonly ApiKeyCredential _credential; + private const string AuthorizationApiKeyPrefix = "Bearer"; + private readonly ClientPipeline _pipeline; + private readonly Uri _endpoint; + + /// The HTTP pipeline for sending and receiving REST requests and responses. + public virtual ClientPipeline Pipeline => _pipeline; + + /// Initializes a new instance of OpenAIClient for mocking. + protected OpenAIClient() + { + } + + /// Initializes a new instance of OpenAIClient. + /// A credential used to authenticate to the service. + /// is null. + public OpenAIClient(ApiKeyCredential credential) : this(new Uri("https://api.openai.com/v1"), credential, new OpenAIClientOptions()) + { + } + + /// Initializes a new instance of OpenAIClient. + /// OpenAI Endpoint. + /// A credential used to authenticate to the service. + /// The options for configuring the client. + /// or is null. 
+ public OpenAIClient(Uri endpoint, ApiKeyCredential credential, OpenAIClientOptions options) + { + Argument.AssertNotNull(endpoint, nameof(endpoint)); + Argument.AssertNotNull(credential, nameof(credential)); + options ??= new OpenAIClientOptions(); + _credential = credential; + var authenticationPolicy = ApiKeyAuthenticationPolicy.CreateBearerAuthorizationPolicy(_credential); + _pipeline = ClientPipeline.Create(options, + perCallPolicies: ReadOnlySpan.Empty, + perTryPolicies: new PipelinePolicy[] { authenticationPolicy }, + beforeTransportPolicies: ReadOnlySpan.Empty); + _endpoint = endpoint; + } + + private OpenAI.Internal.Audio _cachedAudio; + private OpenAI.Internal.Assistants _cachedAssistants; + private OpenAI.Internal.Chat _cachedChat; + private OpenAI.Internal.Completions _cachedCompletions; + private OpenAI.Internal.Embeddings _cachedEmbeddings; + private OpenAI.Internal.Files _cachedFiles; + private OpenAI.Internal.FineTuning _cachedFineTuning; + private OpenAI.Internal.Images _cachedImages; + private OpenAI.Internal.Messages _cachedMessages; + private OpenAI.Internal.ModelsOps _cachedModelsOps; + private OpenAI.Internal.Moderations _cachedModerations; + private OpenAI.Internal.Runs _cachedRuns; + private OpenAI.Internal.Threads _cachedThreads; + + /// Initializes a new instance of Audio. + public virtual OpenAI.Internal.Audio GetAudioClient() + { + return Volatile.Read(ref _cachedAudio) ?? Interlocked.CompareExchange(ref _cachedAudio, new OpenAI.Internal.Audio(_pipeline, _credential, _endpoint), null) ?? _cachedAudio; + } + + /// Initializes a new instance of Assistants. + public virtual OpenAI.Internal.Assistants GetAssistantsClient() + { + return Volatile.Read(ref _cachedAssistants) ?? Interlocked.CompareExchange(ref _cachedAssistants, new OpenAI.Internal.Assistants(_pipeline, _credential, _endpoint), null) ?? _cachedAssistants; + } + + /// Initializes a new instance of Chat. + public virtual OpenAI.Internal.Chat GetChatClient() + { + return Volatile.Read(ref _cachedChat) ?? Interlocked.CompareExchange(ref _cachedChat, new OpenAI.Internal.Chat(_pipeline, _credential, _endpoint), null) ?? _cachedChat; + } + + /// Initializes a new instance of Completions. + public virtual OpenAI.Internal.Completions GetCompletionsClient() + { + return Volatile.Read(ref _cachedCompletions) ?? Interlocked.CompareExchange(ref _cachedCompletions, new OpenAI.Internal.Completions(_pipeline, _credential, _endpoint), null) ?? _cachedCompletions; + } + + /// Initializes a new instance of Embeddings. + public virtual OpenAI.Internal.Embeddings GetEmbeddingsClient() + { + return Volatile.Read(ref _cachedEmbeddings) ?? Interlocked.CompareExchange(ref _cachedEmbeddings, new OpenAI.Internal.Embeddings(_pipeline, _credential, _endpoint), null) ?? _cachedEmbeddings; + } + + /// Initializes a new instance of Files. + public virtual OpenAI.Internal.Files GetFilesClient() + { + return Volatile.Read(ref _cachedFiles) ?? Interlocked.CompareExchange(ref _cachedFiles, new OpenAI.Internal.Files(_pipeline, _credential, _endpoint), null) ?? _cachedFiles; + } + + /// Initializes a new instance of FineTuning. + public virtual OpenAI.Internal.FineTuning GetFineTuningClient() + { + return Volatile.Read(ref _cachedFineTuning) ?? Interlocked.CompareExchange(ref _cachedFineTuning, new OpenAI.Internal.FineTuning(_pipeline, _credential, _endpoint), null) ?? _cachedFineTuning; + } + + /// Initializes a new instance of Images. 
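+ /// <remarks>
+ /// As with the other accessors on this client, the sub-client is created lazily and cached:
+ /// the Volatile.Read/Interlocked.CompareExchange pair lets the first successful writer win,
+ /// so concurrent callers all share a single instance.
+ /// </remarks>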
+ public virtual OpenAI.Internal.Images GetImagesClient() + { + return Volatile.Read(ref _cachedImages) ?? Interlocked.CompareExchange(ref _cachedImages, new OpenAI.Internal.Images(_pipeline, _credential, _endpoint), null) ?? _cachedImages; + } + + /// Initializes a new instance of Messages. + public virtual OpenAI.Internal.Messages GetMessagesClient() + { + return Volatile.Read(ref _cachedMessages) ?? Interlocked.CompareExchange(ref _cachedMessages, new OpenAI.Internal.Messages(_pipeline, _credential, _endpoint), null) ?? _cachedMessages; + } + + /// Initializes a new instance of ModelsOps. + public virtual OpenAI.Internal.ModelsOps GetModelsOpsClient() + { + return Volatile.Read(ref _cachedModelsOps) ?? Interlocked.CompareExchange(ref _cachedModelsOps, new OpenAI.Internal.ModelsOps(_pipeline, _credential, _endpoint), null) ?? _cachedModelsOps; + } + + /// Initializes a new instance of Moderations. + public virtual OpenAI.Internal.Moderations GetModerationsClient() + { + return Volatile.Read(ref _cachedModerations) ?? Interlocked.CompareExchange(ref _cachedModerations, new OpenAI.Internal.Moderations(_pipeline, _credential, _endpoint), null) ?? _cachedModerations; + } + + /// Initializes a new instance of Runs. + public virtual OpenAI.Internal.Runs GetRunsClient() + { + return Volatile.Read(ref _cachedRuns) ?? Interlocked.CompareExchange(ref _cachedRuns, new OpenAI.Internal.Runs(_pipeline, _credential, _endpoint), null) ?? _cachedRuns; + } + + /// Initializes a new instance of Threads. + public virtual OpenAI.Internal.Threads GetThreadsClient() + { + return Volatile.Read(ref _cachedThreads) ?? Interlocked.CompareExchange(ref _cachedThreads, new OpenAI.Internal.Threads(_pipeline, _credential, _endpoint), null) ?? _cachedThreads; + } + } +} diff --git a/.dotnet/src/Generated/OpenAIClientOptions.cs b/.dotnet/src/Generated/OpenAIClientOptions.cs new file mode 100644 index 000000000..10b7372d4 --- /dev/null +++ b/.dotnet/src/Generated/OpenAIClientOptions.cs @@ -0,0 +1,11 @@ +// + +using System.ClientModel.Primitives; + +namespace OpenAI.Internal +{ + /// Client options for OpenAIClient. + internal partial class OpenAIClientOptions : ClientPipelineOptions + { + } +} diff --git a/.dotnet/src/Generated/OpenAIModelFactory.cs b/.dotnet/src/Generated/OpenAIModelFactory.cs new file mode 100644 index 000000000..5722c2d7a --- /dev/null +++ b/.dotnet/src/Generated/OpenAIModelFactory.cs @@ -0,0 +1,874 @@ +// + +using System; +using System.Collections.Generic; +using System.Linq; + +namespace OpenAI.Internal.Models +{ + /// Model factory for models. + internal static partial class OpenAIModelFactory + { + /// Initializes a new instance of . + /// Number of tokens in the prompt. + /// Number of tokens in the generated completion. + /// Total number of tokens used in the request (prompt + completion). + /// A new instance for mocking. + public static CompletionUsage CompletionUsage(long promptTokens = default, long completionTokens = default, long totalTokens = default) + { + return new CompletionUsage(promptTokens, completionTokens, totalTokens, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// + /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to + /// see all of your available models, or see our [Model overview](/docs/models/overview) for + /// descriptions of them. + /// + /// + /// The prompt(s) to generate completions for, encoded as a string, array of strings, array of + /// tokens, or array of token arrays. 
+ /// + /// Note that <|endoftext|> is the document separator that the model sees during training, so if a + /// prompt is not specified the model will generate as if from the beginning of a new document. + /// + /// + /// Generates `best_of` completions server-side and returns the "best" (the one with the highest + /// log probability per token). Results cannot be streamed. + /// + /// When used with `n`, `best_of` controls the number of candidate completions and `n` specifies + /// how many to return – `best_of` must be greater than `n`. + /// + /// **Note:** Because this parameter generates many completions, it can quickly consume your token + /// quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. + /// + /// Echo back the prompt in addition to the completion. + /// + /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing + /// frequency in the text so far, decreasing the model's likelihood to repeat the same line + /// verbatim. + /// + /// [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) + /// + /// + /// Modify the likelihood of specified tokens appearing in the completion. + /// + /// Accepts a JSON object that maps tokens (specified by their token ID in the GPT tokenizer) to an + /// associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) + /// to convert text to token IDs. Mathematically, the bias is added to the logits generated by the + /// model prior to sampling. The exact effect will vary per model, but values between -1 and 1 + /// should decrease or increase likelihood of selection; values like -100 or 100 should result in a + /// ban or exclusive selection of the relevant token. + /// + /// As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token from being + /// generated. + /// + /// + /// Include the log probabilities on the `logprobs` most likely tokens, as well as the chosen tokens. + /// For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The + /// API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` + /// elements in the response. + /// + /// The maximum value for `logprobs` is 5. + /// + /// + /// The maximum number of [tokens](/tokenizer) to generate in the completion. + /// + /// The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + /// [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) + /// for counting tokens. + /// + /// + /// How many completions to generate for each prompt. + /// + /// **Note:** Because this parameter generates many completions, it can quickly consume your token + /// quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. + /// + /// + /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear + /// in the text so far, increasing the model's likelihood to talk about new topics. + /// + /// [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) + /// + /// + /// If specified, our system will make a best effort to sample deterministically, such that + /// repeated requests with the same `seed` and parameters should return the same result. 
+ /// + /// Determinism is not guaranteed, and you should refer to the `system_fingerprint` response + /// parameter to monitor changes in the backend. + /// + /// Up to 4 sequences where the API will stop generating further tokens. + /// + /// If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only + /// [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) + /// as they become available, with the stream terminated by a `data: [DONE]` message. + /// [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb). + /// + /// The suffix that comes after a completion of inserted text. + /// + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output + /// more random, while lower values like 0.2 will make it more focused and deterministic. + /// + /// We generally recommend altering this or `top_p` but not both. + /// + /// + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers + /// the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising + /// the top 10% probability mass are considered. + /// + /// We generally recommend altering this or `temperature` but not both. + /// + /// + /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect + /// abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). + /// + /// A new instance for mocking. + public static CreateCompletionRequest CreateCompletionRequest(CreateCompletionRequestModel model = default, BinaryData prompt = null, long? bestOf = null, bool? echo = null, double? frequencyPenalty = null, IDictionary logitBias = null, long? logprobs = null, long? maxTokens = null, long? n = null, double? presencePenalty = null, long? seed = null, BinaryData stop = null, bool? stream = null, string suffix = null, double? temperature = null, double? topP = null, string user = null) + { + logitBias ??= new Dictionary(); + + return new CreateCompletionRequest( + model, + prompt, + bestOf, + echo, + frequencyPenalty, + logitBias, + logprobs, + maxTokens, + n, + presencePenalty, + seed, + stop, + stream, + suffix, + temperature, + topP, + user, + serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// A unique identifier for the completion. + /// The list of completion choices the model generated for the input. + /// The Unix timestamp (in seconds) of when the completion was created. + /// The model used for the completion. + /// + /// This fingerprint represents the backend configuration that the model runs with. + /// + /// Can be used in conjunction with the `seed` request parameter to understand when backend changes + /// have been made that might impact determinism. + /// + /// The object type, which is always `text_completion`. + /// Usage statistics for the completion request. + /// A new instance for mocking. 
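Since these factory methods exist purely for mocking, a brief sketch of how a test might use them may help. This is a hypothetical snippet, assuming the test project can see the internal `OpenAIModelFactory` (for example via `InternalsVisibleTo`); all literal values are placeholders.

```csharp
using System;
using OpenAI.Internal.Models;

// Hypothetical test setup: fabricate a completion response without any network
// call, using only the factory shapes shown in this file. Values are placeholders.
CompletionUsage usage = OpenAIModelFactory.CompletionUsage(
    promptTokens: 9, completionTokens: 12, totalTokens: 21);

CreateCompletionResponse response = OpenAIModelFactory.CreateCompletionResponse(
    id: "cmpl-abc123",                  // placeholder completion ID
    created: DateTimeOffset.UtcNow,
    model: "gpt-3.5-turbo-instruct",
    usage: usage);
```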
+ public static CreateCompletionResponse CreateCompletionResponse(string id = null, IEnumerable choices = null, DateTimeOffset created = default, string model = null, string systemFingerprint = null, CreateCompletionResponseObject @object = default, CompletionUsage usage = null)
+ {
+ choices ??= new List();
+
+ return new CreateCompletionResponse(
+ id,
+ choices?.ToList(),
+ created,
+ model,
+ systemFingerprint,
+ @object,
+ usage,
+ serializedAdditionalRawData: null);
+ }
+
+ /// Initializes a new instance of .
+ ///
+ ///
+ ///
+ ///
+ /// The reason the model stopped generating tokens. This will be `stop` if the model hit a
+ /// natural stop point or a provided stop sequence, `length` if the maximum number of tokens
+ /// specified in the request was reached, or `content_filter` if content was omitted due to a
+ /// flag from our content filters.
+ ///
+ /// A new instance for mocking.
+ public static CreateCompletionResponseChoice CreateCompletionResponseChoice(long index = default, string text = null, CreateCompletionResponseChoiceLogprobs logprobs = null, CreateCompletionResponseChoiceFinishReason finishReason = default)
+ {
+ return new CreateCompletionResponseChoice(index, text, logprobs, finishReason, serializedAdditionalRawData: null);
+ }
+
+ /// Initializes a new instance of .
+ ///
+ ///
+ ///
+ ///
+ /// A new instance for mocking.
+ public static CreateCompletionResponseChoiceLogprobs CreateCompletionResponseChoiceLogprobs(IEnumerable tokens = null, IEnumerable tokenLogprobs = null, IEnumerable> topLogprobs = null, IEnumerable textOffset = null)
+ {
+ tokens ??= new List();
+ tokenLogprobs ??= new List();
+ topLogprobs ??= new List>();
+ textOffset ??= new List();
+
+ return new CreateCompletionResponseChoiceLogprobs(tokens?.ToList(), tokenLogprobs?.ToList(), topLogprobs?.ToList(), textOffset?.ToList(), serializedAdditionalRawData: null);
+ }
+
+ /// Initializes a new instance of .
+ ///
+ /// The name of the model to fine-tune. You can select one of the
+ /// [supported models](/docs/guides/fine-tuning/what-models-can-be-fine-tuned).
+ ///
+ ///
+ /// The ID of an uploaded file that contains training data.
+ ///
+ /// See [upload file](/docs/api-reference/files/upload) for how to upload a file.
+ ///
+ /// Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with
+ /// the purpose `fine-tune`.
+ ///
+ /// See the [fine-tuning guide](/docs/guides/fine-tuning) for more details.
+ ///
+ /// The hyperparameters used for the fine-tuning job.
+ ///
+ /// A string of up to 18 characters that will be added to your fine-tuned model name.
+ ///
+ /// For example, a `suffix` of "custom-model-name" would produce a model name like
+ /// `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`.
+ ///
+ ///
+ /// The ID of an uploaded file that contains validation data.
+ ///
+ /// If you provide this file, the data is used to generate validation metrics periodically during
+ /// fine-tuning. These metrics can be viewed in the fine-tuning results file. The same data should
+ /// not be present in both train and validation files.
+ ///
+ /// Your dataset must be formatted as a JSONL file. You must upload your file with the purpose
+ /// `fine-tune`.
+ ///
+ /// See the [fine-tuning guide](/docs/guides/fine-tuning) for more details.
+ ///
+ /// A new instance for mocking.
+ public static CreateFineTuningJobRequest CreateFineTuningJobRequest(CreateFineTuningJobRequestModel model = default, string trainingFile = null, CreateFineTuningJobRequestHyperparameters hyperparameters = null, string suffix = null, string validationFile = null) + { + return new CreateFineTuningJobRequest( + model, + trainingFile, + hyperparameters, + suffix, + validationFile, + serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// The object identifier, which can be referenced in the API endpoints. + /// The Unix timestamp (in seconds) for when the fine-tuning job was created. + /// + /// For fine-tuning jobs that have `failed`, this will contain more information on the cause of the + /// failure. + /// + /// + /// The name of the fine-tuned model that is being created. The value will be null if the + /// fine-tuning job is still running. + /// + /// + /// The Unix timestamp (in seconds) for when the fine-tuning job was finished. The value will be + /// null if the fine-tuning job is still running. + /// + /// + /// The hyperparameters used for the fine-tuning job. See the + /// [fine-tuning guide](/docs/guides/fine-tuning) for more details. + /// + /// The base model that is being fine-tuned. + /// The object type, which is always "fine_tuning.job". + /// The organization that owns the fine-tuning job. + /// + /// The compiled results file ID(s) for the fine-tuning job. You can retrieve the results with the + /// [Files API](/docs/api-reference/files/retrieve-contents). + /// + /// + /// The current status of the fine-tuning job, which can be either `validating_files`, `queued`, + /// `running`, `succeeded`, `failed`, or `cancelled`. + /// + /// + /// The total number of billable tokens processed by this fine-tuning job. The value will be null + /// if the fine-tuning job is still running. + /// + /// + /// The file ID used for training. You can retrieve the training data with the + /// [Files API](/docs/api-reference/files/retrieve-contents). + /// + /// + /// The file ID used for validation. You can retrieve the validation results with the + /// [Files API](/docs/api-reference/files/retrieve-contents). + /// + /// A new instance for mocking. + public static FineTuningJob FineTuningJob(string id = null, DateTimeOffset createdAt = default, FineTuningJobError error = null, string fineTunedModel = null, DateTimeOffset? finishedAt = null, FineTuningJobHyperparameters hyperparameters = null, string model = null, FineTuningJobObject @object = default, string organizationId = null, IEnumerable resultFiles = null, FineTuningJobStatus status = default, long? trainedTokens = null, string trainingFile = null, string validationFile = null) + { + resultFiles ??= new List(); + + return new FineTuningJob( + id, + createdAt, + error, + fineTunedModel, + finishedAt, + hyperparameters, + model, + @object, + organizationId, + resultFiles?.ToList(), + status, + trainedTokens, + trainingFile, + validationFile, + serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// A machine-readable error code. + /// A human-readable error message. + /// + /// The parameter that was invalid, usually `training_file` or `validation_file`. This field will + /// be null if the failure was not parameter-specific. + /// + /// A new instance for mocking. 
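As above, a hedged sketch of how the fine-tuning factories might be combined in a test; the identifiers and the error code are placeholders, and the snippet assumes the same `InternalsVisibleTo` arrangement.

```csharp
using System;
using OpenAI.Internal.Models;

// Hypothetical: a failed fine-tuning job fabricated for an error-handling test.
FineTuningJobError error = OpenAIModelFactory.FineTuningJobError(
    code: "invalid_training_file",      // placeholder error code
    message: "The training file could not be parsed.",
    param: "training_file");

FineTuningJob job = OpenAIModelFactory.FineTuningJob(
    id: "ftjob-abc123",
    createdAt: DateTimeOffset.UtcNow,
    error: error,
    model: "gpt-3.5-turbo",
    trainingFile: "file-abc123");
```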
+ public static FineTuningJobError FineTuningJobError(string code = null, string message = null, string param = null) + { + return new FineTuningJobError(code, message, param, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// + /// The number of epochs to train the model for. An epoch refers to one full cycle through the + /// training dataset. + /// + /// "auto" decides the optimal number of epochs based on the size of the dataset. If setting the + /// number manually, we support any number between 1 and 50 epochs. + /// + /// A new instance for mocking. + public static FineTuningJobHyperparameters FineTuningJobHyperparameters(BinaryData nEpochs = null) + { + return new FineTuningJobHyperparameters(nEpochs, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// + /// + /// + /// A new instance for mocking. + public static ListPaginatedFineTuningJobsResponse ListPaginatedFineTuningJobsResponse(IEnumerable data = null, bool hasMore = default, ListPaginatedFineTuningJobsResponseObject @object = default) + { + data ??= new List(); + + return new ListPaginatedFineTuningJobsResponse(data?.ToList(), hasMore, @object, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// + /// + /// A new instance for mocking. + public static ListFineTuningJobEventsResponse ListFineTuningJobEventsResponse(IEnumerable data = null, ListFineTuningJobEventsResponseObject @object = default) + { + data ??= new List(); + + return new ListFineTuningJobEventsResponse(data?.ToList(), @object, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// + /// + /// + /// + /// + /// A new instance for mocking. + public static FineTuningJobEvent FineTuningJobEvent(string id = null, DateTimeOffset createdAt = default, FineTuningJobEventLevel level = default, string message = null, FineTuningJobEventObject @object = default) + { + return new FineTuningJobEvent( + id, + createdAt, + level, + message, + @object, + serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// The role of the entity that is creating the message. Currently only `user` is supported. + /// The content of the message. + /// + /// A list of [File](/docs/api-reference/files) IDs that the message should use. There can be a + /// maximum of 10 files attached to a message. Useful for tools like `retrieval` and + /// `code_interpreter` that can access and use files. + /// + /// + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + /// additional information about the object in a structured format. Keys can be a maximum of 64 + /// characters long and values can be a maxium of 512 characters long. + /// + /// A new instance for mocking. + public static CreateMessageRequest CreateMessageRequest(CreateMessageRequestRole role = default, string content = null, IEnumerable fileIds = null, IDictionary metadata = null) + { + fileIds ??= new List(); + metadata ??= new Dictionary(); + + return new CreateMessageRequest(role, content, fileIds?.ToList(), metadata, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// The identifier, which can be referenced in API endpoints. + /// The object type, which is always `thread.message`. + /// The Unix timestamp (in seconds) for when the message was created. + /// The [thread](/docs/api-reference/threads) ID that this message belongs to. + /// The entity that produced the message. 
One of `user` or `assistant`.
+ /// The content of the message in an array of text and/or images.
+ ///
+ /// If applicable, the ID of the [assistant](/docs/api-reference/assistants) that authored this
+ /// message.
+ ///
+ ///
+ /// If applicable, the ID of the [run](/docs/api-reference/runs) associated with the authoring of
+ /// this message.
+ ///
+ ///
+ /// A list of [file](/docs/api-reference/files) IDs that the assistant should use. Useful for
+ /// tools like retrieval and code_interpreter that can access files. A maximum of 10 files can be
+ /// attached to a message.
+ ///
+ ///
+ /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing
+ /// additional information about the object in a structured format. Keys can be a maximum of 64
+ /// characters long and values can be a maximum of 512 characters long.
+ ///
+ /// A new instance for mocking.
+ public static MessageObject MessageObject(string id = null, MessageObjectObject @object = default, DateTimeOffset createdAt = default, string threadId = null, MessageObjectRole role = default, IEnumerable content = null, string assistantId = null, string runId = null, IEnumerable fileIds = null, IReadOnlyDictionary metadata = null)
+ {
+ content ??= new List();
+ fileIds ??= new List();
+ metadata ??= new Dictionary();
+
+ return new MessageObject(
+ id,
+ @object,
+ createdAt,
+ threadId,
+ role,
+ content?.ToList(),
+ assistantId,
+ runId,
+ fileIds?.ToList(),
+ metadata,
+ serializedAdditionalRawData: null);
+ }
+
+ /// Initializes a new instance of .
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// A new instance for mocking.
+ public static ListMessagesResponse ListMessagesResponse(ListMessagesResponseObject @object = default, IEnumerable data = null, string firstId = null, string lastId = null, bool hasMore = default)
+ {
+ data ??= new List();
+
+ return new ListMessagesResponse(
+ @object,
+ data?.ToList(),
+ firstId,
+ lastId,
+ hasMore,
+ serializedAdditionalRawData: null);
+ }
+
+ /// Initializes a new instance of .
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// A new instance for mocking.
+ public static ListMessageFilesResponse ListMessageFilesResponse(ListMessageFilesResponseObject @object = default, IEnumerable data = null, string firstId = null, string lastId = null, bool hasMore = default)
+ {
+ data ??= new List();
+
+ return new ListMessageFilesResponse(
+ @object,
+ data?.ToList(),
+ firstId,
+ lastId,
+ hasMore,
+ serializedAdditionalRawData: null);
+ }
+
+ /// Initializes a new instance of .
+ /// The identifier, which can be referenced in API endpoints.
+ /// The object type, which is always `thread.message.file`.
+ /// The Unix timestamp (in seconds) for when the message file was created.
+ /// The ID of the [message](/docs/api-reference/messages) that the [File](/docs/api-reference/files) is attached to.
+ /// A new instance for mocking.
+ public static MessageFileObject MessageFileObject(string id = null, MessageFileObjectObject @object = default, DateTimeOffset createdAt = default, string messageId = null)
+ {
+ return new MessageFileObject(id, @object, createdAt, messageId, serializedAdditionalRawData: null);
+ }
+
+ /// Initializes a new instance of .
+ ///
+ ///
+ /// A new instance for mocking.
+ public static ListModelsResponse ListModelsResponse(ListModelsResponseObject @object = default, IEnumerable data = null)
+ {
+ data ??= new List();
+
+ return new ListModelsResponse(@object, data?.ToList(), serializedAdditionalRawData: null);
+ }
+
+ /// Initializes a new instance of .
+ /// The model identifier, which can be referenced in the API endpoints. + /// The Unix timestamp (in seconds) when the model was created. + /// The object type, which is always "model". + /// The organization that owns the model. + /// A new instance for mocking. + public static Model Model(string id = null, DateTimeOffset created = default, ModelObject @object = default, string ownedBy = null) + { + return new Model(id, created, @object, ownedBy, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// + /// + /// + /// A new instance for mocking. + public static DeleteModelResponse DeleteModelResponse(string id = null, bool deleted = default, DeleteModelResponseObject @object = default) + { + return new DeleteModelResponse(id, deleted, @object, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run. + /// If no thread is provided, an empty thread will be created. + /// + /// The ID of the [Model](/docs/api-reference/models) to be used to execute this run. If a value is + /// provided here, it will override the model associated with the assistant. If not, the model + /// associated with the assistant will be used. + /// + /// + /// Override the default system message of the assistant. This is useful for modifying the behavior + /// on a per-run basis. + /// + /// + /// Override the tools the assistant can use for this run. This is useful for modifying the + /// behavior on a per-run basis. + /// + /// + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + /// additional information about the object in a structured format. Keys can be a maximum of 64 + /// characters long and values can be a maxium of 512 characters long. + /// + /// A new instance for mocking. + public static CreateThreadAndRunRequest CreateThreadAndRunRequest(string assistantId = null, CreateThreadRequest thread = null, string model = null, string instructions = null, IEnumerable tools = null, IDictionary metadata = null) + { + tools ??= new List(); + metadata ??= new Dictionary(); + + return new CreateThreadAndRunRequest( + assistantId, + thread, + model, + instructions, + tools?.ToList(), + metadata, + serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// The identifier, which can be referenced in API endpoints. + /// The object type, which is always `thread.run`. + /// The Unix timestamp (in seconds) for when the run was created. + /// + /// The ID of the [thread](/docs/api-reference/threads) that was executed on as a part of this + /// run. + /// + /// The ID of the [assistant](/docs/api-reference/assistants) used for execution of this run. + /// + /// The status of the run, which can be either `queued`, `in_progress`, `requires_action`, + /// `cancelling`, `cancelled`, `failed`, `completed`, or `expired`. + /// + /// + /// Details on the action required to continue the run. Will be `null` if no action is + /// required. + /// + /// The last error associated with this run. Will be `null` if there are no errors. + /// The Unix timestamp (in seconds) for when the run will expire. + /// The Unix timestamp (in seconds) for when the run was started. + /// The Unix timestamp (in seconds) for when the run was cancelled. + /// The Unix timestamp (in seconds) for when the run failed. + /// The Unix timestamp (in seconds) for when the run was completed. 
+ /// The model that the [assistant](/docs/api-reference/assistants) used for this run.
+ /// The instructions that the [assistant](/docs/api-reference/assistants) used for this run.
+ /// The list of tools that the [assistant](/docs/api-reference/assistants) used for this run.
+ ///
+ /// The list of [File](/docs/api-reference/files) IDs the
+ /// [assistant](/docs/api-reference/assistants) used for this run.
+ ///
+ ///
+ /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing
+ /// additional information about the object in a structured format. Keys can be a maximum of 64
+ /// characters long and values can be a maximum of 512 characters long.
+ ///
+ ///
+ /// A new instance for mocking.
+ public static RunObject RunObject(string id = null, RunObjectObject @object = default, DateTimeOffset createdAt = default, string threadId = null, string assistantId = null, RunObjectStatus status = default, RunObjectRequiredAction requiredAction = null, RunObjectLastError lastError = null, DateTimeOffset? expiresAt = null, DateTimeOffset? startedAt = null, DateTimeOffset? cancelledAt = null, DateTimeOffset? failedAt = null, DateTimeOffset? completedAt = null, string model = null, string instructions = null, IEnumerable tools = null, IEnumerable fileIds = null, IReadOnlyDictionary metadata = null, RunCompletionUsage usage = null)
+ {
+ tools ??= new List();
+ fileIds ??= new List();
+ metadata ??= new Dictionary();
+
+ return new RunObject(
+ id,
+ @object,
+ createdAt,
+ threadId,
+ assistantId,
+ status,
+ requiredAction,
+ lastError,
+ expiresAt,
+ startedAt,
+ cancelledAt,
+ failedAt,
+ completedAt,
+ model,
+ instructions,
+ tools?.ToList(),
+ fileIds?.ToList(),
+ metadata,
+ usage,
+ serializedAdditionalRawData: null);
+ }
+
+ /// Initializes a new instance of .
+ /// For now, this is always `submit_tool_outputs`.
+ /// Details on the tool outputs needed for this run to continue.
+ /// A new instance for mocking.
+ public static RunObjectRequiredAction RunObjectRequiredAction(RunObjectRequiredActionType type = default, RunObjectRequiredActionSubmitToolOutputs submitToolOutputs = null)
+ {
+ return new RunObjectRequiredAction(type, submitToolOutputs, serializedAdditionalRawData: null);
+ }
+
+ /// Initializes a new instance of .
+ /// A list of the relevant tool calls.
+ /// A new instance for mocking.
+ public static RunObjectRequiredActionSubmitToolOutputs RunObjectRequiredActionSubmitToolOutputs(IEnumerable toolCalls = null)
+ {
+ toolCalls ??= new List();
+
+ return new RunObjectRequiredActionSubmitToolOutputs(toolCalls?.ToList(), serializedAdditionalRawData: null);
+ }
+
+ /// Initializes a new instance of .
+ ///
+ /// The ID of the tool call. This ID must be referenced when you submit the tool outputs using
+ /// the [Submit tool outputs to run](/docs/api-reference/runs/submitToolOutputs) endpoint.
+ ///
+ /// The type of tool call the output is required for. For now, this is always `function`.
+ /// The function definition.
+ /// A new instance for mocking.
+ public static RunToolCallObject RunToolCallObject(string id = null, RunToolCallObjectType type = default, RunToolCallObjectFunction function = null)
+ {
+ return new RunToolCallObject(id, type, function, serializedAdditionalRawData: null);
+ }
+
+ /// Initializes a new instance of .
+ /// The name of the function.
+ /// The arguments that the model expects you to pass to the function.
+ /// A new instance for mocking.
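The `requires_action` flow documented above may be easier to follow with a sketch. This is illustrative only: it assumes the generated extensible enums expose members named `RequiresAction` and `SubmitToolOutputs`, and that the generated model properties mirror the factory parameter names (`Status`, `RequiredAction`, `ToolCalls`, `Function`, and so on).

```csharp
using System;
using OpenAI.Internal.Models;

// Illustrative sketch: when a run pauses for tool output, each tool call carries
// the ID that must be echoed back with its result. Member names are assumptions
// based on the factory parameter names in this file.
static void PrintPendingToolCalls(RunObject run)
{
    if (run.Status == RunObjectStatus.RequiresAction
        && run.RequiredAction?.Type == RunObjectRequiredActionType.SubmitToolOutputs)
    {
        foreach (RunToolCallObject toolCall in run.RequiredAction.SubmitToolOutputs.ToolCalls)
        {
            Console.WriteLine($"{toolCall.Id}: {toolCall.Function.Name}({toolCall.Function.Arguments})");
        }
    }
}
```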
+ public static RunToolCallObjectFunction RunToolCallObjectFunction(string name = null, string arguments = null) + { + return new RunToolCallObjectFunction(name, arguments, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// One of `server_error` or `rate_limit_exceeded`. + /// A human-readable description of the error. + /// A new instance for mocking. + public static RunObjectLastError RunObjectLastError(RunObjectLastErrorCode code = default, string message = null) + { + return new RunObjectLastError(code, message, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// Number of completion tokens used over the course of the run. + /// Number of prompt tokens used over the course of the run. + /// Total number of tokens used (prompt + completion). + /// A new instance for mocking. + public static RunCompletionUsage RunCompletionUsage(long completionTokens = default, long promptTokens = default, long totalTokens = default) + { + return new RunCompletionUsage(completionTokens, promptTokens, totalTokens, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run. + /// + /// The ID of the [Model](/docs/api-reference/models) to be used to execute this run. If a value + /// is provided here, it will override the model associated with the assistant. If not, the model + /// associated with the assistant will be used. + /// + /// + /// Overrides the [instructions](/docs/api-reference/assistants/createAssistant) of the assistant. + /// This is useful for modifying the behavior on a per-run basis. + /// + /// + /// Appends additional instructions at the end of the instructions for the run. This is useful for + /// modifying the behavior on a per-run basis without overriding other instructions. + /// + /// + /// Override the tools the assistant can use for this run. This is useful for modifying the + /// behavior on a per-run basis. + /// + /// + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + /// additional information about the object in a structured format. Keys can be a maximum of 64 + /// characters long and values can be a maxium of 512 characters long. + /// + /// A new instance for mocking. + public static CreateRunRequest CreateRunRequest(string assistantId = null, string model = null, string instructions = null, string additionalInstructions = null, IEnumerable tools = null, IDictionary metadata = null) + { + tools ??= new List(); + metadata ??= new Dictionary(); + + return new CreateRunRequest( + assistantId, + model, + instructions, + additionalInstructions, + tools?.ToList(), + metadata, + serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// + /// + /// + /// + /// + /// A new instance for mocking. + public static ListRunsResponse ListRunsResponse(ListRunsResponseObject @object = default, IEnumerable data = null, string firstId = null, string lastId = null, bool hasMore = default) + { + data ??= new List(); + + return new ListRunsResponse( + @object, + data?.ToList(), + firstId, + lastId, + hasMore, + serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// + /// + /// + /// + /// + /// A new instance for mocking. 
+ public static ListRunStepsResponse ListRunStepsResponse(ListRunStepsResponseObject @object = default, IEnumerable data = null, string firstId = null, string lastId = null, bool hasMore = default)
+ {
+ data ??= new List();
+
+ return new ListRunStepsResponse(
+ @object,
+ data?.ToList(),
+ firstId,
+ lastId,
+ hasMore,
+ serializedAdditionalRawData: null);
+ }
+
+ /// Initializes a new instance of .
+ /// The identifier of the run step, which can be referenced in API endpoints.
+ /// The object type, which is always `thread.run.step`.
+ /// The Unix timestamp (in seconds) for when the run step was created.
+ /// The ID of the [assistant](/docs/api-reference/assistants) associated with the run step.
+ /// The ID of the [thread](/docs/api-reference/threads) that was run.
+ /// The ID of the [run](/docs/api-reference/runs) that this run step is a part of.
+ /// The type of run step, which can be either `message_creation` or `tool_calls`.
+ ///
+ /// The status of the run step, which can be either `in_progress`, `cancelled`, `failed`,
+ /// `completed`, or `expired`.
+ ///
+ /// The details of the run step.
+ /// The last error associated with this run step. Will be `null` if there are no errors.
+ ///
+ /// The Unix timestamp (in seconds) for when the run step expired. A step is considered expired
+ /// if the parent run is expired.
+ ///
+ /// The Unix timestamp (in seconds) for when the run step was cancelled.
+ /// The Unix timestamp (in seconds) for when the run step failed.
+ /// The Unix timestamp (in seconds) for when the run step completed.
+ ///
+ /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing
+ /// additional information about the object in a structured format. Keys can be a maximum of 64
+ /// characters long and values can be a maximum of 512 characters long.
+ ///
+ ///
+ /// A new instance for mocking.
+ public static RunStepObject RunStepObject(string id = null, RunStepObjectObject @object = default, DateTimeOffset createdAt = default, string assistantId = null, string threadId = null, string runId = null, RunStepObjectType type = default, RunStepObjectStatus status = default, BinaryData stepDetails = null, RunStepObjectLastError lastError = null, DateTimeOffset? expiresAt = null, DateTimeOffset? cancelledAt = null, DateTimeOffset? failedAt = null, DateTimeOffset? completedAt = null, IReadOnlyDictionary metadata = null, RunCompletionUsage usage = null)
+ {
+ metadata ??= new Dictionary();
+
+ return new RunStepObject(
+ id,
+ @object,
+ createdAt,
+ assistantId,
+ threadId,
+ runId,
+ type,
+ status,
+ stepDetails,
+ lastError,
+ expiresAt,
+ cancelledAt,
+ failedAt,
+ completedAt,
+ metadata,
+ usage,
+ serializedAdditionalRawData: null);
+ }
+
+ /// Initializes a new instance of .
+ /// One of `server_error` or `rate_limit_exceeded`.
+ /// A human-readable description of the error.
+ /// A new instance for mocking.
+ public static RunStepObjectLastError RunStepObjectLastError(RunStepObjectLastErrorCode code = default, string message = null)
+ {
+ return new RunStepObjectLastError(code, message, serializedAdditionalRawData: null);
+ }
+
+ /// Initializes a new instance of .
+ /// The identifier, which can be referenced in API endpoints.
+ /// The object type, which is always `thread`.
+ /// The Unix timestamp (in seconds) for when the thread was created.
+ ///
+ /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing
+ /// additional information about the object in a structured format.
Keys can be a maximum of 64 + /// characters long and values can be a maxium of 512 characters long. + /// + /// A new instance for mocking. + public static ThreadObject ThreadObject(string id = null, ThreadObjectObject @object = default, DateTimeOffset createdAt = default, IReadOnlyDictionary metadata = null) + { + metadata ??= new Dictionary(); + + return new ThreadObject(id, @object, createdAt, metadata, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// + /// + /// + /// A new instance for mocking. + public static DeleteThreadResponse DeleteThreadResponse(string id = null, bool deleted = default, DeleteThreadResponseObject @object = default) + { + return new DeleteThreadResponse(id, deleted, @object, serializedAdditionalRawData: null); + } + } +} diff --git a/.dotnet/src/Generated/Runs.cs b/.dotnet/src/Generated/Runs.cs new file mode 100644 index 000000000..2ef98a2c1 --- /dev/null +++ b/.dotnet/src/Generated/Runs.cs @@ -0,0 +1,1485 @@ +// + +using System; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Text; +using System.Threading; +using System.Threading.Tasks; +using OpenAI.Internal.Models; + +namespace OpenAI.Internal +{ + // Data plane generated sub-client. + /// The Runs sub-client. + internal partial class Runs + { + private const string AuthorizationHeader = "Authorization"; + private readonly ApiKeyCredential _credential; + private const string AuthorizationApiKeyPrefix = "Bearer"; + private readonly ClientPipeline _pipeline; + private readonly Uri _endpoint; + + /// The HTTP pipeline for sending and receiving REST requests and responses. + public virtual ClientPipeline Pipeline => _pipeline; + + /// Initializes a new instance of Runs for mocking. + protected Runs() + { + } + + /// Initializes a new instance of Runs. + /// The HTTP pipeline for sending and receiving REST requests and responses. + /// The key credential to copy. + /// OpenAI Endpoint. + internal Runs(ClientPipeline pipeline, ApiKeyCredential credential, Uri endpoint) + { + _pipeline = pipeline; + _credential = credential; + _endpoint = endpoint; + } + + /// Create a thread and run it in one request. + /// The to use. + /// is null. + public virtual async Task> CreateThreadAndRunAsync(CreateThreadAndRunRequest threadAndRun) + { + Argument.AssertNotNull(threadAndRun, nameof(threadAndRun)); + + using BinaryContent content = BinaryContent.Create(threadAndRun); + ClientResult result = await CreateThreadAndRunAsync(content, DefaultRequestContext).ConfigureAwait(false); + return ClientResult.FromValue(RunObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Create a thread and run it in one request. + /// The to use. + /// is null. + public virtual ClientResult CreateThreadAndRun(CreateThreadAndRunRequest threadAndRun) + { + Argument.AssertNotNull(threadAndRun, nameof(threadAndRun)); + + using BinaryContent content = BinaryContent.Create(threadAndRun); + ClientResult result = CreateThreadAndRun(content, DefaultRequestContext); + return ClientResult.FromValue(RunObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Create a thread and run it in one request. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. 
+ /// + /// + /// + /// + /// The content to send as the body of the request. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task CreateThreadAndRunAsync(BinaryContent content, RequestOptions options = null) + { + Argument.AssertNotNull(content, nameof(content)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("Runs.CreateThreadAndRun"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateCreateThreadAndRunRequest(content, options); + return ClientResult.FromResponse(await _pipeline.ProcessMessageAsync(message, options).ConfigureAwait(false)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Create a thread and run it in one request. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The content to send as the body of the request. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual ClientResult CreateThreadAndRun(BinaryContent content, RequestOptions options = null) + { + Argument.AssertNotNull(content, nameof(content)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("Runs.CreateThreadAndRun"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateCreateThreadAndRunRequest(content, options); + return ClientResult.FromResponse(_pipeline.ProcessMessage(message, options)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// Create a run. + /// The ID of the thread to run. + /// The to use. + /// or is null. + /// is an empty string, and was expected to be non-empty. + public virtual async Task> CreateRunAsync(string threadId, CreateRunRequest run) + { + Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); + Argument.AssertNotNull(run, nameof(run)); + + using BinaryContent content = BinaryContent.Create(run); + ClientResult result = await CreateRunAsync(threadId, content, DefaultRequestContext).ConfigureAwait(false); + return ClientResult.FromValue(RunObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Create a run. + /// The ID of the thread to run. + /// The to use. + /// or is null. + /// is an empty string, and was expected to be non-empty. + public virtual ClientResult CreateRun(string threadId, CreateRunRequest run) + { + Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); + Argument.AssertNotNull(run, nameof(run)); + + using BinaryContent content = BinaryContent.Create(run); + ClientResult result = CreateRun(threadId, content, DefaultRequestContext); + return ClientResult.FromValue(RunObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Create a run. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. 
+ /// + /// + /// + /// + /// The ID of the thread to run. + /// The content to send as the body of the request. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task CreateRunAsync(string threadId, BinaryContent content, RequestOptions options = null) + { + Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); + Argument.AssertNotNull(content, nameof(content)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("Runs.CreateRun"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateCreateRunRequest(threadId, content, options); + return ClientResult.FromResponse(await _pipeline.ProcessMessageAsync(message, options).ConfigureAwait(false)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Create a run. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the thread to run. + /// The content to send as the body of the request. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual ClientResult CreateRun(string threadId, BinaryContent content, RequestOptions options = null) + { + Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); + Argument.AssertNotNull(content, nameof(content)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("Runs.CreateRun"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateCreateRunRequest(threadId, content, options); + return ClientResult.FromResponse(_pipeline.ProcessMessage(message, options)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// Returns a list of runs belonging to a thread. + /// The ID of the thread the run belongs to. + /// + /// A limit on the number of objects to be returned. Limit can range between 1 and 100, and the + /// default is 20. + /// + /// + /// Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` + /// for descending order. + /// + /// + /// A cursor for use in pagination. `after` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include after=obj_foo in order to fetch the next page of the list. + /// + /// + /// A cursor for use in pagination. `before` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include before=obj_foo in order to fetch the previous page of the list. + /// + /// is null. + /// is an empty string, and was expected to be non-empty. + public virtual async Task> GetRunsAsync(string threadId, int? limit = null, ListOrder? 
order = null, string after = null, string before = null) + { + Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); + + ClientResult result = await GetRunsAsync(threadId, limit, order?.ToString(), after, before, DefaultRequestContext).ConfigureAwait(false); + return ClientResult.FromValue(ListRunsResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Returns a list of runs belonging to a thread. + /// The ID of the thread the run belongs to. + /// + /// A limit on the number of objects to be returned. Limit can range between 1 and 100, and the + /// default is 20. + /// + /// + /// Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` + /// for descending order. + /// + /// + /// A cursor for use in pagination. `after` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include after=obj_foo in order to fetch the next page of the list. + /// + /// + /// A cursor for use in pagination. `before` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include before=obj_foo in order to fetch the previous page of the list. + /// + /// is null. + /// is an empty string, and was expected to be non-empty. + public virtual ClientResult GetRuns(string threadId, int? limit = null, ListOrder? order = null, string after = null, string before = null) + { + Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); + + ClientResult result = GetRuns(threadId, limit, order?.ToString(), after, before, DefaultRequestContext); + return ClientResult.FromValue(ListRunsResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Returns a list of runs belonging to a thread. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the thread the run belongs to. + /// + /// A limit on the number of objects to be returned. Limit can range between 1 and 100, and the + /// default is 20. + /// + /// + /// Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` + /// for descending order. Allowed values: "asc" | "desc" + /// + /// + /// A cursor for use in pagination. `after` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include after=obj_foo in order to fetch the next page of the list. + /// + /// + /// A cursor for use in pagination. `before` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include before=obj_foo in order to fetch the previous page of the list. + /// + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task GetRunsAsync(string threadId, int? 
limit, string order, string after, string before, RequestOptions options) + { + Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("Runs.GetRuns"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateGetRunsRequest(threadId, limit, order, after, before, options); + return ClientResult.FromResponse(await _pipeline.ProcessMessageAsync(message, options).ConfigureAwait(false)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Returns a list of runs belonging to a thread. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the thread the run belongs to. + /// + /// A limit on the number of objects to be returned. Limit can range between 1 and 100, and the + /// default is 20. + /// + /// + /// Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` + /// for descending order. Allowed values: "asc" | "desc" + /// + /// + /// A cursor for use in pagination. `after` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include after=obj_foo in order to fetch the next page of the list. + /// + /// + /// A cursor for use in pagination. `before` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include before=obj_foo in order to fetch the previous page of the list. + /// + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual ClientResult GetRuns(string threadId, int? limit, string order, string after, string before, RequestOptions options) + { + Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("Runs.GetRuns"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateGetRunsRequest(threadId, limit, order, after, before, options); + return ClientResult.FromResponse(_pipeline.ProcessMessage(message, options)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// Retrieves a run. + /// The ID of the [thread](/docs/api-reference/threads) that was run. + /// The ID of the run to retrieve. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + public virtual async Task> GetRunAsync(string threadId, string runId) + { + Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); + Argument.AssertNotNullOrEmpty(runId, nameof(runId)); + + ClientResult result = await GetRunAsync(threadId, runId, DefaultRequestContext).ConfigureAwait(false); + return ClientResult.FromValue(RunObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Retrieves a run. + /// The ID of the [thread](/docs/api-reference/threads) that was run. + /// The ID of the run to retrieve. + /// or is null. 
+ /// or is an empty string, and was expected to be non-empty. + public virtual ClientResult GetRun(string threadId, string runId) + { + Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); + Argument.AssertNotNullOrEmpty(runId, nameof(runId)); + + ClientResult result = GetRun(threadId, runId, DefaultRequestContext); + return ClientResult.FromValue(RunObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Retrieves a run. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the [thread](/docs/api-reference/threads) that was run. + /// The ID of the run to retrieve. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task GetRunAsync(string threadId, string runId, RequestOptions options) + { + Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); + Argument.AssertNotNullOrEmpty(runId, nameof(runId)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("Runs.GetRun"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateGetRunRequest(threadId, runId, options); + return ClientResult.FromResponse(await _pipeline.ProcessMessageAsync(message, options).ConfigureAwait(false)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Retrieves a run. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the [thread](/docs/api-reference/threads) that was run. + /// The ID of the run to retrieve. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual ClientResult GetRun(string threadId, string runId, RequestOptions options) + { + Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); + Argument.AssertNotNullOrEmpty(runId, nameof(runId)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("Runs.GetRun"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateGetRunRequest(threadId, runId, options); + return ClientResult.FromResponse(_pipeline.ProcessMessage(message, options)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// Modifies a run. + /// The ID of the [thread](/docs/api-reference/threads) that was run. + /// The ID of the run to modify. + /// The to use. + /// , or is null. + /// or is an empty string, and was expected to be non-empty. 
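The `after`/`before` cursor parameters documented above suggest a simple paging loop. The sketch below is hypothetical: the `runs` variable stands for a `Runs` sub-client obtained elsewhere, and the `Data`/`HasMore`/`LastId` property names are assumed from the corresponding factory parameters.

```csharp
using System;
using System.ClientModel;
using OpenAI.Internal.Models;

// Hypothetical paging loop over a thread's runs; "thread_abc123" is a placeholder.
string after = null;
do
{
    ClientResult<ListRunsResponse> page =
        runs.GetRuns("thread_abc123", limit: 20, after: after);
    foreach (RunObject run in page.Value.Data)
    {
        Console.WriteLine(run.Id);   // run.Id is assumed from the factory's "id"
    }
    after = page.Value.HasMore ? page.Value.LastId : null;
} while (after != null);
```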
+ public virtual async Task> ModifyRunAsync(string threadId, string runId, ModifyRunRequest run) + { + Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); + Argument.AssertNotNullOrEmpty(runId, nameof(runId)); + Argument.AssertNotNull(run, nameof(run)); + + using BinaryContent content = BinaryContent.Create(run); + ClientResult result = await ModifyRunAsync(threadId, runId, content, DefaultRequestContext).ConfigureAwait(false); + return ClientResult.FromValue(RunObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Modifies a run. + /// The ID of the [thread](/docs/api-reference/threads) that was run. + /// The ID of the run to modify. + /// The to use. + /// , or is null. + /// or is an empty string, and was expected to be non-empty. + public virtual ClientResult ModifyRun(string threadId, string runId, ModifyRunRequest run) + { + Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); + Argument.AssertNotNullOrEmpty(runId, nameof(runId)); + Argument.AssertNotNull(run, nameof(run)); + + using BinaryContent content = BinaryContent.Create(run); + ClientResult result = ModifyRun(threadId, runId, content, DefaultRequestContext); + return ClientResult.FromValue(RunObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Modifies a run. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the [thread](/docs/api-reference/threads) that was run. + /// The ID of the run to modify. + /// The content to send as the body of the request. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// , or is null. + /// or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task ModifyRunAsync(string threadId, string runId, BinaryContent content, RequestOptions options = null) + { + Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); + Argument.AssertNotNullOrEmpty(runId, nameof(runId)); + Argument.AssertNotNull(content, nameof(content)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("Runs.ModifyRun"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateModifyRunRequest(threadId, runId, content, options); + return ClientResult.FromResponse(await _pipeline.ProcessMessageAsync(message, options).ConfigureAwait(false)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Modifies a run. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the [thread](/docs/api-reference/threads) that was run. + /// The ID of the run to modify. + /// The content to send as the body of the request. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// , or is null. + /// or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. 
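The remarks for the protocol overloads repeatedly point to the convenience methods first; when the protocol path is needed, the call shape looks roughly like this sketch. The JSON body follows the public REST API's `metadata` field, `runs` again stands for a `Runs` sub-client, and the IDs are placeholders.

```csharp
using System;
using System.ClientModel;
using System.ClientModel.Primitives;

// Hypothetical protocol-method call: send raw JSON to update a run's metadata.
BinaryData json = BinaryData.FromString("{\"metadata\":{\"reviewed\":\"true\"}}");
using BinaryContent body = BinaryContent.Create(json);

ClientResult result = runs.ModifyRun("thread_abc123", "run_abc123", body);
PipelineResponse response = result.GetRawResponse();
Console.WriteLine(response.Status);
```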
+ public virtual ClientResult ModifyRun(string threadId, string runId, BinaryContent content, RequestOptions options = null) + { + Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); + Argument.AssertNotNullOrEmpty(runId, nameof(runId)); + Argument.AssertNotNull(content, nameof(content)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("Runs.ModifyRun"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateModifyRunRequest(threadId, runId, content, options); + return ClientResult.FromResponse(_pipeline.ProcessMessage(message, options)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// Cancels a run that is `in_progress`. + /// The ID of the thread to which this run belongs. + /// The ID of the run to cancel. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + public virtual async Task> CancelRunAsync(string threadId, string runId) + { + Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); + Argument.AssertNotNullOrEmpty(runId, nameof(runId)); + + ClientResult result = await CancelRunAsync(threadId, runId, DefaultRequestContext).ConfigureAwait(false); + return ClientResult.FromValue(RunObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Cancels a run that is `in_progress`. + /// The ID of the thread to which this run belongs. + /// The ID of the run to cancel. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + public virtual ClientResult CancelRun(string threadId, string runId) + { + Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); + Argument.AssertNotNullOrEmpty(runId, nameof(runId)); + + ClientResult result = CancelRun(threadId, runId, DefaultRequestContext); + return ClientResult.FromValue(RunObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Cancels a run that is `in_progress`. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the thread to which this run belongs. + /// The ID of the run to cancel. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task CancelRunAsync(string threadId, string runId, RequestOptions options) + { + Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); + Argument.AssertNotNullOrEmpty(runId, nameof(runId)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("Runs.CancelRun"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateCancelRunRequest(threadId, runId, options); + return ClientResult.FromResponse(await _pipeline.ProcessMessageAsync(message, options).ConfigureAwait(false)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Cancels a run that is `in_progress`. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. 
+ /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the thread to which this run belongs. + /// The ID of the run to cancel. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual ClientResult CancelRun(string threadId, string runId, RequestOptions options) + { + Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); + Argument.AssertNotNullOrEmpty(runId, nameof(runId)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("Runs.CancelRun"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateCancelRunRequest(threadId, runId, options); + return ClientResult.FromResponse(_pipeline.ProcessMessage(message, options)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// + /// When a run has the `status: "requires_action"` and `required_action.type` is + /// `submit_tool_outputs`, this endpoint can be used to submit the outputs from the tool calls once + /// they're all completed. All outputs must be submitted in a single request. + /// + /// The ID of the [thread](/docs/api-reference/threads) to which this run belongs. + /// The ID of the run that requires the tool output submission. + /// The to use. + /// , or is null. + /// or is an empty string, and was expected to be non-empty. + public virtual async Task> SubmitToolOuputsToRunAsync(string threadId, string runId, SubmitToolOutputsRunRequest submitToolOutputsRun) + { + Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); + Argument.AssertNotNullOrEmpty(runId, nameof(runId)); + Argument.AssertNotNull(submitToolOutputsRun, nameof(submitToolOutputsRun)); + + using BinaryContent content = BinaryContent.Create(submitToolOutputsRun); + ClientResult result = await SubmitToolOuputsToRunAsync(threadId, runId, content, DefaultRequestContext).ConfigureAwait(false); + return ClientResult.FromValue(RunObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// When a run has the `status: "requires_action"` and `required_action.type` is + /// `submit_tool_outputs`, this endpoint can be used to submit the outputs from the tool calls once + /// they're all completed. All outputs must be submitted in a single request. + /// + /// The ID of the [thread](/docs/api-reference/threads) to which this run belongs. + /// The ID of the run that requires the tool output submission. + /// The to use. + /// , or is null. + /// or is an empty string, and was expected to be non-empty. 
+ public virtual ClientResult SubmitToolOuputsToRun(string threadId, string runId, SubmitToolOutputsRunRequest submitToolOutputsRun) + { + Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); + Argument.AssertNotNullOrEmpty(runId, nameof(runId)); + Argument.AssertNotNull(submitToolOutputsRun, nameof(submitToolOutputsRun)); + + using BinaryContent content = BinaryContent.Create(submitToolOutputsRun); + ClientResult result = SubmitToolOuputsToRun(threadId, runId, content, DefaultRequestContext); + return ClientResult.FromValue(RunObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] When a run has the `status: "requires_action"` and `required_action.type` is + /// `submit_tool_outputs`, this endpoint can be used to submit the outputs from the tool calls once + /// they're all completed. All outputs must be submitted in a single request. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the [thread](/docs/api-reference/threads) to which this run belongs. + /// The ID of the run that requires the tool output submission. + /// The content to send as the body of the request. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// , or is null. + /// or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task SubmitToolOuputsToRunAsync(string threadId, string runId, BinaryContent content, RequestOptions options = null) + { + Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); + Argument.AssertNotNullOrEmpty(runId, nameof(runId)); + Argument.AssertNotNull(content, nameof(content)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("Runs.SubmitToolOuputsToRun"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateSubmitToolOuputsToRunRequest(threadId, runId, content, options); + return ClientResult.FromResponse(await _pipeline.ProcessMessageAsync(message, options).ConfigureAwait(false)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] When a run has the `status: "requires_action"` and `required_action.type` is + /// `submit_tool_outputs`, this endpoint can be used to submit the outputs from the tool calls once + /// they're all completed. All outputs must be submitted in a single request. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the [thread](/docs/api-reference/threads) to which this run belongs. + /// The ID of the run that requires the tool output submission. + /// The content to send as the body of the request. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// , or is null. + /// or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. 
+ public virtual ClientResult SubmitToolOuputsToRun(string threadId, string runId, BinaryContent content, RequestOptions options = null) + { + Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); + Argument.AssertNotNullOrEmpty(runId, nameof(runId)); + Argument.AssertNotNull(content, nameof(content)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("Runs.SubmitToolOuputsToRun"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateSubmitToolOuputsToRunRequest(threadId, runId, content, options); + return ClientResult.FromResponse(_pipeline.ProcessMessage(message, options)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// Returns a list of run steps belonging to a run. + /// The ID of the thread the run and run steps belong to. + /// The ID of the run the run steps belong to. + /// + /// A limit on the number of objects to be returned. Limit can range between 1 and 100, and the + /// default is 20. + /// + /// + /// Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` + /// for descending order. + /// + /// + /// A cursor for use in pagination. `after` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include after=obj_foo in order to fetch the next page of the list. + /// + /// + /// A cursor for use in pagination. `before` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include before=obj_foo in order to fetch the previous page of the list. + /// + /// or is null. + /// or is an empty string, and was expected to be non-empty. + public virtual async Task> GetRunStepsAsync(string threadId, string runId, int? limit = null, ListOrder? order = null, string after = null, string before = null) + { + Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); + Argument.AssertNotNullOrEmpty(runId, nameof(runId)); + + ClientResult result = await GetRunStepsAsync(threadId, runId, limit, order?.ToString(), after, before, DefaultRequestContext).ConfigureAwait(false); + return ClientResult.FromValue(ListRunStepsResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Returns a list of run steps belonging to a run. + /// The ID of the thread the run and run steps belong to. + /// The ID of the run the run steps belong to. + /// + /// A limit on the number of objects to be returned. Limit can range between 1 and 100, and the + /// default is 20. + /// + /// + /// Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` + /// for descending order. + /// + /// + /// A cursor for use in pagination. `after` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include after=obj_foo in order to fetch the next page of the list. + /// + /// + /// A cursor for use in pagination. `before` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include before=obj_foo in order to fetch the previous page of the list. + /// + /// or is null. + /// or is an empty string, and was expected to be non-empty. 
+ public virtual ClientResult GetRunSteps(string threadId, string runId, int? limit = null, ListOrder? order = null, string after = null, string before = null) + { + Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); + Argument.AssertNotNullOrEmpty(runId, nameof(runId)); + + ClientResult result = GetRunSteps(threadId, runId, limit, order?.ToString(), after, before, DefaultRequestContext); + return ClientResult.FromValue(ListRunStepsResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Returns a list of run steps belonging to a run. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the thread the run and run steps belong to. + /// The ID of the run the run steps belong to. + /// + /// A limit on the number of objects to be returned. Limit can range between 1 and 100, and the + /// default is 20. + /// + /// + /// Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` + /// for descending order. Allowed values: "asc" | "desc" + /// + /// + /// A cursor for use in pagination. `after` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include after=obj_foo in order to fetch the next page of the list. + /// + /// + /// A cursor for use in pagination. `before` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include before=obj_foo in order to fetch the previous page of the list. + /// + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task GetRunStepsAsync(string threadId, string runId, int? limit, string order, string after, string before, RequestOptions options) + { + Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); + Argument.AssertNotNullOrEmpty(runId, nameof(runId)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("Runs.GetRunSteps"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateGetRunStepsRequest(threadId, runId, limit, order, after, before, options); + return ClientResult.FromResponse(await _pipeline.ProcessMessageAsync(message, options).ConfigureAwait(false)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Returns a list of run steps belonging to a run. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the thread the run and run steps belong to. + /// The ID of the run the run steps belong to. + /// + /// A limit on the number of objects to be returned. Limit can range between 1 and 100, and the + /// default is 20. + /// + /// + /// Sort order by the `created_at` timestamp of the objects. 
`asc` for ascending order and`desc` + /// for descending order. Allowed values: "asc" | "desc" + /// + /// + /// A cursor for use in pagination. `after` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include after=obj_foo in order to fetch the next page of the list. + /// + /// + /// A cursor for use in pagination. `before` is an object ID that defines your place in the list. + /// For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + /// subsequent call can include before=obj_foo in order to fetch the previous page of the list. + /// + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual ClientResult GetRunSteps(string threadId, string runId, int? limit, string order, string after, string before, RequestOptions options) + { + Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); + Argument.AssertNotNullOrEmpty(runId, nameof(runId)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("Runs.GetRunSteps"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateGetRunStepsRequest(threadId, runId, limit, order, after, before, options); + return ClientResult.FromResponse(_pipeline.ProcessMessage(message, options)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// Retrieves a run step. + /// The ID of the thread to which the run and run step belongs. + /// The ID of the run to which the run step belongs. + /// The ID of the run step to retrieve. + /// , or is null. + /// , or is an empty string, and was expected to be non-empty. + public virtual async Task> GetRunStepAsync(string threadId, string runId, string stepId) + { + Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); + Argument.AssertNotNullOrEmpty(runId, nameof(runId)); + Argument.AssertNotNullOrEmpty(stepId, nameof(stepId)); + + ClientResult result = await GetRunStepAsync(threadId, runId, stepId, DefaultRequestContext).ConfigureAwait(false); + return ClientResult.FromValue(RunStepObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Retrieves a run step. + /// The ID of the thread to which the run and run step belongs. + /// The ID of the run to which the run step belongs. + /// The ID of the run step to retrieve. + /// , or is null. + /// , or is an empty string, and was expected to be non-empty. + public virtual ClientResult GetRunStep(string threadId, string runId, string stepId) + { + Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); + Argument.AssertNotNullOrEmpty(runId, nameof(runId)); + Argument.AssertNotNullOrEmpty(stepId, nameof(stepId)); + + ClientResult result = GetRunStep(threadId, runId, stepId, DefaultRequestContext); + return ClientResult.FromValue(RunStepObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Retrieves a run step. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. 
+ /// + /// + /// + /// + /// The ID of the thread to which the run and run step belongs. + /// The ID of the run to which the run step belongs. + /// The ID of the run step to retrieve. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// , or is null. + /// , or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task GetRunStepAsync(string threadId, string runId, string stepId, RequestOptions options) + { + Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); + Argument.AssertNotNullOrEmpty(runId, nameof(runId)); + Argument.AssertNotNullOrEmpty(stepId, nameof(stepId)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("Runs.GetRunStep"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateGetRunStepRequest(threadId, runId, stepId, options); + return ClientResult.FromResponse(await _pipeline.ProcessMessageAsync(message, options).ConfigureAwait(false)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Retrieves a run step. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the thread to which the run and run step belongs. + /// The ID of the run to which the run step belongs. + /// The ID of the run step to retrieve. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// , or is null. + /// , or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. 
+ public virtual ClientResult GetRunStep(string threadId, string runId, string stepId, RequestOptions options) + { + Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); + Argument.AssertNotNullOrEmpty(runId, nameof(runId)); + Argument.AssertNotNullOrEmpty(stepId, nameof(stepId)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("Runs.GetRunStep"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateGetRunStepRequest(threadId, runId, stepId, options); + return ClientResult.FromResponse(_pipeline.ProcessMessage(message, options)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + internal PipelineMessage CreateCreateThreadAndRunRequest(BinaryContent content, RequestOptions options) + { + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "POST"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/threads/runs"); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); + request.Headers.Set("Content-Type", "application/json"); + request.Content = content; + message.Apply(options); + return message; + } + + internal PipelineMessage CreateCreateRunRequest(string threadId, BinaryContent content, RequestOptions options) + { + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "POST"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/threads/"); + path.Append(threadId); + path.Append("/runs"); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); + request.Headers.Set("Content-Type", "application/json"); + request.Content = content; + message.Apply(options); + return message; + } + + internal PipelineMessage CreateGetRunsRequest(string threadId, int? 
limit, string order, string after, string before, RequestOptions options) + { + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "GET"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/threads/"); + path.Append(threadId); + path.Append("/runs"); + if (limit != null) + { + if (uriBuilder.Query != null && uriBuilder.Query.Length > 1) + { + uriBuilder.Query += $"&limit={limit.Value}"; + } + else + { + uriBuilder.Query = $"limit={limit.Value}"; + } + } + if (order != null) + { + if (uriBuilder.Query != null && uriBuilder.Query.Length > 1) + { + uriBuilder.Query += $"&order={order}"; + } + else + { + uriBuilder.Query = $"order={order}"; + } + } + if (after != null) + { + if (uriBuilder.Query != null && uriBuilder.Query.Length > 1) + { + uriBuilder.Query += $"&after={after}"; + } + else + { + uriBuilder.Query = $"after={after}"; + } + } + if (before != null) + { + if (uriBuilder.Query != null && uriBuilder.Query.Length > 1) + { + uriBuilder.Query += $"&before={before}"; + } + else + { + uriBuilder.Query = $"before={before}"; + } + } + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); + return message; + } + + internal PipelineMessage CreateGetRunRequest(string threadId, string runId, RequestOptions options) + { + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "GET"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/threads/"); + path.Append(threadId); + path.Append("/runs/"); + path.Append(runId); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); + return message; + } + + internal PipelineMessage CreateModifyRunRequest(string threadId, string runId, BinaryContent content, RequestOptions options) + { + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "POST"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/threads/"); + path.Append(threadId); + path.Append("/runs/"); + path.Append(runId); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); + request.Headers.Set("Content-Type", "application/json"); + request.Content = content; + message.Apply(options); + return message; + } + + internal PipelineMessage CreateCancelRunRequest(string threadId, string runId, RequestOptions options) + { + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "POST"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/threads/"); + path.Append(threadId); + path.Append("/runs/"); + path.Append(runId); + path.Append("/cancel"); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); + return message; + } + + internal PipelineMessage CreateSubmitToolOuputsToRunRequest(string threadId, string runId, BinaryContent content, RequestOptions options) + { + 
PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "POST"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/threads/"); + path.Append(threadId); + path.Append("/runs/"); + path.Append(runId); + path.Append("/submit_tool_outputs"); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); + request.Headers.Set("Content-Type", "application/json"); + request.Content = content; + message.Apply(options); + return message; + } + + internal PipelineMessage CreateGetRunStepsRequest(string threadId, string runId, int? limit, string order, string after, string before, RequestOptions options) + { + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "GET"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/threads/"); + path.Append(threadId); + path.Append("/runs/"); + path.Append(runId); + path.Append("/steps"); + if (limit != null) + { + if (uriBuilder.Query != null && uriBuilder.Query.Length > 1) + { + uriBuilder.Query += $"&limit={limit.Value}"; + } + else + { + uriBuilder.Query = $"limit={limit.Value}"; + } + } + if (order != null) + { + if (uriBuilder.Query != null && uriBuilder.Query.Length > 1) + { + uriBuilder.Query += $"&order={order}"; + } + else + { + uriBuilder.Query = $"order={order}"; + } + } + if (after != null) + { + if (uriBuilder.Query != null && uriBuilder.Query.Length > 1) + { + uriBuilder.Query += $"&after={after}"; + } + else + { + uriBuilder.Query = $"after={after}"; + } + } + if (before != null) + { + if (uriBuilder.Query != null && uriBuilder.Query.Length > 1) + { + uriBuilder.Query += $"&before={before}"; + } + else + { + uriBuilder.Query = $"before={before}"; + } + } + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); + return message; + } + + internal PipelineMessage CreateGetRunStepRequest(string threadId, string runId, string stepId, RequestOptions options) + { + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "GET"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/threads/"); + path.Append(threadId); + path.Append("/runs/"); + path.Append(runId); + path.Append("/steps/"); + path.Append(stepId); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); + return message; + } + + private static RequestOptions DefaultRequestContext = new RequestOptions(); + + private static PipelineMessageClassifier _responseErrorClassifier200; + private static PipelineMessageClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= PipelineMessageClassifier.Create(stackalloc ushort[] { 200 }); + } +} diff --git a/.dotnet/src/Generated/Threads.cs b/.dotnet/src/Generated/Threads.cs new file mode 100644 index 000000000..bc2d4cbd0 --- /dev/null +++ b/.dotnet/src/Generated/Threads.cs @@ -0,0 +1,540 @@ +// + +using System; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Text; +using System.Threading; +using System.Threading.Tasks; +using 
OpenAI.Internal.Models; + +namespace OpenAI.Internal +{ + // Data plane generated sub-client. + /// The Threads sub-client. + internal partial class Threads + { + private const string AuthorizationHeader = "Authorization"; + private readonly ApiKeyCredential _credential; + private const string AuthorizationApiKeyPrefix = "Bearer"; + private readonly ClientPipeline _pipeline; + private readonly Uri _endpoint; + + /// The HTTP pipeline for sending and receiving REST requests and responses. + public virtual ClientPipeline Pipeline => _pipeline; + + /// Initializes a new instance of Threads for mocking. + protected Threads() + { + } + + /// Initializes a new instance of Threads. + /// The HTTP pipeline for sending and receiving REST requests and responses. + /// The key credential to copy. + /// OpenAI Endpoint. + internal Threads(ClientPipeline pipeline, ApiKeyCredential credential, Uri endpoint) + { + _pipeline = pipeline; + _credential = credential; + _endpoint = endpoint; + } + + /// Create a thread. + /// The to use. + /// is null. + public virtual async Task> CreateThreadAsync(CreateThreadRequest thread) + { + Argument.AssertNotNull(thread, nameof(thread)); + + using BinaryContent content = BinaryContent.Create(thread); + ClientResult result = await CreateThreadAsync(content, DefaultRequestContext).ConfigureAwait(false); + return ClientResult.FromValue(ThreadObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Create a thread. + /// The to use. + /// is null. + public virtual ClientResult CreateThread(CreateThreadRequest thread) + { + Argument.AssertNotNull(thread, nameof(thread)); + + using BinaryContent content = BinaryContent.Create(thread); + ClientResult result = CreateThread(content, DefaultRequestContext); + return ClientResult.FromValue(ThreadObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Create a thread. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The content to send as the body of the request. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task CreateThreadAsync(BinaryContent content, RequestOptions options = null) + { + Argument.AssertNotNull(content, nameof(content)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("Threads.CreateThread"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateCreateThreadRequest(content, options); + return ClientResult.FromResponse(await _pipeline.ProcessMessageAsync(message, options).ConfigureAwait(false)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Create a thread. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The content to send as the body of the request. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. 
+ /// Service returned a non-success status code. + /// The response returned from the service. + public virtual ClientResult CreateThread(BinaryContent content, RequestOptions options = null) + { + Argument.AssertNotNull(content, nameof(content)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("Threads.CreateThread"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateCreateThreadRequest(content, options); + return ClientResult.FromResponse(_pipeline.ProcessMessage(message, options)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// Retrieves a thread. + /// The ID of the thread to retrieve. + /// is null. + /// is an empty string, and was expected to be non-empty. + public virtual async Task> GetThreadAsync(string threadId) + { + Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); + + ClientResult result = await GetThreadAsync(threadId, DefaultRequestContext).ConfigureAwait(false); + return ClientResult.FromValue(ThreadObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Retrieves a thread. + /// The ID of the thread to retrieve. + /// is null. + /// is an empty string, and was expected to be non-empty. + public virtual ClientResult GetThread(string threadId) + { + Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); + + ClientResult result = GetThread(threadId, DefaultRequestContext); + return ClientResult.FromValue(ThreadObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Retrieves a thread. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the thread to retrieve. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task GetThreadAsync(string threadId, RequestOptions options) + { + Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("Threads.GetThread"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateGetThreadRequest(threadId, options); + return ClientResult.FromResponse(await _pipeline.ProcessMessageAsync(message, options).ConfigureAwait(false)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Retrieves a thread. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the thread to retrieve. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. 
+ public virtual ClientResult GetThread(string threadId, RequestOptions options) + { + Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("Threads.GetThread"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateGetThreadRequest(threadId, options); + return ClientResult.FromResponse(_pipeline.ProcessMessage(message, options)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// Modifies a thread. + /// The ID of the thread to modify. Only the `metadata` can be modified. + /// The to use. + /// or is null. + /// is an empty string, and was expected to be non-empty. + public virtual async Task> ModifyThreadAsync(string threadId, ModifyThreadRequest thread) + { + Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); + Argument.AssertNotNull(thread, nameof(thread)); + + using BinaryContent content = BinaryContent.Create(thread); + ClientResult result = await ModifyThreadAsync(threadId, content, DefaultRequestContext).ConfigureAwait(false); + return ClientResult.FromValue(ThreadObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Modifies a thread. + /// The ID of the thread to modify. Only the `metadata` can be modified. + /// The to use. + /// or is null. + /// is an empty string, and was expected to be non-empty. + public virtual ClientResult ModifyThread(string threadId, ModifyThreadRequest thread) + { + Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); + Argument.AssertNotNull(thread, nameof(thread)); + + using BinaryContent content = BinaryContent.Create(thread); + ClientResult result = ModifyThread(threadId, content, DefaultRequestContext); + return ClientResult.FromValue(ThreadObject.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Modifies a thread. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the thread to modify. Only the `metadata` can be modified. + /// The content to send as the body of the request. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task ModifyThreadAsync(string threadId, BinaryContent content, RequestOptions options = null) + { + Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); + Argument.AssertNotNull(content, nameof(content)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("Threads.ModifyThread"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateModifyThreadRequest(threadId, content, options); + return ClientResult.FromResponse(await _pipeline.ProcessMessageAsync(message, options).ConfigureAwait(false)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Modifies a thread. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. 
+ /// + /// + /// + /// + /// The ID of the thread to modify. Only the `metadata` can be modified. + /// The content to send as the body of the request. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual ClientResult ModifyThread(string threadId, BinaryContent content, RequestOptions options = null) + { + Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); + Argument.AssertNotNull(content, nameof(content)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("Threads.ModifyThread"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateModifyThreadRequest(threadId, content, options); + return ClientResult.FromResponse(_pipeline.ProcessMessage(message, options)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// Delete a thread. + /// The ID of the thread to delete. + /// is null. + /// is an empty string, and was expected to be non-empty. + public virtual async Task> DeleteThreadAsync(string threadId) + { + Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); + + ClientResult result = await DeleteThreadAsync(threadId, DefaultRequestContext).ConfigureAwait(false); + return ClientResult.FromValue(DeleteThreadResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// Delete a thread. + /// The ID of the thread to delete. + /// is null. + /// is an empty string, and was expected to be non-empty. + public virtual ClientResult DeleteThread(string threadId) + { + Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); + + ClientResult result = DeleteThread(threadId, DefaultRequestContext); + return ClientResult.FromValue(DeleteThreadResponse.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + /// + /// [Protocol Method] Delete a thread. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the thread to delete. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual async Task DeleteThreadAsync(string threadId, RequestOptions options) + { + Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("Threads.DeleteThread"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateDeleteThreadRequest(threadId, options); + return ClientResult.FromResponse(await _pipeline.ProcessMessageAsync(message, options).ConfigureAwait(false)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Delete a thread. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. 
+ /// + /// + /// + /// + /// The ID of the thread to delete. + /// The request options, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + public virtual ClientResult DeleteThread(string threadId, RequestOptions options) + { + Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); + + options ??= new RequestOptions(); + // using var scope = ClientDiagnostics.CreateSpan("Threads.DeleteThread"\); + // scope.Start(); + try + { + using PipelineMessage message = CreateDeleteThreadRequest(threadId, options); + return ClientResult.FromResponse(_pipeline.ProcessMessage(message, options)); + } + catch (Exception e) + { + // scope.Failed(e); + throw; + } + } + + internal PipelineMessage CreateCreateThreadRequest(BinaryContent content, RequestOptions options) + { + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "POST"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/threads"); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); + request.Headers.Set("Content-Type", "application/json"); + request.Content = content; + message.Apply(options); + return message; + } + + internal PipelineMessage CreateGetThreadRequest(string threadId, RequestOptions options) + { + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "GET"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/threads/"); + path.Append(threadId); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); + return message; + } + + internal PipelineMessage CreateModifyThreadRequest(string threadId, BinaryContent content, RequestOptions options) + { + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "POST"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/threads/"); + path.Append(threadId); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); + request.Headers.Set("Content-Type", "application/json"); + request.Content = content; + message.Apply(options); + return message; + } + + internal PipelineMessage CreateDeleteThreadRequest(string threadId, RequestOptions options) + { + PipelineMessage message = _pipeline.CreateMessage(); + message.ResponseClassifier = ResponseErrorClassifier200; + PipelineRequest request = message.Request; + request.Method = "DELETE"; + UriBuilder uriBuilder = new(_endpoint.ToString()); + StringBuilder path = new(); + path.Append("/threads/"); + path.Append(threadId); + uriBuilder.Path += path.ToString(); + request.Uri = uriBuilder.Uri; + request.Headers.Set("Accept", "application/json"); + return message; + } + + private static RequestOptions DefaultRequestContext = new RequestOptions(); + + private static PipelineMessageClassifier _responseErrorClassifier200; + private static 
PipelineMessageClassifier ResponseErrorClassifier200 => _responseErrorClassifier200 ??= PipelineMessageClassifier.Create(stackalloc ushort[] { 200 }); + } +} diff --git a/.dotnet/src/OpenAI.csproj b/.dotnet/src/OpenAI.csproj new file mode 100644 index 000000000..6d4778553 --- /dev/null +++ b/.dotnet/src/OpenAI.csproj @@ -0,0 +1,18 @@ + + + This is the OpenAI client library for developing .NET applications with rich experience. + SDK Code Generation OpenAI + OpenAI + netstandard2.0 + latest + true + + + + + + + + + + diff --git a/.dotnet/src/OpenAI.snk b/.dotnet/src/OpenAI.snk new file mode 100644 index 000000000..fa09b4dee Binary files /dev/null and b/.dotnet/src/OpenAI.snk differ diff --git a/.dotnet/src/Polyfill/System.Diagnostics.CodeAnalysis.ExperimentalAttribute.cs b/.dotnet/src/Polyfill/System.Diagnostics.CodeAnalysis.ExperimentalAttribute.cs new file mode 100644 index 000000000..163d98482 --- /dev/null +++ b/.dotnet/src/Polyfill/System.Diagnostics.CodeAnalysis.ExperimentalAttribute.cs @@ -0,0 +1,59 @@ +#if !NET8_0_OR_GREATER + +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. + +namespace System.Diagnostics.CodeAnalysis +{ + /// + /// Indicates that an API is experimental and it may change in the future. + /// + /// + /// This attribute allows call sites to be flagged with a diagnostic that indicates that an experimental + /// feature is used. Authors can use this attribute to ship preview features in their assemblies. + /// + [AttributeUsage(AttributeTargets.Assembly | + AttributeTargets.Module | + AttributeTargets.Class | + AttributeTargets.Struct | + AttributeTargets.Enum | + AttributeTargets.Constructor | + AttributeTargets.Method | + AttributeTargets.Property | + AttributeTargets.Field | + AttributeTargets.Event | + AttributeTargets.Interface | + AttributeTargets.Delegate, Inherited = false)] + internal sealed class ExperimentalAttribute : Attribute + { + /// + /// Initializes a new instance of the class, specifying the ID that the compiler will use + /// when reporting a use of the API the attribute applies to. + /// + /// The ID that the compiler will use when reporting a use of the API the attribute applies to. + public ExperimentalAttribute(string diagnosticId) + { + DiagnosticId = diagnosticId; + } + + /// + /// Gets the ID that the compiler will use when reporting a use of the API the attribute applies to. + /// + /// The unique diagnostic ID. + /// + /// The diagnostic ID is shown in build output for warnings and errors. + /// This property represents the unique ID that can be used to suppress the warnings or errors, if needed. + /// + public string DiagnosticId { get; } + + /// + /// Gets or sets the URL for corresponding documentation. + /// The API accepts a format string instead of an actual URL, creating a generic URL that includes the diagnostic ID. + /// + /// The format string that represents a URL to corresponding documentation. + /// An example format string is https://contoso.com/obsoletion-warnings/{0}. + public string? 
UrlFormat { get; set; }
+    }
+}
+
+#endif // !NET8_0_OR_GREATER
\ No newline at end of file
diff --git a/.dotnet/src/Polyfill/System.Diagnostics.CodeAnalysis.SetsRequiredMembersAttribute.cs b/.dotnet/src/Polyfill/System.Diagnostics.CodeAnalysis.SetsRequiredMembersAttribute.cs
new file mode 100644
index 000000000..26cd00bce
--- /dev/null
+++ b/.dotnet/src/Polyfill/System.Diagnostics.CodeAnalysis.SetsRequiredMembersAttribute.cs
@@ -0,0 +1,8 @@
+#if !NET7_0_OR_GREATER
+
+namespace System.Diagnostics.CodeAnalysis;
+
+[AttributeUsage(AttributeTargets.Constructor, AllowMultiple = false, Inherited = false)]
+internal sealed class SetsRequiredMembersAttribute : Attribute { }
+
+#endif // !NET7_0_OR_GREATER
diff --git a/.dotnet/src/Polyfill/System.Runtime.CompilerServices.CompilerFeatureRequiredAttribute.cs b/.dotnet/src/Polyfill/System.Runtime.CompilerServices.CompilerFeatureRequiredAttribute.cs
new file mode 100644
index 000000000..1b9abe47c
--- /dev/null
+++ b/.dotnet/src/Polyfill/System.Runtime.CompilerServices.CompilerFeatureRequiredAttribute.cs
@@ -0,0 +1,15 @@
+#if !NET7_0_OR_GREATER
+
+namespace System.Runtime.CompilerServices;
+
+[AttributeUsage(AttributeTargets.All, AllowMultiple = true, Inherited = false)]
+internal sealed class CompilerFeatureRequiredAttribute(string featureName) : Attribute
+{
+    public string FeatureName { get; } = featureName;
+    public bool IsOptional { get; init; }
+
+    public const string RefStructs = nameof(RefStructs);
+    public const string RequiredMembers = nameof(RequiredMembers);
+}
+
+#endif // !NET7_0_OR_GREATER
diff --git a/.dotnet/src/Polyfill/System.Runtime.CompilerServices.IsExternalInit.cs b/.dotnet/src/Polyfill/System.Runtime.CompilerServices.IsExternalInit.cs
new file mode 100644
index 000000000..f4b6d744f
--- /dev/null
+++ b/.dotnet/src/Polyfill/System.Runtime.CompilerServices.IsExternalInit.cs
@@ -0,0 +1,9 @@
+#if !NET5_0_OR_GREATER
+
+using System.ComponentModel;
+namespace System.Runtime.CompilerServices;
+
+[EditorBrowsable(EditorBrowsableState.Never)]
+internal static class IsExternalInit { }
+
+#endif // !NET5_0_OR_GREATER
diff --git a/.dotnet/src/Polyfill/System.Runtime.CompilerServices.RequiredMemberAttribute.cs b/.dotnet/src/Polyfill/System.Runtime.CompilerServices.RequiredMemberAttribute.cs
new file mode 100644
index 000000000..216d76910
--- /dev/null
+++ b/.dotnet/src/Polyfill/System.Runtime.CompilerServices.RequiredMemberAttribute.cs
@@ -0,0 +1,8 @@
+#if !NET7_0_OR_GREATER
+
+namespace System.Runtime.CompilerServices;
+
+[AttributeUsage(AttributeTargets.Class | AttributeTargets.Struct | AttributeTargets.Field | AttributeTargets.Property, AllowMultiple = false, Inherited = false)]
+internal sealed class RequiredMemberAttribute : Attribute { }
+
+#endif // !NET7_0_OR_GREATER
diff --git a/.dotnet/src/Utility/GenericActionPipelinePolicy.cs b/.dotnet/src/Utility/GenericActionPipelinePolicy.cs
new file mode 100644
index 000000000..41ccef5ec
--- /dev/null
+++ b/.dotnet/src/Utility/GenericActionPipelinePolicy.cs
@@ -0,0 +1,35 @@
+using System;
+using System.ClientModel;
+using System.ClientModel.Primitives;
+using System.Collections.Generic;
+using System.Threading.Tasks;
+
+namespace OpenAI;
+
+internal partial class GenericActionPipelinePolicy : PipelinePolicy
+{
+    private Action<PipelineMessage> _processMessageAction;
+
+    public GenericActionPipelinePolicy(Action<PipelineMessage> processMessageAction)
+    {
+        _processMessageAction = processMessageAction;
+    }
+
+    public override void Process(PipelineMessage message, IReadOnlyList<PipelinePolicy> pipeline, int currentIndex)
+    {
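+        // Invoke the caller-supplied action on the message, then forward the
+        // message to the next policy in the pipeline (if this is not the last one).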
+        _processMessageAction(message);
+        if (currentIndex < pipeline.Count - 1)
+        {
+            pipeline[currentIndex + 1].Process(message, pipeline, currentIndex + 1);
+        }
+    }
+
+    public override async ValueTask ProcessAsync(PipelineMessage message, IReadOnlyList<PipelinePolicy> pipeline, int currentIndex)
+    {
+        _processMessageAction(message);
+        if (currentIndex < pipeline.Count - 1)
+        {
+            await pipeline[currentIndex + 1].ProcessAsync(message, pipeline, currentIndex + 1);
+        }
+    }
+}
\ No newline at end of file
diff --git a/.dotnet/src/Utility/MultipartFormDataBinaryContent.cs b/.dotnet/src/Utility/MultipartFormDataBinaryContent.cs
new file mode 100644
index 000000000..adce9e4f1
--- /dev/null
+++ b/.dotnet/src/Utility/MultipartFormDataBinaryContent.cs
@@ -0,0 +1,155 @@
+using System;
+using System.ClientModel;
+using System.Diagnostics;
+using System.Globalization;
+using System.IO;
+using System.Net.Http;
+using System.Net.Http.Headers;
+using System.Threading;
+using System.Threading.Tasks;
+
+namespace OpenAI.Internal;
+
+internal class MultipartFormDataBinaryContent : BinaryContent
+{
+    private readonly MultipartFormDataContent _multipartContent;
+
+    private static Random _random = new();
+    private static readonly char[] _boundaryValues = "0123456789=ABCDEFGHIJKLMNOPQRSTUVWXYZ_abcdefghijklmnopqrstuvwxyz".ToCharArray();
+
+    public MultipartFormDataBinaryContent()
+    {
+        _multipartContent = new MultipartFormDataContent(CreateBoundary());
+    }
+
+    public string ContentType
+    {
+        get
+        {
+            Debug.Assert(_multipartContent.Headers.ContentType is not null);
+
+            return _multipartContent.Headers.ContentType!.ToString();
+        }
+    }
+
+    internal HttpContent HttpContent => _multipartContent;
+
+    public void Add(Stream stream, string name, string fileName = default)
+    {
+        Add(new StreamContent(stream), name, fileName);
+    }
+
+    public void Add(string content, string name, string fileName = default)
+    {
+        Add(new StringContent(content), name, fileName);
+    }
+
+    public void Add(int content, string name, string fileName = default)
+    {
+        // https://learn.microsoft.com/en-us/dotnet/standard/base-types/standard-numeric-format-strings#GFormatString
+        string value = content.ToString("G", CultureInfo.InvariantCulture);
+        Add(new StringContent(value), name, fileName);
+    }
+
+    public void Add(double content, string name, string fileName = default)
+    {
+        // https://learn.microsoft.com/en-us/dotnet/standard/base-types/standard-numeric-format-strings#GFormatString
+        string value = content.ToString("G", CultureInfo.InvariantCulture);
+        Add(new StringContent(value), name, fileName);
+    }
+
+    public void Add(byte[] content, string name, string fileName = default)
+    {
+        Add(new ByteArrayContent(content), name, fileName);
+    }
+
+    public void Add(BinaryData content, string name, string fileName = default)
+    {
+        Add(new ByteArrayContent(content.ToArray()), name, fileName);
+    }
+
+    private void Add(HttpContent content, string name, string fileName)
+    {
+        if (fileName is not null)
+        {
+            AddFileNameHeader(content, name, fileName);
+        }
+
+        _multipartContent.Add(content, name);
+    }
+
+    private static void AddFileNameHeader(HttpContent content, string name, string filename)
+    {
+        // Add the content header manually because the default implementation
+        // adds a `filename*` parameter to the header, which RFC 7578 says not
+        // to do. We are following up with the BCL team regarding correctness.
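+        // The emitted header should then look roughly like the following
+        // (values here are illustrative; exact quoting may vary by runtime):
+        //   Content-Disposition: form-data; name="file"; filename="audio.mp3"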
+        ContentDispositionHeaderValue header = new("form-data")
+        {
+            Name = name,
+            FileName = filename
+        };
+        content.Headers.ContentDisposition = header;
+    }
+
+    private static string CreateBoundary()
+    {
+        Span<char> chars = new char[70];
+
+        byte[] random = new byte[70];
+        _random.NextBytes(random);
+
+        // The following will sample evenly from the possible values.
+        // This is important to ensure that the odds of creating a boundary
+        // that occurs in any content part are astronomically small.
+        int mask = 255 >> 2;
+
+        Debug.Assert(_boundaryValues.Length - 1 == mask);
+
+        for (int i = 0; i < 70; i++)
+        {
+            chars[i] = _boundaryValues[random[i] & mask];
+        }
+
+        return chars.ToString();
+    }
+
+    public override bool TryComputeLength(out long length)
+    {
+        // We can't call the protected method on HttpContent
+
+        if (_multipartContent.Headers.ContentLength is long contentLength)
+        {
+            length = contentLength;
+            return true;
+        }
+
+        length = 0;
+        return false;
+    }
+
+    public override void WriteTo(Stream stream, CancellationToken cancellationToken = default)
+    {
+        // TODO: polyfill sync-over-async for netstandard2.0 for Azure clients.
+        // Tracked by https://github.com/Azure/azure-sdk-for-net/issues/42674
+
+#if NET6_0_OR_GREATER
+        _multipartContent.CopyTo(stream, default, cancellationToken);
+#else
+        _multipartContent.CopyToAsync(stream).GetAwaiter().GetResult();
+#endif
+    }
+
+    public override async Task WriteToAsync(Stream stream, CancellationToken cancellationToken = default)
+    {
+#if NET6_0_OR_GREATER
+        await _multipartContent.CopyToAsync(stream, cancellationToken).ConfigureAwait(false);
+#else
+        await _multipartContent.CopyToAsync(stream).ConfigureAwait(false);
+#endif
+    }
+
+    public override void Dispose()
+    {
+        _multipartContent.Dispose();
+    }
+}
diff --git a/.dotnet/src/Utility/SseAsyncEnumerator.cs b/.dotnet/src/Utility/SseAsyncEnumerator.cs
new file mode 100644
index 000000000..743a1bedd
--- /dev/null
+++ b/.dotnet/src/Utility/SseAsyncEnumerator.cs
@@ -0,0 +1,59 @@
+using System;
+using System.Collections.Generic;
+using System.IO;
+using System.Runtime.CompilerServices;
+using System.Text.Json;
+using System.Threading;
+
+namespace OpenAI;
+
+internal static class SseAsyncEnumerator
+{
+    internal static async IAsyncEnumerable<T> EnumerateFromSseStream<T>(
+        Stream stream,
+        Func<JsonElement, IEnumerable<T>> multiElementDeserializer,
+        [EnumeratorCancellation] CancellationToken cancellationToken = default)
+    {
+        try
+        {
+            using SseReader sseReader = new(stream);
+            while (!cancellationToken.IsCancellationRequested)
+            {
sseEvent = await sseReader.TryReadSingleFieldEventAsync().ConfigureAwait(false); + if (sseEvent is not null) + { + ReadOnlyMemory name = sseEvent.Value.FieldName; + if (!name.Span.SequenceEqual("data".AsSpan())) + { + throw new InvalidDataException(); + } + ReadOnlyMemory value = sseEvent.Value.FieldValue; + if (value.Span.SequenceEqual("[DONE]".AsSpan())) + { + break; + } + using JsonDocument sseMessageJson = JsonDocument.Parse(value); + IEnumerable newItems = multiElementDeserializer.Invoke(sseMessageJson.RootElement); + foreach (T item in newItems) + { + yield return item; + } + } + } + } + finally + { + // Always dispose the stream immediately once enumeration is complete for any reason + stream.Dispose(); + } + } + + internal static IAsyncEnumerable EnumerateFromSseStream( + Stream stream, + Func elementDeserializer, + CancellationToken cancellationToken = default) + => EnumerateFromSseStream( + stream, + (element) => new T[] { elementDeserializer.Invoke(element) }, + cancellationToken); +} \ No newline at end of file diff --git a/.dotnet/src/Utility/SseLine.cs b/.dotnet/src/Utility/SseLine.cs new file mode 100644 index 000000000..4d82315f9 --- /dev/null +++ b/.dotnet/src/Utility/SseLine.cs @@ -0,0 +1,29 @@ +using System; + +namespace OpenAI; + +// SSE specification: https://html.spec.whatwg.org/multipage/server-sent-events.html#parsing-an-event-stream +internal readonly struct SseLine +{ + private readonly string _original; + private readonly int _colonIndex; + private readonly int _valueIndex; + + public static SseLine Empty { get; } = new SseLine(string.Empty, 0, false); + + internal SseLine(string original, int colonIndex, bool hasSpaceAfterColon) + { + _original = original; + _colonIndex = colonIndex; + _valueIndex = colonIndex + (hasSpaceAfterColon ? 2 : 1); + } + + public bool IsEmpty => _original.Length == 0; + public bool IsComment => !IsEmpty && _original[0] == ':'; + + // TODO: we should not expose UTF16 publicly + public ReadOnlyMemory FieldName => _original.AsMemory(0, _colonIndex); + public ReadOnlyMemory FieldValue => _original.AsMemory(_valueIndex); + + public override string ToString() => _original; +} \ No newline at end of file diff --git a/.dotnet/src/Utility/SseReader.cs b/.dotnet/src/Utility/SseReader.cs new file mode 100644 index 000000000..cf0301408 --- /dev/null +++ b/.dotnet/src/Utility/SseReader.cs @@ -0,0 +1,118 @@ +using System; +using System.ClientModel; +using System.ClientModel.Internal; +using System.IO; +using System.Threading.Tasks; + +namespace OpenAI; + +internal sealed class SseReader : IDisposable + { + private readonly Stream _stream; + private readonly StreamReader _reader; + private bool _disposedValue; + + public SseReader(Stream stream) + { + _stream = stream; + _reader = new StreamReader(stream); + } + + public SseLine? TryReadSingleFieldEvent() + { + while (true) + { + SseLine? line = TryReadLine(); + if (line == null) + return null; + if (line.Value.IsEmpty) + throw new InvalidDataException("event expected."); + SseLine? empty = TryReadLine(); + if (empty != null && !empty.Value.IsEmpty) + throw new NotSupportedException("Multi-filed events not supported."); + if (!line.Value.IsComment) + return line; // skip comment lines + } + } + + // TODO: we should support cancellation tokens, but StreamReader does not in NS2 + public async Task TryReadSingleFieldEventAsync() + { + while (true) + { + SseLine? 
line = await TryReadLineAsync().ConfigureAwait(false); + if (line == null) + return null; + if (line.Value.IsEmpty) + throw new InvalidDataException("event expected."); + SseLine? empty = await TryReadLineAsync().ConfigureAwait(false); + if (empty != null && !empty.Value.IsEmpty) + throw new NotSupportedException("Multi-filed events not supported."); + if (!line.Value.IsComment) + return line; // skip comment lines + } + } + + public SseLine? TryReadLine() + { + string lineText = _reader.ReadLine(); + if (lineText == null) + return null; + if (lineText.Length == 0) + return SseLine.Empty; + if (TryParseLine(lineText, out SseLine line)) + return line; + return null; + } + + // TODO: we should support cancellation tokens, but StreamReader does not in NS2 + public async Task TryReadLineAsync() + { + string lineText = await _reader.ReadLineAsync().ConfigureAwait(false); + if (lineText == null) + return null; + if (lineText.Length == 0) + return SseLine.Empty; + if (TryParseLine(lineText, out SseLine line)) + return line; + return null; + } + + private static bool TryParseLine(string lineText, out SseLine line) + { + if (lineText.Length == 0) + { + line = default; + return false; + } + + ReadOnlySpan lineSpan = lineText.AsSpan(); + int colonIndex = lineSpan.IndexOf(':'); + ReadOnlySpan fieldValue = lineSpan.Slice(colonIndex + 1); + + bool hasSpace = false; + if (fieldValue.Length > 0 && fieldValue[0] == ' ') + hasSpace = true; + line = new SseLine(lineText, colonIndex, hasSpace); + return true; + } + + private void Dispose(bool disposing) + { + if (!_disposedValue) + { + if (disposing) + { + _reader.Dispose(); + _stream.Dispose(); + } + + _disposedValue = true; + } + } + public void Dispose() + { + Dispose(disposing: true); + GC.SuppressFinalize(this); + } + } \ No newline at end of file diff --git a/.dotnet/src/Utility/StreamingResult.cs b/.dotnet/src/Utility/StreamingResult.cs new file mode 100644 index 000000000..b3f676732 --- /dev/null +++ b/.dotnet/src/Utility/StreamingResult.cs @@ -0,0 +1,43 @@ +using System; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Threading; + +namespace OpenAI; + + +#pragma warning disable CS1591 // public XML comments + +/// +/// Represents an operation response with streaming content that can be deserialized and enumerated while the response +/// is still being received. +/// +/// The data type representative of distinct, streamable items. +public class StreamingEventResult : StreamingClientResult +{ + private IAsyncEnumerable _asyncEnumerableSource { get; } + + private StreamingEventResult(PipelineResponse response, + Func> asyncEnumerableProcessor) + : base(response) + { + _asyncEnumerableSource = asyncEnumerableProcessor.Invoke(response); + } + + internal static StreamingEventResult CreateFromResponse( + PipelineResponse response, + Func> asyncEnumerableProcessor) + { + return new(response, asyncEnumerableProcessor); + } + + // TODO: Handle disposal via Enumerator? Validate that this will work. + // If it doesn't, we likely need to implement IDisposable or IAsyncDisposable + // on StreamingClientResult. 
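+    // A possible shape for that (an assumption, not what is implemented here)
+    // would mirror SseAsyncEnumerator: wrap enumeration in a try/finally and
+    // dispose the underlying response in the finally block, e.g.
+    //
+    //   try { await foreach (T item in _asyncEnumerableSource) yield return item; }
+    //   finally { GetRawResponse()?.Dispose(); }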
+ + public override IAsyncEnumerator GetAsyncEnumerator(CancellationToken cancellationToken = default) + => _asyncEnumerableSource.GetAsyncEnumerator(cancellationToken); +} + +#pragma warning restore CS1591 // public XML comments \ No newline at end of file diff --git a/.dotnet/tests/Assets/edit_sample_image.png b/.dotnet/tests/Assets/edit_sample_image.png new file mode 100644 index 000000000..869bb1e04 Binary files /dev/null and b/.dotnet/tests/Assets/edit_sample_image.png differ diff --git a/.dotnet/tests/Assets/edit_sample_mask.png b/.dotnet/tests/Assets/edit_sample_mask.png new file mode 100644 index 000000000..98b9c237c Binary files /dev/null and b/.dotnet/tests/Assets/edit_sample_mask.png differ diff --git a/.dotnet/tests/Assets/hello_world.m4a b/.dotnet/tests/Assets/hello_world.m4a new file mode 100644 index 000000000..ed8e09c8f Binary files /dev/null and b/.dotnet/tests/Assets/hello_world.m4a differ diff --git a/.dotnet/tests/Assets/multilingual.wav b/.dotnet/tests/Assets/multilingual.wav new file mode 100644 index 000000000..847f3463a Binary files /dev/null and b/.dotnet/tests/Assets/multilingual.wav differ diff --git a/.dotnet/tests/Assets/stop_sign.png b/.dotnet/tests/Assets/stop_sign.png new file mode 100644 index 000000000..002b3ae1a Binary files /dev/null and b/.dotnet/tests/Assets/stop_sign.png differ diff --git a/.dotnet/tests/Assets/variation_sample_image.png b/.dotnet/tests/Assets/variation_sample_image.png new file mode 100644 index 000000000..119a13e8f Binary files /dev/null and b/.dotnet/tests/Assets/variation_sample_image.png differ diff --git a/.dotnet/tests/Directory.Build.targets b/.dotnet/tests/Directory.Build.targets new file mode 100644 index 000000000..9108ca3f9 --- /dev/null +++ b/.dotnet/tests/Directory.Build.targets @@ -0,0 +1,7 @@ + + + + PreserveNewest + + + \ No newline at end of file diff --git a/.dotnet/tests/OpenAI.Tests.csproj b/.dotnet/tests/OpenAI.Tests.csproj new file mode 100644 index 000000000..252866ce8 --- /dev/null +++ b/.dotnet/tests/OpenAI.Tests.csproj @@ -0,0 +1,19 @@ + + + net8.0 + + $(NoWarn);CS1591 + latest + + + + + + + + + + + + + \ No newline at end of file diff --git a/.dotnet/tests/Samples/Assistants/Sample01_RetrievalAugmentedGeneration.cs b/.dotnet/tests/Samples/Assistants/Sample01_RetrievalAugmentedGeneration.cs new file mode 100644 index 000000000..0f7856c2f --- /dev/null +++ b/.dotnet/tests/Samples/Assistants/Sample01_RetrievalAugmentedGeneration.cs @@ -0,0 +1,139 @@ +using NUnit.Framework; +using OpenAI.Assistants; +using OpenAI.Files; +using System; +using System.IO; +using System.Threading; + +namespace OpenAI.Samples +{ + public partial class AssistantSamples + { + [Test] + [Ignore("Compilation validation only")] + public void Sample01_RetrievalAugmentedGeneration() + { + // Assistants is a beta API and subject to change; acknowledge its experimental status by suppressing the matching warning. +#pragma warning disable OPENAI001 + OpenAIClient openAIClient = new(Environment.GetEnvironmentVariable("OpenAIClient_KEY")); + FileClient fileClient = openAIClient.GetFileClient(); + AssistantClient assistantClient = openAIClient.GetAssistantClient(); + + // First, let's contrive a document we'll use retrieval with and upload it. 
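+            // The document below is plain JSON; it is uploaded with the Assistants
+            // purpose so the retrieval tool can index and cite it.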
+ BinaryData document = BinaryData.FromString(""" + { + "description": "This document contains the sale history data for Contoso products.", + "sales": [ + { + "month": "January", + "by_product": { + "113043": 15, + "113045": 12, + "113049": 2 + } + }, + { + "month": "February", + "by_product": { + "113045": 22 + } + }, + { + "month": "March", + "by_product": { + "113045": 16, + "113055": 5 + } + } + ] + } + """); + + OpenAIFileInfo openAIFileInfo = fileClient.UploadFile(document, "test-rag-file-delete-me.json", OpenAIFilePurpose.Assistants); + + // Now, we'll create a client intended to help with that data + AssistantCreationOptions assistantOptions = new() + { + Name = "Example: Contoso sales RAG", + Instructions = + "You are an assistant that looks up sales data and helps visualize the information based" + + " on user queries. When asked to generate a graph, chart, or other visualization, use" + + " the code interpreter tool to do so.", + FileIds = { openAIFileInfo.Id }, + Tools = + { + new RetrievalToolDefinition(), + new CodeInterpreterToolDefinition(), + }, + Metadata = { ["test_key_delete_me"] = "true" }, + }; + + Assistant assistant = assistantClient.CreateAssistant("gpt-4-1106-preview", assistantOptions); + + // Now we'll create a thread with a user query about the data already associated with the assistant, then run it + ThreadCreationOptions threadOptions = new() + { + Messages = + { + new ThreadInitializationMessage( + MessageRole.User, + "How well did product 113045 sell in February? Graph its trend over time."), + } + }; + + ThreadRun threadRun = assistantClient.CreateThreadAndRun(assistant.Id, threadOptions); + + // Check back to see when the run is done + do + { + Thread.Sleep(TimeSpan.FromSeconds(1)); + threadRun = assistantClient.GetRun(threadRun.ThreadId, threadRun.Id); + } while (threadRun.Status == RunStatus.Queued || threadRun.Status == RunStatus.InProgress); + + // Finally, we'll print out the full history for the thread that includes the augmented generation + ListQueryPage messages = assistantClient.GetMessages(threadRun.ThreadId); + + for (int i = messages.Count - 1; i >= 0; i--) + { + ThreadMessage message = messages[i]; + + Console.WriteLine($"[{message.Role.ToString().ToUpper()}]:"); + foreach (MessageContent contentItem in message.ContentItems) + { + if (contentItem is MessageTextContent textContent) + { + Console.WriteLine($"{textContent.Text}"); + + if (textContent.Annotations.Count > 0) + { + Console.WriteLine(); + } + + // Include annotations, if any. 
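+                            // Two annotation types can appear: file citations produced
+                            // by the retrieval tool, and file paths pointing at files
+                            // generated by the code interpreter tool.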
+ foreach (TextContentAnnotation annotation in textContent.Annotations) + { + if (annotation is TextContentFileCitationAnnotation citationAnnotation) + { + Console.WriteLine($"* File citation, file ID: {citationAnnotation.FileId}"); + } + else if (annotation is TextContentFilePathAnnotation pathAnnotation) + { + Console.WriteLine($"* File path, file ID: {pathAnnotation.FileId}"); + } + } + } + else if (contentItem is MessageImageFileContent imageFileContent) + { + OpenAIFileInfo imageInfo = fileClient.GetFileInfo(imageFileContent.FileId); + BinaryData imageBytes = fileClient.DownloadFile(imageFileContent.FileId); + using FileStream stream = File.OpenWrite($"{imageInfo.Filename}.png"); + imageBytes.ToStream().CopyTo(stream); + + Console.WriteLine($""); + } + } + Console.WriteLine(); + } + } + } +} diff --git a/.dotnet/tests/Samples/Assistants/Sample01_RetrievalAugmentedGenerationAsync.cs b/.dotnet/tests/Samples/Assistants/Sample01_RetrievalAugmentedGenerationAsync.cs new file mode 100644 index 000000000..7737114fe --- /dev/null +++ b/.dotnet/tests/Samples/Assistants/Sample01_RetrievalAugmentedGenerationAsync.cs @@ -0,0 +1,139 @@ +using NUnit.Framework; +using OpenAI.Assistants; +using OpenAI.Files; +using System; +using System.IO; +using System.Threading.Tasks; + +namespace OpenAI.Samples +{ + public partial class AssistantSamples + { + [Test] + [Ignore("Compilation validation only")] + public async Task Sample01_RetrievalAugmentedGenerationAsync() + { + // Assistants is a beta API and subject to change; acknowledge its experimental status by suppressing the matching warning. +#pragma warning disable OPENAI001 + OpenAIClient openAIClient = new(Environment.GetEnvironmentVariable("OpenAIClient_KEY")); + FileClient fileClient = openAIClient.GetFileClient(); + AssistantClient assistantClient = openAIClient.GetAssistantClient(); + + // First, let's contrive a document we'll use retrieval with and upload it. + BinaryData document = BinaryData.FromString(""" + { + "description": "This document contains the sale history data for Contoso products.", + "sales": [ + { + "month": "January", + "by_product": { + "113043": 15, + "113045": 12, + "113049": 2 + } + }, + { + "month": "February", + "by_product": { + "113045": 22 + } + }, + { + "month": "March", + "by_product": { + "113045": 16, + "113055": 5 + } + } + ] + } + """); + + OpenAIFileInfo openAIFileInfo = await fileClient.UploadFileAsync(document, "test-rag-file-delete-me.json", OpenAIFilePurpose.Assistants); + + // Now, we'll create a client intended to help with that data + AssistantCreationOptions assistantOptions = new() + { + Name = "Example: Contoso sales RAG", + Instructions = + "You are an assistant that looks up sales data and helps visualize the information based" + + " on user queries. When asked to generate a graph, chart, or other visualization, use" + + " the code interpreter tool to do so.", + FileIds = { openAIFileInfo.Id }, + Tools = + { + new RetrievalToolDefinition(), + new CodeInterpreterToolDefinition(), + }, + Metadata = { ["test_key_delete_me"] = "true" }, + }; + + Assistant assistant = await assistantClient.CreateAssistantAsync("gpt-4-1106-preview", assistantOptions); + + // Now we'll create a thread with a user query about the data already associated with the assistant, then run it + ThreadCreationOptions threadOptions = new() + { + Messages = + { + new ThreadInitializationMessage( + MessageRole.User, + "How well did product 113045 sell in February? 
Graph its trend over time."), + } + }; + + ThreadRun threadRun = await assistantClient.CreateThreadAndRunAsync(assistant.Id, threadOptions); + + // Check back to see when the run is done + do + { + await Task.Delay(TimeSpan.FromSeconds(1)); + threadRun = await assistantClient.GetRunAsync(threadRun.ThreadId, threadRun.Id); + } while (threadRun.Status == RunStatus.Queued || threadRun.Status == RunStatus.InProgress); + + // Finally, we'll print out the full history for the thread that includes the augmented generation + ListQueryPage messages = await assistantClient.GetMessagesAsync(threadRun.ThreadId); + + for (int i = messages.Count - 1; i >= 0; i--) + { + ThreadMessage message = messages[i]; + + Console.WriteLine($"[{message.Role.ToString().ToUpper()}]:"); + foreach (MessageContent contentItem in message.ContentItems) + { + if (contentItem is MessageTextContent textContent) + { + Console.WriteLine($"{textContent.Text}"); + + if (textContent.Annotations.Count > 0) + { + Console.WriteLine(); + } + + // Include annotations, if any. + foreach (TextContentAnnotation annotation in textContent.Annotations) + { + if (annotation is TextContentFileCitationAnnotation citationAnnotation) + { + Console.WriteLine($"* File citation, file ID: {citationAnnotation.FileId}"); + } + else if (annotation is TextContentFilePathAnnotation pathAnnotation) + { + Console.WriteLine($"* File path, file ID: {pathAnnotation.FileId}"); + } + } + } + else if (contentItem is MessageImageFileContent imageFileContent) + { + OpenAIFileInfo imageInfo = await fileClient.GetFileInfoAsync(imageFileContent.FileId); + BinaryData imageBytes = await fileClient.DownloadFileAsync(imageFileContent.FileId); + using FileStream stream = File.OpenWrite($"{imageInfo.Filename}.png"); + imageBytes.ToStream().CopyTo(stream); + + Console.WriteLine($""); + } + } + Console.WriteLine(); + } + } + } +} diff --git a/.dotnet/tests/Samples/Assistants/Sample02_ListAssistantsWithPagination.cs b/.dotnet/tests/Samples/Assistants/Sample02_ListAssistantsWithPagination.cs new file mode 100644 index 000000000..4966ce1f8 --- /dev/null +++ b/.dotnet/tests/Samples/Assistants/Sample02_ListAssistantsWithPagination.cs @@ -0,0 +1,37 @@ +using NUnit.Framework; +using OpenAI.Assistants; +using System; + +namespace OpenAI.Samples +{ + public partial class AssistantSamples + { + [Test] + [Ignore("Compilation validation only")] + public void Sample02_ListAssistantsWithPagination() + { + // Assistants is a beta API and subject to change; acknowledge its experimental status by suppressing the matching warning. 
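+            // This sample pages through assistants manually: each call in the loop
+            // below passes the ID of the last assistant seen so the next page starts
+            // after it, stopping once HasMore reports no further results.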
+#pragma warning disable OPENAI001 + AssistantClient client = new(Environment.GetEnvironmentVariable("OpenAIClient_KEY")); + + string latestId = null; + bool continueQuery = true; + int count = 0; + + while (continueQuery) + { + ListQueryPage pagedAssistants = client.GetAssistants(previousAssistantId: latestId); + + foreach (Assistant assistant in pagedAssistants) + { + Console.WriteLine($"[{count,3}] {assistant.Id} {assistant.CreatedAt:s} {assistant.Name}"); + + latestId = assistant.Id; + count++; + } + + continueQuery = pagedAssistants.HasMore; + } + } + } +} diff --git a/.dotnet/tests/Samples/Assistants/Sample02_ListAssistantsWithPaginationAsync.cs b/.dotnet/tests/Samples/Assistants/Sample02_ListAssistantsWithPaginationAsync.cs new file mode 100644 index 000000000..e834e5aa1 --- /dev/null +++ b/.dotnet/tests/Samples/Assistants/Sample02_ListAssistantsWithPaginationAsync.cs @@ -0,0 +1,38 @@ +using NUnit.Framework; +using OpenAI.Assistants; +using System; +using System.Threading.Tasks; + +namespace OpenAI.Samples +{ + public partial class AssistantSamples + { + [Test] + [Ignore("Compilation validation only")] + public async Task Sample02_ListAssistantsWithPaginationAsync() + { + // Assistants is a beta API and subject to change; acknowledge its experimental status by suppressing the matching warning. +#pragma warning disable OPENAI001 + AssistantClient client = new(Environment.GetEnvironmentVariable("OpenAIClient_KEY")); + + string latestId = null; + bool continueQuery = true; + int count = 0; + + while (continueQuery) + { + ListQueryPage pagedAssistants = await client.GetAssistantsAsync(previousAssistantId: latestId); + + foreach (Assistant assistant in pagedAssistants) + { + Console.WriteLine($"[{count,3}] {assistant.Id} {assistant.CreatedAt:s} {assistant.Name}"); + + latestId = assistant.Id; + count++; + } + + continueQuery = pagedAssistants.HasMore; + } + } + } +} diff --git a/.dotnet/tests/Samples/Assistants/Sample03_FunctionCalling.cs b/.dotnet/tests/Samples/Assistants/Sample03_FunctionCalling.cs new file mode 100644 index 000000000..7e5bca6a2 --- /dev/null +++ b/.dotnet/tests/Samples/Assistants/Sample03_FunctionCalling.cs @@ -0,0 +1,201 @@ +using NUnit.Framework; +using OpenAI.Assistants; +using System; +using System.Collections.Generic; +using System.Text.Json; +using System.Threading; + +namespace OpenAI.Samples +{ + public partial class AssistantSamples + { + #region + private static string GetCurrentLocation() + { + // Call the location API here. + return "San Francisco"; + } + + private static string GetCurrentWeather(string location, string unit = "celsius") + { + // Call the weather API here. + return $"31 {unit}"; + } + + private const string GetCurrentLocationFunctionName = "get_current_location"; + + private const string GetCurrentWeatherFunctionName = "get_current_weather"; + + private static readonly FunctionToolDefinition getCurrentLocationFunction = new() + { + Name = GetCurrentLocationFunctionName, + Description = "Get the user's current location" + }; + + private static readonly FunctionToolDefinition getCurrentWeatherFunction = new() + { + Name = GetCurrentWeatherFunctionName, + Description = "Get the current weather in a given location", + Parameters = BinaryData.FromString(""" + { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state, e.g. Boston, MA" + }, + "unit": { + "type": "string", + "enum": [ "celsius", "fahrenheit" ], + "description": "The temperature unit to use. 
Infer this from the specified location." + } + }, + "required": [ "location" ] + } + """), + }; + #endregion + + [Test] + [Ignore("Compilation validation only")] + public void Sample03_FunctionCalling() + { + // Assistants is a beta API and subject to change; acknowledge its experimental status by suppressing the matching warning. +#pragma warning disable OPENAI001 + AssistantClient client = new(Environment.GetEnvironmentVariable("OpenAIClient_KEY")); + + #region + // Create an assistant that can call the function tools. + AssistantCreationOptions assistantOptions = new() + { + Name = "Sample: Function Calling", + Instructions = + "Don't make assumptions about what values to plug into functions." + + " Ask for clarification if a user request is ambiguous.", + Tools = { getCurrentLocationFunction, getCurrentWeatherFunction }, + Metadata = { ["test_key_delete_me"] = "true" }, + }; + + Assistant assistant = client.CreateAssistant("gpt-4-1106-preview", assistantOptions); + #endregion + + #region + // Create a thread with an initial user message and run it. + ThreadCreationOptions threadOptions = new() + { + Messages = { new ThreadInitializationMessage(MessageRole.User, "What's the weather like today?"), } + }; + + ThreadRun threadRun = client.CreateThreadAndRun(assistant.Id, threadOptions); + #endregion + + #region + // Poll the run until it is no longer queued or in progress. + while (threadRun.Status == RunStatus.Queued || threadRun.Status == RunStatus.InProgress) + { + Thread.Sleep(TimeSpan.FromSeconds(1)); + threadRun = client.GetRun(threadRun.ThreadId, threadRun.Id); + + // If the run requires action, resolve them. + if (threadRun.Status == RunStatus.RequiresAction) + { + List toolOutputs = []; + + foreach (RunRequiredAction action in threadRun.RequiredActions) + { + RequiredFunctionToolCall requiredFunctionToolCall = action as RequiredFunctionToolCall; + + switch (requiredFunctionToolCall?.Name) + { + case GetCurrentLocationFunctionName: + { + string toolResult = GetCurrentLocation(); + toolOutputs.Add(new ToolOutput(requiredFunctionToolCall.Id, toolResult)); + break; + } + + case GetCurrentWeatherFunctionName: + { + // The arguments that the model wants to use to call the function are specified as a + // stringified JSON object based on the schema defined in the tool definition. Note that + // the model may hallucinate arguments too. Consequently, it is important to do the + // appropriate parsing and validation before calling the function. + using JsonDocument argumentsJson = JsonDocument.Parse(requiredFunctionToolCall.Arguments); + bool hasLocation = argumentsJson.RootElement.TryGetProperty("location", out JsonElement location); + bool hasUnit = argumentsJson.RootElement.TryGetProperty("unit", out JsonElement unit); + + if (!hasLocation) + { + throw new ArgumentNullException(nameof(location), "The location argument is required."); + } + + string toolResult = hasUnit + ? GetCurrentWeather(location.GetString(), unit.GetString()) + : GetCurrentWeather(location.GetString()); + toolOutputs.Add(new ToolOutput(requiredFunctionToolCall.Id, toolResult)); + break; + } + + default: + { + // Handle other or unexpected calls. + throw new NotImplementedException(); + } + } + } + + // Submit the tool outputs to the assistant, which returns the run to the queued state. 
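+                        // Outputs for every required tool call are submitted together;
+                        // the run then leaves the requires-action state and resumes.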
+ threadRun = client.SubmitToolOutputs(threadRun.ThreadId, threadRun.Id, toolOutputs); + } + } + #endregion + + #region + switch (threadRun.Status) + { + case RunStatus.CompletedSuccessfully: + { + ListQueryPage messages = client.GetMessages(threadRun.ThreadId); + + for (int i = messages.Count - 1; i >= 0; i--) + { + ThreadMessage message = messages[i]; + + Console.WriteLine($"[{message.Role.ToString().ToUpper()}]:"); + foreach (MessageContent contentItem in message.ContentItems) + { + if (contentItem is MessageTextContent textContent) + { + Console.WriteLine($"{textContent.Text}"); + + if (textContent.Annotations.Count > 0) + { + Console.WriteLine(); + } + + // Include annotations, if any. + foreach (TextContentAnnotation annotation in textContent.Annotations) + { + if (annotation is TextContentFileCitationAnnotation citationAnnotation) + { + Console.WriteLine($"* File citation, file ID: {citationAnnotation.FileId}"); + } + else if (annotation is TextContentFilePathAnnotation pathAnnotation) + { + Console.WriteLine($"* File path, file ID: {pathAnnotation.FileId}"); + } + } + } + } + Console.WriteLine(); + } + break; + } + + default: + throw new NotImplementedException(threadRun.Status.ToString()); + } + #endregion + } + } +} diff --git a/.dotnet/tests/Samples/Assistants/Sample03_FunctionCallingAsync.cs b/.dotnet/tests/Samples/Assistants/Sample03_FunctionCallingAsync.cs new file mode 100644 index 000000000..9333f51d0 --- /dev/null +++ b/.dotnet/tests/Samples/Assistants/Sample03_FunctionCallingAsync.cs @@ -0,0 +1,155 @@ +using NUnit.Framework; +using OpenAI.Assistants; +using System; +using System.Collections.Generic; +using System.Text.Json; +using System.Threading; +using System.Threading.Tasks; + +namespace OpenAI.Samples +{ + public partial class AssistantSamples + { + [Test] + [Ignore("Compilation validation only")] + public async Task Sample03_FunctionCallingAsync() + { + // Assistants is a beta API and subject to change; acknowledge its experimental status by suppressing the matching warning. +#pragma warning disable OPENAI001 + AssistantClient client = new(Environment.GetEnvironmentVariable("OpenAIClient_KEY")); + + #region + // Create an assistant that can call the function tools. + AssistantCreationOptions assistantOptions = new() + { + Name = "Sample: Function Calling", + Instructions = + "Don't make assumptions about what values to plug into functions." + + " Ask for clarification if a user request is ambiguous.", + Tools = { getCurrentLocationFunction, getCurrentWeatherFunction }, + Metadata = { ["test_key_delete_me"] = "true" }, + }; + + Assistant assistant = await client.CreateAssistantAsync("gpt-4-1106-preview", assistantOptions); + #endregion + + #region + // Create a thread with an initial user message and run it. + ThreadCreationOptions threadOptions = new() + { + Messages = { new ThreadInitializationMessage(MessageRole.User, "What's the weather like today?"), } + }; + + ThreadRun threadRun = await client.CreateThreadAndRunAsync(assistant.Id, threadOptions); + #endregion + + #region + // Poll the run until it is no longer queued or in progress. + while (threadRun.Status == RunStatus.Queued || threadRun.Status == RunStatus.InProgress) + { + Thread.Sleep(TimeSpan.FromSeconds(1)); + threadRun = await client.GetRunAsync(threadRun.ThreadId, threadRun.Id); + + // If the run requires action, resolve them. 
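+                    // RequiresAction indicates the model has paused the run and is
+                    // waiting for the caller to execute the requested function tools.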
+ if (threadRun.Status == RunStatus.RequiresAction) + { + List toolOutputs = []; + + foreach (RunRequiredAction action in threadRun.RequiredActions) + { + RequiredFunctionToolCall requiredFunctionToolCall = action as RequiredFunctionToolCall; + + switch (requiredFunctionToolCall?.Name) + { + case GetCurrentLocationFunctionName: + { + string toolResult = GetCurrentLocation(); + toolOutputs.Add(new ToolOutput(requiredFunctionToolCall.Id, toolResult)); + break; + } + + case GetCurrentWeatherFunctionName: + { + // The arguments that the model wants to use to call the function are specified as a + // stringified JSON object based on the schema defined in the tool definition. Note that + // the model may hallucinate arguments too. Consequently, it is important to do the + // appropriate parsing and validation before calling the function. + using JsonDocument argumentsJson = JsonDocument.Parse(requiredFunctionToolCall.Arguments); + bool hasLocation = argumentsJson.RootElement.TryGetProperty("location", out JsonElement location); + bool hasUnit = argumentsJson.RootElement.TryGetProperty("unit", out JsonElement unit); + + if (!hasLocation) + { + throw new ArgumentNullException(nameof(location), "The location argument is required."); + } + + string toolResult = hasUnit + ? GetCurrentWeather(location.GetString(), unit.GetString()) + : GetCurrentWeather(location.GetString()); + toolOutputs.Add(new ToolOutput(requiredFunctionToolCall.Id, toolResult)); + break; + } + + default: + { + // Handle other or unexpected calls. + throw new NotImplementedException(); + } + } + } + + // Submit the tool outputs to the assistant, which returns the run to the queued state. + threadRun = await client.SubmitToolOutputsAsync(threadRun.ThreadId, threadRun.Id, toolOutputs); + } + } + #endregion + + #region + switch (threadRun.Status) + { + case RunStatus.CompletedSuccessfully: + { + ListQueryPage messages = await client.GetMessagesAsync(threadRun.ThreadId); + + for (int i = messages.Count - 1; i >= 0; i--) + { + ThreadMessage message = messages[i]; + + Console.WriteLine($"[{message.Role.ToString().ToUpper()}]:"); + foreach (MessageContent contentItem in message.ContentItems) + { + if (contentItem is MessageTextContent textContent) + { + Console.WriteLine($"{textContent.Text}"); + + if (textContent.Annotations.Count > 0) + { + Console.WriteLine(); + } + + // Include annotations, if any. 
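+                                    // As in the synchronous sample, file citations and
+                                    // generated file paths are surfaced here.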
+ foreach (TextContentAnnotation annotation in textContent.Annotations) + { + if (annotation is TextContentFileCitationAnnotation citationAnnotation) + { + Console.WriteLine($"* File citation, file ID: {citationAnnotation.FileId}"); + } + else if (annotation is TextContentFilePathAnnotation pathAnnotation) + { + Console.WriteLine($"* File path, file ID: {pathAnnotation.FileId}"); + } + } + } + } + Console.WriteLine(); + } + break; + } + + default: + throw new NotImplementedException(threadRun.Status.ToString()); + } + #endregion + } + } +} diff --git a/.dotnet/tests/Samples/Chat/Sample01_SimpleChat.cs b/.dotnet/tests/Samples/Chat/Sample01_SimpleChat.cs new file mode 100644 index 000000000..57aa7e739 --- /dev/null +++ b/.dotnet/tests/Samples/Chat/Sample01_SimpleChat.cs @@ -0,0 +1,21 @@ +using NUnit.Framework; +using OpenAI.Chat; +using System; + +namespace OpenAI.Samples +{ + public partial class ChatSamples + { + [Test] + [Ignore("Compilation validation only")] + public void Sample01_SimpleChat() + { + ChatClient client = new("gpt-3.5-turbo", Environment.GetEnvironmentVariable("OpenAIClient_KEY")); + + ChatCompletion chatCompletion = client.CompleteChat("How does AI work? Explain it in simple terms."); + + Console.WriteLine($"[ASSISTANT]:"); + Console.WriteLine($"{chatCompletion.Content}"); + } + } +} diff --git a/.dotnet/tests/Samples/Chat/Sample01_SimpleChatAsync.cs b/.dotnet/tests/Samples/Chat/Sample01_SimpleChatAsync.cs new file mode 100644 index 000000000..2b3d3bc04 --- /dev/null +++ b/.dotnet/tests/Samples/Chat/Sample01_SimpleChatAsync.cs @@ -0,0 +1,22 @@ +using NUnit.Framework; +using OpenAI.Chat; +using System; +using System.Threading.Tasks; + +namespace OpenAI.Samples +{ + public partial class ChatSamples + { + [Test] + [Ignore("Compilation validation only")] + public async Task Sample01_SimpleChatAsync() + { + ChatClient client = new("gpt-3.5-turbo", Environment.GetEnvironmentVariable("OpenAIClient_KEY")); + + ChatCompletion chatCompletion = await client.CompleteChatAsync("How does AI work? Explain it in simple terms."); + + Console.WriteLine($"[ASSISTANT]:"); + Console.WriteLine($"{chatCompletion.Content}"); + } + } +} diff --git a/.dotnet/tests/Samples/Chat/Sample02_StreamingChatAsync.cs b/.dotnet/tests/Samples/Chat/Sample02_StreamingChatAsync.cs new file mode 100644 index 000000000..14d7bfb02 --- /dev/null +++ b/.dotnet/tests/Samples/Chat/Sample02_StreamingChatAsync.cs @@ -0,0 +1,27 @@ +using NUnit.Framework; +using OpenAI.Chat; +using System; +using System.ClientModel; +using System.Threading.Tasks; + +namespace OpenAI.Samples +{ + public partial class ChatSamples + { + [Test] + [Ignore("Compilation validation only")] + public async Task Sample02_StreamingChatAsync() + { + ChatClient client = new("gpt-3.5-turbo", Environment.GetEnvironmentVariable("OpenAIClient_KEY")); + + StreamingClientResult result = + client.CompleteChatStreaming("How does AI work? 
Explain it in simple terms."); + + Console.WriteLine("[ASSISTANT]: "); + await foreach (StreamingChatUpdate chatUpdate in result) + { + Console.Write(chatUpdate.ContentUpdate); + } + } + } +} diff --git a/.dotnet/tests/Samples/Chat/Sample03_FunctionCalling.cs b/.dotnet/tests/Samples/Chat/Sample03_FunctionCalling.cs new file mode 100644 index 000000000..289fdd473 --- /dev/null +++ b/.dotnet/tests/Samples/Chat/Sample03_FunctionCalling.cs @@ -0,0 +1,199 @@ +using NUnit.Framework; +using OpenAI.Chat; +using System; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Samples +{ + public partial class ChatSamples + { + #region + private static string GetCurrentLocation() + { + // Call the location API here. + return "San Francisco"; + } + + private static string GetCurrentWeather(string location, string unit = "celsius") + { + // Call the weather API here. + return $"31 {unit}"; + } + #endregion + + #region + private const string GetCurrentLocationFunctionName = "get_current_location"; + + private const string GetCurrentWeatherFunctionName = "get_current_weather"; + + private static readonly ChatFunctionToolDefinition getCurrentLocationFunction = new() + { + Name = GetCurrentLocationFunctionName, + Description = "Get the user's current location" + }; + + private static readonly ChatFunctionToolDefinition getCurrentWeatherFunction = new() + { + Name = GetCurrentWeatherFunctionName, + Description = "Get the current weather in a given location", + Parameters = BinaryData.FromString(""" + { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state, e.g. Boston, MA" + }, + "unit": { + "type": "string", + "enum": [ "celsius", "fahrenheit" ], + "description": "The temperature unit to use. Infer this from the specified location." + } + }, + "required": [ "location" ] + } + """), + }; + #endregion + + [Test] + [Ignore("Compilation validation only")] + public void Sample03_FunctionCalling() + { + ChatClient client = new("gpt-3.5-turbo", Environment.GetEnvironmentVariable("OpenAIClient_KEY")); + + #region + List messages = [ + new ChatRequestSystemMessage( + "Don't make assumptions about what values to plug into functions." + + " Ask for clarification if a user request is ambiguous."), + new ChatRequestUserMessage("What's the weather like today?"), + ]; + + ChatCompletionOptions options = new() + { + Tools = { getCurrentLocationFunction, getCurrentWeatherFunction }, + }; + #endregion + + #region + bool requiresAction; + + do + { + requiresAction = false; + ChatCompletion chatCompletion = client.CompleteChat(messages, options); + + switch (chatCompletion.FinishReason) + { + case ChatFinishReason.Stopped: + { + // Add the assistant message to the conversation history. + messages.Add(new ChatRequestAssistantMessage(chatCompletion)); + break; + } + + case ChatFinishReason.ToolCalls: + { + // First, add the assistant message with tool calls to the conversation history. + messages.Add(new ChatRequestAssistantMessage(chatCompletion)); + + // Then, add a new tool message for each tool call that is resolved. 
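+                            // Each tool message echoes the originating call's Id so the
+                            // model can match results to the calls it issued.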
+ foreach (ChatToolCall toolCall in chatCompletion.ToolCalls) + { + ChatFunctionToolCall functionToolCall = toolCall as ChatFunctionToolCall; + + switch (functionToolCall?.Name) + { + case GetCurrentLocationFunctionName: + { + string toolResult = GetCurrentLocation(); + messages.Add(new ChatRequestToolMessage(toolCall.Id, toolResult)); + break; + } + + case GetCurrentWeatherFunctionName: + { + // The arguments that the model wants to use to call the function are specified as a + // stringified JSON object based on the schema defined in the tool definition. Note that + // the model may hallucinate arguments too. Consequently, it is important to do the + // appropriate parsing and validation before calling the function. + using JsonDocument argumentsJson = JsonDocument.Parse(functionToolCall.Arguments); + bool hasLocation = argumentsJson.RootElement.TryGetProperty("location", out JsonElement location); + bool hasUnit = argumentsJson.RootElement.TryGetProperty("unit", out JsonElement unit); + + if (!hasLocation) + { + throw new ArgumentNullException(nameof(location), "The location argument is required."); + } + + string toolResult = hasUnit + ? GetCurrentWeather(location.GetString(), unit.GetString()) + : GetCurrentWeather(location.GetString()); + messages.Add(new ChatRequestToolMessage(toolCall.Id, toolResult)); + break; + } + + default: + { + // Handle other or unexpected calls. + throw new NotImplementedException(); + } + } + } + + requiresAction = true; + break; + } + + case ChatFinishReason.Length: + throw new NotImplementedException("Incomplete model output due to MaxTokens parameter or token limit exceeded."); + + case ChatFinishReason.ContentFilter: + throw new NotImplementedException("Omitted content due to a content filter flag."); + + case ChatFinishReason.FunctionCall: + throw new NotImplementedException("Deprecated in favor of tool calls."); + + default: + throw new NotImplementedException(chatCompletion.FinishReason.ToString()); + } + } while (requiresAction); + #endregion + + #region + foreach (ChatRequestMessage requestMessage in messages) + { + switch (requestMessage) + { + case ChatRequestSystemMessage systemMessage: + Console.WriteLine($"[SYSTEM]:"); + Console.WriteLine($"{systemMessage.Content.Span[0].ToText()}"); + Console.WriteLine(); + break; + + case ChatRequestUserMessage userMessage: + Console.WriteLine($"[USER]:"); + Console.WriteLine($"{userMessage.Content.Span[0].ToText()}"); + Console.WriteLine(); + break; + + case ChatRequestAssistantMessage assistantMessage when assistantMessage.Content.Span[0].ToText() is not null: + Console.WriteLine($"[ASSISTANT]:"); + Console.WriteLine($"{assistantMessage.Content.Span[0].ToText()}"); + Console.WriteLine(); + break; + + case ChatRequestToolMessage: + // Do not print any tool messages; let the assistant summarize the tool results instead. 
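+                        // Their content already shaped the assistant's reply printed above.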
+ break; + + default: + break; + } + } + #endregion + } + } +} diff --git a/.dotnet/tests/Samples/Chat/Sample03_FunctionCallingAsync.cs b/.dotnet/tests/Samples/Chat/Sample03_FunctionCallingAsync.cs new file mode 100644 index 000000000..63a0b8c39 --- /dev/null +++ b/.dotnet/tests/Samples/Chat/Sample03_FunctionCallingAsync.cs @@ -0,0 +1,151 @@ +using NUnit.Framework; +using OpenAI.Chat; +using System; +using System.Collections.Generic; +using System.Text.Json; +using System.Threading.Tasks; + +namespace OpenAI.Samples +{ + public partial class ChatSamples + { + [Test] + [Ignore("Compilation validation only")] + public async Task Sample03_FunctionCallingAsync() + { + ChatClient client = new("gpt-3.5-turbo", Environment.GetEnvironmentVariable("OpenAIClient_KEY")); + + #region + List messages = [ + new ChatRequestSystemMessage( + "Don't make assumptions about what values to plug into functions." + + " Ask for clarification if a user request is ambiguous."), + new ChatRequestUserMessage("What's the weather like today?"), + ]; + + ChatCompletionOptions options = new() + { + Tools = { getCurrentLocationFunction, getCurrentWeatherFunction }, + }; + #endregion + + #region + bool requiresAction; + + do + { + requiresAction = false; + ChatCompletion chatCompletion = await client.CompleteChatAsync(messages, options); + + switch (chatCompletion.FinishReason) + { + case ChatFinishReason.Stopped: + { + // Add the assistant message to the conversation history. + messages.Add(new ChatRequestAssistantMessage(chatCompletion)); + break; + } + + case ChatFinishReason.ToolCalls: + { + // First, add the assistant message with tool calls to the conversation history. + messages.Add(new ChatRequestAssistantMessage(chatCompletion)); + + // Then, add a new tool message for each tool call that is resolved. + foreach (ChatToolCall toolCall in chatCompletion.ToolCalls) + { + ChatFunctionToolCall functionToolCall = toolCall as ChatFunctionToolCall; + + switch (functionToolCall?.Name) + { + case GetCurrentLocationFunctionName: + { + string toolResult = GetCurrentLocation(); + messages.Add(new ChatRequestToolMessage(toolCall.Id, toolResult)); + break; + } + + case GetCurrentWeatherFunctionName: + { + // The arguments that the model wants to use to call the function are specified as a + // stringified JSON object based on the schema defined in the tool definition. Note that + // the model may hallucinate arguments too. Consequently, it is important to do the + // appropriate parsing and validation before calling the function. + using JsonDocument argumentsJson = JsonDocument.Parse(functionToolCall.Arguments); + bool hasLocation = argumentsJson.RootElement.TryGetProperty("location", out JsonElement location); + bool hasUnit = argumentsJson.RootElement.TryGetProperty("unit", out JsonElement unit); + + if (!hasLocation) + { + throw new ArgumentNullException(nameof(location), "The location argument is required."); + } + + string toolResult = hasUnit + ? GetCurrentWeather(location.GetString(), unit.GetString()) + : GetCurrentWeather(location.GetString()); + messages.Add(new ChatRequestToolMessage(toolCall.Id, toolResult)); + break; + } + + default: + { + // Handle other or unexpected calls. 
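+                                // A production app might append an error tool message
+                                // here instead of throwing, letting the model recover.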
+ throw new NotImplementedException(); + } + } + } + + requiresAction = true; + break; + } + + case ChatFinishReason.Length: + throw new NotImplementedException("Incomplete model output due to MaxTokens parameter or token limit exceeded."); + + case ChatFinishReason.ContentFilter: + throw new NotImplementedException("Omitted content due to a content filter flag."); + + case ChatFinishReason.FunctionCall: + throw new NotImplementedException("Deprecated in favor of tool calls."); + + default: + throw new NotImplementedException(chatCompletion.FinishReason.ToString()); + } + } while (requiresAction); + #endregion + + #region + foreach (ChatRequestMessage requestMessage in messages) + { + switch (requestMessage) + { + case ChatRequestSystemMessage systemMessage: + Console.WriteLine($"[SYSTEM]:"); + Console.WriteLine($"{systemMessage.Content.Span[0].ToText()}"); + Console.WriteLine(); + break; + + case ChatRequestUserMessage userMessage: + Console.WriteLine($"[USER]:"); + Console.WriteLine($"{userMessage.Content.Span[0].ToText()}"); + Console.WriteLine(); + break; + + case ChatRequestAssistantMessage assistantMessage when assistantMessage.Content.Span[0].ToText() is not null: + Console.WriteLine($"[ASSISTANT]:"); + Console.WriteLine($"{assistantMessage.Content.Span[0].ToText()}"); + Console.WriteLine(); + break; + + case ChatRequestToolMessage: + // Do not print any tool messages; let the assistant summarize the tool results instead. + break; + + default: + break; + } + } + #endregion + } + } +} diff --git a/.dotnet/tests/Samples/Chat/Sample04_Protocol.cs b/.dotnet/tests/Samples/Chat/Sample04_Protocol.cs new file mode 100644 index 000000000..53eb49224 --- /dev/null +++ b/.dotnet/tests/Samples/Chat/Sample04_Protocol.cs @@ -0,0 +1,44 @@ +using NUnit.Framework; +using OpenAI.Chat; +using System; +using System.ClientModel; +using System.Text.Json; + +namespace OpenAI.Samples +{ + public partial class ChatSamples + { + [Test] + [Ignore("Compilation validation only")] + public void Sample04_Protocol() + { + ChatClient client = new("gpt-3.5-turbo", Environment.GetEnvironmentVariable("OpenAIClient_KEY")); + + BinaryData input = BinaryData.FromString(""" + { + "model": "gpt-3.5-turbo", + "messages": [ + { + "role": "user", + "content": "How does AI work? Explain it in simple terms." 
+ } + ] + } + """); + + using BinaryContent content = BinaryContent.Create(input); + ClientResult result = client.CompleteChat(content); + BinaryData output = result.GetRawResponse().Content; + + using JsonDocument outputAsJson = JsonDocument.Parse(output.ToString()); + string message = outputAsJson.RootElement + .GetProperty("choices")[0] + .GetProperty("message") + .GetProperty("content") + .GetString(); + + Console.WriteLine($"[ASSISTANT]:"); + Console.WriteLine($"{message}"); + } + } +} diff --git a/.dotnet/tests/Samples/Chat/Sample04_ProtocolAsync.cs b/.dotnet/tests/Samples/Chat/Sample04_ProtocolAsync.cs new file mode 100644 index 000000000..b11e5789c --- /dev/null +++ b/.dotnet/tests/Samples/Chat/Sample04_ProtocolAsync.cs @@ -0,0 +1,45 @@ +using NUnit.Framework; +using OpenAI.Chat; +using System; +using System.ClientModel; +using System.Text.Json; +using System.Threading.Tasks; + +namespace OpenAI.Samples +{ + public partial class ChatSamples + { + [Test] + [Ignore("Compilation validation only")] + public async Task Sample04_ProtocolAsync() + { + ChatClient client = new("gpt-3.5-turbo", Environment.GetEnvironmentVariable("OpenAIClient_KEY")); + + BinaryData input = BinaryData.FromString(""" + { + "model": "gpt-3.5-turbo", + "messages": [ + { + "role": "user", + "content": "How does AI work? Explain it in simple terms." + } + ] + } + """); + + using BinaryContent content = BinaryContent.Create(input); + ClientResult result = await client.CompleteChatAsync(content); + BinaryData output = result.GetRawResponse().Content; + + using JsonDocument outputAsJson = JsonDocument.Parse(output.ToString()); + string message = outputAsJson.RootElement + .GetProperty("choices")[0] + .GetProperty("message") + .GetProperty("content") + .GetString(); + + Console.WriteLine($"[ASSISTANT]:"); + Console.WriteLine($"{message}"); + } + } +} diff --git a/.dotnet/tests/Samples/Chat/Sample05_ChatWithVision.cs b/.dotnet/tests/Samples/Chat/Sample05_ChatWithVision.cs new file mode 100644 index 000000000..c5c696166 --- /dev/null +++ b/.dotnet/tests/Samples/Chat/Sample05_ChatWithVision.cs @@ -0,0 +1,25 @@ +using NUnit.Framework; +using OpenAI.Chat; +using System; +using System.Collections.Generic; + +namespace OpenAI.Samples +{ + public partial class ChatSamples + { + [Test] + [Ignore("Compilation validation only")] + public void Sample05_ChatWithVision(Uri imageUri = null) + { + ChatClient client = new("gpt-4-vision-preview", Environment.GetEnvironmentVariable("OpenAIClient_KEY")); + + List messages = [ + new ChatRequestUserMessage( + "Describe this image for me", + ChatMessageContent.CreateImage(imageUri)) + ]; + + ChatCompletion chatCompletion = client.CompleteChat(messages); + } + } +} \ No newline at end of file diff --git a/.dotnet/tests/Samples/Chat/Sample05_ChatWithVisionAsync.cs b/.dotnet/tests/Samples/Chat/Sample05_ChatWithVisionAsync.cs new file mode 100644 index 000000000..e180985d5 --- /dev/null +++ b/.dotnet/tests/Samples/Chat/Sample05_ChatWithVisionAsync.cs @@ -0,0 +1,26 @@ +using NUnit.Framework; +using OpenAI.Chat; +using System; +using System.Collections.Generic; +using System.Threading.Tasks; + +namespace OpenAI.Samples +{ + public partial class ChatSamples + { + [Test] + [Ignore("Compilation validation only")] + public async Task Sample05_ChatWithVisionAsync(Uri imageUri = null) + { + ChatClient client = new("gpt-4-vision-preview", Environment.GetEnvironmentVariable("OpenAIClient_KEY")); + + List messages = [ + new ChatRequestUserMessage( + "Describe this image for me", + 
ChatMessageContent.CreateImage(imageUri)) + ]; + + ChatCompletion chatCompletion = await client.CompleteChatAsync(messages); + } + } +} \ No newline at end of file diff --git a/.dotnet/tests/Samples/ClientSamples.cs b/.dotnet/tests/Samples/ClientSamples.cs new file mode 100644 index 000000000..fdbc3fd40 --- /dev/null +++ b/.dotnet/tests/Samples/ClientSamples.cs @@ -0,0 +1,55 @@ +using NUnit.Framework; +using OpenAI.Assistants; +using OpenAI.Audio; +using OpenAI.Chat; +using OpenAI.Embeddings; +using OpenAI.Files; +using OpenAI.Images; +using System.ClientModel; + +namespace OpenAI.Samples.Miscellaneous +{ + public partial class ClientSamples + { + [Test] + [Ignore("Compilation validation only")] + public void CreateAssistantAndFileClients() + { + OpenAIClient openAIClient = new(""); + FileClient fileClient = openAIClient.GetFileClient(); +#pragma warning disable OPENAI001 + AssistantClient assistantClient = openAIClient.GetAssistantClient(); +#pragma warning restore OPENAI001 + } + + [Test] + [Ignore("Compilation validation only")] + public void CreateChatClient() + { + ChatClient client = new("gpt-3.5-turbo", ""); + } + + [Test] + [Ignore("Compilation validation only")] + public void CreateEmbeddingClient() + { + EmbeddingClient client = new("text-embedding-3-small", new ApiKeyCredential("")); + } + + [Test] + [Ignore("Compilation validation only")] + public void CreateImageClient() + { + ImageClient client = new("dall-e-3", ""); + } + + [Test] + [Ignore("Compilation validation only")] + public void CreateMultipleAudioClients() + { + OpenAIClient client = new(""); + AudioClient ttsClient = client.GetAudioClient("tts-1"); + AudioClient whisperClient = client.GetAudioClient("whisper-1"); + } + } +} diff --git a/.dotnet/tests/Samples/CombinationSamples.cs b/.dotnet/tests/Samples/CombinationSamples.cs new file mode 100644 index 000000000..d105b3839 --- /dev/null +++ b/.dotnet/tests/Samples/CombinationSamples.cs @@ -0,0 +1,151 @@ +using NUnit.Framework; +using OpenAI.Audio; +using OpenAI.Chat; +using OpenAI.Images; +using System; +using System.ClientModel; +using System.IO; +using System.Threading.Tasks; + +namespace OpenAI.Samples.Miscellaneous +{ + public partial class CombinationSamples + { + [Test] + [Ignore("Compilation validation")] + public void AlpacaArtAssessor() + { + // First, we create an image using dall-e-3: + ImageClient imageClient = new("dall-e-3"); + ClientResult imageResult = imageClient.GenerateImage( + "a majestic alpaca on a mountain ridge, backed by an expansive blue sky accented with sparse clouds", + new() + { + Style = ImageStyle.Vivid, + Quality = ImageQuality.High, + Size = ImageSize.Size1792x1024, + }); + GeneratedImage imageGeneration = imageResult.Value; + Console.WriteLine($"Majestic alpaca available at:\n{imageGeneration.ImageUri.AbsoluteUri}"); + + // Now, we'll ask a cranky art critic to evaluate the image using gpt-4-vision-preview: + ChatClient chatClient = new("gpt-4-vision-preview"); + ClientResult chatResult = chatClient.CompleteChat( + [ + new ChatRequestSystemMessage("Assume the role of a cranky art critic. 
When asked to describe or " + + "evaluate imagery, focus on criticizing elements of subject, composition, and other details."), + new ChatRequestUserMessage( + "describe the following image in a few sentences", + ChatMessageContent.CreateImage(imageGeneration.ImageUri)), + ], + new ChatCompletionOptions() + { + MaxTokens = 2048, + }); + string chatResponseText = chatResult.Value.Content; + Console.WriteLine($"Art critique of majestic alpaca:\n{chatResponseText}"); + + // Finally, we'll get some text-to-speech for that critical evaluation using tts-1-hd: + AudioClient audioClient = new("tts-1-hd"); + ClientResult ttsResult = audioClient.GenerateSpeechFromText( + text: chatResponseText, + TextToSpeechVoice.Fable, + new TextToSpeechOptions() + { + SpeedMultiplier = 0.9f, + ResponseFormat = AudioDataFormat.Opus, + }); + FileInfo ttsFileInfo = new($"{chatResult.Value.Id}.opus"); + using (FileStream ttsFileStream = ttsFileInfo.Create()) + using (BinaryWriter ttsFileWriter = new(ttsFileStream)) + { + ttsFileWriter.Write(ttsResult.Value); + } + Console.WriteLine($"Alpaca evaluation audio available at:\n{new Uri(ttsFileInfo.FullName).AbsoluteUri}"); + } + + [Test] + [Ignore("Compilation validation")] + public async Task CuriousCreatureCreator() + { + // First, we'll use gpt-4 to have a creative helper imagine a twist on a household pet + ChatClient creativeWriterClient = new("gpt-4"); + ClientResult creativeWriterResult = creativeWriterClient.CompleteChat( + [ + new ChatRequestSystemMessage("You're a creative helper that specializes in brainstorming designs for concepts that fuse ordinary, mundane items with a fantastical touch. In particular, you can provide good one-paragraph descriptions of concept images."), + new ChatRequestUserMessage("Imagine a household pet. Now add in a subtle touch of magic or 'different'. What do you imagine? 
Provide a one-paragraph description of a picture of this new creature, focusing on the details of the imagery such that it'd be suitable for creating a picture."), + ], + new ChatCompletionOptions() + { + MaxTokens = 2048, + }); + string description = creativeWriterResult.Value.Content; + Console.WriteLine($"Creative helper's creature description:\n{description}"); + + // Asynchronously, in parallel to the next steps, we'll get the creative description in the voice of Onyx + AudioClient ttsClient = new("tts-1-hd"); + Task> imageDescriptionAudioTask = ttsClient.GenerateSpeechFromTextAsync( + description, + TextToSpeechVoice.Onyx, + new TextToSpeechOptions() + { + SpeedMultiplier = 1.1f, + ResponseFormat = AudioDataFormat.Opus, + }); + _ = Task.Run(async () => + { + ClientResult audioResult = await imageDescriptionAudioTask; + FileInfo audioFileInfo = new FileInfo($"{creativeWriterResult.Value.Id}-description.opus"); + using FileStream fileStream = audioFileInfo.Create(); + using BinaryWriter fileWriter = new(fileStream); + fileWriter.Write(audioResult.Value); + Console.WriteLine($"Spoken description available at:\n{new Uri(audioFileInfo.FullName).AbsoluteUri}"); + }); + + // Meanwhile, we'll use dall-e-3 to generate a rendition of our LLM artist's vision + ImageClient imageGenerationClient = new("dall-e-3"); + ClientResult imageGenerationResult = await imageGenerationClient.GenerateImageAsync( + description, + new ImageGenerationOptions() + { + Size = ImageSize.Size1792x1024, + Quality = ImageQuality.High, + }); + Uri imageLocation = imageGenerationResult.Value.ImageUri; + Console.WriteLine($"Creature image available at:\n{imageLocation.AbsoluteUri}"); + + // Now, we'll use gpt-4-vision-preview to get a hopelessly taken assessment from a usually exigent art connoisseur + ChatClient imageCriticClient = new("gpt-4-vision-preview"); + ClientResult criticalAppraisalResult = await imageCriticClient.CompleteChatAsync( + [ + new ChatRequestSystemMessage("Assume the role of an art critic. Although usually cranky and occasionally even referred to as a 'curmudgeon', you're somehow entirely smitten with the subject presented to you and, despite your best efforts, can't help but lavish praise when you're asked to appraise a provided image."), + new ChatRequestUserMessage( + "Evaluate this image for me. 
What is it, and what do you think of it?", + ChatMessageContent.CreateImage(imageLocation)), + ], + new ChatCompletionOptions() + { + MaxTokens = 2048, + }); + string appraisal = criticalAppraisalResult.Value.Content; + Console.WriteLine($"Critic's appraisal:\n{appraisal}"); + + // Finally, we'll get that art expert's laudations in the voice of Fable + ClientResult appraisalAudioResult = await ttsClient.GenerateSpeechFromTextAsync( + appraisal, + TextToSpeechVoice.Fable, + new TextToSpeechOptions() + { + ResponseFormat = AudioDataFormat.Opus, + SpeedMultiplier = 0.9f, + }); + FileInfo criticAudioFileInfo = new($"{criticalAppraisalResult.Value.Id}-appraisal.opus"); + using (FileStream criticStream = criticAudioFileInfo.Create()) + using (BinaryWriter criticFileWriter = new(criticStream)) + { + criticFileWriter.Write(appraisalAudioResult.Value); + } + Console.WriteLine($"Critical appraisal available at:\n{new Uri(criticAudioFileInfo.FullName).AbsoluteUri}"); + } + } +} diff --git a/.dotnet/tests/Samples/Embeddings/Sample01_SimpleEmbedding.cs b/.dotnet/tests/Samples/Embeddings/Sample01_SimpleEmbedding.cs new file mode 100644 index 000000000..be1461097 --- /dev/null +++ b/.dotnet/tests/Samples/Embeddings/Sample01_SimpleEmbedding.cs @@ -0,0 +1,31 @@ +using NUnit.Framework; +using OpenAI.Embeddings; +using System; + +namespace OpenAI.Samples +{ + public partial class EmbeddingSamples + { + [Test] + [Ignore("Compilation validation only")] + public void Sample01_SimpleEmbedding() + { + EmbeddingClient client = new("text-embedding-3-small", Environment.GetEnvironmentVariable("OpenAIClient_KEY")); + + string description = + "Best hotel in town if you like luxury hotels. They have an amazing infinity pool, a spa," + + " and a really helpful concierge. The location is perfect -- right downtown, close to all " + + " the tourist attractions. We highly recommend this hotel."; + + Embedding embedding = client.GenerateEmbedding(description); + ReadOnlyMemory vector = embedding.Vector; + + Console.WriteLine($"Dimension: {vector.Length}"); + Console.WriteLine($"Floats: "); + for (int i = 0; i < vector.Length; i++) + { + Console.WriteLine($" [{i}] = {vector.Span[i]}"); + } + } + } +} diff --git a/.dotnet/tests/Samples/Embeddings/Sample01_SimpleEmbeddingAsync.cs b/.dotnet/tests/Samples/Embeddings/Sample01_SimpleEmbeddingAsync.cs new file mode 100644 index 000000000..e2eb0653a --- /dev/null +++ b/.dotnet/tests/Samples/Embeddings/Sample01_SimpleEmbeddingAsync.cs @@ -0,0 +1,32 @@ +using NUnit.Framework; +using OpenAI.Embeddings; +using System; +using System.Threading.Tasks; + +namespace OpenAI.Samples +{ + public partial class EmbeddingSamples + { + [Test] + [Ignore("Compilation validation only")] + public async Task Sample01_SimpleEmbeddingAsync() + { + EmbeddingClient client = new("text-embedding-3-small", Environment.GetEnvironmentVariable("OpenAIClient_KEY")); + + string description = + "Best hotel in town if you like luxury hotels. They have an amazing infinity pool, a spa," + + " and a really helpful concierge. The location is perfect -- right downtown, close to all " + + " the tourist attractions. 
We highly recommend this hotel."; + + Embedding embedding = await client.GenerateEmbeddingAsync(description); + ReadOnlyMemory<float> vector = embedding.Vector; + + Console.WriteLine($"Dimension: {vector.Length}"); + Console.WriteLine($"Floats: "); + for (int i = 0; i < vector.Length; i++) + { + Console.WriteLine($" [{i}] = {vector.Span[i]}"); + } + } + } +} diff --git a/.dotnet/tests/Samples/Embeddings/Sample02_EmbeddingWithOptions.cs b/.dotnet/tests/Samples/Embeddings/Sample02_EmbeddingWithOptions.cs new file mode 100644 index 000000000..5872c7e91 --- /dev/null +++ b/.dotnet/tests/Samples/Embeddings/Sample02_EmbeddingWithOptions.cs @@ -0,0 +1,33 @@ +using NUnit.Framework; +using OpenAI.Embeddings; +using System; + +namespace OpenAI.Samples +{ + public partial class EmbeddingSamples + { + [Test] + [Ignore("Compilation validation only")] + public void Sample02_EmbeddingWithOptions() + { + EmbeddingClient client = new("text-embedding-3-small", Environment.GetEnvironmentVariable("OpenAIClient_KEY")); + + string description = + "Best hotel in town if you like luxury hotels. They have an amazing infinity pool, a spa," + + " and a really helpful concierge. The location is perfect -- right downtown, close to all " + + " the tourist attractions. We highly recommend this hotel."; + + EmbeddingOptions options = new() { Dimensions = 512 }; + + Embedding embedding = client.GenerateEmbedding(description, options); + ReadOnlyMemory<float> vector = embedding.Vector; + + Console.WriteLine($"Dimension: {vector.Length}"); + Console.WriteLine($"Floats: "); + for (int i = 0; i < vector.Length; i++) + { + Console.WriteLine($" [{i}] = {vector.Span[i]}"); + } + } + } +} diff --git a/.dotnet/tests/Samples/Embeddings/Sample02_EmbeddingWithOptionsAsync.cs b/.dotnet/tests/Samples/Embeddings/Sample02_EmbeddingWithOptionsAsync.cs new file mode 100644 index 000000000..a32e11646 --- /dev/null +++ b/.dotnet/tests/Samples/Embeddings/Sample02_EmbeddingWithOptionsAsync.cs @@ -0,0 +1,34 @@ +using NUnit.Framework; +using OpenAI.Embeddings; +using System; +using System.Threading.Tasks; + +namespace OpenAI.Samples +{ + public partial class EmbeddingSamples + { + [Test] + [Ignore("Compilation validation only")] + public async Task Sample02_EmbeddingWithOptionsAsync() + { + EmbeddingClient client = new("text-embedding-3-small", Environment.GetEnvironmentVariable("OpenAIClient_KEY")); + + string description = + "Best hotel in town if you like luxury hotels. They have an amazing infinity pool, a spa," + + " and a really helpful concierge. The location is perfect -- right downtown, close to all " + + " the tourist attractions. 
We highly recommend this hotel."; + + EmbeddingOptions options = new() { Dimensions = 512 }; + + Embedding embedding = await client.GenerateEmbeddingAsync(description, options); + ReadOnlyMemory<float> vector = embedding.Vector; + + Console.WriteLine($"Dimension: {vector.Length}"); + Console.WriteLine($"Floats: "); + for (int i = 0; i < vector.Length; i++) + { + Console.WriteLine($" [{i}] = {vector.Span[i]}"); + } + } + } +} diff --git a/.dotnet/tests/Samples/Embeddings/Sample03_MultipleEmbeddings.cs b/.dotnet/tests/Samples/Embeddings/Sample03_MultipleEmbeddings.cs new file mode 100644 index 000000000..f74a0c05f --- /dev/null +++ b/.dotnet/tests/Samples/Embeddings/Sample03_MultipleEmbeddings.cs @@ -0,0 +1,40 @@ +using NUnit.Framework; +using OpenAI.Embeddings; +using System; +using System.Collections.Generic; + +namespace OpenAI.Samples +{ + public partial class EmbeddingSamples + { + [Test] + [Ignore("Compilation validation only")] + public void Sample03_MultipleEmbeddings() + { + EmbeddingClient client = new("text-embedding-3-small", Environment.GetEnvironmentVariable("OpenAIClient_KEY")); + + string category = "Luxury"; + string description = + "Best hotel in town if you like luxury hotels. They have an amazing infinity pool, a spa," + + " and a really helpful concierge. The location is perfect -- right downtown, close to all " + + " the tourist attractions. We highly recommend this hotel."; + List<string> inputs = [category, description]; + + EmbeddingCollection collection = client.GenerateEmbeddings(inputs); + + foreach (Embedding embedding in collection) + { + ReadOnlyMemory<float> vector = embedding.Vector; + + Console.WriteLine($"Dimension: {vector.Length}"); + Console.WriteLine($"Floats: "); + for (int i = 0; i < vector.Length; i++) + { + Console.WriteLine($" [{i}] = {vector.Span[i]}"); + } + + Console.WriteLine(); + } + } + } +} diff --git a/.dotnet/tests/Samples/Embeddings/Sample03_MultipleEmbeddingsAsync.cs b/.dotnet/tests/Samples/Embeddings/Sample03_MultipleEmbeddingsAsync.cs new file mode 100644 index 000000000..1ee87811f --- /dev/null +++ b/.dotnet/tests/Samples/Embeddings/Sample03_MultipleEmbeddingsAsync.cs @@ -0,0 +1,41 @@ +using NUnit.Framework; +using OpenAI.Embeddings; +using System; +using System.Collections.Generic; +using System.Threading.Tasks; + +namespace OpenAI.Samples +{ + public partial class EmbeddingSamples + { + [Test] + [Ignore("Compilation validation only")] + public async Task Sample03_MultipleEmbeddingsAsync() + { + EmbeddingClient client = new("text-embedding-3-small", Environment.GetEnvironmentVariable("OpenAIClient_KEY")); + + string category = "Luxury"; + string description = + "Best hotel in town if you like luxury hotels. They have an amazing infinity pool, a spa," + + " and a really helpful concierge. The location is perfect -- right downtown, close to all " + + " the tourist attractions. 
We highly recommend this hotel."; + List<string> inputs = [category, description]; + + EmbeddingCollection collection = await client.GenerateEmbeddingsAsync(inputs); + + foreach (Embedding embedding in collection) + { + ReadOnlyMemory<float> vector = embedding.Vector; + + Console.WriteLine($"Dimension: {vector.Length}"); + Console.WriteLine($"Floats: "); + for (int i = 0; i < vector.Length; i++) + { + Console.WriteLine($" [{i}] = {vector.Span[i]}"); + } + + Console.WriteLine(); + } + } + } +} diff --git a/.dotnet/tests/Samples/Images/Sample01_SimpleImage.cs b/.dotnet/tests/Samples/Images/Sample01_SimpleImage.cs new file mode 100644 index 000000000..725e1cab3 --- /dev/null +++ b/.dotnet/tests/Samples/Images/Sample01_SimpleImage.cs @@ -0,0 +1,39 @@ +using NUnit.Framework; +using OpenAI.Images; +using System; +using System.IO; + +namespace OpenAI.Samples +{ + public partial class ImageSamples + { + [Test] + [Ignore("Compilation validation only")] + public void Sample01_SimpleImage() + { + ImageClient client = new("dall-e-3", Environment.GetEnvironmentVariable("OpenAIClient_KEY")); + + string prompt = "The concept for a living room that blends Scandinavian simplicity with Japanese minimalism for" + + " a serene and cozy atmosphere. It's a space that invites relaxation and mindfulness, with natural light" + + " and fresh air. Using neutral tones, including colors like white, beige, gray, and black, that create a" + + " sense of harmony. Featuring sleek wood furniture with clean lines and subtle curves to add warmth and" + + " elegance. Plants and flowers in ceramic pots adding color and life to a space. They can serve as focal" + + " points, creating a connection with nature. Soft textiles and cushions in organic fabrics adding comfort" + + " and softness to a space. They can serve as accents, adding contrast and texture."; + + ImageGenerationOptions options = new() + { + Quality = ImageQuality.High, + Size = ImageSize.Size1792x1024, + Style = ImageStyle.Vivid, + ResponseFormat = ImageResponseFormat.Bytes + }; + + GeneratedImage image = client.GenerateImage(prompt, options); + BinaryData bytes = image.ImageBytes; + + using FileStream stream = File.OpenWrite($"{Guid.NewGuid()}.png"); + bytes.ToStream().CopyTo(stream); + } + } +} diff --git a/.dotnet/tests/Samples/Images/Sample01_SimpleImageAsync.cs b/.dotnet/tests/Samples/Images/Sample01_SimpleImageAsync.cs new file mode 100644 index 000000000..a3c55901a --- /dev/null +++ b/.dotnet/tests/Samples/Images/Sample01_SimpleImageAsync.cs @@ -0,0 +1,40 @@ +using NUnit.Framework; +using OpenAI.Images; +using System; +using System.IO; +using System.Threading.Tasks; + +namespace OpenAI.Samples +{ + public partial class ImageSamples + { + [Test] + [Ignore("Compilation validation only")] + public async Task Sample01_SimpleImageAsync() + { + ImageClient client = new("dall-e-3", Environment.GetEnvironmentVariable("OpenAIClient_KEY")); + + string prompt = "The concept for a living room that blends Scandinavian simplicity with Japanese minimalism for" + + " a serene and cozy atmosphere. It's a space that invites relaxation and mindfulness, with natural light" + + " and fresh air. Using neutral tones, including colors like white, beige, gray, and black, that create a" + + " sense of harmony. Featuring sleek wood furniture with clean lines and subtle curves to add warmth and" + + " elegance. Plants and flowers in ceramic pots adding color and life to a space. They can serve as focal" + + " points, creating a connection with nature. 
Soft textiles and cushions in organic fabrics adding comfort" + + " and softness to a space. They can serve as accents, adding contrast and texture."; + + ImageGenerationOptions options = new() + { + Quality = ImageQuality.High, + Size = ImageSize.Size1792x1024, + Style = ImageStyle.Vivid, + ResponseFormat = ImageResponseFormat.Bytes + }; + + GeneratedImage image = await client.GenerateImageAsync(prompt, options); + BinaryData bytes = image.ImageBytes; + + using FileStream stream = File.OpenWrite($"{Guid.NewGuid()}.png"); + bytes.ToStream().CopyTo(stream); + } + } +} diff --git a/.dotnet/tests/Samples/Images/Sample02_SimpleImageEdit.cs b/.dotnet/tests/Samples/Images/Sample02_SimpleImageEdit.cs new file mode 100644 index 000000000..81b0f486c --- /dev/null +++ b/.dotnet/tests/Samples/Images/Sample02_SimpleImageEdit.cs @@ -0,0 +1,39 @@ +using NUnit.Framework; +using OpenAI.Images; +using System; +using System.IO; + +namespace OpenAI.Samples +{ + public partial class ImageSamples + { + [Test] + [Ignore("Compilation validation only")] + public void Sample02_SimpleImageEdit() + { + ImageClient client = new("dall-e-2", Environment.GetEnvironmentVariable("OpenAIClient_KEY")); + + string imagePath = Path.Combine("Assets", "edit_sample_image.png"); + BinaryData imageBytes = BinaryData.FromBytes(File.ReadAllBytes(imagePath)); + + string prompt = "An inflatable flamingo float in a pool"; + + string maskPath = Path.Combine("Assets", "edit_sample_mask.png"); + BinaryData maskBytes = BinaryData.FromBytes(File.ReadAllBytes(maskPath)); + + ImageEditOptions options = new() + { + MaskBytes = maskBytes, + MaskFileName = "edit_sample_mask.png", + Size = ImageSize.Size1024x1024, + ResponseFormat = ImageResponseFormat.Bytes + }; + + GeneratedImageCollection image = client.GenerateImageEdits(imageBytes, "edit_sample_image.png", prompt, 1, options); + BinaryData bytes = image[0].ImageBytes; + + using FileStream stream = File.OpenWrite($"{Guid.NewGuid()}.png"); + bytes.ToStream().CopyTo(stream); + } + } +} diff --git a/.dotnet/tests/Samples/Images/Sample02_SimpleImageEditAsync.cs b/.dotnet/tests/Samples/Images/Sample02_SimpleImageEditAsync.cs new file mode 100644 index 000000000..a143ba328 --- /dev/null +++ b/.dotnet/tests/Samples/Images/Sample02_SimpleImageEditAsync.cs @@ -0,0 +1,40 @@ +using NUnit.Framework; +using OpenAI.Images; +using System; +using System.IO; +using System.Threading.Tasks; + +namespace OpenAI.Samples +{ + public partial class ImageSamples + { + [Test] + [Ignore("Compilation validation only")] + public async Task Sample02_SimpleImageEditAsync() + { + ImageClient client = new("dall-e-2", Environment.GetEnvironmentVariable("OpenAIClient_KEY")); + + string imagePath = Path.Combine("Assets", "edit_sample_image.png"); + BinaryData imageBytes = BinaryData.FromBytes(File.ReadAllBytes(imagePath)); + + string prompt = "An inflatable flamingo float in a pool"; + + string maskPath = Path.Combine("Assets", "edit_sample_mask.png"); + BinaryData maskBytes = BinaryData.FromBytes(File.ReadAllBytes(maskPath)); + + ImageEditOptions options = new() + { + MaskBytes = maskBytes, + MaskFileName = "edit_sample_mask.png", + Size = ImageSize.Size1024x1024, + ResponseFormat = ImageResponseFormat.Bytes + }; + + GeneratedImageCollection image = await client.GenerateImageEditsAsync(imageBytes, "edit_sample_image.png", prompt, 1, options); + BinaryData bytes = image[0].ImageBytes; + + using FileStream stream = File.OpenWrite($"{Guid.NewGuid()}.png"); + bytes.ToStream().CopyTo(stream); + } + } +} diff --git 
a/.dotnet/tests/Samples/Images/Sample03_SimpleImageVariation.cs b/.dotnet/tests/Samples/Images/Sample03_SimpleImageVariation.cs new file mode 100644 index 000000000..409dd3cef --- /dev/null +++ b/.dotnet/tests/Samples/Images/Sample03_SimpleImageVariation.cs @@ -0,0 +1,32 @@ +using NUnit.Framework; +using OpenAI.Images; +using System; +using System.IO; + +namespace OpenAI.Samples +{ + public partial class ImageSamples + { + [Test] + [Ignore("Compilation validation only")] + public void Sample03_SimpleImageVariation() + { + ImageClient client = new("dall-e-2", Environment.GetEnvironmentVariable("OpenAIClient_KEY")); + + string imagePath = Path.Combine("Assets", "variation_sample_image.png"); + BinaryData imageBytes = BinaryData.FromBytes(File.ReadAllBytes(imagePath)); + + ImageVariationOptions options = new() + { + Size = ImageSize.Size1024x1024, + ResponseFormat = ImageResponseFormat.Bytes + }; + + GeneratedImageCollection image = client.GenerateImageVariations(imageBytes, "variation_sample_image.png", 1, options); + BinaryData bytes = image[0].ImageBytes; + + using FileStream stream = File.OpenWrite($"{Guid.NewGuid()}.png"); + bytes.ToStream().CopyTo(stream); + } + } +} diff --git a/.dotnet/tests/Samples/Images/Sample03_SimpleImageVariationAsync.cs b/.dotnet/tests/Samples/Images/Sample03_SimpleImageVariationAsync.cs new file mode 100644 index 000000000..257abc1fe --- /dev/null +++ b/.dotnet/tests/Samples/Images/Sample03_SimpleImageVariationAsync.cs @@ -0,0 +1,33 @@ +using NUnit.Framework; +using OpenAI.Images; +using System; +using System.IO; +using System.Threading.Tasks; + +namespace OpenAI.Samples +{ + public partial class ImageSamples + { + [Test] + [Ignore("Compilation validation only")] + public async Task Sample03_SimpleImageVariationAsync() + { + ImageClient client = new("dall-e-2", Environment.GetEnvironmentVariable("OpenAIClient_KEY")); + + string imagePath = Path.Combine("Assets", "variation_sample_image.png"); + BinaryData imageBytes = BinaryData.FromBytes(File.ReadAllBytes(imagePath)); + + ImageVariationOptions options = new() + { + Size = ImageSize.Size1024x1024, + ResponseFormat = ImageResponseFormat.Bytes + }; + + GeneratedImageCollection image = await client.GenerateImageVariationsAsync(imageBytes, "variation_sample_image.png", 1, options); + BinaryData bytes = image[0].ImageBytes; + + using FileStream stream = File.OpenWrite($"{Guid.NewGuid()}.png"); + bytes.ToStream().CopyTo(stream); + } + } +} diff --git a/.dotnet/tests/TestScenarios/AssistantTests.cs b/.dotnet/tests/TestScenarios/AssistantTests.cs new file mode 100644 index 000000000..ecb3b1163 --- /dev/null +++ b/.dotnet/tests/TestScenarios/AssistantTests.cs @@ -0,0 +1,164 @@ +using NUnit.Framework; +using OpenAI.Assistants; +using System; +using System.ClientModel; +using System.Threading.Tasks; +using static OpenAI.Tests.TestHelpers; + +namespace OpenAI.Tests.Assistants; + +#pragma warning disable OPENAI001 +public partial class AssistantTests +{ + [Test] + public void ListingAssistantsWorks() + { + AssistantClient client = new(); + ClientResult<ListQueryPage<Assistant>> result = client.GetAssistants(); + Assert.That(result.Value, Is.Not.Null.Or.Empty); + } + + [Test] + public void CreatingAndDeletingAssistantsWorks() + { + AssistantClient client = GetTestClient<AssistantClient>(TestScenario.Assistants); + ClientResult<Assistant> result = client.CreateAssistant("gpt-3.5-turbo"); + Assert.That(result.Value, Is.Not.Null); + Assert.That(result.Value.Id, Is.Not.Null.Or.Empty); + ClientResult<bool> deletionResult = client.DeleteAssistant(result.Value.Id); + 
Assert.That(deletionResult.Value, Is.True); + } + + [Test] + public async Task AddingMessagesWorks() + { + AssistantClient client = new(); + ClientResult<AssistantThread> threadResult = await client.CreateThreadAsync(new ThreadCreationOptions() + { + Messages = + { + new(MessageRole.User, "this is an initial message on the thread"), + "this is another one done an easier way" + }, + Metadata = + { + ["test_key"] = "test_value", + [s_cleanupMetadataKey] = "true", + } + }); + ClientResult<ListQueryPage<ThreadMessage>> messagesResult = await client.GetMessagesAsync(threadResult.Value.Id); + Assert.That(messagesResult.Value?.Count, Is.EqualTo(2)); + ThreadMessage latestMessage = messagesResult.Value[0]; + ThreadMessage oldestMessage = messagesResult.Value[1]; + Assert.That(latestMessage.Role, Is.EqualTo(MessageRole.User)); + Assert.That(latestMessage.ContentItems, Is.Not.Null.Or.Empty); + MessageTextContent textContent = latestMessage.ContentItems[0] as MessageTextContent; + Assert.That(textContent, Is.Not.Null); + Assert.That(textContent.Text, Is.Not.Null.Or.Empty); + Assert.That(textContent.Text, Contains.Substring("easier way")); + } + + [Test] + public async Task BasicFunctionToolWorks() + { + AssistantClient client = GetTestClient(); + ClientResult<Assistant> assistantResult = await client.CreateAssistantAsync( + "gpt-3.5-turbo", + new AssistantCreationOptions() + { + Tools = + { + new FunctionToolDefinition() + { + Name = "get_favorite_food_for_day_of_week", + Description = "gets the user's favorite food for a given day of the week, like Tuesday", + Parameters = BinaryData.FromObjectAsJson(new + { + type = "object", + properties = new + { + day_of_week = new + { + type = "string", + description = "a day of the week, like Tuesday or Saturday", + } + } + }), + }, + }, + Metadata = + { + [s_cleanupMetadataKey] = "true", + } + }); + Assert.That(assistantResult.Value.DefaultTools, Is.Not.Null.Or.Empty); + FunctionToolDefinition functionTool = assistantResult.Value.DefaultTools[0] as FunctionToolDefinition; + Assert.That(functionTool, Is.Not.Null); + Assert.That(functionTool.Parameters, Is.Not.Null); + + ClientResult<AssistantThread> threadResult = await client.CreateThreadAsync( + new ThreadCreationOptions() + { + Messages = + { + "what should I eat on Thursday?", + }, + Metadata = + { + [s_cleanupMetadataKey] = "true", + } + }); + ClientResult<ThreadRun> runResult = await client.CreateRunAsync(threadResult.Value.Id, assistantResult.Value.Id); + Assert.That(runResult.Value.Id, Is.Not.Null.Or.Empty); + do + { + await Task.Delay(500); + runResult = await client.GetRunAsync(threadResult.Value.Id, runResult.Value.Id); + } while (runResult.Value.Status == RunStatus.Queued || runResult.Value.Status == RunStatus.InProgress); + Assert.That(runResult.Value.Status, Is.EqualTo(RunStatus.RequiresAction)); + Assert.That(runResult.Value.RequiredActions?.Count, Is.EqualTo(1)); + RequiredFunctionToolCall requiredFunctionToolCall = runResult.Value.RequiredActions[0] as RequiredFunctionToolCall; + Assert.That(requiredFunctionToolCall, Is.Not.Null); + _ = await client.SubmitToolOutputsAsync(threadResult.Value.Id, runResult.Value.Id, + [ + new ToolOutput(requiredFunctionToolCall, "tacos"), + ]); + runResult = await client.GetRunAsync(threadResult.Value.Id, runResult.Value.Id); + Assert.That(runResult.Value.Status, Is.Not.EqualTo(RunStatus.RequiresAction)); + } + + private async Task<Assistant> CreateCommonTestAssistantAsync() + { + AssistantClient client = new(); + ClientResult<Assistant> newAssistantResult = await client.CreateAssistantAsync("gpt-3.5-turbo", new() + { + Name = s_testAssistantName, + Metadata = + { + 
["test_id"] = "test_id_goes_here", + [s_cleanupMetadataKey] = "true", + }, + }); + return newAssistantResult.Value; + } + + private async Task DeleteRecentTestThings() + { + AssistantClient client = GetTestClient(); + foreach(Assistant assistant in client.GetAssistants().Value) + { + if (assistant.Name == s_testAssistantName + || assistant.Metadata?.ContainsKey(s_cleanupMetadataKey) == true) + { + _ = await client.DeleteAssistantAsync(assistant.Id); + } + } + } + + private static AssistantClient GetTestClient() => GetTestClient(TestScenario.Assistants); + + private static readonly string s_testAssistantName = $".NET SDK Test Assistant - Please Delete Me"; + private static readonly string s_cleanupMetadataKey = $"test_metadata_cleanup_eligible"; +} + +#pragma warning restore OPENAI001 diff --git a/.dotnet/tests/TestScenarios/ChatClientTests.cs b/.dotnet/tests/TestScenarios/ChatClientTests.cs new file mode 100644 index 000000000..4624f2703 --- /dev/null +++ b/.dotnet/tests/TestScenarios/ChatClientTests.cs @@ -0,0 +1,113 @@ +using NUnit.Framework; +using OpenAI.Chat; +using System; +using System.ClientModel; +using System.Collections.Generic; +using System.Diagnostics; +using System.Linq; +using System.Net; +using System.Threading.Tasks; +using static OpenAI.Tests.TestHelpers; + +namespace OpenAI.Tests.Chat; + +public partial class ChatClientTests +{ + [Test] + public void HelloWorldChat() + { + ChatClient client = GetTestClient(TestScenario.Chat); // new("gpt-3.5-turbo"); + Assert.That(client, Is.InstanceOf()); + ClientResult result = client.CompleteChat("Hello, world!"); + Assert.That(result, Is.InstanceOf>()); + Assert.That(result.Value.Content?.ContentKind, Is.EqualTo(ChatMessageContentKind.Text)); + Assert.That(result.Value.Content.ToText().Length, Is.GreaterThan(0)); + } + + [Test] + public void HelloWorldWithTopLevelClient() + { + OpenAIClient client = new(credential: new(Environment.GetEnvironmentVariable("OPENAI_API_KEY"))); + ChatClient chatClient = client.GetChatClient("gpt-3.5-turbo"); + ClientResult result = chatClient.CompleteChat("Hello, world!"); + Assert.That(result, Is.InstanceOf>()); + Assert.That(result.Value.Content.ToString().Length, Is.GreaterThan(0)); + } + [Test] + public void MultiMessageChat() + { + ChatClient client = new("gpt-3.5-turbo"); + ClientResult result = client.CompleteChat( + [ + new ChatRequestSystemMessage("You are a helpful assistant. You always talk like a pirate."), + new ChatRequestUserMessage("Hello, assistant! Can you help me train my parrot?"), + ]); + Assert.That(new string[] { "aye", "arr", "hearty" }.Any(pirateWord => result.Value.Content.ToString().ToLowerInvariant().Contains(pirateWord))); + } + + [Test] + public async Task StreamingChat() + { + ChatClient client = new("gpt-3.5-turbo"); + + TimeSpan? firstTokenReceiptTime = null; + TimeSpan? latestTokenReceiptTime = null; + Stopwatch stopwatch = Stopwatch.StartNew(); + + StreamingClientResult streamingResult + = client.CompleteChatStreaming("What are the best pizza toppings? 
+ StreamingClientResult<StreamingChatUpdate> streamingResult + = client.CompleteChatStreaming("What are the best pizza toppings? Give me a breakdown on the reasons."); + Assert.That(streamingResult, Is.InstanceOf<StreamingClientResult<StreamingChatUpdate>>()); + int updateCount = 0; + + await foreach (StreamingChatUpdate chatUpdate in streamingResult) + { + firstTokenReceiptTime ??= stopwatch.Elapsed; + latestTokenReceiptTime = stopwatch.Elapsed; + Console.WriteLine(stopwatch.Elapsed.TotalMilliseconds); + updateCount++; + } + Assert.That(updateCount, Is.GreaterThan(1)); + Assert.That(latestTokenReceiptTime - firstTokenReceiptTime > TimeSpan.FromMilliseconds(500)); + } + + [Test] + public void TwoTurnChat() + { + ChatClient client = GetTestClient(); + + List<ChatRequestMessage> messages = + [ + ChatRequestMessage.CreateUserMessage("What are ten of the most common colors, including the brightest and darkest?"), + ]; + ClientResult<ChatCompletion> firstResult = client.CompleteChat(messages); + Assert.That(firstResult?.Value, Is.Not.Null); + Assert.That(firstResult.Value.Content.ToString().ToLowerInvariant(), Contains.Substring("white")); + Assert.That(firstResult.Value.Content.ToString().ToLowerInvariant(), Contains.Substring("black")); + messages.Add(new ChatRequestAssistantMessage(firstResult.Value)); + messages.Add(new ChatRequestUserMessage("Which of those are considered brightest, aligning with hexadecimal rgb notation?")); + ClientResult<ChatCompletion> secondResult = client.CompleteChat(messages); + Assert.That(secondResult?.Value, Is.Not.Null); + Assert.That(secondResult.Value.Content.ToString().ToLowerInvariant(), Contains.Substring("white")); + Assert.That(secondResult.Value.Content.ToString().ToLowerInvariant(), Does.Not.Contain("black")); + } + + [Test] + public void AuthFailure() + { + ChatClient client = new("gpt-3.5-turbo", new ApiKeyCredential("not-a-real-key")); + Exception caughtException = null; + try + { + _ = client.CompleteChat("Uh oh, this isn't going to work with that key"); + } + catch (Exception ex) + { + caughtException = ex; + } + var clientResultException = caughtException as ClientResultException; + Assert.That(clientResultException, Is.Not.Null); + Assert.That(clientResultException.Status, Is.EqualTo((int)HttpStatusCode.Unauthorized)); + } + + private static ChatClient GetTestClient(string overrideModel = null) => GetTestClient<ChatClient>(TestScenario.Chat, overrideModel); +} diff --git a/.dotnet/tests/TestScenarios/ChatToolConstraints.cs b/.dotnet/tests/TestScenarios/ChatToolConstraints.cs new file mode 100644 index 000000000..c6b20d28f --- /dev/null +++ b/.dotnet/tests/TestScenarios/ChatToolConstraints.cs @@ -0,0 +1,70 @@ +using NUnit.Framework; +using OpenAI.Chat; +using System; +using System.ClientModel; + +namespace OpenAI.Tests.Chat; + +public partial class ChatToolConstraintTests +{ + [Test] + public void BasicTypeManipulationWorks() + { + Assert.That(ChatToolConstraint.Auto.ToString(), Is.EqualTo("\"auto\"")); + Assert.That(ChatToolConstraint.None.ToString(), Is.EqualTo("\"none\"")); + Assert.That(ChatToolConstraint.Auto, Is.Not.EqualTo(ChatToolConstraint.None)); + + ChatFunctionToolDefinition functionTool = new() + { + Name = "test_function_tool", + Description = "description isn't applicable", + }; + + ChatToolConstraint constraintFromDefinition = new(functionTool); + Assert.That(constraintFromDefinition.ToString(), Is.EqualTo(@$"{{""type"":""function"",""function"":{{""name"":""{functionTool.Name}""}}}}")); + + ChatToolConstraint otherConstraint = new(new ChatFunctionToolDefinition("test_function_tool")); + Assert.That(constraintFromDefinition, Is.EqualTo(otherConstraint)); + Assert.That(otherConstraint, Is.Not.EqualTo(ChatToolConstraint.Auto)); + } +
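+ // Pairs each tool constraint with the finish reason expected for the same prompt: with + // no constraint (null) or "auto", the model is free to call the tool (ToolCalls), while + // "none" and a forced specific function are reported with a normal Stopped finish reason.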
+ [Test] + public void ConstraintsWork() + { + ChatClient client = new("gpt-3.5-turbo"); + ChatCompletionOptions options = new() + { + Tools = { s_numberForWordTool }, + }; + + foreach (var (constraint, reason) in new (ChatToolConstraint?, ChatFinishReason)[] + { + (null, ChatFinishReason.ToolCalls), + (ChatToolConstraint.None, ChatFinishReason.Stopped), + (new ChatToolConstraint(s_numberForWordTool), ChatFinishReason.Stopped), + (ChatToolConstraint.Auto, ChatFinishReason.ToolCalls), + }) + { + options.ToolConstraint = constraint; + ClientResult<ChatCompletion> result = client.CompleteChat("What's the number for the word 'banana'?", options); + Assert.That(result.Value.FinishReason, Is.EqualTo(reason)); + } + } + + private static readonly ChatFunctionToolDefinition s_numberForWordTool = new() + { + Name = "get_number_for_word", + Description = "gets an arbitrary number assigned to a given word", + Parameters = BinaryData.FromObjectAsJson(new + { + type = "object", + properties = new + { + word = new + { + type = "string" + } + } + }), + }; +} diff --git a/.dotnet/tests/TestScenarios/ChatToolTests.cs b/.dotnet/tests/TestScenarios/ChatToolTests.cs new file mode 100644 index 000000000..a59d789b3 --- /dev/null +++ b/.dotnet/tests/TestScenarios/ChatToolTests.cs @@ -0,0 +1,90 @@ +using NUnit.Framework; +using OpenAI.Chat; +using System; +using System.ClientModel; +using System.Collections.Generic; +using System.Text.Json; +using System.Text.Json.Nodes; +using static OpenAI.Tests.TestHelpers; + +namespace OpenAI.Tests.Chat; + +public partial class ChatToolTests +{ + [Test] + public void NoParameterToolWorks() + { + ChatClient client = new("gpt-3.5-turbo"); + ChatFunctionToolDefinition getFavoriteColorTool = new() + { + Name = "get_favorite_color", + Description = "gets the favorite color of the caller", + }; + ChatCompletionOptions options = new() + { + Tools = { getFavoriteColorTool }, + }; + ClientResult<ChatCompletion> result = client.CompleteChat("What's my favorite color?", options); + Assert.That(result.Value.FinishReason, Is.EqualTo(ChatFinishReason.ToolCalls)); + Assert.That(result.Value.ToolCalls.Count, Is.EqualTo(1)); + var functionToolCall = result.Value.ToolCalls[0] as ChatFunctionToolCall; + Assert.That(functionToolCall, Is.Not.Null); + var toolCallArguments = BinaryData.FromString(functionToolCall.Arguments).ToObjectFromJson<Dictionary<string, object>>(); + Assert.That(functionToolCall.Name, Is.EqualTo(getFavoriteColorTool.Name)); + Assert.That(functionToolCall.Id, Is.Not.Null.Or.Empty); + Assert.That(toolCallArguments.Count, Is.EqualTo(0)); + + result = client.CompleteChat( + [ + new ChatRequestUserMessage("What's my favorite color?"), + new ChatRequestAssistantMessage(result.Value), + new ChatRequestToolMessage(functionToolCall.Id, "green"), + ]); + Assert.That(result.Value.FinishReason, Is.EqualTo(ChatFinishReason.Stopped)); + Assert.That(result.Value.Content.ToString().ToLowerInvariant(), Contains.Substring("green")); + } + + [Test] + public void ParametersWork() + { + ChatClient client = GetTestClient<ChatClient>(TestScenario.Chat); + ChatFunctionToolDefinition favoriteColorForMonthTool = new() + { + Name = "get_favorite_color_for_month", + Description = "gets the caller's favorite color for a given month", + Parameters = BinaryData.FromString(""" + { + "type": "object", + "properties": { + "month_name": { + "type": "string", + "description": "the name of a calendar month, e.g. February or October." 
+ } + }, + "required": [ "month_name" ] + } + """), + }; + ChatCompletionOptions options = new() + { + Tools = { favoriteColorForMonthTool }, + }; + List<ChatRequestMessage> messages = + [ + new ChatRequestUserMessage("What's my favorite color in February?"), + ]; + ClientResult<ChatCompletion> result = client.CompleteChat(messages, options); + Assert.That(result.Value.FinishReason, Is.EqualTo(ChatFinishReason.ToolCalls)); + Assert.That(result.Value.ToolCalls?.Count, Is.EqualTo(1)); + var functionToolCall = result.Value.ToolCalls[0] as ChatFunctionToolCall; + Assert.That(functionToolCall.Name, Is.EqualTo(favoriteColorForMonthTool.Name)); + JsonObject argumentsJson = JsonSerializer.Deserialize<JsonObject>(functionToolCall.Arguments); + Assert.That(argumentsJson.Count, Is.EqualTo(1)); + Assert.That(argumentsJson.ContainsKey("month_name")); + Assert.That(argumentsJson["month_name"].ToString().ToLowerInvariant(), Is.EqualTo("february")); + messages.Add(new ChatRequestAssistantMessage(result.Value)); + messages.Add(new ChatRequestToolMessage(functionToolCall.Id, "chartreuse")); + result = client.CompleteChat(messages, options); + Assert.That(result.Value.Content.ToString().ToLowerInvariant(), Contains.Substring("chartreuse")); + } +} diff --git a/.dotnet/tests/TestScenarios/ChatWithVision.cs b/.dotnet/tests/TestScenarios/ChatWithVision.cs new file mode 100644 index 000000000..fe6318bf0 --- /dev/null +++ b/.dotnet/tests/TestScenarios/ChatWithVision.cs @@ -0,0 +1,33 @@ +using NUnit.Framework; +using OpenAI.Chat; +using System; +using System.ClientModel; +using System.IO; +using System.Net.Mime; +using static OpenAI.Tests.TestHelpers; + +namespace OpenAI.Tests.Chat; + +public partial class ChatWithVision +{ + [Test] + public void DescribeAnImage() + { + var stopSignPath = Path.Combine("Assets", "stop_sign.png"); + var stopSignData = BinaryData.FromBytes(File.ReadAllBytes(stopSignPath)); + + ChatClient client = GetTestClient<ChatClient>(TestScenario.VisionChat); + + ClientResult<ChatCompletion> result = client.CompleteChat( + [ + new ChatRequestUserMessage( + "Describe this image for me", + ChatMessageContent.CreateImage(stopSignData, "image/png")), + ], new ChatCompletionOptions() + { + MaxTokens = 2048, + }); + Console.WriteLine(result.Value.Content); + Assert.That(result.Value.Content.ToString().ToLowerInvariant(), Contains.Substring("stop")); + } +} diff --git a/.dotnet/tests/TestScenarios/EmbeddingClientTests.cs b/.dotnet/tests/TestScenarios/EmbeddingClientTests.cs new file mode 100644 index 000000000..0915e1711 --- /dev/null +++ b/.dotnet/tests/TestScenarios/EmbeddingClientTests.cs @@ -0,0 +1,52 @@ +using NUnit.Framework; +using OpenAI.Embeddings; +using System.ClientModel; +using System.Collections.Generic; + +namespace OpenAI.Tests.Embeddings; + +public partial class EmbeddingClientTests +{ + [Test] + public void OneEmbedding() + { + EmbeddingClient client = new("text-embedding-ada-002"); + ClientResult<Embedding> response = client.GenerateEmbedding("hello, world"); + Assert.That(response.Value, Is.Not.Null); + Assert.That(response.Value.Index, Is.EqualTo(0)); + Assert.That(response.Value.Usage, Is.Not.Null); + Assert.That(response.Value.Usage.TotalTokens, Is.GreaterThan(0)); + Assert.That(response.Value.Vector, Is.Not.Null.Or.Empty); + Assert.That(response.Value.Model, Contains.Substring("ada")); + float[] array = response.Value.Vector.ToArray(); + Assert.That(array.Length > 100); + } + + [Test] + public void SeveralEmbeddings() + { + EmbeddingClient client = new("text-embedding-3-small"); + List<string> prompts = + [ + "Hello, world!", + "This is a test.", + "Goodbye!" + ];
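+ // text-embedding-3 models support shortened embeddings: requesting Dimensions = 456 + // asks the service to return vectors of exactly that length.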
+ EmbeddingOptions options = new() + { + Dimensions = 456, + }; + ClientResult<EmbeddingCollection> response = client.GenerateEmbeddings(prompts, options); + Assert.That(response.Value, Is.Not.Null); + Assert.That(response.Value.Count, Is.EqualTo(3)); + for (int i = 0; i < response.Value.Count; i++) + { + Assert.That(response.Value[i].Index, Is.EqualTo(i)); + Assert.That(response.Value[i].Usage, Is.Not.Null); + Assert.That(response.Value[i].Usage.TotalTokens, Is.GreaterThan(0)); + Assert.That(response.Value[i].Vector, Is.Not.Null.Or.Empty); + float[] array = response.Value[i].Vector.ToArray(); + Assert.That(array.Length, Is.GreaterThan(100)); + } + } +} diff --git a/.dotnet/tests/TestScenarios/FileClientTests.cs b/.dotnet/tests/TestScenarios/FileClientTests.cs new file mode 100644 index 000000000..4a9f6d4cb --- /dev/null +++ b/.dotnet/tests/TestScenarios/FileClientTests.cs @@ -0,0 +1,51 @@ +using NUnit.Framework; +using OpenAI.Files; +using System; +using System.ClientModel; +using static OpenAI.Tests.TestHelpers; + +namespace OpenAI.Tests.Files; + +public partial class FileClientTests +{ + [Test] + public void ListFilesWorks() + { + FileClient client = GetTestClient(); + ClientResult<OpenAIFileInfoCollection> result = client.GetFileInfoList(); + Assert.That(result.Value.Count, Is.GreaterThan(0)); + Console.WriteLine(result.Value.Count); + ClientResult<OpenAIFileInfoCollection> assistantsResult = client.GetFileInfoList(OpenAIFilePurpose.Assistants); + Assert.That(assistantsResult.Value.Count, Is.GreaterThan(0)); + Assert.That(assistantsResult.Value.Count, Is.LessThan(result.Value.Count)); + Console.WriteLine(assistantsResult.Value.Count); + } + + [Test] + public void UploadFileWorks() + { + FileClient client = GetTestClient(); + BinaryData uploadData = BinaryData.FromString("hello, this is a text file, please delete me"); + + ClientResult<OpenAIFileInfo> uploadResult = client.UploadFile(uploadData, "test-file-delete-me.txt", OpenAIFilePurpose.Assistants); + Assert.That(uploadResult.Value, Is.Not.Null); + + ClientResult<OpenAIFileInfo> fileInfoResult = client.GetFileInfo(uploadResult.Value.Id); + Assert.AreEqual(uploadResult.Value.Id, fileInfoResult.Value.Id); + Assert.AreEqual(uploadResult.Value.Filename, fileInfoResult.Value.Filename); + } + + [Test] + public void DownloadAndInfoWork() + { + FileClient client = GetTestClient(); + + ClientResult<OpenAIFileInfo> fileInfoResult = client.GetFileInfo("file-S7roYWamZqfMK9D979HU4q6m"); + Assert.That(fileInfoResult.Value, Is.Not.Null); + + ClientResult<BinaryData> downloadResult = client.DownloadFile("file-S7roYWamZqfMK9D979HU4q6m"); + Assert.That(downloadResult.Value, Is.Not.Null); + } + + private static FileClient GetTestClient() => GetTestClient<FileClient>(TestScenario.Files); +} \ No newline at end of file diff --git a/.dotnet/tests/TestScenarios/GitHubTests.cs b/.dotnet/tests/TestScenarios/GitHubTests.cs new file mode 100644 index 000000000..3a3644f22 --- /dev/null +++ b/.dotnet/tests/TestScenarios/GitHubTests.cs @@ -0,0 +1,23 @@ +using NUnit.Framework; +using System; + +namespace OpenAI.Tests; + +public partial class GitHubTests +{ + [Test(Description = "Test that we can use a GitHub secret")] + [Category("Online")] + public void CanUseGitHubSecret() + { + string gitHubSecretString = Environment.GetEnvironmentVariable("SECRET_VALUE"); + Assert.That(gitHubSecretString, Is.Not.Null.Or.Empty); + } + + [Test(Description = "Test that we can run some tests without secrets")] + [Category("Offline")] + public void CanTestWithoutSecretAccess() + { + int result = 2 + 1; + Assert.That(result, Is.EqualTo(3)); + } +} diff --git a/.dotnet/tests/TestScenarios/ImageGenerationTests.cs 
b/.dotnet/tests/TestScenarios/ImageGenerationTests.cs new file mode 100644 index 000000000..1d8d3304c --- /dev/null +++ b/.dotnet/tests/TestScenarios/ImageGenerationTests.cs @@ -0,0 +1,36 @@ +using NUnit.Framework; +using OpenAI.Images; +using System; +using System.ClientModel; +using static OpenAI.Tests.TestHelpers; + +namespace OpenAI.Tests.Images; + +public partial class ImageGenerationTests +{ + [Test] + public void BasicGenerationWorks() + { + ImageClient client = new("dall-e-3"); + ClientResult<GeneratedImage> result = client.GenerateImage("an isolated stop sign"); + Assert.That(result, Is.InstanceOf<ClientResult<GeneratedImage>>()); + Assert.That(result.Value.ImageUri, Is.Not.Null); + Console.WriteLine(result.Value.ImageUri.AbsoluteUri); + Assert.That(result.Value.ImageBytes, Is.Null); + Assert.That(result.Value.CreatedAt, Is.GreaterThan(new DateTimeOffset(new DateTime(year: 2020, month: 1, day: 1)))); + } + + [Test] + public void GenerationWithOptionsWorks() + { + ImageClient client = GetTestClient(); + ClientResult<GeneratedImage> result = client.GenerateImage("an isolated stop sign", new ImageGenerationOptions() + { + Quality = ImageQuality.Standard, + Style = ImageStyle.Natural, + }); + Assert.That(result.Value.ImageUri, Is.Not.Null); + } + + private static ImageClient GetTestClient() => GetTestClient<ImageClient>(TestScenario.Images); +} diff --git a/.dotnet/tests/TestScenarios/LegacyCompletions.cs b/.dotnet/tests/TestScenarios/LegacyCompletions.cs new file mode 100644 index 000000000..6f5deddc1 --- /dev/null +++ b/.dotnet/tests/TestScenarios/LegacyCompletions.cs @@ -0,0 +1,32 @@ +using NUnit.Framework; +using OpenAI.LegacyCompletions; +using System; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Text.Json; +using System.Text.Json.Nodes; + +namespace OpenAI.Tests.LegacyCompletions; + +public partial class LegacyCompletionTests +{ + [Test] + public void BasicValidationWorks() + { + LegacyCompletionClient client = new(); + BinaryData requestData = BinaryData.FromObjectAsJson(new + { + model = "gpt-3.5-turbo-instruct", + prompt = "hello world", + max_tokens = 256, + temperature = 0, + }); + BinaryContent content = BinaryContent.Create(requestData); + ClientResult result = client.GenerateLegacyCompletions(content); + Assert.That(result, Is.Not.Null); + JsonObject responseObject = JsonSerializer.Deserialize<JsonObject>(result.GetRawResponse().Content.ToString()); + string text = responseObject["choices"].AsArray()[0].AsObject()["text"].ToString(); + Assert.That(text, Is.Not.Null.Or.Empty); + } +} diff --git a/.dotnet/tests/TestScenarios/ModelClientTests.cs b/.dotnet/tests/TestScenarios/ModelClientTests.cs new file mode 100644 index 000000000..2e668006a --- /dev/null +++ b/.dotnet/tests/TestScenarios/ModelClientTests.cs @@ -0,0 +1,28 @@ +using NUnit.Framework; +using OpenAI.ModelManagement; +using System.ClientModel; +using System.Linq; +using System.Threading.Tasks; + +namespace OpenAI.Tests.Models; + +public partial class ModelManagementClientTests +{ + [Test] + public async Task CanListModels() + { + ModelManagementClient client = new(); + ClientResult<ModelInfoCollection> result = await client.GetModelsAsync(); + Assert.That(result.Value, Is.Not.Null.Or.Empty); + Assert.That(result.Value.Any(modelInfo => modelInfo.Id.ToLowerInvariant().Contains("whisper"))); + } + + [Test] + public async Task CanRetrieveModelInfo() + { + ModelManagementClient client = new(); + ClientResult<ModelInfo> result = await client.GetModelInfoAsync("gpt-3.5-turbo"); + Assert.That(result.Value, Is.Not.Null); + 
Assert.That(result.Value.OwnerOrganization.ToLowerInvariant(), Contains.Substring("openai")); + } +} diff --git a/.dotnet/tests/TestScenarios/TextToSpeechTests.cs b/.dotnet/tests/TestScenarios/TextToSpeechTests.cs new file mode 100644 index 000000000..c880ff864 --- /dev/null +++ b/.dotnet/tests/TestScenarios/TextToSpeechTests.cs @@ -0,0 +1,35 @@ +using NUnit.Framework; +using OpenAI.Audio; +using System; +using System.ClientModel; + +namespace OpenAI.Tests.Audio; + +public partial class TextToSpeechTests +{ + [Test] + public void BasicTTSWorks() + { + AudioClient client = new("tts-1"); + ClientResult<BinaryData> result = client.GenerateSpeechFromText("hello, world, this is a test", TextToSpeechVoice.Shimmer); + Assert.That(result.Value, Is.Not.Null); + } + + [Test] + [TestCase(null)] + [TestCase(AudioDataFormat.Mp3)] + [TestCase(AudioDataFormat.Aac)] + [TestCase(AudioDataFormat.Opus)] + [TestCase(AudioDataFormat.Flac)] + public void OutputFormatWorks(AudioDataFormat? responseFormat) + { + AudioClient client = new("tts-1"); + TextToSpeechOptions options = new(); + if (responseFormat != null) + { + options.ResponseFormat = responseFormat; + } + ClientResult<BinaryData> result = client.GenerateSpeechFromText("Hello, world!", TextToSpeechVoice.Alloy, options); + Assert.That(result.Value, Is.Not.Null); + } +} diff --git a/.dotnet/tests/TestScenarios/TranscriptionTests.cs b/.dotnet/tests/TestScenarios/TranscriptionTests.cs new file mode 100644 index 000000000..70753d47d --- /dev/null +++ b/.dotnet/tests/TestScenarios/TranscriptionTests.cs @@ -0,0 +1,43 @@ +using NUnit.Framework; +using OpenAI.Audio; +using System; +using System.ClientModel; +using System.IO; +using static OpenAI.Tests.TestHelpers; + +namespace OpenAI.Tests.Audio; + +public partial class TranscriptionTests +{ + [Test] + public void BasicTranscriptionWorks() + { + AudioClient client = GetTestClient(); + using FileStream inputStream = File.OpenRead(Path.Combine("Assets", "hello_world.m4a")); + BinaryData inputData = BinaryData.FromStream(inputStream); + ClientResult<AudioTranscription> transcriptionResult = client.TranscribeAudio(inputData, "hello_world.m4a"); + Assert.That(transcriptionResult.Value, Is.Not.Null); + Assert.That(transcriptionResult.Value.Text.ToLowerInvariant(), Contains.Substring("hello")); + } + + [Test] + public void WordTimestampsWork() + { + AudioClient client = GetTestClient(); + using FileStream inputStream = File.OpenRead(Path.Combine("Assets", "hello_world.m4a")); + BinaryData inputData = BinaryData.FromStream(inputStream); + ClientResult<AudioTranscription> transcriptionResult = client.TranscribeAudio(inputData, "hello_world.m4a", new AudioTranscriptionOptions() + { + EnableWordTimestamps = true, + EnableSegmentTimestamps = true, + ResponseFormat = AudioTranscriptionFormat.Detailed, + }); + Assert.That(transcriptionResult.Value, Is.Not.Null); + // Assert.That(transcriptionResult.Value.Segments, Is.Null); + // Assert.That(transcriptionResult.Value.Words, Is.Not.Null.Or.Empty); + // Assert.That(transcriptionResult.Value.Words[1].Word, Contains.Substring("world")); + // Assert.That(transcriptionResult.Value.Words[1].Start, Is.GreaterThan(TimeSpan.FromMilliseconds(0))); + // Assert.That(transcriptionResult.Value.Words[1].End, Is.GreaterThan(TimeSpan.FromMilliseconds(0))); + } + + private static AudioClient GetTestClient() => GetTestClient<AudioClient>(TestScenario.Transcription); +} diff --git a/.dotnet/tests/TestScenarios/TranslationTests.cs new file mode 100644 index 000000000..be71bb5a2 --- /dev/null +++ 
b/.dotnet/tests/TestScenarios/TranslationTests.cs @@ -0,0 +1,24 @@ +using NUnit.Framework; +using OpenAI.Audio; +using System; +using System.ClientModel; +using System.IO; +using static OpenAI.Tests.TestHelpers; + +namespace OpenAI.Tests.Audio; + +public partial class TranslationTests +{ + [Test] + public void BasicTranslationWorks() + { + AudioClient client = GetTestClient(); + using FileStream inputStream = File.OpenRead(Path.Combine("Assets", "multilingual.wav")); + BinaryData inputData = BinaryData.FromStream(inputStream); + ClientResult<AudioTranslation> translationResult = client.TranslateAudio(inputData, "multilingual.wav"); + Assert.That(translationResult.Value, Is.Not.Null); + // Assert.That(translationResult.Value.Text.ToLowerInvariant(), Contains.Substring("hello")); + } + + private static AudioClient GetTestClient() => GetTestClient<AudioClient>(TestScenario.Transcription); +} diff --git a/.dotnet/tests/Utility/TestHelpers.cs b/.dotnet/tests/Utility/TestHelpers.cs new file mode 100644 index 000000000..bfc847e42 --- /dev/null +++ b/.dotnet/tests/Utility/TestHelpers.cs @@ -0,0 +1,79 @@ +using OpenAI.Assistants; +using OpenAI.Audio; +using OpenAI.Chat; +using OpenAI.Files; +using OpenAI.Images; +using System; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.IO; + +namespace OpenAI.Tests; + +internal static class TestHelpers +{ + public enum TestScenario + { + Assistants, + TextToSpeech, + Chat, + VisionChat, + Files, + Embeddings, + FineTuning, + Images, + Transcription, + Models, + LegacyCompletions, + Moderations, + } + + public static T GetTestClient<T>(TestScenario scenario, string overrideModel = null, bool throwOnError = true) + { + OpenAIClientOptions options = new(); + options.AddPolicy(GetDumpPolicy(), PipelinePosition.PerTry); + options.ErrorOptions = throwOnError ? ClientErrorBehaviors.Default : ClientErrorBehaviors.NoThrow; + object clientObject = scenario switch + { + TestScenario.Chat => new ChatClient(overrideModel ?? "gpt-3.5-turbo", credential: null, options), + TestScenario.VisionChat => new ChatClient(overrideModel ?? "gpt-4-vision-preview", credential: null, options), +#pragma warning disable OPENAI001 + TestScenario.Assistants => new AssistantClient(credential: null, options), +#pragma warning restore OPENAI001 + TestScenario.Images => new ImageClient(overrideModel ?? "dall-e-3", credential: null, options), + TestScenario.Files => new FileClient(credential: null, options), + TestScenario.Transcription => new AudioClient(overrideModel ?? "whisper-1", credential: null, options), + _ => throw new NotImplementedException(), + }; + return (T)clientObject; + } + + private static PipelinePolicy GetDumpPolicy() + { + return new TestPipelinePolicy((message) => + { + if (message.Request?.Uri != null) + { + Console.WriteLine("--- Request URI: ---"); + Console.WriteLine(message.Request.Uri.AbsoluteUri); + } + if (message.Request?.Content != null) + { + Console.WriteLine("--- Begin request content ---"); + using MemoryStream stream = new(); + message.Request.Content.WriteTo(stream, default); + stream.Position = 0; + using StreamReader reader = new(stream); + Console.WriteLine(reader.ReadToEnd()); + Console.WriteLine("--- End of request content ---"); + } + if (message.Response != null) + { + Console.WriteLine("--- Begin response content ---"); + Console.WriteLine(message.Response.Content?.ToString()); + Console.WriteLine("--- End of response content ---"); + } + }); + } +} \ No newline at end of file diff --git a/.dotnet/tests/Utility/TestPipelinePolicy.cs b/.dotnet/tests/Utility/TestPipelinePolicy.cs new file mode 100644 index 000000000..688c407ed --- /dev/null +++ b/.dotnet/tests/Utility/TestPipelinePolicy.cs @@ -0,0 +1,35 @@ +using System; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Threading.Tasks; + +namespace OpenAI.Tests; + +internal partial class TestPipelinePolicy : PipelinePolicy +{ + private readonly Action<PipelineMessage> _processMessageAction; + + public TestPipelinePolicy(Action<PipelineMessage> processMessageAction) + { + _processMessageAction = processMessageAction; + } + + public override void Process(PipelineMessage message, IReadOnlyList<PipelinePolicy> pipeline, int currentIndex) + { + _processMessageAction(message); + if (currentIndex < pipeline.Count - 1) + { + pipeline[currentIndex + 1].Process(message, pipeline, currentIndex + 1); + } + } + + public override async ValueTask ProcessAsync(PipelineMessage message, IReadOnlyList<PipelinePolicy> pipeline, int currentIndex) + { + _processMessageAction(message); + if (currentIndex < pipeline.Count - 1) + { + await pipeline[currentIndex + 1].ProcessAsync(message, pipeline, currentIndex + 1); + } + } +} \ No newline at end of file diff --git a/.github/README.md b/.github/README.md new file mode 100644 index 000000000..df5b3494c --- /dev/null +++ b/.github/README.md @@ -0,0 +1,17 @@ +The workflows in this repository try to follow existing, basic samples with little customization. 
+ +## main.yml +We use a basic dotnet build/test/pack workflow +https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-net + +- Build the solution using the dotnet CLI + - Strong-name the assemblies using a key stored in the repository + https://github.com/dotnet/runtime/blob/main/docs/project/strong-name-signing.md +- Test the built libraries + - Use a repository secret to hold the OpenAI token used for live testing + https://docs.github.com/en/actions/security-guides/using-secrets-in-github-actions +- Package the built libraries +- Publish the package as a GitHub Release +- Publish the package to a GitHub NuGet registry + https://docs.github.com/en/packages/working-with-a-github-packages-registry/working-with-the-nuget-registry +- Publish a single build artifact containing test results and a NuGet package \ No newline at end of file diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index bbe92ae53..521ae2598 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -1,27 +1,72 @@ -name: Validate OpenAPI definition +name: Build and Test + on: + workflow_dispatch: + push: + branches: + - main pull_request: types: [opened, reopened, synchronize] jobs: - test_swagger_editor_validator_service: + build: # Test, pack and publish the OpenAI NuGet package as a build artifact runs-on: ubuntu-latest - name: Swagger Editor Validator Service - - # Service containers to run with `runner-job` - services: - # Label used to access the service container - swagger-editor: - # Docker Hub image - image: swaggerapi/swagger-editor - ports: - # Maps port 8080 on service container to the host 80 - - 80:8080 - + env: + version_suffix_args: ${{ format('--version-suffix="alpha.{0}"', github.run_number) }} steps: - - uses: actions/checkout@v2 - - name: Validate OpenAPI definition - uses: char0n/swagger-editor-validate@v1 + - name: Setup .NET + uses: actions/setup-dotnet@v1 + with: + dotnet-version: 8.x + + - name: Checkout code + uses: actions/checkout@v2 + + - name: Build + run: dotnet build + -c Release + ${{ env.version_suffix_args }} + working-directory: .dotnet + + - name: Test + run: dotnet test + --no-build + --configuration Release + --filter="TestCategory~${{ github.event_name == 'pull_request' && 'Offline' || 'Online' }}" + --logger "trx;LogFileName=${{github.workspace}}/artifacts/test-results/full.trx" + env: + SECRET_VALUE: ${{ secrets.OPENAI_TOKEN }} + working-directory: .dotnet + + - name: Pack + run: dotnet pack + --no-build + --configuration Release + --output "${{github.workspace}}/artifacts/packages" + ${{ env.version_suffix_args }} + working-directory: .dotnet + + - name: Upload artifact + uses: actions/upload-artifact@v2 with: - swagger-editor-url: http://localhost/ - definition-file: openapi.yaml + name: build-artifacts + path: ${{github.workspace}}/artifacts + + - name: NuGet Authenticate + if: github.event_name != 'pull_request' + run: dotnet nuget add source + "https://nuget.pkg.github.com/${{ github.repository_owner }}/index.json" + --name "github" + --username ${{ github.actor }} + --password ${{ secrets.GITHUB_TOKEN }} + --store-password-in-clear-text + working-directory: .dotnet + + - name: Publish + if: github.event_name != 'pull_request' + run: dotnet nuget push + ${{github.workspace}}/artifacts/packages/*.nupkg + --source "github" + --api-key ${{ secrets.GITHUB_TOKEN }} + --skip-duplicate + working-directory: .dotnet diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 
000000000..8e25a5651 --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,74 @@ +name: Release package + +on: + release: + types: [published] + + +jobs: + deploy: + runs-on: ubuntu-latest + permissions: + packages: write + contents: write + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-dotnet@v3 + with: + dotnet-version: '8.x' # SDK Version to use. + + - name: Build + run: dotnet build + -c Release + working-directory: .dotnet + + - name: Test + run: dotnet test + --no-build + --configuration Release + --filter="TestCategory~Online" + --logger "trx;LogFileName=${{github.workspace}}/artifacts/test-results/full.trx" + env: + SECRET_VALUE: ${{ secrets.OPENAI_TOKEN }} + working-directory: .dotnet + + # Pack the client NuGet package and include URLs back to the repository and release tag + - name: Pack + run: dotnet pack + --no-build + --configuration Release + --output "${{github.workspace}}/artifacts/packages" + /p:RepositoryUrl="https://github.com/${{ github.repository }}" + /p:PackageProjectUrl="https://github.com/${{ github.repository }}/tree/${{ github.event.release.tag_name }}" + working-directory: .dotnet + + # Append the NuGet package to the GitHub release that triggered this workflow + - name: Upload release asset + run: gh release upload ${{ github.event.release.tag_name }} + ${{github.workspace}}/artifacts/packages/*.nupkg + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + - name: Upload artifact + uses: actions/upload-artifact@v2 + with: + name: build-artifacts + path: ${{github.workspace}}/artifacts + + - name: NuGet Authenticate + run: dotnet nuget add source + "https://nuget.pkg.github.com/${{ github.repository_owner }}/index.json" + --name "github" + --username ${{ github.actor }} + --password ${{ secrets.GITHUB_TOKEN }} + --store-password-in-clear-text + working-directory: .dotnet + + - name: Publish + run: dotnet nuget push + ${{github.workspace}}/artifacts/packages/*.nupkg + --source "github" + --api-key ${{ secrets.GITHUB_TOKEN }} + --skip-duplicate + working-directory: .dotnet + diff --git a/.gitignore b/.gitignore new file mode 100644 index 000000000..f71e3dc8e --- /dev/null +++ b/.gitignore @@ -0,0 +1,179 @@ +## Ignore Visual Studio temporary files, build results, and +## files generated by popular Visual Studio add-ons. 
+ +# User-specific files +*.suo +*.user +*.sln.docstates +.vs/ +*.lock.json +developer/ +launch.json +launchSettings.json + +# Default Assets restore directory +.assets + +# Build results +/artifacts +binaries/ +[Dd]ebug*/ +[Rr]elease/ +build/ +restoredPackages/ +PolicheckOutput/ +tools/net46/ +tools/SdkBuildTools/ +tools/Microsoft.WindowsAzure.Build.Tasks/packages/ +PublishedNugets/ +src/NuGet.Config +tools/7-zip/ +#tools/LocalNugetFeed/Microsoft.Internal.NetSdkBuild.Mgmt.Tools.*.nupkg + +[Tt]est[Rr]esult +[Bb]uild[Ll]og.* + +*_i.c +*_p.c +*.ilk +*.meta +*.obj +*.pch +*.pdb +*.pgc +*.pgd +*.rsp +*.sbr +*.tlb +*.tli +*.tlh +*.tmp +*.vspscc +*.vssscc +.builds + +*.pidb + +*.log +*.scc +# Visual C++ cache files +ipch/ +*.aps +*.ncb +*.opensdf +*.sdf + +# Visual Studio profiler +*.psess +*.vsp + +# VS Code +**/.vscode/* +!.vscode/cspell.json + +# Code analysis +*.CodeAnalysisLog.xml + +# Guidance Automation Toolkit +*.gpState + +# ReSharper is a .NET coding add-in +_ReSharper*/ + +*.[Rr]e[Ss]harper + +# Rider IDE +.idea + +# NCrunch +*.ncrunch* +.*crunch*.local.xml + +# Installshield output folder +[Ee]xpress + +# DocProject is a documentation generator add-in +DocProject/buildhelp/ +DocProject/Help/*.HxT +DocProject/Help/*.HxC +DocProject/Help/*.hhc +DocProject/Help/*.hhk +DocProject/Help/*.hhp +DocProject/Help/Html2 +DocProject/Help/html + +# Click-Once directory +publish + +# Publish Web Output +*.Publish.xml + +# Others +[Bb]in +[Oo]bj +TestResults +[Tt]est[Rr]esult* +*.Cache +ClientBin +~$* +*.dbmdl + +*.[Pp]ublish.xml + +Generated_Code #added for RIA/Silverlight projects + +# Build tasks +tools/*.dll + +# Sensitive files +*.keys +!Azure.Extensions.AspNetCore.DataProtection.Keys +!Azure.Security.KeyVault.Keys +*.pfx +TestConfigurations.xml +*.json.env +*.bicep.env + +# Backup & report files from converting an old project file to a newer +# Visual Studio version. Backup files are not needed, because we have git ;-) +_UpgradeReport_Files/ +Backup*/ +UpgradeLog*.XML + +# NuGet +packages +packages/repositories.config +testPackages + +# Mac development +.DS_Store + +# Specification DLLs +*.Specification.dll + +# Generated readme.txt files # +src/*/readme.txt + +build.out +.nuget/ + +# Azure Project +csx/ +*.GhostDoc.xml +pingme.txt + +# TS/Node files +dist/ +node_modules/ + +# MSBuild binary log files +msbuild.binlog + +# BenchmarkDotNet +BenchmarkDotNet.Artifacts + +artifacts +.assets + +# Temporary typespec folders for typespec generation +TempTypeSpecFiles/ diff --git a/README.md b/README.md index 04bcd9be3..eff608de4 100644 --- a/README.md +++ b/README.md @@ -1,3 +1,17 @@ -# OpenAPI spec for the OpenAI API +# A conversion of the OpenAI OpenAPI to TypeSpec -This repository contains an [OpenAPI](https://www.openapis.org/) specification for the [OpenAI API](https://platform.openai.com/docs/api-reference). +Snapshot: https://raw.githubusercontent.com/openai/openai-openapi/b648b7823135e6fa5148ac9a303c16fdad050da6/openapi.yaml + +There are some deltas: + +### Changes to API Semantics + +- Many things are missing defaults (mostly due to a bug where we can't set null defaults) +- Error responses have been added. +- Where known, the `object` property's type is narrowed from string to the constant value it will always be + +### Changes to API metadata or OpenAPI format + +- Many of the x-oaiMeta entries have not been added. +- In some cases, new schemas needed to be introduced in order to be expressible in TypeSpec (e.g. 
diff --git a/assistants/main.tsp b/assistants/main.tsp
new file mode 100644
index 000000000..6a754bcb5
--- /dev/null
+++ b/assistants/main.tsp
@@ -0,0 +1 @@
+import "./operations.tsp";
\ No newline at end of file
diff --git a/assistants/meta.tsp b/assistants/meta.tsp
new file mode 100644
index 000000000..fd2481ecb
--- /dev/null
+++ b/assistants/meta.tsp
@@ -0,0 +1,42 @@
+import "./models.tsp";
+
+import "@typespec/openapi";
+
+using TypeSpec.OpenAPI;
+
+namespace OpenAI;
+
+// TODO: Fill in example here.
+@@extension(OpenAI.ListAssistantsResponse,
+  "x-oaiMeta",
+  """
+  name: List assistants response object
+  group: chat
+  example: *list_assistants_example
+  """
+);
+
+// TODO: Fill in example here.
+@@extension(OpenAI.AssistantObject,
+  "x-oaiMeta",
+  """
+  name: The assistant object
+  beta: true
+  example: *create_assistants_example
+  """
+);
+
+@@extension(OpenAI.AssistantFileObject,
+  "x-oaiMeta",
+  """
+  name: The assistant file object
+  beta: true
+  example: |
+    {
+      "id": "file-abc123",
+      "object": "assistant.file",
+      "created_at": 1699055364,
+      "assistant_id": "asst_abc123"
+    }
+  """
+);
\ No newline at end of file
diff --git a/assistants/models.tsp b/assistants/models.tsp
new file mode 100644
index 000000000..acea5a1d4
--- /dev/null
+++ b/assistants/models.tsp
@@ -0,0 +1,231 @@
+import "../common/models.tsp";
+
+using TypeSpec.Http;
+using TypeSpec.OpenAPI;
+
+namespace OpenAI;
+
+model CreateAssistantRequest {
+  /**
+   * ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to
+   * see all of your available models, or see our [Model overview](/docs/models/overview) for
+   * descriptions of them.
+   */
+  `model`: string;
+
+  /** The name of the assistant. The maximum length is 256 characters. */
+  @maxLength(256)
+  name?: string | null;
+
+  /** The description of the assistant. The maximum length is 512 characters. */
+  @maxLength(512)
+  description?: string | null;
+
+  /** The system instructions that the assistant uses. The maximum length is 32768 characters. */
+  @maxLength(32768)
+  instructions?: string | null;
+
+  /**
+   * A list of tools enabled on the assistant. There can be a maximum of 128 tools per assistant.
+   * Tools can be of types `code_interpreter`, `retrieval`, or `function`.
+   */
+  tools?: CreateAssistantRequestTools = [];
+
+  /**
+   * A list of [file](/docs/api-reference/files) IDs attached to this assistant. There can be a
+   * maximum of 20 files attached to the assistant. Files are ordered by their creation date in
+   * ascending order.
+   */
+  @maxItems(20)
+  file_ids?: string[] = [];
+
+  /**
+   * Set of 16 key-value pairs that can be attached to an object. This can be useful for storing
+   * additional information about the object in a structured format. Keys can be a maximum of 64
+   * characters long and values can be a maximum of 512 characters long.
+   */
+  @extension("x-oaiTypeLabel", "map")
+  metadata?: Record<string> | null;
+}
+
+model ModifyAssistantRequest {
+  /**
+   * ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to
+   * see all of your available models, or see our [Model overview](/docs/models/overview) for
+   * descriptions of them.
+   */
+  `model`?: string;
+
+  /** The name of the assistant. The maximum length is 256 characters. */
+  @maxLength(256)
+  name?: string | null;
+
+  /** The description of the assistant. The maximum length is 512 characters. */
+  @maxLength(512)
+  description?: string | null;
+
+  /** The system instructions that the assistant uses. The maximum length is 32768 characters. */
+  @maxLength(32768)
+  instructions?: string | null;
+
+  /**
+   * A list of tools enabled on the assistant. There can be a maximum of 128 tools per assistant.
+   * Tools can be of types `code_interpreter`, `retrieval`, or `function`.
+   */
+  tools?: CreateAssistantRequestTools = [];
+
+  /**
+   * A list of [file](/docs/api-reference/files) IDs attached to this assistant. There can be a
+   * maximum of 20 files attached to the assistant. Files are ordered by their creation date in
+   * ascending order.
+   */
+  @maxItems(20)
+  file_ids?: string[] = [];
+
+  /**
+   * Set of 16 key-value pairs that can be attached to an object. This can be useful for storing
+   * additional information about the object in a structured format. Keys can be a maximum of 64
+   * characters long and values can be a maximum of 512 characters long.
+   */
+  @extension("x-oaiTypeLabel", "map")
+  metadata?: Record<string> | null;
+}
+
+model CreateAssistantFileRequest {
+  /**
+   * A [File](/docs/api-reference/files) ID (with `purpose="assistants"`) that the assistant should
+   * use. Useful for tools like `retrieval` and `code_interpreter` that can access files.
+   */
+  file_id: string;
+}
+
+model ListAssistantsResponse {
+  object: "list";
+  data: AssistantObject[];
+  first_id: string;
+  last_id: string;
+  has_more: boolean;
+}
+
+model DeleteAssistantResponse {
+  id: string;
+  deleted: boolean;
+  object: "assistant.deleted";
+}
+
+model ListAssistantFilesResponse {
+  object: "list";
+  data: AssistantFileObject[];
+  first_id: string;
+  last_id: string;
+  has_more: boolean;
+}
+
+/**
+ * Deletes the association between the assistant and the file, but does not delete the
+ * [File](/docs/api-reference/files) object itself.
+ */
+model DeleteAssistantFileResponse {
+  id: string;
+  deleted: boolean;
+  object: "assistant.file.deleted";
+}
+
+@maxItems(128)
+model CreateAssistantRequestTools is CreateAssistantRequestTool[];
+
+@oneOf
+@extension("x-oaiExpandable", true)
+union CreateAssistantRequestTool {
+  AssistantToolsCode,
+  AssistantToolsRetrieval,
+  AssistantToolsFunction
+}
+
+model AssistantToolsCode {
+  /** The type of tool being defined: `code_interpreter` */
+  type: "code_interpreter";
+}
+
+model AssistantToolsRetrieval {
+  /** The type of tool being defined: `retrieval` */
+  type: "retrieval";
+}
+
+model AssistantToolsFunction {
+  /** The type of tool being defined: `function` */
+  type: "function";
+
+  function: FunctionObject;
+}
+
+/** Represents an `assistant` that can call the model and use tools. */
+model AssistantObject {
+  /** The identifier, which can be referenced in API endpoints. */
+  id: string;
+
+  /** The object type, which is always `assistant`. */
+  object: "assistant";
+
+  /** The Unix timestamp (in seconds) for when the assistant was created. */
+  @encode("unixTimestamp", int32)
+  created_at: utcDateTime;
+
+  /** The name of the assistant. The maximum length is 256 characters. */
+  @maxLength(256)
+  name: string | null;
+
+  /** The description of the assistant. The maximum length is 512 characters. */
+  @maxLength(512)
+  description: string | null;
+
+  /**
+   * ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to
+   * see all of your available models, or see our [Model overview](/docs/models/overview) for
+   * descriptions of them.
+   */
+  `model`: string;
+
+  /** The system instructions that the assistant uses. The maximum length is 32768 characters. */
+  @maxLength(32768)
+  instructions: string | null;
+
+  /**
+   * A list of tools enabled on the assistant. There can be a maximum of 128 tools per assistant.
+   * Tools can be of types `code_interpreter`, `retrieval`, or `function`.
+   */
+  tools: CreateAssistantRequestTools = [];
+
+  /**
+   * A list of [file](/docs/api-reference/files) IDs attached to this assistant. There can be a
+   * maximum of 20 files attached to the assistant. Files are ordered by their creation date in
+   * ascending order.
+   */
+  @maxItems(20)
+  file_ids: string[] = [];
+
+  /**
+   * Set of 16 key-value pairs that can be attached to an object. This can be useful for storing
+   * additional information about the object in a structured format. Keys can be a maximum of 64
+   * characters long and values can be a maximum of 512 characters long.
+   */
+  @extension("x-oaiTypeLabel", "map")
+  metadata: Record<string> | null;
+}
+
+/** A list of [Files](/docs/api-reference/files) attached to an `assistant`. */
+model AssistantFileObject {
+  /** The identifier, which can be referenced in API endpoints. */
+  id: string;
+
+  /** The object type, which is always `assistant.file`. */
+  object: "assistant.file";
+
+  /** The Unix timestamp (in seconds) for when the assistant file was created. */
+  @encode("unixTimestamp", int32)
+  created_at: utcDateTime;
+
+  /** The assistant ID that the file is attached to. */
+  assistant_id: string;
+}
\ No newline at end of file
diff --git a/assistants/operations.tsp b/assistants/operations.tsp
new file mode 100644
index 000000000..e462c6f5b
--- /dev/null
+++ b/assistants/operations.tsp
@@ -0,0 +1,162 @@
+import "@typespec/http";
+import "@typespec/openapi";
+
+import "../common/models.tsp";
+import "../common/errors.tsp";
+import "./models.tsp";
+
+using TypeSpec.Http;
+using TypeSpec.OpenAPI;
+
+namespace OpenAI;
+
+@route("/assistants")
+interface Assistants {
+  @post
+  @operationId("createAssistant")
+  @tag("Assistants")
+  @summary("Create an assistant with a model and instructions.")
+  createAssistant(
+    @body assistant: CreateAssistantRequest,
+  ): AssistantObject | ErrorResponse;
+
+  @get
+  @operationId("listAssistants")
+  @tag("Assistants")
+  @summary("Returns a list of assistants.")
+  listAssistants(
+    /**
+     * A limit on the number of objects to be returned. Limit can range between 1 and 100, and the
+     * default is 20.
+     */
+    @query limit?: int32 = 20;
+
+    /**
+     * Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc`
+     * for descending order.
+     */
+    @query order?: ListOrder = ListOrder.desc;
+
+    /**
+     * A cursor for use in pagination. `after` is an object ID that defines your place in the list.
+     * For instance, if you make a list request and receive 100 objects, ending with obj_foo, your
+     * subsequent call can include after=obj_foo in order to fetch the next page of the list.
+     */
+    @query after?: string;
+
+    /**
+     * A cursor for use in pagination. `before` is an object ID that defines your place in the list.
+     * For instance, if you make a list request and receive 100 objects, ending with obj_foo, your
+     * subsequent call can include before=obj_foo in order to fetch the previous page of the list.
+     */
+    @query before?: string;
+  ): ListAssistantsResponse | ErrorResponse;
+
+  @route("{assistant_id}")
+  @get
+  @operationId("getAssistant")
+  @tag("Assistants")
+  @summary("Retrieves an assistant.")
+  getAssistant(
+    /** The ID of the assistant to retrieve. */
+    @path assistant_id: string,
+  ): AssistantObject | ErrorResponse;
+
+  @route("{assistant_id}")
+  @post
+  @operationId("modifyAssistant")
+  @tag("Assistants")
+  @summary("Modifies an assistant.")
+  modifyAssistant(
+    /** The ID of the assistant to modify. */
+    @path assistant_id: string,
+
+    @body assistant: ModifyAssistantRequest,
+  ): AssistantObject | ErrorResponse;
+
+  @route("{assistant_id}")
+  @delete
+  @operationId("deleteAssistant")
+  @tag("Assistants")
+  @summary("Delete an assistant.")
+  deleteAssistant(
+    /** The ID of the assistant to delete. */
+    @path assistant_id: string,
+  ): DeleteAssistantResponse | ErrorResponse;
+
+  @route("{assistant_id}/files")
+  @post
+  @operationId("createAssistantFile")
+  @tag("Assistants")
+  @summary("""
+  Create an assistant file by attaching a [File](/docs/api-reference/files) to an
+  [assistant](/docs/api-reference/assistants).
+  """)
+  createAssistantFile(
+    /** The ID of the assistant for which to create a file. */
+    @path assistant_id: string,
+    @body file: CreateAssistantFileRequest,
+  ): AssistantFileObject | ErrorResponse;
+
+  @route("{assistant_id}/files")
+  @get
+  @operationId("listAssistantFiles")
+  @tag("Assistants")
+  @summary("Returns a list of assistant files.")
+  listAssistantFiles(
+    /** The ID of the assistant the file belongs to. */
+    @path assistant_id: string,
+
+    /**
+     * A limit on the number of objects to be returned. Limit can range between 1 and 100, and the
+     * default is 20.
+     */
+    @query limit?: int32 = 20;
+
+    /**
+     * Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc`
+     * for descending order.
+     */
+    @query order?: ListOrder = ListOrder.desc;
+
+    /**
+     * A cursor for use in pagination. `after` is an object ID that defines your place in the list.
+     * For instance, if you make a list request and receive 100 objects, ending with obj_foo, your
+     * subsequent call can include after=obj_foo in order to fetch the next page of the list.
+     */
+    @query after?: string;
+
+    /**
+     * A cursor for use in pagination. `before` is an object ID that defines your place in the list.
+     * For instance, if you make a list request and receive 100 objects, ending with obj_foo, your
+     * subsequent call can include before=obj_foo in order to fetch the previous page of the list.
+     */
+    @query before?: string;
+  ): ListAssistantFilesResponse | ErrorResponse;
+
+  @route("{assistant_id}/files/{file_id}")
+  @get
+  @operationId("getAssistantFile")
+  @tag("Assistants")
+  @summary("Retrieves an assistant file.")
+  getAssistantFile(
+    /** The ID of the assistant the file belongs to. */
+    @path assistant_id: string,
+
+    /** The ID of the file we're getting. */
+    @path file_id: string,
+  ): AssistantFileObject | ErrorResponse;
+
+  @route("{assistant_id}/files/{file_id}")
+  @delete
+  @operationId("deleteAssistantFile")
+  @tag("Assistants")
+  @summary("Delete an assistant file.")
+  deleteAssistantFile(
+    /** The ID of the assistant the file belongs to. */
+    @path assistant_id: string,
+
+    /** The ID of the file to delete. */
+    @path file_id: string,
+  ): DeleteAssistantFileResponse | ErrorResponse;
+}
diff --git a/audio/main.tsp b/audio/main.tsp
index c6458821f..144c4aeaf 100644
--- a/audio/main.tsp
+++ b/audio/main.tsp
@@ -1,2 +1 @@
 import "./operations.tsp";
-import "./models.tsp";
diff --git a/audio/models.tsp b/audio/models.tsp
index a2a440a90..53d7757c3 100644
--- a/audio/models.tsp
+++ b/audio/models.tsp
@@ -1,6 +1,39 @@
-namespace OpenAI;
+import "../common/models.tsp";
+
+using TypeSpec.Http;
 
 using TypeSpec.OpenAPI;
 
+namespace OpenAI;
+
+model CreateSpeechRequest {
+  /** One of the available [TTS models](/docs/models/tts): `tts-1` or `tts-1-hd` */
+  @extension("x-oaiTypeLabel", "string")
+  `model`: string | TEXT_TO_SPEECH_MODELS;
+
+  /**
+   * The text to generate audio for. The maximum length is 4096 characters.
+   */
+  @maxLength(4096)
+  input: string;
+
+  /**
+   * The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`,
+   * `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the
+   * [Text to speech guide](/docs/guides/text-to-speech/voice-options).
+   */
+  voice: "alloy" | "echo" | "fable" | "onyx" | "nova" | "shimmer";
+
+  /** The format to generate audio in. Supported formats are `mp3`, `opus`, `aac`, and `flac`. */
+  response_format?: "mp3" | "opus" | "aac" | "flac" = "mp3";
+
+  /**
+   * The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is the default.
+   */
+  @minValue(0.25)
+  @maxValue(4.0)
+  speed?: float64 = 1.0;
+}
+
 model CreateTranscriptionRequest {
   /**
    * The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4,
@@ -12,7 +45,14 @@ model CreateTranscriptionRequest {
 
   /** ID of the model to use. Only `whisper-1` is currently available. */
   @extension("x-oaiTypeLabel", "string")
-  `model`: string | "whisper-1";
+  `model`: string | SPEECH_TO_TEXT_MODELS;
+
+  /**
+   * The language of the input audio. Supplying the input language in
+   * [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy
+   * and latency.
+   */
+  language?: string;
 
   /**
    * An optional text to guide the model's style or continue a previous audio segment. The
@@ -32,21 +72,10 @@ model CreateTranscriptionRequest {
    * the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to
    * automatically increase the temperature until certain thresholds are hit.
   */
+  // NOTE: Min and max values are absent in the OpenAPI spec but mentioned in the description.
   @minValue(0)
   @maxValue(1)
   temperature?: float64 = 0;
-
-  /**
-   * The language of the input audio. Supplying the input language in
-   * [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy
-   * and latency.
-   */
-  language?: string;
-}
-
-// Note: This does not currently support the non-default response format types.
-model CreateTranscriptionResponse {
-  text: string;
 }
 
 model CreateTranslationRequest {
@@ -60,7 +89,7 @@ model CreateTranslationRequest {
 
   /** ID of the model to use. Only `whisper-1` is currently available. */
   @extension("x-oaiTypeLabel", "string")
-  `model`: string | "whisper-1";
+  `model`: string | SPEECH_TO_TEXT_MODELS;
 
   /**
    * An optional text to guide the model's style or continue a previous audio segment. The
@@ -81,12 +110,104 @@ model CreateTranslationRequest {
    * the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to
    * automatically increase the temperature until certain thresholds are hit.
    */
+  // NOTE: Min and max values are absent in the OpenAPI spec but mentioned in the description.
   @minValue(0)
   @maxValue(1)
   temperature?: float64 = 0;
 }
 
-// Note: This does not currently support the non-default response format types.
+// NOTE: This model is not defined in the OpenAI API spec.
+model CreateTranscriptionResponse {
+  /** The transcribed text for the provided audio data. */
+  text: string;
+
+  /** The label that describes which operation type generated the accompanying response data. */
+  task?: "transcribe";
+
+  /** The spoken language that was detected in the audio data. */
+  language?: string;
+
+  /**
+   * The total duration of the audio processed to produce accompanying transcription information.
+   */
+  @encode("seconds", float64)
+  duration?: duration;
+
+  /**
+   * A collection of information about the timing, probabilities, and other details of each
+   * processed audio segment.
+   */
+  segments?: AudioSegment[];
+}
+
+// NOTE: This model is not defined in the OpenAI API spec.
 model CreateTranslationResponse {
+  /** The translated text for the provided audio data. */
   text: string;
+
+  /** The label that describes which operation type generated the accompanying response data. */
+  task?: "translate";
+
+  /** The spoken language that was detected in the audio data. */
+  language?: string;
+
+  /** The total duration of the audio processed to produce accompanying translation information. */
+  @encode("seconds", float64)
+  duration?: duration;
+
+  /**
+   * A collection of information about the timing, probabilities, and other details of each
+   * processed audio segment.
+   */
+  segments?: AudioSegment[];
 }
+
+alias TEXT_TO_SPEECH_MODELS =
+  | "tts-1"
+  | "tts-1-hd";
+
+alias SPEECH_TO_TEXT_MODELS =
+  | "whisper-1";
+
+// NOTE: This model is not defined in the OpenAI API spec.
+model AudioSegment {
+  /** The zero-based index of this segment. */
+  id: safeint;
+
+  /**
+   * The seek position associated with the processing of this audio segment. Seek positions are
+   * expressed as hundredths of seconds. The model may process several segments from a single seek
+   * position, so while the seek position will never represent a later time than the segment's
+   * start, the segment's start may represent a significantly later time than the segment's
+   * associated seek position.
+   */
+  seek: safeint;
+
+  /** The time at which this segment started relative to the beginning of the audio. */
+  @encode("seconds", float64)
+  start: duration;
+
+  /** The time at which this segment ended relative to the beginning of the audio. */
+  @encode("seconds", float64)
+  end: duration;
+
+  /** The text that was part of this audio segment. */
+  text: string;
+
+  /** The token IDs matching the text in this audio segment. */
+  tokens: TokenArray;
+
+  /** The temperature score associated with this audio segment. */
+  @minValue(0)
+  @maxValue(1)
+  temperature: float64;
+
+  /** The average log probability associated with this audio segment. */
+  avg_logprob: float64;
+
+  /** The compression ratio of this audio segment. */
+  compression_ratio: float64;
+
+  /** The probability of no speech detection within this audio segment. */
+  no_speech_prob: float64;
+}
\ No newline at end of file
diff --git a/audio/operations.tsp b/audio/operations.tsp
index 636fb941a..ee1cb428e 100644
--- a/audio/operations.tsp
+++ b/audio/operations.tsp
@@ -2,34 +2,61 @@ import "@typespec/http";
 import "@typespec/openapi";
 
 import "../common/errors.tsp";
+import "./models.tsp";
 
 using TypeSpec.Http;
 using TypeSpec.OpenAPI;
 
 namespace OpenAI;
 
+
 @route("/audio")
-namespace Audio {
+interface Audio {
+  @route("speech")
+  @post
+  @operationId("createSpeech")
+  @tag("Audio")
+  @summary("Generates audio from the input text.")
+  createSpeech(
+    @body speech: CreateSpeechRequest,
+  ): {
+    /** chunked */
+    @header("Transfer-Encoding") transferEncoding?: string;
+
+    @header contentType: "application/octet-stream";
+    @body @encode("binary") audio: bytes;
+  };
+
   @route("transcriptions")
-  interface Transcriptions {
-    @post
-    @operationId("createTranscription")
-    @tag("OpenAI")
-    @summary("Transcribes audio into the input language.")
-    createTranscription(
-      @header contentType: "multipart/form-data",
-      @body audio: CreateTranscriptionRequest,
-    ): CreateTranscriptionResponse | ErrorResponse;
-  }
+  @post
+  @operationId("createTranscription")
+  @tag("Audio")
+  @summary("Transcribes audio into the input language.")
+  createTranscription(
+    @header contentType: "multipart/form-data",
+    @body audio: CreateTranscriptionRequest,
+  ):
+    | CreateTranscriptionResponse
+    | {
+        // TODO: Is this the appropriate way to describe the multiple possible response types?
+        @header contentType: "text/plain";
+        @body text: string;
+      }
+    | ErrorResponse;
 
   @route("translations")
-  interface Translations {
-    @post
-    @operationId("createTranslation")
-    @tag("OpenAI")
-    @summary("Transcribes audio into the input language.")
-    createTranslation(
-      @header contentType: "multipart/form-data",
-      @body audio: CreateTranslationRequest,
-    ): CreateTranslationResponse | ErrorResponse;
-  }
-}
+  @post
+  @operationId("createTranslation")
+  @tag("Audio")
+  @summary("Translates audio into English.")
+  createTranslation(
+    @header contentType: "multipart/form-data",
+    @body audio: CreateTranslationRequest,
+  ):
+    | CreateTranslationResponse
+    | {
+        // TODO: Is this the appropriate way to describe the multiple possible response types?
+        @header contentType: "text/plain";
+        @body text: string;
+      }
+    | ErrorResponse;
+}
\ No newline at end of file
diff --git a/edits/main.tsp b/chat/main.tsp
similarity index 100%
rename from edits/main.tsp
rename to chat/main.tsp
diff --git a/completions/chat-meta.tsp b/chat/meta.tsp
similarity index 94%
rename from completions/chat-meta.tsp
rename to chat/meta.tsp
index 7823da41d..6df478c87 100644
--- a/completions/chat-meta.tsp
+++ b/chat/meta.tsp
@@ -1,6 +1,10 @@
+import "./operations.tsp";
+
 using TypeSpec.OpenAPI;
 
-@@extension(OpenAI.Completions.createCompletion,
+namespace OpenAI;
+
+@@extension(OpenAI.Chat.createChatCompletion,
   "x-oaiMeta",
   {
     name: "Create chat completion",
@@ -166,3 +170,13 @@ using TypeSpec.OpenAPI;
   ],
 }
);
+
+// TODO: Fill in example here.
+@@extension(OpenAI.CreateChatCompletionResponse,
+  "x-oaiMeta",
+  {
+    name: "The chat completion object",
+    group: "chat",
+    example: "",
+  }
+);
\ No newline at end of file
diff --git a/chat/models.tsp b/chat/models.tsp
new file mode 100644
index 000000000..160afb122
--- /dev/null
+++ b/chat/models.tsp
@@ -0,0 +1,580 @@
+import "../common/models.tsp";
+
+using TypeSpec.OpenAPI;
+
+namespace OpenAI;
+
+model CreateChatCompletionRequest {
+  /**
+   * A list of messages comprising the conversation so far.
+   * [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb).
+   */
+  @minItems(1)
+  messages: ChatCompletionRequestMessage[];
+
+  /**
+   * ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility)
+   * table for details on which models work with the Chat API.
+   */
+  @extension("x-oaiTypeLabel", "string")
+  `model`: string | CHAT_COMPLETION_MODELS;
+
+  /**
+   * Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing
+   * frequency in the text so far, decreasing the model's likelihood to repeat the same line
+   * verbatim.
+   *
+   * [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details)
+   */
+  @minValue(-2)
+  @maxValue(2)
+  frequency_penalty?: float64 | null = 0;
+
+  /**
+   * Modify the likelihood of specified tokens appearing in the completion.
+   *
+   * Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an
+   * associated bias value from -100 to 100. Mathematically, the bias is added to the logits
+   * generated by the model prior to sampling. The exact effect will vary per model, but values
+   * between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100
+   * should result in a ban or exclusive selection of the relevant token.
+   */
+  @extension("x-oaiTypeLabel", "map")
+  logit_bias?: Record<int32> | null = null;
+
+  /**
+   * Whether to return log probabilities of the output tokens or not. If true, returns the log
+   * probabilities of each output token returned in the `content` of `message`. This option is
+   * currently not available on the `gpt-4-vision-preview` model.
+   */
+  logprobs?: boolean | null = false;
+
+  /**
+   * An integer between 0 and 5 specifying the number of most likely tokens to return at each token
+   * position, each with an associated log probability. `logprobs` must be set to `true` if this
+   * parameter is used.
+   */
+  @minValue(0)
+  @maxValue(5)
+  top_logprobs?: safeint | null;
+
+  /**
+   * The maximum number of [tokens](/tokenizer) that can be generated in the chat completion.
+   *
+   * The total length of input tokens and generated tokens is limited by the model's context length.
+   * [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
+   * for counting tokens.
+   */
+  @minValue(0)
+  max_tokens?: safeint | null = 16;
+
+  /**
+   * How many chat completion choices to generate for each input message. Note that you will be
+   * charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to
+   * minimize costs.
+   */
+  @minValue(1)
+  @maxValue(128)
+  n?: safeint | null = 1;
+
+  /**
+   * Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear
+   * in the text so far, increasing the model's likelihood to talk about new topics.
+ * + * [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) + */ + @minValue(-2) + @maxValue(2) + presence_penalty?: float64 | null = 0; + + /** + * An object specifying the format that the model must output. Compatible with + * [GPT-4 Turbo](/docs/models/gpt-4-and-gpt-4-turbo) and `gpt-3.5-turbo-1106`. + * + * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the + * model generates is valid JSON. + * + * **Important:** when using JSON mode, you **must** also instruct the model to produce JSON + * yourself via a system or user message. Without this, the model may generate an unending stream + * of whitespace until the generation reaches the token limit, resulting in a long-running and + * seemingly "stuck" request. Also note that the message content may be partially cut off if + * `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the + * conversation exceeded the max context length. + */ + response_format?: { + /** Must be one of `text` or `json_object`. */ + type?: "text" | "json_object" = "text"; + }; + + /** + * This feature is in Beta. + * + * If specified, our system will make a best effort to sample deterministically, such that + * repeated requests with the same `seed` and parameters should return the same result. + * + * Determinism is not guaranteed, and you should refer to the `system_fingerprint` response + * parameter to monitor changes in the backend. + */ + @extension( + "x-oaiMeta", + { + beta: true + } + ) + @minValue(-9223372036854775808) // TODO: Min and max exceed the limits of safeint. + @maxValue(9223372036854775807) + seed?: safeint | null; + + // TODO: Consider inlining when https://github.com/microsoft/typespec/issues/2356 is resolved + // https://github.com/microsoft/typespec/issues/2355 + /** Up to 4 sequences where the API will stop generating further tokens. */ + stop?: Stop | null = null; + + /** + * If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only + * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) + * as they become available, with the stream terminated by a `data: [DONE]` message. + * [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + */ + stream?: boolean | null = false; + + /** + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output + * more random, while lower values like 0.2 will make it more focused and deterministic. + * + * We generally recommend altering this or `top_p` but not both. + */ + @minValue(0) + @maxValue(2) + temperature?: float64 | null = 1; + + /** + * An alternative to sampling with temperature, called nucleus sampling, where the model considers + * the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising + * the top 10% probability mass are considered. + * + * We generally recommend altering this or `temperature` but not both. + */ + @minValue(0) + @maxValue(1) + top_p?: float64 | null = 1; + + /** + * A list of tools the model may call. Currently, only functions are supported as a tool. Use this + * to provide a list of functions the model may generate JSON inputs for. */ + tools?: ChatCompletionTool[]; + + tool_choice?: ChatCompletionToolChoiceOption; + + /** + * A unique identifier representing your end-user, which can help OpenAI to monitor and detect + * abuse. 
[Learn more](/docs/guides/safety-best-practices/end-user-ids).
+   */
+  user?: User;
+
+  /**
+   * Deprecated in favor of `tool_choice`.
+   *
+   * Controls which (if any) function is called by the model. `none` means the model will not call a
+   * function and instead generates a message. `auto` means the model can pick between generating a
+   * message or calling a function. Specifying a particular function via `{"name": "my_function"}`
+   * forces the model to call that function.
+   *
+   * `none` is the default when no functions are present. `auto` is the default if functions are
+   * present.
+   */
+  #deprecated "deprecated"
+  @extension("x-oaiExpandable", true)
+  function_call?: "none" | "auto" | ChatCompletionFunctionCallOption;
+
+  /**
+   * Deprecated in favor of `tools`.
+   *
+   * A list of functions the model may generate JSON inputs for.
+   */
+  #deprecated "deprecated"
+  @minItems(1)
+  @maxItems(128)
+  functions?: ChatCompletionFunctions[];
+}
+
+/** Represents a chat completion response returned by the model, based on the provided input. */
+model CreateChatCompletionResponse {
+  /** A unique identifier for the chat completion. */
+  id: string;
+
+  /** A list of chat completion choices. Can be more than one if `n` is greater than 1. */
+  choices: {
+    /**
+     * The reason the model stopped generating tokens. This will be `stop` if the model hit a
+     * natural stop point or a provided stop sequence, `length` if the maximum number of tokens
+     * specified in the request was reached, `content_filter` if content was omitted due to a flag
+     * from our content filters, `tool_calls` if the model called a tool, or `function_call`
+     * (deprecated) if the model called a function.
+     */
+    finish_reason: "stop" | "length" | "tool_calls" | "content_filter" | "function_call";
+
+    /** The index of the choice in the list of choices. */
+    index: safeint;
+
+    message: ChatCompletionResponseMessage;
+
+    /** Log probability information for the choice. */
+    logprobs: {
+      content: ChatCompletionTokenLogprob[] | null;
+    } | null;
+  }[];
+
+  /** The Unix timestamp (in seconds) of when the chat completion was created. */
+  @encode("unixTimestamp", int32)
+  created: utcDateTime;
+
+  /** The model used for the chat completion. */
+  `model`: string;
+
+  /**
+   * This fingerprint represents the backend configuration that the model runs with.
+   *
+   * Can be used in conjunction with the `seed` request parameter to understand when backend changes
+   * have been made that might impact determinism.
+   */
+  system_fingerprint?: string;
+
+  /** The object type, which is always `chat.completion`. */
+  object: "chat.completion";
+
+  usage?: CompletionUsage;
+}
+
+alias CHAT_COMPLETION_MODELS =
+  | "gpt-4-0125-preview"
+  | "gpt-4-turbo-preview"
+  | "gpt-4-1106-preview"
+  | "gpt-4-vision-preview"
+  | "gpt-4"
+  | "gpt-4-0314"
+  | "gpt-4-0613"
+  | "gpt-4-32k"
+  | "gpt-4-32k-0314"
+  | "gpt-4-32k-0613"
+  | "gpt-3.5-turbo"
+  | "gpt-3.5-turbo-16k"
+  | "gpt-3.5-turbo-0301"
+  | "gpt-3.5-turbo-0613"
+  | "gpt-3.5-turbo-1106"
+  | "gpt-3.5-turbo-16k-0613";
+
+@oneOf
+union Stop {
+  string,
+  StopSequences,
+}
+
+@minItems(1)
+@maxItems(4)
+model StopSequences is string[];
+
+/** Usage statistics for the completion request. */
+model CompletionUsage {
+  /** Number of tokens in the prompt. */
+  prompt_tokens: safeint;
+
+  /** Number of tokens in the generated completion. */
+  completion_tokens: safeint;
+
+  /** Total number of tokens used in the request (prompt + completion). */
+  total_tokens: safeint;
+}
+
+model ChatCompletionTool {
+  /** The type of the tool. Currently, only `function` is supported. */
+  type: "function";
+
+  function: FunctionObject;
+}
+
+/**
+ * Controls which (if any) function is called by the model. `none` means the model will not call a
+ * function and instead generates a message. `auto` means the model can pick between generating a
+ * message or calling a function. Specifying a particular function via
+ * `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that
+ * function.
+ *
+ * `none` is the default when no functions are present. `auto` is the default if functions are
+ * present.
+ */
+@oneOf
+@extension("x-oaiExpandable", true)
+union ChatCompletionToolChoiceOption {
+  "none",
+  "auto",
+  ChatCompletionNamedToolChoice,
+}
+
+/** Specifies a tool the model should use. Use to force the model to call a specific function. */
+model ChatCompletionNamedToolChoice {
+  /** The type of the tool. Currently, only `function` is supported. */
+  type: "function";
+
+  function: {
+    /** The name of the function to call. */
+    name: string;
+  }
+}
+
+@oneOf
+union ChatCompletionRequestUserMessageContent {
+  /** The text contents of the message. */
+  string,
+
+  /**
+   * An array of content parts with a defined type, each of which can be of type `text` or
+   * `image_url` when passing in images. You can pass multiple images by adding multiple
+   * `image_url` content parts. Image input is only supported when using the
+   * `gpt-4-vision-preview` model.
+   */
+  ChatCompletionRequestMessageContentParts,
+};
+
+@minItems(1)
+model ChatCompletionRequestMessageContentParts is ChatCompletionRequestMessageContentPart[];
+
+@oneOf
+@extension("x-oaiExpandable", true)
+union ChatCompletionRequestMessageContentPart {
+  ChatCompletionRequestMessageContentPartText,
+  ChatCompletionRequestMessageContentPartImage,
+}
+
+model ChatCompletionRequestMessageContentPartText {
+  /** The type of the content part. */
+  type: "text";
+
+  /** The text content. */
+  text: string;
+}
+
+model ChatCompletionRequestMessageContentPartImage {
+  /** The type of the content part. */
+  type: "image_url";
+
+  image_url: {
+    /** Either a URL of the image or the base64 encoded image data. */
+    // TODO: The original OpenAPI spec only describes this as a URL.
+    url: url | string;
+
+    /**
+     * Specifies the detail level of the image. Learn more in the
+     * [Vision guide](/docs/guides/vision/low-or-high-fidelity-image-understanding).
+     */
+    detail?: "auto" | "low" | "high" = "auto";
+  }
+}
+
+/** The tool calls generated by the model, such as function calls. */
+model ChatCompletionMessageToolCalls is ChatCompletionMessageToolCall[];
+
+model ChatCompletionMessageToolCall {
+  /** The ID of the tool call. */
+  id: string;
+
+  /** The type of the tool. Currently, only `function` is supported. */
+  type: "function";
+
+  /** The function that the model called. */
+  function: {
+    /** The name of the function to call. */
+    name: string;
+
+    /**
+     * The arguments to call the function with, as generated by the model in JSON format. Note that
+     * the model does not always generate valid JSON, and may hallucinate parameters not defined by
+     * your function schema. Validate the arguments in your code before calling your function.
+     */
+ */ + arguments: string; + } +}; + +@oneOf +@extension("x-oaiExpandable", true) +union ChatCompletionRequestMessage { + ChatCompletionRequestSystemMessage, + ChatCompletionRequestUserMessage, + ChatCompletionRequestAssistantMessage, + ChatCompletionRequestToolMessage, + ChatCompletionRequestFunctionMessage, +} + +model ChatCompletionRequestSystemMessage { + /** The contents of the system message. */ + @extension("x-oaiExpandable", true) + content: string , + + /** The role of the messages author, in this case `system`. */ + role: "system", + + /** + * An optional name for the participant. Provides the model information to differentiate between + * participants of the same role. + */ + name?: string; +} + +model ChatCompletionRequestUserMessage { + /** The contents of the system message. */ + @extension("x-oaiExpandable", true) + content: ChatCompletionRequestUserMessageContent, + + /** The role of the messages author, in this case `user`. */ + role: "user", + + /** + * An optional name for the participant. Provides the model information to differentiate between + * participants of the same role. + */ + name?: string; +} + +model ChatCompletionRequestAssistantMessage { + /** + * The contents of the assistant message. Required unless `tool_calls` or `function_call` is' + * specified. + */ + content?: string | null, + + /** The role of the messages author, in this case `assistant`. */ + role: "assistant", + + /** + * An optional name for the participant. Provides the model information to differentiate between + * participants of the same role. + */ + name?: string; + + tool_calls?: ChatCompletionMessageToolCalls; + + /** + * Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be + * called, as generated by the model. + */ + #deprecated "deprecated" + function_call?: { + /** + * The arguments to call the function with, as generated by the model in JSON format. Note that + * the model does not always generate valid JSON, and may hallucinate parameters not defined by + * your function schema. Validate the arguments in your code before calling your function. + */ + arguments: string; + + /** The name of the function to call. */ + name: string; + + } +} + +model ChatCompletionRequestToolMessage { + /** The role of the messages author, in this case `tool`. */ + role: "tool", + + /** The contents of the tool message. */ + content: string; + + /** Tool call that this message is responding to. */ + tool_call_id: string; +} + +model ChatCompletionRequestFunctionMessage { + /** The role of the messages author, in this case `function`. */ + role: "function", + + /** The contents of the function message. */ + content: string | null; + + /** The name of the function to call. */ + name: string; +} + +model ChatCompletionResponseMessage { + /** The contents of the message. */ + content: string | null; + + tool_calls?: ChatCompletionMessageToolCalls; + + /** The role of the author of this message. */ + role: "assistant"; + + /** Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model. */ + #deprecated "deprecated" + function_call?: { + /** + * The arguments to call the function with, as generated by the model in JSON format. Note that + * the model does not always generate valid JSON, and may hallucinate parameters not defined by + * your function schema. Validate the arguments in your code before calling your function. + */ + arguments: string; + + /** The name of the function to call. 
*/ + name: string; + }; +} + +model ChatCompletionTokenLogprob { + /** The token. */ + token: string; + + /** The log probability of this token. */ + logprob: float64; + + /** + * A list of integers representing the UTF-8 bytes representation of the token. Useful in + * instances where characters are represented by multiple tokens and their byte representations + * must be combined to generate the correct text representation. Can be `null` if there is no + * bytes representation for the token. + */ + bytes: safeint[] | null; + + /** + * List of the most likely tokens and their log probability, at this token position. In rare + * cases, there may be fewer than the number of requested `top_logprobs` returned. + */ + top_logprobs: { + /** The token. */ + token: string; + + /** The log probability of this token. */ + logprob: float64; + + /** + * A list of integers representing the UTF-8 bytes representation of the token. Useful in + * instances where characters are represented by multiple tokens and their byte representations + * must be combined to generate the correct text representation. Can be `null` if there is no + * bytes representation for the token. + */ + bytes: safeint[] | null; + }[]; +} + +/** + * Specifying a particular function via `{"name": "my_function"}` forces the model to call that + * function. + */ +model ChatCompletionFunctionCallOption { + /** The name of the function to call. */ + name: string; +} + +#deprecated "deprecated" +model ChatCompletionFunctions { + /** + * A description of what the function does, used by the model to choose when and how to call the + * function. + */ + description?: string; + + /** + * The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and + * dashes, with a maximum length of 64. + */ + name: string; + + parameters?: FunctionParameters; +} \ No newline at end of file diff --git a/chat/operations.tsp b/chat/operations.tsp new file mode 100644 index 000000000..0b2972421 --- /dev/null +++ b/chat/operations.tsp @@ -0,0 +1,22 @@ +import "@typespec/http"; +import "@typespec/openapi"; + +import "../common/errors.tsp"; +import "./models.tsp"; + +using TypeSpec.Http; +using TypeSpec.OpenAPI; + +namespace OpenAI; + +@route("/chat") +interface Chat { + @route("completions") + @post + @operationId("createChatCompletion") + @tag("Chat") + @summary("Creates a model response for the given chat conversation.") + createChatCompletion( + ...CreateChatCompletionRequest, + ): CreateChatCompletionResponse | ErrorResponse; +} \ No newline at end of file diff --git a/common/models.tsp b/common/models.tsp index d6d0d4f91..13ab11ca2 100644 --- a/common/models.tsp +++ b/common/models.tsp @@ -1,39 +1,43 @@ -namespace OpenAI; using TypeSpec.OpenAPI; -model ListModelsResponse { - object: string; - data: Model[]; -} - -/** Describes an OpenAI model offering that can be used with the API. */ -model Model { - /** The model identifier, which can be referenced in the API endpoints. */ - id: string; +namespace OpenAI; - /** The object type, which is always "model". */ - object: "model"; +scalar User extends string; - /** The Unix timestamp (in seconds) when the model was created. */ - @encode("unixTimestamp", int32) - created: utcDateTime; +@minItems(1) +model TokenArray is safeint[]; - /** The organization that owns the model. 
*/
-  owned_by: string;
-}
-
-model DeleteModelResponse {
-  id: string;
-  object: string;
-  deleted: boolean;
+enum ListOrder {
+  asc: "asc",
+  desc: "desc",
 }
 
-// this is using yaml refs instead of a def in the openapi, not sure if that's required?
+model FunctionObject {
+  /**
+   * A description of what the function does, used by the model to choose when and how to call the
+   * function.
+   */
+  description?: string;
 
-scalar User extends string;
+  /**
+   * The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and
+   * dashes, with a maximum length of 64.
+   */
+  name: string;
 
-@minItems(1)
-model TokenArray is safeint[];
+  parameters?: FunctionParameters;
+}
 
-@minItems(1)
-model TokenArrayArray is TokenArray[];
+/**
+ * The parameters the function accepts, described as a JSON Schema object. See the
+ * [guide](/docs/guides/gpt/function-calling) for examples, and the
+ * [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation
+ * about the format.
+ *
+ * To describe a function that accepts no parameters, provide the value
+ * `{"type": "object", "properties": {}}`.
+ */
+// TODO: The generated spec produces "additionalProperties: {}" for this instead of
+// "additionalProperties: true". Are they equivalent?
+model FunctionParameters is Record<unknown>;
\ No newline at end of file
diff --git a/completions/meta.tsp b/completions/meta.tsp
new file mode 100644
index 000000000..708dea11d
--- /dev/null
+++ b/completions/meta.tsp
@@ -0,0 +1,35 @@
+import "./models.tsp";
+import "./operations.tsp";
+
+using TypeSpec.OpenAPI;
+
+namespace OpenAI;
+
+// TODO: Fill in example here.
+@@extension(OpenAI.CreateCompletionResponse,
+  "x-oaiMeta",
+  """
+  name: The completion object
+  legacy: true
+  example: |
+    {
+      "id": "cmpl-uqkvlQyYK7bGYrRHQ0eXlWi7",
+      "object": "text_completion",
+      "created": 1589478378,
+      "model": "gpt-3.5-turbo",
+      "choices": [
+        {
+          "text": "\n\nThis is indeed a test",
+          "index": 0,
+          "logprobs": null,
+          "finish_reason": "length"
+        }
+      ],
+      "usage": {
+        "prompt_tokens": 5,
+        "completion_tokens": 7,
+        "total_tokens": 12
+      }
+    }
+  """
+);
\ No newline at end of file
diff --git a/completions/models.tsp b/completions/models.tsp
index 5aa332b32..4122d8bda 100644
--- a/completions/models.tsp
+++ b/completions/models.tsp
@@ -1,80 +1,46 @@
-namespace OpenAI;
-using TypeSpec.OpenAPI;
+import "../common/models.tsp";
+import "../chat/models.tsp";
 
-alias CHAT_COMPLETION_MODELS =
-  | "gpt4"
-  | "gpt-4-0314"
-  | "gpt-4-0613"
-  | "gpt-4-32k"
-  | "gpt-4-32k-0314"
-  | "gpt-4-32k-0613"
-  | "gpt-3.5-turbo"
-  | "gpt-3.5-turbo-16k"
-  | "gpt-3.5-turbo-0301"
-  | "gpt-3.5-turbo-0613"
-  | "gpt-3.5-turbo-16k-0613";
+using TypeSpec.OpenAPI;
 
-alias COMPLETION_MODELS =
-  | "babbage-002"
-  | "davinci-002"
-  | "text-davinci-003"
-  | "text-davinci-002"
-  | "text-davinci-001"
-  | "code-davinci-002"
-  | "text-curie-001"
-  | "text-babbage-001"
-  | "text-ada-001";
+namespace OpenAI;
 
-alias SharedCompletionProperties = {
+model CreateCompletionRequest {
   /**
-   * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output
-   * more random, while lower values like 0.2 will make it more focused and deterministic.
-   *
-   * We generally recommend altering this or `top_p` but not both.
+   * ID of the model to use.
You can use the [List models](/docs/api-reference/models/list) API to + * see all of your available models, or see our [Model overview](/docs/models/overview) for + * descriptions of them. */ - temperature?: Temperature | null = 1; + @extension("x-oaiTypeLabel", "string") + `model`: string | COMPLETION_MODELS; /** - * An alternative to sampling with temperature, called nucleus sampling, where the model considers - * the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising - * the top 10% probability mass are considered. + * The prompt(s) to generate completions for, encoded as a string, array of strings, array of + * tokens, or array of token arrays. * - * We generally recommend altering this or `temperature` but not both. + * Note that <|endoftext|> is the document separator that the model sees during training, so if a + * prompt is not specified the model will generate as if from the beginning of a new document. */ - top_p?: TopP | null = 1; + // TODO: consider inlining when https://github.com/microsoft/typespec/issues/2356 fixed + prompt: Prompt | null = "<|endoftext|>"; /** - * How many completions to generate for each prompt. + * Generates `best_of` completions server-side and returns the "best" (the one with the highest + * log probability per token). Results cannot be streamed. + * + * When used with `n`, `best_of` controls the number of candidate completions and `n` specifies + * how many to return – `best_of` must be greater than `n`. + * * **Note:** Because this parameter generates many completions, it can quickly consume your token * quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. */ - n?: N | null = 1; - - /** - * The maximum number of [tokens](/tokenizer) to generate in the completion. - * - * The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - * [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) - * for counting tokens. - */ - max_tokens?: MaxTokens | null = 16; - - // todo: consider inlining when https://github.com/microsoft/typespec/issues/2356 is resolved - // https://github.com/microsoft/typespec/issues/2355 - /** Up to 4 sequences where the API will stop generating further tokens. */ - stop?: Stop = null; + @minValue(0) + @maxValue(20) + best_of?: safeint | null = 1; - // needs default - // https://github.com/microsoft/typespec/issues/1646 - /** - * Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear - * in the text so far, increasing the model's likelihood to talk about new topics. - * - * [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) - */ - presence_penalty?: Penalty | null; + /** Echo back the prompt in addition to the completion */ + echo?: boolean | null = false; - // needs default /** * Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing * frequency in the text so far, decreasing the model's likelihood to repeat the same line @@ -82,319 +48,137 @@ alias SharedCompletionProperties = { * * [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) */ - frequency_penalty?: Penalty | null; + @minValue(-2) + @maxValue(2) + frequency_penalty?: float64 | null = 0; - // needs default of null /** * Modify the likelihood of specified tokens appearing in the completion. 
- * Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an - * associated bias value from -100 to 100. Mathematically, the bias is added to the logits - * generated by the model prior to sampling. The exact effect will vary per model, but values - * between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 - * should result in a ban or exclusive selection of the relevant token. + * + * Accepts a JSON object that maps tokens (specified by their token ID in the GPT tokenizer) to an + * associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) + * to convert text to token IDs. Mathematically, the bias is added to the logits generated by the + * model prior to sampling. The exact effect will vary per model, but values between -1 and 1 + * should decrease or increase likelihood of selection; values like -100 or 100 should result in a + * ban or exclusive selection of the relevant token. + * + * As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token from being + * generated. */ @extension("x-oaiTypeLabel", "map") - logit_bias?: Record | null; - - /** - * A unique identifier representing your end-user, which can help OpenAI to monitor and detect - * abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). - */ - user?: User; - - /** - * If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only - * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) - * as they become available, with the stream terminated by a `data: [DONE]` message. - * [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb). - */ - stream?: boolean | null = true; -}; - -@oneOf -union Stop { - string, - StopSequences, - null, -} - -@minValue(-2) -@maxValue(2) -scalar Penalty extends float64; - -@minItems(1) -@maxItems(4) -model StopSequences is string[]; - -@minValue(0) -@maxValue(2) -scalar Temperature extends float64; - -@minValue(0) -@maxValue(1) -scalar TopP extends float64; - -@minValue(1) -@maxValue(128) -scalar N extends safeint; - -@minValue(0) -scalar MaxTokens extends safeint; - -model CreateChatCompletionRequest { - /** - * ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) - * table for details on which models work with the Chat API. - */ - @extension("x-oaiTypeLabel", "string") - `model`: string | CHAT_COMPLETION_MODELS; - - /** - * A list of messages comprising the conversation so far. - * [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb). - */ - @minItems(1) - messages: ChatCompletionRequestMessage[]; - - /** A list of functions the model may generate JSON inputs for. */ - @minItems(1) - @maxItems(128) - functions?: ChatCompletionFunctions[]; - - /** - * Controls how the model responds to function calls. `none` means the model does not call a - * function, and responds to the end-user. `auto` means the model can pick between an end-user or - * calling a function. Specifying a particular function via `{\"name":\ \"my_function\"}` forces the - * model to call that function. `none` is the default when no functions are present. `auto` is the - * default if functions are present. 
- */
-  function_call?: "none" | "auto" | ChatCompletionFunctionCallOption;
-
-  ...SharedCompletionProperties;
-}
-
-model ChatCompletionFunctionCallOption {
-  /** The name of the function to call. */
-  name: string;
-}
+  logit_bias?: Record<int32> | null = null;
 
-model ChatCompletionFunctions {
   /**
+   * Include the log probabilities on the `logprobs` most likely tokens, as well as the chosen
+   * tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely
+   * tokens. The API will always return the `logprob` of the sampled token, so there may be up to
+   * `logprobs+1` elements in the response.
+   *
+   * The maximum value for `logprobs` is 5.
    */
-  name: string;
+  @minValue(0)
+  @maxValue(5)
+  logprobs?: safeint | null = null;
 
   /**
-   * A description of what the function does, used by the model to choose when and how to call the
-   * function.
+   * The maximum number of [tokens](/tokenizer) to generate in the completion.
+   *
+   * The token count of your prompt plus `max_tokens` cannot exceed the model's context length.
+   * [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb)
+   * for counting tokens.
    */
-  description?: string;
+  @minValue(0)
+  max_tokens?: safeint | null = 16;
 
   /**
-   * The parameters the functions accepts, described as a JSON Schema object. See the
-   * [guide](/docs/guides/gpt/function-calling) for examples, and the
-   * [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation
-   * about the format.\n\nTo describe a function that accepts no parameters, provide the value
-   * `{\"type\": \"object\", \"properties\": {}}`.
+   * How many completions to generate for each prompt.
+   *
+   * **Note:** Because this parameter generates many completions, it can quickly consume your token
+   * quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.
    */
-  parameters: ChatCompletionFunctionParameters;
-}
+  @minValue(1)
+  @maxValue(128)
+  n?: safeint | null = 1;
 
-model ChatCompletionFunctionParameters is Record;
-
-model ChatCompletionRequestMessage {
-  /** The role of the messages author. One of `system`, `user`, `assistant`, or `function`. */
-  role: "system" | "user" | "assistant" | "function";
   /**
+   * Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear
+   * in the text so far, increasing the model's likelihood to talk about new topics.
+   *
+   * [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details)
    */
-  content: string | null;
+  @minValue(-2)
+  @maxValue(2)
+  presence_penalty?: float64 | null = 0;
+
+  /**
+   * If specified, our system will make a best effort to sample deterministically, such that
+   * repeated requests with the same `seed` and parameters should return the same result.
+   *
+   * Determinism is not guaranteed, and you should refer to the `system_fingerprint` response
+   * parameter to monitor changes in the backend.
+   */
+  @extension(
+    "x-oaiMeta",
+    {
+      beta: true
+    }
+  )
+  @minValue(-9223372036854775808) // TODO: Min and max exceed the limits of safeint.
+ @maxValue(9223372036854775807) + seed?: safeint | null; + + // TODO: Consider inlining when https://github.com/microsoft/typespec/issues/2356 is resolved + // https://github.com/microsoft/typespec/issues/2355 + /** Up to 4 sequences where the API will stop generating further tokens. */ + stop?: Stop | null = null; - // TODO: the constraints are not specified in the API /** - * The name of the author of this message. `name` is required if role is `function`, and it - * should be the name of the function whose response is in the `content`. May contain a-z, - * A-Z, 0-9, and underscores, with a maximum length of 64 characters. + * If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only + * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) + * as they become available, with the stream terminated by a `data: [DONE]` message. + * [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb). */ - name?: string; - - /** The name and arguments of a function that should be called, as generated by the model. */ - function_call?: { - /** The name of the function to call. */ - name: string; + stream?: boolean | null = false; - /** - * The arguments to call the function with, as generated by the model in JSON format. Note that - * the model does not always generate valid JSON, and may hallucinate parameters not defined by - * your function schema. Validate the arguments in your code before calling your function. - */ - arguments: string; - }; -} - -/** Represents a chat completion response returned by model, based on the provided input. */ -// TODO: Fill in example here. -@extension( - "x-oaiMeta", - { - name: "The chat completion object", - group: "chat", - example: "", - } -) -model CreateChatCompletionResponse { - /** A unique identifier for the chat completion. */ - id: string; - - /** The object type, which is always `chat.completion`. */ - object: string; - - /** The Unix timestamp (in seconds) of when the chat completion was created. */ - @encode("unixTimestamp", int32) - created: utcDateTime; - - /** The model used for the chat completion. */ - `model`: string; - - /** A list of chat completion choices. Can be more than one if `n` is greater than 1. */ - choices: { - /** The index of the choice in the list of choices. */ - index: safeint; - - message: ChatCompletionResponseMessage; - - /** - * The reason the model stopped generating tokens. This will be `stop` if the model hit a - * natural stop point or a provided stop sequence, `length` if the maximum number of tokens - * specified in the request was reached, `content_filter` if the content was omitted due to - * a flag from our content filters, or `function_call` if the model called a function. - */ - finish_reason: "stop" | "length" | "function_call" | "content_filter"; - }[]; - - usage?: CompletionUsage; -} - -/** Usage statistics for the completion request. */ -model CompletionUsage { - /** Number of tokens in the prompt. */ - prompt_tokens: safeint; - - /** Number of tokens in the generated completion */ - completion_tokens: safeint; - - /** Total number of tokens used in the request (prompt + completion). */ - total_tokens: safeint; -} - -model ChatCompletionResponseMessage { - /** The role of the author of this message. */ - role: "system" | "user" | "assistant" | "function"; - - /** The contents of the message. 
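The `stream` flag documented above switches the response to data-only server-sent events that end with a `data: [DONE]` sentinel. A sketch of consuming that stream, again assuming `requests` and `OPENAI_API_KEY`:

```python
import json
import os
import requests

# Each SSE line looks like "data: {...}"; the stream terminates with
# "data: [DONE]" as described in the `stream` documentation above.
with requests.post(
    "https://api.openai.com/v1/completions",
    headers={"Authorization": f"Bearer {os.environ['OPENAI_API_KEY']}"},
    json={"model": "gpt-3.5-turbo-instruct", "prompt": "Say hello", "stream": True},
    stream=True,
) as response:
    for line in response.iter_lines():
        if not line or not line.startswith(b"data: "):
            continue
        payload = line[len(b"data: "):]
        if payload == b"[DONE]":
            break
        chunk = json.loads(payload)
        print(chunk["choices"][0]["text"], end="", flush=True)
```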
*/ - content: string | null; - - /** The name and arguments of a function that should be called, as generated by the model. */ - function_call?: { - /** The name of the function to call. */ - name: string; - - /** - * The arguments to call the function with, as generated by the model in JSON format. Note that - * the model does not always generate valid JSON, and may hallucinate parameters not defined by - * your function schema. Validate the arguments in your code before calling your function. - */ - arguments: string; - }; -} - -model CreateCompletionRequest { - /** - * ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to - * see all of your available models, or see our [Model overview](/docs/models/overview) for - * descriptions of them. - */ - @extension("x-oaiTypeLabel", "string") - `model`: string | COMPLETION_MODELS; + /** The suffix that comes after a completion of inserted text. */ + suffix?: string | null = null; /** - * The prompt(s) to generate completions for, encoded as a string, array of strings, array of - * tokens, or array of token arrays. + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output + * more random, while lower values like 0.2 will make it more focused and deterministic. * - * Note that <|endoftext|> is the document separator that the model sees during training, so if a - * prompt is not specified the model will generate as if from the beginning of a new document. + * We generally recommend altering this or `top_p` but not both. */ - // TODO: consider inlining when https://github.com/microsoft/typespec/issues/2356 fixed - prompt: Prompt = "<|endoftext|>"; - - /** The suffix that comes after a completion of inserted text. */ - suffix?: string | null = null; - - ...SharedCompletionProperties; + @minValue(0) + @maxValue(2) + temperature?: float64 | null = 1; /** - * Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. - * For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The - * API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` - * elements in the response. + * An alternative to sampling with temperature, called nucleus sampling, where the model considers + * the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising + * the top 10% probability mass are considered. * - * The maximum value for `logprobs` is 5. + * We generally recommend altering this or `temperature` but not both. */ - logprobs?: safeint | null = null; - - /** Echo back the prompt in addition to the completion */ - echo?: boolean | null = false; + @minValue(0) + @maxValue(1) + top_p?: float64 | null = 1; /** - * Generates `best_of` completions server-side and returns the "best" (the one with the highest - * log probability per token). Results cannot be streamed. - * - * When used with `n`, `best_of` controls the number of candidate completions and `n` specifies - * how many to return – `best_of` must be greater than `n`. - * - * **Note:** Because this parameter generates many completions, it can quickly consume your token - * quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. + * A unique identifier representing your end-user, which can help OpenAI to monitor and detect + * abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). 
*/ - best_of?: safeint | null = 1; + user?: User; } -@oneOf -union Prompt { - string, - string[], - TokenArray, - TokenArrayArray, - null, -} /** * Represents a completion response from the API. Note: both the streamed and non-streamed response * objects share the same shape (unlike the chat endpoint). */ -@extension( - "x-oaiMeta", - { - name: "The completion object", - legacy: true, - example: "", // fill in - } -) model CreateCompletionResponse { /** A unique identifier for the completion. */ id: string; - /** The object type, which is always `text_completion`. */ - object: string; - - /** The Unix timestamp (in seconds) of when the completion was created. */ - @encode("unixTimestamp", int32) - created: utcDateTime; - - /** The model used for the completion. */ - `model`: string; - /** The list of completion choices the model generated for the input. */ choices: { index: safeint; @@ -413,8 +197,42 @@ model CreateCompletionResponse { * in the request was reached, or `content_filter` if content was omitted due to a flag from our * content filters. */ + // TODO: The generated spec includes other values like "tool_calls" and "function_call". + // Is it because we're importing /chat/models.tsp? finish_reason: "stop" | "length" | "content_filter"; }[]; + /** The Unix timestamp (in seconds) of when the completion was created. */ + @encode("unixTimestamp", int32) + created: utcDateTime; + + /** The model used for the completion. */ + `model`: string; + + /** + * This fingerprint represents the backend configuration that the model runs with. + * + * Can be used in conjunction with the `seed` request parameter to understand when backend changes + * have been made that might impact determinism. + */ + system_fingerprint?: string; + + /** The object type, which is always `text_completion`. */ + object: "text_completion"; + + /** Usage statistics for the completion request. */ usage?: CompletionUsage; } + +alias COMPLETION_MODELS = + | "gpt-3.5-turbo-instruct" + | "davinci-002" + | "babbage-002"; + +@oneOf +union Prompt { + string, + string[], + TokenArray, + TokenArrayArray, +} \ No newline at end of file diff --git a/completions/operations.tsp b/completions/operations.tsp index d53245f7c..b24f018df 100644 --- a/completions/operations.tsp +++ b/completions/operations.tsp @@ -3,30 +3,18 @@ import "@typespec/openapi"; import "../common/errors.tsp"; import "./models.tsp"; -import "./chat-meta.tsp"; using TypeSpec.Http; using TypeSpec.OpenAPI; namespace OpenAI; -@route("/chat") -namespace Chat { - @route("/completions") - interface Completions { - @tag("OpenAI") - @post - @operationId("createChatCompletion") - createChatCompletion( - ...CreateChatCompletionRequest, - ): CreateChatCompletionResponse | ErrorResponse; - } -} @route("/completions") interface Completions { - @tag("OpenAI") @post @operationId("createCompletion") + @tag("Completions") + @summary("Creates a completion for the provided prompt and parameters.") createCompletion( ...CreateCompletionRequest, ): CreateCompletionResponse | ErrorResponse; diff --git a/edits/models.tsp b/edits/models.tsp deleted file mode 100644 index d76372649..000000000 --- a/edits/models.tsp +++ /dev/null @@ -1,69 +0,0 @@ -namespace OpenAI; -using TypeSpec.OpenAPI; - -model CreateEditRequest { - /** - * ID of the model to use. You can use the `text-davinci-edit-001` or `code-davinci-edit-001` - * model with this endpoint. 
- */ - @extension("x-oaiTypeLabel", "string") - `model`: string | "text-davinci-edit-001" | "code-davinci-edit-001"; - - /** The input text to use as a starting point for the edit. */ - input?: string | null = ""; - - /** The instruction that tells the model how to edit the prompt. */ - instruction: string; - - /** How many edits to generate for the input and instruction. */ - n?: EditN | null = 1; - - /** - * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output - * more random, while lower values like 0.2 will make it more focused and deterministic. - * - * We generally recommend altering this or `top_p` but not both. - */ - temperature?: Temperature | null = 1; - - /** - * An alternative to sampling with temperature, called nucleus sampling, where the model considers - * the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising - * the top 10% probability mass are considered. - * - * We generally recommend altering this or `temperature` but not both. - */ - top_p?: TopP | null = 1; -} - -#deprecated "deprecated" -model CreateEditResponse { - /** The object type, which is always `edit`. */ - object: "edit"; - - /** The Unix timestamp (in seconds) of when the edit was created. */ - @encode("unixTimestamp", int32) - created: utcDateTime; - - /** description: A list of edit choices. Can be more than one if `n` is greater than 1. */ - choices: { - /** The edited result. */ - text: string; - - /** The index of the choice in the list of choices. */ - index: safeint; - - /** - * The reason the model stopped generating tokens. This will be `stop` if the model hit a - * natural stop point or a provided stop sequence, or `length` if the maximum number of tokens - * specified in the request was reached. - */ - finish_reason: "stop" | "length"; - }[]; - - usage: CompletionUsage; -} - -@minValue(0) -@maxValue(20) -scalar EditN extends safeint; diff --git a/edits/operations.tsp b/edits/operations.tsp deleted file mode 100644 index 08497364e..000000000 --- a/edits/operations.tsp +++ /dev/null @@ -1,19 +0,0 @@ -import "@typespec/http"; -import "@typespec/openapi"; - -import "../common/errors.tsp"; -import "./models.tsp"; - -using TypeSpec.Http; -using TypeSpec.OpenAPI; - -namespace OpenAI; - -@route("/edits") -interface Edits { - #deprecated "deprecated" - @post - @tag("OpenAI") - @operationId("createEdit") - createEdit(@body edit: CreateEditRequest): CreateEditResponse | ErrorResponse; -} diff --git a/embeddings/meta.tsp b/embeddings/meta.tsp new file mode 100644 index 000000000..284eb01dc --- /dev/null +++ b/embeddings/meta.tsp @@ -0,0 +1,24 @@ +import "./models.tsp"; +import "./operations.tsp"; + +using TypeSpec.OpenAPI; + +namespace OpenAI; + +@@extension(OpenAI.Embedding, + "x-oaiMeta", + """ + name: The embedding object + example: | + { + "object": "embedding", + "embedding": [ + 0.0023064255, + -0.009327292, + .... (1536 floats total for ada-002) + -0.0028842222, + ], + "index": 0 + } + """ +); \ No newline at end of file diff --git a/embeddings/models.tsp b/embeddings/models.tsp index ab46275b2..fb6ab394a 100644 --- a/embeddings/models.tsp +++ b/embeddings/models.tsp @@ -1,42 +1,81 @@ import "../common/models.tsp"; -namespace OpenAI; using TypeSpec.OpenAPI; -model CreateEmbeddingRequest { - /** ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. 
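The reworked `CreateEmbeddingRequest` here accepts a single string, an array of strings, or token arrays via `input`, plus the new `dimensions` knob for `text-embedding-3` and later models. A sketch of batching several strings in one call, assuming `requests` and `OPENAI_API_KEY`:

```python
import os
import requests

# Passing a list of strings embeds several inputs in a single request,
# per the CreateEmbeddingRequestInput union above.
response = requests.post(
    "https://api.openai.com/v1/embeddings",
    headers={"Authorization": f"Bearer {os.environ['OPENAI_API_KEY']}"},
    json={
        "model": "text-embedding-3-small",
        "input": ["first document", "second document"],
        "dimensions": 256,  # only supported by text-embedding-3 and later
    },
)
for item in response.json()["data"]:
    print(item["index"], len(item["embedding"]))
```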
*/ - @extension("x-oaiTypeLabel", "string") - `model`: string | "text-embedding-ada-002"; +namespace OpenAI; +model CreateEmbeddingRequest { /** * Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a * single request, pass an array of strings or array of token arrays. Each input must not exceed - * the max input tokens for the model (8191 tokens for `text-embedding-ada-002`) and cannot be an empty string. + * the max input tokens for the model (8191 tokens for `text-embedding-ada-002`) and cannot be an + * empty string. * [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) * for counting tokens. */ - input: string | string[] | TokenArray | TokenArrayArray; + @extension("x-oaiExpandable", true) + input: CreateEmbeddingRequestInput; + /** + * ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to + * see all of your available models, or see our [Model overview](/docs/models/overview) for + * descriptions of them. + */ + @extension("x-oaiTypeLabel", "string") + `model`: string | EMBEDDINGS_MODELS; + + /** + * The format to return the embeddings in. Can be either `float` or + * [`base64`](https://pypi.org/project/pybase64/). + */ + encoding_format?: "float" | "base64" = "float"; + + /** + * The number of dimensions the resulting output embeddings should have. Only supported in + * `text-embedding-3` and later models. + */ + @minValue(1) + dimensions?: safeint; + + /** + * A unique identifier representing your end-user, which can help OpenAI to monitor and detect + * abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). + */ user?: User; } -model CreateEmbeddingResponse { - /** The object type, which is always "embedding". */ - object: "embedding"; - - /** The name of the model used to generate the embedding. */ - `model`: string; +model CreateEmbeddingResponse { /** The list of embeddings generated by the model. */ data: Embedding[]; + + /** The name of the model used to generate the embedding. */ + `model`: string; + + /** The object type, which is always "list". */ + object: "list"; /** The usage information for the request. */ - usage: { - /** The number of tokens used by the prompt. */ - prompt_tokens: safeint; + usage: EmbeddingUsage; +} + +alias EMBEDDINGS_MODELS = + | "text-embedding-ada-002" + | "text-embedding-3-small" + | "text-embedding-3-large"; + +@oneOf +union CreateEmbeddingRequestInput { + /** The string that will be turned into an embedding. */ + string, + + /** The array of strings that will be turned into an embedding. */ + string[], - /** The total number of tokens used by the request. */ - total_tokens: safeint; - }; + /** The array of integers that will be turned into an embedding. */ + TokenArray, + + /** The array of arrays containing integers that will be turned into an embedding. */ + TokenArrayArray; } /** Represents an embedding vector returned by embedding endpoint. */ @@ -44,12 +83,20 @@ model Embedding { /** The index of the embedding in the list of embeddings. */ index: safeint; - /** The object type, which is always "embedding". */ - object: "embedding"; - /** - * The embedding vector, which is a list of floats. The length of vector depends on the model as\ + * The embedding vector, which is a list of floats. The length of vector depends on the model as * listed in the [embedding guide](/docs/guides/embeddings). 
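The `embedding` field widening to `float64[] | string` below pairs with the `encoding_format: "base64"` request option above: the vector comes back as a base64 string. Decoding it as packed little-endian float32 values matches what the official Python SDK assumes, though that byte layout is an assumption here rather than something this spec states:

```python
import base64
import os
import struct
import requests

response = requests.post(
    "https://api.openai.com/v1/embeddings",
    headers={"Authorization": f"Bearer {os.environ['OPENAI_API_KEY']}"},
    json={
        "model": "text-embedding-ada-002",
        "input": "hello world",
        "encoding_format": "base64",
    },
)
encoded = response.json()["data"][0]["embedding"]  # a base64 string, not a list
raw = base64.b64decode(encoded)
floats = struct.unpack(f"<{len(raw) // 4}f", raw)  # assumed little-endian float32
print(len(floats))  # 1536 for text-embedding-ada-002
```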
*/ - embedding: float64[]; + embedding: float64[] | string; + + /** The object type, which is always "embedding". */ + object: "embedding"; } + +model EmbeddingUsage { + /** The number of tokens used by the prompt. */ + prompt_tokens: safeint; + + /** The total number of tokens used by the request. */ + total_tokens: safeint; +}; \ No newline at end of file diff --git a/embeddings/operations.tsp b/embeddings/operations.tsp index 012d97c58..61c8e1839 100644 --- a/embeddings/operations.tsp +++ b/embeddings/operations.tsp @@ -11,10 +11,10 @@ namespace OpenAI; @route("/embeddings") interface Embeddings { - @tag("OpenAI") - @summary("Creates an embedding vector representing the input text.") @post @operationId("createEmbedding") + @tag("Embeddings") + @summary("Creates an embedding vector representing the input text.") createEmbedding( @body embedding: CreateEmbeddingRequest, ): CreateEmbeddingResponse | ErrorResponse; diff --git a/files/meta.tsp b/files/meta.tsp new file mode 100644 index 000000000..d1c14977b --- /dev/null +++ b/files/meta.tsp @@ -0,0 +1,22 @@ +import "./models.tsp"; +import "./operations.tsp"; + +using TypeSpec.OpenAPI; + +namespace OpenAI; + +@@extension(OpenAI.OpenAIFile, + "x-oaiMeta", + """ + name: The file object + example: | + { + "id": "file-abc123", + "object": "file", + "bytes": 120000, + "created_at": 1677610602, + "filename": "salesOverview.pdf", + "purpose": "assistants", + } + """ +); \ No newline at end of file diff --git a/files/models.tsp b/files/models.tsp index 990c1ea11..7cfd02a68 100644 --- a/files/models.tsp +++ b/files/models.tsp @@ -1,70 +1,75 @@ -namespace OpenAI; using TypeSpec.OpenAPI; -model ListFilesResponse { - object: string; // presumably this is always some constant, but not defined. - data: OpenAIFile[]; -} +namespace OpenAI; model CreateFileRequest { /** - * Name of the [JSON Lines](https://jsonlines.readthedocs.io/en/latest/) file to be uploaded. - * - * If the `purpose` is set to "fine-tune", the file will be used for fine-tuning. + * The file object (not file name) to be uploaded. */ @encode("binary") file: bytes; /** - * The intended purpose of the uploaded documents. Use "fine-tune" for - * [fine-tuning](/docs/api-reference/fine-tuning). This allows us to validate the format of the - * uploaded file. + * The intended purpose of the uploaded file. Use "fine-tune" for + * [Fine-tuning](/docs/api-reference/fine-tuning) and "assistants" for + * [Assistants](/docs/api-reference/assistants) and [Messages](/docs/api-reference/messages). This + * allows us to validate the format of the uploaded file is correct for fine-tuning. */ - purpose: string; + purpose: "fine-tune" | "assistants"; +} + +model ListFilesResponse { + data: OpenAIFile[]; + object: "list"; +} + +model DeleteFileResponse { + id: string; + object: "file"; + deleted: boolean; } +alias FILE_PURPOSE = + | "fine-tune" + | "fine-tune-results" + | "assistants" + | "assistants_output"; + /** The `File` object represents a document that has been uploaded to OpenAI. */ model OpenAIFile { /** The file identifier, which can be referenced in the API endpoints. */ id: string; - /** The object type, which is always "file". */ - object: "file"; - - /** The size of the file in bytes. */ - bytes: safeint; + /** The size of the file, in bytes. */ + bytes: safeint | null; // TODO: This is not nullable in the OpenAPI spec, but it is in practice. /** The Unix timestamp (in seconds) for when the file was created. 
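`CreateFileRequest` above is a `multipart/form-data` body: raw file bytes plus a `purpose` of `fine-tune` or `assistants`. A sketch of uploading a fine-tuning dataset (the file name is a placeholder), assuming `requests` and `OPENAI_API_KEY`:

```python
import os
import requests

# Multipart upload: the "file" part carries the bytes, the "purpose" field
# tells the API how to validate the format.
with open("training.jsonl", "rb") as f:  # placeholder file name
    response = requests.post(
        "https://api.openai.com/v1/files",
        headers={"Authorization": f"Bearer {os.environ['OPENAI_API_KEY']}"},
        files={"file": ("training.jsonl", f)},
        data={"purpose": "fine-tune"},
    )
uploaded = response.json()
print(uploaded["id"], uploaded["purpose"], uploaded["status"])
```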
*/ @encode("unixTimestamp", int32) - createdAt: utcDateTime; + created_at: utcDateTime; /** The name of the file. */ filename: string; - /** The intended purpose of the file. Currently, only "fine-tune" is supported. */ - purpose: string; + /** The object type, which is always "file". */ + object: "file"; - /** - * The current status of the file, which can be either `uploaded`, `processed`, `pending`, - * `error`, `deleting` or `deleted`. + /** + * The intended purpose of the file. Supported values are `fine-tune`, `fine-tune-results`, + * `assistants`, and `assistants_output`. */ - status: - | "uploaded" - | "processed" - | "pending" - | "error" - | "deleting" - | "deleted"; + purpose: FILE_PURPOSE; /** - * Additional details about the status of the file. If the file is in the `error` state, this will - * include a message describing the error. + * Deprecated. The current status of the file, which can be either `uploaded`, `processed`, or + * `error`. */ - status_details?: string | null; -} + #deprecated "deprecated" + status: "uploaded" | "processed" | "error"; -model DeleteFileResponse { - id: string; - object: string; - deleted: boolean; -} + /** + * Deprecated. For details on why a fine-tuning training file failed validation, see the `error` + * field on `fine_tuning.job`. + */ + #deprecated "deprecated" + status_details?: string; +} \ No newline at end of file diff --git a/files/operations.tsp b/files/operations.tsp index 2e601ae03..94320fa3d 100644 --- a/files/operations.tsp +++ b/files/operations.tsp @@ -11,48 +11,61 @@ namespace OpenAI; @route("/files") interface Files { - @tag("OpenAI") - @get - @summary("Returns a list of files that belong to the user's organization.") - @operationId("listFiles") - listFiles(): ListFilesResponse | ErrorResponse; - - @tag("OpenAI") @post - @summary("Returns a list of files that belong to the user's organization.") @operationId("createFile") + @tag("Files") + @summary(""" + Upload a file that can be used across various endpoints. The size of all the files uploaded by + one organization can be up to 100 GB. + + The size of individual files can be a maximum of 512 MB or 2 million tokens for Assistants. See + the [Assistants Tools guide](/docs/assistants/tools) to learn more about the types of files + supported. The Fine-tuning API only supports `.jsonl` files. + + Please [contact us](https://help.openai.com/) if you need to increase these storage limits. + """) createFile( @header contentType: "multipart/form-data", @body file: CreateFileRequest, ): OpenAIFile | ErrorResponse; - @tag("OpenAI") - @post - @summary("Returns information about a specific file.") + @get + @operationId("listFiles") + @tag("Files") + @summary("Returns a list of files that belong to the user's organization.") + listFiles( + /** Only return files with the given purpose. */ + // NOTE: This is just a string in the OpenAPI spec. + @query purpose?: string, + ): ListFilesResponse | ErrorResponse; + + @route("{file_id}") + @get @operationId("retrieveFile") - @route("/files/{file_id}") + @tag("Files") + @summary("Returns information about a specific file.") retrieveFile( /** The ID of the file to use for this request. */ @path file_id: string, ): OpenAIFile | ErrorResponse; - @tag("OpenAI") + @route("{file_id}") @delete - @summary("Delete a file") @operationId("deleteFile") - @route("/files/{file_id}") + @tag("Files") + @summary("Delete a file") deleteFile( /** The ID of the file to use for this request. 
*/ @path file_id: string, ): DeleteFileResponse | ErrorResponse; - @route("/files/{file_id}/content") - @tag("OpenAI") + @route("{file_id}/content") @get - @summary("Returns the contents of the specified file.") @operationId("downloadFile") + @tag("Files") + @summary("Returns the contents of the specified file.") downloadFile( /** The ID of the file to use for this request. */ @path file_id: string, - ): string | ErrorResponse; + ): string | ErrorResponse; // TODO: The OpenAPI spec says this is a string but the Content-Type is application/json? } diff --git a/fine-tuning/models.tsp b/fine-tuning/models.tsp index bf846072b..5fa91066e 100644 --- a/fine-tuning/models.tsp +++ b/fine-tuning/models.tsp @@ -1,416 +1,213 @@ -namespace OpenAI; using TypeSpec.OpenAPI; -model FineTuningJob { - /** The object identifier, which can be referenced in the API endpoints. */ - id: string; - - /** The object type, which is always "fine_tuning.job". */ - object: "fine_tuning.job"; - - /** The Unix timestamp (in seconds) for when the fine-tuning job was created. */ - @encode("unixTimestamp", int32) - created_at: utcDateTime; +namespace OpenAI; +model CreateFineTuningJobRequest { /** - * The Unix timestamp (in seconds) for when the fine-tuning job was finished. The value will be - * null if the fine-tuning job is still running. + * The name of the model to fine-tune. You can select one of the + * [supported models](/docs/guides/fine-tuning/what-models-can-be-fine-tuned). */ - @encode("unixTimestamp", int32) - finished_at: utcDateTime | null; - - /** The base model that is being fine-tuned. */ - `model`: string; + @extension("x-oaiTypeLabel", "string") + `model`: string | "babbage-002" | "davinci-002" | "gpt-3.5-turbo"; /** - * The name of the fine-tuned model that is being created. The value will be null if the - * fine-tuning job is still running. + * The ID of an uploaded file that contains training data. + * + * See [upload file](/docs/api-reference/files/upload) for how to upload a file. + * + * Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with + * the purpose `fine-tune`. + * + * See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. */ - fine_tuned_model: string | null; + training_file: string; - /** The organization that owns the fine-tuning job. */ - organization_id: string; + /** The hyperparameters used for the fine-tuning job. */ + hyperparameters?: { + /** + * Number of examples in each batch. A larger batch size means that model parameters are + * updated less frequently, but with lower variance. + */ + batch_size?: "auto" | BatchSize = "auto"; - /** - * The current status of the fine-tuning job, which can be either `created`, `pending`, `running`, - * `succeeded`, `failed`, or `cancelled`. - */ - status: - | "created" - | "pending" - | "running" - | "succeeded" - | "failed" - | "cancelled"; + /** + * Scaling factor for the learning rate. A smaller learning rate may be useful to avoid + * overfitting. + */ + learning_rate_multiplier?: "auto" | LearningRateMultiplier = "auto"; - /** - * The hyperparameters used for the fine-tuning job. See the - * [fine-tuning guide](/docs/guides/fine-tuning) for more details. - */ - hyperparameters: { /** * The number of epochs to train the model for. An epoch refers to one full cycle through the * training dataset. - * - * "Auto" decides the optimal number of epochs based on the size of the dataset. If setting the - * number manually, we support any number between 1 and 50 epochs. 
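Putting `CreateFineTuningJobRequest` together: `training_file` references a previously uploaded file with purpose `fine-tune`, and each hyperparameter defaults to `"auto"`. A sketch with a placeholder file ID, assuming `requests` and `OPENAI_API_KEY`:

```python
import os
import requests

# "file-abc123" is a placeholder: use the ID returned by the file upload.
response = requests.post(
    "https://api.openai.com/v1/fine_tuning/jobs",
    headers={"Authorization": f"Bearer {os.environ['OPENAI_API_KEY']}"},
    json={
        "model": "gpt-3.5-turbo",
        "training_file": "file-abc123",
        "hyperparameters": {"n_epochs": 3},  # or "auto" (the default)
        "suffix": "custom-model-name",
    },
)
job = response.json()
print(job["id"], job["status"])
```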
*/ n_epochs?: "auto" | NEpochs = "auto"; }; /** - * The file ID used for training. You can retrieve the training data with the - * [Files API](/docs/api-reference/files/retrieve-contents). + * A string of up to 18 characters that will be added to your fine-tuned model name. + * + * For example, a `suffix` of "custom-model-name" would produce a model name like + * `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`. */ - training_file: string; + suffix?: SuffixString | null = null; /** - * The file ID used for validation. You can retrieve the validation results with the - * [Files API](/docs/api-reference/files/retrieve-contents). + * The ID of an uploaded file that contains validation data. + * + * If you provide this file, the data is used to generate validation metrics periodically during + * fine-tuning. These metrics can be viewed in the fine-tuning results file. The same data should + * not be present in both train and validation files. + * + * Your dataset must be formatted as a JSONL file. You must upload your file with the purpose + * `fine-tune`. + * + * See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. */ - validation_file: string | null; + validation_file?: string | null; +} - /** - * The compiled results file ID(s) for the fine-tuning job. You can retrieve the results with the - * [Files API](/docs/api-reference/files/retrieve-contents). - */ - result_files: string[]; +model ListPaginatedFineTuningJobsResponse { + data: FineTuningJob[]; - /** - * The total number of billable tokens processed by this fine tuning job. The value will be null - * if the fine-tuning job is still running. - */ - trained_tokens: safeint | null; + has_more: boolean; - /** - * For fine-tuning jobs that have `failed`, this will contain more information on the cause of the - * failure. - */ - error: { - /** A human-readable error message. */ - message?: string; // likely should be required, but spec doesn't say so. + object: "list"; +} - /** A machine-readable error code. */ - code?: string; +model ListFineTuningJobEventsResponse { + data: FineTuningJobEvent[]; - /** - * The parameter that was invalid, usually `training_file` or `validation_file`. This field - * will be null if the failure was not parameter-specific. - */ - param?: string | null; - } | null; + object: "list"; + + // TODO: The sample in the OpenAPI spec includes a `has_more` property that is not included here. } -model FineTuningEvent { - object: string; +@minValue(1) +@maxValue(50) +scalar NEpochs extends safeint; - @encode("unixTimestamp", int32) - created_at: utcDateTime; +@minValue(1) +@maxValue(256) +scalar BatchSize extends safeint; - level: string; - message: string; - data?: Record | null; - type?: "message" | "metrics"; // "default is "none"? -} +@minValueExclusive(0) +scalar LearningRateMultiplier extends float64; -/** The `FineTune` object represents a legacy fine-tune job that has been created through the API. */ -#deprecated "deprecated" -model FineTune { +@minLength(1) +@maxLength(40) +scalar SuffixString extends string; + +/* The `fine_tuning.job` object represents a fine-tuning job that has been created through the API. */ +model FineTuningJob { /** The object identifier, which can be referenced in the API endpoints. */ id: string; - /** The object type, which is always "fine-tune". */ - object: "fine-tune"; - /** The Unix timestamp (in seconds) for when the fine-tuning job was created. 
*/ @encode("unixTimestamp", int32) created_at: utcDateTime; - /** The Unix timestamp (in seconds) for when the fine-tuning job was last updated. */ - @encode("unixTimestamp", int32) - updated_at: utcDateTime; - - /** The base model that is being fine-tuned. */ - `model`: string; - - /** The name of the fine-tuned model that is being created. */ - fine_tuned_model: string | null; - - /** The organization that owns the fine-tuning job. */ - organization_id: string; - /** - * The current status of the fine-tuning job, which can be either `created`, `running`, - * `succeeded`, `failed`, or `cancelled`. + * For fine-tuning jobs that have `failed`, this will contain more information on the cause of the + * failure. */ - status: "created" | "running" | "succeeded" | "failed" | "cancelled"; + error: { + /** A machine-readable error code. */ + code: string; - /** - * The hyperparameters used for the fine-tuning job. See the - * [fine-tuning guide](/docs/guides/legacy-fine-tuning/hyperparameters) for more details. - */ - hyperparams: { - /** - * The number of epochs to train the model for. An epoch refers to one full cycle through the - * training dataset. - */ - n_epochs: safeint; + /** A human-readable error message. */ + message: string; /** - * The batch size to use for training. The batch size is the number of training examples used to - * train a single forward and backward pass. + * The parameter that was invalid, usually `training_file` or `validation_file`. This field will + * be null if the failure was not parameter-specific. */ - batch_size: safeint; - - /** The weight to use for loss on the prompt tokens. */ - prompt_loss_weight: float64; - - /** The learning rate multiplier to use for training. */ - learning_rate_multiplier: float64; - - /** The classification metrics to compute using the validation dataset at the end of every epoch. */ - compute_classification_metrics?: boolean; - - /** The positive class to use for computing classification metrics. */ - classification_positive_class?: string; - - /** The number of classes to use for computing classification metrics. */ - classification_n_classes?: safeint; - }; - - /** The list of files used for training. */ - training_files: OpenAIFile[]; - - /** The list of files used for validation. */ - validation_files: OpenAIFile[]; - - /** The compiled results files for the fine-tuning job. */ - result_files: OpenAIFile[]; - - /** The list of events that have been observed in the lifecycle of the FineTune job. */ - events?: FineTuneEvent[]; -} - -model FineTuningJobEvent { - id: string; - object: string; - - @encode("unixTimestamp", int32) - created_at: utcDateTime; - - level: "info" | "warn" | "error"; - message: string; -} - -model FineTuneEvent { - object: string; - - @encode("unixTimestamp", int32) - created_at: utcDateTime; - - level: string; - message: string; -} + param: string | null; + } | null; -model CreateFineTuningJobRequest { /** - * The ID of an uploaded file that contains training data. - * - * See [upload file](/docs/api-reference/files/upload) for how to upload a file. - * - * Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with - * the purpose `fine-tune`. - * - * See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. + * The name of the fine-tuned model that is being created. The value will be null if the + * fine-tuning job is still running. */ - training_file: string; + fine_tuned_model: string | null; /** - * The ID of an uploaded file that contains validation data. 
- * - * If you provide this file, the data is used to generate validation metrics periodically during - * fine-tuning. These metrics can be viewed in the fine-tuning results file. The same data should - * not be present in both train and validation files. - * - * Your dataset must be formatted as a JSONL file. You must upload your file with the purpose - * `fine-tune`. - * - * See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. + * The Unix timestamp (in seconds) for when the fine-tuning job was finished. The value will be + * null if the fine-tuning job is still running. */ - validation_file?: string | null; + @encode("unixTimestamp", int32) + finished_at: utcDateTime | null; /** - * The name of the model to fine-tune. You can select one of the - * [supported models](/docs/guides/fine-tuning/what-models-can-be-fine-tuned). + * The hyperparameters used for the fine-tuning job. See the + * [fine-tuning guide](/docs/guides/fine-tuning) for more details. */ - @extension("x-oaiTypeLabel", "string") - `model`: string | "babbage-002" | "davinci-002" | "gpt-3.5-turbo"; - - /** The hyperparameters used for the fine-tuning job. */ - hyperparameters?: { + hyperparameters: { /** * The number of epochs to train the model for. An epoch refers to one full cycle through the * training dataset. + * + * "auto" decides the optimal number of epochs based on the size of the dataset. If setting the + * number manually, we support any number between 1 and 50 epochs. */ - n_epochs?: "auto" | NEpochs = "auto"; + n_epochs: "auto" | NEpochs = "auto"; }; - /** - * A string of up to 18 characters that will be added to your fine-tuned model name. - * - * For example, a `suffix` of "custom-model-name" would produce a model name like - * `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`. - */ - suffix?: SuffixString | null = null; -} - -@minValue(1) -@maxValue(50) -scalar NEpochs extends safeint; - -model ListFineTuningJobEventsResponse { - object: string; - data: FineTuningJobEvent[]; -} - -model CreateFineTuneRequest { - /** - * The ID of an uploaded file that contains training data. - * - * See [upload file](/docs/api-reference/files/upload) for how to upload a file. - * - * Your dataset must be formatted as a JSONL file, where each training example is a JSON object - * with the keys "prompt" and "completion". Additionally, you must upload your file with the - * purpose `fine-tune`. - * - * See the [fine-tuning guide](/docs/guides/legacy-fine-tuning/creating-training-data) for more - * details. - */ - training_file: string; - - /** - * The ID of an uploaded file that contains validation data. - * - * If you provide this file, the data is used to generate validation metrics periodically during - * fine-tuning. These metrics can be viewed in the - * [fine-tuning results file](/docs/guides/legacy-fine-tuning/analyzing-your-fine-tuned-model). - * Your train and validation data should be mutually exclusive. - * - * Your dataset must be formatted as a JSONL file, where each validation example is a JSON object - * with the keys "prompt" and "completion". Additionally, you must upload your file with the - * purpose `fine-tune`. - * - * See the [fine-tuning guide](/docs/guides/legacy-fine-tuning/creating-training-data) for more - * details. - */ - validation_file?: string | null; - - /** - * The name of the base model to fine-tune. You can select one of "ada", "babbage", "curie", - * "davinci", or a fine-tuned model created after 2022-04-21 and before 2023-08-22. 
To learn more - * about these models, see the [Models](/docs/models) documentation. - */ - @extension("x-oaiTypeLabel", "string") - `model`?: string | "ada" | "babbage" | "curie" | "davinci" | null; - - /** - * The number of epochs to train the model for. An epoch refers to one full cycle through the - * training dataset. - */ - n_epochs?: safeint | null = 4; - - /** - * The batch size to use for training. The batch size is the number of training examples used to - * train a single forward and backward pass. - * - * By default, the batch size will be dynamically configured to be ~0.2% of the number of examples - * in the training set, capped at 256 - in general, we've found that larger batch sizes tend to - * work better for larger datasets. - */ - batch_size?: safeint | null = null; + /** The base model that is being fine-tuned. */ + `model`: string; - /** - * The learning rate multiplier to use for training. The fine-tuning learning rate is the original - * learning rate used for pretraining multiplied by this value. - * - * By default, the learning rate multiplier is the 0.05, 0.1, or 0.2 depending on final - * `batch_size` (larger learning rates tend to perform better with larger batch sizes). We - * recommend experimenting with values in the range 0.02 to 0.2 to see what produces the best - * results. - */ - learning_rate_multiplier?: float64 | null = null; + /** The object type, which is always "fine_tuning.job". */ + object: "fine_tuning.job"; - /** - * The weight to use for loss on the prompt tokens. This controls how much the model tries to - * learn to generate the prompt (as compared to the completion which always has a weight of 1.0), - * and can add a stabilizing effect to training when completions are short. - * - * If prompts are extremely long (relative to completions), it may make sense to reduce this - * weight so as to avoid over-prioritizing learning the prompt. - */ - prompt_loss_rate?: float64 | null = 0.01; + /** The organization that owns the fine-tuning job. */ + organization_id: string; /** - * If set, we calculate classification-specific metrics such as accuracy and F-1 score using the - * validation set at the end of every epoch. These metrics can be viewed in the - * [results file](/docs/guides/legacy-fine-tuning/analyzing-your-fine-tuned-model). - * - * In order to compute classification metrics, you must provide a `validation_file`. Additionally, - * you must specify `classification_n_classes` for multiclass classification or - * `classification_positive_class` for binary classification. + * The compiled results file ID(s) for the fine-tuning job. You can retrieve the results with the + * [Files API](/docs/api-reference/files/retrieve-contents). */ - compute_classification_metrics?: boolean | null = false; + result_files: string[]; /** - * The number of classes in a classification task. - * - * This parameter is required for multiclass classification. + * The current status of the fine-tuning job, which can be either `validating_files`, `queued`, + * `running`, `succeeded`, `failed`, or `cancelled`. */ - classification_n_classes?: safeint | null = null; + status: + | "validating_files" + | "queued" + | "running" + | "succeeded" + | "failed" + | "cancelled"; /** - * The positive class in binary classification. - * - * This parameter is needed to generate precision, recall, and F1 metrics when doing binary - * classification. + * The total number of billable tokens processed by this fine-tuning job. 
The value will be null + * if the fine-tuning job is still running. */ - classification_positive_class?: string | null = null; + trained_tokens: safeint | null; /** - * If this is provided, we calculate F-beta scores at the specified beta values. The F-beta score - * is a generalization of F-1 score. This is only used for binary classification. - * - * With a beta of 1 (i.e. the F-1 score), precision and recall are given the same weight. A larger - * beta score puts more weight on recall and less on precision. A smaller beta score puts more - * weight on precision and less on recall. + * The file ID used for training. You can retrieve the training data with the + * [Files API](/docs/api-reference/files/retrieve-contents). */ - classification_betas?: float64[] | null = null; + training_file: string; /** - * A string of up to 18 characters that will be added to your fine-tuned model name. - * - * For example, a `suffix` of "custom-model-name" would produce a model name like - * `ada:ft-your-org:custom-model-name-2022-02-15-04-21-04`. + * The file ID used for validation. You can retrieve the validation results with the + * [Files API](/docs/api-reference/files/retrieve-contents). */ - suffix?: SuffixString | null = null; + validation_file: string | null; } -@minLength(1) -@maxLength(40) -scalar SuffixString extends string; +/** Fine-tuning job event object */ +model FineTuningJobEvent { + id: string; -model ListFineTunesResponse { - object: string; - data: FineTune[]; -} + @encode("unixTimestamp", int32) + created_at: utcDateTime; -model ListFineTuneEventsResponse { - object: string; - data: FineTuneEvent[]; -} + level: "info" | "warn" | "error"; -model ListPaginatedFineTuningJobsResponse { - object: string; - data: FineTuningJob[]; - has_more: boolean; -} + message: string; + + object: "fine_tuning.job.event"; +} \ No newline at end of file diff --git a/fine-tuning/operations.tsp b/fine-tuning/operations.tsp index 15491f62e..d972c4219 100644 --- a/fine-tuning/operations.tsp +++ b/fine-tuning/operations.tsp @@ -10,182 +10,72 @@ using TypeSpec.OpenAPI; namespace OpenAI; @route("/fine_tuning") -namespace FineTuning { +interface FineTuning { @route("jobs") - interface Jobs { - /** - * Creates a job that fine-tunes a specified model from a given dataset. - * - * Response includes details of the enqueued job including job status and the name of the - * fine-tuned models once complete. - * - * [Learn more about fine-tuning](/docs/guides/fine-tuning) - */ - @post - @tag("OpenAI") - @operationId("createFineTuningJob") - createFineTuningJob( - @body job: CreateFineTuningJobRequest, - ): FineTuningJob | ErrorResponse; - - @get - @tag("OpenAI") - @operationId("listPaginatedFineTuningJobs") - listPaginatedFineTuningJobs( - /** Identifier for the last job from the previous pagination request. */ - @query after?: string, - - /** Number of fine-tuning jobs to retrieve. */ - @query limit?: safeint = 20, - ): ListPaginatedFineTuningJobsResponse | ErrorResponse; - - @summary(""" - Get info about a fine-tuning job. - - [Learn more about fine-tuning](/docs/guides/fine-tuning) - """) - @route("{fine_tuning_job_id}") - @tag("OpenAI") - @get - @operationId("retrieveFineTuningJob") - retrieveFineTuningJob( - @path fine_tuning_job_id: string, - ): FineTuningJob | ErrorResponse; - - @summary("Get status updates for a fine-tuning job.") - @tag("OpenAI") - @route("{fine_tuning_job_id}/events") - @get - @operationId("listFineTuningEvents") - listFineTuningEvents( - /** The ID of the fine-tuning job to get events for. 
*/ - @path fine_tuning_job_id: string, - - /** Identifier for the last event from the previous pagination request. */ - @query after?: string, - - /** Number of events to retrieve. */ - @query limit?: integer = 20, - ): ListFineTuningJobEventsResponse | ErrorResponse; - - @summary("Immediately cancel a fine-tune job.") - @tag("OpenAI") - @route("{fine_tuning_job_id}/cancel") - @post - @operationId("cancelFineTuningJob") - cancelFineTuningJob( - /** The ID of the fine-tuning job to cancel. */ - @path fine_tuning_job_id: string, - ): FineTuningJob | ErrorResponse; - } -} - -@route("/fine-tunes") -interface FineTunes { - #deprecated "deprecated" @post - @tag("OpenAI") + @operationId("createFineTuningJob") + @tag("Fine-tuning") @summary(""" - Creates a job that fine-tunes a specified model from a given dataset. + Creates a fine-tuning job which begins the process of creating a new model from a given dataset. Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. - [Learn more about fine-tuning](/docs/guides/legacy-fine-tuning) + [Learn more about fine-tuning](/docs/guides/fine-tuning) """) - @operationId("createFineTune") - createFineTune( - @body fine_tune: CreateFineTuneRequest, - ): FineTune | ErrorResponse; + createFineTuningJob( + @body job: CreateFineTuningJobRequest, + ): FineTuningJob | ErrorResponse; - #deprecated "deprecated" + @route("jobs") @get - @tag("OpenAI") + @operationId("listPaginatedFineTuningJobs") + @tag("Fine-tuning") @summary("List your organization's fine-tuning jobs") - @operationId("listFineTunes") - listFineTunes(): ListFineTunesResponse | ErrorResponse; + listPaginatedFineTuningJobs( + /** Identifier for the last job from the previous pagination request. */ + @query after?: string, - #deprecated "deprecated" + /** Number of fine-tuning jobs to retrieve. */ + @query limit?: int32 = 20, + ): ListPaginatedFineTuningJobsResponse | ErrorResponse; + + @route("jobs/{fine_tuning_job_id}") @get - @route("{fine_tune_id}") - @tag("OpenAI") + @operationId("retrieveFineTuningJob") + @tag("Fine-tuning") @summary(""" - Gets info about the fine-tune job. + Get info about a fine-tuning job. - [Learn more about fine-tuning](/docs/guides/legacy-fine-tuning) + [Learn more about fine-tuning](/docs/guides/fine-tuning) """) - @operationId("retrieveFineTune") - retrieveFineTune( - /** The ID of the fine-tune job */ - @path fine_tune_id: string, - ): FineTune | ErrorResponse; - - #deprecated "deprecated" - @route("{fine_tune_id}/events") - @get - @tag("OpenAI") - @summary("Get fine-grained status updates for a fine-tune job.") - @operationId("listFineTuneEvents") - listFineTuneEvents( - /** The ID of the fine-tune job to get events for. */ - @path fine_tune_id: string, - - /** - * Whether to stream events for the fine-tune job. If set to true, events will be sent as - * data-only - * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) - * as they become available. The stream will terminate with a `data: [DONE]` message when the - * job is finished (succeeded, cancelled, or failed). - * - * If set to false, only events generated so far will be returned. - */ - @query stream?: boolean = false, - ): ListFineTuneEventsResponse | ErrorResponse; + retrieveFineTuningJob( + /** The ID of the fine-tuning job. 
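`listPaginatedFineTuningJobs` above pages with an `after` cursor (the last job ID seen) and a `limit`, and the response's `has_more` flag signals when to stop. A sketch of walking all pages, assuming `requests` and `OPENAI_API_KEY`:

```python
import os
import requests

url = "https://api.openai.com/v1/fine_tuning/jobs"
headers = {"Authorization": f"Bearer {os.environ['OPENAI_API_KEY']}"}
after = None
while True:
    params = {"limit": 20}
    if after:
        params["after"] = after  # cursor: the last job ID from the prior page
    page = requests.get(url, headers=headers, params=params).json()
    for job in page["data"]:
        print(job["id"], job["status"])
    if not page["has_more"] or not page["data"]:
        break
    after = page["data"][-1]["id"]
```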
*/ + @path fine_tuning_job_id: string, + ): FineTuningJob | ErrorResponse; - #deprecated "deprecated" - @route("{fine_tune_id}/cancel") + @route("jobs/{fine_tuning_job_id}/cancel") @post - @tag("OpenAI") + @operationId("cancelFineTuningJob") + @tag("Fine-tuning") @summary("Immediately cancel a fine-tune job.") - @operationId("cancelFineTune") - cancelFineTune( - /** The ID of the fine-tune job to cancel */ - @path fine_tune_id: string, - ): FineTune | ErrorResponse; -} - -@route("/models") -interface Models { - @get - @tag("OpenAI") - @summary(""" - Lists the currently available models, and provides basic information about each one such as the - owner and availability. - """) - @operationId("listModels") - listModels(): ListModelsResponse | ErrorResponse; + cancelFineTuningJob( + /** The ID of the fine-tuning job to cancel. */ + @path fine_tuning_job_id: string, + ): FineTuningJob | ErrorResponse; + @route("jobs/{fine_tuning_job_id}/events") @get - @route("{model}") - @operationId("retrieveModel") - @tag("OpenAI") - @summary(""" - Retrieves a model instance, providing basic information about the model such as the owner and - permissioning. - """) - retrieve( - /** The ID of the model to use for this request. */ - @path `model`: string, - ): Model | ErrorResponse; - - @delete - @route("{model}") - @operationId("deleteModel") - @tag("OpenAI") - @summary(""" - Delete a fine-tuned model. You must have the Owner role in your organization to delete a model. - """) - delete( - /** The model to delete */ - @path `model`: string, - ): DeleteModelResponse | ErrorResponse; -} + @operationId("listFineTuningEvents") + @tag("Fine-tuning") + @summary("Get status updates for a fine-tuning job.") + listFineTuningEvents( + /** The ID of the fine-tuning job to get events for. */ + @path fine_tuning_job_id: string, + + /** Identifier for the last event from the previous pagination request. */ + @query after?: string, + + /** Number of events to retrieve. */ + @query limit?: int32 = 20, + ): ListFineTuningJobEventsResponse | ErrorResponse; +} \ No newline at end of file diff --git a/images/meta.tsp b/images/meta.tsp new file mode 100644 index 000000000..79e02b4ee --- /dev/null +++ b/images/meta.tsp @@ -0,0 +1,18 @@ +import "./models.tsp"; +import "./operations.tsp"; + +using TypeSpec.OpenAPI; + +namespace OpenAI; + +@@extension(OpenAI.Image, + "x-oaiMeta", + """ + name: "The image object", + example: | + { + "url": "...", + "revised_prompt": "..." + } + """ +); \ No newline at end of file diff --git a/images/models.tsp b/images/models.tsp index 3d7020b51..e308ade23 100644 --- a/images/models.tsp +++ b/images/models.tsp @@ -1,51 +1,58 @@ import "../common/models.tsp"; -namespace OpenAI; using TypeSpec.OpenAPI; -alias SharedImageProperties = { - /** The number of images to generate. Must be between 1 and 10. */ - n?: ImagesN | null = 1; - - /** The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. */ - size?: IMAGE_SIZES | null = "1024x1024"; - - /** The format in which the generated images are returned. Must be one of `url` or `b64_json`. */ - response_format?: "url" | "b64_json" | null = "url"; - - user?: User; -}; +namespace OpenAI; model CreateImageRequest { - /** A text description of the desired image(s). The maximum length is 1000 characters. */ + /** + * A text description of the desired image(s). The maximum length is 1000 characters for + * `dall-e-2` and 4000 characters for `dall-e-3`. 
+ */ prompt: string; - ...SharedImageProperties; -} + /** The model to use for image generation. */ + @extension("x-oaiTypeLabel", "string") + `model`?: string | "dall-e-2" | "dall-e-3" = "dall-e-2"; -model ImagesResponse { - @encode("unixTimestamp", int32) - created: utcDateTime; + /** + * The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only `n=1` is + * supported. + */ + // TODO: This is generated as a "oneOf" in the tsp-output? + n?: ImagesN | null = 1; - data: Image[]; -} + /** + * The quality of the image that will be generated. `hd` creates images with finer details and + * greater consistency across the image. This param is only supported for `dall-e-3`. + */ + // NOTE: This is not marked as nullable in the OpenAPI spec. + quality?: "standard" | "hd" | null = "standard"; -alias IMAGE_SIZES = "256x256" | "512x512" | "1024x1024"; + /** The format in which the generated images are returned. Must be one of `url` or `b64_json`. */ + response_format?: "url" | "b64_json" | null = "url"; -/** Represents the url or the content of an image generated by the OpenAI API. */ -model Image { - /** The URL of the generated image, if `response_format` is `url` (default). */ - url?: url; + /** + * The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024` for + * `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3` models. + */ + size?: "256x256" | "512x512" | "1024x1024" | "1792x1024" | "1024x1792" | null = "1024x1024"; - /** The base64-encoded JSON of the generated image, if `response_format` is `b64_json`. */ - @encode("base64", string) - b64_json?: bytes; + /** + * The style of the generated images. Must be one of `vivid` or `natural`. Vivid causes the model + * to lean towards generating hyper-real and dramatic images. Natural causes the model to produce + * more natural, less hyper-real looking images. This param is only supported for `dall-e-3`. + */ + style?: "vivid" | "natural" | null = "vivid"; + + /** + * A unique identifier representing your end-user, which can help OpenAI to monitor and detect + * abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). + */ + user?: User; } model CreateImageEditRequest { - /** A text description of the desired image(s). The maximum length is 1000 characters. */ - prompt: string; - /** * The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not * provided, image must have transparency, which will be used as the mask. @@ -53,6 +60,11 @@ model CreateImageEditRequest { @encode("binary") image: bytes; + /** A text description of the desired image(s). The maximum length is 1000 characters. */ + // NOTE: Max length is not defined in the OpenAI spec but mentioned in the description. + @maxLength(1000) + prompt: string; + /** * An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where * `image` should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions @@ -61,7 +73,26 @@ model CreateImageEditRequest { @encode("binary") mask?: bytes; - ...SharedImageProperties; + /** The model to use for image generation. Only `dall-e-2` is supported at this time. */ + @extension("x-oaiTypeLabel", "string") + `model`?: string | "dall-e-2" = "dall-e-2"; + + /** + * The number of images to generate. Must be between 1 and 10. + */ + n?: ImagesN | null = 1; + + /** The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. 
*/ + size?: "256x256" | "512x512" | "1024x1024" | null = "1024x1024"; + + /** The format in which the generated images are returned. Must be one of `url` or `b64_json`. */ + response_format?: "url" | "b64_json" | null = "url"; + + /** + * A unique identifier representing your end-user, which can help OpenAI to monitor and detect + * abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). + */ + user?: User; } model CreateImageVariationRequest { @@ -72,9 +103,49 @@ model CreateImageVariationRequest { @encode("binary") image: bytes; - ...SharedImageProperties; + /** The model to use for image generation. Only `dall-e-2` is supported at this time. */ + @extension("x-oaiTypeLabel", "string") + `model`?: string | "dall-e-2" = "dall-e-2"; + + /** + * The number of images to generate. Must be between 1 and 10. + */ + n?: ImagesN | null = 1; + + /** The format in which the generated images are returned. Must be one of `url` or `b64_json`. */ + response_format?: "url" | "b64_json" | null = "url"; + + /** The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. */ + size?: "256x256" | "512x512" | "1024x1024" | null = "1024x1024"; + + /** + * A unique identifier representing your end-user, which can help OpenAI to monitor and detect + * abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). + */ + user?: User; +} + +model ImagesResponse { + @encode("unixTimestamp", int32) + created: utcDateTime; + + data: Image[]; } @minValue(1) @maxValue(10) scalar ImagesN extends safeint; + +/** Represents the url or the content of an image generated by the OpenAI API. */ +model Image { + /** The base64-encoded JSON of the generated image, if `response_format` is `b64_json`. */ + @encode("base64", string) + b64_json?: bytes; + + /** The URL of the generated image, if `response_format` is `url` (default). */ + url?: url; + + /** The prompt that was used to generate the image, if there was any revision to the prompt. 
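`CreateImageEditRequest` above travels as `multipart/form-data`: a square PNG under 4MB (with transparency, or an explicit `mask`) plus the prompt. A sketch with placeholder file names, assuming `requests` and `OPENAI_API_KEY`:

```python
import os
import requests

# "original.png" and "mask.png" are placeholders; the mask's fully
# transparent areas mark where the image should be edited.
with open("original.png", "rb") as image, open("mask.png", "rb") as mask:
    response = requests.post(
        "https://api.openai.com/v1/images/edits",
        headers={"Authorization": f"Bearer {os.environ['OPENAI_API_KEY']}"},
        files={"image": image, "mask": mask},
        data={
            "prompt": "Add a red hot-air balloon to the sky",
            "n": 1,
            "size": "1024x1024",
        },
    )
print(response.json()["data"][0]["url"])
```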
*/ + revised_prompt?: string; +} + diff --git a/images/operations.tsp b/images/operations.tsp index 09203262b..4db5886ae 100644 --- a/images/operations.tsp +++ b/images/operations.tsp @@ -14,14 +14,16 @@ interface Images { @route("generations") @post @operationId("createImage") - @tag("OpenAI") + @tag("Images") @summary("Creates an image given a prompt") - createImage(@body image: CreateImageRequest): ImagesResponse | ErrorResponse; + createImage( + @body image: CreateImageRequest + ): ImagesResponse | ErrorResponse; @route("edits") @post @operationId("createImageEdit") - @tag("OpenAI") + @tag("Images") @summary("Creates an edited or extended image given an original image and a prompt.") createImageEdit( @header contentType: "multipart/form-data", @@ -31,7 +33,7 @@ interface Images { @route("variations") @post @operationId("createImageVariation") - @tag("OpenAI") + @tag("Images") @summary("Creates an edited or extended image given an original image and a prompt.") createImageVariation( @header contentType: "multipart/form-data", diff --git a/main.tsp b/main.tsp index 2ea8cbbc3..293beba5b 100644 --- a/main.tsp +++ b/main.tsp @@ -3,13 +3,18 @@ import "@typespec/openapi3"; import "@typespec/openapi"; import "./audio"; +import "./assistants"; +import "./chat"; import "./completions"; -import "./edits"; import "./embeddings"; import "./files"; import "./fine-tuning"; import "./images"; -import "./moderation"; +import "./messages"; +import "./models"; +import "./moderations"; +import "./runs"; +import "./threads"; using TypeSpec.Http; @@ -24,9 +29,8 @@ using TypeSpec.Http; license: { name: "MIT", url: "https://github.com/openai/openai-openapi/blob/master/LICENSE", - }, - version: "2.0.0", + } }) @server("https://api.openai.com/v1", "OpenAI Endpoint") @useAuth(BearerAuth) -namespace OpenAI; +namespace OpenAI; \ No newline at end of file diff --git a/messages/main.tsp b/messages/main.tsp new file mode 100644 index 000000000..6a754bcb5 --- /dev/null +++ b/messages/main.tsp @@ -0,0 +1 @@ +import "./operations.tsp"; \ No newline at end of file diff --git a/messages/meta.tsp b/messages/meta.tsp new file mode 100644 index 000000000..682702c76 --- /dev/null +++ b/messages/meta.tsp @@ -0,0 +1,52 @@ +import "./models.tsp"; + +import "@typespec/openapi"; + +using TypeSpec.OpenAPI; + +namespace OpenAI; + +@@extension(OpenAI.MessageObject, + "x-oaiMeta", + """ + name: The message object + beta: true + example: | + { + "id": "msg_abc123", + "object": "thread.message", + "created_at": 1698983503, + "thread_id": "thread_abc123", + "role": "assistant", + "content": [ + { + "type": "text", + "text": { + "value": "Hi! How can I help you today?", + "annotations": [] + } + } + ], + "file_ids": [], + "assistant_id": "asst_abc123", + "run_id": "run_abc123", + "metadata": {} + } + """ +); + +@@extension(OpenAI.MessageFileObject, + "x-oaiMeta", + """ + name: The message file object + beta: true + example: | + { + "id": "file-abc123", + "object": "thread.message.file", + "created_at": 1698107661, + "message_id": "message_QLoItBbqwyAJEzlTy4y9kOMM", + "file_id": "file-abc123" + } + """ +); \ No newline at end of file diff --git a/messages/models.tsp b/messages/models.tsp new file mode 100644 index 000000000..67d78db29 --- /dev/null +++ b/messages/models.tsp @@ -0,0 +1,212 @@ +import "../common/models.tsp"; + +using TypeSpec.Http; +using TypeSpec.OpenAPI; + +namespace OpenAI; + +model CreateMessageRequest { + /** The role of the entity that is creating the message. Currently only `user` is supported. 
*/ + role: "user"; // TODO: The generated spec adds "assistants" to this enum. + + /** The content of the message. */ + @minLength(1) + @maxLength(32768) + content: string; + + /** + * A list of [File](/docs/api-reference/files) IDs that the message should use. There can be a + * maximum of 10 files attached to a message. Useful for tools like `retrieval` and + * `code_interpreter` that can access and use files. + */ + @minItems(1) + @maxItems(10) + file_ids?: string[] = []; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + * additional information about the object in a structured format. Keys can be a maximum of 64 + * characters long and values can be a maximum of 512 characters long. + */ + @extension("x-oaiTypeLabel", "map") + metadata?: Record<string> | null; +} + +model ModifyMessageRequest { + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + * additional information about the object in a structured format. Keys can be a maximum of 64 + * characters long and values can be a maximum of 512 characters long. + */ + @extension("x-oaiTypeLabel", "map") + metadata?: Record<string> | null; +} + +model ListMessagesResponse { + object: "list"; + data: MessageObject[]; + first_id: string; + last_id: string; + has_more: boolean; +} + +model ListMessageFilesResponse { + object: "list"; + data: MessageFileObject[]; + first_id: string; + last_id: string; + has_more: boolean; +} + +model MessageObject { + /** The identifier, which can be referenced in API endpoints. */ + id: string; + + /** The object type, which is always `thread.message`. */ + object: "thread.message"; + + /** The Unix timestamp (in seconds) for when the message was created. */ + @encode("unixTimestamp", int32) + created_at: utcDateTime; + + /** The [thread](/docs/api-reference/threads) ID that this message belongs to. */ + thread_id: string; + + /** The entity that produced the message. One of `user` or `assistant`. */ + role: "user" | "assistant"; + + /** The content of the message in an array of text and/or images. */ + content: MessageObjectContent[]; + + /** + * If applicable, the ID of the [assistant](/docs/api-reference/assistants) that authored this + * message. + */ + assistant_id: string | null; + + /** + * If applicable, the ID of the [run](/docs/api-reference/runs) associated with the authoring of + * this message. + */ + run_id: string | null; + + /** + * A list of [file](/docs/api-reference/files) IDs that the assistant should use. Useful for + * tools like retrieval and code_interpreter that can access files. A maximum of 10 files can be + * attached to a message. + */ + @maxItems(10) + file_ids: string[] = []; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + * additional information about the object in a structured format. Keys can be a maximum of 64 + * characters long and values can be a maximum of 512 characters long. + */ + @extension("x-oaiTypeLabel", "map") + metadata: Record<string> | null; +} + +@oneOf +@extension("x-oaiExpandable", true) +union MessageObjectContent { + MessageContentImageFileObject, + MessageContentTextObject, +} + +/** References an image [File](/docs/api-reference/files) in the content of a message. */ +model MessageContentImageFileObject { + /** Always `image_file`. */ + type: "image_file"; + + image_file: { + /** The [File](/docs/api-reference/files) ID of the image in the message content. 
*/ + file_id: string; + } +} + +/** The text content that is part of a message. */ +model MessageContentTextObject { + /** Always `text`. */ + type: "text"; // TODO: The generated spec adds "json_object" to this enum. + + text: { + /** The data that makes up the text. */ + value: string; + + annotations: MessageContentTextObjectAnnotations[]; + } +} + +@oneOf +@extension("x-oaiExpandable", true) +union MessageContentTextObjectAnnotations { + MessageContentTextAnnotationsFileCitationObject, + MessageContentTextAnnotationsFilePathObject, +} + +/** + * A citation within the message that points to a specific quote from a specific File associated + * with the assistant or the message. Generated when the assistant uses the "retrieval" tool to + * search files. + */ +model MessageContentTextAnnotationsFileCitationObject { + /** Always `file_citation`. */ + type: "file_citation"; + + /** The text in the message content that needs to be replaced. */ + text: string; + + file_citation: { + /** The ID of the specific File the citation is from. */ + file_id: string; + + /** The specific quote in the file. */ + quote: string; + }; + + @minValue(0) + start_index: safeint; + + @minValue(0) + end_index: safeint; +} + +/** + * A URL for the file that's generated when the assistant used the `code_interpreter` tool to + * generate a file. + */ +model MessageContentTextAnnotationsFilePathObject { + /** Always `file_path`. */ + type: "file_path"; + + /** The text in the message content that needs to be replaced. */ + text: string; + + file_path: { + /** The ID of the file that was generated. */ + file_id: string; + }; + + @minValue(0) + start_index: safeint; + + @minValue(0) + end_index: safeint; +} + +/** A list of files attached to a `message`. */ +model MessageFileObject { + /** The identifier, which can be referenced in API endpoints. */ + id: string; + + /** The object type, which is always `thread.message.file`. */ + object: "thread.message.file"; + + /** The Unix timestamp (in seconds) for when the message file was created. */ + @encode("unixTimestamp", int32) + created_at: utcDateTime; + + /** The ID of the [message](/docs/api-reference/messages) that the [File](/docs/api-reference/files) is attached to. */ + message_id: string; +} \ No newline at end of file diff --git a/messages/operations.tsp b/messages/operations.tsp new file mode 100644 index 000000000..652b79007 --- /dev/null +++ b/messages/operations.tsp @@ -0,0 +1,143 @@ +import "@typespec/http"; +import "@typespec/openapi"; + +import "../common/models.tsp"; +import "../common/errors.tsp"; +import "./models.tsp"; + +using TypeSpec.Http; +using TypeSpec.OpenAPI; + +namespace OpenAI; + +@route("threads/{thread_id}/messages") +interface Messages { + @post + @operationId("createMessage") + @tag("Assistants") + @summary("Create a message.") + createMessage( + /** The ID of the [thread](/docs/api-reference/threads) to create a message for. */ + @path thread_id: string, + + @body message: CreateMessageRequest, + ): MessageObject | ErrorResponse; + + @get + @operationId("listMessages") + @tag("Assistants") + @summary("Returns a list of messages for a given thread.") + listMessages( + /** The ID of the [thread](/docs/api-reference/threads) the messages belong to. */ + @path thread_id: string, + + /** + * A limit on the number of objects to be returned. Limit can range between 1 and 100, and the + * default is 20. + */ + @query limit?: int32 = 20; + + /** + * Sort order by the `created_at` timestamp of the objects. 
`asc` for ascending order and `desc` + * for descending order. + */ + @query order?: ListOrder = ListOrder.desc; + + /** + * A cursor for use in pagination. `after` is an object ID that defines your place in the list. + * For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + * subsequent call can include after=obj_foo in order to fetch the next page of the list. + */ + @query after?: string; + + /** + * A cursor for use in pagination. `before` is an object ID that defines your place in the list. + * For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + * subsequent call can include before=obj_foo in order to fetch the previous page of the list. + */ + @query before?: string; + ): ListMessagesResponse | ErrorResponse; + + @route("{message_id}") + @get + @operationId("getMessage") + @tag("Assistants") + @summary("Retrieve a message.") + getMessage( + /** The ID of the [thread](/docs/api-reference/threads) to which this message belongs. */ + @path thread_id: string, + + /** The ID of the message to retrieve. */ + @path message_id: string, + ): MessageObject | ErrorResponse; + + @route("{message_id}") + @post + @operationId("modifyMessage") + @tag("Assistants") + @summary("Modifies a message.") + modifyMessage( + /** The ID of the thread to which this message belongs. */ + @path thread_id: string, + + /** The ID of the message to modify. */ + @path message_id: string, + + @body message: ModifyMessageRequest, + ): MessageObject | ErrorResponse; + + @route("{message_id}/files") + @get + @operationId("listMessageFiles") + @tag("Assistants") + @summary("Returns a list of message files.") + listMessageFiles( + /** The ID of the thread that the message and files belong to. */ + @path thread_id: string, + + /** The ID of the message that the files belong to. */ + @path message_id: string, + + /** + * A limit on the number of objects to be returned. Limit can range between 1 and 100, and the + * default is 20. + */ + @query limit?: int32 = 20; + + /** + * Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` + * for descending order. + */ + @query order?: ListOrder = ListOrder.desc; + + /** + * A cursor for use in pagination. `after` is an object ID that defines your place in the list. + * For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + * subsequent call can include after=obj_foo in order to fetch the next page of the list. + */ + @query after?: string; + + /** + * A cursor for use in pagination. `before` is an object ID that defines your place in the list. + * For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + * subsequent call can include before=obj_foo in order to fetch the previous page of the list. + */ + @query before?: string; + ): ListMessageFilesResponse | ErrorResponse; + + @route("{message_id}/files/{file_id}") + @get + @operationId("getMessageFile") + @tag("Assistants") + @summary("Retrieves a message file.") + getMessageFile( + /** The ID of the thread to which the message and File belong. */ + @path thread_id: string, + + /** The ID of the message the file belongs to. */ + @path message_id: string, + + /** The ID of the file being retrieved. 
*/ + @path file_id: string, + ): MessageFileObject | ErrorResponse; +} diff --git a/models/main.tsp b/models/main.tsp new file mode 100644 index 000000000..6a754bcb5 --- /dev/null +++ b/models/main.tsp @@ -0,0 +1 @@ +import "./operations.tsp"; \ No newline at end of file diff --git a/models/meta.tsp b/models/meta.tsp new file mode 100644 index 000000000..cf8dd65dc --- /dev/null +++ b/models/meta.tsp @@ -0,0 +1,15 @@ +import "./models.tsp"; +import "./operations.tsp"; + +using TypeSpec.OpenAPI; + +namespace OpenAI; + +// TODO: Fill in example here. +@@extension(OpenAI.Model, + "x-oaiMeta", + """ + name: "The model object", + example: "*retrieve_model_response" + """ +); \ No newline at end of file diff --git a/models/models.tsp b/models/models.tsp new file mode 100644 index 000000000..f522ca893 --- /dev/null +++ b/models/models.tsp @@ -0,0 +1,30 @@ +using TypeSpec.OpenAPI; + +namespace OpenAI; + +model ListModelsResponse { + object: "list"; + data: Model[]; +} + +model DeleteModelResponse { + id: string; + deleted: boolean; + object: "model"; // NOTE: This is just a string in the OpenAPI spec, no enum. +} + +/** Describes an OpenAI model offering that can be used with the API. */ +model Model { + /** The model identifier, which can be referenced in the API endpoints. */ + id: string; + + /** The Unix timestamp (in seconds) when the model was created. */ + @encode("unixTimestamp", int32) + created: utcDateTime; + + /** The object type, which is always "model". */ + object: "model"; + + /** The organization that owns the model. */ + owned_by: string; +} \ No newline at end of file diff --git a/models/operations.tsp b/models/operations.tsp new file mode 100644 index 000000000..74f91f332 --- /dev/null +++ b/models/operations.tsp @@ -0,0 +1,47 @@ +import "@typespec/http"; +import "@typespec/openapi"; + +import "../common/errors.tsp"; +import "./models.tsp"; + +using TypeSpec.Http; +using TypeSpec.OpenAPI; + +namespace OpenAI; + +@route("/models") +interface Models { + @get + @operationId("listModels") + @tag("Models") + @summary(""" + Lists the currently available models, and provides basic information about each one such as the + owner and availability. + """) + listModels(): ListModelsResponse | ErrorResponse; + + @route("{model}") + @get + @operationId("retrieveModel") + @tag("Models") + @summary(""" + Retrieves a model instance, providing basic information about the model such as the owner and + permissioning. + """) + retrieve( + /** The ID of the model to use for this request. */ + @path `model`: string, + ): Model | ErrorResponse; + + @route("{model}") + @delete + @operationId("deleteModel") + @tag("Models") + @summary(""" + Delete a fine-tuned model. You must have the Owner role in your organization to delete a model. + """) + delete( + /** The model to delete */ + @path `model`: string, + ): DeleteModelResponse | ErrorResponse; +} diff --git a/moderation/main.tsp b/moderations/main.tsp similarity index 100% rename from moderation/main.tsp rename to moderations/main.tsp diff --git a/moderations/meta.tsp b/moderations/meta.tsp new file mode 100644 index 000000000..7d96cda38 --- /dev/null +++ b/moderations/meta.tsp @@ -0,0 +1,15 @@ +import "./models.tsp"; +import "./operations.tsp"; + +using TypeSpec.OpenAPI; + +namespace OpenAI; + +// TODO: Fill in example here. 
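+// NOTE: "*moderation_example" below is a YAML alias; it presumably resolves to a response anchor (e.g. `&moderation_example`) defined under the /moderations path in openapi.yaml, following the same pattern as "*retrieve_model_response" in models/meta.tsp.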
+@@extension(OpenAI.CreateModerationResponse, + "x-oaiMeta", + """ + name: "The moderation object", + example: "*moderation_example" + """ +); \ No newline at end of file diff --git a/moderation/models.tsp b/moderations/models.tsp similarity index 90% rename from moderation/models.tsp rename to moderations/models.tsp index f47b21be1..b844b2659 100644 --- a/moderation/models.tsp +++ b/moderations/models.tsp @@ -1,9 +1,10 @@ -namespace OpenAI; using TypeSpec.OpenAPI; +namespace OpenAI; + model CreateModerationRequest { /** The input text to classify */ - input: string | string[]; + input: CreateModerationRequestInput; /** * Two content moderations models are available: `text-moderation-stable` and @@ -13,9 +14,12 @@ model CreateModerationRequest { * of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`. */ @extension("x-oaiTypeLabel", "string") - `model`?: string | "text-moderation-latest" | "text-moderation-stable" = "text-moderation-latest"; + `model`?: string | MODERATION_MODELS = "text-moderation-latest"; } +/** + * Represents a policy compliance report by OpenAI's content moderation model against a given input. + */ model CreateModerationResponse { /** The unique identifier for the moderation request. */ id: string; @@ -66,7 +70,7 @@ * Content that encourages performing acts of self-harm, such as suicide, cutting, and eating * disorders, or that gives instructions or advice on how to commit such acts. */ - `self-harm/instructive`: boolean; + `self-harm/instructions`: boolean; /** * Content meant to arouse sexual excitement, such as the description of sexual activity, or @@ -105,7 +109,7 @@ model CreateModerationResponse { `self-harm/intent`: float64; - /** The score for the category 'self-harm/instructive'. */ - `self-harm/instructive`: float64; + /** The score for the category 'self-harm/instructions'. */ + `self-harm/instructions`: float64; /** The score for the category 'sexual'. */ sexual: float64; @@ -121,3 +125,13 @@ }; }[]; } + +alias MODERATION_MODELS = + | "text-moderation-latest" + | "text-moderation-stable"; + +@oneOf +union CreateModerationRequestInput { + string, + string[] +} \ No newline at end of file diff --git a/moderation/operations.tsp b/moderations/operations.tsp similarity index 93% rename from moderation/operations.tsp rename to moderations/operations.tsp index 5f29bc3be..7760ec2b2 100644 --- a/moderation/operations.tsp +++ b/moderations/operations.tsp @@ -11,8 +11,9 @@ namespace OpenAI; @route("/moderations") interface Moderations { + @post @operationId("createModeration") - @tag("OpenAI") + @tag("Moderations") @summary("Classifies if text violates OpenAI's Content Policy") createModeration( @body content: CreateModerationRequest, diff --git a/openapi.yaml b/openapi.yaml index 011ccf375..a6e16ee12 100644 --- a/openapi.yaml +++ b/openapi.yaml @@ -13,8 +13,26 @@ info: servers: - url: https://api.openai.com/v1 tags: - - name: OpenAI - description: The OpenAI REST API + - name: Assistants + description: Build Assistants that can call models and use tools. + - name: Audio + description: Learn how to turn audio into text or text into audio. + - name: Chat + description: Given a list of messages comprising a conversation, the model will return a response. + - name: Completions + description: Given a prompt, the model will return one or more predicted completions, and can also return the probabilities of alternative tokens at each position. 
+ - name: Embeddings + description: Get a vector representation of a given input that can be easily consumed by machine learning models and algorithms. + - name: Fine-tuning + description: Manage fine-tuning jobs to tailor a model to your specific training data. + - name: Files + description: Files are used to upload documents that can be used with features like Assistants and Fine-tuning. + - name: Images + description: Given a prompt and/or an input image, the model will generate a new image. + - name: Models + description: List and describe the various models available in the API. + - name: Moderations + description: Given an input text, outputs if the model classifies it as violating OpenAI's content policy. paths: # Note: When adding an endpoint, make sure you also add it in the `groups` section, in the end of this file, # under the appropriate group /chat/completions: post: operationId: createChatCompletion tags: - - OpenAI + - Chat summary: Creates a model response for the given chat conversation. requestBody: required: true @@ -45,7 +63,7 @@ Returns a [chat completion](/docs/api-reference/chat/object) object, or a streamed sequence of [chat completion chunk](/docs/api-reference/chat/streaming) objects if the request is streamed. path: create examples: - - title: No Streaming + - title: Default request: curl: | curl https://api.openai.com/v1/chat/completions \ -H "Content-Type: application/json" \ -H "Authorization: Bearer $OPENAI_API_KEY" \ -d '{ "model": "VAR_model_id", "messages": [ { "role": "system", "content": "You are a helpful assistant." }, { "role": "user", "content": "Hello!" } ] }' python: | - import os - import openai - openai.api_key = os.getenv("OPENAI_API_KEY") + from openai import OpenAI + client = OpenAI() - completion = openai.ChatCompletion.create( + completion = client.chat.completions.create( model="VAR_model_id", messages=[ {"role": "system", "content": "You are a helpful assistant."}, @@ -99,12 +116,111 @@ "object": "chat.completion", "created": 1677652288, "model": "gpt-3.5-turbo-0613", + "system_fingerprint": "fp_44709d6fcb", "choices": [{ "index": 0, "message": { "role": "assistant", "content": "\n\nHello there, how may I assist you today?", }, + "logprobs": null, "finish_reason": "stop" }], "usage": { "prompt_tokens": 9, "completion_tokens": 12, "total_tokens": 21 } } + - title: Image input + request: + curl: | + curl https://api.openai.com/v1/chat/completions \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "model": "gpt-4-vision-preview", + "messages": [ + { + "role": "user", + "content": [ + { + "type": "text", + "text": "What’s in this image?" 
+ }, + { + "type": "image_url", + "image_url": { + "url": "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg" + } + } + ] + } + ], + "max_tokens": 300 + }' + python: | + from openai import OpenAI + + client = OpenAI() + + response = client.chat.completions.create( + model="gpt-4-vision-preview", + messages=[ + { + "role": "user", + "content": [ + {"type": "text", "text": "What’s in this image?"}, + { + "type": "image_url", + "image_url": "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg", + }, + ], + } + ], + max_tokens=300, + ) + + print(response.choices[0]) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const response = await openai.chat.completions.create({ + model: "gpt-4-vision-preview", + messages: [ + { + role: "user", + content: [ + { type: "text", text: "What’s in this image?" }, + { + type: "image_url", + image_url: + "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg", + }, + ], + }, + ], + }); + console.log(response.choices[0]); + } + main(); + response: &chat_completion_image_example | + { + "id": "chatcmpl-123", + "object": "chat.completion", + "created": 1677652288, + "model": "gpt-3.5-turbo-0613", + "system_fingerprint": "fp_44709d6fcb", + "choices": [{ + "index": 0, + "message": { + "role": "assistant", + "content": "\n\nHello there, how may I assist you today?", + }, + "logprobs": null, "finish_reason": "stop" }], "usage": { @@ -134,11 +250,10 @@ paths: "stream": true }' python: | - import os - import openai - openai.api_key = os.getenv("OPENAI_API_KEY") + from openai import OpenAI + client = OpenAI() - completion = openai.ChatCompletion.create( + completion = client.chat.completions.create( model="VAR_model_id", messages=[ {"role": "system", "content": "You are a helpful assistant."}, @@ -172,24 +287,403 @@ paths: main(); response: &chat_completion_chunk_example | + {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-3.5-turbo-0613", "system_fingerprint": "fp_44709d6fcb", "choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]} + + {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-3.5-turbo-0613", "system_fingerprint": "fp_44709d6fcb", "choices":[{"index":0,"delta":{"content":"Hello"},"logprobs":null,"finish_reason":null}]} + + {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-3.5-turbo-0613", "system_fingerprint": "fp_44709d6fcb", "choices":[{"index":0,"delta":{"content":"!"},"logprobs":null,"finish_reason":null}]} + + .... 
+ + {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-3.5-turbo-0613", "system_fingerprint": "fp_44709d6fcb", "choices":[{"index":0,"delta":{"content":" today"},"logprobs":null,"finish_reason":null}]} + + {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-3.5-turbo-0613", "system_fingerprint": "fp_44709d6fcb", "choices":[{"index":0,"delta":{"content":"?"},"logprobs":null,"finish_reason":null}]} + + {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-3.5-turbo-0613", "system_fingerprint": "fp_44709d6fcb", "choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]} + - title: Functions + request: + curl: | + curl https://api.openai.com/v1/chat/completions \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "model": "gpt-3.5-turbo", + "messages": [ + { + "role": "user", + "content": "What is the weather like in Boston?" + } + ], + "tools": [ + { + "type": "function", + "function": { + "name": "get_current_weather", + "description": "Get the current weather in a given location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state, e.g. San Francisco, CA" + }, + "unit": { + "type": "string", + "enum": ["celsius", "fahrenheit"] + } + }, + "required": ["location"] + } + } + } + ], + "tool_choice": "auto" + }' + python: | + from openai import OpenAI + client = OpenAI() + + tools = [ + { + "type": "function", + "function": { + "name": "get_current_weather", + "description": "Get the current weather in a given location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state, e.g. San Francisco, CA", + }, + "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}, + }, + "required": ["location"], + }, + } + } + ] + messages = [{"role": "user", "content": "What's the weather like in Boston today?"}] + completion = client.chat.completions.create( + model="VAR_model_id", + messages=messages, + tools=tools, + tool_choice="auto" + ) + + print(completion) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const messages = [{"role": "user", "content": "What's the weather like in Boston today?"}]; + const tools = [ + { + "type": "function", + "function": { + "name": "get_current_weather", + "description": "Get the current weather in a given location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state, e.g. 
San Francisco, CA", + }, + "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}, + }, + "required": ["location"], + }, + } + } + ]; + + const response = await openai.chat.completions.create({ + model: "gpt-3.5-turbo", + messages: messages, + tools: tools, + tool_choice: "auto", + }); + + console.log(response); + } + + main(); + response: &chat_completion_function_example | + { + "id": "chatcmpl-abc123", + "object": "chat.completion", + "created": 1699896916, + "model": "gpt-3.5-turbo-0613", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": null, + "tool_calls": [ + { + "id": "call_abc123", + "type": "function", + "function": { + "name": "get_current_weather", + "arguments": "{\n\"location\": \"Boston, MA\"\n}" + } + } + ] + }, + "logprobs": null, + "finish_reason": "tool_calls" + } + ], + "usage": { + "prompt_tokens": 82, + "completion_tokens": 17, + "total_tokens": 99 + } + } + - title: Logprobs + request: + curl: | + curl https://api.openai.com/v1/chat/completions \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "model": "VAR_model_id", + "messages": [ + { + "role": "user", + "content": "Hello!" + } + ], + "logprobs": true, + "top_logprobs": 2 + }' + python: | + from openai import OpenAI + client = OpenAI() + + completion = client.chat.completions.create( + model="VAR_model_id", + messages=[ + {"role": "user", "content": "Hello!"} + ], + logprobs=True, + top_logprobs=2 + ) + + print(completion.choices[0].message) + print(completion.choices[0].logprobs) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const completion = await openai.chat.completions.create({ + messages: [{ role: "user", content: "Hello!" }], + model: "VAR_model_id", + logprobs: true, + top_logprobs: 2, + }); + + console.log(completion.choices[0]); + } + + main(); + response: | { "id": "chatcmpl-123", - "object": "chat.completion.chunk", - "created": 1677652288, - "model": "gpt-3.5-turbo", - "choices": [{ - "index": 0, - "delta": { - "content": "Hello", - }, - "finish_reason": "stop" - }] + "object": "chat.completion", + "created": 1702685778, + "model": "gpt-3.5-turbo-0613", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "Hello! How can I assist you today?" 
+ }, + "logprobs": { + "content": [ + { + "token": "Hello", + "logprob": -0.31725305, + "bytes": [72, 101, 108, 108, 111], + "top_logprobs": [ + { + "token": "Hello", + "logprob": -0.31725305, + "bytes": [72, 101, 108, 108, 111] + }, + { + "token": "Hi", + "logprob": -1.3190403, + "bytes": [72, 105] + } + ] + }, + { + "token": "!", + "logprob": -0.02380986, + "bytes": [ + 33 + ], + "top_logprobs": [ + { + "token": "!", + "logprob": -0.02380986, + "bytes": [33] + }, + { + "token": " there", + "logprob": -3.787621, + "bytes": [32, 116, 104, 101, 114, 101] + } + ] + }, + { + "token": " How", + "logprob": -0.000054669687, + "bytes": [32, 72, 111, 119], + "top_logprobs": [ + { + "token": " How", + "logprob": -0.000054669687, + "bytes": [32, 72, 111, 119] + }, + { + "token": "<|end|>", + "logprob": -10.953937, + "bytes": null + } + ] + }, + { + "token": " can", + "logprob": -0.015801601, + "bytes": [32, 99, 97, 110], + "top_logprobs": [ + { + "token": " can", + "logprob": -0.015801601, + "bytes": [32, 99, 97, 110] + }, + { + "token": " may", + "logprob": -4.161023, + "bytes": [32, 109, 97, 121] + } + ] + }, + { + "token": " I", + "logprob": -3.7697225e-6, + "bytes": [ + 32, + 73 + ], + "top_logprobs": [ + { + "token": " I", + "logprob": -3.7697225e-6, + "bytes": [32, 73] + }, + { + "token": " assist", + "logprob": -13.596657, + "bytes": [32, 97, 115, 115, 105, 115, 116] + } + ] + }, + { + "token": " assist", + "logprob": -0.04571125, + "bytes": [32, 97, 115, 115, 105, 115, 116], + "top_logprobs": [ + { + "token": " assist", + "logprob": -0.04571125, + "bytes": [32, 97, 115, 115, 105, 115, 116] + }, + { + "token": " help", + "logprob": -3.1089056, + "bytes": [32, 104, 101, 108, 112] + } + ] + }, + { + "token": " you", + "logprob": -5.4385737e-6, + "bytes": [32, 121, 111, 117], + "top_logprobs": [ + { + "token": " you", + "logprob": -5.4385737e-6, + "bytes": [32, 121, 111, 117] + }, + { + "token": " today", + "logprob": -12.807695, + "bytes": [32, 116, 111, 100, 97, 121] + } + ] + }, + { + "token": " today", + "logprob": -0.0040071653, + "bytes": [32, 116, 111, 100, 97, 121], + "top_logprobs": [ + { + "token": " today", + "logprob": -0.0040071653, + "bytes": [32, 116, 111, 100, 97, 121] + }, + { + "token": "?", + "logprob": -5.5247097, + "bytes": [63] + } + ] + }, + { + "token": "?", + "logprob": -0.0008108172, + "bytes": [63], + "top_logprobs": [ + { + "token": "?", + "logprob": -0.0008108172, + "bytes": [63] + }, + { + "token": "?\n", + "logprob": -7.184561, + "bytes": [63, 10] + } + ] + } + ] + }, + "finish_reason": "stop" + } + ], + "usage": { + "prompt_tokens": 9, + "completion_tokens": 9, + "total_tokens": 18 + }, + "system_fingerprint": null } + /completions: post: operationId: createCompletion tags: - - OpenAI + - Completions summary: Creates a completion for the provided prompt and parameters. requestBody: required: true @@ -206,6 +700,7 @@ paths: $ref: "#/components/schemas/CreateCompletionResponse" x-oaiMeta: name: Create completion + group: completions returns: | Returns a [completion](/docs/api-reference/completions/object) object, or a sequence of completion objects if the request is streamed. 
legacy: true @@ -223,10 +718,10 @@ paths: "temperature": 0 }' python: | - import os - import openai - openai.api_key = os.getenv("OPENAI_API_KEY") - openai.Completion.create( + from openai import OpenAI + client = OpenAI() + + client.completions.create( model="VAR_model_id", prompt="Say this is a test", max_tokens=7, @@ -254,6 +749,7 @@ paths: "object": "text_completion", "created": 1589478378, "model": "VAR_model_id", + "system_fingerprint": "fp_44709d6fcb", "choices": [ { "text": "\n\nThis is indeed a test", @@ -282,17 +778,17 @@ paths: "stream": true }' python: | - import os - import openai - openai.api_key = os.getenv("OPENAI_API_KEY") - for chunk in openai.Completion.create( + from openai import OpenAI + client = OpenAI() + + for chunk in client.completions.create( model="VAR_model_id", prompt="Say this is a test", max_tokens=7, temperature=0, stream=True ): - print(chunk['choices'][0]['text']) + print(chunk.choices[0].text) node.js: |- import OpenAI from "openai"; @@ -324,90 +820,14 @@ paths: } ], "model": "gpt-3.5-turbo-instruct" + "system_fingerprint": "fp_44709d6fcb", } - /edits: - post: - operationId: createEdit - deprecated: true - tags: - - OpenAI - summary: Creates a new edit for the provided input, instruction, and parameters. - requestBody: - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/CreateEditRequest" - responses: - "200": - description: OK - content: - application/json: - schema: - $ref: "#/components/schemas/CreateEditResponse" - x-oaiMeta: - name: Create edit - returns: | - Returns an [edit](/docs/api-reference/edits/object) object. - group: edits - examples: - request: - curl: | - curl https://api.openai.com/v1/edits \ - -H "Content-Type: application/json" \ - -H "Authorization: Bearer $OPENAI_API_KEY" \ - -d '{ - "model": "VAR_model_id", - "input": "What day of the wek is it?", - "instruction": "Fix the spelling mistakes" - }' - python: | - import os - import openai - openai.api_key = os.getenv("OPENAI_API_KEY") - openai.Edit.create( - model="VAR_model_id", - input="What day of the wek is it?", - instruction="Fix the spelling mistakes" - ) - node.js: |- - import OpenAI from "openai"; - - const openai = new OpenAI(); - - async function main() { - const edit = await openai.edits.create({ - model: "VAR_model_id", - input: "What day of the wek is it?", - instruction: "Fix the spelling mistakes.", - }); - - console.log(edit); - } - - main(); - response: &edit_example | - { - "object": "edit", - "created": 1589478378, - "choices": [ - { - "text": "What day of the week is it?", - "index": 0, - } - ], - "usage": { - "prompt_tokens": 25, - "completion_tokens": 32, - "total_tokens": 57 - } - } /images/generations: post: operationId: createImage tags: - - OpenAI + - Images summary: Creates an image given a prompt. requestBody: required: true @@ -424,6 +844,7 @@ paths: $ref: "#/components/schemas/ImagesResponse" x-oaiMeta: name: Create image + group: images returns: Returns a list of [image](/docs/api-reference/images/object) objects. 
examples: request: @@ -432,17 +853,19 @@ paths: -H "Content-Type: application/json" \ -H "Authorization: Bearer $OPENAI_API_KEY" \ -d '{ + "model": "dall-e-3", "prompt": "A cute baby sea otter", - "n": 2, + "n": 1, "size": "1024x1024" }' python: | - import os - import openai - openai.api_key = os.getenv("OPENAI_API_KEY") - openai.Image.create( + from openai import OpenAI + client = OpenAI() + + client.images.generate( + model="dall-e-3", prompt="A cute baby sea otter", - n=2, + n=1, size="1024x1024" ) node.js: |- @@ -451,7 +874,7 @@ paths: const openai = new OpenAI(); async function main() { - const image = await openai.images.generate({ prompt: "A cute baby sea otter" }); + const image = await openai.images.generate({ model: "dall-e-3", prompt: "A cute baby sea otter" }); console.log(image.data); } @@ -468,12 +891,11 @@ paths: } ] } - /images/edits: post: operationId: createImageEdit tags: - - OpenAI + - Images summary: Creates an edited or extended image given an original image and a prompt. requestBody: required: true @@ -490,6 +912,7 @@ paths: $ref: "#/components/schemas/ImagesResponse" x-oaiMeta: name: Create image edit + group: images returns: Returns a list of [image](/docs/api-reference/images/object) objects. examples: request: @@ -502,10 +925,10 @@ paths: -F n=2 \ -F size="1024x1024" python: | - import os - import openai - openai.api_key = os.getenv("OPENAI_API_KEY") - openai.Image.create_edit( + from openai import OpenAI + client = OpenAI() + + client.images.edit( image=open("otter.png", "rb"), mask=open("mask.png", "rb"), prompt="A cute baby sea otter wearing a beret", @@ -540,12 +963,11 @@ paths: } ] } - /images/variations: post: operationId: createImageVariation tags: - - OpenAI + - Images summary: Creates a variation of a given image. requestBody: required: true @@ -562,6 +984,7 @@ paths: $ref: "#/components/schemas/ImagesResponse" x-oaiMeta: name: Create image variation + group: images returns: Returns a list of [image](/docs/api-reference/images/object) objects. examples: request: @@ -572,11 +995,11 @@ paths: -F n=2 \ -F size="1024x1024" python: | - import os - import openai - openai.api_key = os.getenv("OPENAI_API_KEY") - openai.Image.create_variation( - image=open("otter.png", "rb"), + from openai import OpenAI + client = OpenAI() + + response = client.images.create_variation( + image=open("image_edit_original.png", "rb"), n=2, size="1024x1024" ) @@ -611,7 +1034,7 @@ paths: post: operationId: createEmbedding tags: - - OpenAI + - Embeddings summary: Creates an embedding vector representing the input text. requestBody: required: true @@ -628,6 +1051,7 @@ paths: $ref: "#/components/schemas/CreateEmbeddingResponse" x-oaiMeta: name: Create embeddings + group: embeddings returns: A list of [embedding](/docs/api-reference/embeddings/object) objects. examples: request: @@ -637,15 +1061,17 @@ paths: -H "Content-Type: application/json" \ -d '{ "input": "The food was delicious and the waiter...", - "model": "text-embedding-ada-002" + "model": "text-embedding-ada-002", + "encoding_format": "float" }' python: | - import os - import openai - openai.api_key = os.getenv("OPENAI_API_KEY") - openai.Embedding.create( + from openai import OpenAI + client = OpenAI() + + client.embeddings.create( model="text-embedding-ada-002", - input="The food was delicious and the waiter..." 
+ input="The food was delicious and the waiter...", + encoding_format="float" ) node.js: |- import OpenAI from "openai"; @@ -656,6 +1082,7 @@ paths: const embedding = await openai.embeddings.create({ model: "text-embedding-ada-002", input: "The quick brown fox jumped over the lazy dog", + encoding_format: "float", }); console.log(embedding); @@ -684,74 +1111,149 @@ paths: } } - /audio/transcriptions: + /audio/speech: post: - operationId: createTranscription + operationId: createSpeech tags: - - OpenAI - summary: Transcribes audio into the input language. + - Audio + summary: Generates audio from the input text. requestBody: required: true content: - multipart/form-data: + application/json: schema: - $ref: "#/components/schemas/CreateTranscriptionRequest" + $ref: "#/components/schemas/CreateSpeechRequest" responses: "200": description: OK + headers: + Transfer-Encoding: + schema: + type: string + description: chunked content: - application/json: + application/octet-stream: schema: - $ref: "#/components/schemas/CreateTranscriptionResponse" + type: string + format: binary x-oaiMeta: - name: Create transcription - returns: The transcriped text. + name: Create speech + group: audio + returns: The audio file content. examples: request: curl: | - curl https://api.openai.com/v1/audio/transcriptions \ + curl https://api.openai.com/v1/audio/speech \ -H "Authorization: Bearer $OPENAI_API_KEY" \ - -H "Content-Type: multipart/form-data" \ - -F file="@/path/to/file/audio.mp3" \ - -F model="whisper-1" + -H "Content-Type: application/json" \ + -d '{ + "model": "tts-1", + "input": "The quick brown fox jumped over the lazy dog.", + "voice": "alloy" + }' \ + --output speech.mp3 python: | - import os + from pathlib import Path import openai - openai.api_key = os.getenv("OPENAI_API_KEY") - audio_file = open("audio.mp3", "rb") - transcript = openai.Audio.transcribe("whisper-1", audio_file) - node: |- + + speech_file_path = Path(__file__).parent / "speech.mp3" + response = openai.audio.speech.create( + model="tts-1", + voice="alloy", + input="The quick brown fox jumped over the lazy dog." + ) + response.stream_to_file(speech_file_path) + node: | import fs from "fs"; + import path from "path"; import OpenAI from "openai"; const openai = new OpenAI(); + const speechFile = path.resolve("./speech.mp3"); + async function main() { - const transcription = await openai.audio.transcriptions.create({ - file: fs.createReadStream("audio.mp3"), - model: "whisper-1", + const mp3 = await openai.audio.speech.create({ + model: "tts-1", + voice: "alloy", + input: "Today is a wonderful day to build something people love!", }); - - console.log(transcription.text); + console.log(speechFile); + const buffer = Buffer.from(await mp3.arrayBuffer()); + await fs.promises.writeFile(speechFile, buffer); } main(); - response: | - { - "text": "Imagine the wildest idea that you've ever had, and you're curious about how it might scale to something that's a 100, a 1,000 times bigger. This is a place where you can get to do that." - } - - /audio/translations: + /audio/transcriptions: post: - operationId: createTranslation + operationId: createTranscription tags: - - OpenAI - summary: Translates audio into English. + - Audio + summary: Transcribes audio into the input language. 
requestBody: required: true content: multipart/form-data: schema: - $ref: "#/components/schemas/CreateTranslationRequest" + $ref: "#/components/schemas/CreateTranscriptionRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/CreateTranscriptionResponse" + x-oaiMeta: + name: Create transcription + group: audio + returns: The transcribed text. + examples: + request: + curl: | + curl https://api.openai.com/v1/audio/transcriptions \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: multipart/form-data" \ + -F file="@/path/to/file/audio.mp3" \ + -F model="whisper-1" + python: | + from openai import OpenAI + client = OpenAI() + + audio_file = open("speech.mp3", "rb") + transcript = client.audio.transcriptions.create( + model="whisper-1", + file=audio_file + ) + node: | + import fs from "fs"; + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const transcription = await openai.audio.transcriptions.create({ + file: fs.createReadStream("audio.mp3"), + model: "whisper-1", + }); + + console.log(transcription.text); + } + main(); + response: | + { + "text": "Imagine the wildest idea that you've ever had, and you're curious about how it might scale to something that's a 100, a 1,000 times bigger. This is a place where you can get to do that." + } + /audio/translations: + post: + operationId: createTranslation + tags: + - Audio + summary: Translates audio into English. + requestBody: + required: true + content: + multipart/form-data: + schema: + $ref: "#/components/schemas/CreateTranslationRequest" responses: "200": description: OK @@ -761,6 +1263,7 @@ paths: $ref: "#/components/schemas/CreateTranslationResponse" x-oaiMeta: name: Create translation + group: audio returns: The translated text. examples: request: @@ -771,21 +1274,29 @@ paths: -F file="@/path/to/file/german.m4a" \ -F model="whisper-1" python: | - import os - import openai - openai.api_key = os.getenv("OPENAI_API_KEY") - audio_file = open("german.m4a", "rb") - transcript = openai.Audio.translate("whisper-1", audio_file) + from openai import OpenAI + client = OpenAI() + + audio_file = open("speech.mp3", "rb") + transcript = client.audio.translations.create( + model="whisper-1", + file=audio_file + ) node: | - const { Configuration, OpenAIApi } = require("openai"); - const configuration = new Configuration({ - apiKey: process.env.OPENAI_API_KEY, - }); - const openai = new OpenAIApi(configuration); - const resp = await openai.createTranslation( - fs.createReadStream("audio.mp3"), - "whisper-1" - ); + import fs from "fs"; + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const translation = await openai.audio.translations.create({ + file: fs.createReadStream("speech.mp3"), + model: "whisper-1", + }); + + console.log(translation.text); + } + main(); response: | { "text": "Hello, my name is Wolfgang and I come from Germany. Where are you heading today?" @@ -795,8 +1306,15 @@ paths: get: operationId: listFiles tags: - - OpenAI + - Files summary: Returns a list of files that belong to the user's organization. + parameters: + - in: query + name: purpose + required: false + schema: + type: string + description: Only return files with the given purpose. responses: "200": description: OK @@ -806,17 +1324,18 @@ paths: $ref: "#/components/schemas/ListFilesResponse" x-oaiMeta: name: List files - returns: A list of [file](/docs/api-reference/files/object) objects. 
+ group: files + returns: A list of [File](/docs/api-reference/files/object) objects. examples: request: curl: | curl https://api.openai.com/v1/files \ -H "Authorization: Bearer $OPENAI_API_KEY" python: | - import os - import openai - openai.api_key = os.getenv("OPENAI_API_KEY") - openai.File.list() + from openai import OpenAI + client = OpenAI() + + client.files.list() node.js: |- import OpenAI from "openai"; @@ -839,8 +1358,8 @@ paths: "object": "file", "bytes": 175, "created_at": 1613677385, - "filename": "train.jsonl", - "purpose": "search" + "filename": "salesOverview.pdf", + "purpose": "assistants", }, { "id": "file-abc123", @@ -848,7 +1367,7 @@ paths: "bytes": 140, "created_at": 1613779121, "filename": "puppy.jsonl", - "purpose": "search" + "purpose": "fine-tune", } ], "object": "list" @@ -856,10 +1375,13 @@ paths: post: operationId: createFile tags: - - OpenAI + - Files summary: | - Upload a file that contains document(s) to be used across various endpoints/features. Currently, the size of all the files uploaded by one organization can be up to 1 GB. Please contact us if you need to increase the storage limit. + Upload a file that can be used across various endpoints. The size of all the files uploaded by one organization can be up to 100 GB. + + The size of individual files can be a maximum of 512 MB or 2 million tokens for Assistants. See the [Assistants Tools guide](/docs/assistants/tools) to learn more about the types of files supported. The Fine-tuning API only supports `.jsonl` files. + Please [contact us](https://help.openai.com/) if you need to increase these storage limits. requestBody: required: true content: @@ -875,7 +1397,8 @@ paths: $ref: "#/components/schemas/OpenAIFile" x-oaiMeta: name: Upload file - returns: The uploaded [file](/docs/api-reference/files/object) object. + group: files + returns: The uploaded [File](/docs/api-reference/files/object) object. examples: request: curl: | @@ -884,12 +1407,12 @@ paths: -F purpose="fine-tune" \ -F file="@mydata.jsonl" python: | - import os - import openai - openai.api_key = os.getenv("OPENAI_API_KEY") - openai.File.create( + from openai import OpenAI + client = OpenAI() + + client.files.create( file=open("mydata.jsonl", "rb"), - purpose='fine-tune' + purpose="fine-tune" ) node.js: |- import fs from "fs"; @@ -911,18 +1434,16 @@ paths: { "id": "file-abc123", "object": "file", - "bytes": 140, - "created_at": 1613779121, + "bytes": 120000, + "created_at": 1677610602, "filename": "mydata.jsonl", "purpose": "fine-tune", - "status": "uploaded" | "processed" | "pending" | "error" } - /files/{file_id}: delete: operationId: deleteFile tags: - - OpenAI + - Files summary: Delete a file. parameters: - in: path @@ -940,6 +1461,7 @@ paths: $ref: "#/components/schemas/DeleteFileResponse" x-oaiMeta: name: Delete file + group: files returns: Deletion status. examples: request: @@ -948,10 +1470,10 @@ paths: -X DELETE \ -H "Authorization: Bearer $OPENAI_API_KEY" python: | - import os - import openai - openai.api_key = os.getenv("OPENAI_API_KEY") - openai.File.delete("file-abc123") + from openai import OpenAI + client = OpenAI() + + client.files.delete("file-abc123") node.js: |- import OpenAI from "openai"; @@ -973,7 +1495,7 @@ paths: get: operationId: retrieveFile tags: - - OpenAI + - Files summary: Returns information about a specific file. 
parameters: - in: path @@ -991,17 +1513,18 @@ paths: $ref: "#/components/schemas/OpenAIFile" x-oaiMeta: name: Retrieve file - returns: The [file](/docs/api-reference/files/object) object matching the specified ID. + group: files + returns: The [File](/docs/api-reference/files/object) object matching the specified ID. examples: request: curl: | curl https://api.openai.com/v1/files/file-abc123 \ -H "Authorization: Bearer $OPENAI_API_KEY" python: | - import os - import openai - openai.api_key = os.getenv("OPENAI_API_KEY") - openai.File.retrieve("file-abc123") + from openai import OpenAI + client = OpenAI() + + client.files.retrieve("file-abc123") node.js: |- import OpenAI from "openai"; @@ -1018,17 +1541,16 @@ paths: { "id": "file-abc123", "object": "file", - "bytes": 140, - "created_at": 1613779657, + "bytes": 120000, + "created_at": 1677610602, "filename": "mydata.jsonl", - "purpose": "fine-tune" + "purpose": "fine-tune", } - /files/{file_id}/content: get: operationId: downloadFile tags: - - OpenAI + - Files summary: Returns the contents of the specified file. parameters: - in: path @@ -1046,6 +1568,7 @@ paths: type: string x-oaiMeta: name: Retrieve file content + group: files returns: The file content. examples: request: @@ -1053,10 +1576,10 @@ paths: curl https://api.openai.com/v1/files/file-abc123/content \ -H "Authorization: Bearer $OPENAI_API_KEY" > file.jsonl python: | - import os - import openai - openai.api_key = os.getenv("OPENAI_API_KEY") - content = openai.File.download("file-abc123") + from openai import OpenAI + client = OpenAI() + + content = client.files.retrieve_content("file-abc123") node.js: | import OpenAI from "openai"; @@ -1074,9 +1597,9 @@ paths: post: operationId: createFineTuningJob tags: - - OpenAI + - Fine-tuning summary: | - Creates a job that fine-tunes a specified model from a given dataset. + Creates a fine-tuning job which begins the process of creating a new model from a given dataset. Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. @@ -1096,23 +1619,27 @@ paths: $ref: "#/components/schemas/FineTuningJob" x-oaiMeta: name: Create fine-tuning job + group: fine-tuning returns: A [fine-tuning.job](/docs/api-reference/fine-tuning/object) object. 
examples: - - title: No hyperparameters + - title: Default request: curl: | curl https://api.openai.com/v1/fine_tuning/jobs \ -H "Content-Type: application/json" \ -H "Authorization: Bearer $OPENAI_API_KEY" \ -d '{ - "training_file": "file-abc123" - "model": "gpt-3.5-turbo", + "training_file": "file-BK7bzQj3FfZFXr7DbL6xJwfo", + "model": "gpt-3.5-turbo" }' python: | - import os - import openai - openai.api_key = os.getenv("OPENAI_API_KEY") - openai.FineTuningJob.create(training_file="file-abc123", model="gpt-3.5-turbo") + from openai import OpenAI + client = OpenAI() + + client.fine_tuning.jobs.create( + training_file="file-abc123", + model="gpt-3.5-turbo" + ) node.js: | import OpenAI from "openai"; @@ -1130,7 +1657,7 @@ paths: response: | { "object": "fine_tuning.job", - "id": "ft-AF1WoRqd3aJAHsqc9NY7iL8F", + "id": "ftjob-abc123", "model": "gpt-3.5-turbo-0613", "created_at": 1614807352, "fine_tuned_model": null, @@ -1140,24 +1667,30 @@ paths: "validation_file": null, "training_file": "file-abc123", } - - title: Hyperparameters + - title: Epochs request: curl: | curl https://api.openai.com/v1/fine_tuning/jobs \ -H "Content-Type: application/json" \ -H "Authorization: Bearer $OPENAI_API_KEY" \ -d '{ - "training_file": "file-abc123" + "training_file": "file-abc123", "model": "gpt-3.5-turbo", "hyperparameters": { "n_epochs": 2 } }' python: | - import os - import openai - openai.api_key = os.getenv("OPENAI_API_KEY") - openai.FineTuningJob.create(training_file="file-abc123", model="gpt-3.5-turbo", hyperparameters={"n_epochs":2}) + from openai import OpenAI + client = OpenAI() + + client.fine_tuning.jobs.create( + training_file="file-abc123", + model="gpt-3.5-turbo", + hyperparameters={ + "n_epochs":2 + } + ) node.js: | import OpenAI from "openai"; @@ -1167,7 +1700,7 @@ paths: const fineTune = await openai.fineTuning.jobs.create({ training_file: "file-abc123", model: "gpt-3.5-turbo", - hyperparameters: { n_epochs: 2 }, + hyperparameters: { n_epochs: 2 } }); console.log(fineTune); @@ -1177,7 +1710,7 @@ paths: response: | { "object": "fine_tuning.job", - "id": "ft-AF1WoRqd3aJAHsqc9NY7iL8F", + "id": "ftjob-abc123", "model": "gpt-3.5-turbo-0613", "created_at": 1614807352, "fine_tuned_model": null, @@ -1186,12 +1719,60 @@ paths: "status": "queued", "validation_file": null, "training_file": "file-abc123", - "hyperparameters":{"n_epochs":2}, + "hyperparameters": {"n_epochs": 2}, + } + - title: Validation file + request: + curl: | + curl https://api.openai.com/v1/fine_tuning/jobs \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "training_file": "file-abc123", + "validation_file": "file-abc123", + "model": "gpt-3.5-turbo" + }' + python: | + from openai import OpenAI + client = OpenAI() + + client.fine_tuning.jobs.create( + training_file="file-abc123", + validation_file="file-def456", + model="gpt-3.5-turbo" + ) + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const fineTune = await openai.fineTuning.jobs.create({ + training_file: "file-abc123", + validation_file: "file-abc123" + }); + + console.log(fineTune); + } + + main(); + response: | + { + "object": "fine_tuning.job", + "id": "ftjob-abc123", + "model": "gpt-3.5-turbo-0613", + "created_at": 1614807352, + "fine_tuned_model": null, + "organization_id": "org-123", + "result_files": [], + "status": "queued", + "validation_file": "file-abc123", + "training_file": "file-abc123", } get: operationId: listPaginatedFineTuningJobs tags: - - OpenAI + - 
Fine-tuning summary: | List your organization's fine-tuning jobs parameters: @@ -1217,6 +1798,7 @@ paths: $ref: "#/components/schemas/ListPaginatedFineTuningJobsResponse" x-oaiMeta: name: List fine-tuning jobs + group: fine-tuning returns: A list of paginated [fine-tuning job](/docs/api-reference/fine-tuning/object) objects. examples: request: @@ -1224,10 +1806,10 @@ paths: curl https://api.openai.com/v1/fine_tuning/jobs?limit=2 \ -H "Authorization: Bearer $OPENAI_API_KEY" python: | - import os - import openai - openai.api_key = os.getenv("OPENAI_API_KEY") - openai.FineTuningJob.list() + from openai import OpenAI + client = OpenAI() + + client.fine_tuning.jobs.list() node.js: |- import OpenAI from "openai"; @@ -1263,7 +1845,7 @@ paths: get: operationId: retrieveFineTuningJob tags: - - OpenAI + - Fine-tuning summary: | Get info about a fine-tuning job. @@ -1286,25 +1868,26 @@ paths: $ref: "#/components/schemas/FineTuningJob" x-oaiMeta: name: Retrieve fine-tuning job - returns: The [fine-tuning](/docs/api-reference/fine-tunes/object) object with the given ID. + group: fine-tuning + returns: The [fine-tuning](/docs/api-reference/fine-tuning/object) object with the given ID. examples: request: curl: | curl https://api.openai.com/v1/fine_tuning/jobs/ft-AF1WoRqd3aJAHsqc9NY7iL8F \ -H "Authorization: Bearer $OPENAI_API_KEY" python: | - import os - import openai - openai.api_key = os.getenv("OPENAI_API_KEY") - openai.FineTuningJob.retrieve("ft-anaKUAgnnBkNGB3QcSr4pImR") + from openai import OpenAI + client = OpenAI() + + client.fine_tuning.jobs.retrieve("ftjob-abc123") node.js: | import OpenAI from "openai"; const openai = new OpenAI(); async function main() { - const fineTune = await openai.fineTuning.jobs.retrieve("ft-AF1WoRqd3aJAHsqc9NY7iL8F"); - + const fineTune = await openai.fineTuning.jobs.retrieve("ftjob-abc123"); + console.log(fineTune); } @@ -1312,7 +1895,7 @@ paths: response: &fine_tuning_example | { "object": "fine_tuning.job", - "id": "ft-zRdUkP4QeZqeYjDcQL0wwam1", + "id": "ftjob-abc123", "model": "davinci-002", "created_at": 1692661014, "finished_at": 1692661190, @@ -1329,12 +1912,11 @@ paths: }, "trained_tokens": 5768 } - /fine_tuning/jobs/{fine_tuning_job_id}/events: get: operationId: listFineTuningEvents tags: - - OpenAI + - Fine-tuning summary: | Get status updates for a fine-tuning job. parameters: @@ -1368,24 +1950,28 @@ paths: $ref: "#/components/schemas/ListFineTuningJobEventsResponse" x-oaiMeta: name: List fine-tuning events + group: fine-tuning returns: A list of fine-tuning event objects. 
        examples:
          request:
            curl: |
-              curl https://api.openai.com/v1/fine_tuning/jobs/ft-AF1WoRqd3aJAHsqc9NY7iL8F/events \
+              curl https://api.openai.com/v1/fine_tuning/jobs/ftjob-abc123/events \
              -H "Authorization: Bearer $OPENAI_API_KEY"
            python: |
-              import os
-              import openai
-              openai.api_key = os.getenv("OPENAI_API_KEY")
-              openai.FineTuningJob.list_events(id="ft-w9WJrnTe9vcVopaTy9LrlGQv", limit=2)
+              from openai import OpenAI
+              client = OpenAI()
+
+              client.fine_tuning.jobs.list_events(
+                fine_tuning_job_id="ftjob-abc123",
+                limit=2
+              )
            node.js: |-
              import OpenAI from "openai";

              const openai = new OpenAI();

              async function main() {
-                const list = await openai.fineTuning.list_events(id="ft-w9WJrnTe9vcVopaTy9LrlGQv", limit=2);
+                const list = await openai.fineTuning.jobs.listEvents("ftjob-abc123", { limit: 2 });

                for await (const fineTune of list) {
                  console.log(fineTune);
@@ -1418,12 +2004,11 @@ paths:
              ],
              "has_more": true
            }
-
  /fine_tuning/jobs/{fine_tuning_job_id}/cancel:
    post:
      operationId: cancelFineTuningJob
      tags:
-        - OpenAI
+        - Fine-tuning
      summary: |
        Immediately cancel a fine-tune job.
      parameters:
@@ -1444,24 +2029,25 @@ paths:
                $ref: "#/components/schemas/FineTuningJob"
      x-oaiMeta:
        name: Cancel fine-tuning
+        group: fine-tuning
        returns: The cancelled [fine-tuning](/docs/api-reference/fine-tuning/object) object.
        examples:
          request:
            curl: |
-              curl -X POST https://api.openai.com/v1/fine_tuning/jobs/ft-AF1WoRqd3aJAHsqc9NY7iL8F/cancel \
+              curl -X POST https://api.openai.com/v1/fine_tuning/jobs/ftjob-abc123/cancel \
              -H "Authorization: Bearer $OPENAI_API_KEY"
            python: |
-              import os
-              import openai
-              openai.api_key = os.getenv("OPENAI_API_KEY")
-              openai.FineTuningJob.cancel("ft-anaKUAgnnBkNGB3QcSr4pImR")
+              from openai import OpenAI
+              client = OpenAI()
+
+              client.fine_tuning.jobs.cancel("ftjob-abc123")
            node.js: |-
              import OpenAI from "openai";

              const openai = new OpenAI();

              async function main() {
-                const fineTune = await openai.fineTuning.jobs.cancel("ft-AF1WoRqd3aJAHsqc9NY7iL8F");
+                const fineTune = await openai.fineTuning.jobs.cancel("ftjob-abc123");

                console.log(fineTune);
              }
@@ -1469,7 +2055,7 @@ paths:
          response: |
            {
              "object": "fine_tuning.job",
-              "id": "ft-gleYLJhWh1YFufiy29AahVpj",
+              "id": "ftjob-abc123",
              "model": "gpt-3.5-turbo-0613",
              "created_at": 1689376978,
              "fine_tuned_model": null,
@@ -1483,2690 +2069,6608 @@ paths:
              "training_file": "file-abc123"
            }
-
-  /fine-tunes:
-    post:
-      operationId: createFineTune
-      deprecated: true
+  /models:
+    get:
+      operationId: listModels
      tags:
-        - OpenAI
-      summary: |
-        Creates a job that fine-tunes a specified model from a given dataset.
-
-        Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete.
-
-        [Learn more about fine-tuning](/docs/guides/legacy-fine-tuning)
-      requestBody:
-        required: true
-        content:
-          application/json:
-            schema:
-              $ref: "#/components/schemas/CreateFineTuneRequest"
+        - Models
+      summary: Lists the currently available models, and provides basic information about each one such as the owner and availability.
      responses:
        "200":
          description: OK
          content:
            application/json:
              schema:
-                $ref: "#/components/schemas/FineTune"
+                $ref: "#/components/schemas/ListModelsResponse"
      x-oaiMeta:
-        name: Create fine-tune
-        returns: A [fine-tune](/docs/api-reference/fine-tunes/object) object.
+        name: List models
+        group: models
+        returns: A list of [model](/docs/api-reference/models/object) objects.
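As a quick companion to the list operation, the sketch below walks the model list and separates fine-tuned models from base models. It assumes fine-tuned model IDs carry the `ft:` prefix, as the delete-model example ID (`ft:gpt-3.5-turbo:acemeco:suffix:abc123`) suggests:

```python
# Sketch: enumerate models and flag fine-tuned ones by their "ft:" prefix
# (an assumption drawn from the example IDs in this spec).
from openai import OpenAI

client = OpenAI()

for model in client.models.list():  # the SDK pages through the full list
    kind = "fine-tuned" if model.id.startswith("ft:") else "base"
    print(f"{model.id}  ({kind}, owned by {model.owned_by})")
```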
examples: request: curl: | - curl https://api.openai.com/v1/fine-tunes \ - -H "Content-Type: application/json" \ - -H "Authorization: Bearer $OPENAI_API_KEY" \ - -d '{ - "training_file": "file-abc123" - }' + curl https://api.openai.com/v1/models \ + -H "Authorization: Bearer $OPENAI_API_KEY" python: | - import os - import openai - openai.api_key = os.getenv("OPENAI_API_KEY") - openai.FineTune.create(training_file="file-abc123") - node.js: | + from openai import OpenAI + client = OpenAI() + + client.models.list() + node.js: |- import OpenAI from "openai"; const openai = new OpenAI(); async function main() { - const fineTune = await openai.fineTunes.create({ - training_file: "file-abc123" - }); + const list = await openai.models.list(); - console.log(fineTune); + for await (const model of list) { + console.log(model); + } } - main(); response: | { - "id": "ft-AF1WoRqd3aJAHsqc9NY7iL8F", - "object": "fine-tune", - "model": "curie", - "created_at": 1614807352, - "events": [ + "object": "list", + "data": [ { - "object": "fine-tune-event", - "created_at": 1614807352, - "level": "info", - "message": "Job enqueued. Waiting for jobs ahead to complete. Queue number: 0." - } - ], - "fine_tuned_model": null, - "hyperparams": { - "batch_size": 4, - "learning_rate_multiplier": 0.1, - "n_epochs": 4, - "prompt_loss_weight": 0.1, - }, - "organization_id": "org-123", - "result_files": [], - "status": "pending", - "validation_files": [], - "training_files": [ + "id": "model-id-0", + "object": "model", + "created": 1686935002, + "owned_by": "organization-owner" + }, { - "id": "file-abc123", - "object": "file", - "bytes": 1547276, - "created_at": 1610062281, - "filename": "my-data-train.jsonl", - "purpose": "fine-tune-train" - } + "id": "model-id-1", + "object": "model", + "created": 1686935002, + "owned_by": "organization-owner", + }, + { + "id": "model-id-2", + "object": "model", + "created": 1686935002, + "owned_by": "openai" + }, ], - "updated_at": 1614807352, + "object": "list" } + /models/{model}: get: - operationId: listFineTunes - deprecated: true + operationId: retrieveModel tags: - - OpenAI - summary: | - List your organization's fine-tuning jobs + - Models + summary: Retrieves a model instance, providing basic information about the model such as the owner and permissioning. + parameters: + - in: path + name: model + required: true + schema: + type: string + # ideally this will be an actual ID, so this will always work from browser + example: gpt-3.5-turbo + description: The ID of the model to use for this request responses: "200": description: OK content: application/json: schema: - $ref: "#/components/schemas/ListFineTunesResponse" + $ref: "#/components/schemas/Model" x-oaiMeta: - name: List fine-tunes - returns: A list of [fine-tune](/docs/api-reference/fine-tunes/object) objects. + name: Retrieve model + group: models + returns: The [model](/docs/api-reference/models/object) object matching the specified ID. 
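Retrieval by ID is also the natural way to check whether a model is visible to your key before using it. A small sketch follows; the exact exception type (`openai.NotFoundError`) is how the v1 Python SDK surfaces a 404 and should be treated as an assumption here:

```python
# Sketch: probe for a model ID, treating a 404 as "not available".
import openai
from openai import OpenAI

client = OpenAI()

def model_exists(model_id: str) -> bool:
    try:
        client.models.retrieve(model_id)
        return True
    except openai.NotFoundError:  # assumed 404 mapping in the v1 SDK
        return False

print(model_exists("gpt-3.5-turbo"))
```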
examples: request: curl: | - curl https://api.openai.com/v1/fine-tunes \ + curl https://api.openai.com/v1/models/VAR_model_id \ -H "Authorization: Bearer $OPENAI_API_KEY" python: | - import os - import openai - openai.api_key = os.getenv("OPENAI_API_KEY") - openai.FineTune.list() + from openai import OpenAI + client = OpenAI() + + client.models.retrieve("VAR_model_id") node.js: |- import OpenAI from "openai"; const openai = new OpenAI(); async function main() { - const list = await openai.fineTunes.list(); + const model = await openai.models.retrieve("gpt-3.5-turbo"); - for await (const fineTune of list) { - console.log(fineTune); - } + console.log(model); } main(); - response: | + response: &retrieve_model_response | { - "object": "list", - "data": [ - { - "id": "ft-AF1WoRqd3aJAHsqc9NY7iL8F", - "object": "fine-tune", - "model": "curie", - "created_at": 1614807352, - "fine_tuned_model": null, - "hyperparams": { ... }, - "organization_id": "org-123", - "result_files": [], - "status": "pending", - "validation_files": [], - "training_files": [ { ... } ], - "updated_at": 1614807352, - }, - { ... }, - { ... } - ] + "id": "VAR_model_id", + "object": "model", + "created": 1686935002, + "owned_by": "openai" } - - /fine-tunes/{fine_tune_id}: - get: - operationId: retrieveFineTune - deprecated: true + delete: + operationId: deleteModel tags: - - OpenAI - summary: | - Gets info about the fine-tune job. - - [Learn more about fine-tuning](/docs/guides/legacy-fine-tuning) + - Models + summary: Delete a fine-tuned model. You must have the Owner role in your organization to delete a model. parameters: - in: path - name: fine_tune_id + name: model required: true schema: type: string - example: ft-AF1WoRqd3aJAHsqc9NY7iL8F - description: | - The ID of the fine-tune job + example: ft:gpt-3.5-turbo:acemeco:suffix:abc123 + description: The model to delete responses: "200": description: OK content: application/json: schema: - $ref: "#/components/schemas/FineTune" + $ref: "#/components/schemas/DeleteModelResponse" x-oaiMeta: - name: Retrieve fine-tune - returns: The [fine-tune](/docs/api-reference/fine-tunes/object) object with the given ID. + name: Delete a fine-tuned model + group: models + returns: Deletion status. examples: request: curl: | - curl https://api.openai.com/v1/fine-tunes/ft-AF1WoRqd3aJAHsqc9NY7iL8F \ + curl https://api.openai.com/v1/models/ft:gpt-3.5-turbo:acemeco:suffix:abc123 \ + -X DELETE \ -H "Authorization: Bearer $OPENAI_API_KEY" python: | - import os - import openai - openai.api_key = os.getenv("OPENAI_API_KEY") - openai.FineTune.retrieve(id="ft-AF1WoRqd3aJAHsqc9NY7iL8F") + from openai import OpenAI + client = OpenAI() + + client.models.delete("ft:gpt-3.5-turbo:acemeco:suffix:abc123") node.js: |- import OpenAI from "openai"; const openai = new OpenAI(); async function main() { - const fineTune = await openai.fineTunes.retrieve("ft-AF1WoRqd3aJAHsqc9NY7iL8F"); + const model = await openai.models.del("ft:gpt-3.5-turbo:acemeco:suffix:abc123"); - console.log(fineTune); + console.log(model); } - main(); - response: &fine_tune_example | + response: | { - "id": "ft-AF1WoRqd3aJAHsqc9NY7iL8F", - "object": "fine-tune", - "model": "curie", - "created_at": 1614807352, - "events": [ - { - "object": "fine-tune-event", - "created_at": 1614807352, - "level": "info", - "message": "Job enqueued. Waiting for jobs ahead to complete. Queue number: 0." - }, - { - "object": "fine-tune-event", - "created_at": 1614807356, - "level": "info", - "message": "Job started." 
- }, - { - "object": "fine-tune-event", - "created_at": 1614807861, - "level": "info", - "message": "Uploaded snapshot: curie:ft-acmeco-2021-03-03-21-44-20." - }, - { - "object": "fine-tune-event", - "created_at": 1614807864, - "level": "info", - "message": "Uploaded result files: file-abc123." - }, - { - "object": "fine-tune-event", - "created_at": 1614807864, - "level": "info", - "message": "Job succeeded." - } - ], - "fine_tuned_model": "curie:ft-acmeco-2021-03-03-21-44-20", - "hyperparams": { - "batch_size": 4, - "learning_rate_multiplier": 0.1, - "n_epochs": 4, - "prompt_loss_weight": 0.1, - }, - "organization_id": "org-123", - "result_files": [ - { - "id": "file-abc123", - "object": "file", - "bytes": 81509, - "created_at": 1614807863, - "filename": "compiled_results.csv", - "purpose": "fine-tune-results" - } - ], - "status": "succeeded", - "validation_files": [], - "training_files": [ - { - "id": "file-abc123", - "object": "file", - "bytes": 1547276, - "created_at": 1610062281, - "filename": "my-data-train.jsonl", - "purpose": "fine-tune-train" - } - ], - "updated_at": 1614807865, + "id": "ft:gpt-3.5-turbo:acemeco:suffix:abc123", + "object": "model", + "deleted": true } - /fine-tunes/{fine_tune_id}/cancel: + /moderations: post: - operationId: cancelFineTune - deprecated: true + operationId: createModeration tags: - - OpenAI - summary: | - Immediately cancel a fine-tune job. - parameters: - - in: path - name: fine_tune_id - required: true - schema: - type: string - example: ft-AF1WoRqd3aJAHsqc9NY7iL8F - description: | - The ID of the fine-tune job to cancel + - Moderations + summary: Classifies if text violates OpenAI's Content Policy + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateModerationRequest" responses: "200": description: OK content: application/json: schema: - $ref: "#/components/schemas/FineTune" + $ref: "#/components/schemas/CreateModerationResponse" x-oaiMeta: - name: Cancel fine-tune - returns: The cancelled [fine-tune](/docs/api-reference/fine-tunes/object) object. + name: Create moderation + group: moderations + returns: A [moderation](/docs/api-reference/moderations/object) object. examples: request: curl: | - curl https://api.openai.com/v1/fine-tunes/ft-AF1WoRqd3aJAHsqc9NY7iL8F/cancel \ - -H "Authorization: Bearer $OPENAI_API_KEY" + curl https://api.openai.com/v1/moderations \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "input": "I want to kill them." + }' python: | - import os - import openai - openai.api_key = os.getenv("OPENAI_API_KEY") - openai.FineTune.cancel(id="ft-AF1WoRqd3aJAHsqc9NY7iL8F") - node.js: |- + from openai import OpenAI + client = OpenAI() + + client.moderations.create(input="I want to kill them.") + node.js: | import OpenAI from "openai"; const openai = new OpenAI(); async function main() { - const fineTune = await openai.fineTunes.cancel("ft-AF1WoRqd3aJAHsqc9NY7iL8F"); + const moderation = await openai.moderations.create({ input: "I want to kill them." }); - console.log(fineTune); + console.log(moderation); } main(); - response: | + response: &moderation_example | { - "id": "ft-xhrpBbvVUzYGo8oUO1FY4nI7", - "object": "fine-tune", - "model": "curie", - "created_at": 1614807770, - "events": [ { ... } ], - "fine_tuned_model": null, - "hyperparams": { ... 
}, - "organization_id": "org-123", - "result_files": [], - "status": "cancelled", - "validation_files": [], - "training_files": [ + "id": "modr-XXXXX", + "model": "text-moderation-005", + "results": [ { - "id": "file-abc123", - "object": "file", - "bytes": 1547276, - "created_at": 1610062281, - "filename": "my-data-train.jsonl", - "purpose": "fine-tune-train" + "flagged": true, + "categories": { + "sexual": false, + "hate": false, + "harassment": false, + "self-harm": false, + "sexual/minors": false, + "hate/threatening": false, + "violence/graphic": false, + "self-harm/intent": false, + "self-harm/instructions": false, + "harassment/threatening": true, + "violence": true, + }, + "category_scores": { + "sexual": 1.2282071e-06, + "hate": 0.010696256, + "harassment": 0.29842457, + "self-harm": 1.5236925e-08, + "sexual/minors": 5.7246268e-08, + "hate/threatening": 0.0060676364, + "violence/graphic": 4.435014e-06, + "self-harm/intent": 8.098441e-10, + "self-harm/instructions": 2.8498655e-11, + "harassment/threatening": 0.63055265, + "violence": 0.99011886, + } } - ], - "updated_at": 1614807789, + ] } - /fine-tunes/{fine_tune_id}/events: + /assistants: get: - operationId: listFineTuneEvents - deprecated: true + operationId: listAssistants tags: - - OpenAI - summary: | - Get fine-grained status updates for a fine-tune job. + - Assistants + summary: Returns a list of assistants. parameters: - - in: path - name: fine_tune_id - required: true + - name: limit + in: query + description: &pagination_limit_param_description | + A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. + required: false + schema: + type: integer + default: 20 + - name: order + in: query + description: &pagination_order_param_description | + Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order. schema: type: string - example: ft-AF1WoRqd3aJAHsqc9NY7iL8F - description: | - The ID of the fine-tune job to get events for. - - in: query - name: stream - required: false + default: desc + enum: ["asc", "desc"] + - name: after + in: query + description: &pagination_after_param_description | + A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list. schema: - type: boolean - default: false - description: | - Whether to stream events for the fine-tune job. If set to true, - events will be sent as data-only - [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) - as they become available. The stream will terminate with a - `data: [DONE]` message when the job is finished (succeeded, cancelled, - or failed). - - If set to false, only events generated so far will be returned. + type: string + - name: before + in: query + description: &pagination_before_param_description | + A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list. 
+ schema: + type: string responses: "200": description: OK content: application/json: schema: - $ref: "#/components/schemas/ListFineTuneEventsResponse" + $ref: "#/components/schemas/ListAssistantsResponse" x-oaiMeta: - name: List fine-tune events - returns: A list of fine-tune event objects. + name: List assistants + group: assistants + beta: true + returns: A list of [assistant](/docs/api-reference/assistants/object) objects. examples: request: curl: | - curl https://api.openai.com/v1/fine-tunes/ft-AF1WoRqd3aJAHsqc9NY7iL8F/events \ - -H "Authorization: Bearer $OPENAI_API_KEY" + curl "https://api.openai.com/v1/assistants?order=desc&limit=20" \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v1" python: | - import os - import openai - openai.api_key = os.getenv("OPENAI_API_KEY") - openai.FineTune.list_events(id="ft-AF1WoRqd3aJAHsqc9NY7iL8F") + from openai import OpenAI + client = OpenAI() + + my_assistants = client.beta.assistants.list( + order="desc", + limit="20", + ) + print(my_assistants.data) node.js: |- import OpenAI from "openai"; const openai = new OpenAI(); async function main() { - const fineTune = await openai.fineTunes.listEvents("ft-AF1WoRqd3aJAHsqc9NY7iL8F"); + const myAssistants = await openai.beta.assistants.list({ + order: "desc", + limit: "20", + }); - console.log(fineTune); + console.log(myAssistants.data); } + main(); - response: | + response: &list_assistants_example | { "object": "list", "data": [ { - "object": "fine-tune-event", - "created_at": 1614807352, - "level": "info", - "message": "Job enqueued. Waiting for jobs ahead to complete. Queue number: 0." - }, - { - "object": "fine-tune-event", - "created_at": 1614807356, - "level": "info", - "message": "Job started." - }, - { - "object": "fine-tune-event", - "created_at": 1614807861, - "level": "info", - "message": "Uploaded snapshot: curie:ft-acmeco-2021-03-03-21-44-20." + "id": "asst_abc123", + "object": "assistant", + "created_at": 1698982736, + "name": "Coding Tutor", + "description": null, + "model": "gpt-4", + "instructions": "You are a helpful assistant designed to make me better at coding!", + "tools": [], + "file_ids": [], + "metadata": {} }, { - "object": "fine-tune-event", - "created_at": 1614807864, - "level": "info", - "message": "Uploaded result files: file-abc123" + "id": "asst_abc456", + "object": "assistant", + "created_at": 1698982718, + "name": "My Assistant", + "description": null, + "model": "gpt-4", + "instructions": "You are a helpful assistant designed to make me better at coding!", + "tools": [], + "file_ids": [], + "metadata": {} }, { - "object": "fine-tune-event", - "created_at": 1614807864, - "level": "info", - "message": "Job succeeded." + "id": "asst_abc789", + "object": "assistant", + "created_at": 1698982643, + "name": null, + "description": null, + "model": "gpt-4", + "instructions": null, + "tools": [], + "file_ids": [], + "metadata": {} } - ] + ], + "first_id": "asst_abc123", + "last_id": "asst_abc789", + "has_more": false } + post: + operationId: createAssistant + tags: + - Assistants + summary: Create an assistant with a model and instructions. 
+ requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateAssistantRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/AssistantObject" + x-oaiMeta: + name: Create assistant + group: assistants + beta: true + returns: An [assistant](/docs/api-reference/assistants/object) object. + examples: + - title: Code Interpreter + request: + curl: | + curl "https://api.openai.com/v1/assistants" \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v1" \ + -d '{ + "instructions": "You are a personal math tutor. When asked a question, write and run Python code to answer the question.", + "name": "Math Tutor", + "tools": [{"type": "code_interpreter"}], + "model": "gpt-4" + }' - /models: + python: | + from openai import OpenAI + client = OpenAI() + + my_assistant = client.beta.assistants.create( + instructions="You are a personal math tutor. When asked a question, write and run Python code to answer the question.", + name="Math Tutor", + tools=[{"type": "code_interpreter"}], + model="gpt-4", + ) + print(my_assistant) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const myAssistant = await openai.beta.assistants.create({ + instructions: + "You are a personal math tutor. When asked a question, write and run Python code to answer the question.", + name: "Math Tutor", + tools: [{ type: "code_interpreter" }], + model: "gpt-4", + }); + + console.log(myAssistant); + } + + main(); + response: &create_assistants_example | + { + "id": "asst_abc123", + "object": "assistant", + "created_at": 1698984975, + "name": "Math Tutor", + "description": null, + "model": "gpt-4", + "instructions": "You are a personal math tutor. 
When asked a question, write and run Python code to answer the question.", + "tools": [ + { + "type": "code_interpreter" + } + ], + "file_ids": [], + "metadata": {} + } + - title: Files + request: + curl: | + curl https://api.openai.com/v1/assistants \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v1" \ + -d '{ + "instructions": "You are an HR bot, and you have access to files to answer employee questions about company policies.", + "tools": [{"type": "retrieval"}], + "model": "gpt-4", + "file_ids": ["file-abc123"] + }' + python: | + from openai import OpenAI + client = OpenAI() + + my_assistant = client.beta.assistants.create( + instructions="You are an HR bot, and you have access to files to answer employee questions about company policies.", + name="HR Helper", + tools=[{"type": "retrieval"}], + model="gpt-4", + file_ids=["file-abc123"], + ) + print(my_assistant) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const myAssistant = await openai.beta.assistants.create({ + instructions: + "You are an HR bot, and you have access to files to answer employee questions about company policies.", + name: "HR Helper", + tools: [{ type: "retrieval" }], + model: "gpt-4", + file_ids: ["file-abc123"], + }); + + console.log(myAssistant); + } + + main(); + response: | + { + "id": "asst_abc123", + "object": "assistant", + "created_at": 1699009403, + "name": "HR Helper", + "description": null, + "model": "gpt-4", + "instructions": "You are an HR bot, and you have access to files to answer employee questions about company policies.", + "tools": [ + { + "type": "retrieval" + } + ], + "file_ids": [ + "file-abc123" + ], + "metadata": {} + } + + /assistants/{assistant_id}: get: - operationId: listModels + operationId: getAssistant tags: - - OpenAI - summary: Lists the currently available models, and provides basic information about each one such as the owner and availability. + - Assistants + summary: Retrieves an assistant. + parameters: + - in: path + name: assistant_id + required: true + schema: + type: string + description: The ID of the assistant to retrieve. responses: "200": description: OK content: application/json: schema: - $ref: "#/components/schemas/ListModelsResponse" + $ref: "#/components/schemas/AssistantObject" x-oaiMeta: - name: List models - returns: A list of [model](/docs/api-reference/models/object) objects. + name: Retrieve assistant + group: assistants + beta: true + returns: The [assistant](/docs/api-reference/assistants/object) object matching the specified ID. 
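Because create always makes a new assistant, repeated runs of a setup script can pile up duplicates. Here is a hedged sketch that combines the list and create operations above into a get-or-create helper; the name-based lookup is an illustrative convention, not an API feature:

```python
# Sketch: reuse an existing assistant by name, creating it only once.
from openai import OpenAI

client = OpenAI()

def get_or_create_assistant(name: str, instructions: str, model: str = "gpt-4"):
    for assistant in client.beta.assistants.list(order="desc", limit=100):
        if assistant.name == name:  # illustrative convention, not an API feature
            return assistant
    return client.beta.assistants.create(name=name, instructions=instructions, model=model)

tutor = get_or_create_assistant(
    "Math Tutor",
    "You are a personal math tutor. When asked a question, write and run Python code to answer the question.",
)
print(tutor.id)
```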
examples: request: curl: | - curl https://api.openai.com/v1/models \ - -H "Authorization: Bearer $OPENAI_API_KEY" + curl https://api.openai.com/v1/assistants/asst_abc123 \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v1" python: | - import os - import openai - openai.api_key = os.getenv("OPENAI_API_KEY") - openai.Model.list() + from openai import OpenAI + client = OpenAI() + + my_assistant = client.beta.assistants.retrieve("asst_abc123") + print(my_assistant) node.js: |- import OpenAI from "openai"; const openai = new OpenAI(); async function main() { - const list = await openai.models.list(); + const myAssistant = await openai.beta.assistants.retrieve( + "asst_abc123" + ); - for await (const model of list) { - console.log(model); - } + console.log(myAssistant); } + main(); response: | { - "object": "list", - "data": [ - { - "id": "model-id-0", - "object": "model", - "created": 1686935002, - "owned_by": "organization-owner" - }, + "id": "asst_abc123", + "object": "assistant", + "created_at": 1699009709, + "name": "HR Helper", + "description": null, + "model": "gpt-4", + "instructions": "You are an HR bot, and you have access to files to answer employee questions about company policies.", + "tools": [ { - "id": "model-id-1", - "object": "model", - "created": 1686935002, - "owned_by": "organization-owner", - }, - { - "id": "model-id-2", - "object": "model", - "created": 1686935002, - "owned_by": "openai" - }, + "type": "retrieval" + } ], - "object": "list" + "file_ids": [ + "file-abc123" + ], + "metadata": {} } - - /models/{model}: - get: - operationId: retrieveModel + post: + operationId: modifyAssistant tags: - - OpenAI - summary: Retrieves a model instance, providing basic information about the model such as the owner and permissioning. + - Assistants + summary: Modifies an assistant. parameters: - in: path - name: model + name: assistant_id required: true schema: type: string - # ideally this will be an actual ID, so this will always work from browser - example: gpt-3.5-turbo - description: The ID of the model to use for this request + description: The ID of the assistant to modify. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/ModifyAssistantRequest" responses: "200": description: OK content: application/json: schema: - $ref: "#/components/schemas/Model" + $ref: "#/components/schemas/AssistantObject" x-oaiMeta: - name: Retrieve model - returns: The [model](/docs/api-reference/models/object) object matching the specified ID. + name: Modify assistant + group: assistants + beta: true + returns: The modified [assistant](/docs/api-reference/assistants/object) object. examples: request: curl: | - curl https://api.openai.com/v1/models/VAR_model_id \ - -H "Authorization: Bearer $OPENAI_API_KEY" + curl https://api.openai.com/v1/assistants/asst_abc123 \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v1" \ + -d '{ + "instructions": "You are an HR bot, and you have access to files to answer employee questions about company policies. 
Always respond with info from either of the files.",
+            "tools": [{"type": "retrieval"}],
+            "model": "gpt-4",
+            "file_ids": ["file-abc123", "file-abc456"]
+          }'
          python: |
-            import os
-            import openai
-            openai.api_key = os.getenv("OPENAI_API_KEY")
-            openai.Model.retrieve("VAR_model_id")
+            from openai import OpenAI
+            client = OpenAI()
+
+            my_updated_assistant = client.beta.assistants.update(
+              "asst_abc123",
+              instructions="You are an HR bot, and you have access to files to answer employee questions about company policies. Always respond with info from either of the files.",
+              name="HR Helper",
+              tools=[{"type": "retrieval"}],
+              model="gpt-4",
+              file_ids=["file-abc123", "file-abc456"],
+            )
+
+            print(my_updated_assistant)
          node.js: |-
            import OpenAI from "openai";

            const openai = new OpenAI();

            async function main() {
-              const model = await openai.models.retrieve("gpt-3.5-turbo");
+              const myUpdatedAssistant = await openai.beta.assistants.update(
+                "asst_abc123",
+                {
+                  instructions:
+                    "You are an HR bot, and you have access to files to answer employee questions about company policies. Always respond with info from either of the files.",
+                  name: "HR Helper",
+                  tools: [{ type: "retrieval" }],
+                  model: "gpt-4",
+                  file_ids: [
+                    "file-abc123",
+                    "file-abc456",
+                  ],
+                }
+              );

-              console.log(model);
+              console.log(myUpdatedAssistant);
            }

            main();
-        response: &retrieve_model_response |
+        response: |
          {
-            "id": "VAR_model_id",
-            "object": "model",
-            "created": 1686935002,
-            "owned_by": "openai"
+            "id": "asst_abc123",
+            "object": "assistant",
+            "created_at": 1699009709,
+            "name": "HR Helper",
+            "description": null,
+            "model": "gpt-4",
+            "instructions": "You are an HR bot, and you have access to files to answer employee questions about company policies. Always respond with info from either of the files.",
+            "tools": [
+              {
+                "type": "retrieval"
+              }
+            ],
+            "file_ids": [
+              "file-abc123",
+              "file-abc456"
+            ],
+            "metadata": {}
          }
    delete:
-      operationId: deleteModel
+      operationId: deleteAssistant
      tags:
-        - OpenAI
-      summary: Delete a fine-tuned model. You must have the Owner role in your organization to delete a model.
+        - Assistants
+      summary: Delete an assistant.
      parameters:
        - in: path
-          name: model
+          name: assistant_id
          required: true
          schema:
            type: string
-          example: ft:gpt-3.5-turbo:acemeco:suffix:abc123
-          description: The model to delete
+          description: The ID of the assistant to delete.
      responses:
        "200":
          description: OK
          content:
            application/json:
              schema:
-                $ref: "#/components/schemas/DeleteModelResponse"
+                $ref: "#/components/schemas/DeleteAssistantResponse"
      x-oaiMeta:
-        name: Delete fine-tune model
-        returns: Deletion status.
+ name: Delete assistant + group: assistants + beta: true + returns: Deletion status examples: request: curl: | - curl https://api.openai.com/v1/models/ft:gpt-3.5-turbo:acemeco:suffix:abc123 \ - -X DELETE \ - -H "Authorization: Bearer $OPENAI_API_KEY" + curl https://api.openai.com/v1/assistants/asst_abc123 \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v1" \ + -X DELETE python: | - import os - import openai - openai.api_key = os.getenv("OPENAI_API_KEY") - openai.Model.delete("ft:gpt-3.5-turbo:acemeco:suffix:abc123") + from openai import OpenAI + client = OpenAI() + + response = client.beta.assistants.delete("asst_abc123") + print(response) node.js: |- import OpenAI from "openai"; const openai = new OpenAI(); async function main() { - const model = await openai.models.del("ft:gpt-3.5-turbo:acemeco:suffix:abc123"); + const response = await openai.beta.assistants.del("asst_abc123"); - console.log(model); + console.log(response); } main(); response: | { - "id": "ft:gpt-3.5-turbo:acemeco:suffix:abc123", - "object": "model", + "id": "asst_abc123", + "object": "assistant.deleted", "deleted": true } - /moderations: + /threads: post: - operationId: createModeration + operationId: createThread tags: - - OpenAI - summary: Classifies if text violates OpenAI's Content Policy + - Assistants + summary: Create a thread. + requestBody: + content: + application/json: + schema: + $ref: "#/components/schemas/CreateThreadRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ThreadObject" + x-oaiMeta: + name: Create thread + group: threads + beta: true + returns: A [thread](/docs/api-reference/threads) object. + examples: + - title: Empty + request: + curl: | + curl https://api.openai.com/v1/threads \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v1" \ + -d '' + python: | + from openai import OpenAI + client = OpenAI() + + empty_thread = client.beta.threads.create() + print(empty_thread) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const emptyThread = await openai.beta.threads.create(); + + console.log(emptyThread); + } + + main(); + response: | + { + "id": "thread_abc123", + "object": "thread", + "created_at": 1699012949, + "metadata": {} + } + - title: Messages + request: + curl: | + curl https://api.openai.com/v1/threads \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v1" \ + -d '{ + "messages": [{ + "role": "user", + "content": "Hello, what is AI?", + "file_ids": ["file-abc123"] + }, { + "role": "user", + "content": "How does AI work? Explain it in simple terms." + }] + }' + python: | + from openai import OpenAI + client = OpenAI() + + message_thread = client.beta.threads.create( + messages=[ + { + "role": "user", + "content": "Hello, what is AI?", + "file_ids": ["file-abc123"], + }, + { + "role": "user", + "content": "How does AI work? Explain it in simple terms." + }, + ] + ) + + print(message_thread) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const messageThread = await openai.beta.threads.create({ + messages: [ + { + role: "user", + content: "Hello, what is AI?", + file_ids: ["file-abc123"], + }, + { + role: "user", + content: "How does AI work? 
Explain it in simple terms.", + }, + ], + }); + + console.log(messageThread); + } + + main(); + response: | + { + id: 'thread_abc123', + object: 'thread', + created_at: 1699014083, + metadata: {} + } + + /threads/{thread_id}: + get: + operationId: getThread + tags: + - Assistants + summary: Retrieves a thread. + parameters: + - in: path + name: thread_id + required: true + schema: + type: string + description: The ID of the thread to retrieve. + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ThreadObject" + x-oaiMeta: + name: Retrieve thread + group: threads + beta: true + returns: The [thread](/docs/api-reference/threads/object) object matching the specified ID. + examples: + request: + curl: | + curl https://api.openai.com/v1/threads/thread_abc123 \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v1" + python: | + from openai import OpenAI + client = OpenAI() + + my_thread = client.beta.threads.retrieve("thread_abc123") + print(my_thread) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const myThread = await openai.beta.threads.retrieve( + "thread_abc123" + ); + + console.log(myThread); + } + + main(); + response: | + { + "id": "thread_abc123", + "object": "thread", + "created_at": 1699014083, + "metadata": {} + } + post: + operationId: modifyThread + tags: + - Assistants + summary: Modifies a thread. + parameters: + - in: path + name: thread_id + required: true + schema: + type: string + description: The ID of the thread to modify. Only the `metadata` can be modified. requestBody: required: true content: application/json: schema: - $ref: "#/components/schemas/CreateModerationRequest" + $ref: "#/components/schemas/ModifyThreadRequest" responses: "200": description: OK content: application/json: schema: - $ref: "#/components/schemas/CreateModerationResponse" + $ref: "#/components/schemas/ThreadObject" x-oaiMeta: - name: Create moderation - returns: A [moderation](/docs/api-reference/moderations/object) object. + name: Modify thread + group: threads + beta: true + returns: The modified [thread](/docs/api-reference/threads/object) object matching the specified ID. examples: request: curl: | - curl https://api.openai.com/v1/moderations \ + curl https://api.openai.com/v1/threads/thread_abc123 \ -H "Content-Type: application/json" \ -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v1" \ -d '{ - "input": "I want to kill them." - }' + "metadata": { + "modified": "true", + "user": "abc123" + } + }' python: | - import os - import openai - openai.api_key = os.getenv("OPENAI_API_KEY") - openai.Moderation.create( - input="I want to kill them.", + from openai import OpenAI + client = OpenAI() + + my_updated_thread = client.beta.threads.update( + "thread_abc123", + metadata={ + "modified": "true", + "user": "abc123" + } ) - node.js: | + print(my_updated_thread) + node.js: |- import OpenAI from "openai"; const openai = new OpenAI(); async function main() { - const moderation = await openai.moderations.create({ input: "I want to kill them." 
}); + const updatedThread = await openai.beta.threads.update( + "thread_abc123", + { + metadata: { modified: "true", user: "abc123" }, + } + ); - console.log(moderation); + console.log(updatedThread); } + main(); - response: &moderation_example | + response: | { - "id": "modr-XXXXX", - "model": "text-moderation-005", - "results": [ - { - "flagged": true, - "categories": { - "sexual": false, - "hate": false, - "harassment": false, - "self-harm": false, - "sexual/minors": false, - "hate/threatening": false, - "violence/graphic": false, - "self-harm/intent": false, - "self-harm/instructions": false, - "harassment/threatening": true, - "violence": true, - }, - "category_scores": { - "sexual": 1.2282071e-06, - "hate": 0.010696256, - "harassment": 0.29842457, - "self-harm": 1.5236925e-08, - "sexual/minors": 5.7246268e-08, - "hate/threatening": 0.0060676364, - "violence/graphic": 4.435014e-06, - "self-harm/intent": 8.098441e-10, - "self-harm/instructions": 2.8498655e-11, - "harassment/threatening": 0.63055265, - "violence": 0.99011886, - } - } - ] + "id": "thread_abc123", + "object": "thread", + "created_at": 1699014083, + "metadata": { + "modified": "true", + "user": "abc123" + } } + delete: + operationId: deleteThread + tags: + - Assistants + summary: Delete a thread. + parameters: + - in: path + name: thread_id + required: true + schema: + type: string + description: The ID of the thread to delete. + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/DeleteThreadResponse" + x-oaiMeta: + name: Delete thread + group: threads + beta: true + returns: Deletion status + examples: + request: + curl: | + curl https://api.openai.com/v1/threads/thread_abc123 \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v1" \ + -X DELETE + python: | + from openai import OpenAI + client = OpenAI() -components: + response = client.beta.threads.delete("thread_abc123") + print(response) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const response = await openai.beta.threads.del("thread_abc123"); + + console.log(response); + } + main(); + response: | + { + "id": "thread_abc123", + "object": "thread.deleted", + "deleted": true + } + + /threads/{thread_id}/messages: + get: + operationId: listMessages + tags: + - Assistants + summary: Returns a list of messages for a given thread. + parameters: + - in: path + name: thread_id + required: true + schema: + type: string + description: The ID of the [thread](/docs/api-reference/threads) the messages belong to. + - name: limit + in: query + description: *pagination_limit_param_description + required: false + schema: + type: integer + default: 20 + - name: order + in: query + description: *pagination_order_param_description + schema: + type: string + default: desc + enum: ["asc", "desc"] + - name: after + in: query + description: *pagination_after_param_description + schema: + type: string + - name: before + in: query + description: *pagination_before_param_description + schema: + type: string + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ListMessagesResponse" + x-oaiMeta: + name: List messages + group: threads + beta: true + returns: A list of [message](/docs/api-reference/messages) objects. 
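The pagination parameters above (`limit`, `order`, `after`, `before`) follow the cursor scheme used across this spec, but for reading a whole conversation the details can usually be left to the client. Here is a sketch that prints a thread's transcript oldest-first, iterating the page object so the SDK follows the `after` cursor for you (an SDK convenience, not something the raw endpoint does):

```python
# Sketch: print a thread transcript oldest-first. order="asc" flips the
# default descending sort; iterating lets the SDK auto-paginate.
from openai import OpenAI

client = OpenAI()

for message in client.beta.threads.messages.list("thread_abc123", order="asc"):
    for part in message.content:
        if part.type == "text":
            print(f"[{message.role}] {part.text.value}")
```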
+ examples: + request: + curl: | + curl https://api.openai.com/v1/threads/thread_abc123/messages \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v1" + python: | + from openai import OpenAI + client = OpenAI() + + thread_messages = client.beta.threads.messages.list("thread_abc123") + print(thread_messages.data) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const threadMessages = await openai.beta.threads.messages.list( + "thread_abc123" + ); + + console.log(threadMessages.data); + } + + main(); + response: | + { + "object": "list", + "data": [ + { + "id": "msg_abc123", + "object": "thread.message", + "created_at": 1699016383, + "thread_id": "thread_abc123", + "role": "user", + "content": [ + { + "type": "text", + "text": { + "value": "How does AI work? Explain it in simple terms.", + "annotations": [] + } + } + ], + "file_ids": [], + "assistant_id": null, + "run_id": null, + "metadata": {} + }, + { + "id": "msg_abc456", + "object": "thread.message", + "created_at": 1699016383, + "thread_id": "thread_abc123", + "role": "user", + "content": [ + { + "type": "text", + "text": { + "value": "Hello, what is AI?", + "annotations": [] + } + } + ], + "file_ids": [ + "file-abc123" + ], + "assistant_id": null, + "run_id": null, + "metadata": {} + } + ], + "first_id": "msg_abc123", + "last_id": "msg_abc456", + "has_more": false + } + post: + operationId: createMessage + tags: + - Assistants + summary: Create a message. + parameters: + - in: path + name: thread_id + required: true + schema: + type: string + description: The ID of the [thread](/docs/api-reference/threads) to create a message for. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateMessageRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/MessageObject" + x-oaiMeta: + name: Create message + group: threads + beta: true + returns: A [message](/docs/api-reference/messages/object) object. + examples: + request: + curl: | + curl https://api.openai.com/v1/threads/thread_abc123/messages \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v1" \ + -d '{ + "role": "user", + "content": "How does AI work? Explain it in simple terms." + }' + python: | + from openai import OpenAI + client = OpenAI() + + thread_message = client.beta.threads.messages.create( + "thread_abc123", + role="user", + content="How does AI work? Explain it in simple terms.", + ) + print(thread_message) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const threadMessages = await openai.beta.threads.messages.create( + "thread_abc123", + { role: "user", content: "How does AI work? Explain it in simple terms." } + ); + + console.log(threadMessages); + } + + main(); + response: | + { + "id": "msg_abc123", + "object": "thread.message", + "created_at": 1699017614, + "thread_id": "thread_abc123", + "role": "user", + "content": [ + { + "type": "text", + "text": { + "value": "How does AI work? Explain it in simple terms.", + "annotations": [] + } + } + ], + "file_ids": [], + "assistant_id": null, + "run_id": null, + "metadata": {} + } + + /threads/{thread_id}/messages/{message_id}: + get: + operationId: getMessage + tags: + - Assistants + summary: Retrieve a message. 
+      parameters:
+        - in: path
+          name: thread_id
+          required: true
+          schema:
+            type: string
+          description: The ID of the [thread](/docs/api-reference/threads) to which this message belongs.
+        - in: path
+          name: message_id
+          required: true
+          schema:
+            type: string
+          description: The ID of the message to retrieve.
+      responses:
+        "200":
+          description: OK
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/MessageObject"
+      x-oaiMeta:
+        name: Retrieve message
+        group: threads
+        beta: true
+        returns: The [message](/docs/api-reference/threads/messages/object) object matching the specified ID.
+        examples:
+          request:
+            curl: |
+              curl https://api.openai.com/v1/threads/thread_abc123/messages/msg_abc123 \
+                -H "Content-Type: application/json" \
+                -H "Authorization: Bearer $OPENAI_API_KEY" \
+                -H "OpenAI-Beta: assistants=v1"
+            python: |
+              from openai import OpenAI
+              client = OpenAI()
+
+              message = client.beta.threads.messages.retrieve(
+                message_id="msg_abc123",
+                thread_id="thread_abc123",
+              )
+              print(message)
+            node.js: |-
+              import OpenAI from "openai";
+
+              const openai = new OpenAI();
+
+              async function main() {
+                const message = await openai.beta.threads.messages.retrieve(
+                  "thread_abc123",
+                  "msg_abc123"
+                );
+
+                console.log(message);
+              }
+
+              main();
+          response: |
+            {
+              "id": "msg_abc123",
+              "object": "thread.message",
+              "created_at": 1699017614,
+              "thread_id": "thread_abc123",
+              "role": "user",
+              "content": [
+                {
+                  "type": "text",
+                  "text": {
+                    "value": "How does AI work? Explain it in simple terms.",
+                    "annotations": []
+                  }
+                }
+              ],
+              "file_ids": [],
+              "assistant_id": null,
+              "run_id": null,
+              "metadata": {}
+            }
+    post:
+      operationId: modifyMessage
+      tags:
+        - Assistants
+      summary: Modifies a message.
+      parameters:
+        - in: path
+          name: thread_id
+          required: true
+          schema:
+            type: string
+          description: The ID of the thread to which this message belongs.
+        - in: path
+          name: message_id
+          required: true
+          schema:
+            type: string
+          description: The ID of the message to modify.
+      requestBody:
+        required: true
+        content:
+          application/json:
+            schema:
+              $ref: "#/components/schemas/ModifyMessageRequest"
+      responses:
+        "200":
+          description: OK
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/MessageObject"
+      x-oaiMeta:
+        name: Modify message
+        group: threads
+        beta: true
+        returns: The modified [message](/docs/api-reference/threads/messages/object) object.
+        examples:
+          request:
+            curl: |
+              curl https://api.openai.com/v1/threads/thread_abc123/messages/msg_abc123 \
+                -H "Content-Type: application/json" \
+                -H "Authorization: Bearer $OPENAI_API_KEY" \
+                -H "OpenAI-Beta: assistants=v1" \
+                -d '{
+                "metadata": {
+                  "modified": "true",
+                  "user": "abc123"
+                }
+              }'
+            python: |
+              from openai import OpenAI
+              client = OpenAI()
+
+              message = client.beta.threads.messages.update(
+                message_id="msg_abc123",
+                thread_id="thread_abc123",
+                metadata={
+                  "modified": "true",
+                  "user": "abc123",
+                },
+              )
+              print(message)
+            node.js: |-
+              import OpenAI from "openai";
+
+              const openai = new OpenAI();
+
+              async function main() {
+                const message = await openai.beta.threads.messages.update(
+                  "thread_abc123",
+                  "msg_abc123",
+                  {
+                    metadata: {
+                      modified: "true",
+                      user: "abc123",
+                    },
+                  }
+                );
+
+                console.log(message);
+              }
+
+              main();
+          response: |
+            {
+              "id": "msg_abc123",
+              "object": "thread.message",
+              "created_at": 1699017614,
+              "thread_id": "thread_abc123",
+              "role": "user",
+              "content": [
+                {
+                  "type": "text",
+                  "text": {
+                    "value": "How does AI work? 
Explain it in simple terms.", + "annotations": [] + } + } + ], + "file_ids": [], + "assistant_id": null, + "run_id": null, + "metadata": { + "modified": "true", + "user": "abc123" + } + } + + /threads/runs: + post: + operationId: createThreadAndRun + tags: + - Assistants + summary: Create a thread and run it in one request. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateThreadAndRunRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/RunObject" + x-oaiMeta: + name: Create thread and run + group: threads + beta: true + returns: A [run](/docs/api-reference/runs/object) object. + examples: + request: + curl: | + curl https://api.openai.com/v1/threads/runs \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v1" \ + -d '{ + "assistant_id": "asst_abc123", + "thread": { + "messages": [ + {"role": "user", "content": "Explain deep learning to a 5 year old."} + ] + } + }' + python: | + from openai import OpenAI + client = OpenAI() + + run = client.beta.threads.create_and_run( + assistant_id="asst_abc123", + thread={ + "messages": [ + {"role": "user", "content": "Explain deep learning to a 5 year old."} + ] + } + ) + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const run = await openai.beta.threads.createAndRun({ + assistant_id: "asst_abc123", + thread: { + messages: [ + { role: "user", content: "Explain deep learning to a 5 year old." }, + ], + }, + }); + + console.log(run); + } + + main(); + response: | + { + "id": "run_abc123", + "object": "thread.run", + "created_at": 1699076792, + "assistant_id": "asst_abc123", + "thread_id": "thread_abc123", + "status": "queued", + "started_at": null, + "expires_at": 1699077392, + "cancelled_at": null, + "failed_at": null, + "completed_at": null, + "last_error": null, + "model": "gpt-4", + "instructions": "You are a helpful assistant.", + "tools": [], + "file_ids": [], + "metadata": {}, + "usage": null + } + + /threads/{thread_id}/runs: + get: + operationId: listRuns + tags: + - Assistants + summary: Returns a list of runs belonging to a thread. + parameters: + - name: thread_id + in: path + required: true + schema: + type: string + description: The ID of the thread the run belongs to. + - name: limit + in: query + description: *pagination_limit_param_description + required: false + schema: + type: integer + default: 20 + - name: order + in: query + description: *pagination_order_param_description + schema: + type: string + default: desc + enum: ["asc", "desc"] + - name: after + in: query + description: *pagination_after_param_description + schema: + type: string + - name: before + in: query + description: *pagination_before_param_description + schema: + type: string + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ListRunsResponse" + x-oaiMeta: + name: List runs + group: threads + beta: true + returns: A list of [run](/docs/api-reference/runs/object) objects. 
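The create-thread-and-run operation above only enqueues work: the response comes back with `status: "queued"`, and the caller is expected to poll. A minimal sketch of that lifecycle follows; the set of non-terminal statuses polled here is inferred from the example payloads in this spec:

```python
# Sketch: create a thread and run, wait for it to settle, read the reply.
import time
from openai import OpenAI

client = OpenAI()

run = client.beta.threads.create_and_run(
    assistant_id="asst_abc123",
    thread={"messages": [{"role": "user", "content": "Explain deep learning to a 5 year old."}]},
)

while run.status in ("queued", "in_progress", "cancelling"):  # assumed non-terminal set
    time.sleep(1)
    run = client.beta.threads.runs.retrieve(thread_id=run.thread_id, run_id=run.id)

if run.status == "completed":
    messages = client.beta.threads.messages.list(thread_id=run.thread_id)
    # Newest message first by default; assumes the reply's first part is text.
    print(messages.data[0].content[0].text.value)
else:
    print(f"run ended as {run.status}")
```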
+ examples: + request: + curl: | + curl https://api.openai.com/v1/threads/thread_abc123/runs \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v1" + python: | + from openai import OpenAI + client = OpenAI() + + runs = client.beta.threads.runs.list( + "thread_abc123" + ) + print(runs) + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const runs = await openai.beta.threads.runs.list( + "thread_abc123" + ); + + console.log(runs); + } + + main(); + response: | + { + "object": "list", + "data": [ + { + "id": "run_abc123", + "object": "thread.run", + "created_at": 1699075072, + "assistant_id": "asst_abc123", + "thread_id": "thread_abc123", + "status": "completed", + "started_at": 1699075072, + "expires_at": null, + "cancelled_at": null, + "failed_at": null, + "completed_at": 1699075073, + "last_error": null, + "model": "gpt-3.5-turbo", + "instructions": null, + "tools": [ + { + "type": "code_interpreter" + } + ], + "file_ids": [ + "file-abc123", + "file-abc456" + ], + "metadata": {}, + "usage": { + "prompt_tokens": 123, + "completion_tokens": 456, + "total_tokens": 579 + } + }, + { + "id": "run_abc456", + "object": "thread.run", + "created_at": 1699063290, + "assistant_id": "asst_abc123", + "thread_id": "thread_abc123", + "status": "completed", + "started_at": 1699063290, + "expires_at": null, + "cancelled_at": null, + "failed_at": null, + "completed_at": 1699063291, + "last_error": null, + "model": "gpt-3.5-turbo", + "instructions": null, + "tools": [ + { + "type": "code_interpreter" + } + ], + "file_ids": [ + "file-abc123", + "file-abc456" + ], + "metadata": {}, + "usage": { + "prompt_tokens": 123, + "completion_tokens": 456, + "total_tokens": 579 + } + } + ], + "first_id": "run_abc123", + "last_id": "run_abc456", + "has_more": false + } + post: + operationId: createRun + tags: + - Assistants + summary: Create a run. + parameters: + - in: path + name: thread_id + required: true + schema: + type: string + description: The ID of the thread to run. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateRunRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/RunObject" + x-oaiMeta: + name: Create run + group: threads + beta: true + returns: A [run](/docs/api-reference/runs/object) object. 
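Since each completed run in the list above carries a `usage` block, listing runs is also the way to account for a thread's token spend. A short sketch, assuming `usage` stays null until a run finishes, as the `"usage": null` in the create-run example indicates:

```python
# Sketch: total token usage across all runs of a thread.
from openai import OpenAI

client = OpenAI()

total = 0
for run in client.beta.threads.runs.list("thread_abc123"):
    if run.usage is not None:  # null until the run has finished
        total += run.usage.total_tokens
print(f"total tokens for thread: {total}")
```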
+ examples: + request: + curl: | + curl https://api.openai.com/v1/threads/thread_abc123/runs \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v1" \ + -d '{ + "assistant_id": "asst_abc123" + }' + python: | + from openai import OpenAI + client = OpenAI() + + run = client.beta.threads.runs.create( + thread_id="thread_abc123", + assistant_id="asst_abc123" + ) + print(run) + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const run = await openai.beta.threads.runs.create( + "thread_abc123", + { assistant_id: "asst_abc123" } + ); + + console.log(run); + } + + main(); + response: &run_object_example | + { + "id": "run_abc123", + "object": "thread.run", + "created_at": 1699063290, + "assistant_id": "asst_abc123", + "thread_id": "thread_abc123", + "status": "queued", + "started_at": 1699063290, + "expires_at": null, + "cancelled_at": null, + "failed_at": null, + "completed_at": 1699063291, + "last_error": null, + "model": "gpt-4", + "instructions": null, + "tools": [ + { + "type": "code_interpreter" + } + ], + "file_ids": [ + "file-abc123", + "file-abc456" + ], + "metadata": {}, + "usage": null + } + + /threads/{thread_id}/runs/{run_id}: + get: + operationId: getRun + tags: + - Assistants + summary: Retrieves a run. + parameters: + - in: path + name: thread_id + required: true + schema: + type: string + description: The ID of the [thread](/docs/api-reference/threads) that was run. + - in: path + name: run_id + required: true + schema: + type: string + description: The ID of the run to retrieve. + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/RunObject" + x-oaiMeta: + name: Retrieve run + group: threads + beta: true + returns: The [run](/docs/api-reference/runs/object) object matching the specified ID. + examples: + request: + curl: | + curl https://api.openai.com/v1/threads/thread_abc123/runs/run_abc123 \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v1" + python: | + from openai import OpenAI + client = OpenAI() + + run = client.beta.threads.runs.retrieve( + thread_id="thread_abc123", + run_id="run_abc123" + ) + print(run) + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const run = await openai.beta.threads.runs.retrieve( + "thread_abc123", + "run_abc123" + ); + + console.log(run); + } + + main(); + response: | + { + "id": "run_abc123", + "object": "thread.run", + "created_at": 1699075072, + "assistant_id": "asst_abc123", + "thread_id": "thread_abc123", + "status": "completed", + "started_at": 1699075072, + "expires_at": null, + "cancelled_at": null, + "failed_at": null, + "completed_at": 1699075073, + "last_error": null, + "model": "gpt-3.5-turbo", + "instructions": null, + "tools": [ + { + "type": "code_interpreter" + } + ], + "file_ids": [ + "file-abc123", + "file-abc456" + ], + "metadata": {}, + "usage": { + "prompt_tokens": 123, + "completion_tokens": 456, + "total_tokens": 579 + } + } + post: + operationId: modifyRun + tags: + - Assistants + summary: Modifies a run. + parameters: + - in: path + name: thread_id + required: true + schema: + type: string + description: The ID of the [thread](/docs/api-reference/threads) that was run. + - in: path + name: run_id + required: true + schema: + type: string + description: The ID of the run to modify. 
+ requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/ModifyRunRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/RunObject" + x-oaiMeta: + name: Modify run + group: threads + beta: true + returns: The modified [run](/docs/api-reference/runs/object) object matching the specified ID. + examples: + request: + curl: | + curl https://api.openai.com/v1/threads/thread_abc123/runs/run_abc123 \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v1" \ + -d '{ + "metadata": { + "user_id": "user_abc123" + } + }' + python: | + from openai import OpenAI + client = OpenAI() + + run = client.beta.threads.runs.update( + thread_id="thread_abc123", + run_id="run_abc123", + metadata={"user_id": "user_abc123"}, + ) + print(run) + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const run = await openai.beta.threads.runs.update( + "thread_abc123", + "run_abc123", + { + metadata: { + user_id: "user_abc123", + }, + } + ); + + console.log(run); + } + + main(); + response: | + { + "id": "run_abc123", + "object": "thread.run", + "created_at": 1699075072, + "assistant_id": "asst_abc123", + "thread_id": "thread_abc123", + "status": "completed", + "started_at": 1699075072, + "expires_at": null, + "cancelled_at": null, + "failed_at": null, + "completed_at": 1699075073, + "last_error": null, + "model": "gpt-3.5-turbo", + "instructions": null, + "tools": [ + { + "type": "code_interpreter" + } + ], + "file_ids": [ + "file-abc123", + "file-abc456" + ], + "metadata": { + "user_id": "user_abc123" + }, + "usage": { + "prompt_tokens": 123, + "completion_tokens": 456, + "total_tokens": 579 + } + } + + /threads/{thread_id}/runs/{run_id}/submit_tool_outputs: + post: + operationId: submitToolOuputsToRun + tags: + - Assistants + summary: | + When a run has the `status: "requires_action"` and `required_action.type` is `submit_tool_outputs`, this endpoint can be used to submit the outputs from the tool calls once they're all completed. All outputs must be submitted in a single request. + parameters: + - in: path + name: thread_id + required: true + schema: + type: string + description: The ID of the [thread](/docs/api-reference/threads) to which this run belongs. + - in: path + name: run_id + required: true + schema: + type: string + description: The ID of the run that requires the tool output submission. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/SubmitToolOutputsRunRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/RunObject" + x-oaiMeta: + name: Submit tool outputs to run + group: threads + beta: true + returns: The modified [run](/docs/api-reference/runs/object) object matching the specified ID. 
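In practice the submit-tool-outputs flow reads as: poll the run, and when it reports `requires_action`, execute every pending tool call and post all outputs back in a single request. A hedged sketch follows; the `required_action.submit_tool_outputs.tool_calls` shape mirrors the run object in this spec, and `get_weather` is a stand-in for your own function:

```python
# Sketch: answer a run's pending tool calls, then hand the outputs back.
import json
from openai import OpenAI

client = OpenAI()

def get_weather(location: str, unit: str = "c") -> str:
    return "28C"  # stand-in implementation

run = client.beta.threads.runs.retrieve(thread_id="thread_abc123", run_id="run_abc123")

if run.status == "requires_action" and run.required_action.type == "submit_tool_outputs":
    outputs = []
    for call in run.required_action.submit_tool_outputs.tool_calls:
        args = json.loads(call.function.arguments)  # arguments arrive as a JSON string
        outputs.append({"tool_call_id": call.id, "output": get_weather(**args)})
    # All outputs must go up in one request, per the summary above.
    run = client.beta.threads.runs.submit_tool_outputs(
        thread_id="thread_abc123", run_id="run_abc123", tool_outputs=outputs
    )
```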
+ examples: + request: + curl: | + curl https://api.openai.com/v1/threads/thread_abc123/runs/run_abc123/submit_tool_outputs \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v1" \ + -d '{ + "tool_outputs": [ + { + "tool_call_id": "call_abc123", + "output": "28C" + } + ] + }' + python: | + from openai import OpenAI + client = OpenAI() + + run = client.beta.threads.runs.submit_tool_outputs( + thread_id="thread_abc123", + run_id="run_abc123", + tool_outputs=[ + { + "tool_call_id": "call_abc123", + "output": "28C" + } + ] + ) + print(run) + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const run = await openai.beta.threads.runs.submitToolOutputs( + "thread_abc123", + "run_abc123", + { + tool_outputs: [ + { + tool_call_id: "call_abc123", + output: "28C", + }, + ], + } + ); + + console.log(run); + } + + main(); + response: | + { + "id": "run_abc123", + "object": "thread.run", + "created_at": 1699075592, + "assistant_id": "asst_abc123", + "thread_id": "thread_abc123", + "status": "queued", + "started_at": 1699075592, + "expires_at": 1699076192, + "cancelled_at": null, + "failed_at": null, + "completed_at": null, + "last_error": null, + "model": "gpt-4", + "instructions": "You tell the weather.", + "tools": [ + { + "type": "function", + "function": { + "name": "get_weather", + "description": "Determine weather in my location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state e.g. San Francisco, CA" + }, + "unit": { + "type": "string", + "enum": [ + "c", + "f" + ] + } + }, + "required": [ + "location" + ] + } + } + } + ], + "file_ids": [], + "metadata": {}, + "usage": null + } + + /threads/{thread_id}/runs/{run_id}/cancel: + post: + operationId: cancelRun + tags: + - Assistants + summary: Cancels a run that is `in_progress`. + parameters: + - in: path + name: thread_id + required: true + schema: + type: string + description: The ID of the thread to which this run belongs. + - in: path + name: run_id + required: true + schema: + type: string + description: The ID of the run to cancel. + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/RunObject" + x-oaiMeta: + name: Cancel a run + group: threads + beta: true + returns: The modified [run](/docs/api-reference/runs/object) object matching the specified ID. 
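+        # Cancellation is asynchronous: the response below reports the run as
+        # "cancelling", not "cancelled". A minimal polling sketch; the
+        # terminal "cancelled" status is assumed from the run lifecycle:
+        #
+        #   import time
+        #   run = client.beta.threads.runs.cancel(thread_id=tid, run_id=rid)
+        #   while run.status == "cancelling":
+        #       time.sleep(1)
+        #       run = client.beta.threads.runs.retrieve(thread_id=tid, run_id=rid)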
+ examples: + request: + curl: | + curl https://api.openai.com/v1/threads/thread_abc123/runs/run_abc123/cancel \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v1" \ + -X POST + python: | + from openai import OpenAI + client = OpenAI() + + run = client.beta.threads.runs.cancel( + thread_id="thread_abc123", + run_id="run_abc123" + ) + print(run) + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const run = await openai.beta.threads.runs.cancel( + "thread_abc123", + "run_abc123" + ); + + console.log(run); + } + + main(); + response: | + { + "id": "run_abc123", + "object": "thread.run", + "created_at": 1699076126, + "assistant_id": "asst_abc123", + "thread_id": "thread_abc123", + "status": "cancelling", + "started_at": 1699076126, + "expires_at": 1699076726, + "cancelled_at": null, + "failed_at": null, + "completed_at": null, + "last_error": null, + "model": "gpt-4", + "instructions": "You summarize books.", + "tools": [ + { + "type": "retrieval" + } + ], + "file_ids": [], + "metadata": {}, + "usage": null + } + + /threads/{thread_id}/runs/{run_id}/steps: + get: + operationId: listRunSteps + tags: + - Assistants + summary: Returns a list of run steps belonging to a run. + parameters: + - name: thread_id + in: path + required: true + schema: + type: string + description: The ID of the thread the run and run steps belong to. + - name: run_id + in: path + required: true + schema: + type: string + description: The ID of the run the run steps belong to. + - name: limit + in: query + description: *pagination_limit_param_description + required: false + schema: + type: integer + default: 20 + - name: order + in: query + description: *pagination_order_param_description + schema: + type: string + default: desc + enum: ["asc", "desc"] + - name: after + in: query + description: *pagination_after_param_description + schema: + type: string + - name: before + in: query + description: *pagination_before_param_description + schema: + type: string + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ListRunStepsResponse" + x-oaiMeta: + name: List run steps + group: threads + beta: true + returns: A list of [run step](/docs/api-reference/runs/step-object) objects. 
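+        # The `limit`, `order`, `after`, and `before` parameters implement
+        # cursor pagination. A sketch of walking all pages with `after`,
+        # assuming the Python SDK accepts these as keyword arguments and
+        # exposes the `data`/`has_more` fields of the list response:
+        #
+        #   cursor = None
+        #   while True:
+        #       page = client.beta.threads.runs.steps.list(
+        #           thread_id=tid, run_id=rid, limit=20,
+        #           **({"after": cursor} if cursor else {})
+        #       )
+        #       for step in page.data:
+        #           ...  # process each run step
+        #       if not page.has_more:
+        #           break
+        #       cursor = page.data[-1].id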
+        examples:
+          request:
+            curl: |
+              curl https://api.openai.com/v1/threads/thread_abc123/runs/run_abc123/steps \
+                -H "Authorization: Bearer $OPENAI_API_KEY" \
+                -H "Content-Type: application/json" \
+                -H "OpenAI-Beta: assistants=v1"
+            python: |
+              from openai import OpenAI
+              client = OpenAI()
+
+              run_steps = client.beta.threads.runs.steps.list(
+                thread_id="thread_abc123",
+                run_id="run_abc123"
+              )
+              print(run_steps)
+            node.js: |
+              import OpenAI from "openai";
+              const openai = new OpenAI();
+
+              async function main() {
+                const runStep = await openai.beta.threads.runs.steps.list(
+                  "thread_abc123",
+                  "run_abc123"
+                );
+                console.log(runStep);
+              }
+
+              main();
+          response: |
+            {
+              "object": "list",
+              "data": [
+                {
+                  "id": "step_abc123",
+                  "object": "thread.run.step",
+                  "created_at": 1699063291,
+                  "run_id": "run_abc123",
+                  "assistant_id": "asst_abc123",
+                  "thread_id": "thread_abc123",
+                  "type": "message_creation",
+                  "status": "completed",
+                  "cancelled_at": null,
+                  "completed_at": 1699063291,
+                  "expired_at": null,
+                  "failed_at": null,
+                  "last_error": null,
+                  "step_details": {
+                    "type": "message_creation",
+                    "message_creation": {
+                      "message_id": "msg_abc123"
+                    }
+                  },
+                  "usage": {
+                    "prompt_tokens": 123,
+                    "completion_tokens": 456,
+                    "total_tokens": 579
+                  }
+                }
+              ],
+              "first_id": "step_abc123",
+              "last_id": "step_abc123",
+              "has_more": false
+            }
+
+  /threads/{thread_id}/runs/{run_id}/steps/{step_id}:
+    get:
+      operationId: getRunStep
+      tags:
+        - Assistants
+      summary: Retrieves a run step.
+      parameters:
+        - in: path
+          name: thread_id
+          required: true
+          schema:
+            type: string
+          description: The ID of the thread to which the run and run step belong.
+        - in: path
+          name: run_id
+          required: true
+          schema:
+            type: string
+          description: The ID of the run to which the run step belongs.
+        - in: path
+          name: step_id
+          required: true
+          schema:
+            type: string
+          description: The ID of the run step to retrieve.
+      responses:
+        "200":
+          description: OK
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/RunStepObject"
+      x-oaiMeta:
+        name: Retrieve run step
+        group: threads
+        beta: true
+        returns: The [run step](/docs/api-reference/runs/step-object) object matching the specified ID.
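+        # `step_details.type` tells you what the step did; the example below
+        # shows "message_creation". A dispatch sketch (the "tool_calls" step
+        # type is assumed from the run step object, not shown here):
+        #
+        #   step = client.beta.threads.runs.steps.retrieve(
+        #       thread_id=tid, run_id=rid, step_id=sid
+        #   )
+        #   if step.step_details.type == "message_creation":
+        #       print(step.step_details.message_creation.message_id)
+        #   elif step.step_details.type == "tool_calls":
+        #       ...  # inspect the step's tool call details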
+ examples: + request: + curl: | + curl https://api.openai.com/v1/threads/thread_abc123/runs/run_abc123/steps/step_abc123 \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v1" + python: | + from openai import OpenAI + client = OpenAI() + + run_step = client.beta.threads.runs.steps.retrieve( + thread_id="thread_abc123", + run_id="run_abc123", + step_id="step_abc123" + ) + print(run_step) + node.js: | + import OpenAI from "openai"; + const openai = new OpenAI(); + + async function main() { + const runStep = await openai.beta.threads.runs.steps.retrieve( + "thread_abc123", + "run_abc123", + "step_abc123" + ); + console.log(runStep); + } + + main(); + response: &run_step_object_example | + { + "id": "step_abc123", + "object": "thread.run.step", + "created_at": 1699063291, + "run_id": "run_abc123", + "assistant_id": "asst_abc123", + "thread_id": "thread_abc123", + "type": "message_creation", + "status": "completed", + "cancelled_at": null, + "completed_at": 1699063291, + "expired_at": null, + "failed_at": null, + "last_error": null, + "step_details": { + "type": "message_creation", + "message_creation": { + "message_id": "msg_abc123" + } + }, + "usage": { + "prompt_tokens": 123, + "completion_tokens": 456, + "total_tokens": 579 + } + } + + /assistants/{assistant_id}/files: + get: + operationId: listAssistantFiles + tags: + - Assistants + summary: Returns a list of assistant files. + parameters: + - name: assistant_id + in: path + description: The ID of the assistant the file belongs to. + required: true + schema: + type: string + - name: limit + in: query + description: *pagination_limit_param_description + required: false + schema: + type: integer + default: 20 + - name: order + in: query + description: *pagination_order_param_description + schema: + type: string + default: desc + enum: ["asc", "desc"] + - name: after + in: query + description: *pagination_after_param_description + schema: + type: string + - name: before + in: query + description: *pagination_before_param_description + schema: + type: string + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ListAssistantFilesResponse" + x-oaiMeta: + name: List assistant files + group: assistants + beta: true + returns: A list of [assistant file](/docs/api-reference/assistants/file-object) objects. 
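+        # Assistant files reference previously uploaded Files. A sketch of the
+        # upload-then-attach flow, assuming the standard Files API upload with
+        # purpose "assistants"; the file name is hypothetical:
+        #
+        #   uploaded = client.files.create(
+        #       file=open("knowledge.pdf", "rb"), purpose="assistants"
+        #   )
+        #   client.beta.assistants.files.create(
+        #       assistant_id="asst_abc123", file_id=uploaded.id
+        #   )
+        #   files = client.beta.assistants.files.list(assistant_id="asst_abc123")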
+        examples:
+          request:
+            curl: |
+              curl https://api.openai.com/v1/assistants/asst_abc123/files \
+                -H "Authorization: Bearer $OPENAI_API_KEY" \
+                -H "Content-Type: application/json" \
+                -H "OpenAI-Beta: assistants=v1"
+            python: |
+              from openai import OpenAI
+              client = OpenAI()
+
+              assistant_files = client.beta.assistants.files.list(
+                assistant_id="asst_abc123"
+              )
+              print(assistant_files)
+            node.js: |
+              import OpenAI from "openai";
+              const openai = new OpenAI();
+
+              async function main() {
+                const assistantFiles = await openai.beta.assistants.files.list(
+                  "asst_abc123"
+                );
+                console.log(assistantFiles);
+              }
+
+              main();
+          response: |
+            {
+              "object": "list",
+              "data": [
+                {
+                  "id": "file-abc123",
+                  "object": "assistant.file",
+                  "created_at": 1699060412,
+                  "assistant_id": "asst_abc123"
+                },
+                {
+                  "id": "file-abc456",
+                  "object": "assistant.file",
+                  "created_at": 1699060412,
+                  "assistant_id": "asst_abc123"
+                }
+              ],
+              "first_id": "file-abc123",
+              "last_id": "file-abc456",
+              "has_more": false
+            }
+    post:
+      operationId: createAssistantFile
+      tags:
+        - Assistants
+      summary: Create an assistant file by attaching a [File](/docs/api-reference/files) to an [assistant](/docs/api-reference/assistants).
+      parameters:
+        - in: path
+          name: assistant_id
+          required: true
+          schema:
+            type: string
+            example: asst_abc123
+          description: |
+            The ID of the assistant for which to create a File.
+      requestBody:
+        required: true
+        content:
+          application/json:
+            schema:
+              $ref: "#/components/schemas/CreateAssistantFileRequest"
+      responses:
+        "200":
+          description: OK
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/AssistantFileObject"
+      x-oaiMeta:
+        name: Create assistant file
+        group: assistants
+        beta: true
+        returns: An [assistant file](/docs/api-reference/assistants/file-object) object.
+        examples:
+          request:
+            curl: |
+              curl https://api.openai.com/v1/assistants/asst_abc123/files \
+                -H "Authorization: Bearer $OPENAI_API_KEY" \
+                -H "Content-Type: application/json" \
+                -H "OpenAI-Beta: assistants=v1" \
+                -d '{
+                  "file_id": "file-abc123"
+                }'
+            python: |
+              from openai import OpenAI
+              client = OpenAI()
+
+              assistant_file = client.beta.assistants.files.create(
+                assistant_id="asst_abc123",
+                file_id="file-abc123"
+              )
+              print(assistant_file)
+            node.js: |
+              import OpenAI from "openai";
+              const openai = new OpenAI();
+
+              async function main() {
+                const myAssistantFile = await openai.beta.assistants.files.create(
+                  "asst_abc123",
+                  {
+                    file_id: "file-abc123"
+                  }
+                );
+                console.log(myAssistantFile);
+              }
+
+              main();
+          response: &assistant_file_object |
+            {
+              "id": "file-abc123",
+              "object": "assistant.file",
+              "created_at": 1699055364,
+              "assistant_id": "asst_abc123"
+            }
+
+  /assistants/{assistant_id}/files/{file_id}:
+    get:
+      operationId: getAssistantFile
+      tags:
+        - Assistants
+      summary: Retrieves an AssistantFile.
+      parameters:
+        - in: path
+          name: assistant_id
+          required: true
+          schema:
+            type: string
+          description: The ID of the assistant that the file belongs to.
+        - in: path
+          name: file_id
+          required: true
+          schema:
+            type: string
+          description: The ID of the file to retrieve.
+      responses:
+        "200":
+          description: OK
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/AssistantFileObject"
+      x-oaiMeta:
+        name: Retrieve assistant file
+        group: assistants
+        beta: true
+        returns: The [assistant file](/docs/api-reference/assistants/file-object) object matching the specified ID.
+        examples:
+          request:
+            curl: |
+              curl https://api.openai.com/v1/assistants/asst_abc123/files/file-abc123 \
+                -H "Authorization: Bearer $OPENAI_API_KEY" \
+                -H "Content-Type: application/json" \
+                -H "OpenAI-Beta: assistants=v1"
+            python: |
+              from openai import OpenAI
+              client = OpenAI()
+
+              assistant_file = client.beta.assistants.files.retrieve(
+                assistant_id="asst_abc123",
+                file_id="file-abc123"
+              )
+              print(assistant_file)
+            node.js: |
+              import OpenAI from "openai";
+              const openai = new OpenAI();
+
+              async function main() {
+                const myAssistantFile = await openai.beta.assistants.files.retrieve(
+                  "asst_abc123",
+                  "file-abc123"
+                );
+                console.log(myAssistantFile);
+              }
+
+              main();
+          response: *assistant_file_object
+    delete:
+      operationId: deleteAssistantFile
+      tags:
+        - Assistants
+      summary: Delete an assistant file.
+      parameters:
+        - in: path
+          name: assistant_id
+          required: true
+          schema:
+            type: string
+          description: The ID of the assistant that the file belongs to.
+        - in: path
+          name: file_id
+          required: true
+          schema:
+            type: string
+          description: The ID of the file to delete.
+      responses:
+        "200":
+          description: OK
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/DeleteAssistantFileResponse"
+      x-oaiMeta:
+        name: Delete assistant file
+        group: assistants
+        beta: true
+        returns: Deletion status.
+        examples:
+          request:
+            curl: |
+              curl https://api.openai.com/v1/assistants/asst_abc123/files/file-abc123 \
+                -H "Authorization: Bearer $OPENAI_API_KEY" \
+                -H "Content-Type: application/json" \
+                -H "OpenAI-Beta: assistants=v1" \
+                -X DELETE
+            python: |
+              from openai import OpenAI
+              client = OpenAI()
+
+              deleted_assistant_file = client.beta.assistants.files.delete(
+                assistant_id="asst_abc123",
+                file_id="file-abc123"
+              )
+              print(deleted_assistant_file)
+            node.js: |
+              import OpenAI from "openai";
+              const openai = new OpenAI();
+
+              async function main() {
+                const deletedAssistantFile = await openai.beta.assistants.files.del(
+                  "asst_abc123",
+                  "file-abc123"
+                );
+                console.log(deletedAssistantFile);
+              }
+
+              main();
+          response: |
+            {
+              "id": "file-abc123",
+              "object": "assistant.file.deleted",
+              "deleted": true
+            }
+
+  /threads/{thread_id}/messages/{message_id}/files:
+    get:
+      operationId: listMessageFiles
+      tags:
+        - Assistants
+      summary: Returns a list of message files.
+      parameters:
+        - name: thread_id
+          in: path
+          description: The ID of the thread that the message and files belong to.
+          required: true
+          schema:
+            type: string
+        - name: message_id
+          in: path
+          description: The ID of the message that the files belong to.
+          required: true
+          schema:
+            type: string
+        - name: limit
+          in: query
+          description: *pagination_limit_param_description
+          required: false
+          schema:
+            type: integer
+            default: 20
+        - name: order
+          in: query
+          description: *pagination_order_param_description
+          schema:
+            type: string
+            default: desc
+            enum: ["asc", "desc"]
+        - name: after
+          in: query
+          description: *pagination_after_param_description
+          schema:
+            type: string
+        - name: before
+          in: query
+          description: *pagination_before_param_description
+          schema:
+            type: string
+      responses:
+        "200":
+          description: OK
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/ListMessageFilesResponse"
+      x-oaiMeta:
+        name: List message files
+        group: threads
+        beta: true
+        returns: A list of [message file](/docs/api-reference/messages/file-object) objects.
+        examples:
+          request:
+            curl: |
+              curl https://api.openai.com/v1/threads/thread_abc123/messages/msg_abc123/files \
+                -H "Authorization: Bearer $OPENAI_API_KEY" \
+                -H "Content-Type: application/json" \
+                -H "OpenAI-Beta: assistants=v1"
+            python: |
+              from openai import OpenAI
+              client = OpenAI()
+
+              message_files = client.beta.threads.messages.files.list(
+                thread_id="thread_abc123",
+                message_id="msg_abc123"
+              )
+              print(message_files)
+            node.js: |
+              import OpenAI from "openai";
+              const openai = new OpenAI();
+
+              async function main() {
+                const messageFiles = await openai.beta.threads.messages.files.list(
+                  "thread_abc123",
+                  "msg_abc123"
+                );
+                console.log(messageFiles);
+              }
+
+              main();
+          response: |
+            {
+              "object": "list",
+              "data": [
+                {
+                  "id": "file-abc123",
+                  "object": "thread.message.file",
+                  "created_at": 1699061776,
+                  "message_id": "msg_abc123"
+                },
+                {
+                  "id": "file-abc456",
+                  "object": "thread.message.file",
+                  "created_at": 1699061776,
+                  "message_id": "msg_abc123"
+                }
+              ],
+              "first_id": "file-abc123",
+              "last_id": "file-abc456",
+              "has_more": false
+            }
+
+  /threads/{thread_id}/messages/{message_id}/files/{file_id}:
+    get:
+      operationId: getMessageFile
+      tags:
+        - Assistants
+      summary: Retrieves a message file.
+      parameters:
+        - in: path
+          name: thread_id
+          required: true
+          schema:
+            type: string
+          example: thread_abc123
+          description: The ID of the thread to which the message and File belong.
+        - in: path
+          name: message_id
+          required: true
+          schema:
+            type: string
+          example: msg_abc123
+          description: The ID of the message the file belongs to.
+        - in: path
+          name: file_id
+          required: true
+          schema:
+            type: string
+          example: file-abc123
+          description: The ID of the file being retrieved.
+      responses:
+        "200":
+          description: OK
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/MessageFileObject"
+      x-oaiMeta:
+        name: Retrieve message file
+        group: threads
+        beta: true
+        returns: The [message file](/docs/api-reference/messages/file-object) object.
+ examples: + request: + curl: | + curl https://api.openai.com/v1/threads/thread_abc123/messages/msg_abc123/files/file-abc123 \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v1" + python: | + from openai import OpenAI + client = OpenAI() + + message_files = client.beta.threads.messages.files.retrieve( + thread_id="thread_abc123", + message_id="msg_abc123", + file_id="file-abc123" + ) + print(message_files) + node.js: | + import OpenAI from "openai"; + const openai = new OpenAI(); + + async function main() { + const messageFile = await openai.beta.threads.messages.files.retrieve( + "thread_abc123", + "msg_abc123", + "file-abc123" + ); + console.log(messageFile); + } + + main(); + response: | + { + "id": "file-abc123", + "object": "thread.message.file", + "created_at": 1699061776, + "message_id": "msg_abc123" + } + +components: + securitySchemes: + ApiKeyAuth: + type: http + scheme: "bearer" + + schemas: + Error: + type: object + properties: + code: + type: string + nullable: true + message: + type: string + nullable: false + param: + type: string + nullable: true + type: + type: string + nullable: false + required: + - type + - message + - param + - code + ErrorResponse: + type: object + properties: + error: + $ref: "#/components/schemas/Error" + required: + - error + + ListModelsResponse: + type: object + properties: + object: + type: string + enum: [list] + data: + type: array + items: + $ref: "#/components/schemas/Model" + required: + - object + - data + DeleteModelResponse: + type: object + properties: + id: + type: string + deleted: + type: boolean + object: + type: string + required: + - id + - object + - deleted + + CreateCompletionRequest: + type: object + properties: + model: + description: &model_description | + ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. + anyOf: + - type: string + - type: string + enum: ["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"] + x-oaiTypeLabel: string + prompt: + description: &completions_prompt_description | + The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. + + Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document. + default: "<|endoftext|>" + nullable: true + oneOf: + - type: string + default: "" + example: "This is a test." + - type: array + items: + type: string + default: "" + example: "This is a test." + - type: array + minItems: 1 + items: + type: integer + example: "[1212, 318, 257, 1332, 13]" + - type: array + minItems: 1 + items: + type: array + minItems: 1 + items: + type: integer + example: "[[1212, 318, 257, 1332, 13]]" + best_of: + type: integer + default: 1 + minimum: 0 + maximum: 20 + nullable: true + description: &completions_best_of_description | + Generates `best_of` completions server-side and returns the "best" (the one with the highest log probability per token). Results cannot be streamed. + + When used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return – `best_of` must be greater than `n`. + + **Note:** Because this parameter generates many completions, it can quickly consume your token quota. 
Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. + echo: + type: boolean + default: false + nullable: true + description: &completions_echo_description > + Echo back the prompt in addition to the completion + frequency_penalty: + type: number + default: 0 + minimum: -2 + maximum: 2 + nullable: true + description: &completions_frequency_penalty_description | + Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. + + [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details) + logit_bias: &completions_logit_bias + type: object + x-oaiTypeLabel: map + default: null + nullable: true + additionalProperties: + type: integer + description: &completions_logit_bias_description | + Modify the likelihood of specified tokens appearing in the completion. + + Accepts a JSON object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. + + As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token from being generated. + logprobs: &completions_logprobs_configuration + type: integer + minimum: 0 + maximum: 5 + default: null + nullable: true + description: &completions_logprobs_description | + Include the log probabilities on the `logprobs` most likely output tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. + + The maximum value for `logprobs` is 5. + max_tokens: + type: integer + minimum: 0 + default: 16 + example: 16 + nullable: true + description: &completions_max_tokens_description | + The maximum number of [tokens](/tokenizer) that can be generated in the completion. + + The token count of your prompt plus `max_tokens` cannot exceed the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. + n: + type: integer + minimum: 1 + maximum: 128 + default: 1 + example: 1 + nullable: true + description: &completions_completions_description | + How many completions to generate for each prompt. + + **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. + presence_penalty: + type: number + default: 0 + minimum: -2 + maximum: 2 + nullable: true + description: &completions_presence_penalty_description | + Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. 
+ + [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details) + seed: &completions_seed_param + type: integer + minimum: -9223372036854775808 + maximum: 9223372036854775807 + nullable: true + description: | + If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. + + Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. + stop: + description: &completions_stop_description > + Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. + default: null + nullable: true + oneOf: + - type: string + default: <|endoftext|> + example: "\n" + nullable: true + - type: array + minItems: 1 + maxItems: 4 + items: + type: string + example: '["\n"]' + stream: + description: > + Whether to stream back partial progress. If set, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) + as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + type: boolean + nullable: true + default: false + suffix: + description: The suffix that comes after a completion of inserted text. + default: null + nullable: true + type: string + example: "test." + temperature: + type: number + minimum: 0 + maximum: 2 + default: 1 + example: 1 + nullable: true + description: &completions_temperature_description | + What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + + We generally recommend altering this or `top_p` but not both. + top_p: + type: number + minimum: 0 + maximum: 1 + default: 1 + example: 1 + nullable: true + description: &completions_top_p_description | + An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or `temperature` but not both. + user: &end_user_param_configuration + type: string + example: user-1234 + description: | + A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). + required: + - model + - prompt + + CreateCompletionResponse: + type: object + description: | + Represents a completion response from the API. Note: both the streamed and non-streamed response objects share the same shape (unlike the chat endpoint). + properties: + id: + type: string + description: A unique identifier for the completion. + choices: + type: array + description: The list of completion choices the model generated for the input prompt. + items: + type: object + required: + - finish_reason + - index + - logprobs + - text + properties: + finish_reason: + type: string + description: &completion_finish_reason_description | + The reason the model stopped generating tokens. 
This will be `stop` if the model hit a natural stop point or a provided stop sequence, + `length` if the maximum number of tokens specified in the request was reached, + or `content_filter` if content was omitted due to a flag from our content filters. + enum: ["stop", "length", "content_filter"] + index: + type: integer + logprobs: + type: object + nullable: true + properties: + text_offset: + type: array + items: + type: integer + token_logprobs: + type: array + items: + type: number + tokens: + type: array + items: + type: string + top_logprobs: + type: array + items: + type: object + additionalProperties: + type: number + text: + type: string + created: + type: integer + description: The Unix timestamp (in seconds) of when the completion was created. + model: + type: string + description: The model used for completion. + system_fingerprint: + type: string + description: | + This fingerprint represents the backend configuration that the model runs with. + + Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. + object: + type: string + description: The object type, which is always "text_completion" + enum: [text_completion] + usage: + $ref: "#/components/schemas/CompletionUsage" + required: + - id + - object + - created + - model + - choices + x-oaiMeta: + name: The completion object + legacy: true + example: | + { + "id": "cmpl-uqkvlQyYK7bGYrRHQ0eXlWi7", + "object": "text_completion", + "created": 1589478378, + "model": "gpt-3.5-turbo", + "choices": [ + { + "text": "\n\nThis is indeed a test", + "index": 0, + "logprobs": null, + "finish_reason": "length" + } + ], + "usage": { + "prompt_tokens": 5, + "completion_tokens": 7, + "total_tokens": 12 + } + } + + ChatCompletionRequestMessageContentPart: + oneOf: + - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartText" + - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartImage" + x-oaiExpandable: true + + ChatCompletionRequestMessageContentPartImage: + type: object + title: Image content part + properties: + type: + type: string + enum: ["image_url"] + description: The type of the content part. + image_url: + type: object + properties: + url: + type: string + description: Either a URL of the image or the base64 encoded image data. + format: uri + detail: + type: string + description: Specifies the detail level of the image. Learn more in the [Vision guide](/docs/guides/vision/low-or-high-fidelity-image-understanding). + enum: ["auto", "low", "high"] + default: "auto" + required: + - url + required: + - type + - image_url + + ChatCompletionRequestMessageContentPartText: + type: object + title: Text content part + properties: + type: + type: string + enum: ["text"] + description: The type of the content part. + text: + type: string + description: The text content. + required: + - type + - text + + ChatCompletionRequestMessage: + oneOf: + - $ref: "#/components/schemas/ChatCompletionRequestSystemMessage" + - $ref: "#/components/schemas/ChatCompletionRequestUserMessage" + - $ref: "#/components/schemas/ChatCompletionRequestAssistantMessage" + - $ref: "#/components/schemas/ChatCompletionRequestToolMessage" + - $ref: "#/components/schemas/ChatCompletionRequestFunctionMessage" + x-oaiExpandable: true + + ChatCompletionRequestSystemMessage: + type: object + title: System message + properties: + content: + description: The contents of the system message. 
+          type: string
+        role:
+          type: string
+          enum: ["system"]
+          description: The role of the message's author, in this case `system`.
+        name:
+          type: string
+          description: An optional name for the participant. Provides the model information to differentiate between participants of the same role.
+      required:
+        - content
+        - role
+
+    ChatCompletionRequestUserMessage:
+      type: object
+      title: User message
+      properties:
+        content:
+          description: |
+            The contents of the user message.
+          oneOf:
+            - type: string
+              description: The text contents of the message.
+              title: Text content
+            - type: array
+              description: An array of content parts with a defined type; each can be of type `text` or `image_url` when passing in images. You can pass multiple images by adding multiple `image_url` content parts. Image input is only supported when using the `gpt-4-vision-preview` model.
+              title: Array of content parts
+              items:
+                $ref: "#/components/schemas/ChatCompletionRequestMessageContentPart"
+              minItems: 1
+          x-oaiExpandable: true
+        role:
+          type: string
+          enum: ["user"]
+          description: The role of the message's author, in this case `user`.
+        name:
+          type: string
+          description: An optional name for the participant. Provides the model information to differentiate between participants of the same role.
+      required:
+        - content
+        - role
+
+    ChatCompletionRequestAssistantMessage:
+      type: object
+      title: Assistant message
+      properties:
+        content:
+          nullable: true
+          type: string
+          description: |
+            The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified.
+        role:
+          type: string
+          enum: ["assistant"]
+          description: The role of the message's author, in this case `assistant`.
+        name:
+          type: string
+          description: An optional name for the participant. Provides the model information to differentiate between participants of the same role.
+        tool_calls:
+          $ref: "#/components/schemas/ChatCompletionMessageToolCalls"
+        function_call:
+          type: object
+          deprecated: true
+          description: "Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model."
+          properties:
+            arguments:
+              type: string
+              description: The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function.
+            name:
+              type: string
+              description: The name of the function to call.
+          required:
+            - arguments
+            - name
+      required:
+        - role
+
+    ChatCompletionRequestToolMessage:
+      type: object
+      title: Tool message
+      properties:
+        role:
+          type: string
+          enum: ["tool"]
+          description: The role of the message's author, in this case `tool`.
+        content:
+          type: string
+          description: The contents of the tool message.
+        tool_call_id:
+          type: string
+          description: Tool call that this message is responding to.
+      required:
+        - role
+        - content
+        - tool_call_id
+
+    ChatCompletionRequestFunctionMessage:
+      type: object
+      title: Function message
+      deprecated: true
+      properties:
+        role:
+          type: string
+          enum: ["function"]
+          description: The role of the message's author, in this case `function`.
+        content:
+          nullable: true
+          type: string
+          description: The contents of the function message.
+        name:
+          type: string
+          description: The name of the function to call.
+      required:
+        - role
+        - content
+        - name
+
+    FunctionParameters:
+      type: object
+      description: "The parameters the function accepts, described as a JSON Schema object. See the [guide](/docs/guides/text-generation/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. \n\nOmitting `parameters` defines a function with an empty parameter list."
+      additionalProperties: true
+
+    ChatCompletionFunctions:
+      type: object
+      deprecated: true
+      properties:
+        description:
+          type: string
+          description: A description of what the function does, used by the model to choose when and how to call the function.
+        name:
+          type: string
+          description: The name of the function to be called. Must contain only a-z, A-Z, 0-9, underscores, and dashes, with a maximum length of 64.
+        parameters:
+          $ref: "#/components/schemas/FunctionParameters"
+      required:
+        - name
+
+    ChatCompletionFunctionCallOption:
+      type: object
+      description: >
+        Specifying a particular function via `{"name": "my_function"}` forces the model to call that function.
+      properties:
+        name:
+          type: string
+          description: The name of the function to call.
+      required:
+        - name
+
+    ChatCompletionTool:
+      type: object
+      properties:
+        type:
+          type: string
+          enum: ["function"]
+          description: The type of the tool. Currently, only `function` is supported.
+        function:
+          $ref: "#/components/schemas/FunctionObject"
+      required:
+        - type
+        - function
+
+    FunctionObject:
+      type: object
+      properties:
+        description:
+          type: string
+          description: A description of what the function does, used by the model to choose when and how to call the function.
+        name:
+          type: string
+          description: The name of the function to be called. Must contain only a-z, A-Z, 0-9, underscores, and dashes, with a maximum length of 64.
+        parameters:
+          $ref: "#/components/schemas/FunctionParameters"
+      required:
+        - name
+
+    ChatCompletionToolChoiceOption:
+      description: |
+        Controls which (if any) function is called by the model.
+        `none` means the model will not call a function and instead generates a message.
+        `auto` means the model can pick between generating a message or calling a function.
+        Specifying a particular function via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that function.
+
+        `none` is the default when no functions are present. `auto` is the default if functions are present.
+      oneOf:
+        - type: string
+          description: >
+            `none` means the model will not call a function and instead generates a message.
+            `auto` means the model can pick between generating a message or calling a function.
+          enum: [none, auto]
+        - $ref: "#/components/schemas/ChatCompletionNamedToolChoice"
+      x-oaiExpandable: true
+
+    ChatCompletionNamedToolChoice:
+      type: object
+      description: Specifies a tool the model should use. Use to force the model to call a specific function.
+      properties:
+        type:
+          type: string
+          enum: ["function"]
+          description: The type of the tool. Currently, only `function` is supported.
+        function:
+          type: object
+          properties:
+            name:
+              type: string
+              description: The name of the function to call.
+          required:
+            - name
+      required:
+        - type
+        - function
+
+    ChatCompletionMessageToolCalls:
+      type: array
+      description: The tool calls generated by the model, such as function calls.
+ items: + $ref: "#/components/schemas/ChatCompletionMessageToolCall" + + ChatCompletionMessageToolCall: + type: object + properties: + # TODO: index included when streaming + id: + type: string + description: The ID of the tool call. + type: + type: string + enum: ["function"] + description: The type of the tool. Currently, only `function` is supported. + function: + type: object + description: The function that the model called. + properties: + name: + type: string + description: The name of the function to call. + arguments: + type: string + description: The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. + required: + - name + - arguments + required: + - id + - type + - function + + ChatCompletionMessageToolCallChunk: + type: object + properties: + index: + type: integer + id: + type: string + description: The ID of the tool call. + type: + type: string + enum: ["function"] + description: The type of the tool. Currently, only `function` is supported. + function: + type: object + properties: + name: + type: string + description: The name of the function to call. + arguments: + type: string + description: The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. + required: + - index + + # Note, this isn't referenced anywhere, but is kept as a convenience to record all possible roles in one place. + ChatCompletionRole: + type: string + description: The role of the author of a message + enum: + - system + - user + - assistant + - tool + - function + + ChatCompletionResponseMessage: + type: object + description: A chat completion message generated by the model. + properties: + content: + type: string + description: The contents of the message. + nullable: true + tool_calls: + $ref: "#/components/schemas/ChatCompletionMessageToolCalls" + role: + type: string + enum: ["assistant"] + description: The role of the author of this message. + function_call: + type: object + deprecated: true + description: "Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model." + properties: + arguments: + type: string + description: The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. + name: + type: string + description: The name of the function to call. + required: + - name + - arguments + required: + - role + - content + + ChatCompletionStreamResponseDelta: + type: object + description: A chat completion delta generated by streamed model responses. + properties: + content: + type: string + description: The contents of the chunk message. + nullable: true + function_call: + deprecated: true + type: object + description: "Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model." + properties: + arguments: + type: string + description: The arguments to call the function with, as generated by the model in JSON format. 
Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. + name: + type: string + description: The name of the function to call. + tool_calls: + type: array + items: + $ref: "#/components/schemas/ChatCompletionMessageToolCallChunk" + role: + type: string + enum: ["system", "user", "assistant", "tool"] + description: The role of the author of this message. + + CreateChatCompletionRequest: + type: object + properties: + messages: + description: A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). + type: array + minItems: 1 + items: + $ref: "#/components/schemas/ChatCompletionRequestMessage" + model: + description: ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. + example: "gpt-3.5-turbo" + anyOf: + - type: string + - type: string + enum: + [ + "gpt-4-0125-preview", + "gpt-4-turbo-preview", + "gpt-4-1106-preview", + "gpt-4-vision-preview", + "gpt-4", + "gpt-4-0314", + "gpt-4-0613", + "gpt-4-32k", + "gpt-4-32k-0314", + "gpt-4-32k-0613", + "gpt-3.5-turbo", + "gpt-3.5-turbo-16k", + "gpt-3.5-turbo-0301", + "gpt-3.5-turbo-0613", + "gpt-3.5-turbo-1106", + "gpt-3.5-turbo-16k-0613", + ] + x-oaiTypeLabel: string + frequency_penalty: + type: number + default: 0 + minimum: -2 + maximum: 2 + nullable: true + description: *completions_frequency_penalty_description + logit_bias: + type: object + x-oaiTypeLabel: map + default: null + nullable: true + additionalProperties: + type: integer + description: | + Modify the likelihood of specified tokens appearing in the completion. + + Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. + logprobs: + description: Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the `content` of `message`. This option is currently not available on the `gpt-4-vision-preview` model. + type: boolean + default: false + nullable: true + top_logprobs: + description: An integer between 0 and 5 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. + type: integer + minimum: 0 + maximum: 5 + nullable: true + max_tokens: + description: | + The maximum number of [tokens](/tokenizer) that can be generated in the chat completion. + + The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. + type: integer + nullable: true + n: + type: integer + minimum: 1 + maximum: 128 + default: 1 + example: 1 + nullable: true + description: How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. 
Keep `n` as `1` to minimize costs. + presence_penalty: + type: number + default: 0 + minimum: -2 + maximum: 2 + nullable: true + description: *completions_presence_penalty_description + response_format: + type: object + description: | + An object specifying the format that the model must output. Compatible with [GPT-4 Turbo](/docs/models/gpt-4-and-gpt-4-turbo) and `gpt-3.5-turbo-1106`. + + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + + **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + properties: + type: + type: string + enum: ["text", "json_object"] + example: "json_object" + default: "text" + description: Must be one of `text` or `json_object`. + seed: + type: integer + minimum: -9223372036854775808 + maximum: 9223372036854775807 + nullable: true + description: | + This feature is in Beta. + If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. + Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. + x-oaiMeta: + beta: true + stop: + description: | + Up to 4 sequences where the API will stop generating further tokens. + default: null + oneOf: + - type: string + nullable: true + - type: array + minItems: 1 + maxItems: 4 + items: + type: string + stream: + description: > + If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) + as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + type: boolean + nullable: true + default: false + temperature: + type: number + minimum: 0 + maximum: 2 + default: 1 + example: 1 + nullable: true + description: *completions_temperature_description + top_p: + type: number + minimum: 0 + maximum: 1 + default: 1 + example: 1 + nullable: true + description: *completions_top_p_description + tools: + type: array + description: > + A list of tools the model may call. Currently, only functions are supported as a tool. + Use this to provide a list of functions the model may generate JSON inputs for. + items: + $ref: "#/components/schemas/ChatCompletionTool" + tool_choice: + $ref: "#/components/schemas/ChatCompletionToolChoiceOption" + user: *end_user_param_configuration + function_call: + deprecated: true + description: | + Deprecated in favor of `tool_choice`. + + Controls which (if any) function is called by the model. + `none` means the model will not call a function and instead generates a message. + `auto` means the model can pick between generating a message or calling a function. + Specifying a particular function via `{"name": "my_function"}` forces the model to call that function. + + `none` is the default when no functions are present. 
`auto` is the default if functions are present. + oneOf: + - type: string + description: > + `none` means the model will not call a function and instead generates a message. + `auto` means the model can pick between generating a message or calling a function. + enum: [none, auto] + - $ref: "#/components/schemas/ChatCompletionFunctionCallOption" + x-oaiExpandable: true + functions: + deprecated: true + description: | + Deprecated in favor of `tools`. + + A list of functions the model may generate JSON inputs for. + type: array + minItems: 1 + maxItems: 128 + items: + $ref: "#/components/schemas/ChatCompletionFunctions" + + required: + - model + - messages + + CreateChatCompletionResponse: + type: object + description: Represents a chat completion response returned by model, based on the provided input. + properties: + id: + type: string + description: A unique identifier for the chat completion. + choices: + type: array + description: A list of chat completion choices. Can be more than one if `n` is greater than 1. + items: + type: object + required: + - finish_reason + - index + - message + - logprobs + properties: + finish_reason: + type: string + description: &chat_completion_finish_reason_description | + The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, + `length` if the maximum number of tokens specified in the request was reached, + `content_filter` if content was omitted due to a flag from our content filters, + `tool_calls` if the model called a tool, or `function_call` (deprecated) if the model called a function. + enum: + [ + "stop", + "length", + "tool_calls", + "content_filter", + "function_call", + ] + index: + type: integer + description: The index of the choice in the list of choices. + message: + $ref: "#/components/schemas/ChatCompletionResponseMessage" + logprobs: &chat_completion_response_logprobs + description: Log probability information for the choice. + type: object + nullable: true + properties: + content: + description: A list of message content tokens with log probability information. + type: array + items: + $ref: "#/components/schemas/ChatCompletionTokenLogprob" + nullable: true + required: + - content + created: + type: integer + description: The Unix timestamp (in seconds) of when the chat completion was created. + model: + type: string + description: The model used for the chat completion. + system_fingerprint: + type: string + description: | + This fingerprint represents the backend configuration that the model runs with. + + Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. + object: + type: string + description: The object type, which is always `chat.completion`. + enum: [chat.completion] + usage: + $ref: "#/components/schemas/CompletionUsage" + required: + - choices + - created + - id + - model + - object + x-oaiMeta: + name: The chat completion object + group: chat + example: *chat_completion_example + + CreateChatCompletionFunctionResponse: + type: object + description: Represents a chat completion response returned by model, based on the provided input. + properties: + id: + type: string + description: A unique identifier for the chat completion. + choices: + type: array + description: A list of chat completion choices. Can be more than one if `n` is greater than 1. 
+ items: + type: object + required: + - finish_reason + - index + - message + - logprobs + properties: + finish_reason: + type: string + description: + &chat_completion_function_finish_reason_description | + The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, `length` if the maximum number of tokens specified in the request was reached, `content_filter` if content was omitted due to a flag from our content filters, or `function_call` if the model called a function. + enum: ["stop", "length", "function_call", "content_filter"] + index: + type: integer + description: The index of the choice in the list of choices. + message: + $ref: "#/components/schemas/ChatCompletionResponseMessage" + created: + type: integer + description: The Unix timestamp (in seconds) of when the chat completion was created. + model: + type: string + description: The model used for the chat completion. + system_fingerprint: + type: string + description: | + This fingerprint represents the backend configuration that the model runs with. + + Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. + object: + type: string + description: The object type, which is always `chat.completion`. + enum: [chat.completion] + usage: + $ref: "#/components/schemas/CompletionUsage" + required: + - choices + - created + - id + - model + - object + x-oaiMeta: + name: The chat completion object + group: chat + example: *chat_completion_function_example + + ChatCompletionTokenLogprob: + type: object + properties: + token: &chat_completion_response_logprobs_token + description: The token. + type: string + logprob: &chat_completion_response_logprobs_token_logprob + description: The log probability of this token. + type: number + bytes: &chat_completion_response_logprobs_bytes + description: A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token. + type: array + items: + type: integer + nullable: true + top_logprobs: + description: List of the most likely tokens and their log probability, at this token position. In rare cases, there may be fewer than the number of requested `top_logprobs` returned. + type: array + items: + type: object + properties: + token: *chat_completion_response_logprobs_token + logprob: *chat_completion_response_logprobs_token_logprob + bytes: *chat_completion_response_logprobs_bytes + required: + - token + - logprob + - bytes + required: + - token + - logprob + - bytes + - top_logprobs + + ListPaginatedFineTuningJobsResponse: + type: object + properties: + data: + type: array + items: + $ref: "#/components/schemas/FineTuningJob" + has_more: + type: boolean + object: + type: string + enum: [list] + required: + - object + - data + - has_more + + CreateChatCompletionStreamResponse: + type: object + description: Represents a streamed chunk of a chat completion response returned by model, based on the provided input. + properties: + id: + type: string + description: A unique identifier for the chat completion. Each chunk has the same ID. + choices: + type: array + description: A list of chat completion choices. Can be more than one if `n` is greater than 1. 
+          items:
+            type: object
+            required:
+              - delta
+              - finish_reason
+              - index
+            properties:
+              delta:
+                $ref: "#/components/schemas/ChatCompletionStreamResponseDelta"
+              logprobs: *chat_completion_response_logprobs
+              finish_reason:
+                type: string
+                description: *chat_completion_finish_reason_description
+                enum:
+                  [
+                    "stop",
+                    "length",
+                    "tool_calls",
+                    "content_filter",
+                    "function_call",
+                  ]
+                nullable: true
+              index:
+                type: integer
+                description: The index of the choice in the list of choices.
+        created:
+          type: integer
+          description: The Unix timestamp (in seconds) of when the chat completion was created. Each chunk has the same timestamp.
+        model:
+          type: string
+          description: The model used to generate the completion.
+        system_fingerprint:
+          type: string
+          description: |
+            This fingerprint represents the backend configuration that the model runs with.
+            Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism.
+        object:
+          type: string
+          description: The object type, which is always `chat.completion.chunk`.
+          enum: [chat.completion.chunk]
+      required:
+        - choices
+        - created
+        - id
+        - model
+        - object
+      x-oaiMeta:
+        name: The chat completion chunk object
+        group: chat
+        example: *chat_completion_chunk_example
+
+    CreateChatCompletionImageResponse:
+      type: object
+      description: Represents a chat completion response returned by model, based on the provided input.
+      x-oaiMeta:
+        name: The chat completion object
+        group: chat
+        example: *chat_completion_image_example
+
+    CreateImageRequest:
+      type: object
+      properties:
+        prompt:
+          description: A text description of the desired image(s). The maximum length is 1000 characters for `dall-e-2` and 4000 characters for `dall-e-3`.
+          type: string
+          example: "A cute baby sea otter"
+        model:
+          anyOf:
+            - type: string
+            - type: string
+              enum: ["dall-e-2", "dall-e-3"]
+          x-oaiTypeLabel: string
+          default: "dall-e-2"
+          example: "dall-e-3"
+          nullable: true
+          description: The model to use for image generation.
+        n: &images_n
+          type: integer
+          minimum: 1
+          maximum: 10
+          default: 1
+          example: 1
+          nullable: true
+          description: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only `n=1` is supported.
+        quality:
+          type: string
+          enum: ["standard", "hd"]
+          default: "standard"
+          example: "standard"
+          description: The quality of the image that will be generated. `hd` creates images with finer details and greater consistency across the image. This param is only supported for `dall-e-3`.
+        response_format: &images_response_format
+          type: string
+          enum: ["url", "b64_json"]
+          default: "url"
+          example: "url"
+          nullable: true
+          description: The format in which the generated images are returned. Must be one of `url` or `b64_json`.
+        size: &images_size
+          type: string
+          enum: ["256x256", "512x512", "1024x1024", "1792x1024", "1024x1792"]
+          default: "1024x1024"
+          example: "1024x1024"
+          nullable: true
+          description: The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3` models.
+        style:
+          type: string
+          enum: ["vivid", "natural"]
+          default: "vivid"
+          example: "vivid"
+          nullable: true
+          description: The style of the generated images. Must be one of `vivid` or `natural`. Vivid causes the model to lean towards generating hyper-real and dramatic images. Natural causes the model to produce more natural, less hyper-real looking images.
This param is only supported for `dall-e-3`. + user: *end_user_param_configuration + required: + - prompt + + ImagesResponse: + properties: + created: + type: integer + data: + type: array + items: + $ref: "#/components/schemas/Image" + required: + - created + - data + + Image: + type: object + description: Represents the URL or the content of an image generated by the OpenAI API. + properties: + b64_json: + type: string + description: The base64-encoded JSON of the generated image, if `response_format` is `b64_json`. + url: + type: string + description: The URL of the generated image, if `response_format` is `url` (default). + revised_prompt: + type: string + description: The prompt that was used to generate the image, if there was any revision to the prompt. + x-oaiMeta: + name: The image object + example: | + { + "url": "...", + "revised_prompt": "..." + } + + CreateImageEditRequest: + type: object + properties: + image: + description: The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask. + type: string + format: binary + prompt: + description: A text description of the desired image(s). The maximum length is 1000 characters. + type: string + example: "A cute baby sea otter wearing a beret" + mask: + description: An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where `image` should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as `image`. + type: string + format: binary + model: + anyOf: + - type: string + - type: string + enum: ["dall-e-2"] + x-oaiTypeLabel: string + default: "dall-e-2" + example: "dall-e-2" + nullable: true + description: The model to use for image generation. Only `dall-e-2` is supported at this time. + n: + type: integer + minimum: 1 + maximum: 10 + default: 1 + example: 1 + nullable: true + description: The number of images to generate. Must be between 1 and 10. + size: &dalle2_images_size + type: string + enum: ["256x256", "512x512", "1024x1024"] + default: "1024x1024" + example: "1024x1024" + nullable: true + description: The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. + response_format: *images_response_format + user: *end_user_param_configuration + required: + - prompt + - image + + CreateImageVariationRequest: + type: object + properties: + image: + description: The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, and square. + type: string + format: binary + model: + anyOf: + - type: string + - type: string + enum: ["dall-e-2"] + x-oaiTypeLabel: string + default: "dall-e-2" + example: "dall-e-2" + nullable: true + description: The model to use for image generation. Only `dall-e-2` is supported at this time. + n: *images_n + response_format: *images_response_format + size: *dalle2_images_size + user: *end_user_param_configuration + required: + - image + + CreateModerationRequest: + type: object + properties: + input: + description: The input text to classify + oneOf: + - type: string + default: "" + example: "I want to kill them." + - type: array + items: + type: string + default: "" + example: "I want to kill them." + model: + description: | + Two content moderation models are available: `text-moderation-stable` and `text-moderation-latest`. + + The default is `text-moderation-latest` which will be automatically upgraded over time. This ensures you are always using our most accurate model.
If you use `text-moderation-stable`, we will provide advance notice before updating the model. Accuracy of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`. + nullable: false + default: "text-moderation-latest" + example: "text-moderation-stable" + anyOf: + - type: string + - type: string + enum: ["text-moderation-latest", "text-moderation-stable"] + x-oaiTypeLabel: string + required: + - input - securitySchemes: - ApiKeyAuth: - type: http - scheme: 'bearer' + CreateModerationResponse: + type: object + description: Represents a policy compliance report by OpenAI's content moderation model against a given input. + properties: + id: + type: string + description: The unique identifier for the moderation request. + model: + type: string + description: The model used to generate the moderation results. + results: + type: array + description: A list of moderation objects. + items: + type: object + properties: + flagged: + type: boolean + description: Whether the content violates [OpenAI's usage policies](/policies/usage-policies). + categories: + type: object + description: A list of the categories, and whether they are flagged or not. + properties: + hate: + type: boolean + description: Content that expresses, incites, or promotes hate based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste. Hateful content aimed at non-protected groups (e.g., chess players) is harassment. + hate/threatening: + type: boolean + description: Hateful content that also includes violence or serious harm towards the targeted group based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste. + harassment: + type: boolean + description: Content that expresses, incites, or promotes harassing language towards any target. + harassment/threatening: + type: boolean + description: Harassment content that also includes violence or serious harm towards any target. + self-harm: + type: boolean + description: Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting, and eating disorders. + self-harm/intent: + type: boolean + description: Content where the speaker expresses that they are engaging or intend to engage in acts of self-harm, such as suicide, cutting, and eating disorders. + self-harm/instructions: + type: boolean + description: Content that encourages performing acts of self-harm, such as suicide, cutting, and eating disorders, or that gives instructions or advice on how to commit such acts. + sexual: + type: boolean + description: Content meant to arouse sexual excitement, such as the description of sexual activity, or that promotes sexual services (excluding sex education and wellness). + sexual/minors: + type: boolean + description: Sexual content that includes an individual who is under 18 years old. + violence: + type: boolean + description: Content that depicts death, violence, or physical injury. + violence/graphic: + type: boolean + description: Content that depicts death, violence, or physical injury in graphic detail. + required: + - hate + - hate/threatening + - harassment + - harassment/threatening + - self-harm + - self-harm/intent + - self-harm/instructions + - sexual + - sexual/minors + - violence + - violence/graphic + category_scores: + type: object + description: A list of the categories along with their scores as predicted by the model. + properties: + hate: + type: number + description: The score for the category 'hate'.
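+ # Illustrative only (values invented): each category score is a number, typically a
+ # probability between 0 and 1, e.g. "hate": 0.0008, "violence": 0.9123.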
+ hate/threatening: + type: number + description: The score for the category 'hate/threatening'. + harassment: + type: number + description: The score for the category 'harassment'. + harassment/threatening: + type: number + description: The score for the category 'harassment/threatening'. + self-harm: + type: number + description: The score for the category 'self-harm'. + self-harm/intent: + type: number + description: The score for the category 'self-harm/intent'. + self-harm/instructions: + type: number + description: The score for the category 'self-harm/instructions'. + sexual: + type: number + description: The score for the category 'sexual'. + sexual/minors: + type: number + description: The score for the category 'sexual/minors'. + violence: + type: number + description: The score for the category 'violence'. + violence/graphic: + type: number + description: The score for the category 'violence/graphic'. + required: + - hate + - hate/threatening + - harassment + - harassment/threatening + - self-harm + - self-harm/intent + - self-harm/instructions + - sexual + - sexual/minors + - violence + - violence/graphic + required: + - flagged + - categories + - category_scores + required: + - id + - model + - results + x-oaiMeta: + name: The moderation object + example: *moderation_example - schemas: - Error: + ListFilesResponse: type: object properties: - type: + data: + type: array + items: + $ref: "#/components/schemas/OpenAIFile" + object: type: string - nullable: false - message: + enum: [list] + required: + - object + - data + + CreateFileRequest: + type: object + additionalProperties: false + properties: + file: + description: | + The File object (not file name) to be uploaded. type: string - nullable: false - param: + format: binary + purpose: + description: | + The intended purpose of the uploaded file. + + Use "fine-tune" for [Fine-tuning](/docs/api-reference/fine-tuning) and "assistants" for [Assistants](/docs/api-reference/assistants) and [Messages](/docs/api-reference/messages). This allows us to validate that the format of the uploaded file is correct for fine-tuning. type: string - nullable: true - code: + enum: ["fine-tune", "assistants"] + required: + - file + - purpose + + DeleteFileResponse: + type: object + properties: + id: type: string - nullable: true + object: + type: string + enum: [file] + deleted: + type: boolean required: - - type - - message - - param - - code + - id + - object + - deleted - ErrorResponse: + CreateFineTuningJobRequest: type: object properties: - error: - $ref: "#/components/schemas/Error" + model: + description: | + The name of the model to fine-tune. You can select one of the + [supported models](/docs/guides/fine-tuning/what-models-can-be-fine-tuned). + example: "gpt-3.5-turbo" + anyOf: + - type: string + - type: string + enum: ["babbage-002", "davinci-002", "gpt-3.5-turbo"] + x-oaiTypeLabel: string + training_file: + description: | + The ID of an uploaded file that contains training data. + + See [upload file](/docs/api-reference/files/upload) for how to upload a file. + + Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose `fine-tune`. + + See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. + type: string + example: "file-abc123" + hyperparameters: + type: object + description: The hyperparameters used for the fine-tuning job. + properties: + batch_size: + description: | + Number of examples in each batch.
A larger batch size means that model parameters + are updated less frequently, but with lower variance. + oneOf: + - type: string + enum: [auto] + - type: integer + minimum: 1 + maximum: 256 + default: auto + learning_rate_multiplier: + description: | + Scaling factor for the learning rate. A smaller learning rate may be useful to avoid + overfitting. + oneOf: + - type: string + enum: [auto] + - type: number + minimum: 0 + exclusiveMinimum: true + default: auto + n_epochs: + description: | + The number of epochs to train the model for. An epoch refers to one full cycle + through the training dataset. + oneOf: + - type: string + enum: [auto] + - type: integer + minimum: 1 + maximum: 50 + default: auto + suffix: + description: | + A string of up to 18 characters that will be added to your fine-tuned model name. + + For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`. + type: string + minLength: 1 + maxLength: 40 + default: null + nullable: true + validation_file: + description: | + The ID of an uploaded file that contains validation data. + + If you provide this file, the data is used to generate validation + metrics periodically during fine-tuning. These metrics can be viewed in + the fine-tuning results file. + The same data should not be present in both training and validation files. + + Your dataset must be formatted as a JSONL file. You must upload your file with the purpose `fine-tune`. + + See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. + type: string + nullable: true + example: "file-abc123" required: - - error + - model + - training_file - ListModelsResponse: + ListFineTuningJobEventsResponse: type: object properties: - object: - type: string data: type: array items: - $ref: "#/components/schemas/Model" - required: - - object - - data - - DeleteModelResponse: - type: object - properties: - id: - type: string + $ref: "#/components/schemas/FineTuningJobEvent" object: type: string - deleted: - type: boolean + enum: [list] required: - - id - object - - deleted + - data - CreateCompletionRequest: + CreateEmbeddingRequest: type: object + additionalProperties: false properties: - model: - description: &model_description | - ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. - anyOf: - - type: string - - type: string - enum: - [ - "babbage-002", - "davinci-002", - "gpt-3.5-turbo-instruct", - "text-davinci-003", - "text-davinci-002", - "text-davinci-001", - "code-davinci-002", - "text-curie-001", - "text-babbage-001", - "text-ada-001", - ] - x-oaiTypeLabel: string - prompt: - description: &completions_prompt_description | - The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. - - Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document. - default: "<|endoftext|>" - nullable: true + input: + description: | + Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. The input must not exceed the max input tokens for the model (8192 tokens for `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048 dimensions or less.
[Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. + example: "The quick brown fox jumped over the lazy dog" oneOf: - type: string + title: string + description: The string that will be turned into an embedding. default: "" example: "This is a test." - type: array + title: array + description: The array of strings that will be turned into an embedding. + minItems: 1 + maxItems: 2048 items: type: string default: "" - example: "This is a test." + example: "['This is a test.']" - type: array + title: array + description: The array of integers that will be turned into an embedding. minItems: 1 + maxItems: 2048 items: type: integer example: "[1212, 318, 257, 1332, 13]" - type: array + title: array + description: The array of arrays containing integers that will be turned into an embedding. minItems: 1 + maxItems: 2048 items: type: array minItems: 1 items: type: integer example: "[[1212, 318, 257, 1332, 13]]" - suffix: - description: The suffix that comes after a completion of inserted text. - default: null - nullable: true + x-oaiExpandable: true + model: + description: *model_description + example: "text-embedding-3-small" + anyOf: + - type: string + - type: string + enum: ["text-embedding-ada-002", "text-embedding-3-small", "text-embedding-3-large"] + x-oaiTypeLabel: string + encoding_format: + description: "The format to return the embeddings in. Can be either `float` or [`base64`](https://pypi.org/project/pybase64/)." + example: "float" + default: "float" type: string - example: "test." - max_tokens: - type: integer - minimum: 0 - default: 16 - example: 16 - nullable: true - description: &completions_max_tokens_description | - The maximum number of [tokens](/tokenizer) to generate in the completion. - - The token count of your prompt plus `max_tokens` cannot exceed the model's context length. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) for counting tokens. - temperature: - type: number - minimum: 0 - maximum: 2 - default: 1 - example: 1 - nullable: true - description: &completions_temperature_description | - What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. - - We generally recommend altering this or `top_p` but not both. - top_p: - type: number - minimum: 0 - maximum: 1 - default: 1 - example: 1 - nullable: true - description: &completions_top_p_description | - An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or `temperature` but not both. - n: + enum: ["float", "base64"] + dimensions: + description: | + The number of dimensions the resulting output embeddings should have. Only supported in `text-embedding-3` and later models. type: integer minimum: 1 - maximum: 128 - default: 1 - example: 1 - nullable: true - description: &completions_completions_description | - How many completions to generate for each prompt. + user: *end_user_param_configuration + required: + - model + - input - **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. 
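+ # Illustrative only, not normative: a minimal JSON body satisfying CreateEmbeddingRequest,
+ # reusing the example values declared above (only `model` and `input` are required):
+ #   {"model": "text-embedding-3-small",
+ #    "input": "The quick brown fox jumped over the lazy dog",
+ #    "encoding_format": "float"}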
- stream: - description: > - Whether to stream back partial progress. If set, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) - as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb). - type: boolean - nullable: true - default: false - logprobs: &completions_logprobs_configuration - type: integer - minimum: 0 - maximum: 5 - default: null - nullable: true - description: &completions_logprobs_description | - Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. + CreateEmbeddingResponse: + type: object + properties: + data: + type: array + description: The list of embeddings generated by the model. + items: + $ref: "#/components/schemas/Embedding" + model: + type: string + description: The name of the model used to generate the embedding. + object: + type: string + description: The object type, which is always "list". + enum: [list] + usage: + type: object + description: The usage information for the request. + properties: + prompt_tokens: + type: integer + description: The number of tokens used by the prompt. + total_tokens: + type: integer + description: The total number of tokens used by the request. + required: + - prompt_tokens + - total_tokens + required: + - object + - model + - data + - usage - The maximum value for `logprobs` is 5. - echo: - type: boolean - default: false - nullable: true - description: &completions_echo_description > - Echo back the prompt in addition to the completion - stop: - description: &completions_stop_description > - Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. - default: null - nullable: true - oneOf: + CreateTranscriptionRequest: + type: object + additionalProperties: false + properties: + file: + description: | + The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. + type: string + x-oaiTypeLabel: file + format: binary + model: + description: | + ID of the model to use. Only `whisper-1` is currently available. + example: whisper-1 + anyOf: - type: string - default: <|endoftext|> - example: "\n" - nullable: true - - type: array - minItems: 1 - maxItems: 4 - items: - type: string - example: '["\n"]' - presence_penalty: + - type: string + enum: ["whisper-1"] + x-oaiTypeLabel: string + language: + description: | + The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency. + type: string + prompt: + description: | + An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language. + type: string + response_format: + description: | + The format of the transcript output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`. 
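+ # Illustrative only: CreateTranscriptionRequest is submitted as multipart/form-data
+ # rather than JSON, e.g. with form fields file=@speech.mp3 (binary), model=whisper-1,
+ # and response_format=json; the file name "speech.mp3" is invented.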
+ type: string + enum: + - json + - text + - srt + - verbose_json + - vtt + default: json + temperature: + description: | + The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit. type: number default: 0 - minimum: -2 - maximum: 2 - nullable: true - description: &completions_presence_penalty_description | - Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. + required: + - file + - model - [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) - frequency_penalty: + # Note: This does not currently support the non-default response format types. + CreateTranscriptionResponse: + type: object + properties: + text: + type: string + required: + - text + + CreateTranslationRequest: + type: object + additionalProperties: false + properties: + file: + description: | + The audio file object (not file name) to translate, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. + type: string + x-oaiTypeLabel: file + format: binary + model: + description: | + ID of the model to use. Only `whisper-1` is currently available. + example: whisper-1 + anyOf: + - type: string + - type: string + enum: ["whisper-1"] + x-oaiTypeLabel: string + prompt: + description: | + An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English. + type: string + response_format: + description: | + The format of the transcript output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`. + type: string + default: json + temperature: + description: | + The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit. type: number default: 0 - minimum: -2 - maximum: 2 - nullable: true - description: &completions_frequency_penalty_description | - Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. - - [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) - best_of: - type: integer - default: 1 - minimum: 0 - maximum: 20 - nullable: true - description: &completions_best_of_description | - Generates `best_of` completions server-side and returns the "best" (the one with the highest log probability per token). Results cannot be streamed. - - When used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return – `best_of` must be greater than `n`. - - **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.
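+ # Illustrative only (transcript text invented): with the default `json` response
+ # format, both the transcription and translation responses above reduce to a single
+ # field, e.g. {"text": "Hello, how are you today?"}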
- logit_bias: &completions_logit_bias - type: object - x-oaiTypeLabel: map - default: null - nullable: true - additionalProperties: - type: integer - description: &completions_logit_bias_description | - Modify the likelihood of specified tokens appearing in the completion. - - Accepts a json object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. + required: + - file + - model - As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token from being generated. - user: &end_user_param_configuration + # Note: This does not currently support the non-default response format types. + CreateTranslationResponse: + type: object + properties: + text: type: string - example: user-1234 + required: + - text + + CreateSpeechRequest: + type: object + additionalProperties: false + properties: + model: description: | - A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). + One of the available [TTS models](/docs/models/tts): `tts-1` or `tts-1-hd` + anyOf: + - type: string + - type: string + enum: ["tts-1", "tts-1-hd"] + x-oaiTypeLabel: string + input: + type: string + description: The text to generate audio for. The maximum length is 4096 characters. + maxLength: 4096 + voice: + description: The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the [Text to speech guide](/docs/guides/text-to-speech/voice-options). + type: string + enum: ["alloy", "echo", "fable", "onyx", "nova", "shimmer"] + response_format: + description: "The format to return audio in. Supported formats are `mp3`, `opus`, `aac`, and `flac`." + default: "mp3" + type: string + enum: ["mp3", "opus", "aac", "flac"] + speed: + description: "The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is the default." + type: number + default: 1.0 + minimum: 0.25 + maximum: 4.0 required: - model - - prompt + - input + - voice - CreateCompletionResponse: - type: object - description: | - Represents a completion response from the API. Note: both the streamed and non-streamed response objects share the same shape (unlike the chat endpoint). + Model: + title: Model + description: Describes an OpenAI model offering that can be used with the API. properties: id: type: string - description: A unique identifier for the completion. + description: The model identifier, which can be referenced in the API endpoints. created: type: integer - description: The Unix timestamp (in seconds) of when the completion was created. + description: The Unix timestamp (in seconds) when the model was created. - model: + object: type: string - description: The model used for completion. - choices: - type: array - description: The list of completion choices the model generated for the input prompt.
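+ # Illustrative only: a minimal JSON body satisfying CreateSpeechRequest above, relying
+ # on the declared defaults for `response_format` (mp3) and `speed` (1.0); the input
+ # sentence is invented:
+ #   {"model": "tts-1", "voice": "alloy", "input": "The quick brown fox jumped over the lazy dog."}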
- items: - type: object - required: - - text - - index - - logprobs - - finish_reason - properties: - text: - type: string - index: - type: integer - logprobs: - type: object - nullable: true - properties: - tokens: - type: array - items: - type: string - token_logprobs: - type: array - items: - type: number - top_logprobs: - type: array - items: - type: object - additionalProperties: - type: integer - text_offset: - type: array - items: - type: integer - finish_reason: - type: string - description: &completion_finish_reason_description | - The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, - `length` if the maximum number of tokens specified in the request was reached, - or `content_filter` if content was omitted due to a flag from our content filters. - enum: ["stop", "length", "content_filter"] - usage: - $ref: "#/components/schemas/CompletionUsage" + description: The object type, which is always "model". + enum: [model] + owned_by: + type: string + description: The organization that owns the model. required: - id - object - created - - model - - choices + - owned_by x-oaiMeta: - name: The completion object - legacy: true - example: | - { - "id": "cmpl-uqkvlQyYK7bGYrRHQ0eXlWi7", - "object": "text_completion", - "created": 1589478378, - "model": "gpt-3.5-turbo", - "choices": [ - { - "text": "\n\nThis is indeed a test", - "index": 0, - "logprobs": null, - "finish_reason": "length" - } - ], - "usage": { - "prompt_tokens": 5, - "completion_tokens": 7, - "total_tokens": 12 - } - } + name: The model object + example: *retrieve_model_response - ChatCompletionRequestMessage: - type: object + OpenAIFile: + title: OpenAIFile + description: The `File` object represents a document that has been uploaded to OpenAI. properties: - role: + id: type: string - enum: ["system", "user", "assistant", "function"] - description: The role of the messages author. One of `system`, `user`, `assistant`, or `function`. - content: + description: The file identifier, which can be referenced in the API endpoints. + bytes: + type: integer + description: The size of the file, in bytes. + created_at: + type: integer + description: The Unix timestamp (in seconds) for when the file was created. + filename: type: string - nullable: true - description: The contents of the message. `content` is required for all messages, and may be null for assistant messages with function calls. - name: + description: The name of the file. + object: type: string - description: The name of the author of this message. `name` is required if role is `function`, and it should be the name of the function whose response is in the `content`. May contain a-z, A-Z, 0-9, and underscores, with a maximum length of 64 characters. - function_call: - type: object - description: The name and arguments of a function that should be called, as generated by the model. - properties: - name: - type: string - description: The name of the function to call. - arguments: - type: string - description: The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. - required: - - name - - arguments - required: - - role - - content - - ChatCompletionFunctionParameters: - type: object - description: "The parameters the functions accepts, described as a JSON Schema object. 
See the [guide](/docs/guides/gpt/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format.\n\nTo describe a function that accepts no parameters, provide the value `{\"type\": \"object\", \"properties\": {}}`." - additionalProperties: true - - ChatCompletionFunctions: - type: object - properties: - name: + description: The object type, which is always `file`. + enum: ["file"] + purpose: type: string - description: The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. - description: + description: The intended purpose of the file. Supported values are `fine-tune`, `fine-tune-results`, `assistants`, and `assistants_output`. + enum: + [ + "fine-tune", + "fine-tune-results", + "assistants", + "assistants_output", + ] + status: type: string - description: A description of what the function does, used by the model to choose when and how to call the function. - parameters: - $ref: "#/components/schemas/ChatCompletionFunctionParameters" + deprecated: true + description: Deprecated. The current status of the file, which can be either `uploaded`, `processed`, or `error`. + enum: ["uploaded", "processed", "error"] + status_details: + type: string + deprecated: true + description: Deprecated. For details on why a fine-tuning training file failed validation, see the `error` field on `fine_tuning.job`. required: - - name - - parameters - - ChatCompletionFunctionCallOption: + - id + - object + - bytes + - created_at + - filename + - purpose + - status + x-oaiMeta: + name: The file object + example: | + { + "id": "file-abc123", + "object": "file", + "bytes": 120000, + "created_at": 1677610602, + "filename": "salesOverview.pdf", + "purpose": "assistants" + } + Embedding: type: object + description: | + Represents an embedding vector returned by the embedding endpoint. properties: - name: + index: + type: integer + description: The index of the embedding in the list of embeddings. + embedding: + type: array + description: | + The embedding vector, which is a list of floats. The length of the vector depends on the model as listed in the [embedding guide](/docs/guides/embeddings). + items: + type: number + object: type: string - description: The name of the function to call. + description: The object type, which is always "embedding". + enum: [embedding] required: - - name + - index + - object + - embedding + x-oaiMeta: + name: The embedding object + example: | + { + "object": "embedding", + "embedding": [ + 0.0023064255, + -0.009327292, + .... (1536 floats total for ada-002) + -0.0028842222, + ], + "index": 0 + } - ChatCompletionResponseMessage: + FineTuningJob: type: object - description: A chat completion message generated by the model. + title: FineTuningJob + description: | + The `fine_tuning.job` object represents a fine-tuning job that has been created through the API. properties: - role: - type: string - enum: ["system", "user", "assistant", "function"] - description: The role of the author of this message. - content: + id: type: string - description: The contents of the message. - nullable: true - function_call: + description: The object identifier, which can be referenced in the API endpoints. + created_at: + type: integer + description: The Unix timestamp (in seconds) for when the fine-tuning job was created. + error: type: object - description: The name and arguments of a function that should be called, as generated by the model.
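+ # Illustrative only (error values invented): for a job that failed validation, this
+ # error object might look like:
+ #   {"code": "invalid_training_file", "message": "The training file is malformed.", "param": "training_file"}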
+ nullable: true + description: For fine-tuning jobs that have `failed`, this will contain more information on the cause of the failure. properties: - name: + code: type: string - description: The name of the function to call. - arguments: + description: A machine-readable error code. + message: type: string - description: The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. + description: A human-readable error message. + param: + type: string + description: The parameter that was invalid, usually `training_file` or `validation_file`. This field will be null if the failure was not parameter-specific. + nullable: true required: - - name - - arguments - required: - - role - - content - - ChatCompletionStreamResponseDelta: - type: object - description: A chat completion delta generated by streamed model responses. - properties: - role: - type: string - enum: ["system", "user", "assistant", "function"] - description: The role of the author of this message. - content: + - code + - message + - param + fine_tuned_model: type: string - description: The contents of the chunk message. nullable: true - function_call: + description: The name of the fine-tuned model that is being created. The value will be null if the fine-tuning job is still running. + finished_at: + type: integer + nullable: true + description: The Unix timestamp (in seconds) for when the fine-tuning job was finished. The value will be null if the fine-tuning job is still running. + hyperparameters: type: object - description: The name and arguments of a function that should be called, as generated by the model. + description: The hyperparameters used for the fine-tuning job. See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. properties: - name: - type: string - description: The name of the function to call. - arguments: - type: string - description: The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. + n_epochs: + oneOf: + - type: string + enum: [auto] + - type: integer + minimum: 1 + maximum: 50 + default: auto + description: + The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset. - CreateChatCompletionRequest: - type: object - properties: + "auto" decides the optimal number of epochs based on the size of the dataset. If setting the number manually, we support any number between 1 and 50 epochs. + required: + - n_epochs model: - description: ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. - example: "gpt-3.5-turbo" - anyOf: - - type: string - - type: string - enum: - [ - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0301", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-16k-0613", - ] - x-oaiTypeLabel: string - messages: - description: A list of messages comprising the conversation so far. 
[Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb). - type: array - minItems: 1 - items: - $ref: "#/components/schemas/ChatCompletionRequestMessage" - functions: - description: A list of functions the model may generate JSON inputs for. + type: string + description: The base model that is being fine-tuned. + object: + type: string + description: The object type, which is always "fine_tuning.job". + enum: [fine_tuning.job] + organization_id: + type: string + description: The organization that owns the fine-tuning job. + result_files: type: array - minItems: 1 - maxItems: 128 + description: The compiled results file ID(s) for the fine-tuning job. You can retrieve the results with the [Files API](/docs/api-reference/files/retrieve-contents). items: - $ref: "#/components/schemas/ChatCompletionFunctions" - function_call: - description: "Controls how the model responds to function calls. `none` means the model does not call a function, and responds to the end-user. `auto` means the model can pick between an end-user or calling a function. Specifying a particular function via `{\"name\": \"my_function\"}` forces the model to call that function. `none` is the default when no functions are present. `auto` is the default if functions are present." - oneOf: - - type: string - enum: [none, auto] - - $ref: "#/components/schemas/ChatCompletionFunctionCallOption" - temperature: - type: number - minimum: 0 - maximum: 2 - default: 1 - example: 1 - nullable: true - description: *completions_temperature_description - top_p: - type: number - minimum: 0 - maximum: 1 - default: 1 - example: 1 - nullable: true - description: *completions_top_p_description - n: + type: string + example: file-abc123 + status: + type: string + description: The current status of the fine-tuning job, which can be either `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`. + enum: + [ + "validating_files", + "queued", + "running", + "succeeded", + "failed", + "cancelled", + ] + trained_tokens: type: integer - minimum: 1 - maximum: 128 - default: 1 - example: 1 nullable: true - description: How many chat completion choices to generate for each input message. - stream: - description: > - If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) - as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb). - type: boolean + description: The total number of billable tokens processed by this fine-tuning job. The value will be null if the fine-tuning job is still running. + training_file: + type: string + description: The file ID used for training. You can retrieve the training data with the [Files API](/docs/api-reference/files/retrieve-contents). + validation_file: + type: string nullable: true - default: false - stop: - description: | - Up to 4 sequences where the API will stop generating further tokens. - default: null - oneOf: - - type: string - nullable: true - - type: array - minItems: 1 - maxItems: 4 - items: - type: string - max_tokens: - description: | - The maximum number of [tokens](/tokenizer) to generate in the chat completion. + description: The file ID used for validation. 
You can retrieve the validation results with the [Files API](/docs/api-reference/files/retrieve-contents). + required: + - created_at + - error + - finished_at + - fine_tuned_model + - hyperparameters + - id + - model + - object + - organization_id + - result_files + - status + - trained_tokens + - training_file + - validation_file + x-oaiMeta: + name: The fine-tuning job object + example: *fine_tuning_example - The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) for counting tokens. - default: inf + FineTuningJobEvent: + type: object + description: Fine-tuning job event object + properties: + id: + type: string + created_at: type: integer - nullable: true - presence_penalty: - type: number - default: 0 - minimum: -2 - maximum: 2 - nullable: true - description: *completions_presence_penalty_description - frequency_penalty: - type: number - default: 0 - minimum: -2 - maximum: 2 - nullable: true - description: *completions_frequency_penalty_description - logit_bias: - type: object - x-oaiTypeLabel: map - default: null - nullable: true - additionalProperties: - type: integer - description: | - Modify the likelihood of specified tokens appearing in the completion. + level: + type: string + enum: ["info", "warn", "error"] + message: + type: string + object: + type: string + enum: [fine_tuning.job.event] + required: + - id + - object + - created_at + - level + - message + x-oaiMeta: + name: The fine-tuning job event object + example: | + { + "object": "fine_tuning.job.event", + "id": "ftevent-abc123", + "created_at": 1677610602, + "level": "info", + "message": "Created fine-tuning job" + } - Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. - user: *end_user_param_configuration + CompletionUsage: + type: object + description: Usage statistics for the completion request. + properties: + completion_tokens: + type: integer + description: Number of tokens in the generated completion. + prompt_tokens: + type: integer + description: Number of tokens in the prompt. + total_tokens: + type: integer + description: Total number of tokens used in the request (prompt + completion). required: - - model - - messages + - prompt_tokens + - completion_tokens + - total_tokens - CreateChatCompletionResponse: + RunCompletionUsage: type: object - description: Represents a chat completion response returned by model, based on the provided input. + description: Usage statistics related to the run. This value will be `null` if the run is not in a terminal state (e.g. `in_progress`, `queued`). properties: - id: - type: string - description: A unique identifier for the chat completion. - object: - type: string - description: The object type, which is always `chat.completion`. - created: + completion_tokens: type: integer - description: The Unix timestamp (in seconds) of when the chat completion was created. - model: - type: string - description: The model used for the chat completion. - choices: - type: array - description: A list of chat completion choices.
Can be more than one if `n` is greater than 1. - items: - type: object - required: - - index - - message - - finish_reason - properties: - index: - type: integer - description: The index of the choice in the list of choices. - message: - $ref: "#/components/schemas/ChatCompletionResponseMessage" - finish_reason: - type: string - description: &chat_completion_finish_reason_description | - The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, - `length` if the maximum number of tokens specified in the request was reached, - `content_filter` if content was omitted due to a flag from our content filters, - or `function_call` if the model called a function. - enum: ["stop", "length", "function_call", "content_filter"] - usage: - $ref: "#/components/schemas/CompletionUsage" + description: Number of completion tokens used over the course of the run. + prompt_tokens: + type: integer + description: Number of prompt tokens used over the course of the run. + total_tokens: + type: integer + description: Total number of tokens used (prompt + completion). required: - - id - - object - - created - - model - - choices - x-oaiMeta: - name: The chat completion object - group: chat - example: *chat_completion_example + - prompt_tokens + - completion_tokens + - total_tokens + nullable: true - ListPaginatedFineTuningJobsResponse: + RunStepCompletionUsage: type: object + description: Usage statistics related to the run step. This value will be `null` while the run step's status is `in_progress`. properties: - object: - type: string - data: - type: array - items: - $ref: "#/components/schemas/FineTuningJob" - has_more: - type: boolean + completion_tokens: + type: integer + description: Number of completion tokens used over the course of the run step. + prompt_tokens: + type: integer + description: Number of prompt tokens used over the course of the run step. + total_tokens: + type: integer + description: Total number of tokens used (prompt + completion). required: - - object - - data - - has_more + - prompt_tokens + - completion_tokens + - total_tokens + nullable: true - CreateChatCompletionStreamResponse: + AssistantObject: type: object - description: Represents a streamed chunk of a chat completion response returned by model, based on the provided input. + title: Assistant + description: Represents an `assistant` that can call the model and use tools. properties: id: + description: The identifier, which can be referenced in API endpoints. type: string - description: A unique identifier for the chat completion chunk. object: + description: The object type, which is always `assistant`. type: string - description: The object type, which is always `chat.completion.chunk`. - created: + enum: [assistant] + created_at: + description: The Unix timestamp (in seconds) for when the assistant was created. type: integer - description: The Unix timestamp (in seconds) of when the chat completion chunk was created. + name: + description: &assistant_name_param_description | + The name of the assistant. The maximum length is 256 characters. + type: string + maxLength: 256 + nullable: true + description: + description: &assistant_description_param_description | + The description of the assistant. The maximum length is 512 characters. + type: string + maxLength: 512 + nullable: true model: + description: *model_description type: string - description: The model to generate the completion. 
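+ # Illustrative only (token counts invented): the usage objects above (CompletionUsage,
+ # RunCompletionUsage, RunStepCompletionUsage) always satisfy
+ # prompt_tokens + completion_tokens == total_tokens, e.g.
+ #   {"prompt_tokens": 25, "completion_tokens": 32, "total_tokens": 57}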
- choices: + instructions: + description: &assistant_instructions_param_description | + The system instructions that the assistant uses. The maximum length is 32768 characters. + type: string + maxLength: 32768 + nullable: true + tools: + description: &assistant_tools_param_description | + A list of tools enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `retrieval`, or `function`. + default: [] type: array - description: A list of chat completion choices. Can be more than one if `n` is greater than 1. + maxItems: 128 items: - type: object - required: - - index - - delta - - finish_reason - properties: - index: - type: integer - description: The index of the choice in the list of choices. - delta: - $ref: "#/components/schemas/ChatCompletionStreamResponseDelta" - finish_reason: - type: string - description: *chat_completion_finish_reason_description - enum: ["stop", "length", "function_call"] - nullable: true + oneOf: + - $ref: "#/components/schemas/AssistantToolsCode" + - $ref: "#/components/schemas/AssistantToolsRetrieval" + - $ref: "#/components/schemas/AssistantToolsFunction" + x-oaiExpandable: true + file_ids: + description: &assistant_file_param_description | + A list of [file](/docs/api-reference/files) IDs attached to this assistant. There can be a maximum of 20 files attached to the assistant. Files are ordered by their creation date in ascending order. + default: [] + type: array + maxItems: 20 + items: + type: string + metadata: + description: &metadata_description | + Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. + type: object + x-oaiTypeLabel: map + nullable: true required: - id - object - - created + - created_at + - name + - description - model - - choices + - instructions + - tools + - file_ids + - metadata x-oaiMeta: - name: The chat completion chunk object - group: chat - example: *chat_completion_chunk_example + name: The assistant object + beta: true + example: *create_assistants_example - CreateEditRequest: + CreateAssistantRequest: type: object + additionalProperties: false properties: model: - description: ID of the model to use. You can use the `text-davinci-edit-001` or `code-davinci-edit-001` model with this endpoint. - example: "text-davinci-edit-001" + description: *model_description anyOf: - type: string - - type: string - enum: ["text-davinci-edit-001", "code-davinci-edit-001"] - x-oaiTypeLabel: string - input: - description: The input text to use as a starting point for the edit. + name: + description: *assistant_name_param_description type: string - default: "" nullable: true - example: "What day of the wek is it?" - instruction: - description: The instruction that tells the model how to edit the prompt. + maxLength: 256 + description: + description: *assistant_description_param_description type: string - example: "Fix the spelling mistakes." - n: - type: integer - minimum: 1 - maximum: 20 - default: 1 - example: 1 - nullable: true - description: How many edits to generate for the input and instruction.
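+ # Illustrative only (IDs, name, and instructions invented): an assistant object
+ # conforming to the schema above might look like:
+ #   {"id": "asst_abc123", "object": "assistant", "created_at": 1699009709,
+ #    "name": "Data analyst", "description": null, "model": "gpt-4",
+ #    "instructions": "You analyze CSV files.", "tools": [{"type": "code_interpreter"}],
+ #    "file_ids": [], "metadata": {}}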
- temperature: - type: number - minimum: 0 - maximum: 2 - default: 1 - example: 1 - nullable: true - description: *completions_temperature_description - top_p: - type: number - minimum: 0 - maximum: 1 - default: 1 - example: 1 nullable: true - description: *completions_top_p_description - required: - - model - - instruction - - CreateEditResponse: - type: object - title: Edit - deprecated: true - properties: - object: + maxLength: 512 + instructions: + description: *assistant_instructions_param_description type: string - description: The object type, which is always `edit`. - created: - type: integer - description: The Unix timestamp (in seconds) of when the edit was created. - choices: + nullable: true + maxLength: 32768 + tools: + description: *assistant_tools_param_description + default: [] type: array - description: A list of edit choices. Can be more than one if `n` is greater than 1. + maxItems: 128 items: - type: object - required: - - text - - index - - finish_reason - properties: - text: - type: string - description: The edited result. - index: - type: integer - description: The index of the choice in the list of choices. - finish_reason: - type: string - description: *completion_finish_reason_description - enum: ["stop", "length"] - usage: - $ref: "#/components/schemas/CompletionUsage" + oneOf: + - $ref: "#/components/schemas/AssistantToolsCode" + - $ref: "#/components/schemas/AssistantToolsRetrieval" + - $ref: "#/components/schemas/AssistantToolsFunction" + x-oaiExpandable: true + file_ids: + description: *assistant_file_param_description + default: [] + maxItems: 20 + type: array + items: + type: string + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true required: - - object - - created - - choices - - usage - x-oaiMeta: - name: The edit object - example: *edit_example + - model - CreateImageRequest: + ModifyAssistantRequest: type: object + additionalProperties: false properties: - prompt: - description: A text description of the desired image(s). The maximum length is 1000 characters. + model: + description: *model_description + anyOf: + - type: string + name: + description: *assistant_name_param_description type: string - example: "A cute baby sea otter" - n: &images_n - type: integer - minimum: 1 - maximum: 10 - default: 1 - example: 1 nullable: true - description: The number of images to generate. Must be between 1 and 10. - size: &images_size + maxLength: 256 + description: + description: *assistant_description_param_description type: string - enum: ["256x256", "512x512", "1024x1024"] - default: "1024x1024" - example: "1024x1024" nullable: true - description: The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. - response_format: &images_response_format + maxLength: 512 + instructions: + description: *assistant_instructions_param_description type: string - enum: ["url", "b64_json"] - default: "url" - example: "url" nullable: true - description: The format in which the generated images are returned. Must be one of `url` or `b64_json`. 
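+ # Illustrative only: a ListAssistantsResponse reuses the example IDs declared above:
+ #   {"object": "list", "data": [ ... ], "first_id": "asst_abc123",
+ #    "last_id": "asst_abc456", "has_more": false}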
- user: *end_user_param_configuration - required: - - prompt - - ImagesResponse: - properties: - created: - type: integer - data: + maxLength: 32768 + tools: + description: *assistant_tools_param_description + default: [] type: array + maxItems: 128 items: - $ref: "#/components/schemas/Image" - required: - - created - - data + oneOf: + - $ref: "#/components/schemas/AssistantToolsCode" + - $ref: "#/components/schemas/AssistantToolsRetrieval" + - $ref: "#/components/schemas/AssistantToolsFunction" + x-oaiExpandable: true + file_ids: + description: | + A list of [File](/docs/api-reference/files) IDs attached to this assistant. There can be a maximum of 20 files attached to the assistant. Files are ordered by their creation date in ascending order. If a file was previously attached to the list but does not show up in the list, it will be deleted from the assistant. + default: [] + type: array + maxItems: 20 + items: + type: string + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true - Image: + DeleteAssistantResponse: type: object - description: Represents the url or the content of an image generated by the OpenAI API. properties: - url: + id: type: string - description: The URL of the generated image, if `response_format` is `url` (default). - b64_json: + deleted: + type: boolean + object: type: string - description: The base64-encoded JSON of the generated image, if `response_format` is `b64_json`. - x-oaiMeta: - name: The image object - example: | - { - "url": "..." - } + enum: [assistant.deleted] + required: + - id + - object + - deleted - CreateImageEditRequest: + ListAssistantsResponse: type: object properties: - image: - description: The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask. + object: type: string - format: binary - mask: - description: An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where `image` should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as `image`. + example: "list" + data: + type: array + items: + $ref: "#/components/schemas/AssistantObject" + first_id: type: string - format: binary - prompt: - description: A text description of the desired image(s). The maximum length is 1000 characters. + example: "asst_abc123" + last_id: type: string - example: "A cute baby sea otter wearing a beret" - n: *images_n - size: *images_size - response_format: *images_response_format - user: *end_user_param_configuration + example: "asst_abc456" + has_more: + type: boolean + example: false required: - - prompt - - image + - object + - data + - first_id + - last_id + - has_more + x-oaiMeta: + name: List assistants response object + group: chat + example: *list_assistants_example - CreateImageVariationRequest: + AssistantToolsCode: type: object + title: Code interpreter tool properties: - image: - description: The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, and square. 
+ type: type: string - format: binary - n: *images_n - size: *images_size - response_format: *images_response_format - user: *end_user_param_configuration + description: "The type of tool being defined: `code_interpreter`" + enum: ["code_interpreter"] required: - - image + - type - CreateModerationRequest: + AssistantToolsRetrieval: type: object + title: Retrieval tool properties: - input: - description: The input text to classify - oneOf: - - type: string - default: "" - example: "I want to kill them." - - type: array - items: - type: string - default: "" - example: "I want to kill them." - model: - description: | - Two content moderations models are available: `text-moderation-stable` and `text-moderation-latest`. + type: + type: string + description: "The type of tool being defined: `retrieval`" + enum: ["retrieval"] + required: + - type - The default is `text-moderation-latest` which will be automatically upgraded over time. This ensures you are always using our most accurate model. If you use `text-moderation-stable`, we will provide advanced notice before updating the model. Accuracy of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`. - nullable: false - default: "text-moderation-latest" - example: "text-moderation-stable" - anyOf: - - type: string - - type: string - enum: ["text-moderation-latest", "text-moderation-stable"] - x-oaiTypeLabel: string + AssistantToolsFunction: + type: object + title: Function tool + properties: + type: + type: string + description: "The type of tool being defined: `function`" + enum: ["function"] + function: + $ref: "#/components/schemas/FunctionObject" required: - - input + - type + - function - CreateModerationResponse: + RunObject: type: object - description: Represents policy compliance report by OpenAI's content moderation model against a given input. + title: A run on a thread + description: Represents an execution run on a [thread](/docs/api-reference/threads). properties: id: + description: The identifier, which can be referenced in API endpoints. type: string - description: The unique identifier for the moderation request. + object: + description: The object type, which is always `thread.run`. + type: string + enum: ["thread.run"] + created_at: + description: The Unix timestamp (in seconds) for when the run was created. + type: integer + thread_id: + description: The ID of the [thread](/docs/api-reference/threads) that was executed on as a part of this run. + type: string + assistant_id: + description: The ID of the [assistant](/docs/api-reference/assistants) used for execution of this run. + type: string + status: + description: The status of the run, which can be either `queued`, `in_progress`, `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`, or `expired`. + type: string + enum: + [ + "queued", + "in_progress", + "requires_action", + "cancelling", + "cancelled", + "failed", + "completed", + "expired", + ] + required_action: + type: object + description: Details on the action required to continue the run. Will be `null` if no action is required. + nullable: true + properties: + type: + description: For now, this is always `submit_tool_outputs`. + type: string + enum: ["submit_tool_outputs"] + submit_tool_outputs: + type: object + description: Details on the tool outputs needed for this run to continue. + properties: + tool_calls: + type: array + description: A list of the relevant tool calls. 
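The `AssistantToolsCode`, `AssistantToolsRetrieval`, and `AssistantToolsFunction` schemas above are discriminated only by their `type` field, which is why the spec marks unions over them as `x-oaiExpandable`. As a minimal sketch of what a conforming `tools` array looks like on the wire, the C# below creates an assistant over raw HTTP; the `/v1/assistants` URL, the bearer auth scheme, and the `OpenAI-Beta: assistants=v1` header are taken from the public API reference and are assumptions here, since this excerpt only defines schemas.

```csharp
using System;
using System.Net.Http;
using System.Net.Http.Headers;
using System.Text;
using System.Threading.Tasks;

class CreateAssistantSketch
{
    static async Task Main()
    {
        using HttpClient http = new();
        // Assumed auth scheme and beta header, per the public API reference.
        http.DefaultRequestHeaders.Authorization = new AuthenticationHeaderValue(
            "Bearer", Environment.GetEnvironmentVariable("OPENAI_API_KEY"));
        http.DefaultRequestHeaders.Add("OpenAI-Beta", "assistants=v1");

        // Each entry matches one of the AssistantTools* schemas above:
        // AssistantToolsCode and AssistantToolsRetrieval in this case.
        const string body = """
        {
          "model": "gpt-4",
          "name": "Math Tutor",
          "tools": [
            { "type": "code_interpreter" },
            { "type": "retrieval" }
          ]
        }
        """;

        HttpResponseMessage response = await http.PostAsync(
            "https://api.openai.com/v1/assistants",
            new StringContent(body, Encoding.UTF8, "application/json"));
        Console.WriteLine(await response.Content.ReadAsStringAsync());
    }
}
```

The same discriminated `tools` shape recurs in the run-related request schemas that follow.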
+ items: + $ref: "#/components/schemas/RunToolCallObject" + required: + - tool_calls + required: + - type + - submit_tool_outputs + last_error: + type: object + description: The last error associated with this run. Will be `null` if there are no errors. + nullable: true + properties: + code: + type: string + description: One of `server_error` or `rate_limit_exceeded`. + enum: ["server_error", "rate_limit_exceeded"] + message: + type: string + description: A human-readable description of the error. + required: + - code + - message + expires_at: + description: The Unix timestamp (in seconds) for when the run will expire. + type: integer + started_at: + description: The Unix timestamp (in seconds) for when the run was started. + type: integer + nullable: true + cancelled_at: + description: The Unix timestamp (in seconds) for when the run was cancelled. + type: integer + nullable: true + failed_at: + description: The Unix timestamp (in seconds) for when the run failed. + type: integer + nullable: true + completed_at: + description: The Unix timestamp (in seconds) for when the run was completed. + type: integer + nullable: true model: + description: The model that the [assistant](/docs/api-reference/assistants) used for this run. type: string - description: The model used to generate the moderation results. - results: + instructions: + description: The instructions that the [assistant](/docs/api-reference/assistants) used for this run. + type: string + tools: + description: The list of tools that the [assistant](/docs/api-reference/assistants) used for this run. + default: [] type: array - description: A list of moderation objects. + maxItems: 20 items: - type: object - properties: - flagged: - type: boolean - description: Whether the content violates [OpenAI's usage policies](/policies/usage-policies). - categories: - type: object - description: A list of the categories, and whether they are flagged or not. - properties: - hate: - type: boolean - description: Content that expresses, incites, or promotes hate based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste. Hateful content aimed at non-protected groups (e.g., chess players) is harrassment. - hate/threatening: - type: boolean - description: Hateful content that also includes violence or serious harm towards the targeted group based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste. - harassment: - type: boolean - description: Content that expresses, incites, or promotes harassing language towards any target. - harassment/threatening: - type: boolean - description: Harassment content that also includes violence or serious harm towards any target. - self-harm: - type: boolean - description: Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting, and eating disorders. - self-harm/intent: - type: boolean - description: Content where the speaker expresses that they are engaging or intend to engage in acts of self-harm, such as suicide, cutting, and eating disorders. - self-harm/instructions: - type: boolean - description: Content that encourages performing acts of self-harm, such as suicide, cutting, and eating disorders, or that gives instructions or advice on how to commit such acts. - sexual: - type: boolean - description: Content meant to arouse sexual excitement, such as the description of sexual activity, or that promotes sexual services (excluding sex education and wellness). 
- sexual/minors: - type: boolean - description: Sexual content that includes an individual who is under 18 years old. - violence: - type: boolean - description: Content that depicts death, violence, or physical injury. - violence/graphic: - type: boolean - description: Content that depicts death, violence, or physical injury in graphic detail. - required: - - hate - - hate/threatening - - harassment - - harassment/threatening - - self-harm - - self-harm/intent - - self-harm/instructions - - sexual - - sexual/minors - - violence - - violence/graphic - category_scores: - type: object - description: A list of the categories along with their scores as predicted by model. - properties: - hate: - type: number - description: The score for the category 'hate'. - hate/threatening: - type: number - description: The score for the category 'hate/threatening'. - harassment: - type: number - description: The score for the category 'harassment'. - harassment/threatening: - type: number - description: The score for the category 'harassment/threatening'. - self-harm: - type: number - description: The score for the category 'self-harm'. - self-harm/intent: - type: number - description: The score for the category 'self-harm/intent'. - self-harm/instructions: - type: number - description: The score for the category 'self-harm/instructions'. - sexual: - type: number - description: The score for the category 'sexual'. - sexual/minors: - type: number - description: The score for the category 'sexual/minors'. - violence: - type: number - description: The score for the category 'violence'. - violence/graphic: - type: number - description: The score for the category 'violence/graphic'. - required: - - hate - - hate/threatening - - harassment - - harassment/threatening - - self-harm - - self-harm/intent - - self-harm/instructions - - sexual - - sexual/minors - - violence - - violence/graphic - required: - - flagged - - categories - - category_scores + oneOf: + - $ref: "#/components/schemas/AssistantToolsCode" + - $ref: "#/components/schemas/AssistantToolsRetrieval" + - $ref: "#/components/schemas/AssistantToolsFunction" + x-oaiExpandable: true + file_ids: + description: The list of [File](/docs/api-reference/files) IDs the [assistant](/docs/api-reference/assistants) used for this run. 
+ default: [] + type: array + items: + type: string + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true + usage: + $ref: "#/components/schemas/RunCompletionUsage" + required: + - id + - object + - created_at + - thread_id + - assistant_id + - status + - required_action + - last_error + - expires_at + - started_at + - cancelled_at + - failed_at + - completed_at + - model + - instructions + - tools + - file_ids + - metadata + - usage + x-oaiMeta: + name: The run object + beta: true + example: | + { + "id": "run_abc123", + "object": "thread.run", + "created_at": 1698107661, + "assistant_id": "asst_abc123", + "thread_id": "thread_abc123", + "status": "completed", + "started_at": 1699073476, + "expires_at": null, + "cancelled_at": null, + "failed_at": null, + "completed_at": 1699073498, + "last_error": null, + "model": "gpt-4", + "instructions": null, + "tools": [{"type": "retrieval"}, {"type": "code_interpreter"}], + "file_ids": [], + "metadata": {}, + "usage": { + "prompt_tokens": 123, + "completion_tokens": 456, + "total_tokens": 579 + } + } + CreateRunRequest: + type: object + additionalProperties: false + properties: + assistant_id: + description: The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run. + type: string + model: + description: The ID of the [Model](/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used. + type: string + nullable: true + instructions: + description: Overrides the [instructions](/docs/api-reference/assistants/createAssistant) of the assistant. This is useful for modifying the behavior on a per-run basis. + type: string + nullable: true + additional_instructions: + description: Appends additional instructions at the end of the instructions for the run. This is useful for modifying the behavior on a per-run basis without overriding other instructions. + type: string + nullable: true + tools: + description: Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. + nullable: true + type: array + maxItems: 20 + items: + oneOf: + - $ref: "#/components/schemas/AssistantToolsCode" + - $ref: "#/components/schemas/AssistantToolsRetrieval" + - $ref: "#/components/schemas/AssistantToolsFunction" + x-oaiExpandable: true + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true required: - - id - - model - - results - x-oaiMeta: - name: The moderation object - example: *moderation_example - - ListFilesResponse: + - assistant_id ListRunsResponse: type: object properties: object: type: string + example: "list" data: type: array items: - $ref: "#/components/schemas/OpenAIFile" + $ref: "#/components/schemas/RunObject" + first_id: + type: string + example: "run_abc123" + last_id: + type: string + example: "run_abc456" + has_more: + type: boolean + example: false required: - object - data - - CreateFileRequest: + - first_id + - last_id + - has_more ModifyRunRequest: type: object additionalProperties: false properties: - file: - description: | - Name of the [JSON Lines](https://jsonlines.readthedocs.io/en/latest/) file to be uploaded.
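The run object just defined is effectively a small state machine: `queued`, `in_progress`, and `cancelling` are transient, while `requires_action`, `cancelled`, `failed`, `completed`, and `expired` are the statuses a caller can act on, with `required_action` and `last_error` carrying the details. A hedged polling sketch in C# follows; the `GET /v1/threads/{thread_id}/runs/{run_id}` path and the request headers come from the public API reference rather than from anything in this excerpt.

```csharp
using System;
using System.Linq;
using System.Net.Http;
using System.Text.Json;
using System.Threading.Tasks;

static class RunPollingSketch
{
    // Statuses at which polling should stop and hand control back to the caller.
    static readonly string[] ActionableStates =
        { "requires_action", "cancelled", "failed", "completed", "expired" };

    // Assumes an HttpClient already carrying the Authorization and
    // OpenAI-Beta headers (see the earlier sketch).
    public static async Task<JsonDocument> WaitForRunAsync(
        HttpClient http, string threadId, string runId)
    {
        while (true)
        {
            string json = await http.GetStringAsync(
                $"https://api.openai.com/v1/threads/{threadId}/runs/{runId}");
            JsonDocument run = JsonDocument.Parse(json);
            string status = run.RootElement.GetProperty("status").GetString()!;
            if (ActionableStates.Contains(status))
                return run; // caller inspects required_action / last_error next
            run.Dispose();
            await Task.Delay(TimeSpan.FromSeconds(1)); // queued, in_progress, cancelling
        }
    }
}
```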
+ metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true + SubmitToolOutputsRunRequest: + type: object + additionalProperties: false + properties: + tool_outputs: + description: A list of tools for which the outputs are being submitted. + type: array + items: + type: object + properties: + tool_call_id: + type: string + description: The ID of the tool call in the `required_action` object within the run object the output is being submitted for. + output: + type: string + description: The output of the tool call to be submitted to continue the run. + required: + - tool_outputs - If the `purpose` is set to "fine-tune", the file will be used for fine-tuning. + RunToolCallObject: + type: object + description: A tool call object. + properties: + id: type: string - format: binary - purpose: - description: | - The intended purpose of the uploaded documents. + description: The ID of the tool call. This ID must be referenced when you submit the tool outputs using the [Submit tool outputs to run](/docs/api-reference/runs/submitToolOutputs) endpoint. + type: + type: string + description: The type of tool call the output is required for. For now, this is always `function`. + enum: ["function"] + function: + type: object + description: The function definition. + properties: + name: + type: string + description: The name of the function. + arguments: + type: string + description: The arguments that the model expects you to pass to the function. + required: + - name + - arguments + required: + - id + - type + - function - Use "fine-tune" for [fine-tuning](/docs/api-reference/fine-tuning). This allows us to validate the format of the uploaded file. + CreateThreadAndRunRequest: + type: object + additionalProperties: false + properties: + assistant_id: + description: The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run. type: string + thread: + $ref: "#/components/schemas/CreateThreadRequest" + description: If no thread is provided, an empty thread will be created. + model: + description: The ID of the [Model](/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used. + type: string + nullable: true + instructions: + description: Override the default system message of the assistant. This is useful for modifying the behavior on a per-run basis. + type: string + nullable: true + tools: + description: Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. + nullable: true + type: array + maxItems: 20 + items: + oneOf: + - $ref: "#/components/schemas/AssistantToolsCode" + - $ref: "#/components/schemas/AssistantToolsRetrieval" + - $ref: "#/components/schemas/AssistantToolsFunction" + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true required: - - file - - purpose + - assistant_id - DeleteFileResponse: + ThreadObject: type: object + title: Thread + description: Represents a thread that contains [messages](/docs/api-reference/messages). properties: id: + description: The identifier, which can be referenced in API endpoints. type: string object: + description: The object type, which is always `thread`. type: string - deleted: - type: boolean + enum: ["thread"] + created_at: + description: The Unix timestamp (in seconds) for when the thread was created.
+ type: integer + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true required: - id - object - - deleted + - created_at + - metadata + x-oaiMeta: + name: The thread object + beta: true + example: | + { + "id": "thread_abc123", + "object": "thread", + "created_at": 1698107661, + "metadata": {} + } - CreateFineTuningJobRequest: + CreateThreadRequest: type: object + additionalProperties: false properties: - training_file: - description: | - The ID of an uploaded file that contains training data. - - See [upload file](/docs/api-reference/files/upload) for how to upload a file. - - Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose `fine-tune`. - - See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. - type: string - example: "file-abc123" - validation_file: - description: | - The ID of an uploaded file that contains validation data. - - If you provide this file, the data is used to generate validation - metrics periodically during fine-tuning. These metrics can be viewed in - the fine-tuning results file. - The same data should not be present in both train and validation files. - - Your dataset must be formatted as a JSONL file. You must upload your file with the purpose `fine-tune`. - - See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. - type: string + messages: + description: A list of [messages](/docs/api-reference/messages) to start the thread with. + type: array + items: + $ref: "#/components/schemas/CreateMessageRequest" + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map nullable: true - example: "file-abc123" - model: - description: | - The name of the model to fine-tune. You can select one of the - [supported models](/docs/guides/fine-tuning/what-models-can-be-fine-tuned). - example: "gpt-3.5-turbo" - anyOf: - - type: string - - type: string - enum: ["babbage-002", "davinci-002", "gpt-3.5-turbo"] - x-oaiTypeLabel: string - hyperparameters: + + ModifyThreadRequest: + type: object + additionalProperties: false + properties: + metadata: + description: *metadata_description type: object - description: The hyperparameters used for the fine-tuning job. - properties: - n_epochs: - description: | - The number of epochs to train the model for. An epoch refers to one - full cycle through the training dataset. - oneOf: - - type: string - enum: [auto] - - type: integer - minimum: 1 - maximum: 50 - default: auto - suffix: - description: | - A string of up to 18 characters that will be added to your fine-tuned model name. + x-oaiTypeLabel: map + nullable: true - For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`. 
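When a run reports `requires_action`, each `RunToolCallObject` under `required_action.submit_tool_outputs.tool_calls` carries an `id` that must be echoed back as `tool_call_id` in a `SubmitToolOutputsRunRequest` (both schemas are defined above). Below is a sketch of building that request body; the `POST .../submit_tool_outputs` path is an assumption taken from the public API reference.

```csharp
using System;
using System.Net.Http;
using System.Text;
using System.Text.Json;
using System.Threading.Tasks;

static class SubmitToolOutputsSketch
{
    // Builds a SubmitToolOutputsRunRequest body: one output per tool call id.
    public static Task<HttpResponseMessage> SubmitAsync(
        HttpClient http, string threadId, string runId,
        (string ToolCallId, string Output)[] results)
    {
        string body = JsonSerializer.Serialize(new
        {
            tool_outputs = Array.ConvertAll(results, r => new
            {
                tool_call_id = r.ToolCallId,
                output = r.Output,
            }),
        });
        return http.PostAsync(
            $"https://api.openai.com/v1/threads/{threadId}/runs/{runId}/submit_tool_outputs",
            new StringContent(body, Encoding.UTF8, "application/json"));
    }
}
```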
+ DeleteThreadResponse: + type: object + properties: + id: type: string - minLength: 1 - maxLength: 40 - default: null - nullable: true + deleted: + type: boolean + object: + type: string + enum: [thread.deleted] required: - - training_file - - model + - id + - object + - deleted - ListFineTuningJobEventsResponse: - type: object + ListThreadsResponse: properties: object: type: string + example: "list" data: type: array items: - $ref: "#/components/schemas/FineTuningJobEvent" + $ref: "#/components/schemas/ThreadObject" + first_id: + type: string + example: "thread_abc123" + last_id: + type: string + example: "thread_abc456" + has_more: + type: boolean + example: false required: - object - data + - first_id + - last_id + - has_more - CreateFineTuneRequest: + MessageObject: type: object + title: The message object + description: Represents a message within a [thread](/docs/api-reference/threads). properties: - training_file: - description: | - The ID of an uploaded file that contains training data. - - See [upload file](/docs/api-reference/files/upload) for how to upload a file. - - Your dataset must be formatted as a JSONL file, where each training - example is a JSON object with the keys "prompt" and "completion". - Additionally, you must upload your file with the purpose `fine-tune`. - - See the [fine-tuning guide](/docs/guides/legacy-fine-tuning/creating-training-data) for more details. + id: + description: The identifier, which can be referenced in API endpoints. type: string - example: "file-abc123" - validation_file: - description: | - The ID of an uploaded file that contains validation data. - - If you provide this file, the data is used to generate validation - metrics periodically during fine-tuning. These metrics can be viewed in - the [fine-tuning results file](/docs/guides/legacy-fine-tuning/analyzing-your-fine-tuned-model). - Your train and validation data should be mutually exclusive. - - Your dataset must be formatted as a JSONL file, where each validation - example is a JSON object with the keys "prompt" and "completion". - Additionally, you must upload your file with the purpose `fine-tune`. - - See the [fine-tuning guide](/docs/guides/legacy-fine-tuning/creating-training-data) for more details. + object: + description: The object type, which is always `thread.message`. type: string - nullable: true - example: "file-abc123" - model: - description: | - The name of the base model to fine-tune. You can select one of "ada", - "babbage", "curie", "davinci", or a fine-tuned model created after 2022-04-21 and before 2023-08-22. - To learn more about these models, see the - [Models](/docs/models) documentation. - default: "curie" - example: "curie" - nullable: true - anyOf: - - type: string - - type: string - enum: ["ada", "babbage", "curie", "davinci"] - x-oaiTypeLabel: string - n_epochs: - description: | - The number of epochs to train the model for. An epoch refers to one - full cycle through the training dataset. - default: 4 - type: integer - nullable: true - batch_size: - description: | - The batch size to use for training. The batch size is the number of - training examples used to train a single forward and backward pass. - - By default, the batch size will be dynamically configured to be - ~0.2% of the number of examples in the training set, capped at 256 - - in general, we've found that larger batch sizes tend to work better - for larger datasets.
- default: null - type: integer - nullable: true - learning_rate_multiplier: - description: | - The learning rate multiplier to use for training. - The fine-tuning learning rate is the original learning rate used for - pretraining multiplied by this value. - - By default, the learning rate multiplier is the 0.05, 0.1, or 0.2 - depending on final `batch_size` (larger learning rates tend to - perform better with larger batch sizes). We recommend experimenting - with values in the range 0.02 to 0.2 to see what produces the best - results. - default: null - type: number - nullable: true - prompt_loss_weight: - description: | - The weight to use for loss on the prompt tokens. This controls how - much the model tries to learn to generate the prompt (as compared - to the completion which always has a weight of 1.0), and can add - a stabilizing effect to training when completions are short. - - If prompts are extremely long (relative to completions), it may make - sense to reduce this weight so as to avoid over-prioritizing - learning the prompt. - default: 0.01 - type: number - nullable: true - compute_classification_metrics: - description: | - If set, we calculate classification-specific metrics such as accuracy - and F-1 score using the validation set at the end of every epoch. - These metrics can be viewed in the [results file](/docs/guides/legacy-fine-tuning/analyzing-your-fine-tuned-model). - - In order to compute classification metrics, you must provide a - `validation_file`. Additionally, you must - specify `classification_n_classes` for multiclass classification or - `classification_positive_class` for binary classification. - type: boolean - default: false - nullable: true - classification_n_classes: - description: | - The number of classes in a classification task. - - This parameter is required for multiclass classification. + enum: ["thread.message"] + created_at: + description: The Unix timestamp (in seconds) for when the message was created. type: integer - default: null - nullable: true - classification_positive_class: - description: | - The positive class in binary classification. - - This parameter is needed to generate precision, recall, and F1 - metrics when doing binary classification. + thread_id: + description: The [thread](/docs/api-reference/threads) ID that this message belongs to. type: string - default: null - nullable: true - classification_betas: - description: | - If this is provided, we calculate F-beta scores at the specified - beta values. The F-beta score is a generalization of F-1 score. - This is only used for binary classification. - - With a beta of 1 (i.e. the F-1 score), precision and recall are - given the same weight. A larger beta score puts more weight on - recall and less on precision. A smaller beta score puts more weight - on precision and less on recall. + role: + description: The entity that produced the message. One of `user` or `assistant`. + type: string + enum: ["user", "assistant"] + content: + description: The content of the message in array of text and/or images. type: array items: - type: number - example: [0.6, 1, 1.5, 2] - default: null - nullable: true - suffix: - description: | - A string of up to 40 characters that will be added to your fine-tuned model name. - - For example, a `suffix` of "custom-model-name" would produce a model name like `ada:ft-your-org:custom-model-name-2022-02-15-04-21-04`. 
- type: string - minLength: 1 - maxLength: 40 - default: null + oneOf: + - $ref: "#/components/schemas/MessageContentImageFileObject" + - $ref: "#/components/schemas/MessageContentTextObject" + x-oaiExpandable: true + assistant_id: + description: If applicable, the ID of the [assistant](/docs/api-reference/assistants) that authored this message. + type: string nullable: true - required: - - training_file - - ListFineTunesResponse: - type: object - properties: - object: + run_id: + description: If applicable, the ID of the [run](/docs/api-reference/runs) associated with the authoring of this message. type: string - data: + nullable: true + file_ids: + description: A list of [file](/docs/api-reference/files) IDs that the assistant should use. Useful for tools like retrieval and code_interpreter that can access files. A maximum of 10 files can be attached to a message. + default: [] + maxItems: 10 type: array items: - $ref: "#/components/schemas/FineTune" + type: string + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true required: + - id - object - - data + - created_at + - thread_id + - role + - content + - assistant_id + - run_id + - file_ids + - metadata + x-oaiMeta: + name: The message object + beta: true + example: | + { + "id": "msg_abc123", + "object": "thread.message", + "created_at": 1698983503, + "thread_id": "thread_abc123", + "role": "assistant", + "content": [ + { + "type": "text", + "text": { + "value": "Hi! How can I help you today?", + "annotations": [] + } + } + ], + "file_ids": [], + "assistant_id": "asst_abc123", + "run_id": "run_abc123", + "metadata": {} + } - ListFineTuneEventsResponse: + CreateMessageRequest: type: object + additionalProperties: false + required: + - role + - content properties: - object: + role: type: string - data: + enum: ["user"] + description: The role of the entity that is creating the message. Currently only `user` is supported. + content: + type: string + minLength: 1 + maxLength: 32768 + description: The content of the message. + file_ids: + description: A list of [File](/docs/api-reference/files) IDs that the message should use. There can be a maximum of 10 files attached to a message. Useful for tools like `retrieval` and `code_interpreter` that can access and use files. + default: [] type: array + minItems: 1 + maxItems: 10 items: - $ref: "#/components/schemas/FineTuneEvent" - required: - - object - - data + type: string + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true - CreateEmbeddingRequest: + ModifyMessageRequest: type: object additionalProperties: false properties: - model: - description: *model_description - example: "text-embedding-ada-002" - anyOf: - - type: string - - type: string - enum: ["text-embedding-ada-002"] - x-oaiTypeLabel: string - input: - description: | - Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. Each input must not exceed the max input tokens for the model (8191 tokens for `text-embedding-ada-002`) and cannot be an empty string. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) for counting tokens. - example: "The quick brown fox jumped over the lazy dog" - oneOf: - - type: string - default: "" - example: "This is a test." - - type: array - items: - type: string - default: "" - example: "This is a test." 
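A message's `content` is an array of parts discriminated by `type`, referencing the `MessageContentImageFileObject` and `MessageContentTextObject` schemas defined next. A small C# sketch of consuming that union from raw JSON:

```csharp
using System;
using System.Text.Json;

static class MessageContentSketch
{
    // Walks MessageObject.content, branching on the "type" discriminator.
    public static void Print(JsonElement message)
    {
        foreach (JsonElement part in message.GetProperty("content").EnumerateArray())
        {
            switch (part.GetProperty("type").GetString())
            {
                case "text":
                    // MessageContentTextObject: text.value plus annotations
                    Console.WriteLine(part.GetProperty("text").GetProperty("value").GetString());
                    break;
                case "image_file":
                    // MessageContentImageFileObject: an uploaded File id
                    Console.WriteLine($"[image: {part.GetProperty("image_file").GetProperty("file_id").GetString()}]");
                    break;
            }
        }
    }
}
```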
- - type: array - minItems: 1 - items: - type: integer - example: "[1212, 318, 257, 1332, 13]" - - type: array - minItems: 1 - items: - type: array - minItems: 1 - items: - type: integer - example: "[[1212, 318, 257, 1332, 13]]" - user: *end_user_param_configuration - required: - - model - - input + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true - CreateEmbeddingResponse: + DeleteMessageResponse: type: object properties: + id: + type: string + deleted: + type: boolean object: type: string - description: The object type, which is always "embedding". - model: + enum: [thread.message.deleted] + required: + - id + - object + - deleted + + ListMessagesResponse: + properties: + object: type: string - description: The name of the model used to generate the embedding. + example: "list" data: type: array - description: The list of embeddings generated by the model. items: - $ref: "#/components/schemas/Embedding" - usage: - type: object - description: The usage information for the request. - properties: - prompt_tokens: - type: integer - description: The number of tokens used by the prompt. - total_tokens: - type: integer - description: The total number of tokens used by the request. - required: - - prompt_tokens - - total_tokens + $ref: "#/components/schemas/MessageObject" + first_id: + type: string + example: "msg_abc123" + last_id: + type: string + example: "msg_abc123" + has_more: + type: boolean + example: false required: - object - - model - data - - usage + - first_id + - last_id + - has_more - CreateTranscriptionRequest: + MessageContentImageFileObject: + title: Image file type: object - additionalProperties: false + description: References an image [File](/docs/api-reference/files) in the content of a message. properties: - file: - description: | - The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. - type: string - x-oaiTypeLabel: file - format: binary - model: - description: | - ID of the model to use. Only `whisper-1` is currently available. - example: whisper-1 - anyOf: - - type: string - - type: string - enum: ["whisper-1"] - x-oaiTypeLabel: string - prompt: - description: | - An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language. - type: string - response_format: - description: | - The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt. - type: string - enum: - - json - - text - - srt - - verbose_json - - vtt - default: json - temperature: - description: | - The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit. - type: number - default: 0 - language: - description: | - The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency. + type: + description: Always `image_file`. type: string + enum: ["image_file"] + image_file: + type: object + properties: + file_id: + description: The [File](/docs/api-reference/files) ID of the image in the message content. 
+ type: string + required: + - file_id required: - - file - - model + - type + - image_file - # Note: This does not currently support the non-default response format types. - CreateTranscriptionResponse: + MessageContentTextObject: + title: Text type: object + description: The text content that is part of a message. properties: - text: + type: + description: Always `text`. type: string + enum: ["text"] + text: + type: object + properties: + value: + description: The data that makes up the text. + type: string + annotations: + type: array + items: + oneOf: + - $ref: "#/components/schemas/MessageContentTextAnnotationsFileCitationObject" + - $ref: "#/components/schemas/MessageContentTextAnnotationsFilePathObject" + x-oaiExpandable: true + required: + - value + - annotations required: + - type - text - CreateTranslationRequest: + MessageContentTextAnnotationsFileCitationObject: + title: File citation type: object - additionalProperties: false + description: A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. Generated when the assistant uses the "retrieval" tool to search files. properties: - file: - description: | - The audio file object (not file name) translate, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. - type: string - x-oaiTypeLabel: file - format: binary - model: - description: | - ID of the model to use. Only `whisper-1` is currently available. - example: whisper-1 - anyOf: - - type: string - - type: string - enum: ["whisper-1"] - x-oaiTypeLabel: string - prompt: - description: | - An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English. - type: string - response_format: - description: | - The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt. + type: + description: Always `file_citation`. type: string - default: json - temperature: - description: | - The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit. - type: number - default: 0 - required: - - file - - model - - # Note: This does not currently support the non-default response format types. - CreateTranslationResponse: - type: object - properties: + enum: ["file_citation"] text: + description: The text in the message content that needs to be replaced. type: string + file_citation: + type: object + properties: + file_id: + description: The ID of the specific File the citation is from. + type: string + quote: + description: The specific quote in the file. + type: string + required: + - file_id + - quote + start_index: + type: integer + minimum: 0 + end_index: + type: integer + minimum: 0 required: + - type - text + - file_citation + - start_index + - end_index - Model: - title: Model - description: Describes an OpenAI model offering that can be used with the API. + MessageContentTextAnnotationsFilePathObject: + title: File path + type: object + description: A URL for the file that's generated when the assistant used the `code_interpreter` tool to generate a file. properties: - id: + type: + description: Always `file_path`. 
type: string - description: The model identifier, which can be referenced in the API endpoints. - object: + enum: ["file_path"] + text: + description: The text in the message content that needs to be replaced. type: string - description: The object type, which is always "model". - created: + file_path: + type: object + properties: + file_id: + description: The ID of the file that was generated. + type: string + required: + - file_id + start_index: type: integer - description: The Unix timestamp (in seconds) when the model was created. - owned_by: - type: string - description: The organization that owns the model. - required: - - id - - object - - created - - owned_by - x-oaiMeta: - name: The model object - example: *retrieve_model_response + minimum: 0 + end_index: + type: integer + minimum: 0 + required: + - type + - text + - file_path + - start_index + - end_index - OpenAIFile: - title: OpenAIFile + RunStepObject: + type: object + title: Run steps description: | - The `File` object represents a document that has been uploaded to OpenAI. + Represents a step in execution of a run. properties: id: + description: The identifier of the run step, which can be referenced in API endpoints. type: string - description: The file identifier, which can be referenced in the API endpoints. object: + description: The object type, which is always `thread.run.step`. type: string - description: The object type, which is always "file". - bytes: - type: integer - description: The size of the file in bytes. + enum: ["thread.run.step"] created_at: + description: The Unix timestamp (in seconds) for when the run step was created. type: integer - description: The Unix timestamp (in seconds) for when the file was created. - filename: + assistant_id: + description: The ID of the [assistant](/docs/api-reference/assistants) associated with the run step. type: string - description: The name of the file. - purpose: + thread_id: + description: The ID of the [thread](/docs/api-reference/threads) that was run. type: string - description: The intended purpose of the file. Currently, only "fine-tune" is supported. - status: + run_id: + description: The ID of the [run](/docs/api-reference/runs) that this run step is a part of. type: string - description: The current status of the file, which can be either `uploaded`, `processed`, `pending`, `error`, `deleting` or `deleted`. - status_details: + type: + description: The type of run step, which can be either `message_creation` or `tool_calls`. type: string + enum: ["message_creation", "tool_calls"] + status: + description: The status of the run step, which can be either `in_progress`, `cancelled`, `failed`, `completed`, or `expired`. + type: string + enum: ["in_progress", "cancelled", "failed", "completed", "expired"] + step_details: + type: object + description: The details of the run step. + oneOf: + - $ref: "#/components/schemas/RunStepDetailsMessageCreationObject" + - $ref: "#/components/schemas/RunStepDetailsToolCallsObject" + x-oaiExpandable: true + last_error: + type: object + description: The last error associated with this run step. Will be `null` if there are no errors. nullable: true - description: | - Additional details about the status of the file. If the file is in the `error` state, this will include a message describing the error. + properties: + code: + type: string + description: One of `server_error` or `rate_limit_exceeded`. + enum: ["server_error", "rate_limit_exceeded"] + message: + type: string + description: A human-readable description of the error. 
+ required: + - code + - message + expired_at: + description: The Unix timestamp (in seconds) for when the run step expired. A step is considered expired if the parent run is expired. + type: integer + nullable: true + cancelled_at: + description: The Unix timestamp (in seconds) for when the run step was cancelled. + type: integer + nullable: true + failed_at: + description: The Unix timestamp (in seconds) for when the run step failed. + type: integer + nullable: true + completed_at: + description: The Unix timestamp (in seconds) for when the run step completed. + type: integer + nullable: true + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true + usage: + $ref: "#/components/schemas/RunStepCompletionUsage" required: - id - object - - bytes - created_at - - filename - - purpose - - format + - assistant_id + - thread_id + - run_id + - type + - status + - step_details + - last_error + - expired_at + - cancelled_at + - failed_at + - completed_at + - metadata + - usage x-oaiMeta: - name: The file object - example: | - { - "id": "file-abc123", - "object": "file", - "bytes": 120000, - "created_at": 1677610602, - "filename": "my_file.jsonl", - "purpose": "fine-tune", - "status": "uploaded", - "status_details": null - } - Embedding: - type: object - description: | - Represents an embedding vector returned by embedding endpoint. + name: The run step object + beta: true + example: *run_step_object_example + + ListRunStepsResponse: properties: - index: - type: integer - description: The index of the embedding in the list of embeddings. object: type: string - description: The object type, which is always "embedding". - embedding: + example: "list" + data: type: array - description: | - The embedding vector, which is a list of floats. The length of vector depends on the model as listed in the [embedding guide](/docs/guides/embeddings). items: - type: number + $ref: "#/components/schemas/RunStepObject" + first_id: + type: string + example: "step_abc123" + last_id: + type: string + example: "step_abc456" + has_more: + type: boolean + example: false required: - - index - object - - embedding - x-oaiMeta: - name: The embedding object - example: | - { - "object": "embedding", - "embedding": [ - 0.0023064255, - -0.009327292, - .... (1536 floats total for ada-002) - -0.0028842222, - ], - "index": 0 - } + - data + - first_id + - last_id + - has_more - FineTuningJob: - title: FineTuningJob - description: | - The `fine_tuning.job` object represents a fine-tuning job that has been created through the API. + RunStepDetailsMessageCreationObject: + title: Message creation + type: object + description: Details of the message creation by the run step. properties: - id: + type: + description: Always `message_creation`. type: string - description: The object identifier, which can be referenced in the API endpoints. - object: + enum: ["message_creation"] + message_creation: + type: object + properties: + message_id: + type: string + description: The ID of the message that was created by this run step. + required: + - message_id + required: + - type + - message_creation + + RunStepDetailsToolCallsObject: + title: Tool calls + type: object + description: Details of the tool call. + properties: + type: + description: Always `tool_calls`. type: string - description: The object type, which is always "fine_tuning.job". - created_at: - type: integer - description: The Unix timestamp (in seconds) for when the fine-tuning job was created. 
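Every `List*Response` in this file shares the same cursor shape: `data` plus `first_id`, `last_id`, and `has_more`. The sketch below pages through run steps using that shape; the `after` query parameter and the steps path itself come from the public API reference and are assumptions here, since the paths section is not part of this excerpt.

```csharp
using System;
using System.Net.Http;
using System.Text.Json;
using System.Threading.Tasks;

static class RunStepPagingSketch
{
    // Assumes an HttpClient already carrying the required headers.
    public static async Task ListAllStepsAsync(HttpClient http, string threadId, string runId)
    {
        string? after = null;
        bool hasMore = true;
        while (hasMore)
        {
            string url = $"https://api.openai.com/v1/threads/{threadId}/runs/{runId}/steps"
                + (after is null ? "" : $"?after={after}");
            using JsonDocument page = JsonDocument.Parse(await http.GetStringAsync(url));
            foreach (JsonElement step in page.RootElement.GetProperty("data").EnumerateArray())
                Console.WriteLine($"{step.GetProperty("id").GetString()}: {step.GetProperty("type").GetString()}");
            hasMore = page.RootElement.GetProperty("has_more").GetBoolean();
            after = page.RootElement.GetProperty("last_id").GetString();
        }
    }
}
```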
- finished_at: - type: integer - nullable: true - description: The Unix timestamp (in seconds) for when the fine-tuning job was finished. The value will be null if the fine-tuning job is still running. - model: + enum: ["tool_calls"] + tool_calls: + type: array + description: | + An array of tool calls the run step was involved in. These can be associated with one of three types of tools: `code_interpreter`, `retrieval`, or `function`. + items: + oneOf: + - $ref: "#/components/schemas/RunStepDetailsToolCallsCodeObject" + - $ref: "#/components/schemas/RunStepDetailsToolCallsRetrievalObject" + - $ref: "#/components/schemas/RunStepDetailsToolCallsFunctionObject" + x-oaiExpandable: true + required: + - type + - tool_calls + + RunStepDetailsToolCallsCodeObject: + title: Code interpreter tool call + type: object + description: Details of the Code Interpreter tool call the run step was involved in. + properties: + id: type: string - description: The base model that is being fine-tuned. - fine_tuned_model: + description: The ID of the tool call. + type: type: string - nullable: true - description: The name of the fine-tuned model that is being created. The value will be null if the fine-tuning job is still running. - organization_id: + description: The type of tool call. This is always going to be `code_interpreter` for this type of tool call. + enum: ["code_interpreter"] + code_interpreter: + type: object + description: The Code Interpreter tool call definition. + required: + - input + - outputs + properties: + input: + type: string + description: The input to the Code Interpreter tool call. + outputs: + type: array + description: The outputs from the Code Interpreter tool call. Code Interpreter can output one or more items, including text (`logs`) or images (`image`). Each of these are represented by a different object type. + items: + type: object + oneOf: + - $ref: "#/components/schemas/RunStepDetailsToolCallsCodeOutputLogsObject" + - $ref: "#/components/schemas/RunStepDetailsToolCallsCodeOutputImageObject" + x-oaiExpandable: true + required: + - id + - type + - code_interpreter + + RunStepDetailsToolCallsCodeOutputLogsObject: + title: Code interpreter log output + type: object + description: Text output from the Code Interpreter tool call as part of a run step. + properties: + type: + description: Always `logs`. type: string - description: The organization that owns the fine-tuning job. - status: + enum: ["logs"] + logs: type: string - description: The current status of the fine-tuning job, which can be either `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`. - hyperparameters: + description: The text output from the Code Interpreter tool call. + required: + - type + - logs + + RunStepDetailsToolCallsCodeOutputImageObject: + title: Code interpreter image output + type: object + properties: + type: + description: Always `image`. + type: string + enum: ["image"] + image: type: object - description: The hyperparameters used for the fine-tuning job. See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. properties: - n_epochs: - oneOf: - - type: string - enum: [auto] - - type: integer - minimum: 1 - maximum: 50 - default: auto - description: - The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset. - - "auto" decides the optimal number of epochs based on the size of the dataset. If setting the number manually, we support any number between 1 and 50 epochs. 
+ file_id: + description: The [file](/docs/api-reference/files) ID of the image. + type: string required: - - n_epochs - training_file: + - file_id + required: + - type + - image + + RunStepDetailsToolCallsRetrievalObject: + title: Retrieval tool call + type: object + properties: + id: type: string - description: The file ID used for training. You can retrieve the training data with the [Files API](/docs/api-reference/files/retrieve-contents). - validation_file: + description: The ID of the tool call object. + type: type: string - nullable: true - description: The file ID used for validation. You can retrieve the validation results with the [Files API](/docs/api-reference/files/retrieve-contents). - result_files: - type: array - description: The compiled results file ID(s) for the fine-tuning job. You can retrieve the results with the [Files API](/docs/api-reference/files/retrieve-contents). - items: - type: string - example: file-abc123 - trained_tokens: - type: integer - nullable: true - description: The total number of billable tokens processed by this fine-tuning job. The value will be null if the fine-tuning job is still running. - error: + description: The type of tool call. This is always going to be `retrieval` for this type of tool call. + enum: ["retrieval"] + retrieval: type: object - nullable: true - description: For fine-tuning jobs that have `failed`, this will contain more information on the cause of the failure. + description: For now, this is always going to be an empty object. + x-oaiTypeLabel: map + required: + - id + - type + - retrieval + + RunStepDetailsToolCallsFunctionObject: + type: object + title: Function tool call + properties: + id: + type: string + description: The ID of the tool call object. + type: + type: string + description: The type of tool call. This is always going to be `function` for this type of tool call. + enum: ["function"] + function: + type: object + description: The definition of the function that was called. properties: - message: + name: type: string - description: A human-readable error message. - code: + description: The name of the function. + arguments: type: string - description: A machine-readable error code. - param: + description: The arguments passed to the function. + output: type: string - description: The parameter that was invalid, usually `training_file` or `validation_file`. This field will be null if the failure was not parameter-specific. + description: The output of the function. This will be `null` if the outputs have not been [submitted](/docs/api-reference/runs/submitToolOutputs) yet. nullable: true required: - - message - - code - - param + - name + - arguments + - output required: - id - - object - - created_at - - finished_at - - model - - fine_tuned_model - - organization_id - - status - - hyperparameters - - training_file - - validation_file - - result_files - - trained_tokens - - error - x-oaiMeta: - name: The fine-tuning job object - example: *fine_tuning_example + - type + - function - FineTuningEvent: - title: FineTuningEvent + AssistantFileObject: + type: object + title: Assistant files + description: A list of [Files](/docs/api-reference/files) attached to an `assistant`. properties: + id: + description: The identifier, which can be referenced in API endpoints. + type: string object: + description: The object type, which is always `assistant.file`. type: string + enum: [assistant.file] created_at: + description: The Unix timestamp (in seconds) for when the assistant file was created. 
type: integer - level: - type: string - message: + assistant_id: + description: The assistant ID that the file is attached to. type: string - data: - oneOf: - - type: string - default: none - enum: [none, string] - type: - oneOf: - - type: string - default: none - enum: ["message", "metrics"] required: + - id - object - - created_at - - level - - message - x-oiMeta: - name: The fine-tuning event object + - created_at + - assistant_id + x-oaiMeta: + name: The assistant file object + beta: true example: | { - "object": "fine_tuning.job.event", - "created_at": "1689376978", - "level": "info" | "warn" | "error", - "message": "", - "data": null | JSON, - "type": "message"| "metrics" + "id": "file-abc123", + "object": "assistant.file", + "created_at": 1699055364, + "assistant_id": "asst_abc123" } - FineTune: - title: FineTune - deprecated: true - description: | - The `FineTune` object represents a legacy fine-tune job that has been created through the API. + CreateAssistantFileRequest: + type: object + additionalProperties: false + properties: + file_id: + description: A [File](/docs/api-reference/files) ID (with `purpose="assistants"`) that the assistant should use. Useful for tools like `retrieval` and `code_interpreter` that can access files. + type: string + required: + - file_id + + DeleteAssistantFileResponse: + type: object + description: Deletes the association between the assistant and the file, but does not delete the [File](/docs/api-reference/files) object itself. properties: id: type: string - description: The object identifier, which can be referenced in the API endpoints. + deleted: + type: boolean object: type: string - description: The object type, which is always "fine-tune". - created_at: - type: integer - description: The Unix timestamp (in seconds) for when the fine-tuning job was created. - updated_at: - type: integer - description: The Unix timestamp (in seconds) for when the fine-tuning job was last updated. - model: - type: string - description: The base model that is being fine-tuned. - fine_tuned_model: - type: string - nullable: true - description: The name of the fine-tuned model that is being created. - organization_id: - type: string - description: The organization that owns the fine-tuning job. - status: + enum: [assistant.file.deleted] + required: + - id + - object + - deleted + ListAssistantFilesResponse: + properties: + object: type: string - description: The current status of the fine-tuning job, which can be either `created`, `running`, `succeeded`, `failed`, or `cancelled`. - hyperparams: - type: object - description: The hyperparameters used for the fine-tuning job. See the [fine-tuning guide](/docs/guides/legacy-fine-tuning/hyperparameters) for more details. - properties: - n_epochs: - type: integer - description: | - The number of epochs to train the model for. An epoch refers to one - full cycle through the training dataset. - batch_size: - type: integer - description: | - The batch size to use for training. The batch size is the number of - training examples used to train a single forward and backward pass. - prompt_loss_weight: - type: number - description: | - The weight to use for loss on the prompt tokens. - learning_rate_multiplier: - type: number - description: | - The learning rate multiplier to use for training. - compute_classification_metrics: - type: boolean - description: | - The classification metrics to compute using the validation dataset at the end of every epoch. 
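`CreateAssistantFileRequest` is a single-field body, so attaching a previously uploaded File (one created with `purpose="assistants"`) is a single small POST. A sketch, assuming the `/v1/assistants/{assistant_id}/files` path from the public API reference:

```csharp
using System.Net.Http;
using System.Text;
using System.Text.Json;
using System.Threading.Tasks;

static class AssistantFileSketch
{
    // Serializes a CreateAssistantFileRequest and posts it; on success the
    // response body deserializes to an AssistantFileObject.
    public static Task<HttpResponseMessage> AttachAsync(
        HttpClient http, string assistantId, string fileId)
    {
        string body = JsonSerializer.Serialize(new { file_id = fileId });
        return http.PostAsync(
            $"https://api.openai.com/v1/assistants/{assistantId}/files",
            new StringContent(body, Encoding.UTF8, "application/json"));
    }
}
```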
- classification_positive_class: - type: string - description: | - The positive class to use for computing classification metrics. - classification_n_classes: - type: integer - description: | - The number of classes to use for computing classification metrics. - required: - - n_epochs - - batch_size - - prompt_loss_weight - - learning_rate_multiplier - training_files: - type: array - description: The list of files used for training. - items: - $ref: "#/components/schemas/OpenAIFile" - validation_files: - type: array - description: The list of files used for validation. - items: - $ref: "#/components/schemas/OpenAIFile" - result_files: - type: array - description: The compiled results files for the fine-tuning job. - items: - $ref: "#/components/schemas/OpenAIFile" - events: + example: "list" + data: type: array - description: The list of events that have been observed in the lifecycle of the FineTune job. items: - $ref: "#/components/schemas/FineTuneEvent" + $ref: "#/components/schemas/AssistantFileObject" + first_id: + type: string + example: "file-abc123" + last_id: + type: string + example: "file-abc456" + has_more: + type: boolean + example: false required: - - id - object - - created_at - - updated_at - - model - - fine_tuned_model - - organization_id - - status - - hyperparams - - training_files - - validation_files - - result_files - x-oaiMeta: - name: The fine-tune object - example: *fine_tune_example + - data + - first_id + - last_id + - has_more - FineTuningJobEvent: - title: FineTuningJobEvent + MessageFileObject: + type: object + title: Message files + description: A list of files attached to a `message`. properties: id: + description: The identifier, which can be referenced in API endpoints. type: string object: + description: The object type, which is always `thread.message.file`. type: string + enum: ["thread.message.file"] created_at: + description: The Unix timestamp (in seconds) for when the message file was created. type: integer - level: - type: string - enum: ["info", "warn", "error"] - message: + message_id: + description: The ID of the [message](/docs/api-reference/messages) that the [File](/docs/api-reference/files) is attached to. type: string required: - id - object - created_at - - level - - message - x-oiMeta: - name: The fine-tuning job event object + - message_id + x-oaiMeta: + name: The message file object + beta: true example: | { - "object": "event", - "id": "ftevent-abc123" - "created_at": 1677610602, - "level": "info", - "message": "Created fine-tuning job" + "id": "file-abc123", + "object": "thread.message.file", + "created_at": 1698107661, + "message_id": "msg_abc123", + "file_id": "file-abc123" + } - FineTuneEvent: - title: FineTuneEvent + ListMessageFilesResponse: properties: object: type: string - created_at: - type: integer - level: + example: "list" + data: + type: array + items: + $ref: "#/components/schemas/MessageFileObject" + first_id: type: string - message: + example: "file-abc123" + last_id: type: string + example: "file-abc456" + has_more: + type: boolean + example: false required: - object - - created_at - - level - - message - x-oiMeta: - name: The fine-tune event object - example: | - { - "object": "event", - "created_at": 1677610602, - "level": "info", - "message": "Created fine-tune job" - } - CompletionUsage: - type: object - description: Usage statistics for the completion request. - properties: - prompt_tokens: - type: integer - description: Number of tokens in the prompt.
- completion_tokens: - type: integer - description: Number of tokens in the generated completion. - total_tokens: - type: integer - description: Total number of tokens used in the request (prompt + completion). - required: - - prompt_tokens - - completion_tokens - - total_tokens + - data + - items + - first_id + - last_id + - has_more security: - ApiKeyAuth: [] - x-oaiMeta: groups: # > General Notes @@ -4193,10 +8697,13 @@ x-oaiMeta: - id: audio title: Audio description: | - Learn how to turn audio into text. + Learn how to turn audio into text or text into audio. Related guide: [Speech to text](/docs/guides/speech-to-text) sections: + - type: endpoint + key: createSpeech + path: createSpeech - type: endpoint key: createTranscription path: createTranscription @@ -4208,31 +8715,17 @@ x-oaiMeta: description: | Given a list of messages comprising a conversation, the model will return a response. - Related guide: [Chat completions](/docs/guides/gpt) + Related guide: [Chat Completions](/docs/guides/text-generation) sections: + - type: endpoint + key: createChatCompletion + path: create - type: object key: CreateChatCompletionResponse path: object - type: object key: CreateChatCompletionStreamResponse path: streaming - - type: endpoint - key: createChatCompletion - path: create - - id: completions - title: Completions - legacy: true - description: | - Given a prompt, the model will return one or more predicted completions, and can also return the probabilities of alternative tokens at each position. We recommend most users use our Chat completions API. [Learn more](/docs/deprecations/2023-07-06-gpt-and-embeddings) - - Related guide: [Legacy Completions](/docs/guides/gpt/completions-api) - sections: - - type: object - key: CreateCompletionResponse - path: object - - type: endpoint - key: createCompletion - path: create - id: embeddings title: Embeddings description: | @@ -4240,59 +8733,63 @@ x-oaiMeta: Related guide: [Embeddings](/docs/guides/embeddings) sections: - - type: object - key: Embedding - path: object - type: endpoint key: createEmbedding path: create + - type: object + key: Embedding + path: object - id: fine-tuning title: Fine-tuning description: | Manage fine-tuning jobs to tailor a model to your specific training data. - Related guide: [fine-tune models](/docs/guides/fine-tuning) + Related guide: [Fine-tune models](/docs/guides/fine-tuning) sections: - - type: object - path: object - key: FineTuningJob - type: endpoint key: createFineTuningJob path: create - type: endpoint key: listPaginatedFineTuningJobs + path: list + - type: endpoint + key: listFineTuningEvents + path: list-events - type: endpoint key: retrieveFineTuningJob path: retrieve - type: endpoint key: cancelFineTuningJob path: cancel - - type: endpoint - key: listFineTuningEvents - path: list-events + - type: object + key: FineTuningJob + path: object + - type: object + key: FineTuningJobEvent + path: event-object - id: files title: Files description: | - Files are used to upload documents that can be used with features like [fine-tuning](/docs/api-reference/fine-tuning). + Files are used to upload documents that can be used with features like [Assistants](/docs/api-reference/assistants) and [Fine-tuning](/docs/api-reference/fine-tuning). 
sections: - - type: object - key: OpenAIFile - path: object - - type: endpoint - key: listFiles - path: list - type: endpoint key: createFile path: create - type: endpoint - key: deleteFile - path: delete + key: listFiles + path: list - type: endpoint key: retrieveFile path: retrieve + - type: endpoint + key: deleteFile + path: delete - type: endpoint key: downloadFile path: retrieve-contents + - type: object + key: OpenAIFile + path: object - id: images title: Images description: | @@ -4300,9 +8797,6 @@ x-oaiMeta: Related guide: [Image generation](/docs/guides/images) sections: - - type: object - key: Image - path: object - type: endpoint key: createImage path: create @@ -4312,14 +8806,14 @@ x-oaiMeta: - type: endpoint key: createImageVariation path: createVariation + - type: object + key: Image + path: object - id: models title: Models description: | List and describe the various models available in the API. You can refer to the [Models](/docs/models) documentation to understand what models are available and the differences between them. sections: - - type: object - key: Model - path: object - type: endpoint key: listModels path: list @@ -4329,6 +8823,9 @@ x-oaiMeta: - type: endpoint key: deleteModel path: delete + - type: object + key: Model + path: object - id: moderations title: Moderations description: | @@ -4336,47 +8833,158 @@ x-oaiMeta: Related guide: [Moderations](/docs/guides/moderation) sections: + - type: endpoint + key: createModeration + path: create - type: object key: CreateModerationResponse path: object + - id: assistants + title: Assistants + beta: true + description: | + Build assistants that can call models and use tools to perform tasks. + + [Get started with the Assistants API](/docs/assistants) + sections: - type: endpoint - key: createModeration - path: create - - id: fine-tunes - title: Fine-tunes - deprecated: true + key: createAssistant + path: createAssistant + - type: endpoint + key: createAssistantFile + path: createAssistantFile + - type: endpoint + key: listAssistants + path: listAssistants + - type: endpoint + key: listAssistantFiles + path: listAssistantFiles + - type: endpoint + key: getAssistant + path: getAssistant + - type: endpoint + key: getAssistantFile + path: getAssistantFile + - type: endpoint + key: modifyAssistant + path: modifyAssistant + - type: endpoint + key: deleteAssistant + path: deleteAssistant + - type: endpoint + key: deleteAssistantFile + path: deleteAssistantFile + - type: object + key: AssistantObject + path: object + - type: object + key: AssistantFileObject + path: file-object + - id: threads + title: Threads + beta: true description: | - Manage legacy fine-tuning jobs to tailor a model to your specific training data. + Create threads that assistants can interact with. 
- We recommend transitioning to the updating [fine-tuning API](/docs/guides/fine-tuning)
+ Related guide: [Assistants](/docs/assistants/overview)
sections:
+ - type: endpoint
+ key: createThread
+ path: createThread
+ - type: endpoint
+ key: getThread
+ path: getThread
+ - type: endpoint
+ key: modifyThread
+ path: modifyThread
+ - type: endpoint
+ key: deleteThread
+ path: deleteThread
- type: object
+ key: ThreadObject
path: object
- key: FineTune
+ - id: messages
+ title: Messages
+ beta: true
+ description: |
+ Create messages within threads.
+
+ Related guide: [Assistants](/docs/assistants/overview)
+ sections:
- type: endpoint
- key: createFineTune
- path: create
+ key: createMessage
+ path: createMessage
- type: endpoint
- key: listFineTunes
- path: list
+ key: listMessages
+ path: listMessages
- type: endpoint
- key: retrieveFineTune
- path: retrieve
+ key: listMessageFiles
+ path: listMessageFiles
- type: endpoint
- key: cancelFineTune
- path: cancel
+ key: getMessage
+ path: getMessage
- type: endpoint
- key: listFineTuneEvents
- path: list-events
- - id: edits
- title: Edits
- deprecated: true
+ key: getMessageFile
+ path: getMessageFile
+ - type: endpoint
+ key: modifyMessage
+ path: modifyMessage
+ - type: object
+ key: MessageObject
+ path: object
+ - type: object
+ key: MessageFileObject
+ path: file-object
+ - id: runs
+ title: Runs
+ beta: true
description: |
- Given a prompt and an instruction, the model will return an edited version of the prompt.
+ Represents an execution run on a thread.
+
+ Related guide: [Assistants](/docs/assistants/overview)
sections:
+ - type: endpoint
+ key: createRun
+ path: createRun
+ - type: endpoint
+ key: createThreadAndRun
+ path: createThreadAndRun
+ - type: endpoint
+ key: listRuns
+ path: listRuns
+ - type: endpoint
+ key: listRunSteps
+ path: listRunSteps
+ - type: endpoint
+ key: getRun
+ path: getRun
+ - type: endpoint
+ key: getRunStep
+ path: getRunStep
+ - type: endpoint
+ key: modifyRun
+ path: modifyRun
+ - type: endpoint
+ key: submitToolOuputsToRun
+ path: submitToolOutputs
+ - type: endpoint
+ key: cancelRun
+ path: cancelRun
- type: object
- key: CreateEditResponse
+ key: RunObject
path: object
+ - type: object
+ key: RunStepObject
+ path: step-object
+ - id: completions
+ title: Completions
+ legacy: true
+ description: |
+ Given a prompt, the model will return one or more predicted completions along with the probabilities of alternative tokens at each position. Most developers should use our [Chat Completions API](/docs/guides/text-generation/text-generation-models) to leverage our best and newest models. Most models that support the legacy Completions endpoint [will be shut off on January 4th, 2024](/docs/deprecations/2023-07-06-gpt-and-embeddings).
+ sections: - type: endpoint - key: createEdit + key: createCompletion path: create + - type: object + key: CreateCompletionResponse + path: object \ No newline at end of file diff --git a/package-lock.json b/package-lock.json index a77b672a3..a752dc7f5 100644 --- a/package-lock.json +++ b/package-lock.json @@ -8,18 +8,78 @@ "name": "openai-tsp", "version": "0.1.0", "dependencies": { - "@typespec/compiler": "^0.49.0-dev.11", - "@typespec/openapi": "^0.49.0-dev.4", - "@typespec/openapi3": "^0.49.0-dev.10", - "@typespec/rest": "^0.49.0-dev.3" + "@azure-tools/typespec-csharp": "latest", + "@typespec/compiler": "^0.54.0", + "@typespec/http": "^0.54.0", + "@typespec/openapi": "^0.54.0", + "@typespec/openapi3": "^0.54.0", + "@typespec/rest": "^0.54.0", + "@typespec/versioning": "^0.54.0" + } + }, + "node_modules/@autorest/csharp": { + "version": "3.0.0-beta.20240312.2", + "resolved": "https://registry.npmjs.org/@autorest/csharp/-/csharp-3.0.0-beta.20240312.2.tgz", + "integrity": "sha512-7Q4eyNvY+/l/Ga1j4jDo1Mr0TzwDPvi2957fGwiW4zHd0Hl/sTC8wNrjdiZtpfhkJUuzJ6CAJJE84wYuhbh3hQ==" + }, + "node_modules/@azure-tools/typespec-azure-core": { + "version": "0.40.0", + "resolved": "https://registry.npmjs.org/@azure-tools/typespec-azure-core/-/typespec-azure-core-0.40.0.tgz", + "integrity": "sha512-l5U47zXKYQKFbipRQLpjG4EwvPJg0SogdFEe5a3rRr7mUy8sWPkciHpngLZVOd2cKZQD5m7nqwfWL798I9TJnQ==", + "peer": true, + "engines": { + "node": ">=18.0.0" + }, + "peerDependencies": { + "@typespec/compiler": "~0.54.0", + "@typespec/http": "~0.54.0", + "@typespec/rest": "~0.54.0" + } + }, + "node_modules/@azure-tools/typespec-client-generator-core": { + "version": "0.40.0", + "resolved": "https://registry.npmjs.org/@azure-tools/typespec-client-generator-core/-/typespec-client-generator-core-0.40.0.tgz", + "integrity": "sha512-Nm/OfDtSWBr1lylISbXR37B9QKWlZHK1j4T8L439Y1v3VcvJsC/0F5PLemY0odHpOYZNwu2uevJjAeM5W56wlw==", + "peer": true, + "dependencies": { + "change-case": "~5.4.2", + "pluralize": "^8.0.0" + }, + "engines": { + "node": ">=18.0.0" + }, + "peerDependencies": { + "@typespec/compiler": "~0.54.0", + "@typespec/http": "~0.54.0", + "@typespec/rest": "~0.54.0", + "@typespec/versioning": "~0.54.0" + } + }, + "node_modules/@azure-tools/typespec-csharp": { + "version": "0.2.0-beta.20240312.2", + "resolved": "https://registry.npmjs.org/@azure-tools/typespec-csharp/-/typespec-csharp-0.2.0-beta.20240312.2.tgz", + "integrity": "sha512-F1TAiBpg8U/TIDoz4RgBkh4QNzWkZenQInY8vNpCP6Xpg7y9SA8yWlpqNwOfni9icWbtJFKEEv3bxocmOFANQQ==", + "dependencies": { + "@autorest/csharp": "3.0.0-beta.20240312.2", + "json-serialize-refs": "0.1.0-0", + "winston": "^3.8.2" + }, + "peerDependencies": { + "@azure-tools/typespec-azure-core": ">=0.36.0 <1.0.0", + "@azure-tools/typespec-client-generator-core": ">=0.36.0 <1.0.0", + "@typespec/compiler": ">=0.50.0 <1.0.0", + "@typespec/http": ">=0.50.0 <1.0.0", + "@typespec/openapi": ">=0.50.0 <1.0.0", + "@typespec/rest": ">=0.50.0 <1.0.0", + "@typespec/versioning": ">=0.50.0 <1.0.0" } }, "node_modules/@babel/code-frame": { - "version": "7.22.13", - "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.22.13.tgz", - "integrity": "sha512-XktuhWlJ5g+3TJXc5upd9Ks1HutSArik6jf2eAjYFyIOf4ej3RN+184cZbzDvbPnuTJIUhPKKJE3cIsYTiAT3w==", + "version": "7.23.5", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.23.5.tgz", + "integrity": "sha512-CgH3s1a96LipHCmSUmYFPwY7MNx8C3avkq7i4Wl3cfa662ldtUe4VM1TPXX70pfmrlWTb6jLqTYrZyT2ZTJBgA==", "dependencies": { - "@babel/highlight": "^7.22.13", 
+ "@babel/highlight": "^7.23.4", "chalk": "^2.4.2" }, "engines": { @@ -27,19 +87,19 @@ } }, "node_modules/@babel/helper-validator-identifier": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.22.5.tgz", - "integrity": "sha512-aJXu+6lErq8ltp+JhkJUfk1MTGyuA4v7f3pA+BJ5HLfNC6nAQ0Cpi9uOquUj8Hehg0aUiHzWQbOVJGao6ztBAQ==", + "version": "7.22.20", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.22.20.tgz", + "integrity": "sha512-Y4OZ+ytlatR8AI+8KZfKuL5urKp7qey08ha31L8b3BwewJAoJamTzyvxPR/5D+KkdJCGPq/+8TukHBlY10FX9A==", "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/highlight": { - "version": "7.22.13", - "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.22.13.tgz", - "integrity": "sha512-C/BaXcnnvBCmHTpz/VGZ8jgtE2aYlW4hxDhseJAWZb7gqGM/qtCK6iZUb0TyKFf7BOUsBH7Q7fkRsDRhg1XklQ==", + "version": "7.23.4", + "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.23.4.tgz", + "integrity": "sha512-acGdbYSfp2WheJoJm/EBBBLh/ID8KDc64ISZ9DYtBmC8/Q204PZJLHyzeB5qMzJ5trcOkybd78M4x2KWsUq++A==", "dependencies": { - "@babel/helper-validator-identifier": "^7.22.5", + "@babel/helper-validator-identifier": "^7.22.20", "chalk": "^2.4.2", "js-tokens": "^4.0.0" }, @@ -47,6 +107,24 @@ "node": ">=6.9.0" } }, + "node_modules/@colors/colors": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/@colors/colors/-/colors-1.6.0.tgz", + "integrity": "sha512-Ir+AOibqzrIsL6ajt3Rz3LskB7OiMVHqltZmspbW/TJuTVuyOMirVqAkjfY6JISiLHgyNqicAC8AyHHGzNd/dA==", + "engines": { + "node": ">=0.1.90" + } + }, + "node_modules/@dabh/diagnostics": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/@dabh/diagnostics/-/diagnostics-2.0.3.tgz", + "integrity": "sha512-hrlQOIi7hAfzsMqlGSFyVucrx38O+j6wiGOf//H2ecvIEqYN4ADBSS2iLMh5UFyDunCNniUIPk/q3riFv45xRA==", + "dependencies": { + "colorspace": "1.1.x", + "enabled": "2.0.x", + "kuler": "^2.0.0" + } + }, "node_modules/@nodelib/fs.scandir": { "version": "2.1.5", "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", @@ -79,23 +157,39 @@ "node": ">= 8" } }, + "node_modules/@sindresorhus/merge-streams": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/@sindresorhus/merge-streams/-/merge-streams-2.3.0.tgz", + "integrity": "sha512-LtoMMhxAlorcGhmFYI+LhPgbPZCkgP6ra1YL604EeF6U98pLlQ3iWIGMdWSC+vWmPBWBNgmDBAhnAobLROJmwg==", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@types/triple-beam": { + "version": "1.3.5", + "resolved": "https://registry.npmjs.org/@types/triple-beam/-/triple-beam-1.3.5.tgz", + "integrity": "sha512-6WaYesThRMCl19iryMYP7/x2OVgCtbIVflDGFpWnb9irXI3UjYE4AzmYuiUKY1AJstGijoY+MgUszMgRxIYTYw==" + }, "node_modules/@typespec/compiler": { - "version": "0.49.0-dev.11", - "resolved": "https://registry.npmjs.org/@typespec/compiler/-/compiler-0.49.0-dev.11.tgz", - "integrity": "sha512-SNt6hqu017JhwU3qPpolsGRKgSnb9Wc4FZs5FPQ6i1Ktubtgx9Ac9pxEdSNgOsdoBC3efzbpNCBasLGms0V+Fw==", + "version": "0.54.0", + "resolved": "https://registry.npmjs.org/@typespec/compiler/-/compiler-0.54.0.tgz", + "integrity": "sha512-lxMqlvUq5m1KZUjg+IoM/gEwY+yeSjjnpUsz6wmzjK4cO9cIY4wPJdrZwe8jUc2UFOoqKXN3AK8N1UWxA+w9Dg==", "dependencies": { - "@babel/code-frame": "~7.22.13", + "@babel/code-frame": "~7.23.5", "ajv": "~8.12.0", - "change-case": "~4.1.2", - "globby": "~13.2.2", + "change-case": 
"~5.4.2", + "globby": "~14.0.0", "mustache": "~4.2.0", "picocolors": "~1.0.0", - "prettier": "~3.0.3", + "prettier": "~3.2.5", "prompts": "~2.4.2", - "semver": "^7.5.4", - "vscode-languageserver": "~9.0.0", - "vscode-languageserver-textdocument": "~1.0.8", - "yaml": "~2.3.2", + "semver": "^7.6.0", + "vscode-languageserver": "~9.0.1", + "vscode-languageserver-textdocument": "~1.0.11", + "yaml": "~2.3.4", "yargs": "~17.7.2" }, "bin": { @@ -103,72 +197,70 @@ "tsp-server": "cmd/tsp-server.js" }, "engines": { - "node": ">=16.0.0" + "node": ">=18.0.0" } }, "node_modules/@typespec/http": { - "version": "0.48.0", - "resolved": "https://registry.npmjs.org/@typespec/http/-/http-0.48.0.tgz", - "integrity": "sha512-e+0Y0Ky71flUNZSRzCfoOm8XvXsSYGmQgB9VZFDbLl8mQlXwuTfib4tWrU531TCtZHMnylbXx2wAk5+3uC6b9g==", - "peer": true, + "version": "0.54.0", + "resolved": "https://registry.npmjs.org/@typespec/http/-/http-0.54.0.tgz", + "integrity": "sha512-/hZd9pkjJh3ogOekyKzZnpVV2kXzxtWDiTt3Gekc6iHTGk/CE1JpRFts8xwXoI5d3FqYotfb4w5ztVw62WjOcA==", "engines": { - "node": ">=16.0.0" + "node": ">=18.0.0" }, "peerDependencies": { - "@typespec/compiler": "~0.48.0" + "@typespec/compiler": "~0.54.0" } }, "node_modules/@typespec/openapi": { - "version": "0.49.0-dev.4", - "resolved": "https://registry.npmjs.org/@typespec/openapi/-/openapi-0.49.0-dev.4.tgz", - "integrity": "sha512-qH2borMxQoAdiMDvd88MTvlF2vFZUzusDFtxmKx/GEy+aqkw7pAnR0fqeCbPGR/P8a6slpDchusY/le3608yAQ==", + "version": "0.54.0", + "resolved": "https://registry.npmjs.org/@typespec/openapi/-/openapi-0.54.0.tgz", + "integrity": "sha512-QJkwq3whcqKb29ScMD5IQzqvDmPQyLAubRl82Zj6kVMCqabRwegOX9aN+K0083nci65zt9rflZbv9bKY5GRy/A==", "engines": { - "node": ">=16.0.0" + "node": ">=18.0.0" }, "peerDependencies": { - "@typespec/compiler": "~0.48.1 || >=0.49.0-dev <0.49.0", - "@typespec/http": "~0.48.0 || >=0.49.0-dev <0.49.0" + "@typespec/compiler": "~0.54.0", + "@typespec/http": "~0.54.0" } }, "node_modules/@typespec/openapi3": { - "version": "0.49.0-dev.10", - "resolved": "https://registry.npmjs.org/@typespec/openapi3/-/openapi3-0.49.0-dev.10.tgz", - "integrity": "sha512-J9oiVJKv3pTcNIUzftHS676w4LOxvQe6fqAAx37Nql7SJ3AZrqHXwOrlxjMKZHifU7T+V/KZKF8Y6Li4ORPTPw==", + "version": "0.54.0", + "resolved": "https://registry.npmjs.org/@typespec/openapi3/-/openapi3-0.54.0.tgz", + "integrity": "sha512-ryqa6iNWA3Vb2TcyTUD0NrRecGVY5MGaEuAdBJnnEEPcE6CSQY0j0dFZXzRLBUd4LiR332B4Y7Brkq6MjRdrNg==", "dependencies": { - "yaml": "~2.3.2" + "yaml": "~2.3.4" }, "engines": { - "node": ">=16.0.0" + "node": ">=18.0.0" }, "peerDependencies": { - "@typespec/compiler": "~0.48.1 || >=0.49.0-dev <0.49.0", - "@typespec/http": "~0.48.0 || >=0.49.0-dev <0.49.0", - "@typespec/openapi": "~0.48.0 || >=0.49.0-dev <0.49.0", - "@typespec/versioning": "~0.48.0 || >=0.49.0-dev <0.49.0" + "@typespec/compiler": "~0.54.0", + "@typespec/http": "~0.54.0", + "@typespec/openapi": "~0.54.0", + "@typespec/versioning": "~0.54.0" } }, "node_modules/@typespec/rest": { - "version": "0.49.0-dev.3", - "resolved": "https://registry.npmjs.org/@typespec/rest/-/rest-0.49.0-dev.3.tgz", - "integrity": "sha512-/33xOp3N5wtUZ6O+kNssIzCEXR7+fjThtGysnsUL0lS8W3OesCgF9gKZH9fB0beaRlccmzFoRcHSOQLwalkfmg==", + "version": "0.54.0", + "resolved": "https://registry.npmjs.org/@typespec/rest/-/rest-0.54.0.tgz", + "integrity": "sha512-F1hq/Per9epPJQ8Ey84mAtrgrZeLu6fDMIxNao1XlTfDEFZuYgFuCSyg0pyIi0Xg7KUBMvrvSv83WoF3mN2szw==", "engines": { - "node": ">=16.0.0" + "node": ">=18.0.0" }, "peerDependencies": { - "@typespec/compiler": "~0.48.1 || >=0.49.0-dev 
<0.49.0", - "@typespec/http": "~0.48.0 || >=0.49.0-dev <0.49.0" + "@typespec/compiler": "~0.54.0", + "@typespec/http": "~0.54.0" } }, "node_modules/@typespec/versioning": { - "version": "0.48.0", - "resolved": "https://registry.npmjs.org/@typespec/versioning/-/versioning-0.48.0.tgz", - "integrity": "sha512-WF26vmMPwizhSnjX0ox23nbp7hthtB4cN/J5w1tlryXyp/BXySHoYsJEMK7fviSpj4WdreVXdM6wmRIG33zqig==", - "peer": true, + "version": "0.54.0", + "resolved": "https://registry.npmjs.org/@typespec/versioning/-/versioning-0.54.0.tgz", + "integrity": "sha512-IlGpveOJ0WBTbn3w8nfzgSNhJWNd0+H+bo1Ljrjpeb9SFQmS8bX2fDf0vqsHVl50XgvKIZxgOpEXN5TmuzNnRw==", "engines": { - "node": ">=16.0.0" + "node": ">=18.0.0" }, "peerDependencies": { - "@typespec/compiler": "~0.48.0" + "@typespec/compiler": "~0.54.0" } }, "node_modules/ajv": { @@ -205,6 +297,11 @@ "node": ">=4" } }, + "node_modules/async": { + "version": "3.2.5", + "resolved": "https://registry.npmjs.org/async/-/async-3.2.5.tgz", + "integrity": "sha512-baNZyqaaLhyLVKm/DlvdW051MSgO6b8eVfIezl9E5PqWxFgzLm/wQntEW4zOytVburDEr0JlALEpdOFwvErLsg==" + }, "node_modules/braces": { "version": "3.0.2", "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", @@ -216,25 +313,6 @@ "node": ">=8" } }, - "node_modules/camel-case": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/camel-case/-/camel-case-4.1.2.tgz", - "integrity": "sha512-gxGWBrTT1JuMx6R+o5PTXMmUnhnVzLQ9SNutD4YqKtI6ap897t3tKECYla6gCWEkplXnlNybEkZg9GEGxKFCgw==", - "dependencies": { - "pascal-case": "^3.1.2", - "tslib": "^2.0.3" - } - }, - "node_modules/capital-case": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/capital-case/-/capital-case-1.0.4.tgz", - "integrity": "sha512-ds37W8CytHgwnhGGTi88pcPyR15qoNkOpYwmMMfnWqqWgESapLqvDx6huFjQ5vqWSn2Z06173XNA7LtMOeUh1A==", - "dependencies": { - "no-case": "^3.0.4", - "tslib": "^2.0.3", - "upper-case-first": "^2.0.2" - } - }, "node_modules/chalk": { "version": "2.4.2", "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", @@ -249,23 +327,9 @@ } }, "node_modules/change-case": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/change-case/-/change-case-4.1.2.tgz", - "integrity": "sha512-bSxY2ws9OtviILG1EiY5K7NNxkqg/JnRnFxLtKQ96JaviiIxi7djMrSd0ECT9AC+lttClmYwKw53BWpOMblo7A==", - "dependencies": { - "camel-case": "^4.1.2", - "capital-case": "^1.0.4", - "constant-case": "^3.0.4", - "dot-case": "^3.0.4", - "header-case": "^2.0.4", - "no-case": "^3.0.4", - "param-case": "^3.0.4", - "pascal-case": "^3.1.2", - "path-case": "^3.0.4", - "sentence-case": "^3.0.4", - "snake-case": "^3.0.4", - "tslib": "^2.0.3" - } + "version": "5.4.3", + "resolved": "https://registry.npmjs.org/change-case/-/change-case-5.4.3.tgz", + "integrity": "sha512-4cdyvorTy/lViZlVzw2O8/hHCLUuHqp4KpSSP3DlauhFCf3LdnfF+p5s0EAhjKsU7bqrMzu7iQArYfoPiHO2nw==" }, "node_modules/cliui": { "version": "8.0.1", @@ -280,6 +344,15 @@ "node": ">=12" } }, + "node_modules/color": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/color/-/color-3.2.1.tgz", + "integrity": "sha512-aBl7dZI9ENN6fUGC7mWpMTPNHmWUSNan9tuWN6ahh5ZLNk9baLJOnSMlrQkHcrfFgz2/RigjUVAjdx36VcemKA==", + "dependencies": { + "color-convert": "^1.9.3", + "color-string": "^1.6.0" + } + }, "node_modules/color-convert": { "version": "1.9.3", "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", @@ -293,34 +366,22 @@ "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", "integrity": 
"sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==" }, - "node_modules/constant-case": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/constant-case/-/constant-case-3.0.4.tgz", - "integrity": "sha512-I2hSBi7Vvs7BEuJDr5dDHfzb/Ruj3FyvFyh7KLilAjNQw3Be+xgqUBA2W6scVEcL0hL1dwPRtIqEPVUCKkSsyQ==", + "node_modules/color-string": { + "version": "1.9.1", + "resolved": "https://registry.npmjs.org/color-string/-/color-string-1.9.1.tgz", + "integrity": "sha512-shrVawQFojnZv6xM40anx4CkoDP+fZsw/ZerEMsW/pyzsRbElpsL/DBVW7q3ExxwusdNXI3lXpuhEZkzs8p5Eg==", "dependencies": { - "no-case": "^3.0.4", - "tslib": "^2.0.3", - "upper-case": "^2.0.2" - } - }, - "node_modules/dir-glob": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz", - "integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==", - "dependencies": { - "path-type": "^4.0.0" - }, - "engines": { - "node": ">=8" + "color-name": "^1.0.0", + "simple-swizzle": "^0.2.2" } }, - "node_modules/dot-case": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/dot-case/-/dot-case-3.0.4.tgz", - "integrity": "sha512-Kv5nKlh6yRrdrGvxeJ2e5y2eRUpkUosIW4A2AS38zwSz27zu7ufDwQPi5Jhs3XAlGNetl3bmnGhQsMtkKJnj3w==", + "node_modules/colorspace": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/colorspace/-/colorspace-1.1.4.tgz", + "integrity": "sha512-BgvKJiuVu1igBUF2kEjRCZXol6wiiGbY5ipL/oVPwm0BL9sIpMIzM8IK7vwuxIIzOXMV3Ey5w+vxhm0rR/TN8w==", "dependencies": { - "no-case": "^3.0.4", - "tslib": "^2.0.3" + "color": "^3.1.3", + "text-hex": "1.0.x" } }, "node_modules/emoji-regex": { @@ -328,10 +389,15 @@ "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==" }, + "node_modules/enabled": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/enabled/-/enabled-2.0.0.tgz", + "integrity": "sha512-AKrN98kuwOzMIdAizXGI86UFBoo26CL21UM763y1h/GMSJ4/OHU9k2YlsmBpyScFo/wbLzWQJBMCW4+IO3/+OQ==" + }, "node_modules/escalade": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz", - "integrity": "sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==", + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.2.tgz", + "integrity": "sha512-ErCHMCae19vR8vQGe50xIsVomy19rg6gFu3+r3jkEO46suLMWBksvVyoGgQV+jOfl84ZSOSlmv6Gxa89PmTGmA==", "engines": { "node": ">=6" } @@ -350,9 +416,9 @@ "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==" }, "node_modules/fast-glob": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.1.tgz", - "integrity": "sha512-kNFPyjhh5cKjrUltxs+wFx+ZkbRaxxmZ+X0ZU31SOsxCEtP9VPgtq2teZw1DebupL5GmDaNQ6yKMMVcM41iqDg==", + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.2.tgz", + "integrity": "sha512-oX2ruAFQwf/Orj8m737Y5adxDQO0LAB7/S5MnxCdTNDd4p6BsyIVsv9JQsATbTSq8KHRpLwIHbVlUNatxd+1Ow==", "dependencies": { "@nodelib/fs.stat": "^2.0.2", "@nodelib/fs.walk": "^1.2.3", @@ -365,13 +431,18 @@ } }, "node_modules/fastq": { - "version": "1.15.0", - "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.15.0.tgz", - "integrity": "sha512-wBrocU2LCXXa+lWBt8RoIRD89Fi8OdABODa/kEnyeyjS5aZO5/GNvI5sEINADqP/h8M29UHTHUb53sUu5Ihqdw==", 
+ "version": "1.17.1", + "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.17.1.tgz", + "integrity": "sha512-sRVD3lWVIXWg6By68ZN7vho9a1pQcN/WBFaAAsDDFzlJjvoGx0P8z7V1t72grFJfJhu3YPZBuu25f7Kaw2jN1w==", "dependencies": { "reusify": "^1.0.4" } }, + "node_modules/fecha": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/fecha/-/fecha-4.2.3.tgz", + "integrity": "sha512-OP2IUU6HeYKJi3i0z4A19kHMQoLVs4Hc+DPqqxI2h/DPZHTm/vjsfC6P0b4jCMy14XizLBqvndQ+UilD7707Jw==" + }, "node_modules/fill-range": { "version": "7.0.1", "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", @@ -383,6 +454,11 @@ "node": ">=8" } }, + "node_modules/fn.name": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/fn.name/-/fn.name-1.1.0.tgz", + "integrity": "sha512-GRnmB5gPyJpAhTQdSZTSp9uaPSvl09KoYcMQtsB9rQoOmzs9dH6ffeccH+Z+cv6P68Hu5bC6JjRh4Ah/mHSNRw==" + }, "node_modules/get-caller-file": { "version": "2.0.5", "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", @@ -403,18 +479,19 @@ } }, "node_modules/globby": { - "version": "13.2.2", - "resolved": "https://registry.npmjs.org/globby/-/globby-13.2.2.tgz", - "integrity": "sha512-Y1zNGV+pzQdh7H39l9zgB4PJqjRNqydvdYCDG4HFXM4XuvSaQQlEc91IU1yALL8gUTDomgBAfz3XJdmUS+oo0w==", + "version": "14.0.1", + "resolved": "https://registry.npmjs.org/globby/-/globby-14.0.1.tgz", + "integrity": "sha512-jOMLD2Z7MAhyG8aJpNOpmziMOP4rPLcc95oQPKXBazW82z+CEgPFBQvEpRUa1KeIMUJo4Wsm+q6uzO/Q/4BksQ==", "dependencies": { - "dir-glob": "^3.0.1", - "fast-glob": "^3.3.0", + "@sindresorhus/merge-streams": "^2.1.0", + "fast-glob": "^3.3.2", "ignore": "^5.2.4", - "merge2": "^1.4.1", - "slash": "^4.0.0" + "path-type": "^5.0.0", + "slash": "^5.1.0", + "unicorn-magic": "^0.1.0" }, "engines": { - "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + "node": ">=18" }, "funding": { "url": "https://github.com/sponsors/sindresorhus" @@ -428,23 +505,24 @@ "node": ">=4" } }, - "node_modules/header-case": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/header-case/-/header-case-2.0.4.tgz", - "integrity": "sha512-H/vuk5TEEVZwrR0lp2zed9OCo1uAILMlx0JEMgC26rzyJJ3N1v6XkwHHXJQdR2doSjcGPM6OKPYoJgf0plJ11Q==", - "dependencies": { - "capital-case": "^1.0.4", - "tslib": "^2.0.3" - } - }, "node_modules/ignore": { - "version": "5.2.4", - "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.2.4.tgz", - "integrity": "sha512-MAb38BcSbH0eHNBxn7ql2NH/kX33OkB3lZ1BNdh7ENeRChHTYsTvWrMubiIAMNS2llXEEgZ1MUOBtXChP3kaFQ==", + "version": "5.3.1", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.1.tgz", + "integrity": "sha512-5Fytz/IraMjqpwfd34ke28PTVMjZjJG2MPn5t7OE4eUCUNf8BAa7b5WUS9/Qvr6mwOQS7Mk6vdsMno5he+T8Xw==", "engines": { "node": ">= 4" } }, + "node_modules/inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" + }, + "node_modules/is-arrayish": { + "version": "0.3.2", + "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.3.2.tgz", + "integrity": "sha512-eVRqCvVlZbuw3GrM63ovNSNAeA1K16kaR/LRY/92w0zxQ5/1YzwblUX652i4Xs9RwAGjW9d9y6X88t8OaAJfWQ==" + }, "node_modules/is-extglob": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", @@ -480,6 +558,17 @@ "node": ">=0.12.0" } }, + "node_modules/is-stream": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz", + "integrity": 
"sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/js-tokens": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", @@ -490,6 +579,11 @@ "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==" }, + "node_modules/json-serialize-refs": { + "version": "0.1.0-0", + "resolved": "https://registry.npmjs.org/json-serialize-refs/-/json-serialize-refs-0.1.0-0.tgz", + "integrity": "sha512-SnNMfW2RRPDXIMKa8zdLb59UjMSI1UFZCtIb8ae68GcZ0a6x8b77lIWqqTOdq1azzmkXupD6UWriPLd0JCrFng==" + }, "node_modules/kleur": { "version": "3.0.3", "resolved": "https://registry.npmjs.org/kleur/-/kleur-3.0.3.tgz", @@ -498,12 +592,25 @@ "node": ">=6" } }, - "node_modules/lower-case": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/lower-case/-/lower-case-2.0.2.tgz", - "integrity": "sha512-7fm3l3NAF9WfN6W3JOmf5drwpVqX78JtoGJ3A6W0a6ZnldM41w2fV5D490psKFTpMds8TJse/eHLFFsNHHjHgg==", + "node_modules/kuler": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/kuler/-/kuler-2.0.0.tgz", + "integrity": "sha512-Xq9nH7KlWZmXAtodXDDRE7vs6DU1gTU8zYDHDiWLSip45Egwq3plLHzPn27NgvzL2r1LMPC1vdqh98sQxtqj4A==" + }, + "node_modules/logform": { + "version": "2.6.0", + "resolved": "https://registry.npmjs.org/logform/-/logform-2.6.0.tgz", + "integrity": "sha512-1ulHeNPp6k/LD8H91o7VYFBng5i1BDE7HoKxVbZiGFidS1Rj65qcywLxX+pVfAPoQJEjRdvKcusKwOupHCVOVQ==", "dependencies": { - "tslib": "^2.0.3" + "@colors/colors": "1.6.0", + "@types/triple-beam": "^1.3.2", + "fecha": "^4.2.0", + "ms": "^2.1.1", + "safe-stable-stringify": "^2.3.1", + "triple-beam": "^1.3.0" + }, + "engines": { + "node": ">= 12.0.0" } }, "node_modules/lru-cache": { @@ -537,6 +644,11 @@ "node": ">=8.6" } }, + "node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==" + }, "node_modules/mustache": { "version": "4.2.0", "resolved": "https://registry.npmjs.org/mustache/-/mustache-4.2.0.tgz", @@ -545,48 +657,23 @@ "mustache": "bin/mustache" } }, - "node_modules/no-case": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/no-case/-/no-case-3.0.4.tgz", - "integrity": "sha512-fgAN3jGAh+RoxUGZHTSOLJIqUc2wmoBwGR4tbpNAKmmovFoWq0OdRkb0VkldReO2a2iBT/OEulG9XSUc10r3zg==", - "dependencies": { - "lower-case": "^2.0.2", - "tslib": "^2.0.3" - } - }, - "node_modules/param-case": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/param-case/-/param-case-3.0.4.tgz", - "integrity": "sha512-RXlj7zCYokReqWpOPH9oYivUzLYZ5vAPIfEmCTNViosC78F8F0H9y7T7gG2M39ymgutxF5gcFEsyZQSph9Bp3A==", - "dependencies": { - "dot-case": "^3.0.4", - "tslib": "^2.0.3" - } - }, - "node_modules/pascal-case": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/pascal-case/-/pascal-case-3.1.2.tgz", - "integrity": "sha512-uWlGT3YSnK9x3BQJaOdcZwrnV6hPpd8jFH1/ucpiLRPh/2zCVJKS19E4GvYHvaCcACn3foXZ0cLB9Wrx1KGe5g==", - "dependencies": { - "no-case": "^3.0.4", - "tslib": "^2.0.3" - } - }, - "node_modules/path-case": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/path-case/-/path-case-3.0.4.tgz", - "integrity": 
"sha512-qO4qCFjXqVTrcbPt/hQfhTQ+VhFsqNKOPtytgNKkKxSoEp3XPUQ8ObFuePylOIok5gjn69ry8XiULxCwot3Wfg==", + "node_modules/one-time": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/one-time/-/one-time-1.0.0.tgz", + "integrity": "sha512-5DXOiRKwuSEcQ/l0kGCF6Q3jcADFv5tSmRaJck/OqkVFcOzutB134KRSfF0xDrL39MNnqxbHBbUUcjZIhTgb2g==", "dependencies": { - "dot-case": "^3.0.4", - "tslib": "^2.0.3" + "fn.name": "1.x.x" } }, "node_modules/path-type": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz", - "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==", + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/path-type/-/path-type-5.0.0.tgz", + "integrity": "sha512-5HviZNaZcfqP95rwpv+1HDgUamezbqdSYTyzjTvwtJSnIH+3vnbmWsItli8OFEndS984VT55M3jduxZbX351gg==", "engines": { - "node": ">=8" + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" } }, "node_modules/picocolors": { @@ -605,10 +692,19 @@ "url": "https://github.com/sponsors/jonschlinkert" } }, + "node_modules/pluralize": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/pluralize/-/pluralize-8.0.0.tgz", + "integrity": "sha512-Nc3IT5yHzflTfbjgqWcCPpo7DaKy4FnpB0l/zCAW0Tc7jxAiuqSxHasntB3D7887LSrA93kDJ9IXovxJYxyLCA==", + "peer": true, + "engines": { + "node": ">=4" + } + }, "node_modules/prettier": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.0.3.tgz", - "integrity": "sha512-L/4pUDMxcNa8R/EthV08Zt42WBO4h1rarVtK0K+QJG0X187OLo7l699jWw0GKuwzkPQ//jMFA/8Xm6Fh3J/DAg==", + "version": "3.2.5", + "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.2.5.tgz", + "integrity": "sha512-3/GWa9aOC0YeD7LUfvOG2NiDyhOWRvt1k+rcKhOuYnMY24iiCphgneUfJDyFXd6rZCAnuLBv6UeAULtrhT/F4A==", "bin": { "prettier": "bin/prettier.cjs" }, @@ -632,9 +728,9 @@ } }, "node_modules/punycode": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.0.tgz", - "integrity": "sha512-rRV+zQD8tVFys26lAGR9WUuS4iUAngJScM+ZRSKtvl5tKeZ2t5bvdNFdNHBW9FWR4guGHlgmsZ1G7BSm2wTbuA==", + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz", + "integrity": "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==", "engines": { "node": ">=6" } @@ -658,6 +754,19 @@ } ] }, + "node_modules/readable-stream": { + "version": "3.6.2", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz", + "integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==", + "dependencies": { + "inherits": "^2.0.3", + "string_decoder": "^1.1.1", + "util-deprecate": "^1.0.1" + }, + "engines": { + "node": ">= 6" + } + }, "node_modules/require-directory": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", @@ -705,10 +814,37 @@ "queue-microtask": "^1.2.2" } }, + "node_modules/safe-buffer": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, + 
"node_modules/safe-stable-stringify": { + "version": "2.4.3", + "resolved": "https://registry.npmjs.org/safe-stable-stringify/-/safe-stable-stringify-2.4.3.tgz", + "integrity": "sha512-e2bDA2WJT0wxseVd4lsDP4+3ONX6HpMXQa1ZhFQ7SU+GjvORCmShbCMltrtIDfkYhVHrOcPtj+KhmDBdPdZD1g==", + "engines": { + "node": ">=10" + } + }, "node_modules/semver": { - "version": "7.5.4", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz", - "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==", + "version": "7.6.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.6.0.tgz", + "integrity": "sha512-EnwXhrlwXMk9gKu5/flx5sv/an57AkRplG3hTK68W7FRDN+k+OWBj65M7719OkA82XLBxrcX0KSHj+X5COhOVg==", "dependencies": { "lru-cache": "^6.0.0" }, @@ -719,14 +855,12 @@ "node": ">=10" } }, - "node_modules/sentence-case": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/sentence-case/-/sentence-case-3.0.4.tgz", - "integrity": "sha512-8LS0JInaQMCRoQ7YUytAo/xUu5W2XnQxV2HI/6uM6U7CITS1RqPElr30V6uIqyMKM9lJGRVFy5/4CuzcixNYSg==", + "node_modules/simple-swizzle": { + "version": "0.2.2", + "resolved": "https://registry.npmjs.org/simple-swizzle/-/simple-swizzle-0.2.2.tgz", + "integrity": "sha512-JA//kQgZtbuY83m+xT+tXJkmJncGMTFT+C+g2h2R9uxkYIrE2yy9sgmcLhCnw57/WSD+Eh3J97FPEDFnbXnDUg==", "dependencies": { - "no-case": "^3.0.4", - "tslib": "^2.0.3", - "upper-case-first": "^2.0.2" + "is-arrayish": "^0.3.1" } }, "node_modules/sisteransi": { @@ -735,23 +869,30 @@ "integrity": "sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg==" }, "node_modules/slash": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/slash/-/slash-4.0.0.tgz", - "integrity": "sha512-3dOsAHXXUkQTpOYcoAxLIorMTp4gIQr5IW3iVb7A7lFIp0VHhnynm9izx6TssdrIcVIESAlVjtnO2K8bg+Coew==", + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/slash/-/slash-5.1.0.tgz", + "integrity": "sha512-ZA6oR3T/pEyuqwMgAKT0/hAv8oAXckzbkmR0UkUosQ+Mc4RxGoJkRmwHgHufaenlyAgE1Mxgpdcrf75y6XcnDg==", "engines": { - "node": ">=12" + "node": ">=14.16" }, "funding": { "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/snake-case": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/snake-case/-/snake-case-3.0.4.tgz", - "integrity": "sha512-LAOh4z89bGQvl9pFfNF8V146i7o7/CqFPbqzYgP+yYzDIDeS9HaNFtXABamRW+AQzEVODcvE79ljJ+8a9YSdMg==", + "node_modules/stack-trace": { + "version": "0.0.10", + "resolved": "https://registry.npmjs.org/stack-trace/-/stack-trace-0.0.10.tgz", + "integrity": "sha512-KGzahc7puUKkzyMt+IqAep+TVNbKP+k2Lmwhub39m1AsTSkaDutx56aDCo+HLDzf/D26BIHTJWNiTG1KAJiQCg==", + "engines": { + "node": "*" + } + }, + "node_modules/string_decoder": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", + "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==", "dependencies": { - "dot-case": "^3.0.4", - "tslib": "^2.0.3" + "safe-buffer": "~5.2.0" } }, "node_modules/string-width": { @@ -789,6 +930,11 @@ "node": ">=4" } }, + "node_modules/text-hex": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/text-hex/-/text-hex-1.0.0.tgz", + "integrity": "sha512-uuVGNWzgJ4yhRaNSiubPY7OjISw4sw4E5Uv0wbjp+OzcbmVU/rsT8ujgcXJhn9ypzsgr5vlzpPqP+MBBKcGvbg==" + }, "node_modules/to-regex-range": { "version": "5.0.1", "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", @@ -800,25 +946,23 @@ 
"node": ">=8.0" } }, - "node_modules/tslib": { - "version": "2.6.2", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.2.tgz", - "integrity": "sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q==" - }, - "node_modules/upper-case": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/upper-case/-/upper-case-2.0.2.tgz", - "integrity": "sha512-KgdgDGJt2TpuwBUIjgG6lzw2GWFRCW9Qkfkiv0DxqHHLYJHmtmdUIKcZd8rHgFSjopVTlw6ggzCm1b8MFQwikg==", - "dependencies": { - "tslib": "^2.0.3" + "node_modules/triple-beam": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/triple-beam/-/triple-beam-1.4.1.tgz", + "integrity": "sha512-aZbgViZrg1QNcG+LULa7nhZpJTZSLm/mXnHXnbAbjmN5aSa0y7V+wvv6+4WaBtpISJzThKy+PIPxc1Nq1EJ9mg==", + "engines": { + "node": ">= 14.0.0" } }, - "node_modules/upper-case-first": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/upper-case-first/-/upper-case-first-2.0.2.tgz", - "integrity": "sha512-514ppYHBaKwfJRK/pNC6c/OxfGa0obSnAl106u97Ed0I625Nin96KAjttZF6ZL3e1XLtphxnqrOi9iWgm+u+bg==", - "dependencies": { - "tslib": "^2.0.3" + "node_modules/unicorn-magic": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/unicorn-magic/-/unicorn-magic-0.1.0.tgz", + "integrity": "sha512-lRfVq8fE8gz6QMBuDM6a+LO3IAzTi05H6gCVaUpir2E1Rwpo4ZUog45KpNXKC/Mn3Yb9UDuHumeFTo9iV/D9FQ==", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" } }, "node_modules/uri-js": { @@ -829,6 +973,11 @@ "punycode": "^2.1.0" } }, + "node_modules/util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==" + }, "node_modules/vscode-jsonrpc": { "version": "8.2.0", "resolved": "https://registry.npmjs.org/vscode-jsonrpc/-/vscode-jsonrpc-8.2.0.tgz", @@ -838,34 +987,68 @@ } }, "node_modules/vscode-languageserver": { - "version": "9.0.0", - "resolved": "https://registry.npmjs.org/vscode-languageserver/-/vscode-languageserver-9.0.0.tgz", - "integrity": "sha512-npT72Iu28Tjsm94MsCbwJmIu5ycCF3UEPj3Eb3725T1Hqf4d+Vj2W4GC+F8l4n9yNItJuvE/AHYvomvAs9Dj8A==", + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/vscode-languageserver/-/vscode-languageserver-9.0.1.tgz", + "integrity": "sha512-woByF3PDpkHFUreUa7Hos7+pUWdeWMXRd26+ZX2A8cFx6v/JPTtd4/uN0/jB6XQHYaOlHbio03NTHCqrgG5n7g==", "dependencies": { - "vscode-languageserver-protocol": "3.17.4" + "vscode-languageserver-protocol": "3.17.5" }, "bin": { "installServerIntoExtension": "bin/installServerIntoExtension" } }, "node_modules/vscode-languageserver-protocol": { - "version": "3.17.4", - "resolved": "https://registry.npmjs.org/vscode-languageserver-protocol/-/vscode-languageserver-protocol-3.17.4.tgz", - "integrity": "sha512-IpaHLPft+UBWf4dOIH15YEgydTbXGz52EMU2h16SfFpYu/yOQt3pY14049mtpJu+4CBHn+hq7S67e7O0AwpRqQ==", + "version": "3.17.5", + "resolved": "https://registry.npmjs.org/vscode-languageserver-protocol/-/vscode-languageserver-protocol-3.17.5.tgz", + "integrity": "sha512-mb1bvRJN8SVznADSGWM9u/b07H7Ecg0I3OgXDuLdn307rl/J3A9YD6/eYOssqhecL27hK1IPZAsaqh00i/Jljg==", "dependencies": { "vscode-jsonrpc": "8.2.0", - "vscode-languageserver-types": "3.17.4" + "vscode-languageserver-types": "3.17.5" } }, "node_modules/vscode-languageserver-textdocument": { - "version": "1.0.8", - "resolved": 
"https://registry.npmjs.org/vscode-languageserver-textdocument/-/vscode-languageserver-textdocument-1.0.8.tgz", - "integrity": "sha512-1bonkGqQs5/fxGT5UchTgjGVnfysL0O8v1AYMBjqTbWQTFn721zaPGDYFkOKtfDgFiSgXM3KwaG3FMGfW4Ed9Q==" + "version": "1.0.11", + "resolved": "https://registry.npmjs.org/vscode-languageserver-textdocument/-/vscode-languageserver-textdocument-1.0.11.tgz", + "integrity": "sha512-X+8T3GoiwTVlJbicx/sIAF+yuJAqz8VvwJyoMVhwEMoEKE/fkDmrqUgDMyBECcM2A2frVZIUj5HI/ErRXCfOeA==" }, "node_modules/vscode-languageserver-types": { - "version": "3.17.4", - "resolved": "https://registry.npmjs.org/vscode-languageserver-types/-/vscode-languageserver-types-3.17.4.tgz", - "integrity": "sha512-9YXi5pA3XF2V+NUQg6g+lulNS0ncRCKASYdK3Cs7kiH9sVFXWq27prjkC/B8M/xJLRPPRSPCHVMuBTgRNFh2sQ==" + "version": "3.17.5", + "resolved": "https://registry.npmjs.org/vscode-languageserver-types/-/vscode-languageserver-types-3.17.5.tgz", + "integrity": "sha512-Ld1VelNuX9pdF39h2Hgaeb5hEZM2Z3jUrrMgWQAu82jMtZp7p3vJT3BzToKtZI7NgQssZje5o0zryOrhQvzQAg==" + }, + "node_modules/winston": { + "version": "3.12.0", + "resolved": "https://registry.npmjs.org/winston/-/winston-3.12.0.tgz", + "integrity": "sha512-OwbxKaOlESDi01mC9rkM0dQqQt2I8DAUMRLZ/HpbwvDXm85IryEHgoogy5fziQy38PntgZsLlhAYHz//UPHZ5w==", + "dependencies": { + "@colors/colors": "^1.6.0", + "@dabh/diagnostics": "^2.0.2", + "async": "^3.2.3", + "is-stream": "^2.0.0", + "logform": "^2.4.0", + "one-time": "^1.0.0", + "readable-stream": "^3.4.0", + "safe-stable-stringify": "^2.3.1", + "stack-trace": "0.0.x", + "triple-beam": "^1.3.0", + "winston-transport": "^4.7.0" + }, + "engines": { + "node": ">= 12.0.0" + } + }, + "node_modules/winston-transport": { + "version": "4.7.0", + "resolved": "https://registry.npmjs.org/winston-transport/-/winston-transport-4.7.0.tgz", + "integrity": "sha512-ajBj65K5I7denzer2IYW6+2bNIVqLGDHqDw3Ow8Ohh+vdW+rv4MZ6eiDvHoKhfJFZ2auyN8byXieDDJ96ViONg==", + "dependencies": { + "logform": "^2.3.2", + "readable-stream": "^3.6.0", + "triple-beam": "^1.3.0" + }, + "engines": { + "node": ">= 12.0.0" + } }, "node_modules/wrap-ansi": { "version": "7.0.0", @@ -927,9 +1110,9 @@ "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==" }, "node_modules/yaml": { - "version": "2.3.2", - "resolved": "https://registry.npmjs.org/yaml/-/yaml-2.3.2.tgz", - "integrity": "sha512-N/lyzTPaJasoDmfV7YTrYCI0G/3ivm/9wdG0aHuheKowWQwGTsK0Eoiw6utmzAnI6pkJa0DUVygvp3spqqEKXg==", + "version": "2.3.4", + "resolved": "https://registry.npmjs.org/yaml/-/yaml-2.3.4.tgz", + "integrity": "sha512-8aAvwVUSHpfEqTQ4w/KMlf3HcRdt50E5ODIQJBw1fQ5RL34xabzxtUlzTXVqc4rkZsPbvrXKWnABCD7kWSmocA==", "engines": { "node": ">= 14" } diff --git a/package.json b/package.json index eba48058d..40565051c 100644 --- a/package.json +++ b/package.json @@ -3,12 +3,13 @@ "version": "0.1.0", "type": "module", "dependencies": { - "@typespec/compiler": "^0.49.0-dev.11", - "@typespec/openapi": "^0.49.0-dev.4", - "@typespec/openapi3": "^0.49.0-dev.10", - "@typespec/rest": "^0.49.0-dev.3", - "@typespec/http": "^0.49.0-dev.0", - "@typespec/versioning": "^0.49.0-dev.0" + "@azure-tools/typespec-csharp": "latest", + "@typespec/compiler": "^0.54.0", + "@typespec/http": "^0.54.0", + "@typespec/openapi": "^0.54.0", + "@typespec/openapi3": "^0.54.0", + "@typespec/rest": "^0.54.0", + "@typespec/versioning": "^0.54.0" }, "private": true } diff --git a/readme.md b/readme.md deleted file mode 100644 index c4dd93188..000000000 --- a/readme.md +++ /dev/null @@ -1,15 +0,0 @@ -A 
conversion of the OpenAI OpenAPI to TypeSpec. - -There are some deltas: - -### Changes to API Semantics: - -- Many things are missing defaults (mostly due to bug where we can't set null defaults) -- Error responses have been added. -- Where known, the `object` property's type is narrowed from string to the constant value it will always be - -### Changes to API metadata or OpenAPI format - -- Much of the x-oaiMeta entries have not been added. -- In some cases, new schemas needed to be defined in order to be defined in TypeSpec (e.g. because the constraints could not be added to a model property with a heterogeneous type) -- There is presently no way to set `title` diff --git a/runs/main.tsp b/runs/main.tsp new file mode 100644 index 000000000..6a754bcb5 --- /dev/null +++ b/runs/main.tsp @@ -0,0 +1 @@ +import "./operations.tsp"; \ No newline at end of file diff --git a/runs/meta.tsp b/runs/meta.tsp new file mode 100644 index 000000000..6819970c2 --- /dev/null +++ b/runs/meta.tsp @@ -0,0 +1,50 @@ +import "./models.tsp"; + +import "@typespec/openapi"; + +using TypeSpec.OpenAPI; + +namespace OpenAI; + +@@extension(OpenAI.RunObject, + "x-oaiMeta", + """ + name: The run object + beta: true + example: | + { + "id": "run_abc123", + "object": "thread.run", + "created_at": 1698107661, + "assistant_id": "asst_abc123", + "thread_id": "thread_abc123", + "status": "completed", + "started_at": 1699073476, + "expires_at": null, + "cancelled_at": null, + "failed_at": null, + "completed_at": 1699073498, + "last_error": null, + "model": "gpt-4", + "instructions": null, + "tools": [{"type": "retrieval"}, {"type": "code_interpreter"}], + "file_ids": [], + "metadata": {}, + "usage": { + "prompt_tokens": 123, + "completion_tokens": 456, + "total_tokens": 579 + } + } + """ +); + +// TODO: Fill in example here. +@@extension(OpenAI.RunStepObject, + "x-oaiMeta", + """ + name: The run step object + beta: true + example: *run_step_object_example + """ +); \ No newline at end of file diff --git a/runs/models.tsp b/runs/models.tsp new file mode 100644 index 000000000..e29f762e0 --- /dev/null +++ b/runs/models.tsp @@ -0,0 +1,486 @@ +import "../common/models.tsp"; +import "../assistants/models.tsp"; +import "../threads/models.tsp"; + +using TypeSpec.Http; +using TypeSpec.OpenAPI; + +namespace OpenAI; + +model CreateRunRequest { + /** The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run. */ + assistant_id: string; + + /** + * The ID of the [Model](/docs/api-reference/models) to be used to execute this run. If a value + * is provided here, it will override the model associated with the assistant. If not, the model + * associated with the assistant will be used. */ + `model`?: string | null; + + /** + * Overrides the [instructions](/docs/api-reference/assistants/createAssistant) of the assistant. + * This is useful for modifying the behavior on a per-run basis. + */ + instructions?: string | null; + + /** + * Appends additional instructions at the end of the instructions for the run. This is useful for + * modifying the behavior on a per-run basis without overriding other instructions. + */ + additional_instructions?: string | null; + + /** + * Override the tools the assistant can use for this run. This is useful for modifying the + * behavior on a per-run basis. + */ + tools?: CreateRunRequestTools | null; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + * additional information about the object in a structured format. 
Keys can be a maximum of 64
+ * characters long and values can be a maximum of 512 characters long.
+ */
+ @extension("x-oaiTypeLabel", "map")
+ metadata?: Record | null;
+}
+
+model ModifyRunRequest {
+ /**
+ * Set of 16 key-value pairs that can be attached to an object. This can be useful for storing
+ * additional information about the object in a structured format. Keys can be a maximum of 64
+ * characters long and values can be a maximum of 512 characters long.
+ */
+ @extension("x-oaiTypeLabel", "map")
+ metadata?: Record | null;
+}
+
+model CreateThreadAndRunRequest {
+ /** The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run. */
+ assistant_id: string;
+
+ /** If no thread is provided, an empty thread will be created. */
+ thread?: CreateThreadRequest;
+
+ /**
+ * The ID of the [Model](/docs/api-reference/models) to be used to execute this run. If a value is
+ * provided here, it will override the model associated with the assistant. If not, the model
+ * associated with the assistant will be used.
+ */
+ `model`?: string | null;
+
+ /**
+ * Override the default system message of the assistant. This is useful for modifying the behavior
+ * on a per-run basis.
+ */
+ instructions?: string | null;
+
+ /**
+ * Override the tools the assistant can use for this run. This is useful for modifying the
+ * behavior on a per-run basis.
+ */
+ tools?: CreateRunRequestTools | null;
+
+ /**
+ * Set of 16 key-value pairs that can be attached to an object. This can be useful for storing
+ * additional information about the object in a structured format. Keys can be a maximum of 64
+ * characters long and values can be a maximum of 512 characters long.
+ */
+ @extension("x-oaiTypeLabel", "map")
+ metadata?: Record | null;
+}
+
+model SubmitToolOutputsRunRequest {
+ /** A list of tools for which the outputs are being submitted. */
+ tool_outputs: {
+ /**
+ * The ID of the tool call in the `required_action` object within the run object the output is
+ * being submitted for.
+ */
+ tool_call_id?: string;
+
+ /** The output of the tool call to be submitted to continue the run. */
+ output?: string;
+ }[];
+}
+
+model ListRunsResponse {
+ object: "list";
+ data: RunObject[];
+ first_id: string;
+ last_id: string;
+ has_more: boolean;
+}
+
+model ListRunStepsResponse {
+ object: "list";
+ data: RunStepObject[];
+ first_id: string;
+ last_id: string;
+ has_more: boolean;
+}
+
+@maxItems(20)
+model CreateRunRequestTools is CreateRunRequestTool[];
+
+@oneOf
+@extension("x-oaiExpandable", true)
+union CreateRunRequestTool {
+ AssistantToolsCode,
+ AssistantToolsRetrieval,
+ AssistantToolsFunction
+}
+
+@oneOf
+@extension("x-oaiExpandable", true)
+union RunStepDetails {
+ RunStepDetailsMessageCreationObject,
+ RunStepDetailsToolCallsObject,
+}
+
+/** Details of the message creation by the run step. */
+model RunStepDetailsMessageCreationObject {
+ /** Always `message_creation`. */
+ type: "message_creation";
+
+ message_creation: {
+ /** The ID of the message that was created by this run step. */
+ message_id: string;
+ }
+}
+
+/** Details of the tool call. */
+model RunStepDetailsToolCallsObject {
+ /** Always `tool_calls`. */
+ type: "tool_calls";
+
+ /**
+ * An array of tool calls the run step was involved in. These can be associated with one of three
+ * types of tools: `code_interpreter`, `retrieval`, or `function`.
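+ * As a non-normative illustration (the ID value here is hypothetical), a `retrieval` entry
+ * built from the shapes defined below would serialize as:
+ * `{ "id": "call_abc123", "type": "retrieval", "retrieval": {} }`.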
+ */
+ tool_calls: RunStepDetailsToolCallsObjectToolCalls;
+}
+
+model RunStepDetailsToolCallsObjectToolCalls is RunStepDetailsToolCallsObjectToolCall[];
+
+@oneOf
+@extension("x-oaiExpandable", true)
+union RunStepDetailsToolCallsObjectToolCall {
+ RunStepDetailsToolCallsCodeObject,
+ RunStepDetailsToolCallsRetrievalObject,
+ RunStepDetailsToolCallsFunctionObject,
+}
+
+/** Details of the Code Interpreter tool call the run step was involved in. */
+model RunStepDetailsToolCallsCodeObject {
+ /** The ID of the tool call. */
+ id: string;
+
+ /**
+ * The type of tool call. This is always going to be `code_interpreter` for this type of tool
+ * call.
+ */
+ type: "code_interpreter";
+
+ /** The Code Interpreter tool call definition. */
+ code_interpreter: {
+ /** The input to the Code Interpreter tool call. */
+ input: string;
+
+ /**
+ * The outputs from the Code Interpreter tool call. Code Interpreter can output one or more
+ * items, including text (`logs`) or images (`image`). Each of these is represented by a
+ * different object type.
+ */
+ outputs: RunStepDetailsToolCallsCodeOutputs;
+ }
+}
+
+model RunStepDetailsToolCallsCodeOutputs is RunStepDetailsToolCallsCodeOutput[];
+
+@oneOf
+@extension("x-oaiExpandable", true)
+union RunStepDetailsToolCallsCodeOutput {
+ RunStepDetailsToolCallsCodeOutputLogsObject,
+ RunStepDetailsToolCallsCodeOutputImageObject,
+}
+
+/** Text output from the Code Interpreter tool call as part of a run step. */
+model RunStepDetailsToolCallsCodeOutputLogsObject {
+ /** Always `logs`. */
+ type: "logs";
+
+ /** The text output from the Code Interpreter tool call. */
+ logs: string;
+}
+
+model RunStepDetailsToolCallsCodeOutputImageObject {
+ /** Always `image`. */
+ type: "image";
+
+ image: {
+ /** The [file](/docs/api-reference/files) ID of the image. */
+ file_id: string;
+ }
+}
+
+model RunStepDetailsToolCallsRetrievalObject {
+ /** The ID of the tool call object. */
+ id: string;
+
+ /** The type of tool call. This is always going to be `retrieval` for this type of tool call. */
+ type: "retrieval";
+
+ /** For now, this is always going to be an empty object. */
+ @extension("x-oaiTypeLabel", "map")
+ retrieval: { }; // TODO: Is this the appropriate way to represent an empty object?
+}
+
+model RunStepDetailsToolCallsFunctionObject {
+ /** The ID of the tool call object. */
+ id: string;
+
+ /** The type of tool call. This is always going to be `function` for this type of tool call. */
+ type: "function";
+
+ /** The definition of the function that was called. */
+ function: {
+ /** The name of the function. */
+ name: string;
+
+ /** The arguments passed to the function. */
+ arguments: string;
+
+ /**
+ * The output of the function. This will be `null` if the outputs have not been
+ * [submitted](/docs/api-reference/runs/submitToolOutputs) yet.
+ */
+ output: string | null;
+ }
+}
+
+/**
+ * Usage statistics related to the run. This value will be `null` if the run is not in a terminal
+ * state (e.g. `in_progress`, `queued`, etc.).
+ */
+model RunCompletionUsage {
+ /** Number of completion tokens used over the course of the run. */
+ completion_tokens: safeint;
+
+ /** Number of prompt tokens used over the course of the run. */
+ prompt_tokens: safeint;
+
+ /** Total number of tokens used (prompt + completion). */
+ total_tokens: safeint;
+}
+
+/**
+ * Usage statistics related to the run step. This value will be `null` while the run step's status
+ * is `in_progress`.
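+ * As a sketch with illustrative values only: `{ "prompt_tokens": 123, "completion_tokens": 456,
+ * "total_tokens": 579 }`, where `total_tokens` is the sum of the other two counts.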
+ */ +model RunStepCompletionUsage { + /** Number of completion tokens used over the course of the run step. */ + completion_tokens: safeint; + + /** Number of prompt tokens used over the course of the run step. */ + prompt_tokens: safeint; + + /** Total number of tokens used (prompt + completion). */ + total_tokens: safeint; +} + +/** Represents an execution run on a [thread](/docs/api-reference/threads). */ +model RunObject { + /** The identifier, which can be referenced in API endpoints. */ + id: string; + + /** The object type, which is always `thread.run`. */ + object: "thread.run"; + + /** The Unix timestamp (in seconds) for when the run was created. */ + @encode("unixTimestamp", int32) + created_at: utcDateTime; + + /** + * The ID of the [thread](/docs/api-reference/threads) that was executed on as a part of this + * run. + */ + thread_id: string; + + /** The ID of the [assistant](/docs/api-reference/assistants) used for execution of this run. */ + assistant_id: string; + + /** + * The status of the run, which can be either `queued`, `in_progress`, `requires_action`, + * `cancelling`, `cancelled`, `failed`, `completed`, or `expired`. + */ + status: + | "queued" + | "in_progress" + | "requires_action" + | "cancelling" + | "cancelled" + | "failed" + | "completed" + | "expired"; + + /** + * Details on the action required to continue the run. Will be `null` if no action is + * required. + */ + required_action: { + /** For now, this is always `submit_tool_outputs`. */ + type: "submit_tool_outputs"; + + /** Details on the tool outputs needed for this run to continue. */ + submit_tool_outputs: { + /** A list of the relevant tool calls. */ + tool_calls: RunToolCallObject[]; + } + } | null; + + /** The last error associated with this run. Will be `null` if there are no errors. */ + last_error: { + /** One of `server_error` or `rate_limit_exceeded`. */ + code: "server_error" | "rate_limit_exceeded"; + + /** A human-readable description of the error. */ + message: string; + } | null; + + /** The Unix timestamp (in seconds) for when the run will expire. */ + @encode("unixTimestamp", int32) + expires_at: utcDateTime | null; // TODO: This is not nullable in the OpenAPI spec, but it is in practice. + + /** The Unix timestamp (in seconds) for when the run was started. */ + @encode("unixTimestamp", int32) + started_at: utcDateTime | null; + + /** The Unix timestamp (in seconds) for when the run was cancelled. */ + @encode("unixTimestamp", int32) + cancelled_at: utcDateTime | null; + + /** The Unix timestamp (in seconds) for when the run failed. */ + @encode("unixTimestamp", int32) + failed_at: utcDateTime | null; + + /** The Unix timestamp (in seconds) for when the run was completed. */ + @encode("unixTimestamp", int32) + completed_at: utcDateTime | null; + + /** The model that the [assistant](/docs/api-reference/assistants) used for this run. */ + `model`: string; + + /** The instructions that the [assistant](/docs/api-reference/assistants) used for this run. */ + instructions: string; + + /** The list of tools that the [assistant](/docs/api-reference/assistants) used for this run. */ + tools: CreateRunRequestTools; + + /** + * The list of [File](/docs/api-reference/files) IDs the + * [assistant](/docs/api-reference/assistants) used for this run. + */ + file_ids: string[] = []; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + * additional information about the object in a structured format. 
Keys can be a maximum of 64
+   * characters long and values can be a maximum of 512 characters long.
+   */
+  @extension("x-oaiTypeLabel", "map")
+  metadata: Record | null;
+
+  usage: RunCompletionUsage | null;
+}
+
+/** Represents a step in execution of a run. */
+model RunStepObject {
+  /** The identifier of the run step, which can be referenced in API endpoints. */
+  id: string;
+
+  /** The object type, which is always `thread.run.step`. */
+  object: "thread.run.step";
+
+  /** The Unix timestamp (in seconds) for when the run step was created. */
+  @encode("unixTimestamp", int32)
+  created_at: utcDateTime;
+
+  /** The ID of the [assistant](/docs/api-reference/assistants) associated with the run step. */
+  assistant_id: string;
+
+  /** The ID of the [thread](/docs/api-reference/threads) that was run. */
+  thread_id: string;
+
+  /** The ID of the [run](/docs/api-reference/runs) that this run step is a part of. */
+  run_id: string;
+
+  /** The type of run step, which can be either `message_creation` or `tool_calls`. */
+  type: "message_creation" | "tool_calls";
+
+  /**
+   * The status of the run step, which can be either `in_progress`, `cancelled`, `failed`,
+   * `completed`, or `expired`.
+   */
+  status: "in_progress" | "cancelled" | "failed" | "completed" | "expired";
+
+  /** The details of the run step. */
+  step_details: RunStepDetails;
+
+  /** The last error associated with this run step. Will be `null` if there are no errors. */
+  last_error: {
+    /** One of `server_error` or `rate_limit_exceeded`. */
+    code: "server_error" | "rate_limit_exceeded";
+
+    /** A human-readable description of the error. */
+    message: string;
+  } | null;
+
+  /**
+   * The Unix timestamp (in seconds) for when the run step expired. A step is considered expired
+   * if the parent run is expired.
+   */
+  @encode("unixTimestamp", int32)
+  expires_at: utcDateTime | null;
+
+  /** The Unix timestamp (in seconds) for when the run step was cancelled. */
+  @encode("unixTimestamp", int32)
+  cancelled_at: utcDateTime | null;
+
+  /** The Unix timestamp (in seconds) for when the run step failed. */
+  @encode("unixTimestamp", int32)
+  failed_at: utcDateTime | null;
+
+  /** The Unix timestamp (in seconds) for when the run step completed. */
+  @encode("unixTimestamp", int32)
+  completed_at: utcDateTime | null;
+
+  /**
+   * Set of 16 key-value pairs that can be attached to an object. This can be useful for storing
+   * additional information about the object in a structured format. Keys can be a maximum of 64
+   * characters long and values can be a maximum of 512 characters long.
+   */
+  @extension("x-oaiTypeLabel", "map")
+  metadata: Record | null;
+
+  usage: RunStepCompletionUsage | null;
+}
+
+/** Tool call objects */
+model RunToolCallObject {
+  /**
+   * The ID of the tool call. This ID must be referenced when you submit the tool outputs using
+   * the [Submit tool outputs to run](/docs/api-reference/runs/submitToolOutputs) endpoint.
+   */
+  id: string;
+
+  /** The type of tool call the output is required for. For now, this is always `function`. */
+  type: "function";
+
+  /** The function definition. */
+  function: {
+    /** The name of the function. */
+    name: string;
+
+    /** The arguments that the model expects you to pass to the function. */
+    arguments: string;
+  }
+}
\ No newline at end of file
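Every timestamp in the models above follows the same idiom: the property keeps the logical `utcDateTime` type while `@encode("unixTimestamp", int32)` makes the wire format an integer of epoch seconds (rendered as `type: integer` with `format: unixtime` in the generated OpenAPI). A minimal, self-contained sketch of the idiom; the `TimestampedExample` model is invented for illustration:

```typespec
/** Invented model that exists only to illustrate the timestamp idiom. */
model TimestampedExample {
  /** Required timestamp: epoch seconds on the wire, `utcDateTime` in the model. */
  @encode("unixTimestamp", int32)
  created_at: utcDateTime;

  /** Nullable timestamp, mirroring fields such as `started_at` and `failed_at` above. */
  @encode("unixTimestamp", int32)
  completed_at: utcDateTime | null;
}
```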
diff --git a/runs/operations.tsp b/runs/operations.tsp
new file mode 100644
index 000000000..69a96e03c
--- /dev/null
+++ b/runs/operations.tsp
@@ -0,0 +1,186 @@
+import "@typespec/http";
+import "@typespec/openapi";
+
+import "../common/models.tsp";
+import "../common/errors.tsp";
+import "./models.tsp";
+
+using TypeSpec.Http;
+using TypeSpec.OpenAPI;
+
+namespace OpenAI;
+
+@route("threads")
+interface Runs {
+  @route("runs")
+  @post
+  @operationId("createThreadAndRun")
+  @tag("Assistants")
+  @summary("Create a thread and run it in one request.")
+  createThreadAndRun(
+    @body threadAndRun: CreateThreadAndRunRequest;
+  ): RunObject | ErrorResponse;
+
+  @route("{thread_id}/runs")
+  @post
+  @operationId("createRun")
+  @tag("Assistants")
+  @summary("Create a run.")
+  createRun(
+    /** The ID of the thread to run. */
+    @path thread_id: string,
+
+    @body run: CreateRunRequest,
+  ): RunObject | ErrorResponse;
+
+  @route("{thread_id}/runs")
+  @get
+  @operationId("listRuns")
+  @tag("Assistants")
+  @summary("Returns a list of runs belonging to a thread.")
+  listRuns(
+    /** The ID of the thread the run belongs to. */
+    @path thread_id: string,
+
+    /**
+     * A limit on the number of objects to be returned. Limit can range between 1 and 100, and the
+     * default is 20.
+     */
+    @query limit?: int32 = 20;
+
+    /**
+     * Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc`
+     * for descending order.
+     */
+    @query order?: ListOrder = ListOrder.desc;
+
+    /**
+     * A cursor for use in pagination. `after` is an object ID that defines your place in the list.
+     * For instance, if you make a list request and receive 100 objects, ending with obj_foo, your
+     * subsequent call can include after=obj_foo in order to fetch the next page of the list.
+     */
+    @query after?: string;
+
+    /**
+     * A cursor for use in pagination. `before` is an object ID that defines your place in the list.
+     * For instance, if you make a list request and receive 100 objects, ending with obj_foo, your
+     * subsequent call can include before=obj_foo in order to fetch the previous page of the list.
+     */
+    @query before?: string;
+  ): ListRunsResponse | ErrorResponse;
+
+  @route("{thread_id}/runs/{run_id}")
+  @get
+  @operationId("getRun")
+  @tag("Assistants")
+  @summary("Retrieves a run.")
+  getRun(
+    /** The ID of the [thread](/docs/api-reference/threads) that was run. */
+    @path thread_id: string,
+
+    /** The ID of the run to retrieve. */
+    @path run_id: string,
+  ): RunObject | ErrorResponse;
+
+  @route("{thread_id}/runs/{run_id}")
+  @post
+  @operationId("modifyRun")
+  @tag("Assistants")
+  @summary("Modifies a run.")
+  modifyRun(
+    /** The ID of the [thread](/docs/api-reference/threads) that was run. */
+    @path thread_id: string,
+
+    /** The ID of the run to modify. */
+    @path run_id: string,
+
+    @body run: ModifyRunRequest,
+  ): RunObject | ErrorResponse;
+
+  @route("{thread_id}/runs/{run_id}/cancel")
+  @post
+  @operationId("cancelRun")
+  @tag("Assistants")
+  @summary("Cancels a run that is `in_progress`.")
+  cancelRun(
+    /** The ID of the thread to which this run belongs. */
+    @path thread_id: string,
+
+    /** The ID of the run to cancel. */
+    @path run_id: string,
+  ): RunObject | ErrorResponse;
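The `listRuns` operation above and `listRunSteps` below repeat the same four cursor-pagination query parameters. One possible consolidation, sketched here only as an assumption (this is not how the spec is currently written, and it relies on the file's existing `TypeSpec.Http` imports being in scope), would spread a shared parameter model into each list operation:

```typespec
/** Hypothetical shared query parameters for the cursor-paginated list operations. */
model PageQueryParams {
  /** A limit on the number of objects to be returned (1 to 100, default 20). */
  @query limit?: int32 = 20;

  /** Sort order by the `created_at` timestamp of the objects. */
  @query order?: ListOrder = ListOrder.desc;

  /** A cursor: fetch the page of results after this object ID. */
  @query after?: string;

  /** A cursor: fetch the page of results before this object ID. */
  @query before?: string;
}

// Usage inside the interface, spreading the model into the parameter list:
// listRuns(@path thread_id: string, ...PageQueryParams): ListRunsResponse | ErrorResponse;
```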
+
+  @route("{thread_id}/runs/{run_id}/submit_tool_outputs")
+  @post
+  @operationId("submitToolOuputsToRun")
+  @tag("Assistants")
+  @summary("""
+    When a run has the `status: "requires_action"` and `required_action.type` is
+    `submit_tool_outputs`, this endpoint can be used to submit the outputs from the tool calls once
+    they're all completed. All outputs must be submitted in a single request.
+    """)
+  submitToolOuputsToRun(
+    /** The ID of the [thread](/docs/api-reference/threads) to which this run belongs. */
+    @path thread_id: string,
+
+    /** The ID of the run that requires the tool output submission. */
+    @path run_id: string,
+
+    @body submitToolOutputsRun: SubmitToolOutputsRunRequest,
+  ): RunObject | ErrorResponse;
+
+  @route("{thread_id}/runs/{run_id}/steps")
+  @get
+  @operationId("listRunSteps")
+  @tag("Assistants")
+  @summary("Returns a list of run steps belonging to a run.")
+  listRunSteps(
+    /** The ID of the thread the run and run steps belong to. */
+    @path thread_id: string,
+
+    /** The ID of the run the run steps belong to. */
+    @path run_id: string,
+
+    /**
+     * A limit on the number of objects to be returned. Limit can range between 1 and 100, and the
+     * default is 20.
+     */
+    @query limit?: int32 = 20;
+
+    /**
+     * Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc`
+     * for descending order.
+     */
+    @query order?: ListOrder = ListOrder.desc;
+
+    /**
+     * A cursor for use in pagination. `after` is an object ID that defines your place in the list.
+     * For instance, if you make a list request and receive 100 objects, ending with obj_foo, your
+     * subsequent call can include after=obj_foo in order to fetch the next page of the list.
+     */
+    @query after?: string;
+
+    /**
+     * A cursor for use in pagination. `before` is an object ID that defines your place in the list.
+     * For instance, if you make a list request and receive 100 objects, ending with obj_foo, your
+     * subsequent call can include before=obj_foo in order to fetch the previous page of the list.
+     */
+    @query before?: string;
+  ): ListRunStepsResponse | ErrorResponse;
+
+  @route("{thread_id}/runs/{run_id}/steps/{step_id}")
+  @get
+  @operationId("getRunStep")
+  @tag("Assistants")
+  @summary("Retrieves a run step.")
+  getRunStep(
+    /** The ID of the thread to which the run and run step belong. */
+    @path thread_id: string,
+
+    /** The ID of the run to which the run step belongs. */
+    @path run_id: string,
+
+    /** The ID of the run step to retrieve. */
+    @path step_id: string,
+  ): RunStepObject | ErrorResponse;
+}
\ No newline at end of file
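Each operation in `runs/operations.tsp` returns a union of the success model and `ErrorResponse`, which the OpenAPI 3 emitter splits into the `200` response and the catch-all `default` error response seen in the generated `openapi.yaml`. A self-contained sketch of that convention, assuming the error model is declared with `@error` as is conventional (the `Widget` names below are invented):

```typespec
import "@typespec/http";

using TypeSpec.Http;

/** Invented success model. */
model Widget {
  id: string;
}

/** Invented error envelope; `@error` routes it to the `default` response. */
@error
model WidgetError {
  message: string;
}

// The union return type emits as: 200 -> Widget, default -> WidgetError.
@route("/widgets/{widget_id}")
@get
op getWidget(@path widget_id: string): Widget | WidgetError;
```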
diff --git a/threads/main.tsp b/threads/main.tsp
new file mode 100644
index 000000000..6a754bcb5
--- /dev/null
+++ b/threads/main.tsp
@@ -0,0 +1 @@
+import "./operations.tsp";
\ No newline at end of file
diff --git a/threads/meta.tsp b/threads/meta.tsp
new file mode 100644
index 000000000..9a6edf95b
--- /dev/null
+++ b/threads/meta.tsp
@@ -0,0 +1,22 @@
+import "./models.tsp";
+
+import "@typespec/openapi";
+
+using TypeSpec.OpenAPI;
+
+namespace OpenAI;
+
+@@extension(OpenAI.ThreadObject,
+  "x-oaiMeta",
+  """
+    name: The thread object
+    beta: true
+    example: |
+      {
+        "id": "thread_abc123",
+        "object": "thread",
+        "created_at": 1698107661,
+        "metadata": {}
+      }
+    """
+);
\ No newline at end of file
diff --git a/threads/models.tsp b/threads/models.tsp
new file mode 100644
index 000000000..4bdb28fae
--- /dev/null
+++ b/threads/models.tsp
@@ -0,0 +1,55 @@
+import "../common/models.tsp";
+import "../messages/models.tsp";
+
+using TypeSpec.Http;
+using TypeSpec.OpenAPI;
+
+namespace OpenAI;
+
+model CreateThreadRequest {
+  /** A list of [messages](/docs/api-reference/messages) to start the thread with. */
+  messages?: CreateMessageRequest[];
+
+  /**
+   * Set of 16 key-value pairs that can be attached to an object. This can be useful for storing
+   * additional information about the object in a structured format. Keys can be a maximum of 64
+   * characters long and values can be a maximum of 512 characters long.
+   */
+  metadata?: Record | null;
+}
+
+model ModifyThreadRequest {
+  /**
+   * Set of 16 key-value pairs that can be attached to an object. This can be useful for storing
+   * additional information about the object in a structured format. Keys can be a maximum of 64
+   * characters long and values can be a maximum of 512 characters long.
+   */
+  metadata?: Record | null;
+}
+
+model DeleteThreadResponse {
+  id: string;
+  deleted: boolean;
+  object: "thread.deleted";
+}
+
+/** Represents a thread that contains [messages](/docs/api-reference/messages). */
+model ThreadObject {
+  /** The identifier, which can be referenced in API endpoints. */
+  id: string;
+
+  /** The object type, which is always `thread`. */
+  object: "thread";
+
+  /** The Unix timestamp (in seconds) for when the thread was created. */
+  @encode("unixTimestamp", int32)
+  created_at: utcDateTime;
+
+  /**
+   * Set of 16 key-value pairs that can be attached to an object. This can be useful for storing
+   * additional information about the object in a structured format. Keys can be a maximum of 64
+   * characters long and values can be a maximum of 512 characters long.
+   */
+  @extension("x-oaiTypeLabel", "map")
+  metadata: Record | null;
+}
\ No newline at end of file
diff --git a/threads/operations.tsp b/threads/operations.tsp
new file mode 100644
index 000000000..fb1dc5d10
--- /dev/null
+++ b/threads/operations.tsp
@@ -0,0 +1,52 @@
+import "@typespec/http";
+import "@typespec/openapi";
+
+import "../common/errors.tsp";
+import "./models.tsp";
+
+using TypeSpec.Http;
+using TypeSpec.OpenAPI;
+
+namespace OpenAI;
+
+@route("/threads")
+interface Threads {
+  @post
+  @operationId("createThread")
+  @tag("Assistants")
+  @summary("Create a thread.")
+  createThread(
+    @body thread: CreateThreadRequest,
+  ): ThreadObject | ErrorResponse;
+
+  @route("{thread_id}")
+  @get
+  @operationId("getThread")
+  @tag("Assistants")
+  @summary("Retrieves a thread.")
+  getThread(
+    /** The ID of the thread to retrieve.
*/ + @path thread_id: string, + ): ThreadObject | ErrorResponse; + + @route("{thread_id}") + @post + @operationId("modifyThread") + @tag("Assistants") + @summary("Modifies a thread.") + modifyThread( + /** The ID of the thread to modify. Only the `metadata` can be modified. */ + @path thread_id: string, + @body thread: ModifyThreadRequest, + ): ThreadObject | ErrorResponse; + + @route("{thread_id}") + @delete + @operationId("deleteThread") + @tag("Assistants") + @summary("Delete a thread.") + deleteThread( + /** The ID of the thread to delete. */ + @path thread_id: string, + ): DeleteThreadResponse | ErrorResponse; +} diff --git a/tsp-output/@typespec/openapi3/openapi.yaml b/tsp-output/@typespec/openapi3/openapi.yaml index d37490680..172e5a50d 100644 --- a/tsp-output/@typespec/openapi3/openapi.yaml +++ b/tsp-output/@typespec/openapi3/openapi.yaml @@ -1,17 +1,26 @@ openapi: 3.0.0 info: title: OpenAI API - version: 2.0.0 description: The OpenAI REST API. Please see https://platform.openai.com/docs/api-reference for more details. + version: 0.0.0 tags: - - name: OpenAI + - name: Audio + - name: Assistants + - name: Chat + - name: Completions + - name: Embeddings + - name: Files + - name: Fine-tuning + - name: Images + - name: Models + - name: Moderations paths: - /audio/transcriptions: + /assistants: post: tags: - - OpenAI - operationId: createTranscription - summary: Transcribes audio into the input language. + - Assistants + operationId: createAssistant + summary: Create an assistant with a model and instructions. parameters: [] responses: '200': @@ -19,7 +28,7 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/CreateTranscriptionResponse' + $ref: '#/components/schemas/AssistantObject' default: description: An unexpected error response. content: @@ -29,73 +38,110 @@ paths: requestBody: required: true content: - multipart/form-data: + application/json: schema: - $ref: '#/components/schemas/CreateTranscriptionRequest' - /audio/translations: - post: + $ref: '#/components/schemas/CreateAssistantRequest' + get: tags: - - OpenAI - operationId: createTranslation - summary: Transcribes audio into the input language. - parameters: [] + - Assistants + operationId: listAssistants + summary: Returns a list of assistants. + parameters: + - name: limit + in: query + required: false + description: |- + A limit on the number of objects to be returned. Limit can range between 1 and 100, and the + default is 20. + schema: + type: integer + format: int32 + default: 20 + - name: order + in: query + required: false + description: |- + Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` + for descending order. + schema: + $ref: '#/components/schemas/ListOrder' + default: desc + - name: after + in: query + required: false + description: |- + A cursor for use in pagination. `after` is an object ID that defines your place in the list. + For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + subsequent call can include after=obj_foo in order to fetch the next page of the list. + schema: + type: string + - name: before + in: query + required: false + description: |- + A cursor for use in pagination. `before` is an object ID that defines your place in the list. + For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + subsequent call can include before=obj_foo in order to fetch the previous page of the list. 
+ schema: + type: string responses: '200': description: The request has succeeded. content: application/json: schema: - $ref: '#/components/schemas/CreateTranslationResponse' + $ref: '#/components/schemas/ListAssistantsResponse' default: description: An unexpected error response. content: application/json: schema: $ref: '#/components/schemas/ErrorResponse' - requestBody: - required: true - content: - multipart/form-data: - schema: - $ref: '#/components/schemas/CreateTranslationRequest' - /chat/completions: - post: + /assistants/{assistant_id}: + get: tags: - - OpenAI - operationId: createChatCompletion - parameters: [] + - Assistants + operationId: getAssistant + summary: Retrieves an assistant. + parameters: + - name: assistant_id + in: path + required: true + description: The ID of the assistant to retrieve. + schema: + type: string responses: '200': description: The request has succeeded. content: application/json: schema: - $ref: '#/components/schemas/CreateChatCompletionResponse' + $ref: '#/components/schemas/AssistantObject' default: description: An unexpected error response. content: application/json: schema: $ref: '#/components/schemas/ErrorResponse' - requestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/CreateChatCompletionRequest' - /completions: post: tags: - - OpenAI - operationId: createCompletion - parameters: [] + - Assistants + operationId: modifyAssistant + summary: Modifies an assistant. + parameters: + - name: assistant_id + in: path + required: true + description: The ID of the assistant to modify. + schema: + type: string responses: '200': description: The request has succeeded. content: application/json: schema: - $ref: '#/components/schemas/CreateCompletionResponse' + $ref: '#/components/schemas/AssistantObject' default: description: An unexpected error response. content: @@ -107,195 +153,54 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/CreateCompletionRequest' - x-oaiMeta: - name: Create chat completion - group: chat - returns: |- - Returns a [chat completion](/docs/api-reference/chat/object) object, or a streamed sequence of - [chat completion chunk](/docs/api-reference/chat/streaming) objects if the request is streamed. - path: create - examples: - - title: No streaming - request: - curl: |- - curl https://api.openai.com/v1/chat/completions \ - -H "Content-Type: application/json" \ - -H "Authorization: Bearer $OPENAI_API_KEY" \ - -d '{ - "model": "VAR_model_id", - "messages": [ - { - "role": "system", - "content": "You are a helpful assistant." - }, - { - "role": "user", - "content": "Hello!" 
- } - ] - python: |- - import os - import openai - openai.api_key = os.getenv("OPENAI_API_KEY") - - completion = openai.ChatCompletion.create( - model="VAR_model_id", - messages=[ - {"role": "system", "content": "You are a helpful assistant."}, - {"role": "user", "content": "Hello!"} - ] - ) - - print(completion.choices[0].message) - node.js: |- - import OpenAI from "openai"; - - const openai = new OpenAI(); - - async function main() { - const completion = await openai.chat.completions.create({ - messages: [{ role: "system", content: "string" }], - model: "VAR_model_id", - }); - - console.log(completion.choices[0]); - } - - main(); - response: |- - { - "id": "chatcmpl-123", - "object": "chat.completion", - "created": 1677652288, - "model": "gpt-3.5-turbo-0613", - "choices": [{ - "index": 0, - "message": { - "role": "assistant", - "content": " - - Hello there, how may I assist you today?", - }, - "finish_reason": "stop" - }], - "usage": { - "prompt_tokens": 9, - "completion_tokens": 12, - "total_tokens": 21 - } - } - - title: Streaming - request: - curl: |- - curl https://api.openai.com/v1/chat/completions \ - -H "Content-Type: application/json" \ - -H "Authorization: Bearer $OPENAI_API_KEY" \ - -d '{ - "model": "VAR_model_id", - "messages": [ - { - "role": "system", - "content": "You are a helpful assistant." - }, - { - "role": "user", - "content": "Hello!" - } - ], - "stream": true - }' - python: |- - import os - import openai - openai.api_key = os.getenv("OPENAI_API_KEY") - - completion = openai.ChatCompletion.create( - model="VAR_model_id", - messages=[ - {"role": "system", "content": "You are a helpful assistant."}, - {"role": "user", "content": "Hello!"} - ], - stream=True - ) - - for chunk in completion: - print(chunk.choices[0].delta) - node.js: |- - import OpenAI from "openai"; - - const openai = new OpenAI(); - - async function main() { - const completion = await openai.chat.completions.create({ - model: "VAR_model_id", - messages: [ - {"role": "system", "content": "You are a helpful assistant."}, - {"role": "user", "content": "Hello!"} - ], - stream: true, - }); - - for await (const chunk of completion) { - console.log(chunk.choices[0].delta.content); - } - } - - main(); - response: |- - { - "id": "chatcmpl-123", - "object": "chat.completion.chunk", - "created": 1677652288, - "model": "gpt-3.5-turbo", - "choices": [{ - "index": 0, - "delta": { - "content": "Hello", - }, - "finish_reason": "stop" - }] - } - /edits: - post: + $ref: '#/components/schemas/ModifyAssistantRequest' + delete: tags: - - OpenAI - operationId: createEdit - parameters: [] + - Assistants + operationId: deleteAssistant + summary: Delete an assistant. + parameters: + - name: assistant_id + in: path + required: true + description: The ID of the assistant to delete. + schema: + type: string responses: '200': description: The request has succeeded. content: application/json: schema: - $ref: '#/components/schemas/CreateEditResponse' + $ref: '#/components/schemas/DeleteAssistantResponse' default: description: An unexpected error response. content: application/json: schema: $ref: '#/components/schemas/ErrorResponse' - requestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/CreateEditRequest' - deprecated: true - /embeddings: + /assistants/{assistant_id}/files: post: tags: - - OpenAI - operationId: createEmbedding - summary: Creates an embedding vector representing the input text. 
- parameters: [] + - Assistants + operationId: createAssistantFile + summary: |- + Create an assistant file by attaching a [File](/docs/api-reference/files) to a + [assistant](/docs/api-reference/assistants). + parameters: + - name: assistant_id + in: path + required: true + description: The ID of the assistant for which to create a file. + schema: + type: string responses: '200': description: The request has succeeded. content: application/json: schema: - $ref: '#/components/schemas/CreateEmbeddingResponse' + $ref: '#/components/schemas/AssistantFileObject' default: description: An unexpected error response. content: @@ -307,63 +212,86 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/CreateEmbeddingRequest' - /files: + $ref: '#/components/schemas/CreateAssistantFileRequest' get: tags: - - OpenAI - operationId: listFiles - summary: Returns a list of files that belong to the user's organization. - parameters: [] - responses: - '200': - description: The request has succeeded. - content: - application/json: - schema: - $ref: '#/components/schemas/ListFilesResponse' - default: - description: An unexpected error response. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - post: - tags: - - OpenAI - operationId: createFile - summary: Returns a list of files that belong to the user's organization. - parameters: [] + - Assistants + operationId: listAssistantFiles + summary: Returns a list of assistant files. + parameters: + - name: assistant_id + in: path + required: true + description: The ID of the assistant the file belongs to. + schema: + type: string + - name: limit + in: query + required: false + description: |- + A limit on the number of objects to be returned. Limit can range between 1 and 100, and the + default is 20. + schema: + type: integer + format: int32 + default: 20 + - name: order + in: query + required: false + description: |- + Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` + for descending order. + schema: + $ref: '#/components/schemas/ListOrder' + default: desc + - name: after + in: query + required: false + description: |- + A cursor for use in pagination. `after` is an object ID that defines your place in the list. + For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + subsequent call can include after=obj_foo in order to fetch the next page of the list. + schema: + type: string + - name: before + in: query + required: false + description: |- + A cursor for use in pagination. `before` is an object ID that defines your place in the list. + For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + subsequent call can include before=obj_foo in order to fetch the previous page of the list. + schema: + type: string responses: '200': description: The request has succeeded. content: application/json: schema: - $ref: '#/components/schemas/OpenAIFile' + $ref: '#/components/schemas/ListAssistantFilesResponse' default: description: An unexpected error response. content: application/json: schema: $ref: '#/components/schemas/ErrorResponse' - requestBody: - required: true - content: - multipart/form-data: - schema: - $ref: '#/components/schemas/CreateFileRequest' - /files/files/{file_id}: - post: + /assistants/{assistant_id}/files/{file_id}: + get: tags: - - OpenAI - operationId: retrieveFile - summary: Returns information about a specific file. 
+ - Assistants + operationId: getAssistantFile + summary: Retrieves an assistant file. parameters: + - name: assistant_id + in: path + required: true + description: The ID of the assistant the file belongs to. + schema: + type: string - name: file_id in: path required: true - description: The ID of the file to use for this request. + description: The ID of the file we're getting. schema: type: string responses: @@ -372,7 +300,7 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/OpenAIFile' + $ref: '#/components/schemas/AssistantFileObject' default: description: An unexpected error response. content: @@ -381,14 +309,20 @@ paths: $ref: '#/components/schemas/ErrorResponse' delete: tags: - - OpenAI - operationId: deleteFile - summary: Delete a file + - Assistants + operationId: deleteAssistantFile + summary: Delete an assistant file. parameters: + - name: assistant_id + in: path + required: true + description: The ID of the assistant the file belongs to. + schema: + type: string - name: file_id in: path required: true - description: The ID of the file to use for this request. + description: The ID of the file to delete. schema: type: string responses: @@ -397,31 +331,55 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/DeleteFileResponse' + $ref: '#/components/schemas/DeleteAssistantFileResponse' default: description: An unexpected error response. content: application/json: schema: $ref: '#/components/schemas/ErrorResponse' - /files/files/{file_id}/content: - get: + /audio/speech: + post: tags: - - OpenAI - operationId: downloadFile - summary: Returns the contents of the specified file. - parameters: - - name: file_id - in: path - required: true - description: The ID of the file to use for this request. - schema: - type: string + - Audio + operationId: createSpeech + summary: Generates audio from the input text. + parameters: [] + responses: + '200': + description: The request has succeeded. + headers: + Transfer-Encoding: + required: false + description: chunked + schema: + type: string + content: + application/octet-stream: + schema: + type: string + format: binary + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CreateSpeechRequest' + /audio/transcriptions: + post: + tags: + - Audio + operationId: createTranscription + summary: Transcribes audio into the input language. + parameters: [] responses: '200': description: The request has succeeded. content: application/json: + schema: + $ref: '#/components/schemas/CreateTranscriptionResponse' + text/plain: schema: type: string default: @@ -430,17 +388,18 @@ paths: application/json: schema: $ref: '#/components/schemas/ErrorResponse' - /fine-tunes: + requestBody: + required: true + content: + multipart/form-data: + schema: + $ref: '#/components/schemas/CreateTranscriptionRequestMultiPart' + /audio/translations: post: tags: - - OpenAI - operationId: createFineTune - summary: |- - Creates a job that fine-tunes a specified model from a given dataset. - - Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. - - [Learn more about fine-tuning](/docs/guides/legacy-fine-tuning) + - Audio + operationId: createTranslation + summary: Translates audio into English.. 
parameters: [] responses: '200': @@ -448,7 +407,10 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/FineTune' + $ref: '#/components/schemas/CreateTranslationResponse' + text/plain: + schema: + type: string default: description: An unexpected error response. content: @@ -458,15 +420,15 @@ paths: requestBody: required: true content: - application/json: + multipart/form-data: schema: - $ref: '#/components/schemas/CreateFineTuneRequest' - deprecated: true - get: + $ref: '#/components/schemas/CreateTranslationRequestMultiPart' + /chat/completions: + post: tags: - - OpenAI - operationId: listFineTunes - summary: List your organization's fine-tuning jobs + - Chat + operationId: createChatCompletion + summary: Creates a model response for the given chat conversation. parameters: [] responses: '200': @@ -474,55 +436,141 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/ListFineTunesResponse' + $ref: '#/components/schemas/CreateChatCompletionResponse' default: description: An unexpected error response. content: application/json: schema: $ref: '#/components/schemas/ErrorResponse' - deprecated: true - /fine-tunes/{fine_tune_id}: - get: + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CreateChatCompletionRequest' + /completions: + post: tags: - - OpenAI - operationId: retrieveFineTune - summary: |- - Gets info about the fine-tune job. - - [Learn more about fine-tuning](/docs/guides/legacy-fine-tuning) - parameters: - - name: fine_tune_id - in: path - required: true - description: The ID of the fine-tune job - schema: - type: string + - Completions + operationId: createCompletion + summary: Creates a completion for the provided prompt and parameters. + parameters: [] responses: '200': description: The request has succeeded. content: application/json: schema: - $ref: '#/components/schemas/FineTune' + $ref: '#/components/schemas/CreateCompletionResponse' default: description: An unexpected error response. content: application/json: schema: $ref: '#/components/schemas/ErrorResponse' - deprecated: true - /fine-tunes/{fine_tune_id}/cancel: + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CreateCompletionRequest' + /embeddings: post: tags: - - OpenAI - operationId: cancelFineTune - summary: Immediately cancel a fine-tune job. + - Embeddings + operationId: createEmbedding + summary: Creates an embedding vector representing the input text. + parameters: [] + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/CreateEmbeddingResponse' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CreateEmbeddingRequest' + /files: + post: + tags: + - Files + operationId: createFile + summary: |- + Upload a file that can be used across various endpoints. The size of all the files uploaded by + one organization can be up to 100 GB. + + The size of individual files can be a maximum of 512 MB or 2 million tokens for Assistants. See + the [Assistants Tools guide](/docs/assistants/tools) to learn more about the types of files + supported. The Fine-tuning API only supports `.jsonl` files. + + Please [contact us](https://help.openai.com/) if you need to increase these storage limits. 
+ parameters: [] + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/OpenAIFile' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + requestBody: + required: true + content: + multipart/form-data: + schema: + $ref: '#/components/schemas/CreateFileRequestMultiPart' + get: + tags: + - Files + operationId: listFiles + summary: Returns a list of files that belong to the user's organization. + parameters: + - name: purpose + in: query + required: false + description: Only return files with the given purpose. + schema: + type: string + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/ListFilesResponse' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /files/{file_id}: + get: + tags: + - Files + operationId: retrieveFile + summary: Returns information about a specific file. parameters: - - name: fine_tune_id + - name: file_id in: path required: true - description: The ID of the fine-tune job to cancel + description: The ID of the file to use for this request. schema: type: string responses: @@ -531,65 +579,73 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/FineTune' + $ref: '#/components/schemas/OpenAIFile' default: description: An unexpected error response. content: application/json: schema: $ref: '#/components/schemas/ErrorResponse' - deprecated: true - /fine-tunes/{fine_tune_id}/events: - get: + delete: tags: - - OpenAI - operationId: listFineTuneEvents - summary: Get fine-grained status updates for a fine-tune job. + - Files + operationId: deleteFile + summary: Delete a file parameters: - - name: fine_tune_id + - name: file_id in: path required: true - description: The ID of the fine-tune job to get events for. + description: The ID of the file to use for this request. schema: type: string - - name: stream - in: query - required: false - description: |- - Whether to stream events for the fine-tune job. If set to true, events will be sent as - data-only - [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) - as they become available. The stream will terminate with a `data: [DONE]` message when the - job is finished (succeeded, cancelled, or failed). - - If set to false, only events generated so far will be returned. + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/DeleteFileResponse' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /files/{file_id}/content: + get: + tags: + - Files + operationId: downloadFile + summary: Returns the contents of the specified file. + parameters: + - name: file_id + in: path + required: true + description: The ID of the file to use for this request. schema: - type: boolean - default: false + type: string responses: '200': description: The request has succeeded. content: application/json: schema: - $ref: '#/components/schemas/ListFineTuneEventsResponse' + type: string default: description: An unexpected error response. 
content: application/json: schema: $ref: '#/components/schemas/ErrorResponse' - deprecated: true /fine_tuning/jobs: post: tags: - - OpenAI + - Fine-tuning operationId: createFineTuningJob - description: |- - Creates a job that fine-tunes a specified model from a given dataset. + summary: |- + Creates a fine-tuning job which begins the process of creating a new model from a given dataset. - Response includes details of the enqueued job including job status and the name of the - fine-tuned models once complete. + Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. [Learn more about fine-tuning](/docs/guides/fine-tuning) parameters: [] @@ -614,8 +670,9 @@ paths: $ref: '#/components/schemas/CreateFineTuningJobRequest' get: tags: - - OpenAI + - Fine-tuning operationId: listPaginatedFineTuningJobs + summary: List your organization's fine-tuning jobs parameters: - name: after in: query @@ -629,7 +686,7 @@ paths: description: Number of fine-tuning jobs to retrieve. schema: type: integer - format: int64 + format: int32 default: 20 responses: '200': @@ -647,7 +704,7 @@ paths: /fine_tuning/jobs/{fine_tuning_job_id}: get: tags: - - OpenAI + - Fine-tuning operationId: retrieveFineTuningJob summary: |- Get info about a fine-tuning job. @@ -657,6 +714,7 @@ paths: - name: fine_tuning_job_id in: path required: true + description: The ID of the fine-tuning job. schema: type: string responses: @@ -675,7 +733,7 @@ paths: /fine_tuning/jobs/{fine_tuning_job_id}/cancel: post: tags: - - OpenAI + - Fine-tuning operationId: cancelFineTuningJob summary: Immediately cancel a fine-tune job. parameters: @@ -701,7 +759,7 @@ paths: /fine_tuning/jobs/{fine_tuning_job_id}/events: get: tags: - - OpenAI + - Fine-tuning operationId: listFineTuningEvents summary: Get status updates for a fine-tuning job. parameters: @@ -723,6 +781,7 @@ paths: description: Number of events to retrieve. schema: type: integer + format: int32 default: 20 responses: '200': @@ -740,7 +799,7 @@ paths: /images/edits: post: tags: - - OpenAI + - Images operationId: createImageEdit summary: Creates an edited or extended image given an original image and a prompt. parameters: [] @@ -762,11 +821,11 @@ paths: content: multipart/form-data: schema: - $ref: '#/components/schemas/CreateImageEditRequest' + $ref: '#/components/schemas/CreateImageEditRequestMultiPart' /images/generations: post: tags: - - OpenAI + - Images operationId: createImage summary: Creates an image given a prompt parameters: [] @@ -792,7 +851,7 @@ paths: /images/variations: post: tags: - - OpenAI + - Images operationId: createImageVariation summary: Creates an edited or extended image given an original image and a prompt. parameters: [] @@ -814,11 +873,11 @@ paths: content: multipart/form-data: schema: - $ref: '#/components/schemas/CreateImageVariationRequest' + $ref: '#/components/schemas/CreateImageVariationRequestMultiPart' /models: get: tags: - - OpenAI + - Models operationId: listModels summary: |- Lists the currently available models, and provides basic information about each one such as the @@ -840,7 +899,7 @@ paths: /models/{model}: get: tags: - - OpenAI + - Models operationId: retrieveModel summary: |- Retrieves a model instance, providing basic information about the model such as the owner and @@ -867,7 +926,7 @@ paths: $ref: '#/components/schemas/ErrorResponse' delete: tags: - - OpenAI + - Models operationId: deleteModel summary: Delete a fine-tuned model. 
You must have the Owner role in your organization to delete a model. parameters: @@ -893,7 +952,7 @@ paths: /moderations: post: tags: - - OpenAI + - Moderations operationId: createModeration summary: Classifies if text violates OpenAI's Content Policy parameters: [] @@ -916,2041 +975,4593 @@ paths: application/json: schema: $ref: '#/components/schemas/CreateModerationRequest' -security: - - BearerAuth: [] -components: - schemas: - ChatCompletionFunctionCallOption: - type: object - required: - - name - properties: - name: - type: string - description: The name of the function to call. - ChatCompletionFunctionParameters: - type: object - additionalProperties: {} - ChatCompletionFunctions: - type: object - required: - - name - - parameters - properties: - name: - type: string - description: |- - The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and - dashes, with a maximum length of 64. - description: - type: string - description: |- - A description of what the function does, used by the model to choose when and how to call the - function. - parameters: - allOf: - - $ref: '#/components/schemas/ChatCompletionFunctionParameters' - description: |- - The parameters the functions accepts, described as a JSON Schema object. See the - [guide](/docs/guides/gpt/function-calling) for examples, and the - [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation - about the format.\n\nTo describe a function that accepts no parameters, provide the value - `{\"type\": \"object\", \"properties\": {}}`. - ChatCompletionRequestMessage: - type: object - required: - - role - - content - properties: - role: - type: string - enum: - - system - - user - - assistant - - function - description: The role of the messages author. One of `system`, `user`, `assistant`, or `function`. + /threads: + post: + tags: + - Assistants + operationId: createThread + summary: Create a thread. + parameters: [] + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/ThreadObject' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + requestBody: + required: true content: - type: string - nullable: true - description: |- - The contents of the message. `content` is required for all messages, and may be null for - assistant messages with function calls. - name: - type: string - description: |- - The name of the author of this message. `name` is required if role is `function`, and it - should be the name of the function whose response is in the `content`. May contain a-z, - A-Z, 0-9, and underscores, with a maximum length of 64 characters. - function_call: - type: object - description: The name and arguments of a function that should be called, as generated by the model. - required: - - name - - arguments - properties: - name: - type: string - description: The name of the function to call. - arguments: - type: string - description: |- - The arguments to call the function with, as generated by the model in JSON format. Note that - the model does not always generate valid JSON, and may hallucinate parameters not defined by - your function schema. Validate the arguments in your code before calling your function. 
- ChatCompletionResponseMessage: - type: object - required: - - role - - content - properties: - role: - type: string - enum: - - system - - user - - assistant - - function - description: The role of the author of this message. + application/json: + schema: + $ref: '#/components/schemas/CreateThreadRequest' + /threads/runs: + post: + tags: + - Assistants + operationId: createThreadAndRun + summary: Create a thread and run it in one request. + parameters: [] + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/RunObject' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + requestBody: + required: true content: - type: string - nullable: true - description: The contents of the message. - function_call: - type: object - description: The name and arguments of a function that should be called, as generated by the model. - required: - - name - - arguments - properties: - name: - type: string - description: The name of the function to call. + application/json: + schema: + $ref: '#/components/schemas/CreateThreadAndRunRequest' + /threads/{thread_id}: + get: + tags: + - Assistants + operationId: getThread + summary: Retrieves a thread. + parameters: + - name: thread_id + in: path + required: true + description: The ID of the thread to retrieve. + schema: + type: string + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/ThreadObject' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + post: + tags: + - Assistants + operationId: modifyThread + summary: Modifies a thread. + parameters: + - name: thread_id + in: path + required: true + description: The ID of the thread to modify. Only the `metadata` can be modified. + schema: + type: string + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/ThreadObject' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/ModifyThreadRequest' + delete: + tags: + - Assistants + operationId: deleteThread + summary: Delete a thread. + parameters: + - name: thread_id + in: path + required: true + description: The ID of the thread to delete. + schema: + type: string + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/DeleteThreadResponse' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /threads/{thread_id}/messages: + post: + tags: + - Assistants + operationId: createMessage + summary: Create a message. + parameters: + - name: thread_id + in: path + required: true + description: The ID of the [thread](/docs/api-reference/threads) to create a message for. + schema: + type: string + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/MessageObject' + default: + description: An unexpected error response. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CreateMessageRequest' + get: + tags: + - Assistants + operationId: listMessages + summary: Returns a list of messages for a given thread. + parameters: + - name: thread_id + in: path + required: true + description: The ID of the [thread](/docs/api-reference/threads) the messages belong to. + schema: + type: string + - name: limit + in: query + required: false + description: |- + A limit on the number of objects to be returned. Limit can range between 1 and 100, and the + default is 20. + schema: + type: integer + format: int32 + default: 20 + - name: order + in: query + required: false + description: |- + Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` + for descending order. + schema: + $ref: '#/components/schemas/ListOrder' + default: desc + - name: after + in: query + required: false + description: |- + A cursor for use in pagination. `after` is an object ID that defines your place in the list. + For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + subsequent call can include after=obj_foo in order to fetch the next page of the list. + schema: + type: string + - name: before + in: query + required: false + description: |- + A cursor for use in pagination. `before` is an object ID that defines your place in the list. + For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + subsequent call can include before=obj_foo in order to fetch the previous page of the list. + schema: + type: string + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/ListMessagesResponse' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /threads/{thread_id}/messages/{message_id}: + get: + tags: + - Assistants + operationId: getMessage + summary: Retrieve a message. + parameters: + - name: thread_id + in: path + required: true + description: The ID of the [thread](/docs/api-reference/threads) to which this message belongs. + schema: + type: string + - name: message_id + in: path + required: true + description: The ID of the message to retrieve. + schema: + type: string + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/MessageObject' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + post: + tags: + - Assistants + operationId: modifyMessage + summary: Modifies a message. + parameters: + - name: thread_id + in: path + required: true + description: The ID of the thread to which this message belongs. + schema: + type: string + - name: message_id + in: path + required: true + description: The ID of the message to modify. + schema: + type: string + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/MessageObject' + default: + description: An unexpected error response. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/ModifyMessageRequest' + /threads/{thread_id}/messages/{message_id}/files: + get: + tags: + - Assistants + operationId: listMessageFiles + summary: Returns a list of message files. + parameters: + - name: thread_id + in: path + required: true + description: The ID of the thread that the message and files belong to. + schema: + type: string + - name: message_id + in: path + required: true + description: The ID of the message that the files belongs to. + schema: + type: string + - name: limit + in: query + required: false + description: |- + A limit on the number of objects to be returned. Limit can range between 1 and 100, and the + default is 20. + schema: + type: integer + format: int32 + default: 20 + - name: order + in: query + required: false + description: |- + Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` + for descending order. + schema: + $ref: '#/components/schemas/ListOrder' + default: desc + - name: after + in: query + required: false + description: |- + A cursor for use in pagination. `after` is an object ID that defines your place in the list. + For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + subsequent call can include after=obj_foo in order to fetch the next page of the list. + schema: + type: string + - name: before + in: query + required: false + description: |- + A cursor for use in pagination. `before` is an object ID that defines your place in the list. + For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + subsequent call can include before=obj_foo in order to fetch the previous page of the list. + schema: + type: string + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/ListMessageFilesResponse' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /threads/{thread_id}/messages/{message_id}/files/{file_id}: + get: + tags: + - Assistants + operationId: getMessageFile + summary: Retrieves a message file. + parameters: + - name: thread_id + in: path + required: true + description: The ID of the thread to which the message and File belong. + schema: + type: string + - name: message_id + in: path + required: true + description: The ID of the message the file belongs to. + schema: + type: string + - name: file_id + in: path + required: true + description: The ID of the file being retrieved. + schema: + type: string + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/MessageFileObject' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /threads/{thread_id}/runs: + post: + tags: + - Assistants + operationId: createRun + summary: Create a run. + parameters: + - name: thread_id + in: path + required: true + description: The ID of the thread to run. + schema: + type: string + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/RunObject' + default: + description: An unexpected error response. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CreateRunRequest' + get: + tags: + - Assistants + operationId: listRuns + summary: Returns a list of runs belonging to a thread. + parameters: + - name: thread_id + in: path + required: true + description: The ID of the thread the run belongs to. + schema: + type: string + - name: limit + in: query + required: false + description: |- + A limit on the number of objects to be returned. Limit can range between 1 and 100, and the + default is 20. + schema: + type: integer + format: int32 + default: 20 + - name: order + in: query + required: false + description: |- + Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` + for descending order. + schema: + $ref: '#/components/schemas/ListOrder' + default: desc + - name: after + in: query + required: false + description: |- + A cursor for use in pagination. `after` is an object ID that defines your place in the list. + For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + subsequent call can include after=obj_foo in order to fetch the next page of the list. + schema: + type: string + - name: before + in: query + required: false + description: |- + A cursor for use in pagination. `before` is an object ID that defines your place in the list. + For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + subsequent call can include before=obj_foo in order to fetch the previous page of the list. + schema: + type: string + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/ListRunsResponse' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /threads/{thread_id}/runs/{run_id}: + get: + tags: + - Assistants + operationId: getRun + summary: Retrieves a run. + parameters: + - name: thread_id + in: path + required: true + description: The ID of the [thread](/docs/api-reference/threads) that was run. + schema: + type: string + - name: run_id + in: path + required: true + description: The ID of the run to retrieve. + schema: + type: string + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/RunObject' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + post: + tags: + - Assistants + operationId: modifyRun + summary: Modifies a run. + parameters: + - name: thread_id + in: path + required: true + description: The ID of the [thread](/docs/api-reference/threads) that was run. + schema: + type: string + - name: run_id + in: path + required: true + description: The ID of the run to modify. + schema: + type: string + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/RunObject' + default: + description: An unexpected error response. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/ModifyRunRequest' + /threads/{thread_id}/runs/{run_id}/cancel: + post: + tags: + - Assistants + operationId: cancelRun + summary: Cancels a run that is `in_progress`. + parameters: + - name: thread_id + in: path + required: true + description: The ID of the thread to which this run belongs. + schema: + type: string + - name: run_id + in: path + required: true + description: The ID of the run to cancel. + schema: + type: string + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/RunObject' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /threads/{thread_id}/runs/{run_id}/steps: + get: + tags: + - Assistants + operationId: listRunSteps + summary: Returns a list of run steps belonging to a run. + parameters: + - name: thread_id + in: path + required: true + description: The ID of the thread the run and run steps belong to. + schema: + type: string + - name: run_id + in: path + required: true + description: The ID of the run the run steps belong to. + schema: + type: string + - name: limit + in: query + required: false + description: |- + A limit on the number of objects to be returned. Limit can range between 1 and 100, and the + default is 20. + schema: + type: integer + format: int32 + default: 20 + - name: order + in: query + required: false + description: |- + Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` + for descending order. + schema: + $ref: '#/components/schemas/ListOrder' + default: desc + - name: after + in: query + required: false + description: |- + A cursor for use in pagination. `after` is an object ID that defines your place in the list. + For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + subsequent call can include after=obj_foo in order to fetch the next page of the list. + schema: + type: string + - name: before + in: query + required: false + description: |- + A cursor for use in pagination. `before` is an object ID that defines your place in the list. + For instance, if you make a list request and receive 100 objects, ending with obj_foo, your + subsequent call can include before=obj_foo in order to fetch the previous page of the list. + schema: + type: string + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/ListRunStepsResponse' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /threads/{thread_id}/runs/{run_id}/steps/{step_id}: + get: + tags: + - Assistants + operationId: getRunStep + summary: Retrieves a run step. + parameters: + - name: thread_id + in: path + required: true + description: The ID of the thread to which the run and run step belongs. + schema: + type: string + - name: run_id + in: path + required: true + description: The ID of the run to which the run step belongs. + schema: + type: string + - name: step_id + in: path + required: true + description: The ID of the run step to retrieve. + schema: + type: string + responses: + '200': + description: The request has succeeded. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/RunStepObject' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /threads/{thread_id}/runs/{run_id}/submit_tool_outputs: + post: + tags: + - Assistants + operationId: submitToolOuputsToRun + summary: |- + When a run has the `status: "requires_action"` and `required_action.type` is + `submit_tool_outputs`, this endpoint can be used to submit the outputs from the tool calls once + they're all completed. All outputs must be submitted in a single request. + parameters: + - name: thread_id + in: path + required: true + description: The ID of the [thread](/docs/api-reference/threads) to which this run belongs. + schema: + type: string + - name: run_id + in: path + required: true + description: The ID of the run that requires the tool output submission. + schema: + type: string + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/RunObject' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/SubmitToolOutputsRunRequest' +security: + - BearerAuth: [] +components: + schemas: + AssistantFileObject: + type: object + required: + - id + - object + - created_at + - assistant_id + properties: + id: + type: string + description: The identifier, which can be referenced in API endpoints. + object: + type: string + enum: + - assistant.file + description: The object type, which is always `assistant.file`. + created_at: + type: integer + format: unixtime + description: The Unix timestamp (in seconds) for when the assistant file was created. + assistant_id: + type: string + description: The assistant ID that the file is attached to. + description: A list of [Files](/docs/api-reference/files) attached to an `assistant`. + AssistantObject: + type: object + required: + - id + - object + - created_at + - name + - description + - model + - instructions + - tools + - file_ids + - metadata + properties: + id: + type: string + description: The identifier, which can be referenced in API endpoints. + object: + type: string + enum: + - assistant + description: The object type, which is always `assistant`. + created_at: + type: integer + format: unixtime + description: The Unix timestamp (in seconds) for when the assistant was created. + name: + type: string + nullable: true + maxLength: 256 + description: The name of the assistant. The maximum length is 256 characters. + description: + type: string + nullable: true + maxLength: 512 + description: The description of the assistant. The maximum length is 512 characters. + model: + type: string + description: |- + ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to + see all of your available models, or see our [Model overview](/docs/models/overview) for + descriptions of them. + instructions: + type: string + nullable: true + maxLength: 32768 + description: The system instructions that the assistant uses. The maximum length is 32768 characters. + tools: + $ref: '#/components/schemas/CreateAssistantRequestToolsItem' + description: |- + A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. + Tools can be of types `code_interpreter`, `retrieval`, or `function`. 
+ default: [] + file_ids: + type: array + items: + type: string + maxItems: 20 + description: |- + A list of [file](/docs/api-reference/files) IDs attached to this assistant. There can be a + maximum of 20 files attached to the assistant. Files are ordered by their creation date in + ascending order. + default: [] + metadata: + type: object + additionalProperties: + type: string + nullable: true + description: |- + Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + additional information about the object in a structured format. Keys can be a maximum of 64 + characters long and values can be a maxium of 512 characters long. + x-oaiTypeLabel: map + description: Represents an `assistant` that can call the model and use tools. + AssistantToolsCode: + type: object + required: + - type + properties: + type: + type: string + enum: + - code_interpreter + description: 'The type of tool being defined: `code_interpreter`' + AssistantToolsFunction: + type: object + required: + - type + - function + properties: + type: + type: string + enum: + - function + description: 'The type of tool being defined: `function`' + function: + $ref: '#/components/schemas/FunctionObject' + AssistantToolsRetrieval: + type: object + required: + - type + properties: + type: + type: string + enum: + - retrieval + description: 'The type of tool being defined: `retrieval`' + AudioSegment: + type: object + required: + - id + - seek + - start + - end + - text + - tokens + - temperature + - avg_logprob + - compression_ratio + - no_speech_prob + properties: + id: + type: integer + format: int64 + description: The zero-based index of this segment. + seek: + type: integer + format: int64 + description: |- + The seek position associated with the processing of this audio segment. Seek positions are + expressed as hundredths of seconds. The model may process several segments from a single seek + position, so while the seek position will never represent a later time than the segment's + start, the segment's start may represent a significantly later time than the segment's + associated seek position. + start: + type: number + format: double + description: The time at which this segment started relative to the beginning of the audio. + end: + type: number + format: double + description: The time at which this segment ended relative to the beginning of the audio. + text: + type: string + description: The text that was part of this audio segment. + tokens: + $ref: '#/components/schemas/TokenArrayItem' + description: The token IDs matching the text in this audio segment. + temperature: + type: number + format: double + minimum: 0 + maximum: 1 + description: The temperature score associated with this audio segment. + avg_logprob: + type: number + format: double + description: The average log probability associated with this audio segment. + compression_ratio: + type: number + format: double + description: The compression ratio of this audio segment. + no_speech_prob: + type: number + format: double + description: The probability of no speech detection within this audio segment. + BatchSize: + type: integer + format: int64 + minimum: 1 + maximum: 256 + ChatCompletionFunctionCallOption: + type: object + required: + - name + properties: + name: + type: string + description: The name of the function to call. + description: |- + Specifying a particular function via `{"name": "my_function"}` forces the model to call that + function. 
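+    # Editorial note (illustrative, not part of the generated output): per the
+    # schema above, a caller can force a specific function call by sending, e.g.:
+    #
+    #   "function_call": { "name": "get_current_weather" }
+    #
+    # where `get_current_weather` is a hypothetical name that must also appear in
+    # the request's `functions` (or `tools`) list.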
+ ChatCompletionFunctions: + type: object + required: + - name + properties: + description: + type: string + description: |- + A description of what the function does, used by the model to choose when and how to call the + function. + name: + type: string + description: |- + The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and + dashes, with a maximum length of 64. + parameters: + $ref: '#/components/schemas/FunctionParameters' + deprecated: true + ChatCompletionMessageToolCall: + type: object + required: + - id + - type + - function + properties: + id: + type: string + description: The ID of the tool call. + type: + type: string + enum: + - function + description: The type of the tool. Currently, only `function` is supported. + function: + type: object + properties: + name: + type: string + description: The name of the function to call. + arguments: + type: string + description: |- + The arguments to call the function with, as generated by the model in JSON format. Note that + the model does not always generate valid JSON, and may hallucinate parameters not defined by + your function schema. Validate the arguments in your code before calling your function. + required: + - name + - arguments + description: The function that the model called. + ChatCompletionMessageToolCallsItem: + type: array + items: + $ref: '#/components/schemas/ChatCompletionMessageToolCall' + description: The tool calls generated by the model, such as function calls. + ChatCompletionNamedToolChoice: + type: object + required: + - type + - function + properties: + type: + type: string + enum: + - function + description: The type of the tool. Currently, only `function` is supported. + function: + type: object + properties: + name: + type: string + description: The name of the function to call. + required: + - name + description: Specifies a tool the model should use. Use to force the model to call a specific function. + ChatCompletionRequestAssistantMessage: + type: object + required: + - role + properties: + content: + type: string + nullable: true + description: |- + The contents of the assistant message. Required unless `tool_calls` or `function_call` is' + specified. + role: + type: string + enum: + - assistant + description: The role of the messages author, in this case `assistant`. + name: + type: string + description: |- + An optional name for the participant. Provides the model information to differentiate between + participants of the same role. + tool_calls: + $ref: '#/components/schemas/ChatCompletionMessageToolCallsItem' + function_call: + type: object + properties: + arguments: + type: string + description: |- + The arguments to call the function with, as generated by the model in JSON format. Note that + the model does not always generate valid JSON, and may hallucinate parameters not defined by + your function schema. Validate the arguments in your code before calling your function. + name: + type: string + description: The name of the function to call. + required: + - arguments + - name + description: |- + Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be + called, as generated by the model. + deprecated: true + ChatCompletionRequestFunctionMessage: + type: object + required: + - role + - content + - name + properties: + role: + type: string + enum: + - function + description: The role of the messages author, in this case `function`. + content: + type: string + nullable: true + description: The contents of the function message. 
+        name:
+          type: string
+          description: The name of the function to call.
+    ChatCompletionRequestMessage:
+      oneOf:
+        - $ref: '#/components/schemas/ChatCompletionRequestSystemMessage'
+        - $ref: '#/components/schemas/ChatCompletionRequestUserMessage'
+        - $ref: '#/components/schemas/ChatCompletionRequestAssistantMessage'
+        - $ref: '#/components/schemas/ChatCompletionRequestToolMessage'
+        - $ref: '#/components/schemas/ChatCompletionRequestFunctionMessage'
+      x-oaiExpandable: true
+    ChatCompletionRequestMessageContentPart:
+      oneOf:
+        - $ref: '#/components/schemas/ChatCompletionRequestMessageContentPartText'
+        - $ref: '#/components/schemas/ChatCompletionRequestMessageContentPartImage'
+      x-oaiExpandable: true
+    ChatCompletionRequestMessageContentPartImage:
+      type: object
+      required:
+        - type
+        - image_url
+      properties:
+        type:
+          type: string
+          enum:
+            - image_url
+          description: The type of the content part.
+        image_url:
+          type: object
+          properties:
+            url:
+              anyOf:
+                - type: string
+                  format: uri
+                - type: string
+              description: Either a URL of the image or the base64 encoded image data.
+            detail:
+              type: string
+              enum:
+                - auto
+                - low
+                - high
+              description: |-
+                Specifies the detail level of the image. Learn more in the
+                [Vision guide](/docs/guides/vision/low-or-high-fidelity-image-understanding).
+              default: auto
+          required:
+            - url
+    ChatCompletionRequestMessageContentPartText:
+      type: object
+      required:
+        - type
+        - text
+      properties:
+        type:
+          type: string
+          enum:
+            - text
+          description: The type of the content part.
+        text:
+          type: string
+          description: The text content.
+    ChatCompletionRequestMessageContentParts:
+      type: array
+      items:
+        $ref: '#/components/schemas/ChatCompletionRequestMessageContentPart'
+      minItems: 1
+    ChatCompletionRequestSystemMessage:
+      type: object
+      required:
+        - content
+        - role
+      properties:
+        content:
+          type: string
+          description: The contents of the system message.
+          x-oaiExpandable: true
+        role:
+          type: string
+          enum:
+            - system
+          description: The role of the messages author, in this case `system`.
+        name:
+          type: string
+          description: |-
+            An optional name for the participant. Provides the model information to differentiate between
+            participants of the same role.
+    ChatCompletionRequestToolMessage:
+      type: object
+      required:
+        - role
+        - content
+        - tool_call_id
+      properties:
+        role:
+          type: string
+          enum:
+            - tool
+          description: The role of the messages author, in this case `tool`.
+        content:
+          type: string
+          description: The contents of the tool message.
+        tool_call_id:
+          type: string
+          description: Tool call that this message is responding to.
+    ChatCompletionRequestUserMessage:
+      type: object
+      required:
+        - content
+        - role
+      properties:
+        content:
+          allOf:
+            - $ref: '#/components/schemas/ChatCompletionRequestUserMessageContent'
+          description: The contents of the user message.
+          x-oaiExpandable: true
+        role:
+          type: string
+          enum:
+            - user
+          description: The role of the messages author, in this case `user`.
+        name:
+          type: string
+          description: |-
+            An optional name for the participant. Provides the model information to differentiate between
+            participants of the same role.
+    ChatCompletionRequestUserMessageContent:
+      oneOf:
+        - type: string
+        - $ref: '#/components/schemas/ChatCompletionRequestMessageContentParts'
+    ChatCompletionResponseMessage:
+      type: object
+      required:
+        - content
+        - role
+      properties:
+        content:
+          type: string
+          nullable: true
+          description: The contents of the message.
+        tool_calls:
+          $ref: '#/components/schemas/ChatCompletionMessageToolCallsItem'
+        role:
+          type: string
+          enum:
+            - assistant
+          description: The role of the author of this message.
+        function_call:
+          type: object
+          properties:
+            arguments:
+              type: string
+              description: |-
+                The arguments to call the function with, as generated by the model in JSON format. Note that
+                the model does not always generate valid JSON, and may hallucinate parameters not defined by
+                your function schema. Validate the arguments in your code before calling your function.
+            name:
+              type: string
+              description: The name of the function to call.
+          required:
+            - arguments
+            - name
+          description: |-
+            Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be
+            called, as generated by the model.
+          deprecated: true
+    ChatCompletionTokenLogprob:
+      type: object
+      required:
+        - token
+        - logprob
+        - bytes
+        - top_logprobs
+      properties:
+        token:
+          type: string
+          description: The token.
+        logprob:
+          type: number
+          format: double
+          description: The log probability of this token.
+        bytes:
+          type: array
+          items:
+            type: integer
+            format: int64
+          nullable: true
+          description: |-
+            A list of integers representing the UTF-8 bytes representation of the token. Useful in
+            instances where characters are represented by multiple tokens and their byte representations
+            must be combined to generate the correct text representation. Can be `null` if there is no
+            bytes representation for the token.
+        top_logprobs:
+          type: array
+          items:
+            type: object
+            properties:
+              token:
+                type: string
+                description: The token.
+              logprob:
+                type: number
+                format: double
+                description: The log probability of this token.
+              bytes:
+                type: array
+                items:
+                  type: integer
+                  format: int64
+                nullable: true
+                description: |-
+                  A list of integers representing the UTF-8 bytes representation of the token. Useful in
+                  instances where characters are represented by multiple tokens and their byte representations
+                  must be combined to generate the correct text representation. Can be `null` if there is no
+                  bytes representation for the token.
+            required:
+              - token
+              - logprob
+              - bytes
+          description: |-
+            List of the most likely tokens and their log probability, at this token position. In rare
+            cases, there may be fewer than the number of requested `top_logprobs` returned.
+    ChatCompletionTool:
+      type: object
+      required:
+        - type
+        - function
+      properties:
+        type:
+          type: string
+          enum:
+            - function
+          description: The type of the tool. Currently, only `function` is supported.
+        function:
+          $ref: '#/components/schemas/FunctionObject'
+    ChatCompletionToolChoiceOption:
+      oneOf:
+        - type: string
+          enum:
+            - none
+            - auto
+        - $ref: '#/components/schemas/ChatCompletionNamedToolChoice'
+      description: |-
+        Controls which (if any) function is called by the model. `none` means the model will not call a
+        function and instead generates a message. `auto` means the model can pick between generating a
+        message or calling a function. Specifying a particular function via
+        `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that
+        function.
+
+        `none` is the default when no functions are present. `auto` is the default if functions are
+        present.
+      x-oaiExpandable: true
     CompletionUsage:
       type: object
-      description: Usage statistics for the completion request.
required: - - prompt_tokens - - completion_tokens - - total_tokens + - prompt_tokens + - completion_tokens + - total_tokens + properties: + prompt_tokens: + type: integer + format: int64 + description: Number of tokens in the prompt. + completion_tokens: + type: integer + format: int64 + description: Number of tokens in the generated completion + total_tokens: + type: integer + format: int64 + description: Total number of tokens used in the request (prompt + completion). + description: Usage statistics for the completion request. + CreateAssistantFileRequest: + type: object + required: + - file_id + properties: + file_id: + type: string + description: |- + A [File](/docs/api-reference/files) ID (with `purpose="assistants"`) that the assistant should + use. Useful for tools like `retrieval` and `code_interpreter` that can access files. + CreateAssistantRequest: + type: object + required: + - model + properties: + model: + type: string + description: |- + ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to + see all of your available models, or see our [Model overview](/docs/models/overview) for + descriptions of them. + name: + type: string + nullable: true + maxLength: 256 + description: The name of the assistant. The maximum length is 256 characters. + description: + type: string + nullable: true + maxLength: 512 + description: The description of the assistant. The maximum length is 512 characters. + instructions: + type: string + nullable: true + maxLength: 32768 + description: The system instructions that the assistant uses. The maximum length is 32768 characters. + tools: + $ref: '#/components/schemas/CreateAssistantRequestToolsItem' + description: |- + A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. + Tools can be of types `code_interpreter`, `retrieval`, or `function`. + default: [] + file_ids: + type: array + items: + type: string + maxItems: 20 + description: |- + A list of [file](/docs/api-reference/files) IDs attached to this assistant. There can be a + maximum of 20 files attached to the assistant. Files are ordered by their creation date in + ascending order. + default: [] + metadata: + type: object + additionalProperties: + type: string + nullable: true + description: |- + Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + additional information about the object in a structured format. Keys can be a maximum of 64 + characters long and values can be a maxium of 512 characters long. + x-oaiTypeLabel: map + CreateAssistantRequestTool: + oneOf: + - $ref: '#/components/schemas/AssistantToolsCode' + - $ref: '#/components/schemas/AssistantToolsRetrieval' + - $ref: '#/components/schemas/AssistantToolsFunction' + x-oaiExpandable: true + CreateAssistantRequestToolsItem: + type: array + items: + $ref: '#/components/schemas/CreateAssistantRequestTool' + maxItems: 128 + CreateChatCompletionRequest: + type: object + required: + - messages + - model + properties: + messages: + type: array + items: + $ref: '#/components/schemas/ChatCompletionRequestMessage' + minItems: 1 + description: |- + A list of messages comprising the conversation so far. + [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb). 
+ model: + anyOf: + - type: string + - type: string + enum: + - gpt-4-0125-preview + - gpt-4-turbo-preview + - gpt-4-1106-preview + - gpt-4-vision-preview + - gpt-4 + - gpt-4-0314 + - gpt-4-0613 + - gpt-4-32k + - gpt-4-32k-0314 + - gpt-4-32k-0613 + - gpt-3.5-turbo + - gpt-3.5-turbo-16k + - gpt-3.5-turbo-0301 + - gpt-3.5-turbo-0613 + - gpt-3.5-turbo-1106 + - gpt-3.5-turbo-16k-0613 + description: |- + ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) + table for details on which models work with the Chat API. + x-oaiTypeLabel: string + frequency_penalty: + type: number + format: double + nullable: true + minimum: -2 + maximum: 2 + description: |- + Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing + frequency in the text so far, decreasing the model's likelihood to repeat the same line + verbatim. + + [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) + default: 0 + logit_bias: + type: object + additionalProperties: + type: integer + format: int64 + nullable: true + description: |- + Modify the likelihood of specified tokens appearing in the completion. + + Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an + associated bias value from -100 to 100. Mathematically, the bias is added to the logits + generated by the model prior to sampling. The exact effect will vary per model, but values + between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 + should result in a ban or exclusive selection of the relevant token. + x-oaiTypeLabel: map + default: null + logprobs: + type: boolean + nullable: true + description: |- + Whether to return log probabilities of the output tokens or not. If true, returns the log + probabilities of each output token returned in the `content` of `message`. This option is + currently not available on the `gpt-4-vision-preview` model. + default: false + top_logprobs: + type: integer + format: int64 + nullable: true + minimum: 0 + maximum: 5 + description: |- + An integer between 0 and 5 specifying the number of most likely tokens to return at each token + position, each with an associated log probability. `logprobs` must be set to `true` if this + parameter is used. + max_tokens: + type: integer + format: int64 + nullable: true + minimum: 0 + description: |- + The maximum number of [tokens](/tokenizer) that can be generated in the chat completion. + + The total length of input tokens and generated tokens is limited by the model's context length. + [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) + for counting tokens. + default: 16 + n: + type: integer + format: int64 + nullable: true + minimum: 1 + maximum: 128 + description: |- + How many chat completion choices to generate for each input message. Note that you will be + charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to + minimize costs. + default: 1 + presence_penalty: + type: number + format: double + nullable: true + minimum: -2 + maximum: 2 + description: |- + Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear + in the text so far, increasing the model's likelihood to talk about new topics. 
+ + [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) + default: 0 + response_format: + type: object + properties: + type: + type: string + enum: + - text + - json_object + description: Must be one of `text` or `json_object`. + default: text + description: |- + An object specifying the format that the model must output. Compatible with + [GPT-4 Turbo](/docs/models/gpt-4-and-gpt-4-turbo) and `gpt-3.5-turbo-1106`. + + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the + model generates is valid JSON. + + **Important:** when using JSON mode, you **must** also instruct the model to produce JSON + yourself via a system or user message. Without this, the model may generate an unending stream + of whitespace until the generation reaches the token limit, resulting in a long-running and + seemingly "stuck" request. Also note that the message content may be partially cut off if + `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the + conversation exceeded the max context length. + seed: + type: integer + format: int64 + nullable: true + minimum: -9223372036854776000 + maximum: 9223372036854776000 + description: |- + This feature is in Beta. + + If specified, our system will make a best effort to sample deterministically, such that + repeated requests with the same `seed` and parameters should return the same result. + + Determinism is not guaranteed, and you should refer to the `system_fingerprint` response + parameter to monitor changes in the backend. + x-oaiMeta: + beta: true + stop: + oneOf: + - $ref: '#/components/schemas/Stop' + nullable: true + description: Up to 4 sequences where the API will stop generating further tokens. + default: null + stream: + type: boolean + nullable: true + description: |- + If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only + [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) + as they become available, with the stream terminated by a `data: [DONE]` message. + [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + default: false + temperature: + type: number + format: double + nullable: true + minimum: 0 + maximum: 2 + description: |- + What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output + more random, while lower values like 0.2 will make it more focused and deterministic. + + We generally recommend altering this or `top_p` but not both. + default: 1 + top_p: + type: number + format: double + nullable: true + minimum: 0 + maximum: 1 + description: |- + An alternative to sampling with temperature, called nucleus sampling, where the model considers + the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising + the top 10% probability mass are considered. + + We generally recommend altering this or `temperature` but not both. + default: 1 + tools: + type: array + items: + $ref: '#/components/schemas/ChatCompletionTool' + description: |- + A list of tools the model may call. Currently, only functions are supported as a tool. Use this + to provide a list of functions the model may generate JSON inputs for. 
+        tool_choice:
+          $ref: '#/components/schemas/ChatCompletionToolChoiceOption'
+        user:
+          allOf:
+            - $ref: '#/components/schemas/User'
+          description: |-
+            A unique identifier representing your end-user, which can help OpenAI to monitor and detect
+            abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
+        function_call:
+          anyOf:
+            - type: string
+              enum:
+                - none
+                - auto
+            - $ref: '#/components/schemas/ChatCompletionFunctionCallOption'
+          description: |-
+            Deprecated in favor of `tool_choice`.
+
+            Controls which (if any) function is called by the model. `none` means the model will not call a
+            function and instead generates a message. `auto` means the model can pick between generating a
+            message or calling a function. Specifying a particular function via `{"name": "my_function"}`
+            forces the model to call that function.
+
+            `none` is the default when no functions are present. `auto` is the default if functions are
+            present.
+          deprecated: true
+          x-oaiExpandable: true
+        functions:
+          type: array
+          items:
+            $ref: '#/components/schemas/ChatCompletionFunctions'
+          minItems: 1
+          maxItems: 128
+          description: |-
+            Deprecated in favor of `tools`.
+
+            A list of functions the model may generate JSON inputs for.
+          deprecated: true
+    CreateChatCompletionResponse:
+      type: object
+      required:
+        - id
+        - choices
+        - created
+        - model
+        - object
+      properties:
+        id:
+          type: string
+          description: A unique identifier for the chat completion.
+        choices:
+          type: array
+          items:
+            type: object
+            properties:
+              finish_reason:
+                type: string
+                enum:
+                  - stop
+                  - length
+                  - tool_calls
+                  - content_filter
+                  - function_call
+                description: |-
+                  The reason the model stopped generating tokens. This will be `stop` if the model hit a
+                  natural stop point or a provided stop sequence, `length` if the maximum number of tokens
+                  specified in the request was reached, `content_filter` if content was omitted due to a flag
+                  from our content filters, `tool_calls` if the model called a tool, or `function_call`
+                  (deprecated) if the model called a function.
+              index:
+                type: integer
+                format: int64
+                description: The index of the choice in the list of choices.
+              message:
+                $ref: '#/components/schemas/ChatCompletionResponseMessage'
+              logprobs:
+                type: object
+                properties:
+                  content:
+                    type: array
+                    items:
+                      $ref: '#/components/schemas/ChatCompletionTokenLogprob'
+                    nullable: true
+                required:
+                  - content
+                nullable: true
+                description: Log probability information for the choice.
+            required:
+              - finish_reason
+              - index
+              - message
+              - logprobs
+          description: A list of chat completion choices. Can be more than one if `n` is greater than 1.
+        created:
+          type: integer
+          format: unixtime
+          description: The Unix timestamp (in seconds) of when the chat completion was created.
+        model:
+          type: string
+          description: The model used for the chat completion.
+        system_fingerprint:
+          type: string
+          description: |-
+            This fingerprint represents the backend configuration that the model runs with.
+
+            Can be used in conjunction with the `seed` request parameter to understand when backend changes
+            have been made that might impact determinism.
+        object:
+          type: string
+          enum:
+            - chat.completion
+          description: The object type, which is always `chat.completion`.
+        usage:
+          $ref: '#/components/schemas/CompletionUsage'
+      description: Represents a chat completion response returned by the model, based on the provided input.
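+    # Editorial note (illustrative, not part of the generated output): a minimal
+    # CreateChatCompletionRequest satisfying the required `model` and `messages`
+    # fields might look like:
+    #
+    #   { "model": "gpt-3.5-turbo",
+    #     "messages": [ { "role": "user", "content": "Hello!" } ] }
+    #
+    # The matching CreateChatCompletionResponse then carries one `choices` entry
+    # (since `n` defaults to 1), `object: "chat.completion"`, and a `usage` block.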
+ CreateCompletionRequest: + type: object + required: + - model + - prompt + properties: + model: + anyOf: + - type: string + - type: string + enum: + - gpt-3.5-turbo-instruct + - davinci-002 + - babbage-002 + description: |- + ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to + see all of your available models, or see our [Model overview](/docs/models/overview) for + descriptions of them. + x-oaiTypeLabel: string + prompt: + oneOf: + - $ref: '#/components/schemas/Prompt' + nullable: true + description: |- + The prompt(s) to generate completions for, encoded as a string, array of strings, array of + tokens, or array of token arrays. + + Note that <|endoftext|> is the document separator that the model sees during training, so if a + prompt is not specified the model will generate as if from the beginning of a new document. + default: <|endoftext|> + best_of: + type: integer + format: int64 + nullable: true + minimum: 0 + maximum: 20 + description: |- + Generates `best_of` completions server-side and returns the "best" (the one with the highest + log probability per token). Results cannot be streamed. + + When used with `n`, `best_of` controls the number of candidate completions and `n` specifies + how many to return – `best_of` must be greater than `n`. + + **Note:** Because this parameter generates many completions, it can quickly consume your token + quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. + default: 1 + echo: + type: boolean + nullable: true + description: Echo back the prompt in addition to the completion + default: false + frequency_penalty: + type: number + format: double + nullable: true + minimum: -2 + maximum: 2 + description: |- + Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing + frequency in the text so far, decreasing the model's likelihood to repeat the same line + verbatim. + + [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) + default: 0 + logit_bias: + type: object + additionalProperties: + type: integer + format: int64 + nullable: true + description: |- + Modify the likelihood of specified tokens appearing in the completion. + + Accepts a JSON object that maps tokens (specified by their token ID in the GPT tokenizer) to an + associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) + to convert text to token IDs. Mathematically, the bias is added to the logits generated by the + model prior to sampling. The exact effect will vary per model, but values between -1 and 1 + should decrease or increase likelihood of selection; values like -100 or 100 should result in a + ban or exclusive selection of the relevant token. + + As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token from being + generated. + x-oaiTypeLabel: map + default: null + logprobs: + type: integer + format: int64 + nullable: true + minimum: 0 + maximum: 5 + description: |- + Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. + For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The + API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` + elements in the response. + + The maximum value for `logprobs` is 5. 
+ default: null + max_tokens: + type: integer + format: int64 + nullable: true + minimum: 0 + description: |- + The maximum number of [tokens](/tokenizer) to generate in the completion. + + The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) + for counting tokens. + default: 16 + n: + type: integer + format: int64 + nullable: true + minimum: 1 + maximum: 128 + description: |- + How many completions to generate for each prompt. + + **Note:** Because this parameter generates many completions, it can quickly consume your token + quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. + default: 1 + presence_penalty: + type: number + format: double + nullable: true + minimum: -2 + maximum: 2 + description: |- + Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear + in the text so far, increasing the model's likelihood to talk about new topics. + + [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) + default: 0 + seed: + type: integer + format: int64 + nullable: true + minimum: -9223372036854776000 + maximum: 9223372036854776000 + description: |- + If specified, our system will make a best effort to sample deterministically, such that + repeated requests with the same `seed` and parameters should return the same result. + + Determinism is not guaranteed, and you should refer to the `system_fingerprint` response + parameter to monitor changes in the backend. + x-oaiMeta: + beta: true + stop: + oneOf: + - $ref: '#/components/schemas/Stop' + nullable: true + description: Up to 4 sequences where the API will stop generating further tokens. + default: null + stream: + type: boolean + nullable: true + description: |- + If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only + [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) + as they become available, with the stream terminated by a `data: [DONE]` message. + [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb). + default: false + suffix: + type: string + nullable: true + description: The suffix that comes after a completion of inserted text. + default: null + temperature: + type: number + format: double + nullable: true + minimum: 0 + maximum: 2 + description: |- + What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output + more random, while lower values like 0.2 will make it more focused and deterministic. + + We generally recommend altering this or `top_p` but not both. + default: 1 + top_p: + type: number + format: double + nullable: true + minimum: 0 + maximum: 1 + description: |- + An alternative to sampling with temperature, called nucleus sampling, where the model considers + the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising + the top 10% probability mass are considered. + + We generally recommend altering this or `temperature` but not both. + default: 1 + user: + allOf: + - $ref: '#/components/schemas/User' + description: |- + A unique identifier representing your end-user, which can help OpenAI to monitor and detect + abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). 
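+    # Editorial note (illustrative, not part of the generated output): a minimal
+    # legacy CreateCompletionRequest needs only `model` and `prompt`, e.g.:
+    #
+    #   { "model": "gpt-3.5-turbo-instruct", "prompt": "Say hello.", "max_tokens": 16 }
+    #
+    # When `prompt` is an array, `n` and `best_of` apply per prompt entry.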
+ CreateCompletionResponse: + type: object + required: + - id + - choices + - created + - model + - object properties: - prompt_tokens: - type: integer - format: int64 - description: Number of tokens in the prompt. - completion_tokens: + id: + type: string + description: A unique identifier for the completion. + choices: + type: array + items: + type: object + properties: + index: + type: integer + format: int64 + text: + type: string + logprobs: + type: object + properties: + tokens: + type: array + items: + type: string + token_logprobs: + type: array + items: + type: number + format: double + top_logprobs: + type: array + items: + type: object + additionalProperties: + type: integer + format: int64 + text_offset: + type: array + items: + type: integer + format: int64 + required: + - tokens + - token_logprobs + - top_logprobs + - text_offset + nullable: true + finish_reason: + type: string + enum: + - stop + - length + - tool_calls + - content_filter + - function_call + - length + - content_filter + description: |- + The reason the model stopped generating tokens. This will be `stop` if the model hit a + natural stop point or a provided stop sequence, or `content_filter` if content was omitted + due to a flag from our content filters, `length` if the maximum number of tokens specified + in the request was reached, or `content_filter` if content was omitted due to a flag from our + content filters. + required: + - index + - text + - logprobs + - finish_reason + description: The list of completion choices the model generated for the input. + created: type: integer - format: int64 - description: Number of tokens in the generated completion - total_tokens: + format: unixtime + description: The Unix timestamp (in seconds) of when the completion was created. + model: + type: string + description: The model used for the completion. + system_fingerprint: + type: string + description: |- + This fingerprint represents the backend configuration that the model runs with. + + Can be used in conjunction with the `seed` request parameter to understand when backend changes + have been made that might impact determinism. + object: + type: string + enum: + - text_completion + description: The object type, which is always `text_completion`. + usage: + allOf: + - $ref: '#/components/schemas/CompletionUsage' + description: Usage statistics for the completion request. + description: |- + Represents a completion response from the API. Note: both the streamed and non-streamed response + objects share the same shape (unlike the chat endpoint). + CreateEmbeddingRequest: + type: object + required: + - input + - model + properties: + input: + allOf: + - $ref: '#/components/schemas/CreateEmbeddingRequestInput' + description: |- + Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a + single request, pass an array of strings or array of token arrays. Each input must not exceed + the max input tokens for the model (8191 tokens for `text-embedding-ada-002`) and cannot be an + empty string. + [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) + for counting tokens. + x-oaiExpandable: true + model: + anyOf: + - type: string + - type: string + enum: + - text-embedding-ada-002 + - text-embedding-3-small + - text-embedding-3-large + description: |- + ID of the model to use. 
You can use the [List models](/docs/api-reference/models/list) API to + see all of your available models, or see our [Model overview](/docs/models/overview) for + descriptions of them. + x-oaiTypeLabel: string + encoding_format: + type: string + enum: + - float + - base64 + description: |- + The format to return the embeddings in. Can be either `float` or + [`base64`](https://pypi.org/project/pybase64/). + default: float + dimensions: type: integer format: int64 - description: Total number of tokens used in the request (prompt + completion). - CreateChatCompletionRequest: + minimum: 1 + description: |- + The number of dimensions the resulting output embeddings should have. Only supported in + `text-embedding-3` and later models. + user: + allOf: + - $ref: '#/components/schemas/User' + description: |- + A unique identifier representing your end-user, which can help OpenAI to monitor and detect + abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). + CreateEmbeddingRequestInput: + oneOf: + - type: string + - type: array + items: + type: string + - $ref: '#/components/schemas/TokenArrayItem' + - $ref: '#/components/schemas/TokenArrayArray' + CreateEmbeddingResponse: + type: object + required: + - data + - model + - object + - usage + properties: + data: + type: array + items: + $ref: '#/components/schemas/Embedding' + description: The list of embeddings generated by the model. + model: + type: string + description: The name of the model used to generate the embedding. + object: + type: string + enum: + - list + description: The object type, which is always "list". + usage: + allOf: + - $ref: '#/components/schemas/EmbeddingUsage' + description: The usage information for the request. + CreateFileRequestMultiPart: + type: object + required: + - file + - purpose + properties: + file: + type: string + format: binary + description: The file object (not file name) to be uploaded. + purpose: + type: string + enum: + - fine-tune + - assistants + description: |- + The intended purpose of the uploaded file. Use "fine-tune" for + [Fine-tuning](/docs/api-reference/fine-tuning) and "assistants" for + [Assistants](/docs/api-reference/assistants) and [Messages](/docs/api-reference/messages). This + allows us to validate the format of the uploaded file is correct for fine-tuning. + CreateFineTuningJobRequest: + type: object + required: + - model + - training_file + properties: + model: + anyOf: + - type: string + - type: string + enum: + - babbage-002 + - davinci-002 + - gpt-3.5-turbo + description: |- + The name of the model to fine-tune. You can select one of the + [supported models](/docs/guides/fine-tuning/what-models-can-be-fine-tuned). + x-oaiTypeLabel: string + training_file: + type: string + description: |- + The ID of an uploaded file that contains training data. + + See [upload file](/docs/api-reference/files/upload) for how to upload a file. + + Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with + the purpose `fine-tune`. + + See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. + hyperparameters: + type: object + properties: + batch_size: + anyOf: + - type: string + enum: + - auto + - low + - high + - $ref: '#/components/schemas/BatchSize' + description: |- + Number of examples in each batch. A larger batch size means that model parameters are + updated less frequently, but with lower variance. 
+ default: auto + learning_rate_multiplier: + anyOf: + - type: string + enum: + - auto + - low + - high + - $ref: '#/components/schemas/LearningRateMultiplier' + description: |- + Scaling factor for the learning rate. A smaller learning rate may be useful to avoid + overfitting. + default: auto + n_epochs: + anyOf: + - type: string + enum: + - auto + - low + - high + - $ref: '#/components/schemas/NEpochs' + description: |- + The number of epochs to train the model for. An epoch refers to one full cycle through the + training dataset. + default: auto + description: The hyperparameters used for the fine-tuning job. + suffix: + oneOf: + - $ref: '#/components/schemas/SuffixString' + nullable: true + description: |- + A string of up to 18 characters that will be added to your fine-tuned model name. + + For example, a `suffix` of "custom-model-name" would produce a model name like + `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`. + default: null + validation_file: + type: string + nullable: true + description: |- + The ID of an uploaded file that contains validation data. + + If you provide this file, the data is used to generate validation metrics periodically during + fine-tuning. These metrics can be viewed in the fine-tuning results file. The same data should + not be present in both train and validation files. + + Your dataset must be formatted as a JSONL file. You must upload your file with the purpose + `fine-tune`. + + See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. + CreateImageEditRequestMultiPart: type: object required: - - model - - messages + - image + - prompt properties: + image: + type: string + format: binary + description: |- + The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not + provided, image must have transparency, which will be used as the mask. + prompt: + type: string + maxLength: 1000 + description: A text description of the desired image(s). The maximum length is 1000 characters. + mask: + type: string + format: binary + description: |- + An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where + `image` should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions + as `image`. model: anyOf: - type: string - type: string enum: - - gpt4 - - gpt-4-0314 - - gpt-4-0613 - - gpt-4-32k - - gpt-4-32k-0314 - - gpt-4-32k-0613 - - gpt-3.5-turbo - - gpt-3.5-turbo-16k - - gpt-3.5-turbo-0301 - - gpt-3.5-turbo-0613 - - gpt-3.5-turbo-16k-0613 - description: |- - ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) - table for details on which models work with the Chat API. + - dall-e-2 + description: The model to use for image generation. Only `dall-e-2` is supported at this time. x-oaiTypeLabel: string - messages: - type: array - items: - $ref: '#/components/schemas/ChatCompletionRequestMessage' + default: dall-e-2 + n: + oneOf: + - $ref: '#/components/schemas/ImagesN' + nullable: true + description: The number of images to generate. Must be between 1 and 10. + default: 1 + size: + type: string + enum: + - 256x256 + - 512x512 + - 1024x1024 + - 512x512 + - 1024x1024 + nullable: true + description: The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. + default: 1024x1024 + response_format: + type: string + enum: + - url + - b64_json + - b64_json + nullable: true + description: The format in which the generated images are returned. Must be one of `url` or `b64_json`. 
+ default: url + user: + allOf: + - $ref: '#/components/schemas/User' description: |- - A list of messages comprising the conversation so far. - [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb). - minItems: 1 - functions: - type: array - items: - $ref: '#/components/schemas/ChatCompletionFunctions' - description: A list of functions the model may generate JSON inputs for. - minItems: 1 - maxItems: 128 - function_call: + A unique identifier representing your end-user, which can help OpenAI to monitor and detect + abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). + CreateImageRequest: + type: object + required: + - prompt + properties: + prompt: + type: string + description: |- + A text description of the desired image(s). The maximum length is 1000 characters for + `dall-e-2` and 4000 characters for `dall-e-3`. + model: anyOf: + - type: string - type: string enum: - - none - - auto - - $ref: '#/components/schemas/ChatCompletionFunctionCallOption' - description: |- - Controls how the model responds to function calls. `none` means the model does not call a - function, and responds to the end-user. `auto` means the model can pick between an end-user or - calling a function. Specifying a particular function via `{\"name":\ \"my_function\"}` forces the - model to call that function. `none` is the default when no functions are present. `auto` is the - default if functions are present. - temperature: + - dall-e-2 + - dall-e-3 + description: The model to use for image generation. + x-oaiTypeLabel: string + default: dall-e-2 + n: oneOf: - - $ref: '#/components/schemas/Temperature' + - $ref: '#/components/schemas/ImagesN' nullable: true description: |- - What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output - more random, while lower values like 0.2 will make it more focused and deterministic. - - We generally recommend altering this or `top_p` but not both. + The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only `n=1` is + supported. default: 1 - top_p: - oneOf: - - $ref: '#/components/schemas/TopP' + quality: + type: string + enum: + - standard + - hd nullable: true description: |- - An alternative to sampling with temperature, called nucleus sampling, where the model considers - the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising - the top 10% probability mass are considered. - - We generally recommend altering this or `temperature` but not both. - default: 1 - n: - oneOf: - - $ref: '#/components/schemas/N' + The quality of the image that will be generated. `hd` creates images with finer details and + greater consistency across the image. This param is only supported for `dall-e-3`. + default: standard + response_format: + type: string + enum: + - url + - b64_json + nullable: true + description: The format in which the generated images are returned. Must be one of `url` or `b64_json`. + default: url + size: + type: string + enum: + - 256x256 + - 512x512 + - 1024x1024 + - 1792x1024 + - 1024x1792 nullable: true description: |- - How many completions to generate for each prompt. - **Note:** Because this parameter generates many completions, it can quickly consume your token - quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. - default: 1 - max_tokens: - oneOf: - - $ref: '#/components/schemas/MaxTokens' + The size of the generated images. 
Must be one of `256x256`, `512x512`, or `1024x1024` for + `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3` models. + default: 1024x1024 + style: + type: string + enum: + - vivid + - natural nullable: true description: |- - The maximum number of [tokens](/tokenizer) to generate in the completion. - - The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) - for counting tokens. - default: 16 - stop: + The style of the generated images. Must be one of `vivid` or `natural`. Vivid causes the model + to lean towards generating hyper-real and dramatic images. Natural causes the model to produce + more natural, less hyper-real looking images. This param is only supported for `dall-e-3`. + default: vivid + user: allOf: - - $ref: '#/components/schemas/Stop' - description: Up to 4 sequences where the API will stop generating further tokens. - default: null - presence_penalty: - oneOf: - - $ref: '#/components/schemas/Penalty' - nullable: true + - $ref: '#/components/schemas/User' description: |- - Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear - in the text so far, increasing the model's likelihood to talk about new topics. - - [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) - frequency_penalty: + A unique identifier representing your end-user, which can help OpenAI to monitor and detect + abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). + CreateImageVariationRequestMultiPart: + type: object + required: + - image + properties: + image: + type: string + format: binary + description: |- + The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, + and square. + model: + anyOf: + - type: string + - type: string + enum: + - dall-e-2 + description: The model to use for image generation. Only `dall-e-2` is supported at this time. + x-oaiTypeLabel: string + default: dall-e-2 + n: oneOf: - - $ref: '#/components/schemas/Penalty' + - $ref: '#/components/schemas/ImagesN' nullable: true - description: |- - Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing - frequency in the text so far, decreasing the model's likelihood to repeat the same line - verbatim. - - [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) - logit_bias: - type: object - description: |- - Modify the likelihood of specified tokens appearing in the completion. - Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an - associated bias value from -100 to 100. Mathematically, the bias is added to the logits - generated by the model prior to sampling. The exact effect will vary per model, but values - between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 - should result in a ban or exclusive selection of the relevant token. - additionalProperties: - type: integer - format: int64 + description: The number of images to generate. Must be between 1 and 10. + default: 1 + response_format: + type: string + enum: + - url + - b64_json + - b64_json nullable: true - x-oaiTypeLabel: map + description: The format in which the generated images are returned. Must be one of `url` or `b64_json`. 
+ default: url
+ size:
+ type: string
+ enum:
+ - 256x256
+ - 512x512
+ - 1024x1024
+ nullable: true
+ description: The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`.
+ default: 1024x1024
user:
allOf:
- $ref: '#/components/schemas/User'
description: |-
A unique identifier representing your end-user, which can help OpenAI to monitor and detect
abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
- stream:
- type: boolean
+ CreateMessageRequest:
+ type: object
+ required:
+ - role
+ - content
+ properties:
+ role:
+ type: string
+ enum:
+ - user
+ - assistant
+ description: The role of the entity that is creating the message. Currently only `user` is supported.
+ content:
+ type: string
+ minLength: 1
+ maxLength: 32768
+ description: The content of the message.
+ file_ids:
+ type: array
+ items:
+ type: string
+ minItems: 1
+ maxItems: 10
+ description: |-
+ A list of [File](/docs/api-reference/files) IDs that the message should use. There can be a
+ maximum of 10 files attached to a message. Useful for tools like `retrieval` and
+ `code_interpreter` that can access and use files.
+ default: []
+ metadata:
+ type: object
+ additionalProperties:
+ type: string
nullable: true
description: |-
- If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only
- [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
- as they become available, with the stream terminated by a `data: [DONE]` message.
- [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb).
- default: true
- CreateChatCompletionResponse:
+ Set of 16 key-value pairs that can be attached to an object. This can be useful for storing
+ additional information about the object in a structured format. Keys can be a maximum of 64
+ characters long and values can be a maximum of 512 characters long.
+ x-oaiTypeLabel: map
+ CreateModerationRequest:
+ type: object
+ required:
+ - input
+ properties:
+ input:
+ allOf:
+ - $ref: '#/components/schemas/CreateModerationRequestInput'
+ description: The input text to classify.
+ model:
+ anyOf:
+ - type: string
+ - type: string
+ enum:
+ - text-moderation-latest
+ - text-moderation-stable
+ description: |-
+ Two content moderation models are available: `text-moderation-stable` and
+ `text-moderation-latest`. The default is `text-moderation-latest` which will be automatically
+ upgraded over time. This ensures you are always using our most accurate model. If you use
+ `text-moderation-stable`, we will provide advanced notice before updating the model. Accuracy
+ of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`.
+ x-oaiTypeLabel: string
+ default: text-moderation-latest
+ CreateModerationRequestInput:
+ oneOf:
+ - type: string
+ - type: array
+ items:
+ type: string
+ CreateModerationResponse:
type: object
- description: Represents a chat completion response returned by model, based on the provided input.
required:
- id
- - object
- - created
- model
- - choices
+ - results
properties:
id:
type: string
- description: A unique identifier for the chat completion.
- object:
- type: string
- description: The object type, which is always `chat.completion`.
- created:
- type: integer
- format: unixtime
- description: The Unix timestamp (in seconds) of when the chat completion was created.
+ description: The unique identifier for the moderation request.
model:
type: string
- description: The model used for the chat completion.
- choices:
+ description: The model used to generate the moderation results.
+ results:
type: array
items:
type: object
- required:
- - index
- - message
- - finish_reason
properties:
- index:
- type: integer
- format: int64
- description: The index of the choice in the list of choices.
- message:
- $ref: '#/components/schemas/ChatCompletionResponseMessage'
- finish_reason:
- type: string
- enum:
- - stop
- - length
- - function_call
- - content_filter
- description: |-
- The reason the model stopped generating tokens. This will be `stop` if the model hit a
- natural stop point or a provided stop sequence, `length` if the maximum number of tokens
- specified in the request was reached, `content_filter` if the content was omitted due to
- a flag from our content filters, or `function_call` if the model called a function.
- description: A list of chat completion choices. Can be more than one if `n` is greater than 1.
- usage:
- $ref: '#/components/schemas/CompletionUsage'
- x-oaiMeta:
- name: The chat completion object
- group: chat
- example: ''
- CreateCompletionRequest:
+ flagged:
+ type: boolean
+ description: Whether the content violates [OpenAI's usage policies](/policies/usage-policies).
+ categories:
+ type: object
+ properties:
+ hate:
+ type: boolean
+ description: |-
+ Content that expresses, incites, or promotes hate based on race, gender, ethnicity,
+ religion, nationality, sexual orientation, disability status, or caste. Hateful content
+ aimed at non-protected groups (e.g., chess players) is harassment.
+ hate/threatening:
+ type: boolean
+ description: |-
+ Hateful content that also includes violence or serious harm towards the targeted group
+ based on race, gender, ethnicity, religion, nationality, sexual orientation, disability
+ status, or caste.
+ harassment:
+ type: boolean
+ description: Content that expresses, incites, or promotes harassing language towards any target.
+ harassment/threatening:
+ type: boolean
+ description: Harassment content that also includes violence or serious harm towards any target.
+ self-harm:
+ type: boolean
+ description: |-
+ Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting,
+ and eating disorders.
+ self-harm/intent:
+ type: boolean
+ description: |-
+ Content where the speaker expresses that they are engaging or intend to engage in acts of
+ self-harm, such as suicide, cutting, and eating disorders.
+ self-harm/instructions:
+ type: boolean
+ description: |-
+ Content that encourages performing acts of self-harm, such as suicide, cutting, and eating
+ disorders, or that gives instructions or advice on how to commit such acts.
+ sexual:
+ type: boolean
+ description: |-
+ Content meant to arouse sexual excitement, such as the description of sexual activity, or
+ that promotes sexual services (excluding sex education and wellness).
+ sexual/minors:
+ type: boolean
+ description: Sexual content that includes an individual who is under 18 years old.
+ violence:
+ type: boolean
+ description: Content that depicts death, violence, or physical injury.
+ violence/graphic:
+ type: boolean
+ description: Content that depicts death, violence, or physical injury in graphic detail.
+ required:
+ - hate
+ - hate/threatening
+ - harassment
+ - harassment/threatening
+ - self-harm
+ - self-harm/intent
+ - self-harm/instructions
+ - sexual
+ - sexual/minors
+ - violence
+ - violence/graphic
+ description: A list of the categories, and whether they are flagged or not.
+ category_scores:
+ type: object
+ properties:
+ hate:
+ type: number
+ format: double
+ description: The score for the category 'hate'.
+ hate/threatening:
+ type: number
+ format: double
+ description: The score for the category 'hate/threatening'.
+ harassment:
+ type: number
+ format: double
+ description: The score for the category 'harassment'.
+ harassment/threatening:
+ type: number
+ format: double
+ description: The score for the category 'harassment/threatening'.
+ self-harm:
+ type: number
+ format: double
+ description: The score for the category 'self-harm'.
+ self-harm/intent:
+ type: number
+ format: double
+ description: The score for the category 'self-harm/intent'.
+ self-harm/instructions:
+ type: number
+ format: double
+ description: The score for the category 'self-harm/instructions'.
+ sexual:
+ type: number
+ format: double
+ description: The score for the category 'sexual'.
+ sexual/minors:
+ type: number
+ format: double
+ description: The score for the category 'sexual/minors'.
+ violence:
+ type: number
+ format: double
+ description: The score for the category 'violence'.
+ violence/graphic:
+ type: number
+ format: double
+ description: The score for the category 'violence/graphic'.
+ required:
+ - hate
+ - hate/threatening
+ - harassment
+ - harassment/threatening
+ - self-harm
+ - self-harm/intent
+ - self-harm/instructions
+ - sexual
+ - sexual/minors
+ - violence
+ - violence/graphic
+ description: A list of the categories along with their scores as predicted by the model.
+ required:
+ - flagged
+ - categories
+ - category_scores
+ description: A list of moderation objects.
+ description: Represents a policy compliance report by OpenAI's content moderation model against a given input.
+ CreateRunRequest:
+ type: object
+ required:
+ - assistant_id
+ properties:
+ assistant_id:
+ type: string
+ description: The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run.
+ model:
+ type: string
+ nullable: true
+ description: |-
+ The ID of the [Model](/docs/api-reference/models) to be used to execute this run. If a value
+ is provided here, it will override the model associated with the assistant. If not, the model
+ associated with the assistant will be used.
+ instructions:
+ type: string
+ nullable: true
+ description: |-
+ Overrides the [instructions](/docs/api-reference/assistants/createAssistant) of the assistant.
+ This is useful for modifying the behavior on a per-run basis.
+ additional_instructions:
+ type: string
+ nullable: true
+ description: |-
+ Appends additional instructions at the end of the instructions for the run. This is useful for
+ modifying the behavior on a per-run basis without overriding other instructions.
+ tools:
+ type: object
+ allOf:
+ - $ref: '#/components/schemas/CreateRunRequestToolsItem'
+ nullable: true
+ description: |-
+ Override the tools the assistant can use for this run. This is useful for modifying the
+ behavior on a per-run basis.
+ metadata:
+ type: object
+ additionalProperties:
+ type: string
+ nullable: true
+ description: |-
+ Set of 16 key-value pairs that can be attached to an object. This can be useful for storing
+ additional information about the object in a structured format.
Keys can be a maximum of 64
+ characters long and values can be a maximum of 512 characters long.
+ x-oaiTypeLabel: map
+ CreateRunRequestTool:
+ oneOf:
+ - $ref: '#/components/schemas/AssistantToolsCode'
+ - $ref: '#/components/schemas/AssistantToolsRetrieval'
+ - $ref: '#/components/schemas/AssistantToolsFunction'
+ x-oaiExpandable: true
+ CreateRunRequestToolsItem:
+ type: array
+ items:
+ $ref: '#/components/schemas/CreateRunRequestTool'
+ maxItems: 20
+ CreateSpeechRequest:
type: object
required:
- model
- - prompt
+ - input
+ - voice
properties:
model:
anyOf:
- type: string
- type: string
enum:
- - babbage-002
- - davinci-002
- - text-davinci-003
- - text-davinci-002
- - text-davinci-001
- - code-davinci-002
- - text-curie-001
- - text-babbage-001
- - text-ada-001
- description: |-
- ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to
- see all of your available models, or see our [Model overview](/docs/models/overview) for
- descriptions of them.
+ - tts-1
+ - tts-1-hd
+ description: 'One of the available [TTS models](/docs/models/tts): `tts-1` or `tts-1-hd`'
x-oaiTypeLabel: string
- prompt:
- allOf:
- - $ref: '#/components/schemas/Prompt'
- description: |-
- The prompt(s) to generate completions for, encoded as a string, array of strings, array of
- tokens, or array of token arrays.
-
- Note that <|endoftext|> is the document separator that the model sees during training, so if a
- prompt is not specified the model will generate as if from the beginning of a new document.
- default: <|endoftext|>
- suffix:
+ input:
type: string
- nullable: true
- description: The suffix that comes after a completion of inserted text.
- default: null
- temperature:
- oneOf:
- - $ref: '#/components/schemas/Temperature'
- nullable: true
- description: |-
- What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output
- more random, while lower values like 0.2 will make it more focused and deterministic.
-
- We generally recommend altering this or `top_p` but not both.
- default: 1
- top_p:
- oneOf:
- - $ref: '#/components/schemas/TopP'
- nullable: true
- description: |-
- An alternative to sampling with temperature, called nucleus sampling, where the model considers
- the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising
- the top 10% probability mass are considered.
-
- We generally recommend altering this or `temperature` but not both.
- default: 1
- n:
- oneOf:
- - $ref: '#/components/schemas/N'
- nullable: true
+ maxLength: 4096
+ description: The text to generate audio for. The maximum length is 4096 characters.
+ voice:
+ type: string
+ enum:
+ - alloy
+ - echo
+ - fable
+ - onyx
+ - nova
+ - shimmer
description: |-
- How many completions to generate for each prompt.
- **Note:** Because this parameter generates many completions, it can quickly consume your token
- quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.
+ The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`,
+ `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the
+ [Text to speech guide](/docs/guides/text-to-speech/voice-options).
+ response_format:
+ type: string
+ enum:
+ - mp3
+ - opus
+ - aac
+ - flac
+ description: The format to return the audio in. Supported formats are `mp3`, `opus`, `aac`, and `flac`.
+ default: mp3 + speed: + type: number + format: double + minimum: 0.25 + maximum: 4 + description: The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is the default. default: 1 - max_tokens: - oneOf: - - $ref: '#/components/schemas/MaxTokens' - nullable: true - description: |- - The maximum number of [tokens](/tokenizer) to generate in the completion. - - The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) - for counting tokens. - default: 16 - stop: + CreateThreadAndRunRequest: + type: object + required: + - assistant_id + properties: + assistant_id: + type: string + description: The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run. + thread: allOf: - - $ref: '#/components/schemas/Stop' - description: Up to 4 sequences where the API will stop generating further tokens. - default: null - presence_penalty: - oneOf: - - $ref: '#/components/schemas/Penalty' + - $ref: '#/components/schemas/CreateThreadRequest' + description: If no thread is provided, an empty thread will be created. + model: + type: string nullable: true description: |- - Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear - in the text so far, increasing the model's likelihood to talk about new topics. - - [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) - frequency_penalty: - oneOf: - - $ref: '#/components/schemas/Penalty' + The ID of the [Model](/docs/api-reference/models) to be used to execute this run. If a value is + provided here, it will override the model associated with the assistant. If not, the model + associated with the assistant will be used. + instructions: + type: string nullable: true description: |- - Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing - frequency in the text so far, decreasing the model's likelihood to repeat the same line - verbatim. - - [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) - logit_bias: + Override the default system message of the assistant. This is useful for modifying the behavior + on a per-run basis. + tools: type: object - description: |- - Modify the likelihood of specified tokens appearing in the completion. - Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an - associated bias value from -100 to 100. Mathematically, the bias is added to the logits - generated by the model prior to sampling. The exact effect will vary per model, but values - between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 - should result in a ban or exclusive selection of the relevant token. - additionalProperties: - type: integer - format: int64 - nullable: true - x-oaiTypeLabel: map - user: allOf: - - $ref: '#/components/schemas/User' - description: |- - A unique identifier representing your end-user, which can help OpenAI to monitor and detect - abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). - stream: - type: boolean + - $ref: '#/components/schemas/CreateRunRequestToolsItem' nullable: true description: |- - If set, partial message deltas will be sent, like in ChatGPT. 
Tokens will be sent as data-only
- [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
- as they become available, with the stream terminated by a `data: [DONE]` message.
- [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb).
- default: true
- logprobs:
- type: integer
- format: int64
+ Override the tools the assistant can use for this run. This is useful for modifying the
+ behavior on a per-run basis.
+ metadata:
+ type: object
+ additionalProperties:
+ type: string
nullable: true
description: |-
- Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens.
- For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The
- API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1`
- elements in the response.
-
- The maximum value for `logprobs` is 5.
- default: null
- echo:
- type: boolean
- nullable: true
- description: Echo back the prompt in addition to the completion
- default: false
- best_of:
- type: integer
- format: int64
+ Set of 16 key-value pairs that can be attached to an object. This can be useful for storing
+ additional information about the object in a structured format. Keys can be a maximum of 64
+ characters long and values can be a maximum of 512 characters long.
+ x-oaiTypeLabel: map
+ CreateThreadRequest:
+ type: object
+ properties:
+ messages:
+ type: array
+ items:
+ $ref: '#/components/schemas/CreateMessageRequest'
+ description: A list of [messages](/docs/api-reference/messages) to start the thread with.
+ metadata:
+ type: object
+ additionalProperties:
+ type: string
nullable: true
description: |-
- Generates `best_of` completions server-side and returns the "best" (the one with the highest
- log probability per token). Results cannot be streamed.
-
- When used with `n`, `best_of` controls the number of candidate completions and `n` specifies
- how many to return – `best_of` must be greater than `n`.
-
- **Note:** Because this parameter generates many completions, it can quickly consume your token
- quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.
- default: 1
- CreateCompletionResponse:
+ Set of 16 key-value pairs that can be attached to an object. This can be useful for storing
+ additional information about the object in a structured format. Keys can be a maximum of 64
+ characters long and values can be a maximum of 512 characters long.
+ CreateTranscriptionRequestMultiPart:
type: object
- description: |-
- Represents a completion response from the API. Note: both the streamed and non-streamed response
- objects share the same shape (unlike the chat endpoint).
required:
- - id
- - object
- - created
+ - file
- model
- - choices
properties:
- id:
- type: string
- description: A unique identifier for the completion.
- object:
+ file:
type: string
- description: The object type, which is always `text_completion`.
- created:
- type: integer
- format: unixtime
- description: The Unix timestamp (in seconds) of when the completion was created.
+ format: binary
+ description: |-
+ The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4,
+ mpeg, mpga, m4a, ogg, wav, or webm.
+ x-oaiTypeLabel: file
model:
+ anyOf:
+ - type: string
+ - type: string
+ enum:
+ - whisper-1
+ description: ID of the model to use.
Only `whisper-1` is currently available.
+ x-oaiTypeLabel: string
+ language:
type: string
- description: The model used for the completion.
- choices:
- type: array
- items:
- type: object
- required:
- - index
- - text
- - logprobs
- - finish_reason
- properties:
- index:
- type: integer
- format: int64
- text:
- type: string
- logprobs:
- type: object
- required:
- - tokens
- - token_logprobs
- - top_logprobs
- - text_offset
- properties:
- tokens:
- type: array
- items:
- type: string
- token_logprobs:
- type: array
- items:
- type: number
- format: double
- top_logprobs:
- type: array
- items:
- type: object
- additionalProperties:
- type: integer
- format: int64
- text_offset:
- type: array
- items:
- type: integer
- format: int64
- nullable: true
- finish_reason:
- type: string
- enum:
- - stop
- - length
- - content_filter
- description: |-
- The reason the model stopped generating tokens. This will be `stop` if the model hit a
- natural stop point or a provided stop sequence, or `content_filter` if content was omitted
- due to a flag from our content filters, `length` if the maximum number of tokens specified
- in the request was reached, or `content_filter` if content was omitted due to a flag from our
- content filters.
- description: The list of completion choices the model generated for the input.
- usage:
- $ref: '#/components/schemas/CompletionUsage'
- x-oaiMeta:
- name: The completion object
- legacy: true
- example: ''
- CreateEditRequest:
+ description: |-
+ The language of the input audio. Supplying the input language in
+ [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy
+ and latency.
+ prompt:
+ type: string
+ description: |-
+ An optional text to guide the model's style or continue a previous audio segment. The
+ [prompt](/docs/guides/speech-to-text/prompting) should match the audio language.
+ response_format:
+ type: string
+ enum:
+ - json
+ - text
+ - srt
+ - verbose_json
+ - vtt
+ description: |-
+ The format of the transcript output, in one of these options: json, text, srt, verbose_json, or
+ vtt.
+ default: json
+ temperature:
+ type: number
+ format: double
+ minimum: 0
+ maximum: 1
+ description: |-
+ The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more
+ random, while lower values like 0.2 will make it more focused and deterministic. If set to 0,
+ the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to
+ automatically increase the temperature until certain thresholds are hit.
+ default: 0
+ CreateTranscriptionResponse:
+ type: object
+ required:
+ - text
+ properties:
+ text:
+ type: string
+ description: The transcribed text for the provided audio data.
+ task:
+ type: string
+ enum:
+ - transcribe
+ description: The label that describes which operation type generated the accompanying response data.
+ language:
+ type: string
+ description: The spoken language that was detected in the audio data.
+ duration:
+ type: number
+ format: double
+ description: The total duration of the audio processed to produce accompanying transcription information.
+ segments:
+ type: array
+ items:
+ $ref: '#/components/schemas/AudioSegment'
+ description: |-
+ A collection of information about the timing, probabilities, and other detail of each processed
+ audio segment.
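+ # Illustrative sketch only, not generator output; all field values below are hypothetical.
+ # A minimal body matching CreateTranscriptionResponse above, with the optional `task`,
+ # `language`, and `duration` fields populated and `segments` omitted:
+ # {
+ #   "text": "Hello, and welcome to the show.",
+ #   "task": "transcribe",
+ #   "language": "english",
+ #   "duration": 2.4
+ # }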
+ CreateTranslationRequestMultiPart:
type: object
required:
+ - file
- model
- - instruction
properties:
+ file:
+ type: string
+ format: binary
+ description: |-
+ The audio file object (not file name) to translate, in one of these formats: flac, mp3, mp4,
+ mpeg, mpga, m4a, ogg, wav, or webm.
+ x-oaiTypeLabel: file
model:
anyOf:
- type: string
- type: string
enum:
- - text-davinci-edit-001
- - code-davinci-edit-001
- description: |-
- ID of the model to use. You can use the `text-davinci-edit-001` or `code-davinci-edit-001`
- model with this endpoint.
+ - whisper-1
+ description: ID of the model to use. Only `whisper-1` is currently available.
x-oaiTypeLabel: string
- input:
+ prompt:
type: string
- nullable: true
- description: The input text to use as a starting point for the edit.
- default: ''
- instruction:
+ description: |-
+ An optional text to guide the model's style or continue a previous audio segment. The
+ [prompt](/docs/guides/speech-to-text/prompting) should match the audio language.
+ response_format:
type: string
- description: The instruction that tells the model how to edit the prompt.
- n:
- oneOf:
- - $ref: '#/components/schemas/EditN'
- nullable: true
- description: How many edits to generate for the input and instruction.
- default: 1
+ enum:
+ - json
+ - text
+ - srt
+ - verbose_json
+ - vtt
+ description: |-
+ The format of the transcript output, in one of these options: json, text, srt, verbose_json, or
+ vtt.
+ default: json
temperature:
- oneOf:
- - $ref: '#/components/schemas/Temperature'
- nullable: true
+ type: number
+ format: double
+ minimum: 0
+ maximum: 1
description: |-
- What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output
- more random, while lower values like 0.2 will make it more focused and deterministic.
-
- We generally recommend altering this or `top_p` but not both.
- default: 1
- top_p:
- oneOf:
- - $ref: '#/components/schemas/TopP'
- nullable: true
+ The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more
+ random, while lower values like 0.2 will make it more focused and deterministic. If set to 0,
+ the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to
+ automatically increase the temperature until certain thresholds are hit.
+ default: 0
+ CreateTranslationResponse:
+ type: object
+ required:
+ - text
+ properties:
+ text:
+ type: string
+ description: The translated text for the provided audio data.
+ task:
+ type: string
+ enum:
+ - translate
+ description: The label that describes which operation type generated the accompanying response data.
+ language:
+ type: string
+ description: The spoken language that was detected in the audio data.
+ duration:
+ type: number
+ format: double
+ description: The total duration of the audio processed to produce accompanying translation information.
+ segments:
+ type: array
+ items:
+ $ref: '#/components/schemas/AudioSegment'
description: |-
- An alternative to sampling with temperature, called nucleus sampling, where the model considers
- the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising
- the top 10% probability mass are considered.
-
- We generally recommend altering this or `temperature` but not both.
- default: 1
- CreateEditResponse:
+ A collection of information about the timing, probabilities, and other detail of each processed
+ audio segment.
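+ # Illustrative sketch only, not generator output; the file name and prompt text are
+ # hypothetical. The multipart form fields a CreateTranslationRequestMultiPart submission
+ # carries; only `file` and `model` are required, and `response_format` falls back to `json`:
+ # file=@speech_in_german.mp3
+ # model=whisper-1
+ # prompt=A calm, factual narration.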
+ DeleteAssistantFileResponse: type: object required: + - id + - deleted - object - - created - - choices - - usage properties: + id: + type: string + deleted: + type: boolean object: type: string enum: - - edit - description: The object type, which is always `edit`. - created: - type: integer - format: unixtime - description: The Unix timestamp (in seconds) of when the edit was created. - choices: - type: array - items: - type: object - required: - - text - - index - - finish_reason - properties: - text: - type: string - description: The edited result. - index: - type: integer - format: int64 - description: The index of the choice in the list of choices. - finish_reason: - type: string - enum: - - stop - - length - description: |- - The reason the model stopped generating tokens. This will be `stop` if the model hit a - natural stop point or a provided stop sequence, or `length` if the maximum number of tokens - specified in the request was reached. - description: 'description: A list of edit choices. Can be more than one if `n` is greater than 1.' - usage: - $ref: '#/components/schemas/CompletionUsage' - CreateEmbeddingRequest: + - assistant.file.deleted + description: |- + Deletes the association between the assistant and the file, but does not delete the + [File](/docs/api-reference/files) object itself. + DeleteAssistantResponse: type: object required: - - model - - input + - id + - deleted + - object properties: - model: - anyOf: - - type: string - - type: string - enum: - - text-embedding-ada-002 - description: ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. - x-oaiTypeLabel: string - input: - anyOf: - - type: string - - type: array - items: - type: string - - $ref: '#/components/schemas/TokenArray' - - $ref: '#/components/schemas/TokenArrayArray' - description: |- - Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a - single request, pass an array of strings or array of token arrays. Each input must not exceed - the max input tokens for the model (8191 tokens for `text-embedding-ada-002`) and cannot be an empty string. - [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) - for counting tokens. - user: - $ref: '#/components/schemas/User' - CreateEmbeddingResponse: + id: + type: string + deleted: + type: boolean + object: + type: string + enum: + - assistant.deleted + DeleteFileResponse: type: object required: + - id - object - - model - - data - - usage + - deleted properties: + id: + type: string object: type: string enum: - - embedding - description: The object type, which is always "embedding". - model: + - file + deleted: + type: boolean + DeleteModelResponse: + type: object + required: + - id + - deleted + - object + properties: + id: type: string - description: The name of the model used to generate the embedding. - data: - type: array - items: - $ref: '#/components/schemas/Embedding' - description: The list of embeddings generated by the model. - usage: - type: object - description: The usage information for the request. - required: - - prompt_tokens - - total_tokens - properties: - prompt_tokens: - type: integer - format: int64 - description: The number of tokens used by the prompt. - total_tokens: - type: integer - format: int64 - description: The total number of tokens used by the request. 
- CreateFileRequest: + deleted: + type: boolean + object: + type: string + enum: + - model + DeleteThreadResponse: type: object required: - - file - - purpose + - id + - deleted + - object properties: - file: + id: type: string - format: binary + deleted: + type: boolean + object: + type: string + enum: + - thread.deleted + Embedding: + type: object + required: + - index + - embedding + - object + properties: + index: + type: integer + format: int64 + description: The index of the embedding in the list of embeddings. + embedding: + anyOf: + - type: array + items: + type: number + format: double + - type: string description: |- - Name of the [JSON Lines](https://jsonlines.readthedocs.io/en/latest/) file to be uploaded. - - If the `purpose` is set to "fine-tune", the file will be used for fine-tuning. - purpose: + The embedding vector, which is a list of floats. The length of vector depends on the model as + listed in the [embedding guide](/docs/guides/embeddings). + object: type: string - description: |- - The intended purpose of the uploaded documents. Use "fine-tune" for - [fine-tuning](/docs/api-reference/fine-tuning). This allows us to validate the format of the - uploaded file. - CreateFineTuneRequest: + enum: + - embedding + description: The object type, which is always "embedding". + description: Represents an embedding vector returned by embedding endpoint. + EmbeddingUsage: type: object required: - - training_file + - prompt_tokens + - total_tokens properties: - training_file: - type: string - description: |- - The ID of an uploaded file that contains training data. - - See [upload file](/docs/api-reference/files/upload) for how to upload a file. - - Your dataset must be formatted as a JSONL file, where each training example is a JSON object - with the keys "prompt" and "completion". Additionally, you must upload your file with the - purpose `fine-tune`. - - See the [fine-tuning guide](/docs/guides/legacy-fine-tuning/creating-training-data) for more - details. - validation_file: - type: string - nullable: true - description: |- - The ID of an uploaded file that contains validation data. - - If you provide this file, the data is used to generate validation metrics periodically during - fine-tuning. These metrics can be viewed in the - [fine-tuning results file](/docs/guides/legacy-fine-tuning/analyzing-your-fine-tuned-model). - Your train and validation data should be mutually exclusive. - - Your dataset must be formatted as a JSONL file, where each validation example is a JSON object - with the keys "prompt" and "completion". Additionally, you must upload your file with the - purpose `fine-tune`. - - See the [fine-tuning guide](/docs/guides/legacy-fine-tuning/creating-training-data) for more - details. - model: - anyOf: - - type: string - - type: string - enum: - - ada - - babbage - - curie - - davinci - nullable: true - description: |- - The name of the base model to fine-tune. You can select one of "ada", "babbage", "curie", - "davinci", or a fine-tuned model created after 2022-04-21 and before 2023-08-22. To learn more - about these models, see the [Models](/docs/models) documentation. - x-oaiTypeLabel: string - n_epochs: - type: integer - format: int64 - nullable: true - description: |- - The number of epochs to train the model for. An epoch refers to one full cycle through the - training dataset. - default: 4 - batch_size: + prompt_tokens: type: integer format: int64 - nullable: true - description: |- - The batch size to use for training. 
The batch size is the number of training examples used to - train a single forward and backward pass. - - By default, the batch size will be dynamically configured to be ~0.2% of the number of examples - in the training set, capped at 256 - in general, we've found that larger batch sizes tend to - work better for larger datasets. - default: null - learning_rate_multiplier: - type: number - format: double - nullable: true - description: |- - The learning rate multiplier to use for training. The fine-tuning learning rate is the original - learning rate used for pretraining multiplied by this value. - - By default, the learning rate multiplier is the 0.05, 0.1, or 0.2 depending on final - `batch_size` (larger learning rates tend to perform better with larger batch sizes). We - recommend experimenting with values in the range 0.02 to 0.2 to see what produces the best - results. - default: null - prompt_loss_rate: - type: number - format: double - nullable: true - description: |- - The weight to use for loss on the prompt tokens. This controls how much the model tries to - learn to generate the prompt (as compared to the completion which always has a weight of 1.0), - and can add a stabilizing effect to training when completions are short. - - If prompts are extremely long (relative to completions), it may make sense to reduce this - weight so as to avoid over-prioritizing learning the prompt. - default: 0.01 - compute_classification_metrics: - type: boolean - nullable: true - description: |- - If set, we calculate classification-specific metrics such as accuracy and F-1 score using the - validation set at the end of every epoch. These metrics can be viewed in the - [results file](/docs/guides/legacy-fine-tuning/analyzing-your-fine-tuned-model). - - In order to compute classification metrics, you must provide a `validation_file`. Additionally, - you must specify `classification_n_classes` for multiclass classification or - `classification_positive_class` for binary classification. - default: false - classification_n_classes: + description: The number of tokens used by the prompt. + total_tokens: type: integer format: int64 - nullable: true - description: |- - The number of classes in a classification task. - - This parameter is required for multiclass classification. - default: null - classification_positive_class: + description: The total number of tokens used by the request. + Error: + type: object + required: + - type + - message + - param + - code + properties: + type: + type: string + message: + type: string + param: type: string nullable: true - description: |- - The positive class in binary classification. - - This parameter is needed to generate precision, recall, and F1 metrics when doing binary - classification. - default: null - classification_betas: - type: array - items: - type: number - format: double - nullable: true - description: |- - If this is provided, we calculate F-beta scores at the specified beta values. The F-beta score - is a generalization of F-1 score. This is only used for binary classification. - - With a beta of 1 (i.e. the F-1 score), precision and recall are given the same weight. A larger - beta score puts more weight on recall and less on precision. A smaller beta score puts more - weight on precision and less on recall. - default: null - suffix: - oneOf: - - $ref: '#/components/schemas/SuffixString' + code: + type: string nullable: true - description: |- - A string of up to 18 characters that will be added to your fine-tuned model name. 
- - For example, a `suffix` of "custom-model-name" would produce a model name like - `ada:ft-your-org:custom-model-name-2022-02-15-04-21-04`. - default: null - CreateFineTuningJobRequest: + ErrorResponse: type: object required: - - training_file + - error + properties: + error: + $ref: '#/components/schemas/Error' + FineTuningJob: + type: object + required: + - id + - created_at + - error + - fine_tuned_model + - finished_at + - hyperparameters - model + - object + - organization_id + - result_files + - status + - trained_tokens + - training_file + - validation_file properties: - training_file: + id: type: string + description: The object identifier, which can be referenced in the API endpoints. + created_at: + type: integer + format: unixtime + description: The Unix timestamp (in seconds) for when the fine-tuning job was created. + error: + type: object + properties: + code: + type: string + description: A machine-readable error code. + message: + type: string + description: A human-readable error message. + param: + type: string + nullable: true + description: |- + The parameter that was invalid, usually `training_file` or `validation_file`. This field will + be null if the failure was not parameter-specific. + required: + - code + - message + - param + nullable: true description: |- - The ID of an uploaded file that contains training data. - - See [upload file](/docs/api-reference/files/upload) for how to upload a file. - - Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with - the purpose `fine-tune`. - - See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. - validation_file: + For fine-tuning jobs that have `failed`, this will contain more information on the cause of the + failure. + fine_tuned_model: type: string nullable: true description: |- - The ID of an uploaded file that contains validation data. - - If you provide this file, the data is used to generate validation metrics periodically during - fine-tuning. These metrics can be viewed in the fine-tuning results file. The same data should - not be present in both train and validation files. - - Your dataset must be formatted as a JSONL file. You must upload your file with the purpose - `fine-tune`. - - See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. - model: - anyOf: - - type: string - - type: string - enum: - - babbage-002 - - davinci-002 - - gpt-3.5-turbo + The name of the fine-tuned model that is being created. The value will be null if the + fine-tuning job is still running. + finished_at: + type: string + format: date-time + nullable: true description: |- - The name of the model to fine-tune. You can select one of the - [supported models](/docs/guides/fine-tuning/what-models-can-be-fine-tuned). - x-oaiTypeLabel: string + The Unix timestamp (in seconds) for when the fine-tuning job was finished. The value will be + null if the fine-tuning job is still running. hyperparameters: type: object - description: The hyperparameters used for the fine-tuning job. properties: n_epochs: anyOf: - type: string enum: - auto + - low + - high - $ref: '#/components/schemas/NEpochs' description: |- The number of epochs to train the model for. An epoch refers to one full cycle through the - training dataset. - default: auto - suffix: - oneOf: - - $ref: '#/components/schemas/SuffixString' - nullable: true - description: |- - A string of up to 18 characters that will be added to your fine-tuned model name. 
- - For example, a `suffix` of "custom-model-name" would produce a model name like - `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`. - default: null - CreateImageEditRequest: - type: object - required: - - prompt - - image - properties: - prompt: - type: string - description: A text description of the desired image(s). The maximum length is 1000 characters. - image: - type: string - format: binary - description: |- - The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not - provided, image must have transparency, which will be used as the mask. - mask: - type: string - format: binary - description: |- - An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where - `image` should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions - as `image`. - n: - oneOf: - - $ref: '#/components/schemas/ImagesN' - nullable: true - description: The number of images to generate. Must be between 1 and 10. - default: 1 - size: - type: string - enum: - - 256x256 - - 512x512 - - 1024x1024 - nullable: true - description: The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. - default: 1024x1024 - response_format: - type: string - enum: - - url - - b64_json - nullable: true - description: The format in which the generated images are returned. Must be one of `url` or `b64_json`. - default: url - user: - $ref: '#/components/schemas/User' - CreateImageRequest: - type: object - required: - - prompt - properties: - prompt: - type: string - description: A text description of the desired image(s). The maximum length is 1000 characters. - n: - oneOf: - - $ref: '#/components/schemas/ImagesN' - nullable: true - description: The number of images to generate. Must be between 1 and 10. - default: 1 - size: + training dataset. + + "auto" decides the optimal number of epochs based on the size of the dataset. If setting the + number manually, we support any number between 1 and 50 epochs. + default: auto + required: + - n_epochs + description: |- + The hyperparameters used for the fine-tuning job. See the + [fine-tuning guide](/docs/guides/fine-tuning) for more details. + model: type: string - enum: - - 256x256 - - 512x512 - - 1024x1024 - nullable: true - description: The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. - default: 1024x1024 - response_format: + description: The base model that is being fine-tuned. + object: type: string enum: - - url - - b64_json - nullable: true - description: The format in which the generated images are returned. Must be one of `url` or `b64_json`. - default: url - user: - $ref: '#/components/schemas/User' - CreateImageVariationRequest: - type: object - required: - - image - properties: - image: + - fine_tuning.job + description: The object type, which is always "fine_tuning.job". + organization_id: type: string - format: binary + description: The organization that owns the fine-tuning job. + result_files: + type: array + items: + type: string description: |- - The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, - and square. - n: - oneOf: - - $ref: '#/components/schemas/ImagesN' - nullable: true - description: The number of images to generate. Must be between 1 and 10. - default: 1 - size: + The compiled results file ID(s) for the fine-tuning job. You can retrieve the results with the + [Files API](/docs/api-reference/files/retrieve-contents). 
+ status: type: string enum: - - 256x256 - - 512x512 - - 1024x1024 + - validating_files + - queued + - running + - succeeded + - failed + - cancelled + description: |- + The current status of the fine-tuning job, which can be either `validating_files`, `queued`, + `running`, `succeeded`, `failed`, or `cancelled`. + trained_tokens: + type: integer + format: int64 nullable: true - description: The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. - default: 1024x1024 - response_format: + description: |- + The total number of billable tokens processed by this fine-tuning job. The value will be null + if the fine-tuning job is still running. + training_file: + type: string + description: |- + The file ID used for training. You can retrieve the training data with the + [Files API](/docs/api-reference/files/retrieve-contents). + validation_file: type: string - enum: - - url - - b64_json nullable: true - description: The format in which the generated images are returned. Must be one of `url` or `b64_json`. - default: url - user: - $ref: '#/components/schemas/User' - CreateModerationRequest: - type: object - required: - - input - properties: - input: - anyOf: - - type: string - - type: array - items: - type: string - description: The input text to classify - model: - anyOf: - - type: string - - type: string - enum: - - text-moderation-latest - - text-moderation-stable description: |- - Two content moderations models are available: `text-moderation-stable` and - `text-moderation-latest`. The default is `text-moderation-latest` which will be automatically - upgraded over time. This ensures you are always using our most accurate model. If you use - `text-moderation-stable`, we will provide advanced notice before updating the model. Accuracy - of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`. - x-oaiTypeLabel: string - default: text-moderation-latest - CreateModerationResponse: + The file ID used for validation. You can retrieve the validation results with the + [Files API](/docs/api-reference/files/retrieve-contents). + FineTuningJobEvent: type: object required: - id - - model - - results + - created_at + - level + - message + - object properties: id: type: string - description: The unique identifier for the moderation request. - model: - type: string - description: The model used to generate the moderation results. - results: - type: array - items: - type: object - required: - - flagged - - categories - - category_scores - properties: - flagged: - type: boolean - description: Whether the content violates [OpenAI's usage policies](/policies/usage-policies). - categories: - type: object - description: A list of the categories, and whether they are flagged or not. - required: - - hate - - hate/threatening - - harassment - - harassment/threatening - - self-harm - - self-harm/intent - - self-harm/instructive - - sexual - - sexual/minors - - violence - - violence/graphic - properties: - hate: - type: boolean - description: |- - Content that expresses, incites, or promotes hate based on race, gender, ethnicity, - religion, nationality, sexual orientation, disability status, or caste. Hateful content - aimed at non-protected groups (e.g., chess players) is harrassment. - hate/threatening: - type: boolean - description: |- - Hateful content that also includes violence or serious harm towards the targeted group - based on race, gender, ethnicity, religion, nationality, sexual orientation, disability - status, or caste. 
- harassment: - type: boolean - description: Content that expresses, incites, or promotes harassing language towards any target. - harassment/threatening: - type: boolean - description: Harassment content that also includes violence or serious harm towards any target. - self-harm: - type: boolean - description: |- - Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting, - and eating disorders. - self-harm/intent: - type: boolean - description: |- - Content where the speaker expresses that they are engaging or intend to engage in acts of - self-harm, such as suicide, cutting, and eating disorders. - self-harm/instructive: - type: boolean - description: |- - Content that encourages performing acts of self-harm, such as suicide, cutting, and eating - disorders, or that gives instructions or advice on how to commit such acts. - sexual: - type: boolean - description: |- - Content meant to arouse sexual excitement, such as the description of sexual activity, or - that promotes sexual services (excluding sex education and wellness). - sexual/minors: - type: boolean - description: Sexual content that includes an individual who is under 18 years old. - violence: - type: boolean - description: Content that depicts death, violence, or physical injury. - violence/graphic: - type: boolean - description: Content that depicts death, violence, or physical injury in graphic detail. - category_scores: - type: object - description: A list of the categories along with their scores as predicted by model. - required: - - hate - - hate/threatening - - harassment - - harassment/threatening - - self-harm - - self-harm/intent - - self-harm/instructive - - sexual - - sexual/minors - - violence - - violence/graphic - properties: - hate: - type: number - format: double - description: The score for the category 'hate'. - hate/threatening: - type: number - format: double - description: The score for the category 'hate/threatening'. - harassment: - type: number - format: double - description: The score for the category 'harassment'. - harassment/threatening: - type: number - format: double - description: The score for the category 'harassment/threatening'. - self-harm: - type: number - format: double - description: The score for the category 'self-harm'. - self-harm/intent: - type: number - format: double - description: The score for the category 'self-harm/intent'. - self-harm/instructive: - type: number - format: double - description: The score for the category 'self-harm/instructive'. - sexual: - type: number - format: double - description: The score for the category 'sexual'. - sexual/minors: - type: number - format: double - description: The score for the category 'sexual/minors'. - violence: - type: number - format: double - description: The score for the category 'violence'. - violence/graphic: - type: number - format: double - description: The score for the category 'violence/graphic'. - description: A list of moderation objects. - CreateTranscriptionRequest: + created_at: + type: integer + format: unixtime + level: + type: string + enum: + - info + - warn + - error + message: + type: string + object: + type: string + enum: + - fine_tuning.job.event + description: Fine-tuning job event object + FunctionObject: type: object required: - - file - - model + - name properties: - file: + description: type: string - format: binary description: |- - The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, - mpeg, mpga, m4a, ogg, wav, or webm. 
- x-oaiTypeLabel: file - model: - anyOf: - - type: string - - type: string - enum: - - whisper-1 - description: ID of the model to use. Only `whisper-1` is currently available. - x-oaiTypeLabel: string - prompt: + A description of what the function does, used by the model to choose when and how to call the + function. + name: type: string description: |- - An optional text to guide the model's style or continue a previous audio segment. The - [prompt](/docs/guides/speech-to-text/prompting) should match the audio language. - response_format: + The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and + dashes, with a maximum length of 64. + parameters: + $ref: '#/components/schemas/FunctionParameters' + FunctionParameters: + type: object + additionalProperties: {} + description: |- + The parameters the functions accepts, described as a JSON Schema object. See the + [guide](/docs/guides/gpt/function-calling) for examples, and the + [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation + about the format.\n\nTo describe a function that accepts no parameters, provide the value + `{\"type\": \"object\", \"properties\": {}}`. + Image: + type: object + properties: + b64_json: type: string - enum: - - json - - text - - srt - - verbose_json - - vtt - description: |- - The format of the transcript output, in one of these options: json, text, srt, verbose_json, or - vtt. - default: json - temperature: - type: number - format: double - description: |- - The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more - random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, - the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to - automatically increase the temperature until certain thresholds are hit. - minimum: 0 - maximum: 1 - default: 0 - language: + format: base64 + description: The base64-encoded JSON of the generated image, if `response_format` is `b64_json`. + url: type: string - description: |- - The language of the input audio. Supplying the input language in - [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy - and latency. - CreateTranscriptionResponse: + format: uri + description: The URL of the generated image, if `response_format` is `url` (default). + revised_prompt: + type: string + description: The prompt that was used to generate the image, if there was any revision to the prompt. + description: Represents the url or the content of an image generated by the OpenAI API. 
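+ # Illustrative sketch only, not generator output; the URL and prompt text are hypothetical.
+ # A single Image entry as it would appear in the `data` array of an images response when
+ # `response_format` is `url`; `revised_prompt` is present only if the prompt was rewritten:
+ # {
+ #   "url": "https://example.com/generated/0001.png",
+ #   "revised_prompt": "A watercolor painting of a lighthouse at dawn, soft diffuse light"
+ # }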
+ ImagesN: + type: integer + format: int64 + minimum: 1 + maximum: 10 + ImagesResponse: type: object required: - - text + - created + - data properties: - text: + created: + type: integer + format: unixtime + data: + type: array + items: + $ref: '#/components/schemas/Image' + LearningRateMultiplier: + type: number + format: double + minimum: 0 + exclusiveMinimum: true + ListAssistantFilesResponse: + type: object + required: + - object + - data + - first_id + - last_id + - has_more + properties: + object: + type: string + enum: + - list + data: + type: array + items: + $ref: '#/components/schemas/AssistantFileObject' + first_id: type: string - CreateTranslationRequest: + last_id: + type: string + has_more: + type: boolean + ListAssistantsResponse: type: object required: - - file - - model + - object + - data + - first_id + - last_id + - has_more properties: - file: + object: type: string - format: binary - description: |- - The audio file object (not file name) to translate, in one of these formats: flac, mp3, mp4, - mpeg, mpga, m4a, ogg, wav, or webm. - x-oaiTypeLabel: file - model: - anyOf: - - type: string - - type: string - enum: - - whisper-1 - description: ID of the model to use. Only `whisper-1` is currently available. - x-oaiTypeLabel: string - prompt: + enum: + - list + data: + type: array + items: + $ref: '#/components/schemas/AssistantObject' + first_id: type: string - description: |- - An optional text to guide the model's style or continue a previous audio segment. The - [prompt](/docs/guides/speech-to-text/prompting) should match the audio language. - response_format: + last_id: + type: string + has_more: + type: boolean + ListFilesResponse: + type: object + required: + - data + - object + properties: + data: + type: array + items: + $ref: '#/components/schemas/OpenAIFile' + object: type: string enum: - - json - - text - - srt - - verbose_json - - vtt - description: |- - The format of the transcript output, in one of these options: json, text, srt, verbose_json, or - vtt. - default: json - temperature: - type: number - format: double - description: |- - The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more - random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, - the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to - automatically increase the temperature until certain thresholds are hit. 
- minimum: 0 - maximum: 1 - default: 0 - CreateTranslationResponse: + - list + ListFineTuningJobEventsResponse: type: object required: - - text + - data + - object properties: - text: + data: + type: array + items: + $ref: '#/components/schemas/FineTuningJobEvent' + object: type: string - DeleteFileResponse: + enum: + - list + ListMessageFilesResponse: type: object required: - - id - object - - deleted + - data + - first_id + - last_id + - has_more properties: - id: + object: + type: string + enum: + - list + data: + type: array + items: + $ref: '#/components/schemas/MessageFileObject' + first_id: type: string + last_id: + type: string + has_more: + type: boolean + ListMessagesResponse: + type: object + required: + - object + - data + - first_id + - last_id + - has_more + properties: object: type: string - deleted: + enum: + - list + data: + type: array + items: + $ref: '#/components/schemas/MessageObject' + first_id: + type: string + last_id: + type: string + has_more: type: boolean - DeleteModelResponse: + ListModelsResponse: type: object required: - - id - object - - deleted + - data properties: - id: + object: + type: string + enum: + - list + data: + type: array + items: + $ref: '#/components/schemas/Model' + ListOrder: + type: string + enum: + - asc + - desc + ListPaginatedFineTuningJobsResponse: + type: object + required: + - data + - has_more + - object + properties: + data: + type: array + items: + $ref: '#/components/schemas/FineTuningJob' + has_more: + type: boolean + object: + type: string + enum: + - list + ListRunStepsResponse: + type: object + required: + - object + - data + - first_id + - last_id + - has_more + properties: + object: + type: string + enum: + - list + data: + type: array + items: + $ref: '#/components/schemas/RunStepObject' + first_id: type: string - object: + last_id: type: string - deleted: + has_more: type: boolean - EditN: - type: integer - format: int64 - minimum: 0 - maximum: 20 - Embedding: + ListRunsResponse: type: object - description: Represents an embedding vector returned by embedding endpoint. required: - - index - object - - embedding + - data + - first_id + - last_id + - has_more properties: - index: - type: integer - format: int64 - description: The index of the embedding in the list of embeddings. object: type: string enum: - - embedding - description: The object type, which is always "embedding". - embedding: + - list + data: type: array items: - type: number - format: double - description: |- - The embedding vector, which is a list of floats. The length of vector depends on the model as\ - listed in the [embedding guide](/docs/guides/embeddings). - Error: + $ref: '#/components/schemas/RunObject' + first_id: + type: string + last_id: + type: string + has_more: + type: boolean + MessageContentImageFileObject: type: object required: - type - - message - - param - - code + - image_file properties: type: type: string - message: + enum: + - image_file + description: Always `image_file`. + image_file: + type: object + properties: + file_id: + type: string + description: The [File](/docs/api-reference/files) ID of the image in the message content. + required: + - file_id + description: References an image [File](/docs/api-reference/files) in the content of a message. + MessageContentTextAnnotationsFileCitationObject: + type: object + required: + - type + - text + - file_citation + - start_index + - end_index + properties: + type: type: string - param: + enum: + - file_citation + description: Always `file_citation`. 
+ text: type: string - nullable: true - code: + description: The text in the message content that needs to be replaced. + file_citation: + type: object + properties: + file_id: + type: string + description: The ID of the specific File the citation is from. + quote: + type: string + description: The specific quote in the file. + required: + - file_id + - quote + start_index: + type: integer + format: int64 + minimum: 0 + end_index: + type: integer + format: int64 + minimum: 0 + description: |- + A citation within the message that points to a specific quote from a specific File associated + with the assistant or the message. Generated when the assistant uses the "retrieval" tool to + search files. + MessageContentTextAnnotationsFilePathObject: + type: object + required: + - type + - text + - file_path + - start_index + - end_index + properties: + type: type: string - nullable: true - ErrorResponse: + enum: + - file_path + description: Always `file_path`. + text: + type: string + description: The text in the message content that needs to be replaced. + file_path: + type: object + properties: + file_id: + type: string + description: The ID of the file that was generated. + required: + - file_id + start_index: + type: integer + format: int64 + minimum: 0 + end_index: + type: integer + format: int64 + minimum: 0 + description: |- + A URL for the file that's generated when the assistant used the `code_interpreter` tool to + generate a file. + MessageContentTextObject: type: object required: - - error + - type + - text properties: - error: - $ref: '#/components/schemas/Error' - FineTune: + type: + type: string + enum: + - text + description: Always `text`. + text: + type: object + properties: + value: + type: string + description: The data that makes up the text. + annotations: + type: array + items: + $ref: '#/components/schemas/MessageContentTextObjectAnnotations' + required: + - value + - annotations + description: The text content that is part of a message. + MessageContentTextObjectAnnotations: + oneOf: + - $ref: '#/components/schemas/MessageContentTextAnnotationsFileCitationObject' + - $ref: '#/components/schemas/MessageContentTextAnnotationsFilePathObject' + x-oaiExpandable: true + MessageFileObject: type: object - description: The `FineTune` object represents a legacy fine-tune job that has been created through the API. required: - id - object - created_at - - updated_at - - model - - fine_tuned_model - - organization_id - - status - - hyperparams - - training_files - - validation_files - - result_files + - message_id properties: id: type: string - description: The object identifier, which can be referenced in the API endpoints. + description: The identifier, which can be referenced in API endpoints. object: type: string enum: - - fine-tune - description: The object type, which is always "fine-tune". + - thread.message.file + description: The object type, which is always `thread.message.file`. created_at: type: integer format: unixtime - description: The Unix timestamp (in seconds) for when the fine-tuning job was created. - updated_at: - type: integer - format: unixtime - description: The Unix timestamp (in seconds) for when the fine-tuning job was last updated. - model: + description: The Unix timestamp (in seconds) for when the message file was created. + message_id: type: string - description: The base model that is being fine-tuned.
- fine_tuned_model: + description: The ID of the [message](/docs/api-reference/messages) that the [File](/docs/api-reference/files) is attached to. + description: A list of files attached to a `message`. + MessageObject: + type: object + required: + - id + - object + - created_at + - thread_id + - role + - content + - assistant_id + - run_id + - file_ids + - metadata + properties: + id: type: string - nullable: true - description: The name of the fine-tuned model that is being created. - organization_id: + description: The identifier, which can be referenced in API endpoints. + object: type: string - description: The organization that owns the fine-tuning job. - status: + enum: + - thread.message + description: The object type, which is always `thread.message`. + created_at: + type: integer + format: unixtime + description: The Unix timestamp (in seconds) for when the message was created. + thread_id: + type: string + description: The [thread](/docs/api-reference/threads) ID that this message belongs to. + role: type: string enum: - - created - - running - - succeeded - - failed - - cancelled - description: |- - The current status of the fine-tuning job, which can be either `created`, `running`, - `succeeded`, `failed`, or `cancelled`. - hyperparams: - type: object - description: |- - The hyperparameters used for the fine-tuning job. See the - [fine-tuning guide](/docs/guides/legacy-fine-tuning/hyperparameters) for more details. - required: - - n_epochs - - batch_size - - prompt_loss_weight - - learning_rate_multiplier - properties: - n_epochs: - type: integer - format: int64 - description: |- - The number of epochs to train the model for. An epoch refers to one full cycle through the - training dataset. - batch_size: - type: integer - format: int64 - description: |- - The batch size to use for training. The batch size is the number of training examples used to - train a single forward and backward pass. - prompt_loss_weight: - type: number - format: double - description: The weight to use for loss on the prompt tokens. - learning_rate_multiplier: - type: number - format: double - description: The learning rate multiplier to use for training. - compute_classification_metrics: - type: boolean - description: The classification metrics to compute using the validation dataset at the end of every epoch. - classification_positive_class: - type: string - description: The positive class to use for computing classification metrics. - classification_n_classes: - type: integer - format: int64 - description: The number of classes to use for computing classification metrics. - training_files: - type: array - items: - $ref: '#/components/schemas/OpenAIFile' - description: The list of files used for training. - validation_files: - type: array - items: - $ref: '#/components/schemas/OpenAIFile' - description: The list of files used for validation. - result_files: + - user + - assistant + description: The entity that produced the message. One of `user` or `assistant`. + content: type: array items: - $ref: '#/components/schemas/OpenAIFile' - description: The compiled results files for the fine-tuning job. - events: + $ref: '#/components/schemas/MessageObjectContent' + description: The content of the message, as an array of text and/or images. + assistant_id: + type: string + nullable: true + description: |- + If applicable, the ID of the [assistant](/docs/api-reference/assistants) that authored this + message.
+ run_id: + type: string + nullable: true + description: |- + If applicable, the ID of the [run](/docs/api-reference/runs) associated with the authoring of + this message. + file_ids: type: array items: - $ref: '#/components/schemas/FineTuneEvent' - description: The list of events that have been observed in the lifecycle of the FineTune job. - FineTuneEvent: + type: string + maxItems: 10 + description: |- + A list of [file](/docs/api-reference/files) IDs that the assistant should use. Useful for + tools like retrieval and code_interpreter that can access files. A maximum of 10 files can be + attached to a message. + default: [] + metadata: + type: object + additionalProperties: + type: string + nullable: true + description: |- + Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + additional information about the object in a structured format. Keys can be a maximum of 64 + characters long and values can be a maximum of 512 characters long. + x-oaiTypeLabel: map + MessageObjectContent: + oneOf: + - $ref: '#/components/schemas/MessageContentImageFileObject' + - $ref: '#/components/schemas/MessageContentTextObject' + x-oaiExpandable: true + Model: type: object required: + - id + - created - object - - created_at - - level - - message + - owned_by properties: - object: + id: type: string - created_at: + description: The model identifier, which can be referenced in the API endpoints. + created: type: integer format: unixtime - level: + description: The Unix timestamp (in seconds) when the model was created. + object: type: string - message: + enum: + - model + description: The object type, which is always "model". + owned_by: + type: string + description: The organization that owns the model. + description: Describes an OpenAI model offering that can be used with the API. + ModifyAssistantRequest: + type: object + properties: + model: + type: string + description: |- + ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to + see all of your available models, or see our [Model overview](/docs/models/overview) for + descriptions of them. + name: + type: string + nullable: true + maxLength: 256 + description: The name of the assistant. The maximum length is 256 characters. + description: + type: string + nullable: true + maxLength: 512 + description: The description of the assistant. The maximum length is 512 characters. + instructions: type: string - FineTuningEvent: + nullable: true + maxLength: 32768 + description: The system instructions that the assistant uses. The maximum length is 32768 characters. + tools: + $ref: '#/components/schemas/CreateAssistantRequestToolsItem' + description: |- + A list of tools enabled on the assistant. There can be a maximum of 128 tools per assistant. + Tools can be of types `code_interpreter`, `retrieval`, or `function`. + default: [] + file_ids: + type: array + items: + type: string + maxItems: 20 + description: |- + A list of [file](/docs/api-reference/files) IDs attached to this assistant. There can be a + maximum of 20 files attached to the assistant. Files are ordered by their creation date in + ascending order. + default: [] + metadata: + type: object + additionalProperties: + type: string + nullable: true + description: |- + Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + additional information about the object in a structured format. Keys can be a maximum of 64 + characters long and values can be a maximum of 512 characters long.
+ x-oaiTypeLabel: map + ModifyMessageRequest: + type: object + properties: + metadata: + type: object + additionalProperties: + type: string + nullable: true + description: |- + Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + additional information about the object in a structured format. Keys can be a maximum of 64 + characters long and values can be a maximum of 512 characters long. + x-oaiTypeLabel: map + ModifyRunRequest: + type: object + properties: + metadata: + type: object + additionalProperties: + type: string + nullable: true + description: |- + Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + additional information about the object in a structured format. Keys can be a maximum of 64 + characters long and values can be a maximum of 512 characters long. + x-oaiTypeLabel: map + ModifyThreadRequest: + type: object + properties: + metadata: + type: object + additionalProperties: + type: string + nullable: true + description: |- + Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + additional information about the object in a structured format. Keys can be a maximum of 64 + characters long and values can be a maximum of 512 characters long. + NEpochs: + type: integer + format: int64 + minimum: 1 + maximum: 50 + OpenAIFile: type: object required: - - object + - id + - bytes - created_at - - level - - message + - filename + - object + - purpose + - status properties: - object: + id: type: string + description: The file identifier, which can be referenced in the API endpoints. + bytes: + type: integer + format: int64 + nullable: true + description: The size of the file, in bytes. created_at: type: integer format: unixtime - level: + description: The Unix timestamp (in seconds) for when the file was created. + filename: type: string - message: + description: The name of the file. + object: type: string - data: - type: object - additionalProperties: {} - nullable: true - type: + enum: + - file + description: The object type, which is always "file". + purpose: type: string enum: - - message - - metrics - FineTuningJob: + - fine-tune + - fine-tune-results + - assistants + - assistants_output + description: |- + The intended purpose of the file. Supported values are `fine-tune`, `fine-tune-results`, + `assistants`, and `assistants_output`. + status: + type: string + enum: + - uploaded + - processed + - error + description: |- + Deprecated. The current status of the file, which can be either `uploaded`, `processed`, or + `error`. + deprecated: true + status_details: + type: string + description: |- + Deprecated. For details on why a fine-tuning training file failed validation, see the `error` + field on `fine_tuning.job`. + deprecated: true + description: The `File` object represents a document that has been uploaded to OpenAI. + Prompt: + oneOf: + - type: string + - type: array + items: + type: string + - $ref: '#/components/schemas/TokenArrayItem' + - $ref: '#/components/schemas/TokenArrayArray' + RunCompletionUsage: + type: object + required: + - completion_tokens + - prompt_tokens + - total_tokens + properties: + completion_tokens: + type: integer + format: int64 + description: Number of completion tokens used over the course of the run. + prompt_tokens: + type: integer + format: int64 + description: Number of prompt tokens used over the course of the run.
+ total_tokens: + type: integer + format: int64 + description: Total number of tokens used (prompt + completion). + description: |- + Usage statistics related to the run. This value will be `null` if the run is not in a terminal + state (i.e. `in_progress`, `queued`, etc.). + RunObject: type: object required: - id - object - created_at - - finished_at - - model - - fine_tuned_model - - organization_id + - thread_id + - assistant_id - status - - hyperparameters - - training_file - - validation_file - - result_files - - trained_tokens - - error + - required_action + - last_error + - expires_at + - started_at + - cancelled_at + - failed_at + - completed_at + - model + - instructions + - tools + - file_ids + - metadata + - usage properties: id: type: string - description: The object identifier, which can be referenced in the API endpoints. + description: The identifier, which can be referenced in API endpoints. object: type: string enum: - - fine_tuning.job - description: The object type, which is always "fine_tuning.job". + - thread.run + description: The object type, which is always `thread.run`. created_at: type: integer format: unixtime - description: The Unix timestamp (in seconds) for when the fine-tuning job was created. - finished_at: - type: string - format: date-time - nullable: true - description: |- - The Unix timestamp (in seconds) for when the fine-tuning job was finished. The value will be - null if the fine-tuning job is still running. - model: + description: The Unix timestamp (in seconds) for when the run was created. + thread_id: type: string - description: The base model that is being fine-tuned. - fine_tuned_model: - type: string - nullable: true description: |- - The name of the fine-tuned model that is being created. The value will be null if the - fine-tuning job is still running. - organization_id: + The ID of the [thread](/docs/api-reference/threads) that was executed on as a part of this + run. + assistant_id: type: string - description: The organization that owns the fine-tuning job. + description: The ID of the [assistant](/docs/api-reference/assistants) used for execution of this run. status: type: string enum: - - created - - pending - - running - - succeeded - - failed + - queued + - in_progress + - requires_action + - cancelling - cancelled + - failed + - completed + - expired description: |- - The current status of the fine-tuning job, which can be either `created`, `pending`, `running`, - `succeeded`, `failed`, or `cancelled`. - hyperparameters: + The status of the run, which can be either `queued`, `in_progress`, `requires_action`, + `cancelling`, `cancelled`, `failed`, `completed`, or `expired`. + required_action: type: object - description: |- - The hyperparameters used for the fine-tuning job. See the - [fine-tuning guide](/docs/guides/fine-tuning) for more details. properties: - n_epochs: - anyOf: - - type: string - enum: - - auto - - $ref: '#/components/schemas/NEpochs' - description: |- - The number of epochs to train the model for. An epoch refers to one full cycle through the - training dataset. - - "Auto" decides the optimal number of epochs based on the size of the dataset. If setting the - number manually, we support any number between 1 and 50 epochs. - default: auto - training_file: - type: string - description: |- - The file ID used for training. You can retrieve the training data with the - [Files API](/docs/api-reference/files/retrieve-contents). 
- validation_file: - type: string - nullable: true - description: |- - The file ID used for validation. You can retrieve the validation results with the - [Files API](/docs/api-reference/files/retrieve-contents). - result_files: - type: array - items: - type: string - description: |- - The compiled results file ID(s) for the fine-tuning job. You can retrieve the results with the - [Files API](/docs/api-reference/files/retrieve-contents). - trained_tokens: - type: integer - format: int64 + type: + type: string + enum: + - submit_tool_outputs + description: For now, this is always `submit_tool_outputs`. + submit_tool_outputs: + type: object + properties: + tool_calls: + type: array + items: + $ref: '#/components/schemas/RunToolCallObject' + description: A list of the relevant tool calls. + required: + - tool_calls + description: Details on the tool outputs needed for this run to continue. + required: + - type + - submit_tool_outputs nullable: true description: |- - The total number of billable tokens processed by this fine tuning job. The value will be null - if the fine-tuning job is still running. - error: + Details on the action required to continue the run. Will be `null` if no action is + required. + last_error: type: object - description: |- - For fine-tuning jobs that have `failed`, this will contain more information on the cause of the - failure. properties: - message: - type: string - description: A human-readable error message. code: type: string - description: A machine-readable error code. - param: + enum: + - server_error + - rate_limit_exceeded + description: One of `server_error` or `rate_limit_exceeded`. + message: type: string - nullable: true - description: |- - The parameter that was invalid, usually `training_file` or `validation_file`. This field - will be null if the failure was not parameter-specific. + description: A human-readable description of the error. + required: + - code + - message nullable: true - FineTuningJobEvent: - type: object - required: - - id - - object - - created_at - - level - - message - properties: - id: + description: The last error associated with this run. Will be `null` if there are no errors. + expires_at: type: string - object: + format: date-time + nullable: true + description: The Unix timestamp (in seconds) for when the run will expire. + started_at: type: string - created_at: - type: integer - format: unixtime - level: + format: date-time + nullable: true + description: The Unix timestamp (in seconds) for when the run was started. + cancelled_at: type: string - enum: - - info - - warn - - error - message: + format: date-time + nullable: true + description: The Unix timestamp (in seconds) for when the run was cancelled. + failed_at: type: string - Image: - type: object - description: Represents the url or the content of an image generated by the OpenAI API. - properties: - url: + format: date-time + nullable: true + description: The Unix timestamp (in seconds) for when the run failed. + completed_at: type: string - format: uri - description: The URL of the generated image, if `response_format` is `url` (default). - b64_json: + format: date-time + nullable: true + description: The Unix timestamp (in seconds) for when the run was completed. + model: type: string - format: base64 - description: The base64-encoded JSON of the generated image, if `response_format` is `b64_json`. 
- ImagesN: - type: integer - format: int64 - minimum: 1 - maximum: 10 - ImagesResponse: + description: The model that the [assistant](/docs/api-reference/assistants) used for this run. + instructions: + type: string + description: The instructions that the [assistant](/docs/api-reference/assistants) used for this run. + tools: + $ref: '#/components/schemas/CreateRunRequestToolsItem' + description: The list of tools that the [assistant](/docs/api-reference/assistants) used for this run. + file_ids: + type: array + items: + type: string + description: |- + The list of [File](/docs/api-reference/files) IDs the + [assistant](/docs/api-reference/assistants) used for this run. + default: [] + metadata: + type: object + additionalProperties: + type: string + nullable: true + description: |- + Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + additional information about the object in a structured format. Keys can be a maximum of 64 + characters long and values can be a maximum of 512 characters long. + x-oaiTypeLabel: map + usage: + type: object + allOf: + - $ref: '#/components/schemas/RunCompletionUsage' + nullable: true + description: Represents an execution run on a [thread](/docs/api-reference/threads). + RunStepCompletionUsage: type: object required: - - created - - data + - completion_tokens + - prompt_tokens + - total_tokens properties: - created: + completion_tokens: type: integer - format: unixtime - data: - type: array - items: - $ref: '#/components/schemas/Image' - ListFilesResponse: + format: int64 + description: Number of completion tokens used over the course of the run step. + prompt_tokens: + type: integer + format: int64 + description: Number of prompt tokens used over the course of the run step. + total_tokens: + type: integer + format: int64 + description: Total number of tokens used (prompt + completion). + description: |- + Usage statistics related to the run step. This value will be `null` while the run step's status + is `in_progress`. + RunStepDetails: + oneOf: + - $ref: '#/components/schemas/RunStepDetailsMessageCreationObject' + - $ref: '#/components/schemas/RunStepDetailsToolCallsObject' + x-oaiExpandable: true + RunStepDetailsMessageCreationObject: type: object required: - - object - - data + - type + - message_creation properties: - object: + type: type: string - data: - type: array - items: - $ref: '#/components/schemas/OpenAIFile' - ListFineTuneEventsResponse: + enum: + - message_creation + description: Always `message_creation`. + message_creation: + type: object + properties: + message_id: + type: string + description: The ID of the message that was created by this run step. + required: + - message_id + description: Details of the message creation by the run step. + RunStepDetailsToolCallsCodeObject: type: object required: - - object - - data + - id + - type + - code_interpreter properties: - object: + id: type: string - data: - type: array - items: - $ref: '#/components/schemas/FineTuneEvent' - ListFineTunesResponse: + description: The ID of the tool call. + type: + type: string + enum: + - code_interpreter + description: |- + The type of tool call. This is always going to be `code_interpreter` for this type of tool + call. + code_interpreter: + type: object + properties: + input: + type: string + description: The input to the Code Interpreter tool call. + outputs: + $ref: '#/components/schemas/RunStepDetailsToolCallsCodeOutputs' + description: |- + The outputs from the Code Interpreter tool call.
Code Interpreter can output one or more + items, including text (`logs`) or images (`image`). Each of these is represented by a + different object type. + required: + - input + - outputs + description: The Code Interpreter tool call definition. + description: Details of the Code Interpreter tool call the run step was involved in. + RunStepDetailsToolCallsCodeOutput: + oneOf: + - $ref: '#/components/schemas/RunStepDetailsToolCallsCodeOutputLogsObject' + - $ref: '#/components/schemas/RunStepDetailsToolCallsCodeOutputImageObject' + x-oaiExpandable: true + RunStepDetailsToolCallsCodeOutputImageObject: type: object required: - - object - - data + - type + - image properties: - object: + type: type: string - data: - type: array - items: - $ref: '#/components/schemas/FineTune' - ListFineTuningJobEventsResponse: + enum: + - image + description: Always `image`. + image: + type: object + properties: + file_id: + type: string + description: The [file](/docs/api-reference/files) ID of the image. + required: + - file_id + RunStepDetailsToolCallsCodeOutputLogsObject: type: object required: - - object - - data + - type + - logs properties: - object: + type: type: string - data: - type: array - items: - $ref: '#/components/schemas/FineTuningJobEvent' - ListModelsResponse: + enum: + - logs + description: Always `logs`. + logs: + type: string + description: The text output from the Code Interpreter tool call. + description: Text output from the Code Interpreter tool call as part of a run step. + RunStepDetailsToolCallsCodeOutputs: + type: array + items: + $ref: '#/components/schemas/RunStepDetailsToolCallsCodeOutput' + RunStepDetailsToolCallsFunctionObject: type: object required: - - object - - data + - id + - type + - function properties: - object: + id: type: string - data: - type: array - items: - $ref: '#/components/schemas/Model' - ListPaginatedFineTuningJobsResponse: + description: The ID of the tool call object. + type: + type: string + enum: + - function + description: The type of tool call. This is always going to be `function` for this type of tool call. + function: + type: object + properties: + name: + type: string + description: The name of the function. + arguments: + type: string + description: The arguments passed to the function. + output: + type: string + nullable: true + description: |- + The output of the function. This will be `null` if the outputs have not been + [submitted](/docs/api-reference/runs/submitToolOutputs) yet. + required: + - name + - arguments + - output + description: The definition of the function that was called. + RunStepDetailsToolCallsObject: type: object required: - - object - - data - - has_more + - type + - tool_calls properties: - object: + type: type: string - data: - type: array - items: - $ref: '#/components/schemas/FineTuningJob' - has_more: - type: boolean - MaxTokens: - type: integer - format: int64 - minimum: 0 - Model: + enum: + - tool_calls + description: Always `tool_calls`. + tool_calls: + $ref: '#/components/schemas/RunStepDetailsToolCallsObjectToolCallsItem' + description: |- + An array of tool calls the run step was involved in. These can be associated with one of three + types of tools: `code_interpreter`, `retrieval`, or `function`. + description: Details of the tool call.
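Taken together, `RunStepDetails`, `RunStepDetailsToolCallsObject`, and the `RunStepDetailsToolCalls*Object` variants above form a tagged union discriminated by `type` (marked `x-oaiExpandable`). A hedged sketch of how a consumer might walk that union with plain `System.Text.Json`; the IDs and values below are invented:

```csharp
using System;
using System.Text.Json;

// A run step's step_details, trimmed to the fields used below; IDs and values are invented.
const string stepJson = """
{
  "step_details": {
    "type": "tool_calls",
    "tool_calls": [
      { "id": "call_1", "type": "code_interpreter",
        "code_interpreter": { "input": "print(2 + 2)", "outputs": [ { "type": "logs", "logs": "4" } ] } },
      { "id": "call_2", "type": "retrieval", "retrieval": {} },
      { "id": "call_3", "type": "function",
        "function": { "name": "get_current_weather", "arguments": "{\"location\":\"Paris\"}", "output": null } }
    ]
  }
}
""";

using JsonDocument doc = JsonDocument.Parse(stepJson);
JsonElement details = doc.RootElement.GetProperty("step_details");

// RunStepDetails is discriminated by "type"; so is each entry in "tool_calls".
if (details.GetProperty("type").GetString() == "tool_calls")
{
    foreach (JsonElement call in details.GetProperty("tool_calls").EnumerateArray())
    {
        switch (call.GetProperty("type").GetString())
        {
            case "code_interpreter":
            {
                JsonElement ci = call.GetProperty("code_interpreter");
                Console.WriteLine($"code input: {ci.GetProperty("input").GetString()}");
                break;
            }
            case "retrieval":
                // Per the schema, the "retrieval" member is always an empty object for now.
                Console.WriteLine("retrieval tool call");
                break;
            case "function":
            {
                JsonElement fn = call.GetProperty("function");
                Console.WriteLine($"function: {fn.GetProperty("name").GetString()}({fn.GetProperty("arguments").GetString()})");
                break;
            }
        }
    }
}
```

Dispatching on the `type` discriminator first, and only then reading the variant-specific member of the same name, mirrors how the `oneOf` schemas are keyed.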
+ RunStepDetailsToolCallsObjectToolCall: + oneOf: + - $ref: '#/components/schemas/RunStepDetailsToolCallsCodeObject' + - $ref: '#/components/schemas/RunStepDetailsToolCallsRetrievalObject' + - $ref: '#/components/schemas/RunStepDetailsToolCallsFunctionObject' + x-oaiExpandable: true + RunStepDetailsToolCallsObjectToolCallsItem: + type: array + items: + $ref: '#/components/schemas/RunStepDetailsToolCallsObjectToolCall' + RunStepDetailsToolCallsRetrievalObject: type: object - description: Describes an OpenAI model offering that can be used with the API. required: - id - - object - - created - - owned_by + - type + - retrieval properties: id: type: string - description: The model identifier, which can be referenced in the API endpoints. - object: + description: The ID of the tool call object. + type: type: string enum: - - model - description: The object type, which is always "model". - created: - type: integer - format: unixtime - description: The Unix timestamp (in seconds) when the model was created. - owned_by: - type: string - description: The organization that owns the model. - N: - type: integer - format: int64 - minimum: 1 - maximum: 128 - NEpochs: - type: integer - format: int64 - minimum: 1 - maximum: 50 - OpenAIFile: + - retrieval + description: The type of tool call. This is always going to be `retrieval` for this type of tool call. + retrieval: + type: object + description: For now, this is always going to be an empty object. + x-oaiTypeLabel: map + RunStepObject: type: object - description: The `File` object represents a document that has been uploaded to OpenAI. required: - id - object - - bytes - - createdAt - - filename - - purpose + - created_at + - assistant_id + - thread_id + - run_id + - type - status + - step_details + - last_error + - expires_at + - cancelled_at + - failed_at + - completed_at + - metadata + - usage properties: id: type: string - description: The file identifier, which can be referenced in the API endpoints. + description: The identifier of the run step, which can be referenced in API endpoints. object: type: string enum: - - file - description: The object type, which is always "file". - bytes: - type: integer - format: int64 - description: The size of the file in bytes. - createdAt: + - thread.run.step + description: The object type, which is always `thread.run.step`. + created_at: type: integer format: unixtime - description: The Unix timestamp (in seconds) for when the file was created. - filename: + description: The Unix timestamp (in seconds) for when the run step was created. + assistant_id: type: string - description: The name of the file. - purpose: + description: The ID of the [assistant](/docs/api-reference/assistants) associated with the run step. + thread_id: + type: string + description: The ID of the [thread](/docs/api-reference/threads) that was run. + run_id: + type: string + description: The ID of the [run](/docs/api-reference/runs) that this run step is a part of. + type: type: string - description: The intended purpose of the file. Currently, only "fine-tune" is supported. + enum: + - message_creation + - tool_calls + description: The type of run step, which can be either `message_creation` or `tool_calls`. status: type: string enum: - - uploaded - - processed - - pending - - error - - deleting - - deleted + - in_progress + - cancelled + - failed + - completed + - expired description: |- - The current status of the file, which can be either `uploaded`, `processed`, `pending`, - `error`, `deleting` or `deleted`. 
- status_details: + The status of the run step, which can be either `in_progress`, `cancelled`, `failed`, + `completed`, or `expired`. + step_details: + allOf: + - $ref: '#/components/schemas/RunStepDetails' + description: The details of the run step. + last_error: + type: object + properties: + code: + type: string + enum: + - server_error + - rate_limit_exceeded + description: One of `server_error` or `rate_limit_exceeded`. + message: + type: string + description: A human-readable description of the error. + required: + - code + - message + nullable: true + description: The last error associated with this run step. Will be `null` if there are no errors. + expires_at: type: string + format: date-time nullable: true description: |- - Additional details about the status of the file. If the file is in the `error` state, this will - include a message describing the error. - Penalty: - type: number - format: double - minimum: -2 - maximum: 2 - Prompt: - oneOf: - - type: string - - type: array - items: + The Unix timestamp (in seconds) for when the run step expired. A step is considered expired + if the parent run is expired. + cancelled_at: + type: string + format: date-time + nullable: true + description: The Unix timestamp (in seconds) for when the run step was cancelled. + failed_at: + type: string + format: date-time + nullable: true + description: The Unix timestamp (in seconds) for when the run step failed. + completed_at: + type: string + format: date-time + nullable: true + description: The Unix timestamp (in seconds) for when the run step completed. + metadata: + type: object + additionalProperties: type: string - - $ref: '#/components/schemas/TokenArray' - - $ref: '#/components/schemas/TokenArrayArray' - nullable: true + nullable: true + description: |- + Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + additional information about the object in a structured format. Keys can be a maximum of 64 + characters long and values can be a maximum of 512 characters long. + x-oaiTypeLabel: map + usage: + type: object + allOf: + - $ref: '#/components/schemas/RunCompletionUsage' + nullable: true + description: Represents a step in the execution of a run. + RunToolCallObject: + type: object + required: + - id + - type + - function + properties: + id: + type: string + description: |- + The ID of the tool call. This ID must be referenced when you submit the tool outputs using + the [Submit tool outputs to run](/docs/api-reference/runs/submitToolOutputs) endpoint. + type: + type: string + enum: + - function + description: The type of tool call the output is required for. For now, this is always `function`. + function: + type: object + properties: + name: + type: string + description: The name of the function. + arguments: + type: string + description: The arguments that the model expects you to pass to the function. + required: + - name + - arguments + description: The function definition. + description: Tool call objects Stop: oneOf: - type: string - $ref: '#/components/schemas/StopSequences' - nullable: true StopSequences: type: array items: type: string minItems: 1 maxItems: 4 + SubmitToolOutputsRunRequest: + type: object + required: + - tool_outputs + properties: + tool_outputs: + type: array + items: + type: object + properties: + tool_call_id: + type: string + description: |- + The ID of the tool call in the `required_action` object within the run object the output is + being submitted for.
+ output: + type: string + description: The output of the tool call to be submitted to continue the run. + description: A list of tools for which the outputs are being submitted. SuffixString: type: string minLength: 1 maxLength: 40 - Temperature: - type: number - format: double - minimum: 0 - maximum: 2 - TokenArray: + ThreadObject: + type: object + required: + - id + - object + - created_at + - metadata + properties: + id: + type: string + description: The identifier, which can be referenced in API endpoints. + object: + type: string + enum: + - thread + description: The object type, which is always `thread`. + created_at: + type: integer + format: unixtime + description: The Unix timestamp (in seconds) for when the thread was created. + metadata: + type: object + additionalProperties: + type: string + nullable: true + description: |- + Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + additional information about the object in a structured format. Keys can be a maximum of 64 + characters long and values can be a maximum of 512 characters long. + x-oaiTypeLabel: map + description: Represents a thread that contains [messages](/docs/api-reference/messages). + TokenArrayArray: type: array items: - type: integer - format: int64 + $ref: '#/components/schemas/TokenArrayItem' minItems: 1 - TokenArrayArray: + TokenArrayItem: type: array items: - $ref: '#/components/schemas/TokenArray' + type: integer + format: int64 minItems: 1 - TopP: - type: number - format: double - minimum: 0 - maximum: 1 User: type: string securitySchemes: diff --git a/tspconfig.yaml b/tspconfig.yaml new file mode 100644 index 000000000..652d57a6e --- /dev/null +++ b/tspconfig.yaml @@ -0,0 +1,3 @@ +options: + "@azure-tools/typespec-csharp": + generate-test-project: true \ No newline at end of file
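Circling back to the `SubmitToolOutputsRunRequest` schema above: a minimal raw-HTTP sketch of submitting tool outputs, assuming the public `/threads/{thread_id}/runs/{run_id}/submit_tool_outputs` endpoint and the Assistants beta opt-in header; the IDs and output value are placeholders, and plain `HttpClient` stands in for the generated client:

```csharp
using System;
using System.Net.Http;
using System.Net.Http.Headers;
using System.Text;
using System.Text.Json;

string threadId = "thread_abc123"; // placeholder
string runId = "run_abc123";       // placeholder
string apiKey = Environment.GetEnvironmentVariable("OPENAI_API_KEY") ?? "";

// Body shaped like SubmitToolOutputsRunRequest: one entry per pending tool call.
var payload = new
{
    tool_outputs = new[]
    {
        new { tool_call_id = "call_abc123", output = "{\"temperature\":\"22C\"}" }
    }
};

using var http = new HttpClient();
http.DefaultRequestHeaders.Authorization = new AuthenticationHeaderValue("Bearer", apiKey);
http.DefaultRequestHeaders.Add("OpenAI-Beta", "assistants=v1"); // assumed beta opt-in header

HttpResponseMessage response = await http.PostAsync(
    $"https://api.openai.com/v1/threads/{threadId}/runs/{runId}/submit_tool_outputs",
    new StringContent(JsonSerializer.Serialize(payload), Encoding.UTF8, "application/json"));

// On success the API returns the updated run object; print it as-is here.
Console.WriteLine(await response.Content.ReadAsStringAsync());
```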