diff --git a/.dotnet.azure/.tests.staging/AoaiTestBase.cs b/.dotnet.azure/.tests.staging/AoaiTestBase.cs deleted file mode 100644 index a04fef9ea..000000000 --- a/.dotnet.azure/.tests.staging/AoaiTestBase.cs +++ /dev/null @@ -1,706 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -using System; -using System.ClientModel; -using System.ClientModel.Primitives; -using System.Collections.Generic; -using System.IO; -using System.Linq; -using System.Text.Json; -using System.Threading; -using System.Threading.Tasks; -using Azure.Identity; - -//using Azure.AI.OpenAI.Tests.Models; -//using Azure.AI.OpenAI.Tests.Utils; -//using Azure.AI.OpenAI.Tests.Utils.Config; -//using Azure.Core.TestFramework; -//using Azure.Core.TestFramework.Models; -using NUnit.Framework; -using NUnit.Framework.Interfaces; -using OpenAI.Assistants; -using OpenAI.Audio; -using OpenAI.Batch; -using OpenAI.Chat; -using OpenAI.Embeddings; -using OpenAI.Files; -using OpenAI.FineTuning; -using OpenAI.Images; -//using OpenAI.Tests; -using OpenAI.VectorStores; -using RetryMode = Azure.Core.RetryMode; -using RetryOptions = Azure.Core.RetryOptions; -using TokenCredential = Azure.Core.TokenCredential; - -namespace Azure.AI.OpenAI.Tests; - -#pragma warning disable OPENAI001 - -public class AoaiTestBase // : RecordedTestBase -{ - private const string AZURE_URI_SANITIZER_PATTERN = @"(?<=/(subscriptions|resourceGroups|accounts)/)([^/]+?)(?=(/|$))"; - private const string SMALL_1x1_PNG = "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAIAAACQd1PeAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAAFiQAABYkAZsVxhQAAAAMSURBVBhXY2BgYAAAAAQAAVzN/2kAAAAASUVORK5CYII="; - - public static readonly DateTimeOffset START_2024 = new DateTimeOffset(2024, 01, 01, 00, 00, 00, TimeSpan.Zero); - public static readonly DateTimeOffset UNIX_EPOCH = -#if NETFRAMEWORK - DateTimeOffset.Parse("1970-01-01T00:00:00.0000000+00:00"); -#else - DateTimeOffset.UnixEpoch; -#endif - - public 
AoaiTestBase(bool isAsync) { } - - public TClient GetTestClient() - { - AzureOpenAIClient topLevelClient = GetTestTopLevelClient(null); - string getDeployment() => "gpt-4"; - - object clientObject = null; - - switch (typeof(TClient).Name) - { - case nameof(AssistantClient): - clientObject = topLevelClient.GetAssistantClient(); - break; - case nameof(AudioClient): - clientObject = topLevelClient.GetAudioClient(getDeployment()); - break; - case nameof(BatchClient): - clientObject = topLevelClient.GetBatchClient(getDeployment()); - break; - case nameof(ChatClient): - clientObject = topLevelClient.GetChatClient(getDeployment()); - break; - case nameof(EmbeddingClient): - clientObject = topLevelClient.GetEmbeddingClient(getDeployment()); - break; - case nameof(FileClient): - clientObject = topLevelClient.GetFileClient(); - break; - case nameof(FineTuningClient): - clientObject = topLevelClient.GetFineTuningClient(); - break; - case nameof(ImageClient): - clientObject = topLevelClient.GetImageClient(getDeployment()); - break; - case nameof(VectorStoreClient): - clientObject = topLevelClient.GetVectorStoreClient(); - break; - } - - return (TClient)clientObject; - } - - // internal TestConfig TestConfig { get; } - // internal Assets Assets { get; } - // internal DisableRecordingInterceptor RecordingDisabler { get; } - - // protected AoaiTestBase(bool isAsync, RecordedTestMode? 
mode = null) - // : base(isAsync, mode) - // { - // TestConfig = new TestConfig(Mode); - // Assets = new Assets(TestEnvironment); - - // // Disable additional fluff that is causing issues - // TestDiagnostics = false; - - // // Add sanitizers to prevent resource names from leaking into recordings - // UriRegexSanitizers.Add(new UriRegexSanitizer(SanitizedJsonConfig.HOST_SUBDOMAIN_PATTERN) - // { - // Value = SanitizedJsonConfig.MASK_STRING - // }); - // UriRegexSanitizers.Add(new UriRegexSanitizer(AZURE_URI_SANITIZER_PATTERN) - // { - // Value = SanitizedJsonConfig.MASK_STRING - // }); - // HeaderRegexSanitizers.Add(new HeaderRegexSanitizer("Azure-AsyncOperation") - // { - // Regex = AZURE_URI_SANITIZER_PATTERN, - // Value = SanitizedJsonConfig.MASK_STRING - // }); - // HeaderRegexSanitizers.Add(new HeaderRegexSanitizer("Location") - // { - // Regex = AZURE_URI_SANITIZER_PATTERN, - // Value = SanitizedJsonConfig.MASK_STRING - // }); - // BodyKeySanitizers.Add(new BodyKeySanitizer("$..endpoint") - // { - // Regex = SanitizedJsonConfig.HOST_SUBDOMAIN_PATTERN, - // Value = SanitizedJsonConfig.MASK_STRING - // }); - // BodyKeySanitizers.Add(new BodyKeySanitizer("$..id") - // { - // Regex = AZURE_URI_SANITIZER_PATTERN, - // Value = SanitizedJsonConfig.MASK_STRING - // }); - - // // Add sanitizers to prevent our keys from leaking into the recordings - // JsonPathSanitizers.Add("*..key"); - // JsonPathSanitizers.Add("*..api_key"); - - // // Multi-part form data gives the test-proxy that is used for recording and playback indigestion (it always thinks it needs - // // to re-record the test on playback). So let's add an interceptor that will automatically disable body recording for specific - // // client methods calls, and then re-enable it afterwards. 
- // RecordingDisabler = new(() => Recording); - // RecordingDisabler.DisableBodyRecordingFor(nameof(FileClient.UploadFileAsync)); - - // IgnoredHeaders.Add("x-ms-client-request-id"); - - // // Data URIs trimmed to prevent the recording from being too large - // BodyKeySanitizers.Add(new BodyKeySanitizer("$..url") - // { - // Regex = @"(?<=data:image/png;base64,)(.+)", - // Value = SMALL_1x1_PNG - // }); - // // Base64 encoded images in the response are replaced with a 1x1 black pixel PNG image to ensure valid data - // BodyKeySanitizers.Add(new BodyKeySanitizer($"..b64_json") - // { - // Value = SMALL_1x1_PNG - // }); - // } - - /// - /// Gets the top level test client to use for testing. - /// - /// The test configuration to use - /// (Optional) The client options to use. - /// (Optional) The token credential to use. If this is null, an API key will be read from the - /// test configuration. - /// (Optional) The key credential to use instead of the one from the configuration. - public virtual AzureOpenAIClient GetTestTopLevelClient( - object config, // IConfiguration? config, - object options = null, // TestClientOptions? options = null, - TokenCredential tokenCredential = null, - ApiKeyCredential keyCredential = null) - { - string rawEndpoint = Environment.GetEnvironmentVariable("AZURE_OPENAI_ENDPOINT"); - Uri endpoint = new(rawEndpoint); - TokenCredential credential = new DefaultAzureCredential(); - - return new AzureOpenAIClient(endpoint, credential); - // // First validate that the config has the parameters we need - // if (config == null) - // { - // throw CreateKeyNotFoundEx("any configuration"); - // } - // else if (config.Endpoint is null) - // { - // throw CreateKeyNotFoundEx("endpoint"); - // } - // else if (tokenCredential == null && keyCredential == null && string.IsNullOrEmpty(config.Key)) - // { - // throw CreateKeyNotFoundEx("API key"); - // } - - // // Configure the test options as needed - // options ??= new(); - // Action? 
requestAction = options.ShouldOutputRequests ? DumpRequest : null; - // Action? responseAction = options.ShouldOutputResponses ? DumpResponse : null; - // options.AddPolicy(new TestPipelinePolicy(requestAction, responseAction), PipelinePosition.PerCall); - - // // If we are in playback, or record mode we should set the transport to the test proxy transport, except - // // in the case where we've explicitly specified the transport ourselves. There are cases where we use a - // // mock pipeline and we don't want those to go to the test proxy. - // if (options.Transport == null) - // { - // // TODO FIXME update once test framework code is updated - // /* NOTE: - // * Normally we would call the base class RecordedTestBase.InstrumentClientOptions. Unfortunately this doesn't - // * currently work since the test framework still relies on a version of Azure.Core that has not been updated - // * to use the new System.ClientModel types. Thus InstrumentClientOptions expects a type that inherits from - // * Azure.Core.ClientOptions, whereas we inherit from System.ClientModel.Primitives.ClientPipelineOptions. For - // * now we duplicate the code from InstrumentClientOptions here - // */ - - // if (Mode == RecordedTestMode.Playback) - // { - // // You guessed it: the constructor for RetryOptions is internal only. So plan B: - // RetryOptions retryOpt = (RetryOptions)Activator.CreateInstance(typeof(RetryOptions), true)!; - - // // Not making the timeout zero so retry code still goes async - // retryOpt.Delay = TimeSpan.FromMilliseconds(10); - // retryOpt.Mode = RetryMode.Fixed; - - // options.RetryPolicy = new Utils.Pipeline.ClientRetryPolicyAdapter(retryOpt); - // } - - // // No need to set the transport if we are in Live mode - // if (Mode != RecordedTestMode.Live) - // { - // // Wait what's this? More private or internal only things I need access to? 
- // var proxyAccess = NonPublic.FromField("_proxy"); - // var disableRecordingAccess = NonPublic.FromField>("_disableRecording"); - - // options.Transport = new Utils.Pipeline.ProxyTransport( - // proxyAccess.Get(this), - // Recording, - // () => disableRecordingAccess.Get(Recording).Value); - // } - // } - - // AzureOpenAIClient topLevelClient; - // if (tokenCredential != null) - // { - // topLevelClient = new AzureOpenAIClient(config.Endpoint, tokenCredential, options); - // } - // else - // { - // topLevelClient = new AzureOpenAIClient(config.Endpoint, keyCredential ?? new ApiKeyCredential(config.Key!), options); - // } - - // return topLevelClient; - } - - // /// - // /// Gets the properly instrumented client to use for testing. This have proper support for automatic sync/async method testing, - // /// as well as recording, and playback support. - // /// - // /// (Optional) The client options to use. - // /// (Optional) The token credential to use. If this is null, an API key will be read from the - // /// test configuration. - // /// (Optional) The key credential to use instead of the one from the configuration. - // /// The test client instance. - // public virtual TClient GetTestClient(TestClientOptions? options = null, TokenCredential? tokenCredential = null, ApiKeyCredential? keyCredential = null) - // => GetTestClient(TestConfig.GetConfig(), options, tokenCredential, keyCredential); - - // /// - // /// Gets the properly instrumented client to use for testing. This have proper support for automatic sync/async method testing, - // /// as well as recording, and playback support. - // /// - // /// - // /// (Optional) The client options to use. - // /// (Optional) The token credential to use. If this is null, an API key will be read from the - // /// test configuration. - // /// (Optional) The key credential to use instead of the one from the configuration. - // /// The test client instance. 
- // public virtual TClient GetTestClient(string configName, TestClientOptions? options = null, TokenCredential? tokenCredential = null, ApiKeyCredential? keyCredential = null) - // => GetTestClient(TestConfig.GetConfig(configName), options, tokenCredential, keyCredential); - - // /// - // /// Gets a different type of client using the same configuration as the specified client. - // /// - // /// The type of other client to create. - // /// The client instance whose configuration we want to use. - // /// (Optional) The specific deployment to use instead of the one from the config. - // /// - // /// The client instance passed was not instrumented - // public virtual TExplicitClient GetTestClientFrom(TClient client, string? deploymentName = null) - // { - // AzureOpenAiInstrumented? instrumented = _clientToTopLevel.FirstOrDefault(e => ReferenceEquals(client, e.Client)); - // if (instrumented?.TopLevelClient != null - // && instrumented?.Config != null) - // { - // return GetTestClient(instrumented.TopLevelClient, instrumented.Config, deploymentName); - // } - - // throw new NotSupportedException("The client provided was not properly instrumented. Please make sure to get your test client " + - // "instances using the GetTestClient() methods"); - // } - - // /// - // /// Disables the recording of request bodies for the specified method in the current client. - // /// - // /// The method name. - // public virtual void DisableRequestBodyRecording(string methodName) - // => RecordingDisabler.DisableBodyRecordingFor(methodName); - - // /// - // /// Polls until a condition has been met with a maximum wait time. The function will always return the last value even - // /// if the condition was not met. - // /// - // /// The value in the . - // /// The initial value. - // /// The asynchronous function to get the latest state of the value. - // /// When we should stop waiting. - // /// (Optional) The amount of time to wait between retries. 
This will be ignored in playback - // /// mode. Default is 2 seconds. - // /// (Optional) The maximum amount of time to wait until the condition becomes true. This will be ignored in - // /// playback mode. The default is 2 minutes. - // /// The final state. This will return when the conditions have been met or we timed out. - // protected virtual Task WaitUntilReturnLast(T initialValue, Func>> getAsync, Predicate stopCondition, TimeSpan? waitTimeBetweenRequests = null, TimeSpan? maxWait = null) - // => WaitUntilReturnLast(initialValue, new Func>(async () => await getAsync().ConfigureAwait(false)), stopCondition, waitTimeBetweenRequests, maxWait); - - // /// - // /// Polls until a condition has been met with a maximum wait time. The function will always return the last value even - // /// if the condition was not met. - // /// - // /// The return value. - // /// The initial value. - // /// The asynchronous function to get the latest state of the value. - // /// When we should stop waiting. - // /// (Optional) The amount of time to wait between retries. This will be ignored in playback - // /// mode. Default is 2 seconds. - // /// (Optional) The maximum amount of time to wait until the condition becomes true. This will be ignored in - // /// playback mode. The default is 2 minutes. - // /// The final state. This will return when the conditions have been met or we timed out. - // protected virtual async Task WaitUntilReturnLast(T initialValue, Func> getAsync, Predicate stopCondition, TimeSpan? waitTimeBetweenRequests = null, TimeSpan? maxWait = null) - // { - // TimeSpan delay, max; - // if (Mode == RecordedTestMode.Playback) - // { - // delay = TimeSpan.FromMilliseconds(10); - // max = TimeSpan.FromSeconds(30); - // } - // else - // { - // delay = waitTimeBetweenRequests ?? TimeSpan.FromSeconds(2); - // max = maxWait ?? 
TimeSpan.FromMinutes(2); - // } - - // DateTimeOffset stopTime = DateTimeOffset.Now + max; - // T result = initialValue; - - // while (!stopCondition(result) && DateTimeOffset.Now < stopTime) - // { - // await Task.Delay(delay).ConfigureAwait(false); - // result = await getAsync().ConfigureAwait(false); - // } - - // return result; - // } - - // /// - // /// Gets the properly instrumented client to use for testing. This have proper support for automatic sync/async method testing, - // /// as well as recording, and playback support. - // /// - // /// The test configuration to use - // /// (Optional) The client options to use. - // /// (Optional) The token credential to use. If this is null, an API key will be read from the - // /// test configuration. - // /// (Optional) The key credential to use instead of the one from the configuration. - // /// The test client instance. - // protected virtual TClient GetTestClient(IConfiguration? config, TestClientOptions? options = null, TokenCredential? tokenCredential = null, ApiKeyCredential? keyCredential = null) - // { - // AzureOpenAIClient topLevelClient = GetTestTopLevelClient(config, options, tokenCredential, keyCredential); - // return GetTestClient(topLevelClient, config!); - // } - - // /// - // /// Gets the properly instrumented client to use for testing. This have proper support for automatic sync/async method testing, - // /// as well as recording, and playback support. - // /// - // /// The type of test client to get. - // /// The top level client to use. - // /// The configuration to use to get the deployment information (if needed). - // /// The instrumented client instance to use. - // /// Support for the type of client being requested has not been implemented yet. - // protected virtual TExplicitClient GetTestClient(AzureOpenAIClient topLevelClient, IConfiguration config, string? deploymentName = null) - // { - // Func getDeployment = () => deploymentName ?? config?.Deployment ?? 
throw CreateKeyNotFoundEx("deployment"); - // object clientObject; - - // switch (typeof(TExplicitClient).Name) - // { - // case nameof(AssistantClient): - // clientObject = topLevelClient.GetAssistantClient(); - // break; - // case nameof(AudioClient): - // clientObject = topLevelClient.GetAudioClient(getDeployment()); - // break; - // case nameof(BatchClient): - // clientObject = topLevelClient.GetBatchClient(getDeployment()); - // break; - // case nameof(ChatClient): - // clientObject = topLevelClient.GetChatClient(getDeployment()); - // break; - // case nameof(EmbeddingClient): - // clientObject = topLevelClient.GetEmbeddingClient(getDeployment()); - // break; - // case nameof(FileClient): - // clientObject = topLevelClient.GetFileClient(); - // break; - // case nameof(FineTuningClient): - // clientObject = topLevelClient.GetFineTuningClient(); - // break; - // case nameof(ImageClient): - // clientObject = topLevelClient.GetImageClient(getDeployment()); - // break; - // case nameof(VectorStoreClient): - // clientObject = topLevelClient.GetVectorStoreClient(); - // break; - // case nameof(AzureDeploymentClient): - // var accessor = NonPublic.FromField("_transport"); - // clientObject = new AzureDeploymentClient( - // config, - // TestEnvironment.Credential, - // transport: accessor.Get(topLevelClient.Pipeline)); - // break; - // default: - // throw new NotImplementedException($"Test client helpers not yet implemented for {typeof(TExplicitClient)}"); - // }; - - // object instrumented = InstrumentClient(typeof(TExplicitClient), clientObject, [RecordingDisabler]); - - // // Keep track of the corresponding top level client and config - // _clientToTopLevel.Add(new AzureOpenAiInstrumented - // { - // Client = instrumented, - // TopLevelClient = topLevelClient, - // Config = config, - // }); - - // return (TExplicitClient)instrumented; - // } - - // private Exception CreateKeyNotFoundEx(string whatIsMissing) - // { - // return new KeyNotFoundException($"Could not 
find any {whatIsMissing} to use. Please make sure you have the necessary" + - // $" {TestConfig.AssetsJson} config file, or have the needed environment variables set"); - // } - - // private static void DumpRequest(PipelineRequest request) - // { - // Console.WriteLine($"--- New request ---"); - // string headers = request.Headers - // .Select(header => $"{header.Key}={(header.Key.ToLower().Contains("auth") ? "***" : header.Value)}") - // .Aggregate(string.Empty, (current, next) => string.Format("{0},{1}", current, next)); - // Console.WriteLine($"Headers: {headers}"); - // Console.WriteLine($"{request.Method} URI: {request?.Uri}"); - // if (request!.Content is not null) - // { - // using MemoryStream stream = new(); - // request.Content.WriteTo(stream, default); - // stream.Position = 0; - // using StreamReader reader = new(stream); - // Console.WriteLine(reader.ReadToEnd()); - // } - // } - - // private static void DumpResponse(PipelineResponse response) - // { - // Console.WriteLine($"--- Response --- "); - // } - - protected void ValidateById(string id) - { - Assert.That(id, Is.Not.Null.Or.Empty); - switch (typeof(T).Name) - { - case nameof(Assistant): _assistantIdsToDelete.Add(id); break; - case nameof(AssistantThread): _threadIdsToDelete.Add(id); break; - case nameof(OpenAIFileInfo): _fileIdsToDelete.Add(id); break; - case nameof(ThreadRun): break; - case nameof(VectorStore): _vectorStoreIdsToDelete.Add(id); break; - default: throw new NotImplementedException(); - } - } - - protected void ValidateById(string id, string parentId) - { - Assert.That(id, Is.Not.Null.Or.Empty); - Assert.That(parentId, Is.Not.Null.Or.Empty); - switch (typeof(T).Name) - { - case nameof(ThreadMessage): - _threadIdsWithMessageIdsToDelete.Add((parentId, id)); - break; - case nameof(VectorStoreFileAssociation): - _vectorStoreFileAssociationsToRemove.Add((parentId, id)); - break; - default: - throw new NotImplementedException(); - } - } - - /// - /// Performs basic, invariant validation 
of a target that was just instantiated from its corresponding origination - /// mechanism. If applicable, the instance is recorded into the test run for cleanup of persistent resources. - /// - /// Instance type being validated. - /// The instance to validate. - /// The provided instance type isn't supported. - protected void Validate(T target) - { - if (target is ThreadMessage message) - { - ValidateById(message.Id, message.ThreadId); - } - else if (target is VectorStoreFileAssociation fileAssociation) - { - ValidateById(fileAssociation.VectorStoreId, fileAssociation.FileId); - } - else - { - ValidateById(target switch - { - Assistant assistant => assistant.Id, - AssistantThread thread => thread.Id, - OpenAIFileInfo file => file.Id, - ThreadRun run => run.Id, - VectorStore store => store.Id, - _ => throw new NotImplementedException(), - }); - } - } - - [TearDown] - protected void Cleanup() - { - AzureOpenAIClient topLevelCleanupClient = GetTestTopLevelClient(null); - //AzureOpenAIClient topLevelCleanupClient = GetTestTopLevelClient(TestConfig.GetConfig(), new() - //{ - // ShouldOutputRequests = false, - // ShouldOutputResponses = false, - //}); - AssistantClient client = topLevelCleanupClient.GetAssistantClient(); - VectorStoreClient vectorStoreClient = topLevelCleanupClient.GetVectorStoreClient(); - FileClient fileClient = topLevelCleanupClient.GetFileClient(); - RequestOptions requestOptions = new() { ErrorOptions = ClientErrorBehaviors.NoThrow, }; - foreach ((string threadId, string messageId) in _threadIdsWithMessageIdsToDelete) - { - Console.WriteLine($"Cleanup: {messageId} -> {client.DeleteMessage(threadId, messageId, requestOptions)?.GetRawResponse().Status}"); - } - foreach (string assistantId in _assistantIdsToDelete) - { - Console.WriteLine($"Cleanup: {assistantId} -> {client.DeleteAssistant(assistantId, requestOptions)?.GetRawResponse().Status}"); - } - foreach (string threadId in _threadIdsToDelete) - { - Console.WriteLine($"Cleanup: {threadId} -> 
{client.DeleteThread(threadId, requestOptions)?.GetRawResponse().Status}"); - } - foreach ((string vectorStoreId, string fileId) in _vectorStoreFileAssociationsToRemove) - { - Console.WriteLine($"Cleanup: {vectorStoreId}<->{fileId} => {vectorStoreClient.RemoveFileFromStore(vectorStoreId, fileId, requestOptions)?.GetRawResponse().Status}"); - } - foreach (string vectorStoreId in _vectorStoreIdsToDelete) - { - Console.WriteLine($"Cleanup: {vectorStoreId} => {vectorStoreClient.DeleteVectorStore(vectorStoreId, requestOptions)?.GetRawResponse().Status}"); - } - foreach (string fileId in _fileIdsToDelete) - { - Console.WriteLine($"Cleanup: {fileId} -> {fileClient.DeleteFile(fileId, requestOptions)?.GetRawResponse().Status}"); - } - _threadIdsWithMessageIdsToDelete.Clear(); - _assistantIdsToDelete.Clear(); - _threadIdsToDelete.Clear(); - _vectorStoreFileAssociationsToRemove.Clear(); - _vectorStoreIdsToDelete.Clear(); - _fileIdsToDelete.Clear(); - - // If we are in recording mode, update the recorded playback configuration as well - //if (Mode == RecordedTestMode.Record - // && TestContext.CurrentContext.Result.Outcome == ResultState.Success) - //{ - // TestConfig.SavePlaybackConfig(); - //} - } - - protected static void ValidateClientResult(ClientResult result) - { - Assert.That(result, Is.Not.Null); - Assert.That(result.GetRawResponse(), Is.Not.Null); - } - - protected static PipelineResponse ValidateClientResultResponse(ClientResult result) - { - ValidateClientResult(result); - - PipelineResponse response = result.GetRawResponse(); - Assert.That(response.Status, Is.GreaterThanOrEqualTo(200).And.LessThan(300)); - Assert.That(response.Headers, Is.Not.Null); - // Assert.That(response.Headers.GetFirstValueOrDefault("Content-Type"), Does.StartWith("application/json")); - Assert.That(response.Content, Is.Not.Null); - - return response; - } - - protected virtual TModel ValidateAndParse(ClientResult result) where TModel : IJsonModel - { - var response = 
ValidateClientResultResponse(result); - - TModel model = ModelReaderWriter.Read(response.Content, ModelReaderWriterOptions.Json); - Assert.That(model, Is.Not.Null); - return model!; - } - - //protected virtual TModel ValidateAndParse(ClientResult result, JsonSerializerOptions? options = null) - //{ - // var response = ValidateClientResultResponse(result); - - // using Stream stream = response.Content.ToStream(); - // Assert.That(stream, Is.Not.Null); - - // TModel? model = JsonHelpers.Deserialize(stream, options ?? JsonHelpers.OpenAIJsonOptions); - // Assert.That(model, Is.Not.Null); - // return model!; - //} - - // protected AsyncResultCollection SyncOrAsync(TClient client, Func> sync, Func> async) - // { - // // TODO FIXME HACK Since the test framework doesn't currently support async result collection, this methods provides - // // a simplified way to make explicit calls to the right methods in tests - // TClient rawClient = GetOriginal(client); - - // if (IsAsync) - // { - // return async(rawClient); - // } - // else - // { - // ResultCollection syncCollection = sync(rawClient); - // return new SyncToAsyncResultCollection(syncCollection); - // } - // } - - // protected AsyncPageableCollection SyncOrAsync(TClient client, Func> sync, Func> async) - // { - // // TODO FIXME HACK Since the test framework doesn't currently support async result collection, this methods provides - // // a simplified way to make explicit calls to the right methods in tests - // TClient rawClient = GetOriginal(client); - - // if (IsAsync) - // { - // return async(rawClient); - // } - // else - // { - // PageableCollection syncCollection = sync(rawClient); - // return new SyncToAsyncPageableCollection(syncCollection); - // } - // } - - // protected Task> SyncOrAsyncList(TClient client, Func> sync, Func> async) - // { - // // TODO FIXME HACK Since the test framework doesn't currently support async result collection, this methods provides - // // a simplified way to make explicit calls to 
the right methods in tests - // TClient rawClient = GetOriginal(client); - - // if (IsAsync) - // { - // return async(rawClient).ToEnumerableAsync(); - // } - // else - // { - // return Task.FromResult(sync(rawClient).ToList()); - // } - // } - - // internal class AzureOpenAiInstrumented - // { - // required public object Client { get; init; } - // required public AzureOpenAIClient TopLevelClient { get; init; } - // required public IConfiguration Config { get; init; } - // } - - private readonly List _assistantIdsToDelete = []; - private readonly List _threadIdsToDelete = []; - private readonly List<(string, string)> _threadIdsWithMessageIdsToDelete = []; - private readonly List _fileIdsToDelete = []; - private readonly List<(string, string)> _vectorStoreFileAssociationsToRemove = []; - private readonly List _vectorStoreIdsToDelete = []; - // internal readonly List _clientToTopLevel = new(); - //} - - //public class TestClientOptions : AzureOpenAIClientOptions - //{ - // public TestClientOptions() : base() - // { } - - // public TestClientOptions(ServiceVersion version) : base(version) - // { } - - // public bool ShouldOutputRequests { get; set; } = true; - // public bool ShouldOutputResponses { get; set; } = true; -} diff --git a/.dotnet.azure/.tests.staging/AssistantTests.cs b/.dotnet.azure/.tests.staging/AssistantTests.cs deleted file mode 100644 index 115428724..000000000 --- a/.dotnet.azure/.tests.staging/AssistantTests.cs +++ /dev/null @@ -1,674 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -#nullable disable - -using System; -using System.ClientModel; -using System.ClientModel.Primitives; -using System.Collections.Generic; -using System.Diagnostics; -using System.Linq; -using System.Text; -using System.Threading.Tasks; -using Azure.AI.OpenAI.Assistants; -using Azure.Core; -using Azure.Identity; -using NUnit.Framework; -using OpenAI; -using OpenAI.Assistants; -using OpenAI.Audio; -using OpenAI.Batch; -using OpenAI.Chat; -using OpenAI.Embeddings; -using OpenAI.Files; -using OpenAI.FineTuning; -using OpenAI.Images; -using OpenAI.VectorStores; - -namespace Azure.AI.OpenAI.Tests; - -#pragma warning disable OPENAI001 -#pragma warning disable AOAI001 - -public class AssistantTests : AoaiTestBase -{ - public AssistantTests() : base(isAsync: true) { } - public AssistantTests(bool isAsync) : base(isAsync) { } - - [Test] - [Category("Smoke")] - public void CanCreateClient() => Assert.That(GetTestClient(), Is.InstanceOf()); - - [Test] - public async Task BasicAssistantOperationsWork() - { - AssistantClient client = GetTestClient(); - string modelName = "gpt-4"; // client.DeploymentOrThrow(); - Assistant assistant = await client.CreateAssistantAsync(modelName); - Validate(assistant); - Assert.That(assistant.Name, Is.Null.Or.Empty); - assistant = await client.ModifyAssistantAsync(assistant.Id, new AssistantModificationOptions() - { - Name = "test assistant name", - }); - Assert.That(assistant.Name, Is.EqualTo("test assistant name")); - bool deleted = await client.DeleteAssistantAsync(assistant.Id); - Assert.That(deleted, Is.True); - assistant = await client.CreateAssistantAsync(modelName, new AssistantCreationOptions() - { - Metadata = - { - ["testkey"] = "hello!" 
- }, - }); - Validate(assistant); - Assistant retrievedAssistant = await client.GetAssistantAsync(assistant.Id); - Assert.That(retrievedAssistant.Id, Is.EqualTo(assistant.Id)); - Assert.That(retrievedAssistant.Metadata.TryGetValue("testkey", out string metadataValue) && metadataValue == "hello!"); - Assistant modifiedAssistant = await client.ModifyAssistantAsync(assistant.Id, new AssistantModificationOptions() - { - Metadata = - { - ["testkey"] = "goodbye!", - }, - }); - Assert.That(modifiedAssistant.Id, Is.EqualTo(assistant.Id)); - IAsyncEnumerable recentAssistants = client.GetAssistantsAsync().GetAllValuesAsync(); - //SyncOrAsync( - // client, c => c.GetAssistants(), c => c.GetAssistantsAsync()); - Assistant recentAssistant = null; - await foreach (Assistant asyncAssistant in recentAssistants) - { - recentAssistant = asyncAssistant; - break; - } - Assert.That(recentAssistant, Is.Not.Null); - Assert.That(recentAssistant.Metadata.TryGetValue("testkey", out string newMetadataValue) && newMetadataValue == "goodbye!"); - } - - // [RecordedTest] - // public async Task BasicThreadOperationsWork() - // { - // AssistantClient client = GetTestClient(); - // AssistantThread thread = await client.CreateThreadAsync(); - // Validate(thread); - // Assert.That(thread.CreatedAt, Is.GreaterThan(s_2024)); - // bool deleted = await client.DeleteThreadAsync(thread.Id); - // Assert.That(deleted, Is.True); - - // ThreadCreationOptions options = new() - // { - // Metadata = - // { - // ["threadMetadata"] = "threadMetadataValue", - // } - // }; - // thread = await client.CreateThreadAsync(options); - // Validate(thread); - // Assert.That(thread.Metadata.TryGetValue("threadMetadata", out string threadMetadataValue) && threadMetadataValue == "threadMetadataValue"); - // AssistantThread retrievedThread = await client.GetThreadAsync(thread.Id); - // Assert.That(retrievedThread.Id, Is.EqualTo(thread.Id)); - // thread = await client.ModifyThreadAsync(thread, new ThreadModificationOptions() - 
// { - // Metadata = - // { - // ["threadMetadata"] = "newThreadMetadataValue", - // }, - // }); - // Assert.That(thread.Metadata.TryGetValue("threadMetadata", out threadMetadataValue) && threadMetadataValue == "newThreadMetadataValue"); - // } - - // [RecordedTest] - // public async Task SettingResponseFormatWorks() - // { - // AssistantClient client = GetTestClient(); - // string modelName = client.DeploymentOrThrow(); - - // Assistant assistant = await client.CreateAssistantAsync(modelName, new() - // { - // ResponseFormat = AssistantResponseFormat.JsonObject, - // }); - // Validate(assistant); - // Assert.That(assistant.ResponseFormat, Is.EqualTo(AssistantResponseFormat.JsonObject)); - // assistant = await client.ModifyAssistantAsync(assistant, new() - // { - // ResponseFormat = AssistantResponseFormat.Text, - // }); - // Assert.That(assistant.ResponseFormat, Is.EqualTo(AssistantResponseFormat.Text)); - // AssistantThread thread = await client.CreateThreadAsync(); - // Validate(thread); - // ThreadMessage message = await client.CreateMessageAsync(thread.Id, MessageRole.User, ["Write some JSON for me!"]); - // Validate(message); - // ThreadRun run = await client.CreateRunAsync(thread, assistant, new() - // { - // ResponseFormat = AssistantResponseFormat.JsonObject, - // }); - // Validate(run); - // Assert.That(run.ResponseFormat, Is.EqualTo(AssistantResponseFormat.JsonObject)); - // } - - // [RecordedTest] - // public async Task StreamingToolCall() - // { - // AssistantClient client = GetTestClient(); - // string modelName = client.DeploymentOrThrow(); - // FunctionToolDefinition getWeatherTool = new("get_current_weather", "Gets the user's current weather"); - // Assistant assistant = await client.CreateAssistantAsync(modelName, new() - // { - // Tools = { getWeatherTool } - // }); - // Validate(assistant); - - // Stopwatch stopwatch = Stopwatch.StartNew(); - // void Print(string message) => Console.WriteLine($"[{stopwatch.ElapsedMilliseconds,6}] {message}"); - 
- // Print(" >>> Beginning call ... "); - - // ThreadCreationOptions thrdOpt = new() - // { - // InitialMessages = { new(MessageRole.User, ["What should I wear outside right now?"]), }, - // }; - // AsyncResultCollection asyncResults = SyncOrAsync(client, - // c => c.CreateThreadAndRunStreaming(assistant, thrdOpt), - // c => c.CreateThreadAndRunStreamingAsync(assistant, thrdOpt)); - - // Print(" >>> Starting enumeration ..."); - - // ThreadRun run = null; - - // do - // { - // run = null; - // List toolOutputs = new(); - // await foreach (StreamingUpdate update in asyncResults) - // { - // string message = update.UpdateKind.ToString(); - - // if (update is RunUpdate runUpdate) - // { - // message += $" run_id:{runUpdate.Value.Id}"; - // run = runUpdate.Value; - // } - // if (update is RequiredActionUpdate requiredActionUpdate) - // { - // Assert.That(requiredActionUpdate.FunctionName, Is.EqualTo(getWeatherTool.FunctionName)); - // Assert.That(requiredActionUpdate.GetThreadRun().Status, Is.EqualTo(RunStatus.RequiresAction)); - // message += $" {requiredActionUpdate.FunctionName}"; - // toolOutputs.Add(new(requiredActionUpdate.ToolCallId, "warm and sunny")); - // } - // if (update is MessageContentUpdate contentUpdate) - // { - // message += $" {contentUpdate.Text}"; - // } - // Print(message); - // } - // if (toolOutputs.Count > 0) - // { - // asyncResults = SyncOrAsync(client, - // c => c.SubmitToolOutputsToRunStreaming(run, toolOutputs), - // c => c.SubmitToolOutputsToRunStreamingAsync(run, toolOutputs)); - // } - // } while (run?.Status.IsTerminal == false); - // } - - // [RecordedTest] - // public async Task BasicMessageOperationsWork() - // { - // // TODO FIXME Can't currently delete messages on AOAI - // bool aoaiDeleteBugFixed = false; - - // AssistantClient client = GetTestClient(); - // AssistantThread thread = await client.CreateThreadAsync(); - // Validate(thread); - // ThreadMessage message = await client.CreateMessageAsync(thread.Id, MessageRole.User, 
["Hello, world!"]); - // Validate(message); - // Assert.That(message.CreatedAt, Is.GreaterThan(s_2024)); - // Assert.That(message.Content?.Count, Is.EqualTo(1)); - // Assert.That(message.Content[0], Is.Not.Null); - // Assert.That(message.Content[0].Text, Is.EqualTo("Hello, world!")); - - // if (aoaiDeleteBugFixed) - // { - // bool deleted = await client.DeleteMessageAsync(message); - // Assert.That(deleted, Is.True); - // } - - // message = await client.CreateMessageAsync(thread.Id, MessageRole.User, ["Goodbye, world!"], new MessageCreationOptions() - // { - // Metadata = - // { - // ["messageMetadata"] = "messageMetadataValue", - // }, - // }); - // Validate(message); - // Assert.That(message.Metadata.TryGetValue("messageMetadata", out string metadataValue) && metadataValue == "messageMetadataValue"); - - // ThreadMessage retrievedMessage = await client.GetMessageAsync(thread.Id, message.Id); - // Assert.That(retrievedMessage.Id, Is.EqualTo(message.Id)); - - // message = await client.ModifyMessageAsync(message, new MessageModificationOptions() - // { - // Metadata = - // { - // ["messageMetadata"] = "newValue", - // } - // }); - // Assert.That(message.Metadata.TryGetValue("messageMetadata", out metadataValue) && metadataValue == "newValue"); - - // var messagePage = await SyncOrAsyncList(client, - // c => c.GetMessages(thread), - // c => c.GetMessagesAsync(thread)); - // if (aoaiDeleteBugFixed) - // { - // Assert.That(messagePage.Count, Is.EqualTo(1)); - // } - // else - // { - // Assert.That(messagePage.Count, Is.EqualTo(2)); - // } - - // Assert.That(messagePage.ElementAt(0).Id, Is.EqualTo(message.Id)); - // Assert.That(messagePage.ElementAt(0).Metadata.TryGetValue("messageMetadata", out metadataValue) && metadataValue == "newValue"); - // } - - // [RecordedTest] - // public async Task ThreadWithInitialMessagesWorks() - // { - // AssistantClient client = GetTestClient(); - // ThreadCreationOptions options = new() - // { - // InitialMessages = - // { - // new 
ThreadInitializationMessage(MessageRole.User, ["Hello, world!"]), - // new ThreadInitializationMessage(MessageRole.User, - // [ - // "Can you describe this image for me?", - // MessageContent.FromImageUrl(new Uri("https://test.openai.com/image.png")) - // ]) - // { - // Metadata = - // { - // ["messageMetadata"] = "messageMetadataValue", - // }, - // }, - // }, - // }; - // AssistantThread thread = await client.CreateThreadAsync (options); - // Validate(thread); - // List messageList = await SyncOrAsyncList(client, - // c => c.GetMessages(thread, resultOrder: ListOrder.OldestFirst), - // c => c.GetMessagesAsync(thread, resultOrder: ListOrder.OldestFirst)); - // Assert.That(messageList.Count, Is.EqualTo(2)); - // Assert.That(messageList[0].Role, Is.EqualTo(MessageRole.User)); - // Assert.That(messageList[0].Content?.Count, Is.EqualTo(1)); - // Assert.That(messageList[0].Content[0].Text, Is.EqualTo("Hello, world!")); - // Assert.That(messageList[1].Content?.Count, Is.EqualTo(2)); - // Assert.That(messageList[1].Content[0], Is.Not.Null); - // Assert.That(messageList[1].Content[0].Text, Is.EqualTo("Can you describe this image for me?")); - // Assert.That(messageList[1].Content[1], Is.Not.Null); - // Assert.That(messageList[1].Content[1].ImageUrl.AbsoluteUri, Is.EqualTo("https://test.openai.com/image.png")); - // } - - // [RecordedTest] - // public async Task BasicRunOperationsWork() - // { - // AssistantClient client = GetTestClient(); - // string modelName = client.DeploymentOrThrow(); - // Assistant assistant = await client.CreateAssistantAsync(modelName); - // Validate(assistant); - // AssistantThread thread = await client.CreateThreadAsync(); - // Validate(thread); - // List runPage = await SyncOrAsyncList(client, - // c => c.GetRuns(thread.Id), - // c => c.GetRunsAsync(thread.Id)); - // Assert.That(runPage.Count, Is.EqualTo(0)); - // ThreadMessage message = await client.CreateMessageAsync(thread.Id, MessageRole.User, ["Hello, assistant!"]); - // Validate(message); 
- // ThreadRun run = await client.CreateRunAsync(thread.Id, assistant.Id); - // Validate(run); - // Assert.That(run.Status, Is.EqualTo(RunStatus.Queued)); - // Assert.That(run.CreatedAt, Is.GreaterThan(s_2024)); - // ThreadRun retrievedRun = await client.GetRunAsync(thread.Id, run.Id); - // Assert.That(retrievedRun.Id, Is.EqualTo(run.Id)); - // runPage = await SyncOrAsyncList(client, - // c => c.GetRuns(thread.Id), - // c => c.GetRunsAsync(thread.Id)); - // Assert.That(runPage.Count, Is.EqualTo(1)); - // Assert.That(runPage.ElementAt(0).Id, Is.EqualTo(run.Id)); - - // List messages = await SyncOrAsyncList(client, - // c => c.GetMessages(thread), - // c => c.GetMessagesAsync(thread)); - // Assert.That(messages.Count, Is.GreaterThanOrEqualTo(1)); - - // run = await WaitUntilReturnLast( - // run, - // () => client.GetRunAsync(run), - // r => r.Status.IsTerminal); - // Assert.That(run.Status, Is.EqualTo(RunStatus.Completed)); - - // Assert.Multiple(() => - // { - // Assert.That(run.Status, Is.EqualTo(RunStatus.Completed)); - // Assert.That(run.CompletedAt, Is.GreaterThan(s_2024)); - // Assert.That(run.RequiredActions, Is.Empty); - // Assert.That(run.AssistantId, Is.EqualTo(assistant.Id)); - // Assert.That(run.FailedAt, Is.Null); - // Assert.That(run.IncompleteDetails, Is.Null); - // }); - // messages = await SyncOrAsyncList(client, - // c => c.GetMessages(thread), - // c => c.GetMessagesAsync(thread)); - // Assert.That(messages.Count, Is.EqualTo(2)); - - // Assert.That(messages.ElementAt(0).Role, Is.EqualTo(MessageRole.Assistant)); - // Assert.That(messages.ElementAt(1).Role, Is.EqualTo(MessageRole.User)); - // Assert.That(messages.ElementAt(1).Id, Is.EqualTo(message.Id)); - // } - - // [RecordedTest] - // public async Task BasicRunStepFunctionalityWorks() - // { - // AssistantClient client = GetTestClient(); - // string modelName = client.DeploymentOrThrow(); - // Assistant assistant = await client.CreateAssistantAsync(modelName, new AssistantCreationOptions() - // { 
- // Tools = { new CodeInterpreterToolDefinition() }, - // Instructions = "Call the code interpreter tool when asked to visualize mathematical concepts.", - // }); - // Validate(assistant); - - // AssistantThread thread = await client.CreateThreadAsync(new ThreadCreationOptions() - // { - // InitialMessages = { new(MessageRole.User, ["Please graph the equation y = 3x + 4"]), }, - // }); - // Validate(thread); - - // ThreadRun run = await client.CreateRunAsync(thread, assistant); - // Validate(run); - - // run = await WaitUntilReturnLast( - // run, - // () => client.GetRunAsync(run), - // r => r.Status.IsTerminal); - // Assert.That(run.Status, Is.EqualTo(RunStatus.Completed)); - // Assert.That(run.Usage?.TotalTokens, Is.GreaterThan(0)); - - // List runSteps = await SyncOrAsyncList(client, - // c => c.GetRunSteps(run), - // c => c.GetRunStepsAsync(run)); - // Assert.That(runSteps.Count(), Is.GreaterThan(1)); - // Assert.Multiple(() => - // { - // Assert.That(runSteps.ElementAt(0).AssistantId, Is.EqualTo(assistant.Id)); - // Assert.That(runSteps.ElementAt(0).ThreadId, Is.EqualTo(thread.Id)); - // Assert.That(runSteps.ElementAt(0).RunId, Is.EqualTo(run.Id)); - // Assert.That(runSteps.ElementAt(0).CreatedAt, Is.GreaterThan(s_2024)); - // Assert.That(runSteps.ElementAt(0).CompletedAt, Is.GreaterThan(s_2024)); - // }); - // RunStepDetails details = runSteps.ElementAt(0).Details; - // Assert.That(details?.CreatedMessageId, Is.Not.Null.Or.Empty); - - // details = runSteps.ElementAt(1).Details; - // Assert.Multiple(() => - // { - // Assert.That(details?.ToolCalls.Count, Is.GreaterThan(0)); - // Assert.That(details.ToolCalls[0].ToolKind, Is.EqualTo(RunStepToolCallKind.CodeInterpreter)); - // Assert.That(details.ToolCalls[0].ToolCallId, Is.Not.Null.Or.Empty); - // Assert.That(details.ToolCalls[0].CodeInterpreterInput, Is.Not.Null.Or.Empty); - // Assert.That(details.ToolCalls[0].CodeInterpreterOutputs?.Count, Is.GreaterThan(0)); - // 
Assert.That(details.ToolCalls[0].CodeInterpreterOutputs[0].ImageFileId, Is.Not.Null.Or.Empty); - // }); - // } - - // [RecordedTest] - // public async Task FunctionToolsWork() - // { - // AssistantClient client = GetTestClient(); - // string modelName = client.DeploymentOrThrow(); - // Assistant assistant = await client.CreateAssistantAsync(modelName, new AssistantCreationOptions() - // { - // Tools = - // { - // new FunctionToolDefinition() - // { - // FunctionName = "get_favorite_food_for_day_of_week", - // Description = "gets the user's favorite food for a given day of the week, like Tuesday", - // Parameters = BinaryData.FromObjectAsJson(new - // { - // type = "object", - // properties = new - // { - // day_of_week = new - // { - // type = "string", - // description = "a day of the week, like Tuesday or Saturday", - // } - // } - // }), - // }, - // }, - // }); - // Validate(assistant); - // Assert.That(assistant.Tools?.Count, Is.EqualTo(1)); - - // FunctionToolDefinition responseToolDefinition = assistant.Tools[0] as FunctionToolDefinition; - // Assert.That(responseToolDefinition?.FunctionName, Is.EqualTo("get_favorite_food_for_day_of_week")); - // Assert.That(responseToolDefinition?.Parameters, Is.Not.Null); - - // ThreadRun run = await client.CreateThreadAndRunAsync( - // assistant, - // new ThreadCreationOptions() - // { - // InitialMessages = { new(MessageRole.User, ["What should I eat on Thursday?"]) }, - // }, - // new RunCreationOptions() - // { - // AdditionalInstructions = "Call provided tools when appropriate.", - // }); - // Validate(run); - // Console.WriteLine($" Run status right after creation: {run.Status}"); - - // // TODO FIXME: The underlying OpenAI code doesn't consider the "requires_action" status to be terminal even though it is. 
- // // Work around this here - // run = await WaitUntilReturnLast( - // run, - // () => client.GetRunAsync(run), - // r => r.Status.IsTerminal || r.Status.Equals(RunStatus.RequiresAction)); - - // Assert.That(run.Status, Is.EqualTo(RunStatus.RequiresAction)); - // Assert.That(run.RequiredActions?.Count, Is.EqualTo(1)); - // Assert.That(run.RequiredActions[0].ToolCallId, Is.Not.Null.Or.Empty); - // Assert.That(run.RequiredActions[0].FunctionName, Is.EqualTo("get_favorite_food_for_day_of_week")); - // Assert.That(run.RequiredActions[0].FunctionArguments, Is.Not.Null.Or.Empty); - - // run = await client.SubmitToolOutputsToRunAsync(run, [new(run.RequiredActions[0].ToolCallId, "tacos")]); - // Assert.That(run.Status.IsTerminal, Is.False); - - // run = await WaitUntilReturnLast( - // run, - // () => client.GetRunAsync(run), - // r => r.Status.IsTerminal); - // Assert.That(run.Status, Is.EqualTo(RunStatus.Completed)); - - // List messages = await SyncOrAsyncList(client, - // c => c.GetMessages(run.ThreadId, resultOrder: ListOrder.NewestFirst), - // c => c.GetMessagesAsync(run.ThreadId, resultOrder: ListOrder.NewestFirst)); - // Assert.That(messages.Count, Is.GreaterThan(1)); - // Assert.That(messages.ElementAt(0).Role, Is.EqualTo(MessageRole.Assistant)); - // Assert.That(messages.ElementAt(0).Content?[0], Is.Not.Null); - // Assert.That(messages.ElementAt(0).Content?[0].Text, Does.Contain("tacos")); - // } - - // [RecordedTest] - // public async Task BasicFileSearchWorks() - // { - // // First, we need to upload a simple test file. - // AssistantClient client = GetTestClient(); - // string modelName = client.DeploymentOrThrow(); - // FileClient fileClient = GetTestClientFrom(client); - - // OpenAIFileInfo testFile = await fileClient.UploadFileAsync( - // BinaryData.FromString(""" - // This file describes the favorite foods of several people. 
- - // Summanus Ferdinand: tacos - // Tekakwitha Effie: pizza - // Filip Carola: cake - // """), - // "favorite_foods.txt", - // FileUploadPurpose.Assistants); - // Validate(testFile); - - // // Create an assistant, using the creation helper to make a new vector store - // Assistant assistant = await client.CreateAssistantAsync(modelName, new() - // { - // Tools = { new FileSearchToolDefinition() }, - // ToolResources = new() - // { - // FileSearch = new() - // { - // NewVectorStores = - // { - // new VectorStoreCreationHelper([testFile]), - // } - // } - // } - // }); - // Validate(assistant); - // Assert.That(assistant.ToolResources?.FileSearch?.VectorStoreIds, Has.Count.EqualTo(1)); - // string createdVectorStoreId = assistant.ToolResources.FileSearch.VectorStoreIds[0]; - // ValidateById(createdVectorStoreId); - - // // Modify an assistant to use the existing vector store - // assistant = await client.ModifyAssistantAsync(assistant, new AssistantModificationOptions() - // { - // ToolResources = new() - // { - // FileSearch = new() - // { - // VectorStoreIds = { assistant.ToolResources.FileSearch.VectorStoreIds[0] }, - // }, - // }, - // }); - // Assert.That(assistant.ToolResources?.FileSearch?.VectorStoreIds, Has.Count.EqualTo(1)); - // Assert.That(assistant.ToolResources.FileSearch.VectorStoreIds[0], Is.EqualTo(createdVectorStoreId)); - - // // Create a thread with an override vector store - // AssistantThread thread = await client.CreateThreadAsync(new ThreadCreationOptions() - // { - // InitialMessages = { new(MessageRole.User, ["Using the files you have available, what's Filip's favorite food?"]) }, - // ToolResources = new() - // { - // FileSearch = new() - // { - // NewVectorStores = - // { - // new VectorStoreCreationHelper([testFile.Id]) - // } - // } - // } - // }); - // Validate(thread); - // Assert.That(thread.ToolResources?.FileSearch?.VectorStoreIds, Has.Count.EqualTo(1)); - // createdVectorStoreId = 
thread.ToolResources.FileSearch.VectorStoreIds[0]; - // ValidateById(createdVectorStoreId); - - // // Ensure that modifying the thread with an existing vector store works - // thread = await client.ModifyThreadAsync(thread, new ThreadModificationOptions() - // { - // ToolResources = new() - // { - // FileSearch = new() - // { - // VectorStoreIds = { createdVectorStoreId }, - // } - // } - // }); - // Assert.That(thread.ToolResources?.FileSearch?.VectorStoreIds, Has.Count.EqualTo(1)); - // Assert.That(thread.ToolResources.FileSearch.VectorStoreIds[0], Is.EqualTo(createdVectorStoreId)); - - // ThreadRun run = await client.CreateRunAsync(thread, assistant); - // Validate(run); - // run = await WaitUntilReturnLast( - // run, - // () => client.GetRunAsync(run), - // r => r.Status.IsTerminal); - // Assert.That(run.Status, Is.EqualTo(RunStatus.Completed)); - - // AsyncPageableCollection messages = SyncOrAsync(client, - // c => c.GetMessages(thread, resultOrder: ListOrder.NewestFirst), - // c => c.GetMessagesAsync(thread, resultOrder: ListOrder.NewestFirst)); - // bool hasAtLeastOne = false; - // bool hasCake = false; - // await foreach (ThreadMessage message in messages) - // { - // hasAtLeastOne = true; - // foreach (MessageContent content in message.Content) - // { - // Console.WriteLine(content.Text); - // hasCake |= content.Text?.ToLowerInvariant().Contains("cake") == true; - // foreach (TextAnnotation annotation in content.TextAnnotations) - // { - // Console.WriteLine($" --> From file: {annotation.InputFileId}, replacement: {annotation.TextToReplace}"); - // } - // } - // } - // Assert.That(hasAtLeastOne, Is.True); - // Assert.That(hasCake, Is.True); - // } - - // [RecordedTest] - // public async Task StreamingRunWorks() - // { - // AssistantClient client = GetTestClient(); - // string modelName = client.DeploymentOrThrow(); - // Assistant assistant = await client.CreateAssistantAsync(modelName); - // Validate(assistant); - - // AssistantThread thread = await 
client.CreateThreadAsync(new ThreadCreationOptions() - // { - // InitialMessages = { new(MessageRole.User, ["Hello there, assistant! How are you today?"]), }, - // }); - // Validate(thread); - - // AsyncResultCollection streamingResult = SyncOrAsync(client, - // c => c.CreateRunStreaming(thread.Id, assistant.Id), - // c => c.CreateRunStreamingAsync(thread.Id, assistant.Id)); - - // StringBuilder content = new(); - // DateTimeOffset? lastUpdate = null; - // StreamingUpdateReason? lastUpdateReason = null; - - // await foreach (StreamingUpdate update in streamingResult) - // { - // if (update is RunUpdate runUpdate) - // { - // lastUpdateReason = runUpdate.UpdateKind; - // lastUpdate = update.UpdateKind switch - // { - // StreamingUpdateReason.RunCreated => runUpdate.Value.CreatedAt, - // StreamingUpdateReason.RunQueued => runUpdate.Value.StartedAt, - // StreamingUpdateReason.RunInProgress => runUpdate.Value.StartedAt, - // StreamingUpdateReason.RunCompleted => runUpdate.Value.CompletedAt, - // _ => null, - // }; - // } - // if (update is MessageContentUpdate contentUpdate) - // { - // // TODO FIXME: The OpenAI library code is currently incorrectly returning a MessageRole.User value here. 
- // // It should instead be null or at least Assistant - // //Assert.That(contentUpdate.Role, Is.Null.Or.EqualTo(MessageRole.Assistant)); - // Assert.That(contentUpdate.Text, Is.Not.Null); // can be empty string - // content.Append(contentUpdate.Text); - // } - // } - - // Assert.That(lastUpdateReason, Is.EqualTo(StreamingUpdateReason.RunCompleted)); - // Assert.That(lastUpdate, Is.Not.Null.And.GreaterThan(s_2024)); - // Assert.That(content, Has.Length.GreaterThan(0)); - // } - - private static readonly DateTimeOffset s_2024 = new(2024, 1, 1, 0, 0, 0, TimeSpan.Zero); -} diff --git a/.dotnet.azure/.tests.staging/Azure.AI.OpenAI.Tests.csproj b/.dotnet.azure/.tests.staging/Azure.AI.OpenAI.Tests.csproj deleted file mode 100644 index 3cb4fedeb..000000000 --- a/.dotnet.azure/.tests.staging/Azure.AI.OpenAI.Tests.csproj +++ /dev/null @@ -1,21 +0,0 @@ - - - net7.0 - - $(NoWarn);CS1591 - Unsigned - latest - - - - - - - - - - - - - - diff --git a/.dotnet.azure/CHANGELOG.md b/.dotnet.azure/CHANGELOG.md new file mode 100644 index 000000000..fc64493a3 --- /dev/null +++ b/.dotnet.azure/CHANGELOG.md @@ -0,0 +1,525 @@ +# Release History + +## 2.0.0-beta.5 (2024-09-03) + +This update increments library compatibility to `OpenAI 2.0.0-beta.11`, including several breaking changes. + +### Features Added + +- Added the `OpenAIChatModelFactory` in the `OpenAI.Chat` namespace (a static class that can be used to instantiate OpenAI models for mocking in non-live test scenarios). ([79014ab](https://github.com/openai/openai-dotnet/commit/79014abc01a00e13d5a334d3f6529ed590b8ee98)) + +### Breaking Changes + +- Updated fine-tuning pagination methods `GetJobs`, `GetEvents`, and `GetJobCheckpoints` to return `IEnumerable` instead of `ClientResult`. ([5773292](https://github.com/openai/openai-dotnet/commit/57732927575c6c48f30bded0afb9f5b16d4f30da)) +- Updated the batching pagination method `GetBatches` to return `IEnumerable` instead of `ClientResult`. 
([5773292](https://github.com/openai/openai-dotnet/commit/57732927575c6c48f30bded0afb9f5b16d4f30da)) +- Changed `GeneratedSpeechVoice` from an enum to an "extensible enum". ([79014ab](https://github.com/openai/openai-dotnet/commit/79014abc01a00e13d5a334d3f6529ed590b8ee98)) +- Changed `GeneratedSpeechFormat` from an enum to an "extensible enum". ([cc9169a](https://github.com/openai/openai-dotnet/commit/cc9169ad2ff92bb7312eed3b7e64e45da5da1d18)) +- Renamed `SpeechGenerationOptions`'s `Speed` property to `SpeedRatio`. ([cc9169a](https://github.com/openai/openai-dotnet/commit/cc9169ad2ff92bb7312eed3b7e64e45da5da1d18)) + +### Bugs Fixed + +- Corrected an internal deserialization issue that caused recent updates to Assistants `file_search` to fail when streaming a run. Strongly typed support for `ranking_options` is not included but will arrive soon. ([cc9169a](https://github.com/openai/openai-dotnet/commit/cc9169ad2ff92bb7312eed3b7e64e45da5da1d18)) +- Mitigated a .NET runtime issue that prevented `ChatResponseFormat` from serializing correctly on targets including Unity. ([cc9169a](https://github.com/openai/openai-dotnet/commit/cc9169ad2ff92bb7312eed3b7e64e45da5da1d18)) + +## 2.0.0-beta.4 (2024-08-30) + +This small release increments library compatibility to the latest `OpenAI 2.0.0-beta.10`. Prior to this update, interactions with the two breaking changes described below prevented full interoperability. + +### Breaking Changes + +- `AudioClient`'s `GenerateSpeechFromText()` method is renamed to `GenerateSpeech()` +- `OpenAIFileInfo`'s `SizeInBytes` is now of type `int?` (previously `long?`) + +## 2.0.0-beta.3 (2024-08-23) + +This change updates the library for compatibility with the latest `2.0.0-beta.9` of the `OpenAI` package and the `2024-07-01-preview` Azure OpenAI service API version label, as published on 8/5. + +### Features Added + +- The library now directly supports alternative authentication audiences, including Azure Government. 
This can be specified by providing an appropriate `AzureOpenAIAudience` value to the `AzureOpenAIClientOptions.Audience` property when creating a client. See the client configuration section of the README for more details. + +Additional new features from the `OpenAI` package can be found in [the OpenAI changelog](https://github.com/openai/openai-dotnet/blob/main/CHANGELOG.md). + +**Please note**: Structured Outputs support is not yet available with the `2024-07-01-preview` service API version. This means that attempting to use the feature with this library version will fail with an unrecognized property for either `response_format` or `strict` in request payloads; all existing functionality is unaffected. Azure OpenAI support for Structured Outputs is coming soon. + +### Breaking Changes + +No Azure-specific breaking changes are present in this update. + +The update from `OpenAI` `2.0.0-beta.7` to `2.0.0-beta.9` does bring a number of breaking changes, however, as described in [the OpenAI changelog](https://github.com/openai/openai-dotnet/blob/main/CHANGELOG.md): + +- Removed client constructors that do not explicitly take an API key parameter or an endpoint via an `OpenAIClientOptions` parameter, making it clearer how to appropriately instantiate a client. ([13a9c68](https://github.com/openai/openai-dotnet/commit/13a9c68647c8d54475f1529a63b13ad711bd4ba6)) +- Removed the endpoint parameter from all client constructors, making it clearer that an alternative endpoint must be specified via the `OpenAIClientOptions` parameter. ([13a9c68](https://github.com/openai/openai-dotnet/commit/13a9c68647c8d54475f1529a63b13ad711bd4ba6)) +- Removed `OpenAIClient`'s `Endpoint` `protected` property. ([13a9c68](https://github.com/openai/openai-dotnet/commit/13a9c68647c8d54475f1529a63b13ad711bd4ba6)) +- Made `OpenAIClient`'s constructor that takes a `ClientPipeline` parameter `protected internal` instead of just `protected`. 
([13a9c68](https://github.com/openai/openai-dotnet/commit/13a9c68647c8d54475f1529a63b13ad711bd4ba6)) +- Renamed the `User` property in applicable Options classes to `EndUserId`, making its purpose clearer. ([13a9c68](https://github.com/openai/openai-dotnet/commit/13a9c68647c8d54475f1529a63b13ad711bd4ba6)) +- Changed name of return types from methods returning streaming collections from `ResultCollection` to `CollectionResult`. ([7bdecfd](https://github.com/openai/openai-dotnet/commit/7bdecfd8d294be933c7779c7e5b6435ba8a8eab0)) +- Changed return types from methods returning paginated collections from `PageableCollection` to `PageCollection`. ([7bdecfd](https://github.com/openai/openai-dotnet/commit/7bdecfd8d294be933c7779c7e5b6435ba8a8eab0)) +- Users must now call `GetAllValues` on the collection of pages to enumerate collection items directly. Corresponding protocol methods return `IEnumerable` where each collection item represents a single service response holding a page of values. ([7bdecfd](https://github.com/openai/openai-dotnet/commit/7bdecfd8d294be933c7779c7e5b6435ba8a8eab0)) +- Updated `VectorStoreFileCounts` and `VectorStoreFileAssociationError` types from `readonly struct` to `class`. ([58f93c8](https://github.com/openai/openai-dotnet/commit/58f93c8d5ea080adfee8b37ae3cc034ebb06c79f)) + +### Bugs Fixed + +- Removed an inappropriate null check in `FileClient.GetFiles()` (azure-sdk-for-net 44912) +- Addressed issues with automatic retry behavior, including for HTTP 429 rate limit errors: + - Authorization headers are now appropriately reapplied to retried requests + - Automatic retry behavior will now honor header-based intervals from `Retry-After` and related response headers +- The client will now originate an `x-ms-client-request-id` header to match prior library behavior and facilitate troubleshooting + +Additional, non-Azure-specific bug fixes can be found in [the OpenAI changelog](https://github.com/openai/openai-dotnet/blob/main/CHANGELOG.md). 
+ +## 2.0.0-beta.2 (2024-06-14) + +### Features Added + +- Per changes to the [OpenAI .NET client library](https://github.com/openai/openai-dotnet), most convenience methods now provide the direct ability to provide optional `CancellationTokens`, removing the need to use protocol methods + +### Breaking Changes + +- In support of `CancellationToken`s in methods, an overridden method signature for streaming chat completions was changed and a new minimum version dependency of 2.0.0-beta.5 is established for the OpenAI dependency. These styles of breaks will be extraordinarily rare. + +### Bugs Fixed + +- See breaking changes: when streaming chat completions, an error of "Unrecognized request argument supplied: stream_options" is introduced when using Azure.AI.OpenAI 2.0.0-beta.1 with OpenAI 2.0.0-beta.5+. This is fixed with the new version. + +## 2.0.0-beta.1 (2024-06-07) + +**Please note**: This update brings a *major* set of changes to the Azure.AI.OpenAI library. + +With the release of the official [OpenAI .NET client library](https://github.com/openai/openai-dotnet), the `Azure.AI.OpenAI` library has migrated to become a companion to OpenAI's package that offers Azure client configuration and strongly-typed extension support for Azure-specific request and response models. + +**We'd love your feedback:** our goal is to move the new `OpenAI` .NET library and its refreshed `Azure.AI.OpenAI` companion into a General Availability status as quickly as we can; we've heard loud and clear that the perpetual preview/prerelease status is an adoption blocker. To reach that goal, your feedback -- either on the issues here, in `azure-sdk-for-net`, or the issues on the new `openai-dotnet` OpenAI repository -- will be invaluable. 
+ +### Features Added + +**OpenAI parity**: built on the OpenAI .NET library, full parity support is available for the breadth of common features, including: + +- Assistants V2 with streaming +- Audio transcription/translation and text-to-speech generation +- (Coming soon) Batch +- Chat completion +- Embeddings +- Files +- Fine-tuning +- Image generation with dall-e-3 +- Vector stores + +**Azure OpenAI**: updated to the latest `2024-05-01-preview` service API, new features include: + +- Assistants v2 with streaming +- Improved configuration for On Your Data +- Expanded Responsible AI content filter annotations + +### Breaking Changes + +Given the nature of this update, breaking changes are extensive. Please see the README and the [OpenAI library README](https://github.com/openai/openai-dotnet/blob/master/README.md) for usage details. OpenAI's library carries forward many of the same design concepts as the Azure.AI.OpenAI library used as a standalone library, but considerable improvements have been made to the surface that will require significant code adjustments. + +## 1.0.0-beta.17 (2024-05-03) + +### Features Added + +- Image input support for `gpt-4-turbo` chat completions now works with image data in addition to internet URLs. + Images may now be used as `gpt-4-turbo` message content items via one of three constructors: + - `ChatMessageImageContent(Uri)` -- the existing constructor, used for URL-based image references + - `ChatMessageImageContent(Stream,string)` -- (new) used with a stream and known MIME type (like `image/png`) + - `ChatMessageImageContent(BinaryData,string)` -- (new) used with a BinaryData instance and known MIME type + Please see the [readme example](https://github.com/Azure/azure-sdk-for-net/blob/main/sdk/openai/Azure.AI.OpenAI/README.md#chat-with-images-using-gpt-4-turbo) for more details. 
+ +### Breaking Changes + +- Public visibility of the `ChatMessageImageUrl` type is removed to promote more flexible use of data sources in + `ChatMessageImageContent`. Code that previously created a `ChatMessageImageUrl` using a `Uri` should simply provide + the `Uri` to the `ChatMessageImageContent` constructor directly. + +## 1.0.0-beta.16 (2024-04-11) + +### Features Added + +**Audio** + +- `GetAudioTranscription()` now supports word-level timestamp granularities via `AudioTranscriptionOptions`: + - The `Verbose` option for `ResponseFormat` must be used for any timing information to be populated + - `TimestampGranularityFlags` accepts a combination of the `.Word` and `.Segment` granularity values in + `AudioTimestampGranularity`, joined when needed via the single-pipe `|` operator + - For example, `TimestampGranularityFlags = AudioTimestampGranularity.Word | AudioTimestampGranularity.Segment` + will request that both word-level and segment-level timestamps are provided on the transcription result + - If not otherwise specified, `Verbose` format will default to using segment-level timestamp information + - Corresponding word-level information is found on the `.Words` collection of `AudioTranscription`, peer to the + existing `.Segments` collection + - Note that word-level timing information incurs a small amount of additional processingly latency; segment-level + timestamps do not encounter this behavior +- `GenerateSpeechFromText()` can now use `Wav` and `Pcm` values from `SpeechGenerationResponseFormat`, these new + options providing alternative uncompressed formats to `Flac` + +**Chat** + +- `ChatCompletions` and `StreamingChatCompletionsUpdate` now include the reported `Model` value from the response +- Log probability information is now included in `StreamingChatCompletionsUpdate` when `logprobs` are requested on + `GetChatCompletionsStreaming()` +- [AOAI] Custom Blocklist information in content filter results is now represented in a more structured + 
`ContentFilterDetailedResults` type +- [AOAI] A new `IndirectAttack` content filter entry is now present on content filter results for prompts + +### Breaking Changes + +- [AOAI] `AzureChatExtensionMessageContext`'s `RequestContentFilterResults` now uses the new + `ContentFilterDetailedResults` type, changed from the previous `IReadOnlyList`. The + previous list is now present on `CustomBlockLists.Details`, supplemented with a new `CustomBlockLists.Filtered` + property. + +### Bugs Fixed + +- [AOAI] An issue that sometimes caused `StreamingChatCompletionUpdates` from Azure OpenAI to inappropriately exclude + top-level information like `Id` and `CreatedAt` has been addressed + +## 1.0.0-beta.15 (2024-03-20) + +This release targets the latest `2024-03-01-preview` service API label and brings support for the `Dimensions` property when using new embedding models. + +### Features Added + +- `EmbeddingsOptions` now includes the `Dimensions` property, new to Azure OpenAI's `2024-03-01-preview` service API. + +### Bugs Fixed + +- Several issues with the `ImageGenerations` response object being treated as writeable are fixed: + - `ImageGenerations` no longer has an erroneous public constructor + - `ImageGenerations.Created` no longer has a public setter + - `ImageGenerations.Data` is now an `IReadOnlyList` instead of an `IList` + - A corresponding replacement factory method for mocks is added to `AzureOpenAIModelFactory` + +## 1.0.0-beta.14 (2024-03-04) + +### Features Added + +- Text-to-speech using OpenAI TTS models is now supported. See [OpenAI's API reference](https://platform.openai.com/docs/api-reference/audio/createSpeech) or the [Azure OpenAI quickstart](https://learn.microsoft.com/azure/ai-services/openai/text-to-speech-quickstart) for detailed overview and background information. + - The new method `GenerateSpeechFromText` exposes this capability on `OpenAIClient`. 
+ - Text-to-speech converts text into lifelike spoken audio in a chosen voice, together with other optional configurations. + - This method works for both Azure OpenAI and non-Azure `api.openai.com` client configurations + +### Breaking Changes + +"On Your Data" changes: + +- Introduced a new type `AzureChatExtensionDataSourceResponseCitation` for a more structured representation of citation data. +- Correspondingly, updated `AzureChatExtensionsMessageContext`: + - Replaced `Messages` with `Citations` of type `AzureChatExtensionDataSourceResponseCitation`. + - Added `Intent` as a string type. +- Renamed "AzureCognitiveSearch" to "AzureSearch": + - `AzureCognitiveSearchChatExtensionConfiguration` is now `AzureSearchChatExtensionConfiguration`. + - `AzureCognitiveSearchIndexFieldMappingOptions` is now `AzureSearchIndexFieldMappingOptions`. +- Check the project README for updated code snippets. + +### Other Changes + +- New properties in `ChatCompletionsOptions`: + - `EnableLogProbabilities`: Allows retrieval of log probabilities (REST: `logprobs`) + - `LogProbabilitiesPerToken`: The number of most likely tokens to return per token (REST: `top_logprobs`) +- Introduced a new property in `CompletionsOptions`: + - `Suffix`: Defines the suffix that follows the completion of inserted text (REST: `suffix`) +- Image generation response now includes content filtering details (specific to Azure OpenAI endpoint): + - `ImageGenerationData.ContentFilterResults`: Information about the content filtering results. (REST: `content_filter_results`) + - `ImageGenerationData.PromptFilterResults`: Information about the content filtering category (REST: `prompt_filter_results`) + +## 1.0.0-beta.13 (2024-02-01) + +### Breaking Changes + +- Removed the setter of the `Functions` property of the `ChatCompletionsOptions` class as per the guidelines for collection properties. 
+ +### Bugs Fixed + +- Addressed an issue with the public constructor for `ChatCompletionsFunctionToolCall` that failed to set the tool call type in the corresponding request. + +## 1.0.0-beta.12 (2023-12-15) + +Like beta.11, beta.12 is another release that brings further refinements and fixes. It remains based on the `2023-12-01-preview` service API version for Azure OpenAI and does not add any new service capabilities. + +### Features Added + +**Updates for using streaming tool calls:** + +- A new .NET-specific `StreamingToolCallUpdate` type has been added to better represent streaming tool call updates + when using chat tools. + - This new type includes an explicit `ToolCallIndex` property, reflecting `index` in the REST schema, to allow + resilient deserialization of parallel function tool calling. +- A convenience constructor has been added for `ChatRequestAssistantMessage` that can automatically populate from a prior + `ChatResponseMessage` when using non-streaming chat completions. +- A public constructor has been added for `ChatCompletionsFunctionToolCall` to allow more intuitive reconstruction of + `ChatCompletionsToolCall` instances for use in `ChatRequestAssistantMessage` instances made from streaming responses. + +**Other additions:** + +- To facilitate reuse of user message contents, `ChatRequestUserMessage` now provides a public `Content` property (`string`) as well as a public `MultimodalContentItems` property (`IList` type is introduced that implicitly exposes an `IAsyncEnumerable` derived from + the underlying response. +- `OpenAI.GetCompletionsStreaming()` now returns a `StreamingResponse` that may be directly + enumerated over. `StreamingCompletions`, `StreamingChoice`, and the corresponding methods are removed. +- Because Chat Completions use a distinct structure for their streaming response messages, a new + `StreamingChatCompletionsUpdate` type is introduced that encapsulates this update data. 
- Correspondingly, `OpenAIClient.GetChatCompletionsStreaming()` now returns a + `StreamingResponse<StreamingChatCompletionsUpdate>` that may be enumerated over directly.
The "convenience" constructors that represented required parameter data +differently -- for example, `EmbeddingsOptions(string)`, have also been removed in favor of the consistent "set or +directly provide" choice.
- The new methods `GetAudioTranscription` and `GetAudioTranslation` expose these capabilities on `OpenAIClient`
See [the Azure OpenAI using your own data quickstart](https://learn.microsoft.com/azure/ai-services/openai/use-your-data-quickstart) for conceptual background and detailed setup instructions. + - Azure OpenAI chat extensions are configured via a new `AzureChatExtensionsOptions` property on `ChatCompletionsOptions`. When an `AzureChatExtensionsOptions` is provided, configured requests will only work with clients configured to use the Azure OpenAI service, as the capabilities are unique to that service target. + - `AzureChatExtensionsOptions` then has `AzureChatExtensionConfiguration` instances added to its `Extensions` property, with these instances representing the supplementary information needed for Azure OpenAI to use desired data sources to supplement chat completions behavior. + - `ChatChoice` instances on a `ChatCompletions` response value that used chat extensions will then also have their `Message` property supplemented by an `AzureChatExtensionMessageContext` instance. This context contains a collection of supplementary `Messages` that describe the behavior of extensions that were used and supplementary response data, such as citations, provided along with the response. + - See the README sample snippet for a simplified example of request/response use with "using your own data" + +## 1.0.0-beta.6 (2023-07-19) + +### Features Added + +- DALL-E image generation is now supported. See [the Azure OpenAI quickstart](https://learn.microsoft.com/azure/cognitive-services/openai/dall-e-quickstart) for conceptual background and detailed setup instructions. + - `OpenAIClient` gains a new `GetImageGenerations` method that accepts an `ImageGenerationOptions` and produces an `ImageGenerations` via its response. This response object encapsulates the temporary storage location of generated images for future retrieval. + - In contrast to other capabilities, DALL-E image generation does not require explicit creation or specification of a deployment or model. 
Its surface as such does not include this concept. +- Functions for chat completions are now supported: see [OpenAI's blog post on the topic](https://openai.com/blog/function-calling-and-other-api-updates) for much more detail. + - A list of `FunctionDefinition` objects may be populated on `ChatCompletionsOptions` via its `Functions` property. These definitions include a name and description together with a serialized JSON Schema representation of its parameters; these parameters can be generated easily via `BinaryData.FromObjectAsJson` with dynamic objects -- see the README for example usage. + - **NOTE**: Chat Functions requires a minimum of the `-0613` model versions for `gpt-4` and `gpt-3.5-turbo`/`gpt-35-turbo`. Please ensure you're using these later model versions, as Functions are not supported with older model revisions. For Azure OpenAI, you can update a deployment's model version or create a new model deployment with an updated version via the Azure AI Studio interface, also accessible through Azure Portal. +- (Azure OpenAI specific) Completions and Chat Completions responses now include embedded content filter annotations for prompts and responses +- A new `Azure.AI.OpenAI.AzureOpenAIModelFactory` is now present for mocking. + +### Breaking Changes + +- `ChatMessage`'s one-parameter constructor has been replaced with a no-parameter constructor. Please replace any hybrid construction with one of these two options that either completely rely on property setting or completely rely on constructor parameters. + +## 1.0.0-beta.5 (2023-03-22) + +This is a significant release that brings GPT-4 model support (chat) and the ability to use non-Azure OpenAI (not just Azure OpenAI resources) to the .NET library. It also makes a number of clarifying adjustments to request properties for completions. + +### Features Added +- GPT-4 models are now supported via new `GetChatCompletions` and `GetChatCompletionsStreaming` methods on `OpenAIClient`. 
These use the `/chat/completions` REST endpoint and represent the [OpenAI Chat messages format](https://platform.openai.com/docs/guides/chat). + - The `gpt-3.5-model` can also be used with Chat completions; prior models like text-davinci-003 cannot be used with Chat completions and should still use the `GetCompletions` methods. +- Support for using OpenAI's endpoint via valid API keys obtained from https://platform.openai.com has been added. `OpenAIClient` has new constructors that accept an OpenAI API key instead of an Azure endpoint URI and credential; once configured, Completions, Chat Completions, and Embeddings can be used with identical calling patterns. + +### Breaking Changes + +A number of Completions request properties have been renamed and further documented for clarity. +- `CompletionsOptions` (REST request payload): + - `CacheLevel` and `CompletionConfig` are removed. + - `LogitBias` (REST: `logit_bias`), previously a `` Dictionary, is now an `` Dictionary named `TokenSelectionBiases`. + - `LogProbability` (REST: `logprobs`) is renamed to `LogProbabilityCount`. + - `Model` is removed (in favor of the method-level parameter for deployment or model name) + - `Prompt` is renamed to `Prompts` + - `SnippetCount` (REST: `n`) is renamed to `ChoicesPerPrompt`. + - `Stop` is renamed to `StopSequences`. +- Method and property documentation are broadly updated, with renames from REST schema (like `n` becoming `ChoicesPerPrompt`) specifically noted in ``. + +## 1.0.0-beta.4 (2023-02-23) + +### Bugs fixed +- Addressed issues that sometimes caused `beta.3`'s new `GetStreamingCompletions` method to execute indefinitely + +## 1.0.0-beta.3 (2023-02-17) + +### Features Added +- Support for streaming Completions responses, a capability that parallels setting `stream=true` in the REST API, is now available. A new `GetStreamingCompletions` method on `OpenAIClient` provides a response value `StreamingCompletions` type. 
This, in turn, exposes a collection of `StreamingChoice` objects as an `IAsyncEnumerable` that will update as a streamed response progresses. `StreamingChoice` further exposes an `IAsyncEnumerable` of streaming text elements via a `GetTextStreaming` method. Used together, this facilitates providing faster, live-updating responses for Completions via the convenient `await foreach` pattern. +- ASP.NET integration via `Microsoft.Extensions.Azure`'s `IAzureClientBuilder` interfaces is available. `OpenAIClient` is now a supported client type for these extension methods. + +### Breaking Changes +- `CompletionsLogProbability.TokenLogProbability`, available on `Choice` elements of a `Completions` response value's `.Choices` collection when a non-zero `LogProbability` value is provided via `CompletionsOptions`, is now an `IReadOnlyList` vs. its previous type of `IReadOnlyList`. This nullability addition accomodates circumstances where some tokens produce expected null values in log probability arrays. + +### Bugs Fixed +- Setting `CompletionsOptions.Echo` to true while also setting a non-zero `CompletionsOptions.LogProbability` no longer results in a deserialization error during response processing. + +## 1.0.0-beta.2 (2023-02-08) +### Bugs Fixed +- Adjusted bad name `finishReason` to `finish_reason` in deserializer class + +## 1.0.0-beta.1 (2023-02-06) + +### Features Added + +- This is the initial preview release for Azure OpenAI inference capabilities, including completions and embeddings. 
diff --git a/.dotnet.azure/Directory.Build.props b/.dotnet.azure/Directory.Build.props new file mode 100644 index 000000000..0d1b0a607 --- /dev/null +++ b/.dotnet.azure/Directory.Build.props @@ -0,0 +1,43 @@ + + + $(MSBuildThisFileDirectory) + $(MSBuildThisFileDirectory)eng + $(RepoRoot)src + $(RepoRoot)src/SDKs + true + true + true + + + + + Debug + AnyCPU + $(Platform) + + + + + $(RepoRoot)artifacts\ + $(ArtifactsDir)obj\ + $(ArtifactsDir)bin\ + $(ArtifactsDir)packages\$(Configuration)\ + + $(MSBuildProjectName) + + $([System.IO.Path]::GetFullPath('$(ArtifactsBinDir)$(OutDirName)\')) + $(BaseOutputPath)$(Configuration)\ + $(BaseOutputPath)$(PlatformName)\$(Configuration)\ + + $([System.IO.Path]::GetFullPath('$(ArtifactsObjDir)$(OutDirName)\')) + $(BaseIntermediateOutputPath)$(Configuration)\ + $(BaseIntermediateOutputPath)$(PlatformName)\$(Configuration)\ + + $(ArtifactsPackagesDir)/$(MSBuildProjectName) + + + import-required-properties + + + + diff --git a/.dotnet.azure/Directory.Build.targets b/.dotnet.azure/Directory.Build.targets new file mode 100644 index 000000000..3d4330c2e --- /dev/null +++ b/.dotnet.azure/Directory.Build.targets @@ -0,0 +1,10 @@ + + + + <_Parameter1>SourcePath + <_Parameter2>$(MSBuildProjectDirectory) + + + + + diff --git a/.dotnet.azure/README.md b/.dotnet.azure/README.md new file mode 100644 index 000000000..3a201aa89 --- /dev/null +++ b/.dotnet.azure/README.md @@ -0,0 +1,536 @@ +# Azure OpenAI client library for .NET + +The Azure OpenAI client library for .NET is a companion to the official [OpenAI client library for .NET](https://github.com/openai/openai-dotnet). The Azure OpenAI library configures a client for use with Azure OpenAI and provides additional strongly typed extension support for request and response models specific to Azure OpenAI scenarios. + +Azure OpenAI is a managed service that allows developers to deploy, tune, and generate content from OpenAI models on Azure resources. 
+ + [Source code](https://github.com/Azure/azure-sdk-for-net/blob/main/sdk/openai/Azure.AI.OpenAI/src) | [Package (NuGet)](https://www.nuget.org/packages/Azure.AI.OpenAI) | [API reference documentation](https://learn.microsoft.com/azure/cognitive-services/openai/reference) | [Product documentation](https://learn.microsoft.com/azure/cognitive-services/openai/) | [Samples](https://github.com/Azure/azure-sdk-for-net/blob/main/sdk/openai/Azure.AI.OpenAI/tests/Samples) + +## Getting started + +### Prerequisites + +To use an Azure OpenAI resource, you must have: + +1. An [Azure subscription](https://azure.microsoft.com/free/dotnet/) +1. [Azure OpenAI access](https://learn.microsoft.com/azure/cognitive-services/openai/overview#how-do-i-get-access-to-azure-openai) + +These prerequisites allow you to create an Azure OpenAI resource and get both a connection URL and API keys. For more information, see [Quickstart: Get started generating text using Azure OpenAI Service](https://learn.microsoft.com/azure/cognitive-services/openai/quickstart). + +### Install the package + +Install the client library for .NET with [NuGet](https://www.nuget.org/): + +```dotnetcli +dotnet add package Azure.AI.OpenAI --prerelease +``` + +The `Azure.AI.OpenAI` package builds on the [official OpenAI package](https://www.nuget.org/packages/OpenAI), which is included as a dependency. + +### Authenticate the client + +To interact with Azure OpenAI or OpenAI, create an instance of [AzureOpenAIClient][azure_openai_client_class] with one of the following approaches: + +- [Create client with a Microsoft Entra credential](#create-client-with-a-microsoft-entra-credential) **(Recommended)** +- [Create client with an API key](#create-client-with-an-api-key) + +#### Create client with a Microsoft Entra credential + +A secure, keyless authentication approach is to use Microsoft Entra ID (formerly Azure Active Directory) via the [Azure Identity library][azure_identity]. To use the library: + +1. 
Install the [Azure.Identity package](https://www.nuget.org/packages/Azure.Identity): + + ```dotnetcli + dotnet add package Azure.Identity + ``` + +1. Use the desired credential type from the library. For example, [DefaultAzureCredential][azure_identity_dac]: + +```C# Snippet:ConfigureClient:WithEntra +AzureOpenAIClient azureClient = new( + new Uri("https://your-azure-openai-resource.com"), + new DefaultAzureCredential()); +ChatClient chatClient = azureClient.GetChatClient("my-gpt-4o-mini-deployment"); +``` + +##### Configure client for Azure sovereign cloud** + +If your Microsoft Entra credentials are issued by an entity other than Azure Public Cloud, you can set the `Audience` property on `OpenAIClientOptions` to modify the token authorization scope used for requests. + +For example, the following will configure the client to authenticate tokens via Azure Government Cloud, using `https://cognitiveservices.azure.us/.default` as the authorization scope: + +```C# Snippet:ConfigureClient:GovernmentAudience +AzureOpenAIClientOptions options = new() +{ + Audience = AzureOpenAIAudience.AzureGovernment, +}; +AzureOpenAIClient azureClient = new( + new Uri("https://your-azure-openai-resource.com"), + new DefaultAzureCredential()); +ChatClient chatClient = azureClient.GetChatClient("my-gpt-4o-mini-deployment"); +``` + +For a custom or non-enumerated value, the authorization scope can be provided directly as the value for `Audience`: + +```C# Snippet:ConfigureClient:CustomAudience +AzureOpenAIClientOptions optionsWithCustomAudience = new() +{ + Audience = "https://cognitiveservices.azure.com/.default", +}; +``` + +#### Create client with an API key + +While not as secure as Microsoft Entra-based authentication, it's possible to authenticate using a client subscription key: + +```C# Snippet:ConfigureClient:WithAOAITopLevelClient +string keyFromEnvironment = Environment.GetEnvironmentVariable("AZURE_OPENAI_API_KEY"); + +AzureOpenAIClient azureClient = new( + new 
Uri("https://your-azure-openai-resource.com"), + new AzureKeyCredential(keyFromEnvironment)); +ChatClient chatClient = azureClient.GetChatClient("my-gpt-35-turbo-deployment"); +``` + +## Key concepts + +### Assistants + +See [OpenAI's Assistants API overview](https://platform.openai.com/docs/assistants/overview). + +### Audio transcription/translation and text-to-speech generation + +See [OpenAI Capabilities: Speech to text](https://platform.openai.com/docs/guides/speech-to-text/speech-to-text). + +### Batch + +See [OpenAI's Batch API guide](https://platform.openai.com/docs/guides/batch). + +### Chat completion + +Chat models take a list of messages as input and return a model-generated message as output. Although the chat format is +designed to make multi-turn conversations easy, it's also useful for single-turn tasks without any conversation. + +See [OpenAI Capabilities: Chat completion](https://platform.openai.com/docs/guides/text-generation/chat-completions-api). + +### Image generation + +See [OpenAI Capabilities: Image generation](https://platform.openai.com/docs/guides/images/introduction). + +### Files + +See [OpenAI's Files API reference](https://platform.openai.com/docs/api-reference/files). + +### Text embeddings + +See [OpenAI Capabilities: Embeddings](https://platform.openai.com/docs/guides/embeddings/embeddings). + +### Thread safety + +We guarantee that all client instance methods are thread-safe and independent of each other ([guideline](https://azure.github.io/azure-sdk/dotnet_introduction.html#dotnet-service-methods-thread-safety)). This ensures that the recommendation of reusing client instances is always safe, even across threads. 
+ +### Additional concepts + + +[Client options](https://github.com/Azure/azure-sdk-for-net/blob/main/sdk/core/Azure.Core/README.md#configuring-service-clients-using-clientoptions) | +[Accessing the response](https://github.com/Azure/azure-sdk-for-net/blob/main/sdk/core/Azure.Core/README.md#accessing-http-response-details-using-responset) | +[Long-running operations](https://github.com/Azure/azure-sdk-for-net/blob/main/sdk/core/Azure.Core/README.md#consuming-long-running-operations-using-operationt) | +[Handling failures](https://github.com/Azure/azure-sdk-for-net/blob/main/sdk/core/Azure.Core/README.md#reporting-errors-requestfailedexception) | +[Diagnostics](https://github.com/Azure/azure-sdk-for-net/blob/main/sdk/core/Azure.Core/samples/Diagnostics.md) | +[Mocking](https://learn.microsoft.com/dotnet/azure/sdk/unit-testing-mocking) | +[Client lifetime](https://devblogs.microsoft.com/azure-sdk/lifetime-management-and-thread-safety-guarantees-of-azure-sdk-net-clients/) + + +## Examples + +You can familiarize yourself with different APIs using [Samples from OpenAI's .NET library](https://github.com/openai/openai-dotnet/tree/main/examples) or [Azure.AI.OpenAI-specific samples](https://github.com/Azure/azure-sdk-for-net/tree/main/sdk/openai/Azure.AI.OpenAI/tests/Samples). Most OpenAI capabilities are available on both Azure OpenAI and OpenAI using the same scenario clients and methods, so not all scenarios are redundantly covered here. 
+ +### Get a chat completion + +```C# Snippet:SimpleChatResponse +AzureOpenAIClient azureClient = new( + new Uri("https://your-azure-openai-resource.com"), + new DefaultAzureCredential()); +ChatClient chatClient = azureClient.GetChatClient("my-gpt-35-turbo-deployment"); + +ChatCompletion completion = chatClient.CompleteChat( + [ + // System messages represent instructions or other guidance about how the assistant should behave + new SystemChatMessage("You are a helpful assistant that talks like a pirate."), + // User messages represent user input, whether historical or the most recen tinput + new UserChatMessage("Hi, can you help me?"), + // Assistant messages in a request represent conversation history for responses + new AssistantChatMessage("Arrr! Of course, me hearty! What can I do for ye?"), + new UserChatMessage("What's the best way to train a parrot?"), + ]); + +Console.WriteLine($"{completion.Role}: {completion.Content[0].Text}"); +``` + +### Stream chat messages + +Streaming chat completions use the `CompleteChatStreaming` and `CompleteChatStreamingAsync` method, which return a `ResultCollection` or `AsyncCollectionResult` instead of a `ClientResult`. These result collections can be iterated over using `foreach` or `await foreach`, with each update arriving as new data is available from the streamed response. + +```C# Snippet:StreamChatMessages +AzureOpenAIClient azureClient = new( + new Uri("https://your-azure-openai-resource.com"), + new DefaultAzureCredential()); +ChatClient chatClient = azureClient.GetChatClient("my-gpt-35-turbo-deployment"); + +CollectionResult completionUpdates = chatClient.CompleteChatStreaming( + [ + new SystemChatMessage("You are a helpful assistant that talks like a pirate."), + new UserChatMessage("Hi, can you help me?"), + new AssistantChatMessage("Arrr! Of course, me hearty! 
What can I do for ye?"), + new UserChatMessage("What's the best way to train a parrot?"), + ]); + +foreach (StreamingChatCompletionUpdate completionUpdate in completionUpdates) +{ + foreach (ChatMessageContentPart contentPart in completionUpdate.ContentUpdate) + { + Console.Write(contentPart.Text); + } +} +``` + +### Use chat tools + +**Tools** extend chat completions by allowing an assistant to invoke defined functions and other capabilities in the +process of fulfilling a chat completions request. To use chat tools, start by defining a function tool. Here, we root the tools in local methods for clarity and convenience: + +```C# Snippet:ChatTools:DefineTool +static string GetCurrentLocation() +{ + // Call the location API here. + return "San Francisco"; +} + +static string GetCurrentWeather(string location, string unit = "celsius") +{ + // Call the weather API here. + return $"31 {unit}"; +} + +ChatTool getCurrentLocationTool = ChatTool.CreateFunctionTool( + functionName: nameof(GetCurrentLocation), + functionDescription: "Get the user's current location" +); + +ChatTool getCurrentWeatherTool = ChatTool.CreateFunctionTool( + functionName: nameof(GetCurrentWeather), + functionDescription: "Get the current weather in a given location", + functionParameters: BinaryData.FromString(""" + { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state, e.g. Boston, MA" + }, + "unit": { + "type": "string", + "enum": [ "celsius", "fahrenheit" ], + "description": "The temperature unit to use. Infer this from the specified location." 
+ } + }, + "required": [ "location" ] + } + """) +); +``` + +With the tool defined, include that new definition in the options for a chat completions request: + +```C# Snippet:ChatTools:RequestWithFunctions +ChatCompletionOptions options = new() +{ + Tools = { getCurrentLocationTool, getCurrentWeatherTool }, +}; + +List conversationMessages = + [ + new UserChatMessage("What's the weather like in Boston?"), + ]; +ChatCompletion completion = chatClient.CompleteChat(conversationMessages); +``` + +When the assistant decides that one or more tools should be used, the response message includes one or more "tool +calls" that must all be resolved via "tool messages" on the subsequent request. This resolution of tool calls into +new request messages can be thought of as a sort of "callback" for chat completions. + +To provide tool call resolutions to the assistant to allow the request to continue, provide all prior historical +context -- including the original system and user messages, the response from the assistant that included the tool +calls, and the tool messages that resolved each of those tools -- when making a subsequent request. + +```C# Snippet:ChatTools:HandleToolCalls +// Purely for convenience and clarity, this standalone local method handles tool call responses. +string GetToolCallContent(ChatToolCall toolCall) +{ + if (toolCall.FunctionName == getCurrentWeatherTool.FunctionName) + { + // Validate arguments before using them; it's not always guaranteed to be valid JSON! 
+ try + { + using JsonDocument argumentsDocument = JsonDocument.Parse(toolCall.FunctionArguments); + if (!argumentsDocument.RootElement.TryGetProperty("location", out JsonElement locationElement)) + { + // Handle missing required "location" argument + } + else + { + string location = locationElement.GetString(); + if (argumentsDocument.RootElement.TryGetProperty("unit", out JsonElement unitElement)) + { + return GetCurrentWeather(location, unitElement.GetString()); + } + else + { + return GetCurrentWeather(location); + } + } + } + catch (JsonException) + { + // Handle the JsonException (bad arguments) here + } + } + // Handle unexpected tool calls + throw new NotImplementedException(); +} + +if (completion.FinishReason == ChatFinishReason.ToolCalls) +{ + // Add a new assistant message to the conversation history that includes the tool calls + conversationMessages.Add(new AssistantChatMessage(completion)); + + foreach (ChatToolCall toolCall in completion.ToolCalls) + { + conversationMessages.Add(new ToolChatMessage(toolCall.Id, GetToolCallContent(toolCall))); + } + + // Now make a new request with all the messages thus far, including the original +} +``` + +When using tool calls with streaming responses, accumulate tool call details much like you'd accumulate the other +portions of streamed choices, in this case using the accumulated `StreamingToolCallUpdate` data to instantiate new +tool call messages for assistant message history. Note that the model will ignore `ChoiceCount` when providing tools +and that all streamed responses should map to a single, common choice index in the range of `[0..(ChoiceCount - 1)]`. 
+ +```C# Snippet:ChatTools:StreamingChatTools +Dictionary toolCallIdsByIndex = []; +Dictionary functionNamesByIndex = []; +Dictionary functionArgumentBuildersByIndex = []; +StringBuilder contentBuilder = new(); + +foreach (StreamingChatCompletionUpdate streamingChatUpdate + in chatClient.CompleteChatStreaming(conversationMessages, options)) +{ + foreach (ChatMessageContentPart contentPart in streamingChatUpdate.ContentUpdate) + { + contentBuilder.Append(contentPart.Text); + } + foreach (StreamingChatToolCallUpdate toolCallUpdate in streamingChatUpdate.ToolCallUpdates) + { + if (!string.IsNullOrEmpty(toolCallUpdate.Id)) + { + toolCallIdsByIndex[toolCallUpdate.Index] = toolCallUpdate.Id; + } + if (!string.IsNullOrEmpty(toolCallUpdate.FunctionName)) + { + functionNamesByIndex[toolCallUpdate.Index] = toolCallUpdate.FunctionName; + } + if (!string.IsNullOrEmpty(toolCallUpdate.FunctionArgumentsUpdate)) + { + StringBuilder argumentsBuilder + = functionArgumentBuildersByIndex.TryGetValue(toolCallUpdate.Index, out StringBuilder existingBuilder) + ? 
existingBuilder + : new(); + argumentsBuilder.Append(toolCallUpdate.FunctionArgumentsUpdate); + functionArgumentBuildersByIndex[toolCallUpdate.Index] = argumentsBuilder; + } + } +} + +List toolCalls = []; +foreach (KeyValuePair indexToIdPair in toolCallIdsByIndex) +{ + toolCalls.Add(ChatToolCall.CreateFunctionToolCall( + indexToIdPair.Value, + functionNamesByIndex[indexToIdPair.Key], + functionArgumentBuildersByIndex[indexToIdPair.Key].ToString())); +} + +conversationMessages.Add(new AssistantChatMessage(toolCalls, contentBuilder.ToString())); + +// Placeholder: each tool call must be resolved, like in the non-streaming case +string GetToolCallOutput(ChatToolCall toolCall) => null; + +foreach (ChatToolCall toolCall in toolCalls) +{ + conversationMessages.Add(new ToolChatMessage(toolCall.Id, GetToolCallOutput(toolCall))); +} + +// Repeat with the history and all tool call resolution messages added +``` + +### Use your own data with Azure OpenAI + +The use your own data feature is unique to Azure OpenAI and won't work with a client configured to use the non-Azure service. +See [the Azure OpenAI using your own data quickstart](https://learn.microsoft.com/azure/ai-services/openai/use-your-data-quickstart) for conceptual background and detailed setup instructions. + +**NOTE:** The concurrent use of [Chat Functions](#use-chat-functions) and Azure Chat Extensions on a single request isn't yet supported. Supplying both will result in the Chat Functions information being ignored and the operation behaving as if only the Azure Chat Extensions were provided. To address this limitation, consider separating the evaluation of Chat Functions and Azure Chat Extensions across multiple requests in your solution design. + +```C# Snippet:ChatUsingYourOwnData +// Extension methods to use data sources with options are subject to SDK surface changes. Suppress the +// warning to acknowledge and this and use the subject-to-change AddDataSource method. 
+#pragma warning disable AOAI001 + +ChatCompletionOptions options = new(); +options.AddDataSource(new AzureSearchChatDataSource() +{ + Endpoint = new Uri("https://your-search-resource.search.windows.net"), + IndexName = "contoso-products-index", + Authentication = DataSourceAuthentication.FromApiKey( + Environment.GetEnvironmentVariable("OYD_SEARCH_KEY")), +}); + +ChatCompletion completion = chatClient.CompleteChat( + [ + new UserChatMessage("What are the best-selling Contoso products this month?"), + ], + options); + +AzureChatMessageContext onYourDataContext = completion.GetAzureMessageContext(); + +if (onYourDataContext?.Intent is not null) +{ + Console.WriteLine($"Intent: {onYourDataContext.Intent}"); +} +foreach (AzureChatCitation citation in onYourDataContext?.Citations ?? []) +{ + Console.WriteLine($"Citation: {citation.Content}"); +} +``` + +### Use Assistants and stream a run + +[Assistants](https://platform.openai.com/docs/assistants/overview) provide a stateful, service-persisted conversational +model that can be enriched with a larger array of tools than Chat Completions. + +Creating an `AssistantClient` is similar to other scenario clients. An important difference is that Assistants features +are marked as `[Experimental]` to reflect the API's beta status, and thus you'll need to suppress the corresponding +warning to instantiate a client. This can be done in the `.csproj` file via the `` element or, as below, in +the code itself with a `#pragma` directive. + +```C# Snippet:Assistants:CreateClient +AzureOpenAIClient azureClient = new( + new Uri("https://your-azure-openai-resource.com"), + new DefaultAzureCredential()); + +// The Assistants feature area is in beta, with API specifics subject to change. +// Suppress the [Experimental] warning via .csproj or, as here, in the code to acknowledge. 
+#pragma warning disable OPENAI001 +AssistantClient assistantClient = azureClient.GetAssistantClient(); +``` + +With a client, you can then create Assistants, Threads, and new Messages on a thread in preparation to start a run. As is the case for other shared API surfaces, you should use an Azure OpenAI model deployment name wherever a model name is requested. + +```C# Snippet:Assistants:PrepareToRun +Assistant assistant = await assistantClient.CreateAssistantAsync( + model: "my-gpt-4o-deployment", + new AssistantCreationOptions() + { + Name = "My Friendly Test Assistant", + Instructions = "You politely help with math questions. Use the code interpreter tool when asked to " + + "visualize numbers.", + Tools = { ToolDefinition.CreateCodeInterpreter() }, + }); +ThreadInitializationMessage initialMessage = new( + MessageRole.User, + [ + "Hi, Assistant! Draw a graph for a line with a slope of 4 and y-intercept of 9." + ]); +AssistantThread thread = await assistantClient.CreateThreadAsync(new ThreadCreationOptions() +{ + InitialMessages = { initialMessage }, +}); +``` + +You can then start a run and stream updates as they arrive using the `Streaming` method variants, handling the updates +you're interested in using the enumerated kind of event it is and/or one of the several derived types for the streaming +update class, as shown here for content: + +```C# Snippet:Assistants:StreamRun +RunCreationOptions runOptions = new() +{ + AdditionalInstructions = "When possible, talk like a pirate." +}; +await foreach (StreamingUpdate streamingUpdate + in assistantClient.CreateRunStreamingAsync(thread, assistant, runOptions)) +{ + if (streamingUpdate.UpdateKind == StreamingUpdateReason.RunCreated) + { + Console.WriteLine($"--- Run started! 
---"); + } + else if (streamingUpdate is MessageContentUpdate contentUpdate) + { + Console.Write(contentUpdate.Text); + if (contentUpdate.ImageFileId is not null) + { + Console.WriteLine($"[Image content file ID: {contentUpdate.ImageFileId}"); + } + } +} +``` + +Remember that things like Assistants, Threads, and Vector Stores are persistent resources. You can save their IDs to +reuse them later or, as demonstrated below, delete them when no longer desired. + +```C# Snippet:Assistants:Cleanup +// Optionally, delete persistent resources that are no longer needed. +_ = await assistantClient.DeleteAssistantAsync(assistant); +_ = await assistantClient.DeleteThreadAsync(thread); +``` + +## Next steps + +## Troubleshooting + +When you interact with Azure OpenAI using the .NET SDK, errors returned by the service correspond to the same HTTP status codes returned for [REST API][openai_rest] requests. + +For example, if you try to create a client using an endpoint that doesn't match your Azure OpenAI Resource endpoint, a `404` error is returned, indicating `Resource Not Found`. + +## Contributing + +See the [OpenAI CONTRIBUTING.md][openai_contrib] for details on building, testing, and contributing to this library. + +This project welcomes contributions and suggestions. Most contributions require you to agree to a Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us the rights to use your contribution. For details, visit [cla.microsoft.com][cla]. + +When you submit a pull request, a CLA-bot will automatically determine whether you need to provide a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions provided by the bot. You will only need to do this once across all repos using our CLA. + +This project has adopted the [Microsoft Open Source Code of Conduct][code_of_conduct]. 
For more information, see the [Code of Conduct FAQ][code_of_conduct_faq] or contact [opencode@microsoft.com][email_opencode] with any additional questions or comments. + + +[azure_identity]: https://learn.microsoft.com/dotnet/api/overview/azure/identity-readme?view=azure-dotnet +[azure_identity_dac]: https://learn.microsoft.com/dotnet/api/azure.identity.defaultazurecredential?view=azure-dotnet +[msdocs_openai_chat_quickstart]: https://learn.microsoft.com/azure/ai-services/openai/chatgpt-quickstart?pivots=programming-language-csharp +[msdocs_openai_dalle_quickstart]: https://learn.microsoft.com/azure/ai-services/openai/dall-e-quickstart?pivots=programming-language-csharp +[msdocs_openai_whisper_quickstart]: https://learn.microsoft.com/azure/ai-services/openai/whisper-quickstart +[msdocs_openai_tts_quickstart]: https://learn.microsoft.com/azure/ai-services/openai/text-to-speech-quickstart +[msdocs_openai_completion]: https://learn.microsoft.com/azure/cognitive-services/openai/how-to/completions +[msdocs_openai_embedding]: https://learn.microsoft.com/azure/cognitive-services/openai/concepts/understand-embeddings +[style-guide-msft]: https://docs.microsoft.com/style-guide/capitalization +[style-guide-cloud]: https://aka.ms/azsdk/cloud-style-guide +[azure_openai_client_class]: https://github.com/Azure/azure-sdk-for-net/blob/main/sdk/openai/Azure.AI.OpenAI/src/Custom/AzureOpenAIClient.cs +[openai_rest]: https://learn.microsoft.com/azure/cognitive-services/openai/reference +[azure_openai_completions_docs]: https://learn.microsoft.com/azure/cognitive-services/openai/how-to/completions +[azure_openai_embeddgings_docs]: https://learn.microsoft.com/azure/cognitive-services/openai/concepts/understand-embeddings +[openai_contrib]: https://github.com/Azure/azure-sdk-for-net/blob/main/CONTRIBUTING.md +[cla]: https://cla.microsoft.com +[code_of_conduct]: https://opensource.microsoft.com/codeofconduct/ +[code_of_conduct_faq]: https://opensource.microsoft.com/codeofconduct/faq/ 
+[email_opencode]: mailto:opencode@microsoft.com + +![Impressions](https://azure-sdk-impressions.azurewebsites.net/api/impressions/azure-sdk-for-net/sdk/openai/Azure.AI.OpenAI/README.png) \ No newline at end of file diff --git a/.dotnet.azure/eng/CodeAnalysis.ruleset b/.dotnet.azure/eng/CodeAnalysis.ruleset new file mode 100644 index 000000000..d6ade187a --- /dev/null +++ b/.dotnet.azure/eng/CodeAnalysis.ruleset @@ -0,0 +1,404 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/.dotnet.azure/eng/Directory.Build.Common.props b/.dotnet.azure/eng/Directory.Build.Common.props new file mode 100644 index 000000000..59a538ae8 --- /dev/null +++ b/.dotnet.azure/eng/Directory.Build.Common.props @@ -0,0 +1,56 @@ + + + + true + true + true + 11.0 + true + + $(NoWarn); + NU5105; + + CA1812; + CA1716; + CA1308; + CA1819; + CA1710; + CA1028; + CA1032; + CA1063; + CA1066; + CA1815; + CA2007; + CA2231; + CA2225; + CA1714; + CA1062; + CA1031; + CA2000; + CA2012; + + MSB3245; + AZPROVISION001; + + true + + + + + + netstandard2.0 + $(WarningsNotAsErrors);NU1901;NU1902;NU1903;NU1904 + $(RepoEngPath)\CodeAnalysis.ruleset + + + + + + net8.0;net6.0 + $(RequiredTargetFrameworks);net462 + + + + + + diff 
--git a/.dotnet.azure/eng/Directory.Build.Common.targets b/.dotnet.azure/eng/Directory.Build.Common.targets new file mode 100644 index 000000000..e3e5db31c --- /dev/null +++ b/.dotnet.azure/eng/Directory.Build.Common.targets @@ -0,0 +1,3 @@ + + + diff --git a/.dotnet.azure/eng/Packages.Data.props b/.dotnet.azure/eng/Packages.Data.props new file mode 100644 index 000000000..f868bc705 --- /dev/null +++ b/.dotnet.azure/eng/Packages.Data.props @@ -0,0 +1,395 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 1.0.0-dev.20240806.1 + + + diff --git a/.dotnet.azure/eng/common/testproxy/dotnet-devcert.pfx b/.dotnet.azure/eng/common/testproxy/dotnet-devcert.pfx new file mode 100644 index 000000000..1d59ff89a Binary files /dev/null and b/.dotnet.azure/eng/common/testproxy/dotnet-devcert.pfx differ diff --git a/.dotnet.azure/nuget.config b/.dotnet.azure/nuget.config new file mode 100644 index 000000000..1f889a235 --- /dev/null +++ b/.dotnet.azure/nuget.config @@ -0,0 +1,9 @@ + + + + + + + + + diff --git a/.dotnet.azure/sdk/openai/.gitignore b/.dotnet.azure/sdk/openai/.gitignore new file mode 100644 index 000000000..5e5364f16 --- /dev/null +++ b/.dotnet.azure/sdk/openai/.gitignore @@ -0,0 +1 @@ +#Azure.AI.OpenAI/Directory.Build.props \ No newline at end of file diff --git a/.dotnet.azure/Azure.AI.OpenAI.sln 
b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI.sln similarity index 64% rename from .dotnet.azure/Azure.AI.OpenAI.sln rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI.sln index 7962a56eb..20dcf6c40 100644 --- a/.dotnet.azure/Azure.AI.OpenAI.sln +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI.sln @@ -3,23 +3,27 @@ Microsoft Visual Studio Solution File, Format Version 12.00 # Visual Studio Version 17 VisualStudioVersion = 17.10.35004.147 MinimumVisualStudioVersion = 10.0.40219.1 -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Azure.AI.OpenAI", "src\Azure.AI.OpenAI.csproj", "{A80B9566-84A5-4AE4-AA0A-72B18646F1EC}" +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Azure.AI.OpenAI", "Azure.AI.OpenAI\src\Azure.AI.OpenAI.csproj", "{A80B9566-84A5-4AE4-AA0A-72B18646F1EC}" EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "OpenAI", "..\.dotnet\src\OpenAI.csproj", "{8BEE571B-DB25-4BE5-B9EB-2CA81D12EBC6}" +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "OpenAI", "..\..\..\.dotnet\src\OpenAI.csproj", "{8BEE571B-DB25-4BE5-B9EB-2CA81D12EBC6}" EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Azure.AI.OpenAI.Tests", ".tests.staging\Azure.AI.OpenAI.Tests.csproj", "{23DAB09E-3986-4248-AC80-2273C20FCD90}" +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Azure.AI.OpenAI.Tests", "Azure.AI.OpenAI\tests\Azure.AI.OpenAI.Tests.csproj", "{23DAB09E-3986-4248-AC80-2273C20FCD90}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "OpenAI.TestFramework", "tools\TestFramework\src\OpenAI.TestFramework.csproj", "{D1E3E196-BAA8-47C2-905A-B1C20733AEA8}" EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution Unsigned|Any CPU = Unsigned|Any CPU EndGlobalSection GlobalSection(ProjectConfigurationPlatforms) = postSolution - {A80B9566-84A5-4AE4-AA0A-72B18646F1EC}.Unsigned|Any CPU.ActiveCfg = Unsigned|Any CPU - {A80B9566-84A5-4AE4-AA0A-72B18646F1EC}.Unsigned|Any CPU.Build.0 = Unsigned|Any CPU + 
{A80B9566-84A5-4AE4-AA0A-72B18646F1EC}.Unsigned|Any CPU.ActiveCfg = Debug|Any CPU + {A80B9566-84A5-4AE4-AA0A-72B18646F1EC}.Unsigned|Any CPU.Build.0 = Debug|Any CPU {8BEE571B-DB25-4BE5-B9EB-2CA81D12EBC6}.Unsigned|Any CPU.ActiveCfg = Unsigned|Any CPU {8BEE571B-DB25-4BE5-B9EB-2CA81D12EBC6}.Unsigned|Any CPU.Build.0 = Unsigned|Any CPU - {23DAB09E-3986-4248-AC80-2273C20FCD90}.Unsigned|Any CPU.ActiveCfg = Unsigned|Any CPU - {23DAB09E-3986-4248-AC80-2273C20FCD90}.Unsigned|Any CPU.Build.0 = Unsigned|Any CPU + {23DAB09E-3986-4248-AC80-2273C20FCD90}.Unsigned|Any CPU.ActiveCfg = Debug|Any CPU + {23DAB09E-3986-4248-AC80-2273C20FCD90}.Unsigned|Any CPU.Build.0 = Debug|Any CPU + {D1E3E196-BAA8-47C2-905A-B1C20733AEA8}.Unsigned|Any CPU.ActiveCfg = Debug|Any CPU + {D1E3E196-BAA8-47C2-905A-B1C20733AEA8}.Unsigned|Any CPU.Build.0 = Debug|Any CPU EndGlobalSection GlobalSection(SolutionProperties) = preSolution HideSolutionNode = FALSE diff --git a/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/Directory.Build.props b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/Directory.Build.props new file mode 100644 index 000000000..924ecfa8f --- /dev/null +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/Directory.Build.props @@ -0,0 +1,20 @@ + + + + + + $(RepoRoot)/../.dotnet/src/OpenAI.csproj + 1.1.0-beta.5 + + + + + + + + + diff --git a/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/assets.json b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/assets.json new file mode 100644 index 000000000..78d850850 --- /dev/null +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/assets.json @@ -0,0 +1,6 @@ +{ + "AssetsRepo": "Azure/azure-sdk-assets", + "AssetsRepoPrefixPath": "net", + "TagPrefix": "dotnet.azure/openai/Azure.AI.OpenAI", + "Tag": "dotnet.azure/openai/Azure.AI.OpenAI_9a2f5cd1c9" +} diff --git a/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Azure.AI.OpenAI.csproj b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Azure.AI.OpenAI.csproj new file mode 100644 index 000000000..b1440d99b --- /dev/null +++ 
b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Azure.AI.OpenAI.csproj @@ -0,0 +1,65 @@ + + + + + Azure OpenAI's official extension package for using OpenAI's .NET library with the Azure OpenAI Service. + + Azure.AI.OpenAI Client Library + 2.0.0 + beta.5 + Microsoft Azure OpenAI + true + $(RequiredTargetFrameworks) + true + $(NoWarn);CS1591;AZC0012;AZC0102;CS8002;CS0436;AZC0112;OPENAI001 + enable + preview + disable + + + + + 0024000004800000940000000602000000240000525341310004000001000100d15ddcb29688295338af4b7686603fe614abd555e09efba8fb88ee09e1f7b1ccaeed2e8f823fa9eef3fdd60217fc012ea67d2479751a0b8c087a4185541b851bd8b16f8d91b840e51b1cb0ba6fe647997e57429265e85ef62d565db50a69ae1647d54d7bd855e4db3d8a91510e5bcbd0edfbbecaa20a7bd9ae74593daa7b11b4 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/.dotnet.azure/src/Custom/Assistants/AzureAssistantClient.Protocol.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Assistants/AzureAssistantClient.Protocol.cs similarity index 98% rename from .dotnet.azure/src/Custom/Assistants/AzureAssistantClient.Protocol.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Assistants/AzureAssistantClient.Protocol.cs index c21e36c6f..c1c987098 100644 --- a/.dotnet.azure/src/Custom/Assistants/AzureAssistantClient.Protocol.cs +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Assistants/AzureAssistantClient.Protocol.cs @@ -4,7 +4,6 @@ using System.ClientModel; using System.ClientModel.Primitives; using System.Diagnostics.CodeAnalysis; -using OpenAI.Assistants; namespace Azure.AI.OpenAI.Assistants; @@ -107,7 +106,7 @@ public override ClientResult CreateMessage(string threadId, BinaryContent conten return ClientResult.FromResponse(Pipeline.ProcessMessage(message, options)); } - /// + /// public override IAsyncEnumerable GetMessagesAsync(string threadId, int? 
limit, string order, string after, string before, RequestOptions options) { Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); @@ -116,7 +115,6 @@ public override IAsyncEnumerable GetMessagesAsync(string threadId, return PageCollectionHelpers.CreateAsync(enumerator); } - /// public override IEnumerable GetMessages(string threadId, int? limit, string order, string after, string before, RequestOptions options) { Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); @@ -259,7 +257,6 @@ public override ClientResult CreateRun(string threadId, BinaryContent content, R } } - /// public override IAsyncEnumerable GetRunsAsync(string threadId, int? limit, string order, string after, string before, RequestOptions options) { Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); @@ -268,7 +265,6 @@ public override IAsyncEnumerable GetRunsAsync(string threadId, int return PageCollectionHelpers.CreateAsync(enumerator); } - /// public override IEnumerable GetRuns(string threadId, int? limit, string order, string after, string before, RequestOptions options) { Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); @@ -379,7 +375,6 @@ public override ClientResult SubmitToolOutputsToRun(string threadId, string runI } } - /// public override IAsyncEnumerable GetRunStepsAsync(string threadId, string runId, int? limit, string order, string after, string before, RequestOptions options) { Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); @@ -389,7 +384,6 @@ public override IAsyncEnumerable GetRunStepsAsync(string threadId, return PageCollectionHelpers.CreateAsync(enumerator); } - /// public override IEnumerable GetRunSteps(string threadId, string runId, int? 
limit, string order, string after, string before, RequestOptions options) { Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); diff --git a/.dotnet.azure/src/Custom/Assistants/AzureAssistantClient.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Assistants/AzureAssistantClient.cs similarity index 70% rename from .dotnet.azure/src/Custom/Assistants/AzureAssistantClient.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Assistants/AzureAssistantClient.cs index aa35d2bc6..c8b48f925 100644 --- a/.dotnet.azure/src/Custom/Assistants/AzureAssistantClient.cs +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Assistants/AzureAssistantClient.cs @@ -1,7 +1,6 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. -using OpenAI.Assistants; using System.ClientModel.Primitives; namespace Azure.AI.OpenAI.Assistants; @@ -17,13 +16,13 @@ internal partial class AzureAssistantClient : AssistantClient private readonly Uri _endpoint; private readonly string _apiVersion; - internal AzureAssistantClient( - ClientPipeline pipeline, - Uri endpoint, - AzureOpenAIClientOptions options) - : base(pipeline, endpoint, options) + internal AzureAssistantClient(ClientPipeline pipeline, Uri endpoint, AzureOpenAIClientOptions options) + : base(pipeline, new OpenAIClientOptions() { Endpoint = endpoint }) { + Argument.AssertNotNull(pipeline, nameof(pipeline)); + Argument.AssertNotNull(endpoint, nameof(endpoint)); options ??= new(); + _endpoint = endpoint; _apiVersion = options.Version; } diff --git a/.dotnet.azure/src/Custom/Assistants/Internal/Pagination/AzureAssistantsPageEnumerator.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Assistants/Internal/Pagination/AzureAssistantsPageEnumerator.cs similarity index 95% rename from .dotnet.azure/src/Custom/Assistants/Internal/Pagination/AzureAssistantsPageEnumerator.cs rename to 
.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Assistants/Internal/Pagination/AzureAssistantsPageEnumerator.cs index 9b1975a15..f351279a7 100644 --- a/.dotnet.azure/src/Custom/Assistants/Internal/Pagination/AzureAssistantsPageEnumerator.cs +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Assistants/Internal/Pagination/AzureAssistantsPageEnumerator.cs @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + using System.ClientModel; using System.ClientModel.Primitives; diff --git a/.dotnet.azure/src/Custom/Assistants/Internal/Pagination/AzureMessagesPageEnumerator.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Assistants/Internal/Pagination/AzureMessagesPageEnumerator.cs similarity index 94% rename from .dotnet.azure/src/Custom/Assistants/Internal/Pagination/AzureMessagesPageEnumerator.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Assistants/Internal/Pagination/AzureMessagesPageEnumerator.cs index 46486b06b..79ad28d13 100644 --- a/.dotnet.azure/src/Custom/Assistants/Internal/Pagination/AzureMessagesPageEnumerator.cs +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Assistants/Internal/Pagination/AzureMessagesPageEnumerator.cs @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + using System.ClientModel; using System.ClientModel.Primitives; @@ -11,7 +14,7 @@ internal partial class AzureMessagesPageEnumerator : MessagesPageEnumerator public AzureMessagesPageEnumerator( ClientPipeline pipeline, Uri endpoint, - string threadId, + string threadId, int? 
limit, string order, string after, string before, string apiVersion, RequestOptions options) diff --git a/.dotnet.azure/src/Custom/Assistants/Internal/Pagination/AzureRunStepsPageEnumerator.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Assistants/Internal/Pagination/AzureRunStepsPageEnumerator.cs similarity index 95% rename from .dotnet.azure/src/Custom/Assistants/Internal/Pagination/AzureRunStepsPageEnumerator.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Assistants/Internal/Pagination/AzureRunStepsPageEnumerator.cs index 9d66fbc3a..e7f6ae902 100644 --- a/.dotnet.azure/src/Custom/Assistants/Internal/Pagination/AzureRunStepsPageEnumerator.cs +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Assistants/Internal/Pagination/AzureRunStepsPageEnumerator.cs @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + using System.ClientModel; using System.ClientModel.Primitives; diff --git a/.dotnet.azure/src/Custom/Assistants/Internal/Pagination/AzureRunsPageEnumerator.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Assistants/Internal/Pagination/AzureRunsPageEnumerator.cs similarity index 95% rename from .dotnet.azure/src/Custom/Assistants/Internal/Pagination/AzureRunsPageEnumerator.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Assistants/Internal/Pagination/AzureRunsPageEnumerator.cs index 0b394354a..65afbca20 100644 --- a/.dotnet.azure/src/Custom/Assistants/Internal/Pagination/AzureRunsPageEnumerator.cs +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Assistants/Internal/Pagination/AzureRunsPageEnumerator.cs @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + using System.ClientModel; using System.ClientModel.Primitives; @@ -20,7 +23,6 @@ public AzureRunsPageEnumerator( _apiVersion = apiVersion; } - internal override async Task GetRunsAsync(string threadId, int? 
limit, string order, string after, string before, RequestOptions options) { Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); diff --git a/.dotnet.azure/src/Custom/Audio/AzureAudioClient.Protocol.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Audio/AzureAudioClient.Protocol.cs similarity index 94% rename from .dotnet.azure/src/Custom/Audio/AzureAudioClient.Protocol.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Audio/AzureAudioClient.Protocol.cs index 98275f687..874b4f3be 100644 --- a/.dotnet.azure/src/Custom/Audio/AzureAudioClient.Protocol.cs +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Audio/AzureAudioClient.Protocol.cs @@ -41,14 +41,14 @@ public override async Task TranslateAudioAsync(BinaryContent conte } [EditorBrowsable(EditorBrowsableState.Never)] - public override ClientResult GenerateSpeechFromText(BinaryContent content, RequestOptions options = null) + public override ClientResult GenerateSpeech(BinaryContent content, RequestOptions options = null) { using PipelineMessage message = CreateGenerateSpeechFromTextRequestMessage(content, options); return ClientResult.FromResponse(Pipeline.ProcessMessage(message, options)); } [EditorBrowsable(EditorBrowsableState.Never)] - public override async Task GenerateSpeechFromTextAsync(BinaryContent content, RequestOptions options = null) + public override async Task GenerateSpeechAsync(BinaryContent content, RequestOptions options = null) { using PipelineMessage message = CreateGenerateSpeechFromTextRequestMessage(content, options); PipelineResponse response = await Pipeline.ProcessMessageAsync(message, options).ConfigureAwait(false); diff --git a/.dotnet.azure/src/Custom/Audio/AzureAudioClient.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Audio/AzureAudioClient.cs similarity index 65% rename from .dotnet.azure/src/Custom/Audio/AzureAudioClient.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Audio/AzureAudioClient.cs index 
1ad255b9b..f15480951 100644 --- a/.dotnet.azure/src/Custom/Audio/AzureAudioClient.cs +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Audio/AzureAudioClient.cs @@ -18,14 +18,14 @@ internal partial class AzureAudioClient : AudioClient private readonly Uri _endpoint; private readonly string _apiVersion; - internal AzureAudioClient( - ClientPipeline pipeline, - string deploymentName, - Uri endpoint, - AzureOpenAIClientOptions options) - : base(pipeline, model: deploymentName, endpoint, options) + internal AzureAudioClient(ClientPipeline pipeline, string deploymentName, Uri endpoint, AzureOpenAIClientOptions options) + : base(pipeline, model: deploymentName, new OpenAIClientOptions() { Endpoint = endpoint }) { + Argument.AssertNotNull(pipeline, nameof(pipeline)); + Argument.AssertNotNullOrEmpty(deploymentName, nameof(deploymentName)); + Argument.AssertNotNull(endpoint, nameof(endpoint)); options ??= new(); + _deploymentName = deploymentName; _endpoint = endpoint; _apiVersion = options.Version; diff --git a/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/AzureOpenAIAudience.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/AzureOpenAIAudience.cs new file mode 100644 index 000000000..719fcdd8b --- /dev/null +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/AzureOpenAIAudience.cs @@ -0,0 +1,69 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using System.ComponentModel; + +namespace Azure.AI.OpenAI; + +/// +/// Represents cloud authentication audiences available for Azure OpenAI. +/// These audiences correspond to authorization token authentication scopes. +/// +public readonly partial struct AzureOpenAIAudience : IEquatable +{ + private readonly string _value; + + /// + /// Initializes a new instance of the object. + /// + /// + /// Please consider using one of the known, valid values like or . + /// + /// + /// The Microsoft Entra audience to use when forming authorization scopes. 
+ /// For Azure OpenAI, this value corresponds to a URL that identifies the Azure cloud where the resource is located. + /// For more information: . + /// + /// is null. + public AzureOpenAIAudience(string value) + { + Argument.AssertNotNullOrEmpty(value, nameof(value)); + _value = value; + } + + private const string AzurePublicCloudValue = "https://cognitiveservices.azure.com/.default"; + private const string AzureGovernmentValue = "https://cognitiveservices.azure.us/.default"; + + /// + /// The authorization audience used to connect to the public Azure cloud. Default if not otherwise specified. + /// + public static AzureOpenAIAudience AzurePublicCloud { get; } = new AzureOpenAIAudience(AzurePublicCloudValue); + + /// + /// The authorization audience used to authenticate with the Azure Government cloud. + /// + /// + /// For more information, please refer to + /// . + /// + public static AzureOpenAIAudience AzureGovernment { get; } = new AzureOpenAIAudience(AzureGovernmentValue); + + /// Determines if two values are the same. + public static bool operator ==(AzureOpenAIAudience left, AzureOpenAIAudience right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(AzureOpenAIAudience left, AzureOpenAIAudience right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator AzureOpenAIAudience(string value) => new AzureOpenAIAudience(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is AzureOpenAIAudience other && Equals(other); + /// + public bool Equals(AzureOpenAIAudience other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value != null ? 
StringComparer.InvariantCultureIgnoreCase.GetHashCode(_value) : 0; + /// + public override string ToString() => _value; +} diff --git a/.dotnet.azure/src/Custom/AzureOpenAIClient.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/AzureOpenAIClient.cs similarity index 63% rename from .dotnet.azure/src/Custom/AzureOpenAIClient.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/AzureOpenAIClient.cs index 62f5a86b0..7336207a0 100644 --- a/.dotnet.azure/src/Custom/AzureOpenAIClient.cs +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/AzureOpenAIClient.cs @@ -41,6 +41,7 @@ namespace Azure.AI.OpenAI; /// public partial class AzureOpenAIClient : OpenAIClient { + private readonly Uri _endpoint; private readonly AzureOpenAIClientOptions _options; /// @@ -53,64 +54,77 @@ public partial class AzureOpenAIClient : OpenAIClient /// /// /// - /// - /// - /// The Azure OpenAI resource endpoint to use. This should not include model deployment or operation information. - /// + /// The Azure OpenAI resource endpoint to use. This should not include model deployment or operation information. For example: https://my-resource.openai.azure.com. + /// The API key to authenticate with the service. + public AzureOpenAIClient(Uri endpoint, ApiKeyCredential credential) : this(endpoint, credential, new AzureOpenAIClientOptions()) + { + } + + /// + /// Creates a new instance of that will connect to a specified Azure OpenAI + /// service resource endpoint using an API key. + /// + /// /// - /// Example: https://my-resource.openai.azure.com + /// For token-based authentication, including the use of managed identity, please use the alternate constructor: + /// /// - /// - /// The API key to use when authenticating with the specified endpoint. - /// Additional options for the client. 
- public AzureOpenAIClient(Uri endpoint, ApiKeyCredential credential, AzureOpenAIClientOptions options = null) - : this( - CreatePipeline(GetApiKey(credential, requireExplicitCredential: true), options), - GetEndpoint(endpoint, requireExplicitEndpoint: true), - options) - {} - - /// - public AzureOpenAIClient(Uri endpoint, AzureKeyCredential credential, AzureOpenAIClientOptions options = null) - : this( - CreatePipeline(GetApiKey(new ApiKeyCredential(credential?.Key), requireExplicitCredential: true), options), - GetEndpoint(endpoint, requireExplicitEndpoint: true), - options) - {} + /// + /// The Azure OpenAI resource endpoint to use. This should not include model deployment or operation information. For example: https://my-resource.openai.azure.com. + /// The API key to authenticate with the service. + public AzureOpenAIClient(Uri endpoint, AzureKeyCredential credential) : this(endpoint, credential, new AzureOpenAIClientOptions()) + { + } /// /// Creates a new instance of that will connect to an Azure OpenAI service resource - /// using endpoint and authentication settings from available configuration information. + /// using token authentication, including for tokens issued via managed identity. + /// + /// + /// For API-key-based authentication, please use the alternate constructor: + /// + /// + /// The Azure OpenAI resource endpoint to use. This should not include model deployment or operation information. For example: https://my-resource.openai.azure.com. + /// The token credential to authenticate with the service. + public AzureOpenAIClient(Uri endpoint, TokenCredential credential) : this(endpoint, credential, new AzureOpenAIClientOptions()) + { + } + + /// + /// Creates a new instance of that will connect to a specified Azure OpenAI + /// service resource endpoint using an API key. 
/// /// /// /// For token-based authentication, including the use of managed identity, please use the alternate constructor: /// /// + /// + /// The Azure OpenAI resource endpoint to use. This should not include model deployment or operation information. For example: https://my-resource.openai.azure.com. + /// The API key to authenticate with the service. + /// The options to configure the client. + public AzureOpenAIClient(Uri endpoint, ApiKeyCredential credential, AzureOpenAIClientOptions options) + : this(CreatePipeline(credential, options), endpoint, options) + { + } + + /// + /// Creates a new instance of that will connect to a specified Azure OpenAI + /// service resource endpoint using an API key. + /// + /// /// - /// The client selects its resource endpoint in the following order of precedence: - /// - /// The property on , if available - /// The setting in an applicable IConfiguration instance, if available - /// The value of the AZURE_OPENAI_ENDPOINT environment variable, if present - /// - /// - /// The client selects its API key credential in the following order of precedence: - /// - /// The setting in an applicable IConfiguration instance, if available - /// The value of the AZURE_OPENAI_API_KEY environment variable, if present - /// - /// - /// Note: resource endpoints should not include model deployment or operation information. - /// - /// - /// Example: https://my-resource.openai.azure.com + /// For token-based authentication, including the use of managed identity, please use the alternate constructor: + /// /// /// - /// Additional options for the client. - public AzureOpenAIClient(AzureOpenAIClientOptions options = null) - : this(CreatePipeline(GetApiKey(), options), GetEndpoint(), options) - {} + /// The Azure OpenAI resource endpoint to use. This should not include model deployment or operation information. For example: https://my-resource.openai.azure.com. + /// The API key to authenticate with the service. 
+ /// The options to configure the client. + public AzureOpenAIClient(Uri endpoint, AzureKeyCredential credential, AzureOpenAIClientOptions options) + : this(CreatePipeline(credential?.Key, options), endpoint, options) + { + } /// /// Creates a new instance of that will connect to an Azure OpenAI service resource @@ -128,15 +142,11 @@ public AzureOpenAIClient(AzureOpenAIClientOptions options = null) /// Example: https://my-resource.openai.azure.com /// /// - /// - /// - /// The API key to use when authenticating with the provided endpoint. - /// - /// + /// The API key to use when authenticating with the provided endpoint. /// The scenario-independent options to use. public AzureOpenAIClient(Uri endpoint, TokenCredential credential, AzureOpenAIClientOptions options = null) - : this(CreatePipeline(credential, options), GetEndpoint(endpoint, requireExplicitEndpoint: true), options) - {} + : this(CreatePipeline(credential, options), endpoint, options) + { } /// /// Creates a new instance of . @@ -145,8 +155,13 @@ public AzureOpenAIClient(Uri endpoint, TokenCredential credential, AzureOpenAICl /// The endpoint to use. /// The additional client options to use. protected AzureOpenAIClient(ClientPipeline pipeline, Uri endpoint, AzureOpenAIClientOptions options) - : base(pipeline, endpoint, options) + : base(pipeline, new OpenAIClientOptions() { Endpoint = endpoint }) { + Argument.AssertNotNull(pipeline, nameof(pipeline)); + Argument.AssertNotNull(endpoint, nameof(endpoint)); + options ??= new(); + + _endpoint = endpoint; _options = options; } @@ -162,7 +177,7 @@ protected AzureOpenAIClient() /// A new instance. [Experimental("OPENAI001")] public override AssistantClient GetAssistantClient() - => new AzureAssistantClient(Pipeline, Endpoint, _options); + => new AzureAssistantClient(Pipeline, _endpoint, _options); /// /// Gets a new instance configured for audio operation use with the Azure OpenAI service. 
@@ -170,21 +185,23 @@ public override AssistantClient GetAssistantClient() /// The model deployment name to use for the new client's audio operations. /// A new instance. public override AudioClient GetAudioClient(string deploymentName) - => new AzureAudioClient(Pipeline, deploymentName, Endpoint, _options); + => new AzureAudioClient(Pipeline, deploymentName, _endpoint, _options); /// /// Gets a new instance configured for batch operation use with the Azure OpenAI service. /// /// The model deployment name to use for the new client's audio operations. /// A new instance. + [Experimental("OPENAI001")] public BatchClient GetBatchClient(string deploymentName) - => new AzureBatchClient(Pipeline, deploymentName, Endpoint, _options); + => new AzureBatchClient(Pipeline, deploymentName, _endpoint, _options); /// /// This method is unsupported for Azure OpenAI. Please use the alternate /// method that accepts a model deployment name, instead. /// [EditorBrowsable(EditorBrowsableState.Never)] + [Experimental("OPENAI001")] public override BatchClient GetBatchClient() => GetBatchClient(deploymentName: null); /// @@ -193,7 +210,7 @@ public BatchClient GetBatchClient(string deploymentName) /// The model deployment name to use for the new client's chat completion operations. /// A new instance. public override ChatClient GetChatClient(string deploymentName) - => new AzureChatClient(Pipeline, deploymentName, Endpoint, _options); + => new AzureChatClient(Pipeline, deploymentName, _endpoint, _options); /// /// Gets a new instance configured for embedding operation use with the Azure OpenAI service. @@ -201,21 +218,22 @@ public override ChatClient GetChatClient(string deploymentName) /// The model deployment name to use for the new client's embedding operations. /// A new instance. 
public override EmbeddingClient GetEmbeddingClient(string deploymentName) - => new AzureEmbeddingClient(Pipeline, deploymentName, Endpoint, _options); + => new AzureEmbeddingClient(Pipeline, deploymentName, _endpoint, _options); /// /// Gets a new instance configured for file operation use with the Azure OpenAI service. /// /// A new instance. public override FileClient GetFileClient() - => new AzureFileClient(Pipeline, Endpoint, _options); + => new AzureFileClient(Pipeline, _endpoint, _options); /// /// Gets a new instance configured for fine-tuning operation use with the Azure OpenAI service. /// /// A new instance. + [Experimental("OPENAI001")] public override FineTuningClient GetFineTuningClient() - => new AzureFineTuningClient(Pipeline, Endpoint, _options); + => new AzureFineTuningClient(Pipeline, _endpoint, _options); /// /// Gets a new instance configured for image operation use with the Azure OpenAI service. @@ -223,7 +241,7 @@ public override FineTuningClient GetFineTuningClient() /// The model deployment name to use for the new client's image operations. /// A new instance. public override ImageClient GetImageClient(string deploymentName) - => new AzureImageClient(Pipeline, deploymentName, Endpoint, _options); + => new AzureImageClient(Pipeline, deploymentName, _endpoint, _options); /// /// Model management operations are not supported with Azure OpenAI. @@ -249,16 +267,19 @@ public override ModerationClient GetModerationClient(string _) /// A new instance. [Experimental("OPENAI001")] public override VectorStoreClient GetVectorStoreClient() - => new AzureVectorStoreClient(Pipeline, Endpoint, _options); + => new AzureVectorStoreClient(Pipeline, _endpoint, _options); private static ClientPipeline CreatePipeline(PipelinePolicy authenticationPolicy, AzureOpenAIClientOptions options) => ClientPipeline.Create( options ?? 
new(), - perCallPolicies: [], + perCallPolicies: + [ + CreateAddUserAgentHeaderPolicy(options), + CreateAddClientRequestIdHeaderPolicy(), + ], perTryPolicies: [ authenticationPolicy, - CreateAddUserAgentHeaderPolicy(options), ], beforeTransportPolicies: []); @@ -271,66 +292,14 @@ internal static ClientPipeline CreatePipeline(ApiKeyCredential credential, Azure internal static ClientPipeline CreatePipeline(TokenCredential credential, AzureOpenAIClientOptions options = null) { Argument.AssertNotNull(credential, nameof(credential)); - return CreatePipeline(new AzureTokenAuthenticationPolicy(credential), options); - } - - internal static new ApiKeyCredential GetApiKey(ApiKeyCredential explicitCredential = null, bool requireExplicitCredential = false) - { - if (explicitCredential is not null) - { - return explicitCredential; - } - // To do: IConfiguration support - else if (requireExplicitCredential) - { - throw new ArgumentNullException(nameof(explicitCredential), $"A non-null credential value is required."); - } - else - { - string environmentApiKey = Environment.GetEnvironmentVariable(s_aoaiApiKeyEnvironmentVariable); - if (string.IsNullOrEmpty(environmentApiKey)) - { - throw new InvalidOperationException( - $"No environment variable value was found for AZURE_OPENAI_API_KEY. 
" - + "Please either populate this environment variable or provide authentication information directly " - + "to the client constructor."); - } - return new(environmentApiKey); - } - } - - internal static Uri GetEndpoint(Uri explicitEndpoint = null, bool requireExplicitEndpoint = false, AzureOpenAIClientOptions options = null) - { - if (explicitEndpoint is not null) - { - return explicitEndpoint; - } - else if (options?.Endpoint is not null) - { - return options.Endpoint; - } - // To do: IConfiguration support - else if (requireExplicitEndpoint) - { - throw new ArgumentNullException(nameof(explicitEndpoint), $"A non-null endpoint value is required."); - } - else - { - string environmentApiKey = Environment.GetEnvironmentVariable(s_aoaiEndpointEnvironmentVariable); - if (string.IsNullOrEmpty(environmentApiKey)) - { - throw new InvalidOperationException( - $"No environment variable value was found for AZURE_OPENAI_ENDPOINT. " - + "Please either populate this environment variable or provide endpoint information directly " - + "to the client constructor."); - } - return new(environmentApiKey); - } + string authorizationScope = options?.Audience?.ToString() + ?? 
AzureOpenAIAudience.AzurePublicCloud.ToString(); + return CreatePipeline(new AzureTokenAuthenticationPolicy(credential, [authorizationScope]), options); } private static PipelinePolicy CreateAddUserAgentHeaderPolicy(AzureOpenAIClientOptions options = null) { - Core.TelemetryDetails telemetryDetails = new(typeof(AzureOpenAIClient).Assembly); + Core.TelemetryDetails telemetryDetails = new(typeof(AzureOpenAIClient).Assembly, options?.ApplicationId); return new GenericActionPipelinePolicy( requestAction: request => { @@ -341,10 +310,23 @@ private static PipelinePolicy CreateAddUserAgentHeaderPolicy(AzureOpenAIClientOp }); } - private static readonly string s_aoaiEndpointEnvironmentVariable = "AZURE_OPENAI_ENDPOINT"; - private static readonly string s_aoaiApiKeyEnvironmentVariable = "AZURE_OPENAI_API_KEY"; + private static PipelinePolicy CreateAddClientRequestIdHeaderPolicy() + { + return new GenericActionPipelinePolicy(request => + { + if (request?.Headers is not null) + { + string requestId = request.Headers.TryGetValue(s_clientRequestIdHeaderKey, out string existingHeader) == true + ? 
existingHeader + : Guid.NewGuid().ToString().ToLowerInvariant(); + request.Headers.Set(s_clientRequestIdHeaderKey, requestId); + } + }); + } + private static readonly string s_userAgentHeaderKey = "User-Agent"; - private static PipelineMessageClassifier _pipelineMessageClassifier; + private static readonly string s_clientRequestIdHeaderKey = "x-ms-client-request-id"; + private static PipelineMessageClassifier s_pipelineMessageClassifier; internal static PipelineMessageClassifier PipelineMessageClassifier - => _pipelineMessageClassifier ??= PipelineMessageClassifier.Create(stackalloc ushort[] { 200, 201 }); + => s_pipelineMessageClassifier ??= PipelineMessageClassifier.Create(stackalloc ushort[] { 200, 201 }); } diff --git a/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/AzureOpenAIClientOptions.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/AzureOpenAIClientOptions.cs new file mode 100644 index 000000000..1b2989bfb --- /dev/null +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/AzureOpenAIClientOptions.cs @@ -0,0 +1,103 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using System.ClientModel.Primitives; + +namespace Azure.AI.OpenAI; + +/// +/// Defines the scenario-independent, client-level options for the Azure-specific OpenAI client. +/// +public partial class AzureOpenAIClientOptions : ClientPipelineOptions +{ + internal string Version { get; } + + /// + /// The authorization audience to use when authenticating with Azure authentication tokens + /// + /// + /// By default, the public Azure cloud will be used to authenticate tokens. Modify this value to authenticate tokens + /// with other clouds like Azure Government. + /// + public AzureOpenAIAudience? Audience + { + get => _authorizationAudience; + set + { + AssertNotFrozen(); + _authorizationAudience = value; + } + } + private AzureOpenAIAudience? 
_authorizationAudience; + + /// + public string ApplicationId + { + get => _applicationId; + set + { + AssertNotFrozen(); + _applicationId = value; + } + } + private string _applicationId; + + /// + /// Initializes a new instance of + /// + /// The service API version to use with the client. + /// The provided service API version is not supported. + public AzureOpenAIClientOptions(ServiceVersion version = LatestVersion) + : base() + { + Version = version switch + { + ServiceVersion.V2024_04_01_Preview => "2024-04-01-preview", + ServiceVersion.V2024_05_01_Preview => "2024-05-01-preview", + ServiceVersion.V2024_06_01 => "2024-06-01", + ServiceVersion.V2024_07_01_Preview => "2024-07-01-preview", + _ => throw new NotSupportedException() + }; + RetryPolicy = new RetryWithDelaysPolicy(); + } + + /// The version of the service to use. + public enum ServiceVersion + { + /// Service version "2024-04-01-preview". + V2024_04_01_Preview = 7, + V2024_05_01_Preview = 8, + V2024_06_01 = 9, + V2024_07_01_Preview = 10, + } + + internal class RetryWithDelaysPolicy : ClientRetryPolicy + { + protected override TimeSpan GetNextDelay(PipelineMessage message, int tryCount) + { + TimeSpan? TryGetTimeSpanFromHeader(string headerName, int millisecondsPerValue = 1, bool allowDateTimeOffset = false) + { + if (double.TryParse( + message?.Response?.Headers?.TryGetValue(headerName, out string textValue) == true ? textValue : null, + out double doubleValue) == true) + { + return TimeSpan.FromMilliseconds(millisecondsPerValue * doubleValue); + } + else if (allowDateTimeOffset && DateTimeOffset.TryParse(headerName, out DateTimeOffset delayUntil)) + { + return delayUntil - DateTimeOffset.Now; + } + return null; + } + + TimeSpan? delayFromHeader = + TryGetTimeSpanFromHeader("retry-after-ms") + ?? TryGetTimeSpanFromHeader("x-ms-retry-after-ms") + ?? TryGetTimeSpanFromHeader("Retry-After", millisecondsPerValue: 1000, allowDateTimeOffset: true); + + return delayFromHeader ?? 
base.GetNextDelay(message, tryCount); + } + } + + private const ServiceVersion LatestVersion = ServiceVersion.V2024_07_01_Preview; +} diff --git a/.dotnet.azure/src/Custom/AzureTokenAuthenticationPolicy.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/AzureTokenAuthenticationPolicy.cs similarity index 60% rename from .dotnet.azure/src/Custom/AzureTokenAuthenticationPolicy.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/AzureTokenAuthenticationPolicy.cs index bf427a097..6fb81cb1a 100644 --- a/.dotnet.azure/src/Custom/AzureTokenAuthenticationPolicy.cs +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/AzureTokenAuthenticationPolicy.cs @@ -3,24 +3,32 @@ using Azure.Core; using System.ClientModel.Primitives; +using System.Net; namespace Azure.AI.OpenAI; internal partial class AzureTokenAuthenticationPolicy : PipelinePolicy { private readonly TokenCredential _credential; + private readonly string[] _scopes; + private readonly TimeSpan _refreshOffset; private AccessToken? _currentToken; - public AzureTokenAuthenticationPolicy(TokenCredential credential) + public AzureTokenAuthenticationPolicy(TokenCredential credential, IEnumerable scopes, TimeSpan? refreshOffset = null) { + Argument.AssertNotNull(credential, nameof(credential)); + Argument.AssertNotNull(scopes, nameof(scopes)); + _credential = credential; + _scopes = scopes.ToArray(); + _refreshOffset = refreshOffset ?? 
s_defaultRefreshOffset; } public override void Process(PipelineMessage message, IReadOnlyList pipeline, int currentIndex) { if (message?.Request is not null) { - if (!_currentToken.HasValue || _currentToken.Value.ExpiresOn < DateTimeOffset.UtcNow + TimeSpan.FromSeconds(30)) + if (!IsTokenFresh()) { TokenRequestContext tokenRequestContext = CreateRequestContext(message.Request); _currentToken = _credential.GetToken(tokenRequestContext, cancellationToken: default); @@ -28,13 +36,17 @@ public override void Process(PipelineMessage message, IReadOnlyList pipeline, int currentIndex) { if (message?.Request is not null) { - if (!_currentToken.HasValue || _currentToken.Value.ExpiresOn < DateTimeOffset.UtcNow + TimeSpan.FromSeconds(30)) + if (!IsTokenFresh()) { TokenRequestContext tokenRequestContext = CreateRequestContext(message.Request); _currentToken @@ -43,15 +55,26 @@ public override async ValueTask ProcessAsync(PipelineMessage message, IReadOnlyL message?.Request?.Headers?.Add("Authorization", $"Bearer {_currentToken.Value.Token}"); } await ProcessNextAsync(message, pipeline, currentIndex).ConfigureAwait(false); + if (message?.Response?.Status == (int)HttpStatusCode.Unauthorized) + { + _currentToken = null; + } + } + + private bool IsTokenFresh() + { + if (!_currentToken.HasValue) return false; + DateTimeOffset refreshAt = _currentToken.Value.RefreshOn ?? (_currentToken.Value.ExpiresOn - _refreshOffset); + return DateTimeOffset.UtcNow < refreshAt; } - private static TokenRequestContext CreateRequestContext(PipelineRequest request) + private TokenRequestContext CreateRequestContext(PipelineRequest request) { string clientRequestId = request.Headers.TryGetValue("x-ms-client-request-id", out string messageClientId) == true ? 
messageClientId : null; - return new TokenRequestContext(DefaultAuthorizationScopes, clientRequestId); + return new TokenRequestContext(_scopes, clientRequestId); } - private static readonly string[] DefaultAuthorizationScopes = ["https://cognitiveservices.azure.com/.default"]; -} \ No newline at end of file + private static readonly TimeSpan s_defaultRefreshOffset = TimeSpan.FromMinutes(5); +} diff --git a/.dotnet.azure/src/Custom/Batch/AzureBatchClient.Protocol.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Batch/AzureBatchClient.Protocol.cs similarity index 87% rename from .dotnet.azure/src/Custom/Batch/AzureBatchClient.Protocol.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Batch/AzureBatchClient.Protocol.cs index 15d78fc00..00cec454c 100644 --- a/.dotnet.azure/src/Custom/Batch/AzureBatchClient.Protocol.cs +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Batch/AzureBatchClient.Protocol.cs @@ -3,7 +3,6 @@ using System.ClientModel; using System.ClientModel.Primitives; -using OpenAI.Batch; namespace Azure.AI.OpenAI.Batch; @@ -25,16 +24,16 @@ public override ClientResult CreateBatch(BinaryContent content, RequestOptions o return ClientResult.FromResponse(Pipeline.ProcessMessage(message, options)); } - public override async Task GetBatchesAsync(string after, int? limit, RequestOptions options) + public override IAsyncEnumerable GetBatchesAsync(string after, int? limit, RequestOptions options) { - using PipelineMessage message = CreateGetBatchesRequest(after, limit, options); - return ClientResult.FromResponse(await Pipeline.ProcessMessageAsync(message, options).ConfigureAwait(false)); + BatchesPageEnumerator enumerator = new(Pipeline, _endpoint, after, limit, options); + return PageCollectionHelpers.CreateAsync(enumerator); } - public override ClientResult GetBatches(string after, int? limit, RequestOptions options) + public override IEnumerable GetBatches(string after, int? 
limit, RequestOptions options) { - using PipelineMessage message = CreateGetBatchesRequest(after, limit, options); - return ClientResult.FromResponse(Pipeline.ProcessMessage(message, options)); + BatchesPageEnumerator enumerator = new(Pipeline, _endpoint, after, limit, options); + return PageCollectionHelpers.Create(enumerator); } public override async Task GetBatchAsync(string batchId, RequestOptions options) diff --git a/.dotnet.azure/src/Custom/Batch/AzureBatchClient.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Batch/AzureBatchClient.cs similarity index 73% rename from .dotnet.azure/src/Custom/Batch/AzureBatchClient.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Batch/AzureBatchClient.cs index 56058a27e..e04d9daf9 100644 --- a/.dotnet.azure/src/Custom/Batch/AzureBatchClient.cs +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Batch/AzureBatchClient.cs @@ -18,15 +18,14 @@ internal partial class AzureBatchClient : BatchClient private readonly string _deploymentName; private readonly string _apiVersion; - internal AzureBatchClient( - ClientPipeline pipeline, - string deploymentName, - Uri endpoint, - AzureOpenAIClientOptions options) - : base(pipeline, endpoint, options) + internal AzureBatchClient(ClientPipeline pipeline, string deploymentName, Uri endpoint, AzureOpenAIClientOptions options) + : base(pipeline, new OpenAIClientOptions() { Endpoint = endpoint }) { - options ??= new(); + Argument.AssertNotNull(pipeline, nameof(pipeline)); Argument.AssertNotNullOrEmpty(deploymentName, nameof(deploymentName)); + Argument.AssertNotNull(endpoint, nameof(endpoint)); + options ??= new(); + _deploymentName = deploymentName; _endpoint = endpoint; _apiVersion = options.Version; diff --git a/.dotnet.azure/src/Custom/Chat/AzureChatClient.Protocol.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Chat/AzureChatClient.Protocol.cs similarity index 100% rename from .dotnet.azure/src/Custom/Chat/AzureChatClient.Protocol.cs rename to 
.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Chat/AzureChatClient.Protocol.cs diff --git a/.dotnet.azure/src/Custom/Chat/AzureChatClient.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Chat/AzureChatClient.cs similarity index 79% rename from .dotnet.azure/src/Custom/Chat/AzureChatClient.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Chat/AzureChatClient.cs index 89949a52d..86ca7d229 100644 --- a/.dotnet.azure/src/Custom/Chat/AzureChatClient.cs +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Chat/AzureChatClient.cs @@ -21,14 +21,14 @@ internal partial class AzureChatClient : ChatClient private readonly Uri _endpoint; private readonly string _apiVersion; - internal AzureChatClient( - ClientPipeline pipeline, - string deploymentName, - Uri endpoint, - AzureOpenAIClientOptions options) - : base(pipeline, model: deploymentName, endpoint, options) + internal AzureChatClient(ClientPipeline pipeline, string deploymentName, Uri endpoint, AzureOpenAIClientOptions options) + : base(pipeline, model: deploymentName, new OpenAIClientOptions() { Endpoint = endpoint }) { + Argument.AssertNotNull(pipeline, nameof(pipeline)); + Argument.AssertNotNullOrEmpty(deploymentName, nameof(deploymentName)); + Argument.AssertNotNull(endpoint, nameof(endpoint)); options ??= new(); + _deploymentName = deploymentName; _endpoint = endpoint; _apiVersion = options.Version; diff --git a/.dotnet.azure/src/Custom/Chat/AzureChatCompletion.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Chat/AzureChatCompletion.cs similarity index 100% rename from .dotnet.azure/src/Custom/Chat/AzureChatCompletion.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Chat/AzureChatCompletion.cs diff --git a/.dotnet.azure/src/Custom/Chat/AzureChatCompletionOptions.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Chat/AzureChatCompletionOptions.cs similarity index 93% rename from .dotnet.azure/src/Custom/Chat/AzureChatCompletionOptions.cs rename to 
.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Chat/AzureChatCompletionOptions.cs index 26d4cc919..54ab5606e 100644 --- a/.dotnet.azure/src/Custom/Chat/AzureChatCompletionOptions.cs +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Chat/AzureChatCompletionOptions.cs @@ -4,7 +4,6 @@ using System.Diagnostics.CodeAnalysis; using Azure.AI.OpenAI.Chat; using Azure.AI.OpenAI.Internal; -using OpenAI.Chat; namespace Azure.AI.OpenAI; @@ -13,6 +12,8 @@ public static partial class AzureChatCompletionOptionsExtensions [Experimental("AOAI001")] public static void AddDataSource(this ChatCompletionOptions options, AzureChatDataSource dataSource) { + options.SerializedAdditionalRawData ??= new Dictionary(); + IList existingSources = AdditionalPropertyHelpers.GetAdditionalListProperty( options.SerializedAdditionalRawData, diff --git a/.dotnet.azure/src/Custom/Chat/AzureStreamingChatCompletionUpdate.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Chat/AzureStreamingChatCompletionUpdate.cs similarity index 100% rename from .dotnet.azure/src/Custom/Chat/AzureStreamingChatCompletionUpdate.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Chat/AzureStreamingChatCompletionUpdate.cs diff --git a/.dotnet.azure/src/Custom/Chat/GeneratorStubs.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Chat/GeneratorStubs.cs similarity index 100% rename from .dotnet.azure/src/Custom/Chat/GeneratorStubs.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Chat/GeneratorStubs.cs diff --git a/.dotnet.azure/src/Custom/Chat/Internal/GeneratorStubs.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Chat/Internal/GeneratorStubs.cs similarity index 100% rename from .dotnet.azure/src/Custom/Chat/Internal/GeneratorStubs.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Chat/Internal/GeneratorStubs.cs diff --git a/.dotnet.azure/src/Custom/Chat/Internal/InternalAzureChatDataSourceEndpointVectorizationSource.cs 
b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Chat/Internal/InternalAzureChatDataSourceEndpointVectorizationSource.cs similarity index 100% rename from .dotnet.azure/src/Custom/Chat/Internal/InternalAzureChatDataSourceEndpointVectorizationSource.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Chat/Internal/InternalAzureChatDataSourceEndpointVectorizationSource.cs diff --git a/.dotnet.azure/src/Custom/Chat/Internal/InternalAzureCosmosDBChatDataSourceParameters.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Chat/Internal/InternalAzureCosmosDBChatDataSourceParameters.cs similarity index 98% rename from .dotnet.azure/src/Custom/Chat/Internal/InternalAzureCosmosDBChatDataSourceParameters.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Chat/Internal/InternalAzureCosmosDBChatDataSourceParameters.cs index 66b46bc1a..a3a7a1bc3 100644 --- a/.dotnet.azure/src/Custom/Chat/Internal/InternalAzureCosmosDBChatDataSourceParameters.cs +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Chat/Internal/InternalAzureCosmosDBChatDataSourceParameters.cs @@ -1,8 +1,6 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. 
-using System.Collections.Generic; - namespace Azure.AI.OpenAI.Chat; [CodeGenModel("AzureCosmosDBChatDataSourceParameters")] diff --git a/.dotnet.azure/src/Custom/Chat/Internal/InternalAzureMachineLearningIndexDataSourceParameters.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Chat/Internal/InternalAzureMachineLearningIndexDataSourceParameters.cs similarity index 97% rename from .dotnet.azure/src/Custom/Chat/Internal/InternalAzureMachineLearningIndexDataSourceParameters.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Chat/Internal/InternalAzureMachineLearningIndexDataSourceParameters.cs index b678c72f0..07930fab6 100644 --- a/.dotnet.azure/src/Custom/Chat/Internal/InternalAzureMachineLearningIndexDataSourceParameters.cs +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Chat/Internal/InternalAzureMachineLearningIndexDataSourceParameters.cs @@ -1,8 +1,6 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. -using System.Collections.Generic; - namespace Azure.AI.OpenAI.Chat; [CodeGenModel("AzureMachineLearningIndexChatDataSourceParameters")] diff --git a/.dotnet.azure/src/Custom/Chat/Internal/InternalAzureSearchChatDataSourceParameters.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Chat/Internal/InternalAzureSearchChatDataSourceParameters.cs similarity index 98% rename from .dotnet.azure/src/Custom/Chat/Internal/InternalAzureSearchChatDataSourceParameters.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Chat/Internal/InternalAzureSearchChatDataSourceParameters.cs index 5f85bd94c..a8cc7c74f 100644 --- a/.dotnet.azure/src/Custom/Chat/Internal/InternalAzureSearchChatDataSourceParameters.cs +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Chat/Internal/InternalAzureSearchChatDataSourceParameters.cs @@ -1,8 +1,6 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. 
-using System.Collections.Generic; - namespace Azure.AI.OpenAI.Chat; [CodeGenModel("AzureSearchChatDataSourceParameters")] diff --git a/.dotnet.azure/src/Custom/Chat/Internal/InternalElasticsearchChatDataSourceParameters.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Chat/Internal/InternalElasticsearchChatDataSourceParameters.cs similarity index 98% rename from .dotnet.azure/src/Custom/Chat/Internal/InternalElasticsearchChatDataSourceParameters.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Chat/Internal/InternalElasticsearchChatDataSourceParameters.cs index 7e4e62899..568067e4c 100644 --- a/.dotnet.azure/src/Custom/Chat/Internal/InternalElasticsearchChatDataSourceParameters.cs +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Chat/Internal/InternalElasticsearchChatDataSourceParameters.cs @@ -1,8 +1,6 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. -using System.Collections.Generic; - namespace Azure.AI.OpenAI.Chat; [CodeGenModel("ElasticsearchChatDataSourceParameters")] diff --git a/.dotnet.azure/src/Custom/Chat/Internal/InternalPineconeChatDataSourceParameters.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Chat/Internal/InternalPineconeChatDataSourceParameters.cs similarity index 98% rename from .dotnet.azure/src/Custom/Chat/Internal/InternalPineconeChatDataSourceParameters.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Chat/Internal/InternalPineconeChatDataSourceParameters.cs index a0ceb7784..3b9288b15 100644 --- a/.dotnet.azure/src/Custom/Chat/Internal/InternalPineconeChatDataSourceParameters.cs +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Chat/Internal/InternalPineconeChatDataSourceParameters.cs @@ -1,8 +1,6 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. 
-using System.Collections.Generic; - namespace Azure.AI.OpenAI.Chat; [CodeGenModel("PineconeChatDataSourceParameters")] diff --git a/.dotnet.azure/src/Custom/Chat/OnYourData/AzureCosmosDBChatDataSource.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Chat/OnYourData/AzureCosmosDBChatDataSource.cs similarity index 100% rename from .dotnet.azure/src/Custom/Chat/OnYourData/AzureCosmosDBChatDataSource.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Chat/OnYourData/AzureCosmosDBChatDataSource.cs diff --git a/.dotnet.azure/src/Custom/Chat/OnYourData/AzureMachineLearningIndexChatDataSource.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Chat/OnYourData/AzureMachineLearningIndexChatDataSource.cs similarity index 100% rename from .dotnet.azure/src/Custom/Chat/OnYourData/AzureMachineLearningIndexChatDataSource.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Chat/OnYourData/AzureMachineLearningIndexChatDataSource.cs diff --git a/.dotnet.azure/src/Custom/Chat/OnYourData/AzureSearchChatDataSource.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Chat/OnYourData/AzureSearchChatDataSource.cs similarity index 100% rename from .dotnet.azure/src/Custom/Chat/OnYourData/AzureSearchChatDataSource.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Chat/OnYourData/AzureSearchChatDataSource.cs diff --git a/.dotnet.azure/src/Custom/Chat/OnYourData/DataSourceAuthentication.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Chat/OnYourData/DataSourceAuthentication.cs similarity index 100% rename from .dotnet.azure/src/Custom/Chat/OnYourData/DataSourceAuthentication.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Chat/OnYourData/DataSourceAuthentication.cs diff --git a/.dotnet.azure/src/Custom/Chat/OnYourData/DataSourceFieldMappings.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Chat/OnYourData/DataSourceFieldMappings.cs similarity index 99% rename from 
.dotnet.azure/src/Custom/Chat/OnYourData/DataSourceFieldMappings.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Chat/OnYourData/DataSourceFieldMappings.cs index 1593f4e1f..999ade6bc 100644 --- a/.dotnet.azure/src/Custom/Chat/OnYourData/DataSourceFieldMappings.cs +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Chat/OnYourData/DataSourceFieldMappings.cs @@ -1,8 +1,6 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. -using System.Collections.Generic; - namespace Azure.AI.OpenAI.Chat; [CodeGenModel("AzureSearchChatDataSourceParametersFieldsMapping")] diff --git a/.dotnet.azure/src/Custom/Chat/OnYourData/DataSourceOutputContextFlags.Serialization.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Chat/OnYourData/DataSourceOutputContextFlags.Serialization.cs similarity index 97% rename from .dotnet.azure/src/Custom/Chat/OnYourData/DataSourceOutputContextFlags.Serialization.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Chat/OnYourData/DataSourceOutputContextFlags.Serialization.cs index c6039ce8f..b5a0ff12d 100644 --- a/.dotnet.azure/src/Custom/Chat/OnYourData/DataSourceOutputContextFlags.Serialization.cs +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Chat/OnYourData/DataSourceOutputContextFlags.Serialization.cs @@ -1,8 +1,6 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. 
-using System.Collections.Generic; - namespace Azure.AI.OpenAI.Chat; internal static partial class DataSourceOutputContextFlagsExtensions diff --git a/.dotnet.azure/src/Custom/Chat/OnYourData/DataSourceOutputContextFlags.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Chat/OnYourData/DataSourceOutputContextFlags.cs similarity index 100% rename from .dotnet.azure/src/Custom/Chat/OnYourData/DataSourceOutputContextFlags.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Chat/OnYourData/DataSourceOutputContextFlags.cs diff --git a/.dotnet.azure/src/Custom/Chat/OnYourData/DataSourceVectorizer.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Chat/OnYourData/DataSourceVectorizer.cs similarity index 100% rename from .dotnet.azure/src/Custom/Chat/OnYourData/DataSourceVectorizer.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Chat/OnYourData/DataSourceVectorizer.cs diff --git a/.dotnet.azure/src/Custom/Chat/OnYourData/ElasticsearchChatDataSource.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Chat/OnYourData/ElasticsearchChatDataSource.cs similarity index 100% rename from .dotnet.azure/src/Custom/Chat/OnYourData/ElasticsearchChatDataSource.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Chat/OnYourData/ElasticsearchChatDataSource.cs diff --git a/.dotnet.azure/src/Custom/Chat/OnYourData/PineconeChatDataSource.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Chat/OnYourData/PineconeChatDataSource.cs similarity index 100% rename from .dotnet.azure/src/Custom/Chat/OnYourData/PineconeChatDataSource.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Chat/OnYourData/PineconeChatDataSource.cs diff --git a/.dotnet.azure/src/Custom/Common/AdditionalPropertyHelpers.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Common/AdditionalPropertyHelpers.cs similarity index 100% rename from .dotnet.azure/src/Custom/Common/AdditionalPropertyHelpers.cs rename to 
.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Common/AdditionalPropertyHelpers.cs diff --git a/.dotnet.azure/src/Custom/Common/ContentFilterBlocklistResult.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Common/ContentFilterBlocklistResult.cs similarity index 96% rename from .dotnet.azure/src/Custom/Common/ContentFilterBlocklistResult.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Common/ContentFilterBlocklistResult.cs index 87acefddb..12a3b13e2 100644 --- a/.dotnet.azure/src/Custom/Common/ContentFilterBlocklistResult.cs +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Common/ContentFilterBlocklistResult.cs @@ -3,8 +3,6 @@ #nullable disable -using System.Collections.Generic; - namespace Azure.AI.OpenAI; [CodeGenModel("AzureContentFilterBlocklistResult")] diff --git a/.dotnet.azure/src/Custom/Common/ContentFilterResultForPrompt.Serialization.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Common/ContentFilterResultForPrompt.Serialization.cs similarity index 100% rename from .dotnet.azure/src/Custom/Common/ContentFilterResultForPrompt.Serialization.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Common/ContentFilterResultForPrompt.Serialization.cs diff --git a/.dotnet.azure/src/Custom/Common/ContentFilterResultForPrompt.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Common/ContentFilterResultForPrompt.cs similarity index 100% rename from .dotnet.azure/src/Custom/Common/ContentFilterResultForPrompt.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Common/ContentFilterResultForPrompt.cs diff --git a/.dotnet.azure/src/Custom/Common/ContentFilterResultForResponse.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Common/ContentFilterResultForResponse.cs similarity index 100% rename from .dotnet.azure/src/Custom/Common/ContentFilterResultForResponse.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Common/ContentFilterResultForResponse.cs diff --git 
a/.dotnet.azure/src/Custom/Common/GeneratorStubs.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Common/GeneratorStubs.cs similarity index 100% rename from .dotnet.azure/src/Custom/Common/GeneratorStubs.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Common/GeneratorStubs.cs diff --git a/.dotnet.azure/src/Custom/Embeddings/AzureEmbeddingClient.Protocol.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Embeddings/AzureEmbeddingClient.Protocol.cs similarity index 100% rename from .dotnet.azure/src/Custom/Embeddings/AzureEmbeddingClient.Protocol.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Embeddings/AzureEmbeddingClient.Protocol.cs diff --git a/.dotnet.azure/src/Custom/Embeddings/AzureEmbeddingClient.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Embeddings/AzureEmbeddingClient.cs similarity index 65% rename from .dotnet.azure/src/Custom/Embeddings/AzureEmbeddingClient.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Embeddings/AzureEmbeddingClient.cs index cc35248ac..7bbdf2477 100644 --- a/.dotnet.azure/src/Custom/Embeddings/AzureEmbeddingClient.cs +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Embeddings/AzureEmbeddingClient.cs @@ -18,14 +18,14 @@ internal partial class AzureEmbeddingClient : EmbeddingClient private readonly Uri _endpoint; private readonly string _apiVersion; - internal AzureEmbeddingClient( - ClientPipeline pipeline, - string deploymentName, - Uri endpoint, - AzureOpenAIClientOptions options) - : base(pipeline, model: deploymentName, endpoint, options) + internal AzureEmbeddingClient(ClientPipeline pipeline, string deploymentName, Uri endpoint, AzureOpenAIClientOptions options) + : base(pipeline, model: deploymentName, new OpenAIClientOptions() { Endpoint = endpoint }) { + Argument.AssertNotNull(pipeline, nameof(pipeline)); + Argument.AssertNotNullOrEmpty(deploymentName, nameof(deploymentName)); + Argument.AssertNotNull(endpoint, nameof(endpoint)); options 
??= new(); + _deploymentName = deploymentName; _endpoint = endpoint; _apiVersion = options.Version; diff --git a/.dotnet.azure/src/Custom/Files/AzureFileClient.Protocol.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Files/AzureFileClient.Protocol.cs similarity index 97% rename from .dotnet.azure/src/Custom/Files/AzureFileClient.Protocol.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Files/AzureFileClient.Protocol.cs index 921c3072f..07b8d3ccf 100644 --- a/.dotnet.azure/src/Custom/Files/AzureFileClient.Protocol.cs +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Files/AzureFileClient.Protocol.cs @@ -4,7 +4,6 @@ using System.ClientModel; using System.ClientModel.Primitives; using System.ComponentModel; -using OpenAI.Files; namespace Azure.AI.OpenAI.Files; @@ -67,8 +66,6 @@ public override async Task GetFileAsync(string fileId, RequestOpti [EditorBrowsable(EditorBrowsableState.Never)] public override ClientResult GetFiles(string purpose, RequestOptions options) { - Argument.AssertNotNullOrEmpty(purpose, nameof(purpose)); - using PipelineMessage message = CreateGetFilesRequestMessage(purpose, options); return ClientResult.FromResponse(Pipeline.ProcessMessage(message, options)); } @@ -76,8 +73,6 @@ public override ClientResult GetFiles(string purpose, RequestOptions options) [EditorBrowsable(EditorBrowsableState.Never)] public override async Task GetFilesAsync(string purpose, RequestOptions options) { - Argument.AssertNotNullOrEmpty(purpose, nameof(purpose)); - using PipelineMessage message = CreateGetFilesRequestMessage(purpose, options); return ClientResult.FromResponse(await Pipeline.ProcessMessageAsync(message, options).ConfigureAwait(false)); } diff --git a/.dotnet.azure/src/Custom/Files/AzureFileClient.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Files/AzureFileClient.cs similarity index 91% rename from .dotnet.azure/src/Custom/Files/AzureFileClient.cs rename to 
.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Files/AzureFileClient.cs index e48e989f3..60eb3403f 100644 --- a/.dotnet.azure/src/Custom/Files/AzureFileClient.cs +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Files/AzureFileClient.cs @@ -18,13 +18,13 @@ internal partial class AzureFileClient : FileClient private readonly Uri _endpoint; private readonly string _apiVersion; - internal AzureFileClient( - ClientPipeline pipeline, - Uri endpoint, - AzureOpenAIClientOptions options) - : base(pipeline, endpoint, options) + internal AzureFileClient(ClientPipeline pipeline, Uri endpoint, AzureOpenAIClientOptions options) + : base(pipeline, new OpenAIClientOptions() { Endpoint = endpoint }) { + Argument.AssertNotNull(pipeline, nameof(pipeline)); + Argument.AssertNotNull(endpoint, nameof(endpoint)); options ??= new(); + _endpoint = endpoint; _apiVersion = options.Version; } diff --git a/.dotnet.azure/src/Custom/FineTuning/AzureFineTuningClient.Extensions.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/FineTuning/AzureFineTuningClient.Extensions.cs similarity index 100% rename from .dotnet.azure/src/Custom/FineTuning/AzureFineTuningClient.Extensions.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/FineTuning/AzureFineTuningClient.Extensions.cs diff --git a/.dotnet.azure/src/Custom/FineTuning/AzureFineTuningClient.Protocol.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/FineTuning/AzureFineTuningClient.Protocol.cs similarity index 56% rename from .dotnet.azure/src/Custom/FineTuning/AzureFineTuningClient.Protocol.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/FineTuning/AzureFineTuningClient.Protocol.cs index 6c8c29537..479f12cde 100644 --- a/.dotnet.azure/src/Custom/FineTuning/AzureFineTuningClient.Protocol.cs +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/FineTuning/AzureFineTuningClient.Protocol.cs @@ -4,7 +4,6 @@ using System.ClientModel; using System.ClientModel.Primitives; using 
System.Diagnostics.CodeAnalysis; -using OpenAI.FineTuning; namespace Azure.AI.OpenAI.FineTuning; @@ -38,43 +37,40 @@ public override async Task GetJobAsync(string fineTuningJobId, Req return ClientResult.FromResponse(response); } - public override ClientResult GetJobs(string after, int? limit, RequestOptions options) + public override IEnumerable GetJobs(string after, int? limit, RequestOptions options) { - using PipelineMessage message = CreateGetJobsRequestMessage(after, limit, options); - return ClientResult.FromResponse(Pipeline.ProcessMessage(message, options)); + AzureFineTuningJobsPageEnumerator enumerator = new(Pipeline, _endpoint, after, limit, _apiVersion, options); + return PageCollectionHelpers.Create(enumerator); } - public override async Task GetJobsAsync(string after, int? limit, RequestOptions options) + public override IAsyncEnumerable GetJobsAsync(string after, int? limit, RequestOptions options) { - using PipelineMessage message = CreateGetJobsRequestMessage(after, limit, options); - PipelineResponse response = await Pipeline.ProcessMessageAsync(message, options).ConfigureAwait(false); - return ClientResult.FromResponse(response); + AzureFineTuningJobsPageEnumerator enumerator = new(Pipeline, _endpoint, after, limit, _apiVersion, options); + return PageCollectionHelpers.CreateAsync(enumerator); } - public override ClientResult GetJobEvents(string fineTuningJobId, string after, int? limit, RequestOptions options) + public override IEnumerable GetJobEvents(string fineTuningJobId, string after, int? 
limit, RequestOptions options) { - using PipelineMessage message = CreateGetJobEventsRequestMessage(fineTuningJobId, after, limit, options); - return ClientResult.FromResponse(Pipeline.ProcessMessage(message, options)); + AzureFineTuningJobEventsPageEnumerator enumerator = new(Pipeline, _endpoint, fineTuningJobId, after, limit, _apiVersion, options); + return PageCollectionHelpers.Create(enumerator); } - public override async Task GetJobEventsAsync(string fineTuningJobId, string after, int? limit, RequestOptions options) + public override IAsyncEnumerable GetJobEventsAsync(string fineTuningJobId, string after, int? limit, RequestOptions options) { - using PipelineMessage message = CreateGetJobEventsRequestMessage(fineTuningJobId, after, limit, options); - PipelineResponse response = await Pipeline.ProcessMessageAsync(message, options).ConfigureAwait(false); - return ClientResult.FromResponse(response); + AzureFineTuningJobEventsPageEnumerator enumerator = new(Pipeline, _endpoint, fineTuningJobId, after, limit, _apiVersion, options); + return PageCollectionHelpers.CreateAsync(enumerator); } - public override ClientResult GetJobCheckpoints(string fineTuningJobId, string after, int? limit, RequestOptions options) + public override IEnumerable GetJobCheckpoints(string fineTuningJobId, string after, int? limit, RequestOptions options) { - using PipelineMessage message = CreateGetJobCheckpointsRequestMessage(fineTuningJobId, after, limit, options); - return ClientResult.FromResponse(Pipeline.ProcessMessage(message, options)); + AzureFineTuningJobCheckpointsPageEnumerator enumerator = new(Pipeline, _endpoint, fineTuningJobId, after, limit, _apiVersion, options); + return PageCollectionHelpers.Create(enumerator); } - public override async Task GetJobCheckpointsAsync(string fineTuningJobId, string after, int? limit, RequestOptions options) + public override IAsyncEnumerable GetJobCheckpointsAsync(string fineTuningJobId, string after, int? 
limit, RequestOptions options) { - using PipelineMessage message = CreateGetJobCheckpointsRequestMessage(fineTuningJobId, after, limit, options); - PipelineResponse response = await Pipeline.ProcessMessageAsync(message, options).ConfigureAwait(false); - return ClientResult.FromResponse(response); + AzureFineTuningJobCheckpointsPageEnumerator enumerator = new(Pipeline, _endpoint, fineTuningJobId, after, limit, _apiVersion, options); + return PageCollectionHelpers.CreateAsync(enumerator); } public override ClientResult CancelJob(string fineTuningJobId, RequestOptions options) @@ -114,16 +110,6 @@ private PipelineMessage CreateCreateJobRequestMessage(BinaryContent content, Req .WithOptions(options) .Build(); - private PipelineMessage CreateGetJobsRequestMessage(string after, int? limit, RequestOptions options) - => new AzureOpenAIPipelineMessageBuilder(Pipeline, _endpoint, _apiVersion) - .WithMethod("GET") - .WithPath("fine_tuning", "jobs") - .WithOptionalQueryParameter("after", after) - .WithOptionalQueryParameter("limit", limit) - .WithAccept("application/json") - .WithOptions(options) - .Build(); - private PipelineMessage CreateGetJobRequestMessage(string jobId, RequestOptions options) => new AzureOpenAIPipelineMessageBuilder(Pipeline, _endpoint, _apiVersion) .WithMethod("GET") @@ -132,26 +118,6 @@ private PipelineMessage CreateGetJobRequestMessage(string jobId, RequestOptions .WithOptions(options) .Build(); - private PipelineMessage CreateGetJobEventsRequestMessage(string jobId, string after, int? limit, RequestOptions options) - => new AzureOpenAIPipelineMessageBuilder(Pipeline, _endpoint, _apiVersion) - .WithMethod("GET") - .WithPath("fine_tuning", "jobs", jobId, "events") - .WithOptionalQueryParameter("after", after) - .WithOptionalQueryParameter("limit", limit) - .WithAccept("application/json") - .WithOptions(options) - .Build(); - - private PipelineMessage CreateGetJobCheckpointsRequestMessage(string jobId, string after, int? 
limit, RequestOptions options) - => new AzureOpenAIPipelineMessageBuilder(Pipeline, _endpoint, _apiVersion) - .WithMethod("GET") - .WithPath("fine_tuning", "jobs", jobId, "checkpoints") - .WithOptionalQueryParameter("after", after) - .WithOptionalQueryParameter("limit", limit) - .WithAccept("application/json") - .WithOptions(options) - .Build(); - private PipelineMessage CreateCancelJobRequestMessage(string jobId, RequestOptions options) => new AzureOpenAIPipelineMessageBuilder(Pipeline, _endpoint, _apiVersion) .WithMethod("POST") diff --git a/.dotnet.azure/src/Custom/FineTuning/AzureFineTuningClient.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/FineTuning/AzureFineTuningClient.cs similarity index 71% rename from .dotnet.azure/src/Custom/FineTuning/AzureFineTuningClient.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/FineTuning/AzureFineTuningClient.cs index 2b3c0f1d6..5c69e1bdd 100644 --- a/.dotnet.azure/src/Custom/FineTuning/AzureFineTuningClient.cs +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/FineTuning/AzureFineTuningClient.cs @@ -17,13 +17,13 @@ internal partial class AzureFineTuningClient : FineTuningClient private readonly Uri _endpoint; private readonly string _apiVersion; - internal AzureFineTuningClient( - ClientPipeline pipeline, - Uri endpoint, - AzureOpenAIClientOptions options) - : base(pipeline, endpoint, options) + internal AzureFineTuningClient(ClientPipeline pipeline, Uri endpoint, AzureOpenAIClientOptions options) + : base(pipeline, new OpenAIClientOptions() { Endpoint = endpoint }) { + Argument.AssertNotNull(pipeline, nameof(pipeline)); + Argument.AssertNotNull(endpoint, nameof(endpoint)); options ??= new(); + _endpoint = endpoint; _apiVersion = options.Version; } diff --git a/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/FineTuning/Pagination/AzureFineTuningJobCheckpointsPageEnumerator.cs 
b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/FineTuning/Pagination/AzureFineTuningJobCheckpointsPageEnumerator.cs new file mode 100644 index 000000000..f002a2bf5 --- /dev/null +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/FineTuning/Pagination/AzureFineTuningJobCheckpointsPageEnumerator.cs @@ -0,0 +1,96 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +#nullable enable + +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Text.Json; + +namespace Azure.AI.OpenAI.FineTuning; + +internal class AzureFineTuningJobCheckpointsPageEnumerator : FineTuningJobCheckpointsPageEnumerator +{ + private readonly ClientPipeline _pipeline; + private readonly Uri _endpoint; + private readonly string _apiVersion; + + private readonly int? _limit; + private readonly string _jobId; + private readonly RequestOptions _options; + private string? _after; + + public AzureFineTuningJobCheckpointsPageEnumerator( + ClientPipeline pipeline, + Uri endpoint, + string jobId, string after, int? 
limit, + string apiVersion, + RequestOptions options) + : base(pipeline, endpoint, jobId, after!, limit, options) + { + _pipeline = pipeline; + _endpoint = endpoint; + _apiVersion = apiVersion; + + _jobId = jobId; + _after = after; + _limit = limit; + _options = options; + } + + public override async Task GetNextAsync(ClientResult result) + { + PipelineResponse response = result.GetRawResponse(); + + using JsonDocument doc = JsonDocument.Parse(response?.Content); + + if (doc?.RootElement.TryGetProperty("data", out JsonElement dataElement) == true + && dataElement.EnumerateArray().LastOrDefault().TryGetProperty("id", out JsonElement idElement) == true) + { + _after = idElement.GetString(); + } + + return await GetJobCheckpointsAsync(_jobId, _after!, _limit, _options).ConfigureAwait(false); + } + + public override ClientResult GetNext(ClientResult result) + { + PipelineResponse response = result.GetRawResponse(); + + using JsonDocument doc = JsonDocument.Parse(response?.Content); + + if (doc?.RootElement.TryGetProperty("data", out JsonElement dataElement) == true + && dataElement.EnumerateArray().LastOrDefault().TryGetProperty("id", out JsonElement idElement) == true) + { + _after = idElement.GetString(); + } + + return GetJobCheckpoints(_jobId, _after!, _limit, _options); + } + + internal override async Task GetJobCheckpointsAsync(string jobId, string after, int? limit, RequestOptions options) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + + using PipelineMessage message = CreateGetFineTuningJobCheckpointsRequest(jobId, after, limit, options); + return ClientResult.FromResponse(await _pipeline.ProcessMessageAsync(message, options).ConfigureAwait(false)); + } + + internal override ClientResult GetJobCheckpoints(string jobId, string after, int? 
limit, RequestOptions options) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + + using PipelineMessage message = CreateGetFineTuningJobCheckpointsRequest(jobId, after, limit, options); + return ClientResult.FromResponse(_pipeline.ProcessMessage(message, options)); + } + + internal new PipelineMessage CreateGetFineTuningJobCheckpointsRequest(string fineTuningJobId, string after, int? limit, RequestOptions options) + => new AzureOpenAIPipelineMessageBuilder(_pipeline, _endpoint, _apiVersion) + .WithMethod("GET") + .WithPath("fine_tuning", "jobs", fineTuningJobId, "checkpoints") + .WithOptionalQueryParameter("after", after) + .WithOptionalQueryParameter("limit", limit) + .WithAccept("application/json") + .WithOptions(options) + .Build(); +} diff --git a/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/FineTuning/Pagination/AzureFineTuningJobEventsPageEnumerator.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/FineTuning/Pagination/AzureFineTuningJobEventsPageEnumerator.cs new file mode 100644 index 000000000..a18cbe777 --- /dev/null +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/FineTuning/Pagination/AzureFineTuningJobEventsPageEnumerator.cs @@ -0,0 +1,96 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +#nullable enable + +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Text.Json; + +namespace Azure.AI.OpenAI.FineTuning; + +internal class AzureFineTuningJobEventsPageEnumerator : FineTuningJobEventsPageEnumerator +{ + private readonly ClientPipeline _pipeline; + private readonly Uri _endpoint; + private readonly string _apiVersion; + + private readonly int? _limit; + private readonly string _jobId; + private readonly RequestOptions _options; + private string? _after; + + public AzureFineTuningJobEventsPageEnumerator( + ClientPipeline pipeline, + Uri endpoint, + string jobId, string? after, int? 
limit, + string apiVersion, + RequestOptions options) + : base(pipeline, endpoint, jobId, after!, limit, options) + { + _pipeline = pipeline; + _endpoint = endpoint; + _apiVersion = apiVersion; + + _jobId = jobId; + _after = after; + _limit = limit; + _options = options; + } + + public override async Task GetNextAsync(ClientResult result) + { + PipelineResponse response = result.GetRawResponse(); + + using JsonDocument doc = JsonDocument.Parse(response?.Content); + + if (doc?.RootElement.TryGetProperty("data", out JsonElement dataElement) == true + && dataElement.EnumerateArray().LastOrDefault().TryGetProperty("id", out JsonElement idElement) == true) + { + _after = idElement.GetString(); + } + + return await GetJobEventsAsync(_jobId, _after!, _limit, _options).ConfigureAwait(false); + } + + public override ClientResult GetNext(ClientResult result) + { + PipelineResponse response = result.GetRawResponse(); + + using JsonDocument doc = JsonDocument.Parse(response?.Content); + + if (doc?.RootElement.TryGetProperty("data", out JsonElement dataElement) == true + && dataElement.EnumerateArray().LastOrDefault().TryGetProperty("id", out JsonElement idElement) == true) + { + _after = idElement.GetString(); + } + + return GetJobEvents(_jobId, _after!, _limit, _options); + } + + internal override async Task GetJobEventsAsync(string jobId, string after, int? limit, RequestOptions options) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + + using PipelineMessage message = CreateGetFineTuningEventsRequest(jobId, after, limit, options); + return ClientResult.FromResponse(await _pipeline.ProcessMessageAsync(message, options).ConfigureAwait(false)); + } + + internal override ClientResult GetJobEvents(string jobId, string after, int? 
limit, RequestOptions options) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + + using PipelineMessage message = CreateGetFineTuningEventsRequest(jobId, after, limit, options); + return ClientResult.FromResponse(_pipeline.ProcessMessage(message, options)); + } + + internal new PipelineMessage CreateGetFineTuningEventsRequest(string fineTuningJobId, string after, int? limit, RequestOptions options) + => new AzureOpenAIPipelineMessageBuilder(_pipeline, _endpoint, _apiVersion) + .WithMethod("GET") + .WithPath("fine_tuning", "jobs", fineTuningJobId, "events") + .WithOptionalQueryParameter("after", after) + .WithOptionalQueryParameter("limit", limit) + .WithAccept("application/json") + .WithOptions(options) + .Build(); +} diff --git a/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/FineTuning/Pagination/AzureFineTuningJobsPageEnumerator.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/FineTuning/Pagination/AzureFineTuningJobsPageEnumerator.cs new file mode 100644 index 000000000..ef8a57075 --- /dev/null +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/FineTuning/Pagination/AzureFineTuningJobsPageEnumerator.cs @@ -0,0 +1,91 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +#nullable enable + +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Text.Json; + +namespace Azure.AI.OpenAI.FineTuning; + +internal class AzureFineTuningJobsPageEnumerator : FineTuningJobsPageEnumerator +{ + private readonly ClientPipeline _pipeline; + private readonly Uri _endpoint; + private readonly string _apiVersion; + + private readonly int? _limit; + private readonly RequestOptions _options; + private string? _after; + + public AzureFineTuningJobsPageEnumerator( + ClientPipeline pipeline, + Uri endpoint, + string? after, + int? 
limit, + string apiVersion, + RequestOptions options) + : base(pipeline, endpoint, after!, limit, options) + { + _pipeline = pipeline; + _endpoint = endpoint; + _apiVersion = apiVersion; + + _after = after; + _limit = limit; + _options = options; + } + + public override async Task GetNextAsync(ClientResult result) + { + PipelineResponse response = result.GetRawResponse(); + + using JsonDocument doc = JsonDocument.Parse(response?.Content); + + if (doc?.RootElement.TryGetProperty("data", out JsonElement dataElement) == true + && dataElement.EnumerateArray().LastOrDefault().TryGetProperty("id", out JsonElement idElement) == true) + { + _after = idElement.GetString(); + } + + return await GetJobsAsync(_after!, _limit, _options).ConfigureAwait(false); + } + + public override ClientResult GetNext(ClientResult result) + { + PipelineResponse response = result.GetRawResponse(); + + using JsonDocument doc = JsonDocument.Parse(response?.Content); + + if (doc?.RootElement.TryGetProperty("data", out JsonElement dataElement) == true + && dataElement.EnumerateArray().LastOrDefault().TryGetProperty("id", out JsonElement idElement) == true) + { + _after = idElement.GetString(); + } + + return GetJobs(_after!, _limit, _options); + } + + internal override async Task GetJobsAsync(string after, int? limit, RequestOptions options) + { + using PipelineMessage message = CreateGetFineTuningJobsRequest(after, limit, options); + return ClientResult.FromResponse(await _pipeline.ProcessMessageAsync(message, options).ConfigureAwait(false)); + } + + internal override ClientResult GetJobs(string after, int? limit, RequestOptions options) + { + using PipelineMessage message = CreateGetFineTuningJobsRequest(after, limit, options); + return ClientResult.FromResponse(_pipeline.ProcessMessage(message, options)); + } + + internal new PipelineMessage CreateGetFineTuningJobsRequest(string? after, int? 
limit, RequestOptions options) + => new AzureOpenAIPipelineMessageBuilder(_pipeline, _endpoint, _apiVersion) + .WithMethod("GET") + .WithPath("fine_tuning", "jobs") + .WithOptionalQueryParameter("after", after) + .WithOptionalQueryParameter("limit", limit) + .WithAccept("application/json") + .WithOptions(options) + .Build(); +} diff --git a/.dotnet.azure/src/Custom/Images/AzureGeneratedImage.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Images/AzureGeneratedImage.cs similarity index 100% rename from .dotnet.azure/src/Custom/Images/AzureGeneratedImage.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Images/AzureGeneratedImage.cs diff --git a/.dotnet.azure/src/Custom/Images/AzureImageClient.Protocol.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Images/AzureImageClient.Protocol.cs similarity index 100% rename from .dotnet.azure/src/Custom/Images/AzureImageClient.Protocol.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Images/AzureImageClient.Protocol.cs diff --git a/.dotnet.azure/src/Custom/Images/AzureImageClient.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Images/AzureImageClient.cs similarity index 65% rename from .dotnet.azure/src/Custom/Images/AzureImageClient.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Images/AzureImageClient.cs index f6e35676f..ebeda9dae 100644 --- a/.dotnet.azure/src/Custom/Images/AzureImageClient.cs +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Images/AzureImageClient.cs @@ -18,14 +18,14 @@ internal partial class AzureImageClient : ImageClient private readonly Uri _endpoint; private readonly string _apiVersion; - internal AzureImageClient( - ClientPipeline pipeline, - string deploymentName, - Uri endpoint, - AzureOpenAIClientOptions options) - : base(pipeline, model: deploymentName, endpoint, options) + internal AzureImageClient(ClientPipeline pipeline, string deploymentName, Uri endpoint, AzureOpenAIClientOptions options) + : base(pipeline, 
model: deploymentName, new OpenAIClientOptions() { Endpoint = endpoint }) { + Argument.AssertNotNull(pipeline, nameof(pipeline)); + Argument.AssertNotNullOrEmpty(deploymentName, nameof(deploymentName)); + Argument.AssertNotNull(endpoint, nameof(endpoint)); options ??= new(); + _deploymentName = deploymentName; _endpoint = endpoint; _apiVersion = options.Version; diff --git a/.dotnet.azure/src/Custom/Images/GeneratorStubs.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Images/GeneratorStubs.cs similarity index 100% rename from .dotnet.azure/src/Custom/Images/GeneratorStubs.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Images/GeneratorStubs.cs diff --git a/.dotnet.azure/src/Custom/Internal/AzureOpenAIChatError.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Internal/AzureOpenAIChatError.cs similarity index 100% rename from .dotnet.azure/src/Custom/Internal/AzureOpenAIChatError.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Internal/AzureOpenAIChatError.cs diff --git a/.dotnet.azure/src/Custom/Internal/AzureOpenAIDalleError.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Internal/AzureOpenAIDalleError.cs similarity index 100% rename from .dotnet.azure/src/Custom/Internal/AzureOpenAIDalleError.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Internal/AzureOpenAIDalleError.cs diff --git a/.dotnet.azure/src/Custom/Internal/ClientPipelineExtensions.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Internal/ClientPipelineExtensions.cs similarity index 100% rename from .dotnet.azure/src/Custom/Internal/ClientPipelineExtensions.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Internal/ClientPipelineExtensions.cs diff --git a/.dotnet.azure/src/Custom/Internal/ClientUriBuilder.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Internal/ClientUriBuilder.cs similarity index 100% rename from .dotnet.azure/src/Custom/Internal/ClientUriBuilder.cs rename to 
.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Internal/ClientUriBuilder.cs diff --git a/.dotnet.azure/src/Custom/Internal/GeneratorStubs.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Internal/GeneratorStubs.cs similarity index 100% rename from .dotnet.azure/src/Custom/Internal/GeneratorStubs.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/Internal/GeneratorStubs.cs diff --git a/.dotnet.azure/src/Custom/VectorStores/AzureVectorStoreClient.Protocol.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/VectorStores/AzureVectorStoreClient.Protocol.cs similarity index 99% rename from .dotnet.azure/src/Custom/VectorStores/AzureVectorStoreClient.Protocol.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/VectorStores/AzureVectorStoreClient.Protocol.cs index b14c3b56e..63040be8b 100644 --- a/.dotnet.azure/src/Custom/VectorStores/AzureVectorStoreClient.Protocol.cs +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/VectorStores/AzureVectorStoreClient.Protocol.cs @@ -3,7 +3,6 @@ using System.ClientModel; using System.ClientModel.Primitives; -using OpenAI.VectorStores; namespace Azure.AI.OpenAI.VectorStores; diff --git a/.dotnet.azure/src/Custom/VectorStores/AzureVectorStoreClient.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/VectorStores/AzureVectorStoreClient.cs similarity index 72% rename from .dotnet.azure/src/Custom/VectorStores/AzureVectorStoreClient.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/VectorStores/AzureVectorStoreClient.cs index 5d6bcd29a..52b07ee81 100644 --- a/.dotnet.azure/src/Custom/VectorStores/AzureVectorStoreClient.cs +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/VectorStores/AzureVectorStoreClient.cs @@ -19,13 +19,13 @@ internal partial class AzureVectorStoreClient : VectorStoreClient private readonly Uri _endpoint; private readonly string _apiVersion; - internal AzureVectorStoreClient( - ClientPipeline pipeline, - Uri endpoint, - 
AzureOpenAIClientOptions options) - : base(pipeline, endpoint, options) + internal AzureVectorStoreClient(ClientPipeline pipeline, Uri endpoint, AzureOpenAIClientOptions options) + : base(pipeline, new OpenAIClientOptions() { Endpoint = endpoint }) { + Argument.AssertNotNull(pipeline, nameof(pipeline)); + Argument.AssertNotNull(endpoint, nameof(endpoint)); options ??= new(); + _endpoint = endpoint; _apiVersion = options.Version; } diff --git a/.dotnet.azure/src/Custom/VectorStores/Internal/Pagination/AzureVectorStoreFileBatchesPageEnumerator.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/VectorStores/Internal/Pagination/AzureVectorStoreFileBatchesPageEnumerator.cs similarity index 96% rename from .dotnet.azure/src/Custom/VectorStores/Internal/Pagination/AzureVectorStoreFileBatchesPageEnumerator.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/VectorStores/Internal/Pagination/AzureVectorStoreFileBatchesPageEnumerator.cs index bfb2bf07c..21ee0a3fb 100644 --- a/.dotnet.azure/src/Custom/VectorStores/Internal/Pagination/AzureVectorStoreFileBatchesPageEnumerator.cs +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/VectorStores/Internal/Pagination/AzureVectorStoreFileBatchesPageEnumerator.cs @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ using System.ClientModel; using System.ClientModel.Primitives; diff --git a/.dotnet.azure/src/Custom/VectorStores/Internal/Pagination/AzureVectorStoreFilesPageEnumerator.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/VectorStores/Internal/Pagination/AzureVectorStoreFilesPageEnumerator.cs similarity index 94% rename from .dotnet.azure/src/Custom/VectorStores/Internal/Pagination/AzureVectorStoreFilesPageEnumerator.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/VectorStores/Internal/Pagination/AzureVectorStoreFilesPageEnumerator.cs index 5cf8b09ff..4152a4869 100644 --- a/.dotnet.azure/src/Custom/VectorStores/Internal/Pagination/AzureVectorStoreFilesPageEnumerator.cs +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/VectorStores/Internal/Pagination/AzureVectorStoreFilesPageEnumerator.cs @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + using System.ClientModel; using System.ClientModel.Primitives; @@ -11,7 +14,7 @@ internal partial class AzureVectorStoreFilesPageEnumerator : VectorStoreFilesPag public AzureVectorStoreFilesPageEnumerator( ClientPipeline pipeline, Uri endpoint, - string vectorStoreId, + string vectorStoreId, int? 
limit, string order, string after, string before, string filter, string apiVersion, RequestOptions options) diff --git a/.dotnet.azure/src/Custom/VectorStores/Internal/Pagination/AzureVectorStoresPageEnumerator.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/VectorStores/Internal/Pagination/AzureVectorStoresPageEnumerator.cs similarity index 95% rename from .dotnet.azure/src/Custom/VectorStores/Internal/Pagination/AzureVectorStoresPageEnumerator.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/VectorStores/Internal/Pagination/AzureVectorStoresPageEnumerator.cs index 823045419..d8a90039f 100644 --- a/.dotnet.azure/src/Custom/VectorStores/Internal/Pagination/AzureVectorStoresPageEnumerator.cs +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Custom/VectorStores/Internal/Pagination/AzureVectorStoresPageEnumerator.cs @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + using System.ClientModel; using System.ClientModel.Primitives; diff --git a/.dotnet.azure/src/Generated/AzureChatCitation.Serialization.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/AzureChatCitation.Serialization.cs similarity index 100% rename from .dotnet.azure/src/Generated/AzureChatCitation.Serialization.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/AzureChatCitation.Serialization.cs diff --git a/.dotnet.azure/src/Generated/AzureChatCitation.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/AzureChatCitation.cs similarity index 100% rename from .dotnet.azure/src/Generated/AzureChatCitation.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/AzureChatCitation.cs diff --git a/.dotnet.azure/src/Generated/AzureChatDataSource.Serialization.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/AzureChatDataSource.Serialization.cs similarity index 100% rename from .dotnet.azure/src/Generated/AzureChatDataSource.Serialization.cs rename to 
.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/AzureChatDataSource.Serialization.cs diff --git a/.dotnet.azure/src/Generated/AzureChatDataSource.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/AzureChatDataSource.cs similarity index 100% rename from .dotnet.azure/src/Generated/AzureChatDataSource.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/AzureChatDataSource.cs diff --git a/.dotnet.azure/src/Generated/AzureChatMessageContext.Serialization.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/AzureChatMessageContext.Serialization.cs similarity index 100% rename from .dotnet.azure/src/Generated/AzureChatMessageContext.Serialization.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/AzureChatMessageContext.Serialization.cs diff --git a/.dotnet.azure/src/Generated/AzureChatMessageContext.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/AzureChatMessageContext.cs similarity index 100% rename from .dotnet.azure/src/Generated/AzureChatMessageContext.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/AzureChatMessageContext.cs diff --git a/.dotnet.azure/src/Generated/AzureChatRetrievedDocument.Serialization.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/AzureChatRetrievedDocument.Serialization.cs similarity index 100% rename from .dotnet.azure/src/Generated/AzureChatRetrievedDocument.Serialization.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/AzureChatRetrievedDocument.Serialization.cs diff --git a/.dotnet.azure/src/Generated/AzureChatRetrievedDocument.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/AzureChatRetrievedDocument.cs similarity index 100% rename from .dotnet.azure/src/Generated/AzureChatRetrievedDocument.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/AzureChatRetrievedDocument.cs diff --git a/.dotnet.azure/src/Generated/AzureChatRetrievedDocumentFilterReason.cs 
b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/AzureChatRetrievedDocumentFilterReason.cs similarity index 100% rename from .dotnet.azure/src/Generated/AzureChatRetrievedDocumentFilterReason.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/AzureChatRetrievedDocumentFilterReason.cs diff --git a/.dotnet.azure/src/Generated/AzureCosmosDBChatDataSource.Serialization.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/AzureCosmosDBChatDataSource.Serialization.cs similarity index 100% rename from .dotnet.azure/src/Generated/AzureCosmosDBChatDataSource.Serialization.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/AzureCosmosDBChatDataSource.Serialization.cs diff --git a/.dotnet.azure/src/Generated/AzureCosmosDBChatDataSource.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/AzureCosmosDBChatDataSource.cs similarity index 100% rename from .dotnet.azure/src/Generated/AzureCosmosDBChatDataSource.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/AzureCosmosDBChatDataSource.cs diff --git a/.dotnet.azure/src/Generated/AzureMachineLearningIndexChatDataSource.Serialization.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/AzureMachineLearningIndexChatDataSource.Serialization.cs similarity index 100% rename from .dotnet.azure/src/Generated/AzureMachineLearningIndexChatDataSource.Serialization.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/AzureMachineLearningIndexChatDataSource.Serialization.cs diff --git a/.dotnet.azure/src/Generated/AzureMachineLearningIndexChatDataSource.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/AzureMachineLearningIndexChatDataSource.cs similarity index 100% rename from .dotnet.azure/src/Generated/AzureMachineLearningIndexChatDataSource.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/AzureMachineLearningIndexChatDataSource.cs diff --git a/.dotnet.azure/src/Generated/AzureOpenAIChatError.Serialization.cs 
b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/AzureOpenAIChatError.Serialization.cs similarity index 100% rename from .dotnet.azure/src/Generated/AzureOpenAIChatError.Serialization.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/AzureOpenAIChatError.Serialization.cs diff --git a/.dotnet.azure/src/Generated/AzureOpenAIChatError.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/AzureOpenAIChatError.cs similarity index 100% rename from .dotnet.azure/src/Generated/AzureOpenAIChatError.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/AzureOpenAIChatError.cs diff --git a/.dotnet.azure/src/Generated/AzureOpenAIChatErrorResponse.Serialization.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/AzureOpenAIChatErrorResponse.Serialization.cs similarity index 100% rename from .dotnet.azure/src/Generated/AzureOpenAIChatErrorResponse.Serialization.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/AzureOpenAIChatErrorResponse.Serialization.cs diff --git a/.dotnet.azure/src/Generated/AzureOpenAIChatErrorResponse.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/AzureOpenAIChatErrorResponse.cs similarity index 100% rename from .dotnet.azure/src/Generated/AzureOpenAIChatErrorResponse.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/AzureOpenAIChatErrorResponse.cs diff --git a/.dotnet.azure/src/Generated/AzureOpenAIDalleError.Serialization.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/AzureOpenAIDalleError.Serialization.cs similarity index 100% rename from .dotnet.azure/src/Generated/AzureOpenAIDalleError.Serialization.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/AzureOpenAIDalleError.Serialization.cs diff --git a/.dotnet.azure/src/Generated/AzureOpenAIDalleError.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/AzureOpenAIDalleError.cs similarity index 100% rename from .dotnet.azure/src/Generated/AzureOpenAIDalleError.cs 
rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/AzureOpenAIDalleError.cs diff --git a/.dotnet.azure/src/Generated/AzureOpenAIDalleErrorResponse.Serialization.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/AzureOpenAIDalleErrorResponse.Serialization.cs similarity index 100% rename from .dotnet.azure/src/Generated/AzureOpenAIDalleErrorResponse.Serialization.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/AzureOpenAIDalleErrorResponse.Serialization.cs diff --git a/.dotnet.azure/src/Generated/AzureOpenAIDalleErrorResponse.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/AzureOpenAIDalleErrorResponse.cs similarity index 100% rename from .dotnet.azure/src/Generated/AzureOpenAIDalleErrorResponse.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/AzureOpenAIDalleErrorResponse.cs diff --git a/.dotnet.azure/src/Generated/AzureSearchChatDataSource.Serialization.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/AzureSearchChatDataSource.Serialization.cs similarity index 100% rename from .dotnet.azure/src/Generated/AzureSearchChatDataSource.Serialization.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/AzureSearchChatDataSource.Serialization.cs diff --git a/.dotnet.azure/src/Generated/AzureSearchChatDataSource.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/AzureSearchChatDataSource.cs similarity index 100% rename from .dotnet.azure/src/Generated/AzureSearchChatDataSource.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/AzureSearchChatDataSource.cs diff --git a/.dotnet.azure/src/Generated/ContentFilterBlocklistResult.Serialization.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/ContentFilterBlocklistResult.Serialization.cs similarity index 100% rename from .dotnet.azure/src/Generated/ContentFilterBlocklistResult.Serialization.cs rename to 
.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/ContentFilterBlocklistResult.Serialization.cs diff --git a/.dotnet.azure/src/Generated/ContentFilterBlocklistResult.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/ContentFilterBlocklistResult.cs similarity index 100% rename from .dotnet.azure/src/Generated/ContentFilterBlocklistResult.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/ContentFilterBlocklistResult.cs diff --git a/.dotnet.azure/src/Generated/ContentFilterDetectionResult.Serialization.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/ContentFilterDetectionResult.Serialization.cs similarity index 100% rename from .dotnet.azure/src/Generated/ContentFilterDetectionResult.Serialization.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/ContentFilterDetectionResult.Serialization.cs diff --git a/.dotnet.azure/src/Generated/ContentFilterDetectionResult.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/ContentFilterDetectionResult.cs similarity index 100% rename from .dotnet.azure/src/Generated/ContentFilterDetectionResult.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/ContentFilterDetectionResult.cs diff --git a/.dotnet.azure/src/Generated/ContentFilterProtectedMaterialCitedResult.Serialization.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/ContentFilterProtectedMaterialCitedResult.Serialization.cs similarity index 100% rename from .dotnet.azure/src/Generated/ContentFilterProtectedMaterialCitedResult.Serialization.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/ContentFilterProtectedMaterialCitedResult.Serialization.cs diff --git a/.dotnet.azure/src/Generated/ContentFilterProtectedMaterialCitedResult.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/ContentFilterProtectedMaterialCitedResult.cs similarity index 100% rename from .dotnet.azure/src/Generated/ContentFilterProtectedMaterialCitedResult.cs rename to 
.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/ContentFilterProtectedMaterialCitedResult.cs diff --git a/.dotnet.azure/src/Generated/ContentFilterProtectedMaterialResult.Serialization.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/ContentFilterProtectedMaterialResult.Serialization.cs similarity index 100% rename from .dotnet.azure/src/Generated/ContentFilterProtectedMaterialResult.Serialization.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/ContentFilterProtectedMaterialResult.Serialization.cs diff --git a/.dotnet.azure/src/Generated/ContentFilterProtectedMaterialResult.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/ContentFilterProtectedMaterialResult.cs similarity index 100% rename from .dotnet.azure/src/Generated/ContentFilterProtectedMaterialResult.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/ContentFilterProtectedMaterialResult.cs diff --git a/.dotnet.azure/src/Generated/ContentFilterResultForPrompt.Serialization.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/ContentFilterResultForPrompt.Serialization.cs similarity index 100% rename from .dotnet.azure/src/Generated/ContentFilterResultForPrompt.Serialization.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/ContentFilterResultForPrompt.Serialization.cs diff --git a/.dotnet.azure/src/Generated/ContentFilterResultForPrompt.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/ContentFilterResultForPrompt.cs similarity index 100% rename from .dotnet.azure/src/Generated/ContentFilterResultForPrompt.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/ContentFilterResultForPrompt.cs diff --git a/.dotnet.azure/src/Generated/ContentFilterResultForResponse.Serialization.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/ContentFilterResultForResponse.Serialization.cs similarity index 100% rename from .dotnet.azure/src/Generated/ContentFilterResultForResponse.Serialization.cs rename 
to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/ContentFilterResultForResponse.Serialization.cs diff --git a/.dotnet.azure/src/Generated/ContentFilterResultForResponse.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/ContentFilterResultForResponse.cs similarity index 100% rename from .dotnet.azure/src/Generated/ContentFilterResultForResponse.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/ContentFilterResultForResponse.cs diff --git a/.dotnet.azure/src/Generated/ContentFilterSeverity.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/ContentFilterSeverity.cs similarity index 100% rename from .dotnet.azure/src/Generated/ContentFilterSeverity.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/ContentFilterSeverity.cs diff --git a/.dotnet.azure/src/Generated/ContentFilterSeverityResult.Serialization.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/ContentFilterSeverityResult.Serialization.cs similarity index 100% rename from .dotnet.azure/src/Generated/ContentFilterSeverityResult.Serialization.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/ContentFilterSeverityResult.Serialization.cs diff --git a/.dotnet.azure/src/Generated/ContentFilterSeverityResult.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/ContentFilterSeverityResult.cs similarity index 100% rename from .dotnet.azure/src/Generated/ContentFilterSeverityResult.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/ContentFilterSeverityResult.cs diff --git a/.dotnet.azure/src/Generated/DataSourceAuthentication.Serialization.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/DataSourceAuthentication.Serialization.cs similarity index 100% rename from .dotnet.azure/src/Generated/DataSourceAuthentication.Serialization.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/DataSourceAuthentication.Serialization.cs diff --git 
a/.dotnet.azure/src/Generated/DataSourceAuthentication.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/DataSourceAuthentication.cs similarity index 100% rename from .dotnet.azure/src/Generated/DataSourceAuthentication.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/DataSourceAuthentication.cs diff --git a/.dotnet.azure/src/Generated/DataSourceFieldMappings.Serialization.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/DataSourceFieldMappings.Serialization.cs similarity index 100% rename from .dotnet.azure/src/Generated/DataSourceFieldMappings.Serialization.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/DataSourceFieldMappings.Serialization.cs diff --git a/.dotnet.azure/src/Generated/DataSourceFieldMappings.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/DataSourceFieldMappings.cs similarity index 100% rename from .dotnet.azure/src/Generated/DataSourceFieldMappings.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/DataSourceFieldMappings.cs diff --git a/.dotnet.azure/src/Generated/DataSourceQueryType.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/DataSourceQueryType.cs similarity index 100% rename from .dotnet.azure/src/Generated/DataSourceQueryType.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/DataSourceQueryType.cs diff --git a/.dotnet.azure/src/Generated/DataSourceVectorizer.Serialization.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/DataSourceVectorizer.Serialization.cs similarity index 100% rename from .dotnet.azure/src/Generated/DataSourceVectorizer.Serialization.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/DataSourceVectorizer.Serialization.cs diff --git a/.dotnet.azure/src/Generated/DataSourceVectorizer.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/DataSourceVectorizer.cs similarity index 100% rename from .dotnet.azure/src/Generated/DataSourceVectorizer.cs rename to 
.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/DataSourceVectorizer.cs diff --git a/.dotnet.azure/src/Generated/ElasticsearchChatDataSource.Serialization.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/ElasticsearchChatDataSource.Serialization.cs similarity index 100% rename from .dotnet.azure/src/Generated/ElasticsearchChatDataSource.Serialization.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/ElasticsearchChatDataSource.Serialization.cs diff --git a/.dotnet.azure/src/Generated/ElasticsearchChatDataSource.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/ElasticsearchChatDataSource.cs similarity index 100% rename from .dotnet.azure/src/Generated/ElasticsearchChatDataSource.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/ElasticsearchChatDataSource.cs diff --git a/.dotnet.azure/src/Generated/ImageContentFilterResultForPrompt.Serialization.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/ImageContentFilterResultForPrompt.Serialization.cs similarity index 100% rename from .dotnet.azure/src/Generated/ImageContentFilterResultForPrompt.Serialization.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/ImageContentFilterResultForPrompt.Serialization.cs diff --git a/.dotnet.azure/src/Generated/ImageContentFilterResultForPrompt.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/ImageContentFilterResultForPrompt.cs similarity index 100% rename from .dotnet.azure/src/Generated/ImageContentFilterResultForPrompt.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/ImageContentFilterResultForPrompt.cs diff --git a/.dotnet.azure/src/Generated/ImageContentFilterResultForResponse.Serialization.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/ImageContentFilterResultForResponse.Serialization.cs similarity index 100% rename from .dotnet.azure/src/Generated/ImageContentFilterResultForResponse.Serialization.cs rename to 
.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/ImageContentFilterResultForResponse.Serialization.cs diff --git a/.dotnet.azure/src/Generated/ImageContentFilterResultForResponse.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/ImageContentFilterResultForResponse.cs similarity index 100% rename from .dotnet.azure/src/Generated/ImageContentFilterResultForResponse.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/ImageContentFilterResultForResponse.cs diff --git a/.dotnet.azure/src/Generated/Internal/Argument.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/Internal/Argument.cs similarity index 100% rename from .dotnet.azure/src/Generated/Internal/Argument.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/Internal/Argument.cs diff --git a/.dotnet.azure/src/Generated/Internal/BinaryContentHelper.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/Internal/BinaryContentHelper.cs similarity index 99% rename from .dotnet.azure/src/Generated/Internal/BinaryContentHelper.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/Internal/BinaryContentHelper.cs index 94ae48ee4..e6f35c517 100644 --- a/.dotnet.azure/src/Generated/Internal/BinaryContentHelper.cs +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/Internal/BinaryContentHelper.cs @@ -53,7 +53,7 @@ public static BinaryContent FromEnumerable(IEnumerable enumerable) } public static BinaryContent FromEnumerable(ReadOnlySpan span) - where T : notnull + where T : notnull { Utf8JsonBinaryContent content = new Utf8JsonBinaryContent(); content.JsonWriter.WriteStartArray(); diff --git a/.dotnet.azure/src/Generated/Internal/ChangeTrackingDictionary.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/Internal/ChangeTrackingDictionary.cs similarity index 100% rename from .dotnet.azure/src/Generated/Internal/ChangeTrackingDictionary.cs rename to 
.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/Internal/ChangeTrackingDictionary.cs diff --git a/.dotnet.azure/src/Generated/Internal/ChangeTrackingList.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/Internal/ChangeTrackingList.cs similarity index 100% rename from .dotnet.azure/src/Generated/Internal/ChangeTrackingList.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/Internal/ChangeTrackingList.cs diff --git a/.dotnet.azure/src/Generated/Internal/ClientPipelineExtensions.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/Internal/ClientPipelineExtensions.cs similarity index 100% rename from .dotnet.azure/src/Generated/Internal/ClientPipelineExtensions.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/Internal/ClientPipelineExtensions.cs diff --git a/.dotnet.azure/src/Generated/Internal/ClientUriBuilder.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/Internal/ClientUriBuilder.cs similarity index 100% rename from .dotnet.azure/src/Generated/Internal/ClientUriBuilder.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/Internal/ClientUriBuilder.cs diff --git a/.dotnet.azure/src/Generated/Internal/ErrorResult.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/Internal/ErrorResult.cs similarity index 100% rename from .dotnet.azure/src/Generated/Internal/ErrorResult.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/Internal/ErrorResult.cs diff --git a/.dotnet.azure/src/Generated/Internal/ModelSerializationExtensions.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/Internal/ModelSerializationExtensions.cs similarity index 100% rename from .dotnet.azure/src/Generated/Internal/ModelSerializationExtensions.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/Internal/ModelSerializationExtensions.cs diff --git a/.dotnet.azure/src/Generated/Internal/MultipartFormDataBinaryContent.cs 
b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/Internal/MultipartFormDataBinaryContent.cs similarity index 100% rename from .dotnet.azure/src/Generated/Internal/MultipartFormDataBinaryContent.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/Internal/MultipartFormDataBinaryContent.cs diff --git a/.dotnet.azure/src/Generated/Internal/Optional.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/Internal/Optional.cs similarity index 100% rename from .dotnet.azure/src/Generated/Internal/Optional.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/Internal/Optional.cs diff --git a/.dotnet.azure/src/Generated/Internal/Utf8JsonBinaryContent.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/Internal/Utf8JsonBinaryContent.cs similarity index 100% rename from .dotnet.azure/src/Generated/Internal/Utf8JsonBinaryContent.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/Internal/Utf8JsonBinaryContent.cs diff --git a/.dotnet.azure/src/Generated/InternalAzureChatDataSourceAccessTokenAuthenticationOptions.Serialization.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureChatDataSourceAccessTokenAuthenticationOptions.Serialization.cs similarity index 100% rename from .dotnet.azure/src/Generated/InternalAzureChatDataSourceAccessTokenAuthenticationOptions.Serialization.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureChatDataSourceAccessTokenAuthenticationOptions.Serialization.cs diff --git a/.dotnet.azure/src/Generated/InternalAzureChatDataSourceAccessTokenAuthenticationOptions.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureChatDataSourceAccessTokenAuthenticationOptions.cs similarity index 100% rename from .dotnet.azure/src/Generated/InternalAzureChatDataSourceAccessTokenAuthenticationOptions.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureChatDataSourceAccessTokenAuthenticationOptions.cs diff 
--git a/.dotnet.azure/src/Generated/InternalAzureChatDataSourceApiKeyAuthenticationOptions.Serialization.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureChatDataSourceApiKeyAuthenticationOptions.Serialization.cs similarity index 100% rename from .dotnet.azure/src/Generated/InternalAzureChatDataSourceApiKeyAuthenticationOptions.Serialization.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureChatDataSourceApiKeyAuthenticationOptions.Serialization.cs diff --git a/.dotnet.azure/src/Generated/InternalAzureChatDataSourceApiKeyAuthenticationOptions.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureChatDataSourceApiKeyAuthenticationOptions.cs similarity index 100% rename from .dotnet.azure/src/Generated/InternalAzureChatDataSourceApiKeyAuthenticationOptions.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureChatDataSourceApiKeyAuthenticationOptions.cs diff --git a/.dotnet.azure/src/Generated/InternalAzureChatDataSourceConnectionStringAuthenticationOptions.Serialization.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureChatDataSourceConnectionStringAuthenticationOptions.Serialization.cs similarity index 100% rename from .dotnet.azure/src/Generated/InternalAzureChatDataSourceConnectionStringAuthenticationOptions.Serialization.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureChatDataSourceConnectionStringAuthenticationOptions.Serialization.cs diff --git a/.dotnet.azure/src/Generated/InternalAzureChatDataSourceConnectionStringAuthenticationOptions.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureChatDataSourceConnectionStringAuthenticationOptions.cs similarity index 100% rename from .dotnet.azure/src/Generated/InternalAzureChatDataSourceConnectionStringAuthenticationOptions.cs rename to 
.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureChatDataSourceConnectionStringAuthenticationOptions.cs diff --git a/.dotnet.azure/src/Generated/InternalAzureChatDataSourceDeploymentNameVectorizationSource.Serialization.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureChatDataSourceDeploymentNameVectorizationSource.Serialization.cs similarity index 100% rename from .dotnet.azure/src/Generated/InternalAzureChatDataSourceDeploymentNameVectorizationSource.Serialization.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureChatDataSourceDeploymentNameVectorizationSource.Serialization.cs diff --git a/.dotnet.azure/src/Generated/InternalAzureChatDataSourceDeploymentNameVectorizationSource.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureChatDataSourceDeploymentNameVectorizationSource.cs similarity index 100% rename from .dotnet.azure/src/Generated/InternalAzureChatDataSourceDeploymentNameVectorizationSource.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureChatDataSourceDeploymentNameVectorizationSource.cs diff --git a/.dotnet.azure/src/Generated/InternalAzureChatDataSourceEncodedApiKeyAuthenticationOptions.Serialization.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureChatDataSourceEncodedApiKeyAuthenticationOptions.Serialization.cs similarity index 100% rename from .dotnet.azure/src/Generated/InternalAzureChatDataSourceEncodedApiKeyAuthenticationOptions.Serialization.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureChatDataSourceEncodedApiKeyAuthenticationOptions.Serialization.cs diff --git a/.dotnet.azure/src/Generated/InternalAzureChatDataSourceEncodedApiKeyAuthenticationOptions.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureChatDataSourceEncodedApiKeyAuthenticationOptions.cs similarity index 100% rename from 
.dotnet.azure/src/Generated/InternalAzureChatDataSourceEncodedApiKeyAuthenticationOptions.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureChatDataSourceEncodedApiKeyAuthenticationOptions.cs diff --git a/.dotnet.azure/src/Generated/InternalAzureChatDataSourceEndpointVectorizationSource.Serialization.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureChatDataSourceEndpointVectorizationSource.Serialization.cs similarity index 100% rename from .dotnet.azure/src/Generated/InternalAzureChatDataSourceEndpointVectorizationSource.Serialization.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureChatDataSourceEndpointVectorizationSource.Serialization.cs diff --git a/.dotnet.azure/src/Generated/InternalAzureChatDataSourceEndpointVectorizationSource.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureChatDataSourceEndpointVectorizationSource.cs similarity index 100% rename from .dotnet.azure/src/Generated/InternalAzureChatDataSourceEndpointVectorizationSource.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureChatDataSourceEndpointVectorizationSource.cs diff --git a/.dotnet.azure/src/Generated/InternalAzureChatDataSourceKeyAndKeyIdAuthenticationOptions.Serialization.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureChatDataSourceKeyAndKeyIdAuthenticationOptions.Serialization.cs similarity index 100% rename from .dotnet.azure/src/Generated/InternalAzureChatDataSourceKeyAndKeyIdAuthenticationOptions.Serialization.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureChatDataSourceKeyAndKeyIdAuthenticationOptions.Serialization.cs diff --git a/.dotnet.azure/src/Generated/InternalAzureChatDataSourceKeyAndKeyIdAuthenticationOptions.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureChatDataSourceKeyAndKeyIdAuthenticationOptions.cs similarity index 100% rename from 
.dotnet.azure/src/Generated/InternalAzureChatDataSourceKeyAndKeyIdAuthenticationOptions.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureChatDataSourceKeyAndKeyIdAuthenticationOptions.cs diff --git a/.dotnet.azure/src/Generated/InternalAzureChatDataSourceModelIdVectorizationSource.Serialization.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureChatDataSourceModelIdVectorizationSource.Serialization.cs similarity index 100% rename from .dotnet.azure/src/Generated/InternalAzureChatDataSourceModelIdVectorizationSource.Serialization.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureChatDataSourceModelIdVectorizationSource.Serialization.cs diff --git a/.dotnet.azure/src/Generated/InternalAzureChatDataSourceModelIdVectorizationSource.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureChatDataSourceModelIdVectorizationSource.cs similarity index 100% rename from .dotnet.azure/src/Generated/InternalAzureChatDataSourceModelIdVectorizationSource.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureChatDataSourceModelIdVectorizationSource.cs diff --git a/.dotnet.azure/src/Generated/InternalAzureChatDataSourceSystemAssignedManagedIdentityAuthenticationOptions.Serialization.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureChatDataSourceSystemAssignedManagedIdentityAuthenticationOptions.Serialization.cs similarity index 100% rename from .dotnet.azure/src/Generated/InternalAzureChatDataSourceSystemAssignedManagedIdentityAuthenticationOptions.Serialization.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureChatDataSourceSystemAssignedManagedIdentityAuthenticationOptions.Serialization.cs diff --git a/.dotnet.azure/src/Generated/InternalAzureChatDataSourceSystemAssignedManagedIdentityAuthenticationOptions.cs 
b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureChatDataSourceSystemAssignedManagedIdentityAuthenticationOptions.cs similarity index 100% rename from .dotnet.azure/src/Generated/InternalAzureChatDataSourceSystemAssignedManagedIdentityAuthenticationOptions.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureChatDataSourceSystemAssignedManagedIdentityAuthenticationOptions.cs diff --git a/.dotnet.azure/src/Generated/InternalAzureChatDataSourceUserAssignedManagedIdentityAuthenticationOptions.Serialization.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureChatDataSourceUserAssignedManagedIdentityAuthenticationOptions.Serialization.cs similarity index 100% rename from .dotnet.azure/src/Generated/InternalAzureChatDataSourceUserAssignedManagedIdentityAuthenticationOptions.Serialization.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureChatDataSourceUserAssignedManagedIdentityAuthenticationOptions.Serialization.cs diff --git a/.dotnet.azure/src/Generated/InternalAzureChatDataSourceUserAssignedManagedIdentityAuthenticationOptions.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureChatDataSourceUserAssignedManagedIdentityAuthenticationOptions.cs similarity index 100% rename from .dotnet.azure/src/Generated/InternalAzureChatDataSourceUserAssignedManagedIdentityAuthenticationOptions.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureChatDataSourceUserAssignedManagedIdentityAuthenticationOptions.cs diff --git a/.dotnet.azure/src/Generated/InternalAzureContentFilterBlocklistIdResult.Serialization.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureContentFilterBlocklistIdResult.Serialization.cs similarity index 100% rename from .dotnet.azure/src/Generated/InternalAzureContentFilterBlocklistIdResult.Serialization.cs rename to 
.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureContentFilterBlocklistIdResult.Serialization.cs diff --git a/.dotnet.azure/src/Generated/InternalAzureContentFilterBlocklistIdResult.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureContentFilterBlocklistIdResult.cs similarity index 100% rename from .dotnet.azure/src/Generated/InternalAzureContentFilterBlocklistIdResult.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureContentFilterBlocklistIdResult.cs diff --git a/.dotnet.azure/src/Generated/InternalAzureContentFilterBlocklistResultDetail.Serialization.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureContentFilterBlocklistResultDetail.Serialization.cs similarity index 100% rename from .dotnet.azure/src/Generated/InternalAzureContentFilterBlocklistResultDetail.Serialization.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureContentFilterBlocklistResultDetail.Serialization.cs diff --git a/.dotnet.azure/src/Generated/InternalAzureContentFilterBlocklistResultDetail.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureContentFilterBlocklistResultDetail.cs similarity index 100% rename from .dotnet.azure/src/Generated/InternalAzureContentFilterBlocklistResultDetail.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureContentFilterBlocklistResultDetail.cs diff --git a/.dotnet.azure/src/Generated/InternalAzureContentFilterResultForPromptContentFilterResults.Serialization.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureContentFilterResultForPromptContentFilterResults.Serialization.cs similarity index 100% rename from .dotnet.azure/src/Generated/InternalAzureContentFilterResultForPromptContentFilterResults.Serialization.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureContentFilterResultForPromptContentFilterResults.Serialization.cs diff --git 
a/.dotnet.azure/src/Generated/InternalAzureContentFilterResultForPromptContentFilterResults.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureContentFilterResultForPromptContentFilterResults.cs similarity index 100% rename from .dotnet.azure/src/Generated/InternalAzureContentFilterResultForPromptContentFilterResults.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureContentFilterResultForPromptContentFilterResults.cs diff --git a/.dotnet.azure/src/Generated/InternalAzureContentFilterResultForPromptContentFilterResultsError.Serialization.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureContentFilterResultForPromptContentFilterResultsError.Serialization.cs similarity index 100% rename from .dotnet.azure/src/Generated/InternalAzureContentFilterResultForPromptContentFilterResultsError.Serialization.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureContentFilterResultForPromptContentFilterResultsError.Serialization.cs diff --git a/.dotnet.azure/src/Generated/InternalAzureContentFilterResultForPromptContentFilterResultsError.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureContentFilterResultForPromptContentFilterResultsError.cs similarity index 100% rename from .dotnet.azure/src/Generated/InternalAzureContentFilterResultForPromptContentFilterResultsError.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureContentFilterResultForPromptContentFilterResultsError.cs diff --git a/.dotnet.azure/src/Generated/InternalAzureCosmosDBChatDataSourceParameters.Serialization.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureCosmosDBChatDataSourceParameters.Serialization.cs similarity index 100% rename from .dotnet.azure/src/Generated/InternalAzureCosmosDBChatDataSourceParameters.Serialization.cs rename to 
.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureCosmosDBChatDataSourceParameters.Serialization.cs diff --git a/.dotnet.azure/src/Generated/InternalAzureCosmosDBChatDataSourceParameters.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureCosmosDBChatDataSourceParameters.cs similarity index 100% rename from .dotnet.azure/src/Generated/InternalAzureCosmosDBChatDataSourceParameters.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureCosmosDBChatDataSourceParameters.cs diff --git a/.dotnet.azure/src/Generated/InternalAzureMachineLearningIndexChatDataSourceParameters.Serialization.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureMachineLearningIndexChatDataSourceParameters.Serialization.cs similarity index 100% rename from .dotnet.azure/src/Generated/InternalAzureMachineLearningIndexChatDataSourceParameters.Serialization.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureMachineLearningIndexChatDataSourceParameters.Serialization.cs diff --git a/.dotnet.azure/src/Generated/InternalAzureMachineLearningIndexChatDataSourceParameters.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureMachineLearningIndexChatDataSourceParameters.cs similarity index 100% rename from .dotnet.azure/src/Generated/InternalAzureMachineLearningIndexChatDataSourceParameters.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureMachineLearningIndexChatDataSourceParameters.cs diff --git a/.dotnet.azure/src/Generated/InternalAzureOpenAIChatErrorInnerError.Serialization.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureOpenAIChatErrorInnerError.Serialization.cs similarity index 100% rename from .dotnet.azure/src/Generated/InternalAzureOpenAIChatErrorInnerError.Serialization.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureOpenAIChatErrorInnerError.Serialization.cs diff --git 
a/.dotnet.azure/src/Generated/InternalAzureOpenAIChatErrorInnerError.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureOpenAIChatErrorInnerError.cs similarity index 100% rename from .dotnet.azure/src/Generated/InternalAzureOpenAIChatErrorInnerError.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureOpenAIChatErrorInnerError.cs diff --git a/.dotnet.azure/src/Generated/InternalAzureOpenAIChatErrorInnerErrorCode.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureOpenAIChatErrorInnerErrorCode.cs similarity index 100% rename from .dotnet.azure/src/Generated/InternalAzureOpenAIChatErrorInnerErrorCode.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureOpenAIChatErrorInnerErrorCode.cs diff --git a/.dotnet.azure/src/Generated/InternalAzureOpenAIDalleErrorInnerError.Serialization.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureOpenAIDalleErrorInnerError.Serialization.cs similarity index 100% rename from .dotnet.azure/src/Generated/InternalAzureOpenAIDalleErrorInnerError.Serialization.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureOpenAIDalleErrorInnerError.Serialization.cs diff --git a/.dotnet.azure/src/Generated/InternalAzureOpenAIDalleErrorInnerError.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureOpenAIDalleErrorInnerError.cs similarity index 100% rename from .dotnet.azure/src/Generated/InternalAzureOpenAIDalleErrorInnerError.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureOpenAIDalleErrorInnerError.cs diff --git a/.dotnet.azure/src/Generated/InternalAzureOpenAIDalleErrorInnerErrorCode.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureOpenAIDalleErrorInnerErrorCode.cs similarity index 100% rename from .dotnet.azure/src/Generated/InternalAzureOpenAIDalleErrorInnerErrorCode.cs rename to 
.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureOpenAIDalleErrorInnerErrorCode.cs diff --git a/.dotnet.azure/src/Generated/InternalAzureSearchChatDataSourceParameters.Serialization.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureSearchChatDataSourceParameters.Serialization.cs similarity index 100% rename from .dotnet.azure/src/Generated/InternalAzureSearchChatDataSourceParameters.Serialization.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureSearchChatDataSourceParameters.Serialization.cs diff --git a/.dotnet.azure/src/Generated/InternalAzureSearchChatDataSourceParameters.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureSearchChatDataSourceParameters.cs similarity index 100% rename from .dotnet.azure/src/Generated/InternalAzureSearchChatDataSourceParameters.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureSearchChatDataSourceParameters.cs diff --git a/.dotnet.azure/src/Generated/InternalAzureSearchChatDataSourceParametersIncludeContext.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureSearchChatDataSourceParametersIncludeContext.cs similarity index 100% rename from .dotnet.azure/src/Generated/InternalAzureSearchChatDataSourceParametersIncludeContext.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalAzureSearchChatDataSourceParametersIncludeContext.cs diff --git a/.dotnet.azure/src/Generated/InternalElasticsearchChatDataSourceParameters.Serialization.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalElasticsearchChatDataSourceParameters.Serialization.cs similarity index 100% rename from .dotnet.azure/src/Generated/InternalElasticsearchChatDataSourceParameters.Serialization.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalElasticsearchChatDataSourceParameters.Serialization.cs diff --git 
a/.dotnet.azure/src/Generated/InternalElasticsearchChatDataSourceParameters.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalElasticsearchChatDataSourceParameters.cs similarity index 100% rename from .dotnet.azure/src/Generated/InternalElasticsearchChatDataSourceParameters.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalElasticsearchChatDataSourceParameters.cs diff --git a/.dotnet.azure/src/Generated/InternalPineconeChatDataSourceParameters.Serialization.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalPineconeChatDataSourceParameters.Serialization.cs similarity index 100% rename from .dotnet.azure/src/Generated/InternalPineconeChatDataSourceParameters.Serialization.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalPineconeChatDataSourceParameters.Serialization.cs diff --git a/.dotnet.azure/src/Generated/InternalPineconeChatDataSourceParameters.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalPineconeChatDataSourceParameters.cs similarity index 100% rename from .dotnet.azure/src/Generated/InternalPineconeChatDataSourceParameters.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalPineconeChatDataSourceParameters.cs diff --git a/.dotnet.azure/src/Generated/InternalUnknownAzureChatDataSource.Serialization.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalUnknownAzureChatDataSource.Serialization.cs similarity index 100% rename from .dotnet.azure/src/Generated/InternalUnknownAzureChatDataSource.Serialization.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalUnknownAzureChatDataSource.Serialization.cs diff --git a/.dotnet.azure/src/Generated/InternalUnknownAzureChatDataSource.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalUnknownAzureChatDataSource.cs similarity index 100% rename from .dotnet.azure/src/Generated/InternalUnknownAzureChatDataSource.cs rename to 
.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalUnknownAzureChatDataSource.cs diff --git a/.dotnet.azure/src/Generated/InternalUnknownAzureChatDataSourceAuthenticationOptions.Serialization.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalUnknownAzureChatDataSourceAuthenticationOptions.Serialization.cs similarity index 100% rename from .dotnet.azure/src/Generated/InternalUnknownAzureChatDataSourceAuthenticationOptions.Serialization.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalUnknownAzureChatDataSourceAuthenticationOptions.Serialization.cs diff --git a/.dotnet.azure/src/Generated/InternalUnknownAzureChatDataSourceAuthenticationOptions.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalUnknownAzureChatDataSourceAuthenticationOptions.cs similarity index 100% rename from .dotnet.azure/src/Generated/InternalUnknownAzureChatDataSourceAuthenticationOptions.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalUnknownAzureChatDataSourceAuthenticationOptions.cs diff --git a/.dotnet.azure/src/Generated/InternalUnknownAzureChatDataSourceVectorizationSource.Serialization.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalUnknownAzureChatDataSourceVectorizationSource.Serialization.cs similarity index 100% rename from .dotnet.azure/src/Generated/InternalUnknownAzureChatDataSourceVectorizationSource.Serialization.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalUnknownAzureChatDataSourceVectorizationSource.Serialization.cs diff --git a/.dotnet.azure/src/Generated/InternalUnknownAzureChatDataSourceVectorizationSource.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalUnknownAzureChatDataSourceVectorizationSource.cs similarity index 100% rename from .dotnet.azure/src/Generated/InternalUnknownAzureChatDataSourceVectorizationSource.cs rename to 
.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/InternalUnknownAzureChatDataSourceVectorizationSource.cs diff --git a/.dotnet.azure/src/Generated/PineconeChatDataSource.Serialization.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/PineconeChatDataSource.Serialization.cs similarity index 100% rename from .dotnet.azure/src/Generated/PineconeChatDataSource.Serialization.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/PineconeChatDataSource.Serialization.cs diff --git a/.dotnet.azure/src/Generated/PineconeChatDataSource.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/PineconeChatDataSource.cs similarity index 100% rename from .dotnet.azure/src/Generated/PineconeChatDataSource.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Generated/PineconeChatDataSource.cs diff --git a/.dotnet.azure/src/Utility/AzureOpenAIPipelineMessageBuilder.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Utility/AzureOpenAIPipelineMessageBuilder.cs similarity index 100% rename from .dotnet.azure/src/Utility/AzureOpenAIPipelineMessageBuilder.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Utility/AzureOpenAIPipelineMessageBuilder.cs diff --git a/.dotnet.azure/src/Utility/CustomSerializationHelpers.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Utility/CustomSerializationHelpers.cs similarity index 98% rename from .dotnet.azure/src/Utility/CustomSerializationHelpers.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Utility/CustomSerializationHelpers.cs index 5691e6a4a..23fc567fa 100644 --- a/.dotnet.azure/src/Utility/CustomSerializationHelpers.cs +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Utility/CustomSerializationHelpers.cs @@ -1,8 +1,9 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ #nullable enable -using System; using System.ClientModel.Primitives; -using System.Collections.Generic; using System.Text.Json; namespace Azure.AI.OpenAI; @@ -127,4 +128,4 @@ internal static void WriteSerializedAdditionalRawData(this Utf8JsonWriter writer } } } -} \ No newline at end of file +} diff --git a/.dotnet.azure/src/Utility/Generator/CodeGenClientAttribute.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Utility/Generator/CodeGenClientAttribute.cs similarity index 100% rename from .dotnet.azure/src/Utility/Generator/CodeGenClientAttribute.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Utility/Generator/CodeGenClientAttribute.cs diff --git a/.dotnet.azure/src/Utility/Generator/CodeGenMemberAttribute.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Utility/Generator/CodeGenMemberAttribute.cs similarity index 100% rename from .dotnet.azure/src/Utility/Generator/CodeGenMemberAttribute.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Utility/Generator/CodeGenMemberAttribute.cs diff --git a/.dotnet.azure/src/Utility/Generator/CodeGenModelAttribute.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Utility/Generator/CodeGenModelAttribute.cs similarity index 100% rename from .dotnet.azure/src/Utility/Generator/CodeGenModelAttribute.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Utility/Generator/CodeGenModelAttribute.cs diff --git a/.dotnet.azure/src/Utility/Generator/CodeGenSerializationAttribute.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Utility/Generator/CodeGenSerializationAttribute.cs similarity index 100% rename from .dotnet.azure/src/Utility/Generator/CodeGenSerializationAttribute.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Utility/Generator/CodeGenSerializationAttribute.cs diff --git a/.dotnet.azure/src/Utility/Generator/CodeGenSuppressAttribute.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Utility/Generator/CodeGenSuppressAttribute.cs similarity index 100% rename from 
.dotnet.azure/src/Utility/Generator/CodeGenSuppressAttribute.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Utility/Generator/CodeGenSuppressAttribute.cs diff --git a/.dotnet.azure/src/Utility/Generator/CodeGenTypeAttribute.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Utility/Generator/CodeGenTypeAttribute.cs similarity index 100% rename from .dotnet.azure/src/Utility/Generator/CodeGenTypeAttribute.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Utility/Generator/CodeGenTypeAttribute.cs diff --git a/.dotnet.azure/src/Utility/GenericActionPipelinePolicy.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Utility/GenericActionPipelinePolicy.cs similarity index 92% rename from .dotnet.azure/src/Utility/GenericActionPipelinePolicy.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Utility/GenericActionPipelinePolicy.cs index 9c67f0c76..79e5ccc30 100644 --- a/.dotnet.azure/src/Utility/GenericActionPipelinePolicy.cs +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Utility/GenericActionPipelinePolicy.cs @@ -26,7 +26,7 @@ public override void Process(PipelineMessage message, IReadOnlyList pipeline, int currentIndex) { _requestAction?.Invoke(message.Request); - await ProcessNextAsync(message, pipeline, currentIndex); + await ProcessNextAsync(message, pipeline, currentIndex).ConfigureAwait(false); _responseAction?.Invoke(message.Response); } } diff --git a/.dotnet.azure/src/Utility/Polyfill/System.Diagnostics.CodeAnalysis.ExperimentalAttribute.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Utility/Polyfill/System.Diagnostics.CodeAnalysis.ExperimentalAttribute.cs similarity index 100% rename from .dotnet.azure/src/Utility/Polyfill/System.Diagnostics.CodeAnalysis.ExperimentalAttribute.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Utility/Polyfill/System.Diagnostics.CodeAnalysis.ExperimentalAttribute.cs diff --git a/.dotnet.azure/src/Utility/Polyfill/System.Diagnostics.CodeAnalysis.SetsRequiredMembersAttribute.cs 
b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Utility/Polyfill/System.Diagnostics.CodeAnalysis.SetsRequiredMembersAttribute.cs similarity index 100% rename from .dotnet.azure/src/Utility/Polyfill/System.Diagnostics.CodeAnalysis.SetsRequiredMembersAttribute.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Utility/Polyfill/System.Diagnostics.CodeAnalysis.SetsRequiredMembersAttribute.cs diff --git a/.dotnet.azure/src/Utility/Polyfill/System.Runtime.CompilerServices.CompilerFeatureRequiredAttribute.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Utility/Polyfill/System.Runtime.CompilerServices.CompilerFeatureRequiredAttribute.cs similarity index 100% rename from .dotnet.azure/src/Utility/Polyfill/System.Runtime.CompilerServices.CompilerFeatureRequiredAttribute.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Utility/Polyfill/System.Runtime.CompilerServices.CompilerFeatureRequiredAttribute.cs diff --git a/.dotnet.azure/src/Utility/Polyfill/System.Runtime.CompilerServices.IsExternalInit.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Utility/Polyfill/System.Runtime.CompilerServices.IsExternalInit.cs similarity index 100% rename from .dotnet.azure/src/Utility/Polyfill/System.Runtime.CompilerServices.IsExternalInit.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Utility/Polyfill/System.Runtime.CompilerServices.IsExternalInit.cs diff --git a/.dotnet.azure/src/Utility/Polyfill/System.Runtime.CompilerServices.RequiredMemberAttribute.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Utility/Polyfill/System.Runtime.CompilerServices.RequiredMemberAttribute.cs similarity index 100% rename from .dotnet.azure/src/Utility/Polyfill/System.Runtime.CompilerServices.RequiredMemberAttribute.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/src/Utility/Polyfill/System.Runtime.CompilerServices.RequiredMemberAttribute.cs diff --git a/.dotnet.azure/tests/Assets/edit_sample_image.png 
b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Assets/edit_sample_image.png similarity index 100% rename from .dotnet.azure/tests/Assets/edit_sample_image.png rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Assets/edit_sample_image.png diff --git a/.dotnet.azure/tests/Assets/edit_sample_mask.png b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Assets/edit_sample_mask.png similarity index 100% rename from .dotnet.azure/tests/Assets/edit_sample_mask.png rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Assets/edit_sample_mask.png diff --git a/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Assets/fine_tuning.jsonl b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Assets/fine_tuning.jsonl new file mode 100644 index 000000000..2c345d259 --- /dev/null +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Assets/fine_tuning.jsonl @@ -0,0 +1,11 @@ +{"messages":[{"role":"system","content":"Given a sports headline, provide the following fields in a JSON dictionary, where applicable: \"player\" (full name), \"team\", \"sport\", and \"gender\". In the case there is more than one team return an array of that dictionary. Do not include any markdown characters such as ```json and ```"},{"role":"user","content":"Sources: Colts grant RB Taylor OK to seek trade"},{"role":"assistant","content":"{\n \"player\": \"Jonathan Taylor\",\n \"team\": \"Indianapolis Colts\",\n \"sport\": \"football\",\n \"gender\": \"male\"\n}"}]} +{"messages":[{"role":"system","content":"Given a sports headline, provide the following fields in a JSON dictionary, where applicable: \"player\" (full name), \"team\", \"sport\", and \"gender\". In the case there is more than one team return an array of that dictionary. 
Do not include any markdown characters such as ```json and ```"},{"role":"user","content":"OSU 'split down middle' on starting QB battle"},{"role":"assistant","content":"{\n \"team\": \"Ohio State University\",\n \"sport\": \"football\"\n}"}]} +{"messages":[{"role":"system","content":"Given a sports headline, provide the following fields in a JSON dictionary, where applicable: \"player\" (full name), \"team\", \"sport\", and \"gender\". In the case there is more than one team return an array of that dictionary. Do not include any markdown characters such as ```json and ```"},{"role":"user","content":"Christian McCaffrey given 2 year extension by 49ers"},{"role":"assistant","content":"{\n \"player\": \"Christian McCaffrey\",\n \"team\": \"San Francisco 49ers\",\n \"sport\": \"football\",\n \"gender\": \"male\"\n}"}]} +{"messages":[{"role":"system","content":"Given a sports headline, provide the following fields in a JSON dictionary, where applicable: \"player\" (full name), \"team\", \"sport\", and \"gender\". In the case there is more than one team return an array of that dictionary. Do not include any markdown characters such as ```json and ```"},{"role":"user","content":"Tucupita Marcano banned for life by MLB for betting on baseball"},{"role":"assistant","content":"{\n \"player\": \"Tucupita Marcano\",\n \"sport\": \"baseball\",\n \"gender\": \"male\"\n}"}]} +{"messages":[{"role":"system","content":"Given a sports headline, provide the following fields in a JSON dictionary, where applicable: \"player\" (full name), \"team\", \"sport\", and \"gender\". In the case there is more than one team return an array of that dictionary. Do not include any markdown characters such as ```json and ```"},{"role":"user","content":"Who will win the 2024 NBA finals? Predictions about the Celtics vs. 
Mavericks matchup"},{"role":"assistant","content":"[\n {\n \"team\": \"Celtics\",\n \"sport\": \"basketball\"\n },\n {\n \"team\": \"Mavericks\",\n \"sport\": \"basketball\"\n }\n]"}]} +{"messages":[{"role":"system","content":"Given a sports headline, provide the following fields in a JSON dictionary, where applicable: \"player\" (full name), \"team\", \"sport\", and \"gender\". In the case there is more than one team return an array of that dictionary. Do not include any markdown characters such as ```json and ```"},{"role":"user","content":"Pavleski will not play in 2024-2025 season"},{"role":"assistant","content":"{\n \"player\": \"Pavelski\",\n \"sport\": \"hockey\",\n \"gender\": \"male\"\n}"}]} +{"messages":[{"role":"system","content":"Given a sports headline, provide the following fields in a JSON dictionary, where applicable: \"player\" (full name), \"team\", \"sport\", and \"gender\". In the case there is more than one team return an array of that dictionary. Do not include any markdown characters such as ```json and ```"},{"role":"user","content":"Charges against Scottie Scheffler have been dropped after arrest"},{"role":"assistant","content":"{\n \"player\": \"Scottie Scheffler\",\n \"sport\": \"golf\",\n \"gender\": \"male\"\n}"}]} +{"messages":[{"role":"system","content":"Given a sports headline, provide the following fields in a JSON dictionary, where applicable: \"player\" (full name), \"team\", \"sport\", and \"gender\". In the case there is more than one team return an array of that dictionary. 
Do not include any markdown characters such as ```json and ```"},{"role":"user","content":"Perez picked by Red Bull for F1 2025 deal"},{"role":"assistant","content":"{\n \"player\": \"Perez\",\n \"team\": \"Red Bull\",\n \"sport\": \"F1\",\n \"gender\": \"male\"\n}"}]} +{"messages":[{"role":"system","content":"Given a sports headline, provide the following fields in a JSON dictionary, where applicable: \"player\" (full name), \"team\", \"sport\", and \"gender\". In the case there is more than one team return an array of that dictionary. Do not include any markdown characters such as ```json and ```"},{"role":"user","content":"DL Johnson III waived by 49ers"},{"role":"assistant","content":"{\n \"player\": \"DL Johnson III\",\n \"team\": \"49ers\",\n \"sport\": \"football\",\n \"gender\": \"male\"\n}"}]} +{"messages":[{"role":"system","content":"Given a sports headline, provide the following fields in a JSON dictionary, where applicable: \"player\" (full name), \"team\", \"sport\", and \"gender\". In the case there is more than one team return an array of that dictionary. Do not include any markdown characters such as ```json and ```"},{"role":"user","content":"Trevor Williams placed on injured list"},{"role":"assistant","content":"{\n \"player\": \"Trevor Williams\",\n \"sport\": \"baseball\",\n \"gender\": \"male\"\n}"}]} +{"messages":[{"role":"system","content":"Given a sports headline, provide the following fields in a JSON dictionary, where applicable: \"player\" (full name), \"team\", \"sport\", and \"gender\". In the case there is more than one team return an array of that dictionary. 
Do not include any markdown characters such as ```json and ```"},{"role":"user","content":"Coco Gauff, and Iga Swiatek will meet in French Open semis"},{"role":"assistant","content":"[\n {\n \"player\": \"Coco Gauff\",\n \"sport\": \"tennis\",\n \"gender\": \"female\"\n },\n {\n \"player\": \"Iga Swiatek\",\n \"sport\": \"tennis\",\n \"gender\": \"female\"\n }\n]"}]} \ No newline at end of file diff --git a/.dotnet.azure/tests/Assets/french.wav b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Assets/french.wav similarity index 100% rename from .dotnet.azure/tests/Assets/french.wav rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Assets/french.wav diff --git a/.dotnet.azure/tests/Assets/hello_world.m4a b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Assets/hello_world.m4a similarity index 100% rename from .dotnet.azure/tests/Assets/hello_world.m4a rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Assets/hello_world.m4a diff --git a/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Assets/playback_test_config.json b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Assets/playback_test_config.json new file mode 100644 index 000000000..d55e5d96e --- /dev/null +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Assets/playback_test_config.json @@ -0,0 +1,38 @@ +{ + "default": { + "endpoint": "https://Sanitized.openai.azure.com/", + "key": "Sanitized", + "deployment": "gpt-4-turbo", + "resource_group": "Sanitized", + "subscription_id": "Sanitized" + }, + "audio": { + "deployment": "whisper" + }, + "embedding": { + "deployment": "text-embedding-ada-002" + }, + "fine_tuning": { + "deployment": "gpt-35-turbo-0613", + "fine_tuned_model": "gpt-35-turbo-0613.ft-53f9c10199f84dfea3ec772341862ff5-azure-ai-openai-integration-test" + }, + "image": { + "deployment": "dall-e-3" + }, + "rate_limited_chat": { + "endpoint": "https://Sanitized.openai.azure.com/", + "key": "Sanitized", + "deployment": "gpt-35-turbo-low-quota" + }, + "search": { + "endpoint": 
"https://Sanitized.search.windows.net/", + "key": "Sanitized", + "index": "openaiwikisearchindex" + }, + "tts": { + "deployment": "tts" + }, + "vision": { + "deployment": "gpt-4-vision-preview" + } +} \ No newline at end of file diff --git a/.dotnet.azure/tests/Assets/speed-talking.wav b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Assets/speed-talking.wav similarity index 100% rename from .dotnet.azure/tests/Assets/speed-talking.wav rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Assets/speed-talking.wav diff --git a/.dotnet.azure/tests/Assets/stop_sign.png b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Assets/stop_sign.png similarity index 100% rename from .dotnet.azure/tests/Assets/stop_sign.png rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Assets/stop_sign.png diff --git a/.dotnet.azure/tests/Assets/variation_sample_image.png b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Assets/variation_sample_image.png similarity index 100% rename from .dotnet.azure/tests/Assets/variation_sample_image.png rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Assets/variation_sample_image.png diff --git a/.dotnet.azure/tests/AssistantTests.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/AssistantTests.cs similarity index 58% rename from .dotnet.azure/tests/AssistantTests.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/AssistantTests.cs index 70408f170..7e05d4cdc 100644 --- a/.dotnet.azure/tests/AssistantTests.cs +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/AssistantTests.cs @@ -3,40 +3,65 @@ #nullable disable -using Azure.AI.OpenAI.Assistants; +using System; +using System.ClientModel; +using System.Collections.Generic; +using System.Diagnostics; +using System.Linq; +using System.Text; +using System.Threading.Tasks; +using Azure.AI.OpenAI.Tests.Utils.Config; using OpenAI; using OpenAI.Assistants; using OpenAI.Files; +using OpenAI.TestFramework; +using OpenAI.TestFramework.Utils; using OpenAI.VectorStores; -using 
System.ClientModel; -using System.ClientModel.Primitives; -using System.Diagnostics; namespace Azure.AI.OpenAI.Tests; -#pragma warning disable OPENAI001 - -public class AssistantTests : TestBase +public class AssistantTests(bool isAsync) : AoaiTestBase(isAsync) { [Test] [Category("Smoke")] public void CanCreateClient() => Assert.That(GetTestClient(), Is.InstanceOf()); [Test] - public void BasicAssistantOperationsWork() + [Category("Smoke")] + public void VerifyClientOptionMutability() + { + AzureOpenAIClientOptions options = null; + Assert.DoesNotThrow(() => + options = new AzureOpenAIClientOptions() + { + ApplicationId = "init does not throw", + }); + Assert.DoesNotThrow(() => + options.ApplicationId = "set before freeze OK"); + AzureOpenAIClient azureClient = new( + new Uri("https://www.microsoft.com/placeholder"), + new ApiKeyCredential("placeholder"), + options); + Assert.Throws(() => + options.ApplicationId = "set after freeze throws"); + } + + [RecordedTest] + public async Task BasicAssistantOperationsWork() { AssistantClient client = GetTestClient(); - Assistant assistant = client.CreateAssistant("gpt-35-turbo-latest"); + string modelName = client.DeploymentOrThrow(); + Assistant assistant = await client.CreateAssistantAsync(modelName); Validate(assistant); Assert.That(assistant.Name, Is.Null.Or.Empty); - assistant = client.ModifyAssistant(assistant.Id, new AssistantModificationOptions() + assistant = await client.ModifyAssistantAsync(assistant.Id, new AssistantModificationOptions() { Name = "test assistant name", }); Assert.That(assistant.Name, Is.EqualTo("test assistant name")); - bool deleted = client.DeleteAssistant(assistant.Id); + bool deleted = await client.DeleteAssistantAsync(assistant.Id); Assert.That(deleted, Is.True); - assistant = client.CreateAssistant("gpt-35-turbo-latest", new AssistantCreationOptions() + assistant = await client.CreateAssistantAsync(modelName, new AssistantCreationOptions() { Metadata = { @@ -44,10 +69,10 @@ public void 
BasicAssistantOperationsWork() }, }); Validate(assistant); - Assistant retrievedAssistant = client.GetAssistant(assistant.Id); + Assistant retrievedAssistant = await client.GetAssistantAsync(assistant.Id); Assert.That(retrievedAssistant.Id, Is.EqualTo(assistant.Id)); Assert.That(retrievedAssistant.Metadata.TryGetValue("testkey", out string metadataValue) && metadataValue == "hello!"); - Assistant modifiedAssistant = client.ModifyAssistant(assistant.Id, new AssistantModificationOptions() + Assistant modifiedAssistant = await client.ModifyAssistantAsync(assistant.Id, new AssistantModificationOptions() { Metadata = { @@ -55,20 +80,20 @@ public void BasicAssistantOperationsWork() }, }); Assert.That(modifiedAssistant.Id, Is.EqualTo(assistant.Id)); - PageableCollection recentAssistants = client.GetAssistants(); - Assistant listedAssistant = recentAssistants.FirstOrDefault(pageItem => pageItem.Id == assistant.Id); - Assert.That(listedAssistant, Is.Not.Null); - Assert.That(listedAssistant.Metadata.TryGetValue("testkey", out string newMetadataValue) && newMetadataValue == "goodbye!"); + AsyncPageCollection recentAssistants = client.GetAssistantsAsync(); + Assistant firstAssistant = await recentAssistants.GetAllValuesAsync().FirstOrDefaultAsync(); + Assert.That(firstAssistant, Is.Not.Null); + Assert.That(firstAssistant.Metadata.TryGetValue("testkey", out string newMetadataValue) && newMetadataValue == "goodbye!"); } - [Test] - public void BasicThreadOperationsWork() + [RecordedTest] + public async Task BasicThreadOperationsWork() { AssistantClient client = GetTestClient(); - AssistantThread thread = client.CreateThread(); + AssistantThread thread = await client.CreateThreadAsync(); Validate(thread); Assert.That(thread.CreatedAt, Is.GreaterThan(s_2024)); - bool deleted = client.DeleteThread(thread.Id); + bool deleted = await client.DeleteThreadAsync(thread.Id); Assert.That(deleted, Is.True); ThreadCreationOptions options = new() @@ -78,12 +103,12 @@ public void 
BasicThreadOperationsWork() ["threadMetadata"] = "threadMetadataValue", } }; - thread = client.CreateThread(options); + thread = await client.CreateThreadAsync(options); Validate(thread); Assert.That(thread.Metadata.TryGetValue("threadMetadata", out string threadMetadataValue) && threadMetadataValue == "threadMetadataValue"); - AssistantThread retrievedThread = client.GetThread(thread.Id); + AssistantThread retrievedThread = await client.GetThreadAsync(thread.Id); Assert.That(retrievedThread.Id, Is.EqualTo(thread.Id)); - thread = client.ModifyThread(thread, new ThreadModificationOptions() + thread = await client.ModifyThreadAsync(thread, new ThreadModificationOptions() { Metadata = { @@ -93,26 +118,28 @@ public void BasicThreadOperationsWork() Assert.That(thread.Metadata.TryGetValue("threadMetadata", out threadMetadataValue) && threadMetadataValue == "newThreadMetadataValue"); } - [Test] - public void SettingResponseFormatWorks() + [RecordedTest] + public async Task SettingResponseFormatWorks() { AssistantClient client = GetTestClient(); - Assistant assistant = client.CreateAssistant("gpt-35-turbo-latest", new() + string modelName = client.DeploymentOrThrow(); + + Assistant assistant = await client.CreateAssistantAsync(modelName, new() { ResponseFormat = AssistantResponseFormat.JsonObject, }); Validate(assistant); Assert.That(assistant.ResponseFormat, Is.EqualTo(AssistantResponseFormat.JsonObject)); - assistant = client.ModifyAssistant(assistant, new() + assistant = await client.ModifyAssistantAsync(assistant, new() { ResponseFormat = AssistantResponseFormat.Text, }); Assert.That(assistant.ResponseFormat, Is.EqualTo(AssistantResponseFormat.Text)); - AssistantThread thread = client.CreateThread(); + AssistantThread thread = await client.CreateThreadAsync(); Validate(thread); - ThreadMessage message = client.CreateMessage(thread, ["Write some JSON for me!"]); + ThreadMessage message = await client.CreateMessageAsync(thread.Id, MessageRole.User, ["Write some JSON for 
me!"]); Validate(message); - ThreadRun run = client.CreateRun(thread, assistant, new() + ThreadRun run = await client.CreateRunAsync(thread, assistant, new() { ResponseFormat = AssistantResponseFormat.JsonObject, }); @@ -120,12 +147,13 @@ public void SettingResponseFormatWorks() Assert.That(run.ResponseFormat, Is.EqualTo(AssistantResponseFormat.JsonObject)); } - [TestCase] + [RecordedTest] public async Task StreamingToolCall() { AssistantClient client = GetTestClient(); - FunctionToolDefinition getWeatherTool = new("get_current_weather", "Gets the user's current weather"); - Assistant assistant = await client.CreateAssistantAsync("gpt-35-turbo-latest", new() + string modelName = client.DeploymentOrThrow(); + FunctionToolDefinition getWeatherTool = new("get_current_weather") { Description = "Gets the user's current weather" }; + Assistant assistant = await client.CreateAssistantAsync(modelName, new() { Tools = { getWeatherTool } }); @@ -135,12 +163,13 @@ public async Task StreamingToolCall() void Print(string message) => Console.WriteLine($"[{stopwatch.ElapsedMilliseconds,6}] {message}"); Print(" >>> Beginning call ... 
"); - AsyncResultCollection asyncResults = client.CreateThreadAndRunStreamingAsync( - assistant, - new() - { - InitialMessages = { new(["What should I wear outside right now?"]), }, - }); + + ThreadCreationOptions thrdOpt = new() + { + InitialMessages = { new(MessageRole.User, ["What should I wear outside right now?"]), }, + }; + AsyncCollectionResult asyncResults = client.CreateThreadAndRunStreamingAsync(assistant, thrdOpt); + Print(" >>> Starting enumeration ..."); ThreadRun run = null; @@ -148,7 +177,7 @@ public async Task StreamingToolCall() do { run = null; - List toolOutputs = []; + List toolOutputs = new(); await foreach (StreamingUpdate update in asyncResults) { string message = update.UpdateKind.ToString(); @@ -178,24 +207,29 @@ public async Task StreamingToolCall() } while (run?.Status.IsTerminal == false); } - [Test] - public void BasicMessageOperationsWork() + [RecordedTest] + public async Task BasicMessageOperationsWork() { + // TODO FIXME Can't currently delete messages on AOAI + bool aoaiDeleteBugFixed = false; + AssistantClient client = GetTestClient(); - AssistantThread thread = client.CreateThread(); + AssistantThread thread = await client.CreateThreadAsync(); Validate(thread); - ThreadMessage message = client.CreateMessage(thread, ["Hello, world!"]); + ThreadMessage message = await client.CreateMessageAsync(thread.Id, MessageRole.User, ["Hello, world!"]); Validate(message); Assert.That(message.CreatedAt, Is.GreaterThan(s_2024)); Assert.That(message.Content?.Count, Is.EqualTo(1)); Assert.That(message.Content[0], Is.Not.Null); Assert.That(message.Content[0].Text, Is.EqualTo("Hello, world!")); - // BUG: Can't currently delete messages on AOAI - bool deleted = client.DeleteMessage(message); - Assert.That(deleted, Is.True); + if (aoaiDeleteBugFixed) + { + bool deleted = await client.DeleteMessageAsync(message); + Assert.That(deleted, Is.True); + } - message = client.CreateMessage(thread, ["Goodbye, world!"], new MessageCreationOptions() + message = 
await client.CreateMessageAsync(thread.Id, MessageRole.User, ["Goodbye, world!"], new MessageCreationOptions() { Metadata = { @@ -205,10 +239,10 @@ public void BasicMessageOperationsWork() Validate(message); Assert.That(message.Metadata.TryGetValue("messageMetadata", out string metadataValue) && metadataValue == "messageMetadataValue"); - ThreadMessage retrievedMessage = client.GetMessage(thread.Id, message.Id); + ThreadMessage retrievedMessage = await client.GetMessageAsync(thread.Id, message.Id); Assert.That(retrievedMessage.Id, Is.EqualTo(message.Id)); - message = client.ModifyMessage(message, new MessageModificationOptions() + message = await client.ModifyMessageAsync(message, new MessageModificationOptions() { Metadata = { @@ -217,28 +251,33 @@ public void BasicMessageOperationsWork() }); Assert.That(message.Metadata.TryGetValue("messageMetadata", out metadataValue) && metadataValue == "newValue"); - PageableCollection messagePage = client.GetMessages(thread); - Assert.That(messagePage.Count, Is.EqualTo(2)); - // BUG: Can't currently delete messages - Assert.That(messagePage.Count, Is.EqualTo(1)); + var messagePage = await client.GetMessagesAsync(thread).ToListAsync(); + if (aoaiDeleteBugFixed) + { + Assert.That(messagePage.Count, Is.EqualTo(1)); + } + else + { + Assert.That(messagePage.Count, Is.EqualTo(2)); + } + Assert.That(messagePage.ElementAt(0).Id, Is.EqualTo(message.Id)); Assert.That(messagePage.ElementAt(0).Metadata.TryGetValue("messageMetadata", out metadataValue) && metadataValue == "newValue"); } - [Test] - public void ThreadWithInitialMessagesWorks() + [RecordedTest] + public async Task ThreadWithInitialMessagesWorks() { + const string userGreeting = "Hello, world!"; + const string userQuestion = "Can you describe why stop signs are the shape and color that they are?"; + AssistantClient client = GetTestClient(); ThreadCreationOptions options = new() { InitialMessages = { - new(["Hello, world!"]), - new( - [ - "Can you describe this image for me?", 
- MessageContent.FromImageUrl(new Uri("https://test.openai.com/image.png")) - ]) + new ThreadInitializationMessage(MessageRole.User, [userGreeting]), + new ThreadInitializationMessage(MessageRole.User, [ userQuestion ]) { Metadata = { @@ -247,52 +286,49 @@ public void ThreadWithInitialMessagesWorks() }, }, }; - AssistantThread thread = client.CreateThread(options); + AssistantThread thread = await client.CreateThreadAsync(options); Validate(thread); - PageableCollection messagePage = client.GetMessages(thread, resultOrder: ListOrder.OldestFirst); - List messageList = messagePage.ToList(); + List messageList = await client.GetMessagesAsync(thread, new() { Order = MessageCollectionOrder.Ascending }).ToListAsync(); Assert.That(messageList.Count, Is.EqualTo(2)); Assert.That(messageList[0].Role, Is.EqualTo(MessageRole.User)); Assert.That(messageList[0].Content?.Count, Is.EqualTo(1)); - Assert.That(messageList[0].Content[0].Text, Is.EqualTo("Hello, world!")); - Assert.That(messageList[1].Content?.Count, Is.EqualTo(2)); + Assert.That(messageList[0].Content[0].Text, Is.EqualTo(userGreeting)); Assert.That(messageList[1].Content[0], Is.Not.Null); - Assert.That(messageList[1].Content[0].Text, Is.EqualTo("Can you describe this image for me?")); - Assert.That(messageList[1].Content[1], Is.Not.Null); - Assert.That(messageList[1].Content[1].ImageUrl.AbsoluteUri, Is.EqualTo("https://test.openai.com/image.png")); + Assert.That(messageList[1].Content[0].Text, Is.EqualTo(userQuestion)); } - [Test] - public void BasicRunOperationsWork() + [RecordedTest] + public async Task BasicRunOperationsWork() { AssistantClient client = GetTestClient(); - Assistant assistant = client.CreateAssistant("gpt-35-turbo-latest"); + string modelName = client.DeploymentOrThrow(); + Assistant assistant = await client.CreateAssistantAsync(modelName); Validate(assistant); - AssistantThread thread = client.CreateThread(); + AssistantThread thread = await client.CreateThreadAsync(); Validate(thread); - 
PageableCollection runPage = client.GetRuns(thread.Id); + List runPage = await client.GetRunsAsync(thread.Id).ToListAsync(); Assert.That(runPage.Count, Is.EqualTo(0)); - ThreadMessage message = client.CreateMessage(thread.Id, ["Hello, assistant!"]); + ThreadMessage message = await client.CreateMessageAsync(thread.Id, MessageRole.User, ["Hello, assistant!"]); Validate(message); - Thread.Sleep(3000); - ThreadRun run = client.CreateRun(thread.Id, assistant.Id); + ThreadRun run = await client.CreateRunAsync(thread.Id, assistant.Id); Validate(run); Assert.That(run.Status, Is.EqualTo(RunStatus.Queued)); Assert.That(run.CreatedAt, Is.GreaterThan(s_2024)); - ThreadRun retrievedRun = client.GetRun(thread.Id, run.Id); + ThreadRun retrievedRun = await client.GetRunAsync(thread.Id, run.Id); Assert.That(retrievedRun.Id, Is.EqualTo(run.Id)); - runPage = client.GetRuns(thread.Id); + runPage = await client.GetRunsAsync(thread.Id).ToListAsync(); Assert.That(runPage.Count, Is.EqualTo(1)); Assert.That(runPage.ElementAt(0).Id, Is.EqualTo(run.Id)); - PageableCollection messages = client.GetMessages(thread); - Assert.That(messages.Count, Is.EqualTo(1)); + List messages = await client.GetMessagesAsync(thread).ToListAsync(); + Assert.That(messages.Count, Is.GreaterThanOrEqualTo(1)); + + run = await WaitUntilReturnLast( + run, + () => client.GetRunAsync(run), + r => r.Status.IsTerminal); + Assert.That(run.Status, Is.EqualTo(RunStatus.Completed)); - for (int i = 0; i < 10 && !run.Status.IsTerminal; i++) - { - Thread.Sleep(500); - run = client.GetRun(run); - } Assert.Multiple(() => { Assert.That(run.Status, Is.EqualTo(RunStatus.Completed)); @@ -302,7 +338,7 @@ public void BasicRunOperationsWork() Assert.That(run.FailedAt, Is.Null); Assert.That(run.IncompleteDetails, Is.Null); }); - messages = client.GetMessages(thread); + messages = await client.GetMessagesAsync(thread).ToListAsync(); Assert.That(messages.Count, Is.EqualTo(2)); Assert.That(messages.ElementAt(0).Role, 
Is.EqualTo(MessageRole.Assistant)); @@ -310,35 +346,35 @@ public void BasicRunOperationsWork() Assert.That(messages.ElementAt(1).Id, Is.EqualTo(message.Id)); } - [Test] - public void BasicRunStepFunctionalityWorks() + [RecordedTest] + public async Task BasicRunStepFunctionalityWorks() { AssistantClient client = GetTestClient(); - Assistant assistant = client.CreateAssistant("gpt-35-turbo-latest", new AssistantCreationOptions() + string modelName = client.DeploymentOrThrow(); + Assistant assistant = await client.CreateAssistantAsync(modelName, new AssistantCreationOptions() { Tools = { new CodeInterpreterToolDefinition() }, Instructions = "Call the code interpreter tool when asked to visualize mathematical concepts.", }); Validate(assistant); - AssistantThread thread = client.CreateThread(new() + AssistantThread thread = await client.CreateThreadAsync(new ThreadCreationOptions() { - InitialMessages = { new(["Please graph the equation y = 3x + 4"]), }, - }); + InitialMessages = { new(MessageRole.User, ["Please graph the equation y = 3x + 4"]), }, + }); Validate(thread); - ThreadRun run = client.CreateRun(thread, assistant); + ThreadRun run = await client.CreateRunAsync(thread, assistant); Validate(run); - while (!run.Status.IsTerminal) - { - Thread.Sleep(1000); - run = client.GetRun(run); - } + run = await WaitUntilReturnLast( + run, + () => client.GetRunAsync(run), + r => r.Status.IsTerminal); Assert.That(run.Status, Is.EqualTo(RunStatus.Completed)); Assert.That(run.Usage?.TotalTokens, Is.GreaterThan(0)); - PageableCollection runSteps = client.GetRunSteps(run); + List runSteps = await client.GetRunStepsAsync(run).ToListAsync(); Assert.That(runSteps.Count(), Is.GreaterThan(1)); Assert.Multiple(() => { @@ -363,11 +399,12 @@ public void BasicRunStepFunctionalityWorks() }); } - [Test] - public void FunctionToolsWork() + [RecordedTest] + public async Task FunctionToolsWork() { AssistantClient client = GetTestClient(); - Assistant assistant = 
client.CreateAssistant("gpt-35-turbo-latest", new AssistantCreationOptions() + string modelName = client.DeploymentOrThrow(); + Assistant assistant = await client.CreateAssistantAsync(modelName, new AssistantCreationOptions() { Tools = { @@ -397,11 +434,11 @@ public void FunctionToolsWork() Assert.That(responseToolDefinition?.FunctionName, Is.EqualTo("get_favorite_food_for_day_of_week")); Assert.That(responseToolDefinition?.Parameters, Is.Not.Null); - ThreadRun run = client.CreateThreadAndRun( + ThreadRun run = await client.CreateThreadAndRunAsync( assistant, new ThreadCreationOptions() { - InitialMessages = { new(["What should I eat on Thursday?"]) }, + InitialMessages = { new(MessageRole.User, ["What should I eat on Thursday?"]) }, }, new RunCreationOptions() { @@ -409,55 +446,59 @@ public void FunctionToolsWork() }); Validate(run); Console.WriteLine($" Run status right after creation: {run.Status}"); - for (int i = 0; i < 10 && !run.Status.IsTerminal; i++) - { - Thread.Sleep(500); - run = client.GetRun(run); - } + + // TODO FIXME: The underlying OpenAI code doesn't consider the "requires_action" status to be terminal even though it is. 
+ // Work around this here + run = await WaitUntilReturnLast( + run, + () => client.GetRunAsync(run), + r => r.Status.IsTerminal || r.Status.Equals(RunStatus.RequiresAction)); + Assert.That(run.Status, Is.EqualTo(RunStatus.RequiresAction)); Assert.That(run.RequiredActions?.Count, Is.EqualTo(1)); Assert.That(run.RequiredActions[0].ToolCallId, Is.Not.Null.Or.Empty); Assert.That(run.RequiredActions[0].FunctionName, Is.EqualTo("get_favorite_food_for_day_of_week")); Assert.That(run.RequiredActions[0].FunctionArguments, Is.Not.Null.Or.Empty); - run = client.SubmitToolOutputsToRun(run, [new(run.RequiredActions[0].ToolCallId, "tacos")]); + run = await client.SubmitToolOutputsToRunAsync(run, [new(run.RequiredActions[0].ToolCallId, "tacos")]); Assert.That(run.Status.IsTerminal, Is.False); - for (int i = 0; i < 10 && !run.Status.IsTerminal; i++) - { - Thread.Sleep(500); - run = client.GetRun(run); - } + run = await WaitUntilReturnLast( + run, + () => client.GetRunAsync(run), + r => r.Status.IsTerminal); Assert.That(run.Status, Is.EqualTo(RunStatus.Completed)); - PageableCollection messages = client.GetMessages(run.ThreadId, resultOrder: ListOrder.NewestFirst); + List messages = await client.GetMessagesAsync(run.ThreadId, new() { Order = MessageCollectionOrder.Descending }) + .ToListAsync(); Assert.That(messages.Count, Is.GreaterThan(1)); Assert.That(messages.ElementAt(0).Role, Is.EqualTo(MessageRole.Assistant)); Assert.That(messages.ElementAt(0).Content?[0], Is.Not.Null); Assert.That(messages.ElementAt(0).Content?[0].Text, Does.Contain("tacos")); } - [Test] - public void BasicFileSearchWorks() + [RecordedTest] + public async Task BasicFileSearchWorks() { // First, we need to upload a simple test file. 
AssistantClient client = GetTestClient(); - FileClient fileClient = GetChildTestClient(client); + string modelName = client.DeploymentOrThrow(); + FileClient fileClient = GetTestClientFrom(client); - OpenAIFileInfo testFile = fileClient.UploadFile( + OpenAIFileInfo testFile = await fileClient.UploadFileAsync( BinaryData.FromString(""" - This file describes the favorite foods of several people. + This file describes the favorite foods of several people. - Summanus Ferdinand: tacos - Tekakwitha Effie: pizza - Filip Carola: cake - """).ToStream(), + Summanus Ferdinand: tacos + Tekakwitha Effie: pizza + Filip Carola: cake + """), "favorite_foods.txt", FileUploadPurpose.Assistants); Validate(testFile); // Create an assistant, using the creation helper to make a new vector store - Assistant assistant = client.CreateAssistant("gpt-35-turbo-latest", new() + Assistant assistant = await client.CreateAssistantAsync(modelName, new() { Tools = { new FileSearchToolDefinition() }, ToolResources = new() @@ -466,7 +507,7 @@ This file describes the favorite foods of several people. { NewVectorStores = { - new VectorStoreCreationHelper([testFile.Id]), + new VectorStoreCreationHelper([testFile]), } } } @@ -477,7 +518,7 @@ This file describes the favorite foods of several people. ValidateById(createdVectorStoreId); // Modify an assistant to use the existing vector store - assistant = client.ModifyAssistant(assistant, new AssistantModificationOptions() + assistant = await client.ModifyAssistantAsync(assistant, new AssistantModificationOptions() { ToolResources = new() { @@ -491,9 +532,9 @@ This file describes the favorite foods of several people. 
Assert.That(assistant.ToolResources.FileSearch.VectorStoreIds[0], Is.EqualTo(createdVectorStoreId)); // Create a thread with an override vector store - AssistantThread thread = client.CreateThread(new ThreadCreationOptions() + AssistantThread thread = await client.CreateThreadAsync(new ThreadCreationOptions() { - InitialMessages = { new(["Using the files you have available, what's Filip's favorite food?"]) }, + InitialMessages = { new(MessageRole.User, ["Using the files you have available, what's Filip's favorite food?"]) }, ToolResources = new() { FileSearch = new() @@ -511,7 +552,7 @@ This file describes the favorite foods of several people. ValidateById(createdVectorStoreId); // Ensure that modifying the thread with an existing vector store works - thread = client.ModifyThread(thread, new ThreadModificationOptions() + thread = await client.ModifyThreadAsync(thread, new ThreadModificationOptions() { ToolResources = new() { @@ -524,58 +565,67 @@ This file describes the favorite foods of several people. 
Assert.That(thread.ToolResources?.FileSearch?.VectorStoreIds, Has.Count.EqualTo(1)); Assert.That(thread.ToolResources.FileSearch.VectorStoreIds[0], Is.EqualTo(createdVectorStoreId)); - ThreadRun run = client.CreateRun(thread, assistant); + ThreadRun run = await client.CreateRunAsync(thread, assistant); Validate(run); - do - { - Thread.Sleep(1000); - run = client.GetRun(run); - } while (run?.Status.IsTerminal == false); + run = await WaitUntilReturnLast( + run, + () => client.GetRunAsync(run), + r => r.Status.IsTerminal); Assert.That(run.Status, Is.EqualTo(RunStatus.Completed)); - PageableCollection messages = client.GetMessages(thread, resultOrder: ListOrder.NewestFirst); - foreach (ThreadMessage message in messages) + AsyncPageCollection messages = client.GetMessagesAsync(thread, new() { Order = MessageCollectionOrder.Descending }); + int numPages = 0; + int numThreads = 0; + bool hasCake = false; + await foreach (PageResult page in messages) { + numPages++; + foreach (ThreadMessage message in page.Values) + { + numThreads++; foreach (MessageContent content in message.Content) { Console.WriteLine(content.Text); + hasCake |= content.Text?.ToLowerInvariant().Contains("cake") == true; foreach (TextAnnotation annotation in content.TextAnnotations) { Console.WriteLine($" --> From file: {annotation.InputFileId}, replacement: {annotation.TextToReplace}"); } } } - Assert.That(messages.Count() > 1); - Assert.That(messages.Any(message => message.Content.Any(content => content.Text.ToLower().Contains("cake")))); + } + + Assert.That(numPages, Is.GreaterThan(0)); + Assert.That(numThreads, Is.GreaterThan(0)); + Assert.That(hasCake, Is.True); } - [Test] + [RecordedTest] public async Task StreamingRunWorks() { AssistantClient client = GetTestClient(); - Assistant assistant = await client.CreateAssistantAsync("gpt-35-turbo-latest"); + string modelName = client.DeploymentOrThrow(); + Assistant assistant = await client.CreateAssistantAsync(modelName); Validate(assistant); - 
AssistantThread thread = await client.CreateThreadAsync(new() + AssistantThread thread = await client.CreateThreadAsync(new ThreadCreationOptions() { - InitialMessages = { new(["Hello there, assistant! How are you today?"]), }, + InitialMessages = { new(MessageRole.User, ["Hello there, assistant! How are you today?"]), }, }); Validate(thread); - Stopwatch stopwatch = Stopwatch.StartNew(); - void Print(string message) => Console.WriteLine($"[{stopwatch.ElapsedMilliseconds,6}] {message}"); - - AsyncResultCollection streamingResult - = client.CreateRunStreamingAsync(thread.Id, assistant.Id); + AsyncCollectionResult streamingResult = client.CreateRunStreamingAsync(thread.Id, assistant.Id); - Print(">>> Connected <<<"); + StringBuilder content = new(); + DateTimeOffset? lastUpdate = null; + StreamingUpdateReason? lastUpdateReason = null; await foreach (StreamingUpdate update in streamingResult) { - string message = $"{update.UpdateKind} "; if (update is RunUpdate runUpdate) { - DateTimeOffset? time = update.UpdateKind switch + lastUpdateReason = runUpdate.UpdateKind; + lastUpdate = update.UpdateKind switch { StreamingUpdateReason.RunCreated => runUpdate.Value.CreatedAt, StreamingUpdateReason.RunQueued => runUpdate.Value.StartedAt, @@ -583,20 +633,21 @@ AsyncResultCollection streamingResult StreamingUpdateReason.RunCompleted => runUpdate.Value.CompletedAt, _ => null, }; - message += $"at {time}"; } if (update is MessageContentUpdate contentUpdate) { - if (contentUpdate.Role.HasValue) - { - message += $"[{contentUpdate.Role}]"; - } - message += $"[{contentUpdate.MessageIndex}] {contentUpdate.Text}"; + // TODO FIXME: The OpenAI library code is currently incorrectly returning a MessageRole.User value here. 
+ // It should instead be null or at least Assistant + //Assert.That(contentUpdate.Role, Is.Null.Or.EqualTo(MessageRole.Assistant)); + Assert.That(contentUpdate.Text, Is.Not.Null); // can be empty string + content.Append(contentUpdate.Text); } - Print(message); } - Print(">>> Done <<<"); + + Assert.That(lastUpdateReason, Is.EqualTo(StreamingUpdateReason.RunCompleted)); + Assert.That(lastUpdate, Is.Not.Null.And.GreaterThan(s_2024)); + Assert.That(content, Has.Length.GreaterThan(0)); } private static readonly DateTimeOffset s_2024 = new(2024, 1, 1, 0, 0, 0, TimeSpan.Zero); -} \ No newline at end of file +} diff --git a/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/AudioTests.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/AudioTests.cs new file mode 100644 index 000000000..5312b8083 --- /dev/null +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/AudioTests.cs @@ -0,0 +1,167 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using System; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.IO; +using System.Threading.Tasks; +using OpenAI.Audio; +using OpenAI.TestFramework; + +namespace Azure.AI.OpenAI.Tests; + +public class AudioTests(bool isAsync) : AoaiTestBase(isAsync) +{ + [Test] + [Category("Smoke")] + public void CanCreateClient() => Assert.That(GetTestClient(), Is.InstanceOf()); + + [RecordedTest] + public async Task TranscriptionWorks() + { + AudioClient audioClient = GetTestClient(); + AudioTranscription transcription = await audioClient.TranscribeAudioAsync(Assets.HelloWorld.RelativePath); + Assert.That(transcription?.Text, Is.Not.Null.Or.Empty); + } + + [RecordedTest] + public async Task TranslationWorks() + { + AudioClient audioClient = GetTestClient(); + AudioTranslation translation = await audioClient.TranslateAudioAsync(Assets.WhisperFrenchDescription.RelativePath); + Assert.That(translation?.Text, Is.Not.Null.Or.Empty); + } + + [RecordedTest] + public async Task 
TextToSpeechWorks() + { + AudioClient audioClient = GetTestClient("tts"); + BinaryData ttsData = await audioClient.GenerateSpeechAsync( + "hello, world!", + GeneratedSpeechVoice.Alloy); + Assert.That(ttsData, Is.Not.Null); + } + + [RecordedTest] + [TestCase(AudioTranscriptionFormat.Simple)] + [TestCase(AudioTranscriptionFormat.Verbose)] + [TestCase(AudioTranscriptionFormat.Srt)] + [TestCase(AudioTranscriptionFormat.Vtt)] + [TestCase(null)] + public async Task TranscriptionWorksWithFormat(AudioTranscriptionFormat? format) + { + AudioClient client = GetTestClient(); + + var audioInfo = Assets.HelloWorld; + using Stream audioFileStream = File.OpenRead(audioInfo.RelativePath); + AudioTranscriptionOptions options = new() + { + Temperature = 0.25f, + ResponseFormat = format, + }; + + AudioTranscription transcription = await client.TranscribeAudioAsync( + audioFileStream, audioInfo.Name, options); + + Assert.That(transcription, Is.Not.Null); + Assert.That(transcription.Text, Is.Not.Null.Or.Empty); + + if (format == AudioTranscriptionFormat.Simple) + { + Assert.That(transcription.Duration, Is.Null); + Assert.That(transcription.Language, Is.Null); + Assert.That(transcription.Segments, Is.Null.Or.Empty); + } + else if (format == AudioTranscriptionFormat.Verbose) + { + Assert.That(transcription.Duration, Is.GreaterThan(TimeSpan.FromSeconds(0))); + Assert.That(transcription.Language, Is.Not.Null.Or.Empty); + Assert.That(transcription.Segments, Is.Not.Null.Or.Empty); + + TranscribedSegment firstSegment = transcription.Segments[0]; + Assert.That(firstSegment, Is.Not.Null); + Assert.That(firstSegment.Id, Is.EqualTo(0)); + Assert.That(firstSegment.Start, Is.GreaterThanOrEqualTo(TimeSpan.FromSeconds(0))); + Assert.That(firstSegment.End, Is.GreaterThan(firstSegment.Start)); + Assert.That(firstSegment.Text, Is.Not.Null.Or.Empty); + } + } + + [RecordedTest] + [TestCase(AudioTimestampGranularities.Default)] + [TestCase(AudioTimestampGranularities.Word)] + 
[TestCase(AudioTimestampGranularities.Segment)] + [TestCase(AudioTimestampGranularities.Word | AudioTimestampGranularities.Segment)] + public async Task TranscriptionTimestampGranularitiesWork(AudioTimestampGranularities granularityFlags) + { + AudioClient client = GetTestClient(); + var audioInfo = Assets.HelloWorld; + using Stream audioFileStream = File.OpenRead(audioInfo.RelativePath); + AudioTranscriptionOptions options = new() + { + Granularities = granularityFlags, + ResponseFormat = AudioTranscriptionFormat.Verbose, + }; + ClientResult transcriptionResult = await client.TranscribeAudioAsync( + audioFileStream, + audioInfo.Name, + options); + PipelineResponse response = transcriptionResult.GetRawResponse(); + Assert.That(response, Is.Not.Null); + AudioTranscription transcription = transcriptionResult.Value; + Assert.That(transcription.Text, Is.Not.Null.Or.Empty); + Assert.That( + transcription.Words?.Count > 0, + Is.EqualTo(granularityFlags.HasFlag(AudioTimestampGranularities.Word)), + "Word-level information should appear (and only appear) when requested"); + Assert.That( + transcription.Segments?.Count > 0, + Is.EqualTo(granularityFlags.HasFlag(AudioTimestampGranularities.Segment) || granularityFlags == AudioTimestampGranularities.Default), + "Segment-level information should appear (and only appear) when requested or when no flags were provided"); + } + + [RecordedTest] + [TestCase(AudioTranslationFormat.Simple)] + [TestCase(AudioTranslationFormat.Verbose)] + [TestCase(AudioTranslationFormat.Srt)] + [TestCase(AudioTranslationFormat.Vtt)] + [TestCase(null)] + public async Task TranslationWorksWithFormat(AudioTranslationFormat? 
format) + { + AudioClient client = GetTestClient(); + + var audioInfo = Assets.WhisperFrenchDescription; + using Stream audioFileStream = File.OpenRead(audioInfo.RelativePath); + AudioTranslationOptions options = new() + { + ResponseFormat = format, + }; + + AudioTranslation translation = await client.TranslateAudioAsync( + audioFileStream, audioInfo.Name, options); + + Assert.That(translation, Is.Not.Null); + Assert.That(translation.Text, Is.Not.Null.Or.Empty); + + if (format == AudioTranslationFormat.Simple) + { + Assert.That(translation.Duration, Is.Null); + Assert.That(translation.Language, Is.Null); + Assert.That(translation.Segments, Is.Null.Or.Empty); + } + else if (format == AudioTranslationFormat.Verbose) + { + Assert.That(translation.Duration, Is.GreaterThan(TimeSpan.FromSeconds(0))); + Assert.That(translation.Language, Is.Not.Null.Or.Empty); + Assert.That(translation.Segments, Is.Not.Null.Or.Empty); + + TranscribedSegment firstSegment = translation.Segments[0]; + Assert.That(firstSegment, Is.Not.Null); + Assert.That(firstSegment.Id, Is.EqualTo(0)); + Assert.That(firstSegment.Start, Is.GreaterThanOrEqualTo(TimeSpan.FromSeconds(0))); + Assert.That(firstSegment.End, Is.GreaterThan(firstSegment.Start)); + Assert.That(firstSegment.Text, Is.Not.Null.Or.Empty); + } + } +} diff --git a/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Azure.AI.OpenAI.Tests.csproj b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Azure.AI.OpenAI.Tests.csproj new file mode 100644 index 000000000..5cc3cb985 --- /dev/null +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Azure.AI.OpenAI.Tests.csproj @@ -0,0 +1,61 @@ + + + + $(RequiredTargetFrameworks) + + + $(NoWarn);CS1591;CS8002;SA1402;SA1507;SA1508;SA1633;SA1028;SA1505;OPENAI001;AOAI001 + preview + enable + + + + + + + + + + + + + + + + + + + + + + + + + + + PreserveNewest + + + Never + + + + + + Utils\Polyfill\%(RecursiveDir)\%(Filename).cs + + + + + + + <_Parameter1>TestProjectSourceBasePath + 
<_Parameter2>$(MSBuildThisFileDirectory) + + + + diff --git a/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/BatchTests.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/BatchTests.cs new file mode 100644 index 000000000..357c022d1 --- /dev/null +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/BatchTests.cs @@ -0,0 +1,222 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using System; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.IO; +using System.Linq; +using System.Net.Http; +using System.Text.Json; +using System.Threading.Tasks; +using Azure.AI.OpenAI.Tests.Models; +using Azure.AI.OpenAI.Tests.Utils; +using Azure.AI.OpenAI.Tests.Utils.Config; +using OpenAI.Batch; +using OpenAI.Chat; +using OpenAI.Embeddings; +using OpenAI.Files; +using OpenAI.TestFramework; +using OpenAI.TestFramework.Mocks; +using OpenAI.TestFramework.Utils; + +namespace Azure.AI.OpenAI.Tests; + +public class BatchTests : AoaiTestBase +{ + public BatchTests(bool isAsync) : base(isAsync) + { } + + [Test] + [Category("Smoke")] + public void CanCreateClient() => Assert.That(GetTestClient(), Is.InstanceOf()); + + [RecordedTest] + [Ignore("Azure OpenAI does not yet support batch file uploads")] + public async Task SimpleBatchCompletionsTest() + { + BatchClient batchClient = GetTestClient(new TestClientOptions(AzureOpenAIClientOptions.ServiceVersion.V2024_06_01)); + await using BatchOperations ops = new(this, batchClient); + + // Create the batch operations to send and upload them + ops.ChatClient.CompleteChat([new SystemChatMessage("You are a saccharine AI"), new UserChatMessage("Tell me about yourself")]); + ops.ChatClient.CompleteChat([new UserChatMessage("Give me a large random number")]); + Assert.That(ops.Operations, Has.Count.EqualTo(2)); + string inputFileId = await ops.UploadBatchFileAsync(); + + // Create the batch operation + using var requestContent = new 
BatchOptions() + { + InputFileId = inputFileId, + Endpoint = ops.Operations.Select(o => o.Url).Distinct().First(), + Metadata = + { + [ "description" ] = "Azure OpenAI .Net SDK integration test framework " + nameof(SimpleBatchCompletionsTest), + } + }.ToBinaryContent(); + + ClientResult response = await batchClient.CreateBatchAsync(requestContent); + BatchObject batchObj = ExtractAndValidateBatchObj(response); + + // Poll until we've completed, failed, or were canceled + while ("completed" != batchObj.Status) + { + response = await batchClient.GetBatchAsync(batchObj.Id, new()); + batchObj = ExtractAndValidateBatchObj(response); + } + + Assert.That(batchObj.OutputFileID, Is.Not.Null.Or.Empty); + BinaryData outputData = await ops.DownloadAndValidateResultAsync(batchObj.OutputFileID!); + var parsedOutput = BatchResult.From(outputData); + Assert.That(parsedOutput, Is.Not.Null); + Assert.That(parsedOutput, Has.Count.EqualTo(ops.Operations.Count)); + for (int i = 0; i < parsedOutput.Count; i++) + { + Assert.That(parsedOutput[i].CustomId, Is.EqualTo(ops.Operations[i].CustomId), "Wrong custom ID at index {0}", i); + var completion = parsedOutput[i].Response!; + Assert.That(completion, Is.Not.Null); + Assert.That(completion.Role, Is.EqualTo(ChatMessageRole.Assistant)); + Assert.That(completion.Content, Has.Count.EqualTo(1)); + Assert.That(completion.Content[0].Kind, Is.EqualTo(ChatMessageContentPartKind.Text)); + Assert.That(completion.Content[0].Text, Is.Not.Null.Or.Empty); + } + + } + + #region helper methods + + private BinaryData ValidateHasRawJsonResponse(ClientResult result) + { + Assert.That(result, Is.Not.Null); + PipelineResponse response = result.GetRawResponse(); + Assert.That(response, Is.Not.Null); + Assert.That(response.Status, Is.GreaterThanOrEqualTo(200).And.LessThan(300)); + Assert.That(response.Headers.GetFirstOrDefault("Content-Type"), Does.StartWith("application/json")); + + return response.Content; + } + + private void ValidateBatchResult(BatchObject 
batchObj) + { + Assert.That(batchObj, Is.Not.Null); + Assert.That(batchObj.Id, Is.Not.Null.Or.Empty); + Assert.That(batchObj.Status, Is.Not.Null); + Assert.That(batchObj.Status, Is.AnyOf("validating", "in_progress", "finalizing", "completed")); + } + + private BatchObject ExtractAndValidateBatchObj(ClientResult result) + { + var binaryData = ValidateHasRawJsonResponse(result); + var batchObj = BatchObject.From(binaryData); + ValidateBatchResult(batchObj); + return batchObj; + } + + #endregion + + #region helper classes + + private class BatchOperations : IAsyncDisposable + { + private MockHttpMessageHandler _handler; + private List _operations; + private string? _uploadId; + private FileClient _fileClient; + + public BatchOperations(AoaiTestBase testBase, BatchClient batchClient) + { + _handler = new(MockHttpMessageHandler.ReturnEmptyJson); + _handler.OnRequest += HandleRequest; + _operations = new(); + + BatchFileName = "batch-" + Guid.NewGuid().ToString("D") + ".json"; + + _fileClient = testBase.GetTestClientFrom(batchClient); + + // Generate the fake pipeline to capture requests and save them to a file later + AzureOpenAIClient fakeTopLevel = new AzureOpenAIClient( + new Uri("https://not.a.real.endpoint.fake"), + new ApiKeyCredential("not.a.real.key"), + new() { Transport = _handler.Transport }); + + ChatClient = fakeTopLevel.GetChatClient(testBase.TestConfig.GetConfig().DeploymentOrThrow("chat client")); + EmbeddingClient = fakeTopLevel.GetEmbeddingClient(testBase.TestConfig.GetConfig().DeploymentOrThrow("embedding client")); + } + + public string BatchFileName { get; } + public IReadOnlyList Operations => _operations; + public ChatClient ChatClient { get; } + public EmbeddingClient EmbeddingClient { get; } + + public async Task UploadBatchFileAsync() + { + if (Operations.Count == 0) + { + throw new InvalidOperationException(); + } + + using MemoryStream stream = new MemoryStream(); + JsonHelpers.Serialize(stream, _operations, JsonOptions.OpenAIJsonOptions); + 
stream.Seek(0, SeekOrigin.Begin); + var data = BinaryData.FromStream(stream); + + using var content = BinaryContent.Create(data); + + OpenAIFileInfo file = await _fileClient.UploadFileAsync(data, BatchFileName, FileUploadPurpose.Batch); + _uploadId = file.Id; + Assert.That(_uploadId, Is.Not.Null.Or.Empty); + return _uploadId; + } + + public async Task DownloadAndValidateResultAsync(string outputId) + { + ClientResult response = await _fileClient.DownloadFileAsync(outputId); + Assert.That(response, Is.Not.Null); + Assert.That(response.Value, Is.Not.Null); + return response.Value; + } + + public async ValueTask DisposeAsync() + { + // clean up any files + if (_uploadId != null) + { + await _fileClient.DeleteFileAsync(_uploadId); + } + + _handler.OnRequest -= HandleRequest; + _handler.Dispose(); + _operations.Clear(); + } + + private void HandleRequest(object? sender, CapturedRequest request) + { + JsonElement? element = null; + if (request.Content != null) + { + using var json = JsonDocument.Parse(request.Content.ToMemory()); + element = json.RootElement.Clone(); + } + + BatchOperation operation = new() + { + Method = request.Method, + Url = request.Uri?.AbsolutePath ?? string.Empty, + Body = element + }; + + _operations.Add(operation); + } + + public class BatchOperation + { + public string CustomId { get; } = Guid.NewGuid().ToString(); + public HttpMethod Method { get; init; } = HttpMethod.Get; + public string Url { get; init; } = string.Empty; + public JsonElement? Body { get; init; } + } + } + + #endregion +} diff --git a/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/ChatTests.Functions.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/ChatTests.Functions.cs new file mode 100644 index 000000000..ebe881c6a --- /dev/null +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/ChatTests.Functions.cs @@ -0,0 +1,280 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +using System; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text; +using System.Text.Json; +using System.Threading.Tasks; +using OpenAI.Chat; +using OpenAI.TestFramework; + +namespace Azure.AI.OpenAI.Tests; + +public partial class ChatTests +{ + [Obsolete] + private static readonly ChatFunction FUNCTION_TEMPERATURE = new( + "get_future_temperature", + "requests the anticipated future temperature at a provided location to help inform advice about topics like choice of attire", + BinaryData.FromString( + """ + { + "type": "object", + "properties": { + "locationName": { + "type": "string", + "description": "the name or brief description of a location for weather information" + }, + "date": { + "type": "string", + "description": "the day, month, and year for which to retrieve weather information" + } + } + } + """)); + + public enum FunctionCallTestType + { + Auto, + None, + Function, + } + + [RecordedTest] + [TestCase(FunctionCallTestType.None)] + [TestCase(FunctionCallTestType.Auto)] + [TestCase(FunctionCallTestType.Function)] + [Obsolete] + public async Task SimpleFunctionCallWorks(FunctionCallTestType functionCallType) + { + ChatClient client = GetTestClient(); + + List messages = new() + { + new SystemChatMessage("You are a helpful assistant."), + new UserChatMessage("What should I wear in Honolulu next Thursday?") + }; + var requestOptions = new ChatCompletionOptions() + { + FunctionChoice = functionCallType switch + { + FunctionCallTestType.Auto => ChatFunctionChoice.Auto, + FunctionCallTestType.None => ChatFunctionChoice.None, + FunctionCallTestType.Function => new ChatFunctionChoice(FUNCTION_TEMPERATURE), + _ => throw new NotImplementedException(), + }, + Functions = { FUNCTION_TEMPERATURE }, + MaxTokens = 512, + }; + + ClientResult response = await client.CompleteChatAsync(messages, requestOptions); + Assert.That(response, Is.Not.Null); + + ChatCompletion completion = response.Value; + 
Assert.IsNotNull(completion); + Assert.That(completion.Id, Is.Not.Null.Or.Empty); + + ContentFilterResultForPrompt filter = completion.GetContentFilterResultForPrompt(); + Assert.IsNotNull(filter); + Assert.That(filter.SelfHarm, Is.Not.Null); + Assert.That(filter.SelfHarm.Filtered, Is.False); + Assert.That(filter.SelfHarm.Severity, Is.EqualTo(ContentFilterSeverity.Safe)); + + if (functionCallType == FunctionCallTestType.None) + { + Assert.That(completion.FinishReason, Is.EqualTo(ChatFinishReason.Stop)); + Assert.That(completion.FunctionCall, Is.Null); + + Assert.That(completion.Content, Has.Count.GreaterThan(0)); + Assert.That(completion.Content, Has.All.Not.Null); + + ChatMessageContentPart content = completion.Content[0]; + Assert.That(content.Kind, Is.EqualTo(ChatMessageContentPartKind.Text)); + Assert.That(content.Text, Is.Not.Null.Or.Empty); + + // test complete, as we were merely validating that we didn't get what we shouldn't + return; + } + + // TODO old tests look for stop reason of function_call for both auto and function, but the service currently returns "stop" + // for function + if (functionCallType == FunctionCallTestType.Auto) + { + Assert.That(completion.FinishReason, Is.EqualTo(ChatFinishReason.FunctionCall)); + } + else + { + Assert.That(completion.FinishReason, Is.EqualTo(ChatFinishReason.Stop)); + } + + Assert.That(completion.Content, Has.Count.EqualTo(0)); + + Assert.That(completion.FunctionCall, Is.Not.Null); + Assert.That(completion.FunctionCall.FunctionName, Is.EqualTo(FUNCTION_TEMPERATURE.FunctionName)); + Assert.That(completion.FunctionCall.FunctionArguments, Is.Not.Null); + var parsedArgs = JsonSerializer.Deserialize(completion.FunctionCall.FunctionArguments, SERIALIZER_OPTIONS)!; + Assert.That(parsedArgs, Is.Not.Null); + Assert.That(parsedArgs.LocationName, Is.Not.Null.Or.Empty); + Assert.That(parsedArgs.Date, Is.Not.Null.Or.Empty); + + // Complete the function call + messages.Add(new AssistantChatMessage(completion.FunctionCall)); + 
messages.Add(new FunctionChatMessage(FUNCTION_TEMPERATURE.FunctionName, JsonSerializer.Serialize(new + { + temperature = 31, + unit = "celsius" + }))); + + requestOptions = new() + { + Functions = { FUNCTION_TEMPERATURE }, + MaxTokens = requestOptions.MaxTokens, + }; + + completion = await client.CompleteChatAsync(messages, requestOptions); + Assert.That(completion, Is.Not.Null); + Assert.That(completion.FinishReason, Is.EqualTo(ChatFinishReason.Stop)); + + ContentFilterResultForResponse responseFilter = completion.GetContentFilterResultForResponse(); + Assert.That(responseFilter, Is.Not.Null); + Assert.That(responseFilter.Hate, Is.Not.Null); + Assert.That(responseFilter.Hate.Severity, Is.EqualTo(ContentFilterSeverity.Safe)); + Assert.That(responseFilter.Hate.Filtered, Is.False); + + Assert.That(completion.Content, Has.Count.GreaterThan(0)); + Assert.That(completion.Content[0], Is.Not.Null); + Assert.That(completion.Content[0].Text, Is.Not.Null.Or.Empty); + Assert.That(completion.Content[0].Kind, Is.EqualTo(ChatMessageContentPartKind.Text)); + } + + [RecordedTest] + [TestCase(FunctionCallTestType.None)] + [TestCase(FunctionCallTestType.Auto)] + [TestCase(FunctionCallTestType.Function)] + [Obsolete] + public async Task SimpleFunctionCallWorksStreaming(FunctionCallTestType functionCallType) + { + StringBuilder content = new(); + bool foundPromptFilter = false; + bool foundResponseFilter = false; + string? 
functionName = null; + StringBuilder functionArgs = new(); + + ChatClient client = GetTestClient(); + + List messages = new() + { + new SystemChatMessage("You are a helpful assistant."), + new UserChatMessage("What should I wear in Honolulu next Thursday?") + }; + var requestOptions = new ChatCompletionOptions() + { + FunctionChoice = functionCallType switch + { + FunctionCallTestType.Auto => ChatFunctionChoice.Auto, + FunctionCallTestType.None => ChatFunctionChoice.None, + FunctionCallTestType.Function => new ChatFunctionChoice(FUNCTION_TEMPERATURE), + _ => throw new NotImplementedException(), + }, + Functions = { FUNCTION_TEMPERATURE }, + MaxTokens = 512, + }; + + Action validateUpdate = (update) => + { + Assert.That(update.ContentUpdate, Is.Not.Null); + Assert.That(update.ContentUpdate, Has.All.Not.Null); + + if (update.FunctionCallUpdate != null) + { + Assert.That(update.FunctionCallUpdate.FunctionName, Is.Null.Or.EqualTo(FUNCTION_TEMPERATURE.FunctionName)); + functionName ??= update.FunctionCallUpdate.FunctionName; + + Assert.That(update.FunctionCallUpdate.FunctionArgumentsUpdate, Is.Not.Null); + functionArgs.Append(update.FunctionCallUpdate.FunctionArgumentsUpdate); + } + + foreach (var part in update.ContentUpdate) + { + Assert.That(part.Kind, Is.EqualTo(ChatMessageContentPartKind.Text)); + Assert.That(part.Text, Is.Not.Null); // Could be empty string + + content.Append(part.Text); + } + + var promptFilter = update.GetContentFilterResultForPrompt(); + if (!foundPromptFilter && promptFilter?.Hate != null) + { + Assert.That(promptFilter.Hate.Filtered, Is.False); + Assert.That(promptFilter.Hate.Severity, Is.EqualTo(ContentFilterSeverity.Safe)); + foundPromptFilter = true; + } + + var responseFilter = update.GetContentFilterResultForResponse(); + if (!foundResponseFilter && responseFilter?.Hate != null) + { + Assert.That(responseFilter.Hate.Filtered, Is.False); + Assert.That(responseFilter.Hate.Severity, Is.EqualTo(ContentFilterSeverity.Safe)); + 
foundResponseFilter = true; + } + }; + + AsyncCollectionResult response = client.CompleteChatStreamingAsync(messages, requestOptions); + Assert.That(response, Is.Not.Null); + + await foreach (StreamingChatCompletionUpdate update in response) + { + validateUpdate(update); + } + + Assert.That(foundPromptFilter, Is.True); + + if (functionCallType != FunctionCallTestType.None) + { + Assert.That(functionName, Is.Not.Null); + var parsedArgs = JsonSerializer.Deserialize(functionArgs.ToString(), SERIALIZER_OPTIONS)!; + Assert.That(parsedArgs, Is.Not.Null); + Assert.That(parsedArgs.LocationName, Is.Not.Null.Or.Empty); + Assert.That(parsedArgs.Date, Is.Not.Null.Or.Empty); + + // TODO FIXME: There isn't a clear or obvious way to pass the assitant function message back to the service, and the constructors that allow + // us manual control are internal. So let's use JSON. + var converted = ModelReaderWriter.Read(BinaryData.FromString(JsonSerializer.Serialize(new { name = functionName, arguments = functionArgs.ToString() }))); + messages.Add(new AssistantChatMessage(converted)); + messages.Add(new FunctionChatMessage(FUNCTION_TEMPERATURE.FunctionName, JsonSerializer.Serialize(new + { + temperature = 31, + unit = "celsius" + }))); + + requestOptions = new() + { + Functions = { FUNCTION_TEMPERATURE }, + MaxTokens = requestOptions.MaxTokens, + }; + + content.Clear(); + foundPromptFilter = false; + foundResponseFilter = false; + functionName = null; + functionArgs.Clear(); + + response = client.CompleteChatStreamingAsync(messages, requestOptions); + Assert.That(response, Is.Not.Null); + + await foreach (StreamingChatCompletionUpdate update in response) + { + validateUpdate(update); + } + } + + Assert.That(foundPromptFilter, Is.True); + Assert.That(foundResponseFilter, Is.True); + Assert.That(functionName, Is.Null); + Assert.That(functionArgs, Has.Length.EqualTo(0)); + Assert.That(content.ToString(), Is.Not.Null.Or.Empty); + } +} diff --git 
a/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/ChatTests.Tools.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/ChatTests.Tools.cs new file mode 100644 index 000000000..b226ae725 --- /dev/null +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/ChatTests.Tools.cs @@ -0,0 +1,327 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using System; +using System.ClientModel; +using System.Collections.Generic; +using System.Text; +using System.Text.Json; +using System.Threading.Tasks; +using OpenAI.Chat; +using OpenAI.TestFramework; + +namespace Azure.AI.OpenAI.Tests +{ + public partial class ChatTests + { + private static readonly JsonSerializerOptions SERIALIZER_OPTIONS = new() + { + PropertyNameCaseInsensitive = true, + PropertyNamingPolicy = JsonNamingPolicy.CamelCase + }; + + private const string TOOL_TEMPERATURE_NAME = "get_future_temperature"; + private static readonly ChatTool TOOL_TEMPERATURE = ChatTool.CreateFunctionTool( + TOOL_TEMPERATURE_NAME, + "requests the anticipated future temperature at a provided location to help inform advice about topics like choice of attire", + BinaryData.FromString( + """ + { + "type": "object", + "properties": { + "locationName": { + "type": "string", + "description": "the name or brief description of a location for weather information" + }, + "date": { + "type": "string", + "description": "the day, month, and year for which to retrieve weather information" + } + } + } + """)); + + private class TemperatureFunctionRequestArguments + { + public string? LocationName { get; set; } + public string? 
Date { get; set; } + } + + public enum ToolChoiceTestType + { + None, + Auto, + Tool, + Required + } + + [RecordedTest] + [TestCase(ToolChoiceTestType.None)] + [TestCase(ToolChoiceTestType.Auto)] + [TestCase(ToolChoiceTestType.Tool)] + [TestCase(ToolChoiceTestType.Required, Ignore = "This seems to be considered invalid")] + public async Task SimpleToolWorks(ToolChoiceTestType toolChoice) + { + ChatClient client = GetTestClient(); + + List messages = new() + { + new SystemChatMessage("You are a helpful assistant."), + new UserChatMessage("What should I wear in Honolulu next Thursday?") + }; + var requestOptions = new ChatCompletionOptions() + { + ToolChoice = toolChoice switch + { + ToolChoiceTestType.None => ChatToolChoice.CreateNoneChoice(), + ToolChoiceTestType.Auto => ChatToolChoice.CreateAutoChoice(), + ToolChoiceTestType.Tool => ChatToolChoice.CreateFunctionChoice(TOOL_TEMPERATURE_NAME), + ToolChoiceTestType.Required => ChatToolChoice.CreateRequiredChoice(), + _ => throw new NotImplementedException(), + }, + Tools = { TOOL_TEMPERATURE }, + MaxTokens = 512, + }; + + ClientResult response = await client.CompleteChatAsync(messages, requestOptions); + Assert.That(response, Is.Not.Null); + + ChatCompletion completion = response.Value; + Assert.IsNotNull(completion); + Assert.That(completion.Id, Is.Not.Null.Or.Empty); + + ContentFilterResultForPrompt filter = completion.GetContentFilterResultForPrompt(); + Assert.IsNotNull(filter); + Assert.That(filter.SelfHarm, Is.Not.Null); + Assert.That(filter.SelfHarm.Filtered, Is.False); + Assert.That(filter.SelfHarm.Severity, Is.EqualTo(ContentFilterSeverity.Safe)); + + if (toolChoice == ToolChoiceTestType.None) + { + Assert.That(completion.FinishReason, Is.EqualTo(ChatFinishReason.Stop)); + Assert.That(completion.ToolCalls, Has.Count.EqualTo(0)); + + Assert.That(completion.Content, Has.Count.GreaterThan(0)); + Assert.That(completion.Content, Has.All.Not.Null); + + ChatMessageContentPart content = completion.Content[0]; + 
Assert.That(content.Kind, Is.EqualTo(ChatMessageContentPartKind.Text)); + Assert.That(content.Text, Is.Not.Null.Or.Empty); + + // test complete, as we were merely validating that we didn't get what we shouldn't + return; + } + + // TODO old tests look for stop reason of function_call for both auto and function, but the service currently returns "stop" + // for function + if (toolChoice == ToolChoiceTestType.Auto) + { + Assert.That(completion.FinishReason, Is.EqualTo(ChatFinishReason.ToolCalls)); + } + else + { + Assert.That(completion.FinishReason, Is.EqualTo(ChatFinishReason.Stop)); + } + + Assert.That(completion.Content, Has.Count.EqualTo(0)); + Assert.That(completion.ToolCalls, Has.Count.EqualTo(1)); + Assert.That(completion.ToolCalls, Has.All.Not.Null); + + ChatToolCall toolCall = completion.ToolCalls[0]; + Assert.That(toolCall.Id, Is.Not.Null.Or.Empty); + Assert.That(toolCall.Kind, Is.EqualTo(ChatToolCallKind.Function)); + Assert.That(toolCall.FunctionName, Is.EqualTo(TOOL_TEMPERATURE_NAME)); + Assert.That(toolCall.FunctionArguments, Is.Not.Null); + var parsedArgs = JsonSerializer.Deserialize(toolCall.FunctionArguments, SERIALIZER_OPTIONS)!; + Assert.That(parsedArgs, Is.Not.Null); + Assert.That(parsedArgs.LocationName, Is.Not.Null.Or.Empty); + Assert.That(parsedArgs.Date, Is.Not.Null.Or.Empty); + + // Complete the tool call + messages.Add(new AssistantChatMessage([toolCall])); + messages.Add(new ToolChatMessage(toolCall.Id, JsonSerializer.Serialize(new + { + temperature = 31, + unit = "celsius" + }))); + + requestOptions = new() + { + Tools = { TOOL_TEMPERATURE }, + MaxTokens = requestOptions.MaxTokens + }; + + completion = await client.CompleteChatAsync(messages, requestOptions); + Assert.That(completion, Is.Not.Null); + Assert.That(completion.FinishReason, Is.EqualTo(ChatFinishReason.Stop)); + + ContentFilterResultForPrompt promptFilter = completion.GetContentFilterResultForPrompt(); + Assert.That(promptFilter, Is.Not.Null); + Assert.That(promptFilter.Hate, 
Is.Not.Null); + Assert.That(promptFilter.Hate.Severity, Is.EqualTo(ContentFilterSeverity.Safe)); + Assert.That(promptFilter.Hate.Filtered, Is.False); + + ContentFilterResultForResponse responseFilter = completion.GetContentFilterResultForResponse(); + Assert.That(responseFilter, Is.Not.Null); + Assert.That(responseFilter.Hate, Is.Not.Null); + Assert.That(responseFilter.Hate.Severity, Is.EqualTo(ContentFilterSeverity.Safe)); + Assert.That(responseFilter.Hate.Filtered, Is.False); + + Assert.That(completion.Content, Has.Count.GreaterThan(0)); + Assert.That(completion.Content, Has.All.Not.Null); + Assert.That(completion.Content[0].Text, Is.Not.Null.Or.Empty); + Assert.That(completion.Content[0].Kind, Is.EqualTo(ChatMessageContentPartKind.Text)); + } + + [RecordedTest] + [TestCase(ToolChoiceTestType.None)] + [TestCase(ToolChoiceTestType.Auto)] + [TestCase(ToolChoiceTestType.Tool)] + [TestCase(ToolChoiceTestType.Required, Ignore = "This seems to be considered invalid")] + public async Task SimpleToolWorksStreaming(ToolChoiceTestType toolChoice) + { + StringBuilder content = new(); + bool foundPromptFilter = false; + bool foundResponseFilter = false; + string? toolId = null; + string? 
toolName = null; + StringBuilder toolArgs = new(); + + ChatClient client = GetTestClient(); + + List messages = new() + { + new SystemChatMessage("You are a helpful assistant."), + new UserChatMessage("What should I wear in Honolulu next Thursday?") + }; + var requestOptions = new ChatCompletionOptions() + { + ToolChoice = toolChoice switch + { + ToolChoiceTestType.None => ChatToolChoice.CreateNoneChoice(), + ToolChoiceTestType.Auto => ChatToolChoice.CreateAutoChoice(), + ToolChoiceTestType.Tool => ChatToolChoice.CreateFunctionChoice(TOOL_TEMPERATURE_NAME), + ToolChoiceTestType.Required => ChatToolChoice.CreateRequiredChoice(), + _ => throw new NotImplementedException(), + }, + Tools = { TOOL_TEMPERATURE }, + MaxTokens = 512, + }; + + Action validateUpdate = (update) => + { + Assert.That(update.ContentUpdate, Is.Not.Null); + Assert.That(update.ContentUpdate, Has.All.Not.Null); + Assert.That(update.ToolCallUpdates, Is.Not.Null); + Assert.That(update.ToolCallUpdates, Has.All.Not.Null); + + if (update.ToolCallUpdates.Count > 0) + { + Assert.That(update.ToolCallUpdates, Has.Count.EqualTo(1)); + + StreamingChatToolCallUpdate toolUpdate = update.ToolCallUpdates[0]; + Assert.That(toolUpdate.Index, Is.EqualTo(0)); + Assert.That(toolUpdate.Id, Is.Null.Or.Not.Empty); + toolId ??= toolUpdate.Id; + Assert.That(toolUpdate.FunctionName, Is.Null.Or.EqualTo(TOOL_TEMPERATURE_NAME)); + toolName ??= toolUpdate.FunctionName; + + Assert.That(toolUpdate.FunctionArgumentsUpdate, Is.Not.Null); + toolArgs.Append(toolUpdate.FunctionArgumentsUpdate); + } + + foreach (var part in update.ContentUpdate) + { + Assert.That(part.Kind, Is.EqualTo(ChatMessageContentPartKind.Text)); + Assert.That(part.Text, Is.Not.Null); // Could be empty string + + content.Append(part.Text); + } + + var promptFilter = update.GetContentFilterResultForPrompt(); + if (!foundPromptFilter && promptFilter?.Hate != null) + { + Assert.That(promptFilter.Hate.Filtered, Is.False); + Assert.That(promptFilter.Hate.Severity, 
Is.EqualTo(ContentFilterSeverity.Safe)); + foundPromptFilter = true; + } + + var responseFilter = update.GetContentFilterResultForResponse(); + if (!foundResponseFilter && responseFilter?.Hate != null) + { + Assert.That(responseFilter.Hate.Filtered, Is.False); + Assert.That(responseFilter.Hate.Severity, Is.EqualTo(ContentFilterSeverity.Safe)); + foundResponseFilter = true; + } + }; + + AsyncCollectionResult response = client.CompleteChatStreamingAsync(messages, requestOptions); + Assert.That(response, Is.Not.Null); + + await foreach (StreamingChatCompletionUpdate update in response) + { + validateUpdate(update); + } + + Assert.That(foundPromptFilter, Is.True); + + if (toolChoice != ToolChoiceTestType.None) + { + Assert.That(content, Has.Length.EqualTo(0)); + Assert.That(toolId, Is.Not.Null); + Assert.That(toolName, Is.Not.Null); + Assert.That(toolArgs, Has.Length.GreaterThan(0)); + var parsedArgs = JsonSerializer.Deserialize(toolArgs.ToString(), SERIALIZER_OPTIONS)!; + Assert.That(parsedArgs, Is.Not.Null); + Assert.That(parsedArgs.LocationName, Is.Not.Null.Or.Empty); + Assert.That(parsedArgs.Date, Is.Not.Null.Or.Empty); + + // Complete the tool call + messages.Add( + new AssistantChatMessage( + [ + ChatToolCall.CreateFunctionToolCall( + toolId, + toolName, + toolArgs.ToString() + ) + ] + ) + ); + messages.Add(new ToolChatMessage(toolId, JsonSerializer.Serialize(new + { + temperature = 31, + unit = "celsius" + }))); + + requestOptions = new() + { + Tools = { TOOL_TEMPERATURE }, + MaxTokens = requestOptions.MaxTokens + }; + + content.Clear(); + foundPromptFilter = false; + foundResponseFilter = false; + toolId = null; + toolName = null; + toolArgs.Clear(); + + response = client.CompleteChatStreamingAsync(messages, requestOptions); + Assert.That(response, Is.Not.Null); + + await foreach (StreamingChatCompletionUpdate update in response) + { + validateUpdate(update); + } + } + + Assert.That(foundPromptFilter, Is.True); + Assert.That(foundResponseFilter, Is.True); + 
Assert.That(content.ToString(), Is.Not.Null.Or.Empty); + Assert.That(toolId, Is.Null); + Assert.That(toolName, Is.Null); + Assert.That(toolArgs, Has.Length.EqualTo(0)); + } + } +} diff --git a/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/ChatTests.Vision.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/ChatTests.Vision.cs new file mode 100644 index 000000000..cd7afbe53 --- /dev/null +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/ChatTests.Vision.cs @@ -0,0 +1,137 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using System; +using System.ClientModel; +using System.IO; +using System.Text; +using System.Threading.Tasks; +using OpenAI.Chat; +using OpenAI.TestFramework; + +namespace Azure.AI.OpenAI.Tests +{ + public partial class ChatTests + { + [RecordedTest] + [TestCase(true)] + [TestCase(false)] + public async Task ChatWithImages(bool useUri) + { + var imageAsset = Assets.DogAndCat; + ChatClient client = GetTestClient("vision"); + + ChatMessageContentPart imagePart; + if (useUri) + { + imagePart = ChatMessageContentPart.CreateImagePart( + imageAsset.Url, ChatImageDetailLevel.Low); + } + else + { + using var stream = File.OpenRead(imageAsset.RelativePath); + var imageData = BinaryData.FromStream(stream); + + imagePart = ChatMessageContentPart.CreateImagePart( + imageData, imageAsset.MimeType, ChatImageDetailLevel.Low); + } + + ChatMessage[] messages = + [ + new SystemChatMessage("You are a helpful assistant that helps describe images."), + new UserChatMessage(imagePart, ChatMessageContentPart.CreateTextPart("describe this image")) + ]; + + ChatCompletionOptions options = new() + { + MaxTokens = 2048, + }; + + var response = await client.CompleteChatAsync(messages, options); + Assert.That(response, Is.Not.Null); + + Assert.That(response.Value.Id, Is.Not.Null.Or.Empty); + Assert.That(response.Value.CreatedAt, Is.GreaterThan(START_2024)); + Assert.That(response.Value.FinishReason, 
Is.EqualTo(ChatFinishReason.Stop)); + Assert.That(response.Value.Role, Is.EqualTo(ChatMessageRole.Assistant)); + Assert.That(response.Value.Usage, Is.Not.Null); + Assert.That(response.Value.Usage.InputTokens, Is.GreaterThan(10)); + Assert.That(response.Value.Usage.OutputTokens, Is.GreaterThan(10)); + Assert.That(response.Value.Usage.TotalTokens, Is.GreaterThan(20)); + + Assert.That(response.Value.Content, Has.Count.EqualTo(1)); + ChatMessageContentPart choice = response.Value.Content[0]; + Assert.That(choice.Kind, Is.EqualTo(ChatMessageContentPartKind.Text)); + Assert.That(choice.Text, Is.Not.Null.Or.Empty); + Assert.That(choice.Text.ToLowerInvariant(), Does.Contain("dog").Or.Contain("cat")); + + // TODO FIXME: Some models (e.g. gpt-4o either randomly return prompt filters with some missing entries) + var promptFilter = response.Value.GetContentFilterResultForPrompt(); + Assert.That(promptFilter, Is.Not.Null); + //Assert.That(promptFilter.Hate, Is.Not.Null); + //Assert.That(promptFilter.Hate.Filtered, Is.False); + //Assert.That(promptFilter.Hate.Severity, Is.EqualTo(ContentFilterSeverity.Safe)); + + var responseFilter = response.Value.GetContentFilterResultForResponse(); + Assert.That(responseFilter, Is.Not.Null); + Assert.That(responseFilter.Hate, Is.Not.Null); + Assert.That(responseFilter.Hate.Filtered, Is.False); + Assert.That(responseFilter.Hate.Severity, Is.EqualTo(ContentFilterSeverity.Safe)); + } + + [RecordedTest] + [TestCase(true)] + [TestCase(false)] + public async Task ChatWithImagesStreaming(bool useUri) + { + bool foundPromptFilter = false; + bool foundResponseFilter = false; + StringBuilder content = new(); + + ChatClient client = GetTestClient("vision"); + + ChatMessageContentPart imagePart; + var imageAsset = Assets.DogAndCat; + if (useUri) + { + imagePart = ChatMessageContentPart.CreateImagePart( + imageAsset.Url, ChatImageDetailLevel.Low); + } + else + { + using var stream = File.OpenRead(imageAsset.RelativePath); + var imageData = 
BinaryData.FromStream(stream); + + imagePart = ChatMessageContentPart.CreateImagePart( + imageData, imageAsset.MimeType, ChatImageDetailLevel.Low); + } + + ChatMessage[] messages = + [ + new SystemChatMessage("You are a helpful assistant that helps describe images."), + new UserChatMessage(imagePart, ChatMessageContentPart.CreateTextPart("describe this image")) + ]; + + ChatCompletionOptions options = new() + { + MaxTokens = 2048, + }; + + AsyncCollectionResult response = client.CompleteChatStreamingAsync(messages, options); + Assert.That(response, Is.Not.Null); + + await foreach (StreamingChatCompletionUpdate update in response) + { + ValidateUpdate(update, content, ref foundPromptFilter, ref foundResponseFilter); + } + + // TODO FIXME: gpt-4o models seem to return inconsistent prompt filters, so skip this for now + //Assert.That(foundPromptFilter, Is.True); + Assert.That(foundResponseFilter, Is.True); + Assert.That(content, Has.Length.GreaterThan(0)); + + string c = content.ToString().ToLowerInvariant(); + Assert.That(c, Does.Contain("dog").Or.Contain("cat")); + } + } +} diff --git a/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/ChatTests.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/ChatTests.cs new file mode 100644 index 000000000..4831d1df8 --- /dev/null +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/ChatTests.cs @@ -0,0 +1,575 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License.
+ +using System; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Diagnostics; +using System.Linq; +using System.Net.Http; +using System.Reflection; +using System.Text; +using System.Threading.Tasks; +using Azure.AI.OpenAI.Chat; +using Azure.AI.OpenAI.Tests.Utils.Config; +using OpenAI.Chat; +using OpenAI.TestFramework; +using OpenAI.TestFramework.Mocks; +using OpenAI.TestFramework.Utils; + +namespace Azure.AI.OpenAI.Tests; + +public partial class ChatTests : AoaiTestBase +{ + public ChatTests(bool isAsync) : base(isAsync) + { } + + #region General tests + + [Test] + [Category("Smoke")] + public async Task DefaultUserAgentStringWorks() + { + using MockHttpMessageHandler pipeline = new(MockHttpMessageHandler.ReturnEmptyJson); + + Uri endpoint = new Uri("https://www.bing.com/"); + string apiKey = "not-a-real-one"; + string model = "ignore"; + + AzureOpenAIClient topLevel = new( + endpoint, + new ApiKeyCredential(apiKey), + new AzureOpenAIClientOptions() + { + Transport = pipeline.Transport + }); + + ChatClient client = WrapClient(topLevel.GetChatClient(model)); + + await client.CompleteChatAsync([new UserChatMessage("Hello")]); + + Assert.That(pipeline.Requests, Is.Not.Empty); + + var request = pipeline.Requests[0]; + Assert.That(request.Method, Is.EqualTo(HttpMethod.Post)); + Assert.That(request.Uri?.GetLeftPart(UriPartial.Authority), Is.EqualTo(endpoint.GetLeftPart(UriPartial.Authority))); + Assert.That(request.Headers.GetValueOrDefault("api-key")?.FirstOrDefault(), Is.EqualTo(apiKey)); + Assert.That(request.Headers.GetValueOrDefault("User-Agent")?.FirstOrDefault(), Does.Contain("azsdk-net-AI.OpenAI/")); + Assert.That(request.Content, Is.Not.Null); + var jsonString = request.Content.ToString(); + Assert.That(jsonString, Is.Not.Null.Or.Empty); + Assert.That(jsonString, Does.Contain("\"messages\"").And.Contain("\"model\"").And.Contain(model)); + } + + [Test] + [Category("Smoke")] + public void 
DataSourceSerializationWorks() + { + AzureSearchChatDataSource source = new() + { + Endpoint = new Uri("https://some-search-resource.azure.com"), + Authentication = DataSourceAuthentication.FromApiKey("test-api-key"), + IndexName = "index-name-here", + FieldMappings = new() + { + ContentFieldNames = { "hello" }, + TitleFieldName = "hi", + }, + AllowPartialResult = true, + QueryType = DataSourceQueryType.Simple, + OutputContextFlags = DataSourceOutputContextFlags.AllRetrievedDocuments | DataSourceOutputContextFlags.Citations, + VectorizationSource = DataSourceVectorizer.FromEndpoint( + new Uri("https://my-embedding.com"), + DataSourceAuthentication.FromApiKey("embedding-api-key")), + }; + dynamic serialized = ModelReaderWriter.Write(source).ToDynamicFromJson(); + Assert.That(serialized?.type?.ToString(), Is.EqualTo("azure_search")); + Assert.That(serialized?.parameters?.authentication?.type?.ToString(), Is.EqualTo("api_key")); + Assert.That(serialized?.parameters?.authentication?.key?.ToString(), Does.Contain("test")); + Assert.That(serialized?.parameters?.index_name?.ToString(), Is.EqualTo("index-name-here")); + Assert.That(serialized?.parameters?.fields_mapping?.content_fields?[0]?.ToString(), Is.EqualTo("hello")); + Assert.That(serialized?.parameters?.fields_mapping?.title_field?.ToString(), Is.EqualTo("hi")); + Assert.That(bool.TryParse(serialized?.parameters?.allow_partial_result?.ToString(), out bool parsed) && parsed == true); + Assert.That(serialized?.parameters?.query_type?.ToString(), Is.EqualTo("simple")); + Assert.That(serialized?.parameters?.include_contexts?[0]?.ToString(), Is.EqualTo("citations")); + Assert.That(serialized?.parameters?.include_contexts?[1]?.ToString(), Is.EqualTo("all_retrieved_documents")); + Assert.That(serialized?.parameters?.embedding_dependency?.type?.ToString(), Is.EqualTo("endpoint")); + + ChatCompletionOptions options = new(); + options.AddDataSource(new ElasticsearchChatDataSource() + { + Authentication = 
DataSourceAuthentication.FromAccessToken("foo-token"), + Endpoint = new Uri("https://my-elasticsearch.com"), + IndexName = "my-index-name", + InScope = true, + }); + + IReadOnlyList sourcesFromOptions = options.GetDataSources(); + Assert.That(sourcesFromOptions, Has.Count.EqualTo(1)); + Assert.That(sourcesFromOptions[0], Is.InstanceOf()); + Assert.That(((ElasticsearchChatDataSource)sourcesFromOptions[0]).IndexName, Is.EqualTo("my-index-name")); + + options.AddDataSource(new AzureCosmosDBChatDataSource() + { + Authentication = DataSourceAuthentication.FromApiKey("api-key"), + ContainerName = "my-container-name", + DatabaseName = "my_database_name", + FieldMappings = new() + { + ContentFieldNames = { "hello", "world" }, + }, + IndexName = "my-index-name", + VectorizationSource = DataSourceVectorizer.FromDeploymentName("my-deployment"), + }); + sourcesFromOptions = options.GetDataSources(); + Assert.That(sourcesFromOptions, Has.Count.EqualTo(2)); + Assert.That(sourcesFromOptions[1], Is.InstanceOf()); + } + + [RecordedTest] + public async Task ChatCompletionBadKeyGivesHelpfulError() + { + string mockKey = "not-a-valid-key-and-should-still-be-sanitized"; + + try + { + ChatClient chatClient = GetTestClient(keyCredential: new ApiKeyCredential(mockKey)); + _ = await chatClient.CompleteChatAsync([new UserChatMessage("oops, this won't work with that key!")]); + Assert.Fail("No exception was thrown"); + } + catch (Exception thrownException) + { + Assert.That(thrownException, Is.InstanceOf()); + Assert.That(thrownException.Message, Does.Contain("invalid subscription key")); + Assert.That(thrownException.Message, Does.Not.Contain(mockKey)); + } + } + + [RecordedTest] + [Category("Smoke")] + public async Task DefaultAzureCredentialWorks() + { + ChatClient chatClient = GetTestClient(tokenCredential: this.TestEnvironment.Credential); + ChatCompletion chatCompletion = await chatClient.CompleteChatAsync([ChatMessage.CreateUserMessage("Hello, world!")]); + Assert.That(chatCompletion, 
Is.Not.Null); + Assert.That(chatCompletion.Content, Is.Not.Null.Or.Empty); + Assert.That(chatCompletion.Content[0].Text, Is.Not.Null.Or.Empty); + } + + [RecordedTest] + [Ignore("Delay behavior not emulated by recordings, and needs to be run manually with some time in between iterations due to service throttling behaviour")] + [TestCase("x-ms-retry-after-ms", "1000", 1000)] + [TestCase("retry-after-ms", "1400", 1400)] + [TestCase("Retry-After", "1", 1000)] + [TestCase("Retry-After", "1.5", 1500)] + [TestCase("retry-after-ms", "200", 200)] + [TestCase("x-fake-test-retry-header", "1400", 800)] + public async Task RateLimitedRetryWorks(string headerName, string headerValue, double expectedDelayMilliseconds) + { + const string responseClass = "HttpClientTransportResponse"; + const string responseField = "_httpResponse"; + IConfiguration testConfig = TestConfig.GetConfig("rate_limited_chat")!; + Assert.That(testConfig, Is.Not.Null); + + int failureCount = 0; + string? clientRequestId = null; + + TestPipelinePolicy replaceHeadersPolicy = new( + requestAction: (request) => + { + clientRequestId ??= request.Headers.GetFirstOrDefault("x-ms-client-request-id"); + }, + responseAction: (response) => + { + if (response.Status != 200) + { + failureCount++; + + Type httpPipelineResponseType = typeof(HttpClientPipelineTransport).GetNestedType(responseClass, BindingFlags.NonPublic) + ?? throw new InvalidOperationException($"Could not the expected {responseClass} inner non public class"); + FieldInfo httpResponseField = httpPipelineResponseType.GetField(responseField, BindingFlags.Instance | BindingFlags.NonPublic) + ?? throw new InvalidOperationException($"Could not find the expected {responseClass}.{responseField} field)"); + HttpResponseMessage httpResponse = httpResponseField.GetValue(response) as HttpResponseMessage + ?? 
throw new InvalidOperationException($"Could note determine the HttpResponseMessage to modify"); + + httpResponse.Headers.Remove("x-ms-retry-after-ms"); + httpResponse.Headers.Remove("retry-after-ms"); + httpResponse.Headers.Remove("Retry-After"); + httpResponse.Headers.TryAddWithoutValidation(headerName, headerValue); + } + }); + + TestClientOptions options = new(); + options.AddPolicy(replaceHeadersPolicy, PipelinePosition.PerTry); + + ChatClient client = GetTestClient(testConfig, options); + + BinaryContent requestContent = BinaryContent.Create(BinaryData.FromString($$""" + { + "model": "{{testConfig.Deployment}}", + "messages": [ + { "role": "user", "content": "Write three haikus about tropical fruit." } + ] + } + """)); + RequestOptions noThrowOptions = new() { ErrorOptions = ClientErrorBehaviors.NoThrow }; + + TimeSpan? observed200Delay = null; + TimeSpan? observed429Delay = null; + + for (int i = 0; i < 4 && !observed429Delay.HasValue; i++) + { + Stopwatch requestWatch = Stopwatch.StartNew(); + ClientResult protocolResult = await client.CompleteChatAsync(requestContent, noThrowOptions); + PipelineResponse response = protocolResult.GetRawResponse(); + bool responseHasRequestId = response.Headers.TryGetValue("x-ms-client-request-id", out string? 
requestIdFromResponse); + Assert.That(responseHasRequestId, Is.True); + Assert.That(requestIdFromResponse, Is.EqualTo(clientRequestId)); + switch (response.Status) + { + case 200: + observed200Delay = requestWatch.Elapsed; + break; + case 429: + observed429Delay = requestWatch.Elapsed; + break; + default: + Assert.Fail(); + break; + } + clientRequestId = null; + } + + Assert.That(observed200Delay.HasValue, Is.True); + Assert.That(observed429Delay.HasValue, Is.True); + Assert.That(failureCount, Is.EqualTo(4)); + Assert.That(observed429Delay!.Value.TotalMilliseconds, Is.GreaterThan(expectedDelayMilliseconds)); + Assert.That(observed429Delay!.Value.TotalMilliseconds, Is.LessThan(3 * expectedDelayMilliseconds + 2 * observed200Delay!.Value.TotalMilliseconds)); + } + + #endregion + + #region Regular chat completions tests + + [RecordedTest] + public async Task ChatCompletion() + { + ChatClient chatClient = GetTestClient(); + ClientResult chatCompletion = await chatClient.CompleteChatAsync([new UserChatMessage("hello, world!")]); + Assert.That(chatCompletion, Is.Not.Null); + Assert.That(chatCompletion.Value, Is.Not.Null); + Assert.That(chatCompletion.Value, Is.InstanceOf()); + Assert.That(chatCompletion.Value.Content, Is.Not.Null.Or.Empty); + } + + [RecordedTest] + public async Task ChatCompletionWithHistoryAndLogProbabilities() + { + ChatClient client = GetTestClient(); + + ChatCompletion response = await client.CompleteChatAsync( + [ + new SystemChatMessage("You are a helpful assistant."), + new UserChatMessage("I am baking a pizza, can you help me?"), + new AssistantChatMessage("Of course, I'd be happy to help! What do you need assistance with? 
Do you need a recipe, cooking time and temperature suggestions, topping ideas, or something else?"), + new UserChatMessage("What temperature should I bake at?") + ], + new ChatCompletionOptions() + { + IncludeLogProbabilities = true, + TopLogProbabilityCount = 3 + }); + + Assert.That(response, Is.Not.Null); + Assert.That(response.Id, Is.Not.Null.Or.Empty); + Assert.That(response.CreatedAt, Is.GreaterThan(new DateTimeOffset(2024, 01, 01, 00, 00, 00, TimeSpan.Zero))); + Assert.That(response.FinishReason, Is.Not.Null.Or.Empty); + Assert.That(response.Content, Is.Not.Null.Or.Empty); + Assert.That(response.Content.Count, Is.EqualTo(1)); + Assert.That(response.Usage, Is.Not.Null); + Assert.That(response.Usage.InputTokens, Is.GreaterThan(10)); + Assert.That(response.Usage.OutputTokens, Is.GreaterThan(10)); + Assert.That(response.Usage.TotalTokens, Is.GreaterThan(20)); + Assert.That(response.ContentTokenLogProbabilities, Is.Not.Null.Or.Empty); + foreach (var logProb in response.ContentTokenLogProbabilities) + { + Assert.That(logProb, Is.Not.Null); + Assert.That(logProb.TopLogProbabilities, Is.Not.Null.Or.Empty); + Assert.That(logProb.TopLogProbabilities.Count, Is.EqualTo(3)); + } + + ChatMessageContentPart content = response.Content[0]; + Assert.That(content.Kind, Is.EqualTo(ChatMessageContentPartKind.Text)); + Assert.That(content.Text, Is.Not.Null.Or.Empty); + Assert.That(content.Text, Does + .Contain("Fahrenheit") + .Or.Contain("Celsius") + .Or.Contain("°F") + .Or.Contain("°C") + .Or.Contain("oven")); + } + + [RecordedTest] + public async Task ChatCompletionWithTextFormat() + { + ChatClient client = GetTestClient(); + ChatCompletionOptions options = new() + { + ResponseFormat = ChatResponseFormat.CreateTextFormat() + }; + + ChatCompletion response = await client.CompleteChatAsync([new UserChatMessage("Give me a random number")], options); + Assert.That(response, Is.Not.Null); + Assert.That(response.Content, Is.Not.Null.Or.Empty); + Assert.That(response.Content[0].Text, 
Is.Not.Null.Or.Empty); + } + + [RecordedTest] + public async Task ChatCompletionContentFilter() + { + ChatClient client = GetTestClient(); + ClientResult chatCompletionResult = await client.CompleteChatAsync([ChatMessage.CreateUserMessage("Hello, world!")]); + Console.WriteLine($"--- RESPONSE ---"); + ChatCompletion chatCompletion = chatCompletionResult; + ContentFilterResultForPrompt promptFilterResult = chatCompletion.GetContentFilterResultForPrompt(); + Assert.That(promptFilterResult, Is.Not.Null); + Assert.That(promptFilterResult.Sexual?.Filtered, Is.False); + Assert.That(promptFilterResult.Sexual?.Severity, Is.EqualTo(ContentFilterSeverity.Safe)); + ContentFilterResultForResponse responseFilterResult = chatCompletion.GetContentFilterResultForResponse(); + Assert.That(responseFilterResult, Is.Not.Null); + Assert.That(responseFilterResult.Hate?.Severity, Is.EqualTo(ContentFilterSeverity.Safe)); + Assert.That(responseFilterResult.ProtectedMaterialCode, Is.Null); + } + + [RecordedTest] + public async Task SearchExtensionWorks() + { + var searchConfig = TestConfig.GetConfig("search")!; + Assert.That(searchConfig, Is.Not.Null); + string searchIndex = searchConfig.GetValueOrThrow("index"); + + AzureSearchChatDataSource source = new() + { + Endpoint = searchConfig.Endpoint, + Authentication = DataSourceAuthentication.FromApiKey(searchConfig.Key), + IndexName = searchIndex, + AllowPartialResult = true, + QueryType = DataSourceQueryType.Simple, + }; + ChatCompletionOptions options = new(); + options.AddDataSource(source); + + ChatClient client = GetTestClient(); + + ClientResult chatCompletionResult = await client.CompleteChatAsync( + [new UserChatMessage("What does the term 'PR complete' mean?")], + options); + Assert.That(chatCompletionResult, Is.Not.Null); + + ChatCompletion chatCompletion = chatCompletionResult.Value; + Assert.That(chatCompletion, Is.Not.Null); + Assert.That(chatCompletion.FinishReason, Is.EqualTo(ChatFinishReason.Stop)); + 
// NOTE(review): reconstructed from a whitespace-mangled diff chunk. Generic type
// arguments were stripped by the mangling and have been re-inferred from usage
// (e.g. AsyncCollectionResult<StreamingChatCompletionUpdate>); confirm against the
// committed Azure.AI.OpenAI test sources. The tail of the preceding non-streaming
// search-extension test (and its #endregion) was truncated at this chunk boundary
// and is not reproduced here.
//
// Fixes applied relative to the visible original:
//  - the "Streaming chat completion tests" helper-region #endregion sat INSIDE
//    ValidateUpdate (before the method's closing brace); moved after it.
//  - ValidateUpdate compared CreatedAt against an inline new DateTimeOffset(2024,...)
//    even though the shared START_2024 constant exists on the test base; now reused.

#region Streaming chat completion tests

[RecordedTest]
public async Task ChatCompletionBadKeyGivesHelpfulErrorStreaming()
{
    string mockKey = "not-a-valid-key-and-should-still-be-sanitized";

    try
    {
        ChatClient chatClient = GetTestClient(keyCredential: new ApiKeyCredential(mockKey));
        var messages = new[] { new UserChatMessage("oops, this won't work with that key!") };

        AsyncCollectionResult<StreamingChatCompletionUpdate> result = chatClient.CompleteChatStreamingAsync(messages);
        await foreach (StreamingChatCompletionUpdate update in result)
        {
            Assert.Fail("No exception was thrown");
        }

        Assert.Fail("No exception was thrown");
    }
    catch (Exception thrownException)
    {
        // The error must explain the key problem without echoing the key itself
        // (it would otherwise leak into sanitized recordings).
        Assert.That(thrownException, Is.InstanceOf<ClientResultException>());
        Assert.That(thrownException.Message, Does.Contain("invalid subscription key"));
        Assert.That(thrownException.Message, Does.Not.Contain(mockKey));
    }
}

[RecordedTest]
public async Task ChatCompletionStreaming()
{
    StringBuilder builder = new();
    bool foundPromptFilter = false;
    bool foundResponseFilter = false;

    ChatClient chatClient = GetTestClient();

    ChatMessage[] messages =
    [
        new SystemChatMessage("You are a curmudgeon"),
        // [sic] "assitant" typo retained: changing the prompt text would invalidate
        // existing test-session recordings.
        new UserChatMessage("Hello, assitant!")
    ];
    ChatCompletionOptions options = new()
    {
        MaxTokens = 512,
        IncludeLogProbabilities = true,
        TopLogProbabilityCount = 1,
    };

    AsyncCollectionResult<StreamingChatCompletionUpdate> streamingResults = chatClient.CompleteChatStreamingAsync(messages, options);
    Assert.That(streamingResults, Is.Not.Null);

    await foreach (StreamingChatCompletionUpdate update in streamingResults)
    {
        ValidateUpdate(update, builder, ref foundPromptFilter, ref foundResponseFilter);
    }

    string allText = builder.ToString();
    Assert.That(allText, Is.Not.Null.Or.Empty);

    Assert.That(foundPromptFilter, Is.True);
    Assert.That(foundResponseFilter, Is.True);
}

[RecordedTest]
public async Task SearchExtensionWorksStreaming()
{
    StringBuilder builder = new();
    bool foundPromptFilter = false;
    bool foundResponseFilter = false;
    List<AzureChatMessageContext> contexts = new();

    var searchConfig = TestConfig.GetConfig("search")!;
    Assert.That(searchConfig, Is.Not.Null);
    string searchIndex = searchConfig.GetValueOrThrow("index");

    AzureSearchChatDataSource source = new()
    {
        Endpoint = searchConfig.Endpoint,
        Authentication = DataSourceAuthentication.FromApiKey(searchConfig.Key),
        IndexName = searchIndex,
        AllowPartialResult = true,
        QueryType = DataSourceQueryType.Simple,
    };

    ChatCompletionOptions options = new();
    options.AddDataSource(source);

    ChatMessage[] messages = [new UserChatMessage("What does the term 'PR complete' mean?")];

    ChatClient client = GetTestClient();

    AsyncCollectionResult<StreamingChatCompletionUpdate> chatUpdates = client.CompleteChatStreamingAsync(messages, options);
    Assert.IsNotNull(chatUpdates);

    await foreach (StreamingChatCompletionUpdate update in chatUpdates)
    {
        ValidateUpdate(update, builder, ref foundPromptFilter, ref foundResponseFilter);

        AzureChatMessageContext context = update.GetAzureMessageContext();
        if (context != null)
        {
            contexts.Add(context);
        }
    }

    string allText = builder.ToString();
    Assert.That(allText, Is.Not.Null.Or.Empty);

    // TODO FIXME: When using data sources, the service does not appear to return request nor response filtering information
    //Assert.That(foundPromptFilter, Is.True);
    //Assert.That(foundResponseFilter, Is.True);

    Assert.That(contexts, Has.Count.EqualTo(1));
    Assert.That(contexts[0].Intent, Is.Not.Null.Or.Empty);
    Assert.That(contexts[0].Citations, Has.Count.GreaterThan(0));
    Assert.That(contexts[0].Citations[0].Content, Is.Not.Null.Or.Empty);
    Assert.That(contexts[0].Citations[0].Filepath, Is.Not.Null.Or.Empty);
    Assert.That(contexts[0].Citations[0].ChunkId, Is.Not.Null.Or.Empty);
    Assert.That(contexts[0].Citations[0].Title, Is.Not.Null.Or.Empty);
}

#endregion

#region Helper methods

// Validates a single streaming update: the very first (epoch-timestamped) message
// carries the prompt content-filter results; all later messages carry content
// deltas and, once, the response content-filter results.
// NOTE(review): several asserts here use Is.Not.Null.Or.Empty, which NUnit parses
// as (not null) OR (empty) — weaker than intended. Kept as-is to avoid changing
// what the recorded tests accept; consider !(Is.Null.Or.Empty) in a follow-up.
private void ValidateUpdate(StreamingChatCompletionUpdate update, StringBuilder builder, ref bool foundPromptFilter, ref bool foundResponseFilter)
{
    if (update.CreatedAt == UNIX_EPOCH)
    {
        // This is the first message that usually contains the service's request content filtering
        ContentFilterResultForPrompt promptFilter = update.GetContentFilterResultForPrompt();
        if (promptFilter?.SelfHarm != null)
        {
            Assert.That(promptFilter.SelfHarm.Filtered, Is.False);
            Assert.That(promptFilter.SelfHarm.Severity, Is.EqualTo(ContentFilterSeverity.Safe));
            foundPromptFilter = true;
        }
    }
    else
    {
        Assert.That(update.Id, Is.Not.Null.Or.Empty);
        // FIX: was an inline new DateTimeOffset(2024, 01, 01, ...) duplicating START_2024.
        Assert.That(update.CreatedAt, Is.GreaterThan(START_2024));
        Assert.That(update.FinishReason, Is.Null.Or.EqualTo(ChatFinishReason.Stop));
        if (update.Usage != null)
        {
            Assert.That(update.Usage.InputTokens, Is.GreaterThanOrEqualTo(0));
            Assert.That(update.Usage.OutputTokens, Is.GreaterThanOrEqualTo(0));
            Assert.That(update.Usage.TotalTokens, Is.GreaterThanOrEqualTo(0));
        }

        Assert.That(update.Model, Is.Not.Null);
        Assert.That(update.Role, Is.Null.Or.EqualTo(ChatMessageRole.Assistant));
        Assert.That(update.ContentUpdate, Is.Not.Null);

        Assert.That(update.ContentTokenLogProbabilities, Is.Not.Null);
        foreach (var logProb in update.ContentTokenLogProbabilities)
        {
            // TopLogProbabilityCount = 1 was requested, so exactly one entry is expected.
            Assert.That(logProb.TopLogProbabilities, Is.Not.Null);
            Assert.That(logProb.TopLogProbabilities.Count, Is.EqualTo(1));
        }

        foreach (ChatMessageContentPart part in update.ContentUpdate)
        {
            Assert.That(part.Kind, Is.EqualTo(ChatMessageContentPartKind.Text));
            Assert.That(part.Text, Is.Not.Null);

            builder.Append(part.Text);
        }

        if (!foundResponseFilter)
        {
            ContentFilterResultForResponse responseFilter = update.GetContentFilterResultForResponse();
            if (responseFilter?.Violence != null)
            {
                Assert.That(responseFilter.Violence.Filtered, Is.False);
                Assert.That(responseFilter.Violence.Severity, Is.EqualTo(ContentFilterSeverity.Safe));
                foundResponseFilter = true;
            }
        }
    }
}
// FIX: this #endregion previously appeared before ValidateUpdate's closing brace
// (lexically inside the method body); moved after the method.
#endregion
}
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

using System.ClientModel;
using System.Threading.Tasks;
using OpenAI.Embeddings;
using OpenAI.TestFramework;

namespace Azure.AI.OpenAI.Tests;

// NOTE(review): reconstructed post-image of a mangled rename diff; the generic
// arguments (AoaiTestBase<EmbeddingClient>, ClientResult<Embedding>) were stripped
// by the mangling and re-inferred from usage — confirm against the original file.

/// <summary>
/// Recorded tests for the Azure OpenAI embedding client.
/// </summary>
public class EmbeddingTests : AoaiTestBase<EmbeddingClient>
{
    public EmbeddingTests(bool isAsync) : base(isAsync)
    { }

    /// <summary>Verifies the top-level client can hand out an embedding client.</summary>
    [Test]
    [Category("Smoke")]
    public void CanCreateClient() => Assert.That(GetTestClient(), Is.InstanceOf<EmbeddingClient>());

    /// <summary>Generates a single embedding and checks a non-empty vector comes back.</summary>
    [RecordedTest]
    public async Task SimpleEmbeddingWithTopLevelClient()
    {
        EmbeddingClient embeddingClient = GetTestClient();
        ClientResult<Embedding> embeddingResult = await embeddingClient.GenerateEmbeddingAsync("sample text to embed");
        Assert.That(embeddingResult?.Value?.Vector.Length, Is.GreaterThan(0));
    }
}
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

using System;
using System.Threading.Tasks;
using OpenAI.Files;
using OpenAI.TestFramework;

namespace Azure.AI.OpenAI.Tests;

// NOTE(review): reconstructed post-image of a mangled new-file diff; generic
// arguments (AoaiTestBase<FileClient>, Is.InstanceOf<FileClient>) re-inferred.

/// <summary>
/// Recorded tests for the Azure OpenAI file client.
/// </summary>
public class FileTests : AoaiTestBase<FileClient>
{
    public FileTests(bool isAsync) : base(isAsync)
    { }

    /// <summary>Verifies a file client can be constructed from the top-level client.</summary>
    [Test]
    [Category("Smoke")]
    public void CanCreateClient() => Assert.That(GetTestClient(), Is.InstanceOf<FileClient>());

    /// <summary>Round-trips a small assistants file: upload, register for cleanup, delete.</summary>
    [RecordedTest]
    public async Task CanUploadAndDeleteFiles()
    {
        FileClient client = GetTestClient();
        OpenAIFileInfo file = await client.UploadFileAsync(
            BinaryData.FromString("hello, world!"),
            "test_file_delete_me.txt",
            FileUploadPurpose.Assistants);
        Validate(file);
        bool deleted = await client.DeleteFileAsync(file.Id);
        Assert.IsTrue(deleted);
    }

    /// <summary>Lists files and expects at least one to exist on the resource.</summary>
    [RecordedTest]
    public async Task CanListFiles()
    {
        FileClient client = GetTestClient();
        OpenAIFileInfoCollection files = await client.GetFilesAsync();
        Assert.That(files, Has.Count.GreaterThan(0));
    }
}
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

using System;
using System.ClientModel;
using System.ClientModel.Primitives;
using System.Collections.Generic;
using System.Linq;
using System.Text.Json;
using System.Threading.Tasks;
using Azure.AI.OpenAI.FineTuning;
using Azure.AI.OpenAI.Tests.Models;
using Azure.AI.OpenAI.Tests.Utils;
using Azure.AI.OpenAI.Tests.Utils.Config;
using OpenAI.Chat;
using OpenAI.Files;
using OpenAI.FineTuning;
using OpenAI.TestFramework;
using OpenAI.TestFramework.Utils;

namespace Azure.AI.OpenAI.Tests;

// NOTE(review): reconstructed post-image of a mangled new-file diff. All generic
// type arguments (AoaiTestBase<FineTuningClient>, ValidateAndParse<T>,
// ListResponse<T>, GetTestClientFrom<T>, the EnumerateAsync delegate shape) were
// stripped by the mangling and have been re-inferred from usage — confirm against
// the committed source before relying on exact signatures.
//
// Fixes applied relative to the visible original:
//  - DeployAndChatWithModel's polling predicate asserted the captured outer
//    `deployment` variable instead of the freshly fetched `d` (stale-capture bug).
//  - CreateAndDeleteFineTuning used Is.Not.Null.Or.Empty, which NUnit parses as
//    (not null) OR (empty); replaced with !(Is.Null.Or.Empty) to match the rest
//    of this class and assert the intended "non-null AND non-empty".
//  - Classic-model Assert.True calls unified to the constraint model used elsewhere.
//  - Comment typo "take up 30 minutes" -> "take up to 30 minutes".

/// <summary>
/// Recorded tests for the Azure OpenAI fine-tuning client: job enumeration,
/// checkpoints, events, creation/cancellation/deletion, and deploying a
/// fine-tuned model for chat.
/// </summary>
public class FineTuningTests : AoaiTestBase<FineTuningClient>
{
    public FineTuningTests(bool isAsync) : base(isAsync)
    { }

    [Test]
    [Category("Smoke")]
    public void CanCreateClient() => Assert.That(GetTestClient(), Is.InstanceOf<FineTuningClient>());

    /// <summary>Enumerates up to 25 fine-tuning jobs and sanity-checks each.</summary>
    [RecordedTest]
    public async Task JobsFineTuning()
    {
        FineTuningClient client = GetTestClient();

        int count = 25;

        await foreach (FineTuningJob job in EnumerateJobsAsync(client))
        {
            if (count-- <= 0)
            {
                break;
            }

            Assert.That(job, Is.Not.Null);
            Assert.That(job.ID, !(Is.Null.Or.Empty));
            Assert.That(job.FineTunedModel, Is.Null.Or.Not.Empty); // this either null or set to some non-empty value
            Assert.That(job.Status, !(Is.Null.Or.Empty));
            Assert.That(job.Object, Is.EqualTo("fine_tuning.job"));
        }
    }

    /// <summary>Finds the pre-existing fine-tuned model's job and validates its checkpoints.</summary>
    [RecordedTest]
    public async Task CheckpointsFineTuning()
    {
        string fineTunedModel = GetFineTunedModel();
        FineTuningClient client = GetTestClient();

        // Check if the model exists by searching all jobs
        FineTuningJob job = await EnumerateJobsAsync(client)
            .FirstOrDefaultAsync(j => j.FineTunedModel == fineTunedModel)!;
        Assert.That(job, Is.Not.Null);
        Assert.That(job!.Status, Is.EqualTo("succeeded"));

        int count = 25;
        await foreach (FineTuningCheckpoint checkpoint in EnumerateCheckpoints(client, job.ID))
        {
            if (count-- <= 0)
            {
                break;
            }

            Assert.That(checkpoint, Is.Not.Null);
            Assert.That(checkpoint.ID, !(Is.Null.Or.Empty));
            Assert.That(checkpoint.CreatedAt, Is.GreaterThan(START_2024));
            Assert.That(checkpoint.FineTunedModelCheckpoint, !(Is.Null.Or.Empty));
            Assert.That(checkpoint.Metrics, Is.Not.Null);
            Assert.That(checkpoint.Metrics.Step, Is.GreaterThan(0));
            Assert.That(checkpoint.Metrics.TrainLoss, Is.GreaterThan(0));
            Assert.That(checkpoint.Metrics.TrainMeanTokenAccuracy, Is.GreaterThan(0));
            // Validation metrics are not reliably populated by the service, so they
            // are deliberately not asserted:
            //Assert.That(checkpoint.Metrics.ValidLoss, Is.GreaterThan(0));
            //Assert.That(checkpoint.Metrics.ValidMeanTokenAccuracy, Is.GreaterThan(0));
            //Assert.That(checkpoint.Metrics.FullValidLoss, Is.GreaterThan(0));
            //Assert.That(checkpoint.Metrics.FullValidMeanTokenAccuracy, Is.GreaterThan(0));
        }
    }

    /// <summary>Validates job events for the pre-existing fine-tuned model, including ID uniqueness.</summary>
    [RecordedTest]
    public async Task EventsFineTuning()
    {
        string fineTunedModel = GetFineTunedModel();
        FineTuningClient client = GetTestClient();

        // Check if the model exists by searching all jobs
        FineTuningJob job = await EnumerateJobsAsync(client)
            .FirstOrDefaultAsync(j => j.FineTunedModel == fineTunedModel)!;
        Assert.That(job, Is.Not.Null);
        Assert.That(job!.Status, Is.EqualTo("succeeded"));

        HashSet<string> ids = new();

        int count = 25;
        var asyncEnum = EnumerateAsync<FineTuningJobEvent>((after, limit, opt) => client.GetJobEventsAsync(job.ID, after, limit, opt));
        await foreach (FineTuningJobEvent evt in asyncEnum)
        {
            if (count-- <= 0)
            {
                break;
            }

            Assert.That(evt, Is.Not.Null);
            Assert.That(evt.ID, !(Is.Null.Or.Empty));
            Assert.That(evt.Object, Is.EqualTo("fine_tuning.job.event"));
            Assert.That(evt.CreatedAt, Is.GreaterThan(START_2024));
            Assert.That(evt.Level, !(Is.Null.Or.Empty));
            Assert.That(evt.Message, !(Is.Null.Or.Empty));

            bool added = ids.Add(evt.ID);
            Assert.That(added, Is.True, "Duplicate event ID detected {0}", evt.ID);
        }
    }

    /// <summary>Deleting a non-existent model must still report success (service returns 204).</summary>
    [RecordedTest]
    public async Task DeleteFineTuningModel()
    {
        FineTuningClient client = GetTestClient();
        Assert.That(client, Is.Not.Null);
        Assert.That(client, Is.InstanceOf<FineTuningClient>());

        // The service always happily returns HTTP 204 regardless of whether or not the model exists
        bool deleted = await DeleteJobAndVerifyAsync(client, "does-not-exist");
        Assert.That(deleted, Is.True);
    }

    /// <summary>Creates a fine-tuning job, waits for events, then cancels it and verifies the status.</summary>
    [RecordedTest]
    public async Task CreateAndCancelFineTuning()
    {
        var fineTuningFile = Assets.FineTuning;

        FineTuningClient client = GetTestClient();
        FileClient fileClient = GetTestClientFrom<FileClient>(client);

        // upload training data
        OpenAIFileInfo uploadedFile = await UploadAndWaitForCompleteOrFail(fileClient, fineTuningFile.RelativePath);

        // Create the fine tuning job
        using var requestContent = new FineTuningOptions()
        {
            Model = client.DeploymentOrThrow(),
            TrainingFile = uploadedFile.Id
        }.ToBinaryContent();

        ClientResult result = await client.CreateJobAsync(requestContent);
        FineTuningJob job = ValidateAndParse<FineTuningJob>(result);
        Assert.That(job.ID, !(Is.Null.Or.Empty));

        // Ensure the job is cleaned up even if a later assert throws.
        await using RunOnScopeExit _ = new(async () =>
        {
            bool deleted = await DeleteJobAndVerifyAsync(client, job.ID);
            Assert.That(deleted, Is.True, "Failed to delete fine tuning job: {0}", job.ID);
        });

        // Wait for some events to become available
        ListResponse<FineTuningJobEvent> events;
        int maxLoops = 10;
        do
        {
            result = await client.GetJobEventsAsync(job.ID, null, 10, new()).FirstOrDefaultAsync();
            events = ValidateAndParse<ListResponse<FineTuningJobEvent>>(result);

            if (events.Data?.Count > 0)
            {
                Assert.That(events.Data[0], Is.Not.Null);
                Assert.That(events.Data[0].ID, !(Is.Null.Or.Empty));
                Assert.That(events.Data[0].Level, !(Is.Null.Or.Empty));
                Assert.That(events.Data[0].Message, !(Is.Null.Or.Empty));
                Assert.That(events.Data[0].CreatedAt, Is.GreaterThan(START_2024));

                break;
            }

            await Task.Delay(TimeSpan.FromSeconds(2));

        } while (maxLoops-- > 0);

        // Cancel the fine tuning job
        result = await client.CancelJobAsync(job.ID, new());
        job = ValidateAndParse<FineTuningJob>(result);

        // Make sure the job status shows as cancelled
        job = await WaitForJobToEnd(client, job);
        Assert.That(job.Status, Is.EqualTo("cancelled"));
    }

    /// <summary>End-to-end: upload data, fine-tune to completion, then delete the model.</summary>
    [RecordedTest(AutomaticRecord = false)]
    [Category("LongRunning")] // CAUTION: This test can take up to 30 *minutes* to run in live mode
    public async Task CreateAndDeleteFineTuning()
    {
        var fineTuningFile = Assets.FineTuning;

        FineTuningClient client = GetTestClient();
        FileClient fileClient = GetTestClientFrom<FileClient>(client);

        // upload training data
        OpenAIFileInfo uploadedFile = await UploadAndWaitForCompleteOrFail(fileClient, fineTuningFile.RelativePath);
        Assert.That(uploadedFile.Status, Is.EqualTo(OpenAIFileStatus.Processed));

        // Create the fine tuning job
        using var requestContent = new FineTuningOptions()
        {
            Model = client.DeploymentOrThrow(),
            TrainingFile = uploadedFile.Id
        }.ToBinaryContent();

        ClientResult result = await client.CreateJobAsync(requestContent);
        FineTuningJob job = ValidateAndParse<FineTuningJob>(result);
        // FIX: was Is.Not.Null.Or.Empty, which NUnit evaluates as (not null) OR (empty).
        Assert.That(job.ID, !(Is.Null.Or.Empty));
        Assert.That(job.Error, Is.Null);
        Assert.That(job.Status, !(Is.Null.Or.EqualTo("failed").Or.EqualTo("cancelled")));

        // Wait for the fine tuning to complete
        job = await WaitForJobToEnd(client, job);
        Assert.That(job.Status, Is.EqualTo("succeeded"), "Fine tuning did not succeed");
        Assert.That(job.FineTunedModel, !(Is.Null.Or.Empty));

        // Delete the fine tuned model
        bool deleted = await DeleteJobAndVerifyAsync(client, job.ID);
        Assert.That(deleted, Is.True, "Failed to delete fine tuning model: {0}", job.FineTunedModel);
    }

    /// <summary>Deploys an existing fine-tuned model, chats with it, and validates the JSON reply.</summary>
    [RecordedTest(AutomaticRecord = false)]
    [Category("LongRunning")] // CAUTION: This test can take around 10 to 15 *minutes* in live mode to run
    public async Task DeployAndChatWithModel()
    {
        string fineTunedModel = GetFineTunedModel();
        FineTuningClient client = GetTestClient();

        AzureDeploymentClient deploymentClient = GetTestClientFrom<AzureDeploymentClient>(client);
        string? deploymentName = null;
        // Tear the deployment down on exit regardless of test outcome.
        await using RunOnScopeExit _ = new(async () =>
        {
            if (deploymentName != null)
            {
                await deploymentClient.DeleteDeploymentAsync(deploymentName);
            }
        });

        // Check if the model exists by searching all jobs
        FineTuningJob? job = await EnumerateJobsAsync(client)
            .FirstOrDefaultAsync(j => j.FineTunedModel == fineTunedModel);
        Assert.That(job, Is.Not.Null);
        Assert.That(job!.Status, Is.EqualTo("succeeded"));

        // Deploy the model and wait for the deployment to finish
        deploymentName = "azure-ai-openai-test-" + Recording?.Random.NewGuid().ToString();
        AzureDeployedModel deployment = await deploymentClient.CreateDeploymentAsync(deploymentName, fineTunedModel);
        Assert.That(deployment, Is.Not.Null);
        Assert.That(deployment.ID, !(Is.Null.Or.Empty));
        Assert.That(deployment.Properties, Is.Not.Null);

        deployment = await WaitUntilReturnLast(
            deployment,
            () => deploymentClient.GetDeploymentAsync(deploymentName),
            (d) =>
            {
                // FIX: previously asserted the captured outer `deployment` instead of
                // the freshly polled `d`, so the check never saw updated state.
                Assert.That(d?.Properties?.ProvisioningState, !(Is.Null.Or.Empty));

                return d.Properties.ProvisioningState == "Succeeded"
                    || d.Properties.ProvisioningState == "Failed"
                    || d.Properties.ProvisioningState == "Canceled";
            },
            TimeSpan.FromMinutes(1),
            TimeSpan.FromMinutes(30));

        Assert.That(deployment.Properties.ProvisioningState, Is.EqualTo("Succeeded"));

        // Run a chat completion test
        ChatClient chatClient = GetTestClientFrom<ChatClient>(client, deploymentName);

        ChatCompletion completion = await chatClient.CompleteChatAsync(
            [
                new SystemChatMessage("Convert sports headline to JSON: \"player\" (full name), \"team\", \"sport\", and \"gender\". If more than one return an array. No markdown"),
                new UserChatMessage("Pavleski will not play in 2024-2025 season")
            ]);
        Assert.That(completion, Is.Not.Null);
        Assert.That(completion.FinishReason, Is.EqualTo(ChatFinishReason.Stop));
        Assert.That(completion.Content, Has.Count.GreaterThan(0));
        Assert.That(completion.Content[0].Kind, Is.EqualTo(ChatMessageContentPartKind.Text));
        Assert.That(completion.Content[0].Text, !(Is.Null.Or.Empty));

        // we expect a JSON payload as the response so let's try to deserialize it
        using var jsonDoc = JsonDocument.Parse(completion.Content[0].Text, new()
        {
            AllowTrailingCommas = true,
            CommentHandling = JsonCommentHandling.Skip,
            MaxDepth = 2
        });
        JsonElement json = jsonDoc.RootElement;
        if (json.ValueKind == JsonValueKind.Array)
        {
            json = json.EnumerateArray().FirstOrDefault();
        }

        Assert.That(json.ValueKind, Is.EqualTo(JsonValueKind.Object));
        Assert.That(json.EnumerateObject().Select(p => p.Name), Has.Some.Match("(player)|(team)|(sport)|(gender)"));
    }

    #region helper methods

    // Reads the pre-provisioned fine-tuned model name from test configuration;
    // fails the test if it is missing.
    private string GetFineTunedModel()
    {
        string? model = TestConfig.GetConfig<FineTuningClient>()
            ?.GetValue<string>("fine_tuned_model");
        Assert.That(model, !(Is.Null.Or.Empty), "Failed to find the already fine tuned model to use");
        return model!;
    }

    // Uploads a training file and polls until the service marks it Processed or Error.
    private async Task<OpenAIFileInfo> UploadAndWaitForCompleteOrFail(FileClient fileClient, string path)
    {
        OpenAIFileInfo uploadedFile = await fileClient.UploadFileAsync(path, FileUploadPurpose.FineTune);
        Validate(uploadedFile);

        uploadedFile = await WaitUntilReturnLast(
            uploadedFile,
            () => fileClient.GetFileAsync(uploadedFile.Id),
            f => f.Status == OpenAIFileStatus.Processed || f.Status == OpenAIFileStatus.Error,
            TimeSpan.FromSeconds(5),
            TimeSpan.FromMinutes(5))
            .ConfigureAwait(false);

        return uploadedFile;
    }

    // Polls a job until it reaches a terminal status (cancelled/failed/succeeded).
    private Task<FineTuningJob> WaitForJobToEnd(FineTuningClient client, FineTuningJob job)
    {
        RequestOptions options = new();
        string jobId = job.ID;

        // NOTE: Fine tuning jobs can take up to 30 minutes to complete so the timeouts here are longer to account for that
        return WaitUntilReturnLast(
            job,
            async () =>
            {
                ClientResult result = await client.GetJobAsync(jobId, options).ConfigureAwait(false);
                return ValidateAndParse<FineTuningJob>(result);
            },
            j => j.Status == "cancelled" || j.Status == "failed" || j.Status == "succeeded",
            TimeSpan.FromMinutes(1),
            TimeSpan.FromMinutes(40));
    }

    private IAsyncEnumerable<FineTuningJob> EnumerateJobsAsync(FineTuningClient client)
        => EnumerateAsync<FineTuningJob>((after, limit, opt) => client.GetJobsAsync(after, limit, opt));

    private IAsyncEnumerable<FineTuningCheckpoint> EnumerateCheckpoints(FineTuningClient client, string jobId)
        => EnumerateAsync<FineTuningCheckpoint>((after, limit, opt) => client.GetJobCheckpointsAsync(jobId, after, limit, opt));

    // Flattens the service's paged protocol results into typed items.
    // NOTE(review): the delegate's exact return type (AsyncCollectionResult vs
    // IAsyncEnumerable<ClientResult>) was stripped by the mangling — confirm.
    private async IAsyncEnumerable<T> EnumerateAsync<T>(Func<string?, int?, RequestOptions, IAsyncEnumerable<ClientResult>> getAsyncEnumerable)
        where T : FineTuningModelBase
    {
        int numPerFetch = 10;
        RequestOptions reqOptions = new();

        await foreach (ClientResult pageResult in getAsyncEnumerable(null, numPerFetch, reqOptions))
        {
            ListResponse<T> items = ValidateAndParse<ListResponse<T>>(pageResult);
            if (items.Data?.Count > 0)
            {
                foreach (T item in items.Data)
                {
                    yield return item;
                }
            }
        }
    }

    // Deletes a job and polls until the service reports 404 for it (or times out).
    private async Task<bool> DeleteJobAndVerifyAsync(FineTuningClient client, string jobId, TimeSpan? timeBetween = null, TimeSpan? maxWaitTime = null)
    {
        var stopTime = DateTimeOffset.Now + (maxWaitTime ?? TimeSpan.FromMinutes(1));
        var sleepTime = timeBetween ?? TimeSpan.FromSeconds(2);

        RequestOptions noThrow = new()
        {
            ErrorOptions = ClientErrorBehaviors.NoThrow
        };

        // Since the DeleteJob and DeleteJobAsync are extensions methods, we need to call them on the unwrapped type,
        // instead of the dynamically wrapped type.
        var rawClient = UnWrap(client);

        bool success = false;
        while (DateTimeOffset.Now < stopTime)
        {
            ClientResult result = IsAsync
                ? await rawClient.DeleteJobAsync(jobId, noThrow).ConfigureAwait(false)
                : rawClient.DeleteJob(jobId, noThrow);
            Assert.That(result, Is.Not.Null);

            // verify the deletion actually succeeded
            result = await client.GetJobAsync(jobId, noThrow).ConfigureAwait(false);
            var rawResponse = result.GetRawResponse();
            success = rawResponse.Status == 404;
            if (success)
            {
                break;
            }

            await Task.Delay(sleepTime).ConfigureAwait(false);
        }

        return success;
    }

    #endregion
}
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

using System;
using System.ClientModel;
using System.Threading.Tasks;
using OpenAI.Images;
using OpenAI.TestFramework;

namespace Azure.AI.OpenAI.Tests;

// NOTE(review): reconstructed post-image of a mangled rename diff; generic
// arguments (AoaiTestBase<ImageClient>, ClientResult<GeneratedImage>,
// Is.InstanceOf<ClientResultException>) re-inferred from usage — confirm.

/// <summary>
/// Recorded tests for the Azure OpenAI image generation client.
/// </summary>
public class ImageTests(bool isAsync) : AoaiTestBase<ImageClient>(isAsync)
{
    /// <summary>Verifies an image client can be built with a token credential.</summary>
    [RecordedTest]
    [Category("Smoke")]
    public void CanCreateClient()
    {
        ImageClient client = GetTestClient(tokenCredential: TestEnvironment.Credential);
        Assert.That(client, Is.InstanceOf<ImageClient>());
    }

    /// <summary>
    /// A bad API key must produce a helpful error that never echoes the key
    /// (so sanitized recordings cannot leak it).
    /// </summary>
    [RecordedTest]
    public async Task BadKeyGivesHelpfulError()
    {
        string mockKey = "not-a-valid-key-and-should-still-be-sanitized";

        try
        {
            ImageClient client = GetTestClient(keyCredential: new ApiKeyCredential(mockKey));
            _ = await client.GenerateImageAsync("a delightful exception message, in contemporary watercolor");
            Assert.Fail("No exception was thrown");
        }
        catch (Exception thrownException)
        {
            Assert.That(thrownException, Is.InstanceOf<ClientResultException>());
            Assert.That(thrownException.Message, Does.Contain("invalid subscription key"));
            Assert.That(thrownException.Message, Does.Not.Contain(mockKey));
        }
    }

    /// <summary>Generates one image as raw bytes and checks the payload is present.</summary>
    [RecordedTest]
    public async Task CanCreateSimpleImage()
    {
        ImageClient client = GetTestClient();
        GeneratedImage image = await client.GenerateImageAsync("a tabby cat", new()
        {
            Quality = GeneratedImageQuality.Standard,
            Size = GeneratedImageSize.W1024xH1024,
            EndUserId = "test_user",
            ResponseFormat = GeneratedImageFormat.Bytes,
        });
        Assert.That(image, Is.Not.Null);
        Assert.That(image.ImageBytes, Is.Not.Null);
    }

    /// <summary>Generates an image by URI and validates the attached content-filter results.</summary>
    [RecordedTest]
    public async Task CanGetContentFilterResults()
    {
        ImageClient client = GetTestClient();
        ClientResult<GeneratedImage> imageResult = await client.GenerateImageAsync("a tabby cat", new()
        {
            Quality = GeneratedImageQuality.Standard,
            Size = GeneratedImageSize.W1024xH1024,
            EndUserId = "test_user",
            ResponseFormat = GeneratedImageFormat.Uri,
        });
        GeneratedImage image = imageResult.Value;
        Assert.That(image, Is.Not.Null);
        Assert.That(image.ImageUri, Is.Not.Null);
        Console.WriteLine($"RESPONSE--\n{imageResult.GetRawResponse().Content}");
        ImageContentFilterResultForPrompt promptResults = image.GetContentFilterResultForPrompt();
        ImageContentFilterResultForResponse responseResults = image.GetContentFilterResultForResponse();
        Assert.That(promptResults?.Sexual?.Severity, Is.EqualTo(ContentFilterSeverity.Safe));
        Assert.That(responseResults?.Sexual?.Severity, Is.EqualTo(ContentFilterSeverity.Safe));
    }
}
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#nullable enable

using System;
using System.Globalization;
using System.Text.Json;

// NOTE(review): these two types belong to the Azure.AI.OpenAI.Tests.Models
// namespace in the original files (Models/AutoOrLongValue.cs and
// Models/AzureDeployedModel.cs); they are emitted together here because the
// mangled diff interleaves them on the same chunk lines.
//
// Fixes applied relative to the visible original:
//  - ArgumentNullException("value") -> nameof(value).
//  - ToJsonElement() on a default-initialized struct (where _stringValue is null,
//    bypassing the parameterless constructor) previously serialized as an empty
//    JSON string; it now reports the null sentinel.
//  - NotSupportedException now carries a diagnostic message.
// NOTE(review): the NULL sentinel literal reads "<>" in the mangled source; the
// original may have contained angle-bracketed text (e.g. "<<null>>") that the
// mangling stripped — confirm against the committed file.

/// <summary>
/// Represents a JSON value that is either the string "auto", a 64-bit integer,
/// or absent (the null sentinel). Used to round-trip service options that accept
/// "auto" in place of a number.
/// </summary>
public readonly struct AutoOrLongValue
{
    /// <summary>Sentinel string meaning "no value" (serializes as JSON null/absent).</summary>
    public const string NULL = "<>";
    /// <summary>The literal "auto" string value.</summary>
    public const string AUTO = "auto";

    private readonly long? _longValue;
    private readonly string _stringValue;

    /// <summary>Creates the null-sentinel value.</summary>
    public AutoOrLongValue()
    {
        _longValue = null;
        _stringValue = NULL;
    }

    /// <summary>
    /// Creates a value from a string. Only "auto" and the null sentinel are
    /// accepted (case-insensitively); any other string throws.
    /// </summary>
    /// <exception cref="ArgumentNullException">When <paramref name="value"/> is null.</exception>
    /// <exception cref="NotSupportedException">For any other string.</exception>
    public AutoOrLongValue(string value)
    {
        if (value == null)
        {
            throw new ArgumentNullException(nameof(value));
        }
        else if (string.Equals(value, AUTO, StringComparison.OrdinalIgnoreCase))
        {
            _longValue = null;
            _stringValue = AUTO;
        }
        else if (string.Equals(value, NULL, StringComparison.OrdinalIgnoreCase))
        {
            _longValue = null;
            _stringValue = NULL;
        }
        else
        {
            throw new NotSupportedException($"Only '{AUTO}' or the null sentinel are supported string values.");
        }
    }

    /// <summary>Creates a numeric value.</summary>
    public AutoOrLongValue(long value)
    {
        _longValue = value;
        _stringValue = value.ToString(CultureInfo.InvariantCulture);
    }

    /// <summary>
    /// Serializes to a <see cref="JsonElement"/>: a number for longs, the string
    /// "auto" for auto, or null for the sentinel.
    /// </summary>
    public JsonElement? ToJsonElement()
    {
        // _stringValue is null (not NULL) for default-initialized struct instances;
        // treat that the same as the explicit null sentinel.
        if (_stringValue is null || _stringValue == NULL)
        {
            return null;
        }

        using var json = JsonDocument.Parse(
            _longValue?.ToString(CultureInfo.InvariantCulture)
            ?? $"\"{_stringValue}\"");

        // Clone so the element outlives the disposed JsonDocument.
        return json.RootElement.Clone();
    }

    /// <summary>
    /// Parses a <see cref="JsonElement"/>: strings map through the string ctor,
    /// JSON null maps to the sentinel, numbers map to longs.
    /// </summary>
    /// <exception cref="JsonException">For any other JSON kind.</exception>
    public static AutoOrLongValue FromJsonElement(JsonElement element)
    {
        if (element.ValueKind == JsonValueKind.String)
        {
            return new(element.GetString() ?? NULL);
        }
        else if (element.ValueKind == JsonValueKind.Null)
        {
            return new();
        }
        else if (element.ValueKind == JsonValueKind.Number)
        {
            return new(element.GetInt64());
        }
        else
        {
            throw new JsonException("Unsupported element kind: " + element.ValueKind);
        }
    }

    // HasValue is true only for numeric values: "auto" and the sentinel both have
    // no long, and any long renders _stringValue as digits (never the sentinel),
    // so the original's extra `_stringValue != NULL` conjunct was redundant.
    public bool HasValue => HasLongValue;
    public string StringValue => _stringValue;
    public bool HasLongValue => _longValue.HasValue;
    public long LongValue => _longValue ?? throw new InvalidOperationException("No corresponding long value");

    public static implicit operator AutoOrLongValue(long val) => new AutoOrLongValue(val);
    public static implicit operator AutoOrLongValue(string? val) => new AutoOrLongValue(val ?? NULL);
}

/// <summary>
/// Minimal projection of an Azure Cognitive Services deployment resource as
/// returned by the management-plane deployment REST API.
/// </summary>
public class AzureDeployedModel
{
    /// <summary>The fully qualified ARM resource ID.</summary>
    public required string ID { get; init; }
    /// <summary>The deployment name.</summary>
    public required string Name { get; init; }
    /// <summary>The deployment properties payload.</summary>
    public required Props Properties { get; init; }

    public class Props
    {
        /// <summary>The deployed model description.</summary>
        public required ModelInfo Model { get; init; }
        /// <summary>ARM provisioning state, e.g. "Succeeded", "Failed", "Canceled".</summary>
        public required string ProvisioningState { get; init; }
    }

    public class ModelInfo
    {
        public string? Model { get; init; }
        public required string Name { get; init; }
        public required string Version { get; init; }
    }
}
+ +using System; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Globalization; +using System.IO; +using System.Linq; +using System.Net.Http; +using System.Text.Json; +using System.Threading; +using System.Threading.Tasks; +using Azure.AI.OpenAI.Tests.Utils; +using Azure.AI.OpenAI.Tests.Utils.Config; +using OpenAI.TestFramework.Utils; + +namespace Azure.AI.OpenAI.Tests.Models; + +internal class AzureDeploymentClient : IDisposable +{ + private const string DEFAULT_API_VERSION = "2023-10-01-preview"; + private const string DEFAULT_SKU_NAME = "standard"; + private const int DEFAULT_CAPACITY = 1; + + private CancellationTokenSource _cts; + private ClientPipeline _pipeline; + private Core.AccessToken? _cachedAuthToken; + private readonly Core.TokenCredential _credential; + private readonly string _subscriptionId; + private readonly string _resourceGroup; + private readonly string _resourceName; + private readonly string _endpointUrl; + private readonly string _apiVersion; + + internal AzureDeploymentClient() + { + // for mocking + _cts = new(); + _pipeline = ClientPipeline.Create(); + _subscriptionId = _resourceGroup = _resourceName = _endpointUrl = string.Empty; + _apiVersion = DEFAULT_API_VERSION; + _credential = null!; + } + + public AzureDeploymentClient(IConfiguration config, Core.TokenCredential credential, string? apiVersion = null, PipelineTransport? transport = null) + { + if (config == null) + { + throw new ArgumentNullException(nameof(config)); + } + + _cts = new(); + _pipeline = ClientPipeline.Create(new ClientPipelineOptions() + { + Transport = transport ?? new HttpClientPipelineTransport() + }); + _credential = credential ?? throw new ArgumentNullException(nameof(credential)); + + _subscriptionId = config.GetValueOrThrow("subscription_id"); + _resourceGroup = config.GetValueOrThrow("resource_group"); + _resourceName = config.Endpoint?.IdnHost.Split('.').FirstOrDefault() + ?? 
throw new KeyNotFoundException("Could extract the resource name from the endpoint URL in the config"); + + _endpointUrl = $"https://management.azure.com/subscriptions/{_subscriptionId}/resourceGroups/{_resourceGroup}/providers/Microsoft.CognitiveServices/accounts/{_resourceName}/deployments/"; + + _apiVersion = DEFAULT_API_VERSION; + if (!string.IsNullOrWhiteSpace(apiVersion)) + { + _apiVersion = Uri.EscapeDataString(apiVersion); + } + } + + public virtual AzureDeployedModel CreateDeployment(string deploymentName, string modelName, string? skuName = DEFAULT_SKU_NAME, int capacity = DEFAULT_CAPACITY, CancellationToken token = default) + => CreateDeploymentAsync(false, deploymentName, modelName, skuName, capacity, token).GetAwaiter().GetResult(); + + public virtual Task CreateDeploymentAsync(string deploymentName, string modelName, string? skuName = DEFAULT_SKU_NAME, int capacity = DEFAULT_CAPACITY, CancellationToken token = default) + => CreateDeploymentAsync(true, deploymentName, modelName, skuName, capacity, token).AsTask(); + + public virtual AzureDeployedModel GetDeployment(string deploymentName, CancellationToken token = default) + => GetDeploymentAsync(false, deploymentName, token).GetAwaiter().GetResult(); + + public virtual Task GetDeploymentAsync(string deploymentName, CancellationToken token = default) + => GetDeploymentAsync(true, deploymentName, token).AsTask(); + + public virtual bool DeleteDeployment(string deploymentName, CancellationToken token = default) + => DeleteDeploymentAsync(false, deploymentName, token).GetAwaiter().GetResult(); + + public virtual Task DeleteDeploymentAsync(string deploymentName, CancellationToken token = default) + => DeleteDeploymentAsync(true, deploymentName, token).AsTask(); + + public void Dispose() + { + _cts.Cancel(); + _cts.Dispose(); + } + + private async ValueTask CreateDeploymentAsync(bool isAsync, string deploymentName, string modelName, string? 
skuName, int capacity, CancellationToken token) + { + BinaryContent content = ToJsonContent(new + { + sku = new + { + name = skuName, + capacity = capacity.ToString(CultureInfo.InvariantCulture), + }, + properties = new + { + model = new + { + format = "OpenAI", + name = modelName, + version = "1" + } + } + }); + + PipelineResponse response = await SendRequestAsync(isAsync, HttpMethod.Put, deploymentName, content, token) + .ConfigureAwait(false); + return FromJsonContent(response, token); + } + + private async ValueTask GetDeploymentAsync(bool isAsync, string deploymentName, CancellationToken token) + { + PipelineResponse response = await SendRequestAsync(isAsync, HttpMethod.Get, deploymentName, null, token) + .ConfigureAwait(false); + return FromJsonContent(response, token); + } + + private async ValueTask DeleteDeploymentAsync(bool isAsync, string deploymentName, CancellationToken token) + { + PipelineResponse response = await SendRequestAsync(isAsync, HttpMethod.Delete, deploymentName, null, token) + .ConfigureAwait(false); + ThrowOnFailed(response); + return true; + } + + private static BinaryContent ToJsonContent(T value) + { + Utf8JsonBinaryContent content = new(); + JsonSerializer.Serialize(content.JsonWriter, value, typeof(T), JsonOptions.AzureJsonOptions); + return content; + } + + private class ErrorDetail + { + public string? Code { get; init; } + public string? Message { get; init; } + } + + private class ErrorInfo + { + public ErrorDetail? Error { get; init; } + } + + private static void ThrowOnFailed(PipelineResponse response) + { + if (response.IsError) + { + if (response.Content != null + && response.Headers.GetFirstOrDefault("Content-Type")?.StartsWith("application/json") == true) + { + using Stream errorStream = response.Content.ToStream(); + ErrorInfo? 
error = JsonHelpers.Deserialize(errorStream, JsonOptions.AzureJsonOptions); + if (error?.Error != null) + { + throw new ClientResultException($"[{response.Status} - {error.Error.Code}] {error.Error.Message}", response); + } + } + + throw new ClientResultException(response); + } + } + + private static T FromJsonContent(PipelineResponse response, CancellationToken token) + { + ThrowOnFailed(response); + + using Stream stream = response.Content.ToStream(); + return JsonHelpers.Deserialize(stream, JsonOptions.AzureJsonOptions) + ?? throw new InvalidDataException("Service returned a null JSON response body"); + } + + private async ValueTask SendRequestAsync(bool isAsync, HttpMethod method, string pathPart, BinaryContent? body, CancellationToken token) + { + var linked = CancellationTokenSource.CreateLinkedTokenSource(_cts.Token, token); + + PipelineMessage message = _pipeline.CreateMessage(); + message.Apply(new() + { + CancellationToken = linked.Token, + ErrorOptions = ClientErrorBehaviors.NoThrow + }); + + string requestId = Guid.NewGuid().ToString(); + string bearerToken = await GetOrRenewAuthTokenAsync(isAsync, requestId, token).ConfigureAwait(false); + + string fullEndpoint = _endpointUrl + pathPart + "?api-version=" + _apiVersion; + + PipelineRequest request = message.Request; + request.Method = method.Method; + request.Uri = new Uri(fullEndpoint); + request.Headers.Add("x-ms-client-request-id", requestId); + request.Headers.Add("Authorization", "Bearer " + bearerToken); + if (body != null) + { + request.Headers.Add("Content-Type", "application/json"); + request.Content = body; + } + + if (isAsync) + { + await _pipeline.SendAsync(message).ConfigureAwait(false); + } + else + { + _pipeline.Send(message); + } + + return message.Response ?? 
throw new InvalidOperationException("No response was set after sending"); + } + + private async ValueTask GetOrRenewAuthTokenAsync(bool isAsync, string requestId, CancellationToken token) + { + // TODO FIXME: Use more streamlined way to get bearer auth token + if (_cachedAuthToken?.ExpiresOn > DateTimeOffset.Now.AddSeconds(-5)) + { + return _cachedAuthToken.Value.Token; + } + + var context = new Core.TokenRequestContext( + [ + "https://management.azure.com/.default" + ], + requestId); + + Core.AccessToken authToken; + if (isAsync) + { + authToken = await _credential.GetTokenAsync(context, token).ConfigureAwait(false); + } + else + { + authToken = _credential.GetToken(context, token); + } + + string bearerToken = authToken.Token; + _cachedAuthToken = authToken; + return bearerToken; + } +} diff --git a/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Models/BatchObject.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Models/BatchObject.cs new file mode 100644 index 000000000..0c0e7d517 --- /dev/null +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Models/BatchObject.cs @@ -0,0 +1,22 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using System; +using System.Text.Json; +using Azure.AI.OpenAI.Tests.Utils; + +namespace Azure.AI.OpenAI.Tests.Models; + +public class BatchObject +{ + public static BatchObject From(BinaryData data) + { + return JsonSerializer.Deserialize(data, JsonOptions.OpenAIJsonOptions) + ?? throw new InvalidOperationException("Response was null JSON"); + } + + public string? Status { get; set; } + public string? Id { get; set; } + public string? OutputFileID { get; set; } + public string? 
ErrorFileId { get; set; } +} diff --git a/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Models/BatchOptions.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Models/BatchOptions.cs new file mode 100644 index 000000000..40a580370 --- /dev/null +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Models/BatchOptions.cs @@ -0,0 +1,29 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using System; +using System.ClientModel; +using System.Collections.Generic; +using System.IO; +using Azure.AI.OpenAI.Tests.Utils; +using OpenAI.TestFramework.Utils; + +namespace Azure.AI.OpenAI.Tests.Models; + +public class BatchOptions +{ + public string? InputFileId { get; set; } + public string? Endpoint { get; set; } + public string CompletionWindow { get; set; } = "24h"; + public IDictionary Metadata { get; } = new Dictionary(); + + public BinaryContent ToBinaryContent() + { + using MemoryStream stream = new MemoryStream(); + JsonHelpers.Serialize(stream, this, JsonOptions.OpenAIJsonOptions); + + stream.Seek(0, SeekOrigin.Begin); + var data = BinaryData.FromStream(stream); + return BinaryContent.Create(data); + } +} diff --git a/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Models/BatchResult.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Models/BatchResult.cs new file mode 100644 index 000000000..9d36d8d18 --- /dev/null +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Models/BatchResult.cs @@ -0,0 +1,41 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using System; +using System.Collections.Generic; +using System.IO; +using System.Text; +using System.Text.Json; +using Azure.AI.OpenAI.Tests.Utils; + +namespace Azure.AI.OpenAI.Tests.Models; + +public class BatchResult +{ + public string? ID { get; init; } + public string? CustomId { get; init; } + public T? Response { get; init; } + public JsonElement? 
Error { get; init; } + + public static IReadOnlyList> From(BinaryData data) + { + List> list = new(); + using var reader = new StreamReader(data.ToStream(), Encoding.UTF8, false); + string? line; + while ((line = reader.ReadLine()) != null) + { + if (string.IsNullOrWhiteSpace(line)) + { + break; + } + + var entry = JsonSerializer.Deserialize>(line, JsonOptions.OpenAIJsonOptions); + if (entry != null) + { + list.Add(entry); + } + } + + return list; + } +} diff --git a/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Models/FineTuningCheckpoint.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Models/FineTuningCheckpoint.cs new file mode 100644 index 000000000..8ab2bb46a --- /dev/null +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Models/FineTuningCheckpoint.cs @@ -0,0 +1,29 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +#nullable enable + +using System; + +namespace Azure.AI.OpenAI.Tests.Models +{ + public class FineTuningCheckpoint : FineTuningModelBase + { + public DateTimeOffset CreatedAt { get; init; } + public string? FineTunedModelCheckpoint { get; init; } + public string? 
FineTuningJobID { get; init; } + public int StepNumber { get; init; } + public MetricsInfo Metrics { get; init; } = new MetricsInfo(); + + public class MetricsInfo + { + public int Step { get; init; } + public float TrainLoss { get; init; } + public float TrainMeanTokenAccuracy { get; init; } + public float ValidLoss { get; init; } + public float ValidMeanTokenAccuracy { get; init; } + public float FullValidLoss { get; init; } + public float FullValidMeanTokenAccuracy { get; init; } + } + } +} diff --git a/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Models/FineTuningHyperparameters.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Models/FineTuningHyperparameters.cs new file mode 100644 index 000000000..9057245c8 --- /dev/null +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Models/FineTuningHyperparameters.cs @@ -0,0 +1,89 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +#nullable enable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace Azure.AI.OpenAI.Tests.Models; + +public class FineTuningHyperparameters : IJsonModel +{ + private Dictionary _values = new(); + + public AutoOrLongValue? BatchSize + { + get => Get("batch_size"); + set => Set("batch_size", value); + } + + public AutoOrLongValue? LearningRateMultiplier + { + get => Get("learning_rate_multiplier"); + set => Set("learning_rate_multiplier", value); + } + + public AutoOrLongValue? NumEpochs + { + get => Get("n_epochs"); + set => Set("n_epochs", value); + } + + private AutoOrLongValue? Get(string key) + { + if (_values.TryGetValue(key, out JsonElement element)) + { + return AutoOrLongValue.FromJsonElement(element); + } + + return null; + } + + private void Set(string key, AutoOrLongValue? value) + { + JsonElement? 
element = value?.ToJsonElement(); + if (element == null) + { + _values.Remove(key); + } + else + { + _values[key] = element.Value; + } + } + + FineTuningHyperparameters IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var dict = JsonSerializer.Deserialize>(ref reader); + FineTuningHyperparameters instance = new(); + instance._values = dict ?? new Dictionary(); + return instance; + } + + FineTuningHyperparameters IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + ReadOnlyMemory rawData = data.ToMemory(); + var reader = new Utf8JsonReader(rawData.Span); + return ((IJsonModel)this).Create(ref reader, options); + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) + => ModelReaderWriterOptions.Json.Format; + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + writer.WriteStartObject(); + foreach (var kvp in _values) + { + writer.WritePropertyName(kvp.Key); + kvp.Value.WriteTo(writer); + } + writer.WriteEndObject(); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + => ModelReaderWriter.Write(this, options); +} diff --git a/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Models/FineTuningJob.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Models/FineTuningJob.cs new file mode 100644 index 000000000..e2834f3fe --- /dev/null +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Models/FineTuningJob.cs @@ -0,0 +1,22 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +#nullable enable + +using System; +using System.Collections.Generic; + +namespace Azure.AI.OpenAI.Tests.Models; + +public class FineTuningJob : FineTuningModelBase +{ + public DateTimeOffset CreatedAt { get; init; } + public IReadOnlyDictionary? Error { get; set; } + public string? FineTunedModel { get; init; } + public string Model { get; init; } = string.Empty; + public string? 
OrganizationID { get; init; } + public string Status { get; set; } = string.Empty; + public IReadOnlyList? ResultFiles { get; init; } + public int? TrainedTokens { get; init; } + public DateTimeOffset EstimatedFinish { get; init; } +} diff --git a/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Models/FineTuningJobEvent.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Models/FineTuningJobEvent.cs new file mode 100644 index 000000000..4e444b0b7 --- /dev/null +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Models/FineTuningJobEvent.cs @@ -0,0 +1,15 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +#nullable enable + +using System; + +namespace Azure.AI.OpenAI.Tests.Models; + +public class FineTuningJobEvent : FineTuningModelBase +{ + public DateTimeOffset CreatedAt { get; init; } + public string? Level { get; init; } + public string? Message { get; init; } +} diff --git a/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Models/FineTuningModelBase.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Models/FineTuningModelBase.cs new file mode 100644 index 000000000..3e75eee9e --- /dev/null +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Models/FineTuningModelBase.cs @@ -0,0 +1,12 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +#nullable enable + +namespace Azure.AI.OpenAI.Tests.Models; + +public abstract class FineTuningModelBase +{ + required public string ID { get; init; } + required public string Object { get; init; } +} diff --git a/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Models/FineTuningOptions.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Models/FineTuningOptions.cs new file mode 100644 index 000000000..4c44995f8 --- /dev/null +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Models/FineTuningOptions.cs @@ -0,0 +1,26 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +using System.ClientModel; +using System.IO; +using Azure.AI.OpenAI.Tests.Utils; +using OpenAI.TestFramework.Utils; + +namespace Azure.AI.OpenAI.Tests.Models; + +public class FineTuningOptions +{ + required public string TrainingFile { get; init; } + required public string Model { get; init; } + public int? Seed { get; set; } + public string? Suffix { get; set; } + public FineTuningHyperparameters? Hyperparameters { get; init; } + + public BinaryContent ToBinaryContent() + { + MemoryStream stream = new(); + JsonHelpers.Serialize(stream, this, JsonOptions.OpenAIJsonOptions); + stream.Seek(0, SeekOrigin.Begin); + return BinaryContent.Create(stream); + } +} diff --git a/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Models/ListResponse.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Models/ListResponse.cs new file mode 100644 index 000000000..c5207f796 --- /dev/null +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Models/ListResponse.cs @@ -0,0 +1,14 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +#nullable enable + +using System.Collections.Generic; + +namespace Azure.AI.OpenAI.Tests.Models; + +public class ListResponse +{ + public bool HasMore { get; init; } + public IReadOnlyList? 
Data { get; init; } +} diff --git a/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Properties/AssemblyInfo.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Properties/AssemblyInfo.cs new file mode 100644 index 000000000..278184836 --- /dev/null +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Properties/AssemblyInfo.cs @@ -0,0 +1,4 @@ +using System.Runtime.CompilerServices; +using Castle.Core.Internal; + +[assembly: InternalsVisibleTo(InternalsVisible.ToDynamicProxyGenAssembly2)] diff --git a/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Samples/00_ClientConfiguration.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Samples/00_ClientConfiguration.cs new file mode 100644 index 000000000..a4ebb8fc1 --- /dev/null +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Samples/00_ClientConfiguration.cs @@ -0,0 +1,64 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +#nullable disable + +using System; +using Azure.Identity; +using OpenAI.Chat; + +namespace Azure.AI.OpenAI.Samples; + +public partial class AzureOpenAISamples +{ + public void CreateAnAzureOpenAIClient() + { + #region Snippet:ConfigureClient:WithAOAITopLevelClient + string keyFromEnvironment = Environment.GetEnvironmentVariable("AZURE_OPENAI_API_KEY"); + + AzureOpenAIClient azureClient = new( + new Uri("https://your-azure-openai-resource.com"), + new AzureKeyCredential(keyFromEnvironment)); + ChatClient chatClient = azureClient.GetChatClient("my-gpt-35-turbo-deployment"); + #endregion + } + + public void CreateAnAzureOpenAIClientWithEntra() + { + #region Snippet:ConfigureClient:WithEntra + AzureOpenAIClient azureClient = new( + new Uri("https://your-azure-openai-resource.com"), + new DefaultAzureCredential()); + ChatClient chatClient = azureClient.GetChatClient("my-gpt-4o-mini-deployment"); + #endregion + } + + public void UseAzureGovernment() + { + #region Snippet:ConfigureClient:GovernmentAudience + AzureOpenAIClientOptions options = new() + { + 
Audience = AzureOpenAIAudience.AzureGovernment, + }; + AzureOpenAIClient azureClient = new( + new Uri("https://your-azure-openai-resource.com"), + new DefaultAzureCredential()); + ChatClient chatClient = azureClient.GetChatClient("my-gpt-4o-mini-deployment"); + #endregion + } + + public void UseCustomAuthorizationScope() + { + #region Snippet:ConfigureClient:CustomAudience + AzureOpenAIClientOptions optionsWithCustomAudience = new() + { + Audience = "https://cognitiveservices.azure.com/.default", + }; + #endregion + + AzureOpenAIClient azureClient = new( + new Uri("https://your-azure-openai-resource.com"), + new DefaultAzureCredential()); + ChatClient chatClient = azureClient.GetChatClient("my-gpt-4o-mini-deployment"); + } +} diff --git a/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Samples/01_Chat.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Samples/01_Chat.cs new file mode 100644 index 000000000..3ad7584e5 --- /dev/null +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Samples/01_Chat.cs @@ -0,0 +1,292 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +#nullable disable + +using System; +using System.ClientModel; +using System.Collections.Generic; +using System.Text; +using System.Text.Json; +using Azure.Identity; +using OpenAI.Chat; + +namespace Azure.AI.OpenAI.Samples; + +public partial class AzureOpenAISamples +{ + public void BasicChat() + { + #region Snippet:SimpleChatResponse + AzureOpenAIClient azureClient = new( + new Uri("https://your-azure-openai-resource.com"), + new DefaultAzureCredential()); + ChatClient chatClient = azureClient.GetChatClient("my-gpt-35-turbo-deployment"); + + ChatCompletion completion = chatClient.CompleteChat( + [ + // System messages represent instructions or other guidance about how the assistant should behave + new SystemChatMessage("You are a helpful assistant that talks like a pirate."), + // User messages represent user input, whether historical or the most recent input + new UserChatMessage("Hi, can you help me?"), + // Assistant messages in a request represent conversation history for responses + new AssistantChatMessage("Arrr! Of course, me hearty! What can I do for ye?"), + new UserChatMessage("What's the best way to train a parrot?"), + ]); + + Console.WriteLine($"{completion.Role}: {completion.Content[0].Text}"); + #endregion + } + + public void StreamingChat() + { + #region Snippet:StreamChatMessages + AzureOpenAIClient azureClient = new( + new Uri("https://your-azure-openai-resource.com"), + new DefaultAzureCredential()); + ChatClient chatClient = azureClient.GetChatClient("my-gpt-35-turbo-deployment"); + + CollectionResult completionUpdates = chatClient.CompleteChatStreaming( + [ + new SystemChatMessage("You are a helpful assistant that talks like a pirate."), + new UserChatMessage("Hi, can you help me?"), + new AssistantChatMessage("Arrr! Of course, me hearty!
What can I do for ye?"), + new UserChatMessage("What's the best way to train a parrot?"), + ]); + + foreach (StreamingChatCompletionUpdate completionUpdate in completionUpdates) + { + foreach (ChatMessageContentPart contentPart in completionUpdate.ContentUpdate) + { + Console.Write(contentPart.Text); + } + } + #endregion + } + + public void ChatWithTools() + { + #region Snippet:ChatTools:DefineTool + static string GetCurrentLocation() + { + // Call the location API here. + return "San Francisco"; + } + + static string GetCurrentWeather(string location, string unit = "celsius") + { + // Call the weather API here. + return $"31 {unit}"; + } + + ChatTool getCurrentLocationTool = ChatTool.CreateFunctionTool( + functionName: nameof(GetCurrentLocation), + functionDescription: "Get the user's current location" + ); + + ChatTool getCurrentWeatherTool = ChatTool.CreateFunctionTool( + functionName: nameof(GetCurrentWeather), + functionDescription: "Get the current weather in a given location", + functionParameters: BinaryData.FromString(""" + { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state, e.g. Boston, MA" + }, + "unit": { + "type": "string", + "enum": [ "celsius", "fahrenheit" ], + "description": "The temperature unit to use. Infer this from the specified location." 
+ } + }, + "required": [ "location" ] + } + """) + ); + #endregion + + AzureOpenAIClient azureClient = new( + new Uri("https://your-azure-openai-resource.com"), + new DefaultAzureCredential()); + ChatClient chatClient = azureClient.GetChatClient("my-gpt-35-turbo-deployment"); + + #region Snippet:ChatTools:RequestWithFunctions + ChatCompletionOptions options = new() + { + Tools = { getCurrentLocationTool, getCurrentWeatherTool }, + }; + + List conversationMessages = + [ + new UserChatMessage("What's the weather like in Boston?"), + ]; + ChatCompletion completion = chatClient.CompleteChat(conversationMessages); + #endregion + + #region Snippet:ChatTools:HandleToolCalls + // Purely for convenience and clarity, this standalone local method handles tool call responses. + string GetToolCallContent(ChatToolCall toolCall) + { + if (toolCall.FunctionName == getCurrentWeatherTool.FunctionName) + { + // Validate arguments before using them; it's not always guaranteed to be valid JSON! + try + { + using JsonDocument argumentsDocument = JsonDocument.Parse(toolCall.FunctionArguments); + if (!argumentsDocument.RootElement.TryGetProperty("location", out JsonElement locationElement)) + { + // Handle missing required "location" argument + } + else + { + string location = locationElement.GetString(); + if (argumentsDocument.RootElement.TryGetProperty("unit", out JsonElement unitElement)) + { + return GetCurrentWeather(location, unitElement.GetString()); + } + else + { + return GetCurrentWeather(location); + } + } + } + catch (JsonException) + { + // Handle the JsonException (bad arguments) here + } + } + // Handle unexpected tool calls + throw new NotImplementedException(); + } + + if (completion.FinishReason == ChatFinishReason.ToolCalls) + { + // Add a new assistant message to the conversation history that includes the tool calls + conversationMessages.Add(new AssistantChatMessage(completion)); + + foreach (ChatToolCall toolCall in completion.ToolCalls) + { + 
conversationMessages.Add(new ToolChatMessage(toolCall.Id, GetToolCallContent(toolCall))); + } + + // Now make a new request with all the messages thus far, including the original + } + #endregion + } + + public void StreamingChatToolCalls() + { + static string GetCurrentLocation() + { + // Call the location API here. + return "San Francisco"; + } + + static string GetCurrentWeather(string location, string unit = "celsius") + { + // Call the weather API here. + return $"31 {unit}"; + } + + ChatTool getCurrentLocationTool = ChatTool.CreateFunctionTool( + functionName: nameof(GetCurrentLocation), + functionDescription: "Get the user's current location" + ); + + ChatTool getCurrentWeatherTool = ChatTool.CreateFunctionTool( + functionName: nameof(GetCurrentWeather), + functionDescription: "Get the current weather in a given location", + functionParameters: BinaryData.FromString(""" + { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state, e.g. Boston, MA" + }, + "unit": { + "type": "string", + "enum": [ "celsius", "fahrenheit" ], + "description": "The temperature unit to use. Infer this from the specified location." 
+ } + }, + "required": [ "location" ] + } + """) + ); + + AzureOpenAIClient azureClient = new( + new Uri("https://your-azure-openai-resource.com"), + new DefaultAzureCredential()); + ChatClient chatClient = azureClient.GetChatClient("my-gpt-35-turbo-deployment"); + + ChatCompletionOptions options = new() + { + Tools = { getCurrentLocationTool, getCurrentWeatherTool }, + }; + + List conversationMessages = + [ + new UserChatMessage("What's the weather like in Boston?"), + ]; + + #region Snippet:ChatTools:StreamingChatTools + Dictionary toolCallIdsByIndex = []; + Dictionary functionNamesByIndex = []; + Dictionary functionArgumentBuildersByIndex = []; + StringBuilder contentBuilder = new(); + + foreach (StreamingChatCompletionUpdate streamingChatUpdate + in chatClient.CompleteChatStreaming(conversationMessages, options)) + { + foreach (ChatMessageContentPart contentPart in streamingChatUpdate.ContentUpdate) + { + contentBuilder.Append(contentPart.Text); + } + foreach (StreamingChatToolCallUpdate toolCallUpdate in streamingChatUpdate.ToolCallUpdates) + { + if (!string.IsNullOrEmpty(toolCallUpdate.Id)) + { + toolCallIdsByIndex[toolCallUpdate.Index] = toolCallUpdate.Id; + } + if (!string.IsNullOrEmpty(toolCallUpdate.FunctionName)) + { + functionNamesByIndex[toolCallUpdate.Index] = toolCallUpdate.FunctionName; + } + if (!string.IsNullOrEmpty(toolCallUpdate.FunctionArgumentsUpdate)) + { + StringBuilder argumentsBuilder + = functionArgumentBuildersByIndex.TryGetValue(toolCallUpdate.Index, out StringBuilder existingBuilder) + ? 
existingBuilder + : new(); + argumentsBuilder.Append(toolCallUpdate.FunctionArgumentsUpdate); + functionArgumentBuildersByIndex[toolCallUpdate.Index] = argumentsBuilder; + } + } + } + + List toolCalls = []; + foreach (KeyValuePair indexToIdPair in toolCallIdsByIndex) + { + toolCalls.Add(ChatToolCall.CreateFunctionToolCall( + indexToIdPair.Value, + functionNamesByIndex[indexToIdPair.Key], + functionArgumentBuildersByIndex[indexToIdPair.Key].ToString())); + } + + conversationMessages.Add(new AssistantChatMessage(toolCalls, contentBuilder.ToString())); + + // Placeholder: each tool call must be resolved, like in the non-streaming case + string GetToolCallOutput(ChatToolCall toolCall) => null; + + foreach (ChatToolCall toolCall in toolCalls) + { + conversationMessages.Add(new ToolChatMessage(toolCall.Id, GetToolCallOutput(toolCall))); + } + + // Repeat with the history and all tool call resolution messages added + #endregion + } +} diff --git a/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Samples/02_Oyd.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Samples/02_Oyd.cs new file mode 100644 index 000000000..1f1a85d69 --- /dev/null +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Samples/02_Oyd.cs @@ -0,0 +1,54 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +#nullable disable + +using System; +using Azure.AI.OpenAI.Chat; +using Azure.Identity; +using OpenAI.Chat; + +namespace Azure.AI.OpenAI.Samples; + +public partial class AzureOpenAISamples +{ + public void OnYourDataSearch() + { + AzureOpenAIClient azureClient = new( + new Uri("https://your-azure-openai-resource.com"), + new DefaultAzureCredential()); + ChatClient chatClient = azureClient.GetChatClient("my-gpt-35-turbo-deployment"); + + #region Snippet:ChatUsingYourOwnData + // Extension methods to use data sources with options are subject to SDK surface changes. 
Suppress the + // warning to acknowledge this and use the subject-to-change AddDataSource method. + #pragma warning disable AOAI001 + + ChatCompletionOptions options = new(); + options.AddDataSource(new AzureSearchChatDataSource() + { + Endpoint = new Uri("https://your-search-resource.search.windows.net"), + IndexName = "contoso-products-index", + Authentication = DataSourceAuthentication.FromApiKey( + Environment.GetEnvironmentVariable("OYD_SEARCH_KEY")), + }); + + ChatCompletion completion = chatClient.CompleteChat( + [ + new UserChatMessage("What are the best-selling Contoso products this month?"), + ], + options); + + AzureChatMessageContext onYourDataContext = completion.GetAzureMessageContext(); + + if (onYourDataContext?.Intent is not null) + { + Console.WriteLine($"Intent: {onYourDataContext.Intent}"); + } + foreach (AzureChatCitation citation in onYourDataContext?.Citations ?? []) + { + Console.WriteLine($"Citation: {citation.Content}"); + } + #endregion + } +} diff --git a/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Samples/03_Assistants.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Samples/03_Assistants.cs new file mode 100644 index 000000000..aaf5b15ff --- /dev/null +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Samples/03_Assistants.cs @@ -0,0 +1,78 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +#nullable disable + +using System; +using System.Threading.Tasks; +using Azure.Identity; +using OpenAI.Assistants; + +namespace Azure.AI.OpenAI.Samples; + +public partial class AzureOpenAISamples +{ + public async Task StreamingAssistantRunAsync() + { + #region Snippet:Assistants:CreateClient + AzureOpenAIClient azureClient = new( + new Uri("https://your-azure-openai-resource.com"), + new DefaultAzureCredential()); + + // The Assistants feature area is in beta, with API specifics subject to change.
+ // Suppress the [Experimental] warning via .csproj or, as here, in the code to acknowledge. + #pragma warning disable OPENAI001 + AssistantClient assistantClient = azureClient.GetAssistantClient(); + #endregion + + #region Snippet:Assistants:PrepareToRun + Assistant assistant = await assistantClient.CreateAssistantAsync( + model: "my-gpt-4o-deployment", + new AssistantCreationOptions() + { + Name = "My Friendly Test Assistant", + Instructions = "You politely help with math questions. Use the code interpreter tool when asked to " + + "visualize numbers.", + Tools = { ToolDefinition.CreateCodeInterpreter() }, + }); + ThreadInitializationMessage initialMessage = new( + MessageRole.User, + [ + "Hi, Assistant! Draw a graph for a line with a slope of 4 and y-intercept of 9." + ]); + AssistantThread thread = await assistantClient.CreateThreadAsync(new ThreadCreationOptions() + { + InitialMessages = { initialMessage }, + }); + #endregion + + #region Snippet:Assistants:StreamRun + RunCreationOptions runOptions = new() + { + AdditionalInstructions = "When possible, talk like a pirate." + }; + await foreach (StreamingUpdate streamingUpdate + in assistantClient.CreateRunStreamingAsync(thread, assistant, runOptions)) + { + if (streamingUpdate.UpdateKind == StreamingUpdateReason.RunCreated) + { + Console.WriteLine($"--- Run started! ---"); + } + else if (streamingUpdate is MessageContentUpdate contentUpdate) + { + Console.Write(contentUpdate.Text); + if (contentUpdate.ImageFileId is not null) + { + Console.WriteLine($"[Image content file ID: {contentUpdate.ImageFileId}"); + } + } + } + #endregion + + #region Snippet:Assistants:Cleanup + // Optionally, delete persistent resources that are no longer needed. 
+ _ = await assistantClient.DeleteAssistantAsync(assistant); + _ = await assistantClient.DeleteThreadAsync(thread); + #endregion + } +} diff --git a/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Utils/AoaiTestBase.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Utils/AoaiTestBase.cs new file mode 100644 index 000000000..6160c3f6b --- /dev/null +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Utils/AoaiTestBase.cs @@ -0,0 +1,746 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using System; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.IO; +using System.Linq; +using System.Text; +using System.Text.Json; +using System.Threading.Tasks; +using Azure.AI.OpenAI.Tests.Models; +using Azure.AI.OpenAI.Tests.Utils; +using Azure.AI.OpenAI.Tests.Utils.Config; +using NUnit.Framework.Interfaces; +using OpenAI.Assistants; +using OpenAI.Audio; +using OpenAI.Batch; +using OpenAI.Chat; +using OpenAI.Embeddings; +using OpenAI.Files; +using OpenAI.FineTuning; +using OpenAI.Images; +using OpenAI.TestFramework; +using OpenAI.TestFramework.Recording.Proxy; +using OpenAI.TestFramework.Recording.Proxy.Service; +using OpenAI.TestFramework.Recording.RecordingProxy; +using OpenAI.TestFramework.Recording.Sanitizers; +using OpenAI.TestFramework.Utils; +using OpenAI.VectorStores; +using TokenCredential = Azure.Core.TokenCredential; + +namespace Azure.AI.OpenAI.Tests; + +public class AoaiTestBase : RecordedClientTestBase where TClient : class +{ + private const string AZURE_URI_SANITIZER_PATTERN = @"(?<=/(subscriptions|resourceGroups|accounts)/)([^/]+?)(?=(/|$))"; + private const string SMALL_1x1_PNG = "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAIAAACQd1PeAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAAFiQAABYkAZsVxhQAAAAMSURBVBhXY2BgYAAAAAQAAVzN/2kAAAAASUVORK5CYII="; + + public static readonly DateTimeOffset START_2024 = new DateTimeOffset(2024, 01, 01, 00, 00, 00, 
TimeSpan.Zero); + public static readonly DateTimeOffset UNIX_EPOCH = +#if NETFRAMEWORK + DateTimeOffset.Parse("1970-01-01T00:00:00.0000000+00:00"); +#else + DateTimeOffset.UnixEpoch; +#endif + + internal TestConfig TestConfig { get; } + + internal Assets Assets { get; } + + public AzureTestEnvironment TestEnvironment { get; } + + protected AoaiTestBase(bool isAsync) : this(isAsync, null) + { } + + protected AoaiTestBase(bool isAsync, RecordedTestMode? mode = null) + : base(isAsync, mode) + { + TestConfig = new TestConfig(() => Mode); + Assets = new Assets(); + TestEnvironment = new AzureTestEnvironment(Mode); + + // Remove some of the default sanitizers to customize their behaviour + RecordingOptions.SanitizersToRemove.AddRange( + [ + "AZSDK2003", // Location header (we use a less restrictive sanitizer) + "AZSDK4001", // Replaces entire host name in URL. We want to mask only subdomain part to make it easier to distinguish requests + "AZSDK3430", // OpenAI liberally uses "id" in its JSON responses, and we want to keep them in the recordings + "AZSDK3493", // $..name in JSON. 
OpenAI uses this for things that don't need to be sanitized + ]); + + // Prevent resource names from leaking into recordings + RecordingOptions.Sanitizers.AddRange( + [ + new UriRegexSanitizer(SanitizedJsonConfig.HOST_SUBDOMAIN_PATTERN) + { + Value = SanitizedJsonConfig.MASK_STRING + }, + new UriRegexSanitizer(AZURE_URI_SANITIZER_PATTERN) + { + Value = SanitizedJsonConfig.MASK_STRING + }, + new HeaderRegexSanitizer("Location") + { + Regex = AZURE_URI_SANITIZER_PATTERN, + Value = SanitizedJsonConfig.MASK_STRING + }, + new HeaderRegexSanitizer("Azure-AsyncOperation") + { + Regex = AZURE_URI_SANITIZER_PATTERN, + Value = SanitizedJsonConfig.MASK_STRING + }, + new BodyKeySanitizer("$..endpoint") + { + Regex = SanitizedJsonConfig.HOST_SUBDOMAIN_PATTERN, + Value = SanitizedJsonConfig.MASK_STRING + } + ]); + + // Prevent keys from leaking into our recordings + RecordingOptions.SanitizeJsonBody("*..key", "*..api_key"); + + // Because the current implementation of multi-part form content data in OpenAI and Azure OpenAI uses random + // to generate boundaries, this causes problems during playback as the boundary will be different each time. + // Longer term, we should find a way to pass the TestRecording.Random to the multi-part form generator in the + // code. 
The simplest solution for now is to disable recording the body for these mime types + RecordingOptions.RequestOverride = request => + { + if (request?.Headers.GetFirstOrDefault("Content-Type")?.StartsWith("multipart/form-data") == true) + { + return RequestRecordMode.RecordWithoutRequestBody; + } + + return RequestRecordMode.Record; + }; + RecordingOptions.Sanitizers.Add(new HeaderRegexSanitizer("Content-Type") + { + Regex = @"multipart/form-data; boundary=[^\s]+", + Value = "multipart/form-data; boundary=***" + }); + + // Data URIs trimmed to prevent the recording from being too large + RecordingOptions.Sanitizers.Add(new BodyKeySanitizer("$..url") + { + Regex = @"(?<=data:image/png;base64,)(.+)", + Value = SMALL_1x1_PNG + }); + + // Base64 encoded images in the response are replaced with a 1x1 black pixel PNG image to ensure valid data + RecordingOptions.Sanitizers.Add(new BodyKeySanitizer($"..b64_json") + { + Value = SMALL_1x1_PNG + }); + } + + /// + /// Gets the top level test client to use for testing. + /// + /// The test configuration to use + /// (Optional) The client options to use. + /// (Optional) The token credential to use. If this is null, an API key will be read from the + /// test configuration. + /// (Optional) The key credential to use instead of the one from the configuration. + public virtual AzureOpenAIClient GetTestTopLevelClient( + IConfiguration? config, + TestClientOptions? options = null, + TokenCredential? tokenCredential = null, + ApiKeyCredential? keyCredential = null) + { + // First validate that the config has the parameters we need + if (config == null) + { + throw CreateKeyNotFoundEx("any configuration"); + } + else if (config.Endpoint is null) + { + throw CreateKeyNotFoundEx("endpoint"); + } + else if (tokenCredential == null && keyCredential == null && string.IsNullOrEmpty(config.Key)) + { + throw CreateKeyNotFoundEx("API key"); + } + + // Configure the test options as needed + options ??= new(); + Action? 
requestAction = options.ShouldOutputRequests ? DumpRequest : null; + Action? responseAction = options.ShouldOutputResponses ? DumpResponse : null; + options.AddPolicy(new TestPipelinePolicy(requestAction, responseAction), PipelinePosition.PerCall); + + options = ConfigureClientOptions(options); + + AzureOpenAIClient topLevelClient; + if (tokenCredential != null) + { + topLevelClient = new AzureOpenAIClient(config.Endpoint, tokenCredential, options); + } + else + { + topLevelClient = new AzureOpenAIClient(config.Endpoint, keyCredential ?? new ApiKeyCredential(config.Key!), options); + } + + return topLevelClient; + } + + /// + /// Gets the properly instrumented client to use for testing. This have proper support for automatic sync/async method testing, + /// as well as recording, and playback support. + /// + /// (Optional) The client options to use. + /// (Optional) The token credential to use. If this is null, an API key will be read from the + /// test configuration. + /// (Optional) The key credential to use instead of the one from the configuration. + /// The test client instance. + public virtual TClient GetTestClient(TestClientOptions? options = null, TokenCredential? tokenCredential = null, ApiKeyCredential? keyCredential = null) + => GetTestClient(TestConfig.GetConfig(), options, tokenCredential, keyCredential); + + /// + /// Gets the properly instrumented client to use for testing. This have proper support for automatic sync/async method testing, + /// as well as recording, and playback support. + /// + /// + /// (Optional) The client options to use. + /// (Optional) The token credential to use. If this is null, an API key will be read from the + /// test configuration. + /// (Optional) The key credential to use instead of the one from the configuration. + /// The test client instance. + public virtual TClient GetTestClient(string configName, TestClientOptions? options = null, TokenCredential? tokenCredential = null, ApiKeyCredential? 
keyCredential = null) + => GetTestClient(TestConfig.GetConfig(configName), options, tokenCredential, keyCredential); + + /// + /// Gets a different type of client using the same configuration as the specified client. + /// + /// The type of other client to create. + /// The client instance whose configuration we want to use. + /// (Optional) The specific deployment to use instead of the one from the config. + /// + /// The client instance passed was not instrumented + public virtual TExplicitClient GetTestClientFrom(TClient client, string? deploymentName = null) + { + var instrumented = (TopLevelInfo?)GetClientContext(client); + if (instrumented?.TopLevelClient != null + && instrumented?.Config != null) + { + return GetTestClient(instrumented.TopLevelClient, instrumented.Config, deploymentName); + } + + throw new NotSupportedException("The client provided was not properly instrumented. Please make sure to get your test client " + + "instances using the GetTestClient() methods"); + } + + #region overrides + + /// + protected override RecordedTestMode GetDefaultRecordedTestMode() + => AzureTestEnvironment.DefaultRecordMode; + + /// + protected override bool GetDefaultAutomaticRecordEnabled() + => AzureTestEnvironment.DefaultAutomaticRecordEnabled; + + /// + protected override ProxyServiceOptions CreateProxyServiceOptions() + => new() + { + DotnetExecutable = TestEnvironment.DotNetExe.FullName, + TestProxyDll = TestEnvironment.TestProxyDll.FullName, + DevCertFile = TestEnvironment.TestProxyHttpsCert.FullName, + DevCertPassword = TestEnvironment.TestProxyHttpsCertPassword, + StorageLocationDir = TestEnvironment.RepoRoot.FullName, + }; + + /// + protected override RecordingStartInformation CreateRecordingSessionStartInfo() + { + // This uses the same directory structure as the previous Azure.Core.TestFramework used for an easy drop in replacement. 
+ // For example, suppose your test class is (and your class name matches the file name): + // c:\src\azure-sdk-for-net\sdk\openai\Azure.AI.OpenAI\tests\ChatTests.cs + // Then this would return something like: + // sdk\openai\Azure.AI.OpenAI\tests\SessionRecords\ChatTests\TestName.json + DirectoryInfo? sourceDir = GetType().Assembly.GetAssemblySourceDir(); + string relativeDir = PathHelpers.GetRelativePath( + TestEnvironment.RepoRoot.FullName, + sourceDir?.FullName ?? TestEnvironment.RepoRoot.FullName); + + string recordingFile = Path.Combine( + relativeDir, + "SessionRecords", + GetType().Name, + GetRecordedTestFileName()); + + // Start at the source directory for the current test project, and then walk up the directory structure searching for + // an "assets.json" file. + string? assetsFile = null; + for ( + DirectoryInfo? current = sourceDir; + current != null && current.FullName != TestEnvironment.RepoRoot.FullName; + current = current?.Parent) + { + string file = Path.Combine(current!.FullName, "assets.json"); + if (File.Exists(file)) + { + assetsFile = file; + break; + } + } + + return new() + { + RecordingFile = recordingFile, + AssetsFile = assetsFile + }; + } + + #endregion + + /// + /// Polls until a condition has been met with a maximum wait time. The function will always return the last value even + /// if the condition was not met. + /// + /// The value in the . + /// The initial value. + /// The asynchronous function to get the latest state of the value. + /// When we should stop waiting. + /// (Optional) The amount of time to wait between retries. This will be ignored in playback + /// mode. Default is 2 seconds. + /// (Optional) The maximum amount of time to wait until the condition becomes true. This will be ignored in + /// playback mode. The default is 2 minutes. + /// The final state. This will return when the conditions have been met or we timed out. 
+ protected virtual Task WaitUntilReturnLast(T initialValue, Func>> getAsync, Predicate stopCondition, TimeSpan? waitTimeBetweenRequests = null, TimeSpan? maxWait = null) + => WaitUntilReturnLast(initialValue, new Func>(async () => await getAsync().ConfigureAwait(false)), stopCondition, waitTimeBetweenRequests, maxWait); + + /// + /// Polls until a condition has been met with a maximum wait time. The function will always return the last value even + /// if the condition was not met. + /// + /// The return value. + /// The initial value. + /// The asynchronous function to get the latest state of the value. + /// When we should stop waiting. + /// (Optional) The amount of time to wait between retries. This will be ignored in playback + /// mode. Default is 2 seconds. + /// (Optional) The maximum amount of time to wait until the condition becomes true. This will be ignored in + /// playback mode. The default is 2 minutes. + /// The final state. This will return when the conditions have been met or we timed out. + protected virtual async Task WaitUntilReturnLast(T initialValue, Func> getAsync, Predicate stopCondition, TimeSpan? waitTimeBetweenRequests = null, TimeSpan? maxWait = null) + { + TimeSpan delay, max; + if (Mode == RecordedTestMode.Playback) + { + delay = TimeSpan.FromMilliseconds(10); + max = TimeSpan.FromSeconds(30); + } + else + { + delay = waitTimeBetweenRequests ?? TimeSpan.FromSeconds(2); + max = maxWait ?? TimeSpan.FromMinutes(2); + } + + DateTimeOffset stopTime = DateTimeOffset.Now + max; + T result = initialValue; + + while (!stopCondition(result) && DateTimeOffset.Now < stopTime) + { + await Task.Delay(delay).ConfigureAwait(false); + result = await getAsync().ConfigureAwait(false); + } + + return result; + } + + /// + /// Gets the properly instrumented client to use for testing. This have proper support for automatic sync/async method testing, + /// as well as recording, and playback support. 
+ /// + /// The test configuration to use + /// (Optional) The client options to use. + /// (Optional) The token credential to use. If this is null, an API key will be read from the + /// test configuration. + /// (Optional) The key credential to use instead of the one from the configuration. + /// The test client instance. + protected virtual TClient GetTestClient(IConfiguration? config, TestClientOptions? options = null, TokenCredential? tokenCredential = null, ApiKeyCredential? keyCredential = null) + { + AzureOpenAIClient topLevelClient = GetTestTopLevelClient(config, options, tokenCredential, keyCredential); + return GetTestClient(topLevelClient, config!); + } + + /// + /// Gets the properly instrumented client to use for testing. This have proper support for automatic sync/async method testing, + /// as well as recording, and playback support. + /// + /// The type of test client to get. + /// The top level client to use. + /// The configuration to use to get the deployment information (if needed). + /// The instrumented client instance to use. + /// Support for the type of client being requested has not been implemented yet. + protected virtual TExplicitClient GetTestClient(AzureOpenAIClient topLevelClient, IConfiguration config, string? deploymentName = null) + { + Func getDeployment = () => deploymentName ?? config?.Deployment ?? 
throw CreateKeyNotFoundEx("deployment"); + object clientObject; + + switch (typeof(TExplicitClient).Name) + { + case nameof(AssistantClient): + clientObject = topLevelClient.GetAssistantClient(); + break; + case nameof(AudioClient): + clientObject = topLevelClient.GetAudioClient(getDeployment()); + break; + case nameof(BatchClient): + clientObject = topLevelClient.GetBatchClient(getDeployment()); + break; + case nameof(ChatClient): + clientObject = topLevelClient.GetChatClient(getDeployment()); + break; + case nameof(EmbeddingClient): + clientObject = topLevelClient.GetEmbeddingClient(getDeployment()); + break; + case nameof(FileClient): + clientObject = topLevelClient.GetFileClient(); + break; + case nameof(FineTuningClient): + clientObject = topLevelClient.GetFineTuningClient(); + break; + case nameof(ImageClient): + clientObject = topLevelClient.GetImageClient(getDeployment()); + break; + case nameof(VectorStoreClient): + clientObject = topLevelClient.GetVectorStoreClient(); + break; + case nameof(AzureDeploymentClient): + var accessor = NonPublic.FromField("_transport"); + clientObject = new AzureDeploymentClient( + config, + TestEnvironment.Credential, + transport: accessor.Get(topLevelClient.Pipeline)); + break; + default: + throw new NotImplementedException($"Test client helpers not yet implemented for {typeof(TExplicitClient)}"); + }; + + object instrumented = WrapClient( + typeof(TExplicitClient), + clientObject, + new TopLevelInfo + { + TopLevelClient = topLevelClient, + Config = config, + }, + null); + + return (TExplicitClient)instrumented; + } + + private Exception CreateKeyNotFoundEx(string whatIsMissing) + { + return new KeyNotFoundException($"Could not find any {whatIsMissing} to use. 
Please make sure you have the necessary" + + $" {TestConfig.AssetsJson} config file, or have the needed environment variables set"); + } + + private static void DumpRequest(PipelineRequest request) + { + Console.WriteLine($"--- New request ---"); + Console.WriteLine($"{request.Method} {request?.Uri}"); + string headers = string.Join("\n ", + request!.Headers + .Select(kvp => $"{kvp.Key}: {(kvp.Key.ToLowerInvariant().Contains("auth") ? "***" : kvp.Value)}")); + Console.Write(" "); + Console.WriteLine(headers); + + if (request?.Content is not null) + { + using MemoryStream stream = new(); + request.Content.WriteTo(stream, default); + stream.Position = 0; + + string? contentType = request.Headers.GetFirstOrDefault("Content-Type"); + if (IsProbableTextContent(contentType)) + { + DumpText(contentType, stream); + } + else + { + DumpHex(stream); + } + } + } + + private static void DumpResponse(PipelineResponse response) + { + Console.WriteLine($"--- Response ---"); + Console.WriteLine($"{response.Status} - {response.ReasonPhrase}"); + string headers = string.Join( + "\n ", + response.Headers + .Where(kvp => !kvp.Key.ToLowerInvariant().Contains("client-")) + .Select(kvp => $"{kvp.Key}: {kvp.Value}")); + Console.Write(" "); + Console.WriteLine(headers); + + response.BufferContent(); + + if (response!.Content is not null) + { + using Stream stream = response.Content.ToStream(); + string? contentType = response.Headers.GetFirstOrDefault("Content-Type"); + if (IsProbableTextContent(contentType)) + { + DumpText(contentType, stream); + } + else + { + DumpHex(stream); + } + } + + Console.WriteLine(); + } + + private static bool IsProbableTextContent(string? contentType) + { + contentType = contentType?.ToLowerInvariant() ?? string.Empty; + return contentType.StartsWith("application/json") + || contentType.StartsWith("text/"); + } + + private static void DumpText(string? 
contentType, Stream stream) + { + if (contentType?.ToLowerInvariant().StartsWith("application/json") == true) + { + var json = JsonDocument.Parse(stream); + + stream = new MemoryStream(); + using (Utf8JsonWriter writer = new(stream, new() { Indented = true })) + { + json.WriteTo(writer); + } + + stream.Seek(0, SeekOrigin.Begin); + } + + using StreamReader reader = new(stream); + Console.WriteLine(reader.ReadToEnd()); + } + + private static void DumpHex(Stream stream, int maxLines = 256) + { + byte[] buffer = new byte[32]; + StringBuilder hex = new(3 * buffer.Length); + StringBuilder chars = new(buffer.Length); + + int read = 0; + for (int lines = 0; (read = stream.FillBuffer(buffer)) > 0 && lines < maxLines; lines++) + { + for (int i = 0; i < read; i++) + { + hex.AppendFormat("{0:X2} ", buffer[i]); + + char c = Convert.ToChar(buffer[i]); + chars.Append(char.IsControl(c) ? ' ' : c); + } + + Console.Write(hex.PadRight(buffer.Length * 3)); + Console.Write("| "); + Console.WriteLine(chars); + + hex.Clear(); + chars.Clear(); + } + + if (read != 0) + { + Console.WriteLine(" ... 
truncated"); + } + } + + protected void ValidateById(string id) + { + Assert.That(id, Is.Not.Null.Or.Empty); + switch (typeof(T).Name) + { + case nameof(Assistant): + _assistantIdsToDelete.Add(id); + break; + case nameof(AssistantThread): + _threadIdsToDelete.Add(id); + break; + case nameof(OpenAIFileInfo): + _fileIdsToDelete.Add(id); + break; + case nameof(ThreadRun): + break; + case nameof(VectorStore): + _vectorStoreIdsToDelete.Add(id); + break; + default: + throw new NotImplementedException(); + } + } + + protected void ValidateById(string id, string parentId) + { + Assert.That(id, Is.Not.Null.Or.Empty); + Assert.That(parentId, Is.Not.Null.Or.Empty); + switch (typeof(T).Name) + { + case nameof(ThreadMessage): + _threadIdsWithMessageIdsToDelete.Add((parentId, id)); + break; + case nameof(VectorStoreFileAssociation): + _vectorStoreFileAssociationsToRemove.Add((parentId, id)); + break; + default: + throw new NotImplementedException(); + } + } + + /// + /// Performs basic, invariant validation of a target that was just instantiated from its corresponding origination + /// mechanism. If applicable, the instance is recorded into the test run for cleanup of persistent resources. + /// + /// Instance type being validated. + /// The instance to validate. + /// The provided instance type isn't supported. 
+ protected void Validate(T target) + { + if (target is ThreadMessage message) + { + ValidateById(message.Id, message.ThreadId); + } + else if (target is VectorStoreFileAssociation fileAssociation) + { + ValidateById(fileAssociation.VectorStoreId, fileAssociation.FileId); + } + else + { + ValidateById(target switch + { + Assistant assistant => assistant.Id, + AssistantThread thread => thread.Id, + OpenAIFileInfo file => file.Id, + ThreadRun run => run.Id, + VectorStore store => store.Id, + _ => throw new NotImplementedException(), + }); + } + } + + [TearDown] + protected void Cleanup() + { + AzureOpenAIClient topLevelCleanupClient = GetTestTopLevelClient(TestConfig.GetConfig(), new() + { + ShouldOutputRequests = false, + ShouldOutputResponses = false, + }); + AssistantClient client = topLevelCleanupClient.GetAssistantClient(); + VectorStoreClient vectorStoreClient = topLevelCleanupClient.GetVectorStoreClient(); + FileClient fileClient = topLevelCleanupClient.GetFileClient(); + RequestOptions requestOptions = new() { ErrorOptions = ClientErrorBehaviors.NoThrow, }; + foreach ((string threadId, string messageId) in _threadIdsWithMessageIdsToDelete) + { + Console.WriteLine($"Cleanup: {messageId} -> {client.DeleteMessage(threadId, messageId, requestOptions)?.GetRawResponse().Status}"); + } + foreach (string assistantId in _assistantIdsToDelete) + { + Console.WriteLine($"Cleanup: {assistantId} -> {client.DeleteAssistant(assistantId, requestOptions)?.GetRawResponse().Status}"); + } + foreach (string threadId in _threadIdsToDelete) + { + Console.WriteLine($"Cleanup: {threadId} -> {client.DeleteThread(threadId, requestOptions)?.GetRawResponse().Status}"); + } + foreach ((string vectorStoreId, string fileId) in _vectorStoreFileAssociationsToRemove) + { + Console.WriteLine($"Cleanup: {vectorStoreId}<->{fileId} => {vectorStoreClient.RemoveFileFromStore(vectorStoreId, fileId, requestOptions)?.GetRawResponse().Status}"); + } + foreach (string vectorStoreId in 
_vectorStoreIdsToDelete) + { + Console.WriteLine($"Cleanup: {vectorStoreId} => {vectorStoreClient.DeleteVectorStore(vectorStoreId, requestOptions)?.GetRawResponse().Status}"); + } + foreach (string fileId in _fileIdsToDelete) + { + Console.WriteLine($"Cleanup: {fileId} -> {fileClient.DeleteFile(fileId, requestOptions)?.GetRawResponse().Status}"); + } + _threadIdsWithMessageIdsToDelete.Clear(); + _assistantIdsToDelete.Clear(); + _threadIdsToDelete.Clear(); + _vectorStoreFileAssociationsToRemove.Clear(); + _vectorStoreIdsToDelete.Clear(); + _fileIdsToDelete.Clear(); + + // If we are in recording mode, update the recorded playback configuration as well + if (Mode == RecordedTestMode.Record + && TestContext.CurrentContext.Result.Outcome == ResultState.Success) + { + TestConfig.SavePlaybackConfig(); + } + } + + protected static void ValidateClientResult(ClientResult result) + { + Assert.That(result, Is.Not.Null); + Assert.That(result.GetRawResponse(), Is.Not.Null); + } + + protected static PipelineResponse ValidateClientResultResponse(ClientResult result) + { + ValidateClientResult(result); + + PipelineResponse response = result.GetRawResponse(); + Assert.That(response.Status, Is.GreaterThanOrEqualTo(200).And.LessThan(300)); + Assert.That(response.Headers, Is.Not.Null); + Assert.That(response.Headers.GetFirstOrDefault("Content-Type"), Does.StartWith("application/json")); + Assert.That(response.Content, Is.Not.Null); + + return response; + } + + protected virtual TModel ValidateAndParse(ClientResult result) where TModel : IJsonModel + { + var response = ValidateClientResultResponse(result); + + TModel? model = ModelReaderWriter.Read(response.Content, ModelReaderWriterOptions.Json); + Assert.That(model, Is.Not.Null); + return model!; + } + + protected virtual TModel ValidateAndParse(ClientResult result, JsonSerializerOptions? 
options = null) + { + var response = ValidateClientResultResponse(result); + + using Stream stream = response.Content.ToStream(); + Assert.That(stream, Is.Not.Null); + + TModel? model = JsonHelpers.Deserialize(stream, options ?? JsonOptions.OpenAIJsonOptions); + Assert.That(model, Is.Not.Null); + return model!; + } + + internal class TopLevelInfo + { + //required public object Client { get; init; } + required public AzureOpenAIClient TopLevelClient { get; init; } + required public IConfiguration Config { get; init; } + } + + private readonly List _assistantIdsToDelete = []; + private readonly List _threadIdsToDelete = []; + private readonly List<(string, string)> _threadIdsWithMessageIdsToDelete = []; + private readonly List _fileIdsToDelete = []; + private readonly List<(string, string)> _vectorStoreFileAssociationsToRemove = []; + private readonly List _vectorStoreIdsToDelete = []; +} + +public class TestClientOptions : AzureOpenAIClientOptions +{ + public TestClientOptions() : base() + { } + + public TestClientOptions(ServiceVersion version) : base(version) + { } + + public bool ShouldOutputRequests { get; set; } = true; + public bool ShouldOutputResponses { get; set; } = true; +} diff --git a/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Utils/Assets.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Utils/Assets.cs new file mode 100644 index 000000000..15590bc9c --- /dev/null +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Utils/Assets.cs @@ -0,0 +1,81 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +using System; +using System.IO; + +namespace Azure.AI.OpenAI.Tests +{ + public class Assets + { + public Assets() + { + HelloWorld = new() + { + Type = AssetType.Audio, + Language = "en", + Description = "Hello world", + Name = "hello_world.m4a", + RelativePath = GetPath("hello_world.m4a"), + MimeType = "audio/m4a" + }; + WhisperFrenchDescription = new() + { + Type = AssetType.Audio, + Language = "fr", + Description = "Whisper description in French", + Name = "french.wav", + RelativePath = GetPath("french.wav"), + MimeType = "audio/wave" + }; + DogAndCat = new() + { + Type = AssetType.Image, + Language = null, + Description = "A picture of a cat next to a dog", + Name = "variation_sample_image.jpg", + RelativePath = GetPath("variation_sample_image.png"), + MimeType = "image/png", + Url = new Uri("https://cdn.openai.com/API/images/guides/image_variation_original.webp") + }; + FineTuning = new() + { + Type = AssetType.Text, + Language = "en", + Description = "Fine tuning data for Open AI to generate a JSON object based on sports headlines", + Name = "fine_tuning.jsonl", + RelativePath = GetPath("fine_tuning.jsonl"), + MimeType = "text/plain" + }; + } + + public virtual AssetInfo HelloWorld { get; } + public virtual AssetInfo WhisperFrenchDescription { get; } + public virtual AssetInfo DogAndCat { get; } + public virtual AssetInfo FineTuning { get; } + + protected virtual string GetPath(string assetName) + { + return Path.Combine("Assets", assetName); + } + } + + public enum AssetType + { + Text, + Audio, + Image, + Raw + } + + public class AssetInfo + { + required public AssetType Type { get; init; } + required public string Name { get; init; } + required public string RelativePath { get; init; } + required public string MimeType { get; init; } + public string? Language { get; init; } + public string? Description { get; init; } + public Uri? 
Url { get; init; } + } +} diff --git a/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Utils/AzureTestEnvironment.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Utils/AzureTestEnvironment.cs new file mode 100644 index 000000000..a0321edcc --- /dev/null +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Utils/AzureTestEnvironment.cs @@ -0,0 +1,255 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using System; +using System.Collections.Generic; +using System.IO; +using System.Linq; +using System.Reflection; +using Azure.Core; +using Azure.Identity; +using OpenAI.TestFramework; +using OpenAI.TestFramework.Mocks; +using OpenAI.TestFramework.Recording; +using OpenAI.TestFramework.Utils; + +namespace Azure.AI.OpenAI.Tests.Utils; + +/// +/// Represents an Azure test environment. +/// +public class AzureTestEnvironment +{ + private readonly RecordedTestMode _mode; + private readonly string _optionPrefix; + private TokenCredential? _credential; + + /// + /// Initializes a new instance. + /// + /// The recorded test mode to use. + public AzureTestEnvironment(RecordedTestMode mode) + { + _mode = mode; + + /** + * We want to be able to to find "root" folders: + * - The root of the Git repo on disk + * - The root folder of the source code (eng/sdk) + * These two are usually the same. In external repos, they may however be a little different. + * + * To search for these folders, we use a simple method where we search up from these starting folders: + * - Check the "SourcePath" assembly metadata attribute value. All projects in the Azure C# repo automatically have this attribute + * added as part of the build "magic" (see {repo_root}\Directory.Build.Targets) + * - Where the executing assembly is running from + * Until we find a parent folder that contains a specific subfolder(s). 
+ */ + DirectoryInfo?[] startingPoints = + [ + AssemblyHelper.GetAssemblySourceDir(), + new FileInfo(Assembly.GetExecutingAssembly().Location).Directory, + ]; + + RepoRoot = FindFirstParentWithSubfolders(startingPoints, ".git") + ?? throw new InvalidOperationException("Could not determine the GIT root folder for this repository"); + + string sourceRoot = (FindFirstParentWithSubfolders(startingPoints, "eng", "sdk") ?? RepoRoot) + .FullName; + + DotNetExe = AssemblyHelper.GetDotnetExecutable() + ?? throw new InvalidOperationException( + "Could not determine the dotnet executable to use. Do you have .Net installed or have your paths correctly configured?"); + + TestProxyDll = new FileInfo( + AssemblyHelper.GetAssemblyMetadata("TestProxyPath") + ?? throw new InvalidOperationException("Could not determine the path to the recording test proxy DLL")); + + TestProxyHttpsCert = new FileInfo(Path.Combine( + sourceRoot, + "eng", + "common", + "testproxy", + "dotnet-devcert.pfx")); + if (!TestProxyHttpsCert.Exists) + { + throw new InvalidOperationException("Could not find test proxy HTTPS root certificate to use."); + } + + TestProxyHttpsCertPassword = "password"; + + string? serviceName = null; + DirectoryInfo? sourceDir = GetType().Assembly.GetAssemblySourceDir(); + if (sourceDir != null) + { + string relativePath = PathHelpers.GetRelativePath( + Path.Combine(sourceRoot, "sdk"), + sourceDir.FullName); + serviceName = relativePath + .Split(new char[] { Path.DirectorySeparatorChar }, StringSplitOptions.RemoveEmptyEntries) + .FirstOrDefault()!; + } + + _optionPrefix = serviceName?.ToUpperInvariant() + "_"; + } + + /// + /// Gets the root Git folder. + /// + public DirectoryInfo RepoRoot { get; } + + /// + /// Gets the path to the dotnet executable. This will be used in combination with to start the + /// recording test proxy service. 
+ /// + public FileInfo DotNetExe { get; } + + /// + /// The path to test proxy DLL that will be used when starting the recording test proxy service. + /// + public FileInfo TestProxyDll { get; } + + /// + /// Gets the HTTPS certificate file to use as the signing certificate for HTTPS connections to the test proxy. + /// + public FileInfo TestProxyHttpsCert { get; } + + /// + /// Gets the password for . + /// + public string TestProxyHttpsCertPassword { get; } + + /// + /// Gets the token credential to use during testing. This will change depending on the record mode. + /// + public TokenCredential Credential => _credential ??= GetCredential(); + + /// + /// Gets the default record mode to use for the test. This will attempt to read from the test context, or environment variables. + /// + public static RecordedTestMode DefaultRecordMode + { + get + { + string? modeString = TestContext.Parameters["TestMode"] + ?? Environment.GetEnvironmentVariable("AZURE_TEST_MODE"); + + if (Enum.TryParse(modeString, true, out RecordedTestMode mode)) + { + return mode; + } + + return RecordedTestMode.Playback; + } + } + + /// + /// Gets whether or not we should automatically record tests. + /// + public static bool DefaultAutomaticRecordEnabled + { + get + { + string? enabledString = TestContext.Parameters["DisableAutoRecording"] + ?? Environment.GetEnvironmentVariable("AZURE_DISABLE_AUTO_RECORDING"); + + if (bool.TryParse(enabledString, out bool enabled)) + { + return !enabled; + } + + return true; + } + } + + /// + /// Gets an optional value from environment variables. + /// + /// The name of the value to retrieve. + /// The value, or null if it did not exist. + public string? 
GetOptionalVariable(string name) + { + return new[] + { + _optionPrefix + name, + name, + "AZURE_" + name + } + .Select(Environment.GetEnvironmentVariable) + .FirstOrDefault(value => !string.IsNullOrWhiteSpace(value)); + } + + /// + /// Gets a value from environment variables, or throws an exception if it does not exist. + /// + /// The name of the value to retrieve. + /// The value. + /// If the value did not exist. + public string GetVariable(string name) + { + string? optionalVariable = GetOptionalVariable(name); + return optionalVariable + ?? throw new InvalidOperationException($"Could not find required environment variable '{_optionPrefix + name }' or '{name}'."); + } + + private static DirectoryInfo? FindFirstParentWithSubfolders(IEnumerable startingDirs, params string[] subFolders) + => startingDirs + .Select(d => FindParentWithSubfolders(d, subFolders)) + .FirstOrDefault(d => d != null); + + private static DirectoryInfo? FindParentWithSubfolders(DirectoryInfo? start, params string[] subFolders) + { + if (subFolders == null || subFolders.Length == 0) + { + return null; + } + + for (DirectoryInfo? current = start; current != null; current = current.Parent) + { + if (!current.Exists) + { + return null; + } + else if (subFolders.All(sub => current.EnumerateDirectories(sub).Any())) + { + return current; + } + } + + return null; + } + + private TokenCredential GetCredential() + { + if (_mode == RecordedTestMode.Playback) + { + return new MockTokenCredential(); + } + + // I'm not sure exactly what the possible combinations to use here are, so I've essentially copied the logic + // TestEnvironment.cs in Azure.Core.TestFramework (though it is a little simplified here) + string? clientSecret = GetOptionalVariable("CLIENT_SECRET"); + string? 
systemAccessToken = GetOptionalVariable("SYSTEM_ACCESSTOKEN"); + + if (!string.IsNullOrWhiteSpace(clientSecret)) + { + return new ClientSecretCredential( + GetVariable("TENANT_ID"), + GetVariable("CLIENT_ID"), + clientSecret); + } + else if (!string.IsNullOrWhiteSpace(systemAccessToken)) + { + return new AzurePipelinesCredential( + GetVariable("AZURESUBSCRIPTION_TENANT_ID"), + GetVariable("AZURESUBSCRIPTION_CLIENT_ID"), + GetVariable("AZURESUBSCRIPTION_SERVICE_CONNECTION_ID"), + systemAccessToken, + new AzurePipelinesCredentialOptions { AuthorityHost = new Uri(GetVariable("AZURE_AUTHORITY_HOST")) }); + } + else + { + return new DefaultAzureCredential( + new DefaultAzureCredentialOptions() { ExcludeManagedIdentityCredential = true }); + } + } +} diff --git a/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Utils/Config/BasicConfig.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Utils/Config/BasicConfig.cs new file mode 100644 index 000000000..d7f7e596d --- /dev/null +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Utils/Config/BasicConfig.cs @@ -0,0 +1,58 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +#nullable enable + +using System; +using System.Collections.Generic; + +namespace Azure.AI.OpenAI.Tests.Utils.Config +{ + /// + /// A basic configuration that allows you to directly set values. + /// + public class BasicConfig : IConfiguration + { + private Dictionary _values = new Dictionary(StringComparer.OrdinalIgnoreCase); + + /// + public Uri? Endpoint { get; set; } + /// + public string? Key { get; set; } + /// + public string? Deployment { get; set; } + + /// + /// Adds an additional value to the configuration. + /// + /// The type of the value to add. + /// The key. + /// The value to add. + /// The instance for chaining. + public BasicConfig AddValue(string key, TVal? 
value) + { + if (value != null) + { + _values[key] = value; + } + else + { + _values.Remove(key); + } + + return this; + } + + /// + public TVal? GetValue(string key) + { + if (_values.TryGetValue(key, out object? val) + && val is TVal cast) + { + return cast; + } + + return default; + } + } +} diff --git a/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Utils/Config/EnvironmentValuesConfig.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Utils/Config/EnvironmentValuesConfig.cs new file mode 100644 index 000000000..3cd72d98c --- /dev/null +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Utils/Config/EnvironmentValuesConfig.cs @@ -0,0 +1,85 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +#nullable enable + +using System; +using System.ComponentModel; + +namespace Azure.AI.OpenAI.Tests.Utils.Config +{ + /// + /// Configuration that reads from environment variables. + /// + public class EnvironmentValuesConfig : INamedConfiguration + { + private const char ENV_KEY_SEPARATOR = '_'; + private const string SUFFIX_AOAI_API_KEY = "API_KEY"; + private const string SUFFIX_AOAI_ENDPOINT = "ENDPOINT"; + private const string SUFFIX_AOAI_DEPLOYMENT = "DEPLOYMENT"; + + private readonly string _prefix; + + /// + /// Creates a new instance. + /// + /// The environment value prefix to use. For example AZURE_OPENAI. + /// The prefix specified was null. + public EnvironmentValuesConfig(string prefix) + { + _prefix = prefix + ?.TrimEnd(ENV_KEY_SEPARATOR) + .ToUpperInvariant() + ?? throw new ArgumentNullException(nameof(prefix)); + + Endpoint = GetValue(SUFFIX_AOAI_ENDPOINT); + Key = GetValue(SUFFIX_AOAI_API_KEY); + Deployment = GetValue(SUFFIX_AOAI_DEPLOYMENT); + } + + /// + /// Creates a new instance. + /// + /// The environment value prefix to use. For example AZURE_OPENAI. + /// The specific type of client we want to get environment variable for + /// The prefix specified was null. 
+ public EnvironmentValuesConfig(string prefix, string clientName) + : this($"{prefix}{ENV_KEY_SEPARATOR}{clientName}") + { + Name = clientName; + } + + /// + public string? Name { get; } + + /// + public Uri? Endpoint { get; } + + /// + public string? Key { get; } + + /// + public string? Deployment { get; } + + /// + public TVal? GetValue(string key) + { + string envKey = $"{_prefix}{ENV_KEY_SEPARATOR}{key.ToUpperInvariant()}"; + + string? value = Environment.GetEnvironmentVariable(envKey); + if (value == null) + { + return default; + } + else if (value is TVal val) + { + return val; + } + else + { + var defaultConverter = TypeDescriptor.GetConverter(typeof(TVal)); + return (TVal?)defaultConverter.ConvertFromInvariantString(value); + } + } + } +} diff --git a/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Utils/Config/FlattenedConfig.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Utils/Config/FlattenedConfig.cs new file mode 100644 index 000000000..205a5d072 --- /dev/null +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Utils/Config/FlattenedConfig.cs @@ -0,0 +1,79 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using System; +using System.Collections.Generic; +using System.Linq; +using OpenAI.TestFramework.Utils; + +namespace Azure.AI.OpenAI.Tests.Utils.Config; + +/// +/// Represents a flattened configuration that reads from one or more configurations in order. It will also +/// record the values read from each configuration. +/// +public class FlattenedConfig : IConfiguration +{ + private IReadOnlyList _configs; + private IDictionary? _recordedConfig; + + /// + /// Creates a new instance. + /// + /// The configurations to read from in order. + /// Where to store the recorded configuration. + /// The configs passed was null. + public FlattenedConfig(INamedConfiguration?[] configs, IDictionary recordedConfig) + { + _configs = configs ?? 
throw new ArgumentNullException(nameof(configs)); + _recordedConfig = recordedConfig ?? throw new ArgumentNullException(nameof(recordedConfig)); + + Endpoint = GetAndRecordProperty(c => c.Endpoint, (c, v) => c.Endpoint = v); + Key = GetAndRecordProperty(c => c.Key, (c, v) => c.Key = v); + Deployment = GetAndRecordProperty(c => c.Deployment, (c, v) => c.Deployment = v); + } + + /// + public Uri? Endpoint { get; } + /// + public string? Key { get; } + /// + public string? Deployment { get; } + + /// + public TVal? GetValue(string key) + { + TVal? value = default; + INamedConfiguration? selected = _configs + .Where(config => config != null) + .FirstOrDefault(config => (value = config!.GetValue(key)) != null); + + if (_recordedConfig != null && selected != null && value != null) + { + string configName = selected.Name ?? JsonConfig.DEFAULT_CONFIG_NAME; + SanitizedJsonConfig recorded = _recordedConfig.GetOrAdd(configName, _ => new SanitizedJsonConfig()); + recorded.SetValue(key, value); + } + + return value; + } + + private TVal? GetAndRecordProperty(Func getter, Action setter) + { + TVal? value = default; + INamedConfiguration? selected = _configs + .Where(config => config != null) + .FirstOrDefault(config => (value = getter(config!)) != null); + + if (_recordedConfig != null && selected != null && value != null) + { + string configName = selected.Name ?? JsonConfig.DEFAULT_CONFIG_NAME; + SanitizedJsonConfig recorded = _recordedConfig.GetOrAdd(configName, _ => new SanitizedJsonConfig()); + setter(recorded, value); + } + + return value; + } + + +} diff --git a/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Utils/Config/IConfiguration.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Utils/Config/IConfiguration.cs new file mode 100644 index 000000000..97511d548 --- /dev/null +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Utils/Config/IConfiguration.cs @@ -0,0 +1,129 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. + +using System; +using System.Collections.Generic; +using OpenAI.TestFramework.AutoSyncAsync; + +namespace Azure.AI.OpenAI.Tests.Utils.Config; + +/// +/// A test configuration for an Azure resource. +/// +public interface IConfiguration +{ + /// + /// The endpoint to use for sending requests to the Azure resource. + /// + Uri? Endpoint { get; } + + /// + /// The API key to use for authenticating requests to the Azure resource. + /// + string? Key { get; } + + /// + /// The deployment to use for this Azure resource. + /// + string? Deployment { get; } + + /// + /// Gets additional values from the test configuration for the Azure resource. + /// + /// The type of the value. + /// The name of the value (usually snake cased). For example: fine_tuned_model. + /// The parsed value for that key, or null of the key was not found, or failed to be parsed. + TVal? GetValue(string key); +} + +/// +/// A named test configuration for an Azure resource. +/// +public interface INamedConfiguration : IConfiguration +{ + /// + /// The name of the configuration. + /// + string? Name { get; } +} + +/// +/// Extensions methods for . +/// +public static class ConfigurationExtensions +{ + /// + /// Gets additional values from the test configuration for the Azure resource, but throws exceptions if the key is not found. + /// + /// The type of the value. + /// The configuration to get a value from. + /// The name of the value (usually snake cased). For example: fine_tuned_model. + /// The successfully parsed value for that key. + /// If the configuration passed was null + /// If the key could not be found + public static TVal GetValueOrThrow(this IConfiguration? config, string key) + { + if (config == null) + { + throw new ArgumentNullException(nameof(config)); + } + + return config.GetValue(key) + ?? 
throw new KeyNotFoundException($"Could not find a value for '{key}' in the test configuration"); + } + + /// + /// Gets the configuration that was used when creating the client instance. + /// + /// The type of the client. + /// The client instance. + /// The configuration. + /// The client did not have a config associated with it. + public static IConfiguration GetConfigOrThrow(this TExplicitClient client) where TExplicitClient : class + { + var instrumented = GetTopLevelClientInfo(client); + return instrumented.Config ?? throw new ArgumentException("The client was instrumented with a null configuration"); + } + + /// + /// Gets the deployment to use from the configuration, or throws if none was found. + /// + /// The config. + /// (Optional) The client name to include in th exception message. + /// The deployment. + /// The deployment was not set or found. + public static string DeploymentOrThrow(this IConfiguration? config, string? clientName = null) + { + string str = clientName == null ? string.Empty : clientName + " "; + return config?.Deployment + ?? throw new KeyNotFoundException($"Could not find a {str}deployment in the test configuration"); + } + + /// + /// Gets the deployment from the specified client. + /// + /// The type of the client. + /// The client instance. + /// The deployment name used for that client instance. + /// The client either was not properly instrumented. + /// The client did not have a deployment configured. + public static string DeploymentOrThrow(this TExplicitClient client) where TExplicitClient : class + { + var instrumented = GetTopLevelClientInfo(client); + return instrumented.Config.DeploymentOrThrow(client!.GetType().Name); + } + + private static AoaiTestBase.TopLevelInfo GetTopLevelClientInfo(TExplicitClient? client) + where TExplicitClient : class + { + if (client == null) + { + throw new ArgumentNullException(nameof(client)); + } + + return ((AoaiTestBase.TopLevelInfo?)(client as IAutoSyncAsync)?.Context) + ?? 
throw new ArgumentException( + $"The client was not properly wrapped for automatic sync/async ({client.GetType().Name})", + nameof(client)); + } +} diff --git a/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Utils/Config/JsonConfig.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Utils/Config/JsonConfig.cs new file mode 100644 index 000000000..e7334b11f --- /dev/null +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Utils/Config/JsonConfig.cs @@ -0,0 +1,62 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using System; +using System.Collections.Generic; +using System.Text.Json; +using System.Text.Json.Serialization; +using OpenAI.TestFramework.Utils; + +namespace Azure.AI.OpenAI.Tests.Utils.Config; + +/// +/// A configuration that is deserialized from JSON. +/// +public class JsonConfig : IConfiguration +{ + /// + /// The default configuration key to use. + /// + public const string DEFAULT_CONFIG_NAME = "default"; + + /// + /// The JSON configuration to use when serializing and deserializing. + /// + public static readonly JsonSerializerOptions JSON_OPTIONS = new() + { + PropertyNameCaseInsensitive = true, + PropertyNamingPolicy = JsonOptions.SnakeCaseLower, + DictionaryKeyPolicy = JsonOptions.SnakeCaseLower, + WriteIndented = true, + AllowTrailingCommas = true, +#if NETFRAMEWORK + IgnoreNullValues = true, +#else + DefaultIgnoreCondition = JsonIgnoreCondition.WhenWritingNull, +#endif + }; + + /// + public Uri? Endpoint { get; init; } + /// + public string? Key { get; init; } + /// + public string? Deployment { get; init; } + + /// + /// Json values that are not part of the class go here. + /// + [JsonExtensionData] + public Dictionary? ExtensionData { get; set; } + + /// + public TVal? 
GetValue(string key) + { + if (ExtensionData?.TryGetValue(key, out JsonElement value) == true) + { + return value.Deserialize(JSON_OPTIONS); + } + + return default; + } +} diff --git a/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Utils/Config/NamedConfig.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Utils/Config/NamedConfig.cs new file mode 100644 index 000000000..5690f48a9 --- /dev/null +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Utils/Config/NamedConfig.cs @@ -0,0 +1,39 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +#nullable enable + +using System; +using System.Text.Json; + +namespace Azure.AI.OpenAI.Tests.Utils.Config; + +/// +/// A wrapper around a test configuration to associate an optional name. +/// +public class NamedConfig : INamedConfiguration +{ + private readonly IConfiguration? _config; + + /// + /// Creates a new instance. + /// + /// The configuration instance. + /// The name of the config. + public NamedConfig(IConfiguration? config, string? name) + { + _config = config; + Name = name; + } + + /// + public string? Name { get; } + /// + public Uri? Endpoint => _config?.Endpoint; + /// + public string? Key => _config?.Key; + /// + public string? Deployment => _config?.Deployment; + /// + public TVal? GetValue(string key) => _config == null ? default : _config.GetValue(key); +} diff --git a/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Utils/Config/SanitizedJsonConfig.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Utils/Config/SanitizedJsonConfig.cs new file mode 100644 index 000000000..54e111aa7 --- /dev/null +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Utils/Config/SanitizedJsonConfig.cs @@ -0,0 +1,182 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +using System; +using System.Collections.Generic; +using System.Runtime.CompilerServices; +using System.Text.Json; +using System.Text.Json.Serialization; +using System.Text.RegularExpressions; +using OpenAI.TestFramework.Utils; + +namespace Azure.AI.OpenAI.Tests.Utils.Config +{ + /// + /// A sanitized JSON configuration. This will automatically sanitize the Endpoint, Key, subscription ID and resource group in the configuration + /// file. Please make sure to add any additional sanitization rules to the dictionary. + /// + public class SanitizedJsonConfig : IConfiguration + { + /// + /// The string to use when masking sensitive data. + /// + public const string MASK_STRING = "Sanitized"; + + /// + /// The pattern to match the subdomain of a URL. + /// + public const string HOST_SUBDOMAIN_PATTERN = @"(?<=.+://)([^\.]+)(?=[\./])"; + + private static readonly Regex HOST_SUBDOMAIN_MATCHER = new Regex(HOST_SUBDOMAIN_PATTERN, RegexOptions.Compiled); + private static readonly IReadOnlyDictionary> SANITIZERS = new Dictionary> + { + ["subscription_id"] = v => MASK_STRING, + ["resource_group"] = v => MASK_STRING, + ["endpoint"] = v => v is not null && (v is string || v is Uri) + ? MaskUriSubdomain(v.ToString())! + : MASK_STRING, + ["key"] = v => MASK_STRING, + ["api_key"] = v => MASK_STRING, + }; + + private Uri? _endpoint; + private string? _key; + private string? _deployment; + + /// + /// Creates a new instance. + /// + public SanitizedJsonConfig() + { + ExtensionData = new SortedDictionary(); + } + + /// + /// Creates a new instance from another . + /// + /// The configuration to create from. + /// If the configuration was null. 
+ public SanitizedJsonConfig(JsonConfig config) : this() + { + if (config == null) + { + throw new ArgumentNullException(nameof(config)); + } + + Endpoint = config.Endpoint; + Key = config.Key; + Deployment = config.Deployment; + + if (config?.ExtensionData != null) + { + foreach (var kvp in config.ExtensionData) + { + switch (kvp.Value.ValueKind) + { + case JsonValueKind.Undefined: + case JsonValueKind.Null: + break; + case JsonValueKind.String: + SetValue(kvp.Key, kvp.Value.GetString()); + break; + default: + ExtensionData[kvp.Key] = kvp.Value.Clone(); + break; + } + } + } + } + + /// + public Uri? Endpoint + { + get => _endpoint; + set => _endpoint = MaskProperty(value); + } + + /// + public string? Key + { + get => _key; + set => _key = MaskProperty(value); + } + + /// + public string? Deployment + { + get => _deployment; + set => _deployment = MaskProperty(value); + } + + /// + /// Json values that are not part of the class go here. + /// + [JsonExtensionData] + public IDictionary ExtensionData { get; } + + /// + public virtual TVal? GetValue(string key) + { + if (ExtensionData?.TryGetValue(key, out JsonElement value) == true) + { + return value.Deserialize(JsonConfig.JSON_OPTIONS); + } + + return default; + } + + /// + /// Sets an additional value in the configuration. If the value is null it will be removed. + /// + /// Type of the value to set. + /// The name of the value (usually snake cased). For example: fine_tuned_model. + /// The value to set. + public virtual void SetValue(string key, TVal? value) + { + if (value == null) + { + if (ExtensionData != null) + { + ExtensionData.Remove(key); + } + } + else + { + value = MaskData(key, value); + JsonElement json = JsonHelpers.SerializeToElement(value, JsonConfig.JSON_OPTIONS); + ExtensionData[key] = json; + } + } + + private static TVal? MaskProperty(TVal? value, [CallerMemberName] string? key = null) + { + string convertedKey = JsonConfig.JSON_OPTIONS.PropertyNamingPolicy?.ConvertName(key ?? 
string.Empty) ?? string.Empty; + return MaskData(convertedKey, value); + } + + private static TVal? MaskData(string key, TVal? value) + { + if (value == null) + { + return default; + } + else if (SANITIZERS.TryGetValue(key ?? string.Empty, out var sanitizer)) + { + return (TVal?)sanitizer(value); + } + + return value; + } + + private static Uri? MaskUriSubdomain(string? uri) + { + if (uri == null) + { + return null; + } + + string maskedUrl = HOST_SUBDOMAIN_MATCHER.Replace(uri.ToString(), MASK_STRING); + return new Uri(maskedUrl); + } + } +} diff --git a/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Utils/Extensions.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Utils/Extensions.cs new file mode 100644 index 000000000..d25ef06b3 --- /dev/null +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Utils/Extensions.cs @@ -0,0 +1,68 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using System; +using System.IO; +using System.Text; + +namespace Azure.AI.OpenAI.Tests.Utils; + +/// +/// Helper extension methods. +/// +public static class Extensions +{ + /// + /// Attempts to fill the buffer as much as possible from a stream. This will try to keep reading + /// until the buffer is filled, or the stream ends. + /// + /// The stream to read from. + /// The buffer to try to fill. + /// The number of bytes read. + public static int FillBuffer(this Stream stream, byte[] buffer) + { + if (stream == null) + throw new ArgumentNullException(nameof(stream)); + else if (buffer == null) + throw new ArgumentNullException(nameof(buffer)); + + int totalRead = 0; + while (totalRead < buffer.Length) + { + int read = stream.Read(buffer, totalRead, buffer.Length - totalRead); + if (read == 0) + { + return totalRead; + } + + totalRead += read; + } + + return totalRead; + } + + /// + /// Pads the current instance with the specified character on the left. 
+ /// + /// The string builder instance + /// The total width we want the string builder to be + /// The padding characters + /// The same builder for chaining, with any needed padding. + public static StringBuilder PadRight(this StringBuilder builder, int totalWidth, char paddingChar = ' ') + { + if (builder == null) + throw new ArgumentNullException(nameof(builder)); + else if (totalWidth < 0) + throw new ArgumentOutOfRangeException(nameof(totalWidth), "Total width must be greater than or equal to 0."); + else if (totalWidth == 0) + return builder; + + int padding = totalWidth - builder.Length; + if (padding > 0) + { + builder.Append(paddingChar, padding); + } + + return builder; + } +} diff --git a/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Utils/JsonOptions.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Utils/JsonOptions.cs new file mode 100644 index 000000000..4489b0169 --- /dev/null +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Utils/JsonOptions.cs @@ -0,0 +1,168 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +using System; +using System.Buffers; +using System.Globalization; +using System.Runtime.CompilerServices; +using System.Text.Json; + +#nullable enable + +namespace Azure.AI.OpenAI.Tests.Utils; + +/// +/// A helper class to make working with older versions of System.Text.Json simpler +/// +public static class JsonOptions +{ + // TODO FIXME once we update to newer versions of System.Text.JSon we should switch to using + // JsonNamingPolicy.SnakeCaseLower + public static JsonNamingPolicy SnakeCaseLower { get; } = + new SnakeCaseNamingPolicy(); + + public static JsonSerializerOptions OpenAIJsonOptions { get; } = new() + { + PropertyNameCaseInsensitive = true, + PropertyNamingPolicy = SnakeCaseLower, +#if NETFRAMEWORK + IgnoreNullValues = true, +#else + DefaultIgnoreCondition = System.Text.Json.Serialization.JsonIgnoreCondition.WhenWritingNull, +#endif + Converters = + { + new ModelReaderWriterConverter(), + new UnixDateTimeConverter() + } + }; + + public static JsonSerializerOptions AzureJsonOptions { get; } = new() + { + PropertyNameCaseInsensitive = true, + PropertyNamingPolicy = JsonNamingPolicy.CamelCase, +#if NETFRAMEWORK + IgnoreNullValues = true, +#else + DefaultIgnoreCondition = System.Text.Json.Serialization.JsonIgnoreCondition.WhenWritingNull, +#endif + }; + + // Ported over from the source code for newer versions of System.Text.Json + private class SnakeCaseNamingPolicy : JsonNamingPolicy + { + private enum SeparatorState + { + NotStarted, + UppercaseLetter, + LowercaseLetterOrDigit, + SpaceSeparator + } + + public override string ConvertName(string name) + { + if (string.IsNullOrEmpty(name)) + { + return string.Empty; + } + + return ConvertName('_', name.AsSpan()); + } + + internal static string ConvertName(char separator, ReadOnlySpan chars) + { + char[]? rentedBuffer = null; + + int num = (int)(1.2 * chars.Length); + Span output = num > 128 + ? (rentedBuffer = ArrayPool.Shared.Rent(num))! 
+ : stackalloc char[128]; + + SeparatorState separatorState = SeparatorState.NotStarted; + int charsWritten = 0; + + for (int i = 0; i < chars.Length; i++) + { + char c = chars[i]; + UnicodeCategory unicodeCategory = char.GetUnicodeCategory(c); + switch (unicodeCategory) + { + case UnicodeCategory.UppercaseLetter: + switch (separatorState) + { + case SeparatorState.LowercaseLetterOrDigit: + case SeparatorState.SpaceSeparator: + WriteChar(separator, ref output); + break; + case SeparatorState.UppercaseLetter: + if (i + 1 < chars.Length && char.IsLower(chars[i + 1])) + { + WriteChar(separator, ref output); + } + break; + } + + c = char.ToLowerInvariant(c); + WriteChar(c, ref output); + separatorState = SeparatorState.UppercaseLetter; + break; + + case UnicodeCategory.LowercaseLetter: + case UnicodeCategory.DecimalDigitNumber: + if (separatorState == SeparatorState.SpaceSeparator) + { + WriteChar(separator, ref output); + } + + WriteChar(c, ref output); + separatorState = SeparatorState.LowercaseLetterOrDigit; + break; + + case UnicodeCategory.SpaceSeparator: + if (separatorState != 0) + { + separatorState = SeparatorState.SpaceSeparator; + } + break; + + default: + WriteChar(c, ref output); + separatorState = SeparatorState.NotStarted; + break; + } + } + + string result = output.Slice(0, charsWritten).ToString(); + if (rentedBuffer != null) + { + output.Slice(0, charsWritten).Clear(); + ArrayPool.Shared.Return(rentedBuffer); + } + return result; + + void ExpandBuffer(ref Span destination) + { + int minimumLength = checked(destination.Length * 2); + char[] array = ArrayPool.Shared.Rent(minimumLength); + destination.CopyTo(array); + if (rentedBuffer != null) + { + destination.Slice(0, charsWritten).Clear(); + ArrayPool.Shared.Return(rentedBuffer); + } + rentedBuffer = array; + destination = rentedBuffer; + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + void WriteChar(char value, ref Span destination) + { + if (charsWritten == destination.Length) + { + 
ExpandBuffer(ref destination); + } + destination[charsWritten++] = value; + } + } + } +} diff --git a/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Utils/MockTokenCredential.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Utils/MockTokenCredential.cs new file mode 100644 index 000000000..8615c0f07 --- /dev/null +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Utils/MockTokenCredential.cs @@ -0,0 +1,33 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using System; +using System.Threading; +using System.Threading.Tasks; +using Azure.Core; + +namespace OpenAI.TestFramework.Mocks; + +/// +/// A mock token credential to be used for testing. +/// +public class MockTokenCredential : TokenCredential +{ + /// + /// Event raised when a token is requested. + /// + public event EventHandler? TokenRequested; + + /// + public override AccessToken GetToken(TokenRequestContext requestContext, CancellationToken cancellationToken) + { + TokenRequested?.Invoke(this, requestContext); + return new AccessToken("TEST TOKEN " + string.Join(",", requestContext.Scopes), DateTimeOffset.MaxValue); + } + + /// + public override ValueTask GetTokenAsync(TokenRequestContext requestContext, CancellationToken cancellationToken) + { + return new(GetToken(requestContext, cancellationToken)); + } +} diff --git a/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Utils/ModelReaderWriterConverter.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Utils/ModelReaderWriterConverter.cs new file mode 100644 index 000000000..255628633 --- /dev/null +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Utils/ModelReaderWriterConverter.cs @@ -0,0 +1,59 @@ +#nullable enable + +using System; +using System.ClientModel.Primitives; +using System.Linq; +using System.Reflection; +using System.Text.Json; +using System.Text.Json.Serialization; + +namespace Azure.AI.OpenAI.Tests.Utils +{ + /// + /// Adapter to allow mixing reflection based JSON serialization and 
deserialization with the ModelReaderWriter based ones + /// + public class ModelReaderWriterConverter : JsonConverterFactory + { + /// + public override bool CanConvert(Type typeToConvert) + { + bool implementsInterface = typeof(IJsonModel).IsAssignableFrom(typeToConvert); + bool hasParameterlessConstructor = typeToConvert.GetConstructors(BindingFlags.Instance | BindingFlags.NonPublic | BindingFlags.Public) + .Any(ci => ci.GetParameters()?.Count() == 0); + return implementsInterface && hasParameterlessConstructor; + } + + /// + public override JsonConverter CreateConverter(Type typeToConvert, JsonSerializerOptions options) + { + return (JsonConverter)Activator.CreateInstance(typeof(InnerModelReaderWriterConverter<>).MakeGenericType([typeToConvert]))!; + } + + private class InnerModelReaderWriterConverter : JsonConverter where T : IJsonModel + { + private IJsonModel _converter; + + /// + /// Creates a new instance + /// + /// The type does not have any paramterless constructor + public InnerModelReaderWriterConverter() + { + _converter = (IJsonModel)(Activator.CreateInstance(typeof(T), true) + ?? 
throw new ArgumentNullException()); + } + + /// + public override T Read(ref Utf8JsonReader reader, Type typeToConvert, JsonSerializerOptions options) + { + return _converter.Create(ref reader, ModelReaderWriterOptions.Json); + } + + /// + public override void Write(Utf8JsonWriter writer, T value, JsonSerializerOptions options) + { + _converter.Write(writer, ModelReaderWriterOptions.Json); + } + } + } +} diff --git a/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Utils/NonPublic.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Utils/NonPublic.cs new file mode 100644 index 000000000..f890e4e75 --- /dev/null +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Utils/NonPublic.cs @@ -0,0 +1,123 @@ +#nullable enable + +using System; +using System.Reflection; + +namespace Azure.AI.OpenAI.Tests.Utils; + +/// +/// Helpers to make accessing the many internal or private members of the Azure test framework more streamlined +/// +public static class NonPublic +{ + /// + /// Creates an accessor for an internal, protected, or private property. + /// + /// The type of the class that defines this property. + /// The type of the property. + /// The name of the property. + /// The property accessor. + /// If a property with that name and type could not be found. + public static Accessor FromProperty(string propertyName) where TObj : class + { + PropertyInfo? prop = typeof(TObj).GetProperty( + propertyName, BindingFlags.NonPublic | BindingFlags.Public | BindingFlags.Instance); + + if (prop == null) + { + throw new ArgumentException($"'{propertyName}' property could not be found in '{typeof(TObj).FullName}'"); + } + else if (prop.PropertyType != typeof(TProp)) + { + throw new ArgumentException($"'{propertyName}' property is not of type '{typeof(TProp).FullName}'"); + } + + Func? getter = null; + Action? setter = null; + + MethodInfo? 
method = prop.GetGetMethod(true); + if (method != null) + { + getter = (Func)method.CreateDelegate(typeof(Func)); + } + + method = prop.GetSetMethod(true); + if (method != null) + { + setter = (Action)method.CreateDelegate(typeof(Action)); + } + + return new Accessor(getter, setter); + } + + /// + /// Creates an accessory for an internal, protected, or private field. + /// + /// The type of the class that defines this field. + /// The type of the field. + /// The name of the field. + /// The filed accessor. + /// If a field with that name and type could not be found. + public static Accessor FromField(string fieldName) where TObj : class + { + FieldInfo? field = typeof(TObj).GetField( + fieldName, BindingFlags.NonPublic | BindingFlags.Public | BindingFlags.Instance); + + if (field == null) + { + throw new ArgumentException($"'{fieldName}' field could not be found in '{typeof(TObj).FullName}'"); + } + else if (field.FieldType != typeof(TField)) + { + throw new ArgumentException($"'{fieldName}' field is not of type '{typeof(TField).FullName}'"); + } + + Func getter = (instance) => (TField)field.GetValue(instance)!; + Action? setter = (instance, val) => field.SetValue(instance, val); + + return new Accessor(getter, setter); + } + + /// + /// The accessor struct that makes accessing internal, protected, or private properties/fields easier. + /// + /// The type of the class that defines this field. + /// Tye type of the property/field. + public readonly struct Accessor where TObj : class + { + private readonly Func _getter; + private readonly Action _setter; + + public Accessor(Func? getter, Action? setter) + { + HasGet = getter != null; + _getter = getter ?? (_ => throw new InvalidOperationException("Get is not supported")); + HasSet = setter != null; + _setter = setter ?? ((_, __) => throw new InvalidOperationException("Get is not supported")); + } + + /// + /// True if we can read the value of the property/field. 
+ /// + public bool HasGet { get; } + + /// + /// True if we can set the value of the property/field. + /// + public bool HasSet { get; } + + /// + /// Gets the value of the property/field. + /// + /// The instance to get the value from. Can be null for static properties/fields. + /// The value of the property/field. + public TValue Get(TObj? instance) => _getter(instance); + + /// + /// Sets the value of the property/field. + /// + /// The instance to set the value on. Can be null for static properties/fields. + /// The value to set. + public void Set(TObj? instance, TValue value) => _setter(instance, value); + } +} diff --git a/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Utils/RunOnScopeExit.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Utils/RunOnScopeExit.cs new file mode 100644 index 000000000..5ec65283e --- /dev/null +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Utils/RunOnScopeExit.cs @@ -0,0 +1,25 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +#nullable enable + +using System; +using System.Threading.Tasks; + +namespace Azure.AI.OpenAI.Tests.Utils +{ + public class RunOnScopeExit : IAsyncDisposable + { + private Func _asyncFunc; + + public RunOnScopeExit(Func asyncFunc) + { + _asyncFunc = asyncFunc ?? throw new ArgumentNullException(nameof(asyncFunc)); + } + + public async ValueTask DisposeAsync() + { + await _asyncFunc().ConfigureAwait(false); + } + } +} diff --git a/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Utils/TestConfig.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Utils/TestConfig.cs new file mode 100644 index 000000000..1ac649f3e --- /dev/null +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Utils/TestConfig.cs @@ -0,0 +1,204 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +using System; +using System.Collections.Generic; +using System.IO; +using System.Linq; +using System.Reflection; +using System.Text; +using System.Text.Json; +using Azure.AI.OpenAI.Tests.Utils.Config; +using OpenAI.TestFramework; +using OpenAI.TestFramework.Utils; + +namespace Azure.AI.OpenAI.Tests; + +internal class TestConfig +{ + private const string AZURE_OPENAI_ENV_KEY_PREFIX = "AZURE_OPENAI"; + + private readonly Func _getRecordedMode; + private SortedDictionary _recordedConfig; + private readonly IReadOnlyDictionary _liveConfig; + private readonly IReadOnlyDictionary _playbackConfig; + + public virtual string AssetsSubFolder => "Assets"; + public virtual string AssetsJson => "test_config.json"; + public virtual string PlaybackAssetsJson => $"playback_{AssetsJson}"; + + protected bool IsPlayback => _getRecordedMode() == RecordedTestMode.Playback; + + // When in playback mode, we always use the playback configuration. This ensures that we run in the same way in CI/CD + // as we do locally. + protected IReadOnlyDictionary CurrentConfig => IsPlayback ? _playbackConfig : _liveConfig; + + public TestConfig(Func getRecordedMode) + { + _getRecordedMode = getRecordedMode ?? throw new ArgumentNullException(nameof(getRecordedMode)); + _recordedConfig = new(new DefaultFirstStringComparer()); + + // Load the previous playback configuration and use that to initialize the recorded config + string playbackConfigJson = Path.Combine(AssetsSubFolder, PlaybackAssetsJson); + _playbackConfig = ReadJsonConfig(playbackConfigJson)!; + if (_playbackConfig == null) + { + throw new InvalidOperationException($"The playback config file was not found: {playbackConfigJson}"); + } + + foreach (var kvp in _playbackConfig) + { + _recordedConfig.Add(kvp.Key, new SanitizedJsonConfig(kvp.Value)); + } + + // Try to load the configuration to use against the real service (e.g. 
recording or live mode) + _liveConfig = new[] + { + AssetsJson, + Path.Combine(AssetsSubFolder, AssetsJson), + Path.Combine(Environment.GetFolderPath(Environment.SpecialFolder.UserProfile), ".azure", AssetsSubFolder, AssetsJson), + Path.Combine(Environment.GetFolderPath(Environment.SpecialFolder.ApplicationData), ".azure", AssetsSubFolder, AssetsJson), + } + .Select(f => ReadJsonConfig(f)) + .FirstOrDefault(c => c != null) + ?? new Dictionary(); + } + + public virtual IConfiguration? GetConfig() + => GetConfig(ToKey()); + + public virtual IConfiguration? GetConfig(string name) + { + // In order to populate each property of the Config object, the search order is as follows: + // 1. Getting the specific config for the name in the JSON config file + // 2. Getting the value from the default config + // 3. (Not in playback) Getting the value from the AZURE_OPENAI__ environment variable + // 4. (Not in playback) Getting the value from the AZURE_OPENAI_ environment variable + // It will fall through each one if the value is null + + return new FlattenedConfig( + [ + new NamedConfig(CurrentConfig.GetValueOrDefault(name), name), + new NamedConfig(CurrentConfig.GetValueOrDefault(JsonConfig.DEFAULT_CONFIG_NAME), null), + IsPlayback ? null : new EnvironmentValuesConfig(AZURE_OPENAI_ENV_KEY_PREFIX, name), + IsPlayback ? null : new EnvironmentValuesConfig(AZURE_OPENAI_ENV_KEY_PREFIX) + ], _recordedConfig); + } + + public virtual void SavePlaybackConfig() + { + try + { + string? 
sourceDirectoryPath = typeof(TestConfig).Assembly + .GetCustomAttributes() + .FirstOrDefault(attrib => attrib.Key == "TestProjectSourceBasePath") + ?.Value; + + if (sourceDirectoryPath != null) + { + string playbackConfigJson = Path.Combine(sourceDirectoryPath, AssetsSubFolder, PlaybackAssetsJson); + + string oldJson = string.Empty; + if (File.Exists(playbackConfigJson)) + { + oldJson = File.ReadAllText(playbackConfigJson); + } + + string newJson = JsonSerializer.Serialize(_recordedConfig, JsonConfig.JSON_OPTIONS); + + // Visual Studio's hot reload feature can get upset if you are debugging the code and the playback config + // file changes, so we only save it if it is different + if (oldJson != newJson) + { + File.WriteAllText(playbackConfigJson, newJson, Encoding.UTF8); + } + } + } + catch (Exception ex) + { + Console.Error.WriteLine("Failed to save the playback configuration file. Details: " + ex); + } + } + + protected static string ToKey() + { + string fullName = typeof(TClient).Name; + int stopAt = fullName.LastIndexOf("Client"); + stopAt = stopAt == -1 ? fullName.Length : stopAt; + + StringBuilder builder = new(fullName.Length); + bool prevWasUpper = true; + + for (int i = 0; i < stopAt; i++) + { + char c = fullName[i]; + if (char.IsUpper(c)) + { + if (prevWasUpper) + { + builder.Append(char.ToLowerInvariant(c)); + } + else + { + builder.Append('_'); + builder.Append(char.ToLowerInvariant(c)); + } + + prevWasUpper = true; + } + else + { + builder.Append(c); + prevWasUpper = false; + } + } + + return builder.ToString(); + } + + protected static IReadOnlyDictionary? ReadJsonConfig(string fullPath) + { + try + { + if (File.Exists(fullPath)) + { + string json = File.ReadAllText(fullPath); + return JsonSerializer.Deserialize>(json, JsonConfig.JSON_OPTIONS); + } + } + catch (Exception) + { + } + + return null; + } + + private class DefaultFirstStringComparer : IComparer + { + public int Compare(string? x, string? 
y) + { + if (ReferenceEquals(x, y)) + { + return 0; + } + else if (x == null) + { + return -1; + } + else if (y == null) + { + return 1; + } + else if (x == JsonConfig.DEFAULT_CONFIG_NAME && y != JsonConfig.DEFAULT_CONFIG_NAME) + { + return -1; + } + else if (x != JsonConfig.DEFAULT_CONFIG_NAME && y == JsonConfig.DEFAULT_CONFIG_NAME) + { + return 1; + } + + return string.Compare(x, y, StringComparison.Ordinal); + } + } +} diff --git a/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Utils/UnixDateTimeConverter.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Utils/UnixDateTimeConverter.cs new file mode 100644 index 000000000..a4c9b1856 --- /dev/null +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/Utils/UnixDateTimeConverter.cs @@ -0,0 +1,109 @@ +#nullable enable + +using System; +using System.Text.Json; +using System.Text.Json.Serialization; + +namespace Azure.AI.OpenAI.Tests.Utils +{ + public class UnixDateTimeConverter : JsonConverterFactory + { + private static Lazy _dateTimeOffset = new(() => new DateTimeOffsetConverter(), false); + private static Lazy _nullableDateTimeOffset = new(() => new NullableDateTimeOffsetConverter(), false); + private static Lazy _dateTime = new(() => new DateTimeConverter(), false); + private static Lazy _nullableDateTime = new(() => new NullableDateTimeConverter(), false); + + public override bool CanConvert(Type typeToConvert) + => typeToConvert == typeof(DateTime) + || typeToConvert == typeof(DateTime?) 
+ || typeToConvert == typeof(DateTimeOffset) + || typeToConvert == typeof(DateTimeOffset?); + + public override JsonConverter CreateConverter(Type typeToConvert, JsonSerializerOptions options) + { + switch (typeToConvert) + { + case Type t when t == typeof(DateTime): + return _dateTime.Value; + case Type t when t == typeof(DateTime?): + return _nullableDateTime.Value; + case Type t when t == typeof(DateTimeOffset): + return _dateTimeOffset.Value; + case Type t when t == typeof(DateTimeOffset?): + return _nullableDateTimeOffset.Value; + default: + throw new NotSupportedException(); + } + } + + private static DateTimeOffset? Read(ref Utf8JsonReader reader) + { + if (reader.TokenType == JsonTokenType.Null) + { + return default; + } + else if (reader.TokenType == JsonTokenType.Number) + { + long unixTimeInSeconds = reader.GetInt64(); + return DateTimeOffset.FromUnixTimeSeconds(unixTimeInSeconds).ToLocalTime(); + } + else if (reader.TokenType == JsonTokenType.String + && long.TryParse(reader.GetString(), out long unixTime)) + { + return DateTimeOffset.FromUnixTimeSeconds(unixTime).ToLocalTime(); + } + else + { + throw new JsonException("Expected a number token type but got " + reader.TokenType); + } + } + + private static void Write(Utf8JsonWriter writer, DateTimeOffset? value) + { + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteNumberValue(value.Value.ToUnixTimeSeconds()); + } + } + + private class DateTimeOffsetConverter : JsonConverter + { + public override DateTimeOffset Read(ref Utf8JsonReader reader, Type typeToConvert, JsonSerializerOptions options) + => UnixDateTimeConverter.Read(ref reader) ?? default; + + public override void Write(Utf8JsonWriter writer, DateTimeOffset value, JsonSerializerOptions options) + => UnixDateTimeConverter.Write(writer, value); + } + + private class NullableDateTimeOffsetConverter : JsonConverter + { + public override DateTimeOffset? 
Read(ref Utf8JsonReader reader, Type typeToConvert, JsonSerializerOptions options) + => UnixDateTimeConverter.Read(ref reader); + + public override void Write(Utf8JsonWriter writer, DateTimeOffset? value, JsonSerializerOptions options) + => UnixDateTimeConverter.Write(writer, value); + } + + private class DateTimeConverter : JsonConverter + { + public override DateTime Read(ref Utf8JsonReader reader, Type typeToConvert, JsonSerializerOptions options) + => UnixDateTimeConverter.Read(ref reader)?.LocalDateTime ?? default; + + public override void Write(Utf8JsonWriter writer, DateTime value, JsonSerializerOptions options) + => UnixDateTimeConverter.Write(writer, value); + } + + private class NullableDateTimeConverter : JsonConverter + { + public override DateTime? Read(ref Utf8JsonReader reader, Type typeToConvert, JsonSerializerOptions options) + => UnixDateTimeConverter.Read(ref reader)?.LocalDateTime ?? default; + + public override void Write(Utf8JsonWriter writer, DateTime? value, JsonSerializerOptions options) + => UnixDateTimeConverter.Write(writer, value); + } + } +} diff --git a/.dotnet.azure/tests/VectorStoreTests.cs b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/VectorStoreTests.cs similarity index 62% rename from .dotnet.azure/tests/VectorStoreTests.cs rename to .dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/VectorStoreTests.cs index e0a6cfca0..2365e0f19 100644 --- a/.dotnet.azure/tests/VectorStoreTests.cs +++ b/.dotnet.azure/sdk/openai/Azure.AI.OpenAI/tests/VectorStoreTests.cs @@ -3,40 +3,48 @@ #nullable disable +using System; +using System.ClientModel; +using System.Collections.Generic; +using System.Linq; +using System.Threading; +using System.Threading.Tasks; +using Azure.AI.OpenAI.Tests.Utils.Config; +using NUnit.Framework; using OpenAI; +using OpenAI.Assistants; using OpenAI.Files; +using OpenAI.TestFramework; using OpenAI.VectorStores; -using System.ClientModel; -using System.ClientModel.Primitives; namespace Azure.AI.OpenAI.Tests; -#pragma warning 
disable OPENAI001 - -public class VectorStoreTests : TestBase +public class VectorStoreTests : AoaiTestBase { + public VectorStoreTests(bool isAsync) : base(isAsync) + { } + [Test] [Category("Smoke")] public void CanCreateClient() { - AzureOpenAIClient client = new(); - VectorStoreClient vectorStoreClient = client.GetVectorStoreClient(); - Assert.That(vectorStoreClient, Is.Not.Null); + VectorStoreClient client = GetTestClient(); + Assert.That(client, Is.Not.Null); } - [Test] - public void CanCreateGetAndDeleteVectorStores() + [RecordedTest] + public async Task CanCreateGetAndDeleteVectorStores() { VectorStoreClient client = GetTestClient(); - VectorStore vectorStore = client.CreateVectorStore(); + VectorStore vectorStore = await client.CreateVectorStoreAsync(); Validate(vectorStore); - bool deleted = client.DeleteVectorStore(vectorStore); + bool deleted = await client.DeleteVectorStoreAsync(vectorStore); Assert.That(deleted, Is.True); - IReadOnlyList testFiles = GetNewTestFiles(5); + IReadOnlyList testFiles = await GetNewTestFilesAsync(client.GetConfigOrThrow(), 5); - vectorStore = client.CreateVectorStore(new() + vectorStore = await client.CreateVectorStoreAsync(new VectorStoreCreationOptions() { FileIds = { testFiles[0].Id }, Name = "test vector store", @@ -62,7 +70,7 @@ public void CanCreateGetAndDeleteVectorStores() Assert.That(vectorStore.Status, Is.EqualTo(VectorStoreStatus.InProgress)); Assert.That(vectorStore.Metadata?.TryGetValue("test-key", out string metadataValue) == true && metadataValue == "test-value"); }); - vectorStore = client.GetVectorStore(vectorStore); + vectorStore = await client.GetVectorStoreAsync(vectorStore); Assert.Multiple(() => { Assert.That(vectorStore.Name, Is.EqualTo("test vector store")); @@ -74,10 +82,10 @@ public void CanCreateGetAndDeleteVectorStores() Assert.That(vectorStore.Metadata?.TryGetValue("test-key", out string metadataValue) == true && metadataValue == "test-value"); }); - deleted = 
client.DeleteVectorStore(vectorStore.Id); + deleted = await client.DeleteVectorStoreAsync(vectorStore.Id); Assert.That(deleted, Is.True); - vectorStore = client.CreateVectorStore(new() + vectorStore = await client.CreateVectorStoreAsync(new VectorStoreCreationOptions() { FileIds = testFiles.Select(file => file.Id).ToList() }); @@ -89,45 +97,8 @@ public void CanCreateGetAndDeleteVectorStores() }); } - [Test] - public void CanEnumerateVectorStores() - { - VectorStoreClient client = GetTestClient(); - for (int i = 0; i < 10; i++) - { - VectorStore vectorStore = client.CreateVectorStore(new VectorStoreCreationOptions() - { - Name = $"Test Vector Store {i}", - }); - Validate(vectorStore); - Assert.That(vectorStore.Name, Is.EqualTo($"Test Vector Store {i}")); - } - - int lastIdSeen = int.MaxValue; - int count = 0; - - foreach (VectorStore vectorStore in client.GetVectorStores(ListOrder.NewestFirst)) - { - Assert.That(vectorStore.Id, Is.Not.Null); - if (vectorStore.Name?.StartsWith("Test Vector Store ") == true) - { - string idString = vectorStore.Name["Test Vector Store ".Length..]; - - Assert.That(int.TryParse(idString, out int seenId), Is.True); - Assert.That(seenId, Is.LessThan(lastIdSeen)); - lastIdSeen = seenId; - } - if (lastIdSeen == 0 || ++count >= 100) - { - break; - } - } - - Assert.That(lastIdSeen, Is.EqualTo(0)); - } - - [Test] - public async Task CanEnumerateVectorStoresAsync() + [RecordedTest] + public async Task CanEnumerateVectorStores() { VectorStoreClient client = GetTestClient(); for (int i = 0; i < 10; i++) @@ -140,15 +111,17 @@ public async Task CanEnumerateVectorStoresAsync() Assert.That(vectorStore.Name, Is.EqualTo($"Test Vector Store {i}")); } + AsyncPageCollection response = client.GetVectorStoresAsync(new VectorStoreCollectionOptions() { Order = VectorStoreCollectionOrder.Descending }); + Assert.That(response, Is.Not.Null); + int lastIdSeen = int.MaxValue; int count = 0; - - await foreach (VectorStore vectorStore in 
client.GetVectorStoresAsync(ListOrder.NewestFirst)) + await foreach (VectorStore vectorStore in response.GetAllValuesAsync()) { Assert.That(vectorStore.Id, Is.Not.Null); if (vectorStore.Name?.StartsWith("Test Vector Store ") == true) { - string idString = vectorStore.Name["Test Vector Store ".Length..]; + string idString = vectorStore.Name.Substring("Test Vector Store ".Length); Assert.That(int.TryParse(idString, out int seenId), Is.True); Assert.That(seenId, Is.LessThan(lastIdSeen)); @@ -163,18 +136,18 @@ public async Task CanEnumerateVectorStoresAsync() Assert.That(lastIdSeen, Is.EqualTo(0)); } - [Test] - public void CanAssociateFiles() + [RecordedTest] + public async Task CanAssociateFiles() { VectorStoreClient client = GetTestClient(); - VectorStore vectorStore = client.CreateVectorStore(); + VectorStore vectorStore = await client.CreateVectorStoreAsync(); Validate(vectorStore); - IReadOnlyList files = GetNewTestFiles(3); + IReadOnlyList files = await GetNewTestFilesAsync(client.GetConfigOrThrow(), 3); foreach (OpenAIFileInfo file in files) { - VectorStoreFileAssociation association = client.AddFileToVectorStore(vectorStore, file); + VectorStoreFileAssociation association = await client.AddFileToVectorStoreAsync(vectorStore, file); Validate(association); Assert.Multiple(() => { @@ -182,38 +155,38 @@ public void CanAssociateFiles() Assert.That(association.VectorStoreId, Is.EqualTo(vectorStore.Id)); Assert.That(association.LastError, Is.Null); Assert.That(association.CreatedAt, Is.GreaterThan(s_2024)); - Assert.That(association.Status, Is.EqualTo(VectorStoreFileAssociationStatus.InProgress)); + Assert.That(association.Status, Is.AnyOf(VectorStoreFileAssociationStatus.InProgress, VectorStoreFileAssociationStatus.Completed)); }); } - bool removed = client.RemoveFileFromStore(vectorStore, files[0]); + bool removed = await client.RemoveFileFromStoreAsync(vectorStore, files[0]); Assert.True(removed); // Errata: removals aren't immediately reflected when requesting the 
list Thread.Sleep(1000); int count = 0; - foreach (VectorStoreFileAssociation association in client.GetFileAssociations(vectorStore)) + AsyncPageCollection response = client.GetFileAssociationsAsync(vectorStore); + await foreach (VectorStoreFileAssociation association in response.GetAllValuesAsync()) { count++; Assert.That(association.FileId, Is.Not.EqualTo(files[0].Id)); Assert.That(association.VectorStoreId, Is.EqualTo(vectorStore.Id)); } + Assert.That(count, Is.EqualTo(2)); } - [Test] - public void CanUseBatchIngestion() + [RecordedTest] + public async Task CanUseBatchIngestion() { VectorStoreClient client = GetTestClient(); - VectorStore vectorStore = client.CreateVectorStore(); + VectorStore vectorStore = await client.CreateVectorStoreAsync(); Validate(vectorStore); - IReadOnlyList testFiles = GetNewTestFiles(5); - - VectorStoreBatchFileJob batchJob = client.CreateBatchFileJob(vectorStore, testFiles); - Validate(batchJob); + IReadOnlyList testFiles = await GetNewTestFilesAsync(client.GetConfigOrThrow(), 3); + VectorStoreBatchFileJob batchJob = await client.CreateBatchFileJobAsync(vectorStore, testFiles); Assert.Multiple(() => { Assert.That(batchJob.BatchId, Is.Not.Null); @@ -221,12 +194,14 @@ public void CanUseBatchIngestion() Assert.That(batchJob.Status, Is.EqualTo(VectorStoreBatchFileJobStatus.InProgress)); }); - for (int i = 0; i < 10 && client.GetBatchFileJob(batchJob).Value.Status != VectorStoreBatchFileJobStatus.Completed; i++) - { - Thread.Sleep(500); - } + batchJob = await WaitUntilReturnLast( + batchJob, + () => client.GetBatchFileJobAsync(batchJob), + b => b.Status != VectorStoreBatchFileJobStatus.InProgress); + Assert.That(batchJob.Status, Is.EqualTo(VectorStoreBatchFileJobStatus.Completed)); - foreach (VectorStoreFileAssociation association in client.GetFileAssociations(batchJob)) + AsyncPageCollection response = client.GetFileAssociationsAsync(batchJob); + await foreach (VectorStoreFileAssociation association in response.GetAllValuesAsync()) { 
Assert.Multiple(() => { @@ -240,22 +215,23 @@ public void CanUseBatchIngestion() } } - private IReadOnlyList GetNewTestFiles(int count) + private async Task> GetNewTestFilesAsync(IConfiguration config, int count) { - AzureOpenAIClient azureClient = GetTestTopLevelClient(new() + AzureOpenAIClient azureClient = GetTestTopLevelClient(config, new() { ShouldOutputRequests = false, ShouldOutputResponses = false, }); - FileClient client = azureClient.GetFileClient(); + FileClient client = GetTestClient(azureClient, config); List files = []; for (int i = 0; i < count; i++) { - OpenAIFileInfo file = client.UploadFile( + OpenAIFileInfo file = await client.UploadFileAsync( BinaryData.FromString("This is a test file").ToStream(), $"test_file_{i.ToString().PadLeft(3, '0')}.txt", - FileUploadPurpose.Assistants); + FileUploadPurpose.Assistants) + .ConfigureAwait(false); Validate(file); files.Add(file); } @@ -264,4 +240,4 @@ private IReadOnlyList GetNewTestFiles(int count) } private static readonly DateTimeOffset s_2024 = new(2024, 1, 1, 0, 0, 0, TimeSpan.Zero); -} \ No newline at end of file +} diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/Directory.Build.props b/.dotnet.azure/sdk/openai/tools/TestFramework/Directory.Build.props new file mode 100644 index 000000000..f85173f26 --- /dev/null +++ b/.dotnet.azure/sdk/openai/tools/TestFramework/Directory.Build.props @@ -0,0 +1,18 @@ + + + + false + true + false + false + false + false + true + + + + + diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/TestFramework.sln b/.dotnet.azure/sdk/openai/tools/TestFramework/TestFramework.sln new file mode 100644 index 000000000..a88dc3caf --- /dev/null +++ b/.dotnet.azure/sdk/openai/tools/TestFramework/TestFramework.sln @@ -0,0 +1,31 @@ + +Microsoft Visual Studio Solution File, Format Version 12.00 +# Visual Studio Version 17 +VisualStudioVersion = 17.10.35013.160 +MinimumVisualStudioVersion = 10.0.40219.1 +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = 
"OpenAI.TestFramework.Tests", "tests\OpenAI.TestFramework.Tests.csproj", "{61E849EB-F8BC-47C7-B730-874DD678BEA7}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "OpenAI.TestFramework", "src\OpenAI.TestFramework.csproj", "{BE2FF759-255B-44A8-BAE7-73E287AEEB97}" +EndProject +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|Any CPU = Debug|Any CPU + Release|Any CPU = Release|Any CPU + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + {61E849EB-F8BC-47C7-B730-874DD678BEA7}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {61E849EB-F8BC-47C7-B730-874DD678BEA7}.Debug|Any CPU.Build.0 = Debug|Any CPU + {61E849EB-F8BC-47C7-B730-874DD678BEA7}.Release|Any CPU.ActiveCfg = Release|Any CPU + {61E849EB-F8BC-47C7-B730-874DD678BEA7}.Release|Any CPU.Build.0 = Release|Any CPU + {BE2FF759-255B-44A8-BAE7-73E287AEEB97}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {BE2FF759-255B-44A8-BAE7-73E287AEEB97}.Debug|Any CPU.Build.0 = Debug|Any CPU + {BE2FF759-255B-44A8-BAE7-73E287AEEB97}.Release|Any CPU.ActiveCfg = Release|Any CPU + {BE2FF759-255B-44A8-BAE7-73E287AEEB97}.Release|Any CPU.Build.0 = Release|Any CPU + EndGlobalSection + GlobalSection(SolutionProperties) = preSolution + HideSolutionNode = FALSE + EndGlobalSection + GlobalSection(ExtensibilityGlobals) = postSolution + SolutionGuid = {F145C399-D9D8-45F9-87DC-4BFFF983FA91} + EndGlobalSection +EndGlobal diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/assets.json b/.dotnet.azure/sdk/openai/tools/TestFramework/assets.json new file mode 100644 index 000000000..d33e24017 --- /dev/null +++ b/.dotnet.azure/sdk/openai/tools/TestFramework/assets.json @@ -0,0 +1,6 @@ +{ + "AssetsRepo": "Azure/azure-sdk-assets", + "AssetsRepoPrefixPath": "net", + "TagPrefix": "net/openai/OpenAI.TestFramework", + "Tag": "net/openai/OpenAI.TestFramework_f41330e3ac" +} diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/src/Adapters/SyncToAsyncCollectionResult.cs 
b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Adapters/SyncToAsyncCollectionResult.cs new file mode 100644 index 000000000..64096eb4a --- /dev/null +++ b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Adapters/SyncToAsyncCollectionResult.cs @@ -0,0 +1,83 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using System.ClientModel; +using System.Runtime.CompilerServices; +using System.Runtime.ExceptionServices; + +namespace OpenAI.TestFramework.Adapters; + +/// +/// An adapter to make a look and work like a . This +/// simplifies writing test cases +/// +/// The type of the items the enumerator returns +public class SyncToAsyncCollectionResult : AsyncCollectionResult +{ + private bool _responseSet; + private CollectionResult? _syncCollection; + private Exception? _ex; + + /// + /// Creates a new instance + /// + /// The synchronous collection to wrap + /// If the collection was null + public SyncToAsyncCollectionResult(CollectionResult syncCollection) + { + _syncCollection = syncCollection ?? throw new ArgumentNullException(nameof(syncCollection)); + TrySetRawResponse(); + } + + /// + /// Creates a new instance. + /// + /// The exception to throw. + /// If the exception was null. + public SyncToAsyncCollectionResult(Exception ex) + { + _ex = ex ?? 
throw new ArgumentNullException(nameof(ex)); + _syncCollection = null; + } + + /// + public override IAsyncEnumerator GetAsyncEnumerator(CancellationToken cancellationToken = default) + { + return InnerEnumerable(cancellationToken).GetAsyncEnumerator(); + } + + private async IAsyncEnumerable InnerEnumerable([EnumeratorCancellation] CancellationToken cancellationToken = default) + { + if (_ex != null) + { + ExceptionDispatchInfo.Capture(_ex).Throw(); + } + + var asyncWrapper = new SyncToAsyncEnumerator(_syncCollection?.GetEnumerator()!, cancellationToken); + while (await asyncWrapper.MoveNextAsync().ConfigureAwait(false)) + { + TrySetRawResponse(); + yield return asyncWrapper.Current; + } + } + + private void TrySetRawResponse() + { + if (_responseSet) + { + return; + } + + // Client result doesn't provide virtual methods so we have to manually set it ourselves here + try + { + var raw = _syncCollection?.GetRawResponse(); + if (raw != null) + { + SetRawResponse(raw); + _responseSet = true; + } + } + catch (Exception) { /* dont' care */ } + } +} diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/src/Adapters/SyncToAsyncEnumerable.cs b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Adapters/SyncToAsyncEnumerable.cs new file mode 100644 index 000000000..c71c1a0e1 --- /dev/null +++ b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Adapters/SyncToAsyncEnumerable.cs @@ -0,0 +1,46 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +namespace OpenAI.TestFramework.Adapters; + +/// +/// Wraps an as an +/// +/// The type of items being enumerated. +public class SyncToAsyncEnumerable : IAsyncEnumerable +{ + private IEnumerable _enumerable; + Exception? _ex; + + /// + /// Creates a new instance. + /// + /// The synchronous enumerable to wrap. + public SyncToAsyncEnumerable(IEnumerable enumerable) + { + _enumerable = enumerable; + } + + /// + /// Creates a new instance. + /// + /// The synchronous enumerable to wrap. 
+ public SyncToAsyncEnumerable(Exception ex) + { + _ex = ex; + _enumerable = Array.Empty(); + } + + /// + public IAsyncEnumerator GetAsyncEnumerator(CancellationToken cancellationToken = default) + { + if (_ex != null) + { + return new SyncToAsyncEnumerator(_ex); + } + else + { + return new SyncToAsyncEnumerator(_enumerable.GetEnumerator(), cancellationToken); + } + } +} diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/src/Adapters/SyncToAsyncEnumerator.cs b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Adapters/SyncToAsyncEnumerator.cs new file mode 100644 index 000000000..fa0ce81b0 --- /dev/null +++ b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Adapters/SyncToAsyncEnumerator.cs @@ -0,0 +1,64 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using System.Runtime.ExceptionServices; + +namespace OpenAI.TestFramework.Adapters; + +/// +/// Wraps an as an +/// +/// The type of items being enumerated. +public class SyncToAsyncEnumerator : IAsyncEnumerator +{ + private IEnumerator _sync; + private CancellationToken _token; + private Exception? _ex; + + /// + /// Creates a new instance. + /// + /// The synchronous enumerator to wrap. + /// (Optional) The cancellation token to use. + /// If the enumerator was null. + public SyncToAsyncEnumerator(IEnumerator sync, CancellationToken token = default) + { + _sync = sync ?? throw new ArgumentNullException(nameof(sync)); + _token = token; + } + + /// + /// Creates a new instance. + /// + /// The exception to throw. + /// If the exception was null. + public SyncToAsyncEnumerator(Exception ex) + { + _sync = Enumerable.Empty().GetEnumerator(); + _token = default; + _ex = ex ?? 
throw new ArgumentNullException(nameof(ex)); + } + + /// + public T Current => _sync.Current; + + /// + public ValueTask DisposeAsync() + { + _sync.Dispose(); + return default; + } + + /// + public ValueTask MoveNextAsync() + { + if (_ex != null) + { + ExceptionDispatchInfo.Capture(_ex).Throw(); + } + + _token.ThrowIfCancellationRequested(); + bool ret = _sync.MoveNext(); + return new ValueTask(ret); + } +} diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/src/Adapters/SyncToAsyncPageCollection.cs b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Adapters/SyncToAsyncPageCollection.cs new file mode 100644 index 000000000..89b963137 --- /dev/null +++ b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Adapters/SyncToAsyncPageCollection.cs @@ -0,0 +1,67 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using System.ClientModel; +using System.Runtime.ExceptionServices; + +namespace OpenAI.TestFramework.Adapters; + +/// +/// An adapter to make a look and work like a . This +/// simplifies writing test cases. +/// +/// The type of the items the enumerator returns. +public class SyncToAsyncPageCollection : AsyncPageCollection +{ + private PageCollection? _syncCollection; + private Exception? _ex; + + /// + /// Creates a new instance. + /// + /// The synchronous collection to wrap. + /// If the collection was null. + public SyncToAsyncPageCollection(PageCollection syncCollection) + { + _syncCollection = syncCollection ?? throw new ArgumentNullException(nameof(syncCollection)); + } + + /// + /// Creates a new instance. + /// + /// The exception to throw. + /// If the exception was null. + public SyncToAsyncPageCollection(Exception ex) + { + _ex = ex ?? 
throw new ArgumentNullException(nameof(ex)); + _syncCollection = null; + } + + /// + protected override Task> GetCurrentPageAsyncCore() + { + if (_ex != null) + { + return Task.FromException>(_ex); + } + else + { + return Task.FromResult(_syncCollection!.GetCurrentPage()); + } + } + + /// + protected override async IAsyncEnumerator> GetAsyncEnumeratorCore(CancellationToken cancellationToken = default) + { + if (_ex != null) + { + ExceptionDispatchInfo.Capture(_ex).Throw(); + } + + foreach (PageResult page in _syncCollection!) + { + await Task.Delay(0).ConfigureAwait(false); + yield return page; + } + } +} diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/src/AsyncOnlyAttribute.cs b/.dotnet.azure/sdk/openai/tools/TestFramework/src/AsyncOnlyAttribute.cs new file mode 100644 index 000000000..3fbfc191f --- /dev/null +++ b/.dotnet.azure/sdk/openai/tools/TestFramework/src/AsyncOnlyAttribute.cs @@ -0,0 +1,14 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using NUnit.Framework; + +namespace OpenAI.TestFramework; + +/// +/// Attribute that can be applied to a test to indicate it only runs in asynchronous mode. +/// +[AttributeUsage(AttributeTargets.Method, AllowMultiple = false, Inherited = true)] +public class AsyncOnlyAttribute() : NUnitAttribute +{ +} diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/src/AutoSyncAsync/AsyncToSyncInterceptor.cs b/.dotnet.azure/sdk/openai/tools/TestFramework/src/AutoSyncAsync/AsyncToSyncInterceptor.cs new file mode 100644 index 000000000..08fe268ec --- /dev/null +++ b/.dotnet.azure/sdk/openai/tools/TestFramework/src/AutoSyncAsync/AsyncToSyncInterceptor.cs @@ -0,0 +1,429 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +using System.ClientModel; +using System.Collections.Concurrent; +using System.Diagnostics; +using System.Reflection; +using Castle.DynamicProxy; +using OpenAI.TestFramework.Adapters; +using Ext = OpenAI.TestFramework.Utils.TypeExtensions; + +namespace OpenAI.TestFramework.AutoSyncAsync; + +/// +/// An interceptor for Castle dynamic proxies that allows you to call the synchronous version of a method when the asynchronous one +/// is called on the proxy. This is useful for testing where you can write the async version of a test, and then automatically test +/// both async and sync methods with the same test code. +/// +[DebuggerStepThrough] +public class AsyncToSyncInterceptor : IInterceptor +{ + private const string AsyncSuffix = "Async"; + + private static readonly TypeArrayEquality s_typeArrayEquality = new(); + private static readonly ConcurrentDictionary> s_syncAsyncPairs = new(); + private static readonly MethodInfo s_taskFromResult = typeof(Task).GetMethod(nameof(Task.FromResult), BindingFlags.Public | BindingFlags.Static)!; + private static readonly MethodInfo s_taskFromException = typeof(Task) + .GetMethods(BindingFlags.Static | BindingFlags.Public) + .Where(m => m.Name == nameof(Task.FromException) && m.IsGenericMethodDefinition) + .First(); + + private readonly BindingFlags _flags; + + /// + /// Creates a new instance. + /// + /// True if you want to use async methods, false otherwise. + /// The binding flags to use when searching for methods. Default is public instance methods. + public AsyncToSyncInterceptor(bool useAsync, BindingFlags flags = BindingFlags.Public | BindingFlags.Instance) + { + UseAsync = useAsync; + _flags = flags; + } + + /// + /// Gets the shared use sync methods instance. + /// + public static AsyncToSyncInterceptor UseSyncMethods { get; } = new(false); + + /// + /// Gets the shared use async methods instance. 
+ /// + public static AsyncToSyncInterceptor UseAsyncMethods { get; } = new(true); + + /// + [DebuggerStepThrough] + public virtual void Intercept(IInvocation invocation) + { + // 1. Should we even intercept this? + if (ShouldSkipIntercepting(invocation.Method)) + { + invocation.Proceed(); + return; + } + + // 2. Check if this method is one of a pair of Operation and OperationAsync methods. + bool isSyncAsyncPair = IsMethodSyncAsyncPair(invocation.Method); + if (!isSyncAsyncPair) + { + throw CreateEx("Method does not have a synchronous and asynchronous pair", invocation.Method); + } + + // 3. If it is, check if the method is the synchronous version. We only allow async versions in the test code + bool isAsyncMethod = invocation.Method.Name.EndsWith(AsyncSuffix); + if (!isAsyncMethod) + { + throw CreateEx("You must use the asynchronous versions of the methods when writing your tests", invocation.Method); + } + + Type asyncReturnType = invocation.Method.ReturnType; + + // 4. Call the correct synchronous or asynchronous method and warp the returned result or exception + if (UseAsync) + { + // Async method running in async mode, no need to do anything, special, continue normally + invocation.Proceed(); + } + else + { + // Call the equivalent sync method + string methodName = RemoveAsyncSuffix(invocation.Method.Name); + Type expectedReturnType = ToSyncRetType(asyncReturnType); + Type[] expectedArgs = invocation.Method.GetParameters().Select(p => p.ParameterType).ToArray(); + + MethodInfo syncMethod = invocation.TargetType.GetMethod( + methodName, _flags, binder: null, expectedArgs, modifiers: null)!; + + // this should never happen since we've already checked for the existence of the expected method + Debug.Assert(syncMethod != null); + if (syncMethod == null) + { + throw CreateEx("Could not find the synchronous version of the method", invocation.Method); + } + + if (syncMethod.ContainsGenericParameters) + { + syncMethod = 
syncMethod.MakeGenericMethod(invocation.Method.GetGenericArguments()); + } + + // Call the synchronous method + try + { + object? result = syncMethod.Invoke(invocation.InvocationTarget, invocation.Arguments); + if (result != null && !expectedReturnType.IsAssignableFrom(result.GetType())) + { + throw CreateEx("The synchronous method returned an unexpected type", invocation.Method); + } + + invocation.ReturnValue = ToAsyncResult(asyncReturnType, result); + } + catch (TargetInvocationException ex) + { + invocation.ReturnValue = ToAsyncException(asyncReturnType, ex.InnerException ?? ex); + } + } + } + + /// + /// Whether or not we are using async methods. + /// + public bool UseAsync { get; } + + /// Determines whether or not we should skip intercepting this method or not. + /// + /// The method we are inspecting. + /// True to skip intercepting this method, false otherwise. + protected virtual bool ShouldSkipIntercepting(MethodInfo? method) + { + return method == null + // Skip for special names (i.e. getters and setters) + || method.IsSpecialName + // Also for dispose methods + || method.Name == nameof(IDisposable.Dispose) + || method.Name == nameof(IAsyncDisposable.DisposeAsync); + } + + /// + /// Determines whether or not the specified method is part of a pair of synchronous and asynchronous methods. This will + /// check based on 3 factors: + /// + /// If there is a "???" and "???Async" pair of named methods + /// If the arguments are exactly the same for both methods + /// If we know how to determine the expected return type for the synchronous method, from the asynchronous one + /// + /// + /// The method to check. + /// True if it is, false otherwise. + protected virtual bool IsMethodSyncAsyncPair(MethodInfo? 
method) + { + if (method == null || method.DeclaringType == null) + { + return false; + } + + ISet validPrefixes = s_syncAsyncPairs.GetOrAdd(method.DeclaringType, t => DetermineValidSyncAsyncPairs(t, _flags)); + return validPrefixes.Contains(RemoveAsyncSuffix(method.Name)); + } + + /// + /// Determines what the corresponding synchronous return type would be for the specified asynchronous return type. + /// + /// The asynchronous return type. + /// The corresponding synchronous return type. + /// If we don't know what the equivalent would be. + protected virtual Type ToSyncRetType(Type asyncReturnType) + { + if (typeof(Task) == asyncReturnType || typeof(ValueTask) == asyncReturnType) + { + return typeof(void); + } + else if (Ext.IsClosedGenericOf(asyncReturnType, typeof(Task<>), out Type[] genericTypes)) + { + return genericTypes[0]; + } + else if (Ext.IsClosedGenericOf(asyncReturnType, typeof(ValueTask<>), out genericTypes)) + { + return genericTypes[0]; + } + else if (Ext.IsClosedGenericOf(asyncReturnType, typeof(AsyncPageCollection<>), out genericTypes)) + { + return typeof(PageCollection<>).MakeGenericType(genericTypes); + } + else if (Ext.IsClosedGenericOf(asyncReturnType, typeof(AsyncCollectionResult<>), out genericTypes)) + { + return typeof(CollectionResult<>).MakeGenericType(genericTypes); + } + else if (Ext.IsClosedGenericOf(asyncReturnType, typeof(IAsyncEnumerable<>), out genericTypes)) + { + return typeof(IEnumerable<>).MakeGenericType(genericTypes); + } + else + { + throw new NotSupportedException("Don't know how to create the sync to async wrapper for " + asyncReturnType.FullName); + } + } + + /// + /// Wraps the result from a synchronous method into the equivalent asynchronous return type. + /// + /// The asynchronous return type. + /// The result to wrap. + /// The wrapped result. + /// If we don't support the conversion. + protected virtual object? ToAsyncResult(Type asyncReturnType, object? 
result) + { + if (typeof(Task) == asyncReturnType) + { + return Task.CompletedTask; + } + else if (Ext.IsClosedGenericOf(asyncReturnType, typeof(Task<>), out Type[] genericTypes)) + { + return s_taskFromResult + .MakeGenericMethod(genericTypes) + .Invoke(null, [result]); + } + else if (typeof(ValueTask) == asyncReturnType) + { + return new ValueTask(); + } + else if (Ext.IsClosedGenericOf(asyncReturnType, typeof(ValueTask<>), out genericTypes)) + { + return Activator.CreateInstance( + typeof(ValueTask<>).MakeGenericType(genericTypes), + result); + } + else if (Ext.IsClosedGenericOf(asyncReturnType, typeof(AsyncPageCollection<>), out genericTypes)) + { + return Activator.CreateInstance( + typeof(SyncToAsyncPageCollection<>).MakeGenericType(genericTypes), + result); + } + else if (Ext.IsClosedGenericOf(asyncReturnType, typeof(AsyncCollectionResult<>), out genericTypes)) + { + return Activator.CreateInstance( + typeof(SyncToAsyncCollectionResult<>).MakeGenericType(genericTypes), + result); + } + else if (Ext.IsClosedGenericOf(asyncReturnType, typeof(IAsyncEnumerable<>), out genericTypes)) + { + return Activator.CreateInstance( + typeof(SyncToAsyncEnumerable<>).MakeGenericType(genericTypes), + result); + } + else + { + throw new NotSupportedException("Don't know how to wrap the exception for " + asyncReturnType.FullName); + } + } + + /// + /// Wraps the exception from a synchronous method into the equivalent asynchronous return type. + /// + /// The asynchronous return type. + /// The exception to wrap. + /// The wrapped exception. + /// If we don't support the conversion. + protected virtual object? 
ToAsyncException(Type asyncReturnType, Exception ex) + { + if (typeof(Task) == asyncReturnType) + { + return Task.FromException(ex); + } + else if (Ext.IsClosedGenericOf(asyncReturnType, typeof(Task<>), out Type[] genericTypes)) + { + return s_taskFromException + .MakeGenericMethod(genericTypes) + .Invoke(null, [ex]); + } + else if (typeof(ValueTask) == asyncReturnType) + { + return new ValueTask(Task.FromException(ex)); + } + else if (Ext.IsClosedGenericOf(asyncReturnType, typeof(ValueTask<>), out genericTypes)) + { + var failedTask = s_taskFromException + .MakeGenericMethod(genericTypes) + .Invoke(null, [ex]); + return Activator.CreateInstance( + typeof(ValueTask<>).MakeGenericType(genericTypes), + failedTask); + } + else if (Ext.IsClosedGenericOf(asyncReturnType, typeof(AsyncPageCollection<>), out genericTypes)) + { + return Activator.CreateInstance( + typeof(SyncToAsyncPageCollection<>).MakeGenericType(genericTypes), + ex); + } + else if (Ext.IsClosedGenericOf(asyncReturnType, typeof(AsyncCollectionResult<>), out genericTypes)) + { + return Activator.CreateInstance( + typeof(SyncToAsyncCollectionResult<>).MakeGenericType(genericTypes), + ex); + } + else if (Ext.IsClosedGenericOf(asyncReturnType, typeof(IAsyncEnumerable<>), out genericTypes)) + { + return Activator.CreateInstance( + typeof(SyncToAsyncEnumerable<>).MakeGenericType(genericTypes), + ex); + } + else + { + throw new NotSupportedException("Don't know how to determine the synchronous equivalent return type of " + asyncReturnType.FullName); + } + } + + private static InvalidOperationException CreateEx(string description, MethodInfo method) + { + return new InvalidOperationException($"{description}. '{method.DeclaringType?.Name} -> {method.Name}'"); + } + + private static string RemoveAsyncSuffix(string? name) + { + if (name == null) + return string.Empty; + + int index = name.LastIndexOf(AsyncSuffix); + return index >= 0 + ? 
name.Substring(0, index) + : name; + } + + [DebuggerStepperBoundary] + private ISet DetermineValidSyncAsyncPairs(Type declaringType, BindingFlags flags) + { + // Group potential pairs based only on the method name removing the "Async" postfix + var potentialPairs = declaringType.GetMethods(flags) + .Where(m => !m.IsSpecialName) + .GroupBy(m => RemoveAsyncSuffix(m.Name)) + .OrderBy(g => g.Key) + .Select(g => new + { + g.Key, + Potentials = g.Select(m => new + { + m.Name, + Args = m.GetParameters().Select(p => p.ParameterType).ToArray(), + Return = m.ReturnType, + }) + // Order by name to ensure OperationName comes before OperationNameAsync + .OrderBy(p => p.Name) + // Match on method arguments + .GroupBy(g => g.Args, s_typeArrayEquality) + .Select(g => g.ToArray()) + }); + + // Now evaluate potential pairs to ensure that for each argument list for that method, there exists both a synchronous + // and asynchronous version with equivalent return types + HashSet validPairPrefixes = new(); + + foreach (var entry in potentialPairs) + { + bool allValid = entry.Potentials.All(matchedPair => + { + // because of the way we sorted above, we should have exactly 2 entries here, the first is the synchronous method + // the second the corresponding asynchronous method + return matchedPair.Length == 2 + && matchedPair[0].Name + AsyncSuffix == matchedPair[1].Name + && matchedPair[0].Return == ToSyncRetType(matchedPair[1].Return); + }); + + if (allValid) + { + validPairPrefixes.Add(entry.Key); + } + } + + return validPairPrefixes; + } + + /// + /// Helper comparer that compares all of the Types in an array for equality. + /// + private class TypeArrayEquality : IEqualityComparer + { + /// + public bool Equals(Type[]? x, Type[]? 
y) + { + if (ReferenceEquals(x, y)) + { + return true; + } + else if (x == null || y == null) + { + return false; + } + else if (x.LongLength != y.LongLength) + { + return false; + } + + for (long i = 0; i < x.LongLength; i++) + { + if (x[i] != y[i]) + { + return false; + } + } + + return true; + } + + /// + public int GetHashCode(Type[] obj) + { + if (obj == null) + { + return 0; + } + + int rollingHash = 1; // to distinguish empty case from null case + for (long i = 0; i < obj.LongLength; i++) + { + rollingHash = (rollingHash, obj[i].GetHashCode()).GetHashCode(); + } + + return rollingHash; + } + } +} diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/src/AutoSyncAsync/AutoSyncAsyncMixIn.cs b/.dotnet.azure/sdk/openai/tools/TestFramework/src/AutoSyncAsync/AutoSyncAsyncMixIn.cs new file mode 100644 index 000000000..538f8fb75 --- /dev/null +++ b/.dotnet.azure/sdk/openai/tools/TestFramework/src/AutoSyncAsync/AutoSyncAsyncMixIn.cs @@ -0,0 +1,27 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +namespace OpenAI.TestFramework.AutoSyncAsync; + +/// +/// An implementation of that allows you to get the original back, as well as a place +/// to store an additional context. +/// +public class AutoSyncAsyncMixIn : IAutoSyncAsync +{ + /// + /// Creates a new instance. + /// + /// The original instance. + public AutoSyncAsyncMixIn(object original, object? context = null) + { + Original = original; + Context = context; + } + + /// + public object Original { get; } + + /// + public object? Context { get; } +} diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/src/AutoSyncAsync/IAutoSyncAsync.cs b/.dotnet.azure/sdk/openai/tools/TestFramework/src/AutoSyncAsync/IAutoSyncAsync.cs new file mode 100644 index 000000000..551df75c5 --- /dev/null +++ b/.dotnet.azure/sdk/openai/tools/TestFramework/src/AutoSyncAsync/IAutoSyncAsync.cs @@ -0,0 +1,22 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. + +namespace OpenAI.TestFramework.AutoSyncAsync; + +/// +/// An interface that serves as a way to identify a dynamically proxied class that supports automatic sync and async testing. This +/// also provides a way to get the the original un-proxied instance. +/// instance. +/// +public interface IAutoSyncAsync +{ + /// + /// Gets the original un-proxied instance back. + /// + public object Original { get; } + + /// + /// Any additional context associated with the instrumented object (e.g. options used to create it). + /// + public object? Context { get; } +} diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/src/AutoSyncAsync/TestProxyGenerationHook.cs b/.dotnet.azure/sdk/openai/tools/TestFramework/src/AutoSyncAsync/TestProxyGenerationHook.cs new file mode 100644 index 000000000..d11d5d86f --- /dev/null +++ b/.dotnet.azure/sdk/openai/tools/TestFramework/src/AutoSyncAsync/TestProxyGenerationHook.cs @@ -0,0 +1,45 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using System.Reflection; +using Castle.DynamicProxy; +using NUnit.Framework.Interfaces; +using NUnit.Framework.Internal; + +namespace OpenAI.TestFramework.AutoSyncAsync +{ + /// + /// Controls which methods are skipped during dynamic proxy generation. + /// + public class TestProxyGenerationHook : IProxyGenerationHook + { + /// + public void MethodsInspected() + { } + + /// + public void NonProxyableMemberNotification(Type type, MemberInfo memberInfo) + { } + + /// + public bool ShouldInterceptMethod(Type type, MethodInfo methodInfo) + { + IMethodInfo? testMethod = TestExecutionContext.CurrentContext.CurrentTest.Method; + + if (methodInfo == null + // Skip for special names (i.e. 
getters and setters) + || methodInfo.IsSpecialName + // Also for dispose methods + || methodInfo.Name == nameof(IDisposable.Dispose) + || methodInfo.Name == nameof(IAsyncDisposable.DisposeAsync) + // If we are running a sync only or async only, skip intercepting altogether + || testMethod?.IsDefined(false) == true + || testMethod?.IsDefined(false) == true) + { + return false; + } + + return true; + } + } +} diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/src/AutoSyncAsync/ThisLeakInterceptor.cs b/.dotnet.azure/sdk/openai/tools/TestFramework/src/AutoSyncAsync/ThisLeakInterceptor.cs new file mode 100644 index 000000000..c7d6c1e70 --- /dev/null +++ b/.dotnet.azure/sdk/openai/tools/TestFramework/src/AutoSyncAsync/ThisLeakInterceptor.cs @@ -0,0 +1,25 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using System.Diagnostics; +using Castle.DynamicProxy; + +namespace OpenAI.TestFramework.AutoSyncAsync; + +/// +/// A basic interceptor that prevents the leaking of the original un-proxied this instance as a return value. +/// +public class ThisLeakInterceptor : IInterceptor +{ + /// + [DebuggerStepThrough] + public void Intercept(IInvocation invocation) + { + invocation.Proceed(); + + if (invocation.ReturnValue == invocation.InvocationTarget) + { + invocation.ReturnValue = invocation.Proxy; + } + } +} diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/src/AutoSyncAsyncTestFixtureAttribute.cs b/.dotnet.azure/sdk/openai/tools/TestFramework/src/AutoSyncAsyncTestFixtureAttribute.cs new file mode 100644 index 000000000..29983a94d --- /dev/null +++ b/.dotnet.azure/sdk/openai/tools/TestFramework/src/AutoSyncAsyncTestFixtureAttribute.cs @@ -0,0 +1,32 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +using NUnit.Framework; +using NUnit.Framework.Interfaces; +using NUnit.Framework.Internal; +using OpenAI.TestFramework.Utils; + +namespace OpenAI.TestFramework; + +/// +/// Attribute used to indicate that a test fixture should automatically be be run in both synchronous and asynchronous mode. +/// +[AttributeUsage(AttributeTargets.Class, AllowMultiple = false, Inherited = true)] +public class AutoSyncAsyncTestFixtureAttribute : NUnitAttribute, IFixtureBuilder2 +{ + /// + public IEnumerable BuildFrom(ITypeInfo typeInfo) + => BuildFrom(typeInfo, null!); + + /// + public IEnumerable BuildFrom(ITypeInfo typeInfo, IPreFilter filter) + { + List suites = + [ + .. new TestFixtureAttribute([false]).BuildFrom(typeInfo, new AndPreFilter(filter, new SyncAsyncPreFilter(false))), + .. new TestFixtureAttribute([true]).BuildFrom(typeInfo, new AndPreFilter(filter, new SyncAsyncPreFilter(true))), + ]; + + return suites; + } +} diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/src/ClientTestBase.cs b/.dotnet.azure/sdk/openai/tools/TestFramework/src/ClientTestBase.cs new file mode 100644 index 000000000..7406e738e --- /dev/null +++ b/.dotnet.azure/sdk/openai/tools/TestFramework/src/ClientTestBase.cs @@ -0,0 +1,174 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using System.Diagnostics; +using Castle.DynamicProxy; +using NUnit.Framework; +using NUnit.Framework.Internal; +using OpenAI.TestFramework.AutoSyncAsync; +using OpenAI.TestFramework.Utils; + +namespace OpenAI.TestFramework; + +/// +/// Base class for client test cases. This provides support for writing only a test that uses the Async version of +/// methods, and automatically creating a test that uses the equivalent Sync version of a method. Please note that +/// this will only work for public virtual methods. In order for this to work, you should write a test that uses the +/// async version of a method. 
+/// +[AutoSyncAsyncTestFixture] +public abstract class ClientTestBase +{ + private static ProxyGenerator? s_proxyGenerator = null; + private static ThisLeakInterceptor? s_thisLeakInterceptor = null; + private static AsyncToSyncInterceptor? s_asyncInterceptor = null; + private static AsyncToSyncInterceptor? s_syncInterceptor = null; + + private CancellationTokenSource? _cts = null; + + /// + /// Creates a new instance. + /// + /// True to run the async version of a test, false to run the sync version of a test. + public ClientTestBase(bool isAsync) + { + IsAsync = isAsync; + } + + /// + /// Gets whether or not we are running async tests. + /// + public virtual bool IsAsync { get; } + + /// + /// Gets the start time of the test. + /// + public virtual DateTimeOffset TestStartTime => TestExecutionContext.CurrentContext.StartTime.ToUniversalTime(); + + /// + /// Gets the test timeout. + /// + public virtual TimeSpan TestTimeout => Debugger.IsAttached + ? Default.DebuggerAttachedTestTimeout + : Default.TestTimeout; + + /// + /// Gets the cancellation token to use + /// + public virtual CancellationToken Token => _cts?.Token ?? default; + + [SetUp] + public void TestSetup() + { + _cts?.Dispose(); + _cts = new CancellationTokenSource(TestTimeout); + } + + [TearDown] + public void TestCleanup() + { + _cts?.Dispose(); + _cts = null; + } + + /// + /// Gets the instance to use to create proxies of classes + /// that allow you inject additional functionality in for testing. + /// + protected static ProxyGenerator ProxyGenerator => s_proxyGenerator ??= new ProxyGenerator(); + + /// + /// An interceptor that prevents leaking a reference to the original instance as a return value from methods. + /// + protected static ThisLeakInterceptor ThisLeakInterceptor => s_thisLeakInterceptor ??= new ThisLeakInterceptor(); + + /// + /// An interceptor to force the use of async version of a method. 
+ /// + protected static AsyncToSyncInterceptor UseSyncMethodInterceptor => s_syncInterceptor ??= new AsyncToSyncInterceptor(false); + + /// + /// An interceptor to force the use of sync version of a method. + /// + protected static AsyncToSyncInterceptor UseAsyncMethodInterceptor => s_asyncInterceptor ??= new AsyncToSyncInterceptor(true); + + /// + /// Wraps a client for automatic sync/async testing. This will return a proxied version of the client that will allow you to + /// automatically use the sync versions of a method. + /// + /// The type of the client instance. + /// The client instance to instrument for testing. + /// (Optional) Any additional context to associate with the wrapped client. + /// (Optional) Any additional interceptors to use. + /// The proxied version of the client. + public T WrapClient(T client, object? context = null, params IInterceptor[] interceptors) where T : class + => (T)WrapClient(typeof(T), client, context, interceptors); + + /// + /// Gets the original client from a wrapped client. + /// + /// The type of the client. + /// The wrapped client instance. + /// The original client instance. + /// The the client passed was not wrapped. + public virtual T UnWrap(T wrapped) where T : class + { + if (wrapped is IAutoSyncAsync instrumented) + { + return (T)instrumented.Original; + } + + throw new NotSupportedException($"That instance was not wrapped using {nameof(WrapClient)}"); + } + + /// + /// Gets the context associated with the wrapped instance. + /// + /// The type of the client. + /// The wrapped client. + /// The associated context for the wrapped instance. Will be null if none was set. + /// The the instance passed was not wrapped. + public virtual object? 
GetClientContext(T client) where T : class + { + if (client is IAutoSyncAsync instrumented) + { + return instrumented.Context; + } + + throw new NotSupportedException($"That instance was not wrapped using {nameof(WrapClient)}"); + } + + /// + /// Wraps a client with sync/async equivalent methods for testing. This enables the automatic testing of the sync version + /// of methods if you write an async test case. + /// + /// The type of the client. + /// The client instance to wrap. + /// (Optional) Any additional context to associate with the wrapped client. + /// (Optional) Any additional interceptors to include. + /// The wrapped version of the client. + protected internal virtual object WrapClient(Type instanceType, object client, object? context, IEnumerable? interceptors) + { + List allInterceptors = new(); + + if (interceptors != null) + { + allInterceptors.AddRange(interceptors); + } + + allInterceptors.Add(ThisLeakInterceptor); + allInterceptors.Add(IsAsync ? UseAsyncMethodInterceptor : UseSyncMethodInterceptor); + + ProxyGenerationOptions options = new(new TestProxyGenerationHook()); + options.AddMixinInstance(new AutoSyncAsyncMixIn(client, context)); + + object proxy = ProxyGenerator.CreateClassProxyWithTarget( + instanceType, + [], + client, + options, + allInterceptors.ToArray()); + + return proxy; + } +} diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/src/Mocks/CapturedMessage.cs b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Mocks/CapturedMessage.cs new file mode 100644 index 000000000..854b8cd7d --- /dev/null +++ b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Mocks/CapturedMessage.cs @@ -0,0 +1,175 @@ +// Copyright(c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using System.Net; +using System.Net.Http; +using System.Net.Http.Headers; +using OpenAI.TestFramework.Utils; + +namespace OpenAI.TestFramework.Mocks; + +/// +/// A captured message. This is used as part of the . 
+/// +public abstract class CapturedMessage +{ + private static BinaryData? s_emptyData = null; + private static IReadOnlyDictionary>? s_emptyHeaders = null; + + /// + /// An empty header dictionary. + /// + public static IReadOnlyDictionary> EMPTY_HEADERS + => s_emptyHeaders ??= new Dictionary>(); + + /// + /// Empty binary data. + /// + public static BinaryData EMPTY_DATA => s_emptyData ??= new BinaryData(Array.Empty()); + + /// + /// Gets or sets the headers of the captured message. + /// + public IReadOnlyDictionary> Headers { get; init; } = EMPTY_HEADERS; + + /// + /// Gets or sets the content of the captured message. + /// + public BinaryData Content { get; init; } = EMPTY_DATA; + + /// + /// Copies the content from the provided to a new instance. + /// + /// The to copy the content from. + /// A new instance containing the copied content. + public static BinaryData CopyContent(HttpContent? content) + { + if (content == null) + { + return EMPTY_DATA; + } + + using Stream stream = content.ReadAsStreamAsync().Result; + return BinaryData.FromStream(stream); + } + + /// + /// Copies the headers from the provided and to a new dictionary. + /// + /// The to copy headers from. + /// The to copy headers from. + /// A new dictionary containing the copied headers. + public static IReadOnlyDictionary> CopyHeaders(HttpHeaders header, HttpContentHeaders? contentHeaders) + { + Dictionary> dict = new(StringComparer.OrdinalIgnoreCase); + foreach (var kvp in header) + { + dict[kvp.Key] = new List(kvp.Value); + } + + if (contentHeaders != null) + { + foreach (var kvp in contentHeaders) + { + var list = (List?)dict.GetValueOrDefault(kvp.Key); + if (list == null) + { + list = new List(); + dict[kvp.Key] = list; + } + + list.AddRange(kvp.Value); + } + } + + return dict; + } +} + +/// +/// A captured request. +/// +public class CapturedRequest : CapturedMessage +{ + /// + /// Creates a new instance. 
+ /// + public CapturedRequest() + { } + + /// + /// Creates a new instance of using the provided . + /// + /// The to create the captured request from. + public CapturedRequest(HttpRequestMessage request) + { + if (request == null) + { + throw new ArgumentNullException(nameof(request)); + } + + Method = request.Method; + Uri = request.RequestUri; + Headers = CopyHeaders(request.Headers, request.Content?.Headers); + Content = CopyContent(request.Content); + } + + /// + /// Gets or sets the HTTP method of the captured request. + /// + public HttpMethod Method { get; init; } = HttpMethod.Get; + + /// + /// Gets or sets the URI of the captured request. + /// + public Uri? Uri { get; init; } +} + +/// +/// A captured response. +/// +public class CapturedResponse : CapturedMessage +{ + /// + /// Gets or sets the status code of the captured response. + /// + public HttpStatusCode Status { get; init; } = HttpStatusCode.OK; + + /// + /// Gets or sets the reason phrase of the captured response. + /// + public string? ReasonPhrase { get; init; } = "OK"; + + /// + /// Converts the captured response to an . + /// + /// The . 
+ public HttpResponseMessage ToResponse() + { + const string contentPrefix = "Content-"; + + HttpResponseMessage response = new() + { + StatusCode = Status, + ReasonPhrase = ReasonPhrase + }; + + foreach (var kvp in Headers.Where(h => h.Key?.StartsWith(contentPrefix) == false)) + { + response.Headers.TryAddWithoutValidation(kvp.Key, kvp.Value); + } + + if (Content != null && Content.ToMemory().Length > 0) + { + response.Content = new StreamContent(Content.ToStream()); + foreach (var kvp in Headers.Where(h => h.Key?.StartsWith(contentPrefix) == true)) + { + response.Content.Headers.TryAddWithoutValidation(kvp.Key, kvp.Value); + } + } + + return response; + } +} + + diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/src/Mocks/MockAsyncCollectionResult.cs b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Mocks/MockAsyncCollectionResult.cs new file mode 100644 index 000000000..86e871aa6 --- /dev/null +++ b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Mocks/MockAsyncCollectionResult.cs @@ -0,0 +1,32 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using System.ClientModel; +using System.ClientModel.Primitives; + +namespace OpenAI.TestFramework.Mocks; + +/// +/// Represents a mock implementation of the class. +/// +/// The type of the values in the collection. +public class MockAsyncCollectionResult : AsyncCollectionResult +{ + private readonly Func> _enumerateAsyncFunc; + + /// + /// Initializes a new instance of the class + /// with the specified asynchronous enumeration function and optional pipeline response. + /// + /// The function that asynchronously enumerates the values in the collection. + /// The optional pipeline response. + public MockAsyncCollectionResult(Func> enumerateAsyncFunc, PipelineResponse? response = null) : + base(response ?? new MockPipelineResponse()) + { + _enumerateAsyncFunc = enumerateAsyncFunc ?? 
throw new ArgumentNullException(nameof(enumerateAsyncFunc)); + } + + /// + public override IAsyncEnumerator GetAsyncEnumerator(CancellationToken cancellationToken = default) + => _enumerateAsyncFunc().GetAsyncEnumerator(cancellationToken); +} diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/src/Mocks/MockAsyncPageCollection.cs b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Mocks/MockAsyncPageCollection.cs new file mode 100644 index 000000000..424681252 --- /dev/null +++ b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Mocks/MockAsyncPageCollection.cs @@ -0,0 +1,70 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using System.ClientModel; +using System.ClientModel.Primitives; + +namespace OpenAI.TestFramework.Mocks; + +/// +/// Represents a mock implementation of the class. +/// +/// The type of the values in the collection. +public class MockAsyncPageCollection : AsyncPageCollection +{ + private readonly Func> _enumerateAsyncFunc; + private readonly PipelineResponse _response; + private readonly int _itemsPerPage; + private PageResult? _currentPage; + + /// + /// Initializes a new instance. + /// + /// The function that enumerates the collection asynchronously. + /// The pipeline response. + public MockAsyncPageCollection(Func> enumerateAsyncFunc, PipelineResponse response, int itemsPerPage = 5) + { + if (itemsPerPage <= 0) + { + throw new ArgumentOutOfRangeException(nameof(itemsPerPage)); + } + + _enumerateAsyncFunc = enumerateAsyncFunc ?? throw new ArgumentNullException(nameof(enumerateAsyncFunc)); + _response = response; + _itemsPerPage = itemsPerPage; + } + + /// + protected override Task> GetCurrentPageAsyncCore() + => Task.FromResult(_currentPage ?? 
throw new InvalidOperationException("Please call MoveNextAsync first.")); + + /// + protected override async IAsyncEnumerator> GetAsyncEnumeratorCore(CancellationToken cancellationToken = default) + { + List items = new(_itemsPerPage); + int pageStart = 0; + int rolling = 0; + + await foreach (TValue value in _enumerateAsyncFunc()) + { + items.Add(value); + rolling++; + if (items.Count == _itemsPerPage) + { + _currentPage = PageResult.Create(items, ToContinuation(pageStart), ToContinuation(rolling), _response); + yield return _currentPage; + items.Clear(); + pageStart = rolling; + } + } + + if (items.Count > 0) + { + _currentPage = PageResult.Create(items, ToContinuation(pageStart), ToContinuation(rolling), _response); + yield return _currentPage; + } + } + + private static ContinuationToken ToContinuation(int offset) + => ContinuationToken.FromBytes(BinaryData.FromBytes(BitConverter.GetBytes(offset))); +} diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/src/Mocks/MockCollectionResult.cs b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Mocks/MockCollectionResult.cs new file mode 100644 index 000000000..e12e34b34 --- /dev/null +++ b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Mocks/MockCollectionResult.cs @@ -0,0 +1,36 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using System.ClientModel; +using System.ClientModel.Primitives; + +namespace OpenAI.TestFramework.Mocks +{ + /// + /// Represents a mock implementation of the class. + /// + /// The type of the values in the collection. + public class MockCollectionResult : CollectionResult + { + private readonly Func> _enumerateFunc; + + /// + /// Initializes a new instance of the class with the specified enumeration + /// function and optional pipeline response. + /// + /// The function used to enumerate the collection. + /// The pipeline response associated with the collection. + public MockCollectionResult(Func> enumerateFunc, PipelineResponse? 
response = null) : + base(response ?? new MockPipelineResponse()) + { + _enumerateFunc = enumerateFunc ?? throw new ArgumentNullException(nameof(enumerateFunc)); + } + + /// + /// Returns an enumerator that iterates through the collection. + /// + /// An enumerator that can be used to iterate through the collection. + public override IEnumerator GetEnumerator() + => _enumerateFunc().GetEnumerator(); + } +} diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/src/Mocks/MockHeaders.cs b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Mocks/MockHeaders.cs new file mode 100644 index 000000000..7cac49376 --- /dev/null +++ b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Mocks/MockHeaders.cs @@ -0,0 +1,91 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +namespace OpenAI.TestFramework.Mocks; + +/// +/// Basic implementation of headers. +/// +public class MockHeaders +{ + private IDictionary> _headers = + new Dictionary>(StringComparer.OrdinalIgnoreCase); + + /// + /// Adds a header value. + /// + /// The name of the header. + /// The value to add. + public virtual void Add(string name, string value) + { + IList? existing; + if (!_headers.TryGetValue(name, out existing)) + { + existing = new List(); + _headers[name] = existing; + } + + existing.Add(value); + } + + /// + /// Removes all values of a header. + /// + /// The name of the header to remove. + /// True if we removed a value, false otherwise. + public virtual bool Remove(string name) => _headers.Remove(name); + + /// + /// Sets the value for a header. This will override all existing values. + /// + /// The name of the header. + /// The value to set. + public virtual void Set(string name, string value) => _headers[name] = new List() { value }; + + /// + /// Gets an enumerator for the header values. In the case of a header with more than one value, they will be joined into + /// a single comma separated string. + /// + /// The enumerator. 
+ public virtual IEnumerator> GetEnumerator() + => _headers + .Select(kvp => new KeyValuePair(kvp.Key, string.Join(",", kvp.Value))) + .GetEnumerator(); + + /// + /// Gets the value for a header. In the case of a header with more than one value, they will be joined into a single comma + /// separated string. + /// + /// The name of the header. + /// The value of the headers + /// True if the header was found, false otherwise. + public virtual bool TryGetValue(string name, out string? value) + { + if (_headers.TryGetValue(name, out IList? existing)) + { + value = string.Join(",", existing); + return true; + } + + value = null; + return false; + } + + /// + /// Gets the values for a header. + /// + /// The name of the header. + /// All of the values for the header. + /// True if the header was found, false otherwise. + public virtual bool TryGetValues(string name, out IEnumerable? values) + { + if (_headers.TryGetValue(name, out IList? existing)) + { + values = existing; + return true; + } + + values = null; + return false; + } +} diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/src/Mocks/MockHttpMessageHandler.cs b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Mocks/MockHttpMessageHandler.cs new file mode 100644 index 000000000..74f4b9980 --- /dev/null +++ b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Mocks/MockHttpMessageHandler.cs @@ -0,0 +1,124 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Net; +using System.Net.Http; + +namespace OpenAI.TestFramework.Mocks; + +/// +/// A mock message handler that doesn't use the network. This captures all received requests, and allows you to specify a handler +/// to hand craft response messages. This can be useful for unit testing. +/// +public class MockHttpMessageHandler : HttpMessageHandler, IDisposable +{ + /// + /// Handles a captured request. 
+ /// + /// The captured request. + /// The corresponding response. + public delegate CapturedResponse RequestHandlerDelegate(CapturedRequest request); + + private RequestHandlerDelegate _handler; + private List _requests; + private List _responses; + private PipelineTransport? _transport; + + /// + /// Creates a new instance. + /// + /// (Optional) The handler to use to generate responses. Default returns an empty + /// response body with HTTP 204 + public MockHttpMessageHandler(RequestHandlerDelegate? requestHandler = null) + { + _handler = requestHandler ?? ReturnEmpty; + _requests = new List(); + _responses = new List(); + } + + /// + /// Event raised when a request is received. + /// + public event EventHandler? OnRequest; + + /// + /// Event raised when a response is generated. + /// + public event EventHandler? OnResponse; + + /// + /// Gets the transport to pass to your System.ClientModel based clients. + /// + public PipelineTransport Transport => _transport ??= new HttpClientPipelineTransport(new HttpClient(this)); + + /// + /// All received requests. + /// + public IReadOnlyList Requests => _requests; + + /// + /// All generated responses. 
+ /// + public IReadOnlyList Responses => _responses; + + /// + /// Default handler that always returns an empty JSON payload as the response with the correct headers set + /// + /// The request + /// An empty successful JSON response + public static CapturedResponse ReturnEmptyJson(CapturedRequest request) + => new() + { + Status = HttpStatusCode.OK, + ReasonPhrase = "OK", + Content = BinaryData.FromString("{}"), + Headers = new Dictionary>() + { + ["Content-Type"] = ["application/json"], + ["Content-Length"] = ["2"] + } + }; + + /// + /// Default handler that returns an empty HTTP 204 payload + /// + /// The request + /// An HTTP 204 empty response + public static CapturedResponse ReturnEmpty(CapturedRequest request) + => new() { Status = HttpStatusCode.NoContent }; + + private HttpResponseMessage HandleRequest(HttpRequestMessage request, CancellationToken token) + { + try + { + CapturedRequest capturedRequest = new(request); + OnRequest?.Invoke(this, capturedRequest); + _requests.Add(capturedRequest); + + CapturedResponse capturedResponse = _handler(capturedRequest); + OnResponse?.Invoke(this, capturedResponse); + _responses.Add(capturedResponse); + + return capturedResponse.ToResponse(); + } + catch (Exception ex) + { + throw new ClientResultException("Failed to process request", null, ex); + } + } + + #region HttpMessagHandler implementation + +#if NET + override +#endif + protected HttpResponseMessage Send(HttpRequestMessage request, CancellationToken cancellationToken) + => HandleRequest(request, cancellationToken); + + protected override Task SendAsync(HttpRequestMessage request, CancellationToken cancellationToken) + => Task.FromResult(HandleRequest(request, cancellationToken)); + + #endregion +} diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/src/Mocks/MockPageCollection.cs b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Mocks/MockPageCollection.cs new file mode 100644 index 000000000..1f08987ae --- /dev/null +++ 
b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Mocks/MockPageCollection.cs @@ -0,0 +1,71 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using System.ClientModel; +using System.ClientModel.Primitives; + +namespace OpenAI.TestFramework.Mocks; + +/// +/// Represents a mock implementation of the class. +/// +/// The type of the values in the collection. +public class MockPageCollection : PageCollection +{ + private readonly Func> _enumerateFunc; + private readonly PipelineResponse _response; + private readonly int _itemsPerPage; + private PageResult? _currentPage; + + /// + /// Creates a new instance. + /// + /// The function used to enumerate the collection. + /// The pipeline response. + /// (Optional) The number of items per page. + public MockPageCollection(Func> enumerateFunc, PipelineResponse response, int itemsPerPage = 5) + { + if (itemsPerPage <= 0) + { + throw new ArgumentOutOfRangeException(nameof(itemsPerPage)); + } + + _enumerateFunc = enumerateFunc ?? throw new ArgumentNullException(nameof(enumerateFunc)); + _response = response; + _itemsPerPage = itemsPerPage; + } + + /// + protected override PageResult GetCurrentPageCore() + => _currentPage ?? 
throw new InvalidOperationException("Please call MoveNextAsync first."); + + /// + protected override IEnumerator> GetEnumeratorCore() + { + List items = new(_itemsPerPage); + int pageStart = 0; + int rolling = 0; + + foreach (TValue item in _enumerateFunc()) + { + items.Add(item); + rolling++; + if (items.Count == _itemsPerPage) + { + _currentPage = PageResult.Create(items, ToContinuation(pageStart), ToContinuation(rolling), _response); + yield return _currentPage; + items.Clear(); + pageStart = rolling; + } + } + + if (items.Count > 0) + { + _currentPage = PageResult.Create(items, ToContinuation(pageStart), null, _response); + yield return _currentPage; + } + } + + private static ContinuationToken ToContinuation(int offset) + => ContinuationToken.FromBytes(BinaryData.FromBytes(BitConverter.GetBytes(offset))); +} diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/src/Mocks/MockPipelineResponse.cs b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Mocks/MockPipelineResponse.cs new file mode 100644 index 000000000..1ade396df --- /dev/null +++ b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Mocks/MockPipelineResponse.cs @@ -0,0 +1,88 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using System.ClientModel.Primitives; + +namespace OpenAI.TestFramework.Mocks; + +/// +/// A mock implementation of a pipeline response +/// +public class MockPipelineResponse : PipelineResponse +{ + private Stream? _contentStream; + private BinaryData? _buffered; + + /// + /// Creates a new instance. + /// + /// (Optional) The HTTP status. + /// (Optional) The HTTP reason phrase. + /// (Optional) The HTTP response body content. + public MockPipelineResponse( + int? status = null, + string? reasonPhrase = null, + BinaryData? content = null) + { + Status = status ?? 200; + ReasonPhrase = reasonPhrase ?? 
"OK"; + _buffered = content; + ContentStream = content?.ToStream(); + HeadersCore = new MockResponseHeaders(); + } + + /// + public override int Status { get; } + + /// + public override string ReasonPhrase { get; } + + /// + public override Stream? ContentStream + { + get => _contentStream; + set + { + _contentStream = value; + _buffered = null; + } + } + + /// + public override BinaryData Content => _buffered ?? throw new InvalidOperationException("Response content is not yet buffered"); + + /// + protected override PipelineResponseHeaders HeadersCore { get; } + + /// + public override BinaryData BufferContent(CancellationToken cancellationToken = default) + => BufferContentSyncAsync(false, cancellationToken).GetAwaiter().GetResult(); + + /// + public override ValueTask BufferContentAsync(CancellationToken cancellationToken = default) + => BufferContentSyncAsync(true, cancellationToken); + + /// + public override void Dispose() + { + ContentStream?.Dispose(); + } + + private async ValueTask BufferContentSyncAsync(bool isAsync, CancellationToken token) + { + if (_buffered != null) + { + return _buffered; + } + + _buffered = ContentStream == null + ? BinaryData.FromBytes(Array.Empty()) + : isAsync + ? await BinaryData.FromStreamAsync(ContentStream, token).ConfigureAwait(false) + : BinaryData.FromStream(ContentStream); + + ContentStream?.Dispose(); + ContentStream = _buffered.ToStream(); + return _buffered; + } +} diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/src/Mocks/MockRequestHeaders.cs b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Mocks/MockRequestHeaders.cs new file mode 100644 index 000000000..e03c4cd18 --- /dev/null +++ b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Mocks/MockRequestHeaders.cs @@ -0,0 +1,38 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using System.ClientModel.Primitives; + +namespace OpenAI.TestFramework.Mocks; + +/// +/// Mock implementation of request headers. 
+/// +public class MockRequestHeaders : PipelineRequestHeaders +{ + private MockHeaders _headers = new(); + + /// + public override void Add(string name, string value) + => _headers.Add(name, value); + + /// + public override bool Remove(string name) + => _headers.Remove(name); + + /// + public override void Set(string name, string value) + => _headers.Set(name, value); + + /// + public override IEnumerator> GetEnumerator() + => _headers.GetEnumerator(); + + /// + public override bool TryGetValue(string name, out string? value) + => _headers.TryGetValue(name, out value); + + /// + public override bool TryGetValues(string name, out IEnumerable? values) + => _headers.TryGetValues(name, out values); +} diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/src/Mocks/MockResponseHeaders.cs b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Mocks/MockResponseHeaders.cs new file mode 100644 index 000000000..aead0b4b8 --- /dev/null +++ b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Mocks/MockResponseHeaders.cs @@ -0,0 +1,26 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using System.ClientModel.Primitives; + +namespace OpenAI.TestFramework.Mocks; + +/// +/// Mock implementation of response headers. +/// +public class MockResponseHeaders : PipelineResponseHeaders +{ + private MockHeaders _headers = new(); + + /// + public override IEnumerator> GetEnumerator() + => _headers.GetEnumerator(); + + /// + public override bool TryGetValue(string name, out string? value) + => _headers.TryGetValue(name, out value); + + /// + public override bool TryGetValues(string name, out IEnumerable? 
values) + => _headers.TryGetValues(name, out values); +} diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/src/Mocks/MockRestService.cs b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Mocks/MockRestService.cs new file mode 100644 index 000000000..58420f679 --- /dev/null +++ b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Mocks/MockRestService.cs @@ -0,0 +1,413 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using System.Collections.Concurrent; +using System.Net; +using System.Net.Sockets; +using System.Text.Json; +using OpenAI.TestFramework.Utils; + +namespace OpenAI.TestFramework.Mocks; + +/// +/// Represents a mock REST service for testing purposes. +/// +/// The type of data stored in the service. +public class MockRestService : IDisposable +{ + /// + /// Represents an entry in the mock REST service. + /// + /// The ID of the entry. + /// The data associated with the entry. + public record Entry(string id, TData data) + { +#if NETFRAMEWORK + public Entry() : this(string.Empty, default!) + { + // .Net framework System.Text.Json cannot deserialize records without a parameterless constructor + } +#endif + }; + + /// + /// Represents an error in the mock REST service. + /// + /// The error code. + /// The error message. + /// The stack trace of the error. + public record Error(int error, string message, string? stack = null); + + private static readonly JsonSerializerOptions s_options = new() + { + WriteIndented = true, +#pragma warning disable SYSLIB0020 + IgnoreNullValues = true +#pragma warning restore SYSLIB0020 + }; + + private ConcurrentDictionary _data; + private HttpListener _listener; + private CancellationTokenSource _cts; + private Task _workerTask; + + /// + /// Initializes a new instance of the class. + /// + /// (Optional) The base path of the service. + /// (Optional) The port number to listen on. If set to 0, a port will be automatically selected. + public MockRestService(string? 
basePath = null, ushort port = 0) + { + _data = new(); + basePath = basePath?.EnsureEndsWith("/"); + + int maxAttempts = port == 0 ? 15 : 1; + Exception? ex = null; + for (int i = 0; _listener == null && i < maxAttempts; i++) + { + _listener = TryStartListener(basePath ?? string.Empty, port, out ex)!; + } + + if (_listener == null || ex != null) + { + throw new ApplicationException("Failed to start the mock rest service", ex); + } + + HttpEndpoint = TerminatePathWithSlash(new Uri(_listener.Prefixes.First())); + _cts = new(); + _workerTask = Task.Run(() => WorkerAsync(_cts.Token), _cts.Token); + } + + /// + /// Gets the HTTP endpoint of the mock REST service. + /// + public Uri HttpEndpoint { get; } + + /// + /// Gets all entries in the mock REST service. + /// + /// An enumerable collection of entries. + public virtual IEnumerable GetAll() + => _data.Select(kvp => new Entry(kvp.Key, kvp.Value)); + + /// + /// Tries to get an entry from the mock REST service. + /// + /// The ID of the entry to get. + /// When this method returns, contains the entry associated with the specified ID, if found; otherwise, null. + /// true if the entry was found; otherwise, false. + public virtual bool TryGet(string id, out Entry? entry) + { + if (_data.TryGetValue(id, out TData? value)) + { + entry = new(id, value); + return true; + } + + entry = null; + return false; + } + + /// + /// Tries to add an entry to the mock REST service. + /// + /// The ID of the entry to add. + /// The data associated with the entry. + /// When this method returns, contains the added entry, if successful; otherwise, null. + /// true if the entry was added successfully; otherwise, false. + public virtual bool TryAdd(string id, TData data, out Entry? entry) + { + entry = null; + + if (_data.TryAdd(id, data)) + { + entry = new(id, data); + return true; + } + + return false; + } + + /// + /// Tries to delete an entry from the mock REST service. + /// + /// The ID of the entry to delete. 
+ /// true if the entry was deleted successfully; otherwise, false. + public virtual bool TryDelete(string id) + => _data.TryRemove(id, out _); + + /// + /// Tries to update an entry in the mock REST service. + /// + /// The ID of the entry to update. + /// The updated data for the entry. + /// When this method returns, contains the updated entry, if successful; otherwise, null. + /// true if the entry was updated successfully; otherwise, false. + public virtual bool TryUpdate(string id, TData data, out Entry? entry) + { + _data[id] = data; + entry = new(id, data); + return true; + } + + /// + /// Resets the mock REST service removing all entries. + /// + public virtual void Reset() + => _data.Clear(); + + /// + /// Disposes of the resources used by the mock REST service. + /// + public void Dispose() + { + _cts.Cancel(); + _listener.Stop(); + try { _workerTask.Wait(500); } catch { } + _listener.Close(); + _cts.Dispose(); + } + + /// + /// Worker method that handles incoming HTTP requests. + /// + /// The cancellation token. + protected virtual async Task WorkerAsync(CancellationToken token) + { + while (!token.IsCancellationRequested) + { + HttpListenerContext context = await _listener.GetContextAsync().ConfigureAwait(false); + HttpListenerRequest request = context.Request; + HttpListenerResponse response = context.Response; + + if (request == null || request.Url == null) + { + context.Response?.Abort(); + continue; + } + + try + { + response.ContentLength64 = 0; + + string? id = GetId(HttpEndpoint, request.Url); + switch (request.HttpMethod.ToUpperInvariant()) + { + case "GET": + if (id == null) + { + // Send down all data + IEnumerable allData = GetAll(); + WriteJsonResponse(response, 200, allData); + } + else if (TryGet(id, out Entry? 
entry) && entry != null) + { + WriteJsonResponse(response, 200, entry); + } + else + { + response.StatusCode = (int)HttpStatusCode.NotFound; + } + break; + + case "POST": + if (id == null) + { + response.StatusCode = (int)HttpStatusCode.BadRequest; + } + else + { + TData? data = ReadBody(request); + if (data == null) + { + response.StatusCode = (int)HttpStatusCode.GatewayTimeout; + } + else if (TryAdd(id, data, out Entry? entry)) + { + if (entry == null) + { + response.StatusCode = (int)HttpStatusCode.NoContent; + } + else + { + WriteJsonResponse(response, 200, entry); + } + } + else + { + response.StatusCode = (int)HttpStatusCode.Conflict; + } + } + break; + + case "PUT": + if (id == null) + { + response.StatusCode = (int)HttpStatusCode.BadRequest; + } + else + { + TData? data = ReadBody(request); + if (data == null) + { + response.StatusCode = (int)HttpStatusCode.GatewayTimeout; + } + else if (TryUpdate(id, data, out Entry? entry)) + { + if (entry == null) + { + response.StatusCode = (int)HttpStatusCode.NoContent; + } + else + { + WriteJsonResponse(response, 200, entry); + } + } + else + { + response.StatusCode = (int)HttpStatusCode.NotFound; + response.ContentLength64 = 0; + } + } + break; + + case "DELETE": + response.ContentLength64 = 0; + if (id == null) + { + response.StatusCode = (int)HttpStatusCode.BadRequest; + } + else if (TryDelete(id)) + { + response.StatusCode = (int)HttpStatusCode.NoContent; + } + else + { + response.StatusCode = (int)HttpStatusCode.NotFound; + } + break; + + default: + response.StatusCode = (int)HttpStatusCode.MethodNotAllowed; + break; + } + + response.Close(); + } + catch (Exception ex) + { + response.StatusCode = (int)HttpStatusCode.InternalServerError; + try + { + if (response.OutputStream.Length > 0 || response.OutputStream.CanSeek) + { + response.OutputStream.SetLength(0); + } + + if (response.OutputStream.Length == 0) + { + WriteJsonResponse( + response, + (int)HttpStatusCode.InternalServerError, + new Error( + 500, + 
ex.Message +#if DEBUG + , ex.StackTrace +#endif + )); + } + } + catch { /* we tried */ } + } + } + } + + private static ushort GetFreePort() + { + TcpListener? listener = null; + try + { + listener = new TcpListener(IPAddress.Loopback, 0); + listener.Start(); + return (ushort)((IPEndPoint)listener.LocalEndpoint).Port; + } + finally + { + listener?.Stop(); + } + } + + private static HttpListener? TryStartListener(string basePath, ushort port, out Exception? ex) + { + if (port == 0) + { + port = GetFreePort(); + } + + HttpListener? listener = null; + try + { + listener = new(); + listener.Prefixes.Add($"http://localhost:{port}/{basePath}"); + listener.Start(); + ex = null; + return listener; + } + catch (Exception e) + { + listener?.Close(); + ex = e; + return null; + } + } + + private static Uri TerminatePathWithSlash(Uri uri) + { + if (uri.IsAbsoluteUri) + { + if (!uri.AbsolutePath.EndsWith("/")) + { + UriBuilder builder = new(uri); + builder.Path += '/'; + return builder.Uri; + } + } + else if (!uri.OriginalString.EndsWith("/")) + { + return new Uri(uri.OriginalString + '/', UriKind.RelativeOrAbsolute); + } + + return uri; + } + + private static string? GetId(Uri baseUri, Uri requestUri) + { + Uri normalizedRequestUri = TerminatePathWithSlash(requestUri); + Uri relative = baseUri.MakeRelativeUri(normalizedRequestUri); + return relative.OriginalString.Split(["/"], StringSplitOptions.RemoveEmptyEntries).FirstOrDefault(); + } + + private static TData? 
ReadBody(HttpListenerRequest request) + { + if (request.ContentLength64 == 0) + { + return default; + } + + return JsonHelpers.Deserialize(request.InputStream, s_options); + } + + private static void WriteJsonResponse(HttpListenerResponse response, int status, T data) + { + response.StatusCode = status; + + using MemoryStream buffer = new(); + JsonHelpers.Serialize(buffer, data, s_options); + buffer.Seek(0, SeekOrigin.Begin); + + response.ContentType = "application/json"; + response.ContentLength64 = buffer.Length; + buffer.CopyTo(response.OutputStream); + } +} diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/src/Mocks/MockRestServiceClient.cs b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Mocks/MockRestServiceClient.cs new file mode 100644 index 000000000..a3fb851d3 --- /dev/null +++ b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Mocks/MockRestServiceClient.cs @@ -0,0 +1,274 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + + +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Globalization; +using System.Net.Http; +using OpenAI.TestFramework.Utils; + +namespace OpenAI.TestFramework.Mocks; + +/// +/// A client for . +/// +/// The type of data used by the client. +public class MockRestServiceClient : IDisposable +{ + private ClientPipeline _pipeline; + private Uri _baseUri; + + /// + /// Only used to generate a dynamic proxy for testing. Do not use this yourself. + /// + internal MockRestServiceClient() + { + _pipeline = null!; + _baseUri = null!; + } + + /// + /// Initializes a new instance of the class with the specified service URI and options. + /// + /// The service URI. + /// The client pipeline options. + public MockRestServiceClient(Uri serviceUri, ClientPipelineOptions? options = null) + { + _pipeline = ClientPipeline.Create(options); + _baseUri = serviceUri ?? 
throw new ArgumentNullException(nameof(serviceUri)); + } + + /// + /// Adds data asynchronously to the service with the specified ID. + /// + /// The ID of the data. + /// The data to add. + /// The cancellation token. + /// A task representing the asynchronous operation. + public virtual Task AddAsync(string id, TData data, CancellationToken token = default) + { + if (string.IsNullOrWhiteSpace(id)) + throw new ArgumentException("Value cannot be null or whitespace.", nameof(id)); + + ValidateData(data); + return SendSyncOrAsync(true, HttpMethod.Post, id, data, token).AsTask(); + } + + /// + /// Adds data synchronously to the service with the specified ID. + /// + /// The ID of the data. + /// The data to add. + /// The cancellation token. + /// The result of the operation. + public virtual ClientResult Add(string id, TData data, CancellationToken token = default) + { + if (string.IsNullOrWhiteSpace(id)) + throw new ArgumentException("Value cannot be null or whitespace.", nameof(id)); + + ValidateData(data); + return SendSyncOrAsync(false, HttpMethod.Post, id, data, token).GetAwaiter().GetResult(); + } + + /// + /// Gets data asynchronously from the service with the specified ID. Will return null if the data does not exist. + /// + /// The ID of the data. + /// The cancellation token. + /// A task representing the asynchronous operation. 
+ public virtual async Task> GetAsync(string id, CancellationToken token = default) + { + if (string.IsNullOrWhiteSpace(id)) + throw new ArgumentException("Value cannot be null or whitespace.", nameof(id)); + + try + { + ClientResult result = await SendSyncOrAsync(true, HttpMethod.Get, id, default, token) + .ConfigureAwait(false); + + var response = result.GetRawResponse(); + return ClientResult.FromOptionalValue( + response.Content.ToObjectFromJson.Entry>().data, + response); + } + catch (ClientResultException ex) + { + if (ex.GetRawResponse()?.Status == 404) + { + return ClientResult.FromOptionalValue(default, ex.GetRawResponse()!); + } + + throw; + } + } + + /// + /// Gets data synchronously from the service with the specified ID. Will return null if the data does not exist. + /// + /// The ID of the data. + /// The cancellation token. + /// The result of the operation. + public virtual ClientResult Get(string id, CancellationToken token = default) + { + if (string.IsNullOrWhiteSpace(id)) + throw new ArgumentException("Value cannot be null or whitespace.", nameof(id)); + + try + { + ClientResult result = SendSyncOrAsync(false, HttpMethod.Get, id, default, token).GetAwaiter().GetResult(); + var response = result.GetRawResponse(); + return ClientResult.FromOptionalValue( + response.Content.ToObjectFromJson.Entry>().data, + response); + } + catch (ClientResultException ex) + { + if (ex.GetRawResponse()?.Status == 404) + { + return ClientResult.FromOptionalValue(default, ex.GetRawResponse()!); + } + + throw; + } + } + + /// + /// Removes data asynchronously from the service with the specified ID. + /// + /// The ID of the data. + /// The cancellation token. + /// A task representing the asynchronous operation. 
+ public virtual async Task> RemoveAsync(string id, CancellationToken token = default) + { + if (string.IsNullOrWhiteSpace(id)) + throw new ArgumentException("Value cannot be null or whitespace.", nameof(id)); + + try + { + ClientResult result = await SendSyncOrAsync(true, HttpMethod.Delete, id, default, token); + return ClientResult.FromValue(true, result.GetRawResponse()); + } + catch (ClientResultException ex) + { + if (ex.GetRawResponse()?.Status == 404) + { + return ClientResult.FromValue(false, ex.GetRawResponse()!); + } + + throw; + } + } + + /// + /// Removes data synchronously from the service with the specified ID. + /// + /// The ID of the data. + /// The cancellation token. + /// The result of the operation. + public virtual ClientResult Remove(string id, CancellationToken token = default) + { + if (string.IsNullOrWhiteSpace(id)) + throw new ArgumentException("Value cannot be null or whitespace.", nameof(id)); + + try + { + ClientResult result = SendSyncOrAsync(false, HttpMethod.Delete, id, default, token).GetAwaiter().GetResult(); + return ClientResult.FromValue(true, result.GetRawResponse()); + } + catch (ClientResultException ex) + { + if (ex.GetRawResponse()?.Status == 404) + { + return ClientResult.FromValue(false, ex.GetRawResponse()!); + } + + throw; + } + } + + /// + /// Disposes of the resources used by the client. + /// + public virtual void Dispose() + { + // no obvious way to dispose of the pipeline, nor the inner transport + } + + /// + /// Validates the data before sending it to the service. + /// + /// The data to validate. + protected virtual void ValidateData(TData? data) + { + if (data == null) + { + throw new ArgumentNullException(nameof(data)); + } + } + + /// + /// Sends the request to the service synchronously or asynchronously. This will serialize the passed in data to JSON using the default + /// serializer. + /// + /// Indicates whether the request should be sent asynchronously. + /// The HTTP method. + /// The ID of the data. 
+ /// The data to send. + /// The cancellation token. + /// The result of the operation. + protected async ValueTask SendSyncOrAsync(bool isAsync, HttpMethod method, string? id, TData? data, CancellationToken token) + { + UriBuilder builder = new(_baseUri); + if (id != null) + { + builder.Path += id; + } + + PipelineMessage message = _pipeline.CreateMessage(); + message.Request.Method = method.Method; + message.Request.Uri = builder.Uri; + message.Apply(new RequestOptions() + { + CancellationToken = token, + BufferResponse = true + }); + + if (data == null) + { + message.Request.Headers.Set("Content-Length", "0"); + } + else + { + using MemoryStream stream = new(); + JsonHelpers.Serialize(stream, data); + var binaryData = BinaryData.FromBytes(new ReadOnlyMemory(stream.GetBuffer(), 0, (int)stream.Length)); + + message.Request.Headers.Set("Content-Length", stream.Length.ToString(CultureInfo.InvariantCulture)); + message.Request.Headers.Set("Content-Type", "application/json"); + message.Request.Content = BinaryContent.Create(binaryData); + } + + if (isAsync) + { + await _pipeline.SendAsync(message).ConfigureAwait(false); + } + else + { + _pipeline.Send(message); + } + + if (message.Response?.IsError == true) + { + if (message.Response.Content?.ToMemory().Length > 0) + { + var error = message.Response.Content.ToObjectFromJson.Error>(); + throw new ClientResultException($"Error {error.error}: {error.message}", message.Response); + } + + throw new ClientResultException(message.Response); + } + + return ClientResult.FromResponse(message.Response!); + } +} diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/src/OpenAI.TestFramework.csproj b/.dotnet.azure/sdk/openai/tools/TestFramework/src/OpenAI.TestFramework.csproj new file mode 100644 index 000000000..dc92fd798 --- /dev/null +++ b/.dotnet.azure/sdk/openai/tools/TestFramework/src/OpenAI.TestFramework.csproj @@ -0,0 +1,39 @@ + + + + $(RequiredTargetFrameworks) + enable + enable + latest + + + + + 
Utils\Polyfill\%(RecursiveDir)\%(Filename).cs + + + + + + + + + + + + + + 0024000004800000940000000602000000240000525341310004000001000100c547cac37abd99c8db225ef2f6c8a3602f3b3606cc9891605d02baa56104f4cfc0734aa39b93bf7852f7d9266654753cc297e7d2edfe0bac1cdcf9f717241550e0a7b191195b7667bb4f64bcb8e2121380fd1d9d46ad2d92d2d15605093924cceaf74c4861eff62abf69b9291ed0a340e113be11e6a7d3113e92484cf7045cc7 + + + + + + + <_Parameter1>TestProxyPath + <_Parameter2>$(NuGetPackageRoot)\azure.sdk.tools.testproxy\$(TestProxyVersion)\tools\net6.0\any\Azure.Sdk.Tools.TestProxy.dll + + + + diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/src/RecordedClientTestBase.cs b/.dotnet.azure/sdk/openai/tools/TestFramework/src/RecordedClientTestBase.cs new file mode 100644 index 000000000..22766ebaa --- /dev/null +++ b/.dotnet.azure/sdk/openai/tools/TestFramework/src/RecordedClientTestBase.cs @@ -0,0 +1,449 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using System.ClientModel.Primitives; +using System.Diagnostics; +using System.Net; +using System.Text; +using NUnit.Framework; +using NUnit.Framework.Internal; +using OpenAI.TestFramework.Recording; +using OpenAI.TestFramework.Recording.Proxy; +using OpenAI.TestFramework.Recording.Proxy.Service; +using OpenAI.TestFramework.Recording.RecordingProxy; +using OpenAI.TestFramework.Utils; + +namespace OpenAI.TestFramework; + +/// +/// Base class for client test cases that supports recording and playback of HTTP/HTTPS REST requests. This recording +/// support is provided by use of the Test Proxy . +/// This provides the basic framework to start the Test Proxy, create a recording for a test or playback a recording +/// for a test. It also provides support for automatic testing of async and sync versions of methods (see +/// for more details). 
+/// +[NonParallelizable] +public abstract class RecordedClientTestBase : ClientTestBase +{ + /// + /// Invalid characters that will be removed from test names when creating recordings. + /// + /// + /// Using Windows version as it is the most restrictive of all platforms: + /// + /// + protected static readonly ISet s_invalidChars = new HashSet() + { + '\"', '<', '>', '|', '\0', + (char)1, (char)2, (char)3, (char)4, (char)5, (char)6, (char)7, (char)8, (char)9, (char)10, + (char)11, (char)12, (char)13, (char)14, (char)15, (char)16, (char)17, (char)18, (char)19, (char)20, + (char)21, (char)22, (char)23, (char)24, (char)25, (char)26, (char)27, (char)28, (char)29, (char)30, + (char)31, ':', '*', '?', '\\', '/' + }; + + private DateTimeOffset _testStartTime; + private TestRecordingOptions _options; + + /// + /// Creates a new instance. + /// + /// True to run the async version of a test, false to run the sync version of a test. + public RecordedClientTestBase(bool isAsync) : this(isAsync, null, null) + { } + + /// + /// Creates a new instance. + /// + /// True to run the async version of a test, false to run the sync version of a test. + /// (Optional) The recorded test mode to use. If unset, the default recorded test mode will be used. + /// (Optional) Whether or not to attempt to record automatically in the case of missing recordings + /// or recording mismatches. + public RecordedClientTestBase(bool isAsync, RecordedTestMode? mode = null, bool? automaticRecord = null) : base(isAsync) + { + _options = new TestRecordingOptions(); + Mode = mode ?? GetDefaultRecordedTestMode(); + AutomaticRecord = automaticRecord ?? (!IsRunningInCI && GetDefaultAutomaticRecordEnabled()); + } + + /// + public override DateTimeOffset TestStartTime => _testStartTime; + + /// + /// Gets the test proxy instance to use for the current test case. + /// + public ProxyService? Proxy { get; protected internal set; } + + /// + /// Gets or sets the current recording mode for the test. 
+ /// + public RecordedTestMode Mode { get; set; } + + /// + /// Gets or sets whether or not we should attempt to record a test if there is a recording mismatch, or the recording + /// file is missing. + /// + public bool AutomaticRecord { get; set; } + + /// + /// Gets or sets the recording options to use for the current test. This will be pre-populated with a sensible configuration. + /// + public TestRecordingOptions RecordingOptions + { + get => _options; + set => _options = value ?? throw new ArgumentNullException(nameof(value)); + } + + /// + /// Gets the recording for the current test. + /// + public TestRecording? Recording { get; protected internal set; } + + /// + /// Gets the maximum amount of time to wait for starting/tearing down the test proxy, as well as the maximum amount of time + /// to wait for configuring a recording session, and then saving it or closing it. + /// + public virtual TimeSpan TestProxyWaitTime => Debugger.IsAttached + ? Default.DebuggerAttachedTestTimeout + : Default.TestProxyWaitTime; + + /// + /// Gets the test timeout. + /// + public override TimeSpan TestTimeout + { + get + { + if (Debugger.IsAttached) + { + return Default.DebuggerAttachedTestTimeout; + } + + switch (Mode) + { + default: + case RecordedTestMode.Record: + case RecordedTestMode.Live: + return TimeSpan.FromSeconds(60); + + case RecordedTestMode.Playback: + return Default.TestTimeout; + } + } + } + + /// + /// Determines whether or not to use Fiddler. If this is true, then the recording transport will be updated to use Fiddler + /// as the intermediary when talking to the test proxy, as well as accept the Fiddler root certificate. + /// + public virtual bool UseFiddler + { + get + { + // Check to see if Fiddler is already running and capturing traffic by checking to see if a proxy is configured for + // 127.0.0.1:8888 with no credentials + try + { + Uri dummyUri = new("https://not.a.real.uri.com"); + + IWebProxy webProxy = WebRequest.GetSystemWebProxy(); + Uri? 
proxyUri = webProxy?.GetProxy(dummyUri); + if (proxyUri == null || proxyUri == dummyUri) + { + return false; + } + + // assume default of 127.0.0.1:8888 with no credentials + var cred = webProxy?.Credentials?.GetCredential(dummyUri, string.Empty); + return proxyUri.Host == "127.0.0.1" + && proxyUri.Port == 8888 + && string.IsNullOrWhiteSpace(cred?.UserName) + && string.IsNullOrWhiteSpace(cred?.Password); + } + catch + { + return false; + } + } + } + + /// + /// Gets whether or not we are running the tests in CI/CD (e.g. GitHub workflows) + /// + public virtual bool IsRunningInCI => new string?[] + { + Environment.GetEnvironmentVariable("CI"), // GitHub workflows + Environment.GetEnvironmentVariable("TF_BUILD"), // Azure DevOps + } + .Any(s => s != null); + + /// + /// Checks if the recording has a recorded value for . If there is none, the + /// will be added and return. Otherwise the existing value will be returned. + /// + /// The name of the value. + /// The value to add. + /// The existing value, or the newly added value. + /// If you called this function outside of a test run. + public string? GetOrAddRecordedValue(string name, string valueToAdd) + => GetOrAddRecordedValue(name, () => valueToAdd); + + /// + /// Checks if the recording has a recorded value for . If there is none, a value will be created, added + /// and returned. Otherwise the existing value will be returned. + /// + /// The name of the value. + /// The factory used to create the value. + /// The existing value, or the newly added value. + /// If you called this function outside of a test run. + public virtual string GetOrAddRecordedValue(string name, Func valueFactory) + { + if (Recording == null) + { + throw new InvalidOperationException("Recorded value should not be retrieved outside the test method invocation"); + } + + return Recording.GetOrAddVariable(name, valueFactory); + } + + /// + /// Starts the test proxy for the current test. 
This will be called once at the start of the test fixture. + /// + /// Asynchronous task. + [OneTimeSetUp] + public virtual async Task StartTestProxyAsync() + { + using CancellationTokenSource cts = new(TestProxyWaitTime); + + ProxyServiceOptions options = CreateProxyServiceOptions(); + Proxy = await ProxyService.CreateNewAsync(options, cts.Token).ConfigureAwait(false); + } + + [OneTimeTearDown] + public virtual Task StopTestProxyAsync() + { + Proxy?.Dispose(); + Proxy = null; + + //TODO FIXME: Do we need to do any cleanup here? + return Task.CompletedTask; + } + + /// + /// Starts the test proxy (if it has not already been started), and then configures the recording session for the current + /// test. This should also set the property to the new recording session. + /// + /// Asynchronous task. + [SetUp] + public virtual async Task StartTestRecordingAsync() + { + // Check if the current NUnit test method has a specific attribute applied to it + if (!IsCurrentTestRecorded()) + { + return; + } + + if (Proxy == null) + { + throw new InvalidOperationException("The proxy service was not set and/or started"); + } + + _testStartTime = DateTimeOffset.UtcNow; + + // TODO FIXME: Add logic to ignore certain tests here by throwing IgnoreException()? + + using CancellationTokenSource cts = new(TestProxyWaitTime); + Recording = await StartAndConfigureRecordingSessionAsync(Proxy, cts.Token).ConfigureAwait(false); + + // don't include test proxy overhead as part of the test time + _testStartTime = DateTimeOffset.UtcNow; + } + + /// + /// Stops a recording session for the current test. If the test passed and we are in recording mode, the recording will be saved, + /// otherwise it will be discarded. + /// + /// Asynchronous task. 
+ [TearDown] + public virtual async Task StopTestRecordingAsync() + { + if (!IsCurrentTestRecorded()) + { + return; + } + + bool testsPassed = TestContext.CurrentContext.Result.Outcome.Status == NUnit.Framework.Interfaces.TestStatus.Passed; + using CancellationTokenSource cts = new(TestProxyWaitTime); + + if (Recording != null) + { + await Recording.FinishAsync(testsPassed, cts.Token).ConfigureAwait(false); + } + } + + /// + /// Configures the client options for a System.ClientModel based service client. This will be used to configure the transport + /// such that all requests are routed to the test proxy during recording (for capture), and playback (for replaying captured + /// requests). + /// + /// The type of the client options. + /// The options to configure. + /// The configured client options. + /// The current recording mode is not supported. + /// There was no test recording configured for this test. + public virtual TClientOptions ConfigureClientOptions(TClientOptions options) + where TClientOptions : ClientPipelineOptions + { + if (!IsCurrentTestRecorded()) + { + return options; + } + + // If we are in playback, or record mode we should set the transport to the test proxy transport, except + // in the case where we've explicitly specified the transport ourselves in case we are doing some custom + // work. 
+ if (options.Transport != null) + { + return options; + } + + switch (Mode) + { + case RecordedTestMode.Live: + // no need to to anything special + return options; + + case RecordedTestMode.Record: + // continue + break; + + case RecordedTestMode.Playback: + // force the use of a fixed retry with a short timeout + options.RetryPolicy = new TestClientRetryPolicy(delay: TimeSpan.FromMilliseconds(100)); + break; + + default: + throw new NotSupportedException("The following mode is not supported: " + Mode); + } + + if (Recording == null) + { + throw new InvalidOperationException("Please call this from within a test method invocation"); + } + + ProxyTransportOptions transportOptions = Recording.GetProxyTransportOptions(); + transportOptions.UseFiddler = UseFiddler; + if (_options.RequestOverride != null) + { + transportOptions.ShouldRecordRequest = _options.RequestOverride; + } + + options.Transport = new ProxyTransport(transportOptions); + return options; + } + + /// + /// Gets the default recorded test mode to use. + /// + /// The test mode to use. + protected virtual RecordedTestMode GetDefaultRecordedTestMode() => RecordedTestMode.Playback; + + /// + /// Gets the default value for whether or not to automatically record a test if there is a recording mismatch, or the recording + /// file is missing. + /// + /// True or false. + protected virtual bool GetDefaultAutomaticRecordEnabled() => true; + + /// + /// Gets the name of recording JSON file that contains the recording. This will be based on a sanitized version + /// of test name, and "Async" will be automatically appended when running the asynchronous versions of tests. + /// + /// The name of the test to use. 
+ protected virtual string GetRecordedTestFileName() + { + const string c_asyncSuffix = "Async"; + TestContext.TestAdapter testAdapter = TestContext.CurrentContext.Test; + + StringBuilder builder = new(testAdapter.Name.Length + c_asyncSuffix.Length); + foreach (char c in testAdapter.Name) + { + builder.Append(s_invalidChars.Contains(c) ? '%' : c); + } + + if (IsAsync) + { + builder.Append(c_asyncSuffix); + } + + builder.Append(".json"); + + return builder.ToString(); + } + + /// + /// Configures a recording/playback session for the current test on the test proxy. This is called at the start of every test. + /// It is responsible for configuring all the necessary sanitizers, matchers, and transforms for the test proxy. + /// + /// The test proxy service to configure the recording session for. + /// The cancellation token to use. + /// The configured test recording session. + /// The test proxy service instance did not have a valid client configured. + /// The recording mode is not supported. + protected virtual async Task StartAndConfigureRecordingSessionAsync(ProxyService proxy, CancellationToken token) + { + var client = proxy.Client ?? throw new ArgumentNullException("Test proxy client was null"); + IDictionary? variables = null; + + ProxyClientResult result; + switch (Mode) + { + case RecordedTestMode.Live: + // nothing to see here + return new TestRecording(string.Empty, RecordedTestMode.Live, proxy); + + case RecordedTestMode.Playback: + var playbackResult = await client.StartPlaybackAsync(CreateRecordingSessionStartInfo(), token).ConfigureAwait(false); + variables = playbackResult.Value; + result = playbackResult; + break; + + case RecordedTestMode.Record: + result = await client.StartRecordingAsync(CreateRecordingSessionStartInfo(), token).ConfigureAwait(false); + break; + + default: + throw new NotSupportedException("Don't know how to handle recording mode: " + Mode); + } + + string? 
recordingId = result.RecordingId; + if (string.IsNullOrWhiteSpace(recordingId)) + { + throw new InvalidOperationException("Recording test proxy did not return a recording ID"); + } + + TestRecording recording = new TestRecording(recordingId!, Mode, proxy, variables); + await recording.ApplyOptions(_options, token).ConfigureAwait(false); + return recording; + } + + /// + /// Determines whether or not the current test should be recorded (or played back from a file). + /// + /// True to enable the use of the recording test proxy, false otherwise. + protected virtual bool IsCurrentTestRecorded() + { + return TestExecutionContext.CurrentContext.CurrentTest.GetCustomAttributes(true).Any(); + } + + /// + /// Creates the options used when starting a new instance of the test proxy service. + /// + /// The options to use. + protected abstract ProxyServiceOptions CreateProxyServiceOptions(); + + /// + /// Creates the information used to configured a recording/playback session for the current test on the test proxy. + /// + /// The information to use. + protected abstract RecordingStartInformation CreateRecordingSessionStartInfo(); +} diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/src/RecordedTestAttribute.cs b/.dotnet.azure/sdk/openai/tools/TestFramework/src/RecordedTestAttribute.cs new file mode 100644 index 000000000..5f98e8d75 --- /dev/null +++ b/.dotnet.azure/sdk/openai/tools/TestFramework/src/RecordedTestAttribute.cs @@ -0,0 +1,132 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using System.Text; +using NUnit.Framework; +using NUnit.Framework.Interfaces; +using NUnit.Framework.Internal.Commands; +using NUnit.Framework.Internal; +using OpenAI.TestFramework.Recording; + +namespace OpenAI.TestFramework; + +/// +/// An attribute used to indicate that a test should be recorded (or played back from a file). 
When you inherit from +/// in your test class, and add this attribute to your test function, and then +/// make sure to call +/// on the client options you use to configure a client, this should automatically enable the recording/playback +/// functionality. By default. this will also automatically try to re-record the test if it fails during playback. +/// +[AttributeUsage(AttributeTargets.Method, AllowMultiple = false, Inherited = true)] +public class RecordedTestAttribute : TestAttribute, IRepeatTest +{ + /// + /// Whether or not to automatically try to record the test again in the case of a recording mismatch, or missing + /// test recording. + /// + public bool AutomaticRecord { get; set; } = true; + + public TestCommand Wrap(TestCommand command) + { + // For some reason, the test fixture may be set on the parent of the current test, and not the current test + // itself. Let's handle this + ITest? test = command.Test; + while (test.Fixture == null && test.Parent != null) + { + test = test.Parent; + } + + // If the test fixture extends RecordedClientTestBase, we are in playback mode, and auto-rerecord + // is enabled, wrap the command to enable the retry in Record mode + if (AutomaticRecord + && test?.Fixture is RecordedClientTestBase testBase + && testBase.AutomaticRecord + && testBase.Mode == RecordedTestMode.Playback) + { + return new AutoRerecordCommand(command, testBase); + } + + return command; + } + + private class AutoRerecordCommand(TestCommand inner, RecordedClientTestBase testBase) : DelegatingTestCommand(inner) + { + private readonly RecordedClientTestBase _testBase = testBase ?? 
throw new ArgumentNullException(nameof(testBase)); + + public override TestResult Execute(TestExecutionContext context) + { + context.CurrentResult = innerCommand.Execute(context); + if (IsRecordingPlaybackFailure(context.CurrentResult)) + { + try + { + _testBase.Mode = RecordedTestMode.Record; + TestResult originalResult = context.CurrentResult; + + context.CurrentResult = context.CurrentTest.MakeTestResult(); + context.CurrentResult = innerCommand.Execute(context); + + // If the recording succeeded, update the original message to reflect this + ResultState state; + string? stackTrace; + StringBuilder builder = new(); + if (context.CurrentResult.ResultState?.Status == TestStatus.Passed) + { + state = originalResult.ResultState; + stackTrace = originalResult.StackTrace; + builder.AppendLine("Test failed playback, but was successfully re-recorded. It should pass if re-run."); + } + else + { + state = context.CurrentResult.ResultState ?? ResultState.Error; + stackTrace = context.CurrentResult.StackTrace; + builder.AppendLine("Re-recording attempt failed. Error: "); + builder.AppendLine(); + builder.AppendLine(context.CurrentResult.Message); + builder.AppendLine(); + builder.AppendLine("Original message:"); + } + + builder.AppendLine(); + builder.Append(originalResult.Message); + context.CurrentResult.SetResult(state, builder.ToString(), stackTrace); + } + finally + { + _testBase.Mode = RecordedTestMode.Playback; + } + } + + return context.CurrentResult; + } + + private static bool IsRecordingPlaybackFailure(TestResult result) + { + string exceptionName = typeof(TestRecordingMismatchException).FullName + ?? nameof(TestRecordingMismatchException); + + // 1. Check if the test passed + bool testPassed = result.ResultState?.Status switch + { + TestStatus.Passed => true, + TestStatus.Inconclusive => true, + TestStatus.Skipped => true, + _ => false + }; + + if (testPassed) + { + return false; + } + + // 2. 
Check if the failure message indicates a recording playback exception. This sadly requires us to check test failure + // messages which can be a little fragile but there does not seem to be a way to get the exception directly + if (result.Message?.Contains(exceptionName) == true) + { + return true; + } + + return false; + } + } +} diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/src/RecordedTestMode.cs b/.dotnet.azure/sdk/openai/tools/TestFramework/src/RecordedTestMode.cs new file mode 100644 index 000000000..bc0371ccf --- /dev/null +++ b/.dotnet.azure/sdk/openai/tools/TestFramework/src/RecordedTestMode.cs @@ -0,0 +1,25 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +namespace OpenAI.TestFramework; + +/// +/// The recording mode. +/// +public enum RecordedTestMode +{ + /// + /// Talk to live services. No recording or playback is used. + /// + Live, + + /// + /// Record the test and overwrite any existing recordings. + /// + Record, + + /// + /// Playback the test from a recording. + /// + Playback, +} diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/Condition.cs b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/Condition.cs new file mode 100644 index 000000000..6b0bdad00 --- /dev/null +++ b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/Condition.cs @@ -0,0 +1,16 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +namespace OpenAI.TestFramework.Recording; + +/// +/// A condition used to evaluate whether or not a sanitizer should be applied. +/// +public class Condition +{ + /// Gets or sets the uri regex. + public string? UriRegex { get; set; } + + /// Header condition to apply. + public HeaderCondition? 
ResponseHeader { get; set; } +} diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/HeaderCondition.cs b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/HeaderCondition.cs new file mode 100644 index 000000000..12d2ba55c --- /dev/null +++ b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/HeaderCondition.cs @@ -0,0 +1,15 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +namespace OpenAI.TestFramework.Recording; + +/// +/// Header condition to apply. +/// +public class HeaderCondition +{ + /// Gets or sets the key. + public string? Key { get; set; } + /// Gets or sets the value regex. + public string? ValueRegex { get; set; } +} diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/Matchers/BaseMatcher.cs b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/Matchers/BaseMatcher.cs new file mode 100644 index 000000000..c4578d763 --- /dev/null +++ b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/Matchers/BaseMatcher.cs @@ -0,0 +1,38 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using System.Text.Json; +using System.Text.Json.Serialization; +using OpenAI.TestFramework.Utils; + +namespace OpenAI.TestFramework.Recording.Matchers; + +/// +/// The base class for matchers that are applied during a playback session to match an incoming request +/// to a recorded one. +/// +public abstract class BaseMatcher : IUtf8JsonSerializable +{ + /// + /// Creates a new instance. + /// + /// The type of this sanitizer (e.g. GeneralRegexSanitizer). + /// If the type was null. + protected BaseMatcher(string type) + { + Type = type ?? throw new ArgumentNullException(nameof(type)); + } + + /// + /// Gets the type of the matcher (e.g. BodilessMatcher). + /// + [JsonIgnore] + public string Type { get; } + + /// + public virtual void Write(Utf8JsonWriter writer, JsonSerializerOptions? 
options = null) + { + // By default use reflection based serialization + JsonSerializer.Serialize(writer, this, GetType(), Default.InnerRecordingJsonOptions); + } +} diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/Matchers/CustomMatcher.cs b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/Matchers/CustomMatcher.cs new file mode 100644 index 000000000..80e0f1b6d --- /dev/null +++ b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/Matchers/CustomMatcher.cs @@ -0,0 +1,42 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +namespace OpenAI.TestFramework.Recording.Matchers; + +/// +/// This matcher exposes the default matcher in a customizable way. Currently this merely includes enabling/disabling body match and +/// adding additional excluded headers. All optional settings are safely defaulted. This means that providing zero additional +/// configuration will produce a sanitizer that is functionally identical to the default. +/// +public class CustomMatcher() : BaseMatcher("CustomDefaultMatcher") +{ + /// + /// A comma separated list of additional headers that should be excluded during matching. "Excluded" headers are entirely ignored. + /// Unlike "ignored" headers, the presence (or lack of presence) of a header will not cause mismatch. + /// + public string? ExcludedHeaders { get; set; } + + /// + /// Should the body value be compared during lookup operations? + /// + public bool? CompareBodies { get; set; } + + /// + /// A comma separated list of additional headers that should be ignored during matching. Any headers that are "ignored" will not + /// do value comparison when matching. This means that if the recording has a header that isn't in the request, a test mismatch + /// exception will be thrown noting the lack of header in the request. This also applies if the header is present in the request + /// but not recording. + /// + public string? 
IgnoredHeaders { get; set; } + + /// + /// A comma separated list of query parameters that should be ignored during matching. + /// + public string? IgnoredQueryParameters { get; set; } + + /// + /// By default, the test-proxy does not sort query params before matching. Setting true will sort query params alphabetically + /// before comparing URI. + /// + public bool? IgnoreQueryOrdering { get; set; } +} diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/Matchers/ExistingMatcher.cs b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/Matchers/ExistingMatcher.cs new file mode 100644 index 000000000..8d2cb4eab --- /dev/null +++ b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/Matchers/ExistingMatcher.cs @@ -0,0 +1,35 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using System.Text.Json; + +namespace OpenAI.TestFramework.Recording.Matchers; + +/// +/// Used for specifying the use of pre-existing matchers defined in the test proxy. +/// +/// The name of the existing matcher. +public class ExistingMatcher(string existingMatcherName) : BaseMatcher(existingMatcherName) +{ + private static ExistingMatcher? _bodiless = null; + private static ExistingMatcher? _headerless = null; + + /// + /// This matcher adjusts the "match" operation to EXCLUDE the body when matching a request to a recording's entries. + /// + public static ExistingMatcher Bodiless => _bodiless ??= new ExistingMatcher("BodilessMatcher"); + + /// + /// NOT RECOMMENDED. This matcher adjusts the "match" operation to ignore header differences when matching a request. + /// Be aware that wholly ignoring headers during matching might incur unexpected issues down the line. + /// + public static ExistingMatcher Headerless => _headerless ??= new ExistingMatcher("HeaderlessMatcher"); + + /// + public override void Write(Utf8JsonWriter writer, JsonSerializerOptions? 
options = null) + { + // Pre-existing matchers use an empty JSON object. + writer.WriteStartObject(); + writer.WriteEndObject(); + } +} diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/Proxy/ProxyClient.cs b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/Proxy/ProxyClient.cs new file mode 100644 index 000000000..55660d990 --- /dev/null +++ b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/Proxy/ProxyClient.cs @@ -0,0 +1,679 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Net.Http; +using System.Text.Json; +using OpenAI.TestFramework.Recording.Matchers; +using OpenAI.TestFramework.Recording.Proxy; +using OpenAI.TestFramework.Recording.Proxy.Service; +using OpenAI.TestFramework.Recording.Sanitizers; +using OpenAI.TestFramework.Recording.Transforms; +using OpenAI.TestFramework.Utils; + +namespace OpenAI.TestFramework.Recording.RecordingProxy; + +/// +/// A client for configuring the recording text proxy. Please see here for more information: +/// https://github.com/Azure/azure-sdk-tools/blob/main/tools/test-proxy/Azure.Sdk.Tools.TestProxy/README.md +/// +public class ProxyClient +{ + protected internal const string X_RECORDING_ID_HEADER = "x-recording-id"; + + private ProxyClientOptions _options; + private ClientPipeline _pipeline; + + /// + /// For testing only. + /// + internal ProxyClient() + { + _options = new(new Uri("http://localhost:0")); + _pipeline = ClientPipeline.Create(); + } + + /// + /// Creates a new instance. + /// + /// The options to use. + public ProxyClient(ProxyClientOptions options) + { + _options = options ?? throw new ArgumentNullException(nameof(options)); + _pipeline = ClientPipeline.Create(options); + } + + /// + /// Starts playback session of recordings. + /// + /// The configuration to use for starting playback. + /// The cancellation token to use. 
+ /// The result that includes any recorded variables. + public virtual ProxyClientResult> StartPlayback(RecordingStartInformation startInfo, CancellationToken token = default) + { + if (startInfo == null) + { + throw new ArgumentNullException(nameof(startInfo)); + } + + PipelineMessage message = CreateJsonRequest(HttpMethod.Post, "playback/start", startInfo, token); + return SendSyncOrAsync>(false, message, token).GetAwaiter().GetResult(); + } + + /// + /// Starts playback session of recordings asynchronously. + /// + /// The configuration to use for starting playback. + /// The cancellation token to use. + /// The result that includes any recorded variables. + public virtual async Task>> StartPlaybackAsync(RecordingStartInformation startInfo, CancellationToken token = default) + { + if (startInfo == null) + { + throw new ArgumentNullException(nameof(startInfo)); + } + + PipelineMessage message = CreateJsonRequest(HttpMethod.Post, "playback/start", startInfo, token); + return await SendSyncOrAsync>(true, message, token).ConfigureAwait(false); + } + + /// + /// Stops a playback session. + /// + /// The ID for the playback session to stop. + /// The cancellation token to use. + /// The client result. + public virtual ProxyClientResult StopPlayback(string recordingId, CancellationToken token = default) + { + if (string.IsNullOrWhiteSpace(recordingId)) + { + throw new ArgumentException("Recording ID cannot be null, empty, or white space only"); + } + + PipelineMessage message = CreateJsonRequest(HttpMethod.Post, "playback/stop", null, token, new() + { + [X_RECORDING_ID_HEADER] = recordingId, + }); + return SendSyncOrAsync(false, message, token).GetAwaiter().GetResult(); + } + + /// + /// Stops a playback session asynchronously. + /// + /// The ID for the playback session to stop. + /// The cancellation token to use. + /// The client result. 
+ public virtual async Task StopPlaybackAsync(string recordingId, CancellationToken token = default) + { + if (string.IsNullOrWhiteSpace(recordingId)) + { + throw new ArgumentException("Recording ID cannot be null, empty, or white space only"); + } + + PipelineMessage message = CreateJsonRequest(HttpMethod.Post, "playback/stop", null, token, new() + { + [X_RECORDING_ID_HEADER] = recordingId, + }); + return await SendSyncOrAsync(true, message, token).ConfigureAwait(false); + } + + /// + /// Starts a recording session. + /// + /// The configuration to use for the recording session. + /// The cancellation token to use. + /// The client result. + public virtual ProxyClientResult StartRecording(RecordingStartInformation startInfo, CancellationToken token = default) + { + if (startInfo == null) + { + throw new ArgumentNullException(nameof(startInfo)); + } + + PipelineMessage message = CreateJsonRequest(HttpMethod.Post, "record/start", startInfo, token); + return SendSyncOrAsync(false, message, token).GetAwaiter().GetResult(); + } + + /// + /// Starts a recording session asynchronously. + /// + /// The configuration to use for the recording session. + /// The cancellation token to use. + /// The client result. + public virtual async Task StartRecordingAsync(RecordingStartInformation startInfo, CancellationToken token = default) + { + if (startInfo == null) + { + throw new ArgumentNullException(nameof(startInfo)); + } + + PipelineMessage message = CreateJsonRequest(HttpMethod.Post, "record/start", startInfo, token); + return await SendSyncOrAsync(true, message, token).ConfigureAwait(false); + } + + /// + /// Stops a recording session. + /// + /// The identifier for the recording session. + /// (Optional) Any additional variables to include with the recording. + /// (Optional) Set this to true to turn off recording. + /// The cancellation token to use. + /// The client result. + public virtual ProxyClientResult StopRecording(string recordingId, IDictionary? 
variables = null, bool skipRecording = false, CancellationToken token = default) + { + if (string.IsNullOrWhiteSpace(recordingId)) + { + throw new ArgumentException("Recording ID cannot be null, empty, or white space only"); + } + + Dictionary additionalHeaders = new() + { + [X_RECORDING_ID_HEADER] = recordingId + }; + + if (skipRecording) + { + additionalHeaders["x-recording-skip"] = "request-response"; + } + + variables ??= new Dictionary(); + PipelineMessage message = CreateJsonRequest(HttpMethod.Post, "record/stop", variables, token, additionalHeaders); + return SendSyncOrAsync(false, message, token).GetAwaiter().GetResult(); + } + + /// + /// Stops a recording session asynchronously. + /// + /// The ID for the recording session to stop. + /// (Optional) Any additional variables to include with the recording. + /// (Optional) Set this to true to turn off recording. + /// The cancellation token to use. + /// The client result. + public virtual async Task StopRecordingAsync(string recordingId, IDictionary? variables = null, bool skipRecording = false, CancellationToken token = default) + { + if (string.IsNullOrWhiteSpace(recordingId)) + { + throw new ArgumentException("Recording ID cannot be null, empty, or white space only"); + } + + Dictionary additionalHeaders = new() + { + [X_RECORDING_ID_HEADER] = recordingId + }; + + if (skipRecording) + { + additionalHeaders["x-recording-skip"] = "request-response"; + } + + variables ??= new Dictionary(); + PipelineMessage message = CreateJsonRequest(HttpMethod.Post, "record/stop", variables, token, additionalHeaders); + return await SendSyncOrAsync(true, message, token).ConfigureAwait(false); + } + + /// + /// Sets options for the proxy. + /// + /// The identifier for the playback/recording session. + /// The options to set. + /// The cancellation token to use. + /// The client result. 
+ public virtual ProxyClientResult SetRecordingTransportOptions(string recordingId, ProxyServiceOptions options, CancellationToken token = default) + { + if (string.IsNullOrWhiteSpace(recordingId)) + { + throw new ArgumentException("Recording ID cannot be null, empty, or white space only"); + } + else if (options == null) + { + throw new ArgumentNullException(nameof(options)); + } + + PipelineMessage message = CreateJsonRequest(HttpMethod.Post, "admin/setrecordingoptions", options, token, new() + { + [X_RECORDING_ID_HEADER] = recordingId, + }); + return SendSyncOrAsync(false, message, token).GetAwaiter().GetResult(); + } + + /// + /// Sets options for the proxy asynchronously. + /// + /// The identifier for the playback/recording session. + /// The options to set. + /// The cancellation token to use. + /// The client result. + public virtual async Task SetRecordingTransportOptionsAsync(string recordingId, ProxyServiceOptions options, CancellationToken token = default) + { + if (string.IsNullOrWhiteSpace(recordingId)) + { + throw new ArgumentException("Recording ID cannot be null, empty, or white space only"); + } + else if (options == null) + { + throw new ArgumentNullException(nameof(options)); + } + + PipelineMessage message = CreateJsonRequest(HttpMethod.Post, "admin/setrecordingoptions", options, token, new() + { + [X_RECORDING_ID_HEADER] = recordingId, + }); + return await SendSyncOrAsync(true, message, token).ConfigureAwait(false); + } + + /// + /// Removes some pre-defined sanitizers to be used during recording/playback by specifying their IDs. + /// + /// The set of sanitizer IDs to remove. + /// (Optional) If specified, the sanitizers will be removed for a particular session only. + /// If null, the sanitizers will be removed globally on the test proxy. + /// The cancellation token to use. + /// The client result. + public virtual ProxyClientResult RemoveSanitizers(ISet sanitizerIds, string? 
recordingId = null, CancellationToken token = default) + { + if (sanitizerIds == null) + { + throw new ArgumentNullException(nameof(sanitizerIds)); + } + + Dictionary headers = new(); + if (recordingId != null) + { + headers[X_RECORDING_ID_HEADER] = recordingId; + } + + PipelineMessage message = CreateJsonRequest( + HttpMethod.Post, + "admin/removesanitizers", + new SanitizerIdList() { Sanitizers = sanitizerIds.ToArray() }, + token, + headers); + return SendSyncOrAsync(false, message, token).GetAwaiter().GetResult(); + } + + /// + /// Removes some pre-defined sanitizers to be used during recording/playback by specifying their IDs. + /// + /// The set of sanitizer IDs to remove. + /// (Optional) If specified, the sanitizers will be removed for a particular session only. + /// If null, the sanitizers will be removed globally on the test proxy. + /// The cancellation token to use. + /// The client result. + public virtual async Task RemoveSanitizersAsync(ISet sanitizerIds, string? recordingId = null, CancellationToken token = default) + { + if (sanitizerIds == null) + { + throw new ArgumentNullException(nameof(sanitizerIds)); + } + + Dictionary headers = new(); + if (recordingId != null) + { + headers[X_RECORDING_ID_HEADER] = recordingId; + } + + PipelineMessage message = CreateJsonRequest( + HttpMethod.Post, + "admin/removesanitizers", + new SanitizerIdList() { Sanitizers = sanitizerIds.ToArray() }, + token, + headers); + return await SendSyncOrAsync(true, message, token).ConfigureAwait(false); + } + + /// + /// Adds sanitizers for the recording test proxy. + /// + /// The sanitizers to add. + /// (Optional) If specified, the sanitizers will added for a particular session only. + /// If null, the sanitizers will be added globally on the test proxy. + /// The cancellation token to use. + /// The client result with the set of sanitizer IDs added. + public virtual ProxyClientResult> AddSanitizers(IEnumerable sanitizers, string? 
recordingId = null, CancellationToken token = default) + { + if (sanitizers == null) + { + throw new ArgumentNullException(nameof(sanitizers)); + } + + Dictionary headers = new(); + if (recordingId != null) + { + headers[X_RECORDING_ID_HEADER] = recordingId; + } + + PipelineMessage message = CreateJsonRequest(HttpMethod.Post, "Admin/AddSanitizers", sanitizers, token, headers); + ProxyClientResult result = SendSyncOrAsync(false, message, token).GetAwaiter().GetResult(); + return new ProxyClientResult>( + result.Value.Sanitizers ?? Array.Empty(), + result.GetRawResponse()); + } + + /// + /// Adds sanitizers for the recording test proxy asynchronously. + /// + /// The sanitizers to add. + /// (Optional) If specified, the sanitizers will added for a particular session only. + /// If null, the sanitizers will be added globally on the test proxy. + /// The cancellation token to use. + /// The client result with the set of sanitizer IDs added. + public virtual async Task>> AddSanitizersAsync(IEnumerable sanitizers, string? recordingId = null, CancellationToken token = default) + { + if (sanitizers == null) + { + throw new ArgumentNullException(nameof(sanitizers)); + } + + Dictionary headers = new(); + if (recordingId != null) + { + headers[X_RECORDING_ID_HEADER] = recordingId; + } + + PipelineMessage message = CreateJsonRequest(HttpMethod.Post, "Admin/AddSanitizers", sanitizers, token, headers); + ProxyClientResult result = await SendSyncOrAsync(true, message, token).ConfigureAwait(false); + return new ProxyClientResult>( + result.Value.Sanitizers ?? Array.Empty(), + result.GetRawResponse()); + } + + /// + /// Sets the matcher to use. + /// + /// The matcher to use. + /// (Optional) If specified, the matcher will be set for a particular session only. + /// If null, the matcher will be set globally on the test proxy. + /// The cancellation token to use. + /// The client result. + public virtual ProxyClientResult SetMatcher(BaseMatcher matcher, string? 
recordingId = null, CancellationToken token = default) + { + if (matcher == null) + { + throw new ArgumentNullException(nameof(matcher)); + } + + Dictionary headers = new() + { + ["x-abstraction-identifier"] = matcher.Type + }; + + if (recordingId != null) + { + headers[X_RECORDING_ID_HEADER] = recordingId; + } + + PipelineMessage message = CreateJsonRequest(HttpMethod.Post, "admin/setmatcher", matcher, token, headers); + return SendSyncOrAsync(false, message, token).GetAwaiter().GetResult(); + } + + /// + /// Sets the matcher to use asynchronously. + /// + /// The matcher to use. + /// (Optional) If specified, the matcher will be set for a particular session only. + /// If null, the matcher will be set globally on the test proxy. + /// The cancellation token to use. + /// The client result. + public virtual async Task SetMatcherAsync(BaseMatcher matcher, string? recordingId = null, CancellationToken token = default) + { + if (matcher == null) + { + throw new ArgumentNullException(nameof(matcher)); + } + + Dictionary headers = new() + { + ["x-abstraction-identifier"] = matcher.Type + }; + + if (recordingId != null) + { + headers[X_RECORDING_ID_HEADER] = recordingId; + } + + PipelineMessage message = CreateJsonRequest(HttpMethod.Post, "admin/setmatcher", matcher, token, headers); + return await SendSyncOrAsync(true, message, token).ConfigureAwait(false); + } + + /// + /// Adds a transform. + /// + /// The transform to add. + /// (Optional) If specified, the transform will be added for a particular session only. + /// If null, the transform will be added globally on the test proxy. + /// The cancellation token to use. + /// The client result. + public virtual ProxyClientResult AddTransform(BaseTransform transform, string? 
recordingId = null, CancellationToken token = default) + { + if (transform == null) + { + throw new ArgumentNullException(nameof(transform)); + } + + Dictionary headers = new() + { + ["x-abstraction-identifier"] = transform.Type + }; + + if (recordingId != null) + { + headers[X_RECORDING_ID_HEADER] = recordingId; + } + + PipelineMessage message = CreateJsonRequest(HttpMethod.Post, "admin/addtransform", transform, token, headers); + return SendSyncOrAsync(false, message, token).GetAwaiter().GetResult(); + } + + /// + /// Adds a transform asynchronously. + /// + /// The transform to add. + /// (Optional) If specified, the transform will be added for a particular session only. + /// If null, the transform will be added globally on the test proxy. + /// The cancellation token to use. + /// The client result. + public virtual async Task AddTransformAsync(BaseTransform transform, string? recordingId = null, CancellationToken token = default) + { + if (transform == null) + { + throw new ArgumentNullException(nameof(transform)); + } + + Dictionary headers = new() + { + ["x-abstraction-identifier"] = transform.Type + }; + + if (recordingId != null) + { + headers[X_RECORDING_ID_HEADER] = recordingId; + } + + PipelineMessage message = CreateJsonRequest(HttpMethod.Post, "admin/addtransform", transform, token, headers); + return await SendSyncOrAsync(true, message, token).ConfigureAwait(false); + } + + /// + /// Resets the sanitizers, matcher, and transforms to the default. + /// + /// (Optional) If specified, only the particular session will be reset. + /// If null, the reset will apply globally. + /// The cancellation token to use. + /// The client result. + public virtual ProxyClientResult Reset(string? 
recordingId = null, CancellationToken token = default) + { + Dictionary headers = new(); + if (recordingId != null) + { + headers[X_RECORDING_ID_HEADER] = recordingId; + } + + PipelineMessage message = CreateJsonRequest(HttpMethod.Post, "Admin/Reset", null, token, headers); + return SendSyncOrAsync(false, message, token).GetAwaiter().GetResult(); + } + + /// + /// Resets the sanitizers, matcher, and transforms to the default asynchronously. + /// + /// (Optional) If specified, only the particular session will be reset. + /// If null, the reset will apply globally. + /// The cancellation token to use. + /// The client result. + public virtual async Task ResetAsync(string? recordingId = null, CancellationToken token = default) + { + Dictionary headers = new(); + if (recordingId != null) + { + headers[X_RECORDING_ID_HEADER] = recordingId; + } + + PipelineMessage message = CreateJsonRequest(HttpMethod.Post, "Admin/Reset", null, token, headers); + return await SendSyncOrAsync(true, message, token).ConfigureAwait(false); + } + + /// + /// Lists the available sanitizers, matchers, and transforms. + /// + /// The cancellation token. + /// The client result with the HTML returned from the service. + public virtual ProxyClientResult ListAvailable(CancellationToken token = default) + { + PipelineMessage message = CreateJsonRequest(HttpMethod.Get, "Info/Available", null, token); + ProxyClientResult result = SendSyncOrAsync(false, message, token).GetAwaiter().GetResult(); + return new ProxyClientResult(result.GetRawResponse().Content.ToString(), result.GetRawResponse()); + } + + /// + /// Lists the available sanitizers, matchers, and transforms asynchronously. + /// + /// The cancellation token. + /// The client result with the HTML returned from the service. 
+ public virtual async Task> ListAvailableAsync(CancellationToken token = default) + { + PipelineMessage message = CreateJsonRequest(HttpMethod.Get, "Info/Available", null, token); + ProxyClientResult result = await SendSyncOrAsync(true, message, token).ConfigureAwait(false); + return new ProxyClientResult(result.GetRawResponse().Content.ToString(), result.GetRawResponse()); + } + + protected virtual PipelineMessage CreateJsonRequest(HttpMethod method, string path, TBody? body, CancellationToken token, Dictionary? headers = null) + { + PipelineMessage message = _pipeline.CreateMessage(); + message.Apply(new RequestOptions + { + CancellationToken = token, + BufferResponse = true + }); + + PipelineRequest request = message.Request; + request.Method = method.Method; + request.Uri = new Uri(_options.HttpEndpoint, path); + request.Headers.Add("Accept", "application/json"); + + if (headers != null) + { + foreach (var kvp in headers) + { + request.Headers.Add(kvp.Key, kvp.Value); + } + } + + if (body != null) + { + MemoryStream stream = new(); + using Utf8JsonWriter writer = new(stream); + JsonSerializer.Serialize(writer, body, Default.RecordingJsonOptions); + BinaryData jsonBody = BinaryData.FromBytes(new ReadOnlyMemory(stream.GetBuffer(), 0, (int)stream.Length)); + + request.Headers.Add("Content-Type", "application/json"); + request.Content = BinaryContent.Create(jsonBody); + } + + return message; + } + + protected virtual async ValueTask SendSyncOrAsync(bool isAsync, PipelineMessage message, CancellationToken token) + { + if (isAsync) + { + await _pipeline.SendAsync(message).ConfigureAwait(false); + } + else + { + _pipeline.Send(message); + } + + PipelineResponse response = message.Response ?? throw new ClientResultException("Response was null", message.Response); + if (response.IsError) + { + if (response.Content.ToMemory().Length > 0) + { + string contentType = response.Headers.GetFirstOrDefault("Content-Type") ?? 
string.Empty; + + if (contentType.StartsWith("text/", StringComparison.OrdinalIgnoreCase)) + { + string error = response.Content.ToString(); + throw new ClientResultException(error, response); + } + else if (contentType.StartsWith("application/json", StringComparison.OrdinalIgnoreCase)) + { + string error; + try + { + var parsed = response.Content.ToObjectFromJson(new() + { + PropertyNameCaseInsensitive = true + }); + + error = $"{parsed.Status}: {parsed.Message}"; + } + catch + { + error = response.Content.ToString(); + } + + throw new ClientResultException(error, response); + } + } + + throw new ClientResultException(response); + } + + return new ProxyClientResult(response); + } + + protected virtual async ValueTask> SendSyncOrAsync(bool isAsync, PipelineMessage message, CancellationToken token) + { + if (isAsync) + { + await SendSyncOrAsync(isAsync, message, token).ConfigureAwait(false); + } + else + { + SendSyncOrAsync(isAsync, message, token).GetAwaiter().GetResult(); + } + + PipelineResponse response = message.Response!; // we've already validated this is not null in the previous call + + try + { + TResponse? parsed = JsonSerializer.Deserialize(response.Content.ToMemory().Span, Default.TestProxyJsonOptions); + if (parsed == null) + { + throw new InvalidDataException("Response parsed to null"); + } + + return new ProxyClientResult(parsed, response); + } + catch (Exception ex) + { + throw new ClientResultException("Failed to deserialize response", message.Response, ex); + } + } + + private struct ErrorResponse + { + public string? Message { get; set; } + public string? 
Status { get; set; } + } +} diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/Proxy/ProxyClientOptions.cs b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/Proxy/ProxyClientOptions.cs new file mode 100644 index 000000000..bb35323c9 --- /dev/null +++ b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/Proxy/ProxyClientOptions.cs @@ -0,0 +1,31 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using System.ClientModel.Primitives; + +namespace OpenAI.TestFramework.Recording.RecordingProxy; + +/// +/// Options for the test proxy client. +/// +public class ProxyClientOptions : ClientPipelineOptions +{ + /// + /// Creates a new instance. + /// + /// The HTTP endpoint. + /// The endpoint was null. + /// The endpoint was not absolute. + public ProxyClientOptions(Uri http) + { + if (http == null) throw new ArgumentNullException(nameof(http)); + else if (!http.IsAbsoluteUri) throw new ArgumentException("URI must be absolute", nameof(http)); + + HttpEndpoint = http; + } + + /// + /// The HTTP endpoint to use + /// + public Uri HttpEndpoint { get; } +} diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/Proxy/ProxyClientResult.cs b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/Proxy/ProxyClientResult.cs new file mode 100644 index 000000000..a1e16d300 --- /dev/null +++ b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/Proxy/ProxyClientResult.cs @@ -0,0 +1,65 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using System.ClientModel; +using System.ClientModel.Primitives; +using OpenAI.TestFramework.Utils; + +namespace OpenAI.TestFramework.Recording.RecordingProxy +{ + /// + /// Represents the result of a proxy client operation. + /// + public class ProxyClientResult : ClientResult + { + /// + /// Initializes a new instance of the class. + /// + /// (Optional) The pipeline response. 
+ public ProxyClientResult(PipelineResponse? response = null) + { + if (response != null) + { + SetRawResponse(response); + } + } + + /// + /// Gets the recording ID from the response headers. + /// + public string? RecordingId => GetRawResponse().Headers.GetFirstOrDefault(ProxyClient.X_RECORDING_ID_HEADER); + } + + /// + /// Represents the result of a proxy client operation. + /// + /// The type of the result value. + public class ProxyClientResult : ProxyClientResult + { + /// + /// Initializes a new instance of the class. + /// + /// The result value. + /// (Optional) The pipeline response. + public ProxyClientResult(TResult value, PipelineResponse? response = null) + { + Value = value; + if (response != null) + { + SetRawResponse(response); + } + } + + /// + /// Gets the result value. + /// + public virtual TResult Value { get; } + + /// + /// Implicitly converts the to the result value. + /// + /// The instance. + /// The result value. + public static implicit operator TResult(ProxyClientResult result) => result.Value; + } +} diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/Proxy/ProxyService.cs b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/Proxy/ProxyService.cs new file mode 100644 index 000000000..8d6460afd --- /dev/null +++ b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/Proxy/ProxyService.cs @@ -0,0 +1,256 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using System.Diagnostics; +using System.Runtime.InteropServices; +using System.Text; +using NUnit.Framework; +using OpenAI.TestFramework.Recording.RecordingProxy; +using OpenAI.TestFramework.Utils.Processes; + +namespace OpenAI.TestFramework.Recording.Proxy; + +/// +/// Represents the test proxy. 
See here for more information: +/// https://github.com/Azure/azure-sdk-tools/blob/main/tools/test-proxy/Azure.Sdk.Tools.TestProxy/README.md +/// +public class ProxyService : IDisposable +{ + private const int c_maxLines = 50; + + private Process _testProxyProcess; + private Uri? _http; + private Uri? _https; + private TaskCompletionSource<(int, int)> _portsAvailableTcs; + private StringBuilder _errorOutput; + private int _lines; + private ProxyClient? _client; + private WindowsJob? _windowsJob; + + /// + /// Creates a new instance. + /// + /// The options to use. + /// was null. + private ProxyService(ProxyServiceOptions options) + { + if (options == null) + { + throw new ArgumentNullException(nameof(options)); + } + + options.Validate(); + + ProcessStartInfo startInfo = new() + { + FileName = options.DotnetExecutable, + Arguments = $@"""{options.TestProxyDll}"" start -u --storage-location=""{options.StorageLocationDir}""", + RedirectStandardOutput = true, + RedirectStandardError = true, + UseShellExecute = false, + EnvironmentVariables = + { + ["ASPNETCORE_URLS"] = $"http://127.0.0.1:{options.HttpPort};https://127.0.0.1:{options.HttpsPort}", + ["Logging__LogLevel__Azure.Sdk.Tools.TestProxy"] = "Error", + ["Logging__LogLevel__Default"] = "Error", + ["Logging__LogLevel__Microsoft.AspNetCore"] = "Error", + ["Logging__LogLevel__Microsoft.Hosting.Lifetime"] = "Information", + } + }; + + if (options.DevCertFile != null) + { + startInfo.EnvironmentVariables["ASPNETCORE_Kestrel__Certificates__Default__Path"] = options.DevCertFile; + if (options.DevCertPassword != null) + { + startInfo.EnvironmentVariables["ASPNETCORE_Kestrel__Certificates__Default__Password"] = options.DevCertPassword; + } + } + + _errorOutput = new(); + _portsAvailableTcs = new(); + _testProxyProcess = new Process() + { + EnableRaisingEvents = true, + StartInfo = startInfo + }; + + _testProxyProcess.Exited += (_, _) => + { + _portsAvailableTcs.TrySetException(new InvalidOperationException("Test proxy 
process exited unexpectedly")); + }; + _testProxyProcess.ErrorDataReceived += HandleStdErr; + _testProxyProcess.OutputDataReceived += HandleStdOut; + + _windowsJob = null; + if (RuntimeInformation.IsOSPlatform(OSPlatform.Windows)) + { + // If running on Windows, use a Job to instruct the OS to kill the test proxy service process + // should this current process die for any reason. + _windowsJob = new($"TestProxy_{Process.GetCurrentProcess().Id}"); + } + } + + /// + /// Gets the client to use to communicate with this recording test proxy. + /// + public ProxyClient Client => _client + ?? throw new InvalidOperationException("Please wait for the proxy to finish starting first"); + + /// + /// Gets the HTTP endpoint the test recording proxy is listening on. + /// + public Uri HttpEndpoint => _http + ?? throw new InvalidOperationException("Please wait for the proxy to finish starting first"); + + /// + /// Gets the HTTPS endpoint the test recording proxy is listening on. + /// + public Uri HttpsEndpoint => _https + ?? throw new InvalidOperationException("Please wait for the proxy to finish starting first"); + + /// + /// Creates a new instance of the recording test proxy. + /// + /// The options to use for the proxy. + /// The cancellation token to use. + /// The initialized recording test proxy instance. + public static async Task CreateNewAsync(ProxyServiceOptions options, CancellationToken token = default) + { + token.ThrowIfCancellationRequested(); + + ProxyService proxy = new ProxyService(options); + + // Try to make sure the test proxy process is terminated when we exit + AppDomain.CurrentDomain.DomainUnload += (_, _) => proxy.Dispose(); + // TODO FIXME: On Windows, use a job to ensure the OS will properly kill the process + + await proxy.StartAsync(token).ConfigureAwait(false); + return proxy; + } + + /// + /// Tears down the recording test proxy instance. 
+ /// + public void Dispose() + { + _portsAvailableTcs.TrySetException(new ObjectDisposedException(nameof(ProxyService))); + try + { + _testProxyProcess.Kill(); + if (_windowsJob != null) + { + // do NOT call Dispose here. This will terminate this process too. + } + } catch { /* we tried */ } + } + + /// + /// Checks to see if any errors were encountered in the test proxy, and if so throws an exception. + /// + /// If there were any errors encountered. + public void ThrowOnErrors() + { + lock (_errorOutput) + { + if (_errorOutput.Length > 0) + { + string error = _errorOutput.ToString(); + _errorOutput.Clear(); + throw new InvalidOperationException($"An error occurred in the test proxy:\n{error}"); + } + } + } + + /// + /// For testing purposes only + /// + /// The client to set. + internal void SetClient(ProxyClient client) + { + _client = client; + } + + /// + /// Starts the recording test proxy instance, and waits until we can read the ports it is listening on for + /// HTTP and HTTPS. + /// + /// The cancellation token to use. + /// Asynchronous tas + /// The test proxy failed to start, or we encountered some other error. + protected async Task StartAsync(CancellationToken token = default) + { + token.Register(_portsAvailableTcs.SetCanceled); + + bool success = _testProxyProcess.Start(); + if (!success) + { + throw new InvalidOperationException("The test proxy process failed to start"); + } + + _windowsJob?.Add(_testProxyProcess); + + _testProxyProcess.BeginOutputReadLine(); + _testProxyProcess.BeginErrorReadLine(); + + await _portsAvailableTcs.Task.ConfigureAwait(false); + } + + private static Uri? ParseListeningOnUri(string line) + { + const string nowListeningOn = "Now listening on: "; + int index = line.IndexOf(nowListeningOn, StringComparison.OrdinalIgnoreCase); + if (index < 0) + { + return null; + } + + Uri.TryCreate(line.AsSpan().Slice(index + nowListeningOn.Length).Trim().ToString(), UriKind.Absolute, out Uri? 
uri); + return uri; + } + + private void HandleStdErr(object sender, DataReceivedEventArgs args) + { + if (args?.Data != null) + { + lock (_errorOutput) + { + _errorOutput.Append(args.Data); + } + + TestContext.Progress.WriteLine(args.Data); + } + } + + private void HandleStdOut(object sender, DataReceivedEventArgs args) + { + if (_lines++ >= c_maxLines) + { + _portsAvailableTcs.TrySetException(new InvalidOperationException( + $"Failed to start the test proxy. One or both the ports was not populated. http: {_http}, https: {_https}")); + _testProxyProcess.OutputDataReceived -= HandleStdOut; + return; + } + else if (args?.Data == null) + { + return; + } + + Uri? uri = ParseListeningOnUri(args.Data); + if (_http == null && uri?.Scheme == "http") + { + _http = uri; + _client = new ProxyClient(new ProxyClientOptions(_http!)); + } + else if (_https == null && uri?.Scheme == "https") + { + _https = uri; + } + + if (_http != null && _https != null) + { + _testProxyProcess.OutputDataReceived -= HandleStdOut; + _portsAvailableTcs.TrySetResult((_http.Port, _https.Port)); + } + } +} diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/Proxy/ProxyServiceOptions.cs b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/Proxy/ProxyServiceOptions.cs new file mode 100644 index 000000000..2f3e3d27f --- /dev/null +++ b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/Proxy/ProxyServiceOptions.cs @@ -0,0 +1,82 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +namespace OpenAI.TestFramework.Recording.Proxy; + +/// +/// Options for starting the recording test proxy. +/// +public class ProxyServiceOptions +{ + /// + /// Gets the full path to the dotnet executable. + /// + required public string DotnetExecutable { get; set; } + + /// + /// Gets the full path to the test proxy DLL. 
+ /// + required public string TestProxyDll { get; set; } + + /// + /// The path to the directory to store or read recordings from. + /// + required public string StorageLocationDir { get; set; } + + /// + /// (Optional) The file to use for the HTTPS endpoint certificate. + /// + public string? DevCertFile { get; set; } + + /// + /// (Optional) The password to use for opening the for the HTTPS endpoint. + /// + public string? DevCertPassword { get; set; } + + /// + /// (Optional) The HTTP port the test proxy should listen on. Set this to 0 to have the next available port be automatically selected. + /// + public ushort HttpPort { get; set; } + + /// + /// (Optional) The HTTPS port the test proxy should listen on. Set this to 0 to have the next available port be automatically selected. + /// + public ushort HttpsPort { get; set; } + + /// + /// Validates the configuration. + /// + /// The storage location directory was could not be found. + /// The HTTPS certificate file could not be found. + /// No password was specified for the developer certificate file. 
+ internal protected virtual void Validate() + { + List exceptions = new(); + + if (!File.Exists(DotnetExecutable)) + { + exceptions.Add(new FileNotFoundException("Could not find (or read from) the dotnet executable: " + DotnetExecutable)); + } + else if (!File.Exists(TestProxyDll)) + { + exceptions.Add(new FileNotFoundException("Could not find (or read from) the test proxy DLL: " + TestProxyDll)); + } + else if (!Directory.Exists(StorageLocationDir)) + { + exceptions.Add(new DirectoryNotFoundException("Could not find (or read from) the following directory: " + StorageLocationDir)); + } + else if (DevCertFile != null && !File.Exists(DevCertFile)) + { + exceptions.Add(new FileNotFoundException("Could not find (or read from) the HTTPS certificate file: " + DevCertFile)); + } + else if (DevCertFile != null && DevCertPassword == null) + { + exceptions.Add(new InvalidOperationException($"You must set the {nameof(DevCertPassword)} property if you specify the {nameof(DevCertFile)}")); + } + + if (exceptions.Any()) + { + throw new AggregateException("The test proxy service configuration is invalid", exceptions); ; + } + } +} diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/Proxy/ProxyTransport.cs b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/Proxy/ProxyTransport.cs new file mode 100644 index 000000000..69cb9a8fc --- /dev/null +++ b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/Proxy/ProxyTransport.cs @@ -0,0 +1,216 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Net.Http; +using System.Text.Json; + +namespace OpenAI.TestFramework.Recording.RecordingProxy; + +/// +/// Implements a that will redirect all HTTP/HTTPS requests to the test proxy for recording or playback. 
+/// Depending on the mode, the test proxy will then either forward the request to the upstream service and record the request and response, +/// or playback the response from a previous recording. +/// +public class ProxyTransport : PipelineTransport +{ + private const string DevCertIssuer = "CN=localhost"; + private const string FiddlerCertIssuer = "CN=DO_NOT_TRUST_FiddlerRoot, O=DO_NOT_TRUST, OU=Created by http://www.fiddler2.com"; + private const string FiddlerHost = "ipv4.fiddler"; + + private readonly ProxyTransportOptions _options; + + /// + /// Initializes a new instance of the class. + /// + /// The options for the proxy transport. + public ProxyTransport(ProxyTransportOptions options) + { + _options = options ?? throw new ArgumentNullException(nameof(options)); + + string certIssuer; + if (_options.UseFiddler) + { + certIssuer = FiddlerCertIssuer; + } + else + { + certIssuer = DevCertIssuer; + } + + HttpClientHandler handler = new() + { + ServerCertificateCustomValidationCallback = (_, certificate, _, _) => certificate?.Issuer == certIssuer, + UseCookies = _options.AllowCookies, + AllowAutoRedirect = _options.AllowAutoRedirect + }; + + InnerTransport = new HttpClientPipelineTransport(new HttpClient(handler)); + } + + /// + /// The actual transport to use for sending requests, and receiving responses. + /// + protected PipelineTransport InnerTransport { get; } + + /// + protected override PipelineMessage CreateMessageCore() + { + Exception? 
ex = _options.MismatchException?.GetValue(); + if (ex != null) + { + throw ex; + } + + PipelineMessage message = InnerTransport.CreateMessage(); + PipelineRequest request = message.Request; + + // PipelineRequest no longer has a ClientRequestId property, so we need to set it on the headers directly + request.Headers.Add("x-ms-client-request-id", _options.RequestId); + + return message; + } + + /// + protected override void ProcessCore(PipelineMessage message) + => ProcessCoreSyncOrAsync(message, async: false).GetAwaiter().GetResult(); + + /// + protected override ValueTask ProcessCoreAsync(PipelineMessage message) + => ProcessCoreSyncOrAsync(message, async: true); + + /// + /// Processes the pipeline message synchronously or asynchronously. + /// + /// The pipeline message to process. + /// A flag indicating whether to process asynchronously. + /// A representing the asynchronous operation. + protected virtual async ValueTask ProcessCoreSyncOrAsync(PipelineMessage message, bool async) + { + try + { + RedirectToTestProxy(message); + if (async) + { + await InnerTransport.ProcessAsync(message).ConfigureAwait(false); + } + else + { + InnerTransport.Process(message); + } + + await ProcessResponseSyncAsync(message, async).ConfigureAwait(false); + } + finally + { + // revert the original URI - this is important for tests that rely on aspects of the URI in the pipeline + // e.g. KeyVault caches tokens based on URI + message.Request.Headers.TryGetValue("x-recording-upstream-base-uri", out string? 
original); + if (message.Request.Uri is null) + { + throw new InvalidOperationException("The request cannot have a null URI"); + } + if (original == null) + { + throw new InvalidOperationException("The TestProxy response did not contain the expected \"x-recording-upstream-base-uri\" header"); + } + + var originalBaseUri = new Uri(original); + var builder = new UriBuilder(message.Request.Uri); + builder.Scheme = originalBaseUri.Scheme; + builder.Host = originalBaseUri.Host; + builder.Port = originalBaseUri.Port; + + message.Request.Uri = builder.Uri; + } + } + + /// + /// Processes the response synchronously or asynchronously. + /// + /// The pipeline message containing the response. + /// A flag indicating whether to process asynchronously. + /// A representing the asynchronous operation. + protected virtual async ValueTask ProcessResponseSyncAsync(PipelineMessage message, bool async) + { + if (message.Response?.Headers.TryGetValues("x-request-mismatch", out _) == true) + { + if (message.Response.ContentStream == null) + { + throw new TestRecordingMismatchException("Detected a mismatch but the response had no body"); + } + + using var doc = async + ? await JsonDocument.ParseAsync(message.Response.ContentStream).ConfigureAwait(false) + : JsonDocument.Parse(message.Response.ContentStream); + throw new TestRecordingMismatchException(doc.RootElement.GetProperty("Message").GetString(), null); + } + } + + // copied from https://github.com/Azure/azure-sdk-for-net/blob/main/common/Perf/Azure.Test.Perf/TestProxyPolicy.cs + /// + /// Redirects the pipeline message to the test proxy based on the recording mode. + /// + /// The pipeline message to redirect. 
+ protected virtual void RedirectToTestProxy(PipelineMessage message) + { + if (_options.Mode == RecordedTestMode.Record) + { + switch (_options.ShouldRecordRequest(message.Request)) + { + case RequestRecordMode.Record: + break; + case RequestRecordMode.RecordWithoutRequestBody: + message.Request.Headers.Set("x-recording-skip", "request-body"); + break; + case RequestRecordMode.DoNotRecord: + message.Request.Headers.Set("x-recording-skip", "request-response"); + break; + } + } + else if (_options.Mode == RecordedTestMode.Playback) + { + switch (_options.ShouldRecordRequest(message.Request)) + { + case RequestRecordMode.Record: + break; + case RequestRecordMode.RecordWithoutRequestBody: + // CAUTION: setting the request content to null has the unfortunate side effect of causing any HttpClient backed + // implementation of networking to not send up any Content-??? headers as well which can cause test + // mismatches. Let's work around this by setting some empty content. + message.Request.Content = BinaryContent.Create(BinaryData.FromBytes(Array.Empty())); + break; + case RequestRecordMode.DoNotRecord: + throw new InvalidOperationException( + "Cannot playback when recording has been disabled. Please make sure to skip the test or request."); + } + } + + var request = message.Request; + request.Headers.Set("x-recording-id", _options.RecordingId); + request.Headers.Set("x-recording-mode", _options.Mode.ToString().ToLowerInvariant()); + + if (request.Uri is null) + { + throw new InvalidOperationException("Request URI cannot be null"); + } + + // Intentionally reset the upstream URI in case the request URI changes between retries - e.g. when using GeoRedundant secondary Storage + var builder = new UriBuilder() + { + Scheme = request.Uri.Scheme, + Host = request.Uri.Host, + Port = request.Uri.Port, + }; + request.Headers.Set("x-recording-upstream-base-uri", builder.ToString()); + + Uri baseUri = request.Uri.Scheme == "https" ? 
_options.HttpsEndpoint : _options.HttpEndpoint; + + builder = new(request.Uri); + builder.Host = _options.UseFiddler ? FiddlerHost : baseUri.Host; + builder.Port = baseUri.Port; + + request.Uri = builder.Uri; + } +} diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/Proxy/ProxyTransportOptions.cs b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/Proxy/ProxyTransportOptions.cs new file mode 100644 index 000000000..a0087e850 --- /dev/null +++ b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/Proxy/ProxyTransportOptions.cs @@ -0,0 +1,72 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using System.ClientModel.Primitives; +using OpenAI.TestFramework.Utils; + +namespace OpenAI.TestFramework.Recording.RecordingProxy; + +/// +/// The options for the recording test proxy transport. +/// +public class ProxyTransportOptions +{ + private Func? _shouldRecordRequest; + + /// + /// Gets or sets the test proxy HTTP endpoint. + /// + required public Uri HttpEndpoint { get; set; } + + /// + /// Gets or sets the test proxy HTTPS endpoint. + /// + required public Uri HttpsEndpoint { get; set; } + + /// + /// Gets or sets the current test recording mode. + /// + required public RecordedTestMode Mode { get; set; } + + /// + /// Gets or sets the identifier for the recording. + /// + required public string RecordingId { get; set; } + + /// + /// The ID for the request. Please make sure that a consistent ID is used during recording and playback to avoid + /// mismatches. + /// + required public string RequestId { get; set; } + + /// + /// Gets or sets the delegate used to get/set the test recording mismatch exception. + /// + public PropertyDelegate? MismatchException { get; set; } + + /// + /// Gets or sets a value indicating whether to use Fiddler. If this is true, the transport will be updated to accept + /// the Fiddler root certificate. 
+ /// + public bool UseFiddler { get; set; } + + /// + /// Gets or sets the predicate used to determine whether or not a particular request should not be recorded. + /// Default behaviour is to defer to what the matchers/sanitizers do. + /// + public Func ShouldRecordRequest + { + get => _shouldRecordRequest ?? (_ => RequestRecordMode.Record); + set => _shouldRecordRequest = value; + } + + /// + /// Gets or sets a value indicating whether to allow cookies while sending and receiving requests. + /// + public bool AllowCookies { get; set; } + + /// + /// Gets or sets a value indicating whether to allow auto redirect when processing server responses. + /// + public bool AllowAutoRedirect { get; set; } +} diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/Proxy/RequestRecordMode.cs b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/Proxy/RequestRecordMode.cs new file mode 100644 index 000000000..d8a782327 --- /dev/null +++ b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/Proxy/RequestRecordMode.cs @@ -0,0 +1,23 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +namespace OpenAI.TestFramework.Recording.RecordingProxy; + +/// +/// Enumeration of possible values of how to record a request. This acts as an override. +/// +public enum RequestRecordMode +{ + /// + /// Records the request. + /// + Record, + /// + /// Records the request headers but skips the request body. + /// + RecordWithoutRequestBody, + /// + /// Does not record the request (nor the response). + /// + DoNotRecord, +} diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/Proxy/Service/PemPair.cs b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/Proxy/Service/PemPair.cs new file mode 100644 index 000000000..15c72d94e --- /dev/null +++ b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/Proxy/Service/PemPair.cs @@ -0,0 +1,15 @@ +// Copyright (c) Microsoft Corporation. 
All rights reserved. +// Licensed under the MIT License. + +namespace OpenAI.TestFramework.Recording.Proxy.Service; + +/// +/// Information about certificates for the test proxy service. +/// +public class PemPair +{ + /// Gets or sets the pem value. + public string? PemValue { get; set; } + /// Gets or sets the pem key. + public string? PemKey { get; set; } +} diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/Proxy/Service/ProxyServiceRecordingOptions.cs b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/Proxy/Service/ProxyServiceRecordingOptions.cs new file mode 100644 index 000000000..449e90926 --- /dev/null +++ b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/Proxy/Service/ProxyServiceRecordingOptions.cs @@ -0,0 +1,25 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +namespace OpenAI.TestFramework.Recording.Proxy.Service; + +/// +/// Options for the test proxy. +/// +public class ProxyServiceRecordingOptions +{ + /// + /// Whether or not to follow redirects + /// + public bool? HandleRedirects { get; set; } + + /// + /// If set, this will change the "root" path the test proxy uses when loading a recording. + /// + public string? ContextDirectory { get; set; } + + /// + /// Options for the transport. + /// + public ProxyServiceTransportCustomizations? Transport { get; set; } +} diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/Proxy/Service/ProxyServiceTransportCustomizations.cs b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/Proxy/Service/ProxyServiceTransportCustomizations.cs new file mode 100644 index 000000000..6be1ba257 --- /dev/null +++ b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/Proxy/Service/ProxyServiceTransportCustomizations.cs @@ -0,0 +1,40 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +using System.Text.Json.Serialization; +using OpenAI.TestFramework.Utils; + +namespace OpenAI.TestFramework.Recording.Proxy.Service; + +/// +/// Transport customizations for the test proxy service. +/// +public class ProxyServiceTransportCustomizations() +{ + /// Gets or sets the allow auto redirect. + public bool? AllowAutoRedirect { get; set; } + + /// + /// If specified, the public key contained here will be used during validation of the SSL connection by + /// comparing thumbprints. + /// + public string? TLSValidationCert { get; set; } + + /// + /// If specified, the will only be applied to the specified host. + /// + public string? TSLValidationCertHost { get; set; } + + /// + /// Each certificate pair contained within this list should be added to the clientHandler for the server + /// or an individual recording. + /// + public IList? Certificates { get; set; } + + /// + /// During playback, a response is normally returned all at once. By offering this response time, we can + /// "stretch" the writing of the response bytes over a time range of milliseconds. + /// + [JsonConverter(typeof(TimespanToMillisecondConverter))] + public TimeSpan? PlaybackResponseTime { get; set; } +} diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/Proxy/Service/RecordingStartInformation.cs b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/Proxy/Service/RecordingStartInformation.cs new file mode 100644 index 000000000..e3526592a --- /dev/null +++ b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/Proxy/Service/RecordingStartInformation.cs @@ -0,0 +1,30 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using System.Text.Json.Serialization; + +namespace OpenAI.TestFramework.Recording.Proxy.Service; + +/// +/// Information for starting a recording or playback session with the recording test proxy. 
+/// +public class RecordingStartInformation +{ + /// + /// Gets or sets the file to save recordings to, or to play back requests from. + /// + [JsonPropertyName("x-recording-file")] + required public string RecordingFile { get; set; } + + /// + /// Gets or sets the path to the "assets.json" file to use for integration with external Git + /// repositories. This enables the proxy to work against repositories that do not emplace their + /// test recordings directly alongside their test implementations. + /// + /// + /// Please refer to the documentation for more information: + /// https://github.com/Azure/azure-sdk-tools/blob/main/tools/test-proxy/documentation/asset-sync/README.md + /// + [JsonPropertyName("x-recording-assets-file")] + public string? AssetsFile { get; set; } +} diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/Proxy/Service/SanitizerIdList.cs b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/Proxy/Service/SanitizerIdList.cs new file mode 100644 index 000000000..f0982542b --- /dev/null +++ b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/Proxy/Service/SanitizerIdList.cs @@ -0,0 +1,15 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +namespace OpenAI.TestFramework.Recording.Proxy.Service; + +/// +/// Request to remove sanitizers for the test proxy. +/// +public struct SanitizerIdList +{ + /// + /// The IDs of the sanitizers to remove. + /// + public string[]? Sanitizers { get; set; } +} diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/Sanitizers/BaseRegexSanitizer.cs b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/Sanitizers/BaseRegexSanitizer.cs new file mode 100644 index 000000000..3c52f35b8 --- /dev/null +++ b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/Sanitizers/BaseRegexSanitizer.cs @@ -0,0 +1,25 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+
+namespace OpenAI.TestFramework.Recording.Sanitizers;
+
+///
+/// The base class for regex based sanitizers
+///
+public abstract class BaseRegexSanitizer(string type) : BaseSanitizer(type)
+{
+    ///
+    /// Gets the regular expression to match what to replace.
+    ///
+    public string? Regex { get; set; }
+
+    ///
+    /// Gets or sets the value to replace the match with.
+    ///
+    public string? Value { get; set; }
+
+    ///
+    /// Gets or sets the group in the regex match to replace.
+    ///
+    public string? GroupForReplace { get; set; }
+}
diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/Sanitizers/BaseSanitizer.cs b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/Sanitizers/BaseSanitizer.cs
new file mode 100644
index 000000000..011145bf5
--- /dev/null
+++ b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/Sanitizers/BaseSanitizer.cs
@@ -0,0 +1,53 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+using System.Text.Json;
+using System.Text.Json.Serialization;
+using OpenAI.TestFramework.Utils;
+
+namespace OpenAI.TestFramework.Recording.Sanitizers;
+
+///
+/// The base class for all test proxy recording sanitizers
+///
+public abstract class BaseSanitizer : IUtf8JsonSerializable
+{
+    ///
+    /// Creates a new instance.
+    ///
+    /// The type of this sanitizer (e.g. GeneralRegexSanitizer).
+    /// If the type was null.
+    protected BaseSanitizer(string type)
+    {
+        // nameof(type) (the ctor parameter) is the correct paramName to report, not nameof(Type)
+        Type = type ?? throw new ArgumentNullException(nameof(type));
+    }
+
+    ///
+    /// Gets the type of the sanitizer (e.g. HeaderRegexSanitizer).
+    ///
+    [JsonIgnore]
+    public string Type { get; }
+
+    ///
+    public void Write(Utf8JsonWriter writer, JsonSerializerOptions? options = null)
+    {
+        writer.WriteStartObject();
+        {
+            writer.WriteString("Name"u8, Type);
+            writer.WritePropertyName("Body"u8);
+
+            SerializeInner(writer, options);
+        }
+        writer.WriteEndObject();
+    }
+
+    ///
+    /// Serializes the child types. By default this will use reflection based serialization.
+    ///
+    /// The writer to write to.
+    protected virtual void SerializeInner(Utf8JsonWriter writer, JsonSerializerOptions? options = null)
+    {
+        // By default use reflection based serialization
+        JsonSerializer.Serialize(writer, this, GetType(), Default.InnerRecordingJsonOptions);
+    }
+}
diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/Sanitizers/BodyKeySanitizer.cs b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/Sanitizers/BodyKeySanitizer.cs
new file mode 100644
index 000000000..87b3f67e1
--- /dev/null
+++ b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/Sanitizers/BodyKeySanitizer.cs
@@ -0,0 +1,25 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+namespace OpenAI.TestFramework.Recording.Sanitizers;
+
+///
+/// Sanitizer for a request body that matches a particular value in JSON using a JPath expression.
+///
+public class BodyKeySanitizer : BaseRegexSanitizer
+{
+    ///
+    /// Creates a new instance.
+    ///
+    /// The JSON path to match.
+    /// If the JSON path is null.
+    public BodyKeySanitizer(string jsonPath) : base("BodyKeySanitizer")
+    {
+        JsonPath = jsonPath ?? throw new ArgumentNullException(nameof(jsonPath));
+    }
+
+    ///
+    /// The JPath expression to match a particular value to sanitize.
+    ///
+    public string JsonPath { get; }
+}
diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/Sanitizers/BodyRegexSanitizer.cs b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/Sanitizers/BodyRegexSanitizer.cs
new file mode 100644
index 000000000..e49b6f625
--- /dev/null
+++ b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/Sanitizers/BodyRegexSanitizer.cs
@@ -0,0 +1,25 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+ +namespace OpenAI.TestFramework.Recording.Sanitizers; + +/// +/// Sanitizer for the body of a request or response. +/// +public class BodyRegexSanitizer : BaseRegexSanitizer +{ + /// + /// Creates a new instance. + /// + /// Gets the regular expression to match what to replace. + /// If was null. + public BodyRegexSanitizer(string regex) : base("BodyRegexSanitizer") + { + Regex = regex ?? throw new ArgumentNullException(nameof(regex)); + } + + /// + /// Condition to apply for the sanitization or transform. If the condition is not met, sanitization is not performed. + /// + public Condition? Condition { get; set; } +} diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/Sanitizers/HeaderRegexSanitizer.cs b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/Sanitizers/HeaderRegexSanitizer.cs new file mode 100644 index 000000000..d1a76fc04 --- /dev/null +++ b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/Sanitizers/HeaderRegexSanitizer.cs @@ -0,0 +1,25 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +namespace OpenAI.TestFramework.Recording.Sanitizers; + +/// +/// Sanitizer for a request header. +/// +public class HeaderRegexSanitizer : BaseRegexSanitizer +{ + /// + /// Creates a new instance. + /// + /// The header to sanitize. + /// If the is null. + public HeaderRegexSanitizer(string key) : base("HeaderRegexSanitizer") + { + Key = key ?? throw new ArgumentNullException(nameof(key)); + } + + /// + /// The name of the header to sanitize. 
+ /// + public string Key { get; } +} diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/Sanitizers/UriRegexSanitizer.cs b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/Sanitizers/UriRegexSanitizer.cs new file mode 100644 index 000000000..3c5bad68e --- /dev/null +++ b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/Sanitizers/UriRegexSanitizer.cs @@ -0,0 +1,20 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +namespace OpenAI.TestFramework.Recording.Sanitizers; + +/// +/// Sanitizer for a request URI. +/// +public class UriRegexSanitizer : BaseRegexSanitizer +{ + /// + /// Creates a new instance. + /// + /// The regular expression to match in the request URI. + /// If the regular expression is null. + public UriRegexSanitizer(string regex) : base("UriRegexSanitizer") + { + Regex = regex ?? throw new ArgumentNullException(nameof(regex)); + } +} diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/TestRandom.cs b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/TestRandom.cs new file mode 100644 index 000000000..d3b6bccb6 --- /dev/null +++ b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/TestRandom.cs @@ -0,0 +1,38 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +namespace OpenAI.TestFramework.Recording; + +/// +/// Represents an implementation of the class used for test recordings. +/// +public class TestRandom : Random +{ + private RecordedTestMode _mode; + + /// + /// Initializes a new instance of the class. + /// + /// The recorded test mode. + /// The seed value. + public TestRandom(RecordedTestMode mode, int seed) : base(seed) + { + _mode = mode; + } + + /// + /// Generates a new based on the recorded test mode. + /// + /// A new . 
+ public Guid NewGuid() + { + if (_mode == RecordedTestMode.Live) + { + return Guid.NewGuid(); + } + + var bytes = new byte[16]; + NextBytes(bytes); + return new Guid(bytes); + } +} diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/TestRecording.cs b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/TestRecording.cs new file mode 100644 index 000000000..d58573f9e --- /dev/null +++ b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/TestRecording.cs @@ -0,0 +1,250 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using System.Globalization; +using System.Security.Cryptography; +using OpenAI.TestFramework.Recording.Matchers; +using OpenAI.TestFramework.Recording.Proxy; +using OpenAI.TestFramework.Recording.RecordingProxy; +using OpenAI.TestFramework.Utils; + +namespace OpenAI.TestFramework.Recording; + +/// +/// Represents a test recording session. This is used to record or playback requests and responses. It also provides +/// a random generator that is consistent between recording and playback sessions. +/// +public class TestRecording : IAsyncDisposable +{ + /// + /// The key to use to store the random seed in the recording. + /// + public const string RandomSeedVariableKey = "RandomSeed"; + + private SortedDictionary _variables; + + /// + /// Creates a new instance. + /// + /// The unique identifier for the recording. + /// The current recording mode. + /// The test proxy service instance to use for the recording. + /// (Optional) Any variables populate this recording this. This is normally used in + /// playback mode to pass in any variables saved as part of the recording. + /// Any of the required parameters are null. + /// Some expected values were missing or null. + /// The current recording mode is not supported. + public TestRecording(string id, RecordedTestMode mode, ProxyService proxy, IDictionary? variables = null) + { + ID = id ?? 
throw new ArgumentNullException(nameof(id)); + Mode = mode; + Proxy = proxy ?? throw new ArgumentNullException(nameof(proxy)); + _variables = variables == null + ? new() + : new(variables); + + if (Proxy.Client == null) + { + throw new InvalidOperationException("Recording test proxy did not have a client defined"); + } + + int seed; + switch (Mode) + { + case RecordedTestMode.Live: + Random = new TestRandom(Mode, GetRandomSeed()); + break; + + case RecordedTestMode.Record: + seed = GetRandomSeed(); + _variables[RandomSeedVariableKey] = seed.ToString(CultureInfo.InvariantCulture); + Random = new TestRandom(Mode, seed); + break; + + case RecordedTestMode.Playback: + if (Variables.TryGetValue(RandomSeedVariableKey, out string? seedString) + && int.TryParse(seedString, NumberStyles.Integer, CultureInfo.InvariantCulture, out seed)) + { + Random = new TestRandom(Mode, seed); + } + else + { + // To maximise backwards compatibility with the recordings from the previous test framework, we'll just use a random + // seed if one wasn't set instead of failing here. Worst case, we'll get recording mismatches if this is not configured + // correctly. + Random = new TestRandom(Mode, GetRandomSeed()); + } + break; + + default: + throw new NotSupportedException("Unsupported recording mode: " + Mode); + } + } + + /// + /// Gets the unique identifier for this recording. + /// + public string ID { get; } + + /// + /// Gets the current recording mode. + /// + public RecordedTestMode Mode { get; } + + /// + /// Gets the random generator to use for this recording. Using this ensures consistent random values generated during + /// recording, as well as during playback. + /// + public TestRandom Random { get; } + + /// + /// Gets the proxy service associated with the recording. + /// + protected internal ProxyService Proxy { get; } + + /// + /// Gets any variables associated with the recording. 
+ /// + protected IReadOnlyDictionary Variables => _variables; + + /// + /// Disposes of the recording session. If you were recording, this will try to save your captured requests and + /// responses. If you were playing back, this will stop the playback session. + /// + /// Asynchronous task + public virtual ValueTask DisposeAsync() => FinishAsync(true); + + /// + /// Finishes the recording session. This will stop recording or playback. If you were recording, you can use + /// to determine whether or not captured requests and responses will be saved. + /// + /// True to save any captured requests and responses to the file specified in your + /// . False to not save. This is only used if + /// you were recording. + /// The cancellation token to use. + /// Asynchronous task + /// If the recording mode is not supported. + public async virtual ValueTask FinishAsync(bool save, CancellationToken token = default) + { + switch (Mode) + { + case RecordedTestMode.Live: + // nothing to see here, move along + break; + case RecordedTestMode.Playback: + await Proxy.Client.StopPlaybackAsync(ID, token).ConfigureAwait(false); + break; + case RecordedTestMode.Record: + await Proxy.Client.StopRecordingAsync(ID, _variables, !save, token).ConfigureAwait(false); + break; + default: + throw new NotSupportedException("The following mode is not supported: " + Mode); + } + + Proxy.ThrowOnErrors(); + } + + /// + /// Gets a recorded variable. + /// + /// The name of the variable. + /// The variable value, or null if the variable was not set. + public virtual string? GetVariable(string name) + { + return _variables.GetValueOrDefault(name); + } + + /// + /// Sets a recorded variable to a value. + /// + /// The name of the variable. + /// The value to set. + public virtual void SetVariable(string name, string value) + { + _variables[name] = value; + } + + /// + /// Gets a recorded variable, or if it was not set, creates and adds a new variable. + /// + /// The name of the variable. 
+ /// The factory used to create a value if none was previously set. + /// The already existing value, or the newly added value. + public virtual string GetOrAddVariable(string name, Func valueFactory) + { + string? value; + if (!_variables.TryGetValue(name, out value) || value == null) + { + value = valueFactory(); + SetVariable(name, value); + } + + return value; + } + + /// + /// Gets the options to use as the options for creating transport to pass to clients. This will allow the clients to + /// forward requests to the test proxy. + /// + /// The options to use. + public virtual ProxyTransportOptions GetProxyTransportOptions() + { + return new() + { + HttpEndpoint = Proxy.HttpEndpoint, + HttpsEndpoint = Proxy.HttpsEndpoint, + Mode = Mode, + RecordingId = ID, + RequestId = Random.NewGuid().ToString() + }; + } + + /// + /// Applies recording options to the current recording. + /// + /// The recording options to apply for this recording/playback session. + /// The cancellation token to use. + /// Asynchronous task + public virtual async Task ApplyOptions(TestRecordingOptions options, CancellationToken token) + { + if (options.Sanitizers.Any()) + { + await Proxy.Client.AddSanitizersAsync(options.Sanitizers, ID, token).ConfigureAwait(false); + } + + if (options.SanitizersToRemove.Any()) + { + await Proxy.Client.RemoveSanitizersAsync(options.SanitizersToRemove, ID, token).ConfigureAwait(false); + } + + if (Mode == RecordedTestMode.Playback) + { + BaseMatcher matcher = options.Matcher ?? 
new CustomMatcher() + { + CompareBodies = options.CompareBodies, + ExcludedHeaders = options.ExcludedHeaders.JoinOrNull(","), + IgnoredHeaders = options.IgnoredHeaders.JoinOrNull(","), + IgnoredQueryParameters = options.IgnoredQueryParameters.JoinOrNull(","), + }; + + await Proxy.Client.SetMatcherAsync(matcher, ID, token).ConfigureAwait(false); + + foreach (var transform in options.Transforms) + { + await Proxy.Client.AddTransformAsync(transform, ID, token).ConfigureAwait(false); + } + } + } + + private static int GetRandomSeed() + { +#if NET6_0_OR_GREATER + return RandomNumberGenerator.GetInt32(int.MaxValue); +#else + byte[] bytes = new byte[4]; + using var rng = RandomNumberGenerator.Create(); + rng.GetBytes(bytes); + return BitConverter.ToInt32(bytes, 0); +#endif + } +} diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/TestRecordingMismatchException.cs b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/TestRecordingMismatchException.cs new file mode 100644 index 000000000..3f6af0242 --- /dev/null +++ b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/TestRecordingMismatchException.cs @@ -0,0 +1,44 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using System.Runtime.Serialization; + +namespace OpenAI.TestFramework.Recording; + +/// +/// Exception thrown when the test recording does not match during playback. +/// +[Serializable] +public class TestRecordingMismatchException : Exception +{ + /// + /// Creates a new instance + /// + public TestRecordingMismatchException() + { + } + + /// + /// Creates a new instance. + /// + /// The exception message. + public TestRecordingMismatchException(string message) : base(message) + { + } + + /// + /// Creates a new instance. + /// + /// The exception message. + /// The inner exception. + public TestRecordingMismatchException(string? message, Exception? 
innerException = null) : base(message, innerException) + { + } + +#if !NET8_0_OR_GREATER + /// + protected TestRecordingMismatchException(SerializationInfo info, StreamingContext context) : base(info, context) + { + } +#endif +} diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/TestRecordingOptions.cs b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/TestRecordingOptions.cs new file mode 100644 index 000000000..de97b0d89 --- /dev/null +++ b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/TestRecordingOptions.cs @@ -0,0 +1,151 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using System.ClientModel.Primitives; +using OpenAI.TestFramework.Recording.Matchers; +using OpenAI.TestFramework.Recording.RecordingProxy; +using OpenAI.TestFramework.Recording.Sanitizers; +using OpenAI.TestFramework.Recording.Transforms; +using OpenAI.TestFramework.Utils; + +namespace OpenAI.TestFramework.Recording; + +/// +/// Options to configure a test recording. This can be used to set sanitizers to apply to the URI, headers, and/or body of a request +/// before matching, and before saving the recording. This can also be used to specify which matcher will be used to match a request +/// to a recorded one during playback. Finally this can be used to set the transforms applied to responses from the test proxy. +/// +public class TestRecordingOptions +{ + /// + /// Creates a new instance + /// + public TestRecordingOptions() + { } + + /// + /// The list of sanitizers to apply to request before matching, and before saving a recording. + /// + public IList Sanitizers { get; } = new List(); + + /// + /// Gets or sets the matcher to use. If this is unset, a custom matcher will be created based on the options specified in this class. + /// + public BaseMatcher? Matcher { get; set; } + + /// + /// The list of transforms to apply when returning a response during playback. 
+ /// + public IList Transforms { get; } = new List(); + + /// + /// The sanitizers to remove from the list of default sanitizers. More details about default sanitizers can be found here: + /// https://github.com/Azure/azure-sdk-tools/blob/main/tools/test-proxy/Azure.Sdk.Tools.TestProxy/README.md#removing-a-sanitizer. + /// + /// You can find the list of sanitizer IDs to remove in two ways: + /// + /// Sending a GET request to http://{proxy_endpoint}/Info/Active + /// Looking at the source code for the test proxy here: + /// https://github.com/Azure/azure-sdk-tools/blob/main/tools/test-proxy/Azure.Sdk.Tools.TestProxy/Common/SanitizerDictionary.cs + /// + /// + public ISet SanitizersToRemove { get; } = new HashSet() + { + // For now, we should leave the default sanitizers in place since it is better to err on the side of caution + }; + + /// + /// Query parameters that we are only interested in checking if a value is set, but don't care about the actual value set. + /// + public ISet IgnoredQueryParameters { get; } = new HashSet(); + + /// + /// Headers that we are only interested in checking if a value is set, but don't care about the actual value set. + /// + public ISet IgnoredHeaders { get; } = new HashSet() + { + "Date", + "x-ms-date", + "User-Agent", + }; + + /// + /// Headers to completely disregard when recording and matching. In other words it is as if these headers were never set. + /// + public ISet ExcludedHeaders { get; } = new HashSet() + { +#if NETFRAMEWORK + // .Net framework will add some headers not found in newer .Net versions so let's completely ignore them here. It is also + // different in how it handles setting the Content-Length header when there is no body as compared to .Net + "Connection", + "Content-Length", +#endif + }; + + /// + /// Whether or not we want to compare bodies from the request and the recorded request during playback. Default + /// is true. 
+ /// + public bool CompareBodies { get; set; } = true; + + /// + /// A function used to override if recording is enabled for a particular request. This will override other settings present + /// here. + /// + public Func? RequestOverride { get; set; } + + /// + /// Helper method to simplify sanitizing specific headers values. This will add a entry + /// to . The default replacement value will be set to . + /// + /// The keys to sanitize. + public void SanitizeHeaders(params string[] keys) + => SanitizeHeaders(Default.SanitizedValue, keys); + + /// + /// Helper method to simplify sanitizing specific headers values. This will add a entry + /// to . + /// + /// The value to replace matches with. + /// The keys to sanitize. + public virtual void SanitizeHeaders(string sanitizedValue, IEnumerable keys) + { + if (keys == null) + { + return; + } + + foreach (var key in keys) + { + Sanitizers.Add(new HeaderRegexSanitizer(key) { Value = sanitizedValue }); + } + } + + /// + /// Helper method to sanitize specific parts of a JSON request body. This will add a entry + /// to for each JSON path provided in . The default replacement value + /// will be set to . + /// + /// The JSON paths to sanitize. + public void SanitizeJsonBody(params string[] jsonPaths) + => SanitizeJsonBody(Default.SanitizedValue, jsonPaths); + + /// + /// Helper method to sanitize specific parts of a JSON request body. This will add a entry + /// to for each JSON path provided in . + /// + /// The value to replace matches with. + /// The JSON paths to sanitize. 
+ public virtual void SanitizeJsonBody(string sanitizedValue, IEnumerable jsonPaths) + { + if (jsonPaths == null) + { + return; + } + + foreach (var key in jsonPaths) + { + Sanitizers.Add(new BodyKeySanitizer(key) { Value = sanitizedValue }); + } + } +} diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/Transforms/BaseTransform.cs b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/Transforms/BaseTransform.cs new file mode 100644 index 000000000..11be5e1be --- /dev/null +++ b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/Transforms/BaseTransform.cs @@ -0,0 +1,37 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using System.Text.Json; +using System.Text.Json.Serialization; +using OpenAI.TestFramework.Utils; + +namespace OpenAI.TestFramework.Recording.Transforms; + +/// +/// Base class for test recording proxy transforms. Transforms are applied when returning a request during playback. +/// +public abstract class BaseTransform : IUtf8JsonSerializable +{ + /// + /// Creates a new instance. + /// + /// The type of this sanitizer (e.g. GeneralRegexSanitizer). + /// If the type was null. + protected BaseTransform(string type) + { + Type = type ?? throw new ArgumentNullException(nameof(Type)); + } + + /// + /// Gets the type of the sanitizer (e.g. HeaderRegexSanitizer). + /// + [JsonIgnore] + public string Type { get; } + + /// + public virtual void Write(Utf8JsonWriter writer, JsonSerializerOptions? 
options = null) + { + // By default use reflection based serialization + JsonSerializer.Serialize(writer, this, GetType(), Default.InnerRecordingJsonOptions); + } +} diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/Transforms/HeaderTransform.cs b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/Transforms/HeaderTransform.cs new file mode 100644 index 000000000..4817f84c5 --- /dev/null +++ b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Recording/Transforms/HeaderTransform.cs @@ -0,0 +1,35 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +namespace OpenAI.TestFramework.Recording.Transforms; + +/// +/// Transform applied to headers before the response is generated during recording playback. +/// +public class HeaderTransform : BaseTransform +{ + /// + /// Creates a new instance. + /// + /// The response header to set. + /// If the is null. + public HeaderTransform(string key) : base("HeaderTransform") + { + Key = key ?? throw new ArgumentNullException(nameof(key)); + } + + /// + /// Gets the header to transform. + /// + public string Key { get; } + + /// + /// Gets or sets the value to set. + /// + public string? Value { get; set; } + + /// + /// The condition to apply for this transform. If the condition is not met, no transform is performed. + /// + public Condition? Condition { get; set; } +} diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/src/SyncOnlyAttribute.cs b/.dotnet.azure/sdk/openai/tools/TestFramework/src/SyncOnlyAttribute.cs new file mode 100644 index 000000000..2d00681f9 --- /dev/null +++ b/.dotnet.azure/sdk/openai/tools/TestFramework/src/SyncOnlyAttribute.cs @@ -0,0 +1,14 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using NUnit.Framework; + +namespace OpenAI.TestFramework; + +/// +/// Attribute that can be applied to a test to indicate it only runs in synchronous mode. 
+/// +[AttributeUsage(AttributeTargets.Method, AllowMultiple = false, Inherited = true)] +public class SyncOnlyAttribute() : NUnitAttribute +{ +} diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/src/Utils/AndPreFilters.cs b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Utils/AndPreFilters.cs new file mode 100644 index 000000000..714bb78e1 --- /dev/null +++ b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Utils/AndPreFilters.cs @@ -0,0 +1,37 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using System.Reflection; +using NUnit.Framework.Interfaces; + +namespace OpenAI.TestFramework.Utils; + +/// +/// Represents a pre-filter that combines multiple pre-filters using a logical AND operation. +/// +public class AndPreFilter : IPreFilter +{ + private IEnumerable _filters; + + /// + /// Initializes a new instance. + /// + /// The pre-filters to combine. + public AndPreFilter(params IPreFilter[] filters) : this((IEnumerable)filters) + { } + + /// + /// Initializes a new instance. + /// + /// The pre-filters to combine. + public AndPreFilter(IEnumerable filters) + { + _filters = filters?.Where(p => p != null) ?? Array.Empty(); + } + + /// + public bool IsMatch(Type type) => _filters.All(p => p.IsMatch(type)); + + /// + public bool IsMatch(Type type, MethodInfo method) => _filters.All(p => p.IsMatch(type, method)); +} diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/src/Utils/AssemblyHelper.cs b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Utils/AssemblyHelper.cs new file mode 100644 index 000000000..ae11a0eed --- /dev/null +++ b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Utils/AssemblyHelper.cs @@ -0,0 +1,100 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +using System.Reflection; +using System.Runtime.InteropServices; + +namespace OpenAI.TestFramework.Utils +{ + /// + /// Assembly related helper methods + /// + public static class AssemblyHelper + { + /// + /// Gets the value of the named assembly metadata attribute for the assembly where the is defined. + /// + /// The type whose assembly we want to read from. + /// The name of the metadata assembly attribute to read. + /// The value of the metadata attribute, or null if none was specified or could be found. + public static string? GetAssemblyMetadata(string name) + => GetAssemblyMetadata(typeof(T).Assembly, name); + + /// + /// Gets the value of the named assembly metadata attribute from assembly. + /// + /// The assembly to read the metadata attribute from + /// The name of the metadata assembly attribute to read. + /// The value of the metadata attribute, or null if none was specified or could be found. + public static string? GetAssemblyMetadata(this Assembly assembly, string name) + { + return assembly + ?.GetCustomAttributes() + .FirstOrDefault(a => a.Key == name && !string.IsNullOrWhiteSpace(a.Value)) + ?.Value; + } + + /// + /// Gets the root source directory for the assembly that defines the type . + /// + /// The type whose assembly source path we want to read. + /// The directory containing the original source path, or null if it was not set or did not exist. + public static DirectoryInfo? GetAssemblySourceDir() + => GetAssemblySourceDir(typeof(T).Assembly); + + /// + /// Gets the source path for the assembly. In order for this to work, you will need to set the assembly metadata attribute + /// your project file as follows: + /// + /// <ItemGroup> + /// <AssemblyAttribute Include="System.Reflection.AssemblyMetadataAttribute"> + /// <_Parameter1>SourcePath</_Parameter1> + /// <_Parameter2>$(MSBuildProjectDirectory)</_Parameter2> + /// </AssemblyAttribute> + /// </ItemGroup> + /// + /// + /// The assembly whose source path we want to find. 
+ /// The directory containing the original source path, or null if it was not set or did not exist. + public static DirectoryInfo? GetAssemblySourceDir(this Assembly assembly) + { + string? sourcePath = assembly.GetAssemblyMetadata("SourcePath"); + if (sourcePath == null) + { + return null; + } + + DirectoryInfo dir = new(sourcePath); + return dir.Exists + ? dir + : null; + } + + /// + /// Finds the dotnet executable path for the current system. It does this by reading the DOTNET_INSTALL_DIR environment variable + /// first, and then inspecting all folders in the current PATH environment variable. + /// + /// The path to the found dotnet executable, or null if none could be found. + public static FileInfo? GetDotnetExecutable() + { + string dotnetExeName = "dotnet"; + if (RuntimeInformation.IsOSPlatform(OSPlatform.Windows)) + { + dotnetExeName += ".exe"; + } + + List searchDirs = + [ + Environment.GetEnvironmentVariable("DOTNET_INSTALL_DIR"), + ..Environment.GetEnvironmentVariable("PATH") + ?.Split(Path.PathSeparator) + ?? Array.Empty() + ]; + + return searchDirs + .Where(dir => !string.IsNullOrWhiteSpace(dir)) + .Select(dir => new FileInfo(Path.Combine(dir!, dotnetExeName))) + .FirstOrDefault(file => file.Exists); + } + } +} diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/src/Utils/Default.cs b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Utils/Default.cs new file mode 100644 index 000000000..5774a4774 --- /dev/null +++ b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Utils/Default.cs @@ -0,0 +1,108 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using System; +using System.Text.Json; +using System.Text.Json.Serialization; + +namespace OpenAI.TestFramework.Utils; + +/// +/// Options used for various recordings. +/// +public static class Default +{ + private static JsonSerializerOptions? _recordingJsonOptions; + private static JsonSerializerOptions? 
_innerRecordingJsonOptions; + private static JsonSerializerOptions? _testProxyJsonOptions; + private static TimeSpan? _testProxyWaitTime; + private static TimeSpan? _requestRetryDelay; + private static TimeSpan? _debuggerTestTimeout; + private static TimeSpan? _defaultTestTimeout; + + /// + /// Gets the default value to replace matches with while sanitizing. + /// + public const string SanitizedValue = "Sanitized"; + + /// + /// Gets the JSON serialization options to use for recording sanitizers, matchers, and transforms child instances. + /// + public static JsonSerializerOptions InnerRecordingJsonOptions => _innerRecordingJsonOptions ??= new() + { + PropertyNameCaseInsensitive = true, + PropertyNamingPolicy = JsonNamingPolicy.CamelCase, + WriteIndented = true, +#if NET + DefaultIgnoreCondition = JsonIgnoreCondition.WhenWritingNull, +#else + IgnoreNullValues = true, +#endif + }; + + /// + /// Gets the JSON serialization options to use for recording sanitizers, matchers, and transforms. + /// + public static JsonSerializerOptions RecordingJsonOptions + { + get + { + if (_recordingJsonOptions == null) + { + _recordingJsonOptions = InnerRecordingJsonOptions.Clone(); + _recordingJsonOptions.Converters.Add( + +#if NET6_0 + // .Net 6.0 seems to have a weird bug here. This is not needed for .Net framework, nor .Net 7+ + new Utf8JsonSerializableConverterFactory() +#else + new Utf8JsonSerializableConverter() +#endif + ); + } + + return _recordingJsonOptions; + } + } + + + /// + /// Gets the JSON serialization options to use for the test proxy + /// + public static JsonSerializerOptions TestProxyJsonOptions => _testProxyJsonOptions ??= new() + { + PropertyNameCaseInsensitive = true, + WriteIndented = true, +#if NET + DefaultIgnoreCondition = JsonIgnoreCondition.WhenWritingNull, +#else + IgnoreNullValues = true, +#endif + }; + + /// + /// The default maximum amount of time to wait to for the test proxy operations to finish (e.g. 
start up + /// and configuration, or saving a recording and teardown). + /// + public static TimeSpan TestProxyWaitTime => _testProxyWaitTime ??= TimeSpan.FromMinutes(2); + + /// + /// Gets the maximum number of times to retry requests + /// + public const int MaxRequestRetries = 3; + + /// + /// The amount of time to wait between requests. + /// + public static TimeSpan RequestRetryDelay => _requestRetryDelay ??= TimeSpan.FromSeconds(0.8); + + /// + /// The amount of time to wait when the debugger is attached. This is much higher than normal to allow for more time while debugging. + /// + public static TimeSpan DebuggerAttachedTestTimeout => _debuggerTestTimeout ??= TimeSpan.FromMinutes(15); + + /// + /// The default test timeout. + /// + public static TimeSpan TestTimeout => _defaultTestTimeout ??= TimeSpan.FromSeconds(15); +} diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/src/Utils/Extensions.cs b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Utils/Extensions.cs new file mode 100644 index 000000000..437e8dc05 --- /dev/null +++ b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Utils/Extensions.cs @@ -0,0 +1,414 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Diagnostics; + +namespace OpenAI.TestFramework.Utils; + +/// +/// String related extension methods. +/// +public static class StringExtensions +{ + /// + /// Ensures that a string ends with a specified suffix. + /// + /// The string value. + /// The suffix to check for. + /// The string comparison type. Default is . + /// The original string if it ended in the suffix, or a new string value with the suffix appended. 
+ public static string EnsureEndsWith(this string value, string suffix, StringComparison comparison = StringComparison.Ordinal) + { + if (value == null) + { + return null!; + } + + if (value.EndsWith(suffix, comparison)) + { + return value; + } + + return value + suffix; + } + + /// + /// Ensures that a string ends with a specified suffix. + /// + /// The string value. + /// The suffix to check for. + /// The string comparison type. Default is . + /// The original string if it ended in the suffix, or a new string value with the suffix appended. + public static string EnsureEndsWith(this string value, char suffix, StringComparison comparison = StringComparison.Ordinal) + => EnsureEndsWith(value, suffix.ToString(), comparison); +} + +/// +/// Extension methods for System.ClientModel types. +/// +public static class ScmExtensions +{ + /// + /// Gets the first value associated with the specified header name from the pipeline request headers. + /// + /// The pipeline request headers. + /// The name of the header. + /// The first non-empty value associated with the specified header name, or null if the header is not found or has no non-empty values. + public static string? GetFirstOrDefault(this PipelineRequestHeaders headers, string name) + { + if (headers?.TryGetValues(name, out IEnumerable? values) == true) + { + return values?.FirstOrDefault(v => !string.IsNullOrWhiteSpace(v)); + } + + return null; + } + + /// + /// Gets the first value associated with the specified header name from the pipeline response headers. + /// + /// The pipeline response headers. + /// The name of the header. + /// The first non-empty value associated with the specified header name, or null if the header is not found or has no non-empty values. + public static string? GetFirstOrDefault(this PipelineResponseHeaders headers, string name) + { + if (headers?.TryGetValues(name, out IEnumerable? 
values) == true) + { + return values?.FirstOrDefault(v => !string.IsNullOrWhiteSpace(v)); + } + + return null; + } +} + +/// +/// Extensions for collections +/// +public static class CollectionExtensions +{ + /// + /// Adds the elements to a collection. + /// + /// The type of the elements in the collection. + /// The collection to add elements to. + /// The items to add. + public static void AddRange(this ICollection collection, IEnumerable itemsToAdd) + { + foreach (T item in itemsToAdd) + { + collection.Add(item); + } + } + + /// + /// Joins the elements of a collection into a single string using the specified separator. + /// Returns null if the collection is null or empty. + /// + /// The collection of strings to join. + /// The separator string. + /// A string that consists of the elements of the collection joined by the separator, or null if the collection is null or empty. + public static string? JoinOrNull(this IEnumerable values, string separator) + { + if (values == null || !values.Any()) + { + return null; + } + + return string.Join(separator, values); + } + +#if NETFRAMEWORK + /// + /// Gets the value associated with the specified key from the dictionary, or returns the default value if the key is not found. + /// + /// The type of the keys in the dictionary. + /// The type of the values in the dictionary. + /// The dictionary. + /// The key to locate. + /// The value associated with the specified key, or the default value if the key is not found. + public static TVal? GetValueOrDefault(this IReadOnlyDictionary dict, TKey key) + => GetValueOrDefault(dict, key, default!); + + /// + /// Gets the value associated with the specified key from the dictionary, or returns the specified default value if the key is not found. + /// + /// The type of the keys in the dictionary. + /// The type of the values in the dictionary. + /// The dictionary. + /// The key to locate. + /// The default value to return if the key is not found. 
+ /// The value associated with the specified key, or the specified default value if the key is not found. + public static TVal GetValueOrDefault(this IReadOnlyDictionary dict, TKey key, TVal defaultValue) + { + if (dict?.TryGetValue(key, out TVal? value) == true) + { + return value; + } + + return defaultValue; + } +#endif + + /// + /// Gets the value associated with the specified key from the dictionary, or returns the default value if the key is not found. + /// + /// The type of the keys in the dictionary. + /// The type of the values in the dictionary. + /// The dictionary. + /// The key to locate. + /// The value associated with the specified key, or the default value if the key is not found. + public static TVal? GetValueOrDefault(this Dictionary dict, TKey key) where TKey : notnull + => GetValueOrDefault((IDictionary)dict, key, default!); + + /// + /// Gets the value associated with the specified key from the dictionary, or returns the specified default value if the key is not found. + /// + /// The type of the keys in the dictionary. + /// The type of the values in the dictionary. + /// The dictionary. + /// The key to locate. + /// The default value to return if the key is not found. + /// The value associated with the specified key, or the specified default value if the key is not found. + public static TVal GetValueOrDefault(this Dictionary dict, TKey key, TVal defaultValue) where TKey : notnull + => GetValueOrDefault((IDictionary)dict, key, defaultValue); + + /// + /// Gets the value associated with the specified key from the sorted dictionary, or returns the default value if the key is not found. + /// + /// The type of the keys in the sorted dictionary. + /// The type of the values in the sorted dictionary. + /// The sorted dictionary. + /// The key to locate. + /// The value associated with the specified key, or the default value if the key is not found. + public static TVal? 
GetValueOrDefault(this SortedDictionary dict, TKey key) where TKey : notnull + => GetValueOrDefault((IDictionary)dict, key, default!); + + /// + /// Gets the value associated with the specified key from the sorted dictionary, or returns the specified default value if the key is not found. + /// + /// The type of the keys in the sorted dictionary. + /// The type of the values in the sorted dictionary. + /// The sorted dictionary. + /// The key to locate. + /// The default value to return if the key is not found. + /// The value associated with the specified key, or the specified default value if the key is not found. + public static TVal GetValueOrDefault(this SortedDictionary dict, TKey key, TVal defaultValue) where TKey : notnull + => GetValueOrDefault((IDictionary)dict, key, defaultValue); + + /// + /// Gets the value associated with the specified key from the dictionary, or returns the default value if the key is not found. + /// + /// The type of the keys in the dictionary. + /// The type of the values in the dictionary. + /// The dictionary. + /// The key to locate. + /// The value associated with the specified key, or the default value if the key is not found. + public static TVal? GetValueOrDefault(this IDictionary dict, TKey key) + => GetValueOrDefault(dict, key, default!); + + /// + /// Gets the value associated with the specified key from the dictionary, or returns the specified default value if the key is not found. + /// + /// The type of the keys in the dictionary. + /// The type of the values in the dictionary. + /// The dictionary. + /// The key to locate. + /// The default value to return if the key is not found. + /// The value associated with the specified key, or the specified default value if the key is not found. + public static TVal GetValueOrDefault(this IDictionary dict, TKey key, TVal defaultValue) + { + if (dict?.TryGetValue(key, out TVal? 
value) == true) + { + return value; + } + + return defaultValue; + } + + /// + /// Gets the value associated with the specified key from the dictionary, or creates and adds a new value if the key did not exist. + /// + /// The type of the keys in the dictionary. + /// The type of the values in the dictionary. + /// The dictionary. + /// The key to locate. + /// The function used to create a value for the key if it is not found in the dictionary. + /// The value associated with the specified key, or the value created by the if the key is not found. + public static TValue GetOrAdd(this IDictionary dictionary, TKey key, Func valueFactory) + { + if (dictionary == null) + { + throw new ArgumentNullException(nameof(dictionary)); + } + + if (!dictionary.TryGetValue(key, out TValue? value)) + { + value = valueFactory(key); + dictionary[key] = value; + } + + return value!; + } + + /// + /// Asynchronously returns the first element of a sequence. + /// is found. + /// + /// The type of the elements in the sequence. + /// The sequence to search. + /// A cancellation token to cancel the operation. + /// Asynchronous task. + public static ValueTask FirstOrDefaultAsync(this IAsyncEnumerable enumerable, CancellationToken token = default) + => FirstOrDefaultAsync(enumerable, _ => true); + + /// + /// Asynchronously returns the first element of a sequence that satisfies a specified condition or a default value if no such element + /// is found. + /// + /// The type of the elements in the sequence. + /// The sequence to search. + /// A function to test each element for a condition. + /// A cancellation token to cancel the operation. + /// Asynchronous task. 
+ public static async ValueTask FirstOrDefaultAsync(this IAsyncEnumerable enumerable, Predicate predicate, CancellationToken token = default) + { + await foreach (T item in enumerable.WithCancellation(token)) + { + if (predicate(item)) + { + return item; + } + } + + return default!; + } + + /// + /// Converts an to a asynchronously. + /// + /// The type of the elements in the enumerable. + /// The to convert. + /// The cancellation token. + /// Asynchronous task to do the conversion. + public static async Task> ToListAsync(this IAsyncEnumerable asyncEnumerable, CancellationToken token = default) + { + List list = new List(); + await foreach (T item in asyncEnumerable.WithCancellation(token)) + { + list.Add(item); + } + return list; + } + + /// + /// Converts an async enumerable of pages to a asynchronously. + /// + /// The type of the elements in the enumerable. + /// The to convert. + /// The cancellation token. + /// Asynchronous task to do the conversion. + public static async Task> ToListAsync(this IAsyncEnumerable> pageAsyncEnumerable, CancellationToken token = default) + { + List list = new List(); + await foreach(PageResult page in pageAsyncEnumerable.WithCancellation(token)) + { + list.AddRange(page.Values); + } + return list; + } +} + +/// +/// Helpers for working with paths. +/// +public static class PathHelpers +{ + /// + /// Create a relative path from one path to another. Paths will be resolved before calculating the difference. + /// + /// The source path the output should be relative to. This path is always considered to be a directory. + /// The destination path. + /// The relative path or if the paths don't share the same root. 
+ public static string GetRelativePath(string relativeTo, string path) + { + +#if NET + return Path.GetRelativePath(relativeTo, path); +#else + relativeTo = Path.GetFullPath(relativeTo).EnsureEndsWith(Path.DirectorySeparatorChar); + path = Path.GetFullPath(path).EnsureEndsWith(Path.DirectorySeparatorChar); + + Uri relativeToUri = new Uri(relativeTo); + Uri pathUri = new Uri(path); + + if (relativeToUri.Scheme != pathUri.Scheme) + { + return path; + } + + Uri relative = relativeToUri.MakeRelativeUri(pathUri); + return Uri.UnescapeDataString(relative.ToString()) + .Replace('/', '\\'); +#endif + } +} + + +/// +/// Extensions for types. +/// +public static class TypeExtensions +{ + /// + /// Determines whether the specified type either implements the open generic type specified, + /// or inherits from the open generic type specified. + /// + /// The type to inspect. + /// The open generic type. + /// The arguments of the closed generic type. + /// True if the type implements, or inherits, or is a closed version of the open type. + [DebuggerStepThrough] + public static bool IsClosedGenericOf(this Type type, Type openGeneric, out Type[] closedTypeArguments) + { + Type? closedType = null; + + if (openGeneric.IsInterface) + { + closedType = type.GetInterfaces() + .FirstOrDefault(iType => IsAssignableToOpen(iType, openGeneric)); + } + + if (closedType == null) + { + for (Type? current = type; current != null && closedType == null; current = current.BaseType) + { + if (IsAssignableToOpen(current, openGeneric)) + { + closedType = current; + } + } + } + + closedTypeArguments = closedType?.GetGenericArguments() ?? Array.Empty(); + return closedType != null; + } + + /// + /// Determines if the type is or inherits from the open generic type. + /// + /// The type. + /// The open generic type. + /// True if the open generic type could be assigned from the type. 
+ [DebuggerStepThrough] + public static bool IsAssignableToOpen(this Type type, Type openGeneric) + { + if (!type.IsGenericType || !type.IsConstructedGenericType) + { + return false; + } + + return openGeneric.IsAssignableFrom(type.GetGenericTypeDefinition()); + } +} diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/src/Utils/IUtf8JsonSerializable.cs b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Utils/IUtf8JsonSerializable.cs new file mode 100644 index 000000000..7b81c3532 --- /dev/null +++ b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Utils/IUtf8JsonSerializable.cs @@ -0,0 +1,19 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using System.Text.Json; + +namespace OpenAI.TestFramework.Utils; + +/// +/// Interface applied to types that can be serialized to JSON. +/// +public interface IUtf8JsonSerializable +{ + /// + /// Writes this instance as JSON to the writer. + /// + /// The writer to write to. + /// The options to use when writing. + void Write(Utf8JsonWriter writer, JsonSerializerOptions? options = null); +} diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/src/Utils/JsonHelpers.cs b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Utils/JsonHelpers.cs new file mode 100644 index 000000000..ed6752ca4 --- /dev/null +++ b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Utils/JsonHelpers.cs @@ -0,0 +1,143 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using System.Text.Json; +using System.Text.Json.Serialization; + +namespace OpenAI.TestFramework.Utils; + +public static class JsonHelpers +{ + /// + /// Serializes the specified data to a stream using as a UTF-8 encoded JSON text. + /// + /// The type of the data to serialize. + /// The stream to write the serialized data to. + /// The data to serialize. + /// (Optional) Options to use when serializing. + public static void Serialize(Stream stream, T data, JsonSerializerOptions? 
options = null) + { +#if NETFRAMEWORK + using (Utf8JsonWriter writer = new(stream)) + { + JsonSerializer.Serialize(writer, data, options); + writer.Flush(); + } +#else + JsonSerializer.Serialize(stream, data, options); +#endif + } + + /// + /// Deserializes UTF-8 encoded JSON text from a stream. + /// + /// The type of the data to deserialize. + /// The stream to read the serialized data from. + /// (Optional) Options to use when deserializing. + /// The deserialized data. + public static T? Deserialize(Stream stream, JsonSerializerOptions? options = null) + { +#if NETFRAMEWORK + // For now let's keep it simple and load entire JSON bytes into memory + using MemoryStream buffer = new(); + stream.CopyTo(buffer); + + ReadOnlySpan jsonBytes = buffer.GetBuffer().AsSpan(0, (int)buffer.Length); + return JsonSerializer.Deserialize(jsonBytes, options); +#else + return JsonSerializer.Deserialize(stream, options); +#endif + } + +#if NET6_0_OR_GREATER + // .Net 6 and newer already have the extension method we need defined in JsonSerializer +#else + // TODO FIXME once we move to newer versions of System.Text.Json we can directly use the + // JsonSerializer extension method for elements + public static T? Deserialize(this JsonElement element, JsonSerializerOptions? options = null) + { + using MemoryStream stream = new(); + using Utf8JsonWriter writer = new(stream, new() + { + Encoder = System.Text.Encodings.Web.JavaScriptEncoder.UnsafeRelaxedJsonEscaping, + Indented = false, + SkipValidation = true + }); + element.WriteTo(writer); + writer.Flush(); + + stream.Seek(0, SeekOrigin.Begin); + if (((ulong)stream.Length & 0xffffffff00000000) != 0ul) + { + throw new ArgumentOutOfRangeException("JsonElement is too large"); + } + + ReadOnlySpan span = new(stream.GetBuffer(), 0, (int)stream.Length); + return JsonSerializer.Deserialize(span, options); + } +#endif + + /// + /// Serializes a value to a JsonElement. + /// + /// Type of the data to serialize. + /// The value to serialize. 
+ /// (Optional) Options to use when serializing. + /// The serialized value as a JsonElement. + public static JsonElement SerializeToElement(T value, JsonSerializerOptions? options = null) + { +#if NET6_0_OR_GREATER + return JsonSerializer.SerializeToElement(value, options); +#else + using MemoryStream stream = new(); + Serialize(stream, value, options); + stream.Seek(0, SeekOrigin.Begin); + using JsonDocument doc = JsonDocument.Parse(stream); + return doc.RootElement.Clone(); +#endif + } + + /// + /// Creates a clone of the specified JSON serializer options. + /// + /// The JSON serializer options to clone. + /// (Optional) Filter to apply for selecting specific converters to include in the cloned options. + /// A clone of the JSON serializer options. + public static JsonSerializerOptions Clone(this JsonSerializerOptions options, Predicate? converterFilter = null) + { +#if NET + JsonSerializerOptions cloned = new JsonSerializerOptions(options); + if (converterFilter != null) + { + cloned.Converters.Clear(); + foreach (var converter in options.Converters.Where(c => converterFilter(c))) + { + cloned.Converters.Add(converter); + } + } + + return cloned; +#else + JsonSerializerOptions clone = new() + { + AllowTrailingCommas = options.AllowTrailingCommas, + DefaultBufferSize = options.DefaultBufferSize, + DictionaryKeyPolicy = options.DictionaryKeyPolicy, + Encoder = options.Encoder, + IgnoreNullValues = options.IgnoreNullValues, + IgnoreReadOnlyProperties = options.IgnoreReadOnlyProperties, + MaxDepth = options.MaxDepth, + PropertyNameCaseInsensitive = options.PropertyNameCaseInsensitive, + PropertyNamingPolicy = options.PropertyNamingPolicy, + ReadCommentHandling = options.ReadCommentHandling, + WriteIndented = options.WriteIndented, + }; + + foreach (var converter in options.Converters.Where(c => converterFilter?.Invoke(c) ??
true)) + { + clone.Converters.Add(converter); + } + + return clone; +#endif + } +} diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/src/Utils/PropertyDelegate.cs b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Utils/PropertyDelegate.cs new file mode 100644 index 000000000..3c2f467cf --- /dev/null +++ b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Utils/PropertyDelegate.cs @@ -0,0 +1,49 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +namespace OpenAI.TestFramework.Utils; + +/// +/// Represents a delegate for getting and setting property values. +/// +/// The type of the property value. +public struct PropertyDelegate +{ + private Func? _getter; + private Action? _setter; + + /// + /// Initializes a new instance of the struct. + /// + /// The delegate used to get the property value. + /// The delegate used to set the property value. + public PropertyDelegate(Func getter, Action setter) + { + _getter = getter ?? throw new ArgumentNullException(nameof(getter)); + _setter = setter ?? throw new ArgumentNullException(nameof(setter)); + } + + /// + /// Gets the value of the property. + /// + /// The value of the property. + public TVal GetValue() + { + if (_getter != null) + return _getter(); + else + throw new InvalidOperationException("No getter was set"); + } + + /// + /// Sets the value of the property. + /// + /// The value to set. + public void SetValue(TVal val) + { + if (_setter != null) + _setter(val); + else + throw new InvalidOperationException("No setter was set"); + } +} diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/src/Utils/SyncAsyncPreFilter.cs b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Utils/SyncAsyncPreFilter.cs new file mode 100644 index 000000000..a2c32fc1a --- /dev/null +++ b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Utils/SyncAsyncPreFilter.cs @@ -0,0 +1,41 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. + +using System.Reflection; +using NUnit.Framework.Interfaces; + +namespace OpenAI.TestFramework.Utils +{ + /// + /// Filter to exclude sync only or async only tests in the appropriate test run. + /// + public class SyncAsyncPreFilter : IPreFilter + { + private bool _isAsync; + + /// + /// Creates a new instance. + /// + /// True to filter for an async test run, false to filter for sync test run. + public SyncAsyncPreFilter(bool isAsync) + { + _isAsync = isAsync; + } + + /// + public bool IsMatch(Type type) + => type.GetCustomAttribute() != null; + + /// + public bool IsMatch(Type type, MethodInfo method) + { + if (!IsMatch(type)) + { + return false; + } + + return _isAsync && method.GetCustomAttribute() == null + || !_isAsync && method.GetCustomAttribute() == null; + } + } +} diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/src/Utils/TestClientRetryPolicy.cs b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Utils/TestClientRetryPolicy.cs new file mode 100644 index 000000000..517ff5576 --- /dev/null +++ b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Utils/TestClientRetryPolicy.cs @@ -0,0 +1,84 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using System.ClientModel.Primitives; +using System.Diagnostics; +using System.Reflection; + +namespace OpenAI.TestFramework.Utils; + +/// +/// Represents a retry policy to be used when testing clients. +/// +public class TestClientRetryPolicy : ClientRetryPolicy +{ + private Func _getRetries; + + /// + /// Initializes a new instance of the class. + /// + /// The maximum number of retries. + /// The delay between retries. + /// Indicates whether the delay should be exponential. + public TestClientRetryPolicy(int maxRetries = Utils.Default.MaxRequestRetries, TimeSpan? delay = null, bool exponentialDelay = false) + : base(maxRetries) + { + MaxRetries = maxRetries; + Delay = delay ??
Utils.Default.RequestRetryDelay; + IsExponentialDelay = exponentialDelay; + + // Of course, even reading the number of retries property on the PipelineMessage is internal only. + // So reflection it is + _getRetries = (Func) + (typeof(PipelineMessage).GetProperty("RetryCount", BindingFlags.NonPublic | BindingFlags.Public | BindingFlags.Instance) + ?.GetGetMethod(true) + ?.CreateDelegate(typeof(Func)) + ?? throw new InvalidOperationException("Failed to get RetryCount property")); + } + + /// + /// Gets the maximum number of retries. + /// + public int MaxRetries { get; } + + /// + /// Gets the delay between retries. + /// + public TimeSpan Delay { get; } + + /// + /// Gets a value indicating whether the delay should be exponential. + /// + public bool IsExponentialDelay { get; } + + /// + protected override TimeSpan GetNextDelay(PipelineMessage message, int tryCount) + { + TimeSpan delay = IsExponentialDelay + ? TimeSpan.FromMilliseconds((1 << tryCount - 1) * Delay.TotalMilliseconds) + : Delay; + + return delay; + } + + /// + protected override bool ShouldRetry(PipelineMessage message, Exception? exception) + { + if (_getRetries(message) >= MaxRetries) + { + return false; + } + + if (!message.ResponseClassifier.TryClassify(message, exception, out bool isRetriable) + && !PipelineMessageClassifier.Default.TryClassify(message, exception, out isRetriable)) + { + Debug.Assert(false, "Failed to classify message"); + } + + return isRetriable; + } + + /// + protected override ValueTask ShouldRetryAsync(PipelineMessage message, Exception? exception) + => new ValueTask(ShouldRetry(message, exception)); +} diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/src/Utils/TestPipelinePolicy.cs b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Utils/TestPipelinePolicy.cs new file mode 100644 index 000000000..bc004f1bc --- /dev/null +++ b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Utils/TestPipelinePolicy.cs @@ -0,0 +1,57 @@ +// Copyright (c) Microsoft Corporation. 
All rights reserved. +// Licensed under the MIT License. + +using System.ClientModel.Primitives; + +namespace OpenAI.TestFramework.Utils; + +/// +/// A pipeline policy that raises events before a request sent, and after response has been received. +/// +public class TestPipelinePolicy() : PipelinePolicy() +{ + /// + /// Creates a new instance. This will instantiate the and + /// events based on and respectively. + /// + /// (Optional) Action to perform before sending a request. + /// (Optional) Action to perform after a response is received. + public TestPipelinePolicy(Action? requestAction, Action? responseAction) : this() + { + if (requestAction != null) BeforeRequest += (s, e) => requestAction(e); + + if (responseAction != null) AfterResponse += (s, e) => responseAction(e); + } + + /// + /// Event raised before a request is sent. + /// + public event EventHandler? BeforeRequest; + + /// + /// Event raised after a response has been received. + /// + public event EventHandler? AfterResponse; + + /// + public override void Process(PipelineMessage message, IReadOnlyList pipeline, int currentIndex) + { + BeforeRequest?.Invoke(this, message.Request); + ProcessNext(message, pipeline, currentIndex); + if (message.Response != null) + { + AfterResponse?.Invoke(this, message.Response); + } + } + + /// + public override async ValueTask ProcessAsync(PipelineMessage message, IReadOnlyList pipeline, int currentIndex) + { + BeforeRequest?.Invoke(this, message.Request); + await ProcessNextAsync(message, pipeline, currentIndex).ConfigureAwait(false); + if (message.Response != null) + { + AfterResponse?.Invoke(this, message.Response); + } + } +} diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/src/Utils/TimespanToMillisecondConverter.cs b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Utils/TimespanToMillisecondConverter.cs new file mode 100644 index 000000000..7045f956b --- /dev/null +++ 
b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Utils/TimespanToMillisecondConverter.cs @@ -0,0 +1,65 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using System.Globalization; +using System.Text.Json; +using System.Text.Json.Serialization; + +namespace OpenAI.TestFramework.Utils; + +/// +/// Converter for TimeSpans to/from integer millisecond values in JSON. +/// +public class TimespanToMillisecondConverter : JsonConverter +{ + /// + /// Reads a value from JSON. + /// + /// The to read from. + /// The type of the object to convert. + /// The serializer options. + /// The deserialized value. + public override TimeSpan? Read(ref Utf8JsonReader reader, Type typeToConvert, JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case JsonTokenType.Null: + return null; + + case JsonTokenType.Number: + return TimeSpan.FromMilliseconds(reader.GetInt32()); + + case JsonTokenType.String: + string? strValue = reader.GetString(); + if (int.TryParse(strValue, NumberStyles.Integer, CultureInfo.InvariantCulture, out int milliseconds)) + { + return TimeSpan.FromMilliseconds(milliseconds); + } + else + { + throw new JsonException("Invalid millisecond value: " + strValue); + } + + default: + throw new JsonException($"Don't know how to parse '{reader.TokenType}' as a millisecond value"); + } + } + + /// + /// Writes a value to JSON. + /// + /// The to write to. + /// The value to write. + /// The serializer options. + public override void Write(Utf8JsonWriter writer, TimeSpan? 
value, JsonSerializerOptions options) + { + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteNumberValue((int)Math.Ceiling(value.Value.TotalMilliseconds)); + } + } +} diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/src/Utils/Utf8JsonSerializableConverter.cs b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Utils/Utf8JsonSerializableConverter.cs new file mode 100644 index 000000000..3613f66c0 --- /dev/null +++ b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Utils/Utf8JsonSerializableConverter.cs @@ -0,0 +1,55 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using System.Text.Json; +using System.Text.Json.Serialization; + +namespace OpenAI.TestFramework.Utils; + +/// +/// Converter for types that implement . +/// +public class Utf8JsonSerializableConverter : JsonConverter +{ + private static Utf8JsonSerializableConverter? s_instance; + + /// + /// Gets the shared instance of the converter. + /// + public static Utf8JsonSerializableConverter Instance => s_instance ??= new(); + + /// + public override bool CanConvert(Type typeToConvert) + => typeof(IUtf8JsonSerializable).IsAssignableFrom(typeToConvert); + + /// + public override IUtf8JsonSerializable Read(ref Utf8JsonReader reader, Type typeToConvert, JsonSerializerOptions options) + => throw new NotSupportedException("Only writing JSON is supported"); + + /// + public override void Write(Utf8JsonWriter writer, IUtf8JsonSerializable value, JsonSerializerOptions options) + => value.Write(writer); +} + +#if NET6_0 +/// +/// .Net 6.0 has some odd quirks and is particularly pedantic with converters so directly using Utf8JsonSerializableConverter would +/// result in an InvalidCastException. The work around is to use a converter factory. Thankfully, neither .Net Framework, nor .Net 7+ +/// exhibit this behavior. 
+/// +public class Utf8JsonSerializableConverterFactory : JsonConverterFactory +{ + public override bool CanConvert(Type typeToConvert) => typeof(IUtf8JsonSerializable).IsAssignableFrom(typeToConvert); + public override JsonConverter? CreateConverter(Type typeToConvert, JsonSerializerOptions options) + => (JsonConverter?)Activator.CreateInstance(typeof(InnerConverter<>).MakeGenericType(typeToConvert)); + + private class InnerConverter : JsonConverter where T : IUtf8JsonSerializable + { + public override T Read(ref Utf8JsonReader reader, Type typeToConvert, JsonSerializerOptions options) + => (T)Utf8JsonSerializableConverter.Instance.Read(ref reader, typeToConvert, options); + + public override void Write(Utf8JsonWriter writer, T value, JsonSerializerOptions options) + => Utf8JsonSerializableConverter.Instance.Write(writer, value, options); + } +} +#endif diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/src/Utils/WindowsJob.cs b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Utils/WindowsJob.cs new file mode 100644 index 000000000..a5eb2570b --- /dev/null +++ b/.dotnet.azure/sdk/openai/tools/TestFramework/src/Utils/WindowsJob.cs @@ -0,0 +1,208 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using System.Diagnostics; +using System.Runtime.ConstrainedExecution; +using System.Runtime.InteropServices; +using System.Security; + +namespace OpenAI.TestFramework.Utils.Processes; + +/// +/// A job provides a way to link several processes together on Windows. In this way, they can all be +/// terminated by calling the method. The OS will also automatically terminate +/// the linked processes if the owner process terminates. +/// +public class WindowsJob : IDisposable +{ + private IntPtr _jobHandle; + private int _disposed; + + /// + /// Creates a new job + /// + /// (Optional) The name to associate + public WindowsJob(string? 
name = null) + { + if (!RuntimeInformation.IsOSPlatform(OSPlatform.Windows)) + { + throw new NotSupportedException("This is only supported on Windows platforms"); + } + + var securityAttributes = new SECURITY_ATTRIBUTES() + { + nLength = (uint)Marshal.SizeOf(typeof(SECURITY_ATTRIBUTES)), + lpSecurityDescriptor = IntPtr.Zero, + bInheritHandle = false + }; + + // Create the job handle + _jobHandle = CreateJobObject(ref securityAttributes, name); + if (_jobHandle == IntPtr.Zero) + { + throw new COMException("Failed to create job", Marshal.GetLastWin32Error()); + } + + // Set the job state so that all associated handles are closed + var extendedInfo = new JOBOBJECT_EXTENDED_LIMIT_INFORMATION() + { + BasicLimitInformation = new JOBOBJECT_BASIC_LIMIT_INFORMATION() + { + LimitFlags = JobObjectLimits.LIMIT_KILL_ON_JOB_CLOSE + } + }; + + int length = Marshal.SizeOf(typeof(JOBOBJECT_EXTENDED_LIMIT_INFORMATION)); + IntPtr ptr = IntPtr.Zero; + try + { + ptr = Marshal.AllocHGlobal(length); + Marshal.StructureToPtr(extendedInfo, ptr, false); + + bool success = SetInformationJobObject( + _jobHandle, + JOBOBJECTINFOCLASS.JobObjectExtendedLimitInformation, + ptr, + (uint)length); + + if (!success) + { + throw new COMException("Failed to set the job extended information", Marshal.GetLastWin32Error()); + } + } + finally + { + Marshal.FreeHGlobal(ptr); + } + } + + /// + /// Adds a process to the job + /// + /// The process to add + public void Add(Process process) + { + if (process == null) + { + throw new ArgumentNullException(nameof(process)); + } + else if (process.Handle == IntPtr.Zero) + { + throw new ArgumentException("The specified process has a NULL handle"); + } + + bool success = AssignProcessToJobObject(_jobHandle, process.Handle); + if (!success) + { + throw new COMException("Failed to add the process to the job", Marshal.GetLastWin32Error()); + } + } + + /// + /// Closes the job. 
This will close all linked processes + /// + public void Close() + { + CloseHandle(_jobHandle); + _jobHandle = IntPtr.Zero; + } + + /// + /// Disposes of the job. This will also close all linked process. + /// + public void Dispose() + { + if (Interlocked.Exchange(ref _disposed, 1) == 0) + { + Close(); + } + } + + #region native methods + + [DllImport("kernel32.dll", CharSet = CharSet.Auto, SetLastError = true)] + internal static extern IntPtr CreateJobObject([In] ref SECURITY_ATTRIBUTES lpJobAttributes, string? lpName); + + [DllImport("kernel32.dll", CharSet = CharSet.Auto, SetLastError = true)] + internal static extern IntPtr OpenJobObject(uint dwDesiredAccess, bool bInheritHandles, string lpName); + + [DllImport("kernel32.dll", SetLastError = true)] + [return: MarshalAs(UnmanagedType.Bool)] + internal static extern bool AssignProcessToJobObject(IntPtr hJob, IntPtr hProcess); + + [DllImport("kernel32.dll", SetLastError = true)] + [return: MarshalAs(UnmanagedType.Bool)] + internal static extern bool SetInformationJobObject( + [In] IntPtr hJob, + JOBOBJECTINFOCLASS JobObjectInfoClass, + [In] IntPtr lpJobObjectInfo, + uint cbJobObjectInfoLength); + + [DllImport("kernel32.dll", SetLastError = true)] +#if NETFRAMEWORK + [ReliabilityContract(Consistency.WillNotCorruptState, Cer.Success)] +#endif + [SuppressUnmanagedCodeSecurity] + [return: MarshalAs(UnmanagedType.Bool)] + internal static extern bool CloseHandle(IntPtr hObject); + +#endregion + + #region native types + + [StructLayout(LayoutKind.Sequential)] + internal struct SECURITY_ATTRIBUTES + { + public uint nLength; + public IntPtr lpSecurityDescriptor; + public bool bInheritHandle; + } + + [StructLayout(LayoutKind.Sequential)] + internal struct JOBOBJECT_BASIC_LIMIT_INFORMATION + { + public Int64 PerProcessUserTimeLimit; + public Int64 PerJobUserTimeLimit; + public JobObjectLimits LimitFlags; + public UIntPtr MinimumWorkingSetSize; + public UIntPtr MaximumWorkingSetSize; + public UInt32 ActiveProcessLimit; + 
public UIntPtr Affinity; + public UInt32 PriorityClass; + public UInt32 SchedulingClass; + } + + [StructLayout(LayoutKind.Sequential)] + internal struct JOBOBJECT_EXTENDED_LIMIT_INFORMATION + { + public JOBOBJECT_BASIC_LIMIT_INFORMATION BasicLimitInformation; + public IO_COUNTERS IoInfo; + public UIntPtr ProcessMemoryLimit; + public UIntPtr JobMemoryLimit; + public UIntPtr PeakProcessMemoryUsed; + public UIntPtr PeakJobMemoryUsed; + } + + [StructLayout(LayoutKind.Sequential)] + internal struct IO_COUNTERS + { + public UInt64 ReadOperationCount; + public UInt64 WriteOperationCount; + public UInt64 OtherOperationCount; + public UInt64 ReadTransferCount; + public UInt64 WriteTransferCount; + public UInt64 OtherTransferCount; + } + + internal enum JOBOBJECTINFOCLASS + { + JobObjectExtendedLimitInformation = 9, + } + + internal enum JobObjectLimits : UInt32 + { + LIMIT_KILL_ON_JOB_CLOSE = 0x00002000, + } +} + +#endregion diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/tests/AdaptersTests.cs b/.dotnet.azure/sdk/openai/tools/TestFramework/tests/AdaptersTests.cs new file mode 100644 index 000000000..266d388a3 --- /dev/null +++ b/.dotnet.azure/sdk/openai/tools/TestFramework/tests/AdaptersTests.cs @@ -0,0 +1,107 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using System.ClientModel; +using System.Diagnostics; +using NUnit.Framework; +using OpenAI.TestFramework.Adapters; +using OpenAI.TestFramework.Mocks; + +namespace OpenAI.TestFramework.Tests; + +[TestFixture] +public class AdaptersTests +{ + public CancellationToken Token => + new CancellationTokenSource(Debugger.IsAttached + ? 
TimeSpan.FromMinutes(15) + : TimeSpan.FromSeconds(5)) + .Token; + + [Test] + public async Task TestSyncToAsyncEnumerator() + { + const int start = 0; + const int num = 100; + + IEnumerator sync = Enumerable.Range(start, num).GetEnumerator(); + await using SyncToAsyncEnumerator async = new(sync, Token); + + for (int i = start; i < num; i++) + { + bool success = await async.MoveNextAsync(); + Assert.That(success, Is.True); + Assert.That(async.Current, Is.EqualTo(i)); + } + } + + [Test] + public async Task TestSyncToAsyncResultCollection() + { + const int start = 0; + const int num = 100; + + MockCollectionResult sync = new(() => Enumerable.Range(start, num)); + SyncToAsyncCollectionResult asyncAdapter = new(sync); + + await using var asyncEnumerator = asyncAdapter.GetAsyncEnumerator(Token); + + for (int i = start; i < num; i++) + { + bool success = await asyncEnumerator.MoveNextAsync(); + Assert.That(success, Is.True); + Assert.That(asyncEnumerator.Current, Is.EqualTo(i)); + } + } + + [Test] + public async Task TestFailedSyncToAsyncResultCollection() + { + MockCollectionResult sync = new(Fail); + SyncToAsyncCollectionResult asyncAdapter = new(sync); + + await using var asyncEnumerator = asyncAdapter.GetAsyncEnumerator(Token); + Assert.ThrowsAsync(() => asyncEnumerator.MoveNextAsync().AsTask()); + } + + [Test] + public async Task TestSyncToAsyncPageableCollection() + { + const int start = 0; + const int num = 100; + const int itemsPerPage = 10; + int expectedPages = (int)Math.Ceiling((double)num / itemsPerPage); + + MockPageCollection sync = new(() => Enumerable.Range(start, num), new MockPipelineResponse(), itemsPerPage); + SyncToAsyncPageCollection asyncAdapter = new(sync); + + int numPages = 0; + int expected = 0; + await foreach (var page in asyncAdapter) + { + numPages++; + foreach (int actual in page.Values) + { + Assert.That(actual, Is.EqualTo(expected)); + expected++; + } + } + + Assert.That(numPages, Is.EqualTo(expectedPages)); + } + + [Test] + public async 
Task TestFailedSyncToAsyncPageableCollection() + { + MockPageCollection sync = new(Fail, new MockPipelineResponse()); + SyncToAsyncPageCollection asyncAdapter = new(sync); + + await using var asyncEnumerator = ((IAsyncEnumerable>)asyncAdapter).GetAsyncEnumerator(Token); + Assert.ThrowsAsync(() => asyncEnumerator.MoveNextAsync().AsTask()); + } + + private static IEnumerable Fail() + { + throw new ApplicationException("This should fail"); + } +} diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/tests/AutoSyncAsyncTests.cs b/.dotnet.azure/sdk/openai/tools/TestFramework/tests/AutoSyncAsyncTests.cs new file mode 100644 index 000000000..44f13bee3 --- /dev/null +++ b/.dotnet.azure/sdk/openai/tools/TestFramework/tests/AutoSyncAsyncTests.cs @@ -0,0 +1,201 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using System.ClientModel; +using NUnit.Framework; +using OpenAI.TestFramework.Tests.Helpers; + +namespace OpenAI.TestFramework.Tests; + +public class AutoSyncAsyncTests(bool useAsync) : ClientTestBase(useAsync) +{ + private static readonly string EX_MSG = Guid.NewGuid().ToString(); + + [Test] + [SyncOnly] + public void OnlyInSyncMode() + { + Assert.That(IsAsync, Is.False); + } + + [Test] + [AsyncOnly] + public void OnlyInAsyncMode() + { + Assert.That(IsAsync, Is.True); + } + + [Test] + public void CanGetOriginal() + { + MockClient original = new MockClient(); + + MockClient instrumented = WrapClient(original); + Assert.That(instrumented, Is.Not.Null); + Assert.That(ReferenceEquals(original, instrumented), Is.False); + Assert.That(typeof(MockClient).IsAssignableFrom(instrumented.GetType()), Is.True); + + MockClient recovered = UnWrap(instrumented); + Assert.That(recovered, Is.Not.Null); + Assert.That(ReferenceEquals(original, recovered), Is.True); + } + + [Test] + public void CanGetContext() + { + var context = new MockClientContext(); + + MockClient client = WrapClient(new MockClient(), context); + 
Assert.That(client, Is.Not.Null); + + var recoveredContext = GetClientContext(client) as MockClientContext; + Assert.That(recoveredContext, Is.Not.Null); + Assert.That(recoveredContext!.Id, Is.EqualTo(context.Id)); + Assert.That(ReferenceEquals(recoveredContext, context), Is.True); + } + + [Test] + public async Task TaskWorks() + { + MockClient client = WrapClient(new MockClient()); + await client.DoAsync(); + AssertCorrectFunctionCalled(client); + } + + [Test] + public void FailedTaskWorks() + { + MockClient client = WrapClient(new MockClient()); + ArgumentException? ex = Assert.ThrowsAsync(() => client.FailAsync(EX_MSG)); + Assert.That(ex, Is.Not.Null); + Assert.That(ex!.Message, Is.EqualTo(EX_MSG)); + AssertCorrectFunctionCalled(client); + } + + [Test] + public async Task TaskWithResultWorks() + { + MockClient client = WrapClient(new MockClient()); + int count = await client.CountAsync(); + Assert.That(count, Is.EqualTo(IsAsync ? 12 : 5)); + AssertCorrectFunctionCalled(client); + } + + [Test] + public void FailedTaskWithResultWorks() + { + MockClient client = WrapClient(new MockClient()); + ArgumentException? 
ex = Assert.ThrowsAsync(() => client.FailWithResultAsync(EX_MSG)); + Assert.That(ex, Is.Not.Null); + Assert.That(ex!.Message, Is.EqualTo(EX_MSG)); + AssertCorrectFunctionCalled(client); + } + + [Test] + public async Task ResultCollectionWorks() + { + const int num = 3; + const int increment = 2; + + MockClient client = WrapClient(new MockClient()); + AsyncCollectionResult coll = client.ResultCollectionAsync(num, increment); + + Assert.IsNotNull(coll); + Assert.That(coll.GetRawResponse(), Is.Not.Null); + Assert.That(coll.GetRawResponse().Status, Is.EqualTo(200)); + Assert.That(coll.GetRawResponse().ReasonPhrase, Is.EqualTo("OK")); + + int numResults = 0; + await foreach (int i in coll) + { + Assert.That(i, Is.EqualTo(numResults * increment)); + numResults++; + } + + Assert.That(numResults, Is.EqualTo(num)); + AssertCorrectFunctionCalled(client); + } + + [Test] + public void FailedResultCollection() + { + MockClient client = WrapClient(new MockClient()); + + // For now we mimic how the OpenAI and Azure OpenAI libraries work in that no service requests are sent + // until we try to enumerate the async collections. So exceptions aren't expected initially + AsyncCollectionResult coll = client.FailResultCollectionAsync(EX_MSG); + Assert.That(coll, Is.Not.Null); + + IAsyncEnumerator enumerator = coll.GetAsyncEnumerator(); + Assert.That(enumerator, Is.Not.Null); + ArgumentException? 
ex = Assert.ThrowsAsync(() => enumerator.MoveNextAsync().AsTask()); + Assert.That(ex, Is.Not.Null); + Assert.That(ex!.Message, Is.EqualTo(EX_MSG)); + AssertCorrectFunctionCalled(client); + } + + [Test] + public async Task PageableCollectionWorks() + { + const int num = 50; + const int increment = 1; + const int itemsPerPage = 20; + int expectedPages = (int)Math.Ceiling((double)num / itemsPerPage); + + MockClient client = WrapClient(new MockClient()); + AsyncPageCollection coll = client.PageableCollectionAsync(num, increment, itemsPerPage); + Assert.IsNotNull(coll); + + int numPages = 0; + int numResults = 0; + await foreach(PageResult page in coll) + { + Assert.That(page.GetRawResponse(), Is.Not.Null); + Assert.That(page.GetRawResponse().Status, Is.EqualTo(200)); + Assert.That(page.GetRawResponse().ReasonPhrase, Is.EqualTo("OK")); + + numPages++; + foreach (int actual in page.Values) + { + Assert.That(actual, Is.EqualTo(numResults * increment)); + numResults++; + } + } + + Assert.That(numResults, Is.EqualTo(num)); + Assert.That(numPages, Is.EqualTo(expectedPages)); + AssertCorrectFunctionCalled(client); + } + + [Test] + public void FailedPageableCollection() + { + MockClient client = WrapClient(new MockClient()); + + // For now we mimic how the OpenAI and Azure OpenAI libraries work in that no service requests are sent + // until we try to enumerate the async collections. So exceptions aren't expected initially + AsyncPageCollection coll = client.FailPageableCollectionAsync(EX_MSG); + Assert.That(coll, Is.Not.Null); + + IAsyncEnumerator> enumerator = ((IAsyncEnumerable>)coll).GetAsyncEnumerator(); + Assert.That(enumerator, Is.Not.Null); + ArgumentException? 
ex = Assert.ThrowsAsync(() => enumerator.MoveNextAsync().AsTask()); + Assert.That(ex, Is.Not.Null); + Assert.That(ex!.Message, Is.EqualTo(EX_MSG)); + AssertCorrectFunctionCalled(client); + } + + private void AssertCorrectFunctionCalled(MockClient client, int expectedCalls = 1) + { + if (IsAsync) + { + Assert.That(client.AsyncHit, Is.EqualTo(expectedCalls)); + Assert.That(client.SyncHit, Is.EqualTo(0)); + } + else + { + Assert.That(client.AsyncHit, Is.EqualTo(0)); + Assert.That(client.SyncHit, Is.EqualTo(expectedCalls)); + } + } +} diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/tests/Helpers/MockClient.cs b/.dotnet.azure/sdk/openai/tools/TestFramework/tests/Helpers/MockClient.cs new file mode 100644 index 000000000..9eac6054d --- /dev/null +++ b/.dotnet.azure/sdk/openai/tools/TestFramework/tests/Helpers/MockClient.cs @@ -0,0 +1,149 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using System.ClientModel; +using System.Runtime.CompilerServices; +using OpenAI.TestFramework.Mocks; + +namespace OpenAI.TestFramework.Tests.Helpers; + +public class MockClient +{ + private int _asyncHit; + private int _syncHit; + + public virtual int AsyncHit => _asyncHit; + public virtual int SyncHit => _syncHit; + + public virtual Task DoAsync() + { + Interlocked.Increment(ref _asyncHit); + return Task.Delay(200); + } + + public virtual void Do() + { + Interlocked.Increment(ref _syncHit); + } + + public virtual Task FailAsync(string message) + { + Interlocked.Increment(ref _asyncHit); + return Task.FromException(new ArgumentException(message)); + } + + public virtual void Fail(string message) + { + Interlocked.Increment(ref _syncHit); + throw new ArgumentException(message); + } + + public virtual async Task CountAsync() + { + Interlocked.Increment(ref _asyncHit); + await Task.Delay(100).ConfigureAwait(false); + return 12; + } + + public virtual int Count() + { + Interlocked.Increment(ref _syncHit); + return 5; + } + + 
public virtual Task FailWithResultAsync(string message) + { + Interlocked.Increment(ref _asyncHit); + return Task.FromException(new ArgumentException(message)); + } + + public virtual int FailWithResult(string message) + { + Interlocked.Increment(ref _syncHit); + throw new ArgumentException(message); + } + + public virtual AsyncCollectionResult ResultCollectionAsync(int num, int increment = 5) + { + Interlocked.Increment(ref _asyncHit); + return new MockAsyncCollectionResult(() => EnumerateAsync(num, increment)); + } + + public virtual CollectionResult ResultCollection(int num, int increment = 5) + { + Interlocked.Increment(ref _syncHit); + return new MockCollectionResult(() => Enumerate(num, increment)); + } + + public virtual AsyncCollectionResult FailResultCollectionAsync(string message) + { + Interlocked.Increment(ref _asyncHit); + return new MockAsyncCollectionResult(() => FailEnumerateAsync(message)); + } + + public virtual CollectionResult FailResultCollection(string message) + { + Interlocked.Increment(ref _syncHit); + return new MockCollectionResult(() => FailEnumerate(message)); + } + + public virtual AsyncPageCollection PageableCollectionAsync(int num, int increment, int itemsPerPage) + { + Interlocked.Increment(ref _asyncHit); + return new MockAsyncPageCollection(() => EnumerateAsync(num, increment), new MockPipelineResponse(), itemsPerPage); + } + + public virtual PageCollection PageableCollection(int num, int increment, int itemsPerPage) + { + Interlocked.Increment(ref _syncHit); + return new MockPageCollection(() => Enumerate(num, increment), new MockPipelineResponse(), itemsPerPage); + } + + public virtual AsyncPageCollection FailPageableCollectionAsync(string message) + { + Interlocked.Increment(ref _asyncHit); + return new MockAsyncPageCollection(() => FailEnumerateAsync(message), new MockPipelineResponse()); + } + + public virtual PageCollection FailPageableCollection(string message) + { + Interlocked.Increment(ref _syncHit); + return new 
MockPageCollection(() => FailEnumerate(message), new MockPipelineResponse()); + } + + private async IAsyncEnumerable EnumerateAsync(int num, int increment, [EnumeratorCancellation] CancellationToken token = default) + { + int running = 0; + for (int i = 0; i < num; i++, running += increment) + { + await Task.Delay(100); + yield return running; + } + } + + private IEnumerable Enumerate(int num, int increment) + { + int running = 0; + for (int i = 0; i < num; i++, running += increment) + { + yield return running; + } + } + + private async IAsyncEnumerable FailEnumerateAsync(string message, [EnumeratorCancellation] CancellationToken token = default) + { + bool c = true; + await Task.Delay(100).ConfigureAwait(false); + if (c) + { + throw new ArgumentException(message); + } + + yield break; + } + + private IEnumerable FailEnumerate(string message) + { + throw new ArgumentException(message); + } +} diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/tests/Helpers/MockClientContext.cs b/.dotnet.azure/sdk/openai/tools/TestFramework/tests/Helpers/MockClientContext.cs new file mode 100644 index 000000000..e36ed1e1f --- /dev/null +++ b/.dotnet.azure/sdk/openai/tools/TestFramework/tests/Helpers/MockClientContext.cs @@ -0,0 +1,9 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +namespace OpenAI.TestFramework.Tests.Helpers; + +public class MockClientContext +{ + public string Id { get; } = Guid.NewGuid().ToString(); +} diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/tests/MockStringServiceTests.cs b/.dotnet.azure/sdk/openai/tools/TestFramework/tests/MockStringServiceTests.cs new file mode 100644 index 000000000..d5d3edff4 --- /dev/null +++ b/.dotnet.azure/sdk/openai/tools/TestFramework/tests/MockStringServiceTests.cs @@ -0,0 +1,146 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Reflection; +using NUnit.Framework; +using OpenAI.TestFramework.Mocks; +using OpenAI.TestFramework.Recording.Proxy; +using OpenAI.TestFramework.Recording.Proxy.Service; +using OpenAI.TestFramework.Utils; + +namespace OpenAI.TestFramework.Tests; + +public class MockStringServiceTests : RecordedClientTestBase +{ + private const string c_basePath = "data"; + + public MockStringServiceTests(bool isAsync) + : base(isAsync, null) + { + RecordingOptions.SanitizersToRemove.Add("AZSDK3430"); // $..id + } + + public DirectoryInfo RepositoryRoot { get; } = FindRepoRoot(); + + [Test] + public async Task AddAndGet() + { + const string id = "first.one"; + const string expected = "The first value goes here"; + + using MockRestService service = new(c_basePath); + var options = ConfigureClientOptions(new ClientPipelineOptions()); + using var client = WrapClient(new MockRestServiceClient(service.HttpEndpoint, options)); + + ClientResult add = await client.AddAsync(id, expected, Token); + Assert.That(add, Is.Not.Null); + Assert.That(add.GetRawResponse().Status, Is.EqualTo(200)); + + string? retrieved = await client.GetAsync("first.one", Token); + Assert.That(retrieved, Is.EqualTo(expected)); + } + + [Test] + public async Task AddAndDelete() + { + const string id = "first.one"; + const string expected = "The first value goes here"; + + using MockRestService service = new(c_basePath); + var options = ConfigureClientOptions(new ClientPipelineOptions()); + using var client = WrapClient(new MockRestServiceClient(service.HttpEndpoint, options)); + + ClientResult add = await client.AddAsync(id, expected, Token); + Assert.That(add, Is.Not.Null); + Assert.That(add.GetRawResponse().Status, Is.EqualTo(200)); + + bool deleted = await client.RemoveAsync(id, Token); + Assert.That(deleted, Is.True); + + string? 
retrieved = await client.GetAsync("first.one", Token); + Assert.That(retrieved, Is.Null); + } + + #region overrides + + protected override ProxyServiceOptions CreateProxyServiceOptions() + => new() + { + DotnetExecutable = AssemblyHelper.GetDotnetExecutable()?.FullName!, + TestProxyDll = AssemblyHelper.GetAssemblyMetadata("TestProxyPath")!, + DevCertFile = Path.Combine( + RepositoryRoot.FullName, + "eng", + "common", + "testproxy", + "dotnet-devcert.pfx"), + DevCertPassword = "password", + StorageLocationDir = RepositoryRoot.FullName, + }; + + protected override RecordingStartInformation CreateRecordingSessionStartInfo() + => new() + { + RecordingFile = GetRecordingFile(), + AssetsFile = GetAssetsFile() + }; + + #endregion + + #region helper methods + + private static DirectoryInfo FindRepoRoot() + { + /** + * This code assumes that we are running in the standard Azure .Net SDK repository layout. With this in mind, + * we generally assume that we are running our test code from + * /artifacts/bin/// + * So to find the root we keep navigating up until we find a folder with a .git subfolder + * + * Another alternative would be to call: git rev-parse --show-toplevel + */ + + DirectoryInfo? current = new FileInfo(Assembly.GetExecutingAssembly().Location).Directory; + while (current != null && !current.EnumerateDirectories(".git").Any()) + { + current = current.Parent; + } + + return current + ?? throw new InvalidOperationException("Could not determine the root folder for this repository"); + } + + private string GetRecordingFile() + { + DirectoryInfo sourceDir = AssemblyHelper.GetAssemblySourceDir() + ?? throw new InvalidOperationException("Could not determine the source path for this assembly"); + string relativeDir = PathHelpers.GetRelativePath(RepositoryRoot.FullName, sourceDir.FullName); + return Path.Combine( + relativeDir, + "SessionRecords", + GetType().Name, + GetRecordedTestFileName()); + } + + private string? GetAssetsFile() + { + DirectoryInfo? 
sourceDir = AssemblyHelper.GetAssemblySourceDir() + ?? throw new InvalidOperationException("Could not determine the source path for this assembly"); + + // walk up the tree until we hit either the repository root, or found a folder with an "assets.json" file + for (; sourceDir != null && sourceDir?.FullName != RepositoryRoot.FullName; sourceDir = sourceDir.Parent) + { + string assetsFile = Path.Combine(sourceDir!.FullName, "assets.json"); + if (File.Exists(assetsFile)) + { + return assetsFile; + } + } + + return null; + } + + #endregion +} diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/tests/OpenAI.TestFramework.Tests.csproj b/.dotnet.azure/sdk/openai/tools/TestFramework/tests/OpenAI.TestFramework.Tests.csproj new file mode 100644 index 000000000..e6934e292 --- /dev/null +++ b/.dotnet.azure/sdk/openai/tools/TestFramework/tests/OpenAI.TestFramework.Tests.csproj @@ -0,0 +1,21 @@ + + + + $(RequiredTargetFrameworks);net8.0 + enable + enable + latest + + + + + + + + + + + + + + diff --git a/.dotnet.azure/sdk/openai/tools/TestFramework/tests/ProxyServiceTests.cs b/.dotnet.azure/sdk/openai/tools/TestFramework/tests/ProxyServiceTests.cs new file mode 100644 index 000000000..496afbc6d --- /dev/null +++ b/.dotnet.azure/sdk/openai/tools/TestFramework/tests/ProxyServiceTests.cs @@ -0,0 +1,351 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +using System.ClientModel; +using System.ClientModel.Primitives; +using NUnit.Framework; +using OpenAI.TestFramework.Mocks; +using OpenAI.TestFramework.Recording; +using OpenAI.TestFramework.Recording.Matchers; +using OpenAI.TestFramework.Recording.Proxy; +using OpenAI.TestFramework.Recording.Proxy.Service; +using OpenAI.TestFramework.Recording.RecordingProxy; +using OpenAI.TestFramework.Recording.Sanitizers; +using OpenAI.TestFramework.Recording.Transforms; +using OpenAI.TestFramework.Utils; + +namespace OpenAI.TestFramework.Tests +{ + [NonParallelizable] + public class ProxyServiceTests(bool isAsync) : ClientTestBase(isAsync) + { + #region Properties and setup/teardown methods + + public DirectoryInfo? RecordingDir { get; private set; } + + public FileInfo? RecordingFile { get; private set; } + + [SetUp] + public void CreateRecordingFile() + { + RecordingDir = new DirectoryInfo(Path.Combine(Path.GetTempPath(), "RecordingTests", Guid.NewGuid().ToString())); + if (!RecordingDir.Exists) + { + RecordingDir.Create(); + } + + RecordingFile = new FileInfo(Path.Combine(RecordingDir.FullName, Path.GetRandomFileName() + ".json")); + } + + [TearDown] + public void DeleteRecordingFile() + { + if (RecordingFile != null) + { + RecordingFile.Delete(); + } + + if (RecordingDir != null) + { + RecordingDir.Delete(true); + } + } + + #endregion + + [Test] + public async Task StartProxy() + { + using ProxyService proxy = await CreateProxyServiceAsync(); + + Assert.That(proxy.HttpEndpoint, Is.Not.Null); + Assert.That(proxy.HttpEndpoint.Port, Is.GreaterThan(0).And.LessThanOrEqualTo(ushort.MaxValue)); + Assert.That(proxy.HttpsEndpoint, Is.Not.Null); + Assert.That(proxy.HttpsEndpoint.Port, Is.GreaterThan(0).And.LessThanOrEqualTo(ushort.MaxValue)); + + ProxyClientResult available = await proxy.Client.ListAvailableAsync(Token); + Assert.That(available, Is.Not.Null); + Assert.That(available.GetRawResponse(), Is.Not.Null); + Assert.That(available.GetRawResponse().Status, Is.EqualTo(200)); 
+ Assert.That(available.Value, Is.Not.Null); + Assert.That(available.Value, Does.Contain("BodilessMatcher")); + } + + [Test] + public async Task AddSanitizers() + { + using ProxyService proxy = await CreateProxyServiceAsync(); + + List sanitizers = + [ + new BodyKeySanitizer("body.key"), + new BodyRegexSanitizer("(.*)") + { + GroupForReplace = "1", + Condition = new Recording.Condition() + { + ResponseHeader = new() + { + Key = "Content-Type", + ValueRegex = "json$" + }, + UriRegex = "https://[^/]+/sub" + } + }, + new HeaderRegexSanitizer("Authentication") + { + Value = "replacement", + GroupForReplace = "1", + Regex = "^Bearer " + }, + new UriRegexSanitizer("https://[^/]+/sub") + { + GroupForReplace = "1", + Value = "replacement" + } + ]; + + ProxyClientResult> result = await proxy.Client.AddSanitizersAsync(sanitizers, token: Token); + Assert.That(result, Is.Not.Null); + Assert.That(result.GetRawResponse(), Is.Not.Null); + Assert.That(result.GetRawResponse().Status, Is.EqualTo(200)); + Assert.That(result.Value, Is.Not.Null); + Assert.That(result.Value, Has.Count.EqualTo(sanitizers.Count)); + } + + [Test] + public async Task SetMatcher() + { + using ProxyService proxy = await CreateProxyServiceAsync(); + + BaseMatcher[] matchers = + [ + ExistingMatcher.Headerless, + ExistingMatcher.Bodiless, + new CustomMatcher() + { + CompareBodies = false, + ExcludedHeaders = "Authorization", + IgnoredHeaders = "Content-Length,Content-Type", + IgnoredQueryParameters = "page,version", + IgnoreQueryOrdering = true, + } + ]; + + foreach (var matcher in matchers) + { + ProxyClientResult result = await proxy.Client.SetMatcherAsync(matcher, token: Token); + Assert.That(result, Is.Not.Null); + Assert.That(result.GetRawResponse(), Is.Not.Null); + Assert.That(result.GetRawResponse().Status, Is.EqualTo(200)); + } + } + + [Test] + public async Task SetTransform() + { + using ProxyService proxy = await CreateProxyServiceAsync(); + + HeaderTransform transform = new("X-Client-RequestId") + { + 
Value = "replacement", + Condition = new() + { + UriRegex = "http.*://[^/]+/(.*)" + } + }; + + ProxyClientResult result = await proxy.Client.AddTransformAsync(transform, token: Token); + Assert.That(result, Is.Not.Null); + Assert.That(result.GetRawResponse(), Is.Not.Null); + Assert.That(result.GetRawResponse().Status, Is.EqualTo(200)); + } + + [Test] + public async Task StartStopRecording() + { + const string key1 = "key1"; + string value1 = Guid.NewGuid().ToString(); + const string key2 = "the.others"; + string value2 = "value"; + + using ProxyService proxy = await CreateProxyServiceAsync(); + + RecordingStartInformation startInfo = new() + { + RecordingFile = RecordingFile!.FullName, + }; + + ProxyClientResult result = await proxy.Client.StartRecordingAsync(startInfo, token: Token); + Assert.That(result, Is.Not.Null); + Assert.That(result.GetRawResponse(), Is.Not.Null); + Assert.That(result.GetRawResponse().Status, Is.EqualTo(200)); + + string recordingId = result.RecordingId!; + Assert.That(recordingId, Is.Not.Null); + + Dictionary additional = new() + { + [key1] = value1, + [key2] = value2, + }; + + result = await proxy.Client.StopRecordingAsync(recordingId, additional, false, Token); + + // At this point we should have a recording file + string recordedJson = File.ReadAllText(RecordingFile.FullName); + Assert.That(recordedJson, Does.Contain(key1) + .And.Contain(value1) + .And.Contain(key2) + .And.Contain(value2)); + } + + [Test] + public async Task RecordAndPlayback() + { + using ProxyService recordingProxyService = await CreateProxyServiceAsync(); + RecordingStartInformation startInfo = new() { RecordingFile = RecordingFile!.FullName }; + + using MockRestService mockRestService = new(); + TestRecordingOptions recordingOptions = new() + { + SanitizersToRemove = + { + "AZSDK3430", // $..id + } + }; + + string id1; + string id2; + + // Start recording, and capture some requests + { + ProxyClientResult result = await 
recordingProxyService.Client.StartRecordingAsync(startInfo, Token); + Assert.That(result, Is.Not.Null); + Assert.That(result.RecordingId, !Is.Null.Or.Empty); + string recordingId = result.RecordingId!; + + await using TestRecording recording = new(recordingId, RecordedTestMode.Record, recordingProxyService); + await recording.ApplyOptions(recordingOptions, Token); + + id1 = recording.Random.NewGuid().ToString(); + id2 = recording.Random.NewGuid().ToString(); + + await SendRequestsAsync(recording, mockRestService.HttpEndpoint, id1, id2, Token); + } + + // validate the service has what we expect + var serviceIds = mockRestService.GetAll() + .Select(e => e.id) + .ToArray(); + Assert.That(serviceIds, Is.EquivalentTo(new[] { id1, id2 })); + + mockRestService.Reset(); + + // Playback the recording + { + ProxyClientResult> result = await recordingProxyService.Client.StartPlaybackAsync(startInfo, Token); + Assert.That(result, Is.Not.Null); + Assert.That(result.RecordingId, !Is.Null.Or.Empty); + string recordingId = result.RecordingId!; + + await using TestRecording playback = new(recordingId, RecordedTestMode.Playback, recordingProxyService, result.Value); + await playback.ApplyOptions(recordingOptions, Token); + + string id = playback.Random.NewGuid().ToString(); + Assert.That(id, Is.EqualTo(id1)); + id = playback.Random.NewGuid().ToString(); + Assert.That(id, Is.EqualTo(id2)); + + await SendRequestsAsync(playback, mockRestService.HttpEndpoint, id1, id2, Token); + } + + // since we are playing back, the service should not have been called + Assert.That(mockRestService.GetAll().Count(), Is.EqualTo(0)); + + static async Task SendRequestsAsync(TestRecording recording, Uri restEndpoint, string id1, string id2, CancellationToken token) + { + const string value1 = "The value for the first item"; + const string value2 = "The secondary value goes here"; + const string id3 = "random"; + const string value3 = "Sure why not"; + + ClientPipelineOptions options = new(); + 
options.RetryPolicy = new TestClientRetryPolicy(0, TimeSpan.FromMilliseconds(100)); + options.Transport = new ProxyTransport(recording.GetProxyTransportOptions()); + + using MockRestServiceClient client = new(restEndpoint, options); + + ClientResult add = await client.AddAsync(id1, value1, token); + Assert.That(add, Is.Not.Null); + Assert.That(add.GetRawResponse().Status, Is.EqualTo(200)); + + add = await client.AddAsync(id2, value2, token); + Assert.That(add, Is.Not.Null); + Assert.That(add.GetRawResponse().Status, Is.EqualTo(200)); + + add = await client.AddAsync(id3, value3, token); + Assert.That(add, Is.Not.Null); + Assert.That(add.GetRawResponse().Status, Is.EqualTo(200)); + + ClientResult get = await client.GetAsync(id2, token); + Assert.That(add, Is.Not.Null); + Assert.That(add.GetRawResponse().Status, Is.EqualTo(200)); + Assert.That(get.Value, Is.EqualTo(value2)); + + get = await client.GetAsync(id3, token); + Assert.That(add, Is.Not.Null); + Assert.That(add.GetRawResponse().Status, Is.EqualTo(200)); + Assert.That(get.Value, Is.EqualTo(value3)); + + ClientResult remove = await client.RemoveAsync(id3, token); + Assert.That(remove.Value, Is.True); + + remove = await client.RemoveAsync("does.not.exist", token); + Assert.That(remove.Value, Is.False); + + get = await client.GetAsync(id3, token); + Assert.That(get, Is.Not.Null); + Assert.That(get.GetRawResponse().Status, Is.EqualTo(404)); + Assert.That(get.Value, Is.Null); + } + } + + #region helper methods + + private async Task CreateProxyServiceAsync() + { + ProxyService? 
proxy = null; + try + { + proxy = await ProxyService.CreateNewAsync( + new ProxyServiceOptions() + { + DotnetExecutable = AssemblyHelper.GetDotnetExecutable()?.FullName!, + TestProxyDll = AssemblyHelper.GetAssemblyMetadata("TestProxyPath")!, + StorageLocationDir = RecordingDir!.FullName + }, + Token); + + Assert.That(proxy, Is.Not.Null); + Assert.DoesNotThrow(proxy.ThrowOnErrors); + Assert.That(proxy.Client, Is.Not.Null); + + var wrappedClient = WrapClient(proxy.Client); + var setter = typeof(ProxyService).GetMethod("SetClient", System.Reflection.BindingFlags.NonPublic | System.Reflection.BindingFlags.Public | System.Reflection.BindingFlags.Instance) + ?? throw new InvalidOperationException("Could not find the ProxyService.SetClient method"); + setter.Invoke(proxy, [wrappedClient]); + + var ret = proxy; + proxy = null; + return ret; + } + finally + { + proxy?.Dispose(); + } + } + + #endregion + } +} diff --git a/.dotnet.azure/src/Azure.AI.OpenAI.csproj b/.dotnet.azure/src/Azure.AI.OpenAI.csproj deleted file mode 100644 index d0c5122a2..000000000 --- a/.dotnet.azure/src/Azure.AI.OpenAI.csproj +++ /dev/null @@ -1,40 +0,0 @@ - - - - - - - Azure OpenAI's official extension package for using OpenAI's .NET library with the Azure OpenAI Service. - - Azure.AI.OpenAI Client Library - 2.0.0-beta.3 - Microsoft Azure OpenAI - true - - netstandard2.0 - true - $(NoWarn);CS1591;AZC0012;AZC0102;CS8002;CS0436;AZC0112 - true - enable - preview - disable - Unsigned - - - - - - - - - - - - - - diff --git a/.dotnet.azure/src/Custom/Assistants/AzureRunStepDetailsUpdate.cs b/.dotnet.azure/src/Custom/Assistants/AzureRunStepDetailsUpdate.cs deleted file mode 100644 index d70d99394..000000000 --- a/.dotnet.azure/src/Custom/Assistants/AzureRunStepDetailsUpdate.cs +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -using System.ClientModel.Primitives; -using System.Collections.Generic; -using System.Diagnostics.CodeAnalysis; -using System.Text.Json; - -namespace Azure.AI.OpenAI; - -public static class RunStepDetailsUpdateExtensions -{ - /// - /// Gets a value indicating whether this instance represents a call to a browser tool. - /// - /// The tool call to check the type of. - /// True if the tool call represents a browser tool call, false otherwise. - [Experimental("AOAI001")] - public static bool IsBingSearchKind(this RunStepDetailsUpdate baseUpdate) - { - return baseUpdate?._toolCall?.Type == "browser"; - } -} diff --git a/.dotnet.azure/src/Custom/Assistants/Internal/GeneratorStubs.cs b/.dotnet.azure/src/Custom/Assistants/Internal/GeneratorStubs.cs deleted file mode 100644 index a73cb2016..000000000 --- a/.dotnet.azure/src/Custom/Assistants/Internal/GeneratorStubs.cs +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -namespace Azure.AI.OpenAI.Assistants; - -[CodeGenModel("BingSearchToolDefinitionBrowser")] internal partial class InternalBingSearchToolDefinitionBrowser { } -[CodeGenModel("MessageContentTextAnnotationsBingSearchUrlCitation")] internal partial class InternalMessageContentTextAnnotationsBingSearchUrlCitation { } -[CodeGenModel("MessageContentTextAnnotationsBingSearchUrlCitationUrlCitation")] internal partial class InternalMessageContentTextAnnotationsBingSearchUrlCitationUrlCitation { } -[CodeGenModel("MessageDeltaContentTextAnnotationsBingSearchUrlCitation")] internal partial class InternalMessageDeltaContentTextAnnotationsBingSearchUrlCitation { } -[CodeGenModel("MessageDeltaContentTextAnnotationsBingSearchUrlCitationUrlCitation")] internal partial class InternalMessageDeltaContentTextAnnotationsBingSearchUrlCitationUrlCitation { } -[CodeGenModel("RunStepDetailsToolCallsBingSearchObject")] internal partial class InternalRunStepDetailsToolCallsBingSearchObject { } -[CodeGenModel("RunStepDeltaStepDetailsToolCallsBingSearchObject")] internal partial class InternalRunStepDeltaStepDetailsToolCallsBingSearchObject { } diff --git a/.dotnet.azure/src/Custom/AzureOpenAIClientOptions.cs b/.dotnet.azure/src/Custom/AzureOpenAIClientOptions.cs deleted file mode 100644 index 15a0a07c5..000000000 --- a/.dotnet.azure/src/Custom/AzureOpenAIClientOptions.cs +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -using OpenAI; - -namespace Azure.AI.OpenAI; - -/// -/// Defines the scenario-independent, client-level options for the Azure-specific OpenAI client. -/// -public partial class AzureOpenAIClientOptions : OpenAIClientOptions -{ - internal string Version => _version; - private readonly string _version; - - /// - /// Initializes a new instance of - /// - /// The service API version to use with the client. - /// The provided service API version is not supported. 
- public AzureOpenAIClientOptions(ServiceVersion version = LatestVersion) - : base() - { - _version = version switch - { - ServiceVersion.V2024_04_01_Preview => "2024-04-01-preview", - ServiceVersion.V2024_05_01_Preview => "2024-05-01-preview", - ServiceVersion.V2024_06_01 => "2024-06-01", - ServiceVersion.V2024_07_01_Preview => "2024-07-01-preview", - _ => throw new NotSupportedException() - }; - } - - /// The version of the service to use. - public enum ServiceVersion - { - /// Service version "2024-04-01-preview". - V2024_04_01_Preview = 7, - V2024_05_01_Preview = 8, - V2024_06_01 = 9, - V2024_07_01_Preview = 10, - } - - private const ServiceVersion LatestVersion = ServiceVersion.V2024_07_01_Preview; -} diff --git a/.dotnet.azure/tests/Assets/test_config.json b/.dotnet.azure/tests/Assets/test_config.json deleted file mode 100644 index c9d484e70..000000000 --- a/.dotnet.azure/tests/Assets/test_config.json +++ /dev/null @@ -1,43 +0,0 @@ -{ - "assistants": { - "endpoint_name": "AZURE_OPENAI_TIP_ENDPOINT", - "api_key_name": "AZURE_OPENAI_TIP_API_KEY" - }, - "audio": { - "endpoint_name": "AZURE_OPENAI_ENDPOINT_SWEDENCENTRAL", - "api_key_name": "AZURE_OPENAI_API_KEY_SWEDENCENTRAL", - "deployment": "whisper" - }, - "batch": { - "endpoint_name": "AZURE_OPENAI_ENDPOINT", - "api_key_name": "AZURE_OPENAI_API_KEY", - "deployment": "gpt-35-turbo" - }, - "chat": { - "endpoint_name": "AZURE_OPENAI_ENDPOINT", - "api_key_name": "AZURE_OPENAI_API_KEY", - "deployment": "gpt-35-turbo" - }, - "embeddings": { - "endpoint_name": "AZURE_OPENAI_ENDPOINT", - "api_key_name": "AZURE_OPENAI_API_KEY", - "deployment": "text-embedding-3-small" - }, - "files": { - "endpoint_name": "AZURE_OPENAI_ENDPOINT", - "api_key_name": "AZURE_OPENAI_API_KEY" - }, - "fine_tuning": { - "endpoint_name": "AZURE_OPENAI_ENDPOINT", - "api_key_name": "AZURE_OPENAI_API_KEY" - }, - "images": { - "endpoint_name": "AZURE_OPENAI_ENDPOINT_SWEDENCENTRAL", - "api_key_name": "AZURE_OPENAI_API_KEY_SWEDENCENTRAL", - 
"deployment": "dall-e-3" - }, - "vector_stores": { - "endpoint_name": "AZURE_OPENAI_TIP_ENDPOINT", - "api_key_name": "AZURE_OPENAI_TIP_API_KEY" - } -} \ No newline at end of file diff --git a/.dotnet.azure/tests/AudioTests.cs b/.dotnet.azure/tests/AudioTests.cs deleted file mode 100644 index 936672f48..000000000 --- a/.dotnet.azure/tests/AudioTests.cs +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -#nullable disable - -using Azure.Core; -using Azure.Identity; -using OpenAI.Audio; -using OpenAI.Chat; -using System.ClientModel; - -namespace Azure.AI.OpenAI.Tests; - -public class AudioTests : TestBase -{ - [Test] - [Category("Smoke")] - public void CanCreateClient() => Assert.That(GetTestClient(), Is.InstanceOf()); - - [Test] - public void TranscriptionWorks() - { - AudioClient audioClient = GetTestClient("whisper"); - AudioTranscription transcription = audioClient.TranscribeAudio( - Path.Combine("Assets", "hello_world.m4a")); - Assert.That(transcription?.Text, Is.Not.Null.Or.Empty); - } - - [Test] - public void TranslationWorks() - { - AudioClient audioClient = GetTestClient("whisper"); - AudioTranslation translation = audioClient.TranslateAudio( - Path.Combine("Assets", "french.wav")); - Assert.That(translation?.Text, Is.Not.Null.Or.Empty); - } - - [Test] - public void TextToSpeechWorks() - { - AudioClient audioClient = GetTestClient("tts"); - BinaryData ttsData = audioClient.GenerateSpeechFromText( - "hello, world!", - GeneratedSpeechVoice.Alloy); - Assert.That(ttsData, Is.Not.Null); - } -} \ No newline at end of file diff --git a/.dotnet.azure/tests/Azure.AI.OpenAI.Tests.csproj b/.dotnet.azure/tests/Azure.AI.OpenAI.Tests.csproj deleted file mode 100644 index 8cd5fb367..000000000 --- a/.dotnet.azure/tests/Azure.AI.OpenAI.Tests.csproj +++ /dev/null @@ -1,18 +0,0 @@ - - - net7.0 - - $(NoWarn);CS1591 - - - - - - - - - - - - - diff --git a/.dotnet.azure/tests/BatchTests.cs 
b/.dotnet.azure/tests/BatchTests.cs deleted file mode 100644 index 101466e0f..000000000 --- a/.dotnet.azure/tests/BatchTests.cs +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -#nullable disable - -using OpenAI.Batch; - -namespace Azure.AI.OpenAI.Tests; - -public class BatchTests : TestBase -{ - [Test] - [Category("Smoke")] - public void CanCreateClient() => Assert.That(GetTestClient(), Is.InstanceOf()); -} \ No newline at end of file diff --git a/.dotnet.azure/tests/ChatTests.cs b/.dotnet.azure/tests/ChatTests.cs deleted file mode 100644 index eee2f4d7e..000000000 --- a/.dotnet.azure/tests/ChatTests.cs +++ /dev/null @@ -1,267 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -#nullable disable - -using Azure.AI.OpenAI.Chat; -using Azure.Core; -using Azure.Identity; -using OpenAI.Chat; -using System.ClientModel; -using System.ClientModel.Primitives; -using System.Text; - -namespace Azure.AI.OpenAI.Tests; - -public class ChatTests : TestBase -{ - [Test] - public void HelloWorldChatWithTopLevelClient() - { - ChatClient chatClient = GetTestClient(); - ClientResult chatCompletion = chatClient.CompleteChat([new UserChatMessage("hello, world!")]); - Assert.That(chatCompletion?.Value, Is.Not.Null); - } - - [Test] - public void HelloWorldStreaming() - { - ChatClient chatClient = GetTestClient("gpt-4"); - StringBuilder contentBuilder = new(); - foreach (StreamingChatCompletionUpdate chatUpdate in chatClient.CompleteChatStreaming( - [new UserChatMessage("Hello, assistant"!)])) - { - foreach (ChatMessageContentPart contentPart in chatUpdate.ContentUpdate) - { - contentBuilder.Append(contentPart.Text); - } - } - Assert.That(contentBuilder.ToString(), Is.Not.Null.Or.Empty); - } - - [Test] - public void BadKeyGivesHelpfulError() - { - string endpointFromEnvironment = Environment.GetEnvironmentVariable("AZURE_OPENAI_ENDPOINT"); - Uri 
endpoint = new(endpointFromEnvironment); - string mockKey = "not-a-valid-key-and-should-still-be-sanitized"; - ApiKeyCredential credential = new(mockKey); - AzureOpenAIClient topLevelClient = new(endpoint, credential); - ChatClient chatClient = topLevelClient.GetChatClient("gpt-35-turbo"); - Exception thrownException = null; - try - { - _ = chatClient.CompleteChat([new UserChatMessage("oops, this won't work with that key!")]); - } - catch (Exception ex) - { - thrownException = ex; - } - Assert.That(thrownException, Is.InstanceOf()); - Assert.That(thrownException.Message, Does.Contain("invalid subscription key")); - Assert.That(thrownException.Message, Does.Not.Contain(mockKey)); - } - - [Test] - public void BadKeyGivesHelpfulErrorStreaming() - { - string endpointFromEnvironment = Environment.GetEnvironmentVariable("AZURE_OPENAI_ENDPOINT"); - Uri endpoint = new(endpointFromEnvironment); - string mockKey = "not-a-valid-key-and-should-still-be-sanitized"; - ApiKeyCredential credential = new(mockKey); - AzureOpenAIClient topLevelClient = new(endpoint, credential); - ChatClient chatClient = topLevelClient.GetChatClient("gpt-35-turbo"); - Exception thrownException = null; - try - { - foreach (StreamingChatCompletionUpdate update in chatClient.CompleteChatStreaming( - [new UserChatMessage("oops, this won't work with that key!")])) - {} - } - catch (Exception ex) - { - thrownException = ex; - } - Assert.That(thrownException, Is.InstanceOf()); - Assert.That(thrownException.Message, Does.Contain("invalid subscription key")); - Assert.That(thrownException.Message, Does.Not.Contain(mockKey)); - } - - [Test] - public void DefaultAzureCredentialWorks() - { - ChatClient chatClient = GetTestClient(); - ChatCompletion chatCompletion = chatClient.CompleteChat([ChatMessage.CreateUserMessage("Hello, world!")]); - Assert.That(chatCompletion?.Content, Is.Not.Null); - chatCompletion = chatClient.CompleteChat([ChatMessage.CreateUserMessage("Hello again, world!")]); - 
Assert.That(chatCompletion?.Content, Is.Not.Null); - } - - [Test] - public void CanGetContentFilterResults() - { - ChatClient client = GetTestClient(); - ClientResult chatCompletionResult = client.CompleteChat([ChatMessage.CreateUserMessage("Hello, world!")]); - Console.WriteLine($"--- RESPONSE ---"); - Console.WriteLine(chatCompletionResult.GetRawResponse().Content.ToString()); - ChatCompletion chatCompletion = chatCompletionResult.Value; -#pragma warning disable OPENAI002 - ContentFilterResultForPrompt promptFilterResult = chatCompletion.GetContentFilterResultForPrompt(); - Assert.That(promptFilterResult, Is.Not.Null); - Assert.That(promptFilterResult.Sexual?.Filtered, Is.False); - Assert.That(promptFilterResult.Sexual?.Severity, Is.EqualTo(ContentFilterSeverity.Safe)); - ContentFilterResultForResponse responseFilterResult = chatCompletion.GetContentFilterResultForResponse(); - Assert.That(responseFilterResult, Is.Not.Null); - Assert.That(responseFilterResult.Hate?.Severity, Is.EqualTo(ContentFilterSeverity.Safe)); - Assert.That(responseFilterResult.ProtectedMaterialCode, Is.Null); - } -#pragma warning restore - - [Test] - [Category("Smoke")] - public void DataSourceSerializationWorks() - { - AzureSearchChatDataSource source = new() - { - Endpoint = new Uri("https://some-search-resource.azure.com"), - Authentication = DataSourceAuthentication.FromApiKey("test-api-key"), - IndexName = "index-name-here", - FieldMappings = new() - { - ContentFieldNames = { "hello" }, - TitleFieldName = "hi", - }, - AllowPartialResult = true, - QueryType = DataSourceQueryType.Simple, - OutputContextFlags = DataSourceOutputContextFlags.AllRetrievedDocuments | DataSourceOutputContextFlags.Citations, - VectorizationSource = DataSourceVectorizer.FromEndpoint( - new Uri("https://my-embedding.com"), - DataSourceAuthentication.FromApiKey("embedding-api-key")), - }; - dynamic serialized = ModelReaderWriter.Write(source).ToDynamicFromJson(); - Assert.That(serialized?.type?.ToString(), 
Is.EqualTo("azure_search")); - Assert.That(serialized?.parameters?.authentication?.type?.ToString(), Is.EqualTo("api_key")); - Assert.That(serialized?.parameters?.authentication?.key?.ToString(), Does.Contain("test")); - Assert.That(serialized?.parameters?.index_name?.ToString(), Is.EqualTo("index-name-here")); - Assert.That(serialized?.parameters?.fields_mapping?.content_fields?[0]?.ToString(), Is.EqualTo("hello")); - Assert.That(serialized?.parameters?.fields_mapping?.title_field?.ToString(), Is.EqualTo("hi")); - Assert.That(bool.TryParse(serialized?.parameters?.allow_partial_result?.ToString(), out bool parsed) && parsed == true); - Assert.That(serialized?.parameters?.query_type?.ToString(), Is.EqualTo("simple")); - Assert.That(serialized?.parameters?.include_contexts?[0]?.ToString(), Is.EqualTo("citations")); - Assert.That(serialized?.parameters?.include_contexts?[1]?.ToString(), Is.EqualTo("all_retrieved_documents")); - Assert.That(serialized?.parameters?.embedding_dependency?.type?.ToString(), Is.EqualTo("endpoint")); - -#pragma warning disable OPENAI002 - ChatCompletionOptions options = new(); - options.AddDataSource(new ElasticsearchChatDataSource() - { - Authentication = DataSourceAuthentication.FromAccessToken("foo-token"), - Endpoint = new Uri("https://my-elasticsearch.com"), - IndexName = "my-index-name", - InScope = true, - }); - - IReadOnlyList sourcesFromOptions = options.GetDataSources(); - Assert.That(sourcesFromOptions, Has.Count.EqualTo(1)); - Assert.That(sourcesFromOptions[0], Is.InstanceOf()); - Assert.That((sourcesFromOptions[0] as ElasticsearchChatDataSource).IndexName, Is.EqualTo("my-index-name")); - - options.AddDataSource(new AzureCosmosDBChatDataSource() - { - Authentication = DataSourceAuthentication.FromApiKey("api-key"), - ContainerName = "my-container-name", - DatabaseName = "my_database_name", - FieldMappings = new() - { - ContentFieldNames = { "hello", "world" }, - }, - IndexName = "my-index-name", - VectorizationSource = 
DataSourceVectorizer.FromDeploymentName("my-deployment"), - }); - sourcesFromOptions = options.GetDataSources(); - Assert.That(sourcesFromOptions, Has.Count.EqualTo(2)); - Assert.That(sourcesFromOptions[1], Is.InstanceOf()); - } - - [Test] - public void SearchExtensionWorks() - { - string searchEndpoint = Environment.GetEnvironmentVariable("AOAI_SEARCH_ENDPOINT"); - string searchKey = Environment.GetEnvironmentVariable("AOAI_SEARCH_API_KEY"); - string searchIndex = Environment.GetEnvironmentVariable("AOAI_SEARCH_INDEX_NAME"); - - AzureSearchChatDataSource source = new() - { - Endpoint = new Uri(searchEndpoint), - Authentication = DataSourceAuthentication.FromApiKey(searchKey), - IndexName = searchIndex, - AllowPartialResult = true, - QueryType = DataSourceQueryType.Simple, - }; - ChatCompletionOptions options = new(); - options.AddDataSource(source); - - ChatClient client = GetTestClient("gpt-4"); - ClientResult chatCompletionResult = client.CompleteChat( - [new UserChatMessage("What does the term 'PR complete' mean?")], - options); - Console.WriteLine($"--- RESPONSE CONTENT ---"); - Console.WriteLine(chatCompletionResult.GetRawResponse().Content); - ChatCompletion chatCompletion = chatCompletionResult.Value; - AzureChatMessageContext context = chatCompletion.GetAzureMessageContext(); - Assert.That(context?.Intent, Is.Not.Null.Or.Empty); - Assert.That(context?.Citations, Has.Count.GreaterThan(0)); - Assert.That(context.Citations[0].Filepath, Is.Not.Null.Or.Empty); - Assert.That(context.Citations[0].Content, Is.Not.Null.Or.Empty); - Assert.That(context.Citations[0].ChunkId, Is.Not.Null.Or.Empty); - Assert.That(context.Citations[0].Title, Is.Not.Null.Or.Empty); - Assert.That(context.Citations[0].Url, Is.Not.Null.Or.Empty); - } - - [Test] - public void StreamingSearchExtensionWorks() - { - string searchEndpoint = Environment.GetEnvironmentVariable("AOAI_SEARCH_ENDPOINT"); - string searchKey = Environment.GetEnvironmentVariable("AOAI_SEARCH_API_KEY"); - string 
searchIndex = Environment.GetEnvironmentVariable("AOAI_SEARCH_INDEX_NAME"); - - AzureSearchChatDataSource source = new() - { - Endpoint = new Uri(searchEndpoint), - Authentication = DataSourceAuthentication.FromApiKey(searchKey), - IndexName = searchIndex, - AllowPartialResult = true, - QueryType = DataSourceQueryType.Simple, - }; - ChatCompletionOptions options = new(); - options.AddDataSource(source); - - ChatClient client = GetTestClient("gpt-4"); - - ResultCollection chatUpdates = client.CompleteChatStreaming( - [new UserChatMessage("What does the term 'PR complete' mean?")], - options); - - StringBuilder contentBuilder = new(); - List contexts = []; - - foreach (StreamingChatCompletionUpdate chatUpdate in chatUpdates) - { - AzureChatMessageContext context = chatUpdate.GetAzureMessageContext(); - if (context is not null) - { - contexts.Add(context); - } - foreach (ChatMessageContentPart contentPart in chatUpdate.ContentUpdate) - { - contentBuilder.Append(contentPart.Text); - } - } - - Assert.That(contentBuilder.ToString(), Is.Not.Null.Or.Empty); - Assert.That(contexts, Has.Count.EqualTo(1)); - Assert.That(contexts[0].Intent, Is.Not.Null.Or.Empty); - Assert.That(contexts[0].Citations, Has.Count.GreaterThan(0)); - Assert.That(contexts[0].Citations[0].Content, Is.Not.Null.Or.Empty); - } -} \ No newline at end of file diff --git a/.dotnet.azure/tests/FileTests.cs b/.dotnet.azure/tests/FileTests.cs deleted file mode 100644 index ba86a2740..000000000 --- a/.dotnet.azure/tests/FileTests.cs +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -#nullable disable - -using OpenAI.Files; -using System.ClientModel; - -namespace Azure.AI.OpenAI.Tests; - -public class FileTests : TestBase -{ - [Test] - [Category("Smoke")] - public void CanCreateClient() => Assert.That(GetTestClient(), Is.InstanceOf()); - - [Test] - public void CanUploadAndDeleteFiles() - { - FileClient client = GetTestClient(); - OpenAIFileInfo file = client.UploadFile( - BinaryData.FromString("hello, world!"), - "test_file_delete_me.txt", - FileUploadPurpose.Assistants); - Validate(file); - bool deleted = client.DeleteFile(file); - Assert.IsTrue(deleted); - } -} \ No newline at end of file diff --git a/.dotnet.azure/tests/FineTuningTests.cs b/.dotnet.azure/tests/FineTuningTests.cs deleted file mode 100644 index 248e1373a..000000000 --- a/.dotnet.azure/tests/FineTuningTests.cs +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -#nullable disable - -using OpenAI.FineTuning; - -namespace Azure.AI.OpenAI.Tests; - -public class FineTuningTests : TestBase -{ - [Test] - [Category("Smoke")] - public void CanCreateClient() => Assert.That(GetTestClient(), Is.InstanceOf()); -} \ No newline at end of file diff --git a/.dotnet.azure/tests/TestBase.cs b/.dotnet.azure/tests/TestBase.cs deleted file mode 100644 index 55712e559..000000000 --- a/.dotnet.azure/tests/TestBase.cs +++ /dev/null @@ -1,283 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -#nullable disable - -using Azure.Core; -using Azure.Identity; -using OpenAI.Assistants; -using OpenAI.Audio; -using OpenAI.Batch; -using OpenAI.Chat; -using OpenAI.Embeddings; -using OpenAI.Files; -using OpenAI.FineTuning; -using OpenAI.Images; -using OpenAI.Tests; -using OpenAI.VectorStores; -using System.ClientModel; -using System.ClientModel.Primitives; -using System.Net; -using System.Security.Cryptography; -using System.Text.Json; - -namespace Azure.AI.OpenAI.Tests; - -public class TestBase -{ - internal TestConfig TestConfig { get; } - - protected TestBase() - { - TestConfig = new TestConfig(); - } - - internal AzureOpenAIClient GetTestTopLevelClient(TestClientOptions options = null) - => GetExplicitTestTopLevelClient(options); - internal AzureOpenAIClient GetTestTopLevelClient(TestClientOptions options = null) - => GetExplicitTestTopLevelClient(options); - private AzureOpenAIClient GetExplicitTestTopLevelClient(TestClientOptions options = null, bool honorParentClient = true) - { - // If the top-level client is being requested on behalf of another client (e.g. a file client for resources to - // use with an assistant client), then we'll ensure we match the configuration of the dependent client to its - // progenitor. 
- if (honorParentClient && options?.ParentClientObject is not null) - { - return options.ParentClientObject switch - { -#pragma warning disable OPENAI001 - AssistantClient => GetExplicitTestTopLevelClient(options, false), -#pragma warning restore - BatchClient => GetExplicitTestTopLevelClient(options, false), - ChatClient => GetExplicitTestTopLevelClient(options, false), - EmbeddingClient => GetExplicitTestTopLevelClient(options, false), - FileClient => GetExplicitTestTopLevelClient(options, false), - FineTuningClient => GetExplicitTestTopLevelClient(options, false), - ImageClient => GetExplicitTestTopLevelClient(options, false), -#pragma warning disable OPENAI001 - VectorStoreClient => GetExplicitTestTopLevelClient(options, false), -#pragma warning restore - _ => throw new NotImplementedException() - }; - } - - Uri endpoint = TestConfig.GetEndpointFor(); - - ApiKeyCredential apiKeyCredential = typeof(TCredential) == typeof(ApiKeyCredential) - ? TestConfig.GetApiKeyFromEnvironmentFor() - : null; - TokenCredential tokenCredential = typeof(TCredential) == typeof(TokenCredential) - ? new DefaultAzureCredential() - : null; - - options ??= new(); - Action requestAction = options.ShouldOutputRequests ? DumpRequest : null; - Action responseAction = options.ShouldOutputResponses ? DumpResponse : null; - options.AddPolicy(new TestPipelinePolicy(requestAction, responseAction), PipelinePosition.PerCall); - - AzureOpenAIClient client = - typeof(TCredential) == typeof(ApiKeyCredential) - ? new(endpoint, apiKeyCredential, options) - : (typeof(TCredential) == typeof(TokenCredential)) - ? 
new(endpoint, tokenCredential, options) - : throw new NotImplementedException(); - - return client; - } - - internal TClient GetTestClient(string overrideDeploymentName, TestClientOptions options = null) - => GetExplicitTestClient(overrideDeploymentName, options); - internal TClient GetTestClient(TestClientOptions options = null) - => GetExplicitTestClient(null, options); - internal TClient GetTestClient(TestClientOptions options = null) - => GetExplicitTestClient(null, options); - internal TChildClient GetChildTestClient(TClient parentClient) - => GetExplicitTestClient(null, new() { ParentClientObject = parentClient }); - private TExplicitClient GetExplicitTestClient(string overrideDeploymentName = null, TestClientOptions options = null) - { - AzureOpenAIClient topLevelClient = GetExplicitTestTopLevelClient(options); - string GetDeployment() => overrideDeploymentName ?? TestConfig.GetDeploymentNameFor(); - object clientObject = null; - switch (typeof(TExplicitClient).Name) - { -#pragma warning disable OPENAI001 - case nameof(AssistantClient): - clientObject = topLevelClient.GetAssistantClient(); - break; -#pragma warning restore - case nameof(AudioClient): - clientObject = topLevelClient.GetAudioClient(GetDeployment()); - break; - case nameof(BatchClient): - clientObject = topLevelClient.GetBatchClient(GetDeployment()); - break; - case nameof(ChatClient): - clientObject = topLevelClient.GetChatClient(GetDeployment()); - break; - case nameof(EmbeddingClient): - clientObject = topLevelClient.GetEmbeddingClient(GetDeployment()); - break; - case nameof(FileClient): - clientObject = topLevelClient.GetFileClient(); - break; - case nameof(FineTuningClient): - clientObject = topLevelClient.GetFineTuningClient(); - break; - case nameof(ImageClient): - clientObject = topLevelClient.GetImageClient(GetDeployment()); - break; -#pragma warning disable OPENAI001 - case nameof(VectorStoreClient): - clientObject = topLevelClient.GetVectorStoreClient(); - break; -#pragma warning 
restore - default: throw new NotImplementedException($"Test client helpers not yet implemented for {typeof(TExplicitClient)}"); - }; - return (TExplicitClient)clientObject; - } - - private static void DumpRequest(PipelineRequest request) - { - Console.WriteLine($"--- New request ---"); - IEnumerable headerPairs = request.Headers? - .Select(header => $"{header.Key}={(header.Key.ToLower().Contains("auth") ? "***" : header.Value)}"); - string headers = string.Join(',', headerPairs); - Console.WriteLine($"Headers: {headers}"); - Console.WriteLine($"{request.Method} URI: {request?.Uri}"); - if (request.Content is not null) - { - using MemoryStream stream = new(); - request.Content.WriteTo(stream, default); - stream.Position = 0; - using StreamReader reader = new(stream); - Console.WriteLine(reader.ReadToEnd()); - } - } - - private static void DumpResponse(PipelineResponse response) - { - Console.WriteLine($"--- Response --- "); - } - - protected void ValidateById(string id) - { - Assert.That(id, Is.Not.Null.Or.Empty); - switch (typeof(T).Name) - { - case nameof(Assistant): _assistantIdsToDelete.Add(id); break; - case nameof(AssistantThread): _threadIdsToDelete.Add(id); break; - case nameof(OpenAIFileInfo): _fileIdsToDelete.Add(id); break; - case nameof(ThreadRun): break; - case nameof(VectorStore): _vectorStoreIdsToDelete.Add(id); break; - default: throw new NotImplementedException(); - } - } - - protected void ValidateById(string id, string parentId) - { - Assert.That(id, Is.Not.Null.Or.Empty); - Assert.That(parentId, Is.Not.Null.Or.Empty); - switch (typeof(T).Name) - { - case nameof(ThreadMessage): - _threadIdsWithMessageIdsToDelete.Add((parentId, id)); - break; - case nameof(VectorStoreFileAssociation): - _vectorStoreFileAssociationsToRemove.Add((parentId, id)); - break; - default: - throw new NotImplementedException(); - } - } - - /// - /// Performs basic, invariant validation of a target that was just instantiated from its corresponding origination - /// mechanism. 
If applicable, the instance is recorded into the test run for cleanup of persistent resources. - /// - /// Instance type being validated. - /// The instance to validate. - /// The provided instance type isn't supported. - protected void Validate(T target) - { - if (target is ThreadMessage message) - { - ValidateById(message.Id, message.ThreadId); - } - else if (target is VectorStoreFileAssociation fileAssociation) - { - ValidateById(fileAssociation.VectorStoreId, fileAssociation.FileId); - } - else - { - ValidateById(target switch - { - Assistant assistant => assistant.Id, - AssistantThread thread => thread.Id, - OpenAIFileInfo file => file.Id, - ThreadRun run => run.Id, - VectorStore store => store.Id, - _ => throw new NotImplementedException(), - }); - } - } - - [TearDown] - protected void Cleanup() - { - AzureOpenAIClient topLevelCleanupClient = GetTestTopLevelClient(new() - { - ShouldOutputRequests = false, - ShouldOutputResponses = false, - }); -#pragma warning disable OPENAI001 - AssistantClient client = topLevelCleanupClient.GetAssistantClient(); - VectorStoreClient vectorStoreClient = topLevelCleanupClient.GetVectorStoreClient(); -#pragma warning restore - FileClient fileClient = topLevelCleanupClient.GetFileClient(); - RequestOptions requestOptions = new() { ErrorOptions = ClientErrorBehaviors.NoThrow, }; - foreach ((string threadId, string messageId) in _threadIdsWithMessageIdsToDelete) - { - Console.WriteLine($"Cleanup: {messageId} -> {client.DeleteMessage(threadId, messageId, requestOptions)?.GetRawResponse().Status}"); - } - foreach (string assistantId in _assistantIdsToDelete) - { - Console.WriteLine($"Cleanup: {assistantId} -> {client.DeleteAssistant(assistantId, requestOptions)?.GetRawResponse().Status}"); - } - foreach (string threadId in _threadIdsToDelete) - { - Console.WriteLine($"Cleanup: {threadId} -> {client.DeleteThread(threadId, requestOptions)?.GetRawResponse().Status}"); - } - foreach ((string vectorStoreId, string fileId) in 
_vectorStoreFileAssociationsToRemove) - { - Console.WriteLine($"Cleanup: {vectorStoreId}<->{fileId} => {vectorStoreClient.RemoveFileFromStore(vectorStoreId, fileId, requestOptions)?.GetRawResponse().Status}"); - } - foreach (string vectorStoreId in _vectorStoreIdsToDelete) - { - Console.WriteLine($"Cleanup: {vectorStoreId} => {vectorStoreClient.DeleteVectorStore(vectorStoreId, requestOptions)?.GetRawResponse().Status}"); - } - foreach (string fileId in _fileIdsToDelete) - { - Console.WriteLine($"Cleanup: {fileId} -> {fileClient.DeleteFile(fileId, requestOptions)?.GetRawResponse().Status}"); - } - _threadIdsWithMessageIdsToDelete.Clear(); - _assistantIdsToDelete.Clear(); - _threadIdsToDelete.Clear(); - _vectorStoreFileAssociationsToRemove.Clear(); - _vectorStoreIdsToDelete.Clear(); - _fileIdsToDelete.Clear(); - } - - private readonly List _assistantIdsToDelete = []; - private readonly List _threadIdsToDelete = []; - private readonly List<(string, string)> _threadIdsWithMessageIdsToDelete = []; - private readonly List _fileIdsToDelete = []; - private readonly List<(string, string)> _vectorStoreFileAssociationsToRemove = []; - private readonly List _vectorStoreIdsToDelete = []; -} - -internal class TestClientOptions : AzureOpenAIClientOptions -{ - public bool ShouldOutputRequests { get; init; } = true; - public bool ShouldOutputResponses { get; init; } = true; - public object ParentClientObject { get; init; } -} \ No newline at end of file diff --git a/.dotnet.azure/tests/TestConfig.cs b/.dotnet.azure/tests/TestConfig.cs deleted file mode 100644 index 93ad4d979..000000000 --- a/.dotnet.azure/tests/TestConfig.cs +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -#nullable disable - -using OpenAI.Assistants; -using OpenAI.Audio; -using OpenAI.Batch; -using OpenAI.Chat; -using OpenAI.Embeddings; -using OpenAI.Files; -using OpenAI.FineTuning; -using OpenAI.Images; -using OpenAI.VectorStores; -using System.ClientModel; - -namespace Azure.AI.OpenAI.Tests; - -internal class TestConfig -{ - private readonly dynamic _dynamicConfig; - - public TestConfig() - { - string configPath = Path.Combine(AssetFolderName, AssetFilename); - if (File.Exists(configPath)) - { - using Stream configStream = File.OpenRead(configPath); - BinaryData configData = BinaryData.FromStream(configStream); - _dynamicConfig = configData.ToDynamicFromJson(); - } - } - - public Uri GetEndpointFor() - { - dynamic configNode = GetConfigNode(); - string endpointFromVariable = configNode?.endpoint_name is not null - ? Environment.GetEnvironmentVariable(configNode.endpoint_name) : null; - string rawEndpoint = configNode?.endpoint - ?? endpointFromVariable - ?? throw new KeyNotFoundException($"{typeof(TClient)}: endpoint"); - - return new Uri(rawEndpoint); - } - - public ApiKeyCredential GetApiKeyFromEnvironmentFor() - { - string environmentVariableName = GetConfigNode()?.api_key_name - ?? throw new KeyNotFoundException($"{typeof(TClient)}: api_key_name"); - string rawKeyFromEnvironment = Environment.GetEnvironmentVariable(environmentVariableName) - ?? throw new KeyNotFoundException(environmentVariableName); - return new(rawKeyFromEnvironment); - } - - public string GetDeploymentNameFor() - => GetConfigNode()?.deployment - ?? 
throw new NotImplementedException(); - - private dynamic GetConfigNode() - { - switch (typeof(TClient).Name) - { -#pragma warning disable OPENAI001 - case nameof(AssistantClient): return _dynamicConfig?.assistants; -#pragma warning restore - case nameof(AudioClient): return _dynamicConfig?.audio; - case nameof(BatchClient): return _dynamicConfig?.batch; - case nameof(ChatClient): return _dynamicConfig?.chat; - case nameof(EmbeddingClient): return _dynamicConfig?.embeddings; - case nameof(FileClient): return _dynamicConfig?.files; - case nameof(FineTuningClient): return _dynamicConfig?.fine_tuning; - case nameof(ImageClient): return _dynamicConfig?.images; -#pragma warning disable OPENAI001 - case nameof(VectorStoreClient): return _dynamicConfig?.vector_stores; -#pragma warning restore - default: throw new NotImplementedException(typeof(TClient).Name); - } - } - - private const string AssetFolderName = "Assets"; - private const string AssetFilename = "test_config.json"; -} \ No newline at end of file diff --git a/.dotnet.azure/tests/TestPipelinePolicy.cs b/.dotnet.azure/tests/TestPipelinePolicy.cs deleted file mode 100644 index cc5ccc395..000000000 --- a/.dotnet.azure/tests/TestPipelinePolicy.cs +++ /dev/null @@ -1,43 +0,0 @@ -using System; -using System.ClientModel; -using System.ClientModel.Primitives; -using System.Collections.Generic; -using System.Threading.Tasks; - -namespace OpenAI.Tests; - -internal partial class TestPipelinePolicy : PipelinePolicy -{ - private readonly Action _processRequestAction; - private readonly Action _processResponseAction; - - public TestPipelinePolicy(Action requestAction, Action responseAction) - { - _processRequestAction = requestAction; - _processResponseAction = responseAction; - } - - public override void Process(PipelineMessage message, IReadOnlyList pipeline, int currentIndex) - { - InvokeActions(message); - ProcessNext(message, pipeline, currentIndex); - } - - public override ValueTask ProcessAsync(PipelineMessage message, 
IReadOnlyList pipeline, int currentIndex) - { - InvokeActions(message); - return ProcessNextAsync(message, pipeline, currentIndex); - } - - private void InvokeActions(PipelineMessage message) - { - if (message?.Request is not null) - { - _processRequestAction?.Invoke(message.Request); - } - if (message?.Response is not null) - { - _processResponseAction?.Invoke(message.Response); - } - } -} \ No newline at end of file diff --git a/.dotnet/.github/ISSUE_TEMPLATE/bug_report.yaml b/.dotnet/.github/ISSUE_TEMPLATE/bug_report.yaml index 479ab9af9..6cc705203 100644 --- a/.dotnet/.github/ISSUE_TEMPLATE/bug_report.yaml +++ b/.dotnet/.github/ISSUE_TEMPLATE/bug_report.yaml @@ -6,13 +6,21 @@ body: attributes: value: | Thanks for taking the time to fill out this bug report! + - type: checkboxes + id: non_python + attributes: + label: Confirm this is not an issue with the OpenAI Python Library + description: Issues with the OpenAI Python Library should be reported in our [OpenAI Python SDK repo](https://github.com/openai/openai-python/issues) + options: + - label: This is not an issue with the OpenAI Python Library + required: true - type: checkboxes id: non_api attributes: label: Confirm this is not an issue with the underlying OpenAI API description: Issues with the underlying OpenAI API should be reported in our [Developer Community](https://community.openai.com/c/api/7) options: - - label: This is an issue with the Python library + - label: This is not an issue with the OpenAI API required: true - type: checkboxes id: non_azure @@ -69,4 +77,4 @@ body: label: Library version placeholder: validations: - required: true \ No newline at end of file + required: true diff --git a/.dotnet/CHANGELOG.md b/.dotnet/CHANGELOG.md index 1ec332c88..26b286043 100644 --- a/.dotnet/CHANGELOG.md +++ b/.dotnet/CHANGELOG.md @@ -1,17 +1,102 @@ # Release History -## 2.0.0-beta.9 (Unreleased) +## 2.0.0-beta.12 (Unreleased) ### Features Added -- Added `OpenAIAudioModelFactory`, 
`OpenAIEmbeddingsModelFactory`, and `OpenAIImagesModelFactory` static classes to the `Audio`, `Embeddings`, and `Images` namespaces, respectively. Model factories can be used to instantiate OpenAI models for mocking in non-live test scenarios. +### Breaking Changes + +- Renamed `ChatMessageContentPart`'s `CreateTextMessageContentPart` factory method to `CreateTextPart`. (commit_hash) +- Renamed `ChatMessageContentPart`'s `CreateImageMessageContentPart` factory method to `CreateImagePart`. (commit_hash) +- Renamed `ChatMessageContentPart`'s `CreateRefusalMessageContentPart` factory method to `CreateRefusalPart`. (commit_hash) +- Renamed `ImageChatMessageContentPartDetail` to `ChatImageDetailLevel`. +- Removed `ChatMessageContentPart`'s `ToString` overload. (commit_hash) +- Removed the common `ListOrder` enum from the top-level `OpenAI` namespace in favor of individual enums in their corresponding sub-namespace. (commit_hash) +- Renamed the `PageSize` property to `PageSizeLimit`. (commit_hash) + +### Bugs Fixed + +- Addressed an issue that caused multi-page queries of fine-tuning jobs, checkpoints, and events to fail. (commit_hash) + +### Other Changes + +## 2.0.0-beta.11 (2024-09-03) + +### Features Added + +- Added the `OpenAIChatModelFactory` in the `OpenAI.Chat` namespace (a static class that can be used to instantiate OpenAI models for mocking in non-live test scenarios). ([79014ab](https://github.com/openai/openai-dotnet/commit/79014abc01a00e13d5a334d3f6529ed590b8ee98)) ### Breaking Changes +- Updated fine-tuning pagination methods `GetJobs`, `GetEvents`, and `GetJobCheckpoints` to return `IEnumerable` instead of `ClientResult`. ([5773292](https://github.com/openai/openai-dotnet/commit/57732927575c6c48f30bded0afb9f5b16d4f30da)) +- Updated the batching pagination method `GetBatches` to return `IEnumerable` instead of `ClientResult`. 
([5773292](https://github.com/openai/openai-dotnet/commit/57732927575c6c48f30bded0afb9f5b16d4f30da)) +- Changed `GeneratedSpeechVoice` from an enum to an "extensible enum". ([79014ab](https://github.com/openai/openai-dotnet/commit/79014abc01a00e13d5a334d3f6529ed590b8ee98)) +- Changed `GeneratedSpeechFormat` from an enum to an "extensible enum". ([cc9169a](https://github.com/openai/openai-dotnet/commit/cc9169ad2ff92bb7312eed3b7e64e45da5da1d18)) +- Renamed `SpeechGenerationOptions`'s `Speed` property to `SpeedRatio`. ([cc9169a](https://github.com/openai/openai-dotnet/commit/cc9169ad2ff92bb7312eed3b7e64e45da5da1d18)) + ### Bugs Fixed +- Corrected an internal deserialization issue that caused recent updates to Assistants `file_search` to fail when streaming a run. Strongly typed support for `ranking_options` is not included but will arrive soon. ([cc9169a](https://github.com/openai/openai-dotnet/commit/cc9169ad2ff92bb7312eed3b7e64e45da5da1d18)) +- Mitigated a .NET runtime issue that prevented `ChatResponseFormat` from serializing correct on targets including Unity. ([cc9169a](https://github.com/openai/openai-dotnet/commit/cc9169ad2ff92bb7312eed3b7e64e45da5da1d18)) + ### Other Changes +- Reverted the removal of the version path parameter "v1" from the default endpoint URL. ([583e9f6](https://github.com/openai/openai-dotnet/commit/583e9f6f519feeee0e2907e80bf7d5bf8302d93f)) +- Added the `Experimental` attribute to the following APIs: + - All public APIs in the `OpenAI.Assistants` namespace. ([79014ab](https://github.com/openai/openai-dotnet/commit/79014abc01a00e13d5a334d3f6529ed590b8ee98)) + - All public APIs in the `OpenAI.VectorStores` namespace. ([79014ab](https://github.com/openai/openai-dotnet/commit/79014abc01a00e13d5a334d3f6529ed590b8ee98)) + - All public APIs in the `OpenAI.Batch` namespace. ([0f5e024](https://github.com/openai/openai-dotnet/commit/0f5e0249cffd42755fc9a820e65fb025fd4f986c)) + - All public APIs in the `OpenAI.FineTuning` namespace. 
([0f5e024](https://github.com/openai/openai-dotnet/commit/0f5e0249cffd42755fc9a820e65fb025fd4f986c)) + - The `ChatCompletionOptions.Seed` property. ([0f5e024](https://github.com/openai/openai-dotnet/commit/0f5e0249cffd42755fc9a820e65fb025fd4f986c)) + +## 2.0.0-beta.10 (2024-08-26) + +### Breaking Changes + +- Renamed `AudioClient`'s `GenerateSpeechFromText` methods to simply `GenerateSpeech`. ([d84bf54](https://github.com/openai/openai-dotnet/commit/d84bf54df14ddac4c49f6efd61467b600d34ecd7)) +- Changed the type of `OpenAIFileInfo`'s `SizeInBytes` property from `long?` to `int?`. ([d84bf54](https://github.com/openai/openai-dotnet/commit/d84bf54df14ddac4c49f6efd61467b600d34ecd7)) + +### Bugs Fixed + +- Fixed a newly introduced bug ([#185](https://github.com/openai/openai-dotnet/pull/185)) where providing `OpenAIClientOptions` to a top-level `OpenAIClient` did not carry over to scenario clients (e.g. `ChatClient`) created via that top-level client ([d84bf54](https://github.com/openai/openai-dotnet/commit/d84bf54df14ddac4c49f6efd61467b600d34ecd7)) + +### Other Changes + +- Removed the version path parameter "v1" from the default endpoint URL. ([d84bf54](https://github.com/openai/openai-dotnet/commit/d84bf54df14ddac4c49f6efd61467b600d34ecd7)) + +## 2.0.0-beta.9 (2024-08-23) + +### Features Added + +- Added support for the new [structured outputs](https://platform.openai.com/docs/guides/structured-outputs/introduction) response format feature, which enables chat completions, assistants, and tools on each of those clients to provide a specific JSON Schema that generated content should adhere to. 
([3467b53](https://github.com/openai/openai-dotnet/commit/3467b535c918e72237a4c0dc36d4bda5548edb7a)) + - To enable top-level structured outputs for response content, use `ChatResponseFormat.CreateJsonSchemaFormat()` and `AssistantResponseFormat.CreateJsonSchemaFormat()` as the `ResponseFormat` in method options like `ChatCompletionOptions` + - To enable structured outputs for function tools, set `StrictParameterSchemaEnabled` to `true` on the tool definition + - For more information, please see [the new section in readme.md](readme.md#how-to-use-structured-outputs) +- Chat completions: the request message types of `AssistantChatMessage`, `SystemChatMessage`, and `ToolChatMessage` now support array-based content part collections in addition to simple string input. ([3467b53](https://github.com/openai/openai-dotnet/commit/3467b535c918e72237a4c0dc36d4bda5548edb7a)) +- Added the following model factories (static classes that can be used to instantiate OpenAI models for mocking in non-live test scenarios): + - `OpenAIAudioModelFactory` in the `OpenAI.Audio` namespace ([3284295](https://github.com/openai/openai-dotnet/commit/3284295e7fd9922a3395d921513473bcb483655e)) + - `OpenAIEmbeddingsModelFactory` in the `OpenAI.Embeddings` namespace ([3284295](https://github.com/openai/openai-dotnet/commit/3284295e7fd9922a3395d921513473bcb483655e)) + - `OpenAIFilesModelFactory` in the `OpenAI.Files` namespace ([b1ce397](https://github.com/openai/openai-dotnet/commit/b1ce397ff4f9a55db797167be9e86e138ed5d403)) + - `OpenAIImagesModelFactory` in the `OpenAI.Images` namespace ([3284295](https://github.com/openai/openai-dotnet/commit/3284295e7fd9922a3395d921513473bcb483655e)) + - `OpenAIModelsModelFactory` in the `OpenAI.Models` namespace ([b1ce397](https://github.com/openai/openai-dotnet/commit/b1ce397ff4f9a55db797167be9e86e138ed5d403)) + - `OpenAIModerationsModelFactory` in the `OpenAI.Moderations` namespace 
([b1ce397](https://github.com/openai/openai-dotnet/commit/b1ce397ff4f9a55db797167be9e86e138ed5d403)) + +### Breaking Changes + +- Removed client constructors that do not explicitly take an API key parameter or an endpoint via an `OpenAIClientOptions` parameter, making it clearer how to appropriately instantiate a client. ([13a9c68](https://github.com/openai/openai-dotnet/commit/13a9c68647c8d54475f1529a63b13ad711bd4ba6)) +- Removed the endpoint parameter from all client constructors, making it clearer that an alternative endpoint must be specified via the `OpenAIClientOptions` parameter. ([13a9c68](https://github.com/openai/openai-dotnet/commit/13a9c68647c8d54475f1529a63b13ad711bd4ba6)) +- Removed `OpenAIClient`'s `Endpoint` `protected` property. ([13a9c68](https://github.com/openai/openai-dotnet/commit/13a9c68647c8d54475f1529a63b13ad711bd4ba6)) +- Made `OpenAIClient`'s constructor that takes a `ClientPipeline` parameter `protected internal` instead of just `protected`. ([13a9c68](https://github.com/openai/openai-dotnet/commit/13a9c68647c8d54475f1529a63b13ad711bd4ba6)) +- Renamed the `User` property in applicable Options classes to `EndUserId`, making its purpose clearer. ([13a9c68](https://github.com/openai/openai-dotnet/commit/13a9c68647c8d54475f1529a63b13ad711bd4ba6)) + +### Bugs Fixed + +- The `Assistants` namespace `VectorStoreCreationHelper` type now properly includes a `ChunkingStrategy` property. ([3467b53](https://github.com/openai/openai-dotnet/commit/3467b535c918e72237a4c0dc36d4bda5548edb7a)) + +### Other Changes + +- `ChatCompletion.ToString()` will no longer throw an exception when no content is present, as is the case for tool calls. Additionally, if a tool call is present with no content, `ToString()` will return the serialized form of the first available tool call. 
([3467b53](https://github.com/openai/openai-dotnet/commit/3467b535c918e72237a4c0dc36d4bda5548edb7a)) + ## 2.0.0-beta.8 (2024-07-31) ### Breaking Changes diff --git a/.dotnet/README.md b/.dotnet/README.md index 891b11eb8..7185e3464 100644 --- a/.dotnet/README.md +++ b/.dotnet/README.md @@ -17,6 +17,7 @@ It is generated from our [OpenAPI specification](https://github.com/openai/opena - [Using the `OpenAIClient` class](#using-the-openaiclient-class) - [How to use chat completions with streaming](#how-to-use-chat-completions-with-streaming) - [How to use chat completions with tools and function calling](#how-to-use-chat-completions-with-tools-and-function-calling) +- [How to use structured outputs](#how-to-use-structured-outputs) - [How to generate text embeddings](#how-to-generate-text-embeddings) - [How to generate images](#how-to-generate-images) - [How to transcribe audio](#how-to-transcribe-audio) @@ -296,6 +297,60 @@ do } while (requiresAction); ``` +## How to use structured outputs + +Beginning with the `gpt-4o-mini`, `gpt-4o-mini-2024-07-18`, and `gpt-4o-2024-08-06` model snapshots, structured outputs are available for both top-level response content and tool calls in the chat completion and assistants APIs. + +For information about the feature, see [the Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs/introduction). 
+ +To use structured outputs to constrain chat completion content, set an appropriate `ChatResponseFormat` as in the following example: + +```csharp +ChatCompletionOptions options = new() +{ + ResponseFormat = ChatResponseFormat.CreateJsonSchemaFormat( + name: "math_reasoning", + jsonSchema: BinaryData.FromString(""" + { + "type": "object", + "properties": { + "steps": { + "type": "array", + "items": { + "type": "object", + "properties": { + "explanation": { "type": "string" }, + "output": { "type": "string" } + }, + "required": ["explanation", "output"], + "additionalProperties": false + } + }, + "final_answer": { "type": "string" } + }, + "required": ["steps", "final_answer"], + "additionalProperties": false + } + """), + strictSchemaEnabled: true) +}; + +ChatCompletion chatCompletion = await client.CompleteChatAsync( + ["How can I solve 8x + 7 = -23?"], + options); + +using JsonDocument structuredJson = JsonDocument.Parse(chatCompletion.ToString()); + +Console.WriteLine($"Final answer: {structuredJson.RootElement.GetProperty("final_answer").GetString()}"); +Console.WriteLine("Reasoning steps:"); + +foreach (JsonElement stepElement in structuredJson.RootElement.GetProperty("steps").EnumerateArray()) +{ + Console.WriteLine($" - Explanation: {stepElement.GetProperty("explanation").GetString()}"); + Console.WriteLine($" Output: {stepElement.GetProperty("output")}"); +} +``` + ## How to generate text embeddings In this example, you want to create a trip-planning website that allows customers to write a prompt describing the kind of hotel that they are looking for and then offers hotel recommendations that closely match this description. To achieve this, it is possible to use text embeddings to measure the relatedness of text strings. In summary, you can get embeddings of the hotel descriptions, store them in a vector database, and use them to build a search index that you can query using the embedding of a given customer's prompt. 
diff --git a/.dotnet/api/OpenAI.netstandard2.0.cs b/.dotnet/api/OpenAI.netstandard2.0.cs index 268b9123c..599bfc607 100644 --- a/.dotnet/api/OpenAI.netstandard2.0.cs +++ b/.dotnet/api/OpenAI.netstandard2.0.cs @@ -1,26 +1,9 @@ namespace OpenAI { - public readonly partial struct ListOrder : IEquatable { - private readonly object _dummy; - private readonly int _dummyPrimitive; - public ListOrder(string value); - public static ListOrder NewestFirst { get; } - public static ListOrder OldestFirst { get; } - public readonly bool Equals(ListOrder other); - [EditorBrowsable(EditorBrowsableState.Never)] - public override readonly bool Equals(object obj); - [EditorBrowsable(EditorBrowsableState.Never)] - public override readonly int GetHashCode(); - public static bool operator ==(ListOrder left, ListOrder right); - public static implicit operator ListOrder(string value); - public static bool operator !=(ListOrder left, ListOrder right); - public override readonly string ToString(); - } public class OpenAIClient { protected OpenAIClient(); - public OpenAIClient(OpenAIClientOptions options = null); - public OpenAIClient(ApiKeyCredential credential, OpenAIClientOptions options = null); - protected OpenAIClient(ClientPipeline pipeline, Uri endpoint, OpenAIClientOptions options); - protected Uri Endpoint { get; } + public OpenAIClient(ApiKeyCredential credential, OpenAIClientOptions options); + public OpenAIClient(ApiKeyCredential credential); + protected internal OpenAIClient(ClientPipeline pipeline, OpenAIClientOptions options); public virtual ClientPipeline Pipeline { get; } public virtual AssistantClient GetAssistantClient(); public virtual AudioClient GetAudioClient(string model); @@ -63,9 +46,9 @@ public class Assistant : IJsonModel, IPersistableModel { } public class AssistantClient { protected AssistantClient(); - public AssistantClient(OpenAIClientOptions options = null); - public AssistantClient(ApiKeyCredential credential, OpenAIClientOptions options = null); - 
protected internal AssistantClient(ClientPipeline pipeline, Uri endpoint, OpenAIClientOptions options); + public AssistantClient(ApiKeyCredential credential, OpenAIClientOptions options); + public AssistantClient(ApiKeyCredential credential); + protected internal AssistantClient(ClientPipeline pipeline, OpenAIClientOptions options); public virtual ClientPipeline Pipeline { get; } public virtual ClientResult CancelRun(ThreadRun run); [EditorBrowsable(EditorBrowsableState.Never)] @@ -261,8 +244,24 @@ public class AssistantClient { public class AssistantCollectionOptions { public string AfterId { get; set; } public string BeforeId { get; set; } - public ListOrder? Order { get; set; } - public int? PageSize { get; set; } + public AssistantCollectionOrder? Order { get; set; } + public int? PageSizeLimit { get; set; } + } + public readonly partial struct AssistantCollectionOrder : IEquatable { + private readonly object _dummy; + private readonly int _dummyPrimitive; + public AssistantCollectionOrder(string value); + public static AssistantCollectionOrder Ascending { get; } + public static AssistantCollectionOrder Descending { get; } + public readonly bool Equals(AssistantCollectionOrder other); + [EditorBrowsable(EditorBrowsableState.Never)] + public override readonly bool Equals(object obj); + [EditorBrowsable(EditorBrowsableState.Never)] + public override readonly int GetHashCode(); + public static bool operator ==(AssistantCollectionOrder left, AssistantCollectionOrder right); + public static implicit operator AssistantCollectionOrder(string value); + public static bool operator !=(AssistantCollectionOrder left, AssistantCollectionOrder right); + public override readonly string ToString(); } public class AssistantCreationOptions : IJsonModel, IPersistableModel { public string Description { get; set; } @@ -297,24 +296,33 @@ public class AssistantModificationOptions : IJsonModel.GetFormatFromOptions(ModelReaderWriterOptions options); BinaryData 
IPersistableModel.Write(ModelReaderWriterOptions options); } - public class AssistantResponseFormat : IJsonModel, IPersistableModel { - protected AssistantResponseFormat(); + public abstract class AssistantResponseFormat : IEquatable, IEquatable, IJsonModel, IPersistableModel { public static AssistantResponseFormat Auto { get; } public static AssistantResponseFormat JsonObject { get; } public static AssistantResponseFormat Text { get; } - public bool Equals(AssistantResponseFormat other); + public static AssistantResponseFormat CreateAutoFormat(); + public static AssistantResponseFormat CreateJsonObjectFormat(); + public static AssistantResponseFormat CreateJsonSchemaFormat(string name, BinaryData jsonSchema, string description = null, bool? strictSchemaEnabled = null); + public static AssistantResponseFormat CreateTextFormat(); [EditorBrowsable(EditorBrowsableState.Never)] public override bool Equals(object obj); [EditorBrowsable(EditorBrowsableState.Never)] public override int GetHashCode(); - public static bool operator ==(AssistantResponseFormat left, AssistantResponseFormat right); - public static implicit operator AssistantResponseFormat(string value); - public static bool operator !=(AssistantResponseFormat left, AssistantResponseFormat right); + [EditorBrowsable(EditorBrowsableState.Never)] + public static bool operator ==(AssistantResponseFormat first, AssistantResponseFormat second); + [EditorBrowsable(EditorBrowsableState.Never)] + public static implicit operator AssistantResponseFormat(string plainTextFormat); + [EditorBrowsable(EditorBrowsableState.Never)] + public static bool operator !=(AssistantResponseFormat first, AssistantResponseFormat second); AssistantResponseFormat IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options); void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options); AssistantResponseFormat IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options); string 
IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options); BinaryData IPersistableModel.Write(ModelReaderWriterOptions options); + [EditorBrowsable(EditorBrowsableState.Never)] + bool IEquatable.Equals(AssistantResponseFormat other); + [EditorBrowsable(EditorBrowsableState.Never)] + bool IEquatable.Equals(string other); public override string ToString(); } public class AssistantThread : IJsonModel, IPersistableModel { @@ -334,7 +342,6 @@ public class CodeInterpreterToolDefinition : ToolDefinition, IJsonModel.Create(BinaryData data, ModelReaderWriterOptions options); string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options); BinaryData IPersistableModel.Write(ModelReaderWriterOptions options); - protected override void WriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options); } public class CodeInterpreterToolResources : IJsonModel, IPersistableModel { public IList FileIds { get; set; } @@ -351,7 +358,6 @@ public class FileSearchToolDefinition : ToolDefinition, IJsonModel.Create(BinaryData data, ModelReaderWriterOptions options); string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options); BinaryData IPersistableModel.Write(ModelReaderWriterOptions options); - protected override void WriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options); } public class FileSearchToolResources : IJsonModel, IPersistableModel { public IList NewVectorStores { get; } @@ -364,27 +370,44 @@ public class FileSearchToolResources : IJsonModel, IPer } public class FunctionToolDefinition : ToolDefinition, IJsonModel, IPersistableModel { public FunctionToolDefinition(); - public FunctionToolDefinition(string name, string description = null, BinaryData parameters = null); + public FunctionToolDefinition(string name); public string Description { get; set; } public required string FunctionName { get; set; } public BinaryData Parameters { get; set; } + public bool? 
StrictParameterSchemaEnabled { get; set; } FunctionToolDefinition IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options); void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options); FunctionToolDefinition IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options); string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options); BinaryData IPersistableModel.Write(ModelReaderWriterOptions options); - protected override void WriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options); } public class MessageCollectionOptions { public string AfterId { get; set; } public string BeforeId { get; set; } - public ListOrder? Order { get; set; } - public int? PageSize { get; set; } + public MessageCollectionOrder? Order { get; set; } + public int? PageSizeLimit { get; set; } + } + public readonly partial struct MessageCollectionOrder : IEquatable { + private readonly object _dummy; + private readonly int _dummyPrimitive; + public MessageCollectionOrder(string value); + public static MessageCollectionOrder Ascending { get; } + public static MessageCollectionOrder Descending { get; } + public readonly bool Equals(MessageCollectionOrder other); + [EditorBrowsable(EditorBrowsableState.Never)] + public override readonly bool Equals(object obj); + [EditorBrowsable(EditorBrowsableState.Never)] + public override readonly int GetHashCode(); + public static bool operator ==(MessageCollectionOrder left, MessageCollectionOrder right); + public static implicit operator MessageCollectionOrder(string value); + public static bool operator !=(MessageCollectionOrder left, MessageCollectionOrder right); + public override readonly string ToString(); } public abstract class MessageContent : IJsonModel, IPersistableModel { public MessageImageDetail? 
ImageDetail { get; } public string ImageFileId { get; } public Uri ImageUrl { get; } + public string Refusal { get; } public string Text { get; } public IReadOnlyList TextAnnotations { get; } public static MessageContent FromImageFileId(string imageFileId, MessageImageDetail? detail = null); @@ -396,13 +419,13 @@ public abstract class MessageContent : IJsonModel, IPersistableM MessageContent IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options); string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options); BinaryData IPersistableModel.Write(ModelReaderWriterOptions options); - protected abstract void WriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options); } public class MessageContentUpdate : StreamingUpdate { public MessageImageDetail? ImageDetail { get; } public string ImageFileId { get; } public string MessageId { get; } public int MessageIndex { get; } + public string RefusalUpdate { get; } public MessageRole? Role { get; } public string Text { get; } public TextAnnotationUpdate TextAnnotation { get; } @@ -503,8 +526,24 @@ public class RequiredActionUpdate : RunUpdate { public class RunCollectionOptions { public string AfterId { get; set; } public string BeforeId { get; set; } - public ListOrder? Order { get; set; } - public int? PageSize { get; set; } + public RunCollectionOrder? Order { get; set; } + public int? 
PageSizeLimit { get; set; } + } + public readonly partial struct RunCollectionOrder : IEquatable { + private readonly object _dummy; + private readonly int _dummyPrimitive; + public RunCollectionOrder(string value); + public static RunCollectionOrder Ascending { get; } + public static RunCollectionOrder Descending { get; } + public readonly bool Equals(RunCollectionOrder other); + [EditorBrowsable(EditorBrowsableState.Never)] + public override readonly bool Equals(object obj); + [EditorBrowsable(EditorBrowsableState.Never)] + public override readonly int GetHashCode(); + public static bool operator ==(RunCollectionOrder left, RunCollectionOrder right); + public static implicit operator RunCollectionOrder(string value); + public static bool operator !=(RunCollectionOrder left, RunCollectionOrder right); + public override readonly string ToString(); } public class RunCreationOptions : IJsonModel, IPersistableModel { public string AdditionalInstructions { get; set; } @@ -643,8 +682,24 @@ public abstract class RunStepCodeInterpreterOutput : IJsonModel { + private readonly object _dummy; + private readonly int _dummyPrimitive; + public RunStepCollectionOrder(string value); + public static RunStepCollectionOrder Ascending { get; } + public static RunStepCollectionOrder Descending { get; } + public readonly bool Equals(RunStepCollectionOrder other); + [EditorBrowsable(EditorBrowsableState.Never)] + public override readonly bool Equals(object obj); + [EditorBrowsable(EditorBrowsableState.Never)] + public override readonly int GetHashCode(); + public static bool operator ==(RunStepCollectionOrder left, RunStepCollectionOrder right); + public static implicit operator RunStepCollectionOrder(string value); + public static bool operator !=(RunStepCollectionOrder left, RunStepCollectionOrder right); + public override readonly string ToString(); } public abstract class RunStepDetails : IJsonModel, IPersistableModel { public string CreatedMessageId { get; } @@ -938,13 +993,12 @@ 
public abstract class ToolDefinition : IJsonModel, IPersistableM protected ToolDefinition(string type); public static CodeInterpreterToolDefinition CreateCodeInterpreter(); public static FileSearchToolDefinition CreateFileSearch(int? maxResults = null); - public static FunctionToolDefinition CreateFunction(string name, string description = null, BinaryData parameters = null); + public static FunctionToolDefinition CreateFunction(string name, string description = null, BinaryData parameters = null, bool? strictParameterSchemaEnabled = null); ToolDefinition IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options); void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options); ToolDefinition IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options); string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options); BinaryData IPersistableModel.Write(ModelReaderWriterOptions options); - protected abstract void WriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options); } public class ToolOutput : IJsonModel, IPersistableModel { public ToolOutput(); @@ -970,6 +1024,7 @@ public class VectorStoreCreationHelper : IJsonModel, public VectorStoreCreationHelper(); public VectorStoreCreationHelper(IEnumerable files, IDictionary metadata = null); public VectorStoreCreationHelper(IEnumerable fileIds, IDictionary metadata = null); + public FileChunkingStrategy ChunkingStrategy { get; set; } public IList FileIds { get; } public IDictionary Metadata { get; } VectorStoreCreationHelper IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options); @@ -982,16 +1037,16 @@ public class VectorStoreCreationHelper : IJsonModel, namespace OpenAI.Audio { public class AudioClient { protected AudioClient(); - protected internal AudioClient(ClientPipeline pipeline, string model, Uri endpoint, OpenAIClientOptions options); - public AudioClient(string model, OpenAIClientOptions options = null); - public 
AudioClient(string model, ApiKeyCredential credential, OpenAIClientOptions options = null); + protected internal AudioClient(ClientPipeline pipeline, string model, OpenAIClientOptions options); + public AudioClient(string model, ApiKeyCredential credential, OpenAIClientOptions options); + public AudioClient(string model, ApiKeyCredential credential); public virtual ClientPipeline Pipeline { get; } [EditorBrowsable(EditorBrowsableState.Never)] - public virtual ClientResult GenerateSpeechFromText(BinaryContent content, RequestOptions options = null); - public virtual ClientResult GenerateSpeechFromText(string text, GeneratedSpeechVoice voice, SpeechGenerationOptions options = null, CancellationToken cancellationToken = default); + public virtual ClientResult GenerateSpeech(BinaryContent content, RequestOptions options = null); + public virtual ClientResult GenerateSpeech(string text, GeneratedSpeechVoice voice, SpeechGenerationOptions options = null, CancellationToken cancellationToken = default); [EditorBrowsable(EditorBrowsableState.Never)] - public virtual Task GenerateSpeechFromTextAsync(BinaryContent content, RequestOptions options = null); - public virtual Task> GenerateSpeechFromTextAsync(string text, GeneratedSpeechVoice voice, SpeechGenerationOptions options = null, CancellationToken cancellationToken = default); + public virtual Task GenerateSpeechAsync(BinaryContent content, RequestOptions options = null); + public virtual Task> GenerateSpeechAsync(string text, GeneratedSpeechVoice voice, SpeechGenerationOptions options = null, CancellationToken cancellationToken = default); [EditorBrowsable(EditorBrowsableState.Never)] public virtual ClientResult TranscribeAudio(BinaryContent content, string contentType, RequestOptions options = null); public virtual ClientResult TranscribeAudio(Stream audio, string audioFilename, AudioTranscriptionOptions options = null, CancellationToken cancellationToken = default); @@ -1072,21 +1127,45 @@ public class 
AudioTranslationOptions : IJsonModel, IPer string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options); BinaryData IPersistableModel.Write(ModelReaderWriterOptions options); } - public enum GeneratedSpeechFormat { - Mp3 = 0, - Opus = 1, - Aac = 2, - Flac = 3, - Wav = 4, - Pcm = 5 - } - public enum GeneratedSpeechVoice { - Alloy = 0, - Echo = 1, - Fable = 2, - Onyx = 3, - Nova = 4, - Shimmer = 5 + public readonly partial struct GeneratedSpeechFormat : IEquatable { + private readonly object _dummy; + private readonly int _dummyPrimitive; + public GeneratedSpeechFormat(string value); + public static GeneratedSpeechFormat Aac { get; } + public static GeneratedSpeechFormat Flac { get; } + public static GeneratedSpeechFormat Mp3 { get; } + public static GeneratedSpeechFormat Opus { get; } + public static GeneratedSpeechFormat Pcm { get; } + public static GeneratedSpeechFormat Wav { get; } + public readonly bool Equals(GeneratedSpeechFormat other); + [EditorBrowsable(EditorBrowsableState.Never)] + public override readonly bool Equals(object obj); + [EditorBrowsable(EditorBrowsableState.Never)] + public override readonly int GetHashCode(); + public static bool operator ==(GeneratedSpeechFormat left, GeneratedSpeechFormat right); + public static implicit operator GeneratedSpeechFormat(string value); + public static bool operator !=(GeneratedSpeechFormat left, GeneratedSpeechFormat right); + public override readonly string ToString(); + } + public readonly partial struct GeneratedSpeechVoice : IEquatable { + private readonly object _dummy; + private readonly int _dummyPrimitive; + public GeneratedSpeechVoice(string value); + public static GeneratedSpeechVoice Alloy { get; } + public static GeneratedSpeechVoice Echo { get; } + public static GeneratedSpeechVoice Fable { get; } + public static GeneratedSpeechVoice Nova { get; } + public static GeneratedSpeechVoice Onyx { get; } + public static GeneratedSpeechVoice Shimmer { get; } + public readonly bool 
Equals(GeneratedSpeechVoice other); + [EditorBrowsable(EditorBrowsableState.Never)] + public override readonly bool Equals(object obj); + [EditorBrowsable(EditorBrowsableState.Never)] + public override readonly int GetHashCode(); + public static bool operator ==(GeneratedSpeechVoice left, GeneratedSpeechVoice right); + public static implicit operator GeneratedSpeechVoice(string value); + public static bool operator !=(GeneratedSpeechVoice left, GeneratedSpeechVoice right); + public override readonly string ToString(); } public static class OpenAIAudioModelFactory { public static AudioTranscription AudioTranscription(string language = null, TimeSpan? duration = null, string text = null, IEnumerable words = null, IEnumerable segments = null); @@ -1096,7 +1175,7 @@ public static class OpenAIAudioModelFactory { } public class SpeechGenerationOptions : IJsonModel, IPersistableModel { public GeneratedSpeechFormat? ResponseFormat { get; set; } - public float? Speed { get; set; } + public float? 
SpeedRatio { get; set; } SpeechGenerationOptions IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options); void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options); SpeechGenerationOptions IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options); @@ -1148,9 +1227,9 @@ public class SpeechGenerationOptions : IJsonModel, IPer namespace OpenAI.Batch { public class BatchClient { protected BatchClient(); - public BatchClient(OpenAIClientOptions options = null); - public BatchClient(ApiKeyCredential credential, OpenAIClientOptions options = null); - protected internal BatchClient(ClientPipeline pipeline, Uri endpoint, OpenAIClientOptions options); + public BatchClient(ApiKeyCredential credential, OpenAIClientOptions options); + public BatchClient(ApiKeyCredential credential); + protected internal BatchClient(ClientPipeline pipeline, OpenAIClientOptions options); public virtual ClientPipeline Pipeline { get; } public virtual CreateBatchOperation CreateBatch(BinaryContent content, bool waitUntilCompleted, RequestOptions options = null); public virtual Task CreateBatchAsync(BinaryContent content, bool waitUntilCompleted, RequestOptions options = null); @@ -1174,23 +1253,25 @@ namespace OpenAI.Chat { public class AssistantChatMessage : ChatMessage, IJsonModel, IPersistableModel { public AssistantChatMessage(ChatCompletion chatCompletion); public AssistantChatMessage(ChatFunctionCall functionCall, string content = null); + public AssistantChatMessage(params ChatMessageContentPart[] contentParts); + public AssistantChatMessage(IEnumerable contentParts); public AssistantChatMessage(IEnumerable toolCalls, string content = null); public AssistantChatMessage(string content); public ChatFunctionCall FunctionCall { get; set; } public string ParticipantName { get; set; } + public string Refusal { get; set; } public IList ToolCalls { get; } AssistantChatMessage IJsonModel.Create(ref Utf8JsonReader reader, 
ModelReaderWriterOptions options); void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options); AssistantChatMessage IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options); string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options); BinaryData IPersistableModel.Write(ModelReaderWriterOptions options); - protected override void WriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options); } public class ChatClient { protected ChatClient(); - protected internal ChatClient(ClientPipeline pipeline, string model, Uri endpoint, OpenAIClientOptions options); - public ChatClient(string model, OpenAIClientOptions options = null); - public ChatClient(string model, ApiKeyCredential credential, OpenAIClientOptions options = null); + protected internal ChatClient(ClientPipeline pipeline, string model, OpenAIClientOptions options); + public ChatClient(string model, ApiKeyCredential credential, OpenAIClientOptions options); + public ChatClient(string model, ApiKeyCredential credential); public virtual ClientPipeline Pipeline { get; } public virtual ClientResult CompleteChat(params ChatMessage[] messages); [EditorBrowsable(EditorBrowsableState.Never)] @@ -1213,6 +1294,8 @@ public class ChatCompletion : IJsonModel, IPersistableModel RefusalTokenLogProbabilities { get; } public ChatMessageRole Role { get; } public string SystemFingerprint { get; } public IReadOnlyList ToolCalls { get; } @@ -1225,6 +1308,7 @@ public class ChatCompletion : IJsonModel, IPersistableModel, IPersistableModel { + public string EndUserId { get; set; } public float? FrequencyPenalty { get; set; } public ChatFunctionChoice FunctionChoice { get; set; } public IList Functions { get; } @@ -1241,7 +1325,6 @@ public class ChatCompletionOptions : IJsonModel, IPersist public IList Tools { get; } public int? TopLogProbabilityCount { get; set; } public float? 
TopP { get; set; } - public string User { get; set; } ChatCompletionOptions IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options); void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options); ChatCompletionOptions IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options); @@ -1287,14 +1370,38 @@ public class ChatFunctionChoice : IJsonModel, IPersistableMo string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options); BinaryData IPersistableModel.Write(ModelReaderWriterOptions options); } + public readonly partial struct ChatImageDetailLevel : IEquatable { + private readonly object _dummy; + private readonly int _dummyPrimitive; + public ChatImageDetailLevel(string value); + public static ChatImageDetailLevel Auto { get; } + public static ChatImageDetailLevel High { get; } + public static ChatImageDetailLevel Low { get; } + public readonly bool Equals(ChatImageDetailLevel other); + [EditorBrowsable(EditorBrowsableState.Never)] + public override readonly bool Equals(object obj); + [EditorBrowsable(EditorBrowsableState.Never)] + public override readonly int GetHashCode(); + public static bool operator ==(ChatImageDetailLevel left, ChatImageDetailLevel right); + public static implicit operator ChatImageDetailLevel(string value); + public static bool operator !=(ChatImageDetailLevel left, ChatImageDetailLevel right); + public override readonly string ToString(); + } public abstract class ChatMessage : IJsonModel, IPersistableModel { - public IList Content { get; protected set; } + public IList Content { get; } public static AssistantChatMessage CreateAssistantMessage(ChatCompletion chatCompletion); public static AssistantChatMessage CreateAssistantMessage(ChatFunctionCall functionCall, string content = null); + public static AssistantChatMessage CreateAssistantMessage(params ChatMessageContentPart[] contentParts); + public static AssistantChatMessage CreateAssistantMessage(IEnumerable 
contentParts); public static AssistantChatMessage CreateAssistantMessage(IEnumerable toolCalls, string content = null); public static AssistantChatMessage CreateAssistantMessage(string content); + [Obsolete("This field is marked as deprecated.")] public static FunctionChatMessage CreateFunctionMessage(string functionName, string content); + public static SystemChatMessage CreateSystemMessage(params ChatMessageContentPart[] contentParts); + public static SystemChatMessage CreateSystemMessage(IEnumerable contentParts); public static SystemChatMessage CreateSystemMessage(string content); + public static ToolChatMessage CreateToolChatMessage(string toolCallId, params ChatMessageContentPart[] contentParts); + public static ToolChatMessage CreateToolChatMessage(string toolCallId, IEnumerable contentParts); public static ToolChatMessage CreateToolChatMessage(string toolCallId, string content); public static UserChatMessage CreateUserMessage(params ChatMessageContentPart[] contentParts); public static UserChatMessage CreateUserMessage(IEnumerable contentParts); @@ -1305,31 +1412,32 @@ public abstract class ChatMessage : IJsonModel, IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options); string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options); BinaryData IPersistableModel.Write(ModelReaderWriterOptions options); - protected abstract void WriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options); } public class ChatMessageContentPart : IJsonModel, IPersistableModel { public BinaryData ImageBytes { get; } public string ImageBytesMediaType { get; } - public ImageChatMessageContentPartDetail? ImageDetail { get; } + public ChatImageDetailLevel? 
ImageDetailLevel { get; } public Uri ImageUri { get; } public ChatMessageContentPartKind Kind { get; } + public string Refusal { get; } public string Text { get; } - public static ChatMessageContentPart CreateImageMessageContentPart(BinaryData imageBytes, string imageBytesMediaType, ImageChatMessageContentPartDetail? imageDetail = null); - public static ChatMessageContentPart CreateImageMessageContentPart(Uri imageUri, ImageChatMessageContentPartDetail? imageDetail = null); - public static ChatMessageContentPart CreateTextMessageContentPart(string text); - public static implicit operator ChatMessageContentPart(string content); + public static ChatMessageContentPart CreateImagePart(BinaryData imageBytes, string imageBytesMediaType, ChatImageDetailLevel? imageDetailLevel = null); + public static ChatMessageContentPart CreateImagePart(Uri imageUri, ChatImageDetailLevel? imageDetailLevel = null); + public static ChatMessageContentPart CreateRefusalPart(string refusal); + public static ChatMessageContentPart CreateTextPart(string text); + public static implicit operator ChatMessageContentPart(string text); ChatMessageContentPart IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options); void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options); ChatMessageContentPart IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options); string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options); BinaryData IPersistableModel.Write(ModelReaderWriterOptions options); - public override string ToString(); } public readonly partial struct ChatMessageContentPartKind : IEquatable { private readonly object _dummy; private readonly int _dummyPrimitive; public ChatMessageContentPartKind(string value); public static ChatMessageContentPartKind Image { get; } + public static ChatMessageContentPartKind Refusal { get; } public static ChatMessageContentPartKind Text { get; } public readonly bool 
Equals(ChatMessageContentPartKind other); [EditorBrowsable(EditorBrowsableState.Never)] @@ -1348,9 +1456,10 @@ public enum ChatMessageRole { Tool = 3, Function = 4 } - public class ChatResponseFormat : IJsonModel, IPersistableModel { - public static ChatResponseFormat JsonObject { get; } - public static ChatResponseFormat Text { get; } + public abstract class ChatResponseFormat : IJsonModel, IPersistableModel { + public static ChatResponseFormat CreateJsonObjectFormat(); + public static ChatResponseFormat CreateJsonSchemaFormat(string jsonSchemaFormatName, BinaryData jsonSchema, string jsonSchemaFormatDescription = null, bool? jsonSchemaIsStrict = null); + public static ChatResponseFormat CreateTextFormat(); ChatResponseFormat IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options); void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options); ChatResponseFormat IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options); @@ -1392,8 +1501,9 @@ public class ChatTool : IJsonModel, IPersistableModel { public string FunctionDescription { get; } public string FunctionName { get; } public BinaryData FunctionParameters { get; } + public bool? FunctionSchemaIsStrict { get; } public ChatToolKind Kind { get; } - public static ChatTool CreateFunctionTool(string functionName, string functionDescription = null, BinaryData functionParameters = null); + public static ChatTool CreateFunctionTool(string functionName, string functionDescription = null, BinaryData functionParameters = null, bool? 
functionSchemaIsStrict = null); ChatTool IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options); void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options); ChatTool IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options); @@ -1428,10 +1538,10 @@ public class ChatToolCall : IJsonModel, IPersistableModel, IPersistableModel { - public ChatToolChoice(ChatTool tool); - public static ChatToolChoice Auto { get; } - public static ChatToolChoice None { get; } - public static ChatToolChoice Required { get; } + public static ChatToolChoice CreateAutoChoice(); + public static ChatToolChoice CreateFunctionChoice(string functionName); + public static ChatToolChoice CreateNoneChoice(); + public static ChatToolChoice CreateRequiredChoice(); ChatToolChoice IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options); void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options); ChatToolChoice IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options); @@ -1462,24 +1572,15 @@ public class FunctionChatMessage : ChatMessage, IJsonModel, FunctionChatMessage IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options); string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options); BinaryData IPersistableModel.Write(ModelReaderWriterOptions options); - protected override void WriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options); } - public readonly partial struct ImageChatMessageContentPartDetail : IEquatable { - private readonly object _dummy; - private readonly int _dummyPrimitive; - public ImageChatMessageContentPartDetail(string value); - public static ImageChatMessageContentPartDetail Auto { get; } - public static ImageChatMessageContentPartDetail High { get; } - public static ImageChatMessageContentPartDetail Low { get; } - public readonly bool Equals(ImageChatMessageContentPartDetail other); - 
[EditorBrowsable(EditorBrowsableState.Never)] - public override readonly bool Equals(object obj); - [EditorBrowsable(EditorBrowsableState.Never)] - public override readonly int GetHashCode(); - public static bool operator ==(ImageChatMessageContentPartDetail left, ImageChatMessageContentPartDetail right); - public static implicit operator ImageChatMessageContentPartDetail(string value); - public static bool operator !=(ImageChatMessageContentPartDetail left, ImageChatMessageContentPartDetail right); - public override readonly string ToString(); + public static class OpenAIChatModelFactory { + public static ChatCompletion ChatCompletion(string id = null, ChatFinishReason finishReason = ChatFinishReason.Stop, IEnumerable content = null, string refusal = null, IEnumerable toolCalls = null, ChatMessageRole role = ChatMessageRole.System, ChatFunctionCall functionCall = null, IEnumerable contentTokenLogProbabilities = null, IEnumerable refusalTokenLogProbabilities = null, DateTimeOffset createdAt = default, string model = null, string systemFingerprint = null, ChatTokenUsage usage = null); + public static ChatTokenLogProbabilityInfo ChatTokenLogProbabilityInfo(string token = null, float logProbability = 0, IEnumerable utf8ByteValues = null, IEnumerable topLogProbabilities = null); + public static ChatTokenTopLogProbabilityInfo ChatTokenTopLogProbabilityInfo(string token = null, float logProbability = 0, IEnumerable utf8ByteValues = null); + public static ChatTokenUsage ChatTokenUsage(int outputTokens = 0, int inputTokens = 0, int totalTokens = 0); + public static StreamingChatCompletionUpdate StreamingChatCompletionUpdate(string id = null, IEnumerable contentUpdate = null, StreamingChatFunctionCallUpdate functionCallUpdate = null, IEnumerable toolCallUpdates = null, ChatMessageRole? role = null, string refusalUpdate = null, IEnumerable contentTokenLogProbabilities = null, IEnumerable refusalTokenLogProbabilities = null, ChatFinishReason? 
finishReason = null, DateTimeOffset createdAt = default, string model = null, string systemFingerprint = null, ChatTokenUsage usage = null); + public static StreamingChatFunctionCallUpdate StreamingChatFunctionCallUpdate(string functionArgumentsUpdate = null, string functionName = null); + public static StreamingChatToolCallUpdate StreamingChatToolCallUpdate(int index = 0, string id = null, ChatToolCallKind kind = default, string functionName = null, string functionArgumentsUpdate = null); } public class StreamingChatCompletionUpdate : IJsonModel, IPersistableModel { public IReadOnlyList ContentTokenLogProbabilities { get; } @@ -1489,6 +1590,8 @@ public class StreamingChatCompletionUpdate : IJsonModel RefusalTokenLogProbabilities { get; } + public string RefusalUpdate { get; } public ChatMessageRole? Role { get; } public string SystemFingerprint { get; } public IReadOnlyList ToolCallUpdates { get; } @@ -1521,6 +1624,8 @@ public class StreamingChatToolCallUpdate : IJsonModel.Write(ModelReaderWriterOptions options); } public class SystemChatMessage : ChatMessage, IJsonModel, IPersistableModel { + public SystemChatMessage(params ChatMessageContentPart[] contentParts); + public SystemChatMessage(IEnumerable contentParts); public SystemChatMessage(string content); public string ParticipantName { get; set; } SystemChatMessage IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options); @@ -1528,9 +1633,10 @@ public class SystemChatMessage : ChatMessage, IJsonModel, IPe SystemChatMessage IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options); string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options); BinaryData IPersistableModel.Write(ModelReaderWriterOptions options); - protected override void WriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options); } public class ToolChatMessage : ChatMessage, IJsonModel, IPersistableModel { + public ToolChatMessage(string toolCallId, params 
ChatMessageContentPart[] contentParts); + public ToolChatMessage(string toolCallId, IEnumerable contentParts); public ToolChatMessage(string toolCallId, string content); public string ToolCallId { get; } ToolChatMessage IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options); @@ -1538,7 +1644,6 @@ public class ToolChatMessage : ChatMessage, IJsonModel, IPersis ToolChatMessage IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options); string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options); BinaryData IPersistableModel.Write(ModelReaderWriterOptions options); - protected override void WriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options); } public class UserChatMessage : ChatMessage, IJsonModel, IPersistableModel { public UserChatMessage(params ChatMessageContentPart[] content); @@ -1550,7 +1655,6 @@ public class UserChatMessage : ChatMessage, IJsonModel, IPersis UserChatMessage IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options); string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options); BinaryData IPersistableModel.Write(ModelReaderWriterOptions options); - protected override void WriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options); } } namespace OpenAI.Embeddings { @@ -1565,9 +1669,9 @@ public class Embedding : IJsonModel, IPersistableModel { } public class EmbeddingClient { protected EmbeddingClient(); - protected internal EmbeddingClient(ClientPipeline pipeline, string model, Uri endpoint, OpenAIClientOptions options); - public EmbeddingClient(string model, OpenAIClientOptions options = null); - public EmbeddingClient(string model, ApiKeyCredential credential, OpenAIClientOptions options = null); + protected internal EmbeddingClient(ClientPipeline pipeline, string model, OpenAIClientOptions options); + public EmbeddingClient(string model, ApiKeyCredential credential, OpenAIClientOptions options); + public 
EmbeddingClient(string model, ApiKeyCredential credential); public virtual ClientPipeline Pipeline { get; } public virtual ClientResult GenerateEmbedding(string input, EmbeddingGenerationOptions options = null, CancellationToken cancellationToken = default); public virtual Task> GenerateEmbeddingAsync(string input, EmbeddingGenerationOptions options = null, CancellationToken cancellationToken = default); @@ -1591,7 +1695,7 @@ public class EmbeddingCollection : ObjectModel.ReadOnlyCollection, IJ } public class EmbeddingGenerationOptions : IJsonModel, IPersistableModel { public int? Dimensions { get; set; } - public string User { get; set; } + public string EndUserId { get; set; } EmbeddingGenerationOptions IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options); void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options); EmbeddingGenerationOptions IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options); @@ -1616,23 +1720,19 @@ public static class OpenAIEmbeddingsModelFactory { namespace OpenAI.Files { public class FileClient { protected FileClient(); - public FileClient(OpenAIClientOptions options = null); - public FileClient(ApiKeyCredential credential, OpenAIClientOptions options = null); - protected internal FileClient(ClientPipeline pipeline, Uri endpoint, OpenAIClientOptions options); + public FileClient(ApiKeyCredential credential, OpenAIClientOptions options); + public FileClient(ApiKeyCredential credential); + protected internal FileClient(ClientPipeline pipeline, OpenAIClientOptions options); public virtual ClientPipeline Pipeline { get; } - public virtual ClientResult DeleteFile(OpenAIFileInfo file); [EditorBrowsable(EditorBrowsableState.Never)] public virtual ClientResult DeleteFile(string fileId, RequestOptions options); public virtual ClientResult DeleteFile(string fileId, CancellationToken cancellationToken = default); - public virtual Task> DeleteFileAsync(OpenAIFileInfo file); 
[EditorBrowsable(EditorBrowsableState.Never)] public virtual Task DeleteFileAsync(string fileId, RequestOptions options); public virtual Task> DeleteFileAsync(string fileId, CancellationToken cancellationToken = default); - public virtual ClientResult DownloadFile(OpenAIFileInfo file); [EditorBrowsable(EditorBrowsableState.Never)] public virtual ClientResult DownloadFile(string fileId, RequestOptions options); public virtual ClientResult DownloadFile(string fileId, CancellationToken cancellationToken = default); - public virtual Task> DownloadFileAsync(OpenAIFileInfo file); [EditorBrowsable(EditorBrowsableState.Never)] public virtual Task DownloadFileAsync(string fileId, RequestOptions options); public virtual Task> DownloadFileAsync(string fileId, CancellationToken cancellationToken = default); @@ -1682,7 +1782,7 @@ public class OpenAIFileInfo : IJsonModel, IPersistableModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options); @@ -1760,9 +1860,9 @@ public class CreateJobOperation : OperationResult { } public class FineTuningClient { protected FineTuningClient(); - public FineTuningClient(OpenAIClientOptions options = null); - public FineTuningClient(ApiKeyCredential credential, OpenAIClientOptions options = null); - protected internal FineTuningClient(ClientPipeline pipeline, Uri endpoint, OpenAIClientOptions options); + public FineTuningClient(ApiKeyCredential credential, OpenAIClientOptions options); + public FineTuningClient(ApiKeyCredential credential); + protected internal FineTuningClient(ClientPipeline pipeline, OpenAIClientOptions options); public virtual ClientPipeline Pipeline { get; } public virtual CreateJobOperation CreateJob(BinaryContent content, bool waitUntilCompleted, RequestOptions options = null); public virtual Task CreateJobAsync(BinaryContent content, bool waitUntilCompleted, RequestOptions options = null); @@ -1822,9 +1922,9 @@ public enum GeneratedImageStyle { } public class ImageClient { protected ImageClient(); - 
protected internal ImageClient(ClientPipeline pipeline, string model, Uri endpoint, OpenAIClientOptions options); - public ImageClient(string model, OpenAIClientOptions options = null); - public ImageClient(string model, ApiKeyCredential credential, OpenAIClientOptions options = null); + protected internal ImageClient(ClientPipeline pipeline, string model, OpenAIClientOptions options); + public ImageClient(string model, ApiKeyCredential credential, OpenAIClientOptions options); + public ImageClient(string model, ApiKeyCredential credential); public virtual ClientPipeline Pipeline { get; } public virtual ClientResult GenerateImage(string prompt, ImageGenerationOptions options = null, CancellationToken cancellationToken = default); public virtual Task> GenerateImageAsync(string prompt, ImageGenerationOptions options = null, CancellationToken cancellationToken = default); @@ -1868,9 +1968,9 @@ public class ImageClient { public virtual Task> GenerateImageVariationsAsync(string imageFilePath, int imageCount, ImageVariationOptions options = null); } public class ImageEditOptions : IJsonModel, IPersistableModel { + public string EndUserId { get; set; } public GeneratedImageFormat? ResponseFormat { get; set; } public GeneratedImageSize? Size { get; set; } - public string User { get; set; } ImageEditOptions IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options); void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options); ImageEditOptions IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options); @@ -1878,11 +1978,11 @@ public class ImageEditOptions : IJsonModel, IPersistableModel< BinaryData IPersistableModel.Write(ModelReaderWriterOptions options); } public class ImageGenerationOptions : IJsonModel, IPersistableModel { + public string EndUserId { get; set; } public GeneratedImageQuality? Quality { get; set; } public GeneratedImageFormat? ResponseFormat { get; set; } public GeneratedImageSize? 
Size { get; set; } public GeneratedImageStyle? Style { get; set; } - public string User { get; set; } ImageGenerationOptions IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options); void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options); ImageGenerationOptions IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options); @@ -1890,9 +1990,9 @@ public class ImageGenerationOptions : IJsonModel, IPersi BinaryData IPersistableModel.Write(ModelReaderWriterOptions options); } public class ImageVariationOptions : IJsonModel, IPersistableModel { + public string EndUserId { get; set; } public GeneratedImageFormat? ResponseFormat { get; set; } public GeneratedImageSize? Size { get; set; } - public string User { get; set; } ImageVariationOptions IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options); void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options); ImageVariationOptions IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options); @@ -1907,9 +2007,9 @@ public static class OpenAIImagesModelFactory { namespace OpenAI.Models { public class ModelClient { protected ModelClient(); - public ModelClient(OpenAIClientOptions options = null); - public ModelClient(ApiKeyCredential credential, OpenAIClientOptions options = null); - protected internal ModelClient(ClientPipeline pipeline, Uri endpoint, OpenAIClientOptions options); + public ModelClient(ApiKeyCredential credential, OpenAIClientOptions options); + public ModelClient(ApiKeyCredential credential); + protected internal ModelClient(ClientPipeline pipeline, OpenAIClientOptions options); public virtual ClientPipeline Pipeline { get; } [EditorBrowsable(EditorBrowsableState.Never)] public virtual ClientResult DeleteModel(string model, RequestOptions options); @@ -1991,9 +2091,9 @@ public class ModerationCategoryScores : IJsonModel, IP } public class ModerationClient { protected ModerationClient(); - 
protected internal ModerationClient(ClientPipeline pipeline, string model, Uri endpoint, OpenAIClientOptions options); - public ModerationClient(string model, OpenAIClientOptions options = null); - public ModerationClient(string model, ApiKeyCredential credential, OpenAIClientOptions options = null); + protected internal ModerationClient(ClientPipeline pipeline, string model, OpenAIClientOptions options); + public ModerationClient(string model, ApiKeyCredential credential, OpenAIClientOptions options); + public ModerationClient(string model, ApiKeyCredential credential); public virtual ClientPipeline Pipeline { get; } public virtual ClientResult ClassifyTextInput(string input, CancellationToken cancellationToken = default); public virtual Task> ClassifyTextInputAsync(string input, CancellationToken cancellationToken = default); @@ -2164,9 +2264,9 @@ public class VectorStoreBatchFileJob : IJsonModel, IPer } public class VectorStoreClient { protected VectorStoreClient(); - public VectorStoreClient(OpenAIClientOptions options = null); - public VectorStoreClient(ApiKeyCredential credential, OpenAIClientOptions options = null); - protected internal VectorStoreClient(ClientPipeline pipeline, Uri endpoint, OpenAIClientOptions options); + public VectorStoreClient(ApiKeyCredential credential, OpenAIClientOptions options); + public VectorStoreClient(ApiKeyCredential credential); + protected internal VectorStoreClient(ClientPipeline pipeline, OpenAIClientOptions options); public virtual ClientPipeline Pipeline { get; } public virtual AddFileToVectorStoreOperation AddFileToVectorStore(VectorStore vectorStore, OpenAIFileInfo file, bool waitUntilCompleted); public virtual AddFileToVectorStoreOperation AddFileToVectorStore(bool waitUntilCompleted, string vectorStoreId, string fileId, CancellationToken cancellationToken = default); @@ -2254,8 +2354,24 @@ public class VectorStoreClient { public class VectorStoreCollectionOptions { public string AfterId { get; set; } public string 
BeforeId { get; set; } - public ListOrder? Order { get; set; } - public int? PageSize { get; set; } + public VectorStoreCollectionOrder? Order { get; set; } + public int? PageSizeLimit { get; set; } + } + public readonly partial struct VectorStoreCollectionOrder : IEquatable { + private readonly object _dummy; + private readonly int _dummyPrimitive; + public VectorStoreCollectionOrder(string value); + public static VectorStoreCollectionOrder Ascending { get; } + public static VectorStoreCollectionOrder Descending { get; } + public readonly bool Equals(VectorStoreCollectionOrder other); + [EditorBrowsable(EditorBrowsableState.Never)] + public override readonly bool Equals(object obj); + [EditorBrowsable(EditorBrowsableState.Never)] + public override readonly int GetHashCode(); + public static bool operator ==(VectorStoreCollectionOrder left, VectorStoreCollectionOrder right); + public static implicit operator VectorStoreCollectionOrder(string value); + public static bool operator !=(VectorStoreCollectionOrder left, VectorStoreCollectionOrder right); + public override readonly string ToString(); } public class VectorStoreCreationOptions : IJsonModel, IPersistableModel { public FileChunkingStrategy ChunkingStrategy { get; set; } @@ -2302,8 +2418,24 @@ public class VectorStoreFileAssociationCollectionOptions { public string AfterId { get; set; } public string BeforeId { get; set; } public VectorStoreFileStatusFilter? Filter { get; set; } - public ListOrder? Order { get; set; } - public int? PageSize { get; set; } + public VectorStoreFileAssociationCollectionOrder? Order { get; set; } + public int? 
PageSizeLimit { get; set; } + } + public readonly partial struct VectorStoreFileAssociationCollectionOrder : IEquatable { + private readonly object _dummy; + private readonly int _dummyPrimitive; + public VectorStoreFileAssociationCollectionOrder(string value); + public static VectorStoreFileAssociationCollectionOrder Ascending { get; } + public static VectorStoreFileAssociationCollectionOrder Descending { get; } + public readonly bool Equals(VectorStoreFileAssociationCollectionOrder other); + [EditorBrowsable(EditorBrowsableState.Never)] + public override readonly bool Equals(object obj); + [EditorBrowsable(EditorBrowsableState.Never)] + public override readonly int GetHashCode(); + public static bool operator ==(VectorStoreFileAssociationCollectionOrder left, VectorStoreFileAssociationCollectionOrder right); + public static implicit operator VectorStoreFileAssociationCollectionOrder(string value); + public static bool operator !=(VectorStoreFileAssociationCollectionOrder left, VectorStoreFileAssociationCollectionOrder right); + public override readonly string ToString(); } public class VectorStoreFileAssociationError : IJsonModel, IPersistableModel { public VectorStoreFileAssociationErrorCode Code { get; } @@ -2318,10 +2450,9 @@ public class VectorStoreFileAssociationError : IJsonModel messagePages - = assistantClient.GetMessages(threadRun.ThreadId, new MessageCollectionOptions() { Order = ListOrder.OldestFirst }); + = assistantClient.GetMessages(threadRun.ThreadId, new MessageCollectionOptions() { Order = MessageCollectionOrder.Ascending }); IEnumerable messages = messagePages.GetAllValues(); foreach (ThreadMessage message in messages) @@ -145,6 +144,6 @@ PageCollection messagePages // Optionally, delete any persistent resources you no longer need. 
_ = assistantClient.DeleteThread(threadRun.ThreadId); _ = assistantClient.DeleteAssistant(assistant); - _ = fileClient.DeleteFile(salesFile); + _ = fileClient.DeleteFile(salesFile.Id); } } diff --git a/.dotnet/examples/Assistants/Example01_RetrievalAugmentedGenerationAsync.cs b/.dotnet/examples/Assistants/Example01_RetrievalAugmentedGenerationAsync.cs index 83b95fb0e..a5e7672e7 100644 --- a/.dotnet/examples/Assistants/Example01_RetrievalAugmentedGenerationAsync.cs +++ b/.dotnet/examples/Assistants/Example01_RetrievalAugmentedGenerationAsync.cs @@ -16,7 +16,6 @@ public partial class AssistantExamples public async Task Example01_RetrievalAugmentedGenerationAsync() { // Assistants is a beta API and subject to change; acknowledge its experimental status by suppressing the matching warning. -#pragma warning disable OPENAI001 OpenAIClient openAIClient = new(Environment.GetEnvironmentVariable("OPENAI_API_KEY")); FileClient fileClient = openAIClient.GetFileClient(); AssistantClient assistantClient = openAIClient.GetAssistantClient(); @@ -100,7 +99,7 @@ public async Task Example01_RetrievalAugmentedGenerationAsync() // Finally, we'll print out the full history for the thread that includes the augmented generation AsyncPageCollection messagePages - = assistantClient.GetMessagesAsync(threadRun.ThreadId, new MessageCollectionOptions() { Order = ListOrder.OldestFirst }); + = assistantClient.GetMessagesAsync(threadRun.ThreadId, new MessageCollectionOptions() { Order = MessageCollectionOrder.Ascending }); IAsyncEnumerable messages = messagePages.GetAllValuesAsync(); await foreach (ThreadMessage message in messages) @@ -146,6 +145,6 @@ AsyncPageCollection messagePages // Optionally, delete any persistent resources you no longer need. 
_ = await assistantClient.DeleteThreadAsync(threadRun.ThreadId); _ = await assistantClient.DeleteAssistantAsync(assistant); - _ = await fileClient.DeleteFileAsync(salesFile); + _ = await fileClient.DeleteFileAsync(salesFile.Id); } } diff --git a/.dotnet/examples/Assistants/Example02_FunctionCalling.cs b/.dotnet/examples/Assistants/Example02_FunctionCalling.cs index a0eb05637..1e7f59e92 100644 --- a/.dotnet/examples/Assistants/Example02_FunctionCalling.cs +++ b/.dotnet/examples/Assistants/Example02_FunctionCalling.cs @@ -61,7 +61,6 @@ string GetCurrentWeather(string location, string unit = "celsius") #endregion // Assistants is a beta API and subject to change; acknowledge its experimental status by suppressing the matching warning. -#pragma warning disable OPENAI001 AssistantClient client = new(Environment.GetEnvironmentVariable("OPENAI_API_KEY")); #region @@ -152,7 +151,7 @@ string GetCurrentWeather(string location, string unit = "celsius") if (run.Status == RunStatus.Completed) { PageCollection messagePages - = client.GetMessages(run.ThreadId, new MessageCollectionOptions() { Order = ListOrder.OldestFirst }); + = client.GetMessages(run.ThreadId, new MessageCollectionOptions() { Order = MessageCollectionOrder.Ascending }); IEnumerable messages = messagePages.GetAllValues(); foreach (ThreadMessage message in messages) diff --git a/.dotnet/examples/Assistants/Example02_FunctionCallingAsync.cs b/.dotnet/examples/Assistants/Example02_FunctionCallingAsync.cs index 2ee924c56..71d655668 100644 --- a/.dotnet/examples/Assistants/Example02_FunctionCallingAsync.cs +++ b/.dotnet/examples/Assistants/Example02_FunctionCallingAsync.cs @@ -61,7 +61,6 @@ string GetCurrentWeather(string location, string unit = "celsius") #endregion // Assistants is a beta API and subject to change; acknowledge its experimental status by suppressing the matching warning. 
-#pragma warning disable OPENAI001 AssistantClient client = new(Environment.GetEnvironmentVariable("OPENAI_API_KEY")); #region @@ -152,7 +151,7 @@ string GetCurrentWeather(string location, string unit = "celsius") if (run.Status == RunStatus.Completed) { AsyncPageCollection messagePages - = client.GetMessagesAsync(run.ThreadId, new MessageCollectionOptions() { Order = ListOrder.OldestFirst }); + = client.GetMessagesAsync(run.ThreadId, new MessageCollectionOptions() { Order = MessageCollectionOrder.Ascending }); IAsyncEnumerable messages = messagePages.GetAllValuesAsync(); await foreach (ThreadMessage message in messages) diff --git a/.dotnet/examples/Assistants/Example02b_FunctionCallingStreaming.cs b/.dotnet/examples/Assistants/Example02b_FunctionCallingStreaming.cs index 9c3e0adfc..d4499015b 100644 --- a/.dotnet/examples/Assistants/Example02b_FunctionCallingStreaming.cs +++ b/.dotnet/examples/Assistants/Example02b_FunctionCallingStreaming.cs @@ -63,7 +63,6 @@ public async Task Example02b_FunctionCallingStreaming() #endregion // Assistants is a beta API and subject to change; acknowledge its experimental status by suppressing the matching warning. -#pragma warning disable OPENAI001 AssistantClient client = new(Environment.GetEnvironmentVariable("OPENAI_API_KEY")); #region Create a new assistant with function tools diff --git a/.dotnet/examples/Assistants/Example03_ListAssistantsWithPagination.cs b/.dotnet/examples/Assistants/Example03_ListAssistantsWithPagination.cs index 83776d7a1..ace0608fa 100644 --- a/.dotnet/examples/Assistants/Example03_ListAssistantsWithPagination.cs +++ b/.dotnet/examples/Assistants/Example03_ListAssistantsWithPagination.cs @@ -12,7 +12,6 @@ public partial class AssistantExamples public void Example03_ListAssistantsWithPagination() { // Assistants is a beta API and subject to change; acknowledge its experimental status by suppressing the matching warning. 
-#pragma warning disable OPENAI001 AssistantClient client = new(Environment.GetEnvironmentVariable("OPENAI_API_KEY")); int count = 0; diff --git a/.dotnet/examples/Assistants/Example03_ListAssistantsWithPaginationAsync.cs b/.dotnet/examples/Assistants/Example03_ListAssistantsWithPaginationAsync.cs index 1f4d8218e..57696ddb9 100644 --- a/.dotnet/examples/Assistants/Example03_ListAssistantsWithPaginationAsync.cs +++ b/.dotnet/examples/Assistants/Example03_ListAssistantsWithPaginationAsync.cs @@ -13,7 +13,6 @@ public partial class AssistantExamples public async Task Example03_ListAssistantsWithPaginationAsync() { // Assistants is a beta API and subject to change; acknowledge its experimental status by suppressing the matching warning. -#pragma warning disable OPENAI001 AssistantClient client = new(Environment.GetEnvironmentVariable("OPENAI_API_KEY")); int count = 0; diff --git a/.dotnet/examples/Assistants/Example04_AllTheTools.cs b/.dotnet/examples/Assistants/Example04_AllTheTools.cs index 3d55253d0..7b5ee309c 100644 --- a/.dotnet/examples/Assistants/Example04_AllTheTools.cs +++ b/.dotnet/examples/Assistants/Example04_AllTheTools.cs @@ -14,8 +14,6 @@ public partial class AssistantExamples [Test] public void Example04_AllTheTools() { -#pragma warning disable OPENAI001 - #region Define a function tool static string GetNameOfFamilyMember(string relation) => relation switch @@ -45,7 +43,7 @@ static string GetNameOfFamilyMember(string relation) }; #region Upload a mock file for use with file search - FileClient fileClient = new(); + FileClient fileClient = new(Environment.GetEnvironmentVariable("OPENAI_API_KEY")); OpenAIFileInfo favoriteNumberFile = fileClient.UploadFile( BinaryData.FromString(""" This file contains the favorite numbers for individuals. 
@@ -59,7 +57,7 @@ static string GetNameOfFamilyMember(string relation) #endregion #region Create an assistant with functions, file search, and code interpreter all enabled - AssistantClient client = new(); + AssistantClient client = new(Environment.GetEnvironmentVariable("OPENAI_API_KEY")); Assistant assistant = client.CreateAssistant("gpt-4-turbo", new AssistantCreationOptions() { Instructions = "Use functions to resolve family relations into the names of people. Use file search to " @@ -138,7 +136,7 @@ static string GetNameOfFamilyMember(string relation) if (run.Status == RunStatus.Completed) { PageCollection messagePages - = client.GetMessages(run.ThreadId, new MessageCollectionOptions() { Order = ListOrder.OldestFirst }); + = client.GetMessages(run.ThreadId, new MessageCollectionOptions() { Order = MessageCollectionOrder.Ascending }); IEnumerable messages = messagePages.GetAllValues(); foreach (ThreadMessage message in messages) @@ -175,7 +173,7 @@ PageCollection messagePages PageCollection runSteps = client.GetRunSteps( run, new RunStepCollectionOptions() { - Order = ListOrder.OldestFirst + Order = RunStepCollectionOrder.Ascending }); foreach (RunStep step in runSteps.GetAllValues()) { diff --git a/.dotnet/examples/Assistants/Example05_AssistantsWithVision.cs b/.dotnet/examples/Assistants/Example05_AssistantsWithVision.cs index 4d10c84cf..d7af06205 100644 --- a/.dotnet/examples/Assistants/Example05_AssistantsWithVision.cs +++ b/.dotnet/examples/Assistants/Example05_AssistantsWithVision.cs @@ -12,7 +12,6 @@ public partial class AssistantExamples public void Example05_AssistantsWithVision() { // Assistants is a beta API and subject to change; acknowledge its experimental status by suppressing the matching warning. 
-#pragma warning disable OPENAI001 OpenAIClient openAIClient = new(Environment.GetEnvironmentVariable("OPENAI_API_KEY")); FileClient fileClient = openAIClient.GetFileClient(); AssistantClient assistantClient = openAIClient.GetAssistantClient(); @@ -65,7 +64,7 @@ public void Example05_AssistantsWithVision() } // Delete temporary resources, if desired - _ = fileClient.DeleteFile(pictureOfAppleFile); + _ = fileClient.DeleteFile(pictureOfAppleFile.Id); _ = assistantClient.DeleteThread(thread); _ = assistantClient.DeleteAssistant(assistant); } diff --git a/.dotnet/examples/Assistants/Example05_AssistantsWithVisionAsync.cs b/.dotnet/examples/Assistants/Example05_AssistantsWithVisionAsync.cs index 3f79137e8..1e1e521fc 100644 --- a/.dotnet/examples/Assistants/Example05_AssistantsWithVisionAsync.cs +++ b/.dotnet/examples/Assistants/Example05_AssistantsWithVisionAsync.cs @@ -13,7 +13,6 @@ public partial class AssistantExamples public async Task Example05_AssistantsWithVisionAsync() { // Assistants is a beta API and subject to change; acknowledge its experimental status by suppressing the matching warning. 
-#pragma warning disable OPENAI001 OpenAIClient openAIClient = new(Environment.GetEnvironmentVariable("OPENAI_API_KEY")); FileClient fileClient = openAIClient.GetFileClient(); AssistantClient assistantClient = openAIClient.GetAssistantClient(); @@ -65,7 +64,7 @@ public async Task Example05_AssistantsWithVisionAsync() } } - _ = await fileClient.DeleteFileAsync(pictureOfAppleFile); + _ = await fileClient.DeleteFileAsync(pictureOfAppleFile.Id); _ = await assistantClient.DeleteThreadAsync(thread); _ = await assistantClient.DeleteAssistantAsync(assistant); } diff --git a/.dotnet/examples/Audio/Example01_SimpleTextToSpeech.cs b/.dotnet/examples/Audio/Example01_SimpleTextToSpeech.cs index 79b8ac127..8573152ff 100644 --- a/.dotnet/examples/Audio/Example01_SimpleTextToSpeech.cs +++ b/.dotnet/examples/Audio/Example01_SimpleTextToSpeech.cs @@ -18,7 +18,7 @@ public void Example01_SimpleTextToSpeech() + " moisture, it is wise to postpone watering for a couple more days. When in doubt, it is often safer" + " to water sparingly and maintain a less-is-more approach."; - BinaryData speech = client.GenerateSpeechFromText(input, GeneratedSpeechVoice.Alloy); + BinaryData speech = client.GenerateSpeech(input, GeneratedSpeechVoice.Alloy); using FileStream stream = File.OpenWrite($"{Guid.NewGuid()}.mp3"); speech.ToStream().CopyTo(stream); diff --git a/.dotnet/examples/Audio/Example01_SimpleTextToSpeechAsync.cs b/.dotnet/examples/Audio/Example01_SimpleTextToSpeechAsync.cs index ae8432eb8..7226ae739 100644 --- a/.dotnet/examples/Audio/Example01_SimpleTextToSpeechAsync.cs +++ b/.dotnet/examples/Audio/Example01_SimpleTextToSpeechAsync.cs @@ -19,7 +19,7 @@ public async Task Example01_SimpleTextToSpeechAsync() + " moisture, it is wise to postpone watering for a couple more days. 
When in doubt, it is often safer" + " to water sparingly and maintain a less-is-more approach."; - BinaryData speech = await client.GenerateSpeechFromTextAsync(input, GeneratedSpeechVoice.Alloy); + BinaryData speech = await client.GenerateSpeechAsync(input, GeneratedSpeechVoice.Alloy); using FileStream stream = File.OpenWrite($"{Guid.NewGuid()}.mp3"); speech.ToStream().CopyTo(stream); diff --git a/.dotnet/examples/Chat/Example01_SimpleChat_Cancellations.cs b/.dotnet/examples/Chat/Example01_SimpleChat_Cancellations.cs deleted file mode 100644 index aaf13df02..000000000 --- a/.dotnet/examples/Chat/Example01_SimpleChat_Cancellations.cs +++ /dev/null @@ -1,40 +0,0 @@ -using NUnit.Framework; -using OpenAI.Chat; -using System; -using System.ClientModel; -using System.ClientModel.Primitives; -using System.Threading; - -namespace OpenAI.Examples; - -public partial class ChatExamples -{ - [Test] - public void Example01_SimpleChat_Cancellations() - { - ChatClient client = new(model: "gpt-4o", Environment.GetEnvironmentVariable("OPENAI_API_KEY")); - - CancellationTokenSource ct = new CancellationTokenSource(); - RequestOptions options = new() { CancellationToken = ct.Token }; - - ChatMessage message = ChatMessage.CreateUserMessage("Say 'this is a test.'"); - var body = new { - model = "gpt-4o", - messages = new[] { - new - { - role = "user", - content = "Say \u0027this is a test.\u0027" - } - } - }; - - BinaryData json = BinaryData.FromObjectAsJson(body); - ClientResult result = client.CompleteChat(BinaryContent.Create(json), options); - - // The following code will be simplified in the future. 
- var wireFormat = new ModelReaderWriterOptions("W"); - ChatCompletion completion = ModelReaderWriter.Read(result.GetRawResponse().Content, wireFormat); - Console.WriteLine($"[ASSISTANT]: {completion}"); - } -} diff --git a/.dotnet/examples/Chat/Example05_ChatWithVision.cs b/.dotnet/examples/Chat/Example05_ChatWithVision.cs index dbe8543ea..dd5724548 100644 --- a/.dotnet/examples/Chat/Example05_ChatWithVision.cs +++ b/.dotnet/examples/Chat/Example05_ChatWithVision.cs @@ -19,8 +19,8 @@ public void Example05_ChatWithVision() List messages = [ new UserChatMessage( - ChatMessageContentPart.CreateTextMessageContentPart("Please describe the following image."), - ChatMessageContentPart.CreateImageMessageContentPart(imageBytes, "image/png")) + ChatMessageContentPart.CreateTextPart("Please describe the following image."), + ChatMessageContentPart.CreateImagePart(imageBytes, "image/png")) ]; ChatCompletion chatCompletion = client.CompleteChat(messages); diff --git a/.dotnet/examples/Chat/Example05_ChatWithVisionAsync.cs b/.dotnet/examples/Chat/Example05_ChatWithVisionAsync.cs index c8f4c05c0..d18cc7b74 100644 --- a/.dotnet/examples/Chat/Example05_ChatWithVisionAsync.cs +++ b/.dotnet/examples/Chat/Example05_ChatWithVisionAsync.cs @@ -20,8 +20,8 @@ public async Task Example05_ChatWithVisionAsync() List messages = [ new UserChatMessage( - ChatMessageContentPart.CreateTextMessageContentPart("Please describe the following image."), - ChatMessageContentPart.CreateImageMessageContentPart(imageBytes, "image/png")) + ChatMessageContentPart.CreateTextPart("Please describe the following image."), + ChatMessageContentPart.CreateImagePart(imageBytes, "image/png")) ]; ChatCompletion chatCompletion = await client.CompleteChatAsync(messages); diff --git a/.dotnet/examples/Chat/Example07_StructuredOutputs.cs b/.dotnet/examples/Chat/Example07_StructuredOutputs.cs new file mode 100644 index 000000000..a64757c9a --- /dev/null +++ b/.dotnet/examples/Chat/Example07_StructuredOutputs.cs @@ -0,0 
+1,59 @@ +using NUnit.Framework; +using OpenAI.Chat; +using System; +using System.Text.Json; + +namespace OpenAI.Examples; + +public partial class ChatExamples +{ + [Test] + public void Example07_StructuredOutputs() + { + ChatClient client = new("gpt-4o-mini", Environment.GetEnvironmentVariable("OPENAI_API_KEY")); + + ChatCompletionOptions options = new() + { + ResponseFormat = ChatResponseFormat.CreateJsonSchemaFormat( + jsonSchemaFormatName: "math_reasoning", + jsonSchema: BinaryData.FromString(""" + { + "type": "object", + "properties": { + "steps": { + "type": "array", + "items": { + "type": "object", + "properties": { + "explanation": { "type": "string" }, + "output": { "type": "string" } + }, + "required": ["explanation", "output"], + "additionalProperties": false + } + }, + "final_answer": { "type": "string" } + }, + "required": ["steps", "final_answer"], + "additionalProperties": false + } + """), + jsonSchemaIsStrict: true) + }; + + ChatCompletion chatCompletion = client.CompleteChat( + [ new UserChatMessage("How can I solve 8x + 7 = -23?") ], + options); + + using JsonDocument structuredJson = JsonDocument.Parse(chatCompletion.ToString()); + + Console.WriteLine($"Final answer: {structuredJson.RootElement.GetProperty("final_answer").GetString()}"); + Console.WriteLine("Reasoning steps:"); + + foreach (JsonElement stepElement in structuredJson.RootElement.GetProperty("steps").EnumerateArray()) + { + Console.WriteLine($" - Explanation: {stepElement.GetProperty("explanation").GetString()}"); + Console.WriteLine($" Output: {stepElement.GetProperty("output")}"); + } + } +} diff --git a/.dotnet/examples/Chat/Example07_StructuredOutputsAsync.cs b/.dotnet/examples/Chat/Example07_StructuredOutputsAsync.cs new file mode 100644 index 000000000..81df15f2e --- /dev/null +++ b/.dotnet/examples/Chat/Example07_StructuredOutputsAsync.cs @@ -0,0 +1,60 @@ +using NUnit.Framework; +using OpenAI.Chat; +using System; +using System.Text.Json; +using System.Threading.Tasks; + 
+namespace OpenAI.Examples; + +public partial class ChatExamples +{ + [Test] + public async Task Example07_StructuredOutputsAsync() + { + ChatClient client = new("gpt-4o-mini", Environment.GetEnvironmentVariable("OPENAI_API_KEY")); + + ChatCompletionOptions options = new() + { + ResponseFormat = ChatResponseFormat.CreateJsonSchemaFormat( + jsonSchemaFormatName: "math_reasoning", + jsonSchema: BinaryData.FromString(""" + { + "type": "object", + "properties": { + "steps": { + "type": "array", + "items": { + "type": "object", + "properties": { + "explanation": { "type": "string" }, + "output": { "type": "string" } + }, + "required": ["explanation", "output"], + "additionalProperties": false + } + }, + "final_answer": { "type": "string" } + }, + "required": ["steps", "final_answer"], + "additionalProperties": false + } + """), + jsonSchemaIsStrict: true) + }; + + ChatCompletion chatCompletion = await client.CompleteChatAsync( + [ new UserChatMessage("How can I solve 8x + 7 = -23?") ], + options); + + using JsonDocument structuredJson = JsonDocument.Parse(chatCompletion.ToString()); + + Console.WriteLine($"Final answer: {structuredJson.RootElement.GetProperty("final_answer").GetString()}"); + Console.WriteLine("Reasoning steps:"); + + foreach (JsonElement stepElement in structuredJson.RootElement.GetProperty("steps").EnumerateArray()) + { + Console.WriteLine($" - Explanation: {stepElement.GetProperty("explanation").GetString()}"); + Console.WriteLine($" Output: {stepElement.GetProperty("output")}"); + } + } +} diff --git a/.dotnet/examples/ClientExamples.cs b/.dotnet/examples/ClientExamples.cs index dff74f119..18947925d 100644 --- a/.dotnet/examples/ClientExamples.cs +++ b/.dotnet/examples/ClientExamples.cs @@ -42,8 +42,6 @@ public void CreateAssistantAndFileClients() { OpenAIClient openAIClient = new(Environment.GetEnvironmentVariable("OPENAI_API_KEY")); FileClient fileClient = openAIClient.GetFileClient(); -#pragma warning disable OPENAI001 AssistantClient 
assistantClient = openAIClient.GetAssistantClient(); -#pragma warning restore OPENAI001 } } diff --git a/.dotnet/examples/CombinationExamples.cs b/.dotnet/examples/CombinationExamples.cs index 31d5ef4cd..633b4294c 100644 --- a/.dotnet/examples/CombinationExamples.cs +++ b/.dotnet/examples/CombinationExamples.cs @@ -15,7 +15,7 @@ public partial class CombinationExamples public void AlpacaArtAssessor() { // First, we create an image using dall-e-3: - ImageClient imageClient = new("dall-e-3"); + ImageClient imageClient = new("dall-e-3", Environment.GetEnvironmentVariable("OPENAI_API_KEY")); ClientResult imageResult = imageClient.GenerateImage( "a majestic alpaca on a mountain ridge, backed by an expansive blue sky accented with sparse clouds", new() @@ -28,14 +28,14 @@ public void AlpacaArtAssessor() Console.WriteLine($"Majestic alpaca available at:\n{imageGeneration.ImageUri.AbsoluteUri}"); // Now, we'll ask a cranky art critic to evaluate the image using gpt-4-vision-preview: - ChatClient chatClient = new("gpt-4-vision-preview"); + ChatClient chatClient = new("gpt-4o-mini", Environment.GetEnvironmentVariable("OPENAI_API_KEY")); ChatCompletion chatCompletion = chatClient.CompleteChat( [ new SystemChatMessage("Assume the role of a cranky art critic. 
When asked to describe or " + "evaluate imagery, focus on criticizing elements of subject, composition, and other details."), new UserChatMessage( - ChatMessageContentPart.CreateTextMessageContentPart("describe the following image in a few sentences"), - ChatMessageContentPart.CreateImageMessageContentPart(imageGeneration.ImageUri)), + ChatMessageContentPart.CreateTextPart("describe the following image in a few sentences"), + ChatMessageContentPart.CreateImagePart(imageGeneration.ImageUri)), ], new ChatCompletionOptions() { @@ -47,13 +47,13 @@ public void AlpacaArtAssessor() Console.WriteLine($"Art critique of majestic alpaca:\n{chatResponseText}"); // Finally, we'll get some text-to-speech for that critical evaluation using tts-1-hd: - AudioClient audioClient = new("tts-1-hd"); - ClientResult ttsResult = audioClient.GenerateSpeechFromText( + AudioClient audioClient = new("tts-1-hd", Environment.GetEnvironmentVariable("OPENAI_API_KEY")); + ClientResult ttsResult = audioClient.GenerateSpeech( text: chatResponseText, GeneratedSpeechVoice.Fable, new SpeechGenerationOptions() { - Speed = 0.9f, + SpeedRatio = 0.9f, ResponseFormat = GeneratedSpeechFormat.Opus, }); FileInfo ttsFileInfo = new($"{chatCompletion.Id}.opus"); @@ -69,7 +69,7 @@ public void AlpacaArtAssessor() public async Task CuriousCreatureCreator() { // First, we'll use gpt-4 to have a creative helper imagine a twist on a household pet - ChatClient creativeWriterClient = new("gpt-4"); + ChatClient creativeWriterClient = new("gpt-4o-mini", Environment.GetEnvironmentVariable("OPENAI_API_KEY")); ClientResult creativeWriterResult = creativeWriterClient.CompleteChat( [ new SystemChatMessage("You're a creative helper that specializes in brainstorming designs for concepts that fuse ordinary, mundane items with a fantastical touch. 
In particular, you can provide good one-paragraph descriptions of concept images."), @@ -83,13 +83,13 @@ public async Task CuriousCreatureCreator() Console.WriteLine($"Creative helper's creature description:\n{description}"); // Asynchronously, in parallel to the next steps, we'll get the creative description in the voice of Onyx - AudioClient ttsClient = new("tts-1-hd"); - Task> imageDescriptionAudioTask = ttsClient.GenerateSpeechFromTextAsync( + AudioClient ttsClient = new("tts-1-hd", Environment.GetEnvironmentVariable("OPENAI_API_KEY")); + Task> imageDescriptionAudioTask = ttsClient.GenerateSpeechAsync( description, GeneratedSpeechVoice.Onyx, new SpeechGenerationOptions() { - Speed = 1.1f, + SpeedRatio = 1.1f, ResponseFormat = GeneratedSpeechFormat.Opus, }); _ = Task.Run(async () => @@ -103,7 +103,7 @@ public async Task CuriousCreatureCreator() }); // Meanwhile, we'll use dall-e-3 to generate a rendition of our LLM artist's vision - ImageClient imageGenerationClient = new("dall-e-3"); + ImageClient imageGenerationClient = new("dall-e-3", Environment.GetEnvironmentVariable("OPENAI_API_KEY")); ClientResult imageGenerationResult = await imageGenerationClient.GenerateImageAsync( description, new ImageGenerationOptions() @@ -115,13 +115,13 @@ public async Task CuriousCreatureCreator() Console.WriteLine($"Creature image available at:\n{imageLocation.AbsoluteUri}"); // Now, we'll use gpt-4-vision-preview to get a hopelessly taken assessment from a usually exigent art connoisseur - ChatClient imageCriticClient = new("gpt-4-vision-preview"); + ChatClient imageCriticClient = new("gpt-4o-mini", Environment.GetEnvironmentVariable("OPENAI_API_KEY")); ClientResult criticalAppraisalResult = await imageCriticClient.CompleteChatAsync( [ new SystemChatMessage("Assume the role of an art critic. 
Although usually cranky and occasionally even referred to as a 'curmudgeon', you're somehow entirely smitten with the subject presented to you and, despite your best efforts, can't help but lavish praise when you're asked to appraise a provided image."), new UserChatMessage( - ChatMessageContentPart.CreateTextMessageContentPart("Evaluate this image for me. What is it, and what do you think of it?"), - ChatMessageContentPart.CreateImageMessageContentPart(imageLocation)), + ChatMessageContentPart.CreateTextPart("Evaluate this image for me. What is it, and what do you think of it?"), + ChatMessageContentPart.CreateImagePart(imageLocation)), ], new ChatCompletionOptions() { @@ -131,12 +131,12 @@ public async Task CuriousCreatureCreator() Console.WriteLine($"Critic's appraisal:\n{appraisal}"); // Finally, we'll get that art expert's laudations in the voice of Fable - ClientResult appraisalAudioResult = await ttsClient.GenerateSpeechFromTextAsync( + ClientResult appraisalAudioResult = await ttsClient.GenerateSpeechAsync( appraisal, GeneratedSpeechVoice.Fable, new SpeechGenerationOptions() { - Speed = 0.9f, + SpeedRatio = 0.9f, ResponseFormat = GeneratedSpeechFormat.Opus, }); FileInfo criticAudioFileInfo = new($"{criticalAppraisalResult.Value.Id}-appraisal.opus"); diff --git a/.dotnet/examples/Embeddings/Example04_SimpleEmbeddingProtocol.cs b/.dotnet/examples/Embeddings/Example04_SimpleEmbeddingProtocol.cs index ea0992be8..d23112e8d 100644 --- a/.dotnet/examples/Embeddings/Example04_SimpleEmbeddingProtocol.cs +++ b/.dotnet/examples/Embeddings/Example04_SimpleEmbeddingProtocol.cs @@ -17,11 +17,12 @@ public void Example04_SimpleEmbeddingProtocol() + " and a really helpful concierge. The location is perfect -- right downtown, close to all the tourist" + " attractions. 
We highly recommend this hotel."; - BinaryData input = BinaryData.FromObjectAsJson(new { - model = "text-embedding-3-small", - input = description, - encoding_format = "float" - }); + BinaryData input = BinaryData.FromObjectAsJson(new + { + model = "text-embedding-3-small", + input = description, + encoding_format = "float" + }); using BinaryContent content = BinaryContent.Create(input); ClientResult result = client.GenerateEmbeddings(content); diff --git a/.dotnet/examples/OpenAI.Examples.csproj b/.dotnet/examples/OpenAI.Examples.csproj index b33b036ea..74bf8c489 100644 --- a/.dotnet/examples/OpenAI.Examples.csproj +++ b/.dotnet/examples/OpenAI.Examples.csproj @@ -1,8 +1,13 @@ net8.0 - - $(NoWarn);CS1591 + + + $(NoWarn);CS1591; + + + $(NoWarn);OPENAI001; + latest diff --git a/.dotnet/src/Custom/Administration/Internal/GeneratorStubs.cs b/.dotnet/src/Custom/Administration/Internal/GeneratorStubs.cs new file mode 100644 index 000000000..7ed945616 --- /dev/null +++ b/.dotnet/src/Custom/Administration/Internal/GeneratorStubs.cs @@ -0,0 +1,36 @@ +namespace OpenAI.Administration; + +[CodeGenModel("AuditLogActorServiceAccount")] internal partial class InternalAuditLogActorServiceAccount { } +[CodeGenModel("AuditLogActorUser")] internal partial class InternalAuditLogActorUser { } +[CodeGenModel("AuditLogActorApiKey")] internal partial class InternalAuditLogActorApiKey { } +[CodeGenModel("AuditLogActorSession")] internal partial class InternalAuditLogActorSession { } +[CodeGenModel("AuditLogActor")] internal partial class InternalAuditLogActor { } +[CodeGenModel("AuditLog")] internal partial class InternalAuditLog { } +[CodeGenModel("ListAuditLogsResponse")] internal partial class InternalListAuditLogsResponse { } +[CodeGenModel("Invite")] internal partial class InternalInvite { } +[CodeGenModel("InviteListResponse")] internal partial class InternalInviteListResponse { } +[CodeGenModel("InviteRequest")] internal partial class InternalInviteRequest { } 
+[CodeGenModel("InviteDeleteResponse")] internal partial class InternalInviteDeleteResponse { } +[CodeGenModel("User")] internal partial class InternalUser { } +[CodeGenModel("UserListResponse")] internal partial class InternalUserListResponse { } +[CodeGenModel("UserRoleUpdateRequest")] internal partial class InternalUserRoleUpdateRequest { } +[CodeGenModel("UserDeleteResponse")] internal partial class InternalUserDeleteResponse { } +[CodeGenModel("Project")] internal partial class InternalProject { } +[CodeGenModel("ProjectListResponse")] internal partial class InternalProjectListResponse { } +[CodeGenModel("ProjectCreateRequest")] internal partial class InternalProjectCreateRequest { } +[CodeGenModel("ProjectUpdateRequest")] internal partial class InternalProjectUpdateRequest { } +[CodeGenModel("DefaultProjectErrorResponse")] internal partial class InternalDefaultProjectErrorResponse { } +[CodeGenModel("ProjectUser")] internal partial class InternalProjectUser { } +[CodeGenModel("ProjectUserListResponse")] internal partial class InternalProjectUserListResponse { } +[CodeGenModel("ProjectUserCreateRequest")] internal partial class InternalProjectUserCreateRequest { } +[CodeGenModel("ProjectUserUpdateRequest")] internal partial class InternalProjectUserUpdateRequest { } +[CodeGenModel("ProjectUserDeleteResponse")] internal partial class InternalProjectUserDeleteResponse { } +[CodeGenModel("ProjectServiceAccount")] internal partial class InternalProjectServiceAccount { } +[CodeGenModel("ProjectServiceAccountListResponse")] internal partial class InternalProjectServiceAccountListResponse { } +[CodeGenModel("ProjectServiceAccountCreateRequest")] internal partial class InternalProjectServiceAccountCreateRequest { } +[CodeGenModel("ProjectServiceAccountCreateResponse")] internal partial class InternalProjectServiceAccountCreateResponse { } +[CodeGenModel("ProjectServiceAccountApiKey")] internal partial class InternalProjectServiceAccountApiKey { } 
+[CodeGenModel("ProjectServiceAccountDeleteResponse")] internal partial class InternalProjectServiceAccountDeleteResponse { } +[CodeGenModel("ProjectApiKey")] internal partial class InternalProjectApiKey { } +[CodeGenModel("ProjectApiKeyListResponse")] internal partial class InternalProjectApiKeyListResponse { } +[CodeGenModel("ProjectApiKeyDeleteResponse")] internal partial class InternalProjectApiKeyDeleteResponse { } diff --git a/.dotnet/src/Custom/Assistants/Assistant.cs b/.dotnet/src/Custom/Assistants/Assistant.cs index 34ee40166..db714f552 100644 --- a/.dotnet/src/Custom/Assistants/Assistant.cs +++ b/.dotnet/src/Custom/Assistants/Assistant.cs @@ -1,5 +1,8 @@ +using System.Diagnostics.CodeAnalysis; + namespace OpenAI.Assistants; +[Experimental("OPENAI001")] [CodeGenModel("AssistantObject")] public partial class Assistant { diff --git a/.dotnet/src/Custom/Assistants/AssistantClient.Convenience.cs b/.dotnet/src/Custom/Assistants/AssistantClient.Convenience.cs index 5eb15133d..cc6f19ee4 100644 --- a/.dotnet/src/Custom/Assistants/AssistantClient.Convenience.cs +++ b/.dotnet/src/Custom/Assistants/AssistantClient.Convenience.cs @@ -152,7 +152,7 @@ public virtual AsyncPageCollection GetMessagesAsync( /// page of values, call . /// A collection of pages of . public virtual PageCollection GetMessages( - AssistantThread thread, + AssistantThread thread, MessageCollectionOptions options = default) { Argument.AssertNotNull(thread, nameof(thread)); diff --git a/.dotnet/src/Custom/Assistants/AssistantClient.cs b/.dotnet/src/Custom/Assistants/AssistantClient.cs index 3ca8be2f8..b0b1f8ffe 100644 --- a/.dotnet/src/Custom/Assistants/AssistantClient.cs +++ b/.dotnet/src/Custom/Assistants/AssistantClient.cs @@ -10,9 +10,7 @@ namespace OpenAI.Assistants; -/// -/// The service client for OpenAI assistants. -/// +/// The service client for OpenAI assistants operations. 
[Experimental("OPENAI001")] [CodeGenClient("Assistants")] [CodeGenSuppress("AssistantClient", typeof(ClientPipeline), typeof(ApiKeyCredential), typeof(Uri))] @@ -24,55 +22,61 @@ namespace OpenAI.Assistants; [CodeGenSuppress("ModifyAssistant", typeof(string), typeof(AssistantModificationOptions))] [CodeGenSuppress("DeleteAssistantAsync", typeof(string))] [CodeGenSuppress("DeleteAssistant", typeof(string))] -[CodeGenSuppress("GetAssistantsAsync", typeof(int?), typeof(ListOrder?), typeof(string), typeof(string))] -[CodeGenSuppress("GetAssistants", typeof(int?), typeof(ListOrder?), typeof(string), typeof(string))] +[CodeGenSuppress("GetAssistantsAsync", typeof(int?), typeof(AssistantCollectionOrder?), typeof(string), typeof(string))] +[CodeGenSuppress("GetAssistants", typeof(int?), typeof(AssistantCollectionOrder?), typeof(string), typeof(string))] public partial class AssistantClient { private readonly InternalAssistantMessageClient _messageSubClient; private readonly InternalAssistantRunClient _runSubClient; private readonly InternalAssistantThreadClient _threadSubClient; - /// - /// Initializes a new instance of that will use an API key when authenticating. - /// - /// The API key used to authenticate with the service endpoint. - /// Additional options to customize the client. - /// The provided was null. - public AssistantClient(ApiKeyCredential credential, OpenAIClientOptions options = default) - : this( - OpenAIClient.CreatePipeline(OpenAIClient.GetApiKey(credential, requireExplicitCredential: true), options), - OpenAIClient.GetEndpoint(options), - options) - { } + // CUSTOM: + // - Used a custom pipeline. + // - Demoted the endpoint parameter to be a property in the options class. + /// Initializes a new instance of . + /// The API key to authenticate with the service. + /// is null. 
+ public AssistantClient(ApiKeyCredential credential) : this(credential, new OpenAIClientOptions()) + { + } - /// - /// Initializes a new instance of that will use an API key from the OPENAI_API_KEY - /// environment variable when authenticating. - /// - /// - /// To provide an explicit credential instead of using the environment variable, use an alternate constructor like - /// . - /// - /// Additional options to customize the client. - /// The OPENAI_API_KEY environment variable was not found. - public AssistantClient(OpenAIClientOptions options = default) - : this( - OpenAIClient.CreatePipeline(OpenAIClient.GetApiKey(), options), - OpenAIClient.GetEndpoint(options), - options) - { } - - /// Initializes a new instance of . - /// The HTTP pipeline for sending and receiving REST requests and responses. - /// OpenAI Endpoint. - /// Client-wide options to propagate settings from. - protected internal AssistantClient(ClientPipeline pipeline, Uri endpoint, OpenAIClientOptions options) + // CUSTOM: + // - Used a custom pipeline. + // - Demoted the endpoint parameter to be a property in the options class. + /// Initializes a new instance of . + /// The API key to authenticate with the service. + /// The options to configure the client. + /// is null. + public AssistantClient(ApiKeyCredential credential, OpenAIClientOptions options) { + Argument.AssertNotNull(credential, nameof(credential)); + options ??= new OpenAIClientOptions(); + + _pipeline = OpenAIClient.CreatePipeline(credential, options); + _endpoint = OpenAIClient.GetEndpoint(options); + _messageSubClient = new(_pipeline, options); + _runSubClient = new(_pipeline, options); + _threadSubClient = new(_pipeline, options); + } + + // CUSTOM: + // - Used a custom pipeline. + // - Demoted the endpoint parameter to be a property in the options class. + // - Made protected. + /// Initializes a new instance of . + /// The HTTP pipeline to send and receive REST requests and responses. 
+ /// The options to configure the client. + /// is null. + protected internal AssistantClient(ClientPipeline pipeline, OpenAIClientOptions options) + { + Argument.AssertNotNull(pipeline, nameof(pipeline)); + options ??= new OpenAIClientOptions(); + _pipeline = pipeline; - _endpoint = endpoint; - _messageSubClient = new(_pipeline, _endpoint, options); - _runSubClient = new(_pipeline, _endpoint, options); - _threadSubClient = new(_pipeline, _endpoint, options); + _endpoint = OpenAIClient.GetEndpoint(options); + _messageSubClient = new(_pipeline, options); + _runSubClient = new(_pipeline, options); + _threadSubClient = new(_pipeline, options); } /// Creates a new assistant. @@ -118,7 +122,7 @@ public virtual AsyncPageCollection GetAssistantsAsync( AssistantCollectionOptions options = default, CancellationToken cancellationToken = default) { - return GetAssistantsAsync(options?.PageSize, options?.Order?.ToString(), options?.AfterId, options?.BeforeId, cancellationToken.ToRequestOptions()) + return GetAssistantsAsync(options?.PageSizeLimit, options?.Order?.ToString(), options?.AfterId, options?.BeforeId, cancellationToken.ToRequestOptions()) as AsyncPageCollection; } @@ -155,7 +159,7 @@ public virtual PageCollection GetAssistants( AssistantCollectionOptions options = default, CancellationToken cancellationToken = default) { - return GetAssistants(options?.PageSize, options?.Order?.ToString(), options?.AfterId, options?.BeforeId, cancellationToken.ToRequestOptions()) + return GetAssistants(options?.PageSizeLimit, options?.Order?.ToString(), options?.AfterId, options?.BeforeId, cancellationToken.ToRequestOptions()) as PageCollection; } @@ -462,7 +466,7 @@ public virtual AsyncPageCollection GetMessagesAsync( { Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); - return GetMessagesAsync(threadId, options?.PageSize, options?.Order?.ToString(), options?.AfterId, options?.BeforeId, cancellationToken.ToRequestOptions()) + return GetMessagesAsync(threadId, 
options?.PageSizeLimit, options?.Order?.ToString(), options?.AfterId, options?.BeforeId, cancellationToken.ToRequestOptions()) as AsyncPageCollection; } @@ -503,7 +507,7 @@ public virtual PageCollection GetMessages( { Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); - return GetMessages(threadId, options?.PageSize, options?.Order?.ToString(), options?.AfterId, options?.BeforeId, cancellationToken.ToRequestOptions()) + return GetMessages(threadId, options?.PageSizeLimit, options?.Order?.ToString(), options?.AfterId, options?.BeforeId, cancellationToken.ToRequestOptions()) as PageCollection; } @@ -837,7 +841,7 @@ public virtual AsyncPageCollection GetRunsAsync( { Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); - return GetRunsAsync(threadId, options?.PageSize, options?.Order?.ToString(), options?.AfterId, options?.BeforeId, cancellationToken.ToRequestOptions()) + return GetRunsAsync(threadId, options?.PageSizeLimit, options?.Order?.ToString(), options?.AfterId, options?.BeforeId, cancellationToken.ToRequestOptions()) as AsyncPageCollection; } @@ -878,7 +882,7 @@ public virtual PageCollection GetRuns( { Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); - return GetRuns(threadId, options?.PageSize, options?.Order?.ToString(), options?.AfterId, options?.BeforeId, cancellationToken.ToRequestOptions()) + return GetRuns(threadId, options?.PageSizeLimit, options?.Order?.ToString(), options?.AfterId, options?.BeforeId, cancellationToken.ToRequestOptions()) as PageCollection; } @@ -1089,7 +1093,7 @@ public virtual AsyncPageCollection GetRunStepsAsync( Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); Argument.AssertNotNullOrEmpty(runId, nameof(runId)); - return GetRunStepsAsync(threadId, runId, options?.PageSize, options?.Order?.ToString(), options?.AfterId, options?.BeforeId, cancellationToken.ToRequestOptions()) + return GetRunStepsAsync(threadId, runId, options?.PageSizeLimit, options?.Order?.ToString(), options?.AfterId, 
options?.BeforeId, cancellationToken.ToRequestOptions()) as AsyncPageCollection; } @@ -1133,7 +1137,7 @@ public virtual PageCollection GetRunSteps( Argument.AssertNotNullOrEmpty(threadId, nameof(threadId)); Argument.AssertNotNullOrEmpty(runId, nameof(runId)); - return GetRunSteps(threadId, runId, options?.PageSize, options?.Order?.ToString(), options?.AfterId, options?.BeforeId, cancellationToken.ToRequestOptions()) + return GetRunSteps(threadId, runId, options?.PageSizeLimit, options?.Order?.ToString(), options?.AfterId, options?.BeforeId, cancellationToken.ToRequestOptions()) as PageCollection; } diff --git a/.dotnet/src/Custom/Assistants/AssistantCollectionOptions.cs b/.dotnet/src/Custom/Assistants/AssistantCollectionOptions.cs index c7baa96d1..8d219d5e8 100644 --- a/.dotnet/src/Custom/Assistants/AssistantCollectionOptions.cs +++ b/.dotnet/src/Custom/Assistants/AssistantCollectionOptions.cs @@ -1,33 +1,34 @@ +using System.Diagnostics.CodeAnalysis; + namespace OpenAI.Assistants; -/// -/// Represents addition options available when requesting a collection of instances. -/// +/// The options to configure how objects are retrieved and paginated. +[Experimental("OPENAI001")] public class AssistantCollectionOptions { - /// - /// Creates a new instance of . - /// + /// Initializes a new instance of . public AssistantCollectionOptions() { } - /// - /// The order that results should appear in the list according to - /// their created_at timestamp. + /// + /// A limit on the number of objects to be returned per page. /// - public ListOrder? Order { get; set; } + public int? PageSizeLimit { get; set; } /// - /// The number of values to return in a page result. + /// The order in which to retrieve objects when sorted by their + /// timestamp. /// - public int? PageSize { get; set; } + public AssistantCollectionOrder? Order { get; set; } /// - /// The id of the item preceeding the first item in the collection. 
+ /// The used to retrieve the page of objects that come + /// after this one. /// public string AfterId { get; set; } /// - /// The id of the item following the last item in the collection. + /// The used to retrieve the page of objects that come + /// before this one. /// public string BeforeId { get; set; } } diff --git a/.dotnet/src/Custom/Assistants/AssistantCollectionOrder.cs b/.dotnet/src/Custom/Assistants/AssistantCollectionOrder.cs new file mode 100644 index 000000000..4a25b8885 --- /dev/null +++ b/.dotnet/src/Custom/Assistants/AssistantCollectionOrder.cs @@ -0,0 +1,17 @@ +using System.Diagnostics.CodeAnalysis; + +namespace OpenAI.Assistants; + +// CUSTOM: Renamed. +[Experimental("OPENAI001")] +[CodeGenModel("ListAssistantsRequestOrder")] +public readonly partial struct AssistantCollectionOrder +{ + // CUSTOM: Renamed. + [CodeGenMember("Asc")] + public static AssistantCollectionOrder Ascending { get; } = new AssistantCollectionOrder(AscendingValue); + + // CUSTOM: Renamed. + [CodeGenMember("Desc")] + public static AssistantCollectionOrder Descending { get; } = new AssistantCollectionOrder(DescendingValue); +} diff --git a/.dotnet/src/Custom/Assistants/AssistantCreationOptions.cs b/.dotnet/src/Custom/Assistants/AssistantCreationOptions.cs index c00b6e82e..1072600ce 100644 --- a/.dotnet/src/Custom/Assistants/AssistantCreationOptions.cs +++ b/.dotnet/src/Custom/Assistants/AssistantCreationOptions.cs @@ -1,10 +1,12 @@ using System.Collections.Generic; +using System.Diagnostics.CodeAnalysis; namespace OpenAI.Assistants; /// /// Represents additional options available when creating a new . /// +[Experimental("OPENAI001")] [CodeGenModel("CreateAssistantRequest")] [CodeGenSuppress(nameof(AssistantCreationOptions), typeof(string))] public partial class AssistantCreationOptions @@ -36,7 +38,7 @@ public partial class AssistantCreationOptions /// [CodeGenMember("TopP")] public float? 
NucleusSamplingFactor { get; set; } - + internal AssistantCreationOptions(InternalCreateAssistantRequestModel model) : this() { @@ -51,4 +53,4 @@ public AssistantCreationOptions() Metadata = new ChangeTrackingDictionary(); Tools = new ChangeTrackingList(); } -} \ No newline at end of file +} diff --git a/.dotnet/src/Custom/Assistants/AssistantModificationOptions.cs b/.dotnet/src/Custom/Assistants/AssistantModificationOptions.cs index bbd8b2efb..6d2544983 100644 --- a/.dotnet/src/Custom/Assistants/AssistantModificationOptions.cs +++ b/.dotnet/src/Custom/Assistants/AssistantModificationOptions.cs @@ -1,10 +1,12 @@ using System.Collections.Generic; +using System.Diagnostics.CodeAnalysis; namespace OpenAI.Assistants; /// /// Represents additional options available when modifying an existing . /// +[Experimental("OPENAI001")] [CodeGenModel("ModifyAssistantRequest")] public partial class AssistantModificationOptions { @@ -40,4 +42,4 @@ public partial class AssistantModificationOptions /// [CodeGenMember("TopP")] public float? 
NucleusSamplingFactor { get; set; } -} \ No newline at end of file +} diff --git a/.dotnet/src/Custom/Assistants/AssistantResponseFormat.Serialization.cs b/.dotnet/src/Custom/Assistants/AssistantResponseFormat.Serialization.cs index 19b3c5b01..363c33eab 100644 --- a/.dotnet/src/Custom/Assistants/AssistantResponseFormat.Serialization.cs +++ b/.dotnet/src/Custom/Assistants/AssistantResponseFormat.Serialization.cs @@ -1,6 +1,6 @@ using System; +using System.ClientModel; using System.ClientModel.Primitives; -using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Assistants; @@ -9,10 +9,33 @@ namespace OpenAI.Assistants; [CodeGenSuppress("global::System.ClientModel.Primitives.IJsonModel.Create", typeof(Utf8JsonReader), typeof(ModelReaderWriterOptions))] [CodeGenSuppress("global::System.ClientModel.Primitives.IPersistableModel.Write", typeof(ModelReaderWriterOptions))] [CodeGenSuppress("global::System.ClientModel.Primitives.IPersistableModel.Create", typeof(BinaryData), typeof(ModelReaderWriterOptions))] +[CodeGenSuppress("global::System.ClientModel.Primitives.IPersistableModel.GetFormatFromOptions", typeof(ModelReaderWriterOptions))] public partial class AssistantResponseFormat : IJsonModel { + internal static void SerializeAssistantResponseFormat(AssistantResponseFormat instance, Utf8JsonWriter writer, ModelReaderWriterOptions options = null) + { + throw new InvalidOperationException(); + } + + internal static AssistantResponseFormat DeserializeAssistantResponseFormat(JsonElement element, ModelReaderWriterOptions options = null) + { + return element.ValueKind switch + { + JsonValueKind.String => InternalAssistantResponseFormatPlainTextNoObject.DeserializeInternalAssistantResponseFormatPlainTextNoObject(element, options), + JsonValueKind.Object when element.TryGetProperty("type", out JsonElement discriminatorElement) + => discriminatorElement.GetString() switch + { + "json_object" => 
InternalAssistantResponseFormatJsonObject.DeserializeInternalAssistantResponseFormatJsonObject(element, options), + "json_schema" => InternalAssistantResponseFormatJsonSchema.DeserializeInternalAssistantResponseFormatJsonSchema(element, options), + "text" => InternalAssistantResponseFormatText.DeserializeInternalAssistantResponseFormatText(element, options), + _ => null, + }, + _ => null, + }; + } + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) - => CustomSerializationHelpers.SerializeInstance(this, SerializeAssistantResponseFormat, writer, options); + => CustomSerializationHelpers.SerializeInstance(this, SerializeAssistantResponseFormat, writer, options); AssistantResponseFormat IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) => CustomSerializationHelpers.DeserializeNewInstance(this, DeserializeAssistantResponseFormat, ref reader, options); @@ -23,53 +46,15 @@ BinaryData IPersistableModel.Write(ModelReaderWriterOpt AssistantResponseFormat IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) => CustomSerializationHelpers.DeserializeNewInstance(this, DeserializeAssistantResponseFormat, data, options); - internal static void SerializeAssistantResponseFormat(AssistantResponseFormat formatInstance, Utf8JsonWriter writer, ModelReaderWriterOptions options) + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + internal static AssistantResponseFormat FromResponse(PipelineResponse response) { - if (formatInstance._plainTextValue is not null) - { - writer.WriteStringValue(formatInstance._plainTextValue); - } - else - { - writer.WriteStartObject(); - writer.WritePropertyName("type"u8); - writer.WriteStringValue(formatInstance._objectType); - writer.WriteSerializedAdditionalRawData(formatInstance.SerializedAdditionalRawData, options); - writer.WriteEndObject(); - } + throw new InvalidOperationException(); } - internal static AssistantResponseFormat 
DeserializeAssistantResponseFormat(JsonElement element, ModelReaderWriterOptions options = null) + internal virtual BinaryContent ToBinaryContent() { - options ??= ModelSerializationExtensions.WireOptions; - - string plainTextValue = null; - string objectType = null; - IDictionary rawDataDictionary = new ChangeTrackingDictionary(); - - if (element.ValueKind == JsonValueKind.Null) - { - return null; - } - else if (element.ValueKind == JsonValueKind.String) - { - plainTextValue = element.GetString(); - } - else - { - foreach (var property in element.EnumerateObject()) - { - if (property.NameEquals("type"u8)) - { - objectType = property.Value.GetString(); - continue; - } - if (true) - { - rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); - } - } - } - return new AssistantResponseFormat(plainTextValue, objectType, rawDataDictionary); + return BinaryContent.Create(this, ModelSerializationExtensions.WireOptions); } } diff --git a/.dotnet/src/Custom/Assistants/AssistantResponseFormat.cs b/.dotnet/src/Custom/Assistants/AssistantResponseFormat.cs index 226fa31df..597bbb6b4 100644 --- a/.dotnet/src/Custom/Assistants/AssistantResponseFormat.cs +++ b/.dotnet/src/Custom/Assistants/AssistantResponseFormat.cs @@ -1,107 +1,111 @@ +using OpenAI.Internal; using System; -using System.Collections.Generic; +using System.ClientModel.Primitives; using System.ComponentModel; +using System.Diagnostics.CodeAnalysis; namespace OpenAI.Assistants; -/// -/// Specifies the format that the model must output. Compatible with GPT-4o, GPT-4 Turbo, and all GPT-3.5 Turbo models since gpt-3.5-turbo-1106. -/// -/// -/// -/// Setting to { "type": "json_object" } enables JSON mode, which guarantees the message the model generates is valid JSON. 
-/// -/// -/// Important: when using JSON mode, you must also instruct the model to produce JSON yourself via a system or user message.Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request.Also note that the message content may be partially cut off if finish_reason= "length", which indicates the generation exceeded max_tokens or the conversation exceeded the max context length. -/// -/// +[Experimental("OPENAI001")] [CodeGenModel("AssistantResponseFormat")] -public partial class AssistantResponseFormat +public partial class AssistantResponseFormat : IEquatable, IEquatable { - private readonly string _plainTextValue; - private readonly string _objectType; - private readonly IDictionary SerializedAdditionalRawData; + public static AssistantResponseFormat Auto { get; } = CreateAutoFormat(); + public static AssistantResponseFormat Text { get; } = CreateTextFormat(); + public static AssistantResponseFormat JsonObject { get; } = CreateJsonObjectFormat(); - private const string AutoValue = "auto"; - private const string TextValue = "text"; - private const string JsonObjectValue = "json_object"; - - /// - /// Default option. Specifies that the model should automatically determine whether it should respond with - /// plain text or another format. 
- /// - public static AssistantResponseFormat Auto { get; } - = new(plainTextValue: AutoValue, objectType: null, serializedAdditionalRawData: null); + public static AssistantResponseFormat CreateAutoFormat() + => new InternalAssistantResponseFormatPlainTextNoObject("auto"); + public static AssistantResponseFormat CreateTextFormat() + => new InternalAssistantResponseFormatText(); + public static AssistantResponseFormat CreateJsonObjectFormat() + => new InternalAssistantResponseFormatJsonObject(); + public static AssistantResponseFormat CreateJsonSchemaFormat( + string name, + BinaryData jsonSchema, + string description = null, + bool? strictSchemaEnabled = null) + { + Argument.AssertNotNullOrEmpty(name, nameof(name)); + Argument.AssertNotNull(jsonSchema, nameof(jsonSchema)); - /// - /// Specifies that the model should respond with plain text. - /// - public static AssistantResponseFormat Text { get; } - = new(plainTextValue: null, objectType: TextValue, serializedAdditionalRawData: null); + InternalResponseFormatJsonSchemaJsonSchema internalSchema = new( + description, + name, + jsonSchema, + strictSchemaEnabled, + null); + return new InternalAssistantResponseFormatJsonSchema(internalSchema); + } - /// - /// Specifies that the model should reply with a structured JSON object, enabling JSON mode. - /// - /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a - /// system or user message. Without this, the model may generate an unending stream of whitespace until the - /// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that - /// the message content may be partially cut off if `finish_reason="length"`, which indicates the generation - /// exceeded `max_tokens` or the conversation exceeded the max context length. 
- /// - public static AssistantResponseFormat JsonObject { get; } - = new(plainTextValue: null, objectType: JsonObjectValue, serializedAdditionalRawData: null); + [EditorBrowsable(EditorBrowsableState.Never)] + public static bool operator ==(AssistantResponseFormat first, AssistantResponseFormat second) + { + if (first is null) + { + return second is null; + } + return first.Equals(second); + } - /// - /// Creates a new instance of for mocking. - /// - protected AssistantResponseFormat() - { } + [EditorBrowsable(EditorBrowsableState.Never)] + public static bool operator !=(AssistantResponseFormat first, AssistantResponseFormat second) + => !(first == second); - internal AssistantResponseFormat(string plainTextValue, string objectType, IDictionary serializedAdditionalRawData) + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) { - _plainTextValue = plainTextValue; - _objectType = objectType; - SerializedAdditionalRawData = serializedAdditionalRawData ?? 
new ChangeTrackingDictionary(); + return (this as IEquatable).Equals(obj as AssistantResponseFormat) + || ToString().Equals(obj?.ToString()); } - /// - public static bool operator ==(AssistantResponseFormat left, AssistantResponseFormat right) => left.Equals(right); - /// - public static bool operator !=(AssistantResponseFormat left, AssistantResponseFormat right) => !left.Equals(right); + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => ToString().GetHashCode(); - /// - public static implicit operator AssistantResponseFormat(string value) + [EditorBrowsable(EditorBrowsableState.Never)] + public static implicit operator AssistantResponseFormat(string plainTextFormat) + => new InternalAssistantResponseFormatPlainTextNoObject(plainTextFormat); + + [EditorBrowsable(EditorBrowsableState.Never)] + bool IEquatable.Equals(AssistantResponseFormat other) { - if (string.Equals(value, AutoValue, StringComparison.OrdinalIgnoreCase)) + if (other is null) { - return Auto; + return false; } - if (string.Equals(value, TextValue, StringComparison.OrdinalIgnoreCase)) + + if (Object.ReferenceEquals(this, other)) { - return Text; + return true; } - if (string.Equals(value, JsonObjectValue, StringComparison.OrdinalIgnoreCase)) + + return + (this is InternalAssistantResponseFormatPlainTextNoObject thisPlainText + && other is InternalAssistantResponseFormatPlainTextNoObject otherPlainText + && thisPlainText.Value.Equals(otherPlainText.Value)) + || (this is InternalAssistantResponseFormatText && other is InternalAssistantResponseFormatText) + || (this is InternalAssistantResponseFormatJsonObject && other is InternalAssistantResponseFormatJsonObject) + || (this is InternalAssistantResponseFormatJsonSchema thisJsonSchema + && other is InternalAssistantResponseFormatJsonSchema otherJsonSchema + && thisJsonSchema.JsonSchema.Name.Equals(otherJsonSchema.JsonSchema.Name)); + } + + [EditorBrowsable(EditorBrowsableState.Never)] + bool IEquatable.Equals(string 
other) + { + return this is InternalAssistantResponseFormatPlainTextNoObject thisPlainText + && thisPlainText.Value.Equals(other); + } + + public override string ToString() + { + if (this is InternalAssistantResponseFormatPlainTextNoObject plainTextInstance) { - return JsonObject; + return plainTextInstance.Value; } else { - return new(plainTextValue: null, objectType: value, serializedAdditionalRawData: null); + return ModelReaderWriter.Write(this).ToString(); } } - - /// - [EditorBrowsable(EditorBrowsableState.Never)] - public override bool Equals(object obj) => obj is AssistantResponseFormat other && Equals(other); - /// - public bool Equals(AssistantResponseFormat other) - => string.Equals(_plainTextValue, other?._plainTextValue, StringComparison.InvariantCultureIgnoreCase) - && string.Equals(_objectType, other?._objectType, StringComparison.InvariantCultureIgnoreCase); - - /// - [EditorBrowsable(EditorBrowsableState.Never)] - public override int GetHashCode() => $"{_plainTextValue}/{_objectType}".GetHashCode(); - /// - public override string ToString() => _plainTextValue ?? 
_objectType; } diff --git a/.dotnet/src/Custom/Assistants/AssistantThread.cs b/.dotnet/src/Custom/Assistants/AssistantThread.cs index 376766668..ea5e65f72 100644 --- a/.dotnet/src/Custom/Assistants/AssistantThread.cs +++ b/.dotnet/src/Custom/Assistants/AssistantThread.cs @@ -1,5 +1,8 @@ +using System.Diagnostics.CodeAnalysis; + namespace OpenAI.Assistants; +[Experimental("OPENAI001")] [CodeGenModel("ThreadObject")] public partial class AssistantThread { diff --git a/.dotnet/src/Custom/Assistants/CodeInterpreterToolDefinition.Serialization.cs b/.dotnet/src/Custom/Assistants/CodeInterpreterToolDefinition.Serialization.cs index 6318e87c7..208ac9240 100644 --- a/.dotnet/src/Custom/Assistants/CodeInterpreterToolDefinition.Serialization.cs +++ b/.dotnet/src/Custom/Assistants/CodeInterpreterToolDefinition.Serialization.cs @@ -1,7 +1,4 @@ -using System; -using System.ClientModel; using System.ClientModel.Primitives; -using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Assistants; @@ -15,7 +12,7 @@ void IJsonModel.Write(Utf8JsonWriter writer, Mode internal static void SerializeCodeInterpreterToolDefinition(CodeInterpreterToolDefinition instance, Utf8JsonWriter writer, ModelReaderWriterOptions options) => instance.WriteCore(writer, options); - protected override void WriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) + internal override void WriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) { writer.WriteStartObject(); writer.WritePropertyName("type"u8); @@ -23,4 +20,4 @@ protected override void WriteCore(Utf8JsonWriter writer, ModelReaderWriterOption writer.WriteSerializedAdditionalRawData(SerializedAdditionalRawData, options); writer.WriteEndObject(); } -} \ No newline at end of file +} diff --git a/.dotnet/src/Custom/Assistants/CodeInterpreterToolResources.cs b/.dotnet/src/Custom/Assistants/CodeInterpreterToolResources.cs index 82f354922..6b763dc67 100644 --- 
a/.dotnet/src/Custom/Assistants/CodeInterpreterToolResources.cs +++ b/.dotnet/src/Custom/Assistants/CodeInterpreterToolResources.cs @@ -1,8 +1,10 @@ using System.Collections.Generic; +using System.Diagnostics.CodeAnalysis; namespace OpenAI.Assistants; /// The AssistantObjectToolResourcesCodeInterpreter. +[Experimental("OPENAI001")] [CodeGenModel("AssistantObjectToolResourcesCodeInterpreter")] public partial class CodeInterpreterToolResources { diff --git a/.dotnet/src/Custom/Assistants/FileSearchToolDefinition.Serialization.cs b/.dotnet/src/Custom/Assistants/FileSearchToolDefinition.Serialization.cs index 19cd98074..71c9fc541 100644 --- a/.dotnet/src/Custom/Assistants/FileSearchToolDefinition.Serialization.cs +++ b/.dotnet/src/Custom/Assistants/FileSearchToolDefinition.Serialization.cs @@ -1,7 +1,4 @@ -using System; -using System.ClientModel; using System.ClientModel.Primitives; -using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Assistants; @@ -15,7 +12,7 @@ void IJsonModel.Write(Utf8JsonWriter writer, ModelRead internal static void SerializeFileSearchToolDefinition(FileSearchToolDefinition instance, Utf8JsonWriter writer, ModelReaderWriterOptions options) => instance.WriteCore(writer, options); - protected override void WriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) + internal override void WriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) { writer.WriteStartObject(); writer.WritePropertyName("type"u8); diff --git a/.dotnet/src/Custom/Assistants/FileSearchToolDefinition.cs b/.dotnet/src/Custom/Assistants/FileSearchToolDefinition.cs index 2bc8efff7..1fb69bec1 100644 --- a/.dotnet/src/Custom/Assistants/FileSearchToolDefinition.cs +++ b/.dotnet/src/Custom/Assistants/FileSearchToolDefinition.cs @@ -1,9 +1,8 @@ -using System; -using System.Collections.Generic; using System.Diagnostics.CodeAnalysis; namespace OpenAI.Assistants; +[Experimental("OPENAI001")] [CodeGenModel("AssistantToolsFileSearch")] 
[CodeGenSuppress(nameof(FileSearchToolDefinition))] public partial class FileSearchToolDefinition : ToolDefinition diff --git a/.dotnet/src/Custom/Assistants/FileSearchToolResources.cs b/.dotnet/src/Custom/Assistants/FileSearchToolResources.cs index 66c0a1d43..8c199b06d 100644 --- a/.dotnet/src/Custom/Assistants/FileSearchToolResources.cs +++ b/.dotnet/src/Custom/Assistants/FileSearchToolResources.cs @@ -1,10 +1,12 @@ using System.ClientModel.Primitives; using System.Collections.Generic; +using System.Diagnostics.CodeAnalysis; using System.Text.Json; namespace OpenAI.Assistants; -[CodeGenModel("AssistantObjectToolResourcesFileSearch")] +[Experimental("OPENAI001")] +[CodeGenModel("ToolResourcesFileSearch")] [CodeGenSerialization(nameof(NewVectorStores), "vector_stores", SerializationValueHook = nameof(SerializeNewVectorStores))] public partial class FileSearchToolResources { @@ -24,7 +26,7 @@ public IList VectorStoreIds } } - [CodeGenMember("vector_stores")] + [CodeGenMember("VectorStores")] public IList NewVectorStores { get; } = new ChangeTrackingList(); public FileSearchToolResources() diff --git a/.dotnet/src/Custom/Assistants/FunctionToolDefinition.Serialization.cs b/.dotnet/src/Custom/Assistants/FunctionToolDefinition.Serialization.cs index 701725671..cafdd5d47 100644 --- a/.dotnet/src/Custom/Assistants/FunctionToolDefinition.Serialization.cs +++ b/.dotnet/src/Custom/Assistants/FunctionToolDefinition.Serialization.cs @@ -1,7 +1,4 @@ -using System; -using System.ClientModel; using System.ClientModel.Primitives; -using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Assistants; @@ -14,8 +11,8 @@ void IJsonModel.Write(Utf8JsonWriter writer, ModelReader internal static void SerializeFunctionToolDefinition(FunctionToolDefinition instance, Utf8JsonWriter writer, ModelReaderWriterOptions options) => instance.WriteCore(writer, options); - - protected override void WriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) + + internal 
override void WriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) { writer.WriteStartObject(); writer.WritePropertyName("function"u8); @@ -25,4 +22,4 @@ protected override void WriteCore(Utf8JsonWriter writer, ModelReaderWriterOption writer.WriteSerializedAdditionalRawData(SerializedAdditionalRawData, options); writer.WriteEndObject(); } -} \ No newline at end of file +} diff --git a/.dotnet/src/Custom/Assistants/FunctionToolDefinition.cs b/.dotnet/src/Custom/Assistants/FunctionToolDefinition.cs index 764e93ccf..7ceaa6435 100644 --- a/.dotnet/src/Custom/Assistants/FunctionToolDefinition.cs +++ b/.dotnet/src/Custom/Assistants/FunctionToolDefinition.cs @@ -4,6 +4,7 @@ namespace OpenAI.Assistants; +[Experimental("OPENAI001")] [CodeGenModel("AssistantToolsFunction")] [CodeGenSuppress(nameof(FunctionToolDefinition), typeof(InternalFunctionDefinition))] public partial class FunctionToolDefinition : ToolDefinition @@ -34,15 +35,21 @@ public BinaryData Parameters set => _internalFunction.Parameters = value; } + public bool? StrictParameterSchemaEnabled + { + get => _internalFunction.Strict; + set => _internalFunction.Strict = value; + } + /// /// Creates a new instance of . 
/// [SetsRequiredMembers] - public FunctionToolDefinition(string name, string description = null, BinaryData parameters = null) + public FunctionToolDefinition(string name) : base("function") { Argument.AssertNotNullOrEmpty(name, nameof(name)); - _internalFunction = new(description, name, parameters, null); + _internalFunction = new(null, name, null, null, null); } /// diff --git a/.dotnet/src/Custom/Assistants/GeneratorStubs.cs b/.dotnet/src/Custom/Assistants/GeneratorStubs.cs index 9efb3dfb5..93223b656 100644 --- a/.dotnet/src/Custom/Assistants/GeneratorStubs.cs +++ b/.dotnet/src/Custom/Assistants/GeneratorStubs.cs @@ -1,51 +1,67 @@ +using System.Diagnostics.CodeAnalysis; + namespace OpenAI.Assistants; /* * This file stubs and performs minimal customization to generated public types for the OpenAI.Assistants namespace * that are not otherwise attributed elsewhere. */ - +[Experimental("OPENAI001")] [CodeGenModel("AssistantToolsCode")] public partial class CodeInterpreterToolDefinition : ToolDefinition { } +[Experimental("OPENAI001")] [CodeGenModel("MessageObjectStatus")] public readonly partial struct MessageStatus { } +[Experimental("OPENAI001")] [CodeGenModel("MessageObjectIncompleteDetails")] public partial class MessageFailureDetails { } +[Experimental("OPENAI001")] [CodeGenModel("MessageObjectIncompleteDetailsReason")] public readonly partial struct MessageFailureReason { } +[Experimental("OPENAI001")] [CodeGenModel("RunCompletionUsage")] public partial class RunTokenUsage { } +[Experimental("OPENAI001")] [CodeGenModel("RunObjectLastError")] public partial class RunError { } +[Experimental("OPENAI001")] [CodeGenModel("RunObjectLastErrorCode")] public readonly partial struct RunErrorCode { } +[Experimental("OPENAI001")] [CodeGenModel("RunObjectIncompleteDetails")] public partial class RunIncompleteDetails { } +[Experimental("OPENAI001")] [CodeGenModel("RunObjectIncompleteDetailsReason")] public readonly partial struct RunIncompleteReason { } 
+[Experimental("OPENAI001")] [CodeGenModel("RunStepObjectType")] public readonly partial struct RunStepType { } +[Experimental("OPENAI001")] [CodeGenModel("RunStepObjectStatus")] public readonly partial struct RunStepStatus { } +[Experimental("OPENAI001")] [CodeGenModel("RunStepObjectLastError")] public partial class RunStepError { } +[Experimental("OPENAI001")] [CodeGenModel("RunStepObjectLastErrorCode")] public readonly partial struct RunStepErrorCode { } +[Experimental("OPENAI001")] [CodeGenModel("RunStepCompletionUsage")] public partial class RunStepTokenUsage { } +[Experimental("OPENAI001")] [CodeGenModel("RunStepDetailsToolCallsCodeObjectCodeInterpreterOutputsObject")] public partial class RunStepCodeInterpreterOutput { } diff --git a/.dotnet/src/Custom/Assistants/Internal/GeneratorStubs.Internal.cs b/.dotnet/src/Custom/Assistants/Internal/GeneratorStubs.Internal.cs index e8fae6b4f..03d4426b3 100644 --- a/.dotnet/src/Custom/Assistants/Internal/GeneratorStubs.Internal.cs +++ b/.dotnet/src/Custom/Assistants/Internal/GeneratorStubs.Internal.cs @@ -120,6 +120,9 @@ internal partial class InternalMessageContentItemFileObjectImageFile [CodeGenModel("MessageContentTextObjectText")] internal partial class InternalMessageContentTextObjectText { } +[CodeGenModel("MessageContentRefusalObjectType")] +internal readonly partial struct InternalMessageContentRefusalObjectType { } + [CodeGenModel("RunStepDetailsMessageCreationObjectMessageCreation")] internal partial class InternalRunStepDetailsMessageCreationObjectMessageCreation { } @@ -151,31 +154,31 @@ internal partial class InternalRunToolCallObjectFunction { } internal partial class InternalListAssistantsResponse : IInternalListResponse { } [CodeGenModel("ListAssistantsResponseObject")] -internal readonly partial struct InternalListAssistantsResponseObject {} +internal readonly partial struct InternalListAssistantsResponseObject { } [CodeGenModel("ListThreadsResponse")] internal partial class InternalListThreadsResponse 
: IInternalListResponse { } [CodeGenModel("ListThreadsResponseObject")] -internal readonly partial struct InternalListThreadsResponseObject {} +internal readonly partial struct InternalListThreadsResponseObject { } [CodeGenModel("ListMessagesResponse")] internal partial class InternalListMessagesResponse : IInternalListResponse { } [CodeGenModel("ListMessagesResponseObject")] -internal readonly partial struct InternalListMessagesResponseObject {} +internal readonly partial struct InternalListMessagesResponseObject { } [CodeGenModel("ListRunsResponse")] internal partial class InternalListRunsResponse : IInternalListResponse { } [CodeGenModel("ListRunsResponseObject")] -internal readonly partial struct InternalListRunsResponseObject {} +internal readonly partial struct InternalListRunsResponseObject { } [CodeGenModel("ListRunStepsResponse")] internal partial class InternalListRunStepsResponse : IInternalListResponse { } [CodeGenModel("ListRunStepsResponseObject")] -internal readonly partial struct InternalListRunStepsResponseObject {} +internal readonly partial struct InternalListRunStepsResponseObject { } [CodeGenModel("RunStepDetailsToolCallsFileSearchObject")] internal partial class InternalRunStepFileSearchToolCallDetails { } @@ -189,9 +192,6 @@ internal readonly partial struct InternalAssistantsNamedToolChoiceType { } [CodeGenModel("RunStepDeltaStepDetailsToolCallsCodeObject")] internal partial class InternalRunStepDeltaStepDetailsToolCallsCodeObject { } -[CodeGenModel("RunStepUpdateCodeInterpreterOutput")] -internal abstract partial class InternalRunStepUpdateCodeInterpreterOutput { } - [CodeGenModel("RunStepDeltaStepDetailsToolCallsCodeOutputImageObject")] internal partial class InternalRunStepDeltaStepDetailsToolCallsCodeOutputImageObject { } @@ -232,12 +232,6 @@ internal partial class InternalRunStepDeltaStepDetails { } [CodeGenModel("RunStepDeltaStepDetailsToolCallsFunctionObjectFunction")] internal partial class 
InternalRunStepDeltaStepDetailsToolCallsFunctionObjectFunction { } -[CodeGenModel("AssistantsApiResponseFormat")] -internal partial class InternalAssistantsApiResponseFormat { } - -[CodeGenModel("AssistantsApiResponseFormatType")] -internal readonly partial struct InternalAssistantsApiResponseFormatType { } - [CodeGenModel("AssistantsNamedToolChoiceFunction")] internal partial class InternalAssistantsNamedToolChoiceFunction { } @@ -307,33 +301,15 @@ internal readonly partial struct InternalMessageObjectRole { } [CodeGenModel("CreateRunRequestModel")] internal readonly partial struct InternalCreateRunRequestModel { } -[CodeGenModel("CreateRunRequestToolChoice")] -internal readonly partial struct InternalCreateRunRequestToolChoice { } - -[CodeGenModel("CreateAssistantRequestResponseFormat1")] -internal readonly partial struct InternalCreateAssistantRequestResponseFormat { } - [CodeGenModel("CreateAssistantRequestToolResources")] internal partial class InternalCreateAssistantRequestToolResources { } [CodeGenModel("CreateAssistantRequestToolResourcesCodeInterpreter")] internal partial class InternalCreateAssistantRequestToolResourcesCodeInterpreter { } -[CodeGenModel("CreateAssistantRequestToolResourcesFileSearchBase")] -internal partial class InternalCreateAssistantRequestToolResourcesFileSearchBase { } - -[CodeGenModel("CreateAssistantRequestToolResourcesFileSearchVectorStoreCreationHelpers")] -internal partial class InternalCreateAssistantRequestToolResourcesFileSearchVectorStoreCreationHelpers { } - -[CodeGenModel("CreateAssistantRequestToolResourcesFileSearchVectorStoreIdReferences")] -internal partial class InternalCreateAssistantRequestToolResourcesFileSearchVectorStoreIdReferences { } - [CodeGenModel("CreateThreadAndRunRequestModel")] internal readonly partial struct InternalCreateThreadAndRunRequestModel { } -[CodeGenModel("CreateThreadAndRunRequestResponseFormat")] -internal readonly partial struct InternalCreateThreadAndRunRequestResponseFormat { } - 
[CodeGenModel("CreateThreadAndRunRequestToolChoice")] internal readonly partial struct InternalCreateThreadAndRunRequestToolChoice { } @@ -343,9 +319,6 @@ internal partial class InternalCreateThreadAndRunRequestToolResources { } [CodeGenModel("CreateThreadAndRunRequestToolResourcesCodeInterpreter")] internal partial class InternalCreateThreadAndRunRequestToolResourcesCodeInterpreter { } -[CodeGenModel("CreateThreadAndRunRequestToolResourcesFileSearch")] -internal partial class InternalCreateThreadAndRunRequestToolResourcesFileSearch { } - [CodeGenModel("CreateThreadRequestToolResources")] internal partial class InternalCreateThreadRequestToolResources { } @@ -355,33 +328,18 @@ internal partial class InternalCreateThreadRequestToolResourcesCodeInterpreter { [CodeGenModel("CreateThreadRequestToolResourcesFileSearchBase")] internal partial class InternalCreateThreadRequestToolResourcesFileSearchBase { } -[CodeGenModel("CreateThreadRequestToolResourcesFileSearchVectorStoreCreationHelpers")] -internal partial class InternalCreateThreadRequestToolResourcesFileSearchVectorStoreCreationHelpers { } - -[CodeGenModel("CreateThreadRequestToolResourcesFileSearchVectorStoreCreationHelpersVectorStore")] -internal partial class InternalCreateThreadRequestToolResourcesFileSearchVectorStoreCreationHelpersVectorStore { } - -[CodeGenModel("CreateThreadRequestToolResourcesFileSearchVectorStoreIdReferences")] -internal partial class InternalCreateThreadRequestToolResourcesFileSearchVectorStoreIdReferences { } - [CodeGenModel("ModifyAssistantRequestToolResources")] internal partial class InternalModifyAssistantRequestToolResources { } [CodeGenModel("ModifyAssistantRequestToolResourcesCodeInterpreter")] internal partial class InternalModifyAssistantRequestToolResourcesCodeInterpreter { } -[CodeGenModel("ModifyAssistantRequestToolResourcesFileSearch")] -internal partial class InternalModifyAssistantRequestToolResourcesFileSearch { } - [CodeGenModel("ModifyThreadRequestToolResources")] 
internal partial class InternalModifyThreadRequestToolResources { } [CodeGenModel("ModifyThreadRequestToolResourcesCodeInterpreter")] internal partial class InternalModifyThreadRequestToolResourcesCodeInterpreter { } -[CodeGenModel("ModifyThreadRequestToolResourcesFileSearch")] -internal partial class InternalModifyThreadRequestToolResourcesFileSearch { } - [CodeGenModel("ThreadObjectToolResources")] internal partial class InternalThreadObjectToolResources { } @@ -396,3 +354,21 @@ internal partial class InternalAssistantToolsFileSearchTypeOnly { } [CodeGenModel("AssistantToolsFileSearchTypeOnlyType")] internal readonly partial struct InternalAssistantToolsFileSearchTypeOnlyType { } + +[CodeGenModel("AssistantResponseFormatText")] +internal partial class InternalAssistantResponseFormatText { } + +[CodeGenModel("AssistantResponseFormatJsonObject")] +internal partial class InternalAssistantResponseFormatJsonObject { } + +[CodeGenModel("AssistantResponseFormatJsonSchema")] +internal partial class InternalAssistantResponseFormatJsonSchema { } + +[CodeGenModel("UnknownAssistantResponseFormat")] +internal partial class InternalUnknownAssistantResponseFormat { } + +[CodeGenModel("MessageDeltaContentRefusalObject")] +internal partial class InternalMessageDeltaContentRefusalObject { } + +[CodeGenModel("ToolResourcesFileSearchIdsOnly")] +internal partial class InternalToolResourcesFileSearchIdsOnly { } \ No newline at end of file diff --git a/.dotnet/src/Custom/Assistants/Internal/InternalAssistantMessageClient.cs b/.dotnet/src/Custom/Assistants/Internal/InternalAssistantMessageClient.cs index baaf1a3af..800de4483 100644 --- a/.dotnet/src/Custom/Assistants/Internal/InternalAssistantMessageClient.cs +++ b/.dotnet/src/Custom/Assistants/Internal/InternalAssistantMessageClient.cs @@ -8,8 +8,8 @@ namespace OpenAI.Assistants; [CodeGenSuppress("InternalAssistantMessageClient", typeof(ClientPipeline), typeof(ApiKeyCredential), typeof(Uri))] [CodeGenSuppress("CreateMessageAsync", 
typeof(string), typeof(MessageCreationOptions))] [CodeGenSuppress("CreateMessage", typeof(string), typeof(MessageCreationOptions))] -[CodeGenSuppress("GetMessagesAsync", typeof(string), typeof(int?), typeof(ListOrder?), typeof(string), typeof(string))] -[CodeGenSuppress("GetMessages", typeof(string), typeof(int?), typeof(ListOrder?), typeof(string), typeof(string))] +[CodeGenSuppress("GetMessagesAsync", typeof(string), typeof(int?), typeof(MessageCollectionOrder?), typeof(string), typeof(string))] +[CodeGenSuppress("GetMessages", typeof(string), typeof(int?), typeof(MessageCollectionOrder?), typeof(string), typeof(string))] [CodeGenSuppress("GetMessageAsync", typeof(string), typeof(string))] [CodeGenSuppress("GetMessage", typeof(string), typeof(string))] [CodeGenSuppress("ModifyMessageAsync", typeof(string), typeof(string), typeof(MessageModificationOptions))] @@ -18,42 +18,46 @@ namespace OpenAI.Assistants; [CodeGenSuppress("DeleteMessage", typeof(string), typeof(string))] internal partial class InternalAssistantMessageClient { - /// - /// Initializes a new instance of that will use an API key when authenticating. - /// - /// The API key used to authenticate with the service endpoint. - /// Additional options to customize the client. - /// The provided was null. - public InternalAssistantMessageClient(ApiKeyCredential credential, OpenAIClientOptions options = default) - : this( - OpenAIClient.CreatePipeline(OpenAIClient.GetApiKey(credential, requireExplicitCredential: true), options), - OpenAIClient.GetEndpoint(options), - options) - { } + // CUSTOM: + // - Used a custom pipeline. + // - Demoted the endpoint parameter to be a property in the options class. + /// Initializes a new instance of . + /// The API key to authenticate with the service. + /// is null. + public InternalAssistantMessageClient(ApiKeyCredential credential) : this(credential, new OpenAIClientOptions()) + { + } + + // CUSTOM: + // - Used a custom pipeline. 
+ // - Demoted the endpoint parameter to be a property in the options class. + /// Initializes a new instance of . + /// The API key to authenticate with the service. + /// The options to configure the client. + /// is null. + public InternalAssistantMessageClient(ApiKeyCredential credential, OpenAIClientOptions options) + { + Argument.AssertNotNull(credential, nameof(credential)); + options ??= new OpenAIClientOptions(); - /// - /// Initializes a new instance of that will use an API key from the OPENAI_API_KEY - /// environment variable when authenticating. - /// - /// - /// To provide an explicit credential instead of using the environment variable, use an alternate constructor like - /// . - /// - /// Additional options to customize the client. - /// The OPENAI_API_KEY environment variable was not found. - public InternalAssistantMessageClient(OpenAIClientOptions options = default) - : this( - OpenAIClient.CreatePipeline(OpenAIClient.GetApiKey(), options), - OpenAIClient.GetEndpoint(options), - options) - { } + _pipeline = OpenAIClient.CreatePipeline(credential, options); + _endpoint = OpenAIClient.GetEndpoint(options); + } - /// Initializes a new instance of . - /// The HTTP pipeline for sending and receiving REST requests and responses. - /// OpenAI Endpoint. - protected internal InternalAssistantMessageClient(ClientPipeline pipeline, Uri endpoint, OpenAIClientOptions options) + // CUSTOM: + // - Used a custom pipeline. + // - Demoted the endpoint parameter to be a property in the options class. + // - Made protected. + /// Initializes a new instance of . + /// The HTTP pipeline to send and receive REST requests and responses. + /// The options to configure the client. + /// is null. 
+ protected internal InternalAssistantMessageClient(ClientPipeline pipeline, OpenAIClientOptions options) { + Argument.AssertNotNull(pipeline, nameof(pipeline)); + options ??= new OpenAIClientOptions(); + _pipeline = pipeline; - _endpoint = endpoint; + _endpoint = OpenAIClient.GetEndpoint(options); } } diff --git a/.dotnet/src/Custom/Assistants/Internal/InternalAssistantResponseFormatPlainTextNoObject.Serialization.cs b/.dotnet/src/Custom/Assistants/Internal/InternalAssistantResponseFormatPlainTextNoObject.Serialization.cs new file mode 100644 index 000000000..1d962611c --- /dev/null +++ b/.dotnet/src/Custom/Assistants/Internal/InternalAssistantResponseFormatPlainTextNoObject.Serialization.cs @@ -0,0 +1,36 @@ +using System; +using System.ClientModel.Primitives; +using System.Text.Json; + +namespace OpenAI.Assistants; + +internal partial class InternalAssistantResponseFormatPlainTextNoObject : IJsonModel +{ + internal static void SerializeInternalAssistantResponseFormatPlainTextNoObject(InternalAssistantResponseFormatPlainTextNoObject instance, Utf8JsonWriter writer, ModelReaderWriterOptions options = null) + { + writer.WriteStringValue(instance.Value); + } + + internal static InternalAssistantResponseFormatPlainTextNoObject DeserializeInternalAssistantResponseFormatPlainTextNoObject(JsonElement element, ModelReaderWriterOptions options = null) + { + if (element.ValueKind == JsonValueKind.String) + { + return new(element.GetString()); + } + return null; + } + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + => CustomSerializationHelpers.SerializeInstance(this, SerializeInternalAssistantResponseFormatPlainTextNoObject, writer, options); + + InternalAssistantResponseFormatPlainTextNoObject IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + => CustomSerializationHelpers.DeserializeNewInstance(this, DeserializeInternalAssistantResponseFormatPlainTextNoObject, ref reader, options); + + BinaryData 
IPersistableModel.Write(ModelReaderWriterOptions options) + => CustomSerializationHelpers.SerializeInstance(this, options); + + InternalAssistantResponseFormatPlainTextNoObject IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + => CustomSerializationHelpers.DeserializeNewInstance(this, DeserializeInternalAssistantResponseFormatPlainTextNoObject, data, options); + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; +} diff --git a/.dotnet/src/Custom/Assistants/Internal/InternalAssistantResponseFormatPlainTextNoObject.cs b/.dotnet/src/Custom/Assistants/Internal/InternalAssistantResponseFormatPlainTextNoObject.cs new file mode 100644 index 000000000..6c3e2ee7c --- /dev/null +++ b/.dotnet/src/Custom/Assistants/Internal/InternalAssistantResponseFormatPlainTextNoObject.cs @@ -0,0 +1,11 @@ +namespace OpenAI.Assistants; + +internal partial class InternalAssistantResponseFormatPlainTextNoObject : AssistantResponseFormat +{ + public string Value { get; set; } + + public InternalAssistantResponseFormatPlainTextNoObject(string value) + { + Value = value; + } +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Assistants/Internal/InternalAssistantRunClient.cs b/.dotnet/src/Custom/Assistants/Internal/InternalAssistantRunClient.cs index 675133fa1..a1237b54f 100644 --- a/.dotnet/src/Custom/Assistants/Internal/InternalAssistantRunClient.cs +++ b/.dotnet/src/Custom/Assistants/Internal/InternalAssistantRunClient.cs @@ -10,8 +10,8 @@ namespace OpenAI.Assistants; [CodeGenSuppress("CreateThreadAndRun", typeof(InternalCreateThreadAndRunRequest))] [CodeGenSuppress("CreateRunAsync", typeof(string), typeof(RunCreationOptions))] [CodeGenSuppress("CreateRun", typeof(string), typeof(RunCreationOptions))] -[CodeGenSuppress("GetRunsAsync", typeof(string), typeof(int?), typeof(ListOrder?), typeof(string), typeof(string))] -[CodeGenSuppress("GetRuns", typeof(string), typeof(int?), typeof(ListOrder?), typeof(string), 
typeof(string))] +[CodeGenSuppress("GetRunsAsync", typeof(string), typeof(int?), typeof(RunCollectionOrder?), typeof(string), typeof(string))] +[CodeGenSuppress("GetRuns", typeof(string), typeof(int?), typeof(RunCollectionOrder?), typeof(string), typeof(string))] [CodeGenSuppress("GetRunAsync", typeof(string), typeof(string))] [CodeGenSuppress("GetRun", typeof(string), typeof(string))] [CodeGenSuppress("ModifyRunAsync", typeof(string), typeof(string), typeof(RunModificationOptions))] @@ -20,49 +20,52 @@ namespace OpenAI.Assistants; [CodeGenSuppress("CancelRun", typeof(string), typeof(string))] [CodeGenSuppress("SubmitToolOutputsToRunAsync", typeof(string), typeof(string), typeof(InternalSubmitToolOutputsRunRequest))] [CodeGenSuppress("SubmitToolOutputsToRun", typeof(string), typeof(string), typeof(InternalSubmitToolOutputsRunRequest))] -[CodeGenSuppress("GetRunStepsAsync", typeof(string), typeof(string), typeof(int?), typeof(ListOrder?), typeof(string), typeof(string))] -[CodeGenSuppress("GetRunSteps", typeof(string), typeof(string), typeof(int?), typeof(ListOrder?), typeof(string), typeof(string))] +[CodeGenSuppress("GetRunStepsAsync", typeof(string), typeof(string), typeof(int?), typeof(RunStepCollectionOrder?), typeof(string), typeof(string))] +[CodeGenSuppress("GetRunSteps", typeof(string), typeof(string), typeof(int?), typeof(RunStepCollectionOrder?), typeof(string), typeof(string))] [CodeGenSuppress("GetRunStepAsync", typeof(string), typeof(string), typeof(string))] [CodeGenSuppress("GetRunStep", typeof(string), typeof(string), typeof(string))] internal partial class InternalAssistantRunClient { - /// - /// Initializes a new instance of that will use an API key when authenticating. - /// - /// The API key used to authenticate with the service endpoint. - /// Additional options to customize the client. - /// The provided was null. 
- public InternalAssistantRunClient(ApiKeyCredential credential, OpenAIClientOptions options = default) - : this( - OpenAIClient.CreatePipeline(OpenAIClient.GetApiKey(credential, requireExplicitCredential: true), options), - OpenAIClient.GetEndpoint(options), - options) - { } + // CUSTOM: + // - Used a custom pipeline. + // - Demoted the endpoint parameter to be a property in the options class. + /// Initializes a new instance of . + /// The API key to authenticate with the service. + /// is null. + public InternalAssistantRunClient(ApiKeyCredential credential) : this(credential, new OpenAIClientOptions()) + { + } + + // CUSTOM: + // - Used a custom pipeline. + // - Demoted the endpoint parameter to be a property in the options class. + /// Initializes a new instance of . + /// The API key to authenticate with the service. + /// The options to configure the client. + /// is null. + public InternalAssistantRunClient(ApiKeyCredential credential, OpenAIClientOptions options) + { + Argument.AssertNotNull(credential, nameof(credential)); + options ??= new OpenAIClientOptions(); - /// - /// Initializes a new instance of that will use an API key from the OPENAI_API_KEY - /// environment variable when authenticating. - /// - /// - /// To provide an explicit credential instead of using the environment variable, use an alternate constructor like - /// . - /// - /// Additional options to customize the client. - /// The OPENAI_API_KEY environment variable was not found. - public InternalAssistantRunClient(OpenAIClientOptions options = default) - : this( - OpenAIClient.CreatePipeline(OpenAIClient.GetApiKey(), options), - OpenAIClient.GetEndpoint(options), - options) - { } + _pipeline = OpenAIClient.CreatePipeline(credential, options); + _endpoint = OpenAIClient.GetEndpoint(options); + } - /// Initializes a new instance of . - /// The HTTP pipeline for sending and receiving REST requests and responses. - /// OpenAI Endpoint. - /// Client-wide options to propagate settings from. 
- protected internal InternalAssistantRunClient(ClientPipeline pipeline, Uri endpoint, OpenAIClientOptions options) + // CUSTOM: + // - Used a custom pipeline. + // - Demoted the endpoint parameter to be a property in the options class. + // - Made protected. + /// Initializes a new instance of . + /// The HTTP pipeline to send and receive REST requests and responses. + /// The options to configure the client. + /// is null. + protected internal InternalAssistantRunClient(ClientPipeline pipeline, OpenAIClientOptions options) { + Argument.AssertNotNull(pipeline, nameof(pipeline)); + options ??= new OpenAIClientOptions(); + _pipeline = pipeline; - _endpoint = endpoint; + _endpoint = OpenAIClient.GetEndpoint(options); } } diff --git a/.dotnet/src/Custom/Assistants/Internal/InternalAssistantThreadClient.cs b/.dotnet/src/Custom/Assistants/Internal/InternalAssistantThreadClient.cs index e99afc049..295a8a492 100644 --- a/.dotnet/src/Custom/Assistants/Internal/InternalAssistantThreadClient.cs +++ b/.dotnet/src/Custom/Assistants/Internal/InternalAssistantThreadClient.cs @@ -16,43 +16,46 @@ namespace OpenAI.Assistants; [CodeGenSuppress("DeleteThread", typeof(string))] internal partial class InternalAssistantThreadClient { - /// - /// Initializes a new instance of that will use an API key when authenticating. - /// - /// The API key used to authenticate with the service endpoint. - /// Additional options to customize the client. - /// The provided was null. - public InternalAssistantThreadClient(ApiKeyCredential credential, OpenAIClientOptions options = default) - : this( - OpenAIClient.CreatePipeline(OpenAIClient.GetApiKey(credential, requireExplicitCredential: true), options), - OpenAIClient.GetEndpoint(options), - options) - { } + // CUSTOM: + // - Used a custom pipeline. + // - Demoted the endpoint parameter to be a property in the options class. + /// Initializes a new instance of . + /// The API key to authenticate with the service. + /// is null. 
+ public InternalAssistantThreadClient(ApiKeyCredential credential) : this(credential, new OpenAIClientOptions()) + { + } + + // CUSTOM: + // - Used a custom pipeline. + // - Demoted the endpoint parameter to be a property in the options class. + /// Initializes a new instance of . + /// The API key to authenticate with the service. + /// The options to configure the client. + /// is null. + public InternalAssistantThreadClient(ApiKeyCredential credential, OpenAIClientOptions options) + { + Argument.AssertNotNull(credential, nameof(credential)); + options ??= new OpenAIClientOptions(); - /// - /// Initializes a new instance of that will use an API key from the OPENAI_API_KEY - /// environment variable when authenticating. - /// - /// - /// To provide an explicit credential instead of using the environment variable, use an alternate constructor like - /// . - /// - /// Additional options to customize the client. - /// The OPENAI_API_KEY environment variable was not found. - public InternalAssistantThreadClient(OpenAIClientOptions options = default) - : this( - OpenAIClient.CreatePipeline(OpenAIClient.GetApiKey(), options), - OpenAIClient.GetEndpoint(options), - options) - { } + _pipeline = OpenAIClient.CreatePipeline(credential, options); + _endpoint = OpenAIClient.GetEndpoint(options); + } - /// Initializes a new instance of . - /// The HTTP pipeline for sending and receiving REST requests and responses. - /// OpenAI Endpoint. - /// Client-wide options to propagate settings from. - protected internal InternalAssistantThreadClient(ClientPipeline pipeline, Uri endpoint, OpenAIClientOptions options) + // CUSTOM: + // - Used a custom pipeline. + // - Demoted the endpoint parameter to be a property in the options class. + // - Made protected. + /// Initializes a new instance of . + /// The HTTP pipeline to send and receive REST requests and responses. + /// The options to configure the client. + /// is null. 
+ protected internal InternalAssistantThreadClient(ClientPipeline pipeline, OpenAIClientOptions options) { + Argument.AssertNotNull(pipeline, nameof(pipeline)); + options ??= new OpenAIClientOptions(); + _pipeline = pipeline; - _endpoint = endpoint; + _endpoint = OpenAIClient.GetEndpoint(options); } } diff --git a/.dotnet/src/Custom/Assistants/Internal/InternalMessageImageFileContent.Serialization.cs b/.dotnet/src/Custom/Assistants/Internal/InternalMessageImageFileContent.Serialization.cs index c6dd038a1..1cd98176f 100644 --- a/.dotnet/src/Custom/Assistants/Internal/InternalMessageImageFileContent.Serialization.cs +++ b/.dotnet/src/Custom/Assistants/Internal/InternalMessageImageFileContent.Serialization.cs @@ -1,7 +1,4 @@ -using System; -using System.ClientModel; using System.ClientModel.Primitives; -using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Assistants; @@ -15,7 +12,7 @@ void IJsonModel.Write(Utf8JsonWriter writer, Mo internal static void SerializeInternalMessageImageFileContent(InternalMessageImageFileContent instance, Utf8JsonWriter writer, ModelReaderWriterOptions options) => instance.WriteCore(writer, options); - protected override void WriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) + internal override void WriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) { writer.WriteStartObject(); writer.WritePropertyName("type"u8); diff --git a/.dotnet/src/Custom/Assistants/Internal/InternalMessageImageFileContent.cs b/.dotnet/src/Custom/Assistants/Internal/InternalMessageImageFileContent.cs index 222a44dc8..1aac69dd4 100644 --- a/.dotnet/src/Custom/Assistants/Internal/InternalMessageImageFileContent.cs +++ b/.dotnet/src/Custom/Assistants/Internal/InternalMessageImageFileContent.cs @@ -28,7 +28,7 @@ internal partial class InternalMessageImageFileContent /// Initializes a new instance of . internal InternalMessageImageFileContent(string imageFileId, MessageImageDetail? 
detail = null) : this(new InternalMessageContentItemFileObjectImageFile(imageFileId, detail?.ToSerialString(), null)) - {} + { } /// Initializes a new instance of . /// diff --git a/.dotnet/src/Custom/Assistants/Internal/InternalMessageImageUrlContent.Serialization.cs b/.dotnet/src/Custom/Assistants/Internal/InternalMessageImageUrlContent.Serialization.cs index 48d0f7751..2a4eb69cc 100644 --- a/.dotnet/src/Custom/Assistants/Internal/InternalMessageImageUrlContent.Serialization.cs +++ b/.dotnet/src/Custom/Assistants/Internal/InternalMessageImageUrlContent.Serialization.cs @@ -1,7 +1,4 @@ -using System; -using System.ClientModel; using System.ClientModel.Primitives; -using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Assistants; @@ -15,7 +12,7 @@ void IJsonModel.Write(Utf8JsonWriter writer, Mod internal static void SerializeInternalMessageImageUrlContent(InternalMessageImageUrlContent instance, Utf8JsonWriter writer, ModelReaderWriterOptions options) => instance.WriteCore(writer, options); - protected override void WriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) + internal override void WriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) { writer.WriteStartObject(); writer.WritePropertyName("type"u8); diff --git a/.dotnet/src/Custom/Assistants/Internal/InternalMessageImageUrlContent.cs b/.dotnet/src/Custom/Assistants/Internal/InternalMessageImageUrlContent.cs index f007fec8c..76388449d 100644 --- a/.dotnet/src/Custom/Assistants/Internal/InternalMessageImageUrlContent.cs +++ b/.dotnet/src/Custom/Assistants/Internal/InternalMessageImageUrlContent.cs @@ -28,7 +28,7 @@ internal partial class InternalMessageImageUrlContent /// Initializes a new instance of . internal InternalMessageImageUrlContent(Uri url, MessageImageDetail? detail = null) : this(new InternalMessageContentImageUrlObjectImageUrl(url, detail?.ToSerialString(), null)) - {} + { } /// Initializes a new instance of . 
/// diff --git a/.dotnet/src/Custom/Assistants/Internal/InternalMessageRefusalContent.Serialization.cs b/.dotnet/src/Custom/Assistants/Internal/InternalMessageRefusalContent.Serialization.cs new file mode 100644 index 000000000..72ac79768 --- /dev/null +++ b/.dotnet/src/Custom/Assistants/Internal/InternalMessageRefusalContent.Serialization.cs @@ -0,0 +1,24 @@ +using System.ClientModel.Primitives; +using System.Text.Json; + +namespace OpenAI.Assistants; + +[CodeGenSuppress("global::System.ClientModel.Primitives.IJsonModel.Write", typeof(Utf8JsonWriter), typeof(ModelReaderWriterOptions))] +internal partial class InternalMessageRefusalContent : IJsonModel +{ + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + => CustomSerializationHelpers.SerializeInstance(this, SerializeInternalMessageRefusalContent, writer, options); + + internal static void SerializeInternalMessageRefusalContent(InternalMessageRefusalContent instance, Utf8JsonWriter writer, ModelReaderWriterOptions options) + => instance.WriteCore(writer, options); + + internal override void WriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + writer.WriteStartObject(); + writer.WritePropertyName("type"u8); + writer.WriteStringValue(_type); + writer.WritePropertyName("refusal"u8); + writer.WriteStringValue(Refusal); + writer.WriteEndObject(); + } +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Assistants/Internal/InternalMessageRefusalContent.cs b/.dotnet/src/Custom/Assistants/Internal/InternalMessageRefusalContent.cs new file mode 100644 index 000000000..ed0188314 --- /dev/null +++ b/.dotnet/src/Custom/Assistants/Internal/InternalMessageRefusalContent.cs @@ -0,0 +1,20 @@ +using System; + +namespace OpenAI.Assistants; + +/// +/// Represents an item of image URL content within an Assistants API message. +/// +/// +/// Use the method to +/// create an instance of this type. 
+/// +[CodeGenModel("MessageContentRefusalObject")] +internal partial class InternalMessageRefusalContent +{ + [CodeGenMember("Type")] + private string _type = "refusal"; + + [CodeGenMember("Refusal")] + public string InternalRefusal { get; set; } +} diff --git a/.dotnet/src/Custom/Assistants/Internal/InternalRequestMessageTextContent.Serialization.cs b/.dotnet/src/Custom/Assistants/Internal/InternalRequestMessageTextContent.Serialization.cs index 565c34ee4..931aed080 100644 --- a/.dotnet/src/Custom/Assistants/Internal/InternalRequestMessageTextContent.Serialization.cs +++ b/.dotnet/src/Custom/Assistants/Internal/InternalRequestMessageTextContent.Serialization.cs @@ -1,7 +1,4 @@ -using System; -using System.ClientModel; using System.ClientModel.Primitives; -using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Assistants; @@ -15,7 +12,7 @@ void IJsonModel.Write(Utf8JsonWriter writer, internal static void SerializeInternalRequestMessageTextContent(InternalRequestMessageTextContent instance, Utf8JsonWriter writer, ModelReaderWriterOptions options) => instance.WriteCore(writer, options); - protected override void WriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) + internal override void WriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) { writer.WriteStartObject(); writer.WritePropertyName("type"u8); diff --git a/.dotnet/src/Custom/Assistants/Internal/InternalRequiredFunctionToolCall.cs b/.dotnet/src/Custom/Assistants/Internal/InternalRequiredFunctionToolCall.cs index 89ed0f097..4bd70fc9c 100644 --- a/.dotnet/src/Custom/Assistants/Internal/InternalRequiredFunctionToolCall.cs +++ b/.dotnet/src/Custom/Assistants/Internal/InternalRequiredFunctionToolCall.cs @@ -10,7 +10,7 @@ internal partial class InternalRequiredFunctionToolCall : InternalRequiredToolCa // - 'Type' is hidden, as the object discriminator does not carry additional value to the caller in the context // of a strongly-typed object model // - 'Function' is 
hidden and its constituent 'Name' and 'Arguments' members are promoted to direct visibility - + [CodeGenMember("Type")] private readonly object _type; [CodeGenMember("Function")] diff --git a/.dotnet/src/Custom/Assistants/Internal/InternalResponseMessageTextContent.Serialization.cs b/.dotnet/src/Custom/Assistants/Internal/InternalResponseMessageTextContent.Serialization.cs index 5cb19bb15..0b1f5487a 100644 --- a/.dotnet/src/Custom/Assistants/Internal/InternalResponseMessageTextContent.Serialization.cs +++ b/.dotnet/src/Custom/Assistants/Internal/InternalResponseMessageTextContent.Serialization.cs @@ -1,7 +1,4 @@ -using System; -using System.ClientModel; using System.ClientModel.Primitives; -using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Assistants; @@ -15,7 +12,7 @@ void IJsonModel.Write(Utf8JsonWriter writer, internal static void SerializeInternalResponseMessageTextContent(InternalResponseMessageTextContent instance, Utf8JsonWriter writer, ModelReaderWriterOptions options) => instance.WriteCore(writer, options); - protected override void WriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) + internal override void WriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) { writer.WriteStartObject(); writer.WritePropertyName("type"u8); diff --git a/.dotnet/src/Custom/Assistants/Internal/InternalRunStepCodeInterpreterToolCallDetails.cs b/.dotnet/src/Custom/Assistants/Internal/InternalRunStepCodeInterpreterToolCallDetails.cs index 71f873f02..db9e2eb30 100644 --- a/.dotnet/src/Custom/Assistants/Internal/InternalRunStepCodeInterpreterToolCallDetails.cs +++ b/.dotnet/src/Custom/Assistants/Internal/InternalRunStepCodeInterpreterToolCallDetails.cs @@ -7,7 +7,7 @@ internal partial class InternalRunStepCodeInterpreterToolCallDetails { /// public string Input => _codeInterpreter.Input; - + /// public IReadOnlyList Outputs => _codeInterpreter.Outputs; diff --git 
a/.dotnet/src/Custom/Assistants/Internal/Pagination/AssistantsPageEnumerator.cs b/.dotnet/src/Custom/Assistants/Internal/Pagination/AssistantsPageEnumerator.cs index 66019bbb5..7fb974d72 100644 --- a/.dotnet/src/Custom/Assistants/Internal/Pagination/AssistantsPageEnumerator.cs +++ b/.dotnet/src/Custom/Assistants/Internal/Pagination/AssistantsPageEnumerator.cs @@ -22,7 +22,7 @@ internal partial class AssistantsPageEnumerator : PageEnumerator private readonly RequestOptions _options; public virtual ClientPipeline Pipeline => _pipeline; - + public AssistantsPageEnumerator( ClientPipeline pipeline, Uri endpoint, diff --git a/.dotnet/src/Custom/Assistants/Internal/Pagination/MessagesPageEnumerator.cs b/.dotnet/src/Custom/Assistants/Internal/Pagination/MessagesPageEnumerator.cs index 53e09fec5..a5488cc6d 100644 --- a/.dotnet/src/Custom/Assistants/Internal/Pagination/MessagesPageEnumerator.cs +++ b/.dotnet/src/Custom/Assistants/Internal/Pagination/MessagesPageEnumerator.cs @@ -27,7 +27,7 @@ internal partial class MessagesPageEnumerator : PageEnumerator public MessagesPageEnumerator( ClientPipeline pipeline, Uri endpoint, - string threadId, + string threadId, int? limit, string order, string after, string before, RequestOptions options) { diff --git a/.dotnet/src/Custom/Assistants/Internal/Pagination/MessagesPageToken.cs b/.dotnet/src/Custom/Assistants/Internal/Pagination/MessagesPageToken.cs index ff4bfa9fa..974fa40c3 100644 --- a/.dotnet/src/Custom/Assistants/Internal/Pagination/MessagesPageToken.cs +++ b/.dotnet/src/Custom/Assistants/Internal/Pagination/MessagesPageToken.cs @@ -65,7 +65,7 @@ public override BinaryData ToBytes() return BinaryData.FromStream(stream); } - + public MessagesPageToken? GetNextPageToken(bool hasMore, string? 
lastId) { if (!hasMore || lastId is null) diff --git a/.dotnet/src/Custom/Assistants/Internal/Pagination/RunStepsPageToken.cs b/.dotnet/src/Custom/Assistants/Internal/Pagination/RunStepsPageToken.cs index 2579e7442..ad9883d02 100644 --- a/.dotnet/src/Custom/Assistants/Internal/Pagination/RunStepsPageToken.cs +++ b/.dotnet/src/Custom/Assistants/Internal/Pagination/RunStepsPageToken.cs @@ -69,7 +69,7 @@ public override BinaryData ToBytes() return BinaryData.FromStream(stream); } - + public RunStepsPageToken? GetNextPageToken(bool hasMore, string? lastId) { if (!hasMore || lastId is null) diff --git a/.dotnet/src/Custom/Assistants/Internal/Pagination/RunsPageToken.cs b/.dotnet/src/Custom/Assistants/Internal/Pagination/RunsPageToken.cs index 28b27f475..06d6878f6 100644 --- a/.dotnet/src/Custom/Assistants/Internal/Pagination/RunsPageToken.cs +++ b/.dotnet/src/Custom/Assistants/Internal/Pagination/RunsPageToken.cs @@ -65,7 +65,7 @@ public override BinaryData ToBytes() return BinaryData.FromStream(stream); } - + public RunsPageToken? GetNextPageToken(bool hasMore, string? 
lastId) { if (!hasMore || lastId is null) diff --git a/.dotnet/src/Custom/Assistants/Internal/UnknownAssistantToolDefinition.Serialization.cs b/.dotnet/src/Custom/Assistants/Internal/UnknownAssistantToolDefinition.Serialization.cs index 3a80df55e..26f4e5baa 100644 --- a/.dotnet/src/Custom/Assistants/Internal/UnknownAssistantToolDefinition.Serialization.cs +++ b/.dotnet/src/Custom/Assistants/Internal/UnknownAssistantToolDefinition.Serialization.cs @@ -1,7 +1,4 @@ -using System; -using System.ClientModel; using System.ClientModel.Primitives; -using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Assistants; @@ -15,7 +12,7 @@ void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOp internal static void SerializeUnknownAssistantToolDefinition(UnknownAssistantToolDefinition instance, Utf8JsonWriter writer, ModelReaderWriterOptions options) => instance.WriteCore(writer, options); - protected override void WriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) + internal override void WriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) { writer.WriteStartObject(); writer.WritePropertyName("type"u8); diff --git a/.dotnet/src/Custom/Assistants/MessageCollectionOptions.cs b/.dotnet/src/Custom/Assistants/MessageCollectionOptions.cs index ac4bf2215..3e542b736 100644 --- a/.dotnet/src/Custom/Assistants/MessageCollectionOptions.cs +++ b/.dotnet/src/Custom/Assistants/MessageCollectionOptions.cs @@ -1,33 +1,34 @@ +using System.Diagnostics.CodeAnalysis; + namespace OpenAI.Assistants; -/// -/// Represents addition options available when requesting a collection of instances. -/// +/// The options to configure how objects are retrieved and paginated. +[Experimental("OPENAI001")] public class MessageCollectionOptions { - /// - /// Creates a new instance of . - /// + /// Initializes a new instance of . 
public MessageCollectionOptions() { } - /// - /// The order that results should appear in the list according to - /// their created_at timestamp. + /// + /// A limit on the number of objects to be returned per page. /// - public ListOrder? Order { get; set; } + public int? PageSizeLimit { get; set; } /// - /// The number of values to return in a page result. + /// The order in which to retrieve objects when sorted by their + /// timestamp. /// - public int? PageSize { get; set; } + public MessageCollectionOrder? Order { get; set; } /// - /// The id of the item preceeding the first item in the collection. + /// The used to retrieve the page of objects that come + /// after this one. /// public string AfterId { get; set; } /// - /// The id of the item following the last item in the collection. + /// The used to retrieve the page of objects that come + /// before this one. /// public string BeforeId { get; set; } -} +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Assistants/MessageCollectionOrder.cs b/.dotnet/src/Custom/Assistants/MessageCollectionOrder.cs new file mode 100644 index 000000000..aa8d4444c --- /dev/null +++ b/.dotnet/src/Custom/Assistants/MessageCollectionOrder.cs @@ -0,0 +1,17 @@ +using System.Diagnostics.CodeAnalysis; + +namespace OpenAI.Assistants; + +// CUSTOM: Renamed. +[Experimental("OPENAI001")] +[CodeGenModel("ListMessagesRequestOrder")] +public readonly partial struct MessageCollectionOrder +{ + // CUSTOM: Renamed. + [CodeGenMember("Asc")] + public static MessageCollectionOrder Ascending { get; } = new MessageCollectionOrder(AscendingValue); + + // CUSTOM: Renamed. 
+ [CodeGenMember("Desc")] + public static MessageCollectionOrder Descending { get; } = new MessageCollectionOrder(DescendingValue); +} diff --git a/.dotnet/src/Custom/Assistants/MessageContent.Serialization.cs b/.dotnet/src/Custom/Assistants/MessageContent.Serialization.cs index 77d646504..2743997a0 100644 --- a/.dotnet/src/Custom/Assistants/MessageContent.Serialization.cs +++ b/.dotnet/src/Custom/Assistants/MessageContent.Serialization.cs @@ -12,7 +12,7 @@ void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOp internal static void WriteCore(MessageContent instance, Utf8JsonWriter writer, ModelReaderWriterOptions options) => instance.WriteCore(writer, options); - protected abstract void WriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options); + internal abstract void WriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options); internal static MessageContent DeserializeMessageContent(JsonElement element, ModelReaderWriterOptions options = null) { diff --git a/.dotnet/src/Custom/Assistants/MessageContent.cs b/.dotnet/src/Custom/Assistants/MessageContent.cs index a32b8e982..f8b9840ee 100644 --- a/.dotnet/src/Custom/Assistants/MessageContent.cs +++ b/.dotnet/src/Custom/Assistants/MessageContent.cs @@ -1,8 +1,10 @@ using System; using System.Collections.Generic; +using System.Diagnostics.CodeAnalysis; namespace OpenAI.Assistants; +[Experimental("OPENAI001")] [CodeGenModel("MessageContent")] public abstract partial class MessageContent { @@ -45,11 +47,13 @@ public static MessageContent FromText(string text) public string Text => AsInternalRequestText?.InternalText ?? AsInternalResponseText?.InternalText; /// public IReadOnlyList TextAnnotations => AsInternalResponseText?.InternalAnnotations ?? 
[]; + public string Refusal => AsRefusal?.InternalRefusal; private InternalMessageImageFileContent AsInternalImageFile => this as InternalMessageImageFileContent; private InternalMessageImageUrlContent AsInternalImageUrl => this as InternalMessageImageUrlContent; private InternalResponseMessageTextContent AsInternalResponseText => this as InternalResponseMessageTextContent; private InternalRequestMessageTextContent AsInternalRequestText => this as InternalRequestMessageTextContent; + private InternalMessageRefusalContent AsRefusal => this as InternalMessageRefusalContent; /// /// The implicit conversion operator that infers an equivalent diff --git a/.dotnet/src/Custom/Assistants/MessageCreationAttachment.cs b/.dotnet/src/Custom/Assistants/MessageCreationAttachment.cs index 46827002a..d3409532f 100644 --- a/.dotnet/src/Custom/Assistants/MessageCreationAttachment.cs +++ b/.dotnet/src/Custom/Assistants/MessageCreationAttachment.cs @@ -1,9 +1,11 @@ using System.ClientModel.Primitives; using System.Collections.Generic; +using System.Diagnostics.CodeAnalysis; using System.Text.Json; namespace OpenAI.Assistants; +[Experimental("OPENAI001")] [CodeGenModel("CreateMessageRequestAttachment")] [CodeGenSerialization(nameof(Tools), "tools", SerializationValueHook = nameof(SerializeTools), DeserializationValueHook = nameof(DeserializeTools))] public partial class MessageCreationAttachment @@ -42,4 +44,4 @@ private static void DeserializeTools(JsonProperty property, ref IReadOnlyList /// Represents additional options available when creating a new . 
/// +[Experimental("OPENAI001")] [CodeGenModel("CreateMessageRequest")] [CodeGenSuppress("MessageCreationOptions", typeof(MessageRole), typeof(IEnumerable))] -[CodeGenSerialization(nameof(Content), SerializationValueHook=nameof(SerializeContent))] +[CodeGenSerialization(nameof(Content), SerializationValueHook = nameof(SerializeContent))] public partial class MessageCreationOptions { // CUSTOM: role is hidden, as this required property is promoted to a method parameter @@ -32,10 +34,10 @@ public MessageCreationOptions() new ChangeTrackingList(), new ChangeTrackingDictionary(), new ChangeTrackingDictionary()) - {} + { } internal MessageCreationOptions(IEnumerable content) : this() { Content = [.. content]; } -} \ No newline at end of file +} diff --git a/.dotnet/src/Custom/Assistants/MessageImageDetail.Serialization.cs b/.dotnet/src/Custom/Assistants/MessageImageDetail.Serialization.cs index 1aa776a84..1fb868534 100644 --- a/.dotnet/src/Custom/Assistants/MessageImageDetail.Serialization.cs +++ b/.dotnet/src/Custom/Assistants/MessageImageDetail.Serialization.cs @@ -1,6 +1,7 @@ using System; namespace OpenAI.Assistants; + internal static partial class MessageImageDetailExtensions { public static string ToSerialString(this MessageImageDetail value) => value switch diff --git a/.dotnet/src/Custom/Assistants/MessageImageDetail.cs b/.dotnet/src/Custom/Assistants/MessageImageDetail.cs index 17cc7827e..83768cfcd 100644 --- a/.dotnet/src/Custom/Assistants/MessageImageDetail.cs +++ b/.dotnet/src/Custom/Assistants/MessageImageDetail.cs @@ -1,9 +1,12 @@ +using System.Diagnostics.CodeAnalysis; + namespace OpenAI.Assistants; /// /// The available detail settings to use when processing an image. /// These settings balance token consumption and the resolution of evaluation performed. /// +[Experimental("OPENAI001")] public enum MessageImageDetail { /// Default. Allows the model to automatically select detail. 
diff --git a/.dotnet/src/Custom/Assistants/MessageModificationOptions.cs b/.dotnet/src/Custom/Assistants/MessageModificationOptions.cs index 3c43bea30..55cffbb38 100644 --- a/.dotnet/src/Custom/Assistants/MessageModificationOptions.cs +++ b/.dotnet/src/Custom/Assistants/MessageModificationOptions.cs @@ -1,9 +1,12 @@ +using System.Diagnostics.CodeAnalysis; + namespace OpenAI.Assistants; /// /// Represents additional options available when modifying an existing . /// +[Experimental("OPENAI001")] [CodeGenModel("ModifyMessageRequest")] public partial class MessageModificationOptions { -} \ No newline at end of file +} diff --git a/.dotnet/src/Custom/Assistants/MessageRole.cs b/.dotnet/src/Custom/Assistants/MessageRole.cs index 6aae4c418..28dce0718 100644 --- a/.dotnet/src/Custom/Assistants/MessageRole.cs +++ b/.dotnet/src/Custom/Assistants/MessageRole.cs @@ -1,5 +1,8 @@ +using System.Diagnostics.CodeAnalysis; + namespace OpenAI.Assistants; +[Experimental("OPENAI001")] [CodeGenModel("CreateMessageRequestRole")] public enum MessageRole { @@ -14,4 +17,4 @@ public enum MessageRole /// [CodeGenMember("Assistant")] Assistant, -} \ No newline at end of file +} diff --git a/.dotnet/src/Custom/Assistants/RequiredAction.cs b/.dotnet/src/Custom/Assistants/RequiredAction.cs index 3de39cd43..10e3a0d38 100644 --- a/.dotnet/src/Custom/Assistants/RequiredAction.cs +++ b/.dotnet/src/Custom/Assistants/RequiredAction.cs @@ -1,3 +1,5 @@ +using System.Diagnostics.CodeAnalysis; + namespace OpenAI.Assistants; /// @@ -11,6 +13,7 @@ namespace OpenAI.Assistants; /// /// /// +[Experimental("OPENAI001")] public abstract partial class RequiredAction { /// diff --git a/.dotnet/src/Custom/Assistants/RunCollectionOptions.cs b/.dotnet/src/Custom/Assistants/RunCollectionOptions.cs index f64482328..078a3070a 100644 --- a/.dotnet/src/Custom/Assistants/RunCollectionOptions.cs +++ b/.dotnet/src/Custom/Assistants/RunCollectionOptions.cs @@ -1,33 +1,34 @@ +using System.Diagnostics.CodeAnalysis; + namespace 
OpenAI.Assistants; -/// -/// Represents addition options available when requesting a collection of instances. -/// +/// The options to configure how objects are retrieved and paginated. +[Experimental("OPENAI001")] public class RunCollectionOptions { - /// - /// Creates a new instance of . - /// + /// Initializes a new instance of . public RunCollectionOptions() { } - /// - /// The order that results should appear in the list according to - /// their created_at timestamp. + /// + /// A limit on the number of objects to be returned per page. /// - public ListOrder? Order { get; set; } + public int? PageSizeLimit { get; set; } /// - /// The number of values to return in a page result. + /// The order in which to retrieve objects when sorted by their + /// timestamp. /// - public int? PageSize { get; set; } + public RunCollectionOrder? Order { get; set; } /// - /// The id of the item preceeding the first item in the collection. + /// The used to retrieve the page of objects that come + /// after this one. /// public string AfterId { get; set; } /// - /// The id of the item following the last item in the collection. + /// The used to retrieve the page of objects that come + /// before this one. /// public string BeforeId { get; set; } } diff --git a/.dotnet/src/Custom/Assistants/RunCollectionOrder.cs b/.dotnet/src/Custom/Assistants/RunCollectionOrder.cs new file mode 100644 index 000000000..db99d10b9 --- /dev/null +++ b/.dotnet/src/Custom/Assistants/RunCollectionOrder.cs @@ -0,0 +1,17 @@ +using System.Diagnostics.CodeAnalysis; + +namespace OpenAI.Assistants; + +// CUSTOM: Renamed. +[Experimental("OPENAI001")] +[CodeGenModel("ListRunsRequestOrder")] +public readonly partial struct RunCollectionOrder +{ + // CUSTOM: Renamed. + [CodeGenMember("Asc")] + public static RunCollectionOrder Ascending { get; } = new RunCollectionOrder(AscendingValue); + + // CUSTOM: Renamed. 
+ [CodeGenMember("Desc")] + public static RunCollectionOrder Descending { get; } = new RunCollectionOrder(DescendingValue); +} diff --git a/.dotnet/src/Custom/Assistants/RunCreationOptions.cs b/.dotnet/src/Custom/Assistants/RunCreationOptions.cs index bcc45ee2f..f4d242f20 100644 --- a/.dotnet/src/Custom/Assistants/RunCreationOptions.cs +++ b/.dotnet/src/Custom/Assistants/RunCreationOptions.cs @@ -1,5 +1,6 @@ using System.ClientModel.Primitives; using System.Collections.Generic; +using System.Diagnostics.CodeAnalysis; using System.Linq; using System.Text.Json; @@ -8,6 +9,7 @@ namespace OpenAI.Assistants; /// /// Represents additional options available when creating a new . /// +[Experimental("OPENAI001")] [CodeGenModel("CreateRunRequest")] [CodeGenSuppress("RunCreationOptions", typeof(string))] [CodeGenSerialization(nameof(ToolConstraint), "tool_choice", SerializationValueHook = nameof(SerializeToolConstraint))] @@ -135,4 +137,4 @@ public RunCreationOptions() private void SerializeToolConstraint(Utf8JsonWriter writer, ModelReaderWriterOptions options) => writer.WriteObjectValue(ToolConstraint, options); -} \ No newline at end of file +} diff --git a/.dotnet/src/Custom/Assistants/RunModificationOptions.cs b/.dotnet/src/Custom/Assistants/RunModificationOptions.cs index 561074b99..8b8a125b8 100644 --- a/.dotnet/src/Custom/Assistants/RunModificationOptions.cs +++ b/.dotnet/src/Custom/Assistants/RunModificationOptions.cs @@ -1,9 +1,12 @@ +using System.Diagnostics.CodeAnalysis; + namespace OpenAI.Assistants; /// /// Represents additional options available when modifying an existing . 
/// +[Experimental("OPENAI001")] [CodeGenModel("ModifyRunRequest")] public partial class RunModificationOptions { -} \ No newline at end of file +} diff --git a/.dotnet/src/Custom/Assistants/RunStatus.cs b/.dotnet/src/Custom/Assistants/RunStatus.cs index b2b5da90e..6075e7460 100644 --- a/.dotnet/src/Custom/Assistants/RunStatus.cs +++ b/.dotnet/src/Custom/Assistants/RunStatus.cs @@ -1,5 +1,8 @@ +using System.Diagnostics.CodeAnalysis; + namespace OpenAI.Assistants; +[Experimental("OPENAI001")] [CodeGenModel("RunObjectStatus")] public readonly partial struct RunStatus { @@ -17,4 +20,4 @@ public bool IsTerminal || _value == FailedValue || _value == IncompleteValue || _value == CancelledValue; -} \ No newline at end of file +} diff --git a/.dotnet/src/Custom/Assistants/RunStep.cs b/.dotnet/src/Custom/Assistants/RunStep.cs index 061cb3cd8..568e7bc82 100644 --- a/.dotnet/src/Custom/Assistants/RunStep.cs +++ b/.dotnet/src/Custom/Assistants/RunStep.cs @@ -1,5 +1,8 @@ +using System.Diagnostics.CodeAnalysis; + namespace OpenAI.Assistants; +[Experimental("OPENAI001")] [CodeGenModel("RunStepObject")] public partial class RunStep { diff --git a/.dotnet/src/Custom/Assistants/RunStepCollectionOptions.cs b/.dotnet/src/Custom/Assistants/RunStepCollectionOptions.cs index d85097058..95d58793c 100644 --- a/.dotnet/src/Custom/Assistants/RunStepCollectionOptions.cs +++ b/.dotnet/src/Custom/Assistants/RunStepCollectionOptions.cs @@ -1,33 +1,34 @@ +using System.Diagnostics.CodeAnalysis; + namespace OpenAI.Assistants; -/// -/// Represents addition options available when requesting a collection of instances. -/// +/// The options to configure how objects are retrieved and paginated. +[Experimental("OPENAI001")] public class RunStepCollectionOptions { - /// - /// Creates a new instance of . - /// + /// Initializes a new instance of . public RunStepCollectionOptions() { } - /// - /// The order that results should appear in the list according to - /// their created_at timestamp. 
+ /// + /// A limit on the number of objects to be returned per page. /// - public ListOrder? Order { get; set; } + public int? PageSizeLimit { get; set; } /// - /// The number of values to return in a page result. + /// The order in which to retrieve objects when sorted by their + /// timestamp. /// - public int? PageSize { get; set; } + public RunStepCollectionOrder? Order { get; set; } /// - /// The id of the item preceeding the first item in the collection. + /// The used to retrieve the page of objects that come + /// after this one. /// public string AfterId { get; set; } /// - /// The id of the item following the last item in the collection. + /// The used to retrieve the page of objects that come + /// before this one. /// public string BeforeId { get; set; } } diff --git a/.dotnet/src/Custom/Assistants/RunStepCollectionOrder.cs b/.dotnet/src/Custom/Assistants/RunStepCollectionOrder.cs new file mode 100644 index 000000000..e7011053d --- /dev/null +++ b/.dotnet/src/Custom/Assistants/RunStepCollectionOrder.cs @@ -0,0 +1,17 @@ +using System.Diagnostics.CodeAnalysis; + +namespace OpenAI.Assistants; + +// CUSTOM: Renamed. +[Experimental("OPENAI001")] +[CodeGenModel("ListRunStepsRequestOrder")] +public readonly partial struct RunStepCollectionOrder +{ + // CUSTOM: Renamed. + [CodeGenMember("Asc")] + public static RunStepCollectionOrder Ascending { get; } = new RunStepCollectionOrder(AscendingValue); + + // CUSTOM: Renamed. 
+ [CodeGenMember("Desc")] + public static RunStepCollectionOrder Descending { get; } = new RunStepCollectionOrder(DescendingValue); +} diff --git a/.dotnet/src/Custom/Assistants/RunStepDetails.cs b/.dotnet/src/Custom/Assistants/RunStepDetails.cs index 8ade16fc9..e003f0ece 100644 --- a/.dotnet/src/Custom/Assistants/RunStepDetails.cs +++ b/.dotnet/src/Custom/Assistants/RunStepDetails.cs @@ -1,7 +1,9 @@ using System.Collections.Generic; +using System.Diagnostics.CodeAnalysis; namespace OpenAI.Assistants { + [Experimental("OPENAI001")] [CodeGenModel("RunStepObjectStepDetails")] public abstract partial class RunStepDetails { diff --git a/.dotnet/src/Custom/Assistants/RunStepToolCall.cs b/.dotnet/src/Custom/Assistants/RunStepToolCall.cs index 3cb729b12..8fa52a114 100644 --- a/.dotnet/src/Custom/Assistants/RunStepToolCall.cs +++ b/.dotnet/src/Custom/Assistants/RunStepToolCall.cs @@ -1,8 +1,10 @@ using System; using System.Collections.Generic; +using System.Diagnostics.CodeAnalysis; namespace OpenAI.Assistants; +[Experimental("OPENAI001")] [CodeGenModel("RunStepDetailsToolCallsObjectToolCallsObject")] public partial class RunStepToolCall { @@ -31,4 +33,4 @@ private InternalRunStepCodeInterpreterToolCallDetails AsCodeInterpreter => this as InternalRunStepCodeInterpreterToolCallDetails; private InternalRunStepFunctionToolCallDetails AsFunction => this as InternalRunStepFunctionToolCallDetails; private InternalRunStepFileSearchToolCallDetails AsFileSearch => this as InternalRunStepFileSearchToolCallDetails; -} \ No newline at end of file +} diff --git a/.dotnet/src/Custom/Assistants/RunStepToolCallKind.Serialization.cs b/.dotnet/src/Custom/Assistants/RunStepToolCallKind.Serialization.cs index 869a9ad02..5bdf3a347 100644 --- a/.dotnet/src/Custom/Assistants/RunStepToolCallKind.Serialization.cs +++ b/.dotnet/src/Custom/Assistants/RunStepToolCallKind.Serialization.cs @@ -1,6 +1,7 @@ using System; namespace OpenAI.Assistants; + internal static partial class 
RunStepToolCallKindExtensions { public static string ToSerialString(this RunStepToolCallKind value) => value switch diff --git a/.dotnet/src/Custom/Assistants/RunStepToolCallKind.cs b/.dotnet/src/Custom/Assistants/RunStepToolCallKind.cs index f1d89ab29..6e5011fb7 100644 --- a/.dotnet/src/Custom/Assistants/RunStepToolCallKind.cs +++ b/.dotnet/src/Custom/Assistants/RunStepToolCallKind.cs @@ -1,5 +1,8 @@ +using System.Diagnostics.CodeAnalysis; + namespace OpenAI.Assistants; +[Experimental("OPENAI001")] public enum RunStepToolCallKind { Unknown, diff --git a/.dotnet/src/Custom/Assistants/RunTruncationStrategy.cs b/.dotnet/src/Custom/Assistants/RunTruncationStrategy.cs index 324723a0c..d4328f349 100644 --- a/.dotnet/src/Custom/Assistants/RunTruncationStrategy.cs +++ b/.dotnet/src/Custom/Assistants/RunTruncationStrategy.cs @@ -1,9 +1,11 @@ using System; using System.Collections.Generic; +using System.Diagnostics.CodeAnalysis; namespace OpenAI.Assistants { /// Controls for how a thread will be truncated prior to the run. Use this to control the intial context window of the run. 
+ [Experimental("OPENAI001")] [CodeGenModel("TruncationObject")] [CodeGenSuppress(nameof(RunTruncationStrategy), typeof(InternalTruncationObjectType))] public partial class RunTruncationStrategy diff --git a/.dotnet/src/Custom/Assistants/Streaming/AsyncStreamingUpdateCollection.cs b/.dotnet/src/Custom/Assistants/Streaming/AsyncStreamingUpdateCollection.cs index 1a44545a5..1a50d6d4c 100644 --- a/.dotnet/src/Custom/Assistants/Streaming/AsyncStreamingUpdateCollection.cs +++ b/.dotnet/src/Custom/Assistants/Streaming/AsyncStreamingUpdateCollection.cs @@ -53,7 +53,7 @@ private sealed class AsyncStreamingUpdateEnumerator : IAsyncEnumerator> getResultAsync, - AsyncStreamingUpdateCollection enumerable, + AsyncStreamingUpdateCollection enumerable, CancellationToken cancellationToken) { Debug.Assert(getResultAsync is not null); diff --git a/.dotnet/src/Custom/Assistants/Streaming/MessageContentUpdate.cs b/.dotnet/src/Custom/Assistants/Streaming/MessageContentUpdate.cs index 468e27082..579a93af1 100644 --- a/.dotnet/src/Custom/Assistants/Streaming/MessageContentUpdate.cs +++ b/.dotnet/src/Custom/Assistants/Streaming/MessageContentUpdate.cs @@ -1,5 +1,6 @@ using System.ClientModel.Primitives; using System.Collections.Generic; +using System.Diagnostics.CodeAnalysis; using System.Text.Json; namespace OpenAI.Assistants; @@ -12,6 +13,7 @@ namespace OpenAI.Assistants; /// and each content subcomponent, such as instances, even if this information /// arrived in the same response chunk. /// +[Experimental("OPENAI001")] public partial class MessageContentUpdate : StreamingUpdate { /// @@ -21,6 +23,7 @@ public partial class MessageContentUpdate : StreamingUpdate public int MessageIndex => _textContent?.Index ?? _imageFileContent?.Index ?? _imageUrlContent?.Index + ?? _refusalContent?.Index ?? TextAnnotation?.ContentIndex ?? 
0; @@ -42,9 +45,12 @@ public partial class MessageContentUpdate : StreamingUpdate /// public TextAnnotationUpdate TextAnnotation { get; } + public string RefusalUpdate => _refusalContent?.Refusal; + private readonly InternalMessageDeltaContentImageFileObject _imageFileContent; private readonly InternalMessageDeltaContentTextObject _textContent; private readonly InternalMessageDeltaContentImageUrlObject _imageUrlContent; + private readonly InternalMessageDeltaContentRefusalObject _refusalContent; private readonly InternalMessageDeltaObject _delta; internal MessageContentUpdate(InternalMessageDeltaObject delta, InternalMessageDeltaContent content) @@ -54,6 +60,7 @@ internal MessageContentUpdate(InternalMessageDeltaObject delta, InternalMessageD _textContent = content as InternalMessageDeltaContentTextObject; _imageFileContent = content as InternalMessageDeltaContentImageFileObject; _imageUrlContent = content as InternalMessageDeltaContentImageUrlObject; + _refusalContent = content as InternalMessageDeltaContentRefusalObject; } internal MessageContentUpdate(InternalMessageDeltaObject delta, TextAnnotationUpdate annotation) diff --git a/.dotnet/src/Custom/Assistants/Streaming/MessageStatusUpdate.cs b/.dotnet/src/Custom/Assistants/Streaming/MessageStatusUpdate.cs index d6b19cf29..b37fd98ea 100644 --- a/.dotnet/src/Custom/Assistants/Streaming/MessageStatusUpdate.cs +++ b/.dotnet/src/Custom/Assistants/Streaming/MessageStatusUpdate.cs @@ -1,5 +1,6 @@ using System.ClientModel.Primitives; using System.Collections.Generic; +using System.Diagnostics.CodeAnalysis; using System.Text.Json; namespace OpenAI.Assistants; @@ -7,6 +8,7 @@ namespace OpenAI.Assistants; /// /// The update type presented when the status of a message changes. 
/// +[Experimental("OPENAI001")] public class MessageStatusUpdate : StreamingUpdate { internal MessageStatusUpdate(ThreadMessage message, StreamingUpdateReason updateKind) diff --git a/.dotnet/src/Custom/Assistants/Streaming/RequiredActionUpdate.cs b/.dotnet/src/Custom/Assistants/Streaming/RequiredActionUpdate.cs index 2c1899236..bfebfe8b9 100644 --- a/.dotnet/src/Custom/Assistants/Streaming/RequiredActionUpdate.cs +++ b/.dotnet/src/Custom/Assistants/Streaming/RequiredActionUpdate.cs @@ -1,4 +1,5 @@ using System.Collections.Generic; +using System.Diagnostics.CodeAnalysis; using System.Text.Json; namespace OpenAI.Assistants; @@ -11,6 +12,7 @@ namespace OpenAI.Assistants; /// Distinct instances will generated for each required action, meaning that /// parallel function calling will present multiple updates even if the tool calls arrive at the same time. /// +[Experimental("OPENAI001")] public class RequiredActionUpdate : RunUpdate { /// diff --git a/.dotnet/src/Custom/Assistants/Streaming/RunStepDetailsUpdate.cs b/.dotnet/src/Custom/Assistants/Streaming/RunStepDetailsUpdate.cs index 588d3fd7b..0549f7fe2 100644 --- a/.dotnet/src/Custom/Assistants/Streaming/RunStepDetailsUpdate.cs +++ b/.dotnet/src/Custom/Assistants/Streaming/RunStepDetailsUpdate.cs @@ -1,6 +1,7 @@ using System; using System.ClientModel.Primitives; using System.Collections.Generic; +using System.Diagnostics.CodeAnalysis; using System.Text.Json; namespace OpenAI.Assistants; @@ -8,6 +9,7 @@ namespace OpenAI.Assistants; /// /// The update type presented when run step details, including tool call progress, have changed. 
/// +[Experimental("OPENAI001")] public class RunStepDetailsUpdate : StreamingUpdate { internal readonly InternalRunStepDelta _delta; @@ -18,7 +20,7 @@ public class RunStepDetailsUpdate : StreamingUpdate private readonly InternalRunStepDeltaStepDetailsToolCallsFunctionObject _asFunctionCall; /// - public string StepId => _delta?.Id; + public string StepId => _delta?.Id; /// public string CreatedMessageId => _asMessageCreation?.MessageCreation?.MessageId; diff --git a/.dotnet/src/Custom/Assistants/Streaming/RunStepDetailsUpdateCodeInterpreterOutput.cs b/.dotnet/src/Custom/Assistants/Streaming/RunStepDetailsUpdateCodeInterpreterOutput.cs index c24ea9dd4..77c041f15 100644 --- a/.dotnet/src/Custom/Assistants/Streaming/RunStepDetailsUpdateCodeInterpreterOutput.cs +++ b/.dotnet/src/Custom/Assistants/Streaming/RunStepDetailsUpdateCodeInterpreterOutput.cs @@ -1,5 +1,8 @@ +using System.Diagnostics.CodeAnalysis; + namespace OpenAI.Assistants; +[Experimental("OPENAI001")] [CodeGenModel("RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreterOutputsObject")] public partial class RunStepUpdateCodeInterpreterOutput { @@ -16,4 +19,4 @@ private InternalRunStepDeltaStepDetailsToolCallsCodeOutputLogsObject AsLogs => this as InternalRunStepDeltaStepDetailsToolCallsCodeOutputLogsObject; private InternalRunStepDeltaStepDetailsToolCallsCodeOutputImageObject AsImage => this as InternalRunStepDeltaStepDetailsToolCallsCodeOutputImageObject; -} \ No newline at end of file +} diff --git a/.dotnet/src/Custom/Assistants/Streaming/RunStepUpdate.cs b/.dotnet/src/Custom/Assistants/Streaming/RunStepUpdate.cs index c34345a8e..80e20766a 100644 --- a/.dotnet/src/Custom/Assistants/Streaming/RunStepUpdate.cs +++ b/.dotnet/src/Custom/Assistants/Streaming/RunStepUpdate.cs @@ -1,5 +1,6 @@ using System.ClientModel.Primitives; using System.Collections.Generic; +using System.Diagnostics.CodeAnalysis; using System.Text.Json; namespace OpenAI.Assistants; @@ -7,6 +8,7 @@ namespace OpenAI.Assistants; /// /// 
The update type presented when the status of a run step changes. /// +[Experimental("OPENAI001")] public class RunStepUpdate : StreamingUpdate { internal RunStepUpdate(RunStep runStep, StreamingUpdateReason updateKind) diff --git a/.dotnet/src/Custom/Assistants/Streaming/RunUpdate.cs b/.dotnet/src/Custom/Assistants/Streaming/RunUpdate.cs index 77df3f678..c94bc01b5 100644 --- a/.dotnet/src/Custom/Assistants/Streaming/RunUpdate.cs +++ b/.dotnet/src/Custom/Assistants/Streaming/RunUpdate.cs @@ -1,5 +1,6 @@ using System.ClientModel.Primitives; using System.Collections.Generic; +using System.Diagnostics.CodeAnalysis; using System.Text.Json; namespace OpenAI.Assistants; @@ -7,6 +8,7 @@ namespace OpenAI.Assistants; /// /// The update type presented when the status of a has changed. /// +[Experimental("OPENAI001")] public class RunUpdate : StreamingUpdate { internal RunUpdate(ThreadRun run, StreamingUpdateReason updateKind) : base(run, updateKind) diff --git a/.dotnet/src/Custom/Assistants/Streaming/StreamingUpdate.cs b/.dotnet/src/Custom/Assistants/Streaming/StreamingUpdate.cs index 7940993d3..9f6a362b8 100644 --- a/.dotnet/src/Custom/Assistants/Streaming/StreamingUpdate.cs +++ b/.dotnet/src/Custom/Assistants/Streaming/StreamingUpdate.cs @@ -1,4 +1,5 @@ using System.Collections.Generic; +using System.Diagnostics.CodeAnalysis; using System.Net.ServerSentEvents; using System.Text.Json; @@ -21,6 +22,7 @@ namespace OpenAI.Assistants; /// For threads: /// /// +[Experimental("OPENAI001")] public abstract partial class StreamingUpdate { /// @@ -79,6 +81,7 @@ or StreamingUpdateReason.MessageCompleted /// Represents a single item of streamed data that encapsulates an underlying response value type. /// /// The response value type of the "delta" payload. 
+[Experimental("OPENAI001")] public partial class StreamingUpdate : StreamingUpdate where T : class { diff --git a/.dotnet/src/Custom/Assistants/Streaming/StreamingUpdateCollection.cs b/.dotnet/src/Custom/Assistants/Streaming/StreamingUpdateCollection.cs index d0099d995..cad7ca61d 100644 --- a/.dotnet/src/Custom/Assistants/Streaming/StreamingUpdateCollection.cs +++ b/.dotnet/src/Custom/Assistants/Streaming/StreamingUpdateCollection.cs @@ -49,7 +49,7 @@ private sealed class StreamingUpdateEnumerator : IEnumerator private StreamingUpdate? _current; private bool _started; - public StreamingUpdateEnumerator(Func getResult, + public StreamingUpdateEnumerator(Func getResult, StreamingUpdateCollection enumerable) { Debug.Assert(getResult is not null); diff --git a/.dotnet/src/Custom/Assistants/Streaming/StreamingUpdateReason.cs b/.dotnet/src/Custom/Assistants/Streaming/StreamingUpdateReason.cs index e23673342..40dfd76d6 100644 --- a/.dotnet/src/Custom/Assistants/Streaming/StreamingUpdateReason.cs +++ b/.dotnet/src/Custom/Assistants/Streaming/StreamingUpdateReason.cs @@ -1,3 +1,5 @@ +using System.Diagnostics.CodeAnalysis; + namespace OpenAI.Assistants; /// @@ -5,6 +7,7 @@ namespace OpenAI.Assistants; /// expected downcast data type of the as well as to the expected data present in the /// payload. 
/// +[Experimental("OPENAI001")] public enum StreamingUpdateReason { /// diff --git a/.dotnet/src/Custom/Assistants/Streaming/TextAnnotationUpdate.cs b/.dotnet/src/Custom/Assistants/Streaming/TextAnnotationUpdate.cs index e7abb4030..caab5bf29 100644 --- a/.dotnet/src/Custom/Assistants/Streaming/TextAnnotationUpdate.cs +++ b/.dotnet/src/Custom/Assistants/Streaming/TextAnnotationUpdate.cs @@ -1,7 +1,9 @@ using System; +using System.Diagnostics.CodeAnalysis; namespace OpenAI.Assistants; +[Experimental("OPENAI001")] public class TextAnnotationUpdate { /// diff --git a/.dotnet/src/Custom/Assistants/Streaming/ThreadUpdate.cs b/.dotnet/src/Custom/Assistants/Streaming/ThreadUpdate.cs index c839281ff..636dc3038 100644 --- a/.dotnet/src/Custom/Assistants/Streaming/ThreadUpdate.cs +++ b/.dotnet/src/Custom/Assistants/Streaming/ThreadUpdate.cs @@ -1,6 +1,7 @@ using System; using System.ClientModel.Primitives; using System.Collections.Generic; +using System.Diagnostics.CodeAnalysis; using System.Text.Json; namespace OpenAI.Assistants; @@ -8,6 +9,7 @@ namespace OpenAI.Assistants; /// /// The update type presented when a streamed event indicates a thread was created. 
/// +[Experimental("OPENAI001")] public class ThreadUpdate : StreamingUpdate { /// diff --git a/.dotnet/src/Custom/Assistants/TextAnnotation.cs b/.dotnet/src/Custom/Assistants/TextAnnotation.cs index 2a4652b54..3a2980838 100644 --- a/.dotnet/src/Custom/Assistants/TextAnnotation.cs +++ b/.dotnet/src/Custom/Assistants/TextAnnotation.cs @@ -1,7 +1,9 @@ using System; +using System.Diagnostics.CodeAnalysis; namespace OpenAI.Assistants; +[Experimental("OPENAI001")] public class TextAnnotation { internal readonly InternalMessageContentTextObjectAnnotation _internalAnnotation; diff --git a/.dotnet/src/Custom/Assistants/ThreadCreationOptions.cs b/.dotnet/src/Custom/Assistants/ThreadCreationOptions.cs index f0ae040f7..82d02e471 100644 --- a/.dotnet/src/Custom/Assistants/ThreadCreationOptions.cs +++ b/.dotnet/src/Custom/Assistants/ThreadCreationOptions.cs @@ -1,4 +1,5 @@ using System.Collections.Generic; +using System.Diagnostics.CodeAnalysis; using System.Linq; namespace OpenAI.Assistants; @@ -6,6 +7,7 @@ namespace OpenAI.Assistants; /// /// Represents additional options available when creating a new . 
/// +[Experimental("OPENAI001")] [CodeGenModel("CreateThreadRequest")] public partial class ThreadCreationOptions { @@ -48,4 +50,4 @@ private set /// /// public IList InitialMessages { get; } = new ChangeTrackingList(); -} \ No newline at end of file +} diff --git a/.dotnet/src/Custom/Assistants/ThreadInitializationMessage.cs b/.dotnet/src/Custom/Assistants/ThreadInitializationMessage.cs index 60c443977..bcae23ac5 100644 --- a/.dotnet/src/Custom/Assistants/ThreadInitializationMessage.cs +++ b/.dotnet/src/Custom/Assistants/ThreadInitializationMessage.cs @@ -1,7 +1,9 @@ using System.Collections.Generic; +using System.Diagnostics.CodeAnalysis; namespace OpenAI.Assistants; +[Experimental("OPENAI001")] public partial class ThreadInitializationMessage : MessageCreationOptions { /// diff --git a/.dotnet/src/Custom/Assistants/ThreadMessage.cs b/.dotnet/src/Custom/Assistants/ThreadMessage.cs index 1e831a070..401479440 100644 --- a/.dotnet/src/Custom/Assistants/ThreadMessage.cs +++ b/.dotnet/src/Custom/Assistants/ThreadMessage.cs @@ -1,13 +1,15 @@ using System.Collections.Generic; +using System.Diagnostics.CodeAnalysis; namespace OpenAI.Assistants; +[Experimental("OPENAI001")] [CodeGenModel("MessageObject")] public partial class ThreadMessage { // CUSTOM: Made internal. /// The object type, which is always `thread.message`. - [CodeGenMember("Object")] + [CodeGenMember("Object")] internal InternalMessageObjectObject Object { get; } = InternalMessageObjectObject.ThreadMessage; diff --git a/.dotnet/src/Custom/Assistants/ThreadModificationOptions.cs b/.dotnet/src/Custom/Assistants/ThreadModificationOptions.cs index 5216abe20..31c81001c 100644 --- a/.dotnet/src/Custom/Assistants/ThreadModificationOptions.cs +++ b/.dotnet/src/Custom/Assistants/ThreadModificationOptions.cs @@ -1,8 +1,11 @@ +using System.Diagnostics.CodeAnalysis; + namespace OpenAI.Assistants; /// /// Represents additional options available when modifying an existing . 
/// +[Experimental("OPENAI001")] [CodeGenModel("ModifyThreadRequest")] public partial class ThreadModificationOptions { @@ -12,4 +15,4 @@ public partial class ThreadModificationOptions /// [CodeGenMember("ToolResources")] public ToolResources ToolResources { get; set; } -} \ No newline at end of file +} diff --git a/.dotnet/src/Custom/Assistants/ThreadRun.cs b/.dotnet/src/Custom/Assistants/ThreadRun.cs index b2a0f3f43..1eb62ecc5 100644 --- a/.dotnet/src/Custom/Assistants/ThreadRun.cs +++ b/.dotnet/src/Custom/Assistants/ThreadRun.cs @@ -1,5 +1,6 @@ using System; using System.Collections.Generic; +using System.Diagnostics.CodeAnalysis; using System.Linq; namespace OpenAI.Assistants; @@ -8,6 +9,7 @@ namespace OpenAI.Assistants; // - Required actions are abstracted into a forward-compatible, strongly-typed conceptual // hierarchy and formatted into a more intuitive collection for the consumer. +[Experimental("OPENAI001")] [CodeGenModel("RunObject")] public partial class ThreadRun { diff --git a/.dotnet/src/Custom/Assistants/ToolConstraint.Serialization.cs b/.dotnet/src/Custom/Assistants/ToolConstraint.Serialization.cs index 7d2a335e4..ccba3e954 100644 --- a/.dotnet/src/Custom/Assistants/ToolConstraint.Serialization.cs +++ b/.dotnet/src/Custom/Assistants/ToolConstraint.Serialization.cs @@ -94,4 +94,4 @@ internal static ToolConstraint DeserializeToolConstraint(JsonElement element, Mo return new ToolConstraint(plainTextValue, objectType, objectFunctionName, rawDataDictionary); } -} \ No newline at end of file +} diff --git a/.dotnet/src/Custom/Assistants/ToolConstraint.cs b/.dotnet/src/Custom/Assistants/ToolConstraint.cs index dc3de61ac..b333236f8 100644 --- a/.dotnet/src/Custom/Assistants/ToolConstraint.cs +++ b/.dotnet/src/Custom/Assistants/ToolConstraint.cs @@ -1,8 +1,10 @@ using System; using System.Collections.Generic; +using System.Diagnostics.CodeAnalysis; namespace OpenAI.Assistants; +[Experimental("OPENAI001")] [CodeGenModel("AssistantsNamedToolChoice")] public 
partial class ToolConstraint { diff --git a/.dotnet/src/Custom/Assistants/ToolDefinition.Serialization.cs b/.dotnet/src/Custom/Assistants/ToolDefinition.Serialization.cs index 9894fb1ed..32b7626f6 100644 --- a/.dotnet/src/Custom/Assistants/ToolDefinition.Serialization.cs +++ b/.dotnet/src/Custom/Assistants/ToolDefinition.Serialization.cs @@ -1,7 +1,4 @@ -using System; using System.ClientModel.Primitives; -using System.Collections.Generic; -using System.Runtime.CompilerServices; using System.Text.Json; namespace OpenAI.Assistants; @@ -15,5 +12,5 @@ void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOp internal static void WriteCore(ToolDefinition instance, Utf8JsonWriter writer, ModelReaderWriterOptions options) => instance.WriteCore(writer, options); - protected abstract void WriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options); -} \ No newline at end of file + internal abstract void WriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options); +} diff --git a/.dotnet/src/Custom/Assistants/ToolDefinition.cs b/.dotnet/src/Custom/Assistants/ToolDefinition.cs index 92528d30b..cca50418d 100644 --- a/.dotnet/src/Custom/Assistants/ToolDefinition.cs +++ b/.dotnet/src/Custom/Assistants/ToolDefinition.cs @@ -1,10 +1,12 @@ using System; +using System.Diagnostics.CodeAnalysis; namespace OpenAI.Assistants; +[Experimental("OPENAI001")] [CodeGenModel("AssistantToolDefinition")] public abstract partial class ToolDefinition -{ +{ public static CodeInterpreterToolDefinition CreateCodeInterpreter() => new CodeInterpreterToolDefinition(); public static FileSearchToolDefinition CreateFileSearch(int? maxResults = null) @@ -14,8 +16,13 @@ public static FileSearchToolDefinition CreateFileSearch(int? 
maxResults = null) MaxResults = maxResults }; } - public static FunctionToolDefinition CreateFunction(string name, string description = null, BinaryData parameters = null) - => new FunctionToolDefinition(name, description, parameters); + public static FunctionToolDefinition CreateFunction(string name, string description = null, BinaryData parameters = null, bool? strictParameterSchemaEnabled = null) + => new FunctionToolDefinition(name) + { + Description = description, + Parameters = parameters, + StrictParameterSchemaEnabled = strictParameterSchemaEnabled, + }; protected ToolDefinition(string type) { diff --git a/.dotnet/src/Custom/Assistants/ToolOutput.cs b/.dotnet/src/Custom/Assistants/ToolOutput.cs index ad32112d0..fbdc8c911 100644 --- a/.dotnet/src/Custom/Assistants/ToolOutput.cs +++ b/.dotnet/src/Custom/Assistants/ToolOutput.cs @@ -1,5 +1,8 @@ +using System.Diagnostics.CodeAnalysis; + namespace OpenAI.Assistants; +[Experimental("OPENAI001")] [CodeGenModel("SubmitToolOutputsRunRequestToolOutput")] public partial class ToolOutput { @@ -12,5 +15,5 @@ public partial class ToolOutput /// The output from the specified tool. 
public ToolOutput(string toolCallId, string output) : this(toolCallId, output, null) - {} -} \ No newline at end of file + { } +} diff --git a/.dotnet/src/Custom/Assistants/ToolResources.cs b/.dotnet/src/Custom/Assistants/ToolResources.cs index 1043eead2..0cd833266 100644 --- a/.dotnet/src/Custom/Assistants/ToolResources.cs +++ b/.dotnet/src/Custom/Assistants/ToolResources.cs @@ -1,8 +1,10 @@ using System.ClientModel.Primitives; +using System.Diagnostics.CodeAnalysis; using System.Text.Json; namespace OpenAI.Assistants; +[Experimental("OPENAI001")] [CodeGenModel("AssistantObjectToolResources")] [CodeGenSerialization(nameof(FileSearch), "file_search", SerializationValueHook = nameof(SerializeFileSearch))] public partial class ToolResources @@ -13,7 +15,7 @@ public partial class ToolResources public FileSearchToolResources FileSearch { get; set; } public ToolResources() - {} + { } private void SerializeFileSearch(Utf8JsonWriter writer, ModelReaderWriterOptions options) => writer.WriteObjectValue(FileSearch, options); diff --git a/.dotnet/src/Custom/Assistants/VectorStoreCreationHelper.cs b/.dotnet/src/Custom/Assistants/VectorStoreCreationHelper.cs index 91c231ef4..6de65e10a 100644 --- a/.dotnet/src/Custom/Assistants/VectorStoreCreationHelper.cs +++ b/.dotnet/src/Custom/Assistants/VectorStoreCreationHelper.cs @@ -1,20 +1,25 @@ using OpenAI.Files; +using OpenAI.VectorStores; using System.Collections.Generic; +using System.Diagnostics.CodeAnalysis; using System.Linq; -namespace OpenAI.Assistants +namespace OpenAI.Assistants; + +[Experimental("OPENAI001")] +[CodeGenModel("ToolResourcesFileSearchVectorStore")] +public partial class VectorStoreCreationHelper { - [CodeGenModel("CreateAssistantRequestToolResourcesFileSearchVectorStoreCreationHelpersVectorStore")] - public partial class VectorStoreCreationHelper - { - public VectorStoreCreationHelper(IEnumerable fileIds, IDictionary metadata = null) - { - FileIds = fileIds.ToList(); - Metadata = metadata ?? 
new ChangeTrackingDictionary(); - } + [CodeGenMember("ChunkingStrategy")] + public FileChunkingStrategy ChunkingStrategy { get; set; } - public VectorStoreCreationHelper(IEnumerable files, IDictionary metadata = null) - : this(files?.Select(file => file.Id) ?? [], metadata) - {} + public VectorStoreCreationHelper(IEnumerable fileIds, IDictionary metadata = null) + { + FileIds = fileIds.ToList(); + Metadata = metadata ?? new ChangeTrackingDictionary(); } + + public VectorStoreCreationHelper(IEnumerable files, IDictionary metadata = null) + : this(files?.Select(file => file.Id) ?? [], metadata) + { } } diff --git a/.dotnet/src/Custom/Audio/AudioClient.Protocol.cs b/.dotnet/src/Custom/Audio/AudioClient.Protocol.cs index ba0001627..0fd68c698 100644 --- a/.dotnet/src/Custom/Audio/AudioClient.Protocol.cs +++ b/.dotnet/src/Custom/Audio/AudioClient.Protocol.cs @@ -27,7 +27,7 @@ public partial class AudioClient /// Service returned a non-success status code. /// The response returned from the service. [EditorBrowsable(EditorBrowsableState.Never)] - public virtual async Task GenerateSpeechFromTextAsync(BinaryContent content, RequestOptions options = null) + public virtual async Task GenerateSpeechAsync(BinaryContent content, RequestOptions options = null) { Argument.AssertNotNull(content, nameof(content)); @@ -48,7 +48,7 @@ public virtual async Task GenerateSpeechFromTextAsync(BinaryConten /// Service returned a non-success status code. /// The response returned from the service. 
[EditorBrowsable(EditorBrowsableState.Never)] - public virtual ClientResult GenerateSpeechFromText(BinaryContent content, RequestOptions options = null) + public virtual ClientResult GenerateSpeech(BinaryContent content, RequestOptions options = null) { Argument.AssertNotNull(content, nameof(content)); diff --git a/.dotnet/src/Custom/Audio/AudioClient.cs b/.dotnet/src/Custom/Audio/AudioClient.cs index bf5ab688b..3782e7fbd 100644 --- a/.dotnet/src/Custom/Audio/AudioClient.cs +++ b/.dotnet/src/Custom/Audio/AudioClient.cs @@ -7,6 +7,10 @@ namespace OpenAI.Audio; +// CUSTOM: +// - Renamed. +// - Suppressed constructor that takes endpoint parameter; endpoint is now a property in the options class. +// - Suppressed methods that only take the options parameter. /// The service client for OpenAI audio operations. [CodeGenClient("Audio")] [CodeGenSuppress("AudioClient", typeof(ClientPipeline), typeof(ApiKeyCredential), typeof(Uri))] @@ -22,73 +26,74 @@ public partial class AudioClient // CUSTOM: // - Added `model` parameter. - // - Added support for retrieving credential and endpoint from environment variables. - - /// - /// Initializes a new instance of that will use an API key when authenticating. - /// - /// The model name to use for audio operations. - /// The API key used to authenticate with the service endpoint. - /// Additional options to customize the client. - /// The provided was null. - public AudioClient(string model, ApiKeyCredential credential, OpenAIClientOptions options = default) - : this( - OpenAIClient.CreatePipeline(OpenAIClient.GetApiKey(credential, requireExplicitCredential: true), options), - model, - OpenAIClient.GetEndpoint(options), - options) - { } - - /// - /// Initializes a new instance of that will use an API key from the OPENAI_API_KEY - /// environment variable when authenticating. - /// - /// - /// To provide an explicit credential instead of using the environment variable, use an alternate constructor like - /// . 
- /// - /// The model name to use for audio operations. - /// Additional options to customize the client. - /// The OPENAI_API_KEY environment variable was not found. - public AudioClient(string model, OpenAIClientOptions options = default) - : this( - OpenAIClient.CreatePipeline(OpenAIClient.GetApiKey(), options), - model, - OpenAIClient.GetEndpoint(options), - options) - { } + // - Used a custom pipeline. + // - Demoted the endpoint parameter to be a property in the options class. + /// Initializes a new instance of . + /// The name of the model to use in requests sent to the service. To learn more about the available models, see . + /// The API key to authenticate with the service. + /// or is null. + /// is an empty string, and was expected to be non-empty. + public AudioClient(string model, ApiKeyCredential credential) : this(model, credential, new OpenAIClientOptions()) + { + } // CUSTOM: // - Added `model` parameter. + // - Used a custom pipeline. + // - Demoted the endpoint parameter to be a property in the options class. + /// Initializes a new instance of . + /// The name of the model to use in requests sent to the service. To learn more about the available models, see . + /// The API key to authenticate with the service. + /// The options to configure the client. + /// or is null. + /// is an empty string, and was expected to be non-empty. + public AudioClient(string model, ApiKeyCredential credential, OpenAIClientOptions options) + { + Argument.AssertNotNullOrEmpty(model, nameof(model)); + Argument.AssertNotNull(credential, nameof(credential)); + options ??= new OpenAIClientOptions(); + + _model = model; + _pipeline = OpenAIClient.CreatePipeline(credential, options); + _endpoint = OpenAIClient.GetEndpoint(options); + } - /// Initializes a new instance of EmbeddingClient. - /// The HTTP pipeline for sending and receiving REST requests and responses. - /// The HTTP pipeline for sending and receiving REST requests and responses. - /// OpenAI Endpoint. 
- protected internal AudioClient(ClientPipeline pipeline, string model, Uri endpoint, OpenAIClientOptions options) + // CUSTOM: + // - Added `model` parameter. + // - Used a custom pipeline. + // - Demoted the endpoint parameter to be a property in the options class. + // - Made protected. + /// Initializes a new instance of . + /// The HTTP pipeline to send and receive REST requests and responses. + /// The name of the model to use in requests sent to the service. To learn more about the available models, see . + /// The options to configure the client. + /// or is null. + /// is an empty string, and was expected to be non-empty. + protected internal AudioClient(ClientPipeline pipeline, string model, OpenAIClientOptions options) { + Argument.AssertNotNull(pipeline, nameof(pipeline)); Argument.AssertNotNullOrEmpty(model, nameof(model)); + options ??= new OpenAIClientOptions(); - _pipeline = pipeline; _model = model; - _endpoint = endpoint; + _pipeline = pipeline; + _endpoint = OpenAIClient.GetEndpoint(options); } #region GenerateSpeech - /// - /// Generates text-to-speech audio using the specified voice speaking the provided input text. - /// + /// Generates a life-like, spoken audio recording of the input text. /// - /// The default format of the generated audio is unless otherwise specified - /// via . + /// The default format of the generated audio is unless otherwise specified + /// via . /// - /// The text for the voice to speak. - /// The voice to use. - /// Additional options to tailor the text-to-speech request. - /// A token that can be used to cancel this method call. + /// The text to generate audio for. + /// The voice to use in the generated audio. + /// The options to configure the audio generation. + /// A token that can be used to cancel this method call. + /// is null. /// The generated audio in the specified output format. 
- public virtual async Task> GenerateSpeechFromTextAsync(string text, GeneratedSpeechVoice voice, SpeechGenerationOptions options = null, CancellationToken cancellationToken = default) + public virtual async Task> GenerateSpeechAsync(string text, GeneratedSpeechVoice voice, SpeechGenerationOptions options = null, CancellationToken cancellationToken = default) { Argument.AssertNotNull(text, nameof(text)); @@ -96,23 +101,22 @@ public virtual async Task> GenerateSpeechFromTextAsync( CreateSpeechGenerationOptions(text, voice, ref options); using BinaryContent content = options.ToBinaryContent(); - ClientResult result = await GenerateSpeechFromTextAsync(content, cancellationToken.ToRequestOptions()).ConfigureAwait(false); + ClientResult result = await GenerateSpeechAsync(content, cancellationToken.ToRequestOptions()).ConfigureAwait(false); return ClientResult.FromValue(result.GetRawResponse().Content, result.GetRawResponse()); } - /// - /// Generates text-to-speech audio using the specified voice speaking the provided input text. - /// + /// Generates a life-like, spoken audio recording of the input text. /// - /// The default format of the generated audio is unless otherwise specified - /// via . + /// The default format of the generated audio is unless otherwise specified + /// via . /// - /// The text for the voice to speak. - /// The voice to use. - /// Additional options to tailor the text-to-speech request. - /// A token that can be used to cancel this method call. + /// The text to generate audio for. + /// The voice to use in the generated audio. + /// The options to configure the audio generation. + /// A token that can be used to cancel this method call. + /// is null. /// The generated audio in the specified output format. 
- public virtual ClientResult GenerateSpeechFromText(string text, GeneratedSpeechVoice voice, SpeechGenerationOptions options = null, CancellationToken cancellationToken = default) + public virtual ClientResult GenerateSpeech(string text, GeneratedSpeechVoice voice, SpeechGenerationOptions options = null, CancellationToken cancellationToken = default) { Argument.AssertNotNull(text, nameof(text)); @@ -120,7 +124,7 @@ public virtual ClientResult GenerateSpeechFromText(string text, Gene CreateSpeechGenerationOptions(text, voice, ref options); using BinaryContent content = options.ToBinaryContent(); - ClientResult result = GenerateSpeechFromText(content, cancellationToken.ToRequestOptions()); ; + ClientResult result = GenerateSpeech(content, cancellationToken.ToRequestOptions()); ; return ClientResult.FromValue(result.GetRawResponse().Content, result.GetRawResponse()); } @@ -128,20 +132,17 @@ public virtual ClientResult GenerateSpeechFromText(string text, Gene #region TranscribeAudio - /// - /// Transcribes audio from a stream. - /// - /// The audio to transcribe. + /// Transcribes the input audio. + /// The audio stream to transcribe. /// - /// The filename associated with the audio stream. The filename's extension (for example: .mp3) will be used to - /// validate the format of the input audio. The request may fail if the file extension and input audio format do - /// not match. + /// The filename associated with the audio stream. The filename's extension (for example: .mp3) will be used to + /// validate the format of the input audio. The request may fail if the filename's extension and the actual + /// format of the input audio do not match. /// - /// Additional options to tailor the audio transcription request. - /// A token that can be used to cancel this method call. + /// The options to configure the audio transcription. + /// A token that can be used to cancel this method call. /// or is null. /// is an empty string, and was expected to be non-empty. 
- /// The audio transcription. public virtual async Task> TranscribeAudioAsync(Stream audio, string audioFilename, AudioTranscriptionOptions options = null, CancellationToken cancellationToken = default) { Argument.AssertNotNull(audio, nameof(audio)); @@ -155,20 +156,17 @@ public virtual async Task> TranscribeAudioAsync return ClientResult.FromValue(AudioTranscription.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } - /// - /// Transcribes audio from a stream. - /// - /// The audio to transcribe. + /// Transcribes the input audio. + /// The audio stream to transcribe. /// - /// The filename associated with the audio stream. The filename's extension (for example: .mp3) will be used to - /// validate the format of the input audio. The request may fail if the file extension and input audio format do - /// not match. + /// The filename associated with the audio stream. The filename's extension (for example: .mp3) will be used to + /// validate the format of the input audio. The request may fail if the filename's extension and the actual + /// format of the input audio do not match. /// - /// Additional options to tailor the audio transcription request. - /// A token that can be used to cancel this method call. + /// The options to configure the audio transcription. + /// A token that can be used to cancel this method call. /// or is null. /// is an empty string, and was expected to be non-empty. - /// The audio transcription. public virtual ClientResult TranscribeAudio(Stream audio, string audioFilename, AudioTranscriptionOptions options = null, CancellationToken cancellationToken = default) { Argument.AssertNotNull(audio, nameof(audio)); @@ -182,18 +180,15 @@ public virtual ClientResult TranscribeAudio(Stream audio, st return ClientResult.FromValue(AudioTranscription.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } - /// - /// Transcribes audio from a file with a known path. - /// + /// Transcribes the input audio. 
/// - /// The path of the audio file to transcribe. The provided file path's extension (for example: .mp3) will be used - /// to validate the format of the input audio. The request may fail if the file extension and input audio format - /// do not match. + /// The path of the audio file to transcribe. The provided file path's extension (for example: .mp3) will be + /// used to validate the format of the input audio. The request may fail if the file path's extension and the + /// actual format of the input audio do not match. /// - /// Additional options to tailor the audio transcription request. + /// The options to configure the audio transcription. /// is null. /// is an empty string, and was expected to be non-empty. - /// The audio transcription. public virtual async Task> TranscribeAudioAsync(string audioFilePath, AudioTranscriptionOptions options = null) { Argument.AssertNotNullOrEmpty(audioFilePath, nameof(audioFilePath)); @@ -202,18 +197,15 @@ public virtual async Task> TranscribeAudioAsync return await TranscribeAudioAsync(audioStream, audioFilePath, options).ConfigureAwait(false); } - /// - /// Transcribes audio from a file with a known path. - /// + /// Transcribes the input audio. /// - /// The path of the audio file to transcribe. The provided file path's extension (for example: .mp3) will be used - /// to validate the format of the input audio. The request may fail if the file extension and input audio format - /// do not match. + /// The path of the audio file to transcribe. The provided file path's extension (for example: .mp3) will be + /// used to validate the format of the input audio. The request may fail if the file path's extension and the + /// actual format of the input audio do not match. /// - /// Additional options to tailor the audio transcription request. + /// The options to configure the audio transcription. /// is null. /// is an empty string, and was expected to be non-empty. - /// The audio transcription. 
public virtual ClientResult TranscribeAudio(string audioFilePath, AudioTranscriptionOptions options = null) { Argument.AssertNotNullOrEmpty(audioFilePath, nameof(audioFilePath)); @@ -226,18 +218,17 @@ public virtual ClientResult TranscribeAudio(string audioFile #region TranslateAudio - /// Translates audio from a stream into English. - /// The audio to translate. + /// Translates the input audio into English. + /// The audio stream to translate. /// - /// The filename associated with the audio stream. The filename's extension (for example: .mp3) will be used to - /// validate the format of the input audio. The request may fail if the file extension and input audio format do - /// not match. + /// The filename associated with the audio stream. The filename's extension (for example: .mp3) will be used to + /// validate the format of the input audio. The request may fail if the filename's extension and the actual + /// format of the input audio do not match. /// - /// Additional options to tailor the audio translation request. - /// A token that can be used to cancel this method call. + /// The options to configure the audio translation. + /// A token that can be used to cancel this method call. /// or is null. /// is an empty string, and was expected to be non-empty. - /// The audio translation. public virtual async Task> TranslateAudioAsync(Stream audio, string audioFilename, AudioTranslationOptions options = null, CancellationToken cancellationToken = default) { Argument.AssertNotNull(audio, nameof(audio)); @@ -251,18 +242,17 @@ public virtual async Task> TranslateAudioAsync(St return ClientResult.FromValue(AudioTranslation.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } - /// Translates audio from a stream into English. - /// The audio to translate. + /// Translates the input audio into English. + /// The audio stream to translate. /// - /// The filename associated with the audio stream. 
The filename's extension (for example: .mp3) will be used to - /// validate the format of the input audio. The request may fail if the file extension and input audio format do - /// not match. + /// The filename associated with the audio stream. The filename's extension (for example: .mp3) will be used to + /// validate the format of the input audio. The request may fail if the filename's extension and the actual + /// format of the input audio do not match. /// - /// Additional options to tailor the audio translation request. - /// A token that can be used to cancel this method call. + /// The options to configure the audio translation. + /// A token that can be used to cancel this method call. /// or is null. /// is an empty string, and was expected to be non-empty. - /// The audio translation. public virtual ClientResult TranslateAudio(Stream audio, string audioFilename, AudioTranslationOptions options = null, CancellationToken cancellationToken = default) { Argument.AssertNotNull(audio, nameof(audio)); @@ -276,18 +266,15 @@ public virtual ClientResult TranslateAudio(Stream audio, strin return ClientResult.FromValue(AudioTranslation.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } - /// - /// Translates audio from a file with a known path into English. - /// + /// Translates the input audio into English. /// - /// The path of the audio file to translate. The provided file path's extension (for example: .mp3) will be used - /// to validate the format of the input audio. The request may fail if the file extension and input audio format - /// do not match. + /// The path of the audio file to translate. The provided file path's extension (for example: .mp3) will be + /// used to validate the format of the input audio. The request may fail if the file path's extension and the + /// actual format of the input audio do not match. /// - /// Additional options to tailor the audio translation request. + /// The options to configure the audio translation. 
/// was null. /// is an empty string, and was expected to be non-empty. - /// The audio translation. public virtual ClientResult TranslateAudio(string audioFilePath, AudioTranslationOptions options = null) { Argument.AssertNotNullOrEmpty(audioFilePath, nameof(audioFilePath)); @@ -296,18 +283,15 @@ public virtual ClientResult TranslateAudio(string audioFilePat return TranslateAudio(audioStream, audioFilePath, options); } - /// - /// Translates audio from a file with a known path into English. - /// + /// Translates the input audio into English. /// - /// The path of the audio file to translate. The provided file path's extension (for example: .mp3) will be used - /// to validate the format of the input audio. The request may fail if the file extension and input audio format - /// do not match. + /// The path of the audio file to translate. The provided file path's extension (for example: .mp3) will be + /// used to validate the format of the input audio. The request may fail if the file path's extension and the + /// actual format of the input audio do not match. /// - /// Additional options to tailor the audio translation request. + /// The options to configure the audio translation. /// was null. /// is an empty string, and was expected to be non-empty. - /// The audio translation. 
public virtual async Task> TranslateAudioAsync(string audioFilePath, AudioTranslationOptions options = null) { Argument.AssertNotNull(audioFilePath, nameof(audioFilePath)); @@ -317,7 +301,7 @@ public virtual async Task> TranslateAudioAsync(st } #endregion - + private void CreateSpeechGenerationOptions(string text, GeneratedSpeechVoice voice, ref SpeechGenerationOptions options) { options.Input = text; diff --git a/.dotnet/src/Custom/Audio/AudioTimestampGranularities.cs b/.dotnet/src/Custom/Audio/AudioTimestampGranularities.cs index 1deac3f0c..cab2290ae 100644 --- a/.dotnet/src/Custom/Audio/AudioTimestampGranularities.cs +++ b/.dotnet/src/Custom/Audio/AudioTimestampGranularities.cs @@ -12,17 +12,17 @@ public enum AudioTimestampGranularities /// The default value that, when equivalent to a request's flags, specifies no specific audio timestamp granularity /// and defers to the default timestamp behavior. /// - Default = 0, + Default = 0, /// /// The value that, when present in the request's flags, specifies that audio information should include word-level /// timestamp information. /// - Word = 1, + Word = 1, /// /// The value that, when present in the request's flags, specifies that audio information should include /// segment-level timestamp information. /// - Segment = 2, + Segment = 2, } \ No newline at end of file diff --git a/.dotnet/src/Custom/Audio/GeneratedSpeechFormat.cs b/.dotnet/src/Custom/Audio/GeneratedSpeechFormat.cs index 4559bca96..8ef097051 100644 --- a/.dotnet/src/Custom/Audio/GeneratedSpeechFormat.cs +++ b/.dotnet/src/Custom/Audio/GeneratedSpeechFormat.cs @@ -1,32 +1,7 @@ namespace OpenAI.Audio; -/// -/// Represents an audio data format available as either input or output into an audio operation. -/// +/// The audio format in which to generate the speech. [CodeGenModel("CreateSpeechRequestResponseFormat")] -public enum GeneratedSpeechFormat +public readonly partial struct GeneratedSpeechFormat { - /// MP3. 
/// - [CodeGenMember("Mp3")] - Mp3, - - /// Opus. /// - [CodeGenMember("Opus")] - Opus, - - /// AAC (advanced audio coding). /// - [CodeGenMember("Aac")] - Aac, - - /// FLAC (free lossless audio codec). /// - [CodeGenMember("Flac")] - Flac, - - /// WAV. /// - [CodeGenMember("Wav")] - Wav, - - /// PCM (pulse-code modulation). /// - [CodeGenMember("Pcm")] - Pcm, } \ No newline at end of file diff --git a/.dotnet/src/Custom/Audio/GeneratedSpeechVoice.cs b/.dotnet/src/Custom/Audio/GeneratedSpeechVoice.cs index d0aa0653c..cecbea1bb 100644 --- a/.dotnet/src/Custom/Audio/GeneratedSpeechVoice.cs +++ b/.dotnet/src/Custom/Audio/GeneratedSpeechVoice.cs @@ -1,34 +1,11 @@ -using System; - namespace OpenAI.Audio; +// CUSTOM: Renamed. /// -/// Represents the available text-to-speech voices. +/// The voice to use in the generated speech. Previews of the available voices can be found in the +/// text-to-speech guide. /// [CodeGenModel("CreateSpeechRequestVoice")] -public enum GeneratedSpeechVoice +public readonly partial struct GeneratedSpeechVoice { - /// Alloy. - [CodeGenMember("Alloy")] - Alloy, - - /// Echo. - [CodeGenMember("Echo")] - Echo, - - /// Fable. - [CodeGenMember("Fable")] - Fable, - - /// Onyx. - [CodeGenMember("Onyx")] - Onyx, - - /// Nova. - [CodeGenMember("Nova")] - Nova, - - /// Shimmer. - [CodeGenMember("Shimmer")] - Shimmer, } \ No newline at end of file diff --git a/.dotnet/src/Custom/Audio/SpeechGenerationOptions.cs b/.dotnet/src/Custom/Audio/SpeechGenerationOptions.cs index 0edbdc02f..387d3c5b2 100644 --- a/.dotnet/src/Custom/Audio/SpeechGenerationOptions.cs +++ b/.dotnet/src/Custom/Audio/SpeechGenerationOptions.cs @@ -1,9 +1,6 @@ namespace OpenAI.Audio; -/// -/// A representation of additional options available to control the behavior of a text-to-speech audio generation -/// operation. -/// +/// The options to configure text-to-speech audio generation. 
[CodeGenModel("CreateSpeechRequest")] [CodeGenSuppress("SpeechGenerationOptions", typeof(InternalCreateSpeechRequestModel), typeof(string), typeof(GeneratedSpeechVoice))] public partial class SpeechGenerationOptions @@ -11,28 +8,33 @@ public partial class SpeechGenerationOptions // CUSTOM: // - Made internal. The model is specified by the client. // - Added setter. - /// One of the available [TTS models](/docs/models/tts): `tts-1` or `tts-1-hd`. + [CodeGenMember("Model")] internal InternalCreateSpeechRequestModel Model { get; set; } // CUSTOM: // - Made internal. This value comes from a parameter on the client method. // - Added setter. /// The text to generate audio for. The maximum length is 4096 characters. + [CodeGenMember("Input")] internal string Input { get; set; } // CUSTOM: // - Made internal. This value comes from a parameter on the client method. // - Added setter. - /// - /// The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, - /// `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the - /// [Text to speech guide](/docs/guides/text-to-speech/voice-options). - /// - internal GeneratedSpeechVoice Voice { get; set; } + [CodeGenMember("Voice")] + internal GeneratedSpeechVoice Voice { get; set; } // CUSTOM: Made public now that there are no required properties. /// Initializes a new instance of . public SpeechGenerationOptions() { } + + // CUSTOM: Renamed. + /// + /// The speed of the generated audio expressed as a ratio between 0.5 and 2.0. The default is 1.0. + /// + [CodeGenMember("Speed")] + + public float? 
SpeedRatio { get; set; } } \ No newline at end of file diff --git a/.dotnet/src/Custom/Batch/BatchClient.Protocol.cs b/.dotnet/src/Custom/Batch/BatchClient.Protocol.cs index 9f72dfd7b..505c5fff9 100644 --- a/.dotnet/src/Custom/Batch/BatchClient.Protocol.cs +++ b/.dotnet/src/Custom/Batch/BatchClient.Protocol.cs @@ -6,6 +6,8 @@ namespace OpenAI.Batch; +[CodeGenSuppress("RetrieveBatch", typeof(string), typeof(RequestOptions))] +[CodeGenSuppress("RetrieveBatchAsync", typeof(string), typeof(RequestOptions))] public partial class BatchClient { /// @@ -73,10 +75,10 @@ public virtual CreateBatchOperation CreateBatch(BinaryContent content, bool wait /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// Service returned a non-success status code. /// The response returned from the service. - public virtual async Task GetBatchesAsync(string after, int? limit, RequestOptions options) + public virtual IAsyncEnumerable GetBatchesAsync(string after, int? limit, RequestOptions options) { - using PipelineMessage message = CreateGetBatchesRequest(after, limit, options); - return ClientResult.FromResponse(await _pipeline.ProcessMessageAsync(message, options).ConfigureAwait(false)); + BatchesPageEnumerator enumerator = new BatchesPageEnumerator(_pipeline, _endpoint, after, limit, options); + return PageCollectionHelpers.CreateAsync(enumerator); } /// @@ -87,10 +89,10 @@ public virtual async Task GetBatchesAsync(string after, int? limit /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// Service returned a non-success status code. /// The response returned from the service. - public virtual ClientResult GetBatches(string after, int? limit, RequestOptions options) + public virtual IEnumerable GetBatches(string after, int? 
limit, RequestOptions options) { - using PipelineMessage message = CreateGetBatchesRequest(after, limit, options); - return ClientResult.FromResponse(_pipeline.ProcessMessage(message, options)); + BatchesPageEnumerator enumerator = new BatchesPageEnumerator(_pipeline, _endpoint, after, limit, options); + return PageCollectionHelpers.Create(enumerator); } /// diff --git a/.dotnet/src/Custom/Batch/BatchClient.cs b/.dotnet/src/Custom/Batch/BatchClient.cs index 484444dee..6906cccb4 100644 --- a/.dotnet/src/Custom/Batch/BatchClient.cs +++ b/.dotnet/src/Custom/Batch/BatchClient.cs @@ -2,9 +2,16 @@ using System.ClientModel; using System.ClientModel.Primitives; using System.Collections.Generic; +using System.Diagnostics.CodeAnalysis; namespace OpenAI.Batch; +// CUSTOM: +// - Renamed. +// - Suppressed constructor that takes endpoint parameter; endpoint is now a property in the options class. +// - Suppressed convenience methods for now. +/// The service client for OpenAI batch operations. +[Experimental("OPENAI001")] [CodeGenClient("Batches")] [CodeGenSuppress("BatchClient", typeof(ClientPipeline), typeof(ApiKeyCredential), typeof(Uri))] [CodeGenSuppress("CreateBatch", typeof(string), typeof(InternalCreateBatchRequestEndpoint), typeof(InternalBatchCompletionTimeframe), typeof(IDictionary))] @@ -13,8 +20,6 @@ namespace OpenAI.Batch; [CodeGenSuppress("CreateBatchAsync", typeof(BinaryContent), typeof(RequestOptions))] [CodeGenSuppress("RetrieveBatch", typeof(string))] [CodeGenSuppress("RetrieveBatchAsync", typeof(string))] -[CodeGenSuppress("RetrieveBatch", typeof(string), typeof(RequestOptions))] -[CodeGenSuppress("RetrieveBatchAsync", typeof(string), typeof(RequestOptions))] [CodeGenSuppress("CancelBatch", typeof(string))] [CodeGenSuppress("CancelBatchAsync", typeof(string))] [CodeGenSuppress("CancelBatch", typeof(string), typeof(RequestOptions))] @@ -38,31 +43,24 @@ public BatchClient(ApiKeyCredential credential, OpenAIClientOptions options = nu options) { } - /// - /// 
Initializes a new instance of that will use an API key from the OPENAI_API_KEY - /// environment variable when authenticating. - /// - /// - /// To provide an explicit credential instead of using the environment variable, use an alternate constructor like - /// . - /// - /// Additional options to customize the client. - /// The OPENAI_API_KEY environment variable was not found. - public BatchClient(OpenAIClientOptions options = null) - : this( - OpenAIClient.CreatePipeline(OpenAIClient.GetApiKey(), options), - OpenAIClient.GetEndpoint(options), - options) - { } + _pipeline = OpenAIClient.CreatePipeline(credential, options); + _endpoint = OpenAIClient.GetEndpoint(options); + } - /// - /// Initializes a new instance of . - /// - /// The client pipeline to use. - /// The endpoint to use. - protected internal BatchClient(ClientPipeline pipeline, Uri endpoint, OpenAIClientOptions options) + // CUSTOM: + // - Used a custom pipeline. + // - Demoted the endpoint parameter to be a property in the options class. + // - Made protected. + /// Initializes a new instance of . + /// The HTTP pipeline to send and receive REST requests and responses. + /// The options to configure the client. + /// is null. 
+ protected internal BatchClient(ClientPipeline pipeline, OpenAIClientOptions options) { + Argument.AssertNotNull(pipeline, nameof(pipeline)); + options ??= new OpenAIClientOptions(); + _pipeline = pipeline; - _endpoint = endpoint; + _endpoint = OpenAIClient.GetEndpoint(options); } } diff --git a/.dotnet/src/Custom/Batch/Internal/Pagination/BatchesPageEnumerator.cs b/.dotnet/src/Custom/Batch/Internal/Pagination/BatchesPageEnumerator.cs new file mode 100644 index 000000000..1939f1d78 --- /dev/null +++ b/.dotnet/src/Custom/Batch/Internal/Pagination/BatchesPageEnumerator.cs @@ -0,0 +1,108 @@ +using System; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Text.Json; +using System.Threading.Tasks; + +#nullable enable + +namespace OpenAI.Batch; + +internal partial class BatchesPageEnumerator : PageResultEnumerator +{ + private readonly ClientPipeline _pipeline; + private readonly Uri _endpoint; + + private readonly int? _limit; + private readonly RequestOptions _options; + + private string _after; + + public BatchesPageEnumerator( + ClientPipeline pipeline, + Uri endpoint, + string after, int? 
limit, + RequestOptions options) + { + _pipeline = pipeline; + _endpoint = endpoint; + + _after = after; + _limit = limit; + _options = options; + } + + public override async Task GetFirstAsync() + => await GetBatchesAsync(_after, _limit, _options).ConfigureAwait(false); + + public override ClientResult GetFirst() + => GetBatches(_after, _limit, _options); + + public override async Task GetNextAsync(ClientResult result) + { + PipelineResponse response = result.GetRawResponse(); + + using JsonDocument doc = JsonDocument.Parse(response.Content); + _after = doc.RootElement.GetProperty("last_id"u8).GetString()!; + + return await GetBatchesAsync(_after, _limit, _options).ConfigureAwait(false); + } + + public override ClientResult GetNext(ClientResult result) + { + PipelineResponse response = result.GetRawResponse(); + + using JsonDocument doc = JsonDocument.Parse(response.Content); + _after = doc.RootElement.GetProperty("last_id"u8).GetString()!; + + return GetBatches(_after, _limit, _options); + } + + public override bool HasNext(ClientResult result) + { + PipelineResponse response = result.GetRawResponse(); + + using JsonDocument doc = JsonDocument.Parse(response.Content); + bool hasMore = doc.RootElement.GetProperty("has_more"u8).GetBoolean(); + + return hasMore; + } + + internal virtual async Task GetBatchesAsync(string after, int? limit, RequestOptions options) + { + using PipelineMessage message = CreateGetBatchesRequest(after, limit, options); + return ClientResult.FromResponse(await _pipeline.ProcessMessageAsync(message, options).ConfigureAwait(false)); + } + + internal virtual ClientResult GetBatches(string after, int? limit, RequestOptions options) + { + using PipelineMessage message = CreateGetBatchesRequest(after, limit, options); + return ClientResult.FromResponse(_pipeline.ProcessMessage(message, options)); + } + + internal PipelineMessage CreateGetBatchesRequest(string after, int? 
limit, RequestOptions options) + { + var message = _pipeline.CreateMessage(); + message.ResponseClassifier = PipelineMessageClassifier200; + var request = message.Request; + request.Method = "GET"; + var uri = new ClientUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/batches", false); + if (after != null) + { + uri.AppendQuery("after", after, true); + } + if (limit != null) + { + uri.AppendQuery("limit", limit.Value, true); + } + request.Uri = uri.ToUri(); + request.Headers.Set("Accept", "application/json"); + message.Apply(options); + return message; + } + + private static PipelineMessageClassifier? _pipelineMessageClassifier200; + private static PipelineMessageClassifier PipelineMessageClassifier200 => _pipelineMessageClassifier200 ??= PipelineMessageClassifier.Create(stackalloc ushort[] { 200 }); +} diff --git a/.dotnet/src/Custom/Chat/AssistantChatMessage.Serialization.cs b/.dotnet/src/Custom/Chat/AssistantChatMessage.Serialization.cs index 38d9b4e4c..d3a91bba8 100644 --- a/.dotnet/src/Custom/Chat/AssistantChatMessage.Serialization.cs +++ b/.dotnet/src/Custom/Chat/AssistantChatMessage.Serialization.cs @@ -1,6 +1,4 @@ -using System; using System.ClientModel.Primitives; -using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Chat; @@ -14,50 +12,16 @@ void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWr internal static void SerializeAssistantChatMessage(AssistantChatMessage instance, Utf8JsonWriter writer, ModelReaderWriterOptions options) => instance.WriteCore(writer, options); - protected override void WriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) + internal override void WriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) { writer.WriteStartObject(); - if (Optional.IsDefined(ParticipantName)) - { - writer.WritePropertyName("name"u8); - writer.WriteStringValue(ParticipantName); - } - if (Optional.IsCollectionDefined(ToolCalls)) - { - writer.WritePropertyName("tool_calls"u8); - 
writer.WriteStartArray(); - foreach (var item in ToolCalls) - { - writer.WriteObjectValue(item, options); - } - writer.WriteEndArray(); - } - if (Optional.IsDefined(FunctionCall)) - { - if (FunctionCall != null) - { - writer.WritePropertyName("function_call"u8); - writer.WriteObjectValue(FunctionCall, options); - } - else - { - writer.WriteNull("function_call"); - } - } writer.WritePropertyName("role"u8); - writer.WriteStringValue(Role); - if (Optional.IsCollectionDefined(Content)) - { - if (Content[0] != null) - { - writer.WritePropertyName("content"u8); - writer.WriteStringValue(Content[0].Text); - } - else - { - writer.WriteNull("content"); - } - } + writer.WriteStringValue(Role.ToSerialString()); + ChatMessageContentPart.WriteCoreContentPartList(Content, writer, options); + writer.WriteOptionalProperty("refusal"u8, Refusal, options); + writer.WriteOptionalProperty("name"u8, ParticipantName, options); + writer.WriteOptionalCollection("tool_calls"u8, ToolCalls, options); + writer.WriteOptionalProperty("function_call"u8, FunctionCall, options); writer.WriteSerializedAdditionalRawData(SerializedAdditionalRawData, options); writer.WriteEndObject(); } diff --git a/.dotnet/src/Custom/Chat/AssistantChatMessage.cs b/.dotnet/src/Custom/Chat/AssistantChatMessage.cs index 0549fcd2c..46928d9a4 100644 --- a/.dotnet/src/Custom/Chat/AssistantChatMessage.cs +++ b/.dotnet/src/Custom/Chat/AssistantChatMessage.cs @@ -11,16 +11,31 @@ namespace OpenAI.Chat; [CodeGenModel("ChatCompletionRequestAssistantMessage")] public partial class AssistantChatMessage : ChatMessage { - // CUSTOM: Made internal. - /// Initializes a new instance of . - internal AssistantChatMessage() + /// + /// Creates a new instance of using a collection of content items. + /// For assistant messages, this can be one or more of type text or exactly one of type refusal. + /// + /// + /// The collection of content items associated with the message. 
+ /// + public AssistantChatMessage(IEnumerable contentParts) + : base(ChatMessageRole.Assistant, contentParts) { + Argument.AssertNotNullOrEmpty(contentParts, nameof(contentParts)); } - // Assistant messages may present ONE OF: - // - Ordinary text content without tools or a function, in which case the content is required. - // - A list of tool calls, together with optional text content - // - A function call, together with optional text content + /// + /// Creates a new instance of using a collection of content items. + /// For assistant messages, this can be one or more of type text or exactly one of type refusal. + /// + /// + /// The collection of text and image content items associated with the message. + /// + public AssistantChatMessage(params ChatMessageContentPart[] contentParts) + : base(ChatMessageRole.Assistant, contentParts) + { + Argument.AssertNotNullOrEmpty(contentParts, nameof(contentParts)); + } /// /// Creates a new instance of that represents ordinary text content and @@ -28,12 +43,9 @@ internal AssistantChatMessage() /// /// The text content of the message. public AssistantChatMessage(string content) + : base(ChatMessageRole.Assistant, content) { Argument.AssertNotNull(content, nameof(content)); - - Role = "assistant"; - Content = [ChatMessageContentPart.CreateTextMessageContentPart(content)]; - ToolCalls = new ChangeTrackingList(); } /// @@ -43,14 +55,14 @@ public AssistantChatMessage(string content) /// The tool_calls made by the model. /// Optional text content associated with the message. public AssistantChatMessage(IEnumerable toolCalls, string content = null) + : base(ChatMessageRole.Assistant, content) { Argument.AssertNotNull(toolCalls, nameof(toolCalls)); - Role = "assistant"; - Content = (content == null) - ? 
new ChangeTrackingList() - : [ChatMessageContentPart.CreateTextMessageContentPart(content)]; - ToolCalls = new List(toolCalls); + foreach (ChatToolCall toolCall in toolCalls) + { + ToolCalls.Add(toolCall); + } } /// @@ -60,14 +72,10 @@ public AssistantChatMessage(IEnumerable toolCalls, string content /// The function_call made by the model. /// Optional text content associated with the message. public AssistantChatMessage(ChatFunctionCall functionCall, string content = null) + : base(ChatMessageRole.Assistant, content) { Argument.AssertNotNull(functionCall, nameof(functionCall)); - Role = "assistant"; - Content = (content == null) - ? new ChangeTrackingList() - : [ChatMessageContentPart.CreateTextMessageContentPart(content)]; - ToolCalls = new ChangeTrackingList(); FunctionCall = functionCall; } @@ -86,6 +94,7 @@ public AssistantChatMessage(ChatFunctionCall functionCall, string content = null /// The role of the provided chat completion response was not . /// public AssistantChatMessage(ChatCompletion chatCompletion) + : base(ChatMessageRole.Assistant, chatCompletion?.Content) { Argument.AssertNotNull(chatCompletion, nameof(chatCompletion)); @@ -94,10 +103,12 @@ public AssistantChatMessage(ChatCompletion chatCompletion) throw new NotSupportedException($"Cannot instantiate an {nameof(AssistantChatMessage)} from a {nameof(ChatCompletion)} with role: {chatCompletion.Role}."); } - Role = "assistant"; - Content = (IList)chatCompletion.Content; - ToolCalls = (IList)chatCompletion.ToolCalls; + Refusal = chatCompletion.Refusal; FunctionCall = chatCompletion.FunctionCall; + foreach (ChatToolCall toolCall in chatCompletion.ToolCalls ?? []) + { + ToolCalls.Add(toolCall); + } } // CUSTOM: Renamed. @@ -107,4 +118,11 @@ public AssistantChatMessage(ChatCompletion chatCompletion) /// [CodeGenMember("Name")] public string ParticipantName { get; set; } + + // CUSTOM: Common initialization for input model collection property. 
+ [CodeGenMember("ToolCalls")] + public IList ToolCalls { get; } = new ChangeTrackingList(); + + // CUSTOM: Made internal. + internal AssistantChatMessage() { } } \ No newline at end of file diff --git a/.dotnet/src/Custom/Chat/ChatClient.cs b/.dotnet/src/Custom/Chat/ChatClient.cs index 2f366b9a1..164441aa9 100644 --- a/.dotnet/src/Custom/Chat/ChatClient.cs +++ b/.dotnet/src/Custom/Chat/ChatClient.cs @@ -9,7 +9,13 @@ namespace OpenAI.Chat; +// CUSTOM: +// - Renamed. +// - Suppressed constructor that takes endpoint parameter; endpoint is now a property in the options class. +// - Suppressed methods that only take the options parameter. +/// The service client for OpenAI chat operations. [CodeGenClient("Chat")] +[CodeGenSuppress("ChatClient", typeof(ClientPipeline), typeof(ApiKeyCredential), typeof(Uri))] [CodeGenSuppress("CreateChatCompletionAsync", typeof(ChatCompletionOptions))] [CodeGenSuppress("CreateChatCompletion", typeof(ChatCompletionOptions))] public partial class ChatClient @@ -17,63 +23,72 @@ public partial class ChatClient private readonly string _model; private readonly OpenTelemetrySource _telemetry; - /// - /// Initializes a new instance of that will use an API key when authenticating. - /// - /// The model name for chat completions that the client should use. - /// The API key used to authenticate with the service endpoint. - /// Additional options to customize the client. - /// The provided was null. - public ChatClient(string model, ApiKeyCredential credential, OpenAIClientOptions options = null) - : this( - OpenAIClient.CreatePipeline(OpenAIClient.GetApiKey(credential, requireExplicitCredential: true), options), - model, - OpenAIClient.GetEndpoint(options), - options) - { } + // CUSTOM: + // - Added `model` parameter. + // - Used a custom pipeline. + // - Demoted the endpoint parameter to be a property in the options class. + /// Initializes a new instance of . + /// The name of the model to use in requests sent to the service. 
To learn more about the available models, see . + /// The API key to authenticate with the service. + /// or is null. + /// is an empty string, and was expected to be non-empty. + public ChatClient(string model, ApiKeyCredential credential) : this(model, credential, new OpenAIClientOptions()) + { + } - /// - /// Initializes a new instance of that will use an API key from the OPENAI_API_KEY - /// environment variable when authenticating. - /// - /// - /// To provide an explicit credential instead of using the environment variable, use an alternate constructor like - /// . - /// - /// The model name for chat completions that the client should use. - /// Additional options to customize the client. - /// The OPENAI_API_KEY environment variable was not found. - public ChatClient(string model, OpenAIClientOptions options = null) - : this( - OpenAIClient.CreatePipeline(OpenAIClient.GetApiKey(), options), - model, - OpenAIClient.GetEndpoint(options), - options) - { } + // CUSTOM: + // - Added `model` parameter. + // - Used a custom pipeline. + // - Demoted the endpoint parameter to be a property in the options class. + // - Added telemetry support. + /// Initializes a new instance of . + /// The name of the model to use in requests sent to the service. To learn more about the available models, see . + /// The API key to authenticate with the service. + /// The options to configure the client. + /// or is null. + /// is an empty string, and was expected to be non-empty. + public ChatClient(string model, ApiKeyCredential credential, OpenAIClientOptions options) + { + Argument.AssertNotNullOrEmpty(model, nameof(model)); + Argument.AssertNotNull(credential, nameof(credential)); + options ??= new OpenAIClientOptions(); - /// - /// Initializes a new instance of . - /// - /// The instance to use. - /// The model name to use. - /// The endpoint to use. 
- protected internal ChatClient(ClientPipeline pipeline, string model, Uri endpoint, OpenAIClientOptions options) + _model = model; + _pipeline = OpenAIClient.CreatePipeline(credential, options); + _endpoint = OpenAIClient.GetEndpoint(options); + _telemetry = new OpenTelemetrySource(model, _endpoint); + } + + // CUSTOM: + // - Added `model` parameter. + // - Used a custom pipeline. + // - Demoted the endpoint parameter to be a property in the options class. + // - Added telemetry support. + // - Made protected. + /// Initializes a new instance of . + /// The HTTP pipeline to send and receive REST requests and responses. + /// The name of the model to use in requests sent to the service. To learn more about the available models, see . + /// The options to configure the client. + /// or is null. + /// is an empty string, and was expected to be non-empty. + protected internal ChatClient(ClientPipeline pipeline, string model, OpenAIClientOptions options) { + Argument.AssertNotNull(pipeline, nameof(pipeline)); Argument.AssertNotNullOrEmpty(model, nameof(model)); + options ??= new OpenAIClientOptions(); _model = model; _pipeline = pipeline; - _endpoint = endpoint; - _telemetry = new OpenTelemetrySource(model, endpoint); + _endpoint = OpenAIClient.GetEndpoint(options); + _telemetry = new OpenTelemetrySource(model, _endpoint); } - /// - /// Generates a single chat completion result for a provided set of input chat messages. - /// - /// The messages to provide as input and history for chat completion. - /// Additional options for the chat completion request. - /// A token that can be used to cancel this method call. - /// A result for a single chat completion. + /// Generates a completion for the given chat. + /// The messages comprising the chat so far. + /// The options to configure the chat completion. + /// A token that can be used to cancel this method call. + /// is null. + /// is an empty collection, and was expected to be non-empty. 
public virtual async Task> CompleteChatAsync(IEnumerable messages, ChatCompletionOptions options = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(messages, nameof(messages)); @@ -98,21 +113,12 @@ public virtual async Task> CompleteChatAsync(IEnume } } - /// - /// Generates a single chat completion result for a provided set of input chat messages. - /// - /// The messages to provide as input and history for chat completion. - /// A result for a single chat completion. - public virtual async Task> CompleteChatAsync(params ChatMessage[] messages) - => await CompleteChatAsync(messages, default(ChatCompletionOptions)).ConfigureAwait(false); - - /// - /// Generates a single chat completion result for a provided set of input chat messages. - /// - /// The messages to provide as input and history for chat completion. - /// Additional options for the chat completion request. - /// A token that can be used to cancel this method call. - /// A result for a single chat completion. + /// Generates a completion for the given chat. + /// The messages comprising the chat so far. + /// The options to configure the chat completion. + /// A token that can be used to cancel this method call. + /// is null. + /// is an empty collection, and was expected to be non-empty. public virtual ClientResult CompleteChat(IEnumerable messages, ChatCompletionOptions options = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(messages, nameof(messages)); @@ -137,26 +143,33 @@ public virtual ClientResult CompleteChat(IEnumerable - /// Generates a single chat completion result for a provided set of input chat messages. - /// - /// The messages to provide as input and history for chat completion. - /// A result for a single chat completion. + /// Generates a completion for the given chat. + /// The messages comprising the chat so far. + /// is null. + /// is an empty collection, and was expected to be non-empty. 
+ public virtual async Task> CompleteChatAsync(params ChatMessage[] messages) + => await CompleteChatAsync(messages, default(ChatCompletionOptions)).ConfigureAwait(false); + + /// Generates a completion for the given chat. + /// The messages comprising the chat so far. + /// is null. + /// is an empty collection, and was expected to be non-empty. public virtual ClientResult CompleteChat(params ChatMessage[] messages) => CompleteChat(messages, default(ChatCompletionOptions)); /// - /// Begins a streaming response for a chat completion request using the provided chat messages as input and - /// history. + /// Generates a completion for the given chat. The completion is streamed back token by token as it is being + /// generated by the model instead of waiting for it to be finished first. /// /// - /// can be enumerated over using the await foreach pattern using the - /// interface. + /// implements the interface and can be + /// enumerated over using the await foreach pattern. /// - /// The messages to provide as input for chat completion. - /// Additional options for the chat completion request. - /// A token that can be used to cancel this method call. - /// A streaming result with incremental chat completion updates. + /// The messages comprising the chat so far. + /// The options to configure the chat completion. + /// A token that can be used to cancel this method call. + /// is null. + /// is an empty collection, and was expected to be non-empty. 
public virtual AsyncCollectionResult CompleteChatStreamingAsync(IEnumerable messages, ChatCompletionOptions options = null, CancellationToken cancellationToken = default) { Argument.AssertNotNull(messages, nameof(messages)); @@ -168,34 +181,22 @@ public virtual AsyncCollectionResult CompleteChat async Task getResultAsync() => await CompleteChatAsync(content, cancellationToken.ToRequestOptions(streaming: true)).ConfigureAwait(false); - return new AsyncStreamingChatCompletionUpdateCollection(getResultAsync); + return new InternalAsyncStreamingChatCompletionUpdateCollection(getResultAsync); } /// - /// Begins a streaming response for a chat completion request using the provided chat messages as input and - /// history. + /// Generates a completion for the given chat. The completion is streamed back token by token as it is being + /// generated by the model instead of waiting for it to be finished first. /// /// - /// can be enumerated over using the await foreach pattern using the - /// interface. + /// implements the interface and can be + /// enumerated over using the await foreach pattern. /// - /// The messages to provide as input for chat completion. - /// A streaming result with incremental chat completion updates. - public virtual AsyncCollectionResult CompleteChatStreamingAsync(params ChatMessage[] messages) - => CompleteChatStreamingAsync(messages, default(ChatCompletionOptions)); - - /// - /// Begins a streaming response for a chat completion request using the provided chat messages as input and - /// history. - /// - /// - /// can be enumerated over using the foreach pattern using the - /// interface. - /// - /// The messages to provide as input for chat completion. - /// Additional options for the chat completion request. - /// A token that can be used to cancel this method call. - /// A streaming result with incremental chat completion updates. + /// The messages comprising the chat so far. + /// The options to configure the chat completion. 
+ /// A token that can be used to cancel this method call. + /// is null. + /// is an empty collection, and was expected to be non-empty. public virtual CollectionResult CompleteChatStreaming(IEnumerable messages, ChatCompletionOptions options = null, CancellationToken cancellationToken = default) { Argument.AssertNotNull(messages, nameof(messages)); @@ -205,19 +206,34 @@ public virtual CollectionResult CompleteChatStrea using BinaryContent content = options.ToBinaryContent(); ClientResult getResult() => CompleteChat(content, cancellationToken.ToRequestOptions(streaming: true)); - return new StreamingChatCompletionUpdateCollection(getResult); + return new InternalStreamingChatCompletionUpdateCollection(getResult); } /// - /// Begins a streaming response for a chat completion request using the provided chat messages as input and - /// history. + /// Generates a completion for the given chat. The completion is streamed back token by token as it is being + /// generated by the model instead of waiting for it to be finished first. + /// + /// + /// implements the interface and can be + /// enumerated over using the await foreach pattern. + /// + /// The messages comprising the chat so far. + /// is null. + /// is an empty collection, and was expected to be non-empty. + public virtual AsyncCollectionResult CompleteChatStreamingAsync(params ChatMessage[] messages) + => CompleteChatStreamingAsync(messages, default(ChatCompletionOptions)); + + /// + /// Generates a completion for the given chat. The completion is streamed back token by token as it is being + /// generated by the model instead of waiting for it to be finished first. /// /// - /// can be enumerated over using the foreach pattern using the - /// interface. + /// implements the interface and can be + /// enumerated over using the await foreach pattern. /// - /// The messages to provide as input for chat completion. - /// A streaming result with incremental chat completion updates. 
+ /// The messages comprising the chat so far. + /// is null. + /// is an empty collection, and was expected to be non-empty. public virtual CollectionResult CompleteChatStreaming(params ChatMessage[] messages) => CompleteChatStreaming(messages, default(ChatCompletionOptions)); diff --git a/.dotnet/src/Custom/Chat/ChatCompletion.cs b/.dotnet/src/Custom/Chat/ChatCompletion.cs index 0efd2e53f..7f0681f3f 100644 --- a/.dotnet/src/Custom/Chat/ChatCompletion.cs +++ b/.dotnet/src/Custom/Chat/ChatCompletion.cs @@ -1,4 +1,5 @@ using System; +using System.ClientModel.Primitives; using System.Collections.Generic; namespace OpenAI.Chat; @@ -7,6 +8,7 @@ namespace OpenAI.Chat; public partial class ChatCompletion { private IReadOnlyList _contentTokenLogProbabilities; + private IReadOnlyList _refusalTokenLogProbabilities; // CUSTOM: Made private. This property does not add value in the context of a strongly-typed class. /// The object type, which is always `chat.completion`. @@ -40,6 +42,11 @@ public partial class ChatCompletion ? Choices[0].Logprobs.Content : _contentTokenLogProbabilities ??= new ChangeTrackingList(); + // CUSTOM: Flattened refusal logprobs property. + public IReadOnlyList RefusalTokenLogProbabilities => (Choices[0]?.Logprobs != null) + ? Choices[0].Logprobs.Refusal + : _refusalTokenLogProbabilities ??= new ChangeTrackingList(); + // CUSTOM: Flattened choice message property. /// /// The role of the author of this message. @@ -61,9 +68,18 @@ public partial class ChatCompletion // CUSTOM: Flattened choice message property. public ChatFunctionCall FunctionCall => Choices[0].Message.FunctionCall; + // CUSTOM: Flattened choice message property. + public string Refusal => Choices[0].Message.Refusal; + /// /// Returns text representation of the first part of the first choice. /// /// - public override string ToString() => Content[0].Text; + public override string ToString() => Content.Count > 0 ? Content[0].Text + : ToolCalls.Count > 0 ? 
ModelReaderWriter.Write(ToolCalls[0]).ToString() + : null; + + // CUSTOM: Made internal. + [CodeGenMember("ServiceTier")] + internal InternalCreateChatCompletionResponseServiceTier? _serviceTier; } diff --git a/.dotnet/src/Custom/Chat/ChatCompletionOptions.cs b/.dotnet/src/Custom/Chat/ChatCompletionOptions.cs index 77ef66507..7a2024656 100644 --- a/.dotnet/src/Custom/Chat/ChatCompletionOptions.cs +++ b/.dotnet/src/Custom/Chat/ChatCompletionOptions.cs @@ -1,4 +1,5 @@ using System.Collections.Generic; +using System.Diagnostics.CodeAnalysis; namespace OpenAI.Chat; @@ -102,6 +103,7 @@ public ChatCompletionOptions() [CodeGenMember("FunctionCall")] public ChatFunctionChoice FunctionChoice { get; set; } + // CUSTOM: Renamed. /// /// Whether to enable parallel function calling during tool use. /// @@ -110,4 +112,36 @@ public ChatCompletionOptions() /// [CodeGenMember("ParallelToolCalls")] public bool? ParallelToolCallsEnabled { get; set; } -} \ No newline at end of file + + /// + /// An object specifying the format that the model must output. + /// + /// + ///

+ /// Compatible with GPT-4o, GPT-4o mini, GPT-4 Turbo and all GPT-3.5 Turbo models newer than gpt-3.5-turbo-1106. + ///

+ ///

+ /// Learn more in the Structured Outputs guide. + ///

+ ///
+ //[CodeGenMember("ResponseFormat")] + //public ChatResponseFormat ResponseFormat { get; set; } + + [CodeGenMember("ServiceTier")] + internal InternalCreateChatCompletionRequestServiceTier? _serviceTier; + + // CUSTOM: Renamed. + /// + /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. + /// Learn more. + /// + [CodeGenMember("User")] + public string EndUserId { get; set; } + + // CUSTOM: Added the Experimental attribute + /// + /// If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. + /// + [Experimental("OPENAI001")] + public long? Seed { get; set; } +} diff --git a/.dotnet/src/Custom/Chat/ChatImageDetailLevel.cs b/.dotnet/src/Custom/Chat/ChatImageDetailLevel.cs new file mode 100644 index 000000000..5676bc43a --- /dev/null +++ b/.dotnet/src/Custom/Chat/ChatImageDetailLevel.cs @@ -0,0 +1,10 @@ +namespace OpenAI.Chat; + +/// +/// The level of detail with which the model should process the image and generate its textual understanding of +/// it. Learn more in the vision guide. 
+/// +[CodeGenModel("ChatCompletionRequestMessageContentPartImageImageUrlDetail")] +public readonly partial struct ChatImageDetailLevel +{ +} diff --git a/.dotnet/src/Custom/Chat/ChatMessage.Serialization.cs b/.dotnet/src/Custom/Chat/ChatMessage.Serialization.cs index 3ad48ee5e..6d1ae8482 100644 --- a/.dotnet/src/Custom/Chat/ChatMessage.Serialization.cs +++ b/.dotnet/src/Custom/Chat/ChatMessage.Serialization.cs @@ -19,11 +19,16 @@ internal void SerializeContentValue(Utf8JsonWriter writer, ModelReaderWriterOpti internal static void DeserializeContentValue(JsonProperty property, ref IList content, ModelReaderWriterOptions options = null) { content ??= new ChangeTrackingList(); - if (property.Value.ValueKind == JsonValueKind.String) + + if (property.Value.ValueKind == JsonValueKind.Null) + { + return; + } + else if (property.Value.ValueKind == JsonValueKind.String) { - content.Add(ChatMessageContentPart.CreateTextMessageContentPart(property.Value.GetString())); + content.Add(ChatMessageContentPart.CreateTextPart(property.Value.GetString())); } - else + else if (property.Value.ValueKind == JsonValueKind.Array) { foreach (var item in property.Value.EnumerateArray()) { @@ -38,5 +43,5 @@ void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptio internal static void WriteCore(ChatMessage instance, Utf8JsonWriter writer, ModelReaderWriterOptions options) => instance.WriteCore(writer, options); - protected abstract void WriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options); + internal abstract void WriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options); } diff --git a/.dotnet/src/Custom/Chat/ChatMessage.cs b/.dotnet/src/Custom/Chat/ChatMessage.cs index 99fca4302..6ad95bc82 100644 --- a/.dotnet/src/Custom/Chat/ChatMessage.cs +++ b/.dotnet/src/Custom/Chat/ChatMessage.cs @@ -1,3 +1,4 @@ +using System; using System.Collections.Generic; namespace OpenAI.Chat; @@ -55,40 +56,101 @@ namespace OpenAI.Chat; [CodeGenSerialization(nameof(Content), 
SerializationValueHook = nameof(SerializeContentValue), DeserializationValueHook = nameof(DeserializeContentValue))] public abstract partial class ChatMessage { + // CUSTOM: Changed type from string to ChatMessageRole. + [CodeGenMember("Role")] + internal ChatMessageRole Role { get; set; } + + // CUSTOM: Made internal. + internal ChatMessage() + { + } + + internal ChatMessage(ChatMessageRole role, IEnumerable contentParts) + { + Role = role; + + if (contentParts != null) + { + foreach (ChatMessageContentPart contentPart in contentParts) + { + Content.Add(contentPart); + } + } + } + + internal ChatMessage(ChatMessageRole role, string content) + { + Role = role; + + if (content != null) + { + Content.Add(ChatMessageContentPart.CreateTextPart(content)); + } + } + /// /// The content associated with the message. The interpretation of this content will vary depending on the message type. /// - public IList Content { get; protected set; } + public IList Content { get; } = new ChangeTrackingList(); + #region SystemChatMessage /// - public static SystemChatMessage CreateSystemMessage(string content) => new SystemChatMessage(content); + public static SystemChatMessage CreateSystemMessage(string content) => new(content); + /// + public static SystemChatMessage CreateSystemMessage(IEnumerable contentParts) => new(contentParts); + + /// + public static SystemChatMessage CreateSystemMessage(params ChatMessageContentPart[] contentParts) => new(contentParts); + #endregion + + #region UserChatMessage /// - public static UserChatMessage CreateUserMessage(string content) => new UserChatMessage(content); + public static UserChatMessage CreateUserMessage(string content) => new(content); /// - public static UserChatMessage CreateUserMessage(IEnumerable contentParts) => new UserChatMessage(contentParts); + public static UserChatMessage CreateUserMessage(IEnumerable contentParts) => new(contentParts); /// - public static UserChatMessage CreateUserMessage(params ChatMessageContentPart[] 
contentParts) => new UserChatMessage(contentParts); + public static UserChatMessage CreateUserMessage(params ChatMessageContentPart[] contentParts) => new(contentParts); + #endregion + #region AssistantChatMessage /// - public static AssistantChatMessage CreateAssistantMessage(string content) => new AssistantChatMessage(content); + public static AssistantChatMessage CreateAssistantMessage(string content) => new(content); + + /// + public static AssistantChatMessage CreateAssistantMessage(IEnumerable contentParts) => new(contentParts); + + /// + public static AssistantChatMessage CreateAssistantMessage(params ChatMessageContentPart[] contentParts) => new(contentParts); /// - public static AssistantChatMessage CreateAssistantMessage(IEnumerable toolCalls, string content = null) => new AssistantChatMessage(toolCalls, content); + public static AssistantChatMessage CreateAssistantMessage(IEnumerable toolCalls, string content = null) => new(toolCalls, content); /// - public static AssistantChatMessage CreateAssistantMessage(ChatFunctionCall functionCall, string content = null) => new AssistantChatMessage(functionCall, content); + public static AssistantChatMessage CreateAssistantMessage(ChatFunctionCall functionCall, string content = null) => new(functionCall, content); /// - public static AssistantChatMessage CreateAssistantMessage(ChatCompletion chatCompletion) => new AssistantChatMessage(chatCompletion); + public static AssistantChatMessage CreateAssistantMessage(ChatCompletion chatCompletion) => new(chatCompletion); + #endregion + #region ToolChatMessage /// - public static ToolChatMessage CreateToolChatMessage(string toolCallId, string content) => new ToolChatMessage(toolCallId, content); + public static ToolChatMessage CreateToolChatMessage(string toolCallId, string content) => new(toolCallId, content); + + /// + public static ToolChatMessage CreateToolChatMessage(string toolCallId, IEnumerable contentParts) => new(toolCallId, contentParts); + + /// + public static 
ToolChatMessage CreateToolChatMessage(string toolCallId, params ChatMessageContentPart[] contentParts) => new(toolCallId, contentParts); + #endregion + #region FunctionChatMessage /// - public static FunctionChatMessage CreateFunctionMessage(string functionName, string content) => new FunctionChatMessage(functionName, content); + [Obsolete("This field is marked as deprecated.")] + public static FunctionChatMessage CreateFunctionMessage(string functionName, string content) => new(functionName, content); + #endregion /// /// Creates UserChatMessage. diff --git a/.dotnet/src/Custom/Chat/ChatMessageContentPart.Serialization.cs b/.dotnet/src/Custom/Chat/ChatMessageContentPart.Serialization.cs index 42e6ad202..506cddcfb 100644 --- a/.dotnet/src/Custom/Chat/ChatMessageContentPart.Serialization.cs +++ b/.dotnet/src/Custom/Chat/ChatMessageContentPart.Serialization.cs @@ -14,25 +14,51 @@ void IJsonModel.Write(Utf8JsonWriter writer, ModelReader internal static void WriteCoreContentPart(ChatMessageContentPart instance, Utf8JsonWriter writer, ModelReaderWriterOptions options) { writer.WriteStartObject(); + writer.WritePropertyName("type"u8); + writer.WriteStringValue(instance._kind.ToString()); if (instance._kind == ChatMessageContentPartKind.Text) { - writer.WritePropertyName("type"u8); - writer.WriteStringValue(instance._kind.ToString()); writer.WritePropertyName("text"u8); writer.WriteStringValue(instance._text); } + else if (instance._kind == ChatMessageContentPartKind.Refusal) + { + writer.WritePropertyName("refusal"u8); + writer.WriteStringValue(instance._refusal); + } else if (instance._kind == ChatMessageContentPartKind.Image) { - writer.WritePropertyName("type"u8); - writer.WriteStringValue(instance._kind.ToString()); writer.WritePropertyName("image_url"u8); - writer.WriteObjectValue(instance._imageUrl, options); + writer.WriteObjectValue(instance._imageUri, options); } writer.WriteSerializedAdditionalRawData(instance.SerializedAdditionalRawData, options); 
writer.WriteEndObject(); } + internal static void WriteCoreContentPartList(IList instances, Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + if (!Optional.IsCollectionDefined(instances)) + { + return; + } + + writer.WritePropertyName("content"u8); + if (instances.Count == 1 && !string.IsNullOrEmpty(instances[0].Text)) + { + writer.WriteStringValue(instances[0].Text); + } + else + { + writer.WriteStartArray(); + foreach (var item in instances) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + } + } + internal static ChatMessageContentPart DeserializeChatMessageContentPart(JsonElement element, ModelReaderWriterOptions options = null) { options ??= ModelSerializationExtensions.WireOptions; @@ -44,7 +70,8 @@ internal static ChatMessageContentPart DeserializeChatMessageContentPart(JsonEle string kind = default; string text = default; - InternalChatCompletionRequestMessageContentPartImageImageUrl imageUrl = default; + string refusal = default; + InternalChatCompletionRequestMessageContentPartImageImageUrl imageUri = default; IDictionary serializedAdditionalRawData = default; Dictionary rawDataDictionary = new Dictionary(); foreach (var property in element.EnumerateObject()) @@ -61,7 +88,12 @@ internal static ChatMessageContentPart DeserializeChatMessageContentPart(JsonEle } if (property.NameEquals("image_url"u8)) { - imageUrl = InternalChatCompletionRequestMessageContentPartImageImageUrl.DeserializeInternalChatCompletionRequestMessageContentPartImageImageUrl(property.Value, options); + imageUri = InternalChatCompletionRequestMessageContentPartImageImageUrl.DeserializeInternalChatCompletionRequestMessageContentPartImageImageUrl(property.Value, options); + continue; + } + if (property.NameEquals("refusal"u8)) + { + refusal = property.Value.GetString(); continue; } if (true) @@ -70,6 +102,6 @@ internal static ChatMessageContentPart DeserializeChatMessageContentPart(JsonEle } } serializedAdditionalRawData = rawDataDictionary; - 
return new ChatMessageContentPart(kind, text, imageUrl, serializedAdditionalRawData); + return new ChatMessageContentPart(kind, text, imageUri, refusal, serializedAdditionalRawData); } } diff --git a/.dotnet/src/Custom/Chat/ChatMessageContentPart.cs b/.dotnet/src/Custom/Chat/ChatMessageContentPart.cs index 603008327..d4ebd3724 100644 --- a/.dotnet/src/Custom/Chat/ChatMessageContentPart.cs +++ b/.dotnet/src/Custom/Chat/ChatMessageContentPart.cs @@ -4,7 +4,22 @@ namespace OpenAI.Chat; /// -/// Represents the common base type for a piece of message content used for chat completions. +/// A part of the chat message content. +/// +/// +/// Call to create a that +/// encapsulates text. +/// +/// +/// Call or +/// to create a +/// that encapsulates an image. +/// +/// +/// Call to create a that +/// encapsulates a refusal coming from the model. +/// +/// /// [CodeGenModel("ChatMessageContentPart")] [CodeGenSuppress("ChatMessageContentPart", typeof(IDictionary))] @@ -12,138 +27,136 @@ public partial class ChatMessageContentPart { private readonly ChatMessageContentPartKind _kind; private readonly string _text; - private readonly InternalChatCompletionRequestMessageContentPartImageImageUrl _imageUrl; - private readonly string _dataUri; - - internal ChatMessageContentPart(string text) - { - Argument.AssertNotNull(text, nameof(text)); - - _text = text; - _kind = ChatMessageContentPartKind.Text; - } + private readonly InternalChatCompletionRequestMessageContentPartImageImageUrl _imageUri; + private readonly string _refusal; // CUSTOM: Made internal. internal ChatMessageContentPart() { } - - internal ChatMessageContentPart(Uri imageUri, ImageChatMessageContentPartDetail? imageDetail = null) - { - Argument.AssertNotNull(imageUri, nameof(imageUri)); - - _imageUrl = new(imageUri) { Detail = imageDetail }; - _kind = ChatMessageContentPartKind.Image; - } - - internal ChatMessageContentPart(BinaryData imageBytes, string imageBytesMediaType, ImageChatMessageContentPartDetail? 
imageDetail = null) - { - Argument.AssertNotNull(imageBytes, nameof(imageBytes)); - Argument.AssertNotNull(imageBytesMediaType, nameof(imageBytesMediaType)); - - _imageUrl = new(imageBytes, imageBytesMediaType) { Detail = imageDetail }; - _kind = ChatMessageContentPartKind.Image; - } - - /// Initializes a new instance of . - /// The kind. - /// The text. - /// The image URI. - /// Keeps track of any properties unknown to the library. - internal ChatMessageContentPart(string kind, string text, InternalChatCompletionRequestMessageContentPartImageImageUrl imageUrl, IDictionary serializedAdditionalRawData) + // CUSTOM: Added to support deserialization. + internal ChatMessageContentPart(string kind, string text, InternalChatCompletionRequestMessageContentPartImageImageUrl imageUri, string refusal, IDictionary serializedAdditionalRawData) { _kind = new ChatMessageContentPartKind(kind); _text = text; - _imageUrl = imageUrl; + _imageUri = imageUri; + _refusal = refusal; SerializedAdditionalRawData = serializedAdditionalRawData; } - /// - /// The content part kind. - /// + /// The kind of content part. public ChatMessageContentPartKind Kind => _kind; - /// - /// The text content. - /// + // CUSTOM: Spread. + /// The text. + /// Present when is . public string Text => _text; - /// - /// The image URI content. - /// - public Uri ImageUri => _imageUrl?.ImageUri; + // CUSTOM: Spread. + /// The public internet URI where the image is located. + /// Present when is . + public Uri ImageUri => _imageUri?.ImageUri; - /// - /// The image URI content. - /// - public BinaryData ImageBytes => _imageUrl?.ImageBytes; + // CUSTOM: Spread. + /// The image bytes. + /// Present when is . + public BinaryData ImageBytes => _imageUri?.ImageBytes; - /// - /// The image URI content. - /// - public string ImageBytesMediaType => _imageUrl?.ImageBytesMediaType; + // CUSTOM: Spread. + /// The MIME type of the image, e.g., image/png. + /// Present when is . 
+ public string ImageBytesMediaType => _imageUri?.ImageBytesMediaType; + // CUSTOM: Spread. /// - /// The image URI detail. + /// The level of detail with which the model should process the image and generate its textual understanding of + /// it. Learn more in the vision guide. /// - public ImageChatMessageContentPartDetail? ImageDetail => _imageUrl?.Detail; + /// Present when is . + public ChatImageDetailLevel? ImageDetailLevel => _imageUri?.Detail; - /// - /// Creates a new instance of that encapsulates text content. - /// - /// The content for the new instance. - /// A new instance of . - public static ChatMessageContentPart CreateTextMessageContentPart(string text) + // CUSTOM: Spread. + /// The refusal message generated by the model. + /// Present when is . + public string Refusal => _refusal; + + /// Creates a new that encapsulates text. + /// The text. + /// is null. + public static ChatMessageContentPart CreateTextPart(string text) { Argument.AssertNotNull(text, nameof(text)); - return new(text); + return new ChatMessageContentPart( + kind: ChatMessageContentPartKind.Text.ToString(), + text: text, + imageUri: null, + refusal: null, + serializedAdditionalRawData: null); } - /// - /// Creates a new instance of that encapsulates image content obtained from - /// an internet location that will be accessible to the model when evaluating a message with this content. - /// - /// An internet location pointing to an image. This must be accessible to the model. - /// The detail level of the image. - /// A new instance of . - public static ChatMessageContentPart CreateImageMessageContentPart(Uri imageUri, ImageChatMessageContentPartDetail? imageDetail = null) + /// Creates a new that encapsulates an image. + /// The public internet URI where the image is located. + /// + /// The level of detail with which the model should process the image and generate its textual understanding of + /// it. Learn more in the vision guide. + /// + /// is null. 
+ public static ChatMessageContentPart CreateImagePart(Uri imageUri, ChatImageDetailLevel? imageDetailLevel = null) { Argument.AssertNotNull(imageUri, nameof(imageUri)); - return new(imageUri, imageDetail); + return new ChatMessageContentPart( + kind: ChatMessageContentPartKind.Image.ToString(), + text: null, + imageUri: new(imageUri) { Detail = imageDetailLevel }, + refusal: null, + serializedAdditionalRawData: null); } - /// - /// Creates a new instance of that encapsulates image content obtained from - /// an internet location that will be accessible to the model when evaluating a message with this content. - /// - /// The readable stream containing the image data to use as content. - /// The MIME descriptor, like image/png, corresponding to the image data format of the provided data. - /// The detail level of the image. - /// A new instance of . - public static ChatMessageContentPart CreateImageMessageContentPart(BinaryData imageBytes, string imageBytesMediaType, ImageChatMessageContentPartDetail? imageDetail = null) + /// Creates a new that encapsulates an image. + /// The image bytes. + /// The MIME type of the image, e.g., image/png. + /// + /// The level of detail with which the model should process the image and generate its textual understanding of + /// it. Learn more in the vision guide. + /// + /// or is null. + /// is an empty string, and was expected to be non-empty. + public static ChatMessageContentPart CreateImagePart(BinaryData imageBytes, string imageBytesMediaType, ChatImageDetailLevel? 
imageDetailLevel = null) { Argument.AssertNotNull(imageBytes, nameof(imageBytes)); - Argument.AssertNotNull(imageBytesMediaType, nameof(imageBytesMediaType)); - - return new(imageBytes, imageBytesMediaType, imageDetail); + Argument.AssertNotNullOrEmpty(imageBytesMediaType, nameof(imageBytesMediaType)); + + return new ChatMessageContentPart( + kind: ChatMessageContentPartKind.Image.ToString(), + text: null, + imageUri: new(imageBytes, imageBytesMediaType) { Detail = imageDetailLevel }, + refusal: null, + serializedAdditionalRawData: null); } - /// - /// Returns text representation of this part. - /// - /// - public override string ToString() => Text; + /// Creates a new that encapsulates a refusal coming from the model. + /// The refusal message generated by the model. + /// is null. + public static ChatMessageContentPart CreateRefusalPart(string refusal) + { + Argument.AssertNotNull(refusal, nameof(refusal)); + + return new ChatMessageContentPart( + kind: ChatMessageContentPartKind.Refusal.ToString(), + text: null, + imageUri: null, + refusal: refusal, + serializedAdditionalRawData: null); + } /// - /// Implicitly creates a new instance from an item of plain text. + /// Implicitly intantiates a new from a . As such, + /// using a in place of a is equivalent to calling the + /// method. /// - /// - /// Using a in the position of a is equivalent to - /// calling the method. - /// - /// The text content to use as this content part. - public static implicit operator ChatMessageContentPart(string content) => new(content); + /// The text encapsulated by this . 
+ public static implicit operator ChatMessageContentPart(string text) => CreateTextPart(text); } diff --git a/.dotnet/src/Custom/Chat/ChatMessageContentPartKind.cs b/.dotnet/src/Custom/Chat/ChatMessageContentPartKind.cs index ce5a097ed..0962a4bc5 100644 --- a/.dotnet/src/Custom/Chat/ChatMessageContentPartKind.cs +++ b/.dotnet/src/Custom/Chat/ChatMessageContentPartKind.cs @@ -18,10 +18,13 @@ public ChatMessageContentPartKind(string value) } private const string TextValue = "text"; + private const string RefusalValue = "refusal"; private const string ImageValue = "image_url"; /// Text. public static ChatMessageContentPartKind Text { get; } = new ChatMessageContentPartKind(TextValue); + /// Refusal. + public static ChatMessageContentPartKind Refusal { get; } = new ChatMessageContentPartKind(RefusalValue); /// Image. public static ChatMessageContentPartKind Image { get; } = new ChatMessageContentPartKind(ImageValue); @@ -40,7 +43,7 @@ public ChatMessageContentPartKind(string value) /// [EditorBrowsable(EditorBrowsableState.Never)] - public override int GetHashCode() => _value?.GetHashCode() ?? 0; + public override int GetHashCode() => _value != null ? 
StringComparer.InvariantCultureIgnoreCase.GetHashCode(_value) : 0; /// public override string ToString() => _value; } \ No newline at end of file diff --git a/.dotnet/src/Custom/Chat/ChatResponseFormat.Serialization.cs b/.dotnet/src/Custom/Chat/ChatResponseFormat.Serialization.cs new file mode 100644 index 000000000..ff7fdeaaf --- /dev/null +++ b/.dotnet/src/Custom/Chat/ChatResponseFormat.Serialization.cs @@ -0,0 +1,19 @@ +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Runtime.CompilerServices; +using System.Text.Json; + +namespace OpenAI.Chat; + +[CodeGenSuppress("global::System.ClientModel.Primitives.IJsonModel.Write", typeof(Utf8JsonWriter), typeof(ModelReaderWriterOptions))] +public abstract partial class ChatResponseFormat : IJsonModel +{ + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + => CustomSerializationHelpers.SerializeInstance(this, WriteCore, writer, options); + + internal static void WriteCore(ChatResponseFormat instance, Utf8JsonWriter writer, ModelReaderWriterOptions options) + => instance.WriteCore(writer, options); + + internal abstract void WriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options); +} diff --git a/.dotnet/src/Custom/Chat/ChatResponseFormat.cs b/.dotnet/src/Custom/Chat/ChatResponseFormat.cs index 96ec8b837..e7123c46d 100644 --- a/.dotnet/src/Custom/Chat/ChatResponseFormat.cs +++ b/.dotnet/src/Custom/Chat/ChatResponseFormat.cs @@ -1,45 +1,106 @@ -using OpenAI.Models; +using OpenAI.Internal; +using System; namespace OpenAI.Chat; /// -/// Represents a requested response_format for the model to use, enabling "JSON mode" for guaranteed valid output. +/// The format that the model should output. +/// +/// +/// Call to create a requesting plain +/// text. +/// +/// +/// Call to create a requesting +/// valid JSON, a.k.a. JSON mode. +/// +/// +/// Call to create a +/// requesting adherence to the specified JSON schema, +/// a.k.a. 
structured outputs. +/// +/// /// -/// -/// Important: when using JSON mode, the model must also be instructed to produce JSON via a -/// system or user message. -/// -/// Without this paired, message-based accompaniment, the model may generate an unending stream of whitespace until the -/// generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. -/// -/// -/// Also note that the message content may be partially cut off if finish_reason is length, which -/// indicates that the generation exceeded max_tokens or the conversation exceeded the max context length for -/// the model. -/// -/// -[CodeGenModel("CreateChatCompletionRequestResponseFormat")] -public partial class ChatResponseFormat +[CodeGenModel("ChatResponseFormat")] +public abstract partial class ChatResponseFormat { - // CUSTOM: Made internal. + /// Creates a new requesting plain text. + public static ChatResponseFormat CreateTextFormat() => new InternalChatResponseFormatText(); - /// Must be one of `text` or `json_object`. - [CodeGenMember("Type")] - internal InternalCreateChatCompletionRequestResponseFormatType? Type { get; set; } + /// Creates a new requesting valid JSON, a.k.a. JSON mode. + public static ChatResponseFormat CreateJsonObjectFormat() => new InternalChatResponseFormatJsonObject(); - // CUSTOM: Made internal. - /// Initializes a new instance of . - internal ChatResponseFormat() + /// + /// Creates a new requesting adherence to the specified JSON schema, + /// a.k.a. structured outputs. + /// + /// The name of the response format. + /// + /// + /// The schema of the response format, described as a JSON schema. Learn more in the + /// structured outputs guide. + /// and the + /// JSON schema reference documentation. + /// + /// + /// You can easily create a JSON schema via the factory methods of the class, such + /// as or . 
For + /// example, the following code defines a simple schema for step-by-step responses to math problems: + /// + /// BinaryData jsonSchema = BinaryData.FromBytes(""" + /// { + /// "type": "object", + /// "properties": { + /// "steps": { + /// "type": "array", + /// "items": { + /// "type": "object", + /// "properties": { + /// "explanation": {"type": "string"}, + /// "output": {"type": "string"} + /// }, + /// "required": ["explanation", "output"], + /// "additionalProperties": false + /// } + /// }, + /// "final_answer": {"type": "string"} + /// }, + /// "required": ["steps", "final_answer"], + /// "additionalProperties": false + /// } + /// """U8.ToArray()); + /// + /// + /// + /// + /// The description of what the response format is for, which is used by the model to determine how to respond + /// in the format. + /// + /// + /// + /// Whether to enable strict schema adherence when generating the response. If set to true, the + /// model will follow the exact schema defined in . + /// + /// + /// Only a subset of the JSON schema specification is supported when this is set to true. Learn more + /// in the + /// structured outputs guide. + /// + /// + /// or is null. + /// is an empty string, and was expected to be non-empty. + public static ChatResponseFormat CreateJsonSchemaFormat(string jsonSchemaFormatName, BinaryData jsonSchema, string jsonSchemaFormatDescription = null, bool? jsonSchemaIsStrict = null) { - } + Argument.AssertNotNullOrEmpty(jsonSchemaFormatName, nameof(jsonSchemaFormatName)); + Argument.AssertNotNull(jsonSchema, nameof(jsonSchema)); - internal ChatResponseFormat(InternalCreateChatCompletionRequestResponseFormatType? type) - { - Type = type; - } + InternalResponseFormatJsonSchemaJsonSchema internalSchema = new( + jsonSchemaFormatDescription, + jsonSchemaFormatName, + jsonSchema, + jsonSchemaIsStrict, + serializedAdditionalRawData: null); - /// text. 
- public static ChatResponseFormat Text { get; } = new ChatResponseFormat(InternalCreateChatCompletionRequestResponseFormatType.Text); - /// json_object. - public static ChatResponseFormat JsonObject { get; } = new ChatResponseFormat(InternalCreateChatCompletionRequestResponseFormatType.JsonObject); + return new InternalChatResponseFormatJsonSchema(internalSchema); + } } \ No newline at end of file diff --git a/.dotnet/src/Custom/Chat/ChatTool.cs b/.dotnet/src/Custom/Chat/ChatTool.cs index eab0ec3b4..5811edc17 100644 --- a/.dotnet/src/Custom/Chat/ChatTool.cs +++ b/.dotnet/src/Custom/Chat/ChatTool.cs @@ -3,88 +3,134 @@ namespace OpenAI.Chat; /// -/// A base representation of a tool supplied to a chat completion request. Tools inform the model about additional, -/// caller-provided behaviors that can be invoked to provide prompt enrichment or custom actions. +/// A tool that the model may call. +/// +/// +/// Call to create a +/// representing a function that the model may call. +/// +/// /// [CodeGenModel("ChatCompletionTool")] public partial class ChatTool { // CUSTOM: Made internal. - /// Gets the function. [CodeGenMember("Function")] internal InternalFunctionDefinition Function { get; } // CUSTOM: Made internal. - /// Initializes a new instance of . - /// - /// is null. internal ChatTool(InternalFunctionDefinition function) { - Kind = ChatToolKind.Function; + Argument.AssertNotNull(function, nameof(function)); Function = function; } // CUSTOM: Renamed. - /// The type of the tool. Currently, only is supported. + /// The kind of tool. [CodeGenMember("Type")] public ChatToolKind Kind { get; } = ChatToolKind.Function; - // CUSTOM: Flattened. - /// - /// The name of the function that the tool represents. - /// + // CUSTOM: Spread. + /// The name of the function. + /// Present when is . public string FunctionName => Function?.Name; - // CUSTOM: Flattened. + // CUSTOM: Spread. /// - /// A friendly description of the function. 
This supplements in informing the model about when - /// it should call the function. + /// The description of what the function does, which is used by the model to choose when and how to call the + /// function. /// + /// Present when is . public string FunctionDescription => Function?.Description; - // CUSTOM: Flattened. + // CUSTOM: Spread. /// - /// The parameter information for the function, provided in JSON Schema format. + /// The parameters that the function accepts, which are described as a JSON schema. If omitted, this + /// defines a function with an empty parameter list. Learn more in the + /// function calling guide + /// and the + /// JSON schema reference documentation. /// - /// - /// The method provides - /// an easy definition interface using the dynamic type: - /// - /// Parameters = BinaryData.FromObjectAsJson(new - /// { - /// type = "object", - /// properties = new - /// { - /// your_function_argument = new - /// { - /// type = "string", - /// description = "the description of your function argument" - /// } - /// }, - /// required = new[] { "your_function_argument" } - /// }) - /// - /// + /// Present when is . public BinaryData FunctionParameters => Function?.Parameters; - // CUSTOM: Added custom constructor. + // CUSTOM: Spread. /// - /// Creates a new instance of . + /// + /// Whether to enable strict schema adherence when generating the function call. If set to true, the + /// model will follow the exact schema defined in + /// + /// + /// Only a subset of the JSON schema specification is supported when this is set to true. Learn more + /// about structured outputs in the + /// function calling guide. + /// /// - /// The name of the function. - /// The description of the function. - /// The parameters into the function, in JSON Schema format. - public static ChatTool CreateFunctionTool(string functionName, string functionDescription = null, BinaryData functionParameters = null) + /// Present when is . + public bool? 
FunctionSchemaIsStrict => Function?.Strict; + + /// Creates a new representing a function that the model may call. + /// The name of the function. + /// + /// The description of what the function does, which is used by the model to choose when and how to call the + /// function. + /// + /// + /// + /// The parameters that the function accepts, which are described as a JSON schema. If omitted, this + /// defines a function with an empty parameter list. Learn more in the + /// function calling guide + /// and the + /// JSON schema reference documentation. + /// + /// + /// You can easily create a JSON schema via the factory methods of the class, such + /// as or . For + /// example, the following code defines a simple schema for a function that takes a customer's order ID as + /// a string parameter: + /// + /// BinaryData functionParameters = BinaryData.FromBytes(""" + /// { + /// "type": "object", + /// "properties": { + /// "order_id": { + /// "type": "string", + /// "description": "The customer's order ID." + /// } + /// }, + /// "required": ["order_id"], + /// "additionalProperties": false + /// } + /// """u8.ToArray()); + /// + /// + /// + /// + /// + /// Whether to enable strict schema adherence when generating the function call. If set to true, the + /// model will follow the exact schema defined in . + /// + /// + /// Only a subset of the JSON schema specification is supported when this is set to true. Learn more + /// about structured outputs in the + /// function calling guide. + /// + /// + public static ChatTool CreateFunctionTool(string functionName, string functionDescription = null, BinaryData functionParameters = null, bool? 
functionSchemaIsStrict = null) { Argument.AssertNotNull(functionName, nameof(functionName)); InternalFunctionDefinition function = new(functionName) { Description = functionDescription, - Parameters = functionParameters + Parameters = functionParameters, + Strict = functionSchemaIsStrict, }; - return new(function); + return new( + kind: ChatToolKind.Function, + function: function, + serializedAdditionalRawData: null); } } diff --git a/.dotnet/src/Custom/Chat/ChatToolCall.cs b/.dotnet/src/Custom/Chat/ChatToolCall.cs index 7a508f743..65f57ede0 100644 --- a/.dotnet/src/Custom/Chat/ChatToolCall.cs +++ b/.dotnet/src/Custom/Chat/ChatToolCall.cs @@ -1,69 +1,66 @@ using System; +using System.Collections.Generic; namespace OpenAI.Chat; /// -/// A base representation of an item in an assistant role response's tool_calls that specifies -/// parameterized resolution against a previously defined tool that is needed for the model to continue the logical -/// conversation. +/// A tool call made by the model. +/// +/// +/// Call to create a +/// representing a function call made by the model. +/// +/// /// [CodeGenModel("ChatCompletionMessageToolCall")] +[CodeGenSuppress("ChatToolCall", typeof(string), typeof(InternalChatCompletionMessageToolCallFunction))] public partial class ChatToolCall { - /// The function that the model called. - [CodeGenMember("Function")] - internal InternalChatCompletionMessageToolCallFunction Function { get; } - // CUSTOM: Made internal. - /// Initializes a new instance of . - /// The ID of the tool call. - /// The function that the model called. - /// or is null. 
- internal ChatToolCall(string id, InternalChatCompletionMessageToolCallFunction function) - { - Argument.AssertNotNull(id, nameof(id)); - Argument.AssertNotNull(function, nameof(function)); - - Kind = ChatToolCallKind.Function; - - Id = id; - Function = function; - } + [CodeGenMember("Function")] + internal InternalChatCompletionMessageToolCallFunction Function { get; set; } // CUSTOM: Renamed. - /// The kind of tool. Currently, only is supported. + /// The kind of tool call. [CodeGenMember("Type")] public ChatToolCallKind Kind { get; } = ChatToolCallKind.Function; - // CUSTOM: Flattened. - /// - /// Gets the name of the function. - /// + // CUSTOM: Spread. + /// The name of the function that model is calling. + /// Present when is . public string FunctionName => Function?.Name; - // CUSTOM: Flattened. - /// - /// Gets the arguments to the function. + // CUSTOM: Spread. + /// + /// The arguments that model is calling the function with, which are generated by the model in JSON format. + /// Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your + /// function schema. Validate the arguments in your code before calling your function. /// + /// Present when is . public string FunctionArguments => Function?.Arguments; - /// - /// Creates a new instance of . - /// - /// - /// The ID of the tool call, used when resolving the tool call with a future - /// . + /// Creates a new representing a function call made by the model. + /// The ID of the tool call. + /// The name of the function that model is calling. + /// + /// The arguments that model is calling the function with, which are generated by the model in JSON format. + /// Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your + /// function schema. Validate the arguments in your code before calling your function. /// - /// The name of the function. - /// The arguments to the function. + /// , or is null. 
+ /// , or is an empty string, and was expected to be non-empty. public static ChatToolCall CreateFunctionToolCall(string toolCallId, string functionName, string functionArguments) { - Argument.AssertNotNull(toolCallId, nameof(toolCallId)); - Argument.AssertNotNull(functionName, nameof(functionName)); - Argument.AssertNotNull(functionArguments, nameof(functionArguments)); + Argument.AssertNotNullOrEmpty(toolCallId, nameof(toolCallId)); + Argument.AssertNotNullOrEmpty(functionName, nameof(functionName)); + Argument.AssertNotNullOrEmpty(functionArguments, nameof(functionArguments)); InternalChatCompletionMessageToolCallFunction function = new(functionName, functionArguments); - return new(toolCallId, function); + return new( + id: toolCallId, + kind: ChatToolCallKind.Function, + function: function, + serializedAdditionalRawData: null); } } \ No newline at end of file diff --git a/.dotnet/src/Custom/Chat/ChatToolChoice.Serialization.cs b/.dotnet/src/Custom/Chat/ChatToolChoice.Serialization.cs index 28ebbf323..964fc7566 100644 --- a/.dotnet/src/Custom/Chat/ChatToolChoice.Serialization.cs +++ b/.dotnet/src/Custom/Chat/ChatToolChoice.Serialization.cs @@ -13,9 +13,9 @@ void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOp internal static void SerializeChatToolChoice(ChatToolChoice instance, Utf8JsonWriter writer, ModelReaderWriterOptions options) { - if (instance._isPlainString) + if (instance._predefined) { - writer.WriteStringValue(instance._string); + writer.WriteStringValue(instance._predefinedValue); } else { @@ -39,7 +39,12 @@ internal static ChatToolChoice DeserializeChatToolChoice(JsonElement element, Mo } else if (element.ValueKind == JsonValueKind.String) { - return new ChatToolChoice(element.ToString()); + return new ChatToolChoice( + predefined: true, + predefinedValue: element.ToString(), + type: null, + function: null, + serializedAdditionalRawData: null); } else { @@ -65,7 +70,12 @@ internal static ChatToolChoice 
DeserializeChatToolChoice(JsonElement element, Mo } } serializedAdditionalRawData = rawDataDictionary; - return new ChatToolChoice(function.Name, serializedAdditionalRawData); + return new ChatToolChoice( + predefined: false, + predefinedValue: null, + type: InternalChatCompletionNamedToolChoiceType.Function, + function: new InternalChatCompletionNamedToolChoiceFunction(function.Name), + serializedAdditionalRawData: rawDataDictionary); } } } \ No newline at end of file diff --git a/.dotnet/src/Custom/Chat/ChatToolChoice.cs b/.dotnet/src/Custom/Chat/ChatToolChoice.cs index 0fb3fa258..b82850a78 100644 --- a/.dotnet/src/Custom/Chat/ChatToolChoice.cs +++ b/.dotnet/src/Custom/Chat/ChatToolChoice.cs @@ -4,16 +4,33 @@ namespace OpenAI.Chat; /// -/// Represents tool_choice, the desired manner in which the model should use the tools defined in a -/// chat completion request. +/// The manner in which the model chooses which tool (if any) to call. +/// +/// +/// Call to create a indicating that the +/// model can freely pick between generating a message or calling one or more tools. +/// +/// +/// Call to create a indicating that the +/// model must not call any tools and that instead it must generate a message. +/// +/// +/// Call to create a indicating that the +/// model must call one or more tools. +/// +/// +/// Call to create a indicating +/// that the model must call the specified function. +/// +/// /// [CodeGenModel("ChatCompletionToolChoice")] [CodeGenSuppress("ChatToolChoice", typeof(IDictionary))] public partial class ChatToolChoice { - private readonly bool _isPlainString; - private readonly string _string; - private readonly InternalChatCompletionNamedToolChoiceType _type; + private readonly bool _predefined; + private readonly string _predefinedValue; + private readonly InternalChatCompletionNamedToolChoiceType? 
_type; private readonly InternalChatCompletionNamedToolChoiceFunction _function; private const string AutoValue = "auto"; @@ -25,56 +42,71 @@ internal ChatToolChoice() { } - // CUSTOM: Added custom internal constructor to handle the plain string representation (e.g. "auto", "none", etc.). - internal ChatToolChoice(string predefinedToolChoice) + // CUSTOM: Added to support deserialization. + internal ChatToolChoice(bool predefined, string predefinedValue, InternalChatCompletionNamedToolChoiceType? type, InternalChatCompletionNamedToolChoiceFunction function, IDictionary serializedAdditionalRawData) { - Argument.AssertNotNull(predefinedToolChoice, nameof(predefinedToolChoice)); - - _string = predefinedToolChoice; - _isPlainString = true; + _predefined = predefined; + _predefinedValue = predefinedValue; + _type = type; + _function = function; + SerializedAdditionalRawData = serializedAdditionalRawData; } - // CUSTOM: Added custom public constructor to handle the object representation. /// - /// Creates a new instance of which requests that the model restricts its behavior - /// to calling the specified tool. + /// Creates a new indicating that the model can freely pick between generating a + /// message or calling one or more tools. /// - /// The definition of the tool that the model should call. - public ChatToolChoice(ChatTool tool) + public static ChatToolChoice CreateAutoChoice() { - Argument.AssertNotNull(tool, nameof(tool)); - - _function = new(tool.FunctionName); - _type = InternalChatCompletionNamedToolChoiceType.Function; - _isPlainString = false; + return new ChatToolChoice( + predefined: true, + predefinedValue: AutoValue, + type: null, + function: null, + serializedAdditionalRawData: null); } - // CUSTOM: Added the function name parameter to the constructor that takes additional data to handle the object representation. - /// Initializes a new instance of . - /// The function name. - /// Keeps track of any properties unknown to the library. 
- internal ChatToolChoice(string functionName, IDictionary serializedAdditionalRawData) + /// + /// Creates a new indicating that the model must not call any tools and that + /// instead it must generate a message. + /// + public static ChatToolChoice CreateNoneChoice() { - Argument.AssertNotNull(functionName, nameof(functionName)); - - _function = new(functionName); - _type = InternalChatCompletionNamedToolChoiceType.Function; - _isPlainString = false; - - SerializedAdditionalRawData = serializedAdditionalRawData; + return new ChatToolChoice( + predefined: true, + predefinedValue: NoneValue, + type: null, + function: null, + serializedAdditionalRawData: null); } /// - /// Specifies that the model must freely pick between generating a message or calling one or more tools. + /// Creates a new indicating that the model must call one or more tools. /// - public static ChatToolChoice Auto { get; } = new ChatToolChoice(AutoValue); - /// - /// Specifies that the model must not invoke any tools, and instead it must generate an ordinary message. Note - /// that the tools that were provided may still influence the model's behavior even if they are not called. - /// - public static ChatToolChoice None { get; } = new ChatToolChoice(NoneValue); + public static ChatToolChoice CreateRequiredChoice() + { + return new ChatToolChoice( + predefined: true, + predefinedValue: RequiredValue, + type: null, + function: null, + serializedAdditionalRawData: null); + } + /// - /// Specifies that the model must call one or more tools. + /// Creates a new indicating that the model must call the specified function. /// - public static ChatToolChoice Required { get; } = new ChatToolChoice(RequiredValue); + /// is null. + /// is an empty string, and was expected to be non-empty. 
+ public static ChatToolChoice CreateFunctionChoice(string functionName) + { + Argument.AssertNotNullOrEmpty(functionName, nameof(functionName)); + + return new ChatToolChoice( + predefined: false, + predefinedValue: null, + type: InternalChatCompletionNamedToolChoiceType.Function, + function: new InternalChatCompletionNamedToolChoiceFunction(functionName), + serializedAdditionalRawData: null); + } } diff --git a/.dotnet/src/Custom/Chat/ChatToolKind.cs b/.dotnet/src/Custom/Chat/ChatToolKind.cs index 3970f0f07..1bd3ca393 100644 --- a/.dotnet/src/Custom/Chat/ChatToolKind.cs +++ b/.dotnet/src/Custom/Chat/ChatToolKind.cs @@ -1,4 +1,4 @@ -namespace OpenAI.Chat; +namespace OpenAI.Chat; [CodeGenModel("ChatCompletionToolType")] public readonly partial struct ChatToolKind diff --git a/.dotnet/src/Custom/Chat/FunctionChatMessage.Serialization.cs b/.dotnet/src/Custom/Chat/FunctionChatMessage.Serialization.cs index cb46e2760..752db58d1 100644 --- a/.dotnet/src/Custom/Chat/FunctionChatMessage.Serialization.cs +++ b/.dotnet/src/Custom/Chat/FunctionChatMessage.Serialization.cs @@ -1,6 +1,4 @@ -using System; using System.ClientModel.Primitives; -using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Chat; @@ -16,71 +14,19 @@ internal static void SerializeFunctionChatMessage(FunctionChatMessage instance, instance.WriteCore(writer, options); } - protected override void WriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) + internal override void WriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) { writer.WriteStartObject(); + writer.WritePropertyName("role"u8); + writer.WriteStringValue(Role.ToSerialString()); writer.WritePropertyName("name"u8); writer.WriteStringValue(FunctionName); - writer.WritePropertyName("role"u8); - writer.WriteStringValue(Role); if (Optional.IsCollectionDefined(Content)) { - if (Content[0] != null) - { - writer.WritePropertyName("content"u8); - writer.WriteStringValue(Content[0].Text); - } - else - { - 
writer.WriteNull("content"); - } + writer.WritePropertyName("content"u8); + writer.WriteStringValue(Content?[0]?.Text); } writer.WriteSerializedAdditionalRawData(SerializedAdditionalRawData, options); writer.WriteEndObject(); } - - internal static FunctionChatMessage DeserializeFunctionChatMessage(JsonElement element, ModelReaderWriterOptions options = null) - { - options ??= ModelSerializationExtensions.WireOptions; - - if (element.ValueKind == JsonValueKind.Null) - { - return null; - } - string name = default; - string role = default; - IList content = default; - IDictionary serializedAdditionalRawData = default; - Dictionary rawDataDictionary = new Dictionary(); - foreach (var property in element.EnumerateObject()) - { - if (property.NameEquals("name"u8)) - { - name = property.Value.GetString(); - continue; - } - if (property.NameEquals("role"u8)) - { - role = property.Value.GetString(); - continue; - } - if (property.NameEquals("content"u8)) - { - if (property.Value.ValueKind == JsonValueKind.Null) - { - continue; - } - List array = new List(); - array.Add(ChatMessageContentPart.CreateTextMessageContentPart(property.Value.GetString())); - content = array; - continue; - } - if (true) - { - rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); - } - } - serializedAdditionalRawData = rawDataDictionary; - return new FunctionChatMessage(role, content ?? new ChangeTrackingList(), serializedAdditionalRawData, name); - } } diff --git a/.dotnet/src/Custom/Chat/FunctionChatMessage.cs b/.dotnet/src/Custom/Chat/FunctionChatMessage.cs index 7e03f2d95..10468f427 100644 --- a/.dotnet/src/Custom/Chat/FunctionChatMessage.cs +++ b/.dotnet/src/Custom/Chat/FunctionChatMessage.cs @@ -23,14 +23,11 @@ public partial class FunctionChatMessage : ChatMessage /// restriction (e.g. JSON) imposed on this content. 
/// public FunctionChatMessage(string functionName, string content = null) + : base(ChatMessageRole.Function, content) { Argument.AssertNotNull(functionName, nameof(functionName)); - Role = "function"; FunctionName = functionName; - Content = (content == null) - ? new ChangeTrackingList() - : [ChatMessageContentPart.CreateTextMessageContentPart(content)]; } // CUSTOM: Renamed. diff --git a/.dotnet/src/Custom/Chat/ImageChatMessageContentPartDetail.cs b/.dotnet/src/Custom/Chat/ImageChatMessageContentPartDetail.cs deleted file mode 100644 index 943400a8e..000000000 --- a/.dotnet/src/Custom/Chat/ImageChatMessageContentPartDetail.cs +++ /dev/null @@ -1,6 +0,0 @@ -namespace OpenAI.Chat; - -[CodeGenModel("ChatCompletionRequestMessageContentPartImageImageUrlDetail")] -public readonly partial struct ImageChatMessageContentPartDetail -{ -} diff --git a/.dotnet/src/Custom/Chat/Internal/GeneratorStubs.cs b/.dotnet/src/Custom/Chat/Internal/GeneratorStubs.cs index 5288b2600..87294c512 100644 --- a/.dotnet/src/Custom/Chat/Internal/GeneratorStubs.cs +++ b/.dotnet/src/Custom/Chat/Internal/GeneratorStubs.cs @@ -57,11 +57,17 @@ internal readonly partial struct InternalCreateChatCompletionFunctionResponseCho [CodeGenModel("CreateChatCompletionFunctionResponseObject")] internal readonly partial struct InternalCreateChatCompletionFunctionResponseObject { } +[CodeGenModel("ChatCompletionRequestMessageContentPartRefusal")] +internal partial class InternalChatCompletionRequestMessageContentPartRefusal { } + +[CodeGenModel("ChatCompletionRequestMessageContentPartRefusalType")] +internal readonly partial struct InternalChatCompletionRequestMessageContentPartRefusalType { } + [CodeGenModel("CreateChatCompletionRequestModel")] internal readonly partial struct InternalCreateChatCompletionRequestModel { } -[CodeGenModel("CreateChatCompletionRequestResponseFormatType")] -internal readonly partial struct InternalCreateChatCompletionRequestResponseFormatType { } 
+[CodeGenModel("CreateChatCompletionRequestServiceTier")] +internal readonly partial struct InternalCreateChatCompletionRequestServiceTier { } [CodeGenModel("CreateChatCompletionRequestToolChoice")] internal readonly partial struct InternalCreateChatCompletionRequestToolChoice { } @@ -75,6 +81,9 @@ internal partial class InternalCreateChatCompletionResponseChoiceLogprobs { } [CodeGenModel("CreateChatCompletionResponseObject")] internal readonly partial struct InternalCreateChatCompletionResponseObject { } +[CodeGenModel("CreateChatCompletionResponseServiceTier")] +internal readonly partial struct InternalCreateChatCompletionResponseServiceTier { } + [CodeGenModel("CreateChatCompletionStreamResponseChoice")] internal partial class InternalCreateChatCompletionStreamResponseChoice { } @@ -87,10 +96,11 @@ internal partial class InternalCreateChatCompletionStreamResponseChoiceLogprobs [CodeGenModel("CreateChatCompletionStreamResponseObject")] internal readonly partial struct InternalCreateChatCompletionStreamResponseObject { } +[CodeGenModel("CreateChatCompletionStreamResponseServiceTier")] +internal readonly partial struct InternalCreateChatCompletionStreamResponseServiceTier { } + [CodeGenModel("CreateChatCompletionStreamResponseUsage")] internal partial class InternalCreateChatCompletionStreamResponseUsage { } [CodeGenModel("FunctionParameters")] internal partial class InternalFunctionParameters { } - - diff --git a/.dotnet/src/Custom/Chat/Internal/InternalChatCompletionRequestMessageContentPartImageImageUrl.cs b/.dotnet/src/Custom/Chat/Internal/InternalChatCompletionRequestMessageContentPartImageImageUrl.cs index be29738a8..5e308a3cc 100644 --- a/.dotnet/src/Custom/Chat/Internal/InternalChatCompletionRequestMessageContentPartImageImageUrl.cs +++ b/.dotnet/src/Custom/Chat/Internal/InternalChatCompletionRequestMessageContentPartImageImageUrl.cs @@ -53,10 +53,10 @@ public InternalChatCompletionRequestMessageContentPartImageImageUrl(BinaryData i /// Either a URL of the 
image or the base64 encoded image data. /// Specifies the detail level of the image. Learn more in the [Vision guide](/docs/guides/vision/low-or-high-fidelity-image-understanding). /// Keeps track of any properties unknown to the library. - internal InternalChatCompletionRequestMessageContentPartImageImageUrl(string url, ImageChatMessageContentPartDetail? detail, IDictionary serializedAdditionalRawData) + internal InternalChatCompletionRequestMessageContentPartImageImageUrl(string url, ChatImageDetailLevel? detail, IDictionary serializedAdditionalRawData) { Match parsedDataUri = ParseDataUriRegex().Match(url); - + if (parsedDataUri.Success) { _imageBytes = BinaryData.FromBytes(Convert.FromBase64String(parsedDataUri.Groups["data"].Value)); diff --git a/.dotnet/src/Custom/Chat/Internal/InternalChatCompletionResponseMessage.Serialization.cs b/.dotnet/src/Custom/Chat/Internal/InternalChatCompletionResponseMessage.Serialization.cs index c8fd80d8e..153e86e70 100644 --- a/.dotnet/src/Custom/Chat/Internal/InternalChatCompletionResponseMessage.Serialization.cs +++ b/.dotnet/src/Custom/Chat/Internal/InternalChatCompletionResponseMessage.Serialization.cs @@ -69,6 +69,7 @@ internal static InternalChatCompletionResponseMessage DeserializeInternalChatCom return null; } IReadOnlyList content = default; + string refusal = default; IReadOnlyList toolCalls = default; ChatMessageRole role = default; ChatFunctionCall functionCall = default; @@ -83,10 +84,19 @@ internal static InternalChatCompletionResponseMessage DeserializeInternalChatCom continue; } List array = new List(); - array.Add(ChatMessageContentPart.CreateTextMessageContentPart(property.Value.GetString())); + array.Add(ChatMessageContentPart.CreateTextPart(property.Value.GetString())); content = array; continue; } + if (property.NameEquals("refusal"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + refusal = property.Value.GetString(); + continue; + } if 
(property.NameEquals("tool_calls"u8)) { if (property.Value.ValueKind == JsonValueKind.Null) @@ -121,6 +131,6 @@ internal static InternalChatCompletionResponseMessage DeserializeInternalChatCom } } serializedAdditionalRawData = rawDataDictionary; - return new InternalChatCompletionResponseMessage(content ?? new ChangeTrackingList(), toolCalls ?? new ChangeTrackingList(), role, functionCall, serializedAdditionalRawData); + return new InternalChatCompletionResponseMessage(content ?? new ChangeTrackingList(), refusal, toolCalls ?? new ChangeTrackingList(), role, functionCall, serializedAdditionalRawData); } } diff --git a/.dotnet/src/Custom/Chat/Internal/InternalChatCompletionStreamResponseDelta.Serialization.cs b/.dotnet/src/Custom/Chat/Internal/InternalChatCompletionStreamResponseDelta.Serialization.cs index 0a4fa1ea1..1c4f8ab1b 100644 --- a/.dotnet/src/Custom/Chat/Internal/InternalChatCompletionStreamResponseDelta.Serialization.cs +++ b/.dotnet/src/Custom/Chat/Internal/InternalChatCompletionStreamResponseDelta.Serialization.cs @@ -75,6 +75,7 @@ internal static InternalChatCompletionStreamResponseDelta DeserializeInternalCha StreamingChatFunctionCallUpdate functionCall = default; IReadOnlyList toolCalls = default; ChatMessageRole? 
role = default; + string refusal = default; IDictionary serializedAdditionalRawData = default; Dictionary rawDataDictionary = new Dictionary(); foreach (var property in element.EnumerateObject()) @@ -86,7 +87,7 @@ internal static InternalChatCompletionStreamResponseDelta DeserializeInternalCha continue; } List array = new List(); - array.Add(ChatMessageContentPart.CreateTextMessageContentPart(property.Value.GetString())); + array.Add(ChatMessageContentPart.CreateTextPart(property.Value.GetString())); content = array; continue; } @@ -122,12 +123,21 @@ internal static InternalChatCompletionStreamResponseDelta DeserializeInternalCha role = property.Value.GetString().ToChatMessageRole(); continue; } + if (property.NameEquals("refusal"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + refusal = property.Value.GetString(); + continue; + } if (true) { rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); } } serializedAdditionalRawData = rawDataDictionary; - return new InternalChatCompletionStreamResponseDelta(content ?? new ChangeTrackingList(), functionCall, toolCalls ?? new ChangeTrackingList(), role, serializedAdditionalRawData); + return new InternalChatCompletionStreamResponseDelta(content ?? new ChangeTrackingList(), functionCall, toolCalls ?? 
new ChangeTrackingList(), role, refusal, serializedAdditionalRawData); } } diff --git a/.dotnet/src/Custom/Chat/Internal/InternalChatResponseFormatJsonObject.Serialization.cs b/.dotnet/src/Custom/Chat/Internal/InternalChatResponseFormatJsonObject.Serialization.cs new file mode 100644 index 000000000..586eaf06d --- /dev/null +++ b/.dotnet/src/Custom/Chat/Internal/InternalChatResponseFormatJsonObject.Serialization.cs @@ -0,0 +1,44 @@ +using System.ClientModel.Primitives; +using System.Text.Json; + +namespace OpenAI.Chat; + +[CodeGenSuppress("global::System.ClientModel.Primitives.IJsonModel.Write", typeof(Utf8JsonWriter), typeof(ModelReaderWriterOptions))] +internal partial class InternalChatResponseFormatJsonObject : IJsonModel +{ + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + => CustomSerializationHelpers.SerializeInstance(this, SerializeInternalChatResponseFormatJsonObject, writer, options); + + internal static void SerializeInternalChatResponseFormatJsonObject(InternalChatResponseFormatJsonObject instance, Utf8JsonWriter writer, ModelReaderWriterOptions options) + => instance.WriteCore(writer, options); + + internal override void WriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + writer.WriteStartObject(); + if (SerializedAdditionalRawData?.ContainsKey("type") != true) + { + writer.WritePropertyName("type"u8); + writer.WriteStringValue(Type); + } + if (SerializedAdditionalRawData != null) + { + foreach (var item in SerializedAdditionalRawData) + { + if (ModelSerializationExtensions.IsSentinelValue(item.Value)) + { + continue; + } + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } +} diff --git a/.dotnet/src/Custom/Chat/Internal/InternalChatResponseFormatJsonObject.cs 
b/.dotnet/src/Custom/Chat/Internal/InternalChatResponseFormatJsonObject.cs new file mode 100644 index 000000000..f161ae7a8 --- /dev/null +++ b/.dotnet/src/Custom/Chat/Internal/InternalChatResponseFormatJsonObject.cs @@ -0,0 +1,6 @@ +namespace OpenAI.Chat; + +[CodeGenModel("ChatResponseFormatJsonObject")] +internal partial class InternalChatResponseFormatJsonObject +{ +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Chat/Internal/InternalChatResponseFormatJsonSchema.Serialization.cs b/.dotnet/src/Custom/Chat/Internal/InternalChatResponseFormatJsonSchema.Serialization.cs new file mode 100644 index 000000000..e9b246f77 --- /dev/null +++ b/.dotnet/src/Custom/Chat/Internal/InternalChatResponseFormatJsonSchema.Serialization.cs @@ -0,0 +1,49 @@ +using System.ClientModel.Primitives; +using System.Text.Json; + +namespace OpenAI.Chat; + +[CodeGenSuppress("global::System.ClientModel.Primitives.IJsonModel.Write", typeof(Utf8JsonWriter), typeof(ModelReaderWriterOptions))] +internal partial class InternalChatResponseFormatJsonSchema : IJsonModel +{ + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + => CustomSerializationHelpers.SerializeInstance(this, SerializeInternalChatResponseFormatJsonSchema, writer, options); + + internal static void SerializeInternalChatResponseFormatJsonSchema(InternalChatResponseFormatJsonSchema instance, Utf8JsonWriter writer, ModelReaderWriterOptions options) + => instance.WriteCore(writer, options); + + internal override void WriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + writer.WriteStartObject(); + if (SerializedAdditionalRawData?.ContainsKey("json_schema") != true) + { + writer.WritePropertyName("json_schema"u8); + writer.WriteObjectValue(JsonSchema, options); + } + if (SerializedAdditionalRawData?.ContainsKey("type") != true) + { + writer.WritePropertyName("type"u8); + writer.WriteStringValue(Type); + } + if (SerializedAdditionalRawData != null) + { + foreach (var item in 
SerializedAdditionalRawData) + { + if (ModelSerializationExtensions.IsSentinelValue(item.Value)) + { + continue; + } + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } +} diff --git a/.dotnet/src/Custom/Chat/Internal/InternalChatResponseFormatJsonSchema.cs b/.dotnet/src/Custom/Chat/Internal/InternalChatResponseFormatJsonSchema.cs new file mode 100644 index 000000000..cd9e63b5a --- /dev/null +++ b/.dotnet/src/Custom/Chat/Internal/InternalChatResponseFormatJsonSchema.cs @@ -0,0 +1,11 @@ +using System.ClientModel.Primitives; +using System.Data; +using System.Text.Json; +using System; + +namespace OpenAI.Chat; + +[CodeGenModel("ChatResponseFormatJsonSchema")] +internal partial class InternalChatResponseFormatJsonSchema +{ +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Chat/Internal/InternalChatResponseFormatText.Serialization.cs b/.dotnet/src/Custom/Chat/Internal/InternalChatResponseFormatText.Serialization.cs new file mode 100644 index 000000000..608844c67 --- /dev/null +++ b/.dotnet/src/Custom/Chat/Internal/InternalChatResponseFormatText.Serialization.cs @@ -0,0 +1,44 @@ +using System.ClientModel.Primitives; +using System.Text.Json; + +namespace OpenAI.Chat; + +[CodeGenSuppress("global::System.ClientModel.Primitives.IJsonModel.Write", typeof(Utf8JsonWriter), typeof(ModelReaderWriterOptions))] +internal partial class InternalChatResponseFormatText : IJsonModel +{ + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + => CustomSerializationHelpers.SerializeInstance(this, SerializeInternalChatResponseFormatText, writer, options); + + internal static void SerializeInternalChatResponseFormatText(InternalChatResponseFormatText instance, Utf8JsonWriter writer, ModelReaderWriterOptions options) + => 
instance.WriteCore(writer, options); + + internal override void WriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + writer.WriteStartObject(); + if (SerializedAdditionalRawData?.ContainsKey("type") != true) + { + writer.WritePropertyName("type"u8); + writer.WriteStringValue(Type); + } + if (SerializedAdditionalRawData != null) + { + foreach (var item in SerializedAdditionalRawData) + { + if (ModelSerializationExtensions.IsSentinelValue(item.Value)) + { + continue; + } + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } +} diff --git a/.dotnet/src/Custom/Chat/Internal/InternalChatResponseFormatText.cs b/.dotnet/src/Custom/Chat/Internal/InternalChatResponseFormatText.cs new file mode 100644 index 000000000..3df5a8bf5 --- /dev/null +++ b/.dotnet/src/Custom/Chat/Internal/InternalChatResponseFormatText.cs @@ -0,0 +1,6 @@ +namespace OpenAI.Chat; + +[CodeGenModel("ChatResponseFormatText")] +internal partial class InternalChatResponseFormatText +{ +} diff --git a/.dotnet/src/Custom/Chat/Internal/UnknownChatMessage.Serialization.cs b/.dotnet/src/Custom/Chat/Internal/InternalUnknownChatMessage.Serialization.cs similarity index 66% rename from .dotnet/src/Custom/Chat/Internal/UnknownChatMessage.Serialization.cs rename to .dotnet/src/Custom/Chat/Internal/InternalUnknownChatMessage.Serialization.cs index b3eca176b..89f2efb2f 100644 --- a/.dotnet/src/Custom/Chat/Internal/UnknownChatMessage.Serialization.cs +++ b/.dotnet/src/Custom/Chat/Internal/InternalUnknownChatMessage.Serialization.cs @@ -6,36 +6,27 @@ namespace OpenAI.Chat; [CodeGenSuppress("global::System.ClientModel.Primitives.IJsonModel.Write", typeof(Utf8JsonWriter), typeof(ModelReaderWriterOptions))] -internal partial class UnknownChatMessage : IJsonModel +internal partial 
class InternalUnknownChatMessage : IJsonModel { void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) - => CustomSerializationHelpers.SerializeInstance(this, WriteCore, writer, options); + => CustomSerializationHelpers.SerializeInstance(this, WriteCore, writer, options); - internal static void WriteCore(UnknownChatMessage instance, Utf8JsonWriter writer, ModelReaderWriterOptions options) + internal static void WriteCore(InternalUnknownChatMessage instance, Utf8JsonWriter writer, ModelReaderWriterOptions options) { instance.WriteCore(writer, options); } - protected override void WriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) + internal override void WriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) { writer.WriteStartObject(); writer.WritePropertyName("role"u8); - writer.WriteStringValue(Role); - if (Optional.IsCollectionDefined(Content)) - { - writer.WritePropertyName("content"u8); - writer.WriteStartArray(); - foreach (var item in Content) - { - writer.WriteObjectValue(item, options); - } - writer.WriteEndArray(); - } + writer.WriteStringValue(Role.ToSerialString()); + ChatMessageContentPart.WriteCoreContentPartList(Content, writer, options); writer.WriteSerializedAdditionalRawData(SerializedAdditionalRawData, options); writer.WriteEndObject(); } - internal static UnknownChatMessage DeserializeUnknownChatMessage(JsonElement element, ModelReaderWriterOptions options = null) + internal static InternalUnknownChatMessage DeserializeUnknownChatMessage(JsonElement element, ModelReaderWriterOptions options = null) { options ??= ModelSerializationExtensions.WireOptions; @@ -43,7 +34,7 @@ internal static UnknownChatMessage DeserializeUnknownChatMessage(JsonElement ele { return null; } - string role = "Unknown"; + ChatMessageRole? 
role = null; IList content = default; IDictionary serializedAdditionalRawData = default; Dictionary rawDataDictionary = new Dictionary(); @@ -51,7 +42,7 @@ internal static UnknownChatMessage DeserializeUnknownChatMessage(JsonElement ele { if (property.NameEquals("role"u8)) { - role = property.Value.GetString(); + role = property.Value.GetString().ToChatMessageRole(); continue; } if (property.NameEquals("content"u8)) @@ -74,6 +65,6 @@ internal static UnknownChatMessage DeserializeUnknownChatMessage(JsonElement ele } } serializedAdditionalRawData = rawDataDictionary; - return new UnknownChatMessage(role, content ?? new ChangeTrackingList(), serializedAdditionalRawData); + return new InternalUnknownChatMessage(role.Value, content ?? new ChangeTrackingList(), serializedAdditionalRawData); } } diff --git a/.dotnet/src/Custom/Chat/Internal/UnknownChatMessage.cs b/.dotnet/src/Custom/Chat/Internal/InternalUnknownChatMessage.cs similarity index 56% rename from .dotnet/src/Custom/Chat/Internal/UnknownChatMessage.cs rename to .dotnet/src/Custom/Chat/Internal/InternalUnknownChatMessage.cs index ac4faf00f..cb046b82e 100644 --- a/.dotnet/src/Custom/Chat/Internal/UnknownChatMessage.cs +++ b/.dotnet/src/Custom/Chat/Internal/InternalUnknownChatMessage.cs @@ -1,7 +1,7 @@ namespace OpenAI.Chat; [CodeGenModel("UnknownChatCompletionRequestMessage")] -internal partial class UnknownChatMessage : ChatMessage +internal partial class InternalUnknownChatMessage : ChatMessage { } \ No newline at end of file diff --git a/.dotnet/src/Custom/Chat/Internal/InternalUnknownChatResponseFormat.Serialization.cs b/.dotnet/src/Custom/Chat/Internal/InternalUnknownChatResponseFormat.Serialization.cs new file mode 100644 index 000000000..0f7f3cec4 --- /dev/null +++ b/.dotnet/src/Custom/Chat/Internal/InternalUnknownChatResponseFormat.Serialization.cs @@ -0,0 +1,44 @@ +using System.ClientModel.Primitives; +using System.Text.Json; + +namespace OpenAI.Chat; + 
+[CodeGenSuppress("global::System.ClientModel.Primitives.IJsonModel.Write", typeof(Utf8JsonWriter), typeof(ModelReaderWriterOptions))] +internal partial class InternalUnknownChatResponseFormat : IJsonModel +{ + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + => CustomSerializationHelpers.SerializeInstance(this, SerializeChatResponseFormat, writer, options); + + internal static void SerializeChatResponseFormat(InternalUnknownChatResponseFormat instance, Utf8JsonWriter writer, ModelReaderWriterOptions options) + => instance.WriteCore(writer, options); + + internal override void WriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + writer.WriteStartObject(); + if (SerializedAdditionalRawData?.ContainsKey("type") != true) + { + writer.WritePropertyName("type"u8); + writer.WriteStringValue(Type); + } + if (SerializedAdditionalRawData != null) + { + foreach (var item in SerializedAdditionalRawData) + { + if (ModelSerializationExtensions.IsSentinelValue(item.Value)) + { + continue; + } + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } +} diff --git a/.dotnet/src/Custom/Chat/Internal/InternalUnknownChatResponseFormat.cs b/.dotnet/src/Custom/Chat/Internal/InternalUnknownChatResponseFormat.cs new file mode 100644 index 000000000..b869078fc --- /dev/null +++ b/.dotnet/src/Custom/Chat/Internal/InternalUnknownChatResponseFormat.cs @@ -0,0 +1,6 @@ +namespace OpenAI.Chat; + +[CodeGenModel("UnknownChatResponseFormat")] +internal partial class InternalUnknownChatResponseFormat +{ +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Chat/OpenAIChatModelFactory.cs b/.dotnet/src/Custom/Chat/OpenAIChatModelFactory.cs new file mode 100644 index 000000000..4ba814372 --- /dev/null +++ 
b/.dotnet/src/Custom/Chat/OpenAIChatModelFactory.cs @@ -0,0 +1,187 @@ +using System; +using System.Collections.Generic; +using System.Linq; + +namespace OpenAI.Chat; + +/// Model factory for models. +public static partial class OpenAIChatModelFactory +{ + /// Initializes a new instance of . + /// A new instance for mocking. + public static ChatCompletion ChatCompletion( + string id = null, + ChatFinishReason finishReason = default, + IEnumerable content = null, + string refusal = null, + IEnumerable toolCalls = null, + ChatMessageRole role = default, + ChatFunctionCall functionCall = null, + IEnumerable contentTokenLogProbabilities = null, + IEnumerable refusalTokenLogProbabilities = null, + DateTimeOffset createdAt = default, + string model = null, + string systemFingerprint = null, + ChatTokenUsage usage = null) + { + content ??= new List(); + toolCalls ??= new List(); + contentTokenLogProbabilities ??= new List(); + refusalTokenLogProbabilities ??= new List(); + + InternalChatCompletionResponseMessage message = new InternalChatCompletionResponseMessage( + content.ToList(), + refusal, + toolCalls.ToList(), + role, + functionCall, + serializedAdditionalRawData: null); + + InternalCreateChatCompletionResponseChoiceLogprobs logprobs = new InternalCreateChatCompletionResponseChoiceLogprobs( + contentTokenLogProbabilities.ToList(), + refusalTokenLogProbabilities.ToList(), + serializedAdditionalRawData: null); + + IReadOnlyList choices = [ + new InternalCreateChatCompletionResponseChoice( + finishReason, + index: 0, + message, + logprobs, + serializedAdditionalRawData: null) + ]; + + return new ChatCompletion( + id, + choices, + createdAt, + model, + serviceTier: null, + systemFingerprint, + InternalCreateChatCompletionResponseObject.ChatCompletion, + usage, + serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// A new instance for mocking. 
+ public static ChatTokenLogProbabilityInfo ChatTokenLogProbabilityInfo(string token = null, float logProbability = default, IEnumerable utf8ByteValues = null, IEnumerable topLogProbabilities = null) + { + utf8ByteValues ??= new List(); + topLogProbabilities ??= new List(); + + return new ChatTokenLogProbabilityInfo( + token, + logProbability, + utf8ByteValues.ToList(), + topLogProbabilities.ToList(), + serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// A new instance for mocking. + public static ChatTokenTopLogProbabilityInfo ChatTokenTopLogProbabilityInfo(string token = null, float logProbability = default, IEnumerable utf8ByteValues = null) + { + utf8ByteValues ??= new List(); + + return new ChatTokenTopLogProbabilityInfo( + token, + logProbability, + utf8ByteValues.ToList(), + serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// A new instance for mocking. + public static ChatTokenUsage ChatTokenUsage(int outputTokens = default, int inputTokens = default, int totalTokens = default) + { + return new ChatTokenUsage( + outputTokens, + inputTokens, + totalTokens, + serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// A new instance for mocking. + public static StreamingChatCompletionUpdate StreamingChatCompletionUpdate( + string id = null, + IEnumerable contentUpdate = null, + StreamingChatFunctionCallUpdate functionCallUpdate = null, + IEnumerable toolCallUpdates = null, + ChatMessageRole? role = null, + string refusalUpdate = null, + IEnumerable contentTokenLogProbabilities = null, + IEnumerable refusalTokenLogProbabilities = null, + ChatFinishReason? 
finishReason = null, + DateTimeOffset createdAt = default, + string model = null, + string systemFingerprint = null, + ChatTokenUsage usage = null) + { + contentUpdate ??= new List(); + toolCallUpdates ??= new List(); + contentTokenLogProbabilities ??= new List(); + refusalTokenLogProbabilities ??= new List(); + + InternalChatCompletionStreamResponseDelta delta = new InternalChatCompletionStreamResponseDelta( + contentUpdate.ToList(), + functionCallUpdate, + toolCallUpdates.ToList(), + role, + refusalUpdate, + serializedAdditionalRawData: null); + + InternalCreateChatCompletionStreamResponseChoiceLogprobs logprobs = new InternalCreateChatCompletionStreamResponseChoiceLogprobs( + contentTokenLogProbabilities.ToList(), + refusalTokenLogProbabilities.ToList(), + serializedAdditionalRawData: null); + + IReadOnlyList choices = [ + new InternalCreateChatCompletionStreamResponseChoice( + delta, + logprobs, + finishReason, + index: 0, + serializedAdditionalRawData: null) + ]; + + return new StreamingChatCompletionUpdate( + id, + choices, + createdAt, + model, + serviceTier: null, + systemFingerprint, + InternalCreateChatCompletionStreamResponseObject.ChatCompletionChunk, + usage, + serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// A new instance for mocking. + public static StreamingChatFunctionCallUpdate StreamingChatFunctionCallUpdate(string functionArgumentsUpdate = null, string functionName = null) + { + return new StreamingChatFunctionCallUpdate( + functionArgumentsUpdate, + functionName, + serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// A new instance for mocking. 
+ public static StreamingChatToolCallUpdate StreamingChatToolCallUpdate(int index = default, string id = null, ChatToolCallKind kind = default, string functionName = null, string functionArgumentsUpdate = null) + { + InternalChatCompletionMessageToolCallChunkFunction function = new InternalChatCompletionMessageToolCallChunkFunction( + functionName, + functionArgumentsUpdate, + serializedAdditionalRawData: null); + + return new StreamingChatToolCallUpdate( + index, + id, + kind, + function, + serializedAdditionalRawData: null); + } +} diff --git a/.dotnet/src/Custom/Chat/Internal/AsyncStreamingChatCompletionUpdateCollection.cs b/.dotnet/src/Custom/Chat/Streaming/InternalAsyncStreamingChatCompletionUpdateCollection.cs similarity index 92% rename from .dotnet/src/Custom/Chat/Internal/AsyncStreamingChatCompletionUpdateCollection.cs rename to .dotnet/src/Custom/Chat/Streaming/InternalAsyncStreamingChatCompletionUpdateCollection.cs index d3bc8ae90..55e304d31 100644 --- a/.dotnet/src/Custom/Chat/Internal/AsyncStreamingChatCompletionUpdateCollection.cs +++ b/.dotnet/src/Custom/Chat/Streaming/InternalAsyncStreamingChatCompletionUpdateCollection.cs @@ -15,11 +15,11 @@ namespace OpenAI.Chat; /// /// Implementation of collection abstraction over streaming chat updates. 
/// -internal class AsyncStreamingChatCompletionUpdateCollection : AsyncCollectionResult +internal class InternalAsyncStreamingChatCompletionUpdateCollection : AsyncCollectionResult { private readonly Func> _getResultAsync; - public AsyncStreamingChatCompletionUpdateCollection(Func> getResultAsync) : base() + public InternalAsyncStreamingChatCompletionUpdateCollection(Func> getResultAsync) : base() { Argument.AssertNotNull(getResultAsync, nameof(getResultAsync)); @@ -36,7 +36,7 @@ private sealed class AsyncStreamingChatUpdateEnumerator : IAsyncEnumerator TerminalData => "[DONE]"u8; private readonly Func> _getResultAsync; - private readonly AsyncStreamingChatCompletionUpdateCollection _enumerable; + private readonly InternalAsyncStreamingChatCompletionUpdateCollection _enumerable; private readonly CancellationToken _cancellationToken; // These enumerators represent what is effectively a doubly-nested @@ -53,7 +53,7 @@ private sealed class AsyncStreamingChatUpdateEnumerator : IAsyncEnumerator> getResultAsync, - AsyncStreamingChatCompletionUpdateCollection enumerable, + InternalAsyncStreamingChatCompletionUpdateCollection enumerable, CancellationToken cancellationToken) { Debug.Assert(getResultAsync is not null); diff --git a/.dotnet/src/Custom/Chat/Internal/StreamingChatCompletionUpdateCollection.cs b/.dotnet/src/Custom/Chat/Streaming/InternalStreamingChatCompletionUpdateCollection.cs similarity index 92% rename from .dotnet/src/Custom/Chat/Internal/StreamingChatCompletionUpdateCollection.cs rename to .dotnet/src/Custom/Chat/Streaming/InternalStreamingChatCompletionUpdateCollection.cs index bc5d360d7..055d6d844 100644 --- a/.dotnet/src/Custom/Chat/Internal/StreamingChatCompletionUpdateCollection.cs +++ b/.dotnet/src/Custom/Chat/Streaming/InternalStreamingChatCompletionUpdateCollection.cs @@ -14,11 +14,11 @@ namespace OpenAI.Chat; /// /// Implementation of collection abstraction over streaming chat updates. 
/// -internal class StreamingChatCompletionUpdateCollection : CollectionResult +internal class InternalStreamingChatCompletionUpdateCollection : CollectionResult { private readonly Func _getResult; - public StreamingChatCompletionUpdateCollection(Func getResult) : base() + public InternalStreamingChatCompletionUpdateCollection(Func getResult) : base() { Argument.AssertNotNull(getResult, nameof(getResult)); @@ -35,7 +35,7 @@ private sealed class StreamingChatUpdateEnumerator : IEnumerator TerminalData => "[DONE]"u8; private readonly Func _getResult; - private readonly StreamingChatCompletionUpdateCollection _enumerable; + private readonly InternalStreamingChatCompletionUpdateCollection _enumerable; // These enumerators represent what is effectively a doubly-nested // loop over the outer event collection and the inner update collection, @@ -51,7 +51,7 @@ private sealed class StreamingChatUpdateEnumerator : IEnumerator getResult, - StreamingChatCompletionUpdateCollection enumerable) + InternalStreamingChatCompletionUpdateCollection enumerable) { Debug.Assert(getResult is not null); Debug.Assert(enumerable is not null); diff --git a/.dotnet/src/Custom/Chat/StreamingChatCompletionUpdate.cs b/.dotnet/src/Custom/Chat/Streaming/StreamingChatCompletionUpdate.cs similarity index 86% rename from .dotnet/src/Custom/Chat/StreamingChatCompletionUpdate.cs rename to .dotnet/src/Custom/Chat/Streaming/StreamingChatCompletionUpdate.cs index 84c2f9b39..f67deba92 100644 --- a/.dotnet/src/Custom/Chat/StreamingChatCompletionUpdate.cs +++ b/.dotnet/src/Custom/Chat/Streaming/StreamingChatCompletionUpdate.cs @@ -13,6 +13,7 @@ public partial class StreamingChatCompletionUpdate private IReadOnlyList _contentUpdate; private IReadOnlyList _toolCallUpdates; private IReadOnlyList _contentTokenLogProbabilities; + private IReadOnlyList _refusalTokenLogProbabilities; // CUSTOM: // - Made private. This property does not add value in the context of a strongly-typed class. 
@@ -26,7 +27,7 @@ public partial class StreamingChatCompletionUpdate /// A list of chat completion choices. Can contain more than one elements if `n` is greater than 1. Can also be empty for the /// last chunk if you set `stream_options: {"include_usage": true}`. /// - [CodeGenMember("Choices")] + [CodeGenMember("Choices")] internal IReadOnlyList Choices { get; } // CUSTOM: Renamed. @@ -42,12 +43,16 @@ public partial class StreamingChatCompletionUpdate [CodeGenMember("Usage")] public ChatTokenUsage Usage { get; } + // CUSTOM: Made internal. + [CodeGenMember("ServiceTier")] + internal InternalCreateChatCompletionStreamResponseServiceTier? ServiceTier { get; } + // CUSTOM: Flattened choice property. /// /// Gets the associated with this update. /// public ChatFinishReason? FinishReason => (Choices.Count > 0) - ? Choices[0].FinishReason + ? Choices[0].FinishReason : null; // CUSTOM: Flattened choice logprobs property. @@ -58,6 +63,11 @@ public partial class StreamingChatCompletionUpdate ? Choices[0].Logprobs.Content : _contentTokenLogProbabilities ??= new ChangeTrackingList(); + // CUSTOM: Flattened refusal logprobs property. + public IReadOnlyList RefusalTokenLogProbabilities => (Choices.Count > 0 && Choices[0].Logprobs != null) + ? Choices[0].Logprobs.Refusal + : _refusalTokenLogProbabilities ??= new ChangeTrackingList(); + // CUSTOM: Flattened choice delta property. /// /// Gets the content fragment associated with this update. @@ -100,6 +110,11 @@ public partial class StreamingChatCompletionUpdate ? Choices[0].Delta.FunctionCall : null; + // CUSTOM: Flattened choice delta property. + public string RefusalUpdate => (Choices.Count > 0) + ? 
Choices[0].Delta?.Refusal + : null; + internal static List DeserializeStreamingChatCompletionUpdates(JsonElement element) { return [StreamingChatCompletionUpdate.DeserializeStreamingChatCompletionUpdate(element)]; diff --git a/.dotnet/src/Custom/Chat/StreamingChatFunctionCallUpdate.cs b/.dotnet/src/Custom/Chat/Streaming/StreamingChatFunctionCallUpdate.cs similarity index 100% rename from .dotnet/src/Custom/Chat/StreamingChatFunctionCallUpdate.cs rename to .dotnet/src/Custom/Chat/Streaming/StreamingChatFunctionCallUpdate.cs diff --git a/.dotnet/src/Custom/Chat/StreamingChatToolCallUpdate.cs b/.dotnet/src/Custom/Chat/Streaming/StreamingChatToolCallUpdate.cs similarity index 100% rename from .dotnet/src/Custom/Chat/StreamingChatToolCallUpdate.cs rename to .dotnet/src/Custom/Chat/Streaming/StreamingChatToolCallUpdate.cs diff --git a/.dotnet/src/Custom/Chat/SystemChatMessage.Serialization.cs b/.dotnet/src/Custom/Chat/SystemChatMessage.Serialization.cs index 6e98120cb..c5f7471b2 100644 --- a/.dotnet/src/Custom/Chat/SystemChatMessage.Serialization.cs +++ b/.dotnet/src/Custom/Chat/SystemChatMessage.Serialization.cs @@ -1,6 +1,4 @@ -using System; using System.ClientModel.Primitives; -using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Chat; @@ -14,21 +12,13 @@ void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWrite internal static void SerializeSystemChatMessage(SystemChatMessage instance, Utf8JsonWriter writer, ModelReaderWriterOptions options) => instance.WriteCore(writer, options); - protected override void WriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) + internal override void WriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) { writer.WriteStartObject(); - if (Optional.IsDefined(ParticipantName)) - { - writer.WritePropertyName("name"u8); - writer.WriteStringValue(ParticipantName); - } writer.WritePropertyName("role"u8); - writer.WriteStringValue(Role); - if (Optional.IsCollectionDefined(Content)) - { - 
writer.WritePropertyName("content"u8); - writer.WriteStringValue(Content[0].Text); - } + writer.WriteStringValue(Role.ToSerialString()); + ChatMessageContentPart.WriteCoreContentPartList(Content, writer, options); + writer.WriteOptionalProperty("name"u8, ParticipantName, options); writer.WriteSerializedAdditionalRawData(SerializedAdditionalRawData, options); writer.WriteEndObject(); } diff --git a/.dotnet/src/Custom/Chat/SystemChatMessage.cs b/.dotnet/src/Custom/Chat/SystemChatMessage.cs index 21de09d3f..ed991f4d5 100644 --- a/.dotnet/src/Custom/Chat/SystemChatMessage.cs +++ b/.dotnet/src/Custom/Chat/SystemChatMessage.cs @@ -9,21 +9,40 @@ namespace OpenAI.Chat; /// restrictions for a model-based assistant. /// [CodeGenModel("ChatCompletionRequestSystemMessage")] -[CodeGenSuppress("SystemChatMessage", typeof(IEnumerable))] -// [CodeGenSuppress("SystemChatMessage", typeof(string), typeof(IList), typeof(IDictionary), typeof(string))] public partial class SystemChatMessage : ChatMessage { /// - /// Creates a new instance of . + /// Creates a new instance of using a collection of content items. + /// For system messages, these can only be of type text. /// - /// The system message text that guides the model's behavior. + /// + /// The collection of content items associated with the message. + /// + public SystemChatMessage(IEnumerable contentParts) + : base(ChatMessageRole.System, contentParts) + { } + + /// + /// Creates a new instance of using a collection of content items. + /// For system messages, these can only be of type text. + /// + /// + /// The collection of content items associated with the message. + /// + public SystemChatMessage(params ChatMessageContentPart[] contentParts) + : base(ChatMessageRole.System, contentParts) + { + Argument.AssertNotNullOrEmpty(contentParts, nameof(contentParts)); + } + + /// + /// Creates a new instance of with a single item of text content. + /// + /// The text content of the message. 
public SystemChatMessage(string content) + : base(ChatMessageRole.System, content) { Argument.AssertNotNull(content, nameof(content)); - - Role = "system"; - Content = new ChangeTrackingList( - (IList)[ChatMessageContentPart.CreateTextMessageContentPart(content)]); } /// diff --git a/.dotnet/src/Custom/Chat/ToolChatMessage.Serialization.cs b/.dotnet/src/Custom/Chat/ToolChatMessage.Serialization.cs index a32e807f8..85b75c58d 100644 --- a/.dotnet/src/Custom/Chat/ToolChatMessage.Serialization.cs +++ b/.dotnet/src/Custom/Chat/ToolChatMessage.Serialization.cs @@ -1,6 +1,4 @@ -using System; using System.ClientModel.Primitives; -using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Chat; @@ -14,18 +12,14 @@ void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterO internal static void SerializeToolChatMessage(ToolChatMessage instance, Utf8JsonWriter writer, ModelReaderWriterOptions options) => instance.WriteCore(writer, options); - protected override void WriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) + internal override void WriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) { writer.WriteStartObject(); + writer.WritePropertyName("role"u8); + writer.WriteStringValue(Role.ToSerialString()); writer.WritePropertyName("tool_call_id"u8); writer.WriteStringValue(ToolCallId); - writer.WritePropertyName("role"u8); - writer.WriteStringValue(Role); - if (Optional.IsCollectionDefined(Content)) - { - writer.WritePropertyName("content"u8); - writer.WriteStringValue(Content[0].Text); - } + ChatMessageContentPart.WriteCoreContentPartList(Content, writer, options); writer.WriteSerializedAdditionalRawData(SerializedAdditionalRawData, options); writer.WriteEndObject(); } diff --git a/.dotnet/src/Custom/Chat/ToolChatMessage.cs b/.dotnet/src/Custom/Chat/ToolChatMessage.cs index 92936ba29..c85f8efed 100644 --- a/.dotnet/src/Custom/Chat/ToolChatMessage.cs +++ b/.dotnet/src/Custom/Chat/ToolChatMessage.cs @@ -24,21 +24,56 @@ 
namespace OpenAI.Chat; public partial class ToolChatMessage : ChatMessage { /// - /// Creates a new instance of . + /// Creates a new instance of using a collection of content items. + /// For tool messages, these can only be of type text. /// - /// The id correlating to a made by the model. - /// - /// The textual content, produced by the defined tool in response to the correlated , - /// that resolves the tool call and allows the logical conversation to continue. No format restrictions (e.g. - /// JSON) are imposed on the content emitted by tools. + /// + /// The ID of the tool call that this message responds to. /// + /// + /// The collection of content items associated with the message. + /// + public ToolChatMessage(string toolCallId, IEnumerable contentParts) + : base(ChatMessageRole.Tool, contentParts) + { + Argument.AssertNotNullOrEmpty(toolCallId, nameof(toolCallId)); + Argument.AssertNotNullOrEmpty(contentParts, nameof(contentParts)); + + ToolCallId = toolCallId; + } + + /// + /// Creates a new instance of using a collection of content items. + /// For tool messages, these can only be of type text. + /// + /// + /// The ID of the tool call that this message responds to. + /// + /// + /// The collection of content items associated with the message. + /// + public ToolChatMessage(string toolCallId, params ChatMessageContentPart[] contentParts) + : base(ChatMessageRole.Tool, contentParts) + { + Argument.AssertNotNullOrEmpty(toolCallId, nameof(toolCallId)); + Argument.AssertNotNullOrEmpty(contentParts, nameof(contentParts)); + + ToolCallId = toolCallId; + } + + /// + /// Creates a new instance of with a single item of text content. + /// + /// + /// The ID of the tool call that this message responds to. + /// + /// The text content of the message. 
public ToolChatMessage(string toolCallId, string content) + : base(ChatMessageRole.Tool, content) { - Argument.AssertNotNull(toolCallId, nameof(toolCallId)); + Argument.AssertNotNullOrEmpty(toolCallId, nameof(toolCallId)); Argument.AssertNotNull(content, nameof(content)); - Role = "tool"; ToolCallId = toolCallId; - Content = [ChatMessageContentPart.CreateTextMessageContentPart(content)]; } } diff --git a/.dotnet/src/Custom/Chat/UserChatMessage.Serialization.cs b/.dotnet/src/Custom/Chat/UserChatMessage.Serialization.cs index 9dd81b1b8..e08d9cd9c 100644 --- a/.dotnet/src/Custom/Chat/UserChatMessage.Serialization.cs +++ b/.dotnet/src/Custom/Chat/UserChatMessage.Serialization.cs @@ -1,6 +1,4 @@ -using System; using System.ClientModel.Primitives; -using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Chat; @@ -14,33 +12,13 @@ void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterO internal static void SerializeUserChatMessage(UserChatMessage instance, Utf8JsonWriter writer, ModelReaderWriterOptions options) => instance.WriteCore(writer, options); - protected override void WriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) + internal override void WriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) { writer.WriteStartObject(); - if (Optional.IsDefined(ParticipantName)) - { - writer.WritePropertyName("name"u8); - writer.WriteStringValue(ParticipantName); - } writer.WritePropertyName("role"u8); - writer.WriteStringValue(Role); - if (Optional.IsCollectionDefined(Content)) - { - writer.WritePropertyName("content"u8); - if (Content.Count == 1 && !string.IsNullOrEmpty(Content[0].Text)) - { - writer.WriteStringValue(Content[0].Text); - } - else - { - writer.WriteStartArray(); - foreach (var item in Content) - { - writer.WriteObjectValue(item, options); - } - writer.WriteEndArray(); - } - } + writer.WriteStringValue(Role.ToSerialString()); + ChatMessageContentPart.WriteCoreContentPartList(Content, writer, options); + 
writer.WriteOptionalProperty("name"u8, ParticipantName, options); writer.WriteSerializedAdditionalRawData(SerializedAdditionalRawData, options); writer.WriteEndObject(); } diff --git a/.dotnet/src/Custom/Chat/UserChatMessage.cs b/.dotnet/src/Custom/Chat/UserChatMessage.cs index 0b2f3ef5d..18a881117 100644 --- a/.dotnet/src/Custom/Chat/UserChatMessage.cs +++ b/.dotnet/src/Custom/Chat/UserChatMessage.cs @@ -1,6 +1,5 @@ using System; using System.Collections.Generic; -using System.Linq; namespace OpenAI.Chat; @@ -13,48 +12,40 @@ namespace OpenAI.Chat; [CodeGenSuppress("UserChatMessage", typeof(ReadOnlyMemory))] public partial class UserChatMessage : ChatMessage { - /// - /// Creates a new instance of with ordinary text content. - /// - /// The textual content associated with the message. - public UserChatMessage(string content) - { - Argument.AssertNotNull(content, nameof(content)); - - Role = "user"; - Content = [ChatMessageContentPart.CreateTextMessageContentPart(content)]; - } - /// /// Creates a new instance of using a collection of content items that can /// include text and image information. This content format is currently only applicable to the - /// gpt-4-vision-preview model and will not be accepted by other models. + /// gpt-4o and later models and will not be accepted by older models. /// /// /// The collection of text and image content items associated with the message. /// public UserChatMessage(IEnumerable content) + : base(ChatMessageRole.User, content) { Argument.AssertNotNullOrEmpty(content, nameof(content)); - - Role = "user"; - Content = content.ToList(); } /// /// Creates a new instance of using a collection of content items that can /// include text and image information. This content format is currently only applicable to the - /// gpt-4-vision-preview model and will not be accepted by other models. + /// gpt-4o and later models and will not be accepted by older models. 
/// /// /// The collection of text and image content items associated with the message. /// public UserChatMessage(params ChatMessageContentPart[] content) - { - Argument.AssertNotNullOrEmpty(content, nameof(content)); + : base(ChatMessageRole.User, content) + { } - Role = "user"; - Content = content.ToList(); + /// + /// Creates a new instance of with ordinary text content. + /// + /// The textual content associated with the message. + public UserChatMessage(string content) + : base(ChatMessageRole.User, content) + { + Argument.AssertNotNull(content, nameof(content)); } // CUSTOM: Rename. diff --git a/.dotnet/src/Custom/Common/Internal/GeneratorStubs.cs b/.dotnet/src/Custom/Common/Internal/GeneratorStubs.cs new file mode 100644 index 000000000..a6172b6ac --- /dev/null +++ b/.dotnet/src/Custom/Common/Internal/GeneratorStubs.cs @@ -0,0 +1,19 @@ +namespace OpenAI.Internal; + +[CodeGenModel("OmniTypedResponseFormat")] +internal partial class InternalOmniTypedResponseFormat { } + +[CodeGenModel("ResponseFormatJsonObject")] +internal partial class InternalResponseFormatJsonObject { } + +[CodeGenModel("ResponseFormatJsonSchema")] +internal partial class InternalResponseFormatJsonSchema { } + +[CodeGenModel("ResponseFormatJsonSchemaSchema")] +internal partial class InternalResponseFormatJsonSchemaSchema { } + +[CodeGenModel("ResponseFormatText")] +internal partial class InternalResponseFormatText { } + +[CodeGenModel("UnknownOmniTypedResponseFormat")] +internal partial class InternalUnknownOmniTypedResponseFormat { } diff --git a/.dotnet/src/Custom/Common/Internal/InternalResponseFormatJsonSchemaJsonSchema.cs b/.dotnet/src/Custom/Common/Internal/InternalResponseFormatJsonSchemaJsonSchema.cs new file mode 100644 index 000000000..7fa1d70fd --- /dev/null +++ b/.dotnet/src/Custom/Common/Internal/InternalResponseFormatJsonSchemaJsonSchema.cs @@ -0,0 +1,10 @@ +using System; + +namespace OpenAI.Internal; + +[CodeGenModel("ResponseFormatJsonSchemaJsonSchema")] +internal partial 
class InternalResponseFormatJsonSchemaJsonSchema +{ + [CodeGenMember("Schema")] + public BinaryData Schema { get; set; } +} \ No newline at end of file diff --git a/.dotnet/src/Custom/Common/ListOrder.cs b/.dotnet/src/Custom/Common/ListOrder.cs deleted file mode 100644 index 41d3e065c..000000000 --- a/.dotnet/src/Custom/Common/ListOrder.cs +++ /dev/null @@ -1,12 +0,0 @@ -namespace OpenAI; - -[CodeGenModel("ListOrder")] -public readonly partial struct ListOrder -{ - // CUSTOM: Rename members. - - [CodeGenMember("Asc")] - public static ListOrder OldestFirst { get; } = new ListOrder(OldestFirstValue); - [CodeGenMember("Desc")] - public static ListOrder NewestFirst { get; } = new ListOrder(NewestFirstValue); -} \ No newline at end of file diff --git a/.dotnet/src/Custom/Embeddings/EmbeddingClient.cs b/.dotnet/src/Custom/Embeddings/EmbeddingClient.cs index 2280c9b54..eae8dad2c 100644 --- a/.dotnet/src/Custom/Embeddings/EmbeddingClient.cs +++ b/.dotnet/src/Custom/Embeddings/EmbeddingClient.cs @@ -8,7 +8,11 @@ namespace OpenAI.Embeddings; -/// The service client for the OpenAI Embeddings endpoint. +// CUSTOM: +// - Renamed. +// - Suppressed constructor that takes endpoint parameter; endpoint is now a property in the options class. +// - Suppressed methods that only take the options parameter. +/// The service client for OpenAI embedding operations. [CodeGenClient("Embeddings")] [CodeGenSuppress("EmbeddingClient", typeof(ClientPipeline), typeof(ApiKeyCredential), typeof(Uri))] [CodeGenSuppress("CreateEmbeddingAsync", typeof(EmbeddingGenerationOptions))] @@ -19,63 +23,65 @@ public partial class EmbeddingClient // CUSTOM: // - Added `model` parameter. - // - Added support for retrieving credential and endpoint from environment variables. - - /// - /// Initializes a new instance of that will use an API key when authenticating. - /// - /// The model name to use for audio operations. - /// The API key used to authenticate with the service endpoint. 
- /// Additional options to customize the client. - /// The provided was null. - public EmbeddingClient(string model, ApiKeyCredential credential, OpenAIClientOptions options = default) - : this( - OpenAIClient.CreatePipeline(OpenAIClient.GetApiKey(credential, requireExplicitCredential: true), options), - model, - OpenAIClient.GetEndpoint(options), - options) - {} - - /// - /// Initializes a new instance of that will use an API key from the OPENAI_API_KEY - /// environment variable when authenticating. - /// - /// - /// To provide an explicit credential instead of using the environment variable, use an alternate constructor like - /// . - /// - /// The model name to use for audio operations. - /// Additional options to customize the client. - /// The OPENAI_API_KEY environment variable was not found. - public EmbeddingClient(string model, OpenAIClientOptions options = default) - : this( - OpenAIClient.CreatePipeline(OpenAIClient.GetApiKey(), options), - model, - OpenAIClient.GetEndpoint(options), - options) - {} + // - Used a custom pipeline. + // - Demoted the endpoint parameter to be a property in the options class. + /// Initializes a new instance of . + /// The name of the model to use in requests sent to the service. To learn more about the available models, see . + /// The API key to authenticate with the service. + /// or is null. + /// is an empty string, and was expected to be non-empty. + public EmbeddingClient(string model, ApiKeyCredential credential) : this(model, credential, new OpenAIClientOptions()) + { + } // CUSTOM: // - Added `model` parameter. + // - Used a custom pipeline. + // - Demoted the endpoint parameter to be a property in the options class. + /// Initializes a new instance of . + /// The name of the model to use in requests sent to the service. To learn more about the available models, see . + /// The API key to authenticate with the service. + /// The options to configure the client. + /// or is null. 
+ /// is an empty string, and was expected to be non-empty. + public EmbeddingClient(string model, ApiKeyCredential credential, OpenAIClientOptions options) + { + Argument.AssertNotNullOrEmpty(model, nameof(model)); + Argument.AssertNotNull(credential, nameof(credential)); + options ??= new OpenAIClientOptions(); - /// Initializes a new instance of EmbeddingClient. - /// The HTTP pipeline for sending and receiving REST requests and responses. - /// The HTTP pipeline for sending and receiving REST requests and responses. - /// OpenAI Endpoint. - protected internal EmbeddingClient(ClientPipeline pipeline, string model, Uri endpoint, OpenAIClientOptions options) + _model = model; + _pipeline = OpenAIClient.CreatePipeline(credential, options); + _endpoint = OpenAIClient.GetEndpoint(options); + } + + // CUSTOM: + // - Added `model` parameter. + // - Used a custom pipeline. + // - Demoted the endpoint parameter to be a property in the options class. + // - Made protected. + /// Initializes a new instance of . + /// The HTTP pipeline to send and receive REST requests and responses. + /// The name of the model to use in requests sent to the service. To learn more about the available models, see . + /// The options to configure the client. + /// or is null. + /// is an empty string, and was expected to be non-empty. + protected internal EmbeddingClient(ClientPipeline pipeline, string model, OpenAIClientOptions options) { + Argument.AssertNotNull(pipeline, nameof(pipeline)); Argument.AssertNotNullOrEmpty(model, nameof(model)); + options ??= new OpenAIClientOptions(); - _pipeline = pipeline; _model = model; - _endpoint = endpoint; + _pipeline = pipeline; + _endpoint = OpenAIClient.GetEndpoint(options); } // CUSTOM: Added to simplify generating a single embedding from a string input. - /// Creates an embedding vector representing the input text. - /// The string that will be turned into an embedding. - /// The to use. - /// A token that can be used to cancel this method call. 
+ /// Generates an embedding representing the text input. + /// The text input to generate an embedding for. + /// The options to configure the embedding generation. + /// A token that can be used to cancel this method call. /// is null. /// is an empty string, and was expected to be non-empty. public virtual async Task> GenerateEmbeddingAsync(string input, EmbeddingGenerationOptions options = null, CancellationToken cancellationToken = default) @@ -91,10 +97,10 @@ public virtual async Task> GenerateEmbeddingAsync(string } // CUSTOM: Added to simplify generating a single embedding from a string input. - /// Creates an embedding vector representing the input text. - /// The string that will be turned into an embedding. - /// The to use. - /// A token that can be used to cancel this method call. + /// Generates an embedding representing the text input. + /// The text input to generate an embedding for. + /// The options to configure the embedding generation. + /// A token that can be used to cancel this method call. /// is null. /// is an empty string, and was expected to be non-empty. public virtual ClientResult GenerateEmbedding(string input, EmbeddingGenerationOptions options = null, CancellationToken cancellationToken = default) @@ -110,10 +116,10 @@ public virtual ClientResult GenerateEmbedding(string input, Embedding } // CUSTOM: Added to simplify passing the input as a collection of strings instead of BinaryData. - /// Creates an embedding vector representing the input text. - /// The strings that will be turned into embeddings. - /// The to use. - /// A token that can be used to cancel this method call. + /// Generates embeddings representing the text inputs. + /// The text inputs to generate embeddings for. + /// The options to configure the embedding generation. + /// A token that can be used to cancel this method call. /// is null. /// is an empty collection, and was expected to be non-empty. 
public virtual async Task> GenerateEmbeddingsAsync(IEnumerable inputs, EmbeddingGenerationOptions options = null, CancellationToken cancellationToken = default) @@ -130,10 +136,10 @@ public virtual async Task> GenerateEmbeddingsA } // CUSTOM: Added to simplify passing the input as a collection of strings instead of BinaryData. - /// Creates an embedding vector representing the input text. - /// The strings that will be turned into embeddings. - /// The to use. - /// A token that can be used to cancel this method call. + /// Generates embeddings representing the text inputs. + /// The text inputs to generate embeddings for. + /// The options to configure the embedding generation. + /// A token that can be used to cancel this method call. /// is null. /// is an empty collection, and was expected to be non-empty. public virtual ClientResult GenerateEmbeddings(IEnumerable inputs, EmbeddingGenerationOptions options = null, CancellationToken cancellationToken = default) @@ -149,10 +155,10 @@ public virtual ClientResult GenerateEmbeddings(IEnumerable< } // CUSTOM: Added to simplify passing the input as a collection of a collection of tokens instead of BinaryData. - /// Creates an embedding vector representing the input text. - /// The strings that will be turned into embeddings. - /// The to use. - /// A token that can be used to cancel this method call. + /// Generates embeddings representing the text inputs. + /// The text inputs to generate embeddings for. + /// The options to configure the embedding generation. + /// A token that can be used to cancel this method call. /// is null. /// is an empty collection, and was expected to be non-empty. 
public virtual async Task> GenerateEmbeddingsAsync(IEnumerable> inputs, EmbeddingGenerationOptions options = null, CancellationToken cancellationToken = default) @@ -168,10 +174,10 @@ public virtual async Task> GenerateEmbeddingsA } // CUSTOM: Added to simplify passing the input as a collection of a collection of tokens instead of BinaryData. - /// Creates an embedding vector representing the input text. - /// The strings that will be turned into embeddings. - /// The to use. - /// A token that can be used to cancel this method call. + /// Generates embeddings representing the text inputs. + /// The text inputs to generate embeddings for. + /// The options to configure the embedding generation. + /// A token that can be used to cancel this method call. /// is null. /// is an empty collection, and was expected to be non-empty. public virtual ClientResult GenerateEmbeddings(IEnumerable> inputs, EmbeddingGenerationOptions options = null, CancellationToken cancellationToken = default) diff --git a/.dotnet/src/Custom/Embeddings/EmbeddingGenerationOptions.cs b/.dotnet/src/Custom/Embeddings/EmbeddingGenerationOptions.cs index 0d3e807e6..0c3760797 100644 --- a/.dotnet/src/Custom/Embeddings/EmbeddingGenerationOptions.cs +++ b/.dotnet/src/Custom/Embeddings/EmbeddingGenerationOptions.cs @@ -86,4 +86,12 @@ public partial class EmbeddingGenerationOptions public EmbeddingGenerationOptions() { } + + // CUSTOM: Renamed. + /// + /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. + /// Learn more. 
+ /// + [CodeGenMember("User")] + public string EndUserId { get; set; } } \ No newline at end of file diff --git a/.dotnet/src/Custom/Files/FileClient.Protocol.cs b/.dotnet/src/Custom/Files/FileClient.Protocol.cs index f9784bfec..bc44ac05c 100644 --- a/.dotnet/src/Custom/Files/FileClient.Protocol.cs +++ b/.dotnet/src/Custom/Files/FileClient.Protocol.cs @@ -8,7 +8,7 @@ namespace OpenAI.Files; [CodeGenSuppress("CreateFileAsync", typeof(BinaryContent), typeof(string), typeof(RequestOptions))] [CodeGenSuppress("CreateFile", typeof(BinaryContent), typeof(string), typeof(RequestOptions))] -[CodeGenSuppress("GetFilesAsync", typeof(string), typeof(RequestOptions))] +[CodeGenSuppress("GetFilesAsync", typeof(string), typeof(RequestOptions))] [CodeGenSuppress("GetFiles", typeof(string), typeof(RequestOptions))] [CodeGenSuppress("RetrieveFileAsync", typeof(string), typeof(RequestOptions))] [CodeGenSuppress("RetrieveFile", typeof(string), typeof(RequestOptions))] diff --git a/.dotnet/src/Custom/Files/FileClient.cs b/.dotnet/src/Custom/Files/FileClient.cs index 3b8f7c338..1905aa714 100644 --- a/.dotnet/src/Custom/Files/FileClient.cs +++ b/.dotnet/src/Custom/Files/FileClient.cs @@ -7,9 +7,10 @@ namespace OpenAI.Files; -/// -/// The service client for OpenAI file operations. -/// +// CUSTOM: +// - Renamed. +// - Suppressed constructor that takes endpoint parameter; endpoint is now a property in the options class. +/// The service client for OpenAI file operations. [CodeGenClient("Files")] [CodeGenSuppress("FileClient", typeof(ClientPipeline), typeof(ApiKeyCredential), typeof(Uri))] [CodeGenSuppress("CreateFileAsync", typeof(InternalFileUploadOptions))] @@ -24,70 +25,64 @@ namespace OpenAI.Files; [CodeGenSuppress("DownloadFile", typeof(string))] public partial class FileClient { - /// - /// Initializes a new instance of that will use an API key when authenticating. - /// - /// The API key used to authenticate with the service endpoint. 
- /// Additional options to customize the client. - /// The provided was null. - public FileClient(ApiKeyCredential credential, OpenAIClientOptions options = null) - : this( - OpenAIClient.CreatePipeline(OpenAIClient.GetApiKey(credential, requireExplicitCredential: true), options), - OpenAIClient.GetEndpoint(options), - options) + private InternalUploadsClient _internalUploadsClient; + + // CUSTOM: + // - Used a custom pipeline. + // - Demoted the endpoint parameter to be a property in the options class. + /// Initializes a new instance of . + /// The API key to authenticate with the service. + /// is null. + public FileClient(ApiKeyCredential credential) : this(credential, new OpenAIClientOptions()) { } - /// - /// Initializes a new instance of that will use an API key from the OPENAI_API_KEY - /// environment variable when authenticating. - /// - /// - /// To provide an explicit credential instead of using the environment variable, use an alternate constructor like - /// . - /// - /// Additional options to customize the client. - /// The OPENAI_API_KEY environment variable was not found. - public FileClient(OpenAIClientOptions options = null) - : this( - OpenAIClient.CreatePipeline(OpenAIClient.GetApiKey(), options), - OpenAIClient.GetEndpoint(options), - options) + // CUSTOM: + // - Used a custom pipeline. + // - Demoted the endpoint parameter to be a property in the options class. + /// Initializes a new instance of . + /// The API key to authenticate with the service. + /// The options to configure the client. + /// is null. + public FileClient(ApiKeyCredential credential, OpenAIClientOptions options) { + Argument.AssertNotNull(credential, nameof(credential)); + options ??= new OpenAIClientOptions(); + + _pipeline = OpenAIClient.CreatePipeline(credential, options); + _endpoint = OpenAIClient.GetEndpoint(options); } - /// - /// Initializes a new instance of . - /// - /// The client pipeline to use. - /// The endpoint to use. 
- protected internal FileClient(ClientPipeline pipeline, Uri endpoint, OpenAIClientOptions options) + // CUSTOM: + // - Used a custom pipeline. + // - Demoted the endpoint parameter to be a property in the options class. + // - Made protected. + /// Initializes a new instance of . + /// The HTTP pipeline to send and receive REST requests and responses. + /// The options to configure the client. + /// is null. + protected internal FileClient(ClientPipeline pipeline, OpenAIClientOptions options) { + Argument.AssertNotNull(pipeline, nameof(pipeline)); + options ??= new OpenAIClientOptions(); + _pipeline = pipeline; - _endpoint = endpoint; + _endpoint = OpenAIClient.GetEndpoint(options); + _internalUploadsClient = new(pipeline, options); } - /// - /// Upload a file that can be used across various endpoints. The size of all the files uploaded by - /// one organization can be up to 100 GB. - /// - /// The size of individual files can be a maximum of 512 MB or 2 million tokens for Assistants. See - /// the Assistants Tools guide to - /// learn more about the types of files supported. The Fine-tuning API only supports `.jsonl` files. - /// - /// Please contact us if you need to increase these - /// storage limits. - /// - /// The file to upload. + /// Uploads a file that can be used across various operations. + /// Individual files can be up to 512 MB, and the size of all files uploaded by one organization can be up to 100 GB. + /// The file stream to upload. /// - /// The filename associated with the file stream. The filename's extension (for example: .json) will be used to - /// validate the file format. The request may fail if the file extension and file format do not match. + /// The filename associated with the file stream. The filename's extension (for example: .json) will be used to + /// validate the file format. The request may fail if the filename's extension and the actual file format do + /// not match. /// /// The intended purpose of the uploaded file. 
- /// A token that can be used to cancel this method call. + /// A token that can be used to cancel this method call. /// or is null. /// is an empty string, and was expected to be non-empty. - /// Information about the uploaded file. public virtual async Task> UploadFileAsync(Stream file, string filename, FileUploadPurpose purpose, CancellationToken cancellationToken = default) { Argument.AssertNotNull(file, nameof(file)); @@ -103,27 +98,18 @@ public virtual async Task> UploadFileAsync(Stream f return ClientResult.FromValue(OpenAIFileInfo.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } - /// - /// Upload a file that can be used across various endpoints. The size of all the files uploaded by - /// one organization can be up to 100 GB. - /// - /// The size of individual files can be a maximum of 512 MB or 2 million tokens for Assistants. See - /// the Assistants Tools guide to - /// learn more about the types of files supported. The Fine-tuning API only supports `.jsonl` files. - /// - /// Please contact us if you need to increase these - /// storage limits. - /// - /// The file to upload. + /// Uploads a file that can be used across various operations. + /// Individual files can be up to 512 MB, and the size of all files uploaded by one organization can be up to 100 GB. + /// The file stream to upload. /// - /// The filename associated with the file stream. The filename's extension (for example: .json) will be used to - /// validate the file format. The request may fail if the file extension and file format do not match. + /// The filename associated with the file stream. The filename's extension (for example: .json) will be used to + /// validate the file format. The request may fail if the filename's extension and the actual file format do + /// not match. /// /// The intended purpose of the uploaded file. - /// A token that can be used to cancel this method call. + /// A token that can be used to cancel this method call. /// or is null. 
/// is an empty string, and was expected to be non-empty. - /// Information about the uploaded file. public virtual ClientResult UploadFile(Stream file, string filename, FileUploadPurpose purpose, CancellationToken cancellationToken = default) { Argument.AssertNotNull(file, nameof(file)); @@ -139,26 +125,17 @@ public virtual ClientResult UploadFile(Stream file, string filen return ClientResult.FromValue(OpenAIFileInfo.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } - /// - /// Upload a file that can be used across various endpoints. The size of all the files uploaded by - /// one organization can be up to 100 GB. - /// - /// The size of individual files can be a maximum of 512 MB or 2 million tokens for Assistants. See - /// the Assistants Tools guide to - /// learn more about the types of files supported. The Fine-tuning API only supports `.jsonl` files. - /// - /// Please contact us if you need to increase these - /// storage limits. - /// - /// The file to upload. + /// Uploads a file that can be used across various operations. + /// Individual files can be up to 512 MB, and the size of all files uploaded by one organization can be up to 100 GB. + /// The file bytes to upload. /// - /// The filename associated with the file binary data. The filename's extension (for example: .json) will be used to - /// validate the file format. The request may fail if the file extension and file format do not match. + /// The filename associated with the file bytes. The filename's extension (for example: .json) will be used to + /// validate the file format. The request may fail if the filename's extension and the actual file format do + /// not match. /// /// The intended purpose of the uploaded file. /// or is null. /// is an empty string, and was expected to be non-empty. - /// Information about the uploaded file. 
public virtual Task> UploadFileAsync(BinaryData file, string filename, FileUploadPurpose purpose) { Argument.AssertNotNull(file, nameof(file)); @@ -167,26 +144,17 @@ public virtual Task> UploadFileAsync(BinaryData fil return UploadFileAsync(file?.ToStream(), filename, purpose); } - /// - /// Upload a file that can be used across various endpoints. The size of all the files uploaded by - /// one organization can be up to 100 GB. - /// - /// The size of individual files can be a maximum of 512 MB or 2 million tokens for Assistants. See - /// the Assistants Tools guide to - /// learn more about the types of files supported. The Fine-tuning API only supports `.jsonl` files. - /// - /// Please contact us if you need to increase these - /// storage limits. - /// - /// The file to upload. + /// Uploads a file that can be used across various operations. + /// Individual files can be up to 512 MB, and the size of all files uploaded by one organization can be up to 100 GB. + /// The file bytes to upload. /// - /// The filename associated with the file binary data. The filename's extension (for example: .json) will be used to - /// validate the file format. The request may fail if the file extension and file format do not match. + /// The filename associated with the file bytes. The filename's extension (for example: .json) will be used to + /// validate the file format. The request may fail if the filename's extension and the actual file format do + /// not match. /// /// The intended purpose of the uploaded file. /// or is null. /// is an empty string, and was expected to be non-empty. - /// public virtual ClientResult UploadFile(BinaryData file, string filename, FileUploadPurpose purpose) { Argument.AssertNotNull(file, nameof(file)); @@ -195,25 +163,16 @@ public virtual ClientResult UploadFile(BinaryData file, string f return UploadFile(file?.ToStream(), filename, purpose); } - /// - /// Upload a file that can be used across various endpoints. 
The size of all the files uploaded by - /// one organization can be up to 100 GB. - /// - /// The size of individual files can be a maximum of 512 MB or 2 million tokens for Assistants. See - /// the Assistants Tools guide to - /// learn more about the types of files supported. The Fine-tuning API only supports `.jsonl` files. - /// - /// Please contact us if you need to increase these - /// storage limits. - /// + /// Uploads a file that can be used across various operations. + /// Individual files can be up to 512 MB, and the size of all files uploaded by one organization can be up to 100 GB. /// - /// The path of the file to upload. The provided file path's extension (for example: .json) will be used - /// to validate the file format. The request may fail if the file extension and file format do not match. + /// The path of the file to upload. The provided file path's extension (for example: .json) will be used to + /// validate the file format. The request may fail if the file path's extension and the actual file format do + /// not match. /// /// The intended purpose of the uploaded file. /// was null. /// is an empty string, and was expected to be non-empty. - /// Information about the uploaded file. public virtual async Task> UploadFileAsync(string filePath, FileUploadPurpose purpose) { Argument.AssertNotNullOrEmpty(filePath, nameof(filePath)); @@ -222,25 +181,16 @@ public virtual async Task> UploadFileAsync(string f return await UploadFileAsync(stream, filePath, purpose).ConfigureAwait(false); } - /// - /// Upload a file that can be used across various endpoints. The size of all the files uploaded by - /// one organization can be up to 100 GB. - /// - /// The size of individual files can be a maximum of 512 MB or 2 million tokens for Assistants. See - /// the Assistants Tools guide to - /// learn more about the types of files supported. The Fine-tuning API only supports `.jsonl` files. 
- /// - /// Please contact us if you need to increase these - /// storage limits. - /// + /// Uploads a file that can be used across various operations. + /// Individual files can be up to 512 MB, and the size of all files uploaded by one organization can be up to 100 GB. /// - /// The path of the file to upload. The provided file path's extension (for example: .json) will be used - /// to validate the file format. The request may fail if the file extension and file format do not match. + /// The path of the file to upload. The provided file path's extension (for example: .json) will be used to + /// validate the file format. The request may fail if the file path's extension and the actual file format do + /// not match. /// /// The intended purpose of the uploaded file. /// was null. /// is an empty string, and was expected to be non-empty. - /// Information about the uploaded file. public virtual ClientResult UploadFile(string filePath, FileUploadPurpose purpose) { Argument.AssertNotNullOrEmpty(filePath, nameof(filePath)); @@ -249,32 +199,29 @@ public virtual ClientResult UploadFile(string filePath, FileUplo return UploadFile(stream, filePath, purpose); } - /// Retrieves a list of files that belong to the user's organization. + /// Gets basic information about each of the files belonging to the user's organization. /// Only return files with the given purpose. - /// A token that can be used to cancel this method call. - /// Information about the files in the user's organization. + /// A token that can be used to cancel this method call. public virtual async Task> GetFilesAsync(OpenAIFilePurpose? 
purpose = null, CancellationToken cancellationToken = default) { ClientResult result = await GetFilesAsync(purpose?.ToString(), cancellationToken.ToRequestOptions()).ConfigureAwait(false); return ClientResult.FromValue(OpenAIFileInfoCollection.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } - /// Retrieves a list of files that belong to the user's organization. + /// Gets basic information about each of the files belonging to the user's organization. /// Only return files with the given purpose. - /// A token that can be used to cancel this method call. - /// Information about the files in the user's organization. + /// A token that can be used to cancel this method call. public virtual ClientResult GetFiles(OpenAIFilePurpose? purpose = null, CancellationToken cancellationToken = default) { ClientResult result = GetFiles(purpose?.ToString(), cancellationToken.ToRequestOptions()); return ClientResult.FromValue(OpenAIFileInfoCollection.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } - /// Retrieves information about a specified file. - /// The ID of the file to retrieve. - /// A token that can be used to cancel this method call. + /// Gets basic information about the specified file. + /// The ID of the desired file. + /// A token that can be used to cancel this method call. /// is null. /// is an empty string, and was expected to be non-empty. - /// Information about the specified file. public virtual async Task> GetFileAsync(string fileId, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(fileId, nameof(fileId)); @@ -283,12 +230,11 @@ public virtual async Task> GetFileAsync(string file return ClientResult.FromValue(OpenAIFileInfo.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } - /// Retrieves information about a specified file. - /// The ID of the file to retrieve. - /// A token that can be used to cancel this method call. + /// Gets basic information about the specified file. 
+ /// The ID of the desired file. + /// A token that can be used to cancel this method call. /// is null. /// is an empty string, and was expected to be non-empty. - /// Information about the specified file. public virtual ClientResult GetFile(string fileId, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(fileId, nameof(fileId)); @@ -297,12 +243,11 @@ public virtual ClientResult GetFile(string fileId, CancellationT return ClientResult.FromValue(OpenAIFileInfo.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } - /// Deletes a previously uploaded file. + /// Deletes the specified file. /// The ID of the file to delete. - /// A token that can be used to cancel this method call. + /// A token that can be used to cancel this method call. /// is null. /// is an empty string, and was expected to be non-empty. - /// A boolean value indicating whether the deletion request was successful. public virtual async Task> DeleteFileAsync(string fileId, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(fileId, nameof(fileId)); @@ -312,12 +257,11 @@ public virtual async Task> DeleteFileAsync(string fileId, Can return ClientResult.FromValue(internalDeletion.Deleted, result.GetRawResponse()); } - /// Deletes a previously uploaded file. + /// Deletes the specified file. /// The ID of the file to delete. - /// A token that can be used to cancel this method call. + /// A token that can be used to cancel this method call. /// is null. /// is an empty string, and was expected to be non-empty. - /// A boolean value indicating whether the deletion request was successful. 
public virtual ClientResult DeleteFile(string fileId, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(fileId, nameof(fileId)); @@ -327,32 +271,11 @@ public virtual ClientResult DeleteFile(string fileId, CancellationToken ca return ClientResult.FromValue(internalDeletion.Deleted, result.GetRawResponse()); } - /// Deletes a previously uploaded file. - /// The file to delete. - /// is null. - /// A boolean value indicating whether the deletion request was successful. - public virtual Task> DeleteFileAsync(OpenAIFileInfo file) - { - Argument.AssertNotNull(file, nameof(file)); - return DeleteFileAsync(file.Id); - } - - /// Deletes a previously uploaded file. - /// The file to delete. - /// is null. - /// A boolean value indicating whether the deletion request was successful. - public virtual ClientResult DeleteFile(OpenAIFileInfo file) - { - Argument.AssertNotNull(file.Id, nameof(file)); - return DeleteFile(file.Id); - } - - /// Downloads the binary content of the specified file. + /// Downloads the content of the specified file. /// The ID of the file to download. - /// A token that can be used to cancel this method call. + /// A token that can be used to cancel this method call. /// is null. /// is an empty string, and was expected to be non-empty. - /// The contents of the specified file. public virtual async Task> DownloadFileAsync(string fileId, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(fileId, nameof(fileId)); @@ -361,12 +284,11 @@ public virtual async Task> DownloadFileAsync(string fil return ClientResult.FromValue(result.GetRawResponse().Content, result.GetRawResponse()); } - /// Downloads the binary content of the specified file. + /// Downloads the content of the specified file. /// The ID of the file to download. - /// A token that can be used to cancel this method call. + /// A token that can be used to cancel this method call. /// is null. 
/// is an empty string, and was expected to be non-empty. - /// The bionary content of the specified file. public virtual ClientResult DownloadFile(string fileId, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(fileId, nameof(fileId)); @@ -374,24 +296,4 @@ public virtual ClientResult DownloadFile(string fileId, Cancellation ClientResult result = DownloadFile(fileId, cancellationToken.ToRequestOptions()); return ClientResult.FromValue(result.GetRawResponse().Content, result.GetRawResponse()); } - - /// Downloads the binary content of the specified file. - /// The file to download. - /// is null. - /// The binary content of the uploaded file. - public virtual Task> DownloadFileAsync(OpenAIFileInfo file) - { - Argument.AssertNotNull(file, nameof(file)); - return DownloadFileAsync(file.Id); - } - - /// Downloads the binary content of the specified file. - /// The file to download. - /// is null. - /// The binary content of the uploaded file. - public virtual ClientResult DownloadFile(OpenAIFileInfo file) - { - Argument.AssertNotNull(file, nameof(file)); - return DownloadFile(file.Id); - } } diff --git a/.dotnet/src/Custom/Files/Internal/GeneratorStubs.cs b/.dotnet/src/Custom/Files/Internal/GeneratorStubs.cs index 617be9be4..e21e8755d 100644 --- a/.dotnet/src/Custom/Files/Internal/GeneratorStubs.cs +++ b/.dotnet/src/Custom/Files/Internal/GeneratorStubs.cs @@ -10,4 +10,14 @@ internal readonly partial struct InternalDeleteFileResponseObject { } internal readonly partial struct InternalListFilesResponseObject { } [CodeGenModel("OpenAIFileObject")] -internal readonly partial struct InternalOpenAIFileObject { } \ No newline at end of file +internal readonly partial struct InternalOpenAIFileObject { } + +[CodeGenModel("AddUploadPartRequest")] internal partial class InternalAddUploadPartRequest { } +[CodeGenModel("CompleteUploadRequest")] internal partial class InternalCompleteUploadRequest { } +[CodeGenModel("CreateUploadRequest")] internal 
partial class InternalCreateUploadRequest { } +[CodeGenModel("CreateUploadRequestPurpose")] internal readonly partial struct InternalCreateUploadRequestPurpose { } +[CodeGenModel("Upload")] internal partial class InternalUpload { } +[CodeGenModel("UploadObject")] internal readonly partial struct InternalUploadObject { } +[CodeGenModel("UploadPart")] internal partial class InternalUploadPart { } +[CodeGenModel("UploadPartObject")] internal readonly partial struct InternalUploadPartObject { } +[CodeGenModel("UploadStatus")] internal readonly partial struct InternalUploadStatus { } diff --git a/.dotnet/src/Custom/Files/Internal/InternalUploadsClient.cs b/.dotnet/src/Custom/Files/Internal/InternalUploadsClient.cs new file mode 100644 index 000000000..0e9b3915b --- /dev/null +++ b/.dotnet/src/Custom/Files/Internal/InternalUploadsClient.cs @@ -0,0 +1,53 @@ +using System; +using System.ClientModel; +using System.ClientModel.Primitives; + +namespace OpenAI.Files; + +[CodeGenClient("Uploads")] +[CodeGenSuppress("InternalUploadsClient", typeof(ClientPipeline), typeof(ApiKeyCredential), typeof(Uri))] +internal partial class InternalUploadsClient +{ + // CUSTOM: + // - Used a custom pipeline. + // - Demoted the endpoint parameter to be a property in the options class. + /// Initializes a new instance of . + /// The API key to authenticate with the service. + /// is null. + internal InternalUploadsClient(ApiKeyCredential credential) : this(credential, new OpenAIClientOptions()) + { + } + + // CUSTOM: + // - Used a custom pipeline. + // - Demoted the endpoint parameter to be a property in the options class. + /// Initializes a new instance of . + /// The API key to authenticate with the service. + /// The options to configure the client. + /// is null. 
+ internal InternalUploadsClient(ApiKeyCredential credential, OpenAIClientOptions options) + { + Argument.AssertNotNull(credential, nameof(credential)); + options ??= new OpenAIClientOptions(); + + _pipeline = OpenAIClient.CreatePipeline(credential, options); + _endpoint = OpenAIClient.GetEndpoint(options); + } + + // CUSTOM: + // - Used a custom pipeline. + // - Demoted the endpoint parameter to be a property in the options class. + // - Made protected. + /// Initializes a new instance of . + /// The HTTP pipeline to send and receive REST requests and responses. + /// The options to configure the client. + /// is null. + protected internal InternalUploadsClient(ClientPipeline pipeline, OpenAIClientOptions options) + { + Argument.AssertNotNull(pipeline, nameof(pipeline)); + options ??= new OpenAIClientOptions(); + + _pipeline = pipeline; + _endpoint = OpenAIClient.GetEndpoint(options); + } +} diff --git a/.dotnet/src/Custom/Files/OpenAIFileInfo.cs b/.dotnet/src/Custom/Files/OpenAIFileInfo.cs index 0067c4793..17f0d4796 100644 --- a/.dotnet/src/Custom/Files/OpenAIFileInfo.cs +++ b/.dotnet/src/Custom/Files/OpenAIFileInfo.cs @@ -10,5 +10,5 @@ public partial class OpenAIFileInfo // CUSTOM: Renamed. /// The size of the file, in bytes. [CodeGenMember("Bytes")] - public long? SizeInBytes { get; } + public int? SizeInBytes { get; } } diff --git a/.dotnet/src/Custom/Files/OpenAIFilesModelFactory.cs b/.dotnet/src/Custom/Files/OpenAIFilesModelFactory.cs index 0d79b520e..41c130905 100644 --- a/.dotnet/src/Custom/Files/OpenAIFilesModelFactory.cs +++ b/.dotnet/src/Custom/Files/OpenAIFilesModelFactory.cs @@ -9,7 +9,7 @@ public static partial class OpenAIFilesModelFactory { /// Initializes a new instance of . /// A new instance for mocking. - public static OpenAIFileInfo OpenAIFileInfo(string id = null, long? 
sizeInBytes = null, DateTimeOffset createdAt = default, string filename = null, OpenAIFilePurpose purpose = default, OpenAIFileStatus status = default, string statusDetails = null) + public static OpenAIFileInfo OpenAIFileInfo(string id = null, int? sizeInBytes = null, DateTimeOffset createdAt = default, string filename = null, OpenAIFilePurpose purpose = default, OpenAIFileStatus status = default, string statusDetails = null) { return new OpenAIFileInfo( id, diff --git a/.dotnet/src/Custom/FineTuning/FineTuningClient.Protocol.cs b/.dotnet/src/Custom/FineTuning/FineTuningClient.Protocol.cs index b54bb39b6..86188a520 100644 --- a/.dotnet/src/Custom/FineTuning/FineTuningClient.Protocol.cs +++ b/.dotnet/src/Custom/FineTuning/FineTuningClient.Protocol.cs @@ -109,10 +109,10 @@ public virtual CreateJobOperation CreateJob( /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// Service returned a non-success status code. /// The response returned from the service. - public virtual async Task GetJobsAsync(string after, int? limit, RequestOptions options) + public virtual IAsyncEnumerable GetJobsAsync(string after, int? limit, RequestOptions options) { - using PipelineMessage message = CreateGetPaginatedFineTuningJobsRequest(after, limit, options); - return ClientResult.FromResponse(await _pipeline.ProcessMessageAsync(message, options).ConfigureAwait(false)); + FineTuningJobsPageEnumerator enumerator = new FineTuningJobsPageEnumerator(_pipeline, _endpoint, after, limit, options); + return PageCollectionHelpers.CreateAsync(enumerator); } // CUSTOM: @@ -126,10 +126,10 @@ public virtual async Task GetJobsAsync(string after, int? limit, R /// The request options, which can override default behaviors of the client pipeline on a per-call basis. /// Service returned a non-success status code. /// The response returned from the service. - public virtual ClientResult GetJobs(string after, int? 
limit, RequestOptions options) + public virtual IEnumerable GetJobs(string after, int? limit, RequestOptions options) { - using PipelineMessage message = CreateGetPaginatedFineTuningJobsRequest(after, limit, options); - return ClientResult.FromResponse(_pipeline.ProcessMessage(message, options)); + FineTuningJobsPageEnumerator enumerator = new FineTuningJobsPageEnumerator(_pipeline, _endpoint, after, limit, options); + return PageCollectionHelpers.Create(enumerator); } // CUSTOM: diff --git a/.dotnet/src/Custom/FineTuning/FineTuningClient.cs b/.dotnet/src/Custom/FineTuning/FineTuningClient.cs index 53ac0b085..6371b39d6 100644 --- a/.dotnet/src/Custom/FineTuning/FineTuningClient.cs +++ b/.dotnet/src/Custom/FineTuning/FineTuningClient.cs @@ -1,13 +1,18 @@ using System; using System.ClientModel; using System.ClientModel.Primitives; +using System.Diagnostics.CodeAnalysis; namespace OpenAI.FineTuning; -/// -/// The service client for OpenAI fine-tuning operations. -/// +// CUSTOM: +// - Renamed. +// - Suppressed constructor that takes endpoint parameter; endpoint is now a property in the options class. +// - Suppressed convenience methods for now. +/// The service client for OpenAI fine-tuning operations. +[Experimental("OPENAI001")] [CodeGenClient("FineTuning")] +[CodeGenSuppress("FineTuningClient", typeof(ClientPipeline), typeof(ApiKeyCredential), typeof(Uri))] [CodeGenSuppress("CreateFineTuningJobAsync", typeof(InternalCreateFineTuningJobRequest))] [CodeGenSuppress("CreateFineTuningJob", typeof(InternalCreateFineTuningJobRequest))] [CodeGenSuppress("GetPaginatedFineTuningJobsAsync", typeof(string), typeof(int?))] @@ -22,7 +27,15 @@ namespace OpenAI.FineTuning; [CodeGenSuppress("GetFineTuningJobCheckpoints", typeof(string), typeof(string), typeof(int?))] public partial class FineTuningClient { - // Customization: documented constructors, apply protected visibility + // CUSTOM: + // - Used a custom pipeline. 
+ // - Demoted the endpoint parameter to be a property in the options class. + /// Initializes a new instance of . + /// The API key to authenticate with the service. + /// is null. + public FineTuningClient(ApiKeyCredential credential) : this(credential, new OpenAIClientOptions()) + { + } internal Uri Endpoint => _endpoint; @@ -39,29 +52,24 @@ public FineTuningClient(ApiKeyCredential credential, OpenAIClientOptions options options) { } - /// - /// Initializes a new instance of that will use an API key from the OPENAI_API_KEY - /// environment variable when authenticating. - /// - /// - /// To provide an explicit credential instead of using the environment variable, use an alternate constructor like - /// . - /// - /// Additional options to customize the client. - /// The OPENAI_API_KEY environment variable was not found. - public FineTuningClient(OpenAIClientOptions options = null) - : this( - OpenAIClient.CreatePipeline(OpenAIClient.GetApiKey(), options), - OpenAIClient.GetEndpoint(options), - options) - {} + _pipeline = OpenAIClient.CreatePipeline(credential, options); + _endpoint = OpenAIClient.GetEndpoint(options); + } - /// Initializes a new instance of FineTuningClient. - /// The HTTP pipeline for sending and receiving REST requests and responses. - /// OpenAI Endpoint. - protected internal FineTuningClient(ClientPipeline pipeline, Uri endpoint, OpenAIClientOptions options) + // CUSTOM: + // - Used a custom pipeline. + // - Demoted the endpoint parameter to be a property in the options class. + // - Made protected. + /// Initializes a new instance of . + /// The HTTP pipeline to send and receive REST requests and responses. + /// The options to configure the client. + /// is null. 
+ protected internal FineTuningClient(ClientPipeline pipeline, OpenAIClientOptions options) { + Argument.AssertNotNull(pipeline, nameof(pipeline)); + options ??= new OpenAIClientOptions(); + _pipeline = pipeline; - _endpoint = endpoint; + _endpoint = OpenAIClient.GetEndpoint(options); } } diff --git a/.dotnet/src/Custom/FineTuning/Internal/GeneratorStubs.cs b/.dotnet/src/Custom/FineTuning/Internal/GeneratorStubs.cs index cf7b626a9..833c958ff 100644 --- a/.dotnet/src/Custom/FineTuning/Internal/GeneratorStubs.cs +++ b/.dotnet/src/Custom/FineTuning/Internal/GeneratorStubs.cs @@ -87,4 +87,11 @@ internal readonly partial struct InternalListFineTuningJobEventsResponseObject { internal partial class InternalListPaginatedFineTuningJobsResponse { } [CodeGenModel("ListPaginatedFineTuningJobsResponseObject")] -internal readonly partial struct InternalListPaginatedFineTuningJobsResponseObject { } \ No newline at end of file +internal readonly partial struct InternalListPaginatedFineTuningJobsResponseObject { } + +[CodeGenModel("CreateFineTuningJobRequestHyperparametersBatchSizeChoiceEnum")] internal readonly partial struct InternalCreateFineTuningJobRequestHyperparametersBatchSizeChoiceEnum { } +[CodeGenModel("CreateFineTuningJobRequestHyperparametersLearningRateMultiplierChoiceEnum")] internal readonly partial struct InternalCreateFineTuningJobRequestHyperparametersLearningRateMultiplierChoiceEnum { } +[CodeGenModel("CreateFineTuningJobRequestHyperparametersNEpochsChoiceEnum")] internal readonly partial struct InternalCreateFineTuningJobRequestHyperparametersNEpochsChoiceEnum { } +[CodeGenModel("FineTuningJobHyperparametersBatchSizeChoiceEnum")] internal readonly partial struct InternalFineTuningJobHyperparametersBatchSizeChoiceEnum { } +[CodeGenModel("FineTuningJobHyperparametersLearningRateMultiplierChoiceEnum")] internal readonly partial struct InternalFineTuningJobHyperparametersLearningRateMultiplierChoiceEnum { } 
+[CodeGenModel("FineTuningJobHyperparametersNEpochsChoiceEnum")] internal readonly partial struct InternalFineTuningJobHyperparametersNEpochsChoiceEnum { } diff --git a/.dotnet/src/Custom/FineTuning/Internal/Pagination/FineTuningJobCheckpointsPageEnumerator.cs b/.dotnet/src/Custom/FineTuning/Internal/Pagination/FineTuningJobCheckpointsPageEnumerator.cs new file mode 100644 index 000000000..3cce31fb8 --- /dev/null +++ b/.dotnet/src/Custom/FineTuning/Internal/Pagination/FineTuningJobCheckpointsPageEnumerator.cs @@ -0,0 +1,127 @@ +using System; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Linq; +using System.Text.Json; +using System.Threading.Tasks; + +#nullable enable + +namespace OpenAI.FineTuning; + +internal partial class FineTuningJobCheckpointsPageEnumerator : PageResultEnumerator +{ + private readonly ClientPipeline _pipeline; + private readonly Uri _endpoint; + + private readonly string _jobId; + private readonly int? _limit; + private readonly RequestOptions _options; + + private string? _after; + + public FineTuningJobCheckpointsPageEnumerator( + ClientPipeline pipeline, + Uri endpoint, + string jobId, string after, int? 
limit, + RequestOptions options) + { + _pipeline = pipeline; + _endpoint = endpoint; + + _jobId = jobId; + _after = after; + _limit = limit; + _options = options; + } + + public override async Task GetFirstAsync() + => await GetJobCheckpointsAsync(_jobId, _after!, _limit, _options).ConfigureAwait(false); + + public override ClientResult GetFirst() + => GetJobCheckpoints(_jobId, _after!, _limit, _options); + + public override async Task GetNextAsync(ClientResult result) + { + PipelineResponse response = result.GetRawResponse(); + + using JsonDocument doc = JsonDocument.Parse(response?.Content); + + if (doc?.RootElement.TryGetProperty("data", out JsonElement dataElement) == true + && dataElement.EnumerateArray().LastOrDefault().TryGetProperty("id", out JsonElement idElement) == true) + { + _after = idElement.GetString(); + } + + return await GetJobCheckpointsAsync(_jobId, _after!, _limit, _options).ConfigureAwait(false); + } + + public override ClientResult GetNext(ClientResult result) + { + PipelineResponse response = result.GetRawResponse(); + + using JsonDocument doc = JsonDocument.Parse(response?.Content); + + if (doc?.RootElement.TryGetProperty("data", out JsonElement dataElement) == true + && dataElement.EnumerateArray().LastOrDefault().TryGetProperty("id", out JsonElement idElement) == true) + { + _after = idElement.GetString(); + } + + return GetJobCheckpoints(_jobId, _after!, _limit, _options); + } + + public override bool HasNext(ClientResult result) + { + PipelineResponse response = result.GetRawResponse(); + + using JsonDocument doc = JsonDocument.Parse(response.Content); + bool hasMore = doc.RootElement.GetProperty("has_more"u8).GetBoolean(); + + return hasMore; + } + + internal virtual async Task GetJobCheckpointsAsync(string jobId, string after, int? 
limit, RequestOptions options) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + + using PipelineMessage message = CreateGetFineTuningJobCheckpointsRequest(jobId, after, limit, options); + return ClientResult.FromResponse(await _pipeline.ProcessMessageAsync(message, options).ConfigureAwait(false)); + } + + internal virtual ClientResult GetJobCheckpoints(string jobId, string after, int? limit, RequestOptions options) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + + using PipelineMessage message = CreateGetFineTuningJobCheckpointsRequest(jobId, after, limit, options); + return ClientResult.FromResponse(_pipeline.ProcessMessage(message, options)); + } + + internal PipelineMessage CreateGetFineTuningJobCheckpointsRequest(string fineTuningJobId, string after, int? limit, RequestOptions options) + { + var message = _pipeline.CreateMessage(); + message.ResponseClassifier = PipelineMessageClassifier200; + var request = message.Request; + request.Method = "GET"; + var uri = new ClientUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/fine_tuning/jobs/", false); + uri.AppendPath(fineTuningJobId, true); + uri.AppendPath("/checkpoints", false); + if (after != null) + { + uri.AppendQuery("after", after, true); + } + if (limit != null) + { + uri.AppendQuery("limit", limit.Value, true); + } + request.Uri = uri.ToUri(); + request.Headers.Set("Accept", "application/json"); + message.Apply(options); + return message; + } + + private static PipelineMessageClassifier? 
_pipelineMessageClassifier200; + private static PipelineMessageClassifier PipelineMessageClassifier200 => _pipelineMessageClassifier200 ??= PipelineMessageClassifier.Create(stackalloc ushort[] { 200 }); +} diff --git a/.dotnet/src/Custom/FineTuning/Internal/Pagination/FineTuningJobEventsPageEnumerator.cs b/.dotnet/src/Custom/FineTuning/Internal/Pagination/FineTuningJobEventsPageEnumerator.cs new file mode 100644 index 000000000..deea45174 --- /dev/null +++ b/.dotnet/src/Custom/FineTuning/Internal/Pagination/FineTuningJobEventsPageEnumerator.cs @@ -0,0 +1,127 @@ +using System; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Linq; +using System.Text.Json; +using System.Threading.Tasks; + +#nullable enable + +namespace OpenAI.FineTuning; + +internal partial class FineTuningJobEventsPageEnumerator : PageResultEnumerator +{ + private readonly ClientPipeline _pipeline; + private readonly Uri _endpoint; + + private readonly string _jobId; + private readonly int? _limit; + private readonly RequestOptions _options; + + private string? _after; + + public FineTuningJobEventsPageEnumerator( + ClientPipeline pipeline, + Uri endpoint, + string jobId, string after, int? 
limit, + RequestOptions options) + { + _pipeline = pipeline; + _endpoint = endpoint; + + _jobId = jobId; + _after = after; + _limit = limit; + _options = options; + } + + public override async Task GetFirstAsync() + => await GetJobEventsAsync(_jobId, _after!, _limit, _options).ConfigureAwait(false); + + public override ClientResult GetFirst() + => GetJobEvents(_jobId, _after!, _limit, _options); + + public override async Task GetNextAsync(ClientResult result) + { + PipelineResponse response = result.GetRawResponse(); + + using JsonDocument doc = JsonDocument.Parse(response?.Content); + + if (doc?.RootElement.TryGetProperty("data", out JsonElement dataElement) == true + && dataElement.EnumerateArray().LastOrDefault().TryGetProperty("id", out JsonElement idElement) == true) + { + _after = idElement.GetString(); + } + + return await GetJobEventsAsync(_jobId, _after!, _limit, _options).ConfigureAwait(false); + } + + public override ClientResult GetNext(ClientResult result) + { + PipelineResponse response = result.GetRawResponse(); + + using JsonDocument doc = JsonDocument.Parse(response?.Content); + + if (doc?.RootElement.TryGetProperty("data", out JsonElement dataElement) == true + && dataElement.EnumerateArray().LastOrDefault().TryGetProperty("id", out JsonElement idElement) == true) + { + _after = idElement.GetString(); + } + + return GetJobEvents(_jobId, _after!, _limit, _options); + } + + public override bool HasNext(ClientResult result) + { + PipelineResponse response = result.GetRawResponse(); + + using JsonDocument doc = JsonDocument.Parse(response.Content); + bool hasMore = doc.RootElement.GetProperty("has_more"u8).GetBoolean(); + + return hasMore; + } + + internal virtual async Task GetJobEventsAsync(string jobId, string after, int? 
limit, RequestOptions options) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + + using PipelineMessage message = CreateGetFineTuningEventsRequest(jobId, after, limit, options); + return ClientResult.FromResponse(await _pipeline.ProcessMessageAsync(message, options).ConfigureAwait(false)); + } + + internal virtual ClientResult GetJobEvents(string jobId, string after, int? limit, RequestOptions options) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + + using PipelineMessage message = CreateGetFineTuningEventsRequest(jobId, after, limit, options); + return ClientResult.FromResponse(_pipeline.ProcessMessage(message, options)); + } + + internal PipelineMessage CreateGetFineTuningEventsRequest(string fineTuningJobId, string after, int? limit, RequestOptions options) + { + var message = _pipeline.CreateMessage(); + message.ResponseClassifier = PipelineMessageClassifier200; + var request = message.Request; + request.Method = "GET"; + var uri = new ClientUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/fine_tuning/jobs/", false); + uri.AppendPath(fineTuningJobId, true); + uri.AppendPath("/events", false); + if (after != null) + { + uri.AppendQuery("after", after, true); + } + if (limit != null) + { + uri.AppendQuery("limit", limit.Value, true); + } + request.Uri = uri.ToUri(); + request.Headers.Set("Accept", "application/json"); + message.Apply(options); + return message; + } + + private static PipelineMessageClassifier? 
_pipelineMessageClassifier200; + private static PipelineMessageClassifier PipelineMessageClassifier200 => _pipelineMessageClassifier200 ??= PipelineMessageClassifier.Create(stackalloc ushort[] { 200 }); +} diff --git a/.dotnet/src/Custom/FineTuning/Internal/Pagination/FineTuningJobsPageEnumerator.cs b/.dotnet/src/Custom/FineTuning/Internal/Pagination/FineTuningJobsPageEnumerator.cs new file mode 100644 index 000000000..2c04d8152 --- /dev/null +++ b/.dotnet/src/Custom/FineTuning/Internal/Pagination/FineTuningJobsPageEnumerator.cs @@ -0,0 +1,119 @@ +using System; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Linq; +using System.Text.Json; +using System.Threading.Tasks; + +#nullable enable + +namespace OpenAI.FineTuning; + +internal partial class FineTuningJobsPageEnumerator : PageResultEnumerator +{ + private readonly ClientPipeline _pipeline; + private readonly Uri _endpoint; + + private readonly int? _limit; + private readonly RequestOptions _options; + + private string? _after; + + public FineTuningJobsPageEnumerator( + ClientPipeline pipeline, + Uri endpoint, + string after, int? 
limit, + RequestOptions options) + { + _pipeline = pipeline; + _endpoint = endpoint; + + _after = after; + _limit = limit; + _options = options; + } + + public override async Task GetFirstAsync() + => await GetJobsAsync(_after!, _limit, _options).ConfigureAwait(false); + + public override ClientResult GetFirst() + => GetJobs(_after!, _limit, _options); + + public override async Task GetNextAsync(ClientResult result) + { + PipelineResponse response = result.GetRawResponse(); + + using JsonDocument doc = JsonDocument.Parse(response?.Content); + + if (doc?.RootElement.TryGetProperty("data", out JsonElement dataElement) == true + && dataElement.EnumerateArray().LastOrDefault().TryGetProperty("id", out JsonElement idElement) == true) + { + _after = idElement.GetString(); + } + + return await GetJobsAsync(_after!, _limit, _options).ConfigureAwait(false); + } + + public override ClientResult GetNext(ClientResult result) + { + PipelineResponse response = result.GetRawResponse(); + + using JsonDocument doc = JsonDocument.Parse(response?.Content); + + if (doc?.RootElement.TryGetProperty("data", out JsonElement dataElement) == true + && dataElement.EnumerateArray().LastOrDefault().TryGetProperty("id", out JsonElement idElement) == true) + { + _after = idElement.GetString(); + } + + return GetJobs(_after!, _limit, _options); + } + + public override bool HasNext(ClientResult result) + { + PipelineResponse response = result.GetRawResponse(); + + using JsonDocument doc = JsonDocument.Parse(response.Content); + bool hasMore = doc.RootElement.GetProperty("has_more"u8).GetBoolean(); + + return hasMore; + } + + internal virtual async Task GetJobsAsync(string after, int? limit, RequestOptions options) + { + using PipelineMessage message = CreateGetFineTuningJobsRequest(after, limit, options); + return ClientResult.FromResponse(await _pipeline.ProcessMessageAsync(message, options).ConfigureAwait(false)); + } + + internal virtual ClientResult GetJobs(string after, int? 
limit, RequestOptions options) + { + using PipelineMessage message = CreateGetFineTuningJobsRequest(after, limit, options); + return ClientResult.FromResponse(_pipeline.ProcessMessage(message, options)); + } + + internal PipelineMessage CreateGetFineTuningJobsRequest(string after, int? limit, RequestOptions options) + { + var message = _pipeline.CreateMessage(); + message.ResponseClassifier = PipelineMessageClassifier200; + var request = message.Request; + request.Method = "GET"; + var uri = new ClientUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/fine_tuning/jobs", false); + if (after != null) + { + uri.AppendQuery("after", after, true); + } + if (limit != null) + { + uri.AppendQuery("limit", limit.Value, true); + } + request.Uri = uri.ToUri(); + request.Headers.Set("Accept", "application/json"); + message.Apply(options); + return message; + } + + private static PipelineMessageClassifier? _pipelineMessageClassifier200; + private static PipelineMessageClassifier PipelineMessageClassifier200 => _pipelineMessageClassifier200 ??= PipelineMessageClassifier.Create(stackalloc ushort[] { 200 }); +} diff --git a/.dotnet/src/Custom/Images/ImageClient.cs b/.dotnet/src/Custom/Images/ImageClient.cs index 7f57dd889..b9af9f9b6 100644 --- a/.dotnet/src/Custom/Images/ImageClient.cs +++ b/.dotnet/src/Custom/Images/ImageClient.cs @@ -8,6 +8,10 @@ namespace OpenAI.Images; +// CUSTOM: +// - Renamed. +// - Suppressed constructor that takes endpoint parameter; endpoint is now a property in the options class. +// - Suppressed methods that only take the options parameter. /// The service client for OpenAI image operations. [CodeGenClient("Images")] [CodeGenSuppress("ImageClient", typeof(ClientPipeline), typeof(ApiKeyCredential), typeof(Uri))] @@ -23,69 +27,68 @@ public partial class ImageClient // CUSTOM: // - Added `model` parameter. - // - Added support for retrieving credential and endpoint from environment variables. 
- - /// - /// Initializes a new instance of that will use an API key when authenticating. - /// - /// The model name to use for image operations. - /// The API key used to authenticate with the service endpoint. - /// Additional options to customize the client. - /// The provided was null. - public ImageClient(string model, ApiKeyCredential credential, OpenAIClientOptions options = default) - : this( - OpenAIClient.CreatePipeline(OpenAIClient.GetApiKey(credential, requireExplicitCredential: true), options), - model, - OpenAIClient.GetEndpoint(options), - options) - { } - - /// - /// Initializes a new instance of that will use an API key from the OPENAI_API_KEY - /// environment variable when authenticating. - /// - /// - /// To provide an explicit credential instead of using the environment variable, use an alternate constructor like - /// . - /// - /// The model name to use for image operations. - /// Additional options to customize the client. - /// The OPENAI_API_KEY environment variable was not found. - public ImageClient(string model, OpenAIClientOptions options = default) - : this( - OpenAIClient.CreatePipeline(OpenAIClient.GetApiKey(), options), - model, - OpenAIClient.GetEndpoint(options), - options) - { } + // - Used a custom pipeline. + // - Demoted the endpoint parameter to be a property in the options class. + /// Initializes a new instance of . + /// The name of the model to use in requests sent to the service. To learn more about the available models, see . + /// The API key to authenticate with the service. + /// or is null. + /// is an empty string, and was expected to be non-empty. + public ImageClient(string model, ApiKeyCredential credential) : this(model, credential, new OpenAIClientOptions()) + { + } // CUSTOM: // - Added `model` parameter. - - /// Initializes a new instance of EmbeddingClient. - /// The HTTP pipeline for sending and receiving REST requests and responses. 
- /// The HTTP pipeline for sending and receiving REST requests and responses. - /// OpenAI Endpoint. - protected internal ImageClient(ClientPipeline pipeline, string model, Uri endpoint, OpenAIClientOptions options) + // - Used a custom pipeline. + // - Demoted the endpoint parameter to be a property in the options class. + /// Initializes a new instance of . + /// The name of the model to use in requests sent to the service. To learn more about the available models, see . + /// The API key to authenticate with the service. + /// The options to configure the client. + /// or is null. + /// is an empty string, and was expected to be non-empty. + public ImageClient(string model, ApiKeyCredential credential, OpenAIClientOptions options) { Argument.AssertNotNullOrEmpty(model, nameof(model)); + Argument.AssertNotNull(credential, nameof(credential)); + options ??= new OpenAIClientOptions(); - _pipeline = pipeline; _model = model; - _endpoint = endpoint; + _pipeline = OpenAIClient.CreatePipeline(credential, options); + _endpoint = OpenAIClient.GetEndpoint(options); + } + + // CUSTOM: + // - Added `model` parameter. + // - Used a custom pipeline. + // - Demoted the endpoint parameter to be a property in the options class. + // - Made protected. + /// Initializes a new instance of . + /// The HTTP pipeline to send and receive REST requests and responses. + /// The name of the model to use in requests sent to the service. To learn more about the available models, see . + /// The options to configure the client. + /// or is null. + /// is an empty string, and was expected to be non-empty. 
+ protected internal ImageClient(ClientPipeline pipeline, string model, OpenAIClientOptions options) + { + Argument.AssertNotNull(pipeline, nameof(pipeline)); + Argument.AssertNotNullOrEmpty(model, nameof(model)); + options ??= new OpenAIClientOptions(); + + _model = model; + _pipeline = pipeline; + _endpoint = OpenAIClient.GetEndpoint(options); } #region GenerateImages - /// - /// Generates an image based on a given prompt. - /// + /// Generates an image based on a prompt. /// A text description of the desired image. - /// Additional options to tailor the image generation request. - /// A token that can be used to cancel this method call. + /// The options to configure the image generation. + /// A token that can be used to cancel this method call. /// is null. /// is an empty string, and was expected to be non-empty. - /// The generated image. public virtual async Task> GenerateImageAsync(string prompt, ImageGenerationOptions options = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(prompt, nameof(prompt)); @@ -98,15 +101,12 @@ public virtual async Task> GenerateImageAsync(strin return ClientResult.FromValue(GeneratedImageCollection.FromResponse(result.GetRawResponse()).FirstOrDefault(), result.GetRawResponse()); } - /// - /// Generates an image based on a given prompt. - /// + /// Generates an image based on a prompt. /// A text description of the desired image. - /// Additional options to tailor the image generation request. - /// A token that can be used to cancel this method call. + /// The options to configure the image generation. + /// A token that can be used to cancel this method call. /// is null. /// is an empty string, and was expected to be non-empty. - /// The generated image. 
public virtual ClientResult GenerateImage(string prompt, ImageGenerationOptions options = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(prompt, nameof(prompt)); @@ -119,16 +119,13 @@ public virtual ClientResult GenerateImage(string prompt, ImageGe return ClientResult.FromValue(GeneratedImageCollection.FromResponse(result.GetRawResponse()).FirstOrDefault(), result.GetRawResponse()); } - /// - /// Generates images based on a given prompt. - /// + /// Generates images based on a prompt. /// A text description of the desired images. /// The number of images to generate. - /// Additional options to tailor the image generation request. - /// A token that can be used to cancel this method call. + /// The options to configure the image generation. + /// A token that can be used to cancel this method call. /// is null. /// is an empty string, and was expected to be non-empty. - /// The generated images. public virtual async Task> GenerateImagesAsync(string prompt, int imageCount, ImageGenerationOptions options = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(prompt, nameof(prompt)); @@ -141,16 +138,13 @@ public virtual async Task> GenerateImages return ClientResult.FromValue(GeneratedImageCollection.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } - /// - /// Generates images based on a given prompt. - /// + /// Generates images based on a prompt. /// A text description of the desired images. /// The number of images to generate. - /// Additional options to tailor the image generation request. - /// A token that can be used to cancel this method call. + /// The options to configure the image generation. + /// A token that can be used to cancel this method call. /// is null. /// is an empty string, and was expected to be non-empty. - /// The generated images. 
public virtual ClientResult GenerateImages(string prompt, int imageCount, ImageGenerationOptions options = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(prompt, nameof(prompt)); @@ -167,22 +161,21 @@ public virtual ClientResult GenerateImages(string prom #region GenerateImageEdits - /// Generates an edited or extended image given an original image and a prompt. + /// Generates an edited or extended image based on an original image and a prompt. /// - /// The image to edit. Must be a valid PNG file, less than 4MB, and square. The image must have transparency, which - /// will be used as the mask. + /// The image stream to edit. Must be a valid PNG file, less than 4MB, and square. The image must have transparency, which + /// will be used as the mask. /// /// - /// The filename associated with the image stream. The filename's extension (for example: .png) will be used to - /// validate the format of the input image. The request may fail if the file extension and input image format do - /// not match. + /// The filename associated with the image stream. The filename's extension (for example: .png) will be used to + /// validate the format of the input image. The request may fail if the filename's extension and the actual + /// format of the input image do not match. /// /// A text description of the desired image. - /// Additional options to tailor the image edit request. - /// A token that can be used to cancel this method call. + /// The options to configure the image edit. + /// A token that can be used to cancel this method call. /// , , or is null. /// or is an empty string, and was expected to be non-empty. - /// The edited or extended image. 
public virtual async Task> GenerateImageEditAsync(Stream image, string imageFilename, string prompt, ImageEditOptions options = null, CancellationToken cancellationToken = default) { Argument.AssertNotNull(image, nameof(image)); @@ -197,22 +190,21 @@ public virtual async Task> GenerateImageEditAsync(S return ClientResult.FromValue(GeneratedImageCollection.FromResponse(result.GetRawResponse()).FirstOrDefault(), result.GetRawResponse()); } - /// Generates an edited or extended image given an original image and a prompt. + /// Generates an edited or extended image based on an original image and a prompt. /// - /// The image to edit. Must be a valid PNG file, less than 4MB, and square. The image must have transparency, which - /// will be used as the mask. + /// The image stream to edit. Must be a valid PNG file, less than 4MB, and square. The image must have transparency, which + /// will be used as the mask. /// /// - /// The filename associated with the image stream. The filename's extension (for example: .png) will be used to - /// validate the format of the input image. The request may fail if the file extension and input image format do - /// not match. + /// The filename associated with the image stream. The filename's extension (for example: .png) will be used to + /// validate the format of the input image. The request may fail if the filename's extension and the actual + /// format of the input image do not match. /// /// A text description of the desired image. - /// Additional options to tailor the image edit request. - /// A token that can be used to cancel this method call. + /// The options to configure the image edit. + /// A token that can be used to cancel this method call. /// , , or is null. /// or is an empty string, and was expected to be non-empty. - /// The edited or extended image. 
public virtual ClientResult GenerateImageEdit(Stream image, string imageFilename, string prompt, ImageEditOptions options = null, CancellationToken cancellationToken = default) { Argument.AssertNotNull(image, nameof(image)); @@ -227,18 +219,17 @@ public virtual ClientResult GenerateImageEdit(Stream image, stri return ClientResult.FromValue(GeneratedImageCollection.FromResponse(result.GetRawResponse()).FirstOrDefault(), result.GetRawResponse()); } - /// Generates an edited or extended image given an original image and a prompt. + /// Generates an edited or extended image based on an original image and a prompt. /// - /// The path of the image file to edit. Must be a valid PNG file, less than 4MB, and square. The image must have - /// transparency, which will be used as the mask. The provided file path's extension (for example: .png) will be - /// used to validate the format of the input image. The request may fail if the file extension and input image - /// format do not match. + /// The path of the image file to edit. Must be a valid PNG file, less than 4MB, and square. The image must + /// have transparency, which will be used as the mask. The provided file path's extension (for example: .png) + /// will be used to validate the format of the input image. The request may fail if the file path's extension + /// and the actual format of the input image do not match. /// /// A text description of the desired image. - /// Additional options to tailor the image edit request. + /// The options to configure the image edit. /// or is null. /// or is an empty string, and was expected to be non-empty. - /// The edited or extended image. 
public virtual async Task> GenerateImageEditAsync(string imageFilePath, string prompt, ImageEditOptions options = null) { Argument.AssertNotNullOrEmpty(imageFilePath, nameof(imageFilePath)); @@ -248,51 +239,47 @@ public virtual async Task> GenerateImageEditAsync(s return await GenerateImageEditAsync(imageStream, imageFilePath, prompt, options).ConfigureAwait(false); } - /// Generates an edited or extended image given an original image and a prompt. + /// Generates an edited or extended image based on an original image and a prompt. /// - /// The path of the image file to edit. Must be a valid PNG file, less than 4MB, and square. The image must have - /// transparency, which will be used as the mask. The provided file path's extension (for example: .png) will be - /// used to validate the format of the input image. The request may fail if the file extension and input image - /// format do not match. + /// The path of the image file to edit. Must be a valid PNG file, less than 4MB, and square. The image must + /// have transparency, which will be used as the mask. The provided file path's extension (for example: .png) + /// will be used to validate the format of the input image. The request may fail if the file path's extension + /// and the actual format of the input image do not match. /// /// A text description of the desired image. - /// Additional options to tailor the image edit request. + /// The options to configure the image edit. /// or is null. /// or is an empty string, and was expected to be non-empty. - /// The edited or extended image. 
public virtual ClientResult GenerateImageEdit(string imageFilePath, string prompt, ImageEditOptions options = null) { Argument.AssertNotNullOrEmpty(imageFilePath, nameof(imageFilePath)); Argument.AssertNotNullOrEmpty(prompt, nameof(prompt)); using FileStream imageStream = File.OpenRead(imageFilePath); - return GenerateImageEdit(imageStream, imageFilePath, prompt,options); + return GenerateImageEdit(imageStream, imageFilePath, prompt, options); } - /// Generates an edited or extended image given an original image, a prompt, and a mask. - /// - /// The image to edit. Must be a valid PNG file, less than 4MB, and square. - /// + /// Generates an edited or extended image based on an original image, a prompt, and a mask. + /// The image stream to edit. Must be a valid PNG file, less than 4MB, and square. /// - /// The filename associated with the image stream. The filename's extension (for example: .png) will be used to - /// validate the format of the input image. The request may fail if the file extension and input image format do - /// not match. + /// The filename associated with the image stream. The filename's extension (for example: .png) will be used to + /// validate the format of the input image. The request may fail if the filename's extension and the actual + /// format of the input image do not match. /// /// A text description of the desired image. /// - /// An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where the original image - /// should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as image. + /// An additional image whose fully transparent areas (i.e., where alpha is zero) indicate where the original image + /// should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as image. /// /// - /// The filename associated with the mask image stream. The filename's extension (for example: .png) will be used to - /// validate the format of the mask image. 
The request may fail if the file extension and mask image format - /// do not match. + /// The filename associated with the mask image stream. The filename's extension (for example: .png) will be + /// used to validate the format of the mask image. The request may fail if the filename's extension and the + /// actual format of the mask image do not match. /// - /// Additional options to tailor the image edit request. - /// A token that can be used to cancel this method call. + /// The options to configure the image edit. + /// A token that can be used to cancel this method call. /// , , , , or is null. /// , , or is an empty string, and was expected to be non-empty. - /// The edited or extended image. public virtual async Task> GenerateImageEditAsync(Stream image, string imageFilename, string prompt, Stream mask, string maskFilename, ImageEditOptions options = null, CancellationToken cancellationToken = default) { Argument.AssertNotNull(image, nameof(image)); @@ -309,30 +296,27 @@ public virtual async Task> GenerateImageEditAsync(S return ClientResult.FromValue(GeneratedImageCollection.FromResponse(result.GetRawResponse()).FirstOrDefault(), result.GetRawResponse()); } - /// Generates an edited or extended image given an original image, a prompt, and a mask. - /// - /// The image to edit. Must be a valid PNG file, less than 4MB, and square. - /// + /// Generates an edited or extended image based on an original image, a prompt, and a mask. + /// The image stream to edit. Must be a valid PNG file, less than 4MB, and square. /// - /// The filename associated with the image stream. The filename's extension (for example: .png) will be used to - /// validate the format of the input image. The request may fail if the file extension and input image format do - /// not match. + /// The filename associated with the image stream. The filename's extension (for example: .png) will be used to + /// validate the format of the input image. 
The request may fail if the filename's extension and the actual + /// format of the input image do not match. /// /// A text description of the desired image. /// - /// An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where the original image - /// should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as image. + /// An additional image whose fully transparent areas (i.e., where alpha is zero) indicate where the original image + /// should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as image. /// /// - /// The filename associated with the mask image stream. The filename's extension (for example: .png) will be used to - /// validate the format of the mask image. The request may fail if the file extension and mask image format - /// do not match. + /// The filename associated with the mask image stream. The filename's extension (for example: .png) will be + /// used to validate the format of the mask image. The request may fail if the filename's extension and the + /// actual format of the mask image do not match. /// - /// Additional options to tailor the image edit request. - /// A token that can be used to cancel this method call. + /// The options to configure the image edit. + /// A token that can be used to cancel this method call. /// , , , , or is null. /// , , or is an empty string, and was expected to be non-empty. - /// The edited or extended image. 
public virtual ClientResult GenerateImageEdit(Stream image, string imageFilename, string prompt, Stream mask, string maskFilename, ImageEditOptions options = null, CancellationToken cancellationToken = default) { Argument.AssertNotNull(image, nameof(image)); @@ -349,23 +333,23 @@ public virtual ClientResult GenerateImageEdit(Stream image, stri return ClientResult.FromValue(GeneratedImageCollection.FromResponse(result.GetRawResponse()).FirstOrDefault(), result.GetRawResponse()); } - /// Generates an edited or extended image given an original image, a prompt, and a mask. + /// Generates an edited or extended image based on an original image, a prompt, and a mask. /// - /// The path of the image file to edit. Must be a valid PNG file, less than 4MB, and square. The provided file - /// path's extension (for example: .png) will be used to validate the format of the input image. The request may - /// fail if the file extension and input image format do not match. + /// The path of the image file to edit. Must be a valid PNG file, less than 4MB, and square. The provided file + /// path's extension (for example: .png) will be used to validate the format of the input image. The request + /// may fail if the file path's extension and the actual format of the input image do not match. /// /// A text description of the desired image. /// - /// The path of the mask image file whose fully transparent areas (e.g. where alpha is zero) indicate where the - /// original image should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as the - /// original image. The provided file path's extension (for example: .png) will be used to validate the format of - /// the input image. The request may fail if the file extension and mask image format do not match. + /// The path of the mask image file whose fully transparent areas (i.e., where alpha is zero) indicate where + /// the original image should be edited. 
Must be a valid PNG file, less than 4MB, and have the same dimensions + /// as the original image. The provided file path's extension (for example: .png) will be used to validate the + /// format of the mask image. The request may fail if the file path's extension and the actual format of the + /// mask image do not match. /// - /// Additional options to tailor the image edit request. + /// The options to configure the image edit. /// , or is null. /// , , or is an empty string, and was expected to be non-empty. - /// The edited or extended image. public virtual async Task> GenerateImageEditAsync(string imageFilePath, string prompt, string maskFilePath, ImageEditOptions options = null) { Argument.AssertNotNullOrEmpty(imageFilePath, nameof(imageFilePath)); @@ -377,23 +361,23 @@ public virtual async Task> GenerateImageEditAsync(s return await GenerateImageEditAsync(imageStream, imageFilePath, prompt, maskStream, maskFilePath, options).ConfigureAwait(false); } - /// Generates an edited or extended image given an original image, a prompt, and a mask. + /// Generates an edited or extended image based on an original image, a prompt, and a mask. /// - /// The path of the image file to edit. Must be a valid PNG file, less than 4MB, and square. The provided file - /// path's extension (for example: .png) will be used to validate the format of the input image. The request may - /// fail if the file extension and input image format do not match. + /// The path of the image file to edit. Must be a valid PNG file, less than 4MB, and square. The provided file + /// path's extension (for example: .png) will be used to validate the format of the input image. The request + /// may fail if the file path's extension and the actual format of the input image do not match. /// /// A text description of the desired image. /// - /// The path of the mask image file whose fully transparent areas (e.g. where alpha is zero) indicate where the - /// original image should be edited. 
Must be a valid PNG file, less than 4MB, and have the same dimensions as the - /// original image. The provided file path's extension (for example: .png) will be used to validate the format of - /// the input image. The request may fail if the file extension and mask image format do not match. + /// The path of the mask image file whose fully transparent areas (i.e., where alpha is zero) indicate where + /// the original image should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions + /// as the original image. The provided file path's extension (for example: .png) will be used to validate the + /// format of the mask image. The request may fail if the file path's extension and the actual format of the + /// mask image do not match. /// - /// Additional options to tailor the image edit request. + /// The options to configure the image edit. /// , or is null. /// , , or is an empty string, and was expected to be non-empty. - /// The edited or extended image. public virtual ClientResult GenerateImageEdit(string imageFilePath, string prompt, string maskFilePath, ImageEditOptions options = null) { Argument.AssertNotNullOrEmpty(imageFilePath, nameof(imageFilePath)); @@ -405,23 +389,22 @@ public virtual ClientResult GenerateImageEdit(string imageFilePa return GenerateImageEdit(imageStream, imageFilePath, prompt, maskStream, maskFilePath, options); } - /// Generates edited or extended images given an original image and a prompt. + /// Generates edited or extended images based on an original image and a prompt. /// - /// The image to edit. Must be a valid PNG file, less than 4MB, and square. The image must have transparency, which - /// will be used as the mask. + /// The image stream to edit. Must be a valid PNG file, less than 4MB, and square. The image must have transparency, which + /// will be used as the mask. /// /// - /// The filename associated with the image stream. 
The filename's extension (for example: .png) will be used to - /// validate the format of the input image. The request may fail if the file extension and input image format do - /// not match. + /// The filename associated with the image stream. The filename's extension (for example: .png) will be used to + /// validate the format of the input image. The request may fail if the filename's extension and the actual + /// format of the input image do not match. /// /// A text description of the desired image. - /// The number of edit or extended images to generate. - /// Additional options to tailor the image edit request. - /// A token that can be used to cancel this method call. + /// The number of edited or extended images to generate. + /// The options to configure the image edit. + /// A token that can be used to cancel this method call. /// , , or is null. /// or is an empty string, and was expected to be non-empty. - /// The edited or extended images. public virtual async Task> GenerateImageEditsAsync(Stream image, string imageFilename, string prompt, int imageCount, ImageEditOptions options = null, CancellationToken cancellationToken = default) { Argument.AssertNotNull(image, nameof(image)); @@ -436,23 +419,22 @@ public virtual async Task> GenerateImageE return ClientResult.FromValue(GeneratedImageCollection.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } - /// Generates edited or extended images given an original image and a prompt. + /// Generates edited or extended images based on an original image and a prompt. /// - /// The image to edit. Must be a valid PNG file, less than 4MB, and square. The image must have transparency, which - /// will be used as the mask. + /// The image stream to edit. Must be a valid PNG file, less than 4MB, and square. The image must have transparency, which + /// will be used as the mask. /// /// - /// The filename associated with the image stream. 
The filename's extension (for example: .png) will be used to - /// validate the format of the input image. The request may fail if the file extension and input image format do - /// not match. + /// The filename associated with the image stream. The filename's extension (for example: .png) will be used to + /// validate the format of the input image. The request may fail if the filename's extension and the actual + /// format of the input image do not match. /// /// A text description of the desired image. - /// The number of edit or extended images to generate. - /// Additional options to tailor the image edit request. - /// A token that can be used to cancel this method call. + /// The number of edited or extended images to generate. + /// The options to configure the image edit. + /// A token that can be used to cancel this method call. /// , , or is null. /// or is an empty string, and was expected to be non-empty. - /// The edited or extended images. public virtual ClientResult GenerateImageEdits(Stream image, string imageFilename, string prompt, int imageCount, ImageEditOptions options = null, CancellationToken cancellationToken = default) { Argument.AssertNotNull(image, nameof(image)); @@ -467,19 +449,18 @@ public virtual ClientResult GenerateImageEdits(Stream return ClientResult.FromValue(GeneratedImageCollection.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } - /// Generates edited or extended images given an original image and a prompt. + /// Generates edited or extended images based on an original image and a prompt. /// - /// The path of the image file to edit. Must be a valid PNG file, less than 4MB, and square. The image must have - /// transparency, which will be used as the mask. The provided file path's extension (for example: .png) will be - /// used to validate the format of the input image. The request may fail if the file extension and input image - /// format do not match. + /// The path of the image file to edit. 
Must be a valid PNG file, less than 4MB, and square. The image must + /// have transparency, which will be used as the mask. The provided file path's extension (for example: .png) + /// will be used to validate the format of the input image. The request may fail if the file path's extension + /// and the actual format of the input image do not match. /// /// A text description of the desired image. - /// The number of edit or extended images to generate. - /// Additional options to tailor the image edit request. + /// The number of edited or extended images to generate. + /// The options to configure the image edit. /// or is null. /// or is an empty string, and was expected to be non-empty. - /// The edited or extended images. public virtual async Task> GenerateImageEditsAsync(string imageFilePath, string prompt, int imageCount, ImageEditOptions options = null) { Argument.AssertNotNullOrEmpty(imageFilePath, nameof(imageFilePath)); @@ -489,19 +470,18 @@ public virtual async Task> GenerateImageE return await GenerateImageEditsAsync(imageStream, imageFilePath, prompt, imageCount, options).ConfigureAwait(false); } - /// Generates edited or extended images given an original image and a prompt. + /// Generates edited or extended images based on an original image and a prompt. /// - /// The path of the image file to edit. Must be a valid PNG file, less than 4MB, and square. The image must have - /// transparency, which will be used as the mask. The provided file path's extension (for example: .png) will be - /// used to validate the format of the input image. The request may fail if the file extension and input image - /// format do not match. + /// The path of the image file to edit. Must be a valid PNG file, less than 4MB, and square. The image must + /// have transparency, which will be used as the mask. The provided file path's extension (for example: .png) + /// will be used to validate the format of the input image. 
The request may fail if the file path's extension + /// and the actual format of the input image do not match. /// /// A text description of the desired image. - /// The number of edit or extended images to generate. - /// Additional options to tailor the image edit request. + /// The number of edited or extended images to generate. + /// The options to configure the image edit. /// or is null. /// or is an empty string, and was expected to be non-empty. - /// The edited or extended images. public virtual ClientResult GenerateImageEdits(string imageFilePath, string prompt, int imageCount, ImageEditOptions options = null) { Argument.AssertNotNullOrEmpty(imageFilePath, nameof(imageFilePath)); @@ -511,31 +491,28 @@ public virtual ClientResult GenerateImageEdits(string return GenerateImageEdits(imageStream, imageFilePath, prompt, imageCount, options); } - /// Generates edited or extended images given an original image, a prompt, and a mask. - /// - /// The image to edit. Must be a valid PNG file, less than 4MB, and square. - /// + /// Generates edited or extended images based on an original image, a prompt, and a mask. + /// The image stream to edit. Must be a valid PNG file, less than 4MB, and square. /// - /// The filename associated with the image stream. The filename's extension (for example: .png) will be used to - /// validate the format of the input image. The request may fail if the file extension and input image format do - /// not match. + /// The filename associated with the image stream. The filename's extension (for example: .png) will be used to + /// validate the format of the input image. The request may fail if the filename's extension and the actual + /// format of the input image do not match. /// /// A text description of the desired image. /// - /// An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where the original image - /// should be edited. 
Must be a valid PNG file, less than 4MB, and have the same dimensions as image. + /// An additional image whose fully transparent areas (i.e., where alpha is zero) indicate where the original image + /// should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as image. /// /// - /// The filename associated with the mask image stream. The filename's extension (for example: .png) will be used to - /// validate the format of the mask image. The request may fail if the file extension and mask image format - /// do not match. + /// The filename associated with the mask image stream. The filename's extension (for example: .png) will be + /// used to validate the format of the mask image. The request may fail if the filename's extension and the + /// actual format of the mask image do not match. /// - /// The number of edit or extended images to generate. - /// Additional options to tailor the image edit request. - /// A token that can be used to cancel this method call. + /// The number of edited or extended images to generate. + /// The options to configure the image edit. + /// A token that can be used to cancel this method call. /// , , , , or is null. /// , , or is an empty string, and was expected to be non-empty. - /// The edited or extended images. public virtual async Task> GenerateImageEditsAsync(Stream image, string imageFilename, string prompt, Stream mask, string maskFilename, int imageCount, ImageEditOptions options = null, CancellationToken cancellationToken = default) { Argument.AssertNotNull(image, nameof(image)); @@ -552,31 +529,28 @@ public virtual async Task> GenerateImageE return ClientResult.FromValue(GeneratedImageCollection.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } - /// Generates edited or extended images given an original image, a prompt, and a mask. - /// - /// The image to edit. Must be a valid PNG file, less than 4MB, and square. 
- /// + /// Generates edited or extended images based on an original image, a prompt, and a mask. + /// The image stream to edit. Must be a valid PNG file, less than 4MB, and square. /// - /// The filename associated with the image stream. The filename's extension (for example: .png) will be used to - /// validate the format of the input image. The request may fail if the file extension and input image format do - /// not match. + /// The filename associated with the image stream. The filename's extension (for example: .png) will be used to + /// validate the format of the input image. The request may fail if the filename's extension and the actual + /// format of the input image do not match. /// /// A text description of the desired image. /// - /// An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where the original image - /// should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as image. + /// An additional image whose fully transparent areas (i.e., where alpha is zero) indicate where the original image + /// should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as image. /// /// - /// The filename associated with the mask image stream. The filename's extension (for example: .png) will be used to - /// validate the format of the mask image. The request may fail if the file extension and mask image format - /// do not match. + /// The filename associated with the mask image stream. The filename's extension (for example: .png) will be + /// used to validate the format of the mask image. The request may fail if the filename's extension and the + /// actual format of the mask image do not match. /// - /// The number of edit or extended images to generate. - /// Additional options to tailor the image edit request. - /// A token that can be used to cancel this method call. + /// The number of edited or extended images to generate. 
+ /// The options to configure the image edit. + /// A token that can be used to cancel this method call. /// , , , , or is null. /// , , or is an empty string, and was expected to be non-empty. - /// The edited or extended images. public virtual ClientResult GenerateImageEdits(Stream image, string imageFilename, string prompt, Stream mask, string maskFilename, int imageCount, ImageEditOptions options = null, CancellationToken cancellationToken = default) { Argument.AssertNotNull(image, nameof(image)); @@ -593,24 +567,24 @@ public virtual ClientResult GenerateImageEdits(Stream return ClientResult.FromValue(GeneratedImageCollection.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } - /// Generates edited or extended images given an original image, a prompt, and a mask. + /// Generates edited or extended images based on an original image, a prompt, and a mask. /// - /// The path of the image file to edit. Must be a valid PNG file, less than 4MB, and square. The provided file - /// path's extension (for example: .png) will be used to validate the format of the input image. The request may - /// fail if the file extension and input image format do not match. + /// The path of the image file to edit. Must be a valid PNG file, less than 4MB, and square. The provided file + /// path's extension (for example: .png) will be used to validate the format of the input image. The request + /// may fail if the file path's extension and the actual format of the input image do not match. /// /// A text description of the desired image. /// - /// The path of the mask image file whose fully transparent areas (e.g. where alpha is zero) indicate where the - /// original image should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as the - /// original image. The provided file path's extension (for example: .png) will be used to validate the format of - /// the input image. 
The request may fail if the file extension and mask image format do not match. - /// - /// The number of edit or extended images to generate. - /// Additional options to tailor the image edit request. + /// The path of the mask image file whose fully transparent areas (i.e., where alpha is zero) indicate where + /// the original image should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions + /// as the original image. The provided file path's extension (for example: .png) will be used to validate the + /// format of the mask image. The request may fail if the file path's extension and the actual format of the + /// mask image do not match. + /// + /// The number of edited or extended images to generate. + /// The options to configure the image edit. /// , or is null. /// , , or is an empty string, and was expected to be non-empty. - /// The edited or extended images. public virtual async Task> GenerateImageEditsAsync(string imageFilePath, string prompt, string maskFilePath, int imageCount, ImageEditOptions options = null) { Argument.AssertNotNullOrEmpty(imageFilePath, nameof(imageFilePath)); @@ -622,24 +596,24 @@ public virtual async Task> GenerateImageE return await GenerateImageEditsAsync(imageStream, imageFilePath, prompt, maskStream, maskFilePath, imageCount, options).ConfigureAwait(false); } - /// Generates edited or extended images given an original image, a prompt, and a mask. + /// Generates edited or extended images based on an original image, a prompt, and a mask. /// - /// The path of the image file to edit. Must be a valid PNG file, less than 4MB, and square. The provided file - /// path's extension (for example: .png) will be used to validate the format of the input image. The request may - /// fail if the file extension and input image format do not match. + /// The path of the image file to edit. Must be a valid PNG file, less than 4MB, and square. 
The provided file + /// path's extension (for example: .png) will be used to validate the format of the input image. The request + /// may fail if the file path's extension and the actual format of the input image do not match. /// /// A text description of the desired image. /// - /// The path of the mask image file whose fully transparent areas (e.g. where alpha is zero) indicate where the - /// original image should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as the - /// original image. The provided file path's extension (for example: .png) will be used to validate the format of - /// the input image. The request may fail if the file extension and mask image format do not match. - /// - /// The number of edit or extended images to generate. - /// Additional options to tailor the image edit request. + /// The path of the mask image file whose fully transparent areas (i.e., where alpha is zero) indicate where + /// the original image should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions + /// as the original image. The provided file path's extension (for example: .png) will be used to validate the + /// format of the mask image. The request may fail if the file path's extension and the actual format of the + /// mask image do not match. + /// + /// The number of edited or extended images to generate. + /// The options to configure the image edit. /// , or is null. /// , , or is an empty string, and was expected to be non-empty. - /// The edited or extended images. public virtual ClientResult GenerateImageEdits(string imageFilePath, string prompt, string maskFilePath, int imageCount, ImageEditOptions options = null) { Argument.AssertNotNullOrEmpty(imageFilePath, nameof(imageFilePath)); @@ -656,19 +630,16 @@ public virtual ClientResult GenerateImageEdits(string #region GenerateImageVariations /// Generates a variation of a given image. - /// - /// The image to use as the basis for the variation. 
Must be a valid PNG file, less than 4MB, and square. - /// + /// The image stream to use as the basis for the variation. Must be a valid PNG file, less than 4MB, and square. /// - /// The filename associated with the image stream. The filename's extension (for example: .png) will be used to - /// validate the format of the input image. The request may fail if the file extension and input image format do - /// not match. + /// The filename associated with the image stream. The filename's extension (for example: .png) will be used to + /// validate the format of the input image. The request may fail if the filename's extension and the actual + /// format of the input image do not match. /// - /// Additional options to tailor the image variation request. - /// A token that can be used to cancel this method call. + /// The options to configure the image variation. + /// A token that can be used to cancel this method call. /// or is null. /// is an empty string, and was expected to be non-empty. - /// The generated image variation. public virtual async Task> GenerateImageVariationAsync(Stream image, string imageFilename, ImageVariationOptions options = null, CancellationToken cancellationToken = default) { Argument.AssertNotNull(image, nameof(image)); @@ -683,19 +654,16 @@ public virtual async Task> GenerateImageVariationAs } /// Generates a variation of a given image. - /// - /// The image to use as the basis for the variation. Must be a valid PNG file, less than 4MB, and square. - /// + /// The image stream to use as the basis for the variation. Must be a valid PNG file, less than 4MB, and square. /// - /// The filename associated with the image stream. The filename's extension (for example: .png) will be used to - /// validate the format of the input image. The request may fail if the file extension and input image format do - /// not match. + /// The filename associated with the image stream. 
The filename's extension (for example: .png) will be used to + /// validate the format of the input image. The request may fail if the filename's extension and the actual + /// format of the input image do not match. /// - /// Additional options to tailor the image variation request. - /// A token that can be used to cancel this method call. + /// The options to configure the image variation. + /// A token that can be used to cancel this method call. /// or is null. /// is an empty string, and was expected to be non-empty. - /// The generated image variation. public virtual ClientResult GenerateImageVariation(Stream image, string imageFilename, ImageVariationOptions options = null, CancellationToken cancellationToken = default) { Argument.AssertNotNull(image, nameof(image)); @@ -711,14 +679,14 @@ public virtual ClientResult GenerateImageVariation(Stream image, /// Generates a variation of a given image. /// - /// The path of the image file to use as the basis for the variation. Must be a valid PNG file, less than 4MB, and - /// square. The provided file path's extension (for example: .png) will be used to validate the format of the input - /// image. The request may fail if the file extension and input image format do not match. + /// The path of the image file to use as the basis for the variation. Must be a valid PNG file, less than 4MB, + /// and square. The provided file path's extension (for example: .png) will be used to validate the format of + /// the input image. The request may fail if the file path's extension and the actual format of the input image + /// do not match. /// - /// Additional options to tailor the image variation request. + /// The options to configure the image variation. /// is null. /// is an empty string, and was expected to be non-empty. - /// The generated image variation. 
public virtual async Task> GenerateImageVariationAsync(string imageFilePath, ImageVariationOptions options = null) { Argument.AssertNotNullOrEmpty(imageFilePath, nameof(imageFilePath)); @@ -729,14 +697,14 @@ public virtual async Task> GenerateImageVariationAs /// Generates a variation of a given image. /// - /// The path of the image file to use as the basis for the variation. Must be a valid PNG file, less than 4MB, and - /// square. The provided file path's extension (for example: .png) will be used to validate the format of the input - /// image. The request may fail if the file extension and input image format do not match. + /// The path of the image file to use as the basis for the variation. Must be a valid PNG file, less than 4MB, + /// and square. The provided file path's extension (for example: .png) will be used to validate the format of + /// the input image. The request may fail if the file path's extension and the actual format of the input image + /// do not match. /// - /// Additional options to tailor the image variation request. + /// The options to configure the image variation. /// is null. /// is an empty string, and was expected to be non-empty. - /// The generated image variation. public virtual ClientResult GenerateImageVariation(string imageFilePath, ImageVariationOptions options = null) { Argument.AssertNotNullOrEmpty(imageFilePath, nameof(imageFilePath)); @@ -746,20 +714,17 @@ public virtual ClientResult GenerateImageVariation(string imageF } /// Generates variations of a given image. - /// - /// The image to use as the basis for the variation. Must be a valid PNG file, less than 4MB, and square. - /// + /// The image stream to use as the basis for the variation. Must be a valid PNG file, less than 4MB, and square. /// - /// The filename associated with the image stream. The filename's extension (for example: .png) will be used to - /// validate the format of the input image. 
The request may fail if the file extension and input image format do - /// not match. + /// The filename associated with the image stream. The filename's extension (for example: .png) will be used to + /// validate the format of the input image. The request may fail if the filename's extension and the actual + /// format of the input image do not match. /// /// The number of image variations to generate. - /// Additional options to tailor the image variation request. - /// A token that can be used to cancel this method call. + /// The options to configure the image variation. + /// A token that can be used to cancel this method call. /// or is null. /// is an empty string, and was expected to be non-empty. - /// The generated image variations. public virtual async Task> GenerateImageVariationsAsync(Stream image, string imageFilename, int imageCount, ImageVariationOptions options = null, CancellationToken cancellationToken = default) { Argument.AssertNotNull(image, nameof(image)); @@ -774,20 +739,17 @@ public virtual async Task> GenerateImageV } /// Generates variations of a given image. - /// - /// The image to use as the basis for the variation. Must be a valid PNG file, less than 4MB, and square. - /// + /// The image stream to use as the basis for the variation. Must be a valid PNG file, less than 4MB, and square. /// - /// The filename associated with the image stream. The filename's extension (for example: .png) will be used to - /// validate the format of the input image. The request may fail if the file extension and input image format do - /// not match. + /// The filename associated with the image stream. The filename's extension (for example: .png) will be used to + /// validate the format of the input image. The request may fail if the filename's extension and the actual + /// format of the input image do not match. /// /// The number of image variations to generate. - /// Additional options to tailor the image variation request. 
- /// A token that can be used to cancel this method call. + /// The options to configure the image variation. + /// A token that can be used to cancel this method call. /// or is null. /// is an empty string, and was expected to be non-empty. - /// The generated image variations. public virtual ClientResult GenerateImageVariations(Stream image, string imageFilename, int imageCount, ImageVariationOptions options = null, CancellationToken cancellationToken = default) { Argument.AssertNotNull(image, nameof(image)); @@ -803,15 +765,15 @@ public virtual ClientResult GenerateImageVariations(St /// Generates variations of a given image. /// - /// The path of the image file to use as the basis for the variation. Must be a valid PNG file, less than 4MB, and - /// square. The provided file path's extension (for example: .png) will be used to validate the format of the input - /// image. The request may fail if the file extension and input image format do not match. + /// The path of the image file to use as the basis for the variation. Must be a valid PNG file, less than 4MB, + /// and square. The provided file path's extension (for example: .png) will be used to validate the format of + /// the input image. The request may fail if the file path's extension and the actual format of the input image + /// do not match. /// /// The number of image variations to generate. - /// Additional options to tailor the image variation request. + /// The options to configure the image variation. /// was null. /// is an empty string, and was expected to be non-empty. - /// The generated image variations. public virtual async Task> GenerateImageVariationsAsync(string imageFilePath, int imageCount, ImageVariationOptions options = null) { Argument.AssertNotNullOrEmpty(imageFilePath, nameof(imageFilePath)); @@ -822,15 +784,15 @@ public virtual async Task> GenerateImageV /// Generates variations of a given image. /// - /// The path of the image file to use as the basis for the variation. 
Must be a valid PNG file, less than 4MB, and - /// square. The provided file path's extension (for example: .png) will be used to validate the format of the input - /// image. The request may fail if the file extension and input image format do not match. + /// The path of the image file to use as the basis for the variation. Must be a valid PNG file, less than 4MB, + /// and square. The provided file path's extension (for example: .png) will be used to validate the format of + /// the input image. The request may fail if the file path's extension and the actual format of the input image + /// do not match. /// /// The number of image variations to generate. - /// Additional options to tailor the image variation request. + /// The options to configure the image variation. /// was null. /// is an empty string, and was expected to be non-empty. - /// The generated image variations. public virtual ClientResult GenerateImageVariations(string imageFilePath, int imageCount, ImageVariationOptions options = null) { Argument.AssertNotNullOrEmpty(imageFilePath, nameof(imageFilePath)); diff --git a/.dotnet/src/Custom/Images/ImageEditOptions.cs b/.dotnet/src/Custom/Images/ImageEditOptions.cs index 5ed8a92cd..47f836098 100644 --- a/.dotnet/src/Custom/Images/ImageEditOptions.cs +++ b/.dotnet/src/Custom/Images/ImageEditOptions.cs @@ -34,7 +34,7 @@ public partial class ImageEditOptions /// /// /// - internal BinaryData Image { get; set; } + internal BinaryData Image { get; set; } // CUSTOM: // - Made internal. This value comes from a parameter on the client method. @@ -75,12 +75,22 @@ public ImageEditOptions() // CUSTOM: Changed property type. /// The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. + [CodeGenMember("Size")] public GeneratedImageSize? Size { get; set; } // CUSTOM: Changed property type. /// The format in which the generated images are returned. Must be one of `url` or `b64_json`. 
+ [CodeGenMember("ResponseFormat")] public GeneratedImageFormat? ResponseFormat { get; set; } + // CUSTOM: Renamed. + /// + /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. + /// Learn more. + /// + [CodeGenMember("User")] + public string EndUserId { get; set; } + internal MultipartFormDataBinaryContent ToMultipartContent(Stream image, string imageFilename, Stream mask, string maskFilename) { MultipartFormDataBinaryContent content = new(); @@ -116,9 +126,9 @@ internal MultipartFormDataBinaryContent ToMultipartContent(Stream image, string content.Add(Size.ToString(), "size"); } - if (User is not null) + if (EndUserId is not null) { - content.Add(User, "user"); + content.Add(EndUserId, "user"); } return content; diff --git a/.dotnet/src/Custom/Images/ImageGenerationOptions.cs b/.dotnet/src/Custom/Images/ImageGenerationOptions.cs index f22988e0b..688e7e701 100644 --- a/.dotnet/src/Custom/Images/ImageGenerationOptions.cs +++ b/.dotnet/src/Custom/Images/ImageGenerationOptions.cs @@ -32,4 +32,12 @@ public partial class ImageGenerationOptions public ImageGenerationOptions() { } + + // CUSTOM: Renamed. + /// + /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. + /// Learn more. + /// + [CodeGenMember("User")] + public string EndUserId { get; set; } } \ No newline at end of file diff --git a/.dotnet/src/Custom/Images/ImageVariationOptions.cs b/.dotnet/src/Custom/Images/ImageVariationOptions.cs index f36e2ef47..0f71b265b 100644 --- a/.dotnet/src/Custom/Images/ImageVariationOptions.cs +++ b/.dotnet/src/Custom/Images/ImageVariationOptions.cs @@ -48,12 +48,22 @@ public ImageVariationOptions() // CUSTOM: Changed property type. /// The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. + [CodeGenMember("Size")] public GeneratedImageSize? Size { get; set; } // CUSTOM: Changed property type. 
/// The format in which the generated images are returned. Must be one of `url` or `b64_json`. + [CodeGenMember("ResponseFormat")] public GeneratedImageFormat? ResponseFormat { get; set; } + // CUSTOM: Renamed. + /// + /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. + /// Learn more. + /// + [CodeGenMember("User")] + public string EndUserId { get; set; } + internal MultipartFormDataBinaryContent ToMultipartContent(Stream image, string imageFilename) { MultipartFormDataBinaryContent content = new(); @@ -83,9 +93,9 @@ internal MultipartFormDataBinaryContent ToMultipartContent(Stream image, string content.Add(Size.ToString(), "size"); } - if (User is not null) + if (EndUserId is not null) { - content.Add(User, "user"); + content.Add(EndUserId, "user"); } return content; diff --git a/.dotnet/src/Custom/Internal/CancellationTokenExtensions.cs b/.dotnet/src/Custom/Internal/CancellationTokenExtensions.cs index d0a5bc81f..ba7a74e19 100644 --- a/.dotnet/src/Custom/Internal/CancellationTokenExtensions.cs +++ b/.dotnet/src/Custom/Internal/CancellationTokenExtensions.cs @@ -13,7 +13,8 @@ public static RequestOptions ToRequestOptions(this CancellationToken cancellatio return StreamRequestOptions; } - return new RequestOptions() { + return new RequestOptions() + { CancellationToken = cancellationToken, BufferResponse = !streaming, }; diff --git a/.dotnet/src/Custom/LegacyCompletions/Internal/GeneratorStubs.cs b/.dotnet/src/Custom/LegacyCompletions/Internal/GeneratorStubs.cs index b184f9da0..43682d982 100644 --- a/.dotnet/src/Custom/LegacyCompletions/Internal/GeneratorStubs.cs +++ b/.dotnet/src/Custom/LegacyCompletions/Internal/GeneratorStubs.cs @@ -18,7 +18,7 @@ internal partial class InternalCreateCompletionResponseChoice { } internal readonly partial struct InternalCreateCompletionResponseChoiceFinishReason { } [CodeGenModel("CreateCompletionResponseChoiceLogprobs")] -internal partial class 
InternalCreateCompletionResponseChoiceLogprobs { } +internal partial class InternalCreateCompletionResponseChoiceLogprobs { } [CodeGenModel("CreateCompletionResponseObject")] internal readonly partial struct InternalCreateCompletionResponseObject { } \ No newline at end of file diff --git a/.dotnet/src/Custom/LegacyCompletions/Internal/LegacyCompletionClient.cs b/.dotnet/src/Custom/LegacyCompletions/Internal/LegacyCompletionClient.cs index 074625f2e..7d50115ce 100644 --- a/.dotnet/src/Custom/LegacyCompletions/Internal/LegacyCompletionClient.cs +++ b/.dotnet/src/Custom/LegacyCompletions/Internal/LegacyCompletionClient.cs @@ -1,13 +1,73 @@ +using System; +using System.ClientModel.Primitives; +using System.ClientModel; + namespace OpenAI.LegacyCompletions; -/// -/// The basic, protocol-level service client for OpenAI legacy completion operations. -/// -/// Note: pre-chat completions are a legacy feature. New solutions should consider the use of chat -/// completions or assistants, instead. -/// -/// +// CUSTOM: +// - Renamed. +// - Suppressed constructor that takes endpoint parameter; endpoint is now a property in the options class. +// - Suppressed methods that only take the options parameter. +/// The service client for OpenAI legacy completion operations. [CodeGenClient("Completions")] +[CodeGenSuppress("LegacyCompletionClient", typeof(ClientPipeline), typeof(ApiKeyCredential), typeof(Uri))] internal partial class LegacyCompletionClient { + private readonly string _model; + + // CUSTOM: + // - Added `model` parameter. + // - Used a custom pipeline. + // - Demoted the endpoint parameter to be a property in the options class. + /// Initializes a new instance of . + /// The name of the model to use in requests sent to the service. To learn more about the available models, see . + /// The API key to authenticate with the service. + /// or is null. + /// is an empty string, and was expected to be non-empty. 
+ public LegacyCompletionClient(string model, ApiKeyCredential credential) : this(model, credential, new OpenAIClientOptions()) + { + } + + // CUSTOM: + // - Added `model` parameter. + // - Used a custom pipeline. + // - Demoted the endpoint parameter to be a property in the options class. + /// Initializes a new instance of . + /// The name of the model to use in requests sent to the service. To learn more about the available models, see . + /// The API key to authenticate with the service. + /// The options to configure the client. + /// or is null. + /// is an empty string, and was expected to be non-empty. + public LegacyCompletionClient(string model, ApiKeyCredential credential, OpenAIClientOptions options) + { + Argument.AssertNotNullOrEmpty(model, nameof(model)); + Argument.AssertNotNull(credential, nameof(credential)); + options ??= new OpenAIClientOptions(); + + _model = model; + _pipeline = OpenAIClient.CreatePipeline(credential, options); + _endpoint = OpenAIClient.GetEndpoint(options); + } + + // CUSTOM: + // - Added `model` parameter. + // - Used a custom pipeline. + // - Demoted the endpoint parameter to be a property in the options class. + // - Made protected. + /// Initializes a new instance of . + /// The HTTP pipeline to send and receive REST requests and responses. + /// The name of the model to use in requests sent to the service. To learn more about the available models, see . + /// The options to configure the client. + /// or is null. + /// is an empty string, and was expected to be non-empty. 
+ protected internal LegacyCompletionClient(ClientPipeline pipeline, string model, OpenAIClientOptions options) + { + Argument.AssertNotNull(pipeline, nameof(pipeline)); + Argument.AssertNotNullOrEmpty(model, nameof(model)); + options ??= new OpenAIClientOptions(); + + _model = model; + _pipeline = pipeline; + _endpoint = OpenAIClient.GetEndpoint(options); + } } diff --git a/.dotnet/src/Custom/Models/ModelClient.cs b/.dotnet/src/Custom/Models/ModelClient.cs index 56e20876a..efbadd6d7 100644 --- a/.dotnet/src/Custom/Models/ModelClient.cs +++ b/.dotnet/src/Custom/Models/ModelClient.cs @@ -5,90 +5,80 @@ namespace OpenAI.Models; -/// -/// The service client for OpenAI model operations. -/// +// CUSTOM: +// - Renamed. +// - Suppressed constructor that takes endpoint parameter; endpoint is now a property in the options class. +// - Renamed convenience methods. +/// The service client for OpenAI model operations. [CodeGenClient("ModelsOps")] [CodeGenSuppress("ModelClient", typeof(ClientPipeline), typeof(ApiKeyCredential), typeof(Uri))] -[CodeGenSuppress("GetModelsAsync")] -[CodeGenSuppress("GetModels")] [CodeGenSuppress("RetrieveAsync", typeof(string))] [CodeGenSuppress("Retrieve", typeof(string))] [CodeGenSuppress("DeleteAsync", typeof(string))] [CodeGenSuppress("Delete", typeof(string))] public partial class ModelClient { - /// - /// Initializes a new instance of that will use an API key when authenticating. - /// - /// The API key used to authenticate with the service endpoint. - /// Additional options to customize the client. - /// The provided was null. - public ModelClient(ApiKeyCredential credential, OpenAIClientOptions options = null) - : this( - OpenAIClient.CreatePipeline(OpenAIClient.GetApiKey(credential, requireExplicitCredential: true), options), - OpenAIClient.GetEndpoint(options), - options) + // CUSTOM: + // - Used a custom pipeline. + // - Demoted the endpoint parameter to be a property in the options class. + /// Initializes a new instance of . 
+ /// The API key to authenticate with the service. + /// is null. + public ModelClient(ApiKeyCredential credential) : this(credential, new OpenAIClientOptions()) { } - /// - /// Initializes a new instance of that will use an API key from the OPENAI_API_KEY - /// environment variable when authenticating. - /// - /// - /// To provide an explicit credential instead of using the environment variable, use an alternate constructor like - /// . - /// - /// Additional options to customize the client. - /// The OPENAI_API_KEY environment variable was not found. - public ModelClient(OpenAIClientOptions options = null) - : this( - OpenAIClient.CreatePipeline(OpenAIClient.GetApiKey(), options), - OpenAIClient.GetEndpoint(options), - options) + // CUSTOM: + // - Used a custom pipeline. + // - Demoted the endpoint parameter to be a property in the options class. + /// Initializes a new instance of . + /// The API key to authenticate with the service. + /// The options to configure the client. + /// is null. + public ModelClient(ApiKeyCredential credential, OpenAIClientOptions options) { + Argument.AssertNotNull(credential, nameof(credential)); + options ??= new OpenAIClientOptions(); + + _pipeline = OpenAIClient.CreatePipeline(credential, options); + _endpoint = OpenAIClient.GetEndpoint(options); } - /// Initializes a new instance of . - /// The HTTP pipeline for sending and receiving REST requests and responses. - /// OpenAI Endpoint. - protected internal ModelClient(ClientPipeline pipeline, Uri endpoint, OpenAIClientOptions options) + // CUSTOM: + // - Used a custom pipeline. + // - Demoted the endpoint parameter to be a property in the options class. + // - Made protected. + /// Initializes a new instance of . + /// The HTTP pipeline to send and receive REST requests and responses. + /// The options to configure the client. + /// is null. 
+ protected internal ModelClient(ClientPipeline pipeline, OpenAIClientOptions options) { + Argument.AssertNotNull(pipeline, nameof(pipeline)); + options ??= new OpenAIClientOptions(); + _pipeline = pipeline; - _endpoint = endpoint; + _endpoint = OpenAIClient.GetEndpoint(options); } - /// - /// Lists the currently available models, and provides basic information about each one such as the - /// owner and availability. - /// - /// List models. + /// Gets basic information about each of the models that are currently available, such as their corresponding owner and availability. public virtual async Task> GetModelsAsync() { ClientResult result = await GetModelsAsync(null).ConfigureAwait(false); return ClientResult.FromValue(OpenAIModelInfoCollection.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } - /// - /// Lists the currently available models, and provides basic information about each one such as the - /// owner and availability. - /// - /// List models. + /// Gets basic information about each of the models that are currently available, such as their corresponding owner and availability. public virtual ClientResult GetModels() { ClientResult result = GetModels(null); return ClientResult.FromValue(OpenAIModelInfoCollection.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } - /// - /// Retrieves a model instance, providing basic information about the model such as the owner and - /// permissioning. - /// - /// The ID of the model to use for this request. + /// Gets basic information about the specified model, such as its owner and availability. + /// The name of the desired model. /// is null. /// is an empty string, and was expected to be non-empty. - /// Retrieve. 
public virtual async Task> GetModelAsync(string model) { Argument.AssertNotNullOrEmpty(model, nameof(model)); @@ -97,14 +87,10 @@ public virtual async Task> GetModelAsync(string mo return ClientResult.FromValue(OpenAIModelInfo.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } - /// - /// Retrieves a model instance, providing basic information about the model such as the owner and - /// permissioning. - /// - /// The ID of the model to use for this request. + /// Gets basic information about the specified model, such as its owner and availability. + /// The name of the desired model. /// is null. /// is an empty string, and was expected to be non-empty. - /// Retrieve. public virtual ClientResult GetModel(string model) { Argument.AssertNotNullOrEmpty(model, nameof(model)); @@ -113,11 +99,11 @@ public virtual ClientResult GetModel(string model) return ClientResult.FromValue(OpenAIModelInfo.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } - /// Delete a fine-tuned model. You must have the Owner role in your organization to delete a model. - /// The model to delete. + /// Deletes the specified fine-tuned model. + /// You must have the role of "owner" within your organization in order to be able to delete a model. + /// The name of the model to delete. /// is null. /// is an empty string, and was expected to be non-empty. - /// A value indicating whether the deletion operation was successful. public virtual async Task> DeleteModelAsync(string model) { Argument.AssertNotNullOrEmpty(model, nameof(model)); @@ -128,11 +114,11 @@ public virtual async Task> DeleteModelAsync(string model) return ClientResult.FromValue(value.Deleted, response); } - /// Delete a fine-tuned model. You must have the Owner role in your organization to delete a model. - /// The model to delete. + /// Deletes the specified fine-tuned model. + /// You must have the role of "owner" within your organization in order to be able to delete a model. 
+ /// The name of the model to delete. /// is null. /// is an empty string, and was expected to be non-empty. - /// A value indicating whether the deletion operation was successful. public virtual ClientResult DeleteModel(string model) { Argument.AssertNotNullOrEmpty(model, nameof(model)); diff --git a/.dotnet/src/Custom/Moderations/ModerationClient.cs b/.dotnet/src/Custom/Moderations/ModerationClient.cs index 44cfde541..d425f1ad9 100644 --- a/.dotnet/src/Custom/Moderations/ModerationClient.cs +++ b/.dotnet/src/Custom/Moderations/ModerationClient.cs @@ -8,9 +8,11 @@ namespace OpenAI.Moderations; -/// -/// The service client for OpenAI moderation operations. -/// +// CUSTOM: +// - Renamed. +// - Suppressed constructor that takes endpoint parameter; endpoint is now a property in the options class. +// - Suppressed methods that only take the options parameter. +/// The service client for OpenAI moderation operations. [CodeGenClient("Moderations")] [CodeGenSuppress("ModerationClient", typeof(ClientPipeline), typeof(ApiKeyCredential), typeof(Uri))] [CodeGenSuppress("CreateModerationAsync", typeof(ModerationOptions))] @@ -19,56 +21,65 @@ public partial class ModerationClient { private readonly string _model; - /// - /// Initializes a new instance of that will use an API key when authenticating. - /// - /// The model name to use for moderation operations. - /// The API key used to authenticate with the service endpoint. - /// Additional options to customize the client. - /// The provided was null. - public ModerationClient(string model, ApiKeyCredential credential, OpenAIClientOptions options = null) - : this( - OpenAIClient.CreatePipeline(OpenAIClient.GetApiKey(credential, requireExplicitCredential: true), options), - model, - OpenAIClient.GetEndpoint(options), - options) + // CUSTOM: + // - Added `model` parameter. + // - Used a custom pipeline. + // - Demoted the endpoint parameter to be a property in the options class. + /// Initializes a new instance of . 
+ /// The name of the model to use in requests sent to the service. To learn more about the available models, see . + /// The API key to authenticate with the service. + /// or is null. + /// is an empty string, and was expected to be non-empty. + public ModerationClient(string model, ApiKeyCredential credential) : this(model, credential, new OpenAIClientOptions()) { } - /// - /// Initializes a new instance of that will use an API key from the OPENAI_API_KEY - /// environment variable when authenticating. - /// - /// - /// To provide an explicit credential instead of using the environment variable, use an alternate constructor like - /// . - /// - /// The model name to use for moderation operations. - /// Additional options to customize the client. - /// The OPENAI_API_KEY environment variable was not found. - public ModerationClient(string model, OpenAIClientOptions options = null) - : this( - OpenAIClient.CreatePipeline(OpenAIClient.GetApiKey(), options), - model, - OpenAIClient.GetEndpoint(options), - options) + // CUSTOM: + // - Added `model` parameter. + // - Used a custom pipeline. + // - Demoted the endpoint parameter to be a property in the options class. + /// Initializes a new instance of . + /// The name of the model to use in requests sent to the service. To learn more about the available models, see . + /// The API key to authenticate with the service. + /// The options to configure the client. + /// or is null. + /// is an empty string, and was expected to be non-empty. + public ModerationClient(string model, ApiKeyCredential credential, OpenAIClientOptions options) { + Argument.AssertNotNullOrEmpty(model, nameof(model)); + Argument.AssertNotNull(credential, nameof(credential)); + options ??= new OpenAIClientOptions(); + + _model = model; + _pipeline = OpenAIClient.CreatePipeline(credential, options); + _endpoint = OpenAIClient.GetEndpoint(options); } - /// Initializes a new instance of . 
- /// The HTTP pipeline for sending and receiving REST requests and responses. - /// The model name to use for moderation operations. - /// OpenAI Endpoint. - protected internal ModerationClient(ClientPipeline pipeline, string model, Uri endpoint, OpenAIClientOptions options) + // CUSTOM: + // - Added `model` parameter. + // - Used a custom pipeline. + // - Demoted the endpoint parameter to be a property in the options class. + // - Made protected. + /// Initializes a new instance of . + /// The HTTP pipeline to send and receive REST requests and responses. + /// The name of the model to use in requests sent to the service. To learn more about the available models, see . + /// The options to configure the client. + /// or is null. + /// is an empty string, and was expected to be non-empty. + protected internal ModerationClient(ClientPipeline pipeline, string model, OpenAIClientOptions options) { - _pipeline = pipeline; + Argument.AssertNotNull(pipeline, nameof(pipeline)); + Argument.AssertNotNullOrEmpty(model, nameof(model)); + options ??= new OpenAIClientOptions(); + _model = model; - _endpoint = endpoint; + _pipeline = pipeline; + _endpoint = OpenAIClient.GetEndpoint(options); } - /// Classifies if text is potentially harmful. - /// The text to classify. - /// A token that can be used to cancel this method call. + /// Classifies if the text input is potentially harmful across several categories. + /// The text input to classify. + /// A token that can be used to cancel this method call. /// is null. /// is an empty string, and was expected to be non-empty. public virtual async Task> ClassifyTextInputAsync(string input, CancellationToken cancellationToken = default) @@ -83,9 +94,9 @@ public virtual async Task> ClassifyTextInputAsync return ClientResult.FromValue(ModerationCollection.FromResponse(result.GetRawResponse()).FirstOrDefault(), result.GetRawResponse()); } - /// Classifies if text is potentially harmful. - /// The text to classify. 
- /// A token that can be used to cancel this method call. + /// Classifies if the text input is potentially harmful across several categories. + /// The text input to classify. + /// A token that can be used to cancel this method call. /// is null. /// is an empty string, and was expected to be non-empty. public virtual ClientResult ClassifyTextInput(string input, CancellationToken cancellationToken = default) @@ -100,10 +111,9 @@ public virtual ClientResult ClassifyTextInput(string input, Ca return ClientResult.FromValue(ModerationCollection.FromResponse(result.GetRawResponse()).FirstOrDefault(), result.GetRawResponse()); } - - /// Classifies if text is potentially harmful. - /// The text to classify. - /// A token that can be used to cancel this method call. + /// Classifies if the text inputs are potentially harmful across several categories. + /// The text inputs to classify. + /// A token that can be used to cancel this method call. /// is null. /// is an empty collection, and was expected to be non-empty. public virtual async Task> ClassifyTextInputsAsync(IEnumerable inputs, CancellationToken cancellationToken = default) @@ -118,9 +128,9 @@ public virtual async Task> ClassifyTextInputs return ClientResult.FromValue(ModerationCollection.FromResponse(result.GetRawResponse()), result.GetRawResponse()); } - /// Classifies if text is potentially harmful. - /// The text to classify. - /// A token that can be used to cancel this method call. + /// Classifies if the text inputs are potentially harmful across several categories. + /// The text inputs to classify. + /// A token that can be used to cancel this method call. /// is null. /// is an empty collection, and was expected to be non-empty. 
public virtual ClientResult ClassifyTextInputs(IEnumerable inputs, CancellationToken cancellationToken = default) diff --git a/.dotnet/src/Custom/OpenAIClient.cs b/.dotnet/src/Custom/OpenAIClient.cs index 287c4ca81..cf696c385 100644 --- a/.dotnet/src/Custom/OpenAIClient.cs +++ b/.dotnet/src/Custom/OpenAIClient.cs @@ -17,6 +17,7 @@ namespace OpenAI; // CUSTOM: +// - Suppressed constructor that takes endpoint parameter; endpoint is now a property in the options class. // - Suppressed cached clients. Clients are not singletons, and users can create multiple clients of the same type // if needed (e.g., to target different OpenAI models). The Get*Client methods return new client instances. /// @@ -37,6 +38,7 @@ namespace OpenAI; [CodeGenSuppress("_cachedInternalAssistantMessageClient")] [CodeGenSuppress("_cachedInternalAssistantRunClient")] [CodeGenSuppress("_cachedInternalAssistantThreadClient")] +[CodeGenSuppress("_cachedInternalUploadsClient")] [CodeGenSuppress("_cachedLegacyCompletionClient")] [CodeGenSuppress("_cachedModelClient")] [CodeGenSuppress("_cachedModerationClient")] @@ -52,58 +54,65 @@ namespace OpenAI; [CodeGenSuppress("GetInternalAssistantMessageClientClient")] [CodeGenSuppress("GetInternalAssistantRunClientClient")] [CodeGenSuppress("GetInternalAssistantThreadClientClient")] +[CodeGenSuppress("GetInternalUploadsClientClient")] [CodeGenSuppress("GetLegacyCompletionClientClient")] [CodeGenSuppress("GetModelClientClient")] [CodeGenSuppress("GetModerationClientClient")] [CodeGenSuppress("GetVectorStoreClientClient")] public partial class OpenAIClient { - private readonly OpenAIClientOptions _options; + private const string OpenAIV1Endpoint = "https://api.openai.com/v1"; + private const string OpenAIBetaHeaderValue = "assistants=v2"; - /// - /// The configured connection endpoint. 
- /// - protected Uri Endpoint => _endpoint; + private static class KnownHeaderNames + { + public const string OpenAIBeta = "OpenAI-Beta"; + public const string OpenAIOrganization = "OpenAI-Organization"; + public const string OpenAIProject = "OpenAI-Project"; + public const string UserAgent = "User-Agent"; + } - /// - /// Creates a new instance of . This type is used to share common - /// and client configuration details across scenario client instances created via - /// methods like . - /// - /// The API key to use when authenticating the client. - /// A common client options definition that all clients created by this should use. - /// The provided is null. - public OpenAIClient(ApiKeyCredential credential, OpenAIClientOptions options = null) - : this(CreatePipeline(GetApiKey(credential, requireExplicitCredential: true), options), GetEndpoint(options), options) + private readonly OpenAIClientOptions _options; + + // CUSTOM: + // - Used a custom pipeline. + // - Demoted the endpoint parameter to be a property in the options class. + /// Initializes a new instance of . + /// The API key to authenticate with the service. + /// is null. + public OpenAIClient(ApiKeyCredential credential) : this(credential, new OpenAIClientOptions()) { - _keyCredential = credential; } - /// - /// Creates a new instance of . This type is used to share common - /// and client configuration details across scenario client instances created via - /// methods like . - /// - /// This constructor overload will use the value of the OPENAI_API_KEY environment variable as its - /// authentication mechanism. To provide an explicit credential, use an alternate constructor like - /// . - /// - /// - /// A common client options definition that all clients created by this should use. - public OpenAIClient(OpenAIClientOptions options = default) - : this(CreatePipeline(GetApiKey(), options), GetEndpoint(options), options) - {} + // CUSTOM: + // - Used a custom pipeline. 
+ // - Demoted the endpoint parameter to be a property in the options class. + /// Initializes a new instance of . + /// The API key to authenticate with the service. + /// The options to configure the client. + /// is null. + public OpenAIClient(ApiKeyCredential credential, OpenAIClientOptions options) + { + Argument.AssertNotNull(credential, nameof(credential)); + options ??= new OpenAIClientOptions(); - /// - /// Creates a new instance of . - /// - /// The common client pipeline that should be used for all created scenario clients. - /// The HTTP endpoint to use. - /// The common client options that should be used for all created scenario clients. - protected OpenAIClient(ClientPipeline pipeline, Uri endpoint, OpenAIClientOptions options) + _pipeline = OpenAIClient.CreatePipeline(credential, options); + _endpoint = OpenAIClient.GetEndpoint(options); + _options = options; + } + + // CUSTOM: Added protected internal constructor that takes a ClientPipeline. + /// Initializes a new instance of . + /// The HTTP pipeline to send and receive REST requests and responses. + /// The options to configure the client. + /// is null. + protected internal OpenAIClient(ClientPipeline pipeline, OpenAIClientOptions options) { + Argument.AssertNotNull(pipeline, nameof(pipeline)); + options ??= new OpenAIClientOptions(); + _pipeline = pipeline; - _endpoint = endpoint; + _endpoint = OpenAIClient.GetEndpoint(options); _options = options; } @@ -117,7 +126,7 @@ protected OpenAIClient(ClientPipeline pipeline, Uri endpoint, OpenAIClientOption /// /// A new . [Experimental("OPENAI001")] - public virtual AssistantClient GetAssistantClient() => new(_pipeline, _endpoint, _options); + public virtual AssistantClient GetAssistantClient() => new(_pipeline, _options); /// /// Gets a new instance of that reuses the client configuration details provided to @@ -128,7 +137,7 @@ protected OpenAIClient(ClientPipeline pipeline, Uri endpoint, OpenAIClientOption /// the same configuration details. 
/// /// A new . - public virtual AudioClient GetAudioClient(string model) => new(_pipeline, model, _endpoint, _options); + public virtual AudioClient GetAudioClient(string model) => new(_pipeline, model, _options); /// /// Gets a new instance of that reuses the client configuration details provided to @@ -139,7 +148,8 @@ protected OpenAIClient(ClientPipeline pipeline, Uri endpoint, OpenAIClientOption /// the same configuration details. /// /// A new . - public virtual BatchClient GetBatchClient() => new(_pipeline, _endpoint, _options); + [Experimental("OPENAI001")] + public virtual BatchClient GetBatchClient() => new(_pipeline, _options); /// /// Gets a new instance of that reuses the client configuration details provided to @@ -150,7 +160,7 @@ protected OpenAIClient(ClientPipeline pipeline, Uri endpoint, OpenAIClientOption /// the same configuration details. /// /// A new . - public virtual ChatClient GetChatClient(string model) => new(_pipeline, model, _endpoint, _options); + public virtual ChatClient GetChatClient(string model) => new(_pipeline, model, _options); /// /// Gets a new instance of that reuses the client configuration details provided to @@ -161,7 +171,7 @@ protected OpenAIClient(ClientPipeline pipeline, Uri endpoint, OpenAIClientOption /// the same configuration details. /// /// A new . - public virtual EmbeddingClient GetEmbeddingClient(string model) => new(_pipeline, model, _endpoint, _options); + public virtual EmbeddingClient GetEmbeddingClient(string model) => new(_pipeline, model, _options); /// /// Gets a new instance of that reuses the client configuration details provided to @@ -172,7 +182,7 @@ protected OpenAIClient(ClientPipeline pipeline, Uri endpoint, OpenAIClientOption /// the same configuration details. /// /// A new . 
- public virtual FileClient GetFileClient() => new(_pipeline, _endpoint, _options); + public virtual FileClient GetFileClient() => new(_pipeline, _options); /// /// Gets a new instance of that reuses the client configuration details provided to @@ -183,7 +193,8 @@ protected OpenAIClient(ClientPipeline pipeline, Uri endpoint, OpenAIClientOption /// the same configuration details. /// /// A new . - public virtual FineTuningClient GetFineTuningClient() => new(_pipeline, _endpoint, _options); + [Experimental("OPENAI001")] + public virtual FineTuningClient GetFineTuningClient() => new(_pipeline, _options); /// /// Gets a new instance of that reuses the client configuration details provided to @@ -194,7 +205,7 @@ protected OpenAIClient(ClientPipeline pipeline, Uri endpoint, OpenAIClientOption /// the same configuration details. /// /// A new . - public virtual ImageClient GetImageClient(string model) => new(_pipeline, model, _endpoint, _options); + public virtual ImageClient GetImageClient(string model) => new(_pipeline, model, _options); /// /// Gets a new instance of that reuses the client configuration details provided to @@ -205,7 +216,7 @@ protected OpenAIClient(ClientPipeline pipeline, Uri endpoint, OpenAIClientOption /// the same configuration details. /// /// A new . - public virtual ModelClient GetModelClient() => new(_pipeline, _endpoint, _options); + public virtual ModelClient GetModelClient() => new(_pipeline, _options); /// /// Gets a new instance of that reuses the client configuration details provided to @@ -216,7 +227,7 @@ protected OpenAIClient(ClientPipeline pipeline, Uri endpoint, OpenAIClientOption /// the same configuration details. /// /// A new . 
- public virtual ModerationClient GetModerationClient(string model) => new(_pipeline, model, _endpoint, _options); + public virtual ModerationClient GetModerationClient(string model) => new(_pipeline, model, _options); /// /// Gets a new instance of that reuses the client configuration details provided to @@ -228,59 +239,35 @@ protected OpenAIClient(ClientPipeline pipeline, Uri endpoint, OpenAIClientOption /// /// A new . [Experimental("OPENAI001")] - public virtual VectorStoreClient GetVectorStoreClient() => new(_pipeline, _endpoint, _options); + public virtual VectorStoreClient GetVectorStoreClient() => new(_pipeline, _options); - internal static ClientPipeline CreatePipeline(ApiKeyCredential credential, OpenAIClientOptions options = null) + internal static ClientPipeline CreatePipeline(ApiKeyCredential credential, OpenAIClientOptions options) { return ClientPipeline.Create( - options ?? new(), + options, perCallPolicies: [ CreateAddBetaFeatureHeaderPolicy(), CreateAddCustomHeadersPolicy(options), ], - perTryPolicies: - [ + perTryPolicies: [ ApiKeyAuthenticationPolicy.CreateHeaderApiKeyPolicy(credential, AuthorizationHeader, AuthorizationApiKeyPrefix) ], - beforeTransportPolicies: []); - } - - internal static Uri GetEndpoint(OpenAIClientOptions options) - { - return options?.Endpoint ?? new(Environment.GetEnvironmentVariable(OpenAIEndpointEnvironmentVariable) ?? 
OpenAIV1Endpoint); + beforeTransportPolicies: [ + ]); } - internal static ApiKeyCredential GetApiKey(ApiKeyCredential explicitCredential = null, bool requireExplicitCredential = false) + internal static Uri GetEndpoint(OpenAIClientOptions options = null) { - if (explicitCredential is not null) - { - return explicitCredential; - } - else if (requireExplicitCredential) - { - throw new ArgumentNullException(nameof(explicitCredential), $"A non-null credential value is required."); - } - else - { - string environmentApiKey = Environment.GetEnvironmentVariable(OpenAIApiKeyEnvironmentVariable); - if (string.IsNullOrEmpty(environmentApiKey)) - { - throw new InvalidOperationException( - $"No environment variable value was found for {OpenAIApiKeyEnvironmentVariable}. " - + "Please either populate this environment variable or provide authentication information directly " - + "to the client constructor."); - } - return new(environmentApiKey); - } + return options?.Endpoint ?? new(OpenAIV1Endpoint); } private static PipelinePolicy CreateAddBetaFeatureHeaderPolicy() { return new GenericActionPipelinePolicy((message) => { - if (message?.Request?.Headers?.TryGetValue(OpenAIBetaFeatureHeaderName, out string _) == false) + if (message?.Request?.Headers?.TryGetValue(KnownHeaderNames.OpenAIBeta, out string _) == false) { - message.Request.Headers.Set(OpenAIBetaFeatureHeaderName, OpenAIBetaAssistantsV1HeaderValue); + message.Request.Headers.Set(KnownHeaderNames.OpenAIBeta, OpenAIBetaHeaderValue); } }); } @@ -290,28 +277,20 @@ private static PipelinePolicy CreateAddCustomHeadersPolicy(OpenAIClientOptions o TelemetryDetails telemetryDetails = new(typeof(OpenAIClientOptions).Assembly, options?.ApplicationId); return new GenericActionPipelinePolicy((message) => { - if (message?.Request?.Headers?.TryGetValue(UserAgentHeaderName, out string _) == false) + if (message?.Request?.Headers?.TryGetValue(KnownHeaderNames.UserAgent, out string _) == false) { - 
message.Request.Headers.Set(UserAgentHeaderName, telemetryDetails.ToString()); + message.Request.Headers.Set(KnownHeaderNames.UserAgent, telemetryDetails.ToString()); } if (!string.IsNullOrEmpty(options?.OrganizationId)) { - message.Request.Headers.Set(OpenAIOrganizationHeaderName, options.OrganizationId); + message.Request.Headers.Set(KnownHeaderNames.OpenAIOrganization, options.OrganizationId); } + if (!string.IsNullOrEmpty(options?.ProjectId)) { - message.Request.Headers.Set(OpenAIProjectHeaderName, options.ProjectId); + message.Request.Headers.Set(KnownHeaderNames.OpenAIProject, options.ProjectId); } }); } - - private const string OpenAIBetaFeatureHeaderName = "OpenAI-Beta"; - private const string OpenAIOrganizationHeaderName = "OpenAI-Organization"; - private const string OpenAIProjectHeaderName = "OpenAI-Project"; - private const string OpenAIBetaAssistantsV1HeaderValue = "assistants=v2"; - private const string OpenAIEndpointEnvironmentVariable = "OPENAI_ENDPOINT"; - private const string OpenAIApiKeyEnvironmentVariable = "OPENAI_API_KEY"; - private const string OpenAIV1Endpoint = "https://api.openai.com/v1"; - private const string UserAgentHeaderName = "User-Agent"; } diff --git a/.dotnet/src/Custom/OpenAIClientOptions.cs b/.dotnet/src/Custom/OpenAIClientOptions.cs index a306e8aa5..ca7937575 100644 --- a/.dotnet/src/Custom/OpenAIClientOptions.cs +++ b/.dotnet/src/Custom/OpenAIClientOptions.cs @@ -3,29 +3,71 @@ namespace OpenAI; -/// -/// Client-level options for the OpenAI service. -/// +/// The options to configure the client. [CodeGenModel("OpenAIClientOptions")] public partial class OpenAIClientOptions : ClientPipelineOptions { + private Uri _endpoint; + private string _organizationId; + private string _projectId; + private string _applicationId; + /// - /// A non-default base endpoint that clients should use when connecting. + /// The service endpoint that the client will send requests to. If not set, the default endpoint will be used. 
/// - public Uri Endpoint { get; set; } + public Uri Endpoint + { + get => _endpoint; + set + { + AssertNotFrozen(); + _endpoint = value; + } + } /// - /// An optional application ID to use as part of the request User-Agent header. + /// The value to use for the OpenAI-Organization request header. Users who belong to multiple organizations + /// can set this value to specify which organization is used for an API request. Usage from these API requests will + /// count against the specified organization's quota. If not set, the header will be omitted, and the default + /// organization will be billed. You can change your default organization in your user settings. + /// Learn more. /// - public string ApplicationId { get; set; } + public string OrganizationId + { + get => _organizationId; + set + { + AssertNotFrozen(); + _organizationId = value; + } + } /// - /// An optional ID added to OpenAI-Organization header + /// The value to use for the OpenAI-Project request header. Users who are accessing their projects through + /// their legacy user API key can set this value to specify which project is used for an API request. Usage from + /// these API requests will count as usage for the specified project. If not set, the header will be omitted, and + /// the default project will be accessed. /// - public string OrganizationId { get; set; } + public string ProjectId + { + get => _projectId; + set + { + AssertNotFrozen(); + _projectId = value; + } + } /// - /// An optional ID added to OpenAI-Project header + /// An optional application ID to use as part of the request User-Agent header. 
/// - public string ProjectId { get; set; } + public string ApplicationId + { + get => _applicationId; + set + { + AssertNotFrozen(); + _applicationId = value; + } + } } diff --git a/.dotnet/src/Custom/VectorStores/FileChunkingStrategy.cs b/.dotnet/src/Custom/VectorStores/FileChunkingStrategy.cs index 0f642d66e..3da40dad9 100644 --- a/.dotnet/src/Custom/VectorStores/FileChunkingStrategy.cs +++ b/.dotnet/src/Custom/VectorStores/FileChunkingStrategy.cs @@ -1,8 +1,11 @@ +using System.Diagnostics.CodeAnalysis; + namespace OpenAI.VectorStores; +[Experimental("OPENAI001")] [CodeGenModel("FileChunkingStrategyResponseParam")] public abstract partial class FileChunkingStrategy -{ +{ /// /// Gets a value representing the default, automatic selection for a file chunking strategy. /// @@ -32,5 +35,5 @@ public static FileChunkingStrategy CreateStaticStrategy( } private static InternalAutoChunkingStrategy _autoValue; - private static InternalUnknownChunkingStrategy _unknownValue; + private static InternalUnknownChunkingStrategy _unknownValue; } diff --git a/.dotnet/src/Custom/VectorStores/Internal/GeneratorStubs.cs b/.dotnet/src/Custom/VectorStores/Internal/GeneratorStubs.cs index 3fa00a75b..a5cbb6408 100644 --- a/.dotnet/src/Custom/VectorStores/Internal/GeneratorStubs.cs +++ b/.dotnet/src/Custom/VectorStores/Internal/GeneratorStubs.cs @@ -6,7 +6,7 @@ namespace OpenAI.VectorStores; internal partial class InternalCreateVectorStoreFileBatchRequest { } [CodeGenModel("CreateVectorStoreFileRequest")] -internal partial class InternalCreateVectorStoreFileRequest {} +internal partial class InternalCreateVectorStoreFileRequest { } [CodeGenModel("DeleteVectorStoreFileResponse")] internal partial class InternalDeleteVectorStoreFileResponse { } @@ -67,3 +67,6 @@ internal partial class InternalUnknownChunkingStrategy { } [CodeGenModel("UnknownFileChunkingStrategyResponseParam")] internal partial class InternalUnknownFileChunkingStrategyResponseParamProxy { } + 
+[CodeGenModel("ListFilesInVectorStoreBatchRequestOrder")] +internal readonly partial struct InternalListFilesInVectorStoreBatchRequestOrder { } \ No newline at end of file diff --git a/.dotnet/src/Custom/VectorStores/Internal/InternalCreateVectorStoreFileRequest.cs b/.dotnet/src/Custom/VectorStores/Internal/InternalCreateVectorStoreFileRequest.cs index 338a848e8..7ddc7b78b 100644 --- a/.dotnet/src/Custom/VectorStores/Internal/InternalCreateVectorStoreFileRequest.cs +++ b/.dotnet/src/Custom/VectorStores/Internal/InternalCreateVectorStoreFileRequest.cs @@ -1,6 +1,3 @@ -using System; -using System.Collections.Generic; - namespace OpenAI.VectorStores; internal partial class InternalCreateVectorStoreFileRequest diff --git a/.dotnet/src/Custom/VectorStores/Internal/Pagination/VectorStoreFileBatchesPageToken.cs b/.dotnet/src/Custom/VectorStores/Internal/Pagination/VectorStoreFileBatchesPageToken.cs index 50f807901..3329be771 100644 --- a/.dotnet/src/Custom/VectorStores/Internal/Pagination/VectorStoreFileBatchesPageToken.cs +++ b/.dotnet/src/Custom/VectorStores/Internal/Pagination/VectorStoreFileBatchesPageToken.cs @@ -10,7 +10,7 @@ namespace OpenAI.VectorStores; internal class VectorStoreFileBatchesPageToken : ContinuationToken { - protected VectorStoreFileBatchesPageToken(string vectorStoreId,string batchId, int? limit, string? order, string? after, string? before, string? filter) + protected VectorStoreFileBatchesPageToken(string vectorStoreId, string batchId, int? limit, string? order, string? after, string? before, string? 
filter) { VectorStoreId = vectorStoreId; BatchId = batchId; diff --git a/.dotnet/src/Custom/VectorStores/Internal/Pagination/VectorStoreFilesPageEnumerator.cs b/.dotnet/src/Custom/VectorStores/Internal/Pagination/VectorStoreFilesPageEnumerator.cs index 9a5e9bec3..6034ba109 100644 --- a/.dotnet/src/Custom/VectorStores/Internal/Pagination/VectorStoreFilesPageEnumerator.cs +++ b/.dotnet/src/Custom/VectorStores/Internal/Pagination/VectorStoreFilesPageEnumerator.cs @@ -28,7 +28,7 @@ internal partial class VectorStoreFilesPageEnumerator : PageEnumerator public StaticFileChunkingStrategy(int maxTokensPerChunk, int overlappingTokenCount) : this(new InternalStaticChunkingStrategyDetails(maxTokensPerChunk, overlappingTokenCount)) - {} + { } } diff --git a/.dotnet/src/Custom/VectorStores/VectorStore.cs b/.dotnet/src/Custom/VectorStores/VectorStore.cs index 04b8c79b7..d63612d18 100644 --- a/.dotnet/src/Custom/VectorStores/VectorStore.cs +++ b/.dotnet/src/Custom/VectorStores/VectorStore.cs @@ -1,8 +1,11 @@ +using System.Diagnostics.CodeAnalysis; + namespace OpenAI.VectorStores; /// /// A representation of a file storage and indexing container used by the file_search tool for assistants. /// +[Experimental("OPENAI001")] [CodeGenModel("VectorStoreObject")] public partial class VectorStore { diff --git a/.dotnet/src/Custom/VectorStores/VectorStoreBatchFileJob.cs b/.dotnet/src/Custom/VectorStores/VectorStoreBatchFileJob.cs index 507745bb3..329a03b53 100644 --- a/.dotnet/src/Custom/VectorStores/VectorStoreBatchFileJob.cs +++ b/.dotnet/src/Custom/VectorStores/VectorStoreBatchFileJob.cs @@ -1,8 +1,11 @@ +using System.Diagnostics.CodeAnalysis; + namespace OpenAI.VectorStores; /// /// Represents information about a bulk ingestion job of files into a vector store. 
/// +[Experimental("OPENAI001")] [CodeGenModel("VectorStoreFileBatchObject")] public partial class VectorStoreBatchFileJob { diff --git a/.dotnet/src/Custom/VectorStores/VectorStoreBatchFileJobStatus.cs b/.dotnet/src/Custom/VectorStores/VectorStoreBatchFileJobStatus.cs index 84eb5e3f6..d23f262a6 100644 --- a/.dotnet/src/Custom/VectorStores/VectorStoreBatchFileJobStatus.cs +++ b/.dotnet/src/Custom/VectorStores/VectorStoreBatchFileJobStatus.cs @@ -1,5 +1,8 @@ +using System.Diagnostics.CodeAnalysis; + namespace OpenAI.VectorStores; +[Experimental("OPENAI001")] [CodeGenModel("VectorStoreFileBatchObjectStatus")] public readonly partial struct VectorStoreBatchFileJobStatus { diff --git a/.dotnet/src/Custom/VectorStores/VectorStoreClient.cs b/.dotnet/src/Custom/VectorStores/VectorStoreClient.cs index 69e6e0c32..91aa64eda 100644 --- a/.dotnet/src/Custom/VectorStores/VectorStoreClient.cs +++ b/.dotnet/src/Custom/VectorStores/VectorStoreClient.cs @@ -13,6 +13,7 @@ namespace OpenAI.VectorStores; /// The service client for OpenAI vector store operations. 
/// [CodeGenClient("VectorStores")] +[CodeGenSuppress("VectorStoreClient", typeof(ClientPipeline), typeof(ApiKeyCredential), typeof(Uri))] [CodeGenSuppress("CreateVectorStoreAsync", typeof(VectorStoreCreationOptions))] [CodeGenSuppress("CreateVectorStore", typeof(VectorStoreCreationOptions))] [CodeGenSuppress("GetVectorStoreAsync", typeof(string))] @@ -21,10 +22,10 @@ namespace OpenAI.VectorStores; [CodeGenSuppress("ModifyVectorStore", typeof(string), typeof(VectorStoreModificationOptions))] [CodeGenSuppress("DeleteVectorStoreAsync", typeof(string))] [CodeGenSuppress("DeleteVectorStore", typeof(string))] -[CodeGenSuppress("GetVectorStoresAsync", typeof(int?), typeof(ListOrder?), typeof(string), typeof(string))] -[CodeGenSuppress("GetVectorStores", typeof(int?), typeof(ListOrder?), typeof(string), typeof(string))] -[CodeGenSuppress("GetVectorStoreFilesAsync", typeof(string), typeof(int?), typeof(ListOrder?), typeof(string), typeof(string), typeof(VectorStoreFileStatusFilter?))] -[CodeGenSuppress("GetVectorStoreFiles", typeof(string), typeof(int?), typeof(ListOrder?), typeof(string), typeof(string), typeof(VectorStoreFileStatusFilter?))] +[CodeGenSuppress("GetVectorStoresAsync", typeof(int?), typeof(VectorStoreCollectionOrder?), typeof(string), typeof(string))] +[CodeGenSuppress("GetVectorStores", typeof(int?), typeof(VectorStoreCollectionOrder?), typeof(string), typeof(string))] +[CodeGenSuppress("GetVectorStoreFilesAsync", typeof(string), typeof(int?), typeof(VectorStoreFileAssociationCollectionOrder?), typeof(string), typeof(string), typeof(VectorStoreFileStatusFilter?))] +[CodeGenSuppress("GetVectorStoreFiles", typeof(string), typeof(int?), typeof(VectorStoreFileAssociationCollectionOrder?), typeof(string), typeof(string), typeof(VectorStoreFileStatusFilter?))] [CodeGenSuppress("CreateVectorStoreFileAsync", typeof(string), typeof(InternalCreateVectorStoreFileRequest))] [CodeGenSuppress("CreateVectorStoreFile", typeof(string), 
typeof(InternalCreateVectorStoreFileRequest))] [CodeGenSuppress("GetVectorStoreFileAsync", typeof(string), typeof(string))] @@ -37,8 +38,8 @@ namespace OpenAI.VectorStores; [CodeGenSuppress("GetVectorStoreFileBatch", typeof(string), typeof(string))] [CodeGenSuppress("CancelVectorStoreFileBatchAsync", typeof(string), typeof(string))] [CodeGenSuppress("CancelVectorStoreFileBatch", typeof(string), typeof(string))] -[CodeGenSuppress("GetFilesInVectorStoreBatchesAsync", typeof(string), typeof(string), typeof(int?), typeof(ListOrder?), typeof(string), typeof(string), typeof(VectorStoreFileStatusFilter?))] -[CodeGenSuppress("GetFilesInVectorStoreBatches", typeof(string), typeof(string), typeof(int?), typeof(ListOrder?), typeof(string), typeof(string), typeof(VectorStoreFileStatusFilter?))] +[CodeGenSuppress("GetFilesInVectorStoreBatchesAsync", typeof(string), typeof(string), typeof(int?), typeof(InternalListFilesInVectorStoreBatchRequestOrder?), typeof(string), typeof(string), typeof(VectorStoreFileStatusFilter?))] +[CodeGenSuppress("GetFilesInVectorStoreBatches", typeof(string), typeof(string), typeof(int?), typeof(InternalListFilesInVectorStoreBatchRequestOrder?), typeof(string), typeof(string), typeof(VectorStoreFileStatusFilter?))] [Experimental("OPENAI001")] public partial class VectorStoreClient { @@ -57,30 +58,37 @@ public VectorStoreClient(ApiKeyCredential credential, OpenAIClientOptions option options) { } - /// - /// Initializes a new instance of that will use an API key from the OPENAI_API_KEY - /// environment variable when authenticating. - /// - /// - /// To provide an explicit credential instead of using the environment variable, use an alternate constructor like - /// . - /// - /// Additional options to customize the client. - /// The OPENAI_API_KEY environment variable was not found. 
- public VectorStoreClient(OpenAIClientOptions options = null) - : this( - OpenAIClient.CreatePipeline(OpenAIClient.GetApiKey(), options), - OpenAIClient.GetEndpoint(options), - options) - { } + // CUSTOM: + // - Used a custom pipeline. + // - Demoted the endpoint parameter to be a property in the options class. + /// Initializes a new instance of . + /// The API key to authenticate with the service. + /// The options to configure the client. + /// is null. + public VectorStoreClient(ApiKeyCredential credential, OpenAIClientOptions options) + { + Argument.AssertNotNull(credential, nameof(credential)); + options ??= new OpenAIClientOptions(); - /// Initializes a new instance of VectorStoreClient. - /// The HTTP pipeline for sending and receiving REST requests and responses. - /// OpenAI Endpoint. - protected internal VectorStoreClient(ClientPipeline pipeline, Uri endpoint, OpenAIClientOptions options) + _pipeline = OpenAIClient.CreatePipeline(credential, options); + _endpoint = OpenAIClient.GetEndpoint(options); + } + + // CUSTOM: + // - Used a custom pipeline. + // - Demoted the endpoint parameter to be a property in the options class. + // - Made protected. + /// Initializes a new instance of . + /// The HTTP pipeline to send and receive REST requests and responses. + /// The options to configure the client. + /// is null. + protected internal VectorStoreClient(ClientPipeline pipeline, OpenAIClientOptions options) { + Argument.AssertNotNull(pipeline, nameof(pipeline)); + options ??= new OpenAIClientOptions(); + _pipeline = pipeline; - _endpoint = endpoint; + _endpoint = OpenAIClient.GetEndpoint(options); } /// Creates a vector store. 
@@ -224,7 +232,7 @@ public virtual AsyncPageCollection GetVectorStoresAsync( VectorStoreCollectionOptions options = default, CancellationToken cancellationToken = default) { - return GetVectorStoresAsync(options?.PageSize, options?.Order?.ToString(), options?.AfterId, options?.BeforeId, cancellationToken.ToRequestOptions()) + return GetVectorStoresAsync(options?.PageSizeLimit, options?.Order?.ToString(), options?.AfterId, options?.BeforeId, cancellationToken.ToRequestOptions()) as AsyncPageCollection; } @@ -261,7 +269,7 @@ public virtual PageCollection GetVectorStores( VectorStoreCollectionOptions options = default, CancellationToken cancellationToken = default) { - return GetVectorStores(options?.PageSize, options?.Order?.ToString(), options?.AfterId, options?.BeforeId, cancellationToken.ToRequestOptions()) + return GetVectorStores(options?.PageSizeLimit, options?.Order?.ToString(), options?.AfterId, options?.BeforeId, cancellationToken.ToRequestOptions()) as PageCollection; } @@ -349,7 +357,7 @@ public virtual AsyncPageCollection GetFileAssociatio { Argument.AssertNotNullOrEmpty(vectorStoreId, nameof(vectorStoreId)); - return GetFileAssociationsAsync(vectorStoreId, options?.PageSize, options?.Order?.ToString(), options?.AfterId, options?.BeforeId, options?.Filter?.ToString(), cancellationToken.ToRequestOptions()) + return GetFileAssociationsAsync(vectorStoreId, options?.PageSizeLimit, options?.Order?.ToString(), options?.AfterId, options?.BeforeId, options?.Filter?.ToString(), cancellationToken.ToRequestOptions()) as AsyncPageCollection; } @@ -393,7 +401,7 @@ public virtual PageCollection GetFileAssociations( { Argument.AssertNotNullOrEmpty(vectorStoreId, nameof(vectorStoreId)); - return GetFileAssociations(vectorStoreId, options?.PageSize, options?.Order?.ToString(), options?.AfterId, options?.BeforeId, options?.Filter?.ToString(), cancellationToken.ToRequestOptions()) + return GetFileAssociations(vectorStoreId, options?.PageSizeLimit, 
options?.Order?.ToString(), options?.AfterId, options?.BeforeId, options?.Filter?.ToString(), cancellationToken.ToRequestOptions()) as PageCollection; } diff --git a/.dotnet/src/Custom/VectorStores/VectorStoreCollectionOptions.cs b/.dotnet/src/Custom/VectorStores/VectorStoreCollectionOptions.cs index 82dfac7dc..546ee6c95 100644 --- a/.dotnet/src/Custom/VectorStores/VectorStoreCollectionOptions.cs +++ b/.dotnet/src/Custom/VectorStores/VectorStoreCollectionOptions.cs @@ -1,33 +1,34 @@ +using System.Diagnostics.CodeAnalysis; + namespace OpenAI.VectorStores; -/// -/// Represents addition options available when requesting a collection of instances. -/// +/// The options to configure how objects are retrieved and paginated. +[Experimental("OPENAI001")] public class VectorStoreCollectionOptions { - /// - /// Creates a new instance of . - /// + /// Initializes a new instance of . public VectorStoreCollectionOptions() { } - /// - /// The order that results should appear in the list according to - /// their created_at timestamp. + /// + /// A limit on the number of objects to be returned per page. /// - public ListOrder? Order { get; set; } + public int? PageSizeLimit { get; set; } /// - /// The number of values to return in a page result. + /// The order in which to retrieve objects when sorted by their + /// timestamp. /// - public int? PageSize { get; set; } + public VectorStoreCollectionOrder? Order { get; set; } /// - /// The id of the item preceeding the first item in the collection. + /// The used to retrieve the page of objects that come + /// after this one. /// public string AfterId { get; set; } /// - /// The id of the item following the last item in the collection. + /// The used to retrieve the page of objects that come + /// before this one. 
/// public string BeforeId { get; set; } } diff --git a/.dotnet/src/Custom/VectorStores/VectorStoreCollectionOrder.cs b/.dotnet/src/Custom/VectorStores/VectorStoreCollectionOrder.cs new file mode 100644 index 000000000..d6f294c96 --- /dev/null +++ b/.dotnet/src/Custom/VectorStores/VectorStoreCollectionOrder.cs @@ -0,0 +1,17 @@ +using System.Diagnostics.CodeAnalysis; + +namespace OpenAI.VectorStores; + +// CUSTOM: Renamed. +[Experimental("OPENAI001")] +[CodeGenModel("ListVectorStoresRequestOrder")] +public readonly partial struct VectorStoreCollectionOrder +{ + // CUSTOM: Renamed. + [CodeGenMember("Asc")] + public static VectorStoreCollectionOrder Ascending { get; } = new VectorStoreCollectionOrder(AscendingValue); + + // CUSTOM: Renamed. + [CodeGenMember("Desc")] + public static VectorStoreCollectionOrder Descending { get; } = new VectorStoreCollectionOrder(DescendingValue); +} diff --git a/.dotnet/src/Custom/VectorStores/VectorStoreCreationOptions.cs b/.dotnet/src/Custom/VectorStores/VectorStoreCreationOptions.cs index 97514c9f4..066fb6194 100644 --- a/.dotnet/src/Custom/VectorStores/VectorStoreCreationOptions.cs +++ b/.dotnet/src/Custom/VectorStores/VectorStoreCreationOptions.cs @@ -1,7 +1,9 @@ using System.Collections.Generic; +using System.Diagnostics.CodeAnalysis; namespace OpenAI.VectorStores; +[Experimental("OPENAI001")] [CodeGenModel("CreateVectorStoreRequest")] public partial class VectorStoreCreationOptions { diff --git a/.dotnet/src/Custom/VectorStores/VectorStoreExpirationAnchor.cs b/.dotnet/src/Custom/VectorStores/VectorStoreExpirationAnchor.cs index 482276c40..98882c6f0 100644 --- a/.dotnet/src/Custom/VectorStores/VectorStoreExpirationAnchor.cs +++ b/.dotnet/src/Custom/VectorStores/VectorStoreExpirationAnchor.cs @@ -1,10 +1,12 @@ using System.ComponentModel; +using System.Diagnostics.CodeAnalysis; namespace OpenAI.VectorStores; /// /// Represents the available timestamps to which the duration in a will apply. 
/// +[Experimental("OPENAI001")] [CodeGenModel("VectorStoreExpirationAfterAnchor")] public enum VectorStoreExpirationAnchor { diff --git a/.dotnet/src/Custom/VectorStores/VectorStoreExpirationPolicy.cs b/.dotnet/src/Custom/VectorStores/VectorStoreExpirationPolicy.cs index 4357959e8..8b437809f 100644 --- a/.dotnet/src/Custom/VectorStores/VectorStoreExpirationPolicy.cs +++ b/.dotnet/src/Custom/VectorStores/VectorStoreExpirationPolicy.cs @@ -7,6 +7,7 @@ namespace OpenAI.VectorStores; /// /// Represents the the configuration that controls when a vector store will be automatically deleted. /// +[Experimental("OPENAI001")] [CodeGenModel("VectorStoreExpirationAfter")] [CodeGenSuppress(nameof(VectorStoreExpirationPolicy))] [CodeGenSuppress(nameof(VectorStoreExpirationPolicy), typeof(int))] @@ -22,14 +23,14 @@ public partial class VectorStoreExpirationPolicy /// Anchor timestamp after which the expiration policy applies. Supported anchors: `last_active_at`. public required VectorStoreExpirationAnchor Anchor - { + { get => _anchor; set => _anchor = value; } /// The number of days after the anchor time that the vector store will expire. public required int Days - { + { get => _days; set => _days = value; } diff --git a/.dotnet/src/Custom/VectorStores/VectorStoreFileAssociation.cs b/.dotnet/src/Custom/VectorStores/VectorStoreFileAssociation.cs index 9c680ab46..4ab1114e1 100644 --- a/.dotnet/src/Custom/VectorStores/VectorStoreFileAssociation.cs +++ b/.dotnet/src/Custom/VectorStores/VectorStoreFileAssociation.cs @@ -1,8 +1,11 @@ +using System.Diagnostics.CodeAnalysis; + namespace OpenAI.VectorStores; /// /// A representation of a file association between an uploaded file and a vector store. 
/// +[Experimental("OPENAI001")] [CodeGenModel("VectorStoreFileObject")] public partial class VectorStoreFileAssociation { diff --git a/.dotnet/src/Custom/VectorStores/VectorStoreFileAssociationCollectionOptions.cs b/.dotnet/src/Custom/VectorStores/VectorStoreFileAssociationCollectionOptions.cs index 9880b0afd..71b2a8b0c 100644 --- a/.dotnet/src/Custom/VectorStores/VectorStoreFileAssociationCollectionOptions.cs +++ b/.dotnet/src/Custom/VectorStores/VectorStoreFileAssociationCollectionOptions.cs @@ -1,38 +1,40 @@ +using System.Diagnostics.CodeAnalysis; + namespace OpenAI.VectorStores; -/// -/// Represents addition options available when requesting a collection of instances. -/// +/// The options to configure how objects are retrieved and paginated. +[Experimental("OPENAI001")] public class VectorStoreFileAssociationCollectionOptions { - /// - /// Creates a new instance of . - /// + /// Initializes a new instance of . public VectorStoreFileAssociationCollectionOptions() { } - /// - /// The order that results should appear in the list according to - /// their created_at timestamp. + /// + /// A limit on the number of objects to be returned per page. /// - public ListOrder? Order { get; set; } + public int? PageSizeLimit { get; set; } /// - /// The number of values to return in a page result. + /// The order in which to retrieve objects when sorted by their + /// timestamp. /// - public int? PageSize { get; set; } + public VectorStoreFileAssociationCollectionOrder? Order { get; set; } /// - /// The id of the item preceeding the first item in the collection. + /// The used to retrieve the page of objects that come + /// after this one. /// public string AfterId { get; set; } /// - /// The id of the item following the last item in the collection. + /// The used to retrieve the page of objects that come + /// before this one. /// public string BeforeId { get; set; } /// - /// A status filter that file associations must match to be included in the collection. 
+ /// A filter to only retrieve the objects with a matching + /// . /// public VectorStoreFileStatusFilter? Filter { get; set; } } diff --git a/.dotnet/src/Custom/VectorStores/VectorStoreFileAssociationCollectionOrder.cs b/.dotnet/src/Custom/VectorStores/VectorStoreFileAssociationCollectionOrder.cs new file mode 100644 index 000000000..888cf8322 --- /dev/null +++ b/.dotnet/src/Custom/VectorStores/VectorStoreFileAssociationCollectionOrder.cs @@ -0,0 +1,17 @@ +using System.Diagnostics.CodeAnalysis; + +namespace OpenAI.VectorStores; + +// CUSTOM: Renamed. +[Experimental("OPENAI001")] +[CodeGenModel("ListVectorStoreFilesRequestOrder")] +public readonly partial struct VectorStoreFileAssociationCollectionOrder +{ + // CUSTOM: Renamed. + [CodeGenMember("Asc")] + public static VectorStoreFileAssociationCollectionOrder Ascending { get; } = new VectorStoreFileAssociationCollectionOrder(AscendingValue); + + // CUSTOM: Renamed. + [CodeGenMember("Desc")] + public static VectorStoreFileAssociationCollectionOrder Descending { get; } = new VectorStoreFileAssociationCollectionOrder(DescendingValue); +} diff --git a/.dotnet/src/Custom/VectorStores/VectorStoreFileAssociationError.cs b/.dotnet/src/Custom/VectorStores/VectorStoreFileAssociationError.cs index a682fe289..c46e71fc1 100644 --- a/.dotnet/src/Custom/VectorStores/VectorStoreFileAssociationError.cs +++ b/.dotnet/src/Custom/VectorStores/VectorStoreFileAssociationError.cs @@ -1,5 +1,8 @@ +using System.Diagnostics.CodeAnalysis; + namespace OpenAI.VectorStores; +[Experimental("OPENAI001")] [CodeGenModel("VectorStoreFileObjectLastError")] public partial class VectorStoreFileAssociationError { diff --git a/.dotnet/src/Custom/VectorStores/VectorStoreFileAssociationErrorCode.cs b/.dotnet/src/Custom/VectorStores/VectorStoreFileAssociationErrorCode.cs index 8246d94f9..82df53e38 100644 --- a/.dotnet/src/Custom/VectorStores/VectorStoreFileAssociationErrorCode.cs +++ b/.dotnet/src/Custom/VectorStores/VectorStoreFileAssociationErrorCode.cs 
@@ -1,5 +1,8 @@ +using System.Diagnostics.CodeAnalysis; + namespace OpenAI.VectorStores; +[Experimental("OPENAI001")] [CodeGenModel("VectorStoreFileObjectLastErrorCode")] public readonly partial struct VectorStoreFileAssociationErrorCode { diff --git a/.dotnet/src/Custom/VectorStores/VectorStoreFileAssociationStatus.cs b/.dotnet/src/Custom/VectorStores/VectorStoreFileAssociationStatus.cs index a1bd0c55d..2056ca2e1 100644 --- a/.dotnet/src/Custom/VectorStores/VectorStoreFileAssociationStatus.cs +++ b/.dotnet/src/Custom/VectorStores/VectorStoreFileAssociationStatus.cs @@ -1,10 +1,12 @@ using System.ComponentModel; +using System.Diagnostics.CodeAnalysis; namespace OpenAI.VectorStores; /// /// Represents the possible states for a vector store file association. /// +[Experimental("OPENAI001")] [CodeGenModel("VectorStoreFileObjectStatus")] public enum VectorStoreFileAssociationStatus { diff --git a/.dotnet/src/Custom/VectorStores/VectorStoreFileCounts.cs b/.dotnet/src/Custom/VectorStores/VectorStoreFileCounts.cs index 049de2351..058d618b5 100644 --- a/.dotnet/src/Custom/VectorStores/VectorStoreFileCounts.cs +++ b/.dotnet/src/Custom/VectorStores/VectorStoreFileCounts.cs @@ -1,5 +1,8 @@ +using System.Diagnostics.CodeAnalysis; + namespace OpenAI.VectorStores; +[Experimental("OPENAI001")] [CodeGenModel("VectorStoreObjectFileCounts")] public partial class VectorStoreFileCounts { diff --git a/.dotnet/src/Custom/VectorStores/VectorStoreFileStatusFilter.cs b/.dotnet/src/Custom/VectorStores/VectorStoreFileStatusFilter.cs index 24f9fb964..93e9b9b76 100644 --- a/.dotnet/src/Custom/VectorStores/VectorStoreFileStatusFilter.cs +++ b/.dotnet/src/Custom/VectorStores/VectorStoreFileStatusFilter.cs @@ -1,5 +1,8 @@ +using System.Diagnostics.CodeAnalysis; + namespace OpenAI.VectorStores; +[Experimental("OPENAI001")] [CodeGenModel("ListVectorStoreFilesFilter")] public readonly partial struct VectorStoreFileStatusFilter { diff --git 
a/.dotnet/src/Custom/VectorStores/VectorStoreModificationOptions.cs b/.dotnet/src/Custom/VectorStores/VectorStoreModificationOptions.cs index 964a1a865..193ab8fab 100644 --- a/.dotnet/src/Custom/VectorStores/VectorStoreModificationOptions.cs +++ b/.dotnet/src/Custom/VectorStores/VectorStoreModificationOptions.cs @@ -1,7 +1,8 @@ -using System.Collections.Generic; +using System.Diagnostics.CodeAnalysis; namespace OpenAI.VectorStores; +[Experimental("OPENAI001")] [CodeGenModel("UpdateVectorStoreRequest")] public partial class VectorStoreModificationOptions { diff --git a/.dotnet/src/Custom/VectorStores/VectorStoreStatus.cs b/.dotnet/src/Custom/VectorStores/VectorStoreStatus.cs index 0f482a942..5cd98b5d0 100644 --- a/.dotnet/src/Custom/VectorStores/VectorStoreStatus.cs +++ b/.dotnet/src/Custom/VectorStores/VectorStoreStatus.cs @@ -1,10 +1,12 @@ using System.ComponentModel; +using System.Diagnostics.CodeAnalysis; namespace OpenAI.VectorStores; /// /// Represents the possible states for a vector store. 
/// +[Experimental("OPENAI001")] [CodeGenModel("VectorStoreObjectStatus")] public enum VectorStoreStatus { diff --git a/.dotnet/src/Generated/ChatClient.cs b/.dotnet/src/Generated/ChatClient.cs index 681019cf2..6db94d5ea 100644 --- a/.dotnet/src/Generated/ChatClient.cs +++ b/.dotnet/src/Generated/ChatClient.cs @@ -24,13 +24,6 @@ protected ChatClient() { } - internal ChatClient(ClientPipeline pipeline, ApiKeyCredential keyCredential, Uri endpoint) - { - _pipeline = pipeline; - _keyCredential = keyCredential; - _endpoint = endpoint; - } - internal PipelineMessage CreateCreateChatCompletionRequest(BinaryContent content, RequestOptions options) { var message = _pipeline.CreateMessage(); diff --git a/.dotnet/src/Generated/FineTuningClient.cs b/.dotnet/src/Generated/FineTuningClient.cs index 7424e0331..c4446699d 100644 --- a/.dotnet/src/Generated/FineTuningClient.cs +++ b/.dotnet/src/Generated/FineTuningClient.cs @@ -24,13 +24,6 @@ protected FineTuningClient() { } - internal FineTuningClient(ClientPipeline pipeline, ApiKeyCredential keyCredential, Uri endpoint) - { - _pipeline = pipeline; - _keyCredential = keyCredential; - _endpoint = endpoint; - } - internal PipelineMessage CreateCreateFineTuningJobRequest(BinaryContent content, RequestOptions options) { var message = _pipeline.CreateMessage(); diff --git a/.dotnet/src/Generated/InternalUploadsClient.cs b/.dotnet/src/Generated/InternalUploadsClient.cs new file mode 100644 index 000000000..4c6d033aa --- /dev/null +++ b/.dotnet/src/Generated/InternalUploadsClient.cs @@ -0,0 +1,244 @@ +// + +#nullable disable + +using System; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Threading.Tasks; + +namespace OpenAI.Files +{ + // Data plane generated sub-client. 
+ internal partial class InternalUploadsClient + { + private const string AuthorizationHeader = "Authorization"; + private readonly ApiKeyCredential _keyCredential; + private const string AuthorizationApiKeyPrefix = "Bearer"; + private readonly ClientPipeline _pipeline; + private readonly Uri _endpoint; + + public virtual ClientPipeline Pipeline => _pipeline; + + protected InternalUploadsClient() + { + } + + public virtual async Task> CreateUploadAsync(InternalCreateUploadRequest requestBody) + { + Argument.AssertNotNull(requestBody, nameof(requestBody)); + + using BinaryContent content = requestBody.ToBinaryContent(); + ClientResult result = await CreateUploadAsync(content, null).ConfigureAwait(false); + return ClientResult.FromValue(InternalUpload.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + public virtual ClientResult CreateUpload(InternalCreateUploadRequest requestBody) + { + Argument.AssertNotNull(requestBody, nameof(requestBody)); + + using BinaryContent content = requestBody.ToBinaryContent(); + ClientResult result = CreateUpload(content, null); + return ClientResult.FromValue(InternalUpload.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + public virtual async Task CreateUploadAsync(BinaryContent content, RequestOptions options = null) + { + Argument.AssertNotNull(content, nameof(content)); + + using PipelineMessage message = CreateCreateUploadRequest(content, options); + return ClientResult.FromResponse(await _pipeline.ProcessMessageAsync(message, options).ConfigureAwait(false)); + } + + public virtual ClientResult CreateUpload(BinaryContent content, RequestOptions options = null) + { + Argument.AssertNotNull(content, nameof(content)); + + using PipelineMessage message = CreateCreateUploadRequest(content, options); + return ClientResult.FromResponse(_pipeline.ProcessMessage(message, options)); + } + + public virtual async Task> AddUploadPartAsync(string uploadId, InternalAddUploadPartRequest requestBody) + 
{ + Argument.AssertNotNullOrEmpty(uploadId, nameof(uploadId)); + Argument.AssertNotNull(requestBody, nameof(requestBody)); + + using MultipartFormDataBinaryContent content = requestBody.ToMultipartBinaryBody(); + ClientResult result = await AddUploadPartAsync(uploadId, content, content.ContentType, (RequestOptions)null).ConfigureAwait(false); + return ClientResult.FromValue(InternalUploadPart.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + public virtual ClientResult AddUploadPart(string uploadId, InternalAddUploadPartRequest requestBody) + { + Argument.AssertNotNullOrEmpty(uploadId, nameof(uploadId)); + Argument.AssertNotNull(requestBody, nameof(requestBody)); + + using MultipartFormDataBinaryContent content = requestBody.ToMultipartBinaryBody(); + ClientResult result = AddUploadPart(uploadId, content, content.ContentType, (RequestOptions)null); + return ClientResult.FromValue(InternalUploadPart.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + public virtual async Task AddUploadPartAsync(string uploadId, BinaryContent content, string contentType, RequestOptions options = null) + { + Argument.AssertNotNullOrEmpty(uploadId, nameof(uploadId)); + Argument.AssertNotNull(content, nameof(content)); + + using PipelineMessage message = CreateAddUploadPartRequest(uploadId, content, contentType, options); + return ClientResult.FromResponse(await _pipeline.ProcessMessageAsync(message, options).ConfigureAwait(false)); + } + + public virtual ClientResult AddUploadPart(string uploadId, BinaryContent content, string contentType, RequestOptions options = null) + { + Argument.AssertNotNullOrEmpty(uploadId, nameof(uploadId)); + Argument.AssertNotNull(content, nameof(content)); + + using PipelineMessage message = CreateAddUploadPartRequest(uploadId, content, contentType, options); + return ClientResult.FromResponse(_pipeline.ProcessMessage(message, options)); + } + + public virtual async Task> CompleteUploadAsync(string uploadId, 
InternalCompleteUploadRequest requestBody) + { + Argument.AssertNotNullOrEmpty(uploadId, nameof(uploadId)); + Argument.AssertNotNull(requestBody, nameof(requestBody)); + + using BinaryContent content = requestBody.ToBinaryContent(); + ClientResult result = await CompleteUploadAsync(uploadId, content, null).ConfigureAwait(false); + return ClientResult.FromValue(InternalUpload.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + public virtual ClientResult CompleteUpload(string uploadId, InternalCompleteUploadRequest requestBody) + { + Argument.AssertNotNullOrEmpty(uploadId, nameof(uploadId)); + Argument.AssertNotNull(requestBody, nameof(requestBody)); + + using BinaryContent content = requestBody.ToBinaryContent(); + ClientResult result = CompleteUpload(uploadId, content, null); + return ClientResult.FromValue(InternalUpload.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + public virtual async Task CompleteUploadAsync(string uploadId, BinaryContent content, RequestOptions options = null) + { + Argument.AssertNotNullOrEmpty(uploadId, nameof(uploadId)); + Argument.AssertNotNull(content, nameof(content)); + + using PipelineMessage message = CreateCompleteUploadRequest(uploadId, content, options); + return ClientResult.FromResponse(await _pipeline.ProcessMessageAsync(message, options).ConfigureAwait(false)); + } + + public virtual ClientResult CompleteUpload(string uploadId, BinaryContent content, RequestOptions options = null) + { + Argument.AssertNotNullOrEmpty(uploadId, nameof(uploadId)); + Argument.AssertNotNull(content, nameof(content)); + + using PipelineMessage message = CreateCompleteUploadRequest(uploadId, content, options); + return ClientResult.FromResponse(_pipeline.ProcessMessage(message, options)); + } + + public virtual async Task> CancelUploadAsync(string uploadId) + { + Argument.AssertNotNullOrEmpty(uploadId, nameof(uploadId)); + + ClientResult result = await CancelUploadAsync(uploadId, 
null).ConfigureAwait(false); + return ClientResult.FromValue(InternalUpload.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + public virtual ClientResult CancelUpload(string uploadId) + { + Argument.AssertNotNullOrEmpty(uploadId, nameof(uploadId)); + + ClientResult result = CancelUpload(uploadId, null); + return ClientResult.FromValue(InternalUpload.FromResponse(result.GetRawResponse()), result.GetRawResponse()); + } + + public virtual async Task CancelUploadAsync(string uploadId, RequestOptions options) + { + Argument.AssertNotNullOrEmpty(uploadId, nameof(uploadId)); + + using PipelineMessage message = CreateCancelUploadRequest(uploadId, options); + return ClientResult.FromResponse(await _pipeline.ProcessMessageAsync(message, options).ConfigureAwait(false)); + } + + public virtual ClientResult CancelUpload(string uploadId, RequestOptions options) + { + Argument.AssertNotNullOrEmpty(uploadId, nameof(uploadId)); + + using PipelineMessage message = CreateCancelUploadRequest(uploadId, options); + return ClientResult.FromResponse(_pipeline.ProcessMessage(message, options)); + } + + internal PipelineMessage CreateCreateUploadRequest(BinaryContent content, RequestOptions options) + { + var message = _pipeline.CreateMessage(); + message.ResponseClassifier = PipelineMessageClassifier200; + var request = message.Request; + request.Method = "POST"; + var uri = new ClientUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/uploads", false); + request.Uri = uri.ToUri(); + request.Headers.Set("Accept", "application/json"); + request.Headers.Set("Content-Type", "application/json"); + request.Content = content; + message.Apply(options); + return message; + } + + internal PipelineMessage CreateAddUploadPartRequest(string uploadId, BinaryContent content, string contentType, RequestOptions options) + { + var message = _pipeline.CreateMessage(); + message.ResponseClassifier = PipelineMessageClassifier200; + var request = message.Request; + request.Method = 
"POST"; + var uri = new ClientUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/uploads/", false); + uri.AppendPath(uploadId, true); + uri.AppendPath("/parts", false); + request.Uri = uri.ToUri(); + request.Headers.Set("Accept", "application/json"); + request.Headers.Set("Content-Type", contentType); + request.Content = content; + message.Apply(options); + return message; + } + + internal PipelineMessage CreateCompleteUploadRequest(string uploadId, BinaryContent content, RequestOptions options) + { + var message = _pipeline.CreateMessage(); + message.ResponseClassifier = PipelineMessageClassifier200; + var request = message.Request; + request.Method = "POST"; + var uri = new ClientUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/uploads/", false); + uri.AppendPath(uploadId, true); + uri.AppendPath("/complete", false); + request.Uri = uri.ToUri(); + request.Headers.Set("Accept", "application/json"); + request.Headers.Set("Content-Type", "application/json"); + request.Content = content; + message.Apply(options); + return message; + } + + internal PipelineMessage CreateCancelUploadRequest(string uploadId, RequestOptions options) + { + var message = _pipeline.CreateMessage(); + message.ResponseClassifier = PipelineMessageClassifier200; + var request = message.Request; + request.Method = "POST"; + var uri = new ClientUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/uploads/", false); + uri.AppendPath(uploadId, true); + uri.AppendPath("/cancel", false); + request.Uri = uri.ToUri(); + request.Headers.Set("Accept", "application/json"); + message.Apply(options); + return message; + } + + private static PipelineMessageClassifier _pipelineMessageClassifier200; + private static PipelineMessageClassifier PipelineMessageClassifier200 => _pipelineMessageClassifier200 ??= PipelineMessageClassifier.Create(stackalloc ushort[] { 200 }); + } +} diff --git a/.dotnet/src/Generated/LegacyCompletionClient.cs b/.dotnet/src/Generated/LegacyCompletionClient.cs index 
84402ded2..efb44faac 100644 --- a/.dotnet/src/Generated/LegacyCompletionClient.cs +++ b/.dotnet/src/Generated/LegacyCompletionClient.cs @@ -24,13 +24,6 @@ protected LegacyCompletionClient() { } - internal LegacyCompletionClient(ClientPipeline pipeline, ApiKeyCredential keyCredential, Uri endpoint) - { - _pipeline = pipeline; - _keyCredential = keyCredential; - _endpoint = endpoint; - } - public virtual async Task> CreateCompletionAsync(InternalCreateCompletionRequest requestBody) { Argument.AssertNotNull(requestBody, nameof(requestBody)); diff --git a/.dotnet/src/Generated/Models/AssistantChatMessage.Serialization.cs b/.dotnet/src/Generated/Models/AssistantChatMessage.Serialization.cs index 4695f5fee..89a7d76f5 100644 --- a/.dotnet/src/Generated/Models/AssistantChatMessage.Serialization.cs +++ b/.dotnet/src/Generated/Models/AssistantChatMessage.Serialization.cs @@ -32,15 +32,26 @@ internal static AssistantChatMessage DeserializeAssistantChatMessage(JsonElement { return null; } + string refusal = default; string name = default; IList toolCalls = default; ChatFunctionCall functionCall = default; - string role = "assistant"; + ChatMessageRole role = default; IList content = default; IDictionary serializedAdditionalRawData = default; Dictionary rawDataDictionary = new Dictionary(); foreach (var property in element.EnumerateObject()) { + if (property.NameEquals("refusal"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + refusal = null; + continue; + } + refusal = property.Value.GetString(); + continue; + } if (property.NameEquals("name"u8)) { name = property.Value.GetString(); @@ -72,7 +83,7 @@ internal static AssistantChatMessage DeserializeAssistantChatMessage(JsonElement } if (property.NameEquals("role"u8)) { - role = property.Value.GetString(); + role = property.Value.GetString().ToChatMessageRole(); continue; } if (property.NameEquals("content"u8)) @@ -91,6 +102,7 @@ internal static AssistantChatMessage DeserializeAssistantChatMessage(JsonElement 
role, content ?? new ChangeTrackingList(), serializedAdditionalRawData, + refusal, name, toolCalls ?? new ChangeTrackingList(), functionCall); diff --git a/.dotnet/src/Generated/Models/AssistantChatMessage.cs b/.dotnet/src/Generated/Models/AssistantChatMessage.cs index 38f6c207d..1d3cc6723 100644 --- a/.dotnet/src/Generated/Models/AssistantChatMessage.cs +++ b/.dotnet/src/Generated/Models/AssistantChatMessage.cs @@ -9,13 +9,15 @@ namespace OpenAI.Chat { public partial class AssistantChatMessage : ChatMessage { - internal AssistantChatMessage(string role, IList content, IDictionary serializedAdditionalRawData, string participantName, IList toolCalls, ChatFunctionCall functionCall) : base(role, content, serializedAdditionalRawData) + internal AssistantChatMessage(ChatMessageRole role, IList content, IDictionary serializedAdditionalRawData, string refusal, string participantName, IList toolCalls, ChatFunctionCall functionCall) : base(role, content, serializedAdditionalRawData) { + Refusal = refusal; ParticipantName = participantName; ToolCalls = toolCalls; FunctionCall = functionCall; } - public IList ToolCalls { get; } + + public string Refusal { get; set; } public ChatFunctionCall FunctionCall { get; set; } } } diff --git a/.dotnet/src/Generated/Models/AssistantCollectionOrder.cs b/.dotnet/src/Generated/Models/AssistantCollectionOrder.cs new file mode 100644 index 000000000..2b25b8450 --- /dev/null +++ b/.dotnet/src/Generated/Models/AssistantCollectionOrder.cs @@ -0,0 +1,33 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Assistants +{ + public readonly partial struct AssistantCollectionOrder : IEquatable + { + private readonly string _value; + + public AssistantCollectionOrder(string value) + { + _value = value ?? 
throw new ArgumentNullException(nameof(value)); + } + + private const string AscendingValue = "asc"; + private const string DescendingValue = "desc"; + public static bool operator ==(AssistantCollectionOrder left, AssistantCollectionOrder right) => left.Equals(right); + public static bool operator !=(AssistantCollectionOrder left, AssistantCollectionOrder right) => !left.Equals(right); + public static implicit operator AssistantCollectionOrder(string value) => new AssistantCollectionOrder(value); + + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is AssistantCollectionOrder other && Equals(other); + public bool Equals(AssistantCollectionOrder other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value != null ? StringComparer.InvariantCultureIgnoreCase.GetHashCode(_value) : 0; + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/AssistantResponseFormat.Serialization.cs b/.dotnet/src/Generated/Models/AssistantResponseFormat.Serialization.cs index eaa92389d..f2fa4d9f9 100644 --- a/.dotnet/src/Generated/Models/AssistantResponseFormat.Serialization.cs +++ b/.dotnet/src/Generated/Models/AssistantResponseFormat.Serialization.cs @@ -9,19 +9,8 @@ namespace OpenAI.Assistants { + [PersistableModelProxy(typeof(InternalUnknownAssistantResponseFormat))] public partial class AssistantResponseFormat : IJsonModel { - string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; - - internal static AssistantResponseFormat FromResponse(PipelineResponse response) - { - using var document = JsonDocument.Parse(response.Content); - return DeserializeAssistantResponseFormat(document.RootElement); - } - - internal virtual BinaryContent ToBinaryContent() - { - return BinaryContent.Create(this, ModelSerializationExtensions.WireOptions); - } } } diff 
--git a/.dotnet/src/Generated/Models/AssistantResponseFormat.cs b/.dotnet/src/Generated/Models/AssistantResponseFormat.cs index da67ee999..3490f8a5a 100644 --- a/.dotnet/src/Generated/Models/AssistantResponseFormat.cs +++ b/.dotnet/src/Generated/Models/AssistantResponseFormat.cs @@ -7,11 +7,19 @@ namespace OpenAI.Assistants { - public partial class AssistantResponseFormat + public abstract partial class AssistantResponseFormat { - internal AssistantResponseFormat(IDictionary serializedAdditionalRawData) + internal IDictionary SerializedAdditionalRawData { get; set; } + protected AssistantResponseFormat() { + } + + internal AssistantResponseFormat(string type, IDictionary serializedAdditionalRawData) + { + Type = type; SerializedAdditionalRawData = serializedAdditionalRawData; } + + internal string Type { get; set; } } } diff --git a/.dotnet/src/Generated/Models/ChatCompletion.Serialization.cs b/.dotnet/src/Generated/Models/ChatCompletion.Serialization.cs index ea1696d92..87d34ee83 100644 --- a/.dotnet/src/Generated/Models/ChatCompletion.Serialization.cs +++ b/.dotnet/src/Generated/Models/ChatCompletion.Serialization.cs @@ -46,6 +46,18 @@ void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOp writer.WritePropertyName("model"u8); writer.WriteStringValue(Model); } + if (SerializedAdditionalRawData?.ContainsKey("service_tier") != true && Optional.IsDefined(_serviceTier)) + { + if (_serviceTier != null) + { + writer.WritePropertyName("service_tier"u8); + writer.WriteStringValue(_serviceTier.Value.ToString()); + } + else + { + writer.WriteNull("service_tier"); + } + } if (SerializedAdditionalRawData?.ContainsKey("system_fingerprint") != true && Optional.IsDefined(SystemFingerprint)) { writer.WritePropertyName("system_fingerprint"u8); @@ -107,6 +119,7 @@ internal static ChatCompletion DeserializeChatCompletion(JsonElement element, Mo IReadOnlyList choices = default; DateTimeOffset created = default; string model = default; + 
InternalCreateChatCompletionResponseServiceTier? serviceTier = default; string systemFingerprint = default; InternalCreateChatCompletionResponseObject @object = default; ChatTokenUsage usage = default; @@ -139,6 +152,16 @@ internal static ChatCompletion DeserializeChatCompletion(JsonElement element, Mo model = property.Value.GetString(); continue; } + if (property.NameEquals("service_tier"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + serviceTier = null; + continue; + } + serviceTier = new InternalCreateChatCompletionResponseServiceTier(property.Value.GetString()); + continue; + } if (property.NameEquals("system_fingerprint"u8)) { systemFingerprint = property.Value.GetString(); @@ -170,6 +193,7 @@ internal static ChatCompletion DeserializeChatCompletion(JsonElement element, Mo choices, created, model, + serviceTier, systemFingerprint, @object, usage, diff --git a/.dotnet/src/Generated/Models/ChatCompletion.cs b/.dotnet/src/Generated/Models/ChatCompletion.cs index 1ad8b44e2..74549a43e 100644 --- a/.dotnet/src/Generated/Models/ChatCompletion.cs +++ b/.dotnet/src/Generated/Models/ChatCompletion.cs @@ -23,12 +23,13 @@ internal ChatCompletion(string id, IEnumerable choices, DateTimeOffset createdAt, string model, string systemFingerprint, InternalCreateChatCompletionResponseObject @object, ChatTokenUsage usage, IDictionary serializedAdditionalRawData) + internal ChatCompletion(string id, IReadOnlyList choices, DateTimeOffset createdAt, string model, InternalCreateChatCompletionResponseServiceTier? 
serviceTier, string systemFingerprint, InternalCreateChatCompletionResponseObject @object, ChatTokenUsage usage, IDictionary serializedAdditionalRawData) { Id = id; Choices = choices; CreatedAt = createdAt; Model = model; + _serviceTier = serviceTier; SystemFingerprint = systemFingerprint; Object = @object; Usage = usage; diff --git a/.dotnet/src/Generated/Models/ChatCompletionOptions.Serialization.cs b/.dotnet/src/Generated/Models/ChatCompletionOptions.Serialization.cs index 23a61e573..cae873c4a 100644 --- a/.dotnet/src/Generated/Models/ChatCompletionOptions.Serialization.cs +++ b/.dotnet/src/Generated/Models/ChatCompletionOptions.Serialization.cs @@ -137,6 +137,18 @@ void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderW writer.WriteNull("seed"); } } + if (SerializedAdditionalRawData?.ContainsKey("service_tier") != true && Optional.IsDefined(_serviceTier)) + { + if (_serviceTier != null) + { + writer.WritePropertyName("service_tier"u8); + writer.WriteStringValue(_serviceTier.Value.ToString()); + } + else + { + writer.WriteNull("service_tier"); + } + } if (SerializedAdditionalRawData?.ContainsKey("stop") != true && Optional.IsCollectionDefined(StopSequences)) { if (StopSequences != null) @@ -217,10 +229,10 @@ void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderW writer.WritePropertyName("parallel_tool_calls"u8); writer.WriteBooleanValue(ParallelToolCallsEnabled.Value); } - if (SerializedAdditionalRawData?.ContainsKey("user") != true && Optional.IsDefined(User)) + if (SerializedAdditionalRawData?.ContainsKey("user") != true && Optional.IsDefined(EndUserId)) { writer.WritePropertyName("user"u8); - writer.WriteStringValue(User); + writer.WriteStringValue(EndUserId); } if (SerializedAdditionalRawData?.ContainsKey("function_call") != true && Optional.IsDefined(FunctionChoice)) { @@ -290,6 +302,7 @@ internal static ChatCompletionOptions DeserializeChatCompletionOptions(JsonEleme float? presencePenalty = default; ChatResponseFormat responseFormat = default; long? 
seed = default; + InternalCreateChatCompletionRequestServiceTier? serviceTier = default; IList stop = default; bool? stream = default; InternalChatCompletionStreamOptions streamOptions = default; @@ -404,6 +417,16 @@ internal static ChatCompletionOptions DeserializeChatCompletionOptions(JsonEleme seed = property.Value.GetInt64(); continue; } + if (property.NameEquals("service_tier"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + serviceTier = null; + continue; + } + serviceTier = new InternalCreateChatCompletionRequestServiceTier(property.Value.GetString()); + continue; + } if (property.NameEquals("stop"u8)) { DeserializeStopSequencesValue(property, ref stop); @@ -528,6 +551,7 @@ internal static ChatCompletionOptions DeserializeChatCompletionOptions(JsonEleme presencePenalty, responseFormat, seed, + serviceTier, stop ?? new ChangeTrackingList(), stream, streamOptions, diff --git a/.dotnet/src/Generated/Models/ChatCompletionOptions.cs b/.dotnet/src/Generated/Models/ChatCompletionOptions.cs index 9588a63d4..5afc0bc26 100644 --- a/.dotnet/src/Generated/Models/ChatCompletionOptions.cs +++ b/.dotnet/src/Generated/Models/ChatCompletionOptions.cs @@ -12,7 +12,7 @@ public partial class ChatCompletionOptions { internal IDictionary SerializedAdditionalRawData { get; set; } - internal ChatCompletionOptions(IList messages, InternalCreateChatCompletionRequestModel model, float? frequencyPenalty, IDictionary logitBiases, bool? includeLogProbabilities, int? topLogProbabilityCount, int? maxTokens, int? n, float? presencePenalty, ChatResponseFormat responseFormat, long? seed, IList stopSequences, bool? stream, InternalChatCompletionStreamOptions streamOptions, float? temperature, float? topP, IList tools, ChatToolChoice toolChoice, bool? 
parallelToolCallsEnabled, string user, ChatFunctionChoice functionChoice, IList functions, IDictionary serializedAdditionalRawData) + internal ChatCompletionOptions(IList messages, InternalCreateChatCompletionRequestModel model, float? frequencyPenalty, IDictionary logitBiases, bool? includeLogProbabilities, int? topLogProbabilityCount, int? maxTokens, int? n, float? presencePenalty, ChatResponseFormat responseFormat, long? seed, InternalCreateChatCompletionRequestServiceTier? serviceTier, IList stopSequences, bool? stream, InternalChatCompletionStreamOptions streamOptions, float? temperature, float? topP, IList tools, ChatToolChoice toolChoice, bool? parallelToolCallsEnabled, string endUserId, ChatFunctionChoice functionChoice, IList functions, IDictionary serializedAdditionalRawData) { Messages = messages; Model = model; @@ -25,6 +25,7 @@ internal ChatCompletionOptions(IList messages, InternalCreateChatCo PresencePenalty = presencePenalty; ResponseFormat = responseFormat; Seed = seed; + _serviceTier = serviceTier; StopSequences = stopSequences; Stream = stream; StreamOptions = streamOptions; @@ -33,7 +34,7 @@ internal ChatCompletionOptions(IList messages, InternalCreateChatCo Tools = tools; ToolChoice = toolChoice; ParallelToolCallsEnabled = parallelToolCallsEnabled; - User = user; + EndUserId = endUserId; FunctionChoice = functionChoice; Functions = functions; SerializedAdditionalRawData = serializedAdditionalRawData; @@ -42,11 +43,9 @@ internal ChatCompletionOptions(IList messages, InternalCreateChatCo public int? MaxTokens { get; set; } public float? PresencePenalty { get; set; } public ChatResponseFormat ResponseFormat { get; set; } - public long? Seed { get; set; } public float? Temperature { get; set; } public float? 
TopP { get; set; } public IList Tools { get; } - public string User { get; set; } public IList Functions { get; } } } diff --git a/.dotnet/src/Generated/Models/ChatImageDetailLevel.cs b/.dotnet/src/Generated/Models/ChatImageDetailLevel.cs new file mode 100644 index 000000000..28d44ae56 --- /dev/null +++ b/.dotnet/src/Generated/Models/ChatImageDetailLevel.cs @@ -0,0 +1,38 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Chat +{ + public readonly partial struct ChatImageDetailLevel : IEquatable + { + private readonly string _value; + + public ChatImageDetailLevel(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string AutoValue = "auto"; + private const string LowValue = "low"; + private const string HighValue = "high"; + + public static ChatImageDetailLevel Auto { get; } = new ChatImageDetailLevel(AutoValue); + public static ChatImageDetailLevel Low { get; } = new ChatImageDetailLevel(LowValue); + public static ChatImageDetailLevel High { get; } = new ChatImageDetailLevel(HighValue); + public static bool operator ==(ChatImageDetailLevel left, ChatImageDetailLevel right) => left.Equals(right); + public static bool operator !=(ChatImageDetailLevel left, ChatImageDetailLevel right) => !left.Equals(right); + public static implicit operator ChatImageDetailLevel(string value) => new ChatImageDetailLevel(value); + + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is ChatImageDetailLevel other && Equals(other); + public bool Equals(ChatImageDetailLevel other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value != null ? 
StringComparer.InvariantCultureIgnoreCase.GetHashCode(_value) : 0; + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/ChatMessage.Serialization.cs b/.dotnet/src/Generated/Models/ChatMessage.Serialization.cs index 3d13375d1..971105a95 100644 --- a/.dotnet/src/Generated/Models/ChatMessage.Serialization.cs +++ b/.dotnet/src/Generated/Models/ChatMessage.Serialization.cs @@ -10,7 +10,7 @@ namespace OpenAI.Chat { - [PersistableModelProxy(typeof(UnknownChatMessage))] + [PersistableModelProxy(typeof(InternalUnknownChatMessage))] public partial class ChatMessage : IJsonModel { ChatMessage IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) @@ -45,7 +45,7 @@ internal static ChatMessage DeserializeChatMessage(JsonElement element, ModelRea case "user": return UserChatMessage.DeserializeUserChatMessage(element, options); } } - return UnknownChatMessage.DeserializeUnknownChatMessage(element, options); + return InternalUnknownChatMessage.DeserializeInternalUnknownChatMessage(element, options); } BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) diff --git a/.dotnet/src/Generated/Models/ChatMessage.cs b/.dotnet/src/Generated/Models/ChatMessage.cs index 7617e8dc4..06a9f1adb 100644 --- a/.dotnet/src/Generated/Models/ChatMessage.cs +++ b/.dotnet/src/Generated/Models/ChatMessage.cs @@ -10,18 +10,12 @@ namespace OpenAI.Chat public abstract partial class ChatMessage { internal IDictionary SerializedAdditionalRawData { get; set; } - protected ChatMessage() - { - Content = new ChangeTrackingList(); - } - internal ChatMessage(string role, IList content, IDictionary serializedAdditionalRawData) + internal ChatMessage(ChatMessageRole role, IList content, IDictionary serializedAdditionalRawData) { Role = role; Content = content; SerializedAdditionalRawData = serializedAdditionalRawData; } - - internal string Role { get; set; } } } diff --git 
a/.dotnet/src/Generated/Models/ChatResponseFormat.Serialization.cs b/.dotnet/src/Generated/Models/ChatResponseFormat.Serialization.cs index a98c5ce1a..26c297851 100644 --- a/.dotnet/src/Generated/Models/ChatResponseFormat.Serialization.cs +++ b/.dotnet/src/Generated/Models/ChatResponseFormat.Serialization.cs @@ -5,49 +5,13 @@ using System; using System.ClientModel; using System.ClientModel.Primitives; -using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Chat { + [PersistableModelProxy(typeof(InternalUnknownChatResponseFormat))] public partial class ChatResponseFormat : IJsonModel { - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) - { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; - if (format != "J") - { - throw new FormatException($"The model {nameof(ChatResponseFormat)} does not support writing '{format}' format."); - } - - writer.WriteStartObject(); - if (SerializedAdditionalRawData?.ContainsKey("type") != true && Optional.IsDefined(Type)) - { - writer.WritePropertyName("type"u8); - writer.WriteStringValue(Type.Value.ToString()); - } - if (SerializedAdditionalRawData != null) - { - foreach (var item in SerializedAdditionalRawData) - { - if (ModelSerializationExtensions.IsSentinelValue(item.Value)) - { - continue; - } - writer.WritePropertyName(item.Key); -#if NET6_0_OR_GREATER - writer.WriteRawValue(item.Value); -#else - using (JsonDocument document = JsonDocument.Parse(item.Value)) - { - JsonSerializer.Serialize(writer, document.RootElement); - } -#endif - } - } - writer.WriteEndObject(); - } - ChatResponseFormat IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) { var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -68,28 +32,16 @@ internal static ChatResponseFormat DeserializeChatResponseFormat(JsonElement ele { return null; } - InternalCreateChatCompletionRequestResponseFormatType? type = default; - IDictionary serializedAdditionalRawData = default; - Dictionary rawDataDictionary = new Dictionary(); - foreach (var property in element.EnumerateObject()) + if (element.TryGetProperty("type", out JsonElement discriminator)) { - if (property.NameEquals("type"u8)) - { - if (property.Value.ValueKind == JsonValueKind.Null) - { - continue; - } - type = new InternalCreateChatCompletionRequestResponseFormatType(property.Value.GetString()); - continue; - } - if (true) + switch (discriminator.GetString()) { - rawDataDictionary ??= new Dictionary(); - rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + case "json_object": return InternalChatResponseFormatJsonObject.DeserializeInternalChatResponseFormatJsonObject(element, options); + case "json_schema": return InternalChatResponseFormatJsonSchema.DeserializeInternalChatResponseFormatJsonSchema(element, options); + case "text": return InternalChatResponseFormatText.DeserializeInternalChatResponseFormatText(element, options); } } - serializedAdditionalRawData = rawDataDictionary; - return new ChatResponseFormat(type, serializedAdditionalRawData); + return InternalUnknownChatResponseFormat.DeserializeInternalUnknownChatResponseFormat(element, options); } BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) diff --git a/.dotnet/src/Generated/Models/ChatResponseFormat.cs b/.dotnet/src/Generated/Models/ChatResponseFormat.cs index 78f6d60f9..54eb0a8bd 100644 --- a/.dotnet/src/Generated/Models/ChatResponseFormat.cs +++ b/.dotnet/src/Generated/Models/ChatResponseFormat.cs @@ -7,14 +7,19 @@ namespace OpenAI.Chat { - public partial class ChatResponseFormat + public abstract partial class ChatResponseFormat { internal 
IDictionary SerializedAdditionalRawData { get; set; } + protected ChatResponseFormat() + { + } - internal ChatResponseFormat(InternalCreateChatCompletionRequestResponseFormatType? type, IDictionary serializedAdditionalRawData) + internal ChatResponseFormat(string type, IDictionary serializedAdditionalRawData) { Type = type; SerializedAdditionalRawData = serializedAdditionalRawData; } + + internal string Type { get; set; } } } diff --git a/.dotnet/src/Generated/Models/EmbeddingGenerationOptions.Serialization.cs b/.dotnet/src/Generated/Models/EmbeddingGenerationOptions.Serialization.cs index 8b8c90264..5a2ee5c11 100644 --- a/.dotnet/src/Generated/Models/EmbeddingGenerationOptions.Serialization.cs +++ b/.dotnet/src/Generated/Models/EmbeddingGenerationOptions.Serialization.cs @@ -48,10 +48,10 @@ void IJsonModel.Write(Utf8JsonWriter writer, ModelRe writer.WritePropertyName("dimensions"u8); writer.WriteNumberValue(Dimensions.Value); } - if (SerializedAdditionalRawData?.ContainsKey("user") != true && Optional.IsDefined(User)) + if (SerializedAdditionalRawData?.ContainsKey("user") != true && Optional.IsDefined(EndUserId)) { writer.WritePropertyName("user"u8); - writer.WriteStringValue(User); + writer.WriteStringValue(EndUserId); } if (SerializedAdditionalRawData != null) { diff --git a/.dotnet/src/Generated/Models/EmbeddingGenerationOptions.cs b/.dotnet/src/Generated/Models/EmbeddingGenerationOptions.cs index b9845d838..92ffbca28 100644 --- a/.dotnet/src/Generated/Models/EmbeddingGenerationOptions.cs +++ b/.dotnet/src/Generated/Models/EmbeddingGenerationOptions.cs @@ -11,16 +11,15 @@ public partial class EmbeddingGenerationOptions { internal IDictionary SerializedAdditionalRawData { get; set; } - internal EmbeddingGenerationOptions(BinaryData input, InternalCreateEmbeddingRequestModel model, InternalCreateEmbeddingRequestEncodingFormat? encodingFormat, int? 
dimensions, string user, IDictionary serializedAdditionalRawData) + internal EmbeddingGenerationOptions(BinaryData input, InternalCreateEmbeddingRequestModel model, InternalCreateEmbeddingRequestEncodingFormat? encodingFormat, int? dimensions, string endUserId, IDictionary serializedAdditionalRawData) { Input = input; Model = model; EncodingFormat = encodingFormat; Dimensions = dimensions; - User = user; + EndUserId = endUserId; SerializedAdditionalRawData = serializedAdditionalRawData; } public int? Dimensions { get; set; } - public string User { get; set; } } } diff --git a/.dotnet/src/Generated/Models/FunctionChatMessage.Serialization.cs b/.dotnet/src/Generated/Models/FunctionChatMessage.Serialization.cs index ea0a1d297..398f7d5cc 100644 --- a/.dotnet/src/Generated/Models/FunctionChatMessage.Serialization.cs +++ b/.dotnet/src/Generated/Models/FunctionChatMessage.Serialization.cs @@ -5,6 +5,7 @@ using System; using System.ClientModel; using System.ClientModel.Primitives; +using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Chat @@ -23,6 +24,46 @@ FunctionChatMessage IJsonModel.Create(ref Utf8JsonReader re return DeserializeFunctionChatMessage(document.RootElement, options); } + internal static FunctionChatMessage DeserializeFunctionChatMessage(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string name = default; + ChatMessageRole role = default; + IList content = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("name"u8)) + { + name = property.Value.GetString(); + continue; + } + if (property.NameEquals("role"u8)) + { + role = property.Value.GetString().ToChatMessageRole(); + continue; + } + if (property.NameEquals("content"u8)) + { + 
DeserializeContentValue(property, ref content); + continue; + } + if (true) + { + rawDataDictionary ??= new Dictionary(); + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new FunctionChatMessage(role, content ?? new ChangeTrackingList(), serializedAdditionalRawData, name); + } + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) { var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; diff --git a/.dotnet/src/Generated/Models/FunctionChatMessage.cs b/.dotnet/src/Generated/Models/FunctionChatMessage.cs index 1c814213f..abcbb2170 100644 --- a/.dotnet/src/Generated/Models/FunctionChatMessage.cs +++ b/.dotnet/src/Generated/Models/FunctionChatMessage.cs @@ -10,7 +10,7 @@ namespace OpenAI.Chat [Obsolete("This field is marked as deprecated.")] public partial class FunctionChatMessage : ChatMessage { - internal FunctionChatMessage(string role, IList content, IDictionary serializedAdditionalRawData, string functionName) : base(role, content, serializedAdditionalRawData) + internal FunctionChatMessage(ChatMessageRole role, IList content, IDictionary serializedAdditionalRawData, string functionName) : base(role, content, serializedAdditionalRawData) { FunctionName = functionName; } diff --git a/.dotnet/src/Generated/Models/GeneratedSpeechFormat.Serialization.cs b/.dotnet/src/Generated/Models/GeneratedSpeechFormat.Serialization.cs deleted file mode 100644 index ff1d45f85..000000000 --- a/.dotnet/src/Generated/Models/GeneratedSpeechFormat.Serialization.cs +++ /dev/null @@ -1,33 +0,0 @@ -// - -#nullable disable - -using System; - -namespace OpenAI.Audio -{ - internal static partial class GeneratedSpeechFormatExtensions - { - public static string ToSerialString(this GeneratedSpeechFormat value) => value switch - { - GeneratedSpeechFormat.Mp3 => "mp3", - GeneratedSpeechFormat.Opus => "opus", - 
GeneratedSpeechFormat.Aac => "aac", - GeneratedSpeechFormat.Flac => "flac", - GeneratedSpeechFormat.Wav => "wav", - GeneratedSpeechFormat.Pcm => "pcm", - _ => throw new ArgumentOutOfRangeException(nameof(value), value, "Unknown GeneratedSpeechFormat value.") - }; - - public static GeneratedSpeechFormat ToGeneratedSpeechFormat(this string value) - { - if (StringComparer.OrdinalIgnoreCase.Equals(value, "mp3")) return GeneratedSpeechFormat.Mp3; - if (StringComparer.OrdinalIgnoreCase.Equals(value, "opus")) return GeneratedSpeechFormat.Opus; - if (StringComparer.OrdinalIgnoreCase.Equals(value, "aac")) return GeneratedSpeechFormat.Aac; - if (StringComparer.OrdinalIgnoreCase.Equals(value, "flac")) return GeneratedSpeechFormat.Flac; - if (StringComparer.OrdinalIgnoreCase.Equals(value, "wav")) return GeneratedSpeechFormat.Wav; - if (StringComparer.OrdinalIgnoreCase.Equals(value, "pcm")) return GeneratedSpeechFormat.Pcm; - throw new ArgumentOutOfRangeException(nameof(value), value, "Unknown GeneratedSpeechFormat value."); - } - } -} diff --git a/.dotnet/src/Generated/Models/GeneratedSpeechFormat.cs b/.dotnet/src/Generated/Models/GeneratedSpeechFormat.cs new file mode 100644 index 000000000..e8c256e31 --- /dev/null +++ b/.dotnet/src/Generated/Models/GeneratedSpeechFormat.cs @@ -0,0 +1,44 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Audio +{ + public readonly partial struct GeneratedSpeechFormat : IEquatable + { + private readonly string _value; + + public GeneratedSpeechFormat(string value) + { + _value = value ?? 
throw new ArgumentNullException(nameof(value)); + } + + private const string Mp3Value = "mp3"; + private const string OpusValue = "opus"; + private const string AacValue = "aac"; + private const string FlacValue = "flac"; + private const string WavValue = "wav"; + private const string PcmValue = "pcm"; + + public static GeneratedSpeechFormat Mp3 { get; } = new GeneratedSpeechFormat(Mp3Value); + public static GeneratedSpeechFormat Opus { get; } = new GeneratedSpeechFormat(OpusValue); + public static GeneratedSpeechFormat Aac { get; } = new GeneratedSpeechFormat(AacValue); + public static GeneratedSpeechFormat Flac { get; } = new GeneratedSpeechFormat(FlacValue); + public static GeneratedSpeechFormat Wav { get; } = new GeneratedSpeechFormat(WavValue); + public static GeneratedSpeechFormat Pcm { get; } = new GeneratedSpeechFormat(PcmValue); + public static bool operator ==(GeneratedSpeechFormat left, GeneratedSpeechFormat right) => left.Equals(right); + public static bool operator !=(GeneratedSpeechFormat left, GeneratedSpeechFormat right) => !left.Equals(right); + public static implicit operator GeneratedSpeechFormat(string value) => new GeneratedSpeechFormat(value); + + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is GeneratedSpeechFormat other && Equals(other); + public bool Equals(GeneratedSpeechFormat other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value != null ? 
StringComparer.InvariantCultureIgnoreCase.GetHashCode(_value) : 0; + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/GeneratedSpeechVoice.Serialization.cs b/.dotnet/src/Generated/Models/GeneratedSpeechVoice.Serialization.cs deleted file mode 100644 index b28f7c0d4..000000000 --- a/.dotnet/src/Generated/Models/GeneratedSpeechVoice.Serialization.cs +++ /dev/null @@ -1,33 +0,0 @@ -// - -#nullable disable - -using System; - -namespace OpenAI.Audio -{ - internal static partial class GeneratedSpeechVoiceExtensions - { - public static string ToSerialString(this GeneratedSpeechVoice value) => value switch - { - GeneratedSpeechVoice.Alloy => "alloy", - GeneratedSpeechVoice.Echo => "echo", - GeneratedSpeechVoice.Fable => "fable", - GeneratedSpeechVoice.Onyx => "onyx", - GeneratedSpeechVoice.Nova => "nova", - GeneratedSpeechVoice.Shimmer => "shimmer", - _ => throw new ArgumentOutOfRangeException(nameof(value), value, "Unknown GeneratedSpeechVoice value.") - }; - - public static GeneratedSpeechVoice ToGeneratedSpeechVoice(this string value) - { - if (StringComparer.OrdinalIgnoreCase.Equals(value, "alloy")) return GeneratedSpeechVoice.Alloy; - if (StringComparer.OrdinalIgnoreCase.Equals(value, "echo")) return GeneratedSpeechVoice.Echo; - if (StringComparer.OrdinalIgnoreCase.Equals(value, "fable")) return GeneratedSpeechVoice.Fable; - if (StringComparer.OrdinalIgnoreCase.Equals(value, "onyx")) return GeneratedSpeechVoice.Onyx; - if (StringComparer.OrdinalIgnoreCase.Equals(value, "nova")) return GeneratedSpeechVoice.Nova; - if (StringComparer.OrdinalIgnoreCase.Equals(value, "shimmer")) return GeneratedSpeechVoice.Shimmer; - throw new ArgumentOutOfRangeException(nameof(value), value, "Unknown GeneratedSpeechVoice value."); - } - } -} diff --git a/.dotnet/src/Generated/Models/GeneratedSpeechVoice.cs b/.dotnet/src/Generated/Models/GeneratedSpeechVoice.cs new file mode 100644 index 000000000..14fad7425 --- /dev/null +++ 
b/.dotnet/src/Generated/Models/GeneratedSpeechVoice.cs @@ -0,0 +1,44 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Audio +{ + public readonly partial struct GeneratedSpeechVoice : IEquatable + { + private readonly string _value; + + public GeneratedSpeechVoice(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string AlloyValue = "alloy"; + private const string EchoValue = "echo"; + private const string FableValue = "fable"; + private const string OnyxValue = "onyx"; + private const string NovaValue = "nova"; + private const string ShimmerValue = "shimmer"; + + public static GeneratedSpeechVoice Alloy { get; } = new GeneratedSpeechVoice(AlloyValue); + public static GeneratedSpeechVoice Echo { get; } = new GeneratedSpeechVoice(EchoValue); + public static GeneratedSpeechVoice Fable { get; } = new GeneratedSpeechVoice(FableValue); + public static GeneratedSpeechVoice Onyx { get; } = new GeneratedSpeechVoice(OnyxValue); + public static GeneratedSpeechVoice Nova { get; } = new GeneratedSpeechVoice(NovaValue); + public static GeneratedSpeechVoice Shimmer { get; } = new GeneratedSpeechVoice(ShimmerValue); + public static bool operator ==(GeneratedSpeechVoice left, GeneratedSpeechVoice right) => left.Equals(right); + public static bool operator !=(GeneratedSpeechVoice left, GeneratedSpeechVoice right) => !left.Equals(right); + public static implicit operator GeneratedSpeechVoice(string value) => new GeneratedSpeechVoice(value); + + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is GeneratedSpeechVoice other && Equals(other); + public bool Equals(GeneratedSpeechVoice other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value != null ? 
StringComparer.InvariantCultureIgnoreCase.GetHashCode(_value) : 0; + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/ImageChatMessageContentPartDetail.cs b/.dotnet/src/Generated/Models/ImageChatMessageContentPartDetail.cs deleted file mode 100644 index 137c9a819..000000000 --- a/.dotnet/src/Generated/Models/ImageChatMessageContentPartDetail.cs +++ /dev/null @@ -1,38 +0,0 @@ -// - -#nullable disable - -using System; -using System.ComponentModel; - -namespace OpenAI.Chat -{ - public readonly partial struct ImageChatMessageContentPartDetail : IEquatable - { - private readonly string _value; - - public ImageChatMessageContentPartDetail(string value) - { - _value = value ?? throw new ArgumentNullException(nameof(value)); - } - - private const string AutoValue = "auto"; - private const string LowValue = "low"; - private const string HighValue = "high"; - - public static ImageChatMessageContentPartDetail Auto { get; } = new ImageChatMessageContentPartDetail(AutoValue); - public static ImageChatMessageContentPartDetail Low { get; } = new ImageChatMessageContentPartDetail(LowValue); - public static ImageChatMessageContentPartDetail High { get; } = new ImageChatMessageContentPartDetail(HighValue); - public static bool operator ==(ImageChatMessageContentPartDetail left, ImageChatMessageContentPartDetail right) => left.Equals(right); - public static bool operator !=(ImageChatMessageContentPartDetail left, ImageChatMessageContentPartDetail right) => !left.Equals(right); - public static implicit operator ImageChatMessageContentPartDetail(string value) => new ImageChatMessageContentPartDetail(value); - - [EditorBrowsable(EditorBrowsableState.Never)] - public override bool Equals(object obj) => obj is ImageChatMessageContentPartDetail other && Equals(other); - public bool Equals(ImageChatMessageContentPartDetail other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); - - 
[EditorBrowsable(EditorBrowsableState.Never)] - public override int GetHashCode() => _value != null ? StringComparer.InvariantCultureIgnoreCase.GetHashCode(_value) : 0; - public override string ToString() => _value; - } -} diff --git a/.dotnet/src/Generated/Models/ImageEditOptions.Serialization.cs b/.dotnet/src/Generated/Models/ImageEditOptions.Serialization.cs index 2a36f0792..45e614018 100644 --- a/.dotnet/src/Generated/Models/ImageEditOptions.Serialization.cs +++ b/.dotnet/src/Generated/Models/ImageEditOptions.Serialization.cs @@ -99,10 +99,10 @@ void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriter writer.WriteNull("response_format"); } } - if (SerializedAdditionalRawData?.ContainsKey("user") != true && Optional.IsDefined(User)) + if (SerializedAdditionalRawData?.ContainsKey("user") != true && Optional.IsDefined(EndUserId)) { writer.WritePropertyName("user"u8); - writer.WriteStringValue(User); + writer.WriteStringValue(EndUserId); } if (SerializedAdditionalRawData != null) { @@ -293,9 +293,9 @@ internal virtual MultipartFormDataBinaryContent ToMultipartBinaryBody() content.Add(ResponseFormat.Value.ToSerialString(), "response_format"); } } - if (Optional.IsDefined(User)) + if (Optional.IsDefined(EndUserId)) { - content.Add(User, "user"); + content.Add(EndUserId, "user"); } return content; } diff --git a/.dotnet/src/Generated/Models/ImageEditOptions.cs b/.dotnet/src/Generated/Models/ImageEditOptions.cs index bae4472ef..c46d7879c 100644 --- a/.dotnet/src/Generated/Models/ImageEditOptions.cs +++ b/.dotnet/src/Generated/Models/ImageEditOptions.cs @@ -11,7 +11,7 @@ public partial class ImageEditOptions { internal IDictionary SerializedAdditionalRawData { get; set; } - internal ImageEditOptions(BinaryData image, string prompt, BinaryData mask, InternalCreateImageEditRequestModel? model, long? n, GeneratedImageSize? size, GeneratedImageFormat? 
responseFormat, string user, IDictionary serializedAdditionalRawData) + internal ImageEditOptions(BinaryData image, string prompt, BinaryData mask, InternalCreateImageEditRequestModel? model, long? n, GeneratedImageSize? size, GeneratedImageFormat? responseFormat, string endUserId, IDictionary serializedAdditionalRawData) { Image = image; Prompt = prompt; @@ -20,9 +20,8 @@ internal ImageEditOptions(BinaryData image, string prompt, BinaryData mask, Inte N = n; Size = size; ResponseFormat = responseFormat; - User = user; + EndUserId = endUserId; SerializedAdditionalRawData = serializedAdditionalRawData; } - public string User { get; set; } } } diff --git a/.dotnet/src/Generated/Models/ImageGenerationOptions.Serialization.cs b/.dotnet/src/Generated/Models/ImageGenerationOptions.Serialization.cs index 44a19bd2c..a007fd04a 100644 --- a/.dotnet/src/Generated/Models/ImageGenerationOptions.Serialization.cs +++ b/.dotnet/src/Generated/Models/ImageGenerationOptions.Serialization.cs @@ -91,10 +91,10 @@ void IJsonModel.Write(Utf8JsonWriter writer, ModelReader writer.WriteNull("style"); } } - if (SerializedAdditionalRawData?.ContainsKey("user") != true && Optional.IsDefined(User)) + if (SerializedAdditionalRawData?.ContainsKey("user") != true && Optional.IsDefined(EndUserId)) { writer.WritePropertyName("user"u8); - writer.WriteStringValue(User); + writer.WriteStringValue(EndUserId); } if (SerializedAdditionalRawData != null) { diff --git a/.dotnet/src/Generated/Models/ImageGenerationOptions.cs b/.dotnet/src/Generated/Models/ImageGenerationOptions.cs index e5f92429f..e7d9311dc 100644 --- a/.dotnet/src/Generated/Models/ImageGenerationOptions.cs +++ b/.dotnet/src/Generated/Models/ImageGenerationOptions.cs @@ -11,7 +11,7 @@ public partial class ImageGenerationOptions { internal IDictionary SerializedAdditionalRawData { get; set; } - internal ImageGenerationOptions(string prompt, InternalCreateImageRequestModel? model, long? n, GeneratedImageQuality? quality, GeneratedImageFormat? 
responseFormat, GeneratedImageSize? size, GeneratedImageStyle? style, string user, IDictionary serializedAdditionalRawData) + internal ImageGenerationOptions(string prompt, InternalCreateImageRequestModel? model, long? n, GeneratedImageQuality? quality, GeneratedImageFormat? responseFormat, GeneratedImageSize? size, GeneratedImageStyle? style, string endUserId, IDictionary serializedAdditionalRawData) { Prompt = prompt; Model = model; @@ -20,13 +20,12 @@ internal ImageGenerationOptions(string prompt, InternalCreateImageRequestModel? ResponseFormat = responseFormat; Size = size; Style = style; - User = user; + EndUserId = endUserId; SerializedAdditionalRawData = serializedAdditionalRawData; } public GeneratedImageQuality? Quality { get; set; } public GeneratedImageFormat? ResponseFormat { get; set; } public GeneratedImageSize? Size { get; set; } public GeneratedImageStyle? Style { get; set; } - public string User { get; set; } } } diff --git a/.dotnet/src/Generated/Models/ImageVariationOptions.Serialization.cs b/.dotnet/src/Generated/Models/ImageVariationOptions.Serialization.cs index 1ba8f7a15..aea32c3d6 100644 --- a/.dotnet/src/Generated/Models/ImageVariationOptions.Serialization.cs +++ b/.dotnet/src/Generated/Models/ImageVariationOptions.Serialization.cs @@ -82,10 +82,10 @@ void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderW writer.WriteNull("size"); } } - if (SerializedAdditionalRawData?.ContainsKey("user") != true && Optional.IsDefined(User)) + if (SerializedAdditionalRawData?.ContainsKey("user") != true && Optional.IsDefined(EndUserId)) { writer.WritePropertyName("user"u8); - writer.WriteStringValue(User); + writer.WriteStringValue(EndUserId); } if (SerializedAdditionalRawData != null) { @@ -253,9 +253,9 @@ internal virtual MultipartFormDataBinaryContent ToMultipartBinaryBody() content.Add(Size.Value.ToString(), "size"); } } - if (Optional.IsDefined(User)) + if (Optional.IsDefined(EndUserId)) { - content.Add(User, "user"); + content.Add(EndUserId, 
"user"); } return content; } diff --git a/.dotnet/src/Generated/Models/ImageVariationOptions.cs b/.dotnet/src/Generated/Models/ImageVariationOptions.cs index d4b9a8cf2..6fac2f976 100644 --- a/.dotnet/src/Generated/Models/ImageVariationOptions.cs +++ b/.dotnet/src/Generated/Models/ImageVariationOptions.cs @@ -11,16 +11,15 @@ public partial class ImageVariationOptions { internal IDictionary SerializedAdditionalRawData { get; set; } - internal ImageVariationOptions(BinaryData image, InternalCreateImageVariationRequestModel? model, long? n, GeneratedImageFormat? responseFormat, GeneratedImageSize? size, string user, IDictionary serializedAdditionalRawData) + internal ImageVariationOptions(BinaryData image, InternalCreateImageVariationRequestModel? model, long? n, GeneratedImageFormat? responseFormat, GeneratedImageSize? size, string endUserId, IDictionary serializedAdditionalRawData) { Image = image; Model = model; N = n; ResponseFormat = responseFormat; Size = size; - User = user; + EndUserId = endUserId; SerializedAdditionalRawData = serializedAdditionalRawData; } - public string User { get; set; } } } diff --git a/.dotnet/src/Generated/Models/InternalAddUploadPartRequest.Serialization.cs b/.dotnet/src/Generated/Models/InternalAddUploadPartRequest.Serialization.cs new file mode 100644 index 000000000..f76b44afb --- /dev/null +++ b/.dotnet/src/Generated/Models/InternalAddUploadPartRequest.Serialization.cs @@ -0,0 +1,165 @@ +// + +#nullable disable + +using System; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.IO; +using System.Text.Json; + +namespace OpenAI.Files +{ + internal partial class InternalAddUploadPartRequest : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(InternalAddUploadPartRequest)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (SerializedAdditionalRawData?.ContainsKey("data") != true) + { + writer.WritePropertyName("data"u8); +#if NET6_0_OR_GREATER + writer.WriteRawValue(global::System.BinaryData.FromStream(Data)); +#else + using (JsonDocument document = JsonDocument.Parse(BinaryData.FromStream(Data))) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + if (SerializedAdditionalRawData != null) + { + foreach (var item in SerializedAdditionalRawData) + { + if (ModelSerializationExtensions.IsSentinelValue(item.Value)) + { + continue; + } + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + InternalAddUploadPartRequest IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(InternalAddUploadPartRequest)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeInternalAddUploadPartRequest(document.RootElement, options); + } + + internal static InternalAddUploadPartRequest DeserializeInternalAddUploadPartRequest(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + Stream data = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("data"u8)) + { + data = BinaryData.FromString(property.Value.GetRawText()).ToStream(); + continue; + } + if (true) + { + rawDataDictionary ??= new Dictionary(); + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new InternalAddUploadPartRequest(data, serializedAdditionalRawData); + } + + private BinaryData SerializeMultipart(ModelReaderWriterOptions options) + { + using MultipartFormDataBinaryContent content = ToMultipartBinaryBody(); + using MemoryStream stream = new MemoryStream(); + content.WriteTo(stream); + if (stream.Position > int.MaxValue) + { + return BinaryData.FromStream(stream); + } + else + { + return new BinaryData(stream.GetBuffer().AsMemory(0, (int)stream.Position)); + } + } + + internal virtual MultipartFormDataBinaryContent ToMultipartBinaryBody() + { + MultipartFormDataBinaryContent content = new MultipartFormDataBinaryContent(); + content.Add(Data, "data", "data", "application/octet-stream"); + return content; + } + + BinaryData 
IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + case "MFD": + return SerializeMultipart(options); + default: + throw new FormatException($"The model {nameof(InternalAddUploadPartRequest)} does not support writing '{options.Format}' format."); + } + } + + InternalAddUploadPartRequest IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeInternalAddUploadPartRequest(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(InternalAddUploadPartRequest)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "MFD"; + + internal static InternalAddUploadPartRequest FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeInternalAddUploadPartRequest(document.RootElement); + } + + internal virtual BinaryContent ToBinaryContent() + { + return BinaryContent.Create(this, ModelSerializationExtensions.WireOptions); + } + } +} diff --git a/.dotnet/src/Generated/Models/InternalAddUploadPartRequest.cs b/.dotnet/src/Generated/Models/InternalAddUploadPartRequest.cs new file mode 100644 index 000000000..f6035f34b --- /dev/null +++ b/.dotnet/src/Generated/Models/InternalAddUploadPartRequest.cs @@ -0,0 +1,33 @@ +// + +#nullable disable + +using System; +using System.Collections.Generic; +using System.IO; + +namespace OpenAI.Files +{ + internal partial class InternalAddUploadPartRequest + { + internal IDictionary 
SerializedAdditionalRawData { get; set; } + public InternalAddUploadPartRequest(Stream data) + { + Argument.AssertNotNull(data, nameof(data)); + + Data = data; + } + + internal InternalAddUploadPartRequest(Stream data, IDictionary serializedAdditionalRawData) + { + Data = data; + SerializedAdditionalRawData = serializedAdditionalRawData; + } + + internal InternalAddUploadPartRequest() + { + } + + public Stream Data { get; } + } +} diff --git a/.dotnet/src/Generated/Models/InternalAssistantResponseFormatJsonObject.Serialization.cs b/.dotnet/src/Generated/Models/InternalAssistantResponseFormatJsonObject.Serialization.cs new file mode 100644 index 000000000..80afbf5f6 --- /dev/null +++ b/.dotnet/src/Generated/Models/InternalAssistantResponseFormatJsonObject.Serialization.cs @@ -0,0 +1,122 @@ +// + +#nullable disable + +using System; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Assistants +{ + internal partial class InternalAssistantResponseFormatJsonObject : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(InternalAssistantResponseFormatJsonObject)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (SerializedAdditionalRawData?.ContainsKey("type") != true) + { + writer.WritePropertyName("type"u8); + writer.WriteStringValue(Type); + } + if (SerializedAdditionalRawData != null) + { + foreach (var item in SerializedAdditionalRawData) + { + if (ModelSerializationExtensions.IsSentinelValue(item.Value)) + { + continue; + } + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + InternalAssistantResponseFormatJsonObject IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(InternalAssistantResponseFormatJsonObject)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeInternalAssistantResponseFormatJsonObject(document.RootElement, options); + } + + internal static InternalAssistantResponseFormatJsonObject DeserializeInternalAssistantResponseFormatJsonObject(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string type = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("type"u8)) + { + type = property.Value.GetString(); + continue; + } + if (true) + { + rawDataDictionary ??= new Dictionary(); + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new InternalAssistantResponseFormatJsonObject(type, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(InternalAssistantResponseFormatJsonObject)} does not support writing '{options.Format}' format."); + } + } + + InternalAssistantResponseFormatJsonObject IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeInternalAssistantResponseFormatJsonObject(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(InternalAssistantResponseFormatJsonObject)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + } +} diff --git a/.dotnet/src/Generated/Models/InternalAssistantResponseFormatJsonObject.cs b/.dotnet/src/Generated/Models/InternalAssistantResponseFormatJsonObject.cs new file mode 100644 index 000000000..b6caf3d33 --- /dev/null +++ b/.dotnet/src/Generated/Models/InternalAssistantResponseFormatJsonObject.cs @@ -0,0 +1,21 @@ +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace OpenAI.Assistants +{ + internal partial class InternalAssistantResponseFormatJsonObject : AssistantResponseFormat + { + internal InternalAssistantResponseFormatJsonObject() + { + Type = "json_object"; + } + + internal InternalAssistantResponseFormatJsonObject(string type, IDictionary serializedAdditionalRawData) : base(type, serializedAdditionalRawData) + { + } + } +} diff --git a/.dotnet/src/Generated/Models/InternalAssistantResponseFormatJsonSchema.Serialization.cs b/.dotnet/src/Generated/Models/InternalAssistantResponseFormatJsonSchema.Serialization.cs new file mode 100644 index 000000000..24865dbf9 --- /dev/null +++ b/.dotnet/src/Generated/Models/InternalAssistantResponseFormatJsonSchema.Serialization.cs @@ -0,0 +1,134 @@ +// + +#nullable disable + +using System; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI.Internal; + +namespace OpenAI.Assistants +{ + internal partial class InternalAssistantResponseFormatJsonSchema : IJsonModel + { + void 
IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(InternalAssistantResponseFormatJsonSchema)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (SerializedAdditionalRawData?.ContainsKey("json_schema") != true) + { + writer.WritePropertyName("json_schema"u8); + writer.WriteObjectValue(JsonSchema, options); + } + if (SerializedAdditionalRawData?.ContainsKey("type") != true) + { + writer.WritePropertyName("type"u8); + writer.WriteStringValue(Type); + } + if (SerializedAdditionalRawData != null) + { + foreach (var item in SerializedAdditionalRawData) + { + if (ModelSerializationExtensions.IsSentinelValue(item.Value)) + { + continue; + } + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + InternalAssistantResponseFormatJsonSchema IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(InternalAssistantResponseFormatJsonSchema)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeInternalAssistantResponseFormatJsonSchema(document.RootElement, options); + } + + internal static InternalAssistantResponseFormatJsonSchema DeserializeInternalAssistantResponseFormatJsonSchema(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + InternalResponseFormatJsonSchemaJsonSchema jsonSchema = default; + string type = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("json_schema"u8)) + { + jsonSchema = InternalResponseFormatJsonSchemaJsonSchema.DeserializeInternalResponseFormatJsonSchemaJsonSchema(property.Value, options); + continue; + } + if (property.NameEquals("type"u8)) + { + type = property.Value.GetString(); + continue; + } + if (true) + { + rawDataDictionary ??= new Dictionary(); + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new InternalAssistantResponseFormatJsonSchema(type, serializedAdditionalRawData, jsonSchema); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(InternalAssistantResponseFormatJsonSchema)} does not support writing '{options.Format}' format."); + } + } + + InternalAssistantResponseFormatJsonSchema IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeInternalAssistantResponseFormatJsonSchema(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(InternalAssistantResponseFormatJsonSchema)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + } +} diff --git a/.dotnet/src/Generated/Models/InternalAssistantResponseFormatJsonSchema.cs b/.dotnet/src/Generated/Models/InternalAssistantResponseFormatJsonSchema.cs new file mode 100644 index 000000000..d2b71a861 --- /dev/null +++ b/.dotnet/src/Generated/Models/InternalAssistantResponseFormatJsonSchema.cs @@ -0,0 +1,32 @@ +// + +#nullable disable + +using System; +using System.Collections.Generic; +using OpenAI.Internal; + +namespace OpenAI.Assistants +{ + internal partial class InternalAssistantResponseFormatJsonSchema : AssistantResponseFormat + { + internal InternalAssistantResponseFormatJsonSchema(InternalResponseFormatJsonSchemaJsonSchema jsonSchema) + { + Argument.AssertNotNull(jsonSchema, nameof(jsonSchema)); + + Type = "json_schema"; + JsonSchema = jsonSchema; + } + + internal InternalAssistantResponseFormatJsonSchema(string type, IDictionary serializedAdditionalRawData, InternalResponseFormatJsonSchemaJsonSchema jsonSchema) : base(type, 
serializedAdditionalRawData) + { + JsonSchema = jsonSchema; + } + + internal InternalAssistantResponseFormatJsonSchema() + { + } + + public InternalResponseFormatJsonSchemaJsonSchema JsonSchema { get; } + } +} diff --git a/.dotnet/src/Generated/Models/InternalAssistantsApiResponseFormat.Serialization.cs b/.dotnet/src/Generated/Models/InternalAssistantResponseFormatText.Serialization.cs similarity index 60% rename from .dotnet/src/Generated/Models/InternalAssistantsApiResponseFormat.Serialization.cs rename to .dotnet/src/Generated/Models/InternalAssistantResponseFormatText.Serialization.cs index f1c047365..a2ae06eea 100644 --- a/.dotnet/src/Generated/Models/InternalAssistantsApiResponseFormat.Serialization.cs +++ b/.dotnet/src/Generated/Models/InternalAssistantResponseFormatText.Serialization.cs @@ -10,21 +10,21 @@ namespace OpenAI.Assistants { - internal partial class InternalAssistantsApiResponseFormat : IJsonModel + internal partial class InternalAssistantResponseFormatText : IJsonModel { - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(InternalAssistantsApiResponseFormat)} does not support writing '{format}' format."); + throw new FormatException($"The model {nameof(InternalAssistantResponseFormatText)} does not support writing '{format}' format."); } writer.WriteStartObject(); - if (SerializedAdditionalRawData?.ContainsKey("type") != true && Optional.IsDefined(Type)) + if (SerializedAdditionalRawData?.ContainsKey("type") != true) { writer.WritePropertyName("type"u8); - writer.WriteStringValue(Type.Value.ToString()); + writer.WriteStringValue(Type); } if (SerializedAdditionalRawData != null) { @@ -48,19 +48,19 @@ void IJsonModel.Write(Utf8JsonWriter writer writer.WriteEndObject(); } - InternalAssistantsApiResponseFormat IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + InternalAssistantResponseFormatText IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(InternalAssistantsApiResponseFormat)} does not support reading '{format}' format."); + throw new FormatException($"The model {nameof(InternalAssistantResponseFormatText)} does not support reading '{format}' format."); } using JsonDocument document = JsonDocument.ParseValue(ref reader); - return DeserializeInternalAssistantsApiResponseFormat(document.RootElement, options); + return DeserializeInternalAssistantResponseFormatText(document.RootElement, options); } - internal static InternalAssistantsApiResponseFormat DeserializeInternalAssistantsApiResponseFormat(JsonElement element, ModelReaderWriterOptions options = null) + internal static InternalAssistantResponseFormatText DeserializeInternalAssistantResponseFormatText(JsonElement element, ModelReaderWriterOptions options = null) { options ??= ModelSerializationExtensions.WireOptions; @@ -68,18 +68,14 @@ internal static InternalAssistantsApiResponseFormat DeserializeInternalAssistant { return null; } - InternalAssistantsApiResponseFormatType? 
type = default; + string type = default; IDictionary serializedAdditionalRawData = default; Dictionary rawDataDictionary = new Dictionary(); foreach (var property in element.EnumerateObject()) { if (property.NameEquals("type"u8)) { - if (property.Value.ValueKind == JsonValueKind.Null) - { - continue; - } - type = new InternalAssistantsApiResponseFormatType(property.Value.GetString()); + type = property.Value.GetString(); continue; } if (true) @@ -89,49 +85,38 @@ internal static InternalAssistantsApiResponseFormat DeserializeInternalAssistant } } serializedAdditionalRawData = rawDataDictionary; - return new InternalAssistantsApiResponseFormat(type, serializedAdditionalRawData); + return new InternalAssistantResponseFormatText(type, serializedAdditionalRawData); } - BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": return ModelReaderWriter.Write(this, options); default: - throw new FormatException($"The model {nameof(InternalAssistantsApiResponseFormat)} does not support writing '{options.Format}' format."); + throw new FormatException($"The model {nameof(InternalAssistantResponseFormatText)} does not support writing '{options.Format}' format."); } } - InternalAssistantsApiResponseFormat IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + InternalAssistantResponseFormatText IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": { using JsonDocument document = JsonDocument.Parse(data); - return DeserializeInternalAssistantsApiResponseFormat(document.RootElement, options); + return DeserializeInternalAssistantResponseFormatText(document.RootElement, options); } default: - throw new FormatException($"The model {nameof(InternalAssistantsApiResponseFormat)} does not support reading '{options.Format}' format."); + throw new FormatException($"The model {nameof(InternalAssistantResponseFormatText)} does not support reading '{options.Format}' format."); } } - string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; - - internal static InternalAssistantsApiResponseFormat FromResponse(PipelineResponse response) - { - using var document = JsonDocument.Parse(response.Content); - return DeserializeInternalAssistantsApiResponseFormat(document.RootElement); - } - - internal virtual BinaryContent ToBinaryContent() - { - return BinaryContent.Create(this, ModelSerializationExtensions.WireOptions); - } + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; } } diff --git a/.dotnet/src/Generated/Models/InternalAssistantResponseFormatText.cs b/.dotnet/src/Generated/Models/InternalAssistantResponseFormatText.cs new file mode 100644 index 000000000..899b65998 --- /dev/null +++ b/.dotnet/src/Generated/Models/InternalAssistantResponseFormatText.cs @@ -0,0 +1,21 @@ +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace OpenAI.Assistants +{ + internal partial class InternalAssistantResponseFormatText : AssistantResponseFormat + { + internal InternalAssistantResponseFormatText() + { + Type = "text"; + } + + internal InternalAssistantResponseFormatText(string type, IDictionary serializedAdditionalRawData) : base(type, serializedAdditionalRawData) + { + } + } +} diff --git 
a/.dotnet/src/Generated/Models/InternalAssistantsApiResponseFormat.cs b/.dotnet/src/Generated/Models/InternalAssistantsApiResponseFormat.cs deleted file mode 100644 index 9333fb55c..000000000 --- a/.dotnet/src/Generated/Models/InternalAssistantsApiResponseFormat.cs +++ /dev/null @@ -1,25 +0,0 @@ -// - -#nullable disable - -using System; -using System.Collections.Generic; - -namespace OpenAI.Assistants -{ - internal partial class InternalAssistantsApiResponseFormat - { - internal IDictionary SerializedAdditionalRawData { get; set; } - public InternalAssistantsApiResponseFormat() - { - } - - internal InternalAssistantsApiResponseFormat(InternalAssistantsApiResponseFormatType? type, IDictionary serializedAdditionalRawData) - { - Type = type; - SerializedAdditionalRawData = serializedAdditionalRawData; - } - - public InternalAssistantsApiResponseFormatType? Type { get; set; } - } -} diff --git a/.dotnet/src/Generated/Models/InternalAssistantsApiResponseFormatType.cs b/.dotnet/src/Generated/Models/InternalAssistantsApiResponseFormatType.cs deleted file mode 100644 index 60e13d230..000000000 --- a/.dotnet/src/Generated/Models/InternalAssistantsApiResponseFormatType.cs +++ /dev/null @@ -1,36 +0,0 @@ -// - -#nullable disable - -using System; -using System.ComponentModel; - -namespace OpenAI.Assistants -{ - internal readonly partial struct InternalAssistantsApiResponseFormatType : IEquatable - { - private readonly string _value; - - public InternalAssistantsApiResponseFormatType(string value) - { - _value = value ?? 
throw new ArgumentNullException(nameof(value)); - } - - private const string TextValue = "text"; - private const string JsonObjectValue = "json_object"; - - public static InternalAssistantsApiResponseFormatType Text { get; } = new InternalAssistantsApiResponseFormatType(TextValue); - public static InternalAssistantsApiResponseFormatType JsonObject { get; } = new InternalAssistantsApiResponseFormatType(JsonObjectValue); - public static bool operator ==(InternalAssistantsApiResponseFormatType left, InternalAssistantsApiResponseFormatType right) => left.Equals(right); - public static bool operator !=(InternalAssistantsApiResponseFormatType left, InternalAssistantsApiResponseFormatType right) => !left.Equals(right); - public static implicit operator InternalAssistantsApiResponseFormatType(string value) => new InternalAssistantsApiResponseFormatType(value); - - [EditorBrowsable(EditorBrowsableState.Never)] - public override bool Equals(object obj) => obj is InternalAssistantsApiResponseFormatType other && Equals(other); - public bool Equals(InternalAssistantsApiResponseFormatType other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); - - [EditorBrowsable(EditorBrowsableState.Never)] - public override int GetHashCode() => _value != null ? 
StringComparer.InvariantCultureIgnoreCase.GetHashCode(_value) : 0; - public override string ToString() => _value; - } -} diff --git a/.dotnet/src/Generated/Models/InternalBatchRequestOutputResponse.Serialization.cs b/.dotnet/src/Generated/Models/InternalBatchRequestOutputResponse.Serialization.cs index 54280475f..5f48d8090 100644 --- a/.dotnet/src/Generated/Models/InternalBatchRequestOutputResponse.Serialization.cs +++ b/.dotnet/src/Generated/Models/InternalBatchRequestOutputResponse.Serialization.cs @@ -38,7 +38,19 @@ void IJsonModel.Write(Utf8JsonWriter writer, foreach (var item in Body) { writer.WritePropertyName(item.Key); - writer.WriteStringValue(item.Value); + if (item.Value == null) + { + writer.WriteNullValue(); + continue; + } +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif } writer.WriteEndObject(); } @@ -86,7 +98,7 @@ internal static InternalBatchRequestOutputResponse DeserializeInternalBatchReque } int? 
statusCode = default; string requestId = default; - IReadOnlyDictionary body = default; + IReadOnlyDictionary body = default; IDictionary serializedAdditionalRawData = default; Dictionary rawDataDictionary = new Dictionary(); foreach (var property in element.EnumerateObject()) @@ -111,10 +123,17 @@ internal static InternalBatchRequestOutputResponse DeserializeInternalBatchReque { continue; } - Dictionary dictionary = new Dictionary(); + Dictionary dictionary = new Dictionary(); foreach (var property0 in property.Value.EnumerateObject()) { - dictionary.Add(property0.Name, property0.Value.GetString()); + if (property0.Value.ValueKind == JsonValueKind.Null) + { + dictionary.Add(property0.Name, null); + } + else + { + dictionary.Add(property0.Name, BinaryData.FromString(property0.Value.GetRawText())); + } } body = dictionary; continue; @@ -126,7 +145,7 @@ internal static InternalBatchRequestOutputResponse DeserializeInternalBatchReque } } serializedAdditionalRawData = rawDataDictionary; - return new InternalBatchRequestOutputResponse(statusCode, requestId, body ?? new ChangeTrackingDictionary(), serializedAdditionalRawData); + return new InternalBatchRequestOutputResponse(statusCode, requestId, body ?? 
new ChangeTrackingDictionary(), serializedAdditionalRawData); } BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) diff --git a/.dotnet/src/Generated/Models/InternalBatchRequestOutputResponse.cs b/.dotnet/src/Generated/Models/InternalBatchRequestOutputResponse.cs index 2919fec21..8435f20dc 100644 --- a/.dotnet/src/Generated/Models/InternalBatchRequestOutputResponse.cs +++ b/.dotnet/src/Generated/Models/InternalBatchRequestOutputResponse.cs @@ -12,10 +12,10 @@ internal partial class InternalBatchRequestOutputResponse internal IDictionary SerializedAdditionalRawData { get; set; } internal InternalBatchRequestOutputResponse() { - Body = new ChangeTrackingDictionary(); + Body = new ChangeTrackingDictionary(); } - internal InternalBatchRequestOutputResponse(int? statusCode, string requestId, IReadOnlyDictionary body, IDictionary serializedAdditionalRawData) + internal InternalBatchRequestOutputResponse(int? statusCode, string requestId, IReadOnlyDictionary body, IDictionary serializedAdditionalRawData) { StatusCode = statusCode; RequestId = requestId; @@ -25,6 +25,6 @@ internal InternalBatchRequestOutputResponse(int? statusCode, string requestId, I public int? StatusCode { get; } public string RequestId { get; } - public IReadOnlyDictionary Body { get; } + public IReadOnlyDictionary Body { get; } } } diff --git a/.dotnet/src/Generated/Models/InternalChatCompletionRequestMessageContentPartImageImageUrl.Serialization.cs b/.dotnet/src/Generated/Models/InternalChatCompletionRequestMessageContentPartImageImageUrl.Serialization.cs index f094173d0..e84cdd271 100644 --- a/.dotnet/src/Generated/Models/InternalChatCompletionRequestMessageContentPartImageImageUrl.Serialization.cs +++ b/.dotnet/src/Generated/Models/InternalChatCompletionRequestMessageContentPartImageImageUrl.Serialization.cs @@ -74,7 +74,7 @@ internal static InternalChatCompletionRequestMessageContentPartImageImageUrl Des return null; } string url = default; - ImageChatMessageContentPartDetail? 
detail = default; + ChatImageDetailLevel? detail = default; IDictionary serializedAdditionalRawData = default; Dictionary rawDataDictionary = new Dictionary(); foreach (var property in element.EnumerateObject()) @@ -90,7 +90,7 @@ internal static InternalChatCompletionRequestMessageContentPartImageImageUrl Des { continue; } - detail = new ImageChatMessageContentPartDetail(property.Value.GetString()); + detail = new ChatImageDetailLevel(property.Value.GetString()); continue; } if (true) diff --git a/.dotnet/src/Generated/Models/InternalChatCompletionRequestMessageContentPartImageImageUrl.cs b/.dotnet/src/Generated/Models/InternalChatCompletionRequestMessageContentPartImageImageUrl.cs index 1ff5dc332..e0e87271f 100644 --- a/.dotnet/src/Generated/Models/InternalChatCompletionRequestMessageContentPartImageImageUrl.cs +++ b/.dotnet/src/Generated/Models/InternalChatCompletionRequestMessageContentPartImageImageUrl.cs @@ -14,6 +14,6 @@ internal partial class InternalChatCompletionRequestMessageContentPartImageImage internal InternalChatCompletionRequestMessageContentPartImageImageUrl() { } - public ImageChatMessageContentPartDetail? Detail { get; set; } + public ChatImageDetailLevel? 
Detail { get; set; } } } diff --git a/.dotnet/src/Generated/Models/InternalChatCompletionRequestMessageContentPartRefusal.Serialization.cs b/.dotnet/src/Generated/Models/InternalChatCompletionRequestMessageContentPartRefusal.Serialization.cs new file mode 100644 index 000000000..093b83c78 --- /dev/null +++ b/.dotnet/src/Generated/Models/InternalChatCompletionRequestMessageContentPartRefusal.Serialization.cs @@ -0,0 +1,144 @@ +// + +#nullable disable + +using System; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Chat +{ + internal partial class InternalChatCompletionRequestMessageContentPartRefusal : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(InternalChatCompletionRequestMessageContentPartRefusal)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (SerializedAdditionalRawData?.ContainsKey("type") != true) + { + writer.WritePropertyName("type"u8); + writer.WriteStringValue(Type.ToString()); + } + if (SerializedAdditionalRawData?.ContainsKey("refusal") != true) + { + writer.WritePropertyName("refusal"u8); + writer.WriteStringValue(Refusal); + } + if (SerializedAdditionalRawData != null) + { + foreach (var item in SerializedAdditionalRawData) + { + if (ModelSerializationExtensions.IsSentinelValue(item.Value)) + { + continue; + } + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + InternalChatCompletionRequestMessageContentPartRefusal IJsonModel.Create(ref 
Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(InternalChatCompletionRequestMessageContentPartRefusal)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeInternalChatCompletionRequestMessageContentPartRefusal(document.RootElement, options); + } + + internal static InternalChatCompletionRequestMessageContentPartRefusal DeserializeInternalChatCompletionRequestMessageContentPartRefusal(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + InternalChatCompletionRequestMessageContentPartRefusalType type = default; + string refusal = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("type"u8)) + { + type = new InternalChatCompletionRequestMessageContentPartRefusalType(property.Value.GetString()); + continue; + } + if (property.NameEquals("refusal"u8)) + { + refusal = property.Value.GetString(); + continue; + } + if (true) + { + rawDataDictionary ??= new Dictionary(); + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new InternalChatCompletionRequestMessageContentPartRefusal(type, refusal, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(InternalChatCompletionRequestMessageContentPartRefusal)} does not support writing '{options.Format}' format."); + } + } + + InternalChatCompletionRequestMessageContentPartRefusal IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeInternalChatCompletionRequestMessageContentPartRefusal(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(InternalChatCompletionRequestMessageContentPartRefusal)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + internal static InternalChatCompletionRequestMessageContentPartRefusal FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeInternalChatCompletionRequestMessageContentPartRefusal(document.RootElement); + } + + internal virtual BinaryContent ToBinaryContent() + { + return BinaryContent.Create(this, ModelSerializationExtensions.WireOptions); + } + } +} diff --git a/.dotnet/src/Generated/Models/InternalChatCompletionRequestMessageContentPartRefusal.cs b/.dotnet/src/Generated/Models/InternalChatCompletionRequestMessageContentPartRefusal.cs new file mode 100644 index 000000000..ea0eaa097 --- /dev/null +++ b/.dotnet/src/Generated/Models/InternalChatCompletionRequestMessageContentPartRefusal.cs @@ -0,0 +1,35 @@ +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace OpenAI.Chat +{ + internal partial class 
InternalChatCompletionRequestMessageContentPartRefusal + { + internal IDictionary SerializedAdditionalRawData { get; set; } + public InternalChatCompletionRequestMessageContentPartRefusal(string refusal) + { + Argument.AssertNotNull(refusal, nameof(refusal)); + + Refusal = refusal; + } + + internal InternalChatCompletionRequestMessageContentPartRefusal(InternalChatCompletionRequestMessageContentPartRefusalType type, string refusal, IDictionary serializedAdditionalRawData) + { + Type = type; + Refusal = refusal; + SerializedAdditionalRawData = serializedAdditionalRawData; + } + + internal InternalChatCompletionRequestMessageContentPartRefusal() + { + } + + public InternalChatCompletionRequestMessageContentPartRefusalType Type { get; } = InternalChatCompletionRequestMessageContentPartRefusalType.Refusal; + + public string Refusal { get; } + } +} diff --git a/.dotnet/src/Generated/Models/InternalChatCompletionRequestMessageContentPartRefusalType.cs b/.dotnet/src/Generated/Models/InternalChatCompletionRequestMessageContentPartRefusalType.cs new file mode 100644 index 000000000..d1c3e8498 --- /dev/null +++ b/.dotnet/src/Generated/Models/InternalChatCompletionRequestMessageContentPartRefusalType.cs @@ -0,0 +1,34 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Chat +{ + internal readonly partial struct InternalChatCompletionRequestMessageContentPartRefusalType : IEquatable + { + private readonly string _value; + + public InternalChatCompletionRequestMessageContentPartRefusalType(string value) + { + _value = value ?? 
throw new ArgumentNullException(nameof(value)); + } + + private const string RefusalValue = "refusal"; + + public static InternalChatCompletionRequestMessageContentPartRefusalType Refusal { get; } = new InternalChatCompletionRequestMessageContentPartRefusalType(RefusalValue); + public static bool operator ==(InternalChatCompletionRequestMessageContentPartRefusalType left, InternalChatCompletionRequestMessageContentPartRefusalType right) => left.Equals(right); + public static bool operator !=(InternalChatCompletionRequestMessageContentPartRefusalType left, InternalChatCompletionRequestMessageContentPartRefusalType right) => !left.Equals(right); + public static implicit operator InternalChatCompletionRequestMessageContentPartRefusalType(string value) => new InternalChatCompletionRequestMessageContentPartRefusalType(value); + + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is InternalChatCompletionRequestMessageContentPartRefusalType other && Equals(other); + public bool Equals(InternalChatCompletionRequestMessageContentPartRefusalType other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value != null ? 
StringComparer.InvariantCultureIgnoreCase.GetHashCode(_value) : 0; + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/InternalChatCompletionResponseMessage.cs b/.dotnet/src/Generated/Models/InternalChatCompletionResponseMessage.cs index 114a9ae55..f29262217 100644 --- a/.dotnet/src/Generated/Models/InternalChatCompletionResponseMessage.cs +++ b/.dotnet/src/Generated/Models/InternalChatCompletionResponseMessage.cs @@ -11,10 +11,17 @@ namespace OpenAI.Chat internal partial class InternalChatCompletionResponseMessage { internal IDictionary SerializedAdditionalRawData { get; set; } + internal InternalChatCompletionResponseMessage(IEnumerable content, string refusal) + { + Content = content?.ToList(); + Refusal = refusal; + ToolCalls = new ChangeTrackingList(); + } - internal InternalChatCompletionResponseMessage(IReadOnlyList content, IReadOnlyList toolCalls, ChatMessageRole role, ChatFunctionCall functionCall, IDictionary serializedAdditionalRawData) + internal InternalChatCompletionResponseMessage(IReadOnlyList content, string refusal, IReadOnlyList toolCalls, ChatMessageRole role, ChatFunctionCall functionCall, IDictionary serializedAdditionalRawData) { Content = content; + Refusal = refusal; ToolCalls = toolCalls; Role = role; FunctionCall = functionCall; @@ -24,6 +31,7 @@ internal InternalChatCompletionResponseMessage(IReadOnlyList ToolCalls { get; } } } diff --git a/.dotnet/src/Generated/Models/InternalChatCompletionStreamResponseDelta.cs b/.dotnet/src/Generated/Models/InternalChatCompletionStreamResponseDelta.cs index 34085b941..9bdd14f0a 100644 --- a/.dotnet/src/Generated/Models/InternalChatCompletionStreamResponseDelta.cs +++ b/.dotnet/src/Generated/Models/InternalChatCompletionStreamResponseDelta.cs @@ -11,15 +11,17 @@ internal partial class InternalChatCompletionStreamResponseDelta { internal IDictionary SerializedAdditionalRawData { get; set; } - internal InternalChatCompletionStreamResponseDelta(IReadOnlyList 
content, StreamingChatFunctionCallUpdate functionCall, IReadOnlyList toolCalls, ChatMessageRole? role, IDictionary serializedAdditionalRawData) + internal InternalChatCompletionStreamResponseDelta(IReadOnlyList content, StreamingChatFunctionCallUpdate functionCall, IReadOnlyList toolCalls, ChatMessageRole? role, string refusal, IDictionary serializedAdditionalRawData) { Content = content; FunctionCall = functionCall; ToolCalls = toolCalls; Role = role; + Refusal = refusal; SerializedAdditionalRawData = serializedAdditionalRawData; } public StreamingChatFunctionCallUpdate FunctionCall { get; } public IReadOnlyList ToolCalls { get; } + public string Refusal { get; } } } diff --git a/.dotnet/src/Generated/Models/InternalChatResponseFormatJsonObject.Serialization.cs b/.dotnet/src/Generated/Models/InternalChatResponseFormatJsonObject.Serialization.cs new file mode 100644 index 000000000..3e555b657 --- /dev/null +++ b/.dotnet/src/Generated/Models/InternalChatResponseFormatJsonObject.Serialization.cs @@ -0,0 +1,97 @@ +// + +#nullable disable + +using System; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Chat +{ + internal partial class InternalChatResponseFormatJsonObject : IJsonModel + { + InternalChatResponseFormatJsonObject IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(InternalChatResponseFormatJsonObject)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeInternalChatResponseFormatJsonObject(document.RootElement, options); + } + + internal static InternalChatResponseFormatJsonObject DeserializeInternalChatResponseFormatJsonObject(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string type = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("type"u8)) + { + type = property.Value.GetString(); + continue; + } + if (true) + { + rawDataDictionary ??= new Dictionary(); + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new InternalChatResponseFormatJsonObject(type, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(InternalChatResponseFormatJsonObject)} does not support writing '{options.Format}' format."); + } + } + + InternalChatResponseFormatJsonObject IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeInternalChatResponseFormatJsonObject(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(InternalChatResponseFormatJsonObject)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + internal static new InternalChatResponseFormatJsonObject FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeInternalChatResponseFormatJsonObject(document.RootElement); + } + + internal override BinaryContent ToBinaryContent() + { + return BinaryContent.Create(this, ModelSerializationExtensions.WireOptions); + } + } +} diff --git a/.dotnet/src/Generated/Models/InternalChatResponseFormatJsonObject.cs b/.dotnet/src/Generated/Models/InternalChatResponseFormatJsonObject.cs new file mode 100644 index 000000000..a3d489404 --- /dev/null +++ b/.dotnet/src/Generated/Models/InternalChatResponseFormatJsonObject.cs @@ -0,0 +1,21 @@ +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace OpenAI.Chat +{ + internal partial class InternalChatResponseFormatJsonObject : ChatResponseFormat + { + public InternalChatResponseFormatJsonObject() + { + Type = "json_object"; + } + + internal InternalChatResponseFormatJsonObject(string type, IDictionary serializedAdditionalRawData) : base(type, serializedAdditionalRawData) + { + } + } +} diff --git a/.dotnet/src/Generated/Models/InternalChatResponseFormatJsonSchema.Serialization.cs b/.dotnet/src/Generated/Models/InternalChatResponseFormatJsonSchema.Serialization.cs new file mode 100644 index 000000000..c2b875ee6 --- /dev/null +++ b/.dotnet/src/Generated/Models/InternalChatResponseFormatJsonSchema.Serialization.cs @@ -0,0 
+1,104 @@ +// + +#nullable disable + +using System; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using OpenAI.Internal; + +namespace OpenAI.Chat +{ + internal partial class InternalChatResponseFormatJsonSchema : IJsonModel + { + InternalChatResponseFormatJsonSchema IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(InternalChatResponseFormatJsonSchema)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeInternalChatResponseFormatJsonSchema(document.RootElement, options); + } + + internal static InternalChatResponseFormatJsonSchema DeserializeInternalChatResponseFormatJsonSchema(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + InternalResponseFormatJsonSchemaJsonSchema jsonSchema = default; + string type = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("json_schema"u8)) + { + jsonSchema = InternalResponseFormatJsonSchemaJsonSchema.DeserializeInternalResponseFormatJsonSchemaJsonSchema(property.Value, options); + continue; + } + if (property.NameEquals("type"u8)) + { + type = property.Value.GetString(); + continue; + } + if (true) + { + rawDataDictionary ??= new Dictionary(); + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new InternalChatResponseFormatJsonSchema(type, 
serializedAdditionalRawData, jsonSchema); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(InternalChatResponseFormatJsonSchema)} does not support writing '{options.Format}' format."); + } + } + + InternalChatResponseFormatJsonSchema IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeInternalChatResponseFormatJsonSchema(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(InternalChatResponseFormatJsonSchema)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + internal static new InternalChatResponseFormatJsonSchema FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeInternalChatResponseFormatJsonSchema(document.RootElement); + } + + internal override BinaryContent ToBinaryContent() + { + return BinaryContent.Create(this, ModelSerializationExtensions.WireOptions); + } + } +} diff --git a/.dotnet/src/Generated/Models/InternalChatResponseFormatJsonSchema.cs b/.dotnet/src/Generated/Models/InternalChatResponseFormatJsonSchema.cs new file mode 100644 index 000000000..d519acb14 --- /dev/null +++ b/.dotnet/src/Generated/Models/InternalChatResponseFormatJsonSchema.cs @@ -0,0 +1,32 @@ +// + +#nullable disable + +using System; +using System.Collections.Generic; +using OpenAI.Internal; + +namespace OpenAI.Chat +{ + 
internal partial class InternalChatResponseFormatJsonSchema : ChatResponseFormat + { + public InternalChatResponseFormatJsonSchema(InternalResponseFormatJsonSchemaJsonSchema jsonSchema) + { + Argument.AssertNotNull(jsonSchema, nameof(jsonSchema)); + + Type = "json_schema"; + JsonSchema = jsonSchema; + } + + internal InternalChatResponseFormatJsonSchema(string type, IDictionary serializedAdditionalRawData, InternalResponseFormatJsonSchemaJsonSchema jsonSchema) : base(type, serializedAdditionalRawData) + { + JsonSchema = jsonSchema; + } + + internal InternalChatResponseFormatJsonSchema() + { + } + + public InternalResponseFormatJsonSchemaJsonSchema JsonSchema { get; } + } +} diff --git a/.dotnet/src/Generated/Models/InternalChatResponseFormatText.Serialization.cs b/.dotnet/src/Generated/Models/InternalChatResponseFormatText.Serialization.cs new file mode 100644 index 000000000..6ab2655f8 --- /dev/null +++ b/.dotnet/src/Generated/Models/InternalChatResponseFormatText.Serialization.cs @@ -0,0 +1,97 @@ +// + +#nullable disable + +using System; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Chat +{ + internal partial class InternalChatResponseFormatText : IJsonModel + { + InternalChatResponseFormatText IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(InternalChatResponseFormatText)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeInternalChatResponseFormatText(document.RootElement, options); + } + + internal static InternalChatResponseFormatText DeserializeInternalChatResponseFormatText(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string type = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("type"u8)) + { + type = property.Value.GetString(); + continue; + } + if (true) + { + rawDataDictionary ??= new Dictionary(); + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new InternalChatResponseFormatText(type, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(InternalChatResponseFormatText)} does not support writing '{options.Format}' format."); + } + } + + InternalChatResponseFormatText IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeInternalChatResponseFormatText(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(InternalChatResponseFormatText)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + internal static new InternalChatResponseFormatText FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeInternalChatResponseFormatText(document.RootElement); + } + + internal override BinaryContent ToBinaryContent() + { + return BinaryContent.Create(this, ModelSerializationExtensions.WireOptions); + } + } +} diff --git a/.dotnet/src/Generated/Models/InternalChatResponseFormatText.cs b/.dotnet/src/Generated/Models/InternalChatResponseFormatText.cs new file mode 100644 index 000000000..49e7771f0 --- /dev/null +++ b/.dotnet/src/Generated/Models/InternalChatResponseFormatText.cs @@ -0,0 +1,21 @@ +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace OpenAI.Chat +{ + internal partial class InternalChatResponseFormatText : ChatResponseFormat + { + public InternalChatResponseFormatText() + { + Type = "text"; + } + + internal InternalChatResponseFormatText(string type, IDictionary serializedAdditionalRawData) : base(type, serializedAdditionalRawData) + { + } + } +} diff --git a/.dotnet/src/Generated/Models/InternalModifyAssistantRequestToolResourcesFileSearch.Serialization.cs b/.dotnet/src/Generated/Models/InternalCompleteUploadRequest.Serialization.cs similarity index 53% rename from .dotnet/src/Generated/Models/InternalModifyAssistantRequestToolResourcesFileSearch.Serialization.cs rename to .dotnet/src/Generated/Models/InternalCompleteUploadRequest.Serialization.cs 
index d8f1c1e75..30ee85a04 100644 --- a/.dotnet/src/Generated/Models/InternalModifyAssistantRequestToolResourcesFileSearch.Serialization.cs +++ b/.dotnet/src/Generated/Models/InternalCompleteUploadRequest.Serialization.cs @@ -8,29 +8,34 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Assistants +namespace OpenAI.Files { - internal partial class InternalModifyAssistantRequestToolResourcesFileSearch : IJsonModel + internal partial class InternalCompleteUploadRequest : IJsonModel { - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(InternalModifyAssistantRequestToolResourcesFileSearch)} does not support writing '{format}' format."); + throw new FormatException($"The model {nameof(InternalCompleteUploadRequest)} does not support writing '{format}' format."); } writer.WriteStartObject(); - if (SerializedAdditionalRawData?.ContainsKey("vector_store_ids") != true && Optional.IsCollectionDefined(VectorStoreIds)) + if (SerializedAdditionalRawData?.ContainsKey("part_ids") != true) { - writer.WritePropertyName("vector_store_ids"u8); + writer.WritePropertyName("part_ids"u8); writer.WriteStartArray(); - foreach (var item in VectorStoreIds) + foreach (var item in PartIds) { writer.WriteStringValue(item); } writer.WriteEndArray(); } + if (SerializedAdditionalRawData?.ContainsKey("md5") != true && Optional.IsDefined(Md5)) + { + writer.WritePropertyName("md5"u8); + writer.WriteStringValue(Md5); + } if (SerializedAdditionalRawData != null) { foreach (var item in SerializedAdditionalRawData) @@ -53,19 +58,19 @@ void IJsonModel.Write(Utf 
writer.WriteEndObject(); } - InternalModifyAssistantRequestToolResourcesFileSearch IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + InternalCompleteUploadRequest IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(InternalModifyAssistantRequestToolResourcesFileSearch)} does not support reading '{format}' format."); + throw new FormatException($"The model {nameof(InternalCompleteUploadRequest)} does not support reading '{format}' format."); } using JsonDocument document = JsonDocument.ParseValue(ref reader); - return DeserializeInternalModifyAssistantRequestToolResourcesFileSearch(document.RootElement, options); + return DeserializeInternalCompleteUploadRequest(document.RootElement, options); } - internal static InternalModifyAssistantRequestToolResourcesFileSearch DeserializeInternalModifyAssistantRequestToolResourcesFileSearch(JsonElement element, ModelReaderWriterOptions options = null) + internal static InternalCompleteUploadRequest DeserializeInternalCompleteUploadRequest(JsonElement element, ModelReaderWriterOptions options = null) { options ??= ModelSerializationExtensions.WireOptions; @@ -73,23 +78,25 @@ internal static InternalModifyAssistantRequestToolResourcesFileSearch Deserializ { return null; } - IList vectorStoreIds = default; + IList partIds = default; + string md5 = default; IDictionary serializedAdditionalRawData = default; Dictionary rawDataDictionary = new Dictionary(); foreach (var property in element.EnumerateObject()) { - if (property.NameEquals("vector_store_ids"u8)) + if (property.NameEquals("part_ids"u8)) { - if (property.Value.ValueKind == JsonValueKind.Null) - { - continue; - } List array 
= new List(); foreach (var item in property.Value.EnumerateArray()) { array.Add(item.GetString()); } - vectorStoreIds = array; + partIds = array; + continue; + } + if (property.NameEquals("md5"u8)) + { + md5 = property.Value.GetString(); continue; } if (true) @@ -99,44 +106,44 @@ internal static InternalModifyAssistantRequestToolResourcesFileSearch Deserializ } } serializedAdditionalRawData = rawDataDictionary; - return new InternalModifyAssistantRequestToolResourcesFileSearch(vectorStoreIds ?? new ChangeTrackingList(), serializedAdditionalRawData); + return new InternalCompleteUploadRequest(partIds, md5, serializedAdditionalRawData); } - BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": return ModelReaderWriter.Write(this, options); default: - throw new FormatException($"The model {nameof(InternalModifyAssistantRequestToolResourcesFileSearch)} does not support writing '{options.Format}' format."); + throw new FormatException($"The model {nameof(InternalCompleteUploadRequest)} does not support writing '{options.Format}' format."); } } - InternalModifyAssistantRequestToolResourcesFileSearch IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + InternalCompleteUploadRequest IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": { using JsonDocument document = JsonDocument.Parse(data); - return DeserializeInternalModifyAssistantRequestToolResourcesFileSearch(document.RootElement, options); + return DeserializeInternalCompleteUploadRequest(document.RootElement, options); } default: - throw new FormatException($"The model {nameof(InternalModifyAssistantRequestToolResourcesFileSearch)} does not support reading '{options.Format}' format."); + throw new FormatException($"The model {nameof(InternalCompleteUploadRequest)} does not support reading '{options.Format}' format."); } } - string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; - internal static InternalModifyAssistantRequestToolResourcesFileSearch FromResponse(PipelineResponse response) + internal static InternalCompleteUploadRequest FromResponse(PipelineResponse response) { using var document = JsonDocument.Parse(response.Content); - return DeserializeInternalModifyAssistantRequestToolResourcesFileSearch(document.RootElement); + return DeserializeInternalCompleteUploadRequest(document.RootElement); } internal virtual BinaryContent ToBinaryContent() diff --git a/.dotnet/src/Generated/Models/InternalCompleteUploadRequest.cs b/.dotnet/src/Generated/Models/InternalCompleteUploadRequest.cs new file mode 100644 index 000000000..74d4d5346 --- /dev/null +++ b/.dotnet/src/Generated/Models/InternalCompleteUploadRequest.cs @@ -0,0 +1,35 @@ +// + +#nullable disable + +using System; +using System.Collections.Generic; +using System.Linq; + +namespace OpenAI.Files +{ + internal partial class InternalCompleteUploadRequest + { + internal IDictionary SerializedAdditionalRawData { get; set; } + public InternalCompleteUploadRequest(IEnumerable partIds) + { + Argument.AssertNotNull(partIds, nameof(partIds)); + + PartIds = 
partIds.ToList(); + } + + internal InternalCompleteUploadRequest(IList partIds, string md5, IDictionary serializedAdditionalRawData) + { + PartIds = partIds; + Md5 = md5; + SerializedAdditionalRawData = serializedAdditionalRawData; + } + + internal InternalCompleteUploadRequest() + { + } + + public IList PartIds { get; } + public string Md5 { get; set; } + } +} diff --git a/.dotnet/src/Generated/Models/InternalCreateAssistantRequestModel.cs b/.dotnet/src/Generated/Models/InternalCreateAssistantRequestModel.cs index dbe430270..dff777d4f 100644 --- a/.dotnet/src/Generated/Models/InternalCreateAssistantRequestModel.cs +++ b/.dotnet/src/Generated/Models/InternalCreateAssistantRequestModel.cs @@ -17,7 +17,10 @@ public InternalCreateAssistantRequestModel(string value) } private const string Gpt4oValue = "gpt-4o"; + private const string Gpt4o20240806Value = "gpt-4o-2024-08-06"; private const string Gpt4o20240513Value = "gpt-4o-2024-05-13"; + private const string Gpt4oMiniValue = "gpt-4o-mini"; + private const string Gpt4oMini20240718Value = "gpt-4o-mini-2024-07-18"; private const string Gpt4TurboValue = "gpt-4-turbo"; private const string Gpt4Turbo20240409Value = "gpt-4-turbo-2024-04-09"; private const string Gpt40125PreviewValue = "gpt-4-0125-preview"; @@ -38,7 +41,10 @@ public InternalCreateAssistantRequestModel(string value) private const string Gpt35Turbo16k0613Value = "gpt-3.5-turbo-16k-0613"; public static InternalCreateAssistantRequestModel Gpt4o { get; } = new InternalCreateAssistantRequestModel(Gpt4oValue); + public static InternalCreateAssistantRequestModel Gpt4o20240806 { get; } = new InternalCreateAssistantRequestModel(Gpt4o20240806Value); public static InternalCreateAssistantRequestModel Gpt4o20240513 { get; } = new InternalCreateAssistantRequestModel(Gpt4o20240513Value); + public static InternalCreateAssistantRequestModel Gpt4oMini { get; } = new InternalCreateAssistantRequestModel(Gpt4oMiniValue); + public static InternalCreateAssistantRequestModel 
Gpt4oMini20240718 { get; } = new InternalCreateAssistantRequestModel(Gpt4oMini20240718Value); public static InternalCreateAssistantRequestModel Gpt4Turbo { get; } = new InternalCreateAssistantRequestModel(Gpt4TurboValue); public static InternalCreateAssistantRequestModel Gpt4Turbo20240409 { get; } = new InternalCreateAssistantRequestModel(Gpt4Turbo20240409Value); public static InternalCreateAssistantRequestModel Gpt40125Preview { get; } = new InternalCreateAssistantRequestModel(Gpt40125PreviewValue); diff --git a/.dotnet/src/Generated/Models/InternalCreateAssistantRequestToolResources.Serialization.cs b/.dotnet/src/Generated/Models/InternalCreateAssistantRequestToolResources.Serialization.cs index 9b879c1ec..d21e16129 100644 --- a/.dotnet/src/Generated/Models/InternalCreateAssistantRequestToolResources.Serialization.cs +++ b/.dotnet/src/Generated/Models/InternalCreateAssistantRequestToolResources.Serialization.cs @@ -29,14 +29,7 @@ void IJsonModel.Write(Utf8JsonWrite if (SerializedAdditionalRawData?.ContainsKey("file_search") != true && Optional.IsDefined(FileSearch)) { writer.WritePropertyName("file_search"u8); -#if NET6_0_OR_GREATER - writer.WriteRawValue(FileSearch); -#else - using (JsonDocument document = JsonDocument.Parse(FileSearch)) - { - JsonSerializer.Serialize(writer, document.RootElement); - } -#endif + writer.WriteObjectValue(FileSearch, options); } if (SerializedAdditionalRawData != null) { @@ -81,7 +74,7 @@ internal static InternalCreateAssistantRequestToolResources DeserializeInternalC return null; } InternalCreateAssistantRequestToolResourcesCodeInterpreter codeInterpreter = default; - BinaryData fileSearch = default; + FileSearchToolResources fileSearch = default; IDictionary serializedAdditionalRawData = default; Dictionary rawDataDictionary = new Dictionary(); foreach (var property in element.EnumerateObject()) @@ -101,7 +94,7 @@ internal static InternalCreateAssistantRequestToolResources DeserializeInternalC { continue; } - fileSearch = 
BinaryData.FromString(property.Value.GetRawText()); + fileSearch = FileSearchToolResources.DeserializeFileSearchToolResources(property.Value, options); continue; } if (true) diff --git a/.dotnet/src/Generated/Models/InternalCreateAssistantRequestToolResources.cs b/.dotnet/src/Generated/Models/InternalCreateAssistantRequestToolResources.cs index 4ed163de6..3ac5aa737 100644 --- a/.dotnet/src/Generated/Models/InternalCreateAssistantRequestToolResources.cs +++ b/.dotnet/src/Generated/Models/InternalCreateAssistantRequestToolResources.cs @@ -14,7 +14,7 @@ public InternalCreateAssistantRequestToolResources() { } - internal InternalCreateAssistantRequestToolResources(InternalCreateAssistantRequestToolResourcesCodeInterpreter codeInterpreter, BinaryData fileSearch, IDictionary serializedAdditionalRawData) + internal InternalCreateAssistantRequestToolResources(InternalCreateAssistantRequestToolResourcesCodeInterpreter codeInterpreter, FileSearchToolResources fileSearch, IDictionary serializedAdditionalRawData) { CodeInterpreter = codeInterpreter; FileSearch = fileSearch; @@ -22,6 +22,6 @@ internal InternalCreateAssistantRequestToolResources(InternalCreateAssistantRequ } public InternalCreateAssistantRequestToolResourcesCodeInterpreter CodeInterpreter { get; set; } - public BinaryData FileSearch { get; set; } + public FileSearchToolResources FileSearch { get; set; } } } diff --git a/.dotnet/src/Generated/Models/InternalCreateAssistantRequestToolResourcesFileSearchBase.cs b/.dotnet/src/Generated/Models/InternalCreateAssistantRequestToolResourcesFileSearchBase.cs deleted file mode 100644 index 31dff8351..000000000 --- a/.dotnet/src/Generated/Models/InternalCreateAssistantRequestToolResourcesFileSearchBase.cs +++ /dev/null @@ -1,22 +0,0 @@ -// - -#nullable disable - -using System; -using System.Collections.Generic; - -namespace OpenAI.Assistants -{ - internal partial class InternalCreateAssistantRequestToolResourcesFileSearchBase - { - internal IDictionary 
SerializedAdditionalRawData { get; set; } - public InternalCreateAssistantRequestToolResourcesFileSearchBase() - { - } - - internal InternalCreateAssistantRequestToolResourcesFileSearchBase(IDictionary serializedAdditionalRawData) - { - SerializedAdditionalRawData = serializedAdditionalRawData; - } - } -} diff --git a/.dotnet/src/Generated/Models/InternalCreateAssistantRequestToolResourcesFileSearchVectorStoreCreationHelpers.Serialization.cs b/.dotnet/src/Generated/Models/InternalCreateAssistantRequestToolResourcesFileSearchVectorStoreCreationHelpers.Serialization.cs deleted file mode 100644 index af3257e65..000000000 --- a/.dotnet/src/Generated/Models/InternalCreateAssistantRequestToolResourcesFileSearchVectorStoreCreationHelpers.Serialization.cs +++ /dev/null @@ -1,147 +0,0 @@ -// - -#nullable disable - -using System; -using System.ClientModel; -using System.ClientModel.Primitives; -using System.Collections.Generic; -using System.Text.Json; - -namespace OpenAI.Assistants -{ - internal partial class InternalCreateAssistantRequestToolResourcesFileSearchVectorStoreCreationHelpers : IJsonModel - { - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) - { - var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; - if (format != "J") - { - throw new FormatException($"The model {nameof(InternalCreateAssistantRequestToolResourcesFileSearchVectorStoreCreationHelpers)} does not support writing '{format}' format."); - } - - writer.WriteStartObject(); - if (SerializedAdditionalRawData?.ContainsKey("vector_stores") != true && Optional.IsCollectionDefined(VectorStores)) - { - writer.WritePropertyName("vector_stores"u8); - writer.WriteStartArray(); - foreach (var item in VectorStores) - { - writer.WriteObjectValue(item, options); - } - writer.WriteEndArray(); - } - if (SerializedAdditionalRawData != null) - { - foreach (var item in SerializedAdditionalRawData) - { - if (ModelSerializationExtensions.IsSentinelValue(item.Value)) - { - continue; - } - writer.WritePropertyName(item.Key); -#if NET6_0_OR_GREATER - writer.WriteRawValue(item.Value); -#else - using (JsonDocument document = JsonDocument.Parse(item.Value)) - { - JsonSerializer.Serialize(writer, document.RootElement); - } -#endif - } - } - writer.WriteEndObject(); - } - - InternalCreateAssistantRequestToolResourcesFileSearchVectorStoreCreationHelpers IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) - { - var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; - if (format != "J") - { - throw new FormatException($"The model {nameof(InternalCreateAssistantRequestToolResourcesFileSearchVectorStoreCreationHelpers)} does not support reading '{format}' format."); - } - - using JsonDocument document = JsonDocument.ParseValue(ref reader); - return DeserializeInternalCreateAssistantRequestToolResourcesFileSearchVectorStoreCreationHelpers(document.RootElement, options); - } - - internal static InternalCreateAssistantRequestToolResourcesFileSearchVectorStoreCreationHelpers DeserializeInternalCreateAssistantRequestToolResourcesFileSearchVectorStoreCreationHelpers(JsonElement element, ModelReaderWriterOptions options = null) - { - options ??= ModelSerializationExtensions.WireOptions; - - if (element.ValueKind == JsonValueKind.Null) - { - return null; - } - IList vectorStores = default; - IDictionary serializedAdditionalRawData = default; - Dictionary rawDataDictionary = new Dictionary(); - foreach (var property in element.EnumerateObject()) - { - if (property.NameEquals("vector_stores"u8)) - { - if (property.Value.ValueKind == JsonValueKind.Null) - { - continue; - } - List array = new List(); - foreach (var item in property.Value.EnumerateArray()) - { - array.Add(VectorStoreCreationHelper.DeserializeVectorStoreCreationHelper(item, options)); - } - vectorStores = array; - continue; - } - if (true) - { - rawDataDictionary ??= new Dictionary(); - rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); - } - } - serializedAdditionalRawData = rawDataDictionary; - return new InternalCreateAssistantRequestToolResourcesFileSearchVectorStoreCreationHelpers(vectorStores ?? new ChangeTrackingList(), serializedAdditionalRawData); - } - - BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) - { - var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; - - switch (format) - { - case "J": - return ModelReaderWriter.Write(this, options); - default: - throw new FormatException($"The model {nameof(InternalCreateAssistantRequestToolResourcesFileSearchVectorStoreCreationHelpers)} does not support writing '{options.Format}' format."); - } - } - - InternalCreateAssistantRequestToolResourcesFileSearchVectorStoreCreationHelpers IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) - { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; - - switch (format) - { - case "J": - { - using JsonDocument document = JsonDocument.Parse(data); - return DeserializeInternalCreateAssistantRequestToolResourcesFileSearchVectorStoreCreationHelpers(document.RootElement, options); - } - default: - throw new FormatException($"The model {nameof(InternalCreateAssistantRequestToolResourcesFileSearchVectorStoreCreationHelpers)} does not support reading '{options.Format}' format."); - } - } - - string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; - - internal static InternalCreateAssistantRequestToolResourcesFileSearchVectorStoreCreationHelpers FromResponse(PipelineResponse response) - { - using var document = JsonDocument.Parse(response.Content); - return DeserializeInternalCreateAssistantRequestToolResourcesFileSearchVectorStoreCreationHelpers(document.RootElement); - } - - internal virtual BinaryContent ToBinaryContent() - { - return BinaryContent.Create(this, ModelSerializationExtensions.WireOptions); - } - } -} diff --git a/.dotnet/src/Generated/Models/InternalCreateAssistantRequestToolResourcesFileSearchVectorStoreCreationHelpers.cs b/.dotnet/src/Generated/Models/InternalCreateAssistantRequestToolResourcesFileSearchVectorStoreCreationHelpers.cs deleted file mode 100644 index bb2896dc6..000000000 --- 
a/.dotnet/src/Generated/Models/InternalCreateAssistantRequestToolResourcesFileSearchVectorStoreCreationHelpers.cs +++ /dev/null @@ -1,26 +0,0 @@ -// - -#nullable disable - -using System; -using System.Collections.Generic; - -namespace OpenAI.Assistants -{ - internal partial class InternalCreateAssistantRequestToolResourcesFileSearchVectorStoreCreationHelpers - { - internal IDictionary SerializedAdditionalRawData { get; set; } - public InternalCreateAssistantRequestToolResourcesFileSearchVectorStoreCreationHelpers() - { - VectorStores = new ChangeTrackingList(); - } - - internal InternalCreateAssistantRequestToolResourcesFileSearchVectorStoreCreationHelpers(IList vectorStores, IDictionary serializedAdditionalRawData) - { - VectorStores = vectorStores; - SerializedAdditionalRawData = serializedAdditionalRawData; - } - - public IList VectorStores { get; } - } -} diff --git a/.dotnet/src/Generated/Models/InternalCreateAssistantRequestToolResourcesFileSearchVectorStoreIdReferences.Serialization.cs b/.dotnet/src/Generated/Models/InternalCreateAssistantRequestToolResourcesFileSearchVectorStoreIdReferences.Serialization.cs deleted file mode 100644 index 4193507ce..000000000 --- a/.dotnet/src/Generated/Models/InternalCreateAssistantRequestToolResourcesFileSearchVectorStoreIdReferences.Serialization.cs +++ /dev/null @@ -1,147 +0,0 @@ -// - -#nullable disable - -using System; -using System.ClientModel; -using System.ClientModel.Primitives; -using System.Collections.Generic; -using System.Text.Json; - -namespace OpenAI.Assistants -{ - internal partial class InternalCreateAssistantRequestToolResourcesFileSearchVectorStoreIdReferences : IJsonModel - { - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) - { - var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; - if (format != "J") - { - throw new FormatException($"The model {nameof(InternalCreateAssistantRequestToolResourcesFileSearchVectorStoreIdReferences)} does not support writing '{format}' format."); - } - - writer.WriteStartObject(); - if (SerializedAdditionalRawData?.ContainsKey("vector_store_ids") != true && Optional.IsCollectionDefined(VectorStoreIds)) - { - writer.WritePropertyName("vector_store_ids"u8); - writer.WriteStartArray(); - foreach (var item in VectorStoreIds) - { - writer.WriteStringValue(item); - } - writer.WriteEndArray(); - } - if (SerializedAdditionalRawData != null) - { - foreach (var item in SerializedAdditionalRawData) - { - if (ModelSerializationExtensions.IsSentinelValue(item.Value)) - { - continue; - } - writer.WritePropertyName(item.Key); -#if NET6_0_OR_GREATER - writer.WriteRawValue(item.Value); -#else - using (JsonDocument document = JsonDocument.Parse(item.Value)) - { - JsonSerializer.Serialize(writer, document.RootElement); - } -#endif - } - } - writer.WriteEndObject(); - } - - InternalCreateAssistantRequestToolResourcesFileSearchVectorStoreIdReferences IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) - { - var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; - if (format != "J") - { - throw new FormatException($"The model {nameof(InternalCreateAssistantRequestToolResourcesFileSearchVectorStoreIdReferences)} does not support reading '{format}' format."); - } - - using JsonDocument document = JsonDocument.ParseValue(ref reader); - return DeserializeInternalCreateAssistantRequestToolResourcesFileSearchVectorStoreIdReferences(document.RootElement, options); - } - - internal static InternalCreateAssistantRequestToolResourcesFileSearchVectorStoreIdReferences DeserializeInternalCreateAssistantRequestToolResourcesFileSearchVectorStoreIdReferences(JsonElement element, ModelReaderWriterOptions options = null) - { - options ??= ModelSerializationExtensions.WireOptions; - - if (element.ValueKind == JsonValueKind.Null) - { - return null; - } - IList vectorStoreIds = default; - IDictionary serializedAdditionalRawData = default; - Dictionary rawDataDictionary = new Dictionary(); - foreach (var property in element.EnumerateObject()) - { - if (property.NameEquals("vector_store_ids"u8)) - { - if (property.Value.ValueKind == JsonValueKind.Null) - { - continue; - } - List array = new List(); - foreach (var item in property.Value.EnumerateArray()) - { - array.Add(item.GetString()); - } - vectorStoreIds = array; - continue; - } - if (true) - { - rawDataDictionary ??= new Dictionary(); - rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); - } - } - serializedAdditionalRawData = rawDataDictionary; - return new InternalCreateAssistantRequestToolResourcesFileSearchVectorStoreIdReferences(vectorStoreIds ?? new ChangeTrackingList(), serializedAdditionalRawData); - } - - BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) - { - var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; - - switch (format) - { - case "J": - return ModelReaderWriter.Write(this, options); - default: - throw new FormatException($"The model {nameof(InternalCreateAssistantRequestToolResourcesFileSearchVectorStoreIdReferences)} does not support writing '{options.Format}' format."); - } - } - - InternalCreateAssistantRequestToolResourcesFileSearchVectorStoreIdReferences IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) - { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; - - switch (format) - { - case "J": - { - using JsonDocument document = JsonDocument.Parse(data); - return DeserializeInternalCreateAssistantRequestToolResourcesFileSearchVectorStoreIdReferences(document.RootElement, options); - } - default: - throw new FormatException($"The model {nameof(InternalCreateAssistantRequestToolResourcesFileSearchVectorStoreIdReferences)} does not support reading '{options.Format}' format."); - } - } - - string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; - - internal static InternalCreateAssistantRequestToolResourcesFileSearchVectorStoreIdReferences FromResponse(PipelineResponse response) - { - using var document = JsonDocument.Parse(response.Content); - return DeserializeInternalCreateAssistantRequestToolResourcesFileSearchVectorStoreIdReferences(document.RootElement); - } - - internal virtual BinaryContent ToBinaryContent() - { - return BinaryContent.Create(this, ModelSerializationExtensions.WireOptions); - } - } -} diff --git a/.dotnet/src/Generated/Models/InternalCreateAssistantRequestToolResourcesFileSearchVectorStoreIdReferences.cs b/.dotnet/src/Generated/Models/InternalCreateAssistantRequestToolResourcesFileSearchVectorStoreIdReferences.cs deleted file mode 100644 index ba6da6ac4..000000000 --- 
a/.dotnet/src/Generated/Models/InternalCreateAssistantRequestToolResourcesFileSearchVectorStoreIdReferences.cs +++ /dev/null @@ -1,26 +0,0 @@ -// - -#nullable disable - -using System; -using System.Collections.Generic; - -namespace OpenAI.Assistants -{ - internal partial class InternalCreateAssistantRequestToolResourcesFileSearchVectorStoreIdReferences - { - internal IDictionary SerializedAdditionalRawData { get; set; } - public InternalCreateAssistantRequestToolResourcesFileSearchVectorStoreIdReferences() - { - VectorStoreIds = new ChangeTrackingList(); - } - - internal InternalCreateAssistantRequestToolResourcesFileSearchVectorStoreIdReferences(IList vectorStoreIds, IDictionary serializedAdditionalRawData) - { - VectorStoreIds = vectorStoreIds; - SerializedAdditionalRawData = serializedAdditionalRawData; - } - - public IList VectorStoreIds { get; } - } -} diff --git a/.dotnet/src/Generated/Models/InternalCreateChatCompletionRequestModel.cs b/.dotnet/src/Generated/Models/InternalCreateChatCompletionRequestModel.cs index 3ee5bd5d5..4b14c7809 100644 --- a/.dotnet/src/Generated/Models/InternalCreateChatCompletionRequestModel.cs +++ b/.dotnet/src/Generated/Models/InternalCreateChatCompletionRequestModel.cs @@ -18,6 +18,10 @@ public InternalCreateChatCompletionRequestModel(string value) private const string Gpt4oValue = "gpt-4o"; private const string Gpt4o20240513Value = "gpt-4o-2024-05-13"; + private const string Gpt4o20240806Value = "gpt-4o-2024-08-06"; + private const string Chatgpt4oLatestValue = "chatgpt-4o-latest"; + private const string Gpt4oMiniValue = "gpt-4o-mini"; + private const string Gpt4oMini20240718Value = "gpt-4o-mini-2024-07-18"; private const string Gpt4TurboValue = "gpt-4-turbo"; private const string Gpt4Turbo20240409Value = "gpt-4-turbo-2024-04-09"; private const string Gpt40125PreviewValue = "gpt-4-0125-preview"; @@ -40,6 +44,10 @@ public InternalCreateChatCompletionRequestModel(string value) public static InternalCreateChatCompletionRequestModel 
Gpt4o { get; } = new InternalCreateChatCompletionRequestModel(Gpt4oValue); public static InternalCreateChatCompletionRequestModel Gpt4o20240513 { get; } = new InternalCreateChatCompletionRequestModel(Gpt4o20240513Value); + public static InternalCreateChatCompletionRequestModel Gpt4o20240806 { get; } = new InternalCreateChatCompletionRequestModel(Gpt4o20240806Value); + public static InternalCreateChatCompletionRequestModel Chatgpt4oLatest { get; } = new InternalCreateChatCompletionRequestModel(Chatgpt4oLatestValue); + public static InternalCreateChatCompletionRequestModel Gpt4oMini { get; } = new InternalCreateChatCompletionRequestModel(Gpt4oMiniValue); + public static InternalCreateChatCompletionRequestModel Gpt4oMini20240718 { get; } = new InternalCreateChatCompletionRequestModel(Gpt4oMini20240718Value); public static InternalCreateChatCompletionRequestModel Gpt4Turbo { get; } = new InternalCreateChatCompletionRequestModel(Gpt4TurboValue); public static InternalCreateChatCompletionRequestModel Gpt4Turbo20240409 { get; } = new InternalCreateChatCompletionRequestModel(Gpt4Turbo20240409Value); public static InternalCreateChatCompletionRequestModel Gpt40125Preview { get; } = new InternalCreateChatCompletionRequestModel(Gpt40125PreviewValue); diff --git a/.dotnet/src/Generated/Models/InternalCreateChatCompletionRequestResponseFormatType.cs b/.dotnet/src/Generated/Models/InternalCreateChatCompletionRequestResponseFormatType.cs deleted file mode 100644 index 535b6a509..000000000 --- a/.dotnet/src/Generated/Models/InternalCreateChatCompletionRequestResponseFormatType.cs +++ /dev/null @@ -1,36 +0,0 @@ -// - -#nullable disable - -using System; -using System.ComponentModel; - -namespace OpenAI.Chat -{ - internal readonly partial struct InternalCreateChatCompletionRequestResponseFormatType : IEquatable - { - private readonly string _value; - - public InternalCreateChatCompletionRequestResponseFormatType(string value) - { - _value = value ?? 
throw new ArgumentNullException(nameof(value)); - } - - private const string TextValue = "text"; - private const string JsonObjectValue = "json_object"; - - public static InternalCreateChatCompletionRequestResponseFormatType Text { get; } = new InternalCreateChatCompletionRequestResponseFormatType(TextValue); - public static InternalCreateChatCompletionRequestResponseFormatType JsonObject { get; } = new InternalCreateChatCompletionRequestResponseFormatType(JsonObjectValue); - public static bool operator ==(InternalCreateChatCompletionRequestResponseFormatType left, InternalCreateChatCompletionRequestResponseFormatType right) => left.Equals(right); - public static bool operator !=(InternalCreateChatCompletionRequestResponseFormatType left, InternalCreateChatCompletionRequestResponseFormatType right) => !left.Equals(right); - public static implicit operator InternalCreateChatCompletionRequestResponseFormatType(string value) => new InternalCreateChatCompletionRequestResponseFormatType(value); - - [EditorBrowsable(EditorBrowsableState.Never)] - public override bool Equals(object obj) => obj is InternalCreateChatCompletionRequestResponseFormatType other && Equals(other); - public bool Equals(InternalCreateChatCompletionRequestResponseFormatType other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); - - [EditorBrowsable(EditorBrowsableState.Never)] - public override int GetHashCode() => _value != null ? 
StringComparer.InvariantCultureIgnoreCase.GetHashCode(_value) : 0; - public override string ToString() => _value; - } -} diff --git a/.dotnet/src/Generated/Models/InternalCreateChatCompletionRequestServiceTier.cs b/.dotnet/src/Generated/Models/InternalCreateChatCompletionRequestServiceTier.cs new file mode 100644 index 000000000..7d6f7f657 --- /dev/null +++ b/.dotnet/src/Generated/Models/InternalCreateChatCompletionRequestServiceTier.cs @@ -0,0 +1,36 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Chat +{ + internal readonly partial struct InternalCreateChatCompletionRequestServiceTier : IEquatable + { + private readonly string _value; + + public InternalCreateChatCompletionRequestServiceTier(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string AutoValue = "auto"; + private const string DefaultValue = "default"; + + public static InternalCreateChatCompletionRequestServiceTier Auto { get; } = new InternalCreateChatCompletionRequestServiceTier(AutoValue); + public static InternalCreateChatCompletionRequestServiceTier Default { get; } = new InternalCreateChatCompletionRequestServiceTier(DefaultValue); + public static bool operator ==(InternalCreateChatCompletionRequestServiceTier left, InternalCreateChatCompletionRequestServiceTier right) => left.Equals(right); + public static bool operator !=(InternalCreateChatCompletionRequestServiceTier left, InternalCreateChatCompletionRequestServiceTier right) => !left.Equals(right); + public static implicit operator InternalCreateChatCompletionRequestServiceTier(string value) => new InternalCreateChatCompletionRequestServiceTier(value); + + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is InternalCreateChatCompletionRequestServiceTier other && Equals(other); + public bool Equals(InternalCreateChatCompletionRequestServiceTier other) => string.Equals(_value, other._value, 
StringComparison.InvariantCultureIgnoreCase); + + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value != null ? StringComparer.InvariantCultureIgnoreCase.GetHashCode(_value) : 0; + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/InternalCreateChatCompletionResponseChoiceLogprobs.Serialization.cs b/.dotnet/src/Generated/Models/InternalCreateChatCompletionResponseChoiceLogprobs.Serialization.cs index db61936aa..1fee709bf 100644 --- a/.dotnet/src/Generated/Models/InternalCreateChatCompletionResponseChoiceLogprobs.Serialization.cs +++ b/.dotnet/src/Generated/Models/InternalCreateChatCompletionResponseChoiceLogprobs.Serialization.cs @@ -38,6 +38,23 @@ void IJsonModel.Write(Utf8Js writer.WriteNull("content"); } } + if (SerializedAdditionalRawData?.ContainsKey("refusal") != true) + { + if (Refusal != null && Optional.IsCollectionDefined(Refusal)) + { + writer.WritePropertyName("refusal"u8); + writer.WriteStartArray(); + foreach (var item in Refusal) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + } + else + { + writer.WriteNull("refusal"); + } + } if (SerializedAdditionalRawData != null) { foreach (var item in SerializedAdditionalRawData) @@ -81,6 +98,7 @@ internal static InternalCreateChatCompletionResponseChoiceLogprobs DeserializeIn return null; } IReadOnlyList content = default; + IReadOnlyList refusal = default; IDictionary serializedAdditionalRawData = default; Dictionary rawDataDictionary = new Dictionary(); foreach (var property in element.EnumerateObject()) @@ -100,6 +118,21 @@ internal static InternalCreateChatCompletionResponseChoiceLogprobs DeserializeIn content = array; continue; } + if (property.NameEquals("refusal"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + refusal = new ChangeTrackingList(); + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + 
array.Add(ChatTokenLogProbabilityInfo.DeserializeChatTokenLogProbabilityInfo(item, options)); + } + refusal = array; + continue; + } if (true) { rawDataDictionary ??= new Dictionary(); @@ -107,7 +140,7 @@ internal static InternalCreateChatCompletionResponseChoiceLogprobs DeserializeIn } } serializedAdditionalRawData = rawDataDictionary; - return new InternalCreateChatCompletionResponseChoiceLogprobs(content, serializedAdditionalRawData); + return new InternalCreateChatCompletionResponseChoiceLogprobs(content, refusal, serializedAdditionalRawData); } BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) diff --git a/.dotnet/src/Generated/Models/InternalCreateChatCompletionResponseChoiceLogprobs.cs b/.dotnet/src/Generated/Models/InternalCreateChatCompletionResponseChoiceLogprobs.cs index d81088a5c..a03030ec2 100644 --- a/.dotnet/src/Generated/Models/InternalCreateChatCompletionResponseChoiceLogprobs.cs +++ b/.dotnet/src/Generated/Models/InternalCreateChatCompletionResponseChoiceLogprobs.cs @@ -11,14 +11,16 @@ namespace OpenAI.Chat internal partial class InternalCreateChatCompletionResponseChoiceLogprobs { internal IDictionary SerializedAdditionalRawData { get; set; } - internal InternalCreateChatCompletionResponseChoiceLogprobs(IEnumerable content) + internal InternalCreateChatCompletionResponseChoiceLogprobs(IEnumerable content, IEnumerable refusal) { Content = content?.ToList(); + Refusal = refusal?.ToList(); } - internal InternalCreateChatCompletionResponseChoiceLogprobs(IReadOnlyList content, IDictionary serializedAdditionalRawData) + internal InternalCreateChatCompletionResponseChoiceLogprobs(IReadOnlyList content, IReadOnlyList refusal, IDictionary serializedAdditionalRawData) { Content = content; + Refusal = refusal; SerializedAdditionalRawData = serializedAdditionalRawData; } @@ -27,5 +29,6 @@ internal InternalCreateChatCompletionResponseChoiceLogprobs() } public IReadOnlyList Content { get; } + public IReadOnlyList Refusal { get; } } } diff 
--git a/.dotnet/src/Generated/Models/InternalCreateChatCompletionResponseServiceTier.cs b/.dotnet/src/Generated/Models/InternalCreateChatCompletionResponseServiceTier.cs new file mode 100644 index 000000000..599297be0 --- /dev/null +++ b/.dotnet/src/Generated/Models/InternalCreateChatCompletionResponseServiceTier.cs @@ -0,0 +1,36 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Chat +{ + internal readonly partial struct InternalCreateChatCompletionResponseServiceTier : IEquatable + { + private readonly string _value; + + public InternalCreateChatCompletionResponseServiceTier(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string ScaleValue = "scale"; + private const string DefaultValue = "default"; + + public static InternalCreateChatCompletionResponseServiceTier Scale { get; } = new InternalCreateChatCompletionResponseServiceTier(ScaleValue); + public static InternalCreateChatCompletionResponseServiceTier Default { get; } = new InternalCreateChatCompletionResponseServiceTier(DefaultValue); + public static bool operator ==(InternalCreateChatCompletionResponseServiceTier left, InternalCreateChatCompletionResponseServiceTier right) => left.Equals(right); + public static bool operator !=(InternalCreateChatCompletionResponseServiceTier left, InternalCreateChatCompletionResponseServiceTier right) => !left.Equals(right); + public static implicit operator InternalCreateChatCompletionResponseServiceTier(string value) => new InternalCreateChatCompletionResponseServiceTier(value); + + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is InternalCreateChatCompletionResponseServiceTier other && Equals(other); + public bool Equals(InternalCreateChatCompletionResponseServiceTier other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + [EditorBrowsable(EditorBrowsableState.Never)] + public 
override int GetHashCode() => _value != null ? StringComparer.InvariantCultureIgnoreCase.GetHashCode(_value) : 0; + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/InternalCreateChatCompletionStreamResponseChoiceLogprobs.Serialization.cs b/.dotnet/src/Generated/Models/InternalCreateChatCompletionStreamResponseChoiceLogprobs.Serialization.cs index 28f81317f..43ebedfc9 100644 --- a/.dotnet/src/Generated/Models/InternalCreateChatCompletionStreamResponseChoiceLogprobs.Serialization.cs +++ b/.dotnet/src/Generated/Models/InternalCreateChatCompletionStreamResponseChoiceLogprobs.Serialization.cs @@ -38,6 +38,23 @@ void IJsonModel.Write( writer.WriteNull("content"); } } + if (SerializedAdditionalRawData?.ContainsKey("refusal") != true) + { + if (Refusal != null && Optional.IsCollectionDefined(Refusal)) + { + writer.WritePropertyName("refusal"u8); + writer.WriteStartArray(); + foreach (var item in Refusal) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + } + else + { + writer.WriteNull("refusal"); + } + } if (SerializedAdditionalRawData != null) { foreach (var item in SerializedAdditionalRawData) @@ -81,6 +98,7 @@ internal static InternalCreateChatCompletionStreamResponseChoiceLogprobs Deseria return null; } IReadOnlyList content = default; + IReadOnlyList refusal = default; IDictionary serializedAdditionalRawData = default; Dictionary rawDataDictionary = new Dictionary(); foreach (var property in element.EnumerateObject()) @@ -100,6 +118,21 @@ internal static InternalCreateChatCompletionStreamResponseChoiceLogprobs Deseria content = array; continue; } + if (property.NameEquals("refusal"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + refusal = new ChangeTrackingList(); + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(ChatTokenLogProbabilityInfo.DeserializeChatTokenLogProbabilityInfo(item, options)); + } + refusal = 
array; + continue; + } if (true) { rawDataDictionary ??= new Dictionary(); @@ -107,7 +140,7 @@ internal static InternalCreateChatCompletionStreamResponseChoiceLogprobs Deseria } } serializedAdditionalRawData = rawDataDictionary; - return new InternalCreateChatCompletionStreamResponseChoiceLogprobs(content, serializedAdditionalRawData); + return new InternalCreateChatCompletionStreamResponseChoiceLogprobs(content, refusal, serializedAdditionalRawData); } BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) diff --git a/.dotnet/src/Generated/Models/InternalCreateChatCompletionStreamResponseChoiceLogprobs.cs b/.dotnet/src/Generated/Models/InternalCreateChatCompletionStreamResponseChoiceLogprobs.cs index f0a780dd9..03a852399 100644 --- a/.dotnet/src/Generated/Models/InternalCreateChatCompletionStreamResponseChoiceLogprobs.cs +++ b/.dotnet/src/Generated/Models/InternalCreateChatCompletionStreamResponseChoiceLogprobs.cs @@ -11,14 +11,16 @@ namespace OpenAI.Chat internal partial class InternalCreateChatCompletionStreamResponseChoiceLogprobs { internal IDictionary SerializedAdditionalRawData { get; set; } - internal InternalCreateChatCompletionStreamResponseChoiceLogprobs(IEnumerable content) + internal InternalCreateChatCompletionStreamResponseChoiceLogprobs(IEnumerable content, IEnumerable refusal) { Content = content?.ToList(); + Refusal = refusal?.ToList(); } - internal InternalCreateChatCompletionStreamResponseChoiceLogprobs(IReadOnlyList content, IDictionary serializedAdditionalRawData) + internal InternalCreateChatCompletionStreamResponseChoiceLogprobs(IReadOnlyList content, IReadOnlyList refusal, IDictionary serializedAdditionalRawData) { Content = content; + Refusal = refusal; SerializedAdditionalRawData = serializedAdditionalRawData; } @@ -27,5 +29,6 @@ internal InternalCreateChatCompletionStreamResponseChoiceLogprobs() } public IReadOnlyList Content { get; } + public IReadOnlyList Refusal { get; } } } diff --git 
a/.dotnet/src/Generated/Models/InternalCreateChatCompletionStreamResponseServiceTier.cs b/.dotnet/src/Generated/Models/InternalCreateChatCompletionStreamResponseServiceTier.cs new file mode 100644 index 000000000..0f2eb1326 --- /dev/null +++ b/.dotnet/src/Generated/Models/InternalCreateChatCompletionStreamResponseServiceTier.cs @@ -0,0 +1,36 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Chat +{ + internal readonly partial struct InternalCreateChatCompletionStreamResponseServiceTier : IEquatable + { + private readonly string _value; + + public InternalCreateChatCompletionStreamResponseServiceTier(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string ScaleValue = "scale"; + private const string DefaultValue = "default"; + + public static InternalCreateChatCompletionStreamResponseServiceTier Scale { get; } = new InternalCreateChatCompletionStreamResponseServiceTier(ScaleValue); + public static InternalCreateChatCompletionStreamResponseServiceTier Default { get; } = new InternalCreateChatCompletionStreamResponseServiceTier(DefaultValue); + public static bool operator ==(InternalCreateChatCompletionStreamResponseServiceTier left, InternalCreateChatCompletionStreamResponseServiceTier right) => left.Equals(right); + public static bool operator !=(InternalCreateChatCompletionStreamResponseServiceTier left, InternalCreateChatCompletionStreamResponseServiceTier right) => !left.Equals(right); + public static implicit operator InternalCreateChatCompletionStreamResponseServiceTier(string value) => new InternalCreateChatCompletionStreamResponseServiceTier(value); + + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is InternalCreateChatCompletionStreamResponseServiceTier other && Equals(other); + public bool Equals(InternalCreateChatCompletionStreamResponseServiceTier other) => string.Equals(_value, other._value, 
StringComparison.InvariantCultureIgnoreCase); + + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value != null ? StringComparer.InvariantCultureIgnoreCase.GetHashCode(_value) : 0; + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/InternalCreateFineTuningJobRequestHyperparametersBatchSizeChoiceEnum.cs b/.dotnet/src/Generated/Models/InternalCreateFineTuningJobRequestHyperparametersBatchSizeChoiceEnum.cs new file mode 100644 index 000000000..f104417d9 --- /dev/null +++ b/.dotnet/src/Generated/Models/InternalCreateFineTuningJobRequestHyperparametersBatchSizeChoiceEnum.cs @@ -0,0 +1,34 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.FineTuning +{ + internal readonly partial struct InternalCreateFineTuningJobRequestHyperparametersBatchSizeChoiceEnum : IEquatable + { + private readonly string _value; + + public InternalCreateFineTuningJobRequestHyperparametersBatchSizeChoiceEnum(string value) + { + _value = value ?? 
throw new ArgumentNullException(nameof(value)); + } + + private const string AutoValue = "auto"; + + public static InternalCreateFineTuningJobRequestHyperparametersBatchSizeChoiceEnum Auto { get; } = new InternalCreateFineTuningJobRequestHyperparametersBatchSizeChoiceEnum(AutoValue); + public static bool operator ==(InternalCreateFineTuningJobRequestHyperparametersBatchSizeChoiceEnum left, InternalCreateFineTuningJobRequestHyperparametersBatchSizeChoiceEnum right) => left.Equals(right); + public static bool operator !=(InternalCreateFineTuningJobRequestHyperparametersBatchSizeChoiceEnum left, InternalCreateFineTuningJobRequestHyperparametersBatchSizeChoiceEnum right) => !left.Equals(right); + public static implicit operator InternalCreateFineTuningJobRequestHyperparametersBatchSizeChoiceEnum(string value) => new InternalCreateFineTuningJobRequestHyperparametersBatchSizeChoiceEnum(value); + + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is InternalCreateFineTuningJobRequestHyperparametersBatchSizeChoiceEnum other && Equals(other); + public bool Equals(InternalCreateFineTuningJobRequestHyperparametersBatchSizeChoiceEnum other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value != null ? 
StringComparer.InvariantCultureIgnoreCase.GetHashCode(_value) : 0; + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/InternalCreateFineTuningJobRequestHyperparametersLearningRateMultiplierChoiceEnum.cs b/.dotnet/src/Generated/Models/InternalCreateFineTuningJobRequestHyperparametersLearningRateMultiplierChoiceEnum.cs new file mode 100644 index 000000000..9ff39cc7d --- /dev/null +++ b/.dotnet/src/Generated/Models/InternalCreateFineTuningJobRequestHyperparametersLearningRateMultiplierChoiceEnum.cs @@ -0,0 +1,34 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.FineTuning +{ + internal readonly partial struct InternalCreateFineTuningJobRequestHyperparametersLearningRateMultiplierChoiceEnum : IEquatable + { + private readonly string _value; + + public InternalCreateFineTuningJobRequestHyperparametersLearningRateMultiplierChoiceEnum(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string AutoValue = "auto"; + + public static InternalCreateFineTuningJobRequestHyperparametersLearningRateMultiplierChoiceEnum Auto { get; } = new InternalCreateFineTuningJobRequestHyperparametersLearningRateMultiplierChoiceEnum(AutoValue); + public static bool operator ==(InternalCreateFineTuningJobRequestHyperparametersLearningRateMultiplierChoiceEnum left, InternalCreateFineTuningJobRequestHyperparametersLearningRateMultiplierChoiceEnum right) => left.Equals(right); + public static bool operator !=(InternalCreateFineTuningJobRequestHyperparametersLearningRateMultiplierChoiceEnum left, InternalCreateFineTuningJobRequestHyperparametersLearningRateMultiplierChoiceEnum right) => !left.Equals(right); + public static implicit operator InternalCreateFineTuningJobRequestHyperparametersLearningRateMultiplierChoiceEnum(string value) => new InternalCreateFineTuningJobRequestHyperparametersLearningRateMultiplierChoiceEnum(value); + + 
[EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is InternalCreateFineTuningJobRequestHyperparametersLearningRateMultiplierChoiceEnum other && Equals(other); + public bool Equals(InternalCreateFineTuningJobRequestHyperparametersLearningRateMultiplierChoiceEnum other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value != null ? StringComparer.InvariantCultureIgnoreCase.GetHashCode(_value) : 0; + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/InternalCreateFineTuningJobRequestHyperparametersNEpochsChoiceEnum.cs b/.dotnet/src/Generated/Models/InternalCreateFineTuningJobRequestHyperparametersNEpochsChoiceEnum.cs new file mode 100644 index 000000000..8fad11a4b --- /dev/null +++ b/.dotnet/src/Generated/Models/InternalCreateFineTuningJobRequestHyperparametersNEpochsChoiceEnum.cs @@ -0,0 +1,34 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.FineTuning +{ + internal readonly partial struct InternalCreateFineTuningJobRequestHyperparametersNEpochsChoiceEnum : IEquatable + { + private readonly string _value; + + public InternalCreateFineTuningJobRequestHyperparametersNEpochsChoiceEnum(string value) + { + _value = value ?? 
throw new ArgumentNullException(nameof(value)); + } + + private const string AutoValue = "auto"; + + public static InternalCreateFineTuningJobRequestHyperparametersNEpochsChoiceEnum Auto { get; } = new InternalCreateFineTuningJobRequestHyperparametersNEpochsChoiceEnum(AutoValue); + public static bool operator ==(InternalCreateFineTuningJobRequestHyperparametersNEpochsChoiceEnum left, InternalCreateFineTuningJobRequestHyperparametersNEpochsChoiceEnum right) => left.Equals(right); + public static bool operator !=(InternalCreateFineTuningJobRequestHyperparametersNEpochsChoiceEnum left, InternalCreateFineTuningJobRequestHyperparametersNEpochsChoiceEnum right) => !left.Equals(right); + public static implicit operator InternalCreateFineTuningJobRequestHyperparametersNEpochsChoiceEnum(string value) => new InternalCreateFineTuningJobRequestHyperparametersNEpochsChoiceEnum(value); + + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is InternalCreateFineTuningJobRequestHyperparametersNEpochsChoiceEnum other && Equals(other); + public bool Equals(InternalCreateFineTuningJobRequestHyperparametersNEpochsChoiceEnum other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value != null ? 
StringComparer.InvariantCultureIgnoreCase.GetHashCode(_value) : 0; + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/InternalCreateFineTuningJobRequestModel.cs b/.dotnet/src/Generated/Models/InternalCreateFineTuningJobRequestModel.cs index ad3f54906..e85d5c334 100644 --- a/.dotnet/src/Generated/Models/InternalCreateFineTuningJobRequestModel.cs +++ b/.dotnet/src/Generated/Models/InternalCreateFineTuningJobRequestModel.cs @@ -19,10 +19,12 @@ public InternalCreateFineTuningJobRequestModel(string value) private const string Babbage002Value = "babbage-002"; private const string Davinci002Value = "davinci-002"; private const string Gpt35TurboValue = "gpt-3.5-turbo"; + private const string Gpt4oMiniValue = "gpt-4o-mini"; public static InternalCreateFineTuningJobRequestModel Babbage002 { get; } = new InternalCreateFineTuningJobRequestModel(Babbage002Value); public static InternalCreateFineTuningJobRequestModel Davinci002 { get; } = new InternalCreateFineTuningJobRequestModel(Davinci002Value); public static InternalCreateFineTuningJobRequestModel Gpt35Turbo { get; } = new InternalCreateFineTuningJobRequestModel(Gpt35TurboValue); + public static InternalCreateFineTuningJobRequestModel Gpt4oMini { get; } = new InternalCreateFineTuningJobRequestModel(Gpt4oMiniValue); public static bool operator ==(InternalCreateFineTuningJobRequestModel left, InternalCreateFineTuningJobRequestModel right) => left.Equals(right); public static bool operator !=(InternalCreateFineTuningJobRequestModel left, InternalCreateFineTuningJobRequestModel right) => !left.Equals(right); public static implicit operator InternalCreateFineTuningJobRequestModel(string value) => new InternalCreateFineTuningJobRequestModel(value); diff --git a/.dotnet/src/Generated/Models/InternalCreateRunRequestModel.cs b/.dotnet/src/Generated/Models/InternalCreateRunRequestModel.cs index 56b07d83d..27b73a31b 100644 --- a/.dotnet/src/Generated/Models/InternalCreateRunRequestModel.cs 
+++ b/.dotnet/src/Generated/Models/InternalCreateRunRequestModel.cs @@ -17,7 +17,10 @@ public InternalCreateRunRequestModel(string value) } private const string Gpt4oValue = "gpt-4o"; + private const string Gpt4o20240806Value = "gpt-4o-2024-08-06"; private const string Gpt4o20240513Value = "gpt-4o-2024-05-13"; + private const string Gpt4oMiniValue = "gpt-4o-mini"; + private const string Gpt4oMini20240718Value = "gpt-4o-mini-2024-07-18"; private const string Gpt4TurboValue = "gpt-4-turbo"; private const string Gpt4Turbo20240409Value = "gpt-4-turbo-2024-04-09"; private const string Gpt40125PreviewValue = "gpt-4-0125-preview"; @@ -38,7 +41,10 @@ public InternalCreateRunRequestModel(string value) private const string Gpt35Turbo16k0613Value = "gpt-3.5-turbo-16k-0613"; public static InternalCreateRunRequestModel Gpt4o { get; } = new InternalCreateRunRequestModel(Gpt4oValue); + public static InternalCreateRunRequestModel Gpt4o20240806 { get; } = new InternalCreateRunRequestModel(Gpt4o20240806Value); public static InternalCreateRunRequestModel Gpt4o20240513 { get; } = new InternalCreateRunRequestModel(Gpt4o20240513Value); + public static InternalCreateRunRequestModel Gpt4oMini { get; } = new InternalCreateRunRequestModel(Gpt4oMiniValue); + public static InternalCreateRunRequestModel Gpt4oMini20240718 { get; } = new InternalCreateRunRequestModel(Gpt4oMini20240718Value); public static InternalCreateRunRequestModel Gpt4Turbo { get; } = new InternalCreateRunRequestModel(Gpt4TurboValue); public static InternalCreateRunRequestModel Gpt4Turbo20240409 { get; } = new InternalCreateRunRequestModel(Gpt4Turbo20240409Value); public static InternalCreateRunRequestModel Gpt40125Preview { get; } = new InternalCreateRunRequestModel(Gpt40125PreviewValue); diff --git a/.dotnet/src/Generated/Models/InternalCreateThreadAndRunRequestModel.cs b/.dotnet/src/Generated/Models/InternalCreateThreadAndRunRequestModel.cs index ecabe34e7..04d84a623 100644 --- 
a/.dotnet/src/Generated/Models/InternalCreateThreadAndRunRequestModel.cs +++ b/.dotnet/src/Generated/Models/InternalCreateThreadAndRunRequestModel.cs @@ -17,7 +17,10 @@ public InternalCreateThreadAndRunRequestModel(string value) } private const string Gpt4oValue = "gpt-4o"; + private const string Gpt4o20240806Value = "gpt-4o-2024-08-06"; private const string Gpt4o20240513Value = "gpt-4o-2024-05-13"; + private const string Gpt4oMiniValue = "gpt-4o-mini"; + private const string Gpt4oMini20240718Value = "gpt-4o-mini-2024-07-18"; private const string Gpt4TurboValue = "gpt-4-turbo"; private const string Gpt4Turbo20240409Value = "gpt-4-turbo-2024-04-09"; private const string Gpt40125PreviewValue = "gpt-4-0125-preview"; @@ -38,7 +41,10 @@ public InternalCreateThreadAndRunRequestModel(string value) private const string Gpt35Turbo16k0613Value = "gpt-3.5-turbo-16k-0613"; public static InternalCreateThreadAndRunRequestModel Gpt4o { get; } = new InternalCreateThreadAndRunRequestModel(Gpt4oValue); + public static InternalCreateThreadAndRunRequestModel Gpt4o20240806 { get; } = new InternalCreateThreadAndRunRequestModel(Gpt4o20240806Value); public static InternalCreateThreadAndRunRequestModel Gpt4o20240513 { get; } = new InternalCreateThreadAndRunRequestModel(Gpt4o20240513Value); + public static InternalCreateThreadAndRunRequestModel Gpt4oMini { get; } = new InternalCreateThreadAndRunRequestModel(Gpt4oMiniValue); + public static InternalCreateThreadAndRunRequestModel Gpt4oMini20240718 { get; } = new InternalCreateThreadAndRunRequestModel(Gpt4oMini20240718Value); public static InternalCreateThreadAndRunRequestModel Gpt4Turbo { get; } = new InternalCreateThreadAndRunRequestModel(Gpt4TurboValue); public static InternalCreateThreadAndRunRequestModel Gpt4Turbo20240409 { get; } = new InternalCreateThreadAndRunRequestModel(Gpt4Turbo20240409Value); public static InternalCreateThreadAndRunRequestModel Gpt40125Preview { get; } = new 
InternalCreateThreadAndRunRequestModel(Gpt40125PreviewValue); diff --git a/.dotnet/src/Generated/Models/InternalCreateThreadAndRunRequestResponseFormat.cs b/.dotnet/src/Generated/Models/InternalCreateThreadAndRunRequestResponseFormat.cs deleted file mode 100644 index e268b7603..000000000 --- a/.dotnet/src/Generated/Models/InternalCreateThreadAndRunRequestResponseFormat.cs +++ /dev/null @@ -1,36 +0,0 @@ -// - -#nullable disable - -using System; -using System.ComponentModel; - -namespace OpenAI.Assistants -{ - internal readonly partial struct InternalCreateThreadAndRunRequestResponseFormat : IEquatable - { - private readonly string _value; - - public InternalCreateThreadAndRunRequestResponseFormat(string value) - { - _value = value ?? throw new ArgumentNullException(nameof(value)); - } - - private const string NoneValue = "none"; - private const string AutoValue = "auto"; - - public static InternalCreateThreadAndRunRequestResponseFormat None { get; } = new InternalCreateThreadAndRunRequestResponseFormat(NoneValue); - public static InternalCreateThreadAndRunRequestResponseFormat Auto { get; } = new InternalCreateThreadAndRunRequestResponseFormat(AutoValue); - public static bool operator ==(InternalCreateThreadAndRunRequestResponseFormat left, InternalCreateThreadAndRunRequestResponseFormat right) => left.Equals(right); - public static bool operator !=(InternalCreateThreadAndRunRequestResponseFormat left, InternalCreateThreadAndRunRequestResponseFormat right) => !left.Equals(right); - public static implicit operator InternalCreateThreadAndRunRequestResponseFormat(string value) => new InternalCreateThreadAndRunRequestResponseFormat(value); - - [EditorBrowsable(EditorBrowsableState.Never)] - public override bool Equals(object obj) => obj is InternalCreateThreadAndRunRequestResponseFormat other && Equals(other); - public bool Equals(InternalCreateThreadAndRunRequestResponseFormat other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); - 
- [EditorBrowsable(EditorBrowsableState.Never)] - public override int GetHashCode() => _value != null ? StringComparer.InvariantCultureIgnoreCase.GetHashCode(_value) : 0; - public override string ToString() => _value; - } -} diff --git a/.dotnet/src/Generated/Models/InternalCreateThreadAndRunRequestToolResources.Serialization.cs b/.dotnet/src/Generated/Models/InternalCreateThreadAndRunRequestToolResources.Serialization.cs index 3e3112c91..aff6e9e50 100644 --- a/.dotnet/src/Generated/Models/InternalCreateThreadAndRunRequestToolResources.Serialization.cs +++ b/.dotnet/src/Generated/Models/InternalCreateThreadAndRunRequestToolResources.Serialization.cs @@ -74,7 +74,7 @@ internal static InternalCreateThreadAndRunRequestToolResources DeserializeIntern return null; } InternalCreateThreadAndRunRequestToolResourcesCodeInterpreter codeInterpreter = default; - InternalCreateThreadAndRunRequestToolResourcesFileSearch fileSearch = default; + InternalToolResourcesFileSearchIdsOnly fileSearch = default; IDictionary serializedAdditionalRawData = default; Dictionary rawDataDictionary = new Dictionary(); foreach (var property in element.EnumerateObject()) @@ -94,7 +94,7 @@ internal static InternalCreateThreadAndRunRequestToolResources DeserializeIntern { continue; } - fileSearch = InternalCreateThreadAndRunRequestToolResourcesFileSearch.DeserializeInternalCreateThreadAndRunRequestToolResourcesFileSearch(property.Value, options); + fileSearch = InternalToolResourcesFileSearchIdsOnly.DeserializeInternalToolResourcesFileSearchIdsOnly(property.Value, options); continue; } if (true) diff --git a/.dotnet/src/Generated/Models/InternalCreateThreadAndRunRequestToolResources.cs b/.dotnet/src/Generated/Models/InternalCreateThreadAndRunRequestToolResources.cs index 452a48065..a07a484a3 100644 --- a/.dotnet/src/Generated/Models/InternalCreateThreadAndRunRequestToolResources.cs +++ b/.dotnet/src/Generated/Models/InternalCreateThreadAndRunRequestToolResources.cs @@ -14,7 +14,7 @@ public 
InternalCreateThreadAndRunRequestToolResources() { } - internal InternalCreateThreadAndRunRequestToolResources(InternalCreateThreadAndRunRequestToolResourcesCodeInterpreter codeInterpreter, InternalCreateThreadAndRunRequestToolResourcesFileSearch fileSearch, IDictionary serializedAdditionalRawData) + internal InternalCreateThreadAndRunRequestToolResources(InternalCreateThreadAndRunRequestToolResourcesCodeInterpreter codeInterpreter, InternalToolResourcesFileSearchIdsOnly fileSearch, IDictionary serializedAdditionalRawData) { CodeInterpreter = codeInterpreter; FileSearch = fileSearch; @@ -22,6 +22,6 @@ internal InternalCreateThreadAndRunRequestToolResources(InternalCreateThreadAndR } public InternalCreateThreadAndRunRequestToolResourcesCodeInterpreter CodeInterpreter { get; set; } - public InternalCreateThreadAndRunRequestToolResourcesFileSearch FileSearch { get; set; } + public InternalToolResourcesFileSearchIdsOnly FileSearch { get; set; } } } diff --git a/.dotnet/src/Generated/Models/InternalCreateThreadAndRunRequestToolResourcesFileSearch.Serialization.cs b/.dotnet/src/Generated/Models/InternalCreateThreadAndRunRequestToolResourcesFileSearch.Serialization.cs deleted file mode 100644 index 32a57d4a3..000000000 --- a/.dotnet/src/Generated/Models/InternalCreateThreadAndRunRequestToolResourcesFileSearch.Serialization.cs +++ /dev/null @@ -1,147 +0,0 @@ -// - -#nullable disable - -using System; -using System.ClientModel; -using System.ClientModel.Primitives; -using System.Collections.Generic; -using System.Text.Json; - -namespace OpenAI.Assistants -{ - internal partial class InternalCreateThreadAndRunRequestToolResourcesFileSearch : IJsonModel - { - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) - { - var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; - if (format != "J") - { - throw new FormatException($"The model {nameof(InternalCreateThreadAndRunRequestToolResourcesFileSearch)} does not support writing '{format}' format."); - } - - writer.WriteStartObject(); - if (SerializedAdditionalRawData?.ContainsKey("vector_store_ids") != true && Optional.IsCollectionDefined(VectorStoreIds)) - { - writer.WritePropertyName("vector_store_ids"u8); - writer.WriteStartArray(); - foreach (var item in VectorStoreIds) - { - writer.WriteStringValue(item); - } - writer.WriteEndArray(); - } - if (SerializedAdditionalRawData != null) - { - foreach (var item in SerializedAdditionalRawData) - { - if (ModelSerializationExtensions.IsSentinelValue(item.Value)) - { - continue; - } - writer.WritePropertyName(item.Key); -#if NET6_0_OR_GREATER - writer.WriteRawValue(item.Value); -#else - using (JsonDocument document = JsonDocument.Parse(item.Value)) - { - JsonSerializer.Serialize(writer, document.RootElement); - } -#endif - } - } - writer.WriteEndObject(); - } - - InternalCreateThreadAndRunRequestToolResourcesFileSearch IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) - { - var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; - if (format != "J") - { - throw new FormatException($"The model {nameof(InternalCreateThreadAndRunRequestToolResourcesFileSearch)} does not support reading '{format}' format."); - } - - using JsonDocument document = JsonDocument.ParseValue(ref reader); - return DeserializeInternalCreateThreadAndRunRequestToolResourcesFileSearch(document.RootElement, options); - } - - internal static InternalCreateThreadAndRunRequestToolResourcesFileSearch DeserializeInternalCreateThreadAndRunRequestToolResourcesFileSearch(JsonElement element, ModelReaderWriterOptions options = null) - { - options ??= ModelSerializationExtensions.WireOptions; - - if (element.ValueKind == JsonValueKind.Null) - { - return null; - } - IList vectorStoreIds = default; - IDictionary serializedAdditionalRawData = default; - Dictionary rawDataDictionary = new Dictionary(); - foreach (var property in element.EnumerateObject()) - { - if (property.NameEquals("vector_store_ids"u8)) - { - if (property.Value.ValueKind == JsonValueKind.Null) - { - continue; - } - List array = new List(); - foreach (var item in property.Value.EnumerateArray()) - { - array.Add(item.GetString()); - } - vectorStoreIds = array; - continue; - } - if (true) - { - rawDataDictionary ??= new Dictionary(); - rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); - } - } - serializedAdditionalRawData = rawDataDictionary; - return new InternalCreateThreadAndRunRequestToolResourcesFileSearch(vectorStoreIds ?? new ChangeTrackingList(), serializedAdditionalRawData); - } - - BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) - { - var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; - - switch (format) - { - case "J": - return ModelReaderWriter.Write(this, options); - default: - throw new FormatException($"The model {nameof(InternalCreateThreadAndRunRequestToolResourcesFileSearch)} does not support writing '{options.Format}' format."); - } - } - - InternalCreateThreadAndRunRequestToolResourcesFileSearch IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) - { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; - - switch (format) - { - case "J": - { - using JsonDocument document = JsonDocument.Parse(data); - return DeserializeInternalCreateThreadAndRunRequestToolResourcesFileSearch(document.RootElement, options); - } - default: - throw new FormatException($"The model {nameof(InternalCreateThreadAndRunRequestToolResourcesFileSearch)} does not support reading '{options.Format}' format."); - } - } - - string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; - - internal static InternalCreateThreadAndRunRequestToolResourcesFileSearch FromResponse(PipelineResponse response) - { - using var document = JsonDocument.Parse(response.Content); - return DeserializeInternalCreateThreadAndRunRequestToolResourcesFileSearch(document.RootElement); - } - - internal virtual BinaryContent ToBinaryContent() - { - return BinaryContent.Create(this, ModelSerializationExtensions.WireOptions); - } - } -} diff --git a/.dotnet/src/Generated/Models/InternalCreateThreadAndRunRequestToolResourcesFileSearch.cs b/.dotnet/src/Generated/Models/InternalCreateThreadAndRunRequestToolResourcesFileSearch.cs deleted file mode 100644 index 5a5c4b1f1..000000000 --- a/.dotnet/src/Generated/Models/InternalCreateThreadAndRunRequestToolResourcesFileSearch.cs +++ /dev/null @@ -1,26 +0,0 @@ -// - -#nullable disable - -using System; -using System.Collections.Generic; - -namespace OpenAI.Assistants -{ - 
internal partial class InternalCreateThreadAndRunRequestToolResourcesFileSearch - { - internal IDictionary SerializedAdditionalRawData { get; set; } - public InternalCreateThreadAndRunRequestToolResourcesFileSearch() - { - VectorStoreIds = new ChangeTrackingList(); - } - - internal InternalCreateThreadAndRunRequestToolResourcesFileSearch(IList vectorStoreIds, IDictionary serializedAdditionalRawData) - { - VectorStoreIds = vectorStoreIds; - SerializedAdditionalRawData = serializedAdditionalRawData; - } - - public IList VectorStoreIds { get; } - } -} diff --git a/.dotnet/src/Generated/Models/InternalCreateThreadRequestToolResources.Serialization.cs b/.dotnet/src/Generated/Models/InternalCreateThreadRequestToolResources.Serialization.cs index 24c1889b5..39b6689b2 100644 --- a/.dotnet/src/Generated/Models/InternalCreateThreadRequestToolResources.Serialization.cs +++ b/.dotnet/src/Generated/Models/InternalCreateThreadRequestToolResources.Serialization.cs @@ -29,14 +29,7 @@ void IJsonModel.Write(Utf8JsonWriter w if (SerializedAdditionalRawData?.ContainsKey("file_search") != true && Optional.IsDefined(FileSearch)) { writer.WritePropertyName("file_search"u8); -#if NET6_0_OR_GREATER - writer.WriteRawValue(FileSearch); -#else - using (JsonDocument document = JsonDocument.Parse(FileSearch)) - { - JsonSerializer.Serialize(writer, document.RootElement); - } -#endif + writer.WriteObjectValue(FileSearch, options); } if (SerializedAdditionalRawData != null) { @@ -81,7 +74,7 @@ internal static InternalCreateThreadRequestToolResources DeserializeInternalCrea return null; } InternalCreateThreadRequestToolResourcesCodeInterpreter codeInterpreter = default; - BinaryData fileSearch = default; + FileSearchToolResources fileSearch = default; IDictionary serializedAdditionalRawData = default; Dictionary rawDataDictionary = new Dictionary(); foreach (var property in element.EnumerateObject()) @@ -101,7 +94,7 @@ internal static InternalCreateThreadRequestToolResources DeserializeInternalCrea 
{ continue; } - fileSearch = BinaryData.FromString(property.Value.GetRawText()); + fileSearch = FileSearchToolResources.DeserializeFileSearchToolResources(property.Value, options); continue; } if (true) diff --git a/.dotnet/src/Generated/Models/InternalCreateThreadRequestToolResources.cs b/.dotnet/src/Generated/Models/InternalCreateThreadRequestToolResources.cs index ca16742e9..08a68fce1 100644 --- a/.dotnet/src/Generated/Models/InternalCreateThreadRequestToolResources.cs +++ b/.dotnet/src/Generated/Models/InternalCreateThreadRequestToolResources.cs @@ -14,7 +14,7 @@ public InternalCreateThreadRequestToolResources() { } - internal InternalCreateThreadRequestToolResources(InternalCreateThreadRequestToolResourcesCodeInterpreter codeInterpreter, BinaryData fileSearch, IDictionary serializedAdditionalRawData) + internal InternalCreateThreadRequestToolResources(InternalCreateThreadRequestToolResourcesCodeInterpreter codeInterpreter, FileSearchToolResources fileSearch, IDictionary serializedAdditionalRawData) { CodeInterpreter = codeInterpreter; FileSearch = fileSearch; @@ -22,6 +22,6 @@ internal InternalCreateThreadRequestToolResources(InternalCreateThreadRequestToo } public InternalCreateThreadRequestToolResourcesCodeInterpreter CodeInterpreter { get; set; } - public BinaryData FileSearch { get; set; } + public FileSearchToolResources FileSearch { get; set; } } } diff --git a/.dotnet/src/Generated/Models/InternalCreateThreadRequestToolResourcesFileSearchVectorStoreCreationHelpers.Serialization.cs b/.dotnet/src/Generated/Models/InternalCreateThreadRequestToolResourcesFileSearchVectorStoreCreationHelpers.Serialization.cs deleted file mode 100644 index a158ff57a..000000000 --- a/.dotnet/src/Generated/Models/InternalCreateThreadRequestToolResourcesFileSearchVectorStoreCreationHelpers.Serialization.cs +++ /dev/null @@ -1,147 +0,0 @@ -// - -#nullable disable - -using System; -using System.ClientModel; -using System.ClientModel.Primitives; -using System.Collections.Generic; 
-using System.Text.Json; - -namespace OpenAI.Assistants -{ - internal partial class InternalCreateThreadRequestToolResourcesFileSearchVectorStoreCreationHelpers : IJsonModel - { - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) - { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; - if (format != "J") - { - throw new FormatException($"The model {nameof(InternalCreateThreadRequestToolResourcesFileSearchVectorStoreCreationHelpers)} does not support writing '{format}' format."); - } - - writer.WriteStartObject(); - if (SerializedAdditionalRawData?.ContainsKey("vector_stores") != true && Optional.IsCollectionDefined(VectorStores)) - { - writer.WritePropertyName("vector_stores"u8); - writer.WriteStartArray(); - foreach (var item in VectorStores) - { - writer.WriteObjectValue(item, options); - } - writer.WriteEndArray(); - } - if (SerializedAdditionalRawData != null) - { - foreach (var item in SerializedAdditionalRawData) - { - if (ModelSerializationExtensions.IsSentinelValue(item.Value)) - { - continue; - } - writer.WritePropertyName(item.Key); -#if NET6_0_OR_GREATER - writer.WriteRawValue(item.Value); -#else - using (JsonDocument document = JsonDocument.Parse(item.Value)) - { - JsonSerializer.Serialize(writer, document.RootElement); - } -#endif - } - } - writer.WriteEndObject(); - } - - InternalCreateThreadRequestToolResourcesFileSearchVectorStoreCreationHelpers IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) - { - var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; - if (format != "J") - { - throw new FormatException($"The model {nameof(InternalCreateThreadRequestToolResourcesFileSearchVectorStoreCreationHelpers)} does not support reading '{format}' format."); - } - - using JsonDocument document = JsonDocument.ParseValue(ref reader); - return DeserializeInternalCreateThreadRequestToolResourcesFileSearchVectorStoreCreationHelpers(document.RootElement, options); - } - - internal static InternalCreateThreadRequestToolResourcesFileSearchVectorStoreCreationHelpers DeserializeInternalCreateThreadRequestToolResourcesFileSearchVectorStoreCreationHelpers(JsonElement element, ModelReaderWriterOptions options = null) - { - options ??= ModelSerializationExtensions.WireOptions; - - if (element.ValueKind == JsonValueKind.Null) - { - return null; - } - IList vectorStores = default; - IDictionary serializedAdditionalRawData = default; - Dictionary rawDataDictionary = new Dictionary(); - foreach (var property in element.EnumerateObject()) - { - if (property.NameEquals("vector_stores"u8)) - { - if (property.Value.ValueKind == JsonValueKind.Null) - { - continue; - } - List array = new List(); - foreach (var item in property.Value.EnumerateArray()) - { - array.Add(InternalCreateThreadRequestToolResourcesFileSearchVectorStoreCreationHelpersVectorStore.DeserializeInternalCreateThreadRequestToolResourcesFileSearchVectorStoreCreationHelpersVectorStore(item, options)); - } - vectorStores = array; - continue; - } - if (true) - { - rawDataDictionary ??= new Dictionary(); - rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); - } - } - serializedAdditionalRawData = rawDataDictionary; - return new InternalCreateThreadRequestToolResourcesFileSearchVectorStoreCreationHelpers(vectorStores ?? 
new ChangeTrackingList(), serializedAdditionalRawData); - } - - BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) - { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; - - switch (format) - { - case "J": - return ModelReaderWriter.Write(this, options); - default: - throw new FormatException($"The model {nameof(InternalCreateThreadRequestToolResourcesFileSearchVectorStoreCreationHelpers)} does not support writing '{options.Format}' format."); - } - } - - InternalCreateThreadRequestToolResourcesFileSearchVectorStoreCreationHelpers IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) - { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; - - switch (format) - { - case "J": - { - using JsonDocument document = JsonDocument.Parse(data); - return DeserializeInternalCreateThreadRequestToolResourcesFileSearchVectorStoreCreationHelpers(document.RootElement, options); - } - default: - throw new FormatException($"The model {nameof(InternalCreateThreadRequestToolResourcesFileSearchVectorStoreCreationHelpers)} does not support reading '{options.Format}' format."); - } - } - - string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; - - internal static InternalCreateThreadRequestToolResourcesFileSearchVectorStoreCreationHelpers FromResponse(PipelineResponse response) - { - using var document = JsonDocument.Parse(response.Content); - return DeserializeInternalCreateThreadRequestToolResourcesFileSearchVectorStoreCreationHelpers(document.RootElement); - } - - internal virtual BinaryContent ToBinaryContent() - { - return BinaryContent.Create(this, ModelSerializationExtensions.WireOptions); - } - } -} diff --git a/.dotnet/src/Generated/Models/InternalCreateThreadRequestToolResourcesFileSearchVectorStoreCreationHelpers.cs 
b/.dotnet/src/Generated/Models/InternalCreateThreadRequestToolResourcesFileSearchVectorStoreCreationHelpers.cs deleted file mode 100644 index f1854538c..000000000 --- a/.dotnet/src/Generated/Models/InternalCreateThreadRequestToolResourcesFileSearchVectorStoreCreationHelpers.cs +++ /dev/null @@ -1,26 +0,0 @@ -// - -#nullable disable - -using System; -using System.Collections.Generic; - -namespace OpenAI.Assistants -{ - internal partial class InternalCreateThreadRequestToolResourcesFileSearchVectorStoreCreationHelpers - { - internal IDictionary SerializedAdditionalRawData { get; set; } - public InternalCreateThreadRequestToolResourcesFileSearchVectorStoreCreationHelpers() - { - VectorStores = new ChangeTrackingList(); - } - - internal InternalCreateThreadRequestToolResourcesFileSearchVectorStoreCreationHelpers(IList vectorStores, IDictionary serializedAdditionalRawData) - { - VectorStores = vectorStores; - SerializedAdditionalRawData = serializedAdditionalRawData; - } - - public IList VectorStores { get; } - } -} diff --git a/.dotnet/src/Generated/Models/InternalCreateThreadRequestToolResourcesFileSearchVectorStoreCreationHelpersVectorStore.Serialization.cs b/.dotnet/src/Generated/Models/InternalCreateThreadRequestToolResourcesFileSearchVectorStoreCreationHelpersVectorStore.Serialization.cs deleted file mode 100644 index be93ed9cf..000000000 --- a/.dotnet/src/Generated/Models/InternalCreateThreadRequestToolResourcesFileSearchVectorStoreCreationHelpersVectorStore.Serialization.cs +++ /dev/null @@ -1,173 +0,0 @@ -// - -#nullable disable - -using System; -using System.ClientModel; -using System.ClientModel.Primitives; -using System.Collections.Generic; -using System.Text.Json; - -namespace OpenAI.Assistants -{ - internal partial class InternalCreateThreadRequestToolResourcesFileSearchVectorStoreCreationHelpersVectorStore : IJsonModel - { - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) - { - var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; - if (format != "J") - { - throw new FormatException($"The model {nameof(InternalCreateThreadRequestToolResourcesFileSearchVectorStoreCreationHelpersVectorStore)} does not support writing '{format}' format."); - } - - writer.WriteStartObject(); - if (SerializedAdditionalRawData?.ContainsKey("file_ids") != true && Optional.IsCollectionDefined(FileIds)) - { - writer.WritePropertyName("file_ids"u8); - writer.WriteStartArray(); - foreach (var item in FileIds) - { - writer.WriteStringValue(item); - } - writer.WriteEndArray(); - } - if (SerializedAdditionalRawData?.ContainsKey("metadata") != true && Optional.IsCollectionDefined(Metadata)) - { - writer.WritePropertyName("metadata"u8); - writer.WriteStartObject(); - foreach (var item in Metadata) - { - writer.WritePropertyName(item.Key); - writer.WriteStringValue(item.Value); - } - writer.WriteEndObject(); - } - if (SerializedAdditionalRawData != null) - { - foreach (var item in SerializedAdditionalRawData) - { - if (ModelSerializationExtensions.IsSentinelValue(item.Value)) - { - continue; - } - writer.WritePropertyName(item.Key); -#if NET6_0_OR_GREATER - writer.WriteRawValue(item.Value); -#else - using (JsonDocument document = JsonDocument.Parse(item.Value)) - { - JsonSerializer.Serialize(writer, document.RootElement); - } -#endif - } - } - writer.WriteEndObject(); - } - - InternalCreateThreadRequestToolResourcesFileSearchVectorStoreCreationHelpersVectorStore IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) - { - var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; - if (format != "J") - { - throw new FormatException($"The model {nameof(InternalCreateThreadRequestToolResourcesFileSearchVectorStoreCreationHelpersVectorStore)} does not support reading '{format}' format."); - } - - using JsonDocument document = JsonDocument.ParseValue(ref reader); - return DeserializeInternalCreateThreadRequestToolResourcesFileSearchVectorStoreCreationHelpersVectorStore(document.RootElement, options); - } - - internal static InternalCreateThreadRequestToolResourcesFileSearchVectorStoreCreationHelpersVectorStore DeserializeInternalCreateThreadRequestToolResourcesFileSearchVectorStoreCreationHelpersVectorStore(JsonElement element, ModelReaderWriterOptions options = null) - { - options ??= ModelSerializationExtensions.WireOptions; - - if (element.ValueKind == JsonValueKind.Null) - { - return null; - } - IList fileIds = default; - IDictionary metadata = default; - IDictionary serializedAdditionalRawData = default; - Dictionary rawDataDictionary = new Dictionary(); - foreach (var property in element.EnumerateObject()) - { - if (property.NameEquals("file_ids"u8)) - { - if (property.Value.ValueKind == JsonValueKind.Null) - { - continue; - } - List array = new List(); - foreach (var item in property.Value.EnumerateArray()) - { - array.Add(item.GetString()); - } - fileIds = array; - continue; - } - if (property.NameEquals("metadata"u8)) - { - if (property.Value.ValueKind == JsonValueKind.Null) - { - continue; - } - Dictionary dictionary = new Dictionary(); - foreach (var property0 in property.Value.EnumerateObject()) - { - dictionary.Add(property0.Name, property0.Value.GetString()); - } - metadata = dictionary; - continue; - } - if (true) - { - rawDataDictionary ??= new Dictionary(); - rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); - } - } - serializedAdditionalRawData = rawDataDictionary; - return new 
InternalCreateThreadRequestToolResourcesFileSearchVectorStoreCreationHelpersVectorStore(fileIds ?? new ChangeTrackingList(), metadata ?? new ChangeTrackingDictionary(), serializedAdditionalRawData); - } - - BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) - { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; - - switch (format) - { - case "J": - return ModelReaderWriter.Write(this, options); - default: - throw new FormatException($"The model {nameof(InternalCreateThreadRequestToolResourcesFileSearchVectorStoreCreationHelpersVectorStore)} does not support writing '{options.Format}' format."); - } - } - - InternalCreateThreadRequestToolResourcesFileSearchVectorStoreCreationHelpersVectorStore IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) - { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; - - switch (format) - { - case "J": - { - using JsonDocument document = JsonDocument.Parse(data); - return DeserializeInternalCreateThreadRequestToolResourcesFileSearchVectorStoreCreationHelpersVectorStore(document.RootElement, options); - } - default: - throw new FormatException($"The model {nameof(InternalCreateThreadRequestToolResourcesFileSearchVectorStoreCreationHelpersVectorStore)} does not support reading '{options.Format}' format."); - } - } - - string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; - - internal static InternalCreateThreadRequestToolResourcesFileSearchVectorStoreCreationHelpersVectorStore FromResponse(PipelineResponse response) - { - using var document = JsonDocument.Parse(response.Content); - return DeserializeInternalCreateThreadRequestToolResourcesFileSearchVectorStoreCreationHelpersVectorStore(document.RootElement); - } - - internal virtual BinaryContent ToBinaryContent() - { - return BinaryContent.Create(this, 
ModelSerializationExtensions.WireOptions); - } - } -} diff --git a/.dotnet/src/Generated/Models/InternalCreateThreadRequestToolResourcesFileSearchVectorStoreCreationHelpersVectorStore.cs b/.dotnet/src/Generated/Models/InternalCreateThreadRequestToolResourcesFileSearchVectorStoreCreationHelpersVectorStore.cs deleted file mode 100644 index 430b2c563..000000000 --- a/.dotnet/src/Generated/Models/InternalCreateThreadRequestToolResourcesFileSearchVectorStoreCreationHelpersVectorStore.cs +++ /dev/null @@ -1,29 +0,0 @@ -// - -#nullable disable - -using System; -using System.Collections.Generic; - -namespace OpenAI.Assistants -{ - internal partial class InternalCreateThreadRequestToolResourcesFileSearchVectorStoreCreationHelpersVectorStore - { - internal IDictionary SerializedAdditionalRawData { get; set; } - public InternalCreateThreadRequestToolResourcesFileSearchVectorStoreCreationHelpersVectorStore() - { - FileIds = new ChangeTrackingList(); - Metadata = new ChangeTrackingDictionary(); - } - - internal InternalCreateThreadRequestToolResourcesFileSearchVectorStoreCreationHelpersVectorStore(IList fileIds, IDictionary metadata, IDictionary serializedAdditionalRawData) - { - FileIds = fileIds; - Metadata = metadata; - SerializedAdditionalRawData = serializedAdditionalRawData; - } - - public IList FileIds { get; } - public IDictionary Metadata { get; } - } -} diff --git a/.dotnet/src/Generated/Models/InternalCreateThreadRequestToolResourcesFileSearchVectorStoreIdReferences.Serialization.cs b/.dotnet/src/Generated/Models/InternalCreateThreadRequestToolResourcesFileSearchVectorStoreIdReferences.Serialization.cs deleted file mode 100644 index e8e18dba6..000000000 --- a/.dotnet/src/Generated/Models/InternalCreateThreadRequestToolResourcesFileSearchVectorStoreIdReferences.Serialization.cs +++ /dev/null @@ -1,147 +0,0 @@ -// - -#nullable disable - -using System; -using System.ClientModel; -using System.ClientModel.Primitives; -using System.Collections.Generic; -using 
System.Text.Json; - -namespace OpenAI.Assistants -{ - internal partial class InternalCreateThreadRequestToolResourcesFileSearchVectorStoreIdReferences : IJsonModel - { - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) - { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; - if (format != "J") - { - throw new FormatException($"The model {nameof(InternalCreateThreadRequestToolResourcesFileSearchVectorStoreIdReferences)} does not support writing '{format}' format."); - } - - writer.WriteStartObject(); - if (SerializedAdditionalRawData?.ContainsKey("vector_store_ids") != true && Optional.IsCollectionDefined(VectorStoreIds)) - { - writer.WritePropertyName("vector_store_ids"u8); - writer.WriteStartArray(); - foreach (var item in VectorStoreIds) - { - writer.WriteStringValue(item); - } - writer.WriteEndArray(); - } - if (SerializedAdditionalRawData != null) - { - foreach (var item in SerializedAdditionalRawData) - { - if (ModelSerializationExtensions.IsSentinelValue(item.Value)) - { - continue; - } - writer.WritePropertyName(item.Key); -#if NET6_0_OR_GREATER - writer.WriteRawValue(item.Value); -#else - using (JsonDocument document = JsonDocument.Parse(item.Value)) - { - JsonSerializer.Serialize(writer, document.RootElement); - } -#endif - } - } - writer.WriteEndObject(); - } - - InternalCreateThreadRequestToolResourcesFileSearchVectorStoreIdReferences IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) - { - var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; - if (format != "J") - { - throw new FormatException($"The model {nameof(InternalCreateThreadRequestToolResourcesFileSearchVectorStoreIdReferences)} does not support reading '{format}' format."); - } - - using JsonDocument document = JsonDocument.ParseValue(ref reader); - return DeserializeInternalCreateThreadRequestToolResourcesFileSearchVectorStoreIdReferences(document.RootElement, options); - } - - internal static InternalCreateThreadRequestToolResourcesFileSearchVectorStoreIdReferences DeserializeInternalCreateThreadRequestToolResourcesFileSearchVectorStoreIdReferences(JsonElement element, ModelReaderWriterOptions options = null) - { - options ??= ModelSerializationExtensions.WireOptions; - - if (element.ValueKind == JsonValueKind.Null) - { - return null; - } - IList vectorStoreIds = default; - IDictionary serializedAdditionalRawData = default; - Dictionary rawDataDictionary = new Dictionary(); - foreach (var property in element.EnumerateObject()) - { - if (property.NameEquals("vector_store_ids"u8)) - { - if (property.Value.ValueKind == JsonValueKind.Null) - { - continue; - } - List array = new List(); - foreach (var item in property.Value.EnumerateArray()) - { - array.Add(item.GetString()); - } - vectorStoreIds = array; - continue; - } - if (true) - { - rawDataDictionary ??= new Dictionary(); - rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); - } - } - serializedAdditionalRawData = rawDataDictionary; - return new InternalCreateThreadRequestToolResourcesFileSearchVectorStoreIdReferences(vectorStoreIds ?? new ChangeTrackingList(), serializedAdditionalRawData); - } - - BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) - { - var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; - - switch (format) - { - case "J": - return ModelReaderWriter.Write(this, options); - default: - throw new FormatException($"The model {nameof(InternalCreateThreadRequestToolResourcesFileSearchVectorStoreIdReferences)} does not support writing '{options.Format}' format."); - } - } - - InternalCreateThreadRequestToolResourcesFileSearchVectorStoreIdReferences IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) - { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; - - switch (format) - { - case "J": - { - using JsonDocument document = JsonDocument.Parse(data); - return DeserializeInternalCreateThreadRequestToolResourcesFileSearchVectorStoreIdReferences(document.RootElement, options); - } - default: - throw new FormatException($"The model {nameof(InternalCreateThreadRequestToolResourcesFileSearchVectorStoreIdReferences)} does not support reading '{options.Format}' format."); - } - } - - string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; - - internal static InternalCreateThreadRequestToolResourcesFileSearchVectorStoreIdReferences FromResponse(PipelineResponse response) - { - using var document = JsonDocument.Parse(response.Content); - return DeserializeInternalCreateThreadRequestToolResourcesFileSearchVectorStoreIdReferences(document.RootElement); - } - - internal virtual BinaryContent ToBinaryContent() - { - return BinaryContent.Create(this, ModelSerializationExtensions.WireOptions); - } - } -} diff --git a/.dotnet/src/Generated/Models/InternalCreateThreadRequestToolResourcesFileSearchVectorStoreIdReferences.cs b/.dotnet/src/Generated/Models/InternalCreateThreadRequestToolResourcesFileSearchVectorStoreIdReferences.cs deleted file mode 100644 index 140500897..000000000 --- 
a/.dotnet/src/Generated/Models/InternalCreateThreadRequestToolResourcesFileSearchVectorStoreIdReferences.cs +++ /dev/null @@ -1,26 +0,0 @@ -// - -#nullable disable - -using System; -using System.Collections.Generic; - -namespace OpenAI.Assistants -{ - internal partial class InternalCreateThreadRequestToolResourcesFileSearchVectorStoreIdReferences - { - internal IDictionary SerializedAdditionalRawData { get; set; } - public InternalCreateThreadRequestToolResourcesFileSearchVectorStoreIdReferences() - { - VectorStoreIds = new ChangeTrackingList(); - } - - internal InternalCreateThreadRequestToolResourcesFileSearchVectorStoreIdReferences(IList vectorStoreIds, IDictionary serializedAdditionalRawData) - { - VectorStoreIds = vectorStoreIds; - SerializedAdditionalRawData = serializedAdditionalRawData; - } - - public IList VectorStoreIds { get; } - } -} diff --git a/.dotnet/src/Generated/Models/InternalCreateUploadRequest.Serialization.cs b/.dotnet/src/Generated/Models/InternalCreateUploadRequest.Serialization.cs new file mode 100644 index 000000000..e2ed769ba --- /dev/null +++ b/.dotnet/src/Generated/Models/InternalCreateUploadRequest.Serialization.cs @@ -0,0 +1,166 @@ +// + +#nullable disable + +using System; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Files +{ + internal partial class InternalCreateUploadRequest : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(InternalCreateUploadRequest)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (SerializedAdditionalRawData?.ContainsKey("filename") != true) + { + writer.WritePropertyName("filename"u8); + writer.WriteStringValue(Filename); + } + if (SerializedAdditionalRawData?.ContainsKey("purpose") != true) + { + writer.WritePropertyName("purpose"u8); + writer.WriteStringValue(Purpose.ToString()); + } + if (SerializedAdditionalRawData?.ContainsKey("bytes") != true) + { + writer.WritePropertyName("bytes"u8); + writer.WriteNumberValue(Bytes); + } + if (SerializedAdditionalRawData?.ContainsKey("mime_type") != true) + { + writer.WritePropertyName("mime_type"u8); + writer.WriteStringValue(MimeType); + } + if (SerializedAdditionalRawData != null) + { + foreach (var item in SerializedAdditionalRawData) + { + if (ModelSerializationExtensions.IsSentinelValue(item.Value)) + { + continue; + } + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + InternalCreateUploadRequest IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(InternalCreateUploadRequest)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeInternalCreateUploadRequest(document.RootElement, options); + } + + internal static InternalCreateUploadRequest DeserializeInternalCreateUploadRequest(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string filename = default; + InternalCreateUploadRequestPurpose purpose = default; + int bytes = default; + string mimeType = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("filename"u8)) + { + filename = property.Value.GetString(); + continue; + } + if (property.NameEquals("purpose"u8)) + { + purpose = new InternalCreateUploadRequestPurpose(property.Value.GetString()); + continue; + } + if (property.NameEquals("bytes"u8)) + { + bytes = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("mime_type"u8)) + { + mimeType = property.Value.GetString(); + continue; + } + if (true) + { + rawDataDictionary ??= new Dictionary(); + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new InternalCreateUploadRequest(filename, purpose, bytes, mimeType, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(InternalCreateUploadRequest)} does not support writing '{options.Format}' format."); + } + } + + InternalCreateUploadRequest IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeInternalCreateUploadRequest(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(InternalCreateUploadRequest)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + internal static InternalCreateUploadRequest FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeInternalCreateUploadRequest(document.RootElement); + } + + internal virtual BinaryContent ToBinaryContent() + { + return BinaryContent.Create(this, ModelSerializationExtensions.WireOptions); + } + } +} diff --git a/.dotnet/src/Generated/Models/InternalCreateUploadRequest.cs b/.dotnet/src/Generated/Models/InternalCreateUploadRequest.cs new file mode 100644 index 000000000..f580307a6 --- /dev/null +++ b/.dotnet/src/Generated/Models/InternalCreateUploadRequest.cs @@ -0,0 +1,42 @@ +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace OpenAI.Files +{ + internal partial class InternalCreateUploadRequest + { + internal IDictionary SerializedAdditionalRawData { get; set; } + public InternalCreateUploadRequest(string filename, InternalCreateUploadRequestPurpose purpose, int bytes, string mimeType) + { + 
Argument.AssertNotNull(filename, nameof(filename)); + Argument.AssertNotNull(mimeType, nameof(mimeType)); + + Filename = filename; + Purpose = purpose; + Bytes = bytes; + MimeType = mimeType; + } + + internal InternalCreateUploadRequest(string filename, InternalCreateUploadRequestPurpose purpose, int bytes, string mimeType, IDictionary serializedAdditionalRawData) + { + Filename = filename; + Purpose = purpose; + Bytes = bytes; + MimeType = mimeType; + SerializedAdditionalRawData = serializedAdditionalRawData; + } + + internal InternalCreateUploadRequest() + { + } + + public string Filename { get; } + public InternalCreateUploadRequestPurpose Purpose { get; } + public int Bytes { get; } + public string MimeType { get; } + } +} diff --git a/.dotnet/src/Generated/Models/InternalCreateUploadRequestPurpose.cs b/.dotnet/src/Generated/Models/InternalCreateUploadRequestPurpose.cs new file mode 100644 index 000000000..6b76958f0 --- /dev/null +++ b/.dotnet/src/Generated/Models/InternalCreateUploadRequestPurpose.cs @@ -0,0 +1,40 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Files +{ + internal readonly partial struct InternalCreateUploadRequestPurpose : IEquatable + { + private readonly string _value; + + public InternalCreateUploadRequestPurpose(string value) + { + _value = value ?? 
throw new ArgumentNullException(nameof(value)); + } + + private const string AssistantsValue = "assistants"; + private const string BatchValue = "batch"; + private const string FineTuneValue = "fine-tune"; + private const string VisionValue = "vision"; + + public static InternalCreateUploadRequestPurpose Assistants { get; } = new InternalCreateUploadRequestPurpose(AssistantsValue); + public static InternalCreateUploadRequestPurpose Batch { get; } = new InternalCreateUploadRequestPurpose(BatchValue); + public static InternalCreateUploadRequestPurpose FineTune { get; } = new InternalCreateUploadRequestPurpose(FineTuneValue); + public static InternalCreateUploadRequestPurpose Vision { get; } = new InternalCreateUploadRequestPurpose(VisionValue); + public static bool operator ==(InternalCreateUploadRequestPurpose left, InternalCreateUploadRequestPurpose right) => left.Equals(right); + public static bool operator !=(InternalCreateUploadRequestPurpose left, InternalCreateUploadRequestPurpose right) => !left.Equals(right); + public static implicit operator InternalCreateUploadRequestPurpose(string value) => new InternalCreateUploadRequestPurpose(value); + + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is InternalCreateUploadRequestPurpose other && Equals(other); + public bool Equals(InternalCreateUploadRequestPurpose other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value != null ? 
StringComparer.InvariantCultureIgnoreCase.GetHashCode(_value) : 0; + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/InternalFineTuneChatCompletionRequestAssistantMessage.Serialization.cs b/.dotnet/src/Generated/Models/InternalFineTuneChatCompletionRequestAssistantMessage.Serialization.cs index e60bfb399..9c1712f3c 100644 --- a/.dotnet/src/Generated/Models/InternalFineTuneChatCompletionRequestAssistantMessage.Serialization.cs +++ b/.dotnet/src/Generated/Models/InternalFineTuneChatCompletionRequestAssistantMessage.Serialization.cs @@ -22,6 +22,18 @@ void IJsonModel.Write(Utf } writer.WriteStartObject(); + if (SerializedAdditionalRawData?.ContainsKey("refusal") != true && Optional.IsDefined(Refusal)) + { + if (Refusal != null) + { + writer.WritePropertyName("refusal"u8); + writer.WriteStringValue(Refusal); + } + else + { + writer.WriteNull("refusal"); + } + } if (SerializedAdditionalRawData?.ContainsKey("name") != true && Optional.IsDefined(ParticipantName)) { writer.WritePropertyName("name"u8); @@ -33,7 +45,7 @@ void IJsonModel.Write(Utf writer.WriteStartArray(); foreach (var item in ToolCalls) { - writer.WriteObjectValue(item, options); + writer.WriteObjectValue(item, options); } writer.WriteEndArray(); } @@ -52,7 +64,7 @@ void IJsonModel.Write(Utf if (SerializedAdditionalRawData?.ContainsKey("role") != true) { writer.WritePropertyName("role"u8); - writer.WriteStringValue(Role); + writer.WriteStringValue(Role.ToSerialString()); } if (SerializedAdditionalRawData?.ContainsKey("content") != true && Optional.IsCollectionDefined(Content)) { @@ -101,15 +113,26 @@ internal static InternalFineTuneChatCompletionRequestAssistantMessage Deserializ { return null; } + string refusal = default; string name = default; IList toolCalls = default; ChatFunctionCall functionCall = default; - string role = default; + ChatMessageRole role = default; IList content = default; IDictionary serializedAdditionalRawData = default; Dictionary 
rawDataDictionary = new Dictionary(); foreach (var property in element.EnumerateObject()) { + if (property.NameEquals("refusal"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + refusal = null; + continue; + } + refusal = property.Value.GetString(); + continue; + } if (property.NameEquals("name"u8)) { name = property.Value.GetString(); @@ -141,7 +164,7 @@ internal static InternalFineTuneChatCompletionRequestAssistantMessage Deserializ } if (property.NameEquals("role"u8)) { - role = property.Value.GetString(); + role = property.Value.GetString().ToChatMessageRole(); continue; } if (property.NameEquals("content"u8)) @@ -160,6 +183,7 @@ internal static InternalFineTuneChatCompletionRequestAssistantMessage Deserializ role, content ?? new ChangeTrackingList(), serializedAdditionalRawData, + refusal, name, toolCalls ?? new ChangeTrackingList(), functionCall); diff --git a/.dotnet/src/Generated/Models/InternalFineTuneChatCompletionRequestAssistantMessage.cs b/.dotnet/src/Generated/Models/InternalFineTuneChatCompletionRequestAssistantMessage.cs index 51139e9f8..e7f33bc58 100644 --- a/.dotnet/src/Generated/Models/InternalFineTuneChatCompletionRequestAssistantMessage.cs +++ b/.dotnet/src/Generated/Models/InternalFineTuneChatCompletionRequestAssistantMessage.cs @@ -14,7 +14,7 @@ public InternalFineTuneChatCompletionRequestAssistantMessage() { } - internal InternalFineTuneChatCompletionRequestAssistantMessage(string role, IList content, IDictionary serializedAdditionalRawData, string participantName, IList toolCalls, ChatFunctionCall functionCall) : base(role, content, serializedAdditionalRawData, participantName, toolCalls, functionCall) + internal InternalFineTuneChatCompletionRequestAssistantMessage(ChatMessageRole role, IList content, IDictionary serializedAdditionalRawData, string refusal, string participantName, IList toolCalls, ChatFunctionCall functionCall) : base(role, content, serializedAdditionalRawData, refusal, participantName, toolCalls, 
functionCall) { } } diff --git a/.dotnet/src/Generated/Models/InternalFineTuningJobHyperparametersNEpochsChoiceEnum.cs b/.dotnet/src/Generated/Models/InternalFineTuningJobHyperparametersNEpochsChoiceEnum.cs new file mode 100644 index 000000000..02f0f0754 --- /dev/null +++ b/.dotnet/src/Generated/Models/InternalFineTuningJobHyperparametersNEpochsChoiceEnum.cs @@ -0,0 +1,34 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.FineTuning +{ + internal readonly partial struct InternalFineTuningJobHyperparametersNEpochsChoiceEnum : IEquatable + { + private readonly string _value; + + public InternalFineTuningJobHyperparametersNEpochsChoiceEnum(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string AutoValue = "auto"; + + public static InternalFineTuningJobHyperparametersNEpochsChoiceEnum Auto { get; } = new InternalFineTuningJobHyperparametersNEpochsChoiceEnum(AutoValue); + public static bool operator ==(InternalFineTuningJobHyperparametersNEpochsChoiceEnum left, InternalFineTuningJobHyperparametersNEpochsChoiceEnum right) => left.Equals(right); + public static bool operator !=(InternalFineTuningJobHyperparametersNEpochsChoiceEnum left, InternalFineTuningJobHyperparametersNEpochsChoiceEnum right) => !left.Equals(right); + public static implicit operator InternalFineTuningJobHyperparametersNEpochsChoiceEnum(string value) => new InternalFineTuningJobHyperparametersNEpochsChoiceEnum(value); + + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is InternalFineTuningJobHyperparametersNEpochsChoiceEnum other && Equals(other); + public bool Equals(InternalFineTuningJobHyperparametersNEpochsChoiceEnum other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value != null ? 
StringComparer.InvariantCultureIgnoreCase.GetHashCode(_value) : 0; + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/InternalFunctionDefinition.Serialization.cs b/.dotnet/src/Generated/Models/InternalFunctionDefinition.Serialization.cs index 095286f50..57335523d 100644 --- a/.dotnet/src/Generated/Models/InternalFunctionDefinition.Serialization.cs +++ b/.dotnet/src/Generated/Models/InternalFunctionDefinition.Serialization.cs @@ -43,6 +43,18 @@ void IJsonModel.Write(Utf8JsonWriter writer, ModelRe } #endif } + if (SerializedAdditionalRawData?.ContainsKey("strict") != true && Optional.IsDefined(Strict)) + { + if (Strict != null) + { + writer.WritePropertyName("strict"u8); + writer.WriteBooleanValue(Strict.Value); + } + else + { + writer.WriteNull("strict"); + } + } if (SerializedAdditionalRawData != null) { foreach (var item in SerializedAdditionalRawData) @@ -88,6 +100,7 @@ internal static InternalFunctionDefinition DeserializeInternalFunctionDefinition string description = default; string name = default; BinaryData parameters = default; + bool? 
strict = default; IDictionary serializedAdditionalRawData = default; Dictionary rawDataDictionary = new Dictionary(); foreach (var property in element.EnumerateObject()) @@ -111,6 +124,16 @@ internal static InternalFunctionDefinition DeserializeInternalFunctionDefinition parameters = BinaryData.FromString(property.Value.GetRawText()); continue; } + if (property.NameEquals("strict"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + strict = null; + continue; + } + strict = property.Value.GetBoolean(); + continue; + } if (true) { rawDataDictionary ??= new Dictionary(); @@ -118,7 +141,7 @@ internal static InternalFunctionDefinition DeserializeInternalFunctionDefinition } } serializedAdditionalRawData = rawDataDictionary; - return new InternalFunctionDefinition(description, name, parameters, serializedAdditionalRawData); + return new InternalFunctionDefinition(description, name, parameters, strict, serializedAdditionalRawData); } BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) diff --git a/.dotnet/src/Generated/Models/InternalFunctionDefinition.cs b/.dotnet/src/Generated/Models/InternalFunctionDefinition.cs index 836a40e4b..9e385deb8 100644 --- a/.dotnet/src/Generated/Models/InternalFunctionDefinition.cs +++ b/.dotnet/src/Generated/Models/InternalFunctionDefinition.cs @@ -17,11 +17,12 @@ public InternalFunctionDefinition(string name) Name = name; } - internal InternalFunctionDefinition(string description, string name, BinaryData parameters, IDictionary serializedAdditionalRawData) + internal InternalFunctionDefinition(string description, string name, BinaryData parameters, bool? strict, IDictionary serializedAdditionalRawData) { Description = description; Name = name; Parameters = parameters; + Strict = strict; SerializedAdditionalRawData = serializedAdditionalRawData; } @@ -31,5 +32,6 @@ internal InternalFunctionDefinition() public string Description { get; set; } public string Name { get; set; } + public bool? 
Strict { get; set; } } } diff --git a/.dotnet/src/Generated/Models/InternalListFilesInVectorStoreBatchRequestOrder.cs b/.dotnet/src/Generated/Models/InternalListFilesInVectorStoreBatchRequestOrder.cs new file mode 100644 index 000000000..3cabf6088 --- /dev/null +++ b/.dotnet/src/Generated/Models/InternalListFilesInVectorStoreBatchRequestOrder.cs @@ -0,0 +1,36 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.VectorStores +{ + internal readonly partial struct InternalListFilesInVectorStoreBatchRequestOrder : IEquatable + { + private readonly string _value; + + public InternalListFilesInVectorStoreBatchRequestOrder(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string AscValue = "asc"; + private const string DescValue = "desc"; + + public static InternalListFilesInVectorStoreBatchRequestOrder Asc { get; } = new InternalListFilesInVectorStoreBatchRequestOrder(AscValue); + public static InternalListFilesInVectorStoreBatchRequestOrder Desc { get; } = new InternalListFilesInVectorStoreBatchRequestOrder(DescValue); + public static bool operator ==(InternalListFilesInVectorStoreBatchRequestOrder left, InternalListFilesInVectorStoreBatchRequestOrder right) => left.Equals(right); + public static bool operator !=(InternalListFilesInVectorStoreBatchRequestOrder left, InternalListFilesInVectorStoreBatchRequestOrder right) => !left.Equals(right); + public static implicit operator InternalListFilesInVectorStoreBatchRequestOrder(string value) => new InternalListFilesInVectorStoreBatchRequestOrder(value); + + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is InternalListFilesInVectorStoreBatchRequestOrder other && Equals(other); + public bool Equals(InternalListFilesInVectorStoreBatchRequestOrder other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + 
[EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value != null ? StringComparer.InvariantCultureIgnoreCase.GetHashCode(_value) : 0; + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/InternalMessageContentRefusalObjectType.cs b/.dotnet/src/Generated/Models/InternalMessageContentRefusalObjectType.cs new file mode 100644 index 000000000..7a127f292 --- /dev/null +++ b/.dotnet/src/Generated/Models/InternalMessageContentRefusalObjectType.cs @@ -0,0 +1,34 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Assistants +{ + internal readonly partial struct InternalMessageContentRefusalObjectType : IEquatable + { + private readonly string _value; + + public InternalMessageContentRefusalObjectType(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string RefusalValue = "refusal"; + + public static InternalMessageContentRefusalObjectType Refusal { get; } = new InternalMessageContentRefusalObjectType(RefusalValue); + public static bool operator ==(InternalMessageContentRefusalObjectType left, InternalMessageContentRefusalObjectType right) => left.Equals(right); + public static bool operator !=(InternalMessageContentRefusalObjectType left, InternalMessageContentRefusalObjectType right) => !left.Equals(right); + public static implicit operator InternalMessageContentRefusalObjectType(string value) => new InternalMessageContentRefusalObjectType(value); + + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is InternalMessageContentRefusalObjectType other && Equals(other); + public bool Equals(InternalMessageContentRefusalObjectType other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value != null ? 
StringComparer.InvariantCultureIgnoreCase.GetHashCode(_value) : 0; + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/InternalMessageDeltaContent.Serialization.cs b/.dotnet/src/Generated/Models/InternalMessageDeltaContent.Serialization.cs index b8ac6343c..7b0d037e3 100644 --- a/.dotnet/src/Generated/Models/InternalMessageDeltaContent.Serialization.cs +++ b/.dotnet/src/Generated/Models/InternalMessageDeltaContent.Serialization.cs @@ -74,6 +74,7 @@ internal static InternalMessageDeltaContent DeserializeInternalMessageDeltaConte { case "image_file": return InternalMessageDeltaContentImageFileObject.DeserializeInternalMessageDeltaContentImageFileObject(element, options); case "image_url": return InternalMessageDeltaContentImageUrlObject.DeserializeInternalMessageDeltaContentImageUrlObject(element, options); + case "refusal": return InternalMessageDeltaContentRefusalObject.DeserializeInternalMessageDeltaContentRefusalObject(element, options); case "text": return InternalMessageDeltaContentTextObject.DeserializeInternalMessageDeltaContentTextObject(element, options); } } diff --git a/.dotnet/src/Generated/Models/InternalMessageDeltaContentRefusalObject.Serialization.cs b/.dotnet/src/Generated/Models/InternalMessageDeltaContentRefusalObject.Serialization.cs new file mode 100644 index 000000000..1668fc35c --- /dev/null +++ b/.dotnet/src/Generated/Models/InternalMessageDeltaContentRefusalObject.Serialization.cs @@ -0,0 +1,155 @@ +// + +#nullable disable + +using System; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Assistants +{ + internal partial class InternalMessageDeltaContentRefusalObject : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(InternalMessageDeltaContentRefusalObject)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (SerializedAdditionalRawData?.ContainsKey("index") != true) + { + writer.WritePropertyName("index"u8); + writer.WriteNumberValue(Index); + } + if (SerializedAdditionalRawData?.ContainsKey("refusal") != true && Optional.IsDefined(Refusal)) + { + writer.WritePropertyName("refusal"u8); + writer.WriteStringValue(Refusal); + } + if (SerializedAdditionalRawData?.ContainsKey("type") != true) + { + writer.WritePropertyName("type"u8); + writer.WriteStringValue(Type); + } + if (SerializedAdditionalRawData != null) + { + foreach (var item in SerializedAdditionalRawData) + { + if (ModelSerializationExtensions.IsSentinelValue(item.Value)) + { + continue; + } + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + InternalMessageDeltaContentRefusalObject IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(InternalMessageDeltaContentRefusalObject)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeInternalMessageDeltaContentRefusalObject(document.RootElement, options); + } + + internal static InternalMessageDeltaContentRefusalObject DeserializeInternalMessageDeltaContentRefusalObject(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + int index = default; + string refusal = default; + string type = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("index"u8)) + { + index = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("refusal"u8)) + { + refusal = property.Value.GetString(); + continue; + } + if (property.NameEquals("type"u8)) + { + type = property.Value.GetString(); + continue; + } + if (true) + { + rawDataDictionary ??= new Dictionary(); + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new InternalMessageDeltaContentRefusalObject(type, serializedAdditionalRawData, index, refusal); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(InternalMessageDeltaContentRefusalObject)} does not support writing '{options.Format}' format."); + } + } + + InternalMessageDeltaContentRefusalObject IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeInternalMessageDeltaContentRefusalObject(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(InternalMessageDeltaContentRefusalObject)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + internal static new InternalMessageDeltaContentRefusalObject FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeInternalMessageDeltaContentRefusalObject(document.RootElement); + } + + internal override BinaryContent ToBinaryContent() + { + return BinaryContent.Create(this, ModelSerializationExtensions.WireOptions); + } + } +} diff --git a/.dotnet/src/Generated/Models/InternalMessageDeltaContentRefusalObject.cs b/.dotnet/src/Generated/Models/InternalMessageDeltaContentRefusalObject.cs new file mode 100644 index 000000000..d7b88a830 --- /dev/null +++ b/.dotnet/src/Generated/Models/InternalMessageDeltaContentRefusalObject.cs @@ -0,0 +1,31 @@ +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace OpenAI.Assistants +{ + internal partial class InternalMessageDeltaContentRefusalObject : InternalMessageDeltaContent + { + internal 
InternalMessageDeltaContentRefusalObject(int index) + { + Type = "refusal"; + Index = index; + } + + internal InternalMessageDeltaContentRefusalObject(string type, IDictionary serializedAdditionalRawData, int index, string refusal) : base(type, serializedAdditionalRawData) + { + Index = index; + Refusal = refusal; + } + + internal InternalMessageDeltaContentRefusalObject() + { + } + + public int Index { get; } + public string Refusal { get; } + } +} diff --git a/.dotnet/src/Generated/Models/InternalMessageRefusalContent.Serialization.cs b/.dotnet/src/Generated/Models/InternalMessageRefusalContent.Serialization.cs new file mode 100644 index 000000000..6e3c9846f --- /dev/null +++ b/.dotnet/src/Generated/Models/InternalMessageRefusalContent.Serialization.cs @@ -0,0 +1,103 @@ +// + +#nullable disable + +using System; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Assistants +{ + internal partial class InternalMessageRefusalContent : IJsonModel + { + InternalMessageRefusalContent IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(InternalMessageRefusalContent)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeInternalMessageRefusalContent(document.RootElement, options); + } + + internal static InternalMessageRefusalContent DeserializeInternalMessageRefusalContent(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string type = default; + string refusal = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("type"u8)) + { + type = property.Value.GetString(); + continue; + } + if (property.NameEquals("refusal"u8)) + { + refusal = property.Value.GetString(); + continue; + } + if (true) + { + rawDataDictionary ??= new Dictionary(); + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new InternalMessageRefusalContent(serializedAdditionalRawData, type, refusal); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(InternalMessageRefusalContent)} does not support writing '{options.Format}' format."); + } + } + + InternalMessageRefusalContent IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeInternalMessageRefusalContent(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(InternalMessageRefusalContent)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + internal static new InternalMessageRefusalContent FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeInternalMessageRefusalContent(document.RootElement); + } + + internal override BinaryContent ToBinaryContent() + { + return BinaryContent.Create(this, ModelSerializationExtensions.WireOptions); + } + } +} diff --git a/.dotnet/src/Generated/Models/InternalMessageRefusalContent.cs b/.dotnet/src/Generated/Models/InternalMessageRefusalContent.cs new file mode 100644 index 000000000..dc6f0e0af --- /dev/null +++ b/.dotnet/src/Generated/Models/InternalMessageRefusalContent.cs @@ -0,0 +1,29 @@ +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace OpenAI.Assistants +{ + internal partial class InternalMessageRefusalContent : MessageContent + { + public InternalMessageRefusalContent(string internalRefusal) + { + Argument.AssertNotNull(internalRefusal, nameof(internalRefusal)); + + InternalRefusal = internalRefusal; + } + + internal InternalMessageRefusalContent(IDictionary serializedAdditionalRawData, string type, string internalRefusal) : base(serializedAdditionalRawData) + { + _type = type; + InternalRefusal = internalRefusal; + } + + internal InternalMessageRefusalContent() + { + } + } +} diff --git a/.dotnet/src/Generated/Models/InternalModifyAssistantRequestToolResources.Serialization.cs 
b/.dotnet/src/Generated/Models/InternalModifyAssistantRequestToolResources.Serialization.cs index 7c0d6a685..bb4a89610 100644 --- a/.dotnet/src/Generated/Models/InternalModifyAssistantRequestToolResources.Serialization.cs +++ b/.dotnet/src/Generated/Models/InternalModifyAssistantRequestToolResources.Serialization.cs @@ -74,7 +74,7 @@ internal static InternalModifyAssistantRequestToolResources DeserializeInternalM return null; } InternalModifyAssistantRequestToolResourcesCodeInterpreter codeInterpreter = default; - InternalModifyAssistantRequestToolResourcesFileSearch fileSearch = default; + InternalToolResourcesFileSearchIdsOnly fileSearch = default; IDictionary serializedAdditionalRawData = default; Dictionary rawDataDictionary = new Dictionary(); foreach (var property in element.EnumerateObject()) @@ -94,7 +94,7 @@ internal static InternalModifyAssistantRequestToolResources DeserializeInternalM { continue; } - fileSearch = InternalModifyAssistantRequestToolResourcesFileSearch.DeserializeInternalModifyAssistantRequestToolResourcesFileSearch(property.Value, options); + fileSearch = InternalToolResourcesFileSearchIdsOnly.DeserializeInternalToolResourcesFileSearchIdsOnly(property.Value, options); continue; } if (true) diff --git a/.dotnet/src/Generated/Models/InternalModifyAssistantRequestToolResources.cs b/.dotnet/src/Generated/Models/InternalModifyAssistantRequestToolResources.cs index 5aa4a77d8..3bcf4c24d 100644 --- a/.dotnet/src/Generated/Models/InternalModifyAssistantRequestToolResources.cs +++ b/.dotnet/src/Generated/Models/InternalModifyAssistantRequestToolResources.cs @@ -14,7 +14,7 @@ public InternalModifyAssistantRequestToolResources() { } - internal InternalModifyAssistantRequestToolResources(InternalModifyAssistantRequestToolResourcesCodeInterpreter codeInterpreter, InternalModifyAssistantRequestToolResourcesFileSearch fileSearch, IDictionary serializedAdditionalRawData) + internal 
InternalModifyAssistantRequestToolResources(InternalModifyAssistantRequestToolResourcesCodeInterpreter codeInterpreter, InternalToolResourcesFileSearchIdsOnly fileSearch, IDictionary serializedAdditionalRawData) { CodeInterpreter = codeInterpreter; FileSearch = fileSearch; @@ -22,6 +22,6 @@ internal InternalModifyAssistantRequestToolResources(InternalModifyAssistantRequ } public InternalModifyAssistantRequestToolResourcesCodeInterpreter CodeInterpreter { get; set; } - public InternalModifyAssistantRequestToolResourcesFileSearch FileSearch { get; set; } + public InternalToolResourcesFileSearchIdsOnly FileSearch { get; set; } } } diff --git a/.dotnet/src/Generated/Models/InternalModifyAssistantRequestToolResourcesFileSearch.cs b/.dotnet/src/Generated/Models/InternalModifyAssistantRequestToolResourcesFileSearch.cs deleted file mode 100644 index e9b82d57a..000000000 --- a/.dotnet/src/Generated/Models/InternalModifyAssistantRequestToolResourcesFileSearch.cs +++ /dev/null @@ -1,26 +0,0 @@ -// - -#nullable disable - -using System; -using System.Collections.Generic; - -namespace OpenAI.Assistants -{ - internal partial class InternalModifyAssistantRequestToolResourcesFileSearch - { - internal IDictionary SerializedAdditionalRawData { get; set; } - public InternalModifyAssistantRequestToolResourcesFileSearch() - { - VectorStoreIds = new ChangeTrackingList(); - } - - internal InternalModifyAssistantRequestToolResourcesFileSearch(IList vectorStoreIds, IDictionary serializedAdditionalRawData) - { - VectorStoreIds = vectorStoreIds; - SerializedAdditionalRawData = serializedAdditionalRawData; - } - - public IList VectorStoreIds { get; } - } -} diff --git a/.dotnet/src/Generated/Models/InternalModifyThreadRequestToolResources.Serialization.cs b/.dotnet/src/Generated/Models/InternalModifyThreadRequestToolResources.Serialization.cs index af74f65f2..3354accf3 100644 --- a/.dotnet/src/Generated/Models/InternalModifyThreadRequestToolResources.Serialization.cs +++ 
b/.dotnet/src/Generated/Models/InternalModifyThreadRequestToolResources.Serialization.cs @@ -74,7 +74,7 @@ internal static InternalModifyThreadRequestToolResources DeserializeInternalModi return null; } InternalModifyThreadRequestToolResourcesCodeInterpreter codeInterpreter = default; - InternalModifyThreadRequestToolResourcesFileSearch fileSearch = default; + InternalToolResourcesFileSearchIdsOnly fileSearch = default; IDictionary serializedAdditionalRawData = default; Dictionary rawDataDictionary = new Dictionary(); foreach (var property in element.EnumerateObject()) @@ -94,7 +94,7 @@ internal static InternalModifyThreadRequestToolResources DeserializeInternalModi { continue; } - fileSearch = InternalModifyThreadRequestToolResourcesFileSearch.DeserializeInternalModifyThreadRequestToolResourcesFileSearch(property.Value, options); + fileSearch = InternalToolResourcesFileSearchIdsOnly.DeserializeInternalToolResourcesFileSearchIdsOnly(property.Value, options); continue; } if (true) diff --git a/.dotnet/src/Generated/Models/InternalModifyThreadRequestToolResources.cs b/.dotnet/src/Generated/Models/InternalModifyThreadRequestToolResources.cs index 0d8078ba4..0876ba2de 100644 --- a/.dotnet/src/Generated/Models/InternalModifyThreadRequestToolResources.cs +++ b/.dotnet/src/Generated/Models/InternalModifyThreadRequestToolResources.cs @@ -14,7 +14,7 @@ public InternalModifyThreadRequestToolResources() { } - internal InternalModifyThreadRequestToolResources(InternalModifyThreadRequestToolResourcesCodeInterpreter codeInterpreter, InternalModifyThreadRequestToolResourcesFileSearch fileSearch, IDictionary serializedAdditionalRawData) + internal InternalModifyThreadRequestToolResources(InternalModifyThreadRequestToolResourcesCodeInterpreter codeInterpreter, InternalToolResourcesFileSearchIdsOnly fileSearch, IDictionary serializedAdditionalRawData) { CodeInterpreter = codeInterpreter; FileSearch = fileSearch; @@ -22,6 +22,6 @@ internal 
InternalModifyThreadRequestToolResources(InternalModifyThreadRequestToo } public InternalModifyThreadRequestToolResourcesCodeInterpreter CodeInterpreter { get; set; } - public InternalModifyThreadRequestToolResourcesFileSearch FileSearch { get; set; } + public InternalToolResourcesFileSearchIdsOnly FileSearch { get; set; } } } diff --git a/.dotnet/src/Generated/Models/InternalOmniTypedResponseFormat.Serialization.cs b/.dotnet/src/Generated/Models/InternalOmniTypedResponseFormat.Serialization.cs new file mode 100644 index 000000000..6ccf09de3 --- /dev/null +++ b/.dotnet/src/Generated/Models/InternalOmniTypedResponseFormat.Serialization.cs @@ -0,0 +1,125 @@ +// + +#nullable disable + +using System; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Text.Json; + +namespace OpenAI.Internal +{ + [PersistableModelProxy(typeof(InternalUnknownOmniTypedResponseFormat))] + internal partial class InternalOmniTypedResponseFormat : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(InternalOmniTypedResponseFormat)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (SerializedAdditionalRawData?.ContainsKey("type") != true) + { + writer.WritePropertyName("type"u8); + writer.WriteStringValue(Type); + } + if (SerializedAdditionalRawData != null) + { + foreach (var item in SerializedAdditionalRawData) + { + if (ModelSerializationExtensions.IsSentinelValue(item.Value)) + { + continue; + } + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + InternalOmniTypedResponseFormat IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(InternalOmniTypedResponseFormat)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeInternalOmniTypedResponseFormat(document.RootElement, options); + } + + internal static InternalOmniTypedResponseFormat DeserializeInternalOmniTypedResponseFormat(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + if (element.TryGetProperty("type", out JsonElement discriminator)) + { + switch (discriminator.GetString()) + { + case "json_object": return InternalResponseFormatJsonObject.DeserializeInternalResponseFormatJsonObject(element, options); + case "json_schema": return InternalResponseFormatJsonSchema.DeserializeInternalResponseFormatJsonSchema(element, options); + case "text": return InternalResponseFormatText.DeserializeInternalResponseFormatText(element, options); + } + } + return InternalUnknownOmniTypedResponseFormat.DeserializeInternalUnknownOmniTypedResponseFormat(element, options); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(InternalOmniTypedResponseFormat)} does not support writing '{options.Format}' format."); + } + } + + InternalOmniTypedResponseFormat IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeInternalOmniTypedResponseFormat(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(InternalOmniTypedResponseFormat)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + internal static InternalOmniTypedResponseFormat FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeInternalOmniTypedResponseFormat(document.RootElement); + } + + internal virtual BinaryContent ToBinaryContent() + { + return BinaryContent.Create(this, ModelSerializationExtensions.WireOptions); + } + } +} diff --git a/.dotnet/src/Generated/Models/InternalOmniTypedResponseFormat.cs b/.dotnet/src/Generated/Models/InternalOmniTypedResponseFormat.cs new file mode 100644 index 000000000..da255f506 --- /dev/null +++ b/.dotnet/src/Generated/Models/InternalOmniTypedResponseFormat.cs @@ -0,0 +1,25 @@ +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace OpenAI.Internal +{ + internal abstract partial class InternalOmniTypedResponseFormat + { + internal IDictionary SerializedAdditionalRawData { get; set; } + protected InternalOmniTypedResponseFormat() + { + } + + internal InternalOmniTypedResponseFormat(string type, IDictionary serializedAdditionalRawData) + { + Type = type; + SerializedAdditionalRawData = serializedAdditionalRawData; + } + + internal string Type { get; set; } + } +} diff --git a/.dotnet/src/Generated/Models/InternalCreateAssistantRequestToolResourcesFileSearchBase.Serialization.cs b/.dotnet/src/Generated/Models/InternalResponseFormatJsonObject.Serialization.cs similarity index 52% rename from 
.dotnet/src/Generated/Models/InternalCreateAssistantRequestToolResourcesFileSearchBase.Serialization.cs rename to .dotnet/src/Generated/Models/InternalResponseFormatJsonObject.Serialization.cs index 40957b72a..66e2b97f1 100644 --- a/.dotnet/src/Generated/Models/InternalCreateAssistantRequestToolResourcesFileSearchBase.Serialization.cs +++ b/.dotnet/src/Generated/Models/InternalResponseFormatJsonObject.Serialization.cs @@ -8,19 +8,24 @@ using System.Collections.Generic; using System.Text.Json; -namespace OpenAI.Assistants +namespace OpenAI.Internal { - internal partial class InternalCreateAssistantRequestToolResourcesFileSearchBase : IJsonModel + internal partial class InternalResponseFormatJsonObject : IJsonModel { - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(InternalCreateAssistantRequestToolResourcesFileSearchBase)} does not support writing '{format}' format."); + throw new FormatException($"The model {nameof(InternalResponseFormatJsonObject)} does not support writing '{format}' format."); } writer.WriteStartObject(); + if (SerializedAdditionalRawData?.ContainsKey("type") != true) + { + writer.WritePropertyName("type"u8); + writer.WriteStringValue(Type); + } if (SerializedAdditionalRawData != null) { foreach (var item in SerializedAdditionalRawData) @@ -43,19 +48,19 @@ void IJsonModel.Write writer.WriteEndObject(); } - InternalCreateAssistantRequestToolResourcesFileSearchBase IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + InternalResponseFormatJsonObject IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(InternalCreateAssistantRequestToolResourcesFileSearchBase)} does not support reading '{format}' format."); + throw new FormatException($"The model {nameof(InternalResponseFormatJsonObject)} does not support reading '{format}' format."); } using JsonDocument document = JsonDocument.ParseValue(ref reader); - return DeserializeInternalCreateAssistantRequestToolResourcesFileSearchBase(document.RootElement, options); + return DeserializeInternalResponseFormatJsonObject(document.RootElement, options); } - internal static InternalCreateAssistantRequestToolResourcesFileSearchBase DeserializeInternalCreateAssistantRequestToolResourcesFileSearchBase(JsonElement element, ModelReaderWriterOptions options = null) + internal static InternalResponseFormatJsonObject DeserializeInternalResponseFormatJsonObject(JsonElement element, ModelReaderWriterOptions options = null) { options ??= ModelSerializationExtensions.WireOptions; @@ -63,10 +68,16 @@ internal static InternalCreateAssistantRequestToolResourcesFileSearchBase Deseri { return null; } + string type = default; IDictionary serializedAdditionalRawData = default; Dictionary rawDataDictionary = new Dictionary(); foreach (var property in element.EnumerateObject()) { + if (property.NameEquals("type"u8)) + { + type = property.Value.GetString(); + continue; + } if (true) { rawDataDictionary ??= new Dictionary(); @@ -74,47 +85,47 @@ internal static InternalCreateAssistantRequestToolResourcesFileSearchBase Deseri } } serializedAdditionalRawData = rawDataDictionary; - return new InternalCreateAssistantRequestToolResourcesFileSearchBase(serializedAdditionalRawData); + return new InternalResponseFormatJsonObject(type, serializedAdditionalRawData); } - BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) { - var 
format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": return ModelReaderWriter.Write(this, options); default: - throw new FormatException($"The model {nameof(InternalCreateAssistantRequestToolResourcesFileSearchBase)} does not support writing '{options.Format}' format."); + throw new FormatException($"The model {nameof(InternalResponseFormatJsonObject)} does not support writing '{options.Format}' format."); } } - InternalCreateAssistantRequestToolResourcesFileSearchBase IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + InternalResponseFormatJsonObject IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": { using JsonDocument document = JsonDocument.Parse(data); - return DeserializeInternalCreateAssistantRequestToolResourcesFileSearchBase(document.RootElement, options); + return DeserializeInternalResponseFormatJsonObject(document.RootElement, options); } default: - throw new FormatException($"The model {nameof(InternalCreateAssistantRequestToolResourcesFileSearchBase)} does not support reading '{options.Format}' format."); + throw new FormatException($"The model {nameof(InternalResponseFormatJsonObject)} does not support reading '{options.Format}' format."); } } - string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; - internal static InternalCreateAssistantRequestToolResourcesFileSearchBase FromResponse(PipelineResponse response) + internal static new InternalResponseFormatJsonObject FromResponse(PipelineResponse response) { using var document = JsonDocument.Parse(response.Content); - return DeserializeInternalCreateAssistantRequestToolResourcesFileSearchBase(document.RootElement); + return DeserializeInternalResponseFormatJsonObject(document.RootElement); } - internal virtual BinaryContent ToBinaryContent() + internal override BinaryContent ToBinaryContent() { return BinaryContent.Create(this, ModelSerializationExtensions.WireOptions); } diff --git a/.dotnet/src/Generated/Models/InternalResponseFormatJsonObject.cs b/.dotnet/src/Generated/Models/InternalResponseFormatJsonObject.cs new file mode 100644 index 000000000..b62b1891f --- /dev/null +++ b/.dotnet/src/Generated/Models/InternalResponseFormatJsonObject.cs @@ -0,0 +1,21 @@ +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace OpenAI.Internal +{ + internal partial class InternalResponseFormatJsonObject : InternalOmniTypedResponseFormat + { + public 
InternalResponseFormatJsonObject() + { + Type = "json_object"; + } + + internal InternalResponseFormatJsonObject(string type, IDictionary serializedAdditionalRawData) : base(type, serializedAdditionalRawData) + { + } + } +} diff --git a/.dotnet/src/Generated/Models/InternalResponseFormatJsonSchema.Serialization.cs b/.dotnet/src/Generated/Models/InternalResponseFormatJsonSchema.Serialization.cs new file mode 100644 index 000000000..1746508f2 --- /dev/null +++ b/.dotnet/src/Generated/Models/InternalResponseFormatJsonSchema.Serialization.cs @@ -0,0 +1,144 @@ +// + +#nullable disable + +using System; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Internal +{ + internal partial class InternalResponseFormatJsonSchema : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(InternalResponseFormatJsonSchema)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (SerializedAdditionalRawData?.ContainsKey("json_schema") != true) + { + writer.WritePropertyName("json_schema"u8); + writer.WriteObjectValue(JsonSchema, options); + } + if (SerializedAdditionalRawData?.ContainsKey("type") != true) + { + writer.WritePropertyName("type"u8); + writer.WriteStringValue(Type); + } + if (SerializedAdditionalRawData != null) + { + foreach (var item in SerializedAdditionalRawData) + { + if (ModelSerializationExtensions.IsSentinelValue(item.Value)) + { + continue; + } + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + 
writer.WriteEndObject(); + } + + InternalResponseFormatJsonSchema IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(InternalResponseFormatJsonSchema)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeInternalResponseFormatJsonSchema(document.RootElement, options); + } + + internal static InternalResponseFormatJsonSchema DeserializeInternalResponseFormatJsonSchema(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + InternalResponseFormatJsonSchemaJsonSchema jsonSchema = default; + string type = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("json_schema"u8)) + { + jsonSchema = InternalResponseFormatJsonSchemaJsonSchema.DeserializeInternalResponseFormatJsonSchemaJsonSchema(property.Value, options); + continue; + } + if (property.NameEquals("type"u8)) + { + type = property.Value.GetString(); + continue; + } + if (true) + { + rawDataDictionary ??= new Dictionary(); + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new InternalResponseFormatJsonSchema(type, serializedAdditionalRawData, jsonSchema); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(InternalResponseFormatJsonSchema)} does not support writing '{options.Format}' format."); + } + } + + InternalResponseFormatJsonSchema IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeInternalResponseFormatJsonSchema(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(InternalResponseFormatJsonSchema)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + internal static new InternalResponseFormatJsonSchema FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeInternalResponseFormatJsonSchema(document.RootElement); + } + + internal override BinaryContent ToBinaryContent() + { + return BinaryContent.Create(this, ModelSerializationExtensions.WireOptions); + } + } +} diff --git a/.dotnet/src/Generated/Models/InternalResponseFormatJsonSchema.cs b/.dotnet/src/Generated/Models/InternalResponseFormatJsonSchema.cs new file mode 100644 index 000000000..739eb22b9 --- /dev/null +++ b/.dotnet/src/Generated/Models/InternalResponseFormatJsonSchema.cs @@ -0,0 +1,31 @@ +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace OpenAI.Internal +{ + internal partial class InternalResponseFormatJsonSchema : InternalOmniTypedResponseFormat + { + public InternalResponseFormatJsonSchema(InternalResponseFormatJsonSchemaJsonSchema jsonSchema) + { + 
Argument.AssertNotNull(jsonSchema, nameof(jsonSchema)); + + Type = "json_schema"; + JsonSchema = jsonSchema; + } + + internal InternalResponseFormatJsonSchema(string type, IDictionary serializedAdditionalRawData, InternalResponseFormatJsonSchemaJsonSchema jsonSchema) : base(type, serializedAdditionalRawData) + { + JsonSchema = jsonSchema; + } + + internal InternalResponseFormatJsonSchema() + { + } + + public InternalResponseFormatJsonSchemaJsonSchema JsonSchema { get; set; } + } +} diff --git a/.dotnet/src/Generated/Models/InternalResponseFormatJsonSchemaJsonSchema.Serialization.cs b/.dotnet/src/Generated/Models/InternalResponseFormatJsonSchemaJsonSchema.Serialization.cs new file mode 100644 index 000000000..5b9c4d930 --- /dev/null +++ b/.dotnet/src/Generated/Models/InternalResponseFormatJsonSchemaJsonSchema.Serialization.cs @@ -0,0 +1,189 @@ +// + +#nullable disable + +using System; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Internal +{ + internal partial class InternalResponseFormatJsonSchemaJsonSchema : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(InternalResponseFormatJsonSchemaJsonSchema)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (SerializedAdditionalRawData?.ContainsKey("description") != true && Optional.IsDefined(Description)) + { + writer.WritePropertyName("description"u8); + writer.WriteStringValue(Description); + } + if (SerializedAdditionalRawData?.ContainsKey("name") != true) + { + writer.WritePropertyName("name"u8); + writer.WriteStringValue(Name); + } + if (SerializedAdditionalRawData?.ContainsKey("schema") != true && Optional.IsDefined(Schema)) + { + writer.WritePropertyName("schema"u8); +#if NET6_0_OR_GREATER + writer.WriteRawValue(Schema); +#else + using (JsonDocument document = JsonDocument.Parse(Schema)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + if (SerializedAdditionalRawData?.ContainsKey("strict") != true && Optional.IsDefined(Strict)) + { + if (Strict != null) + { + writer.WritePropertyName("strict"u8); + writer.WriteBooleanValue(Strict.Value); + } + else + { + writer.WriteNull("strict"); + } + } + if (SerializedAdditionalRawData != null) + { + foreach (var item in SerializedAdditionalRawData) + { + if (ModelSerializationExtensions.IsSentinelValue(item.Value)) + { + continue; + } + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + InternalResponseFormatJsonSchemaJsonSchema IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(InternalResponseFormatJsonSchemaJsonSchema)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeInternalResponseFormatJsonSchemaJsonSchema(document.RootElement, options); + } + + internal static InternalResponseFormatJsonSchemaJsonSchema DeserializeInternalResponseFormatJsonSchemaJsonSchema(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string description = default; + string name = default; + BinaryData schema = default; + bool? strict = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("description"u8)) + { + description = property.Value.GetString(); + continue; + } + if (property.NameEquals("name"u8)) + { + name = property.Value.GetString(); + continue; + } + if (property.NameEquals("schema"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + schema = BinaryData.FromString(property.Value.GetRawText()); + continue; + } + if (property.NameEquals("strict"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + strict = null; + continue; + } + strict = property.Value.GetBoolean(); + continue; + } + if (true) + { + rawDataDictionary ??= new Dictionary(); + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new InternalResponseFormatJsonSchemaJsonSchema(description, name, schema, strict, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { 
+ var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(InternalResponseFormatJsonSchemaJsonSchema)} does not support writing '{options.Format}' format."); + } + } + + InternalResponseFormatJsonSchemaJsonSchema IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeInternalResponseFormatJsonSchemaJsonSchema(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(InternalResponseFormatJsonSchemaJsonSchema)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + internal static InternalResponseFormatJsonSchemaJsonSchema FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeInternalResponseFormatJsonSchemaJsonSchema(document.RootElement); + } + + internal virtual BinaryContent ToBinaryContent() + { + return BinaryContent.Create(this, ModelSerializationExtensions.WireOptions); + } + } +} diff --git a/.dotnet/src/Generated/Models/InternalResponseFormatJsonSchemaJsonSchema.cs b/.dotnet/src/Generated/Models/InternalResponseFormatJsonSchemaJsonSchema.cs new file mode 100644 index 000000000..533d36db0 --- /dev/null +++ b/.dotnet/src/Generated/Models/InternalResponseFormatJsonSchemaJsonSchema.cs @@ -0,0 +1,37 @@ +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace OpenAI.Internal +{ + internal partial class InternalResponseFormatJsonSchemaJsonSchema + { + internal IDictionary 
SerializedAdditionalRawData { get; set; } + public InternalResponseFormatJsonSchemaJsonSchema(string name) + { + Argument.AssertNotNull(name, nameof(name)); + + Name = name; + } + + internal InternalResponseFormatJsonSchemaJsonSchema(string description, string name, BinaryData schema, bool? strict, IDictionary serializedAdditionalRawData) + { + Description = description; + Name = name; + Schema = schema; + Strict = strict; + SerializedAdditionalRawData = serializedAdditionalRawData; + } + + internal InternalResponseFormatJsonSchemaJsonSchema() + { + } + + public string Description { get; set; } + public string Name { get; set; } + public bool? Strict { get; set; } + } +} diff --git a/.dotnet/src/Generated/Models/InternalResponseFormatJsonSchemaSchema.Serialization.cs b/.dotnet/src/Generated/Models/InternalResponseFormatJsonSchemaSchema.Serialization.cs new file mode 100644 index 000000000..d77918543 --- /dev/null +++ b/.dotnet/src/Generated/Models/InternalResponseFormatJsonSchemaSchema.Serialization.cs @@ -0,0 +1,111 @@ +// + +#nullable disable + +using System; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Internal +{ + internal partial class InternalResponseFormatJsonSchemaSchema : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(InternalResponseFormatJsonSchemaSchema)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + foreach (var item in AdditionalProperties) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + writer.WriteEndObject(); + } + + InternalResponseFormatJsonSchemaSchema IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(InternalResponseFormatJsonSchemaSchema)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeInternalResponseFormatJsonSchemaSchema(document.RootElement, options); + } + + internal static InternalResponseFormatJsonSchemaSchema DeserializeInternalResponseFormatJsonSchemaSchema(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + IDictionary additionalProperties = default; + Dictionary additionalPropertiesDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + additionalPropertiesDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + additionalProperties = additionalPropertiesDictionary; + return new InternalResponseFormatJsonSchemaSchema(additionalProperties); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(InternalResponseFormatJsonSchemaSchema)} does not support writing '{options.Format}' format."); + } + } + + InternalResponseFormatJsonSchemaSchema IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeInternalResponseFormatJsonSchemaSchema(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(InternalResponseFormatJsonSchemaSchema)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + internal static InternalResponseFormatJsonSchemaSchema FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeInternalResponseFormatJsonSchemaSchema(document.RootElement); + } + + internal virtual BinaryContent ToBinaryContent() + { + return BinaryContent.Create(this, ModelSerializationExtensions.WireOptions); + } + } +} diff --git a/.dotnet/src/Generated/Models/InternalResponseFormatJsonSchemaSchema.cs b/.dotnet/src/Generated/Models/InternalResponseFormatJsonSchemaSchema.cs new file mode 100644 index 000000000..d00500adb --- /dev/null +++ b/.dotnet/src/Generated/Models/InternalResponseFormatJsonSchemaSchema.cs @@ -0,0 +1,24 @@ +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace OpenAI.Internal +{ + internal partial class InternalResponseFormatJsonSchemaSchema + { + public InternalResponseFormatJsonSchemaSchema() + { + AdditionalProperties = new 
ChangeTrackingDictionary(); + } + + internal InternalResponseFormatJsonSchemaSchema(IDictionary additionalProperties) + { + AdditionalProperties = additionalProperties; + } + + public IDictionary AdditionalProperties { get; } + } +} diff --git a/.dotnet/src/Generated/Models/InternalResponseFormatText.Serialization.cs b/.dotnet/src/Generated/Models/InternalResponseFormatText.Serialization.cs new file mode 100644 index 000000000..b213da2e7 --- /dev/null +++ b/.dotnet/src/Generated/Models/InternalResponseFormatText.Serialization.cs @@ -0,0 +1,133 @@ +// + +#nullable disable + +using System; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Internal +{ + internal partial class InternalResponseFormatText : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(InternalResponseFormatText)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (SerializedAdditionalRawData?.ContainsKey("type") != true) + { + writer.WritePropertyName("type"u8); + writer.WriteStringValue(Type); + } + if (SerializedAdditionalRawData != null) + { + foreach (var item in SerializedAdditionalRawData) + { + if (ModelSerializationExtensions.IsSentinelValue(item.Value)) + { + continue; + } + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + InternalResponseFormatText IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(InternalResponseFormatText)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeInternalResponseFormatText(document.RootElement, options); + } + + internal static InternalResponseFormatText DeserializeInternalResponseFormatText(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string type = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("type"u8)) + { + type = property.Value.GetString(); + continue; + } + if (true) + { + rawDataDictionary ??= new Dictionary(); + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new InternalResponseFormatText(type, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(InternalResponseFormatText)} does not support writing '{options.Format}' format."); + } + } + + InternalResponseFormatText IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeInternalResponseFormatText(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(InternalResponseFormatText)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + internal static new InternalResponseFormatText FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeInternalResponseFormatText(document.RootElement); + } + + internal override BinaryContent ToBinaryContent() + { + return BinaryContent.Create(this, ModelSerializationExtensions.WireOptions); + } + } +} diff --git a/.dotnet/src/Generated/Models/InternalResponseFormatText.cs b/.dotnet/src/Generated/Models/InternalResponseFormatText.cs new file mode 100644 index 000000000..efe4d6fce --- /dev/null +++ b/.dotnet/src/Generated/Models/InternalResponseFormatText.cs @@ -0,0 +1,21 @@ +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace OpenAI.Internal +{ + internal partial class InternalResponseFormatText : InternalOmniTypedResponseFormat + { + public InternalResponseFormatText() + { + Type = "text"; + } + + internal InternalResponseFormatText(string type, IDictionary serializedAdditionalRawData) : base(type, serializedAdditionalRawData) + { + } + } +} diff --git a/.dotnet/src/Generated/Models/InternalRunStepDeltaStepDetailsToolCallsFileSearchObject.Serialization.cs b/.dotnet/src/Generated/Models/InternalRunStepDeltaStepDetailsToolCallsFileSearchObject.Serialization.cs index e9dd186c0..78a34b22e 100644 --- a/.dotnet/src/Generated/Models/InternalRunStepDeltaStepDetailsToolCallsFileSearchObject.Serialization.cs +++ 
b/.dotnet/src/Generated/Models/InternalRunStepDeltaStepDetailsToolCallsFileSearchObject.Serialization.cs @@ -38,7 +38,19 @@ void IJsonModel.Write( foreach (var item in FileSearch) { writer.WritePropertyName(item.Key); - writer.WriteStringValue(item.Value); + if (item.Value == null) + { + writer.WriteNullValue(); + continue; + } +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif } writer.WriteEndObject(); } @@ -91,7 +103,7 @@ internal static InternalRunStepDeltaStepDetailsToolCallsFileSearchObject Deseria } int index = default; string id = default; - IReadOnlyDictionary fileSearch = default; + IReadOnlyDictionary fileSearch = default; string type = default; IDictionary serializedAdditionalRawData = default; Dictionary rawDataDictionary = new Dictionary(); @@ -109,10 +121,17 @@ internal static InternalRunStepDeltaStepDetailsToolCallsFileSearchObject Deseria } if (property.NameEquals("file_search"u8)) { - Dictionary dictionary = new Dictionary(); + Dictionary dictionary = new Dictionary(); foreach (var property0 in property.Value.EnumerateObject()) { - dictionary.Add(property0.Name, property0.Value.GetString()); + if (property0.Value.ValueKind == JsonValueKind.Null) + { + dictionary.Add(property0.Name, null); + } + else + { + dictionary.Add(property0.Name, BinaryData.FromString(property0.Value.GetRawText())); + } } fileSearch = dictionary; continue; diff --git a/.dotnet/src/Generated/Models/InternalRunStepDeltaStepDetailsToolCallsFileSearchObject.cs b/.dotnet/src/Generated/Models/InternalRunStepDeltaStepDetailsToolCallsFileSearchObject.cs index 6fcfc013c..e3db470bb 100644 --- a/.dotnet/src/Generated/Models/InternalRunStepDeltaStepDetailsToolCallsFileSearchObject.cs +++ b/.dotnet/src/Generated/Models/InternalRunStepDeltaStepDetailsToolCallsFileSearchObject.cs @@ -9,7 +9,7 @@ namespace OpenAI.Assistants { internal 
partial class InternalRunStepDeltaStepDetailsToolCallsFileSearchObject : InternalRunStepDeltaStepDetailsToolCallsObjectToolCallsObject { - internal InternalRunStepDeltaStepDetailsToolCallsFileSearchObject(int index, IReadOnlyDictionary fileSearch) + internal InternalRunStepDeltaStepDetailsToolCallsFileSearchObject(int index, IReadOnlyDictionary fileSearch) { Argument.AssertNotNull(fileSearch, nameof(fileSearch)); @@ -18,7 +18,7 @@ internal InternalRunStepDeltaStepDetailsToolCallsFileSearchObject(int index, IRe FileSearch = fileSearch; } - internal InternalRunStepDeltaStepDetailsToolCallsFileSearchObject(string type, IDictionary serializedAdditionalRawData, int index, string id, IReadOnlyDictionary fileSearch) : base(type, serializedAdditionalRawData) + internal InternalRunStepDeltaStepDetailsToolCallsFileSearchObject(string type, IDictionary serializedAdditionalRawData, int index, string id, IReadOnlyDictionary fileSearch) : base(type, serializedAdditionalRawData) { Index = index; Id = id; @@ -31,6 +31,6 @@ internal InternalRunStepDeltaStepDetailsToolCallsFileSearchObject() public int Index { get; } public string Id { get; } - public IReadOnlyDictionary FileSearch { get; } + public IReadOnlyDictionary FileSearch { get; } } } diff --git a/.dotnet/src/Generated/Models/InternalRunStepFileSearchToolCallDetails.Serialization.cs b/.dotnet/src/Generated/Models/InternalRunStepFileSearchToolCallDetails.Serialization.cs index e89d929f4..34d26af6a 100644 --- a/.dotnet/src/Generated/Models/InternalRunStepFileSearchToolCallDetails.Serialization.cs +++ b/.dotnet/src/Generated/Models/InternalRunStepFileSearchToolCallDetails.Serialization.cs @@ -33,7 +33,19 @@ void IJsonModel.Write(Utf8JsonWriter w foreach (var item in FileSearch) { writer.WritePropertyName(item.Key); - writer.WriteStringValue(item.Value); + if (item.Value == null) + { + writer.WriteNullValue(); + continue; + } +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = 
JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif } writer.WriteEndObject(); } @@ -85,7 +97,7 @@ internal static InternalRunStepFileSearchToolCallDetails DeserializeInternalRunS return null; } string id = default; - IReadOnlyDictionary fileSearch = default; + IReadOnlyDictionary fileSearch = default; string type = default; IDictionary serializedAdditionalRawData = default; Dictionary rawDataDictionary = new Dictionary(); @@ -98,10 +110,17 @@ internal static InternalRunStepFileSearchToolCallDetails DeserializeInternalRunS } if (property.NameEquals("file_search"u8)) { - Dictionary dictionary = new Dictionary(); + Dictionary dictionary = new Dictionary(); foreach (var property0 in property.Value.EnumerateObject()) { - dictionary.Add(property0.Name, property0.Value.GetString()); + if (property0.Value.ValueKind == JsonValueKind.Null) + { + dictionary.Add(property0.Name, null); + } + else + { + dictionary.Add(property0.Name, BinaryData.FromString(property0.Value.GetRawText())); + } } fileSearch = dictionary; continue; diff --git a/.dotnet/src/Generated/Models/InternalRunStepFileSearchToolCallDetails.cs b/.dotnet/src/Generated/Models/InternalRunStepFileSearchToolCallDetails.cs index 6be9fec00..f1593afd7 100644 --- a/.dotnet/src/Generated/Models/InternalRunStepFileSearchToolCallDetails.cs +++ b/.dotnet/src/Generated/Models/InternalRunStepFileSearchToolCallDetails.cs @@ -9,7 +9,7 @@ namespace OpenAI.Assistants { internal partial class InternalRunStepFileSearchToolCallDetails : RunStepToolCall { - internal InternalRunStepFileSearchToolCallDetails(string id, IReadOnlyDictionary fileSearch) + internal InternalRunStepFileSearchToolCallDetails(string id, IReadOnlyDictionary fileSearch) { Argument.AssertNotNull(id, nameof(id)); Argument.AssertNotNull(fileSearch, nameof(fileSearch)); @@ -19,7 +19,7 @@ internal InternalRunStepFileSearchToolCallDetails(string id, IReadOnlyDictionary FileSearch = fileSearch; } - internal 
InternalRunStepFileSearchToolCallDetails(string type, IDictionary serializedAdditionalRawData, string id, IReadOnlyDictionary fileSearch) : base(type, serializedAdditionalRawData) + internal InternalRunStepFileSearchToolCallDetails(string type, IDictionary serializedAdditionalRawData, string id, IReadOnlyDictionary fileSearch) : base(type, serializedAdditionalRawData) { Id = id; FileSearch = fileSearch; @@ -30,6 +30,6 @@ internal InternalRunStepFileSearchToolCallDetails() } public string Id { get; } - public IReadOnlyDictionary FileSearch { get; } + public IReadOnlyDictionary FileSearch { get; } } } diff --git a/.dotnet/src/Generated/Models/InternalModifyThreadRequestToolResourcesFileSearch.Serialization.cs b/.dotnet/src/Generated/Models/InternalToolResourcesFileSearchIdsOnly.Serialization.cs similarity index 62% rename from .dotnet/src/Generated/Models/InternalModifyThreadRequestToolResourcesFileSearch.Serialization.cs rename to .dotnet/src/Generated/Models/InternalToolResourcesFileSearchIdsOnly.Serialization.cs index 9aa936c8e..3b188f3b2 100644 --- a/.dotnet/src/Generated/Models/InternalModifyThreadRequestToolResourcesFileSearch.Serialization.cs +++ b/.dotnet/src/Generated/Models/InternalToolResourcesFileSearchIdsOnly.Serialization.cs @@ -10,14 +10,14 @@ namespace OpenAI.Assistants { - internal partial class InternalModifyThreadRequestToolResourcesFileSearch : IJsonModel + internal partial class InternalToolResourcesFileSearchIdsOnly : IJsonModel { - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(InternalModifyThreadRequestToolResourcesFileSearch)} does not support writing '{format}' format."); + throw new FormatException($"The model {nameof(InternalToolResourcesFileSearchIdsOnly)} does not support writing '{format}' format."); } writer.WriteStartObject(); @@ -53,19 +53,19 @@ void IJsonModel.Write(Utf8Js writer.WriteEndObject(); } - InternalModifyThreadRequestToolResourcesFileSearch IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + InternalToolResourcesFileSearchIdsOnly IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(InternalModifyThreadRequestToolResourcesFileSearch)} does not support reading '{format}' format."); + throw new FormatException($"The model {nameof(InternalToolResourcesFileSearchIdsOnly)} does not support reading '{format}' format."); } using JsonDocument document = JsonDocument.ParseValue(ref reader); - return DeserializeInternalModifyThreadRequestToolResourcesFileSearch(document.RootElement, options); + return DeserializeInternalToolResourcesFileSearchIdsOnly(document.RootElement, options); } - internal static InternalModifyThreadRequestToolResourcesFileSearch DeserializeInternalModifyThreadRequestToolResourcesFileSearch(JsonElement element, ModelReaderWriterOptions options = null) + internal static InternalToolResourcesFileSearchIdsOnly DeserializeInternalToolResourcesFileSearchIdsOnly(JsonElement element, ModelReaderWriterOptions options = null) { options ??= ModelSerializationExtensions.WireOptions; @@ -99,44 +99,44 @@ internal static 
InternalModifyThreadRequestToolResourcesFileSearch DeserializeIn } } serializedAdditionalRawData = rawDataDictionary; - return new InternalModifyThreadRequestToolResourcesFileSearch(vectorStoreIds ?? new ChangeTrackingList(), serializedAdditionalRawData); + return new InternalToolResourcesFileSearchIdsOnly(vectorStoreIds ?? new ChangeTrackingList(), serializedAdditionalRawData); } - BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": return ModelReaderWriter.Write(this, options); default: - throw new FormatException($"The model {nameof(InternalModifyThreadRequestToolResourcesFileSearch)} does not support writing '{options.Format}' format."); + throw new FormatException($"The model {nameof(InternalToolResourcesFileSearchIdsOnly)} does not support writing '{options.Format}' format."); } } - InternalModifyThreadRequestToolResourcesFileSearch IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + InternalToolResourcesFileSearchIdsOnly IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": { using JsonDocument document = JsonDocument.Parse(data); - return DeserializeInternalModifyThreadRequestToolResourcesFileSearch(document.RootElement, options); + return DeserializeInternalToolResourcesFileSearchIdsOnly(document.RootElement, options); } default: - throw new FormatException($"The model {nameof(InternalModifyThreadRequestToolResourcesFileSearch)} does not support reading '{options.Format}' format."); + throw new FormatException($"The model {nameof(InternalToolResourcesFileSearchIdsOnly)} does not support reading '{options.Format}' format."); } } - string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; - internal static InternalModifyThreadRequestToolResourcesFileSearch FromResponse(PipelineResponse response) + internal static InternalToolResourcesFileSearchIdsOnly FromResponse(PipelineResponse response) { using var document = JsonDocument.Parse(response.Content); - return DeserializeInternalModifyThreadRequestToolResourcesFileSearch(document.RootElement); + return DeserializeInternalToolResourcesFileSearchIdsOnly(document.RootElement); } internal virtual BinaryContent ToBinaryContent() diff --git a/.dotnet/src/Generated/Models/InternalModifyThreadRequestToolResourcesFileSearch.cs b/.dotnet/src/Generated/Models/InternalToolResourcesFileSearchIdsOnly.cs similarity index 62% rename from .dotnet/src/Generated/Models/InternalModifyThreadRequestToolResourcesFileSearch.cs rename to .dotnet/src/Generated/Models/InternalToolResourcesFileSearchIdsOnly.cs index f87c6ace1..33ed0185e 100644 --- a/.dotnet/src/Generated/Models/InternalModifyThreadRequestToolResourcesFileSearch.cs +++ b/.dotnet/src/Generated/Models/InternalToolResourcesFileSearchIdsOnly.cs @@ -7,15 +7,15 @@ namespace OpenAI.Assistants { - internal partial class 
InternalModifyThreadRequestToolResourcesFileSearch + internal partial class InternalToolResourcesFileSearchIdsOnly { internal IDictionary SerializedAdditionalRawData { get; set; } - public InternalModifyThreadRequestToolResourcesFileSearch() + public InternalToolResourcesFileSearchIdsOnly() { VectorStoreIds = new ChangeTrackingList(); } - internal InternalModifyThreadRequestToolResourcesFileSearch(IList vectorStoreIds, IDictionary serializedAdditionalRawData) + internal InternalToolResourcesFileSearchIdsOnly(IList vectorStoreIds, IDictionary serializedAdditionalRawData) { VectorStoreIds = vectorStoreIds; SerializedAdditionalRawData = serializedAdditionalRawData; diff --git a/.dotnet/src/Generated/Models/InternalUnknownAssistantResponseFormat.Serialization.cs b/.dotnet/src/Generated/Models/InternalUnknownAssistantResponseFormat.Serialization.cs new file mode 100644 index 000000000..ff9e4c05e --- /dev/null +++ b/.dotnet/src/Generated/Models/InternalUnknownAssistantResponseFormat.Serialization.cs @@ -0,0 +1,122 @@ +// + +#nullable disable + +using System; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Assistants +{ + internal partial class InternalUnknownAssistantResponseFormat : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(AssistantResponseFormat)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (SerializedAdditionalRawData?.ContainsKey("type") != true) + { + writer.WritePropertyName("type"u8); + writer.WriteStringValue(Type); + } + if (SerializedAdditionalRawData != null) + { + foreach (var item in SerializedAdditionalRawData) + { + if (ModelSerializationExtensions.IsSentinelValue(item.Value)) + { + continue; + } + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + AssistantResponseFormat IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(AssistantResponseFormat)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeAssistantResponseFormat(document.RootElement, options); + } + + internal static InternalUnknownAssistantResponseFormat DeserializeInternalUnknownAssistantResponseFormat(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string type = "Unknown"; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("type"u8)) + { + type = property.Value.GetString(); + continue; + } + if (true) + { + rawDataDictionary ??= new Dictionary(); + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new InternalUnknownAssistantResponseFormat(type, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(AssistantResponseFormat)} does not support writing '{options.Format}' format."); + } + } + + AssistantResponseFormat IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeAssistantResponseFormat(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(AssistantResponseFormat)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + } +} diff --git a/.dotnet/src/Generated/Models/InternalUnknownAssistantResponseFormat.cs b/.dotnet/src/Generated/Models/InternalUnknownAssistantResponseFormat.cs new file mode 100644 index 000000000..13ee16c7d --- /dev/null +++ b/.dotnet/src/Generated/Models/InternalUnknownAssistantResponseFormat.cs @@ -0,0 +1,20 @@ +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace OpenAI.Assistants +{ + internal partial class InternalUnknownAssistantResponseFormat : AssistantResponseFormat + { + internal InternalUnknownAssistantResponseFormat(string type, IDictionary serializedAdditionalRawData) : base(type, serializedAdditionalRawData) + { + } + + internal InternalUnknownAssistantResponseFormat() + { + } + } +} diff --git a/.dotnet/src/Generated/Models/UnknownChatMessage.Serialization.cs b/.dotnet/src/Generated/Models/InternalUnknownChatMessage.Serialization.cs similarity index 56% rename from .dotnet/src/Generated/Models/UnknownChatMessage.Serialization.cs rename to .dotnet/src/Generated/Models/InternalUnknownChatMessage.Serialization.cs index 665911fca..4ff268505 100644 --- a/.dotnet/src/Generated/Models/UnknownChatMessage.Serialization.cs +++ b/.dotnet/src/Generated/Models/InternalUnknownChatMessage.Serialization.cs @@ -5,11 +5,12 @@ using System; using System.ClientModel; using System.ClientModel.Primitives; +using System.Collections.Generic; using System.Text.Json; namespace OpenAI.Chat { - internal partial class UnknownChatMessage : IJsonModel + 
internal partial class InternalUnknownChatMessage : IJsonModel { ChatMessage IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) { @@ -23,6 +24,40 @@ ChatMessage IJsonModel.Create(ref Utf8JsonReader reader, ModelReade return DeserializeChatMessage(document.RootElement, options); } + internal static InternalUnknownChatMessage DeserializeInternalUnknownChatMessage(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + ChatMessageRole role = default; + IList content = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("role"u8)) + { + role = property.Value.GetString().ToChatMessageRole(); + continue; + } + if (property.NameEquals("content"u8)) + { + DeserializeContentValue(property, ref content); + continue; + } + if (true) + { + rawDataDictionary ??= new Dictionary(); + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new InternalUnknownChatMessage(role, content ?? new ChangeTrackingList(), serializedAdditionalRawData); + } + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) { var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; @@ -54,10 +89,10 @@ ChatMessage IPersistableModel.Create(BinaryData data, ModelReaderWr string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; - internal static new UnknownChatMessage FromResponse(PipelineResponse response) + internal static new InternalUnknownChatMessage FromResponse(PipelineResponse response) { using var document = JsonDocument.Parse(response.Content); - return DeserializeUnknownChatMessage(document.RootElement); + return DeserializeInternalUnknownChatMessage(document.RootElement); } internal override BinaryContent ToBinaryContent() diff --git a/.dotnet/src/Generated/Models/InternalUnknownChatMessage.cs b/.dotnet/src/Generated/Models/InternalUnknownChatMessage.cs new file mode 100644 index 000000000..8f25fa22a --- /dev/null +++ b/.dotnet/src/Generated/Models/InternalUnknownChatMessage.cs @@ -0,0 +1,20 @@ +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace OpenAI.Chat +{ + internal partial class InternalUnknownChatMessage : ChatMessage + { + internal InternalUnknownChatMessage(ChatMessageRole role, IList content, IDictionary serializedAdditionalRawData) : base(role, content, serializedAdditionalRawData) + { + } + + internal InternalUnknownChatMessage() + { + } + } +} diff --git a/.dotnet/src/Generated/Models/InternalUnknownChatResponseFormat.Serialization.cs b/.dotnet/src/Generated/Models/InternalUnknownChatResponseFormat.Serialization.cs new file mode 100644 index 000000000..756d520b6 --- /dev/null +++ b/.dotnet/src/Generated/Models/InternalUnknownChatResponseFormat.Serialization.cs @@ -0,0 +1,97 @@ +// + +#nullable disable + +using System; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Chat +{ + internal partial class InternalUnknownChatResponseFormat : IJsonModel + { + ChatResponseFormat 
IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ChatResponseFormat)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeChatResponseFormat(document.RootElement, options); + } + + internal static InternalUnknownChatResponseFormat DeserializeInternalUnknownChatResponseFormat(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string type = "Unknown"; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("type"u8)) + { + type = property.Value.GetString(); + continue; + } + if (true) + { + rawDataDictionary ??= new Dictionary(); + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new InternalUnknownChatResponseFormat(type, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(ChatResponseFormat)} does not support writing '{options.Format}' format."); + } + } + + ChatResponseFormat IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeChatResponseFormat(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(ChatResponseFormat)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + internal static new InternalUnknownChatResponseFormat FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeInternalUnknownChatResponseFormat(document.RootElement); + } + + internal override BinaryContent ToBinaryContent() + { + return BinaryContent.Create(this, ModelSerializationExtensions.WireOptions); + } + } +} diff --git a/.dotnet/src/Generated/Models/InternalUnknownChatResponseFormat.cs b/.dotnet/src/Generated/Models/InternalUnknownChatResponseFormat.cs new file mode 100644 index 000000000..e3def713f --- /dev/null +++ b/.dotnet/src/Generated/Models/InternalUnknownChatResponseFormat.cs @@ -0,0 +1,20 @@ +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace OpenAI.Chat +{ + internal partial class InternalUnknownChatResponseFormat : ChatResponseFormat + { + internal InternalUnknownChatResponseFormat(string type, IDictionary serializedAdditionalRawData) : base(type, serializedAdditionalRawData) + { + } + + internal InternalUnknownChatResponseFormat() + { + } + } +} diff --git a/.dotnet/src/Generated/Models/InternalUnknownOmniTypedResponseFormat.Serialization.cs b/.dotnet/src/Generated/Models/InternalUnknownOmniTypedResponseFormat.Serialization.cs new file mode 100644 index 000000000..43b17f909 --- /dev/null +++ b/.dotnet/src/Generated/Models/InternalUnknownOmniTypedResponseFormat.Serialization.cs @@ -0,0 +1,133 @@ +// + +#nullable disable + +using System; +using System.ClientModel; 
+using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Internal +{ + internal partial class InternalUnknownOmniTypedResponseFormat : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(InternalOmniTypedResponseFormat)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (SerializedAdditionalRawData?.ContainsKey("type") != true) + { + writer.WritePropertyName("type"u8); + writer.WriteStringValue(Type); + } + if (SerializedAdditionalRawData != null) + { + foreach (var item in SerializedAdditionalRawData) + { + if (ModelSerializationExtensions.IsSentinelValue(item.Value)) + { + continue; + } + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + InternalOmniTypedResponseFormat IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(InternalOmniTypedResponseFormat)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeInternalOmniTypedResponseFormat(document.RootElement, options); + } + + internal static InternalUnknownOmniTypedResponseFormat DeserializeInternalUnknownOmniTypedResponseFormat(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string type = "Unknown"; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("type"u8)) + { + type = property.Value.GetString(); + continue; + } + if (true) + { + rawDataDictionary ??= new Dictionary(); + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new InternalUnknownOmniTypedResponseFormat(type, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(InternalOmniTypedResponseFormat)} does not support writing '{options.Format}' format."); + } + } + + InternalOmniTypedResponseFormat IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeInternalOmniTypedResponseFormat(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(InternalOmniTypedResponseFormat)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + internal static new InternalUnknownOmniTypedResponseFormat FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeInternalUnknownOmniTypedResponseFormat(document.RootElement); + } + + internal override BinaryContent ToBinaryContent() + { + return BinaryContent.Create(this, ModelSerializationExtensions.WireOptions); + } + } +} diff --git a/.dotnet/src/Generated/Models/InternalUnknownOmniTypedResponseFormat.cs b/.dotnet/src/Generated/Models/InternalUnknownOmniTypedResponseFormat.cs new file mode 100644 index 000000000..8ae4808ff --- /dev/null +++ b/.dotnet/src/Generated/Models/InternalUnknownOmniTypedResponseFormat.cs @@ -0,0 +1,20 @@ +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace OpenAI.Internal +{ + internal partial class InternalUnknownOmniTypedResponseFormat : InternalOmniTypedResponseFormat + { + internal InternalUnknownOmniTypedResponseFormat(string type, IDictionary serializedAdditionalRawData) : base(type, serializedAdditionalRawData) + { + } + + internal InternalUnknownOmniTypedResponseFormat() + { + } + } +} diff --git a/.dotnet/src/Generated/Models/InternalUpload.Serialization.cs b/.dotnet/src/Generated/Models/InternalUpload.Serialization.cs new file mode 100644 index 000000000..6b2b8604d --- /dev/null +++ b/.dotnet/src/Generated/Models/InternalUpload.Serialization.cs @@ -0,0 +1,247 @@ +// + +#nullable disable + +using System; +using 
System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Files +{ + internal partial class InternalUpload : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(InternalUpload)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (SerializedAdditionalRawData?.ContainsKey("id") != true) + { + writer.WritePropertyName("id"u8); + writer.WriteStringValue(Id); + } + if (SerializedAdditionalRawData?.ContainsKey("created_at") != true) + { + writer.WritePropertyName("created_at"u8); + writer.WriteNumberValue(CreatedAt, "U"); + } + if (SerializedAdditionalRawData?.ContainsKey("filename") != true) + { + writer.WritePropertyName("filename"u8); + writer.WriteStringValue(Filename); + } + if (SerializedAdditionalRawData?.ContainsKey("bytes") != true) + { + writer.WritePropertyName("bytes"u8); + writer.WriteNumberValue(Bytes); + } + if (SerializedAdditionalRawData?.ContainsKey("purpose") != true) + { + writer.WritePropertyName("purpose"u8); + writer.WriteStringValue(Purpose); + } + if (SerializedAdditionalRawData?.ContainsKey("status") != true) + { + writer.WritePropertyName("status"u8); + writer.WriteStringValue(Status.ToString()); + } + if (SerializedAdditionalRawData?.ContainsKey("expires_at") != true) + { + writer.WritePropertyName("expires_at"u8); + writer.WriteNumberValue(ExpiresAt, "U"); + } + if (SerializedAdditionalRawData?.ContainsKey("object") != true && Optional.IsDefined(Object)) + { + writer.WritePropertyName("object"u8); + writer.WriteStringValue(Object.Value.ToString()); + } + if (SerializedAdditionalRawData?.ContainsKey("file") != true && Optional.IsDefined(File)) + { + if (File != null) + { + 
writer.WritePropertyName("file"u8); + writer.WriteObjectValue(File, options); + } + else + { + writer.WriteNull("file"); + } + } + if (SerializedAdditionalRawData != null) + { + foreach (var item in SerializedAdditionalRawData) + { + if (ModelSerializationExtensions.IsSentinelValue(item.Value)) + { + continue; + } + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + InternalUpload IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(InternalUpload)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeInternalUpload(document.RootElement, options); + } + + internal static InternalUpload DeserializeInternalUpload(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string id = default; + DateTimeOffset createdAt = default; + string filename = default; + int bytes = default; + string purpose = default; + InternalUploadStatus status = default; + DateTimeOffset expiresAt = default; + InternalUploadObject? 
@object = default; + OpenAIFileInfo file = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("id"u8)) + { + id = property.Value.GetString(); + continue; + } + if (property.NameEquals("created_at"u8)) + { + createdAt = DateTimeOffset.FromUnixTimeSeconds(property.Value.GetInt64()); + continue; + } + if (property.NameEquals("filename"u8)) + { + filename = property.Value.GetString(); + continue; + } + if (property.NameEquals("bytes"u8)) + { + bytes = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("purpose"u8)) + { + purpose = property.Value.GetString(); + continue; + } + if (property.NameEquals("status"u8)) + { + status = new InternalUploadStatus(property.Value.GetString()); + continue; + } + if (property.NameEquals("expires_at"u8)) + { + expiresAt = DateTimeOffset.FromUnixTimeSeconds(property.Value.GetInt64()); + continue; + } + if (property.NameEquals("object"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + @object = new InternalUploadObject(property.Value.GetString()); + continue; + } + if (property.NameEquals("file"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + file = null; + continue; + } + file = OpenAIFileInfo.DeserializeOpenAIFileInfo(property.Value, options); + continue; + } + if (true) + { + rawDataDictionary ??= new Dictionary(); + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new InternalUpload( + id, + createdAt, + filename, + bytes, + purpose, + status, + expiresAt, + @object, + file, + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(InternalUpload)} does not support writing '{options.Format}' format."); + } + } + + InternalUpload IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeInternalUpload(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(InternalUpload)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + internal static InternalUpload FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeInternalUpload(document.RootElement); + } + + internal virtual BinaryContent ToBinaryContent() + { + return BinaryContent.Create(this, ModelSerializationExtensions.WireOptions); + } + } +} diff --git a/.dotnet/src/Generated/Models/InternalUpload.cs b/.dotnet/src/Generated/Models/InternalUpload.cs new file mode 100644 index 000000000..fb464dedd --- /dev/null +++ b/.dotnet/src/Generated/Models/InternalUpload.cs @@ -0,0 +1,56 @@ +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace OpenAI.Files +{ + internal partial class InternalUpload + { + internal IDictionary SerializedAdditionalRawData { get; set; } + internal InternalUpload(string id, DateTimeOffset createdAt, string filename, int bytes, string purpose, InternalUploadStatus status, DateTimeOffset expiresAt) + { + Argument.AssertNotNull(id, nameof(id)); + Argument.AssertNotNull(filename, nameof(filename)); + 
Argument.AssertNotNull(purpose, nameof(purpose)); + + Id = id; + CreatedAt = createdAt; + Filename = filename; + Bytes = bytes; + Purpose = purpose; + Status = status; + ExpiresAt = expiresAt; + } + + internal InternalUpload(string id, DateTimeOffset createdAt, string filename, int bytes, string purpose, InternalUploadStatus status, DateTimeOffset expiresAt, InternalUploadObject? @object, OpenAIFileInfo file, IDictionary serializedAdditionalRawData) + { + Id = id; + CreatedAt = createdAt; + Filename = filename; + Bytes = bytes; + Purpose = purpose; + Status = status; + ExpiresAt = expiresAt; + Object = @object; + File = file; + SerializedAdditionalRawData = serializedAdditionalRawData; + } + + internal InternalUpload() + { + } + + public string Id { get; } + public DateTimeOffset CreatedAt { get; } + public string Filename { get; } + public int Bytes { get; } + public string Purpose { get; } + public InternalUploadStatus Status { get; } + public DateTimeOffset ExpiresAt { get; } + public InternalUploadObject? Object { get; } + public OpenAIFileInfo File { get; } + } +} diff --git a/.dotnet/src/Generated/Models/InternalUploadObject.cs b/.dotnet/src/Generated/Models/InternalUploadObject.cs new file mode 100644 index 000000000..9dacd622a --- /dev/null +++ b/.dotnet/src/Generated/Models/InternalUploadObject.cs @@ -0,0 +1,34 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Files +{ + internal readonly partial struct InternalUploadObject : IEquatable + { + private readonly string _value; + + public InternalUploadObject(string value) + { + _value = value ?? 
throw new ArgumentNullException(nameof(value)); + } + + private const string UploadValue = "upload"; + + public static InternalUploadObject Upload { get; } = new InternalUploadObject(UploadValue); + public static bool operator ==(InternalUploadObject left, InternalUploadObject right) => left.Equals(right); + public static bool operator !=(InternalUploadObject left, InternalUploadObject right) => !left.Equals(right); + public static implicit operator InternalUploadObject(string value) => new InternalUploadObject(value); + + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is InternalUploadObject other && Equals(other); + public bool Equals(InternalUploadObject other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value != null ? StringComparer.InvariantCultureIgnoreCase.GetHashCode(_value) : 0; + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/InternalUploadPart.Serialization.cs b/.dotnet/src/Generated/Models/InternalUploadPart.Serialization.cs new file mode 100644 index 000000000..c6f4c77f8 --- /dev/null +++ b/.dotnet/src/Generated/Models/InternalUploadPart.Serialization.cs @@ -0,0 +1,166 @@ +// + +#nullable disable + +using System; +using System.ClientModel; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; + +namespace OpenAI.Files +{ + internal partial class InternalUploadPart : IJsonModel + { + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(InternalUploadPart)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (SerializedAdditionalRawData?.ContainsKey("id") != true) + { + writer.WritePropertyName("id"u8); + writer.WriteStringValue(Id); + } + if (SerializedAdditionalRawData?.ContainsKey("created_at") != true) + { + writer.WritePropertyName("created_at"u8); + writer.WriteNumberValue(CreatedAt, "U"); + } + if (SerializedAdditionalRawData?.ContainsKey("upload_id") != true) + { + writer.WritePropertyName("upload_id"u8); + writer.WriteStringValue(UploadId); + } + if (SerializedAdditionalRawData?.ContainsKey("object") != true) + { + writer.WritePropertyName("object"u8); + writer.WriteStringValue(Object.ToString()); + } + if (SerializedAdditionalRawData != null) + { + foreach (var item in SerializedAdditionalRawData) + { + if (ModelSerializationExtensions.IsSentinelValue(item.Value)) + { + continue; + } + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + InternalUploadPart IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(InternalUploadPart)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeInternalUploadPart(document.RootElement, options); + } + + internal static InternalUploadPart DeserializeInternalUploadPart(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string id = default; + DateTimeOffset createdAt = default; + string uploadId = default; + InternalUploadPartObject @object = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("id"u8)) + { + id = property.Value.GetString(); + continue; + } + if (property.NameEquals("created_at"u8)) + { + createdAt = DateTimeOffset.FromUnixTimeSeconds(property.Value.GetInt64()); + continue; + } + if (property.NameEquals("upload_id"u8)) + { + uploadId = property.Value.GetString(); + continue; + } + if (property.NameEquals("object"u8)) + { + @object = new InternalUploadPartObject(property.Value.GetString()); + continue; + } + if (true) + { + rawDataDictionary ??= new Dictionary(); + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new InternalUploadPart(id, createdAt, uploadId, @object, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(InternalUploadPart)} does not support writing '{options.Format}' format."); + } + } + + InternalUploadPart IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeInternalUploadPart(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(InternalUploadPart)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + internal static InternalUploadPart FromResponse(PipelineResponse response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeInternalUploadPart(document.RootElement); + } + + internal virtual BinaryContent ToBinaryContent() + { + return BinaryContent.Create(this, ModelSerializationExtensions.WireOptions); + } + } +} diff --git a/.dotnet/src/Generated/Models/InternalUploadPart.cs b/.dotnet/src/Generated/Models/InternalUploadPart.cs new file mode 100644 index 000000000..5a7239276 --- /dev/null +++ b/.dotnet/src/Generated/Models/InternalUploadPart.cs @@ -0,0 +1,41 @@ +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace OpenAI.Files +{ + internal partial class InternalUploadPart + { + internal IDictionary SerializedAdditionalRawData { get; set; } + internal InternalUploadPart(string id, DateTimeOffset createdAt, string uploadId) + { + Argument.AssertNotNull(id, nameof(id)); + Argument.AssertNotNull(uploadId, nameof(uploadId)); + + Id = id; + CreatedAt = createdAt; + UploadId = uploadId; 
+ } + + internal InternalUploadPart(string id, DateTimeOffset createdAt, string uploadId, InternalUploadPartObject @object, IDictionary serializedAdditionalRawData) + { + Id = id; + CreatedAt = createdAt; + UploadId = uploadId; + Object = @object; + SerializedAdditionalRawData = serializedAdditionalRawData; + } + + internal InternalUploadPart() + { + } + + public string Id { get; } + public DateTimeOffset CreatedAt { get; } + public string UploadId { get; } + public InternalUploadPartObject Object { get; } = InternalUploadPartObject.UploadPart; + } +} diff --git a/.dotnet/src/Generated/Models/InternalUploadPartObject.cs b/.dotnet/src/Generated/Models/InternalUploadPartObject.cs new file mode 100644 index 000000000..b790ad181 --- /dev/null +++ b/.dotnet/src/Generated/Models/InternalUploadPartObject.cs @@ -0,0 +1,34 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Files +{ + internal readonly partial struct InternalUploadPartObject : IEquatable + { + private readonly string _value; + + public InternalUploadPartObject(string value) + { + _value = value ?? 
throw new ArgumentNullException(nameof(value)); + } + + private const string UploadPartValue = "upload.part"; + + public static InternalUploadPartObject UploadPart { get; } = new InternalUploadPartObject(UploadPartValue); + public static bool operator ==(InternalUploadPartObject left, InternalUploadPartObject right) => left.Equals(right); + public static bool operator !=(InternalUploadPartObject left, InternalUploadPartObject right) => !left.Equals(right); + public static implicit operator InternalUploadPartObject(string value) => new InternalUploadPartObject(value); + + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is InternalUploadPartObject other && Equals(other); + public bool Equals(InternalUploadPartObject other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value != null ? StringComparer.InvariantCultureIgnoreCase.GetHashCode(_value) : 0; + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/InternalUploadStatus.cs b/.dotnet/src/Generated/Models/InternalUploadStatus.cs new file mode 100644 index 000000000..4ac5876d4 --- /dev/null +++ b/.dotnet/src/Generated/Models/InternalUploadStatus.cs @@ -0,0 +1,40 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Files +{ + internal readonly partial struct InternalUploadStatus : IEquatable + { + private readonly string _value; + + public InternalUploadStatus(string value) + { + _value = value ?? 
throw new ArgumentNullException(nameof(value)); + } + + private const string PendingValue = "pending"; + private const string CompletedValue = "completed"; + private const string CancelledValue = "cancelled"; + private const string ExpiredValue = "expired"; + + public static InternalUploadStatus Pending { get; } = new InternalUploadStatus(PendingValue); + public static InternalUploadStatus Completed { get; } = new InternalUploadStatus(CompletedValue); + public static InternalUploadStatus Cancelled { get; } = new InternalUploadStatus(CancelledValue); + public static InternalUploadStatus Expired { get; } = new InternalUploadStatus(ExpiredValue); + public static bool operator ==(InternalUploadStatus left, InternalUploadStatus right) => left.Equals(right); + public static bool operator !=(InternalUploadStatus left, InternalUploadStatus right) => !left.Equals(right); + public static implicit operator InternalUploadStatus(string value) => new InternalUploadStatus(value); + + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is InternalUploadStatus other && Equals(other); + public bool Equals(InternalUploadStatus other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value != null ? StringComparer.InvariantCultureIgnoreCase.GetHashCode(_value) : 0; + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/ListOrder.cs b/.dotnet/src/Generated/Models/ListOrder.cs deleted file mode 100644 index 31221d29b..000000000 --- a/.dotnet/src/Generated/Models/ListOrder.cs +++ /dev/null @@ -1,33 +0,0 @@ -// - -#nullable disable - -using System; -using System.ComponentModel; - -namespace OpenAI -{ - public readonly partial struct ListOrder : IEquatable - { - private readonly string _value; - - public ListOrder(string value) - { - _value = value ?? 
throw new ArgumentNullException(nameof(value)); - } - - private const string OldestFirstValue = "asc"; - private const string NewestFirstValue = "desc"; - public static bool operator ==(ListOrder left, ListOrder right) => left.Equals(right); - public static bool operator !=(ListOrder left, ListOrder right) => !left.Equals(right); - public static implicit operator ListOrder(string value) => new ListOrder(value); - - [EditorBrowsable(EditorBrowsableState.Never)] - public override bool Equals(object obj) => obj is ListOrder other && Equals(other); - public bool Equals(ListOrder other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); - - [EditorBrowsable(EditorBrowsableState.Never)] - public override int GetHashCode() => _value != null ? StringComparer.InvariantCultureIgnoreCase.GetHashCode(_value) : 0; - public override string ToString() => _value; - } -} diff --git a/.dotnet/src/Generated/Models/MessageCollectionOrder.cs b/.dotnet/src/Generated/Models/MessageCollectionOrder.cs new file mode 100644 index 000000000..785954834 --- /dev/null +++ b/.dotnet/src/Generated/Models/MessageCollectionOrder.cs @@ -0,0 +1,33 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Assistants +{ + public readonly partial struct MessageCollectionOrder : IEquatable + { + private readonly string _value; + + public MessageCollectionOrder(string value) + { + _value = value ?? 
throw new ArgumentNullException(nameof(value)); + } + + private const string AscendingValue = "asc"; + private const string DescendingValue = "desc"; + public static bool operator ==(MessageCollectionOrder left, MessageCollectionOrder right) => left.Equals(right); + public static bool operator !=(MessageCollectionOrder left, MessageCollectionOrder right) => !left.Equals(right); + public static implicit operator MessageCollectionOrder(string value) => new MessageCollectionOrder(value); + + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is MessageCollectionOrder other && Equals(other); + public bool Equals(MessageCollectionOrder other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value != null ? StringComparer.InvariantCultureIgnoreCase.GetHashCode(_value) : 0; + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/OpenAIFileInfo.Serialization.cs b/.dotnet/src/Generated/Models/OpenAIFileInfo.Serialization.cs index 18fbf44df..fcd6544a3 100644 --- a/.dotnet/src/Generated/Models/OpenAIFileInfo.Serialization.cs +++ b/.dotnet/src/Generated/Models/OpenAIFileInfo.Serialization.cs @@ -111,7 +111,7 @@ internal static OpenAIFileInfo DeserializeOpenAIFileInfo(JsonElement element, Mo return null; } string id = default; - long? bytes = default; + int? 
bytes = default; DateTimeOffset createdAt = default; string filename = default; InternalOpenAIFileObject @object = default; @@ -134,7 +134,7 @@ internal static OpenAIFileInfo DeserializeOpenAIFileInfo(JsonElement element, Mo bytes = null; continue; } - bytes = property.Value.GetInt64(); + bytes = property.Value.GetInt32(); continue; } if (property.NameEquals("created_at"u8)) diff --git a/.dotnet/src/Generated/Models/OpenAIFileInfo.cs b/.dotnet/src/Generated/Models/OpenAIFileInfo.cs index 240c8589d..6643d5ab8 100644 --- a/.dotnet/src/Generated/Models/OpenAIFileInfo.cs +++ b/.dotnet/src/Generated/Models/OpenAIFileInfo.cs @@ -10,7 +10,7 @@ namespace OpenAI.Files public partial class OpenAIFileInfo { internal IDictionary SerializedAdditionalRawData { get; set; } - internal OpenAIFileInfo(string id, long? sizeInBytes, DateTimeOffset createdAt, string filename, OpenAIFilePurpose purpose, OpenAIFileStatus status) + internal OpenAIFileInfo(string id, int? sizeInBytes, DateTimeOffset createdAt, string filename, OpenAIFilePurpose purpose, OpenAIFileStatus status) { Argument.AssertNotNull(id, nameof(id)); Argument.AssertNotNull(filename, nameof(filename)); @@ -23,7 +23,7 @@ internal OpenAIFileInfo(string id, long? sizeInBytes, DateTimeOffset createdAt, Status = status; } - internal OpenAIFileInfo(string id, long? sizeInBytes, DateTimeOffset createdAt, string filename, InternalOpenAIFileObject @object, OpenAIFilePurpose purpose, OpenAIFileStatus status, string statusDetails, IDictionary serializedAdditionalRawData) + internal OpenAIFileInfo(string id, int? 
sizeInBytes, DateTimeOffset createdAt, string filename, InternalOpenAIFileObject @object, OpenAIFilePurpose purpose, OpenAIFileStatus status, string statusDetails, IDictionary serializedAdditionalRawData) { Id = id; SizeInBytes = sizeInBytes; diff --git a/.dotnet/src/Generated/Models/RunCollectionOrder.cs b/.dotnet/src/Generated/Models/RunCollectionOrder.cs new file mode 100644 index 000000000..ee88afced --- /dev/null +++ b/.dotnet/src/Generated/Models/RunCollectionOrder.cs @@ -0,0 +1,33 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Assistants +{ + public readonly partial struct RunCollectionOrder : IEquatable + { + private readonly string _value; + + public RunCollectionOrder(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string AscendingValue = "asc"; + private const string DescendingValue = "desc"; + public static bool operator ==(RunCollectionOrder left, RunCollectionOrder right) => left.Equals(right); + public static bool operator !=(RunCollectionOrder left, RunCollectionOrder right) => !left.Equals(right); + public static implicit operator RunCollectionOrder(string value) => new RunCollectionOrder(value); + + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is RunCollectionOrder other && Equals(other); + public bool Equals(RunCollectionOrder other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value != null ? 
StringComparer.InvariantCultureIgnoreCase.GetHashCode(_value) : 0; + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/RunStepCollectionOrder.cs b/.dotnet/src/Generated/Models/RunStepCollectionOrder.cs new file mode 100644 index 000000000..a818d310b --- /dev/null +++ b/.dotnet/src/Generated/Models/RunStepCollectionOrder.cs @@ -0,0 +1,33 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.Assistants +{ + public readonly partial struct RunStepCollectionOrder : IEquatable + { + private readonly string _value; + + public RunStepCollectionOrder(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string AscendingValue = "asc"; + private const string DescendingValue = "desc"; + public static bool operator ==(RunStepCollectionOrder left, RunStepCollectionOrder right) => left.Equals(right); + public static bool operator !=(RunStepCollectionOrder left, RunStepCollectionOrder right) => !left.Equals(right); + public static implicit operator RunStepCollectionOrder(string value) => new RunStepCollectionOrder(value); + + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is RunStepCollectionOrder other && Equals(other); + public bool Equals(RunStepCollectionOrder other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value != null ? 
StringComparer.InvariantCultureIgnoreCase.GetHashCode(_value) : 0; + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/SpeechGenerationOptions.Serialization.cs b/.dotnet/src/Generated/Models/SpeechGenerationOptions.Serialization.cs index 1bd301308..823b6165b 100644 --- a/.dotnet/src/Generated/Models/SpeechGenerationOptions.Serialization.cs +++ b/.dotnet/src/Generated/Models/SpeechGenerationOptions.Serialization.cs @@ -34,17 +34,17 @@ void IJsonModel.Write(Utf8JsonWriter writer, ModelReade if (SerializedAdditionalRawData?.ContainsKey("voice") != true) { writer.WritePropertyName("voice"u8); - writer.WriteStringValue(Voice.ToSerialString()); + writer.WriteStringValue(Voice.ToString()); } if (SerializedAdditionalRawData?.ContainsKey("response_format") != true && Optional.IsDefined(ResponseFormat)) { writer.WritePropertyName("response_format"u8); - writer.WriteStringValue(ResponseFormat.Value.ToSerialString()); + writer.WriteStringValue(ResponseFormat.Value.ToString()); } - if (SerializedAdditionalRawData?.ContainsKey("speed") != true && Optional.IsDefined(Speed)) + if (SerializedAdditionalRawData?.ContainsKey("speed") != true && Optional.IsDefined(SpeedRatio)) { writer.WritePropertyName("speed"u8); - writer.WriteNumberValue(Speed.Value); + writer.WriteNumberValue(SpeedRatio.Value); } if (SerializedAdditionalRawData != null) { @@ -109,7 +109,7 @@ internal static SpeechGenerationOptions DeserializeSpeechGenerationOptions(JsonE } if (property.NameEquals("voice"u8)) { - voice = property.Value.GetString().ToGeneratedSpeechVoice(); + voice = new GeneratedSpeechVoice(property.Value.GetString()); continue; } if (property.NameEquals("response_format"u8)) @@ -118,7 +118,7 @@ internal static SpeechGenerationOptions DeserializeSpeechGenerationOptions(JsonE { continue; } - responseFormat = property.Value.GetString().ToGeneratedSpeechFormat(); + responseFormat = new GeneratedSpeechFormat(property.Value.GetString()); continue; } if 
(property.NameEquals("speed"u8)) diff --git a/.dotnet/src/Generated/Models/SpeechGenerationOptions.cs b/.dotnet/src/Generated/Models/SpeechGenerationOptions.cs index 4a5b48c18..8ecda5dc0 100644 --- a/.dotnet/src/Generated/Models/SpeechGenerationOptions.cs +++ b/.dotnet/src/Generated/Models/SpeechGenerationOptions.cs @@ -11,16 +11,15 @@ public partial class SpeechGenerationOptions { internal IDictionary SerializedAdditionalRawData { get; set; } - internal SpeechGenerationOptions(InternalCreateSpeechRequestModel model, string input, GeneratedSpeechVoice voice, GeneratedSpeechFormat? responseFormat, float? speed, IDictionary serializedAdditionalRawData) + internal SpeechGenerationOptions(InternalCreateSpeechRequestModel model, string input, GeneratedSpeechVoice voice, GeneratedSpeechFormat? responseFormat, float? speedRatio, IDictionary serializedAdditionalRawData) { Model = model; Input = input; Voice = voice; ResponseFormat = responseFormat; - Speed = speed; + SpeedRatio = speedRatio; SerializedAdditionalRawData = serializedAdditionalRawData; } public GeneratedSpeechFormat? ResponseFormat { get; set; } - public float? 
Speed { get; set; } } } diff --git a/.dotnet/src/Generated/Models/StreamingChatCompletionUpdate.Serialization.cs b/.dotnet/src/Generated/Models/StreamingChatCompletionUpdate.Serialization.cs index 2a84823e3..6a39741da 100644 --- a/.dotnet/src/Generated/Models/StreamingChatCompletionUpdate.Serialization.cs +++ b/.dotnet/src/Generated/Models/StreamingChatCompletionUpdate.Serialization.cs @@ -46,6 +46,18 @@ void IJsonModel.Write(Utf8JsonWriter writer, Mode writer.WritePropertyName("model"u8); writer.WriteStringValue(Model); } + if (SerializedAdditionalRawData?.ContainsKey("service_tier") != true && Optional.IsDefined(ServiceTier)) + { + if (ServiceTier != null) + { + writer.WritePropertyName("service_tier"u8); + writer.WriteStringValue(ServiceTier.Value.ToString()); + } + else + { + writer.WriteNull("service_tier"); + } + } if (SerializedAdditionalRawData?.ContainsKey("system_fingerprint") != true && Optional.IsDefined(SystemFingerprint)) { writer.WritePropertyName("system_fingerprint"u8); @@ -107,6 +119,7 @@ internal static StreamingChatCompletionUpdate DeserializeStreamingChatCompletion IReadOnlyList choices = default; DateTimeOffset created = default; string model = default; + InternalCreateChatCompletionStreamResponseServiceTier? 
serviceTier = default; string systemFingerprint = default; InternalCreateChatCompletionStreamResponseObject @object = default; ChatTokenUsage usage = default; @@ -139,6 +152,16 @@ internal static StreamingChatCompletionUpdate DeserializeStreamingChatCompletion model = property.Value.GetString(); continue; } + if (property.NameEquals("service_tier"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + serviceTier = null; + continue; + } + serviceTier = new InternalCreateChatCompletionStreamResponseServiceTier(property.Value.GetString()); + continue; + } if (property.NameEquals("system_fingerprint"u8)) { systemFingerprint = property.Value.GetString(); @@ -170,6 +193,7 @@ internal static StreamingChatCompletionUpdate DeserializeStreamingChatCompletion choices, created, model, + serviceTier, systemFingerprint, @object, usage, diff --git a/.dotnet/src/Generated/Models/StreamingChatCompletionUpdate.cs b/.dotnet/src/Generated/Models/StreamingChatCompletionUpdate.cs index 8132fd98f..b72279024 100644 --- a/.dotnet/src/Generated/Models/StreamingChatCompletionUpdate.cs +++ b/.dotnet/src/Generated/Models/StreamingChatCompletionUpdate.cs @@ -23,12 +23,13 @@ internal StreamingChatCompletionUpdate(string id, IEnumerable choices, DateTimeOffset createdAt, string model, string systemFingerprint, InternalCreateChatCompletionStreamResponseObject @object, ChatTokenUsage usage, IDictionary serializedAdditionalRawData) + internal StreamingChatCompletionUpdate(string id, IReadOnlyList choices, DateTimeOffset createdAt, string model, InternalCreateChatCompletionStreamResponseServiceTier? 
serviceTier, string systemFingerprint, InternalCreateChatCompletionStreamResponseObject @object, ChatTokenUsage usage, IDictionary serializedAdditionalRawData) { Id = id; Choices = choices; CreatedAt = createdAt; Model = model; + ServiceTier = serviceTier; SystemFingerprint = systemFingerprint; Object = @object; Usage = usage; diff --git a/.dotnet/src/Generated/Models/SystemChatMessage.Serialization.cs b/.dotnet/src/Generated/Models/SystemChatMessage.Serialization.cs index a3029045a..71a6676c5 100644 --- a/.dotnet/src/Generated/Models/SystemChatMessage.Serialization.cs +++ b/.dotnet/src/Generated/Models/SystemChatMessage.Serialization.cs @@ -33,7 +33,7 @@ internal static SystemChatMessage DeserializeSystemChatMessage(JsonElement eleme return null; } string name = default; - string role = default; + ChatMessageRole role = default; IList content = default; IDictionary serializedAdditionalRawData = default; Dictionary rawDataDictionary = new Dictionary(); @@ -46,7 +46,7 @@ internal static SystemChatMessage DeserializeSystemChatMessage(JsonElement eleme } if (property.NameEquals("role"u8)) { - role = property.Value.GetString(); + role = property.Value.GetString().ToChatMessageRole(); continue; } if (property.NameEquals("content"u8)) diff --git a/.dotnet/src/Generated/Models/SystemChatMessage.cs b/.dotnet/src/Generated/Models/SystemChatMessage.cs index 9d8c1c9da..962eee34b 100644 --- a/.dotnet/src/Generated/Models/SystemChatMessage.cs +++ b/.dotnet/src/Generated/Models/SystemChatMessage.cs @@ -9,7 +9,7 @@ namespace OpenAI.Chat { public partial class SystemChatMessage : ChatMessage { - internal SystemChatMessage(string role, IList content, IDictionary serializedAdditionalRawData, string participantName) : base(role, content, serializedAdditionalRawData) + internal SystemChatMessage(ChatMessageRole role, IList content, IDictionary serializedAdditionalRawData, string participantName) : base(role, content, serializedAdditionalRawData) { ParticipantName = participantName; } 
diff --git a/.dotnet/src/Generated/Models/ToolChatMessage.Serialization.cs b/.dotnet/src/Generated/Models/ToolChatMessage.Serialization.cs index 4cff864a2..fb4921699 100644 --- a/.dotnet/src/Generated/Models/ToolChatMessage.Serialization.cs +++ b/.dotnet/src/Generated/Models/ToolChatMessage.Serialization.cs @@ -33,7 +33,7 @@ internal static ToolChatMessage DeserializeToolChatMessage(JsonElement element, return null; } string toolCallId = default; - string role = default; + ChatMessageRole role = default; IList content = default; IDictionary serializedAdditionalRawData = default; Dictionary rawDataDictionary = new Dictionary(); @@ -46,7 +46,7 @@ internal static ToolChatMessage DeserializeToolChatMessage(JsonElement element, } if (property.NameEquals("role"u8)) { - role = property.Value.GetString(); + role = property.Value.GetString().ToChatMessageRole(); continue; } if (property.NameEquals("content"u8)) diff --git a/.dotnet/src/Generated/Models/ToolChatMessage.cs b/.dotnet/src/Generated/Models/ToolChatMessage.cs index 02ad0ddc9..d0d4c210b 100644 --- a/.dotnet/src/Generated/Models/ToolChatMessage.cs +++ b/.dotnet/src/Generated/Models/ToolChatMessage.cs @@ -9,7 +9,7 @@ namespace OpenAI.Chat { public partial class ToolChatMessage : ChatMessage { - internal ToolChatMessage(string role, IList content, IDictionary serializedAdditionalRawData, string toolCallId) : base(role, content, serializedAdditionalRawData) + internal ToolChatMessage(ChatMessageRole role, IList content, IDictionary serializedAdditionalRawData, string toolCallId) : base(role, content, serializedAdditionalRawData) { ToolCallId = toolCallId; } diff --git a/.dotnet/src/Generated/Models/UnknownChatMessage.cs b/.dotnet/src/Generated/Models/UnknownChatMessage.cs deleted file mode 100644 index 901a1ea1c..000000000 --- a/.dotnet/src/Generated/Models/UnknownChatMessage.cs +++ /dev/null @@ -1,20 +0,0 @@ -// - -#nullable disable - -using System; -using System.Collections.Generic; - -namespace OpenAI.Chat -{ - 
internal partial class UnknownChatMessage : ChatMessage - { - internal UnknownChatMessage(string role, IList content, IDictionary serializedAdditionalRawData) : base(role, content, serializedAdditionalRawData) - { - } - - internal UnknownChatMessage() - { - } - } -} diff --git a/.dotnet/src/Generated/Models/UserChatMessage.Serialization.cs b/.dotnet/src/Generated/Models/UserChatMessage.Serialization.cs index 0b92b0281..c6fdb45b8 100644 --- a/.dotnet/src/Generated/Models/UserChatMessage.Serialization.cs +++ b/.dotnet/src/Generated/Models/UserChatMessage.Serialization.cs @@ -33,7 +33,7 @@ internal static UserChatMessage DeserializeUserChatMessage(JsonElement element, return null; } string name = default; - string role = default; + ChatMessageRole role = default; IList content = default; IDictionary serializedAdditionalRawData = default; Dictionary rawDataDictionary = new Dictionary(); @@ -46,7 +46,7 @@ internal static UserChatMessage DeserializeUserChatMessage(JsonElement element, } if (property.NameEquals("role"u8)) { - role = property.Value.GetString(); + role = property.Value.GetString().ToChatMessageRole(); continue; } if (property.NameEquals("content"u8)) diff --git a/.dotnet/src/Generated/Models/UserChatMessage.cs b/.dotnet/src/Generated/Models/UserChatMessage.cs index 84c5fad7a..65a986ebd 100644 --- a/.dotnet/src/Generated/Models/UserChatMessage.cs +++ b/.dotnet/src/Generated/Models/UserChatMessage.cs @@ -9,7 +9,7 @@ namespace OpenAI.Chat { public partial class UserChatMessage : ChatMessage { - internal UserChatMessage(string role, IList content, IDictionary serializedAdditionalRawData, string participantName) : base(role, content, serializedAdditionalRawData) + internal UserChatMessage(ChatMessageRole role, IList content, IDictionary serializedAdditionalRawData, string participantName) : base(role, content, serializedAdditionalRawData) { ParticipantName = participantName; } diff --git a/.dotnet/src/Generated/Models/VectorStoreCollectionOrder.cs 
b/.dotnet/src/Generated/Models/VectorStoreCollectionOrder.cs new file mode 100644 index 000000000..d8e019322 --- /dev/null +++ b/.dotnet/src/Generated/Models/VectorStoreCollectionOrder.cs @@ -0,0 +1,33 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.VectorStores +{ + public readonly partial struct VectorStoreCollectionOrder : IEquatable + { + private readonly string _value; + + public VectorStoreCollectionOrder(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string AscendingValue = "asc"; + private const string DescendingValue = "desc"; + public static bool operator ==(VectorStoreCollectionOrder left, VectorStoreCollectionOrder right) => left.Equals(right); + public static bool operator !=(VectorStoreCollectionOrder left, VectorStoreCollectionOrder right) => !left.Equals(right); + public static implicit operator VectorStoreCollectionOrder(string value) => new VectorStoreCollectionOrder(value); + + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is VectorStoreCollectionOrder other && Equals(other); + public bool Equals(VectorStoreCollectionOrder other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value != null ? 
StringComparer.InvariantCultureIgnoreCase.GetHashCode(_value) : 0; + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/VectorStoreCreationHelper.Serialization.cs b/.dotnet/src/Generated/Models/VectorStoreCreationHelper.Serialization.cs index 590b20de0..3004632e0 100644 --- a/.dotnet/src/Generated/Models/VectorStoreCreationHelper.Serialization.cs +++ b/.dotnet/src/Generated/Models/VectorStoreCreationHelper.Serialization.cs @@ -7,6 +7,7 @@ using System.ClientModel.Primitives; using System.Collections.Generic; using System.Text.Json; +using OpenAI.VectorStores; namespace OpenAI.Assistants { @@ -31,6 +32,11 @@ void IJsonModel.Write(Utf8JsonWriter writer, ModelRea } writer.WriteEndArray(); } + if (SerializedAdditionalRawData?.ContainsKey("chunking_strategy") != true && Optional.IsDefined(ChunkingStrategy)) + { + writer.WritePropertyName("chunking_strategy"u8); + writer.WriteObjectValue(ChunkingStrategy, options); + } if (SerializedAdditionalRawData?.ContainsKey("metadata") != true && Optional.IsCollectionDefined(Metadata)) { writer.WritePropertyName("metadata"u8); @@ -85,6 +91,7 @@ internal static VectorStoreCreationHelper DeserializeVectorStoreCreationHelper(J return null; } IList fileIds = default; + FileChunkingStrategy chunkingStrategy = default; IDictionary metadata = default; IDictionary serializedAdditionalRawData = default; Dictionary rawDataDictionary = new Dictionary(); @@ -104,6 +111,15 @@ internal static VectorStoreCreationHelper DeserializeVectorStoreCreationHelper(J fileIds = array; continue; } + if (property.NameEquals("chunking_strategy"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + chunkingStrategy = FileChunkingStrategy.DeserializeFileChunkingStrategy(property.Value, options); + continue; + } if (property.NameEquals("metadata"u8)) { if (property.Value.ValueKind == JsonValueKind.Null) @@ -125,7 +141,7 @@ internal static VectorStoreCreationHelper 
DeserializeVectorStoreCreationHelper(J } } serializedAdditionalRawData = rawDataDictionary; - return new VectorStoreCreationHelper(fileIds ?? new ChangeTrackingList(), metadata ?? new ChangeTrackingDictionary(), serializedAdditionalRawData); + return new VectorStoreCreationHelper(fileIds ?? new ChangeTrackingList(), chunkingStrategy, metadata ?? new ChangeTrackingDictionary(), serializedAdditionalRawData); } BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) diff --git a/.dotnet/src/Generated/Models/VectorStoreCreationHelper.cs b/.dotnet/src/Generated/Models/VectorStoreCreationHelper.cs index e8d2702b5..6871ab916 100644 --- a/.dotnet/src/Generated/Models/VectorStoreCreationHelper.cs +++ b/.dotnet/src/Generated/Models/VectorStoreCreationHelper.cs @@ -4,6 +4,7 @@ using System; using System.Collections.Generic; +using OpenAI.VectorStores; namespace OpenAI.Assistants { @@ -16,9 +17,10 @@ public VectorStoreCreationHelper() Metadata = new ChangeTrackingDictionary(); } - internal VectorStoreCreationHelper(IList fileIds, IDictionary metadata, IDictionary serializedAdditionalRawData) + internal VectorStoreCreationHelper(IList fileIds, FileChunkingStrategy chunkingStrategy, IDictionary metadata, IDictionary serializedAdditionalRawData) { FileIds = fileIds; + ChunkingStrategy = chunkingStrategy; Metadata = metadata; SerializedAdditionalRawData = serializedAdditionalRawData; } diff --git a/.dotnet/src/Generated/Models/VectorStoreFileAssociationCollectionOrder.cs b/.dotnet/src/Generated/Models/VectorStoreFileAssociationCollectionOrder.cs new file mode 100644 index 000000000..02656b21d --- /dev/null +++ b/.dotnet/src/Generated/Models/VectorStoreFileAssociationCollectionOrder.cs @@ -0,0 +1,33 @@ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace OpenAI.VectorStores +{ + public readonly partial struct VectorStoreFileAssociationCollectionOrder : IEquatable + { + private readonly string _value; + + public 
VectorStoreFileAssociationCollectionOrder(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string AscendingValue = "asc"; + private const string DescendingValue = "desc"; + public static bool operator ==(VectorStoreFileAssociationCollectionOrder left, VectorStoreFileAssociationCollectionOrder right) => left.Equals(right); + public static bool operator !=(VectorStoreFileAssociationCollectionOrder left, VectorStoreFileAssociationCollectionOrder right) => !left.Equals(right); + public static implicit operator VectorStoreFileAssociationCollectionOrder(string value) => new VectorStoreFileAssociationCollectionOrder(value); + + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is VectorStoreFileAssociationCollectionOrder other && Equals(other); + public bool Equals(VectorStoreFileAssociationCollectionOrder other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value != null ? StringComparer.InvariantCultureIgnoreCase.GetHashCode(_value) : 0; + public override string ToString() => _value; + } +} diff --git a/.dotnet/src/Generated/Models/VectorStoreFileAssociationErrorCode.cs b/.dotnet/src/Generated/Models/VectorStoreFileAssociationErrorCode.cs index 22d86b611..167ad6b2a 100644 --- a/.dotnet/src/Generated/Models/VectorStoreFileAssociationErrorCode.cs +++ b/.dotnet/src/Generated/Models/VectorStoreFileAssociationErrorCode.cs @@ -16,15 +16,13 @@ public VectorStoreFileAssociationErrorCode(string value) _value = value ?? 
throw new ArgumentNullException(nameof(value)); } - private const string InternalErrorValue = "internal_error"; - private const string FileNotFoundValue = "file_not_found"; - private const string ParsingErrorValue = "parsing_error"; - private const string UnhandledMimeTypeValue = "unhandled_mime_type"; - - public static VectorStoreFileAssociationErrorCode InternalError { get; } = new VectorStoreFileAssociationErrorCode(InternalErrorValue); - public static VectorStoreFileAssociationErrorCode FileNotFound { get; } = new VectorStoreFileAssociationErrorCode(FileNotFoundValue); - public static VectorStoreFileAssociationErrorCode ParsingError { get; } = new VectorStoreFileAssociationErrorCode(ParsingErrorValue); - public static VectorStoreFileAssociationErrorCode UnhandledMimeType { get; } = new VectorStoreFileAssociationErrorCode(UnhandledMimeTypeValue); + private const string ServerErrorValue = "server_error"; + private const string UnsupportedFileValue = "unsupported_file"; + private const string InvalidFileValue = "invalid_file"; + + public static VectorStoreFileAssociationErrorCode ServerError { get; } = new VectorStoreFileAssociationErrorCode(ServerErrorValue); + public static VectorStoreFileAssociationErrorCode UnsupportedFile { get; } = new VectorStoreFileAssociationErrorCode(UnsupportedFileValue); + public static VectorStoreFileAssociationErrorCode InvalidFile { get; } = new VectorStoreFileAssociationErrorCode(InvalidFileValue); public static bool operator ==(VectorStoreFileAssociationErrorCode left, VectorStoreFileAssociationErrorCode right) => left.Equals(right); public static bool operator !=(VectorStoreFileAssociationErrorCode left, VectorStoreFileAssociationErrorCode right) => !left.Equals(right); public static implicit operator VectorStoreFileAssociationErrorCode(string value) => new VectorStoreFileAssociationErrorCode(value); diff --git a/.dotnet/src/Generated/OpenAIModelFactory.cs b/.dotnet/src/Generated/OpenAIModelFactory.cs index adcbbde3d..496894c70 
100644 --- a/.dotnet/src/Generated/OpenAIModelFactory.cs +++ b/.dotnet/src/Generated/OpenAIModelFactory.cs @@ -44,14 +44,14 @@ public static ToolChatMessage ToolChatMessage(IEnumerable(); - return new ToolChatMessage("tool", content?.ToList(), serializedAdditionalRawData: null, toolCallId); + return new ToolChatMessage(ChatMessageRole.Tool, content?.ToList(), serializedAdditionalRawData: null, toolCallId); } public static FunctionChatMessage FunctionChatMessage(IEnumerable content = null, string functionName = null) { content ??= new List(); - return new FunctionChatMessage("function", content?.ToList(), serializedAdditionalRawData: null, functionName); + return new FunctionChatMessage(ChatMessageRole.Function, content?.ToList(), serializedAdditionalRawData: null, functionName); } public static ChatFunction ChatFunction(string functionDescription = null, string functionName = null, BinaryData functionParameters = null) diff --git a/.dotnet/src/OpenAI.csproj b/.dotnet/src/OpenAI.csproj index e64e3bdd3..6d874b12a 100644 --- a/.dotnet/src/OpenAI.csproj +++ b/.dotnet/src/OpenAI.csproj @@ -1,11 +1,11 @@ - + This is the OpenAI client library for developing .NET applications with rich experience. 
SDK Code Generation OpenAI - OpenAI + OpenAI;openai-dotnet;ChatGPT;Dall-E 2.0.0 - beta.8 + beta.11 netstandard2.0;net6.0 latest @@ -36,6 +36,9 @@ $(NoWarn),0169 + + $(NoWarn),OPENAI001; + Debug;Release;Unsigned diff --git a/.dotnet/src/Utility/CustomSerializationHelpers.cs b/.dotnet/src/Utility/CustomSerializationHelpers.cs index f03c64f2e..bcf1ae805 100644 --- a/.dotnet/src/Utility/CustomSerializationHelpers.cs +++ b/.dotnet/src/Utility/CustomSerializationHelpers.cs @@ -9,7 +9,7 @@ namespace OpenAI; internal static partial class CustomSerializationHelpers { - internal static TOutput DeserializeNewInstance( + internal static TOutput DeserializeNewInstance( UInstanceInput existingInstance, Func deserializationFunc, ref Utf8JsonReader reader, @@ -27,7 +27,7 @@ internal static TOutput DeserializeNewInstance( return deserializationFunc.Invoke(document.RootElement, options); } - internal static TOutput DeserializeNewInstance( + internal static TOutput DeserializeNewInstance( UInstanceInput existingInstance, Func deserializationFunc, BinaryData data, @@ -49,9 +49,9 @@ internal static TOutput DeserializeNewInstance( } } - internal static void SerializeInstance( + internal static void SerializeInstance( UInstanceInput instance, - Action serializationFunc, + Action serializationFunc, Utf8JsonWriter writer, ModelReaderWriterOptions options) where UInstanceInput : IJsonModel @@ -87,7 +87,7 @@ internal static void AssertSupportedJsonWriteFormat(T instance, ModelReaderWr where T : IJsonModel => AssertSupportedJsonWriteFormat(instance, options); - internal static void AssertSupportedJsonWriteFormat(UInstanceInput instance, ModelReaderWriterOptions options) + internal static void AssertSupportedJsonWriteFormat(UInstanceInput instance, ModelReaderWriterOptions options) where UInstanceInput : IJsonModel { var format = options.Format == "W" ? 
((IJsonModel)instance).GetFormatFromOptions(options) : options.Format; @@ -101,7 +101,7 @@ internal static void AssertSupportedPersistableWriteFormat(T instance, ModelR where T : IPersistableModel => AssertSupportedPersistableWriteFormat(instance, options); - internal static void AssertSupportedPersistableWriteFormat(UInstanceInput instance, ModelReaderWriterOptions options) + internal static void AssertSupportedPersistableWriteFormat(UInstanceInput instance, ModelReaderWriterOptions options) where UInstanceInput : IPersistableModel { var format = options.Format == "W" ? ((IPersistableModel)instance).GetFormatFromOptions(options) : options.Format; @@ -127,4 +127,27 @@ internal static void WriteSerializedAdditionalRawData(this Utf8JsonWriter writer } } } + + internal static void WriteOptionalProperty(this Utf8JsonWriter writer, ReadOnlySpan name, T value, ModelReaderWriterOptions options) + { + if (Optional.IsDefined(value)) + { + writer.WritePropertyName(name); + writer.WriteObjectValue(value, options); + } + } + + internal static void WriteOptionalCollection(this Utf8JsonWriter writer, ReadOnlySpan name, IEnumerable values, ModelReaderWriterOptions options) + { + if (Optional.IsCollectionDefined(values)) + { + writer.WritePropertyName(name); + writer.WriteStartArray(); + foreach (T item in values) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + } + } } \ No newline at end of file diff --git a/.dotnet/src/Utility/PageCollectionHelpers.cs b/.dotnet/src/Utility/PageCollectionHelpers.cs index f7863ce21..b5be39a6e 100644 --- a/.dotnet/src/Utility/PageCollectionHelpers.cs +++ b/.dotnet/src/Utility/PageCollectionHelpers.cs @@ -15,6 +15,22 @@ public static PageCollection Create(PageEnumerator enumerator) public static AsyncPageCollection CreateAsync(PageEnumerator enumerator) => new AsyncEnumeratorPageCollection(enumerator); + public static IEnumerable Create(PageResultEnumerator enumerator) + { + while (enumerator.MoveNext()) + { + yield 
return enumerator.Current; + } + } + + public static async IAsyncEnumerable CreateAsync(PageResultEnumerator enumerator) + { + while (await enumerator.MoveNextAsync().ConfigureAwait(false)) + { + yield return enumerator.Current; + } + } + private class EnumeratorPageCollection : PageCollection { private readonly PageEnumerator _enumerator; diff --git a/.dotnet/tests/Assistants/AssistantSmokeTests.cs b/.dotnet/tests/Assistants/AssistantSmokeTests.cs index 86aa427f8..4dc5f4984 100644 --- a/.dotnet/tests/Assistants/AssistantSmokeTests.cs +++ b/.dotnet/tests/Assistants/AssistantSmokeTests.cs @@ -1,5 +1,6 @@ using NUnit.Framework; using OpenAI.Assistants; +using OpenAI.Chat; using OpenAI.Files; using OpenAI.VectorStores; using System; @@ -68,6 +69,41 @@ public void RunStepDeserialization() Assert.That(deserializedRunStep.Details.ToolCalls[0].CodeInterpreterOutputs, Has.Count.EqualTo(1)); Assert.That(deserializedRunStep.Details.ToolCalls[0].CodeInterpreterOutputs[0].Logs, Is.Not.Null.And.Not.Empty); } + + [Test] + public void ResponseFormatEquality() + { + Assert.That(AssistantResponseFormat.CreateAutoFormat() == "auto"); + Assert.That(AssistantResponseFormat.CreateAutoFormat(), Is.EqualTo("auto")); + Assert.That(AssistantResponseFormat.CreateAutoFormat(), Is.Not.EqualTo("automatic")); + Assert.That(AssistantResponseFormat.CreateAutoFormat() == AssistantResponseFormat.CreateAutoFormat()); + Assert.That(AssistantResponseFormat.CreateTextFormat() == AssistantResponseFormat.CreateTextFormat()); + Assert.That(AssistantResponseFormat.CreateTextFormat(), Is.EqualTo(AssistantResponseFormat.CreateTextFormat())); + Assert.That(AssistantResponseFormat.CreateAutoFormat() != AssistantResponseFormat.CreateTextFormat()); + Assert.That(AssistantResponseFormat.CreateAutoFormat(), Is.Not.EqualTo(AssistantResponseFormat.CreateTextFormat())); + Assert.That((AssistantResponseFormat)null == (AssistantResponseFormat)null); + Assert.That((AssistantResponseFormat)null != 
AssistantResponseFormat.CreateTextFormat()); + Assert.That(AssistantResponseFormat.CreateTextFormat() != null); + Assert.That(AssistantResponseFormat.CreateTextFormat(), Is.Not.EqualTo(null)); + Assert.That(null, Is.Not.EqualTo(AssistantResponseFormat.CreateTextFormat())); + + AssistantResponseFormat jsonSchemaFormat = AssistantResponseFormat.CreateJsonSchemaFormat( + name: "test_schema", + description: "A description of the schema", + jsonSchema: BinaryData.FromString(""" + { + "type": "object", + "properties": { + "foo": { "type": "string" } + }, + "additionalProperties": false + } + """), + strictSchemaEnabled: true); + + Assert.That(jsonSchemaFormat == AssistantResponseFormat.CreateJsonSchemaFormat("test_schema", BinaryData.FromObjectAsJson(new { }))); + Assert.That(jsonSchemaFormat != AssistantResponseFormat.CreateJsonSchemaFormat("not_test_schema", BinaryData.FromObjectAsJson(new { }))); + } } #pragma warning restore OPENAI001 diff --git a/.dotnet/tests/Assistants/AssistantTests.cs b/.dotnet/tests/Assistants/AssistantTests.cs index b38e4e5cf..187a2c533 100644 --- a/.dotnet/tests/Assistants/AssistantTests.cs +++ b/.dotnet/tests/Assistants/AssistantTests.cs @@ -29,9 +29,9 @@ protected void Cleanup() return; } - AssistantClient client = new(); - FileClient fileClient = new(); - VectorStoreClient vectorStoreClient = new(); + AssistantClient client = GetTestClient(TestScenario.Assistants); + FileClient fileClient = GetTestClient(TestScenario.Files); + VectorStoreClient vectorStoreClient = GetTestClient(TestScenario.VectorStores); RequestOptions requestOptions = new() { ErrorOptions = ClientErrorBehaviors.NoThrow, @@ -206,7 +206,7 @@ public void ThreadWithInitialMessagesWorks() }; AssistantThread thread = client.CreateThread(options); Validate(thread); - PageResult messagesPage = client.GetMessages(thread, new MessageCollectionOptions() { Order = ListOrder.OldestFirst }).GetCurrentPage(); + PageResult messagesPage = client.GetMessages(thread, new 
MessageCollectionOptions() { Order = MessageCollectionOrder.Ascending }).GetCurrentPage(); Assert.That(messagesPage.Values.Count, Is.EqualTo(2)); Assert.That(messagesPage.Values[0].Role, Is.EqualTo(MessageRole.User)); Assert.That(messagesPage.Values[0].Content?.Count, Is.EqualTo(1)); @@ -273,7 +273,7 @@ public void BasicRunStepFunctionalityWorks() }); Validate(assistant); - FileClient fileClient = new(); + FileClient fileClient = GetTestClient(TestScenario.Files); OpenAIFileInfo equationFile = fileClient.UploadFile( BinaryData.FromString(""" x,y @@ -351,25 +351,24 @@ public void SettingResponseFormatWorks() AssistantClient client = GetTestClient(); Assistant assistant = client.CreateAssistant("gpt-4o-mini", new() { - ResponseFormat = AssistantResponseFormat.JsonObject, + ResponseFormat = AssistantResponseFormat.CreateAutoFormat(), }); Validate(assistant); - Assert.That(assistant.ResponseFormat, Is.EqualTo(AssistantResponseFormat.JsonObject)); + Assert.That(assistant.ResponseFormat == "auto"); assistant = client.ModifyAssistant(assistant, new() { - ResponseFormat = AssistantResponseFormat.Text, + ResponseFormat = AssistantResponseFormat.CreateTextFormat(), }); - Assert.That(assistant.ResponseFormat, Is.EqualTo(AssistantResponseFormat.Text)); + Assert.That(assistant.ResponseFormat == AssistantResponseFormat.CreateTextFormat()); AssistantThread thread = client.CreateThread(); Validate(thread); ThreadMessage message = client.CreateMessage(thread, MessageRole.User, ["Write some JSON for me!"]); Validate(message); ThreadRun run = client.CreateRun(thread, assistant, new() { - ResponseFormat = AssistantResponseFormat.JsonObject, + ResponseFormat = AssistantResponseFormat.CreateJsonObjectFormat(), }); - Validate(run); - Assert.That(run.ResponseFormat, Is.EqualTo(AssistantResponseFormat.JsonObject)); + Assert.That(run.ResponseFormat == AssistantResponseFormat.CreateJsonObjectFormat()); } [Test] @@ -439,7 +438,7 @@ public void FunctionToolsWork() } Assert.That(run.Status, 
Is.EqualTo(RunStatus.Completed)); - PageCollection messagePages = client.GetMessages(run.ThreadId, new MessageCollectionOptions() { Order = ListOrder.NewestFirst }); + PageCollection messagePages = client.GetMessages(run.ThreadId, new MessageCollectionOptions() { Order = MessageCollectionOrder.Descending }); PageResult firstPage = messagePages.GetCurrentPage(); Assert.That(firstPage.Values.Count, Is.GreaterThan(1)); Assert.That(firstPage.Values[0].Role, Is.EqualTo(MessageRole.Assistant)); @@ -450,7 +449,7 @@ public void FunctionToolsWork() [Test] public async Task StreamingRunWorks() { - AssistantClient client = new(); + AssistantClient client = GetTestClient(); Assistant assistant = await client.CreateAssistantAsync("gpt-4o-mini"); Validate(assistant); @@ -499,7 +498,11 @@ AsyncCollectionResult streamingResult public async Task StreamingToolCall() { AssistantClient client = GetTestClient(); - FunctionToolDefinition getWeatherTool = new("get_current_weather", "Gets the user's current weather"); + FunctionToolDefinition getWeatherTool = new() + { + FunctionName = "get_current_weather", + Description = "Gets the user's current weather", + }; Assistant assistant = await client.CreateAssistantAsync("gpt-4o-mini", new() { Tools = { getWeatherTool } @@ -557,7 +560,7 @@ public async Task StreamingToolCall() public void BasicFileSearchWorks() { // First, we need to upload a simple test file. - FileClient fileClient = new(); + FileClient fileClient = GetTestClient(TestScenario.Files); OpenAIFileInfo testFile = fileClient.UploadFile( BinaryData.FromString(""" This file describes the favorite foods of several people. @@ -649,7 +652,7 @@ This file describes the favorite foods of several people. 
} while (run?.Status.IsTerminal == false); Assert.That(run.Status, Is.EqualTo(RunStatus.Completed)); - IEnumerable messages = client.GetMessages(thread, new() { Order = ListOrder.NewestFirst }).GetAllValues(); + IEnumerable messages = client.GetMessages(thread, new() { Order = MessageCollectionOrder.Descending }).GetAllValues(); int messageCount = 0; bool hasCake = false; foreach (ThreadMessage message in messages) @@ -674,6 +677,67 @@ This file describes the favorite foods of several people. Assert.That(hasCake, Is.True); } + [Test] + public async Task BasicFileSearchStreamingWorks() + { + const string fileContent = """ + The favorite food of several people: + - Summanus Ferdinand: tacos + - Tekakwitha Effie: pizza + - Filip Carola: cake + """; + + const string fileName = "favorite_foods.txt"; + + FileClient fileClient = GetTestClient(TestScenario.Files); + AssistantClient client = GetTestClient(TestScenario.Assistants); + + // First, upload a simple test file. + OpenAIFileInfo testFile = fileClient.UploadFile(BinaryData.FromString(fileContent), fileName, FileUploadPurpose.Assistants); + Validate(testFile); + + // Create an assistant, using the creation helper to make a new vector store. + AssistantCreationOptions assistantCreationOptions = new() + { + Tools = { new FileSearchToolDefinition() }, + ToolResources = new() + { + FileSearch = new() + { + NewVectorStores = { new VectorStoreCreationHelper([testFile.Id]) } + } + } + }; + Assistant assistant = client.CreateAssistant("gpt-4o-mini", assistantCreationOptions); + Validate(assistant); + + Assert.That(assistant.ToolResources?.FileSearch?.VectorStoreIds, Has.Count.EqualTo(1)); + string vectorStoreId = assistant.ToolResources.FileSearch.VectorStoreIds[0]; + _vectorStoreIdsToDelete.Add(vectorStoreId); + + // Create a thread. + ThreadCreationOptions threadCreationOptions = new() + { + InitialMessages = { "Using the files you have available, what's Filip's favorite food?" 
} + }; + AssistantThread thread = client.CreateThread(threadCreationOptions); + Validate(thread); + + // Create run and stream the results. + AsyncCollectionResult streamingResult = client.CreateRunStreamingAsync(thread.Id, assistant.Id); + string message = string.Empty; + + await foreach (StreamingUpdate update in streamingResult) + { + if (update is MessageContentUpdate contentUpdate) + { + message += $"{contentUpdate.Text}"; + } + } + + Assert.That(message, Does.Contain("cake")); + } + [Test] public async Task Pagination_CanEnumerateAssistants() { @@ -692,7 +756,7 @@ public async Task Pagination_CanEnumerateAssistants() // Page through collection int count = 0; - IAsyncEnumerable assistants = client.GetAssistantsAsync(new AssistantCollectionOptions() { Order = ListOrder.NewestFirst }).GetAllValuesAsync(); + IAsyncEnumerable assistants = client.GetAssistantsAsync(new AssistantCollectionOptions() { Order = AssistantCollectionOrder.Descending }).GetAllValuesAsync(); int lastIdSeen = int.MaxValue; @@ -737,8 +801,8 @@ public async Task Pagination_CanPageThroughAssistantCollection() AsyncPageCollection pages = client.GetAssistantsAsync( new AssistantCollectionOptions() { - Order = ListOrder.NewestFirst, - PageSize = 2 + Order = AssistantCollectionOrder.Descending, + PageSizeLimit = 2 }); int lastIdSeen = int.MaxValue; @@ -787,8 +851,8 @@ public async Task Pagination_CanRehydrateAssistantPageCollectionFromBytes() AsyncPageCollection pages = client.GetAssistantsAsync( new AssistantCollectionOptions() { - Order = ListOrder.NewestFirst, - PageSize = 2 + Order = AssistantCollectionOrder.Descending, + PageSizeLimit = 2 }); // Simulate rehydration of the collection @@ -845,8 +909,8 @@ public async Task Pagination_CanRehydrateAssistantPageCollectionFromPageToken() AsyncPageCollection pages = client.GetAssistantsAsync( new AssistantCollectionOptions() { - Order = ListOrder.NewestFirst, - PageSize = 2 + Order = AssistantCollectionOrder.Descending, + PageSizeLimit = 2 }); // 
Call the rehydration method, passing a typed OpenAIPageToken @@ -944,7 +1008,7 @@ public void Pagination_CanRehydrateRunStepPageCollectionFromBytes() }); Validate(assistant); - FileClient fileClient = new(); + FileClient fileClient = GetTestClient(TestScenario.Files); OpenAIFileInfo equationFile = fileClient.UploadFile( BinaryData.FromString(""" x,y diff --git a/.dotnet/tests/Assistants/VectorStoreTests.cs b/.dotnet/tests/Assistants/VectorStoreTests.cs index 0aed7ec91..fd5ca6b2c 100644 --- a/.dotnet/tests/Assistants/VectorStoreTests.cs +++ b/.dotnet/tests/Assistants/VectorStoreTests.cs @@ -107,7 +107,7 @@ public void CanEnumerateVectorStores() int lastIdSeen = int.MaxValue; int count = 0; - foreach (VectorStore vectorStore in client.GetVectorStores(new VectorStoreCollectionOptions() { Order = ListOrder.NewestFirst }).GetAllValues()) + foreach (VectorStore vectorStore in client.GetVectorStores(new VectorStoreCollectionOptions() { Order = VectorStoreCollectionOrder.Descending }).GetAllValues()) { Assert.That(vectorStore.Id, Is.Not.Null); if (vectorStore.Name?.StartsWith("Test Vector Store ") == true) @@ -147,7 +147,7 @@ public async Task CanEnumerateVectorStoresAsync() int lastIdSeen = int.MaxValue; int count = 0; - await foreach (VectorStore vectorStore in client.GetVectorStoresAsync(new VectorStoreCollectionOptions() { Order = ListOrder.NewestFirst }).GetAllValuesAsync()) + await foreach (VectorStore vectorStore in client.GetVectorStoresAsync(new VectorStoreCollectionOptions() { Order = VectorStoreCollectionOrder.Descending }).GetAllValuesAsync()) { Assert.That(vectorStore.Id, Is.Not.Null); if (vectorStore.Name?.StartsWith("Test Vector Store ") == true) @@ -197,7 +197,7 @@ public void CanAssociateFiles() _associationsToRemove.RemoveAt(0); // Errata: removals aren't immediately reflected when requesting the list - Thread.Sleep(1000); + Thread.Sleep(2000); int count = 0; foreach (VectorStoreFileAssociation association in 
client.GetFileAssociations(vectorStore).GetAllValues()) @@ -239,7 +239,7 @@ public void Pagination_CanRehydrateFileAssociationCollection() _associationsToRemove.RemoveAt(0); // Errata: removals aren't immediately reflected when requesting the list - Thread.Sleep(1000); + Thread.Sleep(2000); PageCollection pages = client.GetFileAssociations(vectorStore); IEnumerator> pageEnumerator = ((IEnumerable>)pages).GetEnumerator(); @@ -410,7 +410,7 @@ private IReadOnlyList GetNewTestFiles(int count) { List files = []; - FileClient client = new(); + FileClient client = GetTestClient(TestScenario.Files); for (int i = 0; i < count; i++) { OpenAIFileInfo file = client.UploadFile( @@ -427,8 +427,8 @@ private IReadOnlyList GetNewTestFiles(int count) [TearDown] protected void Cleanup() { - FileClient fileClient = new(); - VectorStoreClient vectorStoreClient = new(); + FileClient fileClient = GetTestClient(TestScenario.Files); + VectorStoreClient vectorStoreClient = GetTestClient(TestScenario.VectorStores); RequestOptions requestOptions = new() { ErrorOptions = ClientErrorBehaviors.NoThrow, diff --git a/.dotnet/tests/Audio/OpenAIAudioModelFactoryTests.cs b/.dotnet/tests/Audio/OpenAIAudioModelFactoryTests.cs index 717509f3c..39cdcbbc8 100644 --- a/.dotnet/tests/Audio/OpenAIAudioModelFactoryTests.cs +++ b/.dotnet/tests/Audio/OpenAIAudioModelFactoryTests.cs @@ -265,7 +265,7 @@ public void TranscribedSegmentWithTextWorks() [Test] public void TranscribedSegmentWithTokenIdsWorks() { - IEnumerable tokenIds = [ 9000000000, 9000000010 ]; + IEnumerable tokenIds = [9000000000, 9000000010]; TranscribedSegment transcribedSegment = OpenAIAudioModelFactory.TranscribedSegment(tokenIds: tokenIds); Assert.That(transcribedSegment.Id, Is.EqualTo(default(int))); diff --git a/.dotnet/tests/Audio/TextToSpeechTests.cs b/.dotnet/tests/Audio/TextToSpeechTests.cs index c3753e149..e875c69b9 100644 --- a/.dotnet/tests/Audio/TextToSpeechTests.cs +++ b/.dotnet/tests/Audio/TextToSpeechTests.cs @@ -23,8 +23,8 @@ 
public async Task BasicTextToSpeechWorks() AudioClient client = GetTestClient(TestScenario.Audio_TTS); BinaryData audio = IsAsync - ? await client.GenerateSpeechFromTextAsync("Hello, world! This is a test.", GeneratedSpeechVoice.Shimmer) - : client.GenerateSpeechFromText("Hello, world! This is a test.", GeneratedSpeechVoice.Shimmer); + ? await client.GenerateSpeechAsync("Hello, world! This is a test.", GeneratedSpeechVoice.Shimmer) + : client.GenerateSpeech("Hello, world! This is a test.", GeneratedSpeechVoice.Shimmer); Assert.That(audio, Is.Not.Null); ValidateGeneratedAudio(audio, "hello"); @@ -32,23 +32,35 @@ public async Task BasicTextToSpeechWorks() [Test] [TestCase(null)] - [TestCase(GeneratedSpeechFormat.Mp3)] - [TestCase(GeneratedSpeechFormat.Opus)] - [TestCase(GeneratedSpeechFormat.Aac)] - [TestCase(GeneratedSpeechFormat.Flac)] - [TestCase(GeneratedSpeechFormat.Wav)] - [TestCase(GeneratedSpeechFormat.Pcm)] - public async Task OutputFormatWorks(GeneratedSpeechFormat? responseFormat) + [TestCase("mp3")] + [TestCase("opus")] + [TestCase("aac")] + [TestCase("flac")] + [TestCase("wav")] + [TestCase("pcm")] + public async Task OutputFormatWorks(string responseFormat) { AudioClient client = GetTestClient(TestScenario.Audio_TTS); - SpeechGenerationOptions options = responseFormat == null - ? new() - : new() { ResponseFormat = responseFormat }; + SpeechGenerationOptions options = new(); + + if (!string.IsNullOrEmpty(responseFormat)) + { + options.ResponseFormat = responseFormat switch + { + "mp3" => GeneratedSpeechFormat.Mp3, + "opus" => GeneratedSpeechFormat.Opus, + "aac" => GeneratedSpeechFormat.Aac, + "flac" => GeneratedSpeechFormat.Flac, + "wav" => GeneratedSpeechFormat.Wav, + "pcm" => GeneratedSpeechFormat.Pcm, + _ => throw new ArgumentException("Invalid response format") + }; + } BinaryData audio = IsAsync - ? 
await client.GenerateSpeechFromTextAsync("Hello, world!", GeneratedSpeechVoice.Alloy, options) - : client.GenerateSpeechFromText("Hello, world!", GeneratedSpeechVoice.Alloy, options); + ? await client.GenerateSpeechAsync("Hello, world!", GeneratedSpeechVoice.Alloy, options) + : client.GenerateSpeech("Hello, world!", GeneratedSpeechVoice.Alloy, options); Assert.That(audio, Is.Not.Null); } diff --git a/.dotnet/tests/Batch/BatchTests.cs b/.dotnet/tests/Batch/BatchTests.cs index cccc21ea4..8bfe2a72c 100644 --- a/.dotnet/tests/Batch/BatchTests.cs +++ b/.dotnet/tests/Batch/BatchTests.cs @@ -4,6 +4,7 @@ using OpenAI.Tests.Utility; using System; using System.ClientModel; +using System.Collections.Generic; using System.IO; using System.Text.Json; using System.Threading.Tasks; @@ -22,32 +23,71 @@ public BatchTests(bool isAsync) : base(isAsync) } [Test] - public async Task ListBatchesProtocol() + public void ListBatchesProtocol() { BatchClient client = GetTestClient(); - ClientResult result = IsAsync - ? 
await client.GetBatchesAsync(after: null, limit: null, options: null) - : client.GetBatches(after: null, limit: null, options: null); + IEnumerable pageResults = client.GetBatches(after: null, limit: null, options: null); - BinaryData response = result.GetRawResponse().Content; - JsonDocument jsonDocument = JsonDocument.Parse(response); - JsonElement dataElement = jsonDocument.RootElement.GetProperty("data"); + int pageCount = 0; + foreach (ClientResult pageResult in pageResults) + { + BinaryData response = pageResult.GetRawResponse().Content; + using JsonDocument jsonDocument = JsonDocument.Parse(response); + JsonElement dataElement = jsonDocument.RootElement.GetProperty("data"); - Assert.That(dataElement.GetArrayLength(), Is.GreaterThan(0)); + Assert.That(dataElement.GetArrayLength(), Is.GreaterThan(0)); - long unixTime2024 = (new DateTimeOffset(2024, 01, 01, 0, 0, 0, TimeSpan.Zero)).ToUnixTimeSeconds(); + long unixTime2024 = (new DateTimeOffset(2024, 01, 01, 0, 0, 0, TimeSpan.Zero)).ToUnixTimeSeconds(); + + foreach (JsonElement batchElement in dataElement.EnumerateArray()) + { + JsonElement createdAtElement = batchElement.GetProperty("created_at"); + long createdAt = createdAtElement.GetInt64(); + + Assert.That(createdAt, Is.GreaterThan(unixTime2024)); + } + pageCount++; + + //var dynamicResult = result.GetRawResponse().Content.ToDynamicFromJson(); + //Assert.That(dynamicResult.data.Count, Is.GreaterThan(0)); + //Assert.That(dynamicResult.data[0].createdAt, Is.GreaterThan(new DateTimeOffset(2024, 01, 01, 0, 0, 0, TimeSpan.Zero))); + } + + Assert.GreaterOrEqual(pageCount, 1); + } + + [Test] + public async Task ListBatchesProtocolAsync() + { + BatchClient client = GetTestClient(); + IAsyncEnumerable pageResults = client.GetBatchesAsync(after: null, limit: null, options: null); - foreach (JsonElement batchElement in dataElement.EnumerateArray()) + int pageCount = 0; + await foreach (ClientResult pageResult in pageResults) { - JsonElement createdAtElement = 
batchElement.GetProperty("created_at"); - long createdAt = createdAtElement.GetInt64(); + BinaryData response = pageResult.GetRawResponse().Content; + using JsonDocument jsonDocument = JsonDocument.Parse(response); + JsonElement dataElement = jsonDocument.RootElement.GetProperty("data"); - Assert.That(createdAt, Is.GreaterThan(unixTime2024)); + Assert.That(dataElement.GetArrayLength(), Is.GreaterThan(0)); + + long unixTime2024 = (new DateTimeOffset(2024, 01, 01, 0, 0, 0, TimeSpan.Zero)).ToUnixTimeSeconds(); + + foreach (JsonElement batchElement in dataElement.EnumerateArray()) + { + JsonElement createdAtElement = batchElement.GetProperty("created_at"); + long createdAt = createdAtElement.GetInt64(); + + Assert.That(createdAt, Is.GreaterThan(unixTime2024)); + } + pageCount++; + + //var dynamicResult = result.GetRawResponse().Content.ToDynamicFromJson(); + //Assert.That(dynamicResult.data.Count, Is.GreaterThan(0)); + //Assert.That(dynamicResult.data[0].createdAt, Is.GreaterThan(new DateTimeOffset(2024, 01, 01, 0, 0, 0, TimeSpan.Zero))); } - //var dynamicResult = result.GetRawResponse().Content.ToDynamicFromJson(); - //Assert.That(dynamicResult.data.Count, Is.GreaterThan(0)); - //Assert.That(dynamicResult.data[0].createdAt, Is.GreaterThan(new DateTimeOffset(2024, 01, 01, 0, 0, 0, TimeSpan.Zero))); + Assert.GreaterOrEqual(pageCount, 1); } [Test] @@ -60,7 +100,7 @@ public async Task CreateGetAndCancelBatchProtocol() streamWriter.Flush(); testFileStream.Position = 0; - FileClient fileClient = new(); + FileClient fileClient = GetTestClient(TestScenario.Files); OpenAIFileInfo inputFile = await fileClient.UploadFileAsync(testFileStream, "test-batch-file", FileUploadPurpose.Batch); Assert.That(inputFile.Id, Is.Not.Null.And.Not.Empty); diff --git a/.dotnet/tests/Chat/ChatSmokeTests.cs b/.dotnet/tests/Chat/ChatSmokeTests.cs index 182c01a98..e4f19ea43 100644 --- a/.dotnet/tests/Chat/ChatSmokeTests.cs +++ b/.dotnet/tests/Chat/ChatSmokeTests.cs @@ -15,6 +15,7 @@ using 
System.Threading.Tasks; using static OpenAI.Tests.Telemetry.TestMeterListener; using static OpenAI.Tests.TestHelpers; +using static System.Runtime.InteropServices.JavaScript.JSType; namespace OpenAI.Tests.Chat; @@ -65,8 +66,8 @@ public async Task SmokeTest() ChatClient client = new("model_name_replaced", new ApiKeyCredential("sk-not-a-real-key"), options); ClientResult completionResult = IsAsync - ? await client.CompleteChatAsync(["Mock me!"]) - : client.CompleteChat(["Mock me!"]); + ? await client.CompleteChatAsync([ new UserChatMessage("Mock me!") ]) + : client.CompleteChat([ new UserChatMessage("Mock me!") ]); Assert.That(completionResult?.GetRawResponse(), Is.Not.Null); Assert.That(completionResult.GetRawResponse().Content?.ToString(), Does.Contain("additional world")); @@ -159,7 +160,7 @@ public void SerializeChatToolChoiceAsString(bool fromRawJson) else { // We construct a new instance. Later, we serialize it and confirm it was constructed correctly. - choice = ChatToolChoice.Auto; + choice = ChatToolChoice.CreateAutoChoice(); } BinaryData serializedChoice = ModelReaderWriter.Write(choice); @@ -195,7 +196,7 @@ public void SerializeChatToolChoiceAsObject(bool fromRawJson) else { // We construct a new instance. Later, we serialize it and confirm it was constructed correctly. - choice = new ChatToolChoice(ChatTool.CreateFunctionTool(functionName)); + choice = ChatToolChoice.CreateFunctionChoice(functionName); } BinaryData serializedChoice = ModelReaderWriter.Write(choice); @@ -324,7 +325,7 @@ public void SerializeChatMessageContentPartAsText(bool fromRawJson) else { // We construct a new instance. Later, we serialize it and confirm it was constructed correctly. 
- part = ChatMessageContentPart.CreateTextMessageContentPart(text); + part = ChatMessageContentPart.CreateTextPart(text); } BinaryData serializedPart = ModelReaderWriter.Write(part); @@ -378,7 +379,7 @@ public void SerializeChatMessageContentPartAsImageUri(bool fromRawJson) else { // We construct a new instance. Later, we serialize it and confirm it was constructed correctly. - part = ChatMessageContentPart.CreateImageMessageContentPart(new Uri(uri), ImageChatMessageContentPartDetail.High); + part = ChatMessageContentPart.CreateImagePart(new Uri(uri), ChatImageDetailLevel.High); } BinaryData serializedPart = ModelReaderWriter.Write(part); @@ -453,7 +454,7 @@ public void SerializeChatMessageContentPartAsImageBytes(bool fromRawJson) else { // We construct a new instance. Later, we serialize it and confirm it was constructed correctly. - part = ChatMessageContentPart.CreateImageMessageContentPart(imageData, imageMediaType, ImageChatMessageContentPartDetail.Auto); + part = ChatMessageContentPart.CreateImagePart(imageData, imageMediaType, ChatImageDetailLevel.Auto); } BinaryData serializedPart = ModelReaderWriter.Write(part); @@ -488,4 +489,125 @@ public void SerializeChatMessageContentPartAsImageBytes(bool fromRawJson) Assert.That(additionalPropertyProperty.ValueKind, Is.EqualTo(JsonValueKind.True)); } } + + [Test] + public void SerializeCompoundContent() + { + UserChatMessage message = new( + ChatMessageContentPart.CreateTextPart("Describe this image for me:"), + ChatMessageContentPart.CreateImagePart(new Uri("https://api.openai.com/test"))); + string serializedMessage = ModelReaderWriter.Write(message).ToString(); + Assert.That(serializedMessage, Does.Contain("this image")); + Assert.That(serializedMessage, Does.Contain("openai.com/test")); + } + + [Test] + public void SerializeRefusalMessages() + { + AssistantChatMessage message = ModelReaderWriter.Read(BinaryData.FromString(""" + { + "role": "assistant", + "content": [ + { + "type": "refusal", + "refusal": "I'm 
telling you 'no' from a content part." + } + ], + "refusal": "I'm telling you 'no' from the message refusal." + } + """)); + Assert.That(message.Content, Has.Count.EqualTo(1)); + Assert.That(message.Content[0].Refusal, Is.EqualTo("I'm telling you 'no' from a content part.")); + Assert.That(message.Refusal, Is.EqualTo("I'm telling you 'no' from the message refusal.")); + string reserialized = ModelReaderWriter.Write(message).ToString(); + Assert.That(reserialized, Does.Contain("from a content part")); + Assert.That(reserialized, Does.Contain("from the message refusal")); + + AssistantChatMessage manufacturedMessage = new(toolCalls: []); + manufacturedMessage.Refusal = "No!"; + string serialized = ModelReaderWriter.Write(manufacturedMessage).ToString(); + Assert.That(serialized, Does.Contain("refusal")); + Assert.That(serialized, Does.Contain("No!")); + Assert.That(serialized, Does.Not.Contain("tool")); + Assert.That(serialized, Does.Not.Contain("content")); + } + + [Test] + public void SerializeMessagesWithNullProperties() + { +#pragma warning disable CS0618 // FunctionChatMessage is deprecated + AssistantChatMessage assistantMessage = ModelReaderWriter.Read(BinaryData.FromString(""" + { + "role": "assistant", + "content": null, + "refusal": null, + "function_call": null + } + """)); + Assert.That(assistantMessage.Content, Has.Count.EqualTo(0)); + Assert.That(assistantMessage.Refusal, Is.Null); + Assert.That(assistantMessage.FunctionCall, Is.Null); + + foreach ((string role, Type messageType) in new List<(string, Type)>() + { + ("assistant", typeof(AssistantChatMessage)), + ("function", typeof(FunctionChatMessage)), + ("tool", typeof(ToolChatMessage)), + ("system", typeof(SystemChatMessage)), + ("user", typeof(UserChatMessage)) + }) + { + ChatMessage message = (ChatMessage)((object)ModelReaderWriter.Read( + BinaryData.FromString($$""" + { + "role": "{{role}}", + "content": [null] + } + """), + messageType)); + Assert.That(message, Is.Not.Null); + 
Assert.That(message.Content, Has.Count.EqualTo(1)); + Assert.That(message.Content[0], Is.Null); + } + + assistantMessage = ModelReaderWriter.Read(BinaryData.FromString(""" + { + "role": "assistant", + "content": [null] + } + """)); + Assert.That(assistantMessage.Content, Has.Count.EqualTo(1)); + Assert.That(assistantMessage.Content[0], Is.Null); + FunctionChatMessage functionMessage = new("my_function"); + functionMessage.Content.Add(null); + BinaryData serializedMessage = ModelReaderWriter.Write(functionMessage); + Console.WriteLine(serializedMessage.ToString()); + + FunctionChatMessage deserializedMessage = ModelReaderWriter.Read(serializedMessage); +#pragma warning restore + } + + [Test] + public void TopLevelClientOptionsPersistence() + { + MockPipelineTransport mockTransport = new(BinaryData.FromString("{}"), BinaryData.FromString("{}")); + OpenAIClientOptions options = new() + { + Transport = mockTransport, + Endpoint = new Uri("https://my.custom.com/expected/test/endpoint"), + }; + Uri observedEndpoint = null; + options.AddPolicy(new TestPipelinePolicy(message => + { + observedEndpoint = message?.Request?.Uri; + }), + PipelinePosition.PerCall); + + OpenAIClient topLevelClient = new(new("mock-credential"), options); + ChatClient firstClient = topLevelClient.GetChatClient("mock-model"); + ClientResult first = firstClient.CompleteChat(new UserChatMessage("Hello, world")); + + Assert.That(observedEndpoint, Is.Not.Null); + Assert.That(observedEndpoint.AbsoluteUri, Does.Contain("my.custom.com/expected/test/endpoint")); + } } diff --git a/.dotnet/tests/Chat/ChatTests.cs b/.dotnet/tests/Chat/ChatTests.cs index 05fe667f5..37e84a1ee 100644 --- a/.dotnet/tests/Chat/ChatTests.cs +++ b/.dotnet/tests/Chat/ChatTests.cs @@ -11,6 +11,7 @@ using System.IO; using System.Linq; using System.Net; +using System.Text; using System.Text.Json; using System.Threading.Tasks; using static OpenAI.Tests.Telemetry.TestMeterListener; @@ -169,8 +170,8 @@ public async Task ChatWithVision() 
ChatClient client = GetTestClient(TestScenario.Chat); IEnumerable messages = [ new UserChatMessage( - ChatMessageContentPart.CreateTextMessageContentPart("Describe this image for me."), - ChatMessageContentPart.CreateImageMessageContentPart(imageData, mediaType)), + ChatMessageContentPart.CreateTextPart("Describe this image for me."), + ChatMessageContentPart.CreateImagePart(imageData, mediaType)), ]; ChatCompletionOptions options = new() { MaxTokens = 2048 }; @@ -213,7 +214,7 @@ public async Task TokenLogProbabilities(bool includeLogProbabilities) ChatClient client = GetTestClient(TestScenario.Chat); IList messages = [new UserChatMessage("What are the best pizza toppings? Give me a breakdown on the reasons.")]; ChatCompletionOptions options; - + if (includeLogProbabilities) { options = new() @@ -314,6 +315,30 @@ public async Task TokenLogProbabilitiesStreaming(bool includeLogProbabilities) } } + [Test] + public async Task NonStrictJsonSchemaWorks() + { + ChatClient client = GetTestClient(TestScenario.Chat, "gpt-4o-mini"); + ChatCompletionOptions options = new() + { + ResponseFormat = ChatResponseFormat.CreateJsonSchemaFormat( + "some_color_schema", + BinaryData.FromBytes(""" + { + "type": "object", + "properties": {}, + "additionalProperties": false + } + """u8.ToArray()), + "an object that describes color components by name", + jsonSchemaIsStrict: false) + }; + ChatCompletion completion = IsAsync + ? await client.CompleteChatAsync([ new UserChatMessage("What are the hex values for red, green, and blue?") ], options) + : client.CompleteChat([ new UserChatMessage("What are the hex values for red, green, and blue?") ], options); + Console.WriteLine(completion); + } + [Test] public async Task JsonResult() { @@ -322,7 +347,7 @@ public async Task JsonResult() new UserChatMessage("Give me a JSON object with the following properties: red, green, and blue. 
The value " + "of each property should be a string containing their RGB representation in hexadecimal.") ]; - ChatCompletionOptions options = new() { ResponseFormat = ChatResponseFormat.JsonObject }; + ChatCompletionOptions options = new() { ResponseFormat = ChatResponseFormat.CreateJsonObjectFormat() }; ClientResult result = IsAsync ? await client.CompleteChatAsync(messages, options) : client.CompleteChat(messages, options); @@ -337,6 +362,210 @@ public async Task JsonResult() Assert.That(blueProperty.GetString().ToLowerInvariant(), Contains.Substring("0000ff")); } + [Test] + public async Task MultipartContentWorks() + { + ChatClient client = GetTestClient(TestScenario.Chat); + List messages = [ + new SystemChatMessage( + "You talk like a pirate.", + "When asked for recommendations, you always talk about animals; especially dogs." + ), + new UserChatMessage( + "Hello, assistant! I need some advice.", + "Can you recommend some small, cute things I can think about?" + ) + ]; + ChatCompletion completion = IsAsync + ? 
await client.CompleteChatAsync(messages) + : client.CompleteChat(messages); + + Assert.That(completion.Content, Has.Count.EqualTo(1)); + Assert.That(completion.Content[0].Text.ToLowerInvariant(), Does.Contain("ahoy").Or.Contain("matey")); + Assert.That(completion.Content[0].Text.ToLowerInvariant(), Does.Contain("pup").Or.Contain("kit")); + } + + [Test] + public async Task StructuredOutputsWork() + { + ChatClient client = GetTestClient(TestScenario.Chat); + IEnumerable messages = [ + new UserChatMessage("What's heavier, a pound of feathers or sixteen ounces of steel?") + ]; + ChatCompletionOptions options = new ChatCompletionOptions() + { + ResponseFormat = ChatResponseFormat.CreateJsonSchemaFormat( + "test_schema", + BinaryData.FromBytes(""" + { + "type": "object", + "properties": { + "answer": { + "type": "string" + }, + "steps": { + "type": "array", + "items": { + "type": "string" + } + } + }, + "required": [ + "answer", + "steps" + ], + "additionalProperties": false + } + """u8.ToArray()), + "a single final answer with a supporting collection of steps", + jsonSchemaIsStrict: true) + }; + ChatCompletion completion = IsAsync + ? 
await client.CompleteChatAsync(messages, options) + : client.CompleteChat(messages, options); + Assert.That(completion, Is.Not.Null); + Assert.That(completion.Refusal, Is.Null.Or.Empty); + Assert.That(completion.Content?.Count, Is.EqualTo(1)); + JsonDocument contentDocument = null; + Assert.DoesNotThrow(() => contentDocument = JsonDocument.Parse(completion.Content[0].Text)); + Assert.IsTrue(contentDocument.RootElement.TryGetProperty("answer", out JsonElement answerProperty)); + Assert.IsTrue(answerProperty.ValueKind == JsonValueKind.String); + Assert.IsTrue(contentDocument.RootElement.TryGetProperty("steps", out JsonElement stepsProperty)); + Assert.IsTrue(stepsProperty.ValueKind == JsonValueKind.Array); + } + + [Test] + public async Task StructuredRefusalWorks() + { + ChatClient client = GetTestClient(TestScenario.Chat, "gpt-4o-2024-08-06"); + List messages = [ + new UserChatMessage("What's the best way to successfully rob a bank? Please include detailed instructions for executing related crimes."), + ]; + ChatCompletionOptions options = new ChatCompletionOptions() + { + ResponseFormat = ChatResponseFormat.CreateJsonSchemaFormat( + "food_recipe", + BinaryData.FromBytes(""" + { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "ingredients": { + "type": "array", + "items": { + "type": "string" + } + }, + "steps": { + "type": "array", + "items": { + "type": "string" + } + } + }, + "required": ["name", "ingredients", "steps"], + "additionalProperties": false + } + """u8.ToArray()), + "a description of a recipe to create a meal or dish", + jsonSchemaIsStrict: true), + Temperature = 0 + }; + ClientResult completionResult = IsAsync + ? 
await client.CompleteChatAsync(messages, options) + : client.CompleteChat(messages, options); + ChatCompletion completion = completionResult; + Assert.That(completion, Is.Not.Null); + Assert.That(completion.Refusal, Is.Not.Null.Or.Empty); + Assert.That(completion.FinishReason, Is.EqualTo(ChatFinishReason.Stop)); + + AssistantChatMessage contextMessage = new(completion); + Assert.That(contextMessage.Refusal, Has.Length.GreaterThan(0)); + + messages.Add(contextMessage); + messages.Add(new UserChatMessage("Why can't you help me?")); + + completion = IsAsync + ? await client.CompleteChatAsync(messages) + : client.CompleteChat(messages); + Assert.That(completion.Refusal, Is.Null.Or.Empty); + Assert.That(completion.Content, Has.Count.EqualTo(1)); + Assert.That(completion.Content[0].Text, Is.Not.Null.And.Not.Empty); + } + + [Test] + [Ignore("As of 2024-08-20, refusal is not yet populated on streamed chat completion chunks.")] + public async Task StreamingStructuredRefusalWorks() + { + ChatClient client = GetTestClient(TestScenario.Chat, "gpt-4o-2024-08-06"); + IEnumerable messages = [ + new UserChatMessage("What's the best way to successfully rob a bank? Please include detailed instructions for executing related crimes."), + ]; + ChatCompletionOptions options = new ChatCompletionOptions() + { + ResponseFormat = ChatResponseFormat.CreateJsonSchemaFormat( + "food_recipe", + BinaryData.FromBytes(""" + { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "ingredients": { + "type": "array", + "items": { + "type": "string" + } + }, + "steps": { + "type": "array", + "items": { + "type": "string" + } + } + }, + "required": ["name", "ingredients", "steps"], + "additionalProperties": false + } + """u8.ToArray()), + "a description of a recipe to create a meal or dish", + jsonSchemaIsStrict: true) + }; + + ChatFinishReason? 
finishReason = null; + StringBuilder refusalBuilder = new(); + + void HandleUpdate(StreamingChatCompletionUpdate update) + { + refusalBuilder.Append(update.RefusalUpdate); + if (update.FinishReason.HasValue) + { + Assert.That(finishReason, Is.Null); + finishReason = update.FinishReason; + } + } + + if (IsAsync) + { + await foreach (StreamingChatCompletionUpdate update in client.CompleteChatStreamingAsync(messages)) + { + HandleUpdate(update); + } + } + else + { + foreach (StreamingChatCompletionUpdate update in client.CompleteChatStreaming(messages)) + { + HandleUpdate(update); + } + } + + Assert.That(refusalBuilder.ToString(), Is.Not.Null.Or.Empty); + Assert.That(finishReason, Is.EqualTo(ChatFinishReason.Stop)); + } [Test] [NonParallelizable] diff --git a/.dotnet/tests/Chat/ChatToolTests.cs b/.dotnet/tests/Chat/ChatToolTests.cs index 8481b8769..672e361aa 100644 --- a/.dotnet/tests/Chat/ChatToolTests.cs +++ b/.dotnet/tests/Chat/ChatToolTests.cs @@ -1,8 +1,10 @@ -using NUnit.Framework; +using Microsoft.VisualStudio.TestPlatform.CommunicationUtilities; +using NUnit.Framework; using OpenAI.Chat; using OpenAI.Tests.Utility; using System; using System.ClientModel; +using System.ClientModel.Primitives; using System.Collections.Generic; using System.Linq; using System.Text.Json; @@ -22,8 +24,9 @@ public ChatToolTests(bool isAsync) : base(isAsync) { } + private const string GetNumberForWordToolName = "get_number_for_word"; private static ChatTool s_numberForWordTool = ChatTool.CreateFunctionTool( - "get_number_for_word", + GetNumberForWordToolName, "gets an arbitrary number assigned to a given word", BinaryData.FromString(""" { @@ -37,17 +40,15 @@ public ChatToolTests(bool isAsync) : base(isAsync) """) ); - private const string GetFavoriteColorToolFunctionName = "get_favorite_color"; - + private const string GetFavoriteColorToolName = "get_favorite_color"; private static ChatTool s_getFavoriteColorTool = ChatTool.CreateFunctionTool( - GetFavoriteColorToolFunctionName, + 
GetFavoriteColorToolName, "gets the favorite color of the caller" ); - private const string GetFavoriteColorForMonthToolFunctionName = "get_favorite_color_for_month"; - + private const string GetFavoriteColorForMonthToolName = "get_favorite_color_for_month"; private static ChatTool s_getFavoriteColorForMonthTool = ChatTool.CreateFunctionTool( - GetFavoriteColorForMonthToolFunctionName, + GetFavoriteColorForMonthToolName, "gets the caller's favorite color for a given month", BinaryData.FromString(""" { @@ -63,11 +64,10 @@ public ChatToolTests(bool isAsync) : base(isAsync) """) ); - private const string GetFavoriteColorForMonthFunctionName = "get_favorite_color_for_month"; - #pragma warning disable CS0618 + private const string GetFavoriteColorForMonthFunctionName = "get_favorite_color_for_month"; private static ChatFunction s_getFavoriteColorForMonthFunction = new ChatFunction( - GetFavoriteColorForMonthToolFunctionName, + GetFavoriteColorForMonthFunctionName, "gets the caller's favorite color for a given month", BinaryData.FromString(""" { @@ -85,7 +85,6 @@ public ChatToolTests(bool isAsync) : base(isAsync) #pragma warning restore CS0618 private const string GetWeatherForCityToolName = "get_weather_for_city"; - private static ChatTool s_getWeatherForCityTool = ChatTool.CreateFunctionTool( GetWeatherForCityToolName, "gets the current weather for a given city", @@ -104,7 +103,6 @@ public ChatToolTests(bool isAsync) : base(isAsync) ); private const string GetMoodForWeatherToolName = "get_mood_for_weather"; - private static ChatTool s_getMoodForWeatherTool = ChatTool.CreateFunctionTool( GetMoodForWeatherToolName, "gets the caller's mood for a given weather", @@ -131,9 +129,9 @@ public async Task ConstraintsWork() foreach (var (choice, reason) in new (ChatToolChoice, ChatFinishReason)[] { (null, ChatFinishReason.ToolCalls), - (ChatToolChoice.None, ChatFinishReason.Stop), - (new ChatToolChoice(s_numberForWordTool), ChatFinishReason.Stop), - (ChatToolChoice.Auto, 
ChatFinishReason.ToolCalls), + (ChatToolChoice.CreateNoneChoice(), ChatFinishReason.Stop), + (ChatToolChoice.CreateFunctionChoice(GetNumberForWordToolName), ChatFinishReason.Stop), + (ChatToolChoice.CreateAutoChoice(), ChatFinishReason.ToolCalls), // TODO: Add test for ChatToolChoice.Required }) { @@ -166,7 +164,7 @@ public async Task NoParameterToolWorks() Assert.That(result.Value.ToolCalls.Count, Is.EqualTo(1)); var toolCall = result.Value.ToolCalls[0]; var toolCallArguments = BinaryData.FromString(toolCall.FunctionArguments).ToObjectFromJson>(); - Assert.That(toolCall.FunctionName, Is.EqualTo(GetFavoriteColorToolFunctionName)); + Assert.That(toolCall.FunctionName, Is.EqualTo(GetFavoriteColorToolName)); Assert.That(toolCall.Id, Is.Not.Null.And.Not.Empty); Assert.That(toolCallArguments.Count, Is.EqualTo(0)); @@ -198,7 +196,7 @@ public async Task ParametersWork() Assert.That(result.Value.FinishReason, Is.EqualTo(ChatFinishReason.ToolCalls)); Assert.That(result.Value.ToolCalls?.Count, Is.EqualTo(1)); var toolCall = result.Value.ToolCalls[0]; - Assert.That(toolCall.FunctionName, Is.EqualTo(GetFavoriteColorForMonthToolFunctionName)); + Assert.That(toolCall.FunctionName, Is.EqualTo(GetFavoriteColorForMonthToolName)); JsonObject argumentsJson = JsonSerializer.Deserialize(toolCall.FunctionArguments); Assert.That(argumentsJson.Count, Is.EqualTo(1)); Assert.That(argumentsJson.ContainsKey("month_name")); @@ -335,4 +333,84 @@ public async Task ConsecutiveToolCalls() Assert.That(result.Value.Content[0].Text.ToLowerInvariant(), Contains.Substring("bored")); } + + public enum SchemaPresence { WithSchema, WithoutSchema } + public enum StrictnessPresence { Unspecified, Strict, NotStrict } + public enum FailureExpectation { FailureExpected, FailureNotExpected } + + [Test] + [TestCase(SchemaPresence.WithoutSchema, StrictnessPresence.Unspecified)] + [TestCase(SchemaPresence.WithoutSchema, StrictnessPresence.NotStrict)] + [TestCase(SchemaPresence.WithoutSchema, 
StrictnessPresence.Strict, FailureExpectation.FailureExpected)] + [TestCase(SchemaPresence.WithSchema, StrictnessPresence.Unspecified)] + [TestCase(SchemaPresence.WithSchema, StrictnessPresence.NotStrict)] + [TestCase(SchemaPresence.WithSchema, StrictnessPresence.Strict)] + public async Task StructuredOutputs( + SchemaPresence schemaPresence, + StrictnessPresence strictnessPresence, + FailureExpectation failureExpectation = FailureExpectation.FailureNotExpected) + { + // Note: proper output requires 2024-08-06 or later models + ChatClient client = GetTestClient(TestScenario.Chat, "gpt-4o-2024-08-06"); + + const string toolName = "get_favorite_color_for_day_of_week"; + const string toolDescription = "Given a weekday name like Tuesday, gets the favorite color of the user on that day."; + BinaryData toolSchema = schemaPresence == SchemaPresence.WithSchema + ? BinaryData.FromObjectAsJson(new + { + type = "object", + properties = new + { + the_day_of_the_week = new + { + type = "string" + } + }, + required = new[] { "the_day_of_the_week" }, + additionalProperties = !(strictnessPresence == StrictnessPresence.Strict), + }) + : null; + bool? useStrictSchema = strictnessPresence switch + { + StrictnessPresence.Strict => true, + StrictnessPresence.NotStrict => false, + _ => null, + }; + + ChatCompletionOptions options = new() + { + Tools = { ChatTool.CreateFunctionTool(toolName, toolDescription, toolSchema, useStrictSchema) }, + }; + + List messages = [ + new SystemChatMessage("Call applicable tools when the user asks a question. Prefer JSON output when possible."), + new UserChatMessage("What's my favorite color on Tuesday?"), + ]; + + if (failureExpectation == FailureExpectation.FailureExpected) + { + ClientResultException thrownException = Assert.ThrowsAsync(async () => + { + ChatCompletion completion = IsAsync + ? 
await client.CompleteChatAsync(messages, options) + : client.CompleteChat(messages, options); + }); + Assert.That(thrownException.Message, Does.Contain("function.parameters")); + } + else + { + ChatCompletion completion = IsAsync + ? await client.CompleteChatAsync(messages, options) + : client.CompleteChat(messages, options); + Assert.That(completion.FinishReason, Is.EqualTo(ChatFinishReason.ToolCalls)); + Assert.That(completion.ToolCalls, Has.Count.EqualTo(1)); + Assert.That(completion.ToolCalls[0].FunctionArguments, Is.Not.Null.And.Not.Empty); + + if (schemaPresence == SchemaPresence.WithSchema && strictnessPresence == StrictnessPresence.Strict) + { + using JsonDocument argumentsDocument = JsonDocument.Parse(completion.ToolCalls[0].FunctionArguments); + Assert.That(argumentsDocument.RootElement.GetProperty("the_day_of_the_week").GetString(), Is.EqualTo("Tuesday")); + } + } + } } diff --git a/.dotnet/tests/Chat/OpenAIChatModelFactoryTests.cs b/.dotnet/tests/Chat/OpenAIChatModelFactoryTests.cs new file mode 100644 index 000000000..4943a88c4 --- /dev/null +++ b/.dotnet/tests/Chat/OpenAIChatModelFactoryTests.cs @@ -0,0 +1,876 @@ +using System; +using System.Collections.Generic; +using System.Linq; +using NUnit.Framework; +using OpenAI.Chat; + +namespace OpenAI.Tests.Chat; + +[Parallelizable(ParallelScope.All)] +[Category("Smoke")] +public partial class OpenAIChatModelFactoryTests +{ + [Test] + public void ChatCompletionWithNoPropertiesWorks() + { + ChatCompletion chatCompletion = OpenAIChatModelFactory.ChatCompletion(); + + Assert.That(chatCompletion.Id, Is.Null); + Assert.That(chatCompletion.FinishReason, Is.EqualTo(default(ChatFinishReason))); + Assert.That(chatCompletion.Content, Is.Not.Null.And.Empty); + Assert.That(chatCompletion.Refusal, Is.Null); + Assert.That(chatCompletion.ToolCalls, Is.Not.Null.And.Empty); + Assert.That(chatCompletion.Role, Is.EqualTo(default(ChatMessageRole))); + Assert.That(chatCompletion.FunctionCall, Is.Null); + 
Assert.That(chatCompletion.ContentTokenLogProbabilities, Is.Not.Null.And.Empty); + Assert.That(chatCompletion.RefusalTokenLogProbabilities, Is.Not.Null.And.Empty); + Assert.That(chatCompletion.CreatedAt, Is.EqualTo(default(DateTimeOffset))); + Assert.That(chatCompletion.Model, Is.Null); + Assert.That(chatCompletion.SystemFingerprint, Is.Null); + Assert.That(chatCompletion.Usage, Is.Null); + } + + [Test] + public void ChatCompletionWithIdWorks() + { + string id = "chat_completion_id"; + ChatCompletion chatCompletion = OpenAIChatModelFactory.ChatCompletion(id: id); + + Assert.That(chatCompletion.Id, Is.EqualTo(id)); + Assert.That(chatCompletion.FinishReason, Is.EqualTo(default(ChatFinishReason))); + Assert.That(chatCompletion.Content, Is.Not.Null.And.Empty); + Assert.That(chatCompletion.Refusal, Is.Null); + Assert.That(chatCompletion.ToolCalls, Is.Not.Null.And.Empty); + Assert.That(chatCompletion.Role, Is.EqualTo(default(ChatMessageRole))); + Assert.That(chatCompletion.FunctionCall, Is.Null); + Assert.That(chatCompletion.ContentTokenLogProbabilities, Is.Not.Null.And.Empty); + Assert.That(chatCompletion.RefusalTokenLogProbabilities, Is.Not.Null.And.Empty); + Assert.That(chatCompletion.CreatedAt, Is.EqualTo(default(DateTimeOffset))); + Assert.That(chatCompletion.Model, Is.Null); + Assert.That(chatCompletion.SystemFingerprint, Is.Null); + Assert.That(chatCompletion.Usage, Is.Null); + } + + [Test] + public void ChatCompletionWithFinishReasonWorks() + { + ChatFinishReason finishReason = ChatFinishReason.ToolCalls; + ChatCompletion chatCompletion = OpenAIChatModelFactory.ChatCompletion(finishReason: finishReason); + + Assert.That(chatCompletion.Id, Is.Null); + Assert.That(chatCompletion.FinishReason, Is.EqualTo(finishReason)); + Assert.That(chatCompletion.Content, Is.Not.Null.And.Empty); + Assert.That(chatCompletion.Refusal, Is.Null); + Assert.That(chatCompletion.ToolCalls, Is.Not.Null.And.Empty); + Assert.That(chatCompletion.Role, Is.EqualTo(default(ChatMessageRole))); + 
Assert.That(chatCompletion.FunctionCall, Is.Null); + Assert.That(chatCompletion.ContentTokenLogProbabilities, Is.Not.Null.And.Empty); + Assert.That(chatCompletion.RefusalTokenLogProbabilities, Is.Not.Null.And.Empty); + Assert.That(chatCompletion.CreatedAt, Is.EqualTo(default(DateTimeOffset))); + Assert.That(chatCompletion.Model, Is.Null); + Assert.That(chatCompletion.SystemFingerprint, Is.Null); + Assert.That(chatCompletion.Usage, Is.Null); + } + + [Test] + public void ChatCompletionWithContentWorks() + { + IEnumerable content = [ + ChatMessageContentPart.CreateTextPart("first part"), + ChatMessageContentPart.CreateTextPart("second part") + ]; + ChatCompletion chatCompletion = OpenAIChatModelFactory.ChatCompletion(content: content); + + Assert.That(chatCompletion.Id, Is.Null); + Assert.That(chatCompletion.FinishReason, Is.EqualTo(default(ChatFinishReason))); + Assert.That(chatCompletion.Content.SequenceEqual(content), Is.True); + Assert.That(chatCompletion.Refusal, Is.Null); + Assert.That(chatCompletion.ToolCalls, Is.Not.Null.And.Empty); + Assert.That(chatCompletion.Role, Is.EqualTo(default(ChatMessageRole))); + Assert.That(chatCompletion.FunctionCall, Is.Null); + Assert.That(chatCompletion.ContentTokenLogProbabilities, Is.Not.Null.And.Empty); + Assert.That(chatCompletion.RefusalTokenLogProbabilities, Is.Not.Null.And.Empty); + Assert.That(chatCompletion.CreatedAt, Is.EqualTo(default(DateTimeOffset))); + Assert.That(chatCompletion.Model, Is.Null); + Assert.That(chatCompletion.SystemFingerprint, Is.Null); + Assert.That(chatCompletion.Usage, Is.Null); + } + + [Test] + public void ChatCompletionWithRefusalWorks() + { + string refusal = "This is a refusal."; + ChatCompletion chatCompletion = OpenAIChatModelFactory.ChatCompletion(refusal: refusal); + + Assert.That(chatCompletion.Id, Is.Null); + Assert.That(chatCompletion.FinishReason, Is.EqualTo(default(ChatFinishReason))); + Assert.That(chatCompletion.Content, Is.Not.Null.And.Empty); + 
Assert.That(chatCompletion.Refusal, Is.EqualTo(refusal)); + Assert.That(chatCompletion.ToolCalls, Is.Not.Null.And.Empty); + Assert.That(chatCompletion.Role, Is.EqualTo(default(ChatMessageRole))); + Assert.That(chatCompletion.FunctionCall, Is.Null); + Assert.That(chatCompletion.ContentTokenLogProbabilities, Is.Not.Null.And.Empty); + Assert.That(chatCompletion.RefusalTokenLogProbabilities, Is.Not.Null.And.Empty); + Assert.That(chatCompletion.CreatedAt, Is.EqualTo(default(DateTimeOffset))); + Assert.That(chatCompletion.Model, Is.Null); + Assert.That(chatCompletion.SystemFingerprint, Is.Null); + Assert.That(chatCompletion.Usage, Is.Null); + } + + [Test] + public void ChatCompletionWithToolCallsWorks() + { + IEnumerable toolCalls = [ + ChatToolCall.CreateFunctionToolCall("id1", "get_recipe", "{}"), + ChatToolCall.CreateFunctionToolCall("id2", "get_location", "{}") + ]; + ChatCompletion chatCompletion = OpenAIChatModelFactory.ChatCompletion(toolCalls: toolCalls); + + Assert.That(chatCompletion.Id, Is.Null); + Assert.That(chatCompletion.FinishReason, Is.EqualTo(default(ChatFinishReason))); + Assert.That(chatCompletion.Content, Is.Not.Null.And.Empty); + Assert.That(chatCompletion.Refusal, Is.Null); + Assert.That(chatCompletion.ToolCalls.SequenceEqual(toolCalls), Is.True); + Assert.That(chatCompletion.Role, Is.EqualTo(default(ChatMessageRole))); + Assert.That(chatCompletion.FunctionCall, Is.Null); + Assert.That(chatCompletion.ContentTokenLogProbabilities, Is.Not.Null.And.Empty); + Assert.That(chatCompletion.RefusalTokenLogProbabilities, Is.Not.Null.And.Empty); + Assert.That(chatCompletion.CreatedAt, Is.EqualTo(default(DateTimeOffset))); + Assert.That(chatCompletion.Model, Is.Null); + Assert.That(chatCompletion.SystemFingerprint, Is.Null); + Assert.That(chatCompletion.Usage, Is.Null); + } + + [Test] + public void ChatCompletionWithRoleWorks() + { + ChatMessageRole role = ChatMessageRole.Tool; + ChatCompletion chatCompletion = OpenAIChatModelFactory.ChatCompletion(role: 
role); + + Assert.That(chatCompletion.Id, Is.Null); + Assert.That(chatCompletion.FinishReason, Is.EqualTo(default(ChatFinishReason))); + Assert.That(chatCompletion.Content, Is.Not.Null.And.Empty); + Assert.That(chatCompletion.Refusal, Is.Null); + Assert.That(chatCompletion.ToolCalls, Is.Not.Null.And.Empty); + Assert.That(chatCompletion.Role, Is.EqualTo(role)); + Assert.That(chatCompletion.FunctionCall, Is.Null); + Assert.That(chatCompletion.ContentTokenLogProbabilities, Is.Not.Null.And.Empty); + Assert.That(chatCompletion.RefusalTokenLogProbabilities, Is.Not.Null.And.Empty); + Assert.That(chatCompletion.CreatedAt, Is.EqualTo(default(DateTimeOffset))); + Assert.That(chatCompletion.Model, Is.Null); + Assert.That(chatCompletion.SystemFingerprint, Is.Null); + Assert.That(chatCompletion.Usage, Is.Null); + } + + [Test] + public void ChatCompletionWithFunctionCallWorks() + { + ChatFunctionCall functionCall = new ChatFunctionCall("get_recipe", string.Empty); + ChatCompletion chatCompletion = OpenAIChatModelFactory.ChatCompletion(functionCall: functionCall); + + Assert.That(chatCompletion.Id, Is.Null); + Assert.That(chatCompletion.FinishReason, Is.EqualTo(default(ChatFinishReason))); + Assert.That(chatCompletion.Content, Is.Not.Null.And.Empty); + Assert.That(chatCompletion.Refusal, Is.Null); + Assert.That(chatCompletion.ToolCalls, Is.Not.Null.And.Empty); + Assert.That(chatCompletion.Role, Is.EqualTo(default(ChatMessageRole))); + Assert.That(chatCompletion.FunctionCall, Is.EqualTo(functionCall)); + Assert.That(chatCompletion.ContentTokenLogProbabilities, Is.Not.Null.And.Empty); + Assert.That(chatCompletion.RefusalTokenLogProbabilities, Is.Not.Null.And.Empty); + Assert.That(chatCompletion.CreatedAt, Is.EqualTo(default(DateTimeOffset))); + Assert.That(chatCompletion.Model, Is.Null); + Assert.That(chatCompletion.SystemFingerprint, Is.Null); + Assert.That(chatCompletion.Usage, Is.Null); + } + + [Test] + public void ChatCompletionWithContentTokenLogProbabilitiesWorks() + { + 
IEnumerable contentTokenLogProbabilities = [ + OpenAIChatModelFactory.ChatTokenLogProbabilityInfo(logProbability: 1f), + OpenAIChatModelFactory.ChatTokenLogProbabilityInfo(logProbability: 2f) + ]; + ChatCompletion chatCompletion = OpenAIChatModelFactory.ChatCompletion(contentTokenLogProbabilities: contentTokenLogProbabilities); + + Assert.That(chatCompletion.Id, Is.Null); + Assert.That(chatCompletion.FinishReason, Is.EqualTo(default(ChatFinishReason))); + Assert.That(chatCompletion.Content, Is.Not.Null.And.Empty); + Assert.That(chatCompletion.Refusal, Is.Null); + Assert.That(chatCompletion.ToolCalls, Is.Not.Null.And.Empty); + Assert.That(chatCompletion.Role, Is.EqualTo(default(ChatMessageRole))); + Assert.That(chatCompletion.FunctionCall, Is.Null); + Assert.That(chatCompletion.ContentTokenLogProbabilities.SequenceEqual(contentTokenLogProbabilities), Is.True); + Assert.That(chatCompletion.RefusalTokenLogProbabilities, Is.Not.Null.And.Empty); + Assert.That(chatCompletion.CreatedAt, Is.EqualTo(default(DateTimeOffset))); + Assert.That(chatCompletion.Model, Is.Null); + Assert.That(chatCompletion.SystemFingerprint, Is.Null); + Assert.That(chatCompletion.Usage, Is.Null); + } + + [Test] + public void ChatCompletionWithRefusalTokenLogProbabilitiesWorks() + { + IEnumerable refusalTokenLogProbabilities = [ + OpenAIChatModelFactory.ChatTokenLogProbabilityInfo(logProbability: 1f), + OpenAIChatModelFactory.ChatTokenLogProbabilityInfo(logProbability: 2f) + ]; + ChatCompletion chatCompletion = OpenAIChatModelFactory.ChatCompletion(refusalTokenLogProbabilities: refusalTokenLogProbabilities); + + Assert.That(chatCompletion.Id, Is.Null); + Assert.That(chatCompletion.FinishReason, Is.EqualTo(default(ChatFinishReason))); + Assert.That(chatCompletion.Content, Is.Not.Null.And.Empty); + Assert.That(chatCompletion.Refusal, Is.Null); + Assert.That(chatCompletion.ToolCalls, Is.Not.Null.And.Empty); + Assert.That(chatCompletion.Role, Is.EqualTo(default(ChatMessageRole))); + 
Assert.That(chatCompletion.FunctionCall, Is.Null); + Assert.That(chatCompletion.ContentTokenLogProbabilities, Is.Not.Null.And.Empty); + Assert.That(chatCompletion.RefusalTokenLogProbabilities.SequenceEqual(refusalTokenLogProbabilities), Is.True); + Assert.That(chatCompletion.CreatedAt, Is.EqualTo(default(DateTimeOffset))); + Assert.That(chatCompletion.Model, Is.Null); + Assert.That(chatCompletion.SystemFingerprint, Is.Null); + Assert.That(chatCompletion.Usage, Is.Null); + } + + [Test] + public void ChatCompletionWithCreatedAtWorks() + { + DateTimeOffset createdAt = DateTimeOffset.UtcNow; + ChatCompletion chatCompletion = OpenAIChatModelFactory.ChatCompletion(createdAt: createdAt); + + Assert.That(chatCompletion.Id, Is.Null); + Assert.That(chatCompletion.FinishReason, Is.EqualTo(default(ChatFinishReason))); + Assert.That(chatCompletion.Content, Is.Not.Null.And.Empty); + Assert.That(chatCompletion.Refusal, Is.Null); + Assert.That(chatCompletion.ToolCalls, Is.Not.Null.And.Empty); + Assert.That(chatCompletion.Role, Is.EqualTo(default(ChatMessageRole))); + Assert.That(chatCompletion.FunctionCall, Is.Null); + Assert.That(chatCompletion.ContentTokenLogProbabilities, Is.Not.Null.And.Empty); + Assert.That(chatCompletion.RefusalTokenLogProbabilities, Is.Not.Null.And.Empty); + Assert.That(chatCompletion.CreatedAt, Is.EqualTo(createdAt)); + Assert.That(chatCompletion.Model, Is.Null); + Assert.That(chatCompletion.SystemFingerprint, Is.Null); + Assert.That(chatCompletion.Usage, Is.Null); + } + + [Test] + public void ChatCompletionWithModelWorks() + { + string model = "topmodel"; + ChatCompletion chatCompletion = OpenAIChatModelFactory.ChatCompletion(model: model); + + Assert.That(chatCompletion.Id, Is.Null); + Assert.That(chatCompletion.FinishReason, Is.EqualTo(default(ChatFinishReason))); + Assert.That(chatCompletion.Content, Is.Not.Null.And.Empty); + Assert.That(chatCompletion.Refusal, Is.Null); + Assert.That(chatCompletion.ToolCalls, Is.Not.Null.And.Empty); + 
Assert.That(chatCompletion.Role, Is.EqualTo(default(ChatMessageRole))); + Assert.That(chatCompletion.FunctionCall, Is.Null); + Assert.That(chatCompletion.ContentTokenLogProbabilities, Is.Not.Null.And.Empty); + Assert.That(chatCompletion.RefusalTokenLogProbabilities, Is.Not.Null.And.Empty); + Assert.That(chatCompletion.CreatedAt, Is.EqualTo(default(DateTimeOffset))); + Assert.That(chatCompletion.Model, Is.EqualTo(model)); + Assert.That(chatCompletion.SystemFingerprint, Is.Null); + Assert.That(chatCompletion.Usage, Is.Null); + } + + [Test] + public void ChatCompletionWithSystemFingerprintWorks() + { + string systemFingerprint = "footprint"; + ChatCompletion chatCompletion = OpenAIChatModelFactory.ChatCompletion(systemFingerprint: systemFingerprint); + + Assert.That(chatCompletion.Id, Is.Null); + Assert.That(chatCompletion.FinishReason, Is.EqualTo(default(ChatFinishReason))); + Assert.That(chatCompletion.Content, Is.Not.Null.And.Empty); + Assert.That(chatCompletion.Refusal, Is.Null); + Assert.That(chatCompletion.ToolCalls, Is.Not.Null.And.Empty); + Assert.That(chatCompletion.Role, Is.EqualTo(default(ChatMessageRole))); + Assert.That(chatCompletion.FunctionCall, Is.Null); + Assert.That(chatCompletion.ContentTokenLogProbabilities, Is.Not.Null.And.Empty); + Assert.That(chatCompletion.RefusalTokenLogProbabilities, Is.Not.Null.And.Empty); + Assert.That(chatCompletion.CreatedAt, Is.EqualTo(default(DateTimeOffset))); + Assert.That(chatCompletion.Model, Is.Null); + Assert.That(chatCompletion.SystemFingerprint, Is.EqualTo(systemFingerprint)); + Assert.That(chatCompletion.Usage, Is.Null); + } + + [Test] + public void ChatCompletionWithUsageWorks() + { + ChatTokenUsage usage = OpenAIChatModelFactory.ChatTokenUsage(outputTokens: 20); + ChatCompletion chatCompletion = OpenAIChatModelFactory.ChatCompletion(usage: usage); + + Assert.That(chatCompletion.Id, Is.Null); + Assert.That(chatCompletion.FinishReason, Is.EqualTo(default(ChatFinishReason))); + 
Assert.That(chatCompletion.Content, Is.Not.Null.And.Empty); + Assert.That(chatCompletion.Refusal, Is.Null); + Assert.That(chatCompletion.ToolCalls, Is.Not.Null.And.Empty); + Assert.That(chatCompletion.Role, Is.EqualTo(default(ChatMessageRole))); + Assert.That(chatCompletion.FunctionCall, Is.Null); + Assert.That(chatCompletion.ContentTokenLogProbabilities, Is.Not.Null.And.Empty); + Assert.That(chatCompletion.RefusalTokenLogProbabilities, Is.Not.Null.And.Empty); + Assert.That(chatCompletion.CreatedAt, Is.EqualTo(default(DateTimeOffset))); + Assert.That(chatCompletion.Model, Is.Null); + Assert.That(chatCompletion.SystemFingerprint, Is.Null); + Assert.That(chatCompletion.Usage, Is.EqualTo(usage)); + } + + [Test] + public void ChatTokenLogProbabilityInfoWithNoPropertiesWorks() + { + ChatTokenLogProbabilityInfo chatTokenLogProbabilityInfo = OpenAIChatModelFactory.ChatTokenLogProbabilityInfo(); + + Assert.That(chatTokenLogProbabilityInfo.Token, Is.Null); + Assert.That(chatTokenLogProbabilityInfo.LogProbability, Is.EqualTo(0f)); + Assert.That(chatTokenLogProbabilityInfo.Utf8ByteValues, Is.Not.Null.And.Empty); + Assert.That(chatTokenLogProbabilityInfo.TopLogProbabilities, Is.Not.Null.And.Empty); + } + + [Test] + public void ChatTokenLogProbabilityInfoWithTokenWorks() + { + string token = "a_token_of_appreciation"; + ChatTokenLogProbabilityInfo chatTokenLogProbabilityInfo = OpenAIChatModelFactory.ChatTokenLogProbabilityInfo(token: token); + + Assert.That(chatTokenLogProbabilityInfo.Token, Is.EqualTo(token)); + Assert.That(chatTokenLogProbabilityInfo.LogProbability, Is.EqualTo(0f)); + Assert.That(chatTokenLogProbabilityInfo.Utf8ByteValues, Is.Not.Null.And.Empty); + Assert.That(chatTokenLogProbabilityInfo.TopLogProbabilities, Is.Not.Null.And.Empty); + } + + [Test] + public void ChatTokenLogProbabilityInfoWithLogProbabilityWorks() + { + float logProbability = 3.14f; + ChatTokenLogProbabilityInfo chatTokenLogProbabilityInfo = 
OpenAIChatModelFactory.ChatTokenLogProbabilityInfo(logProbability: logProbability); + + Assert.That(chatTokenLogProbabilityInfo.Token, Is.Null); + Assert.That(chatTokenLogProbabilityInfo.LogProbability, Is.EqualTo(logProbability)); + Assert.That(chatTokenLogProbabilityInfo.Utf8ByteValues, Is.Not.Null.And.Empty); + Assert.That(chatTokenLogProbabilityInfo.TopLogProbabilities, Is.Not.Null.And.Empty); + } + + [Test] + public void ChatTokenLogProbabilityInfoWithUtf8ByteValuesWorks() + { + IEnumerable utf8ByteValues = [104, 101, 108, 108, 111]; + ChatTokenLogProbabilityInfo chatTokenLogProbabilityInfo = OpenAIChatModelFactory.ChatTokenLogProbabilityInfo(utf8ByteValues: utf8ByteValues); + + Assert.That(chatTokenLogProbabilityInfo.Token, Is.Null); + Assert.That(chatTokenLogProbabilityInfo.LogProbability, Is.EqualTo(0f)); + Assert.That(chatTokenLogProbabilityInfo.Utf8ByteValues.SequenceEqual(utf8ByteValues), Is.True); + Assert.That(chatTokenLogProbabilityInfo.TopLogProbabilities, Is.Not.Null.And.Empty); + } + + [Test] + public void ChatTokenLogProbabilityInfoWithTopLogProbabilitiesWorks() + { + IEnumerable topLogProbabilities = [ + OpenAIChatModelFactory.ChatTokenTopLogProbabilityInfo(token: "firstToken"), + OpenAIChatModelFactory.ChatTokenTopLogProbabilityInfo(token: "secondToken") + ]; + ChatTokenLogProbabilityInfo chatTokenLogProbabilityInfo = OpenAIChatModelFactory.ChatTokenLogProbabilityInfo(topLogProbabilities: topLogProbabilities); + + Assert.That(chatTokenLogProbabilityInfo.Token, Is.Null); + Assert.That(chatTokenLogProbabilityInfo.LogProbability, Is.EqualTo(0f)); + Assert.That(chatTokenLogProbabilityInfo.Utf8ByteValues, Is.Not.Null.And.Empty); + Assert.That(chatTokenLogProbabilityInfo.TopLogProbabilities.SequenceEqual(topLogProbabilities), Is.True); + } + + [Test] + public void ChatTokenTopLogProbabilityInfoWithNoPropertiesWorks() + { + ChatTokenTopLogProbabilityInfo chatTokenTopLogProbabilityInfo = OpenAIChatModelFactory.ChatTokenTopLogProbabilityInfo(); + + 
Assert.That(chatTokenTopLogProbabilityInfo.Token, Is.Null); + Assert.That(chatTokenTopLogProbabilityInfo.LogProbability, Is.EqualTo(0f)); + Assert.That(chatTokenTopLogProbabilityInfo.Utf8ByteValues, Is.Not.Null.And.Empty); + } + + [Test] + public void ChatTokenTopLogProbabilityInfoWithTokenWorks() + { + string token = "a_token_of_appreciation"; + ChatTokenTopLogProbabilityInfo chatTokenTopLogProbabilityInfo = OpenAIChatModelFactory.ChatTokenTopLogProbabilityInfo(token: token); + + Assert.That(chatTokenTopLogProbabilityInfo.Token, Is.EqualTo(token)); + Assert.That(chatTokenTopLogProbabilityInfo.LogProbability, Is.EqualTo(0f)); + Assert.That(chatTokenTopLogProbabilityInfo.Utf8ByteValues, Is.Not.Null.And.Empty); + } + + [Test] + public void ChatTokenTopLogProbabilityInfoWithLogProbabilityWorks() + { + float logProbability = 3.14f; + ChatTokenTopLogProbabilityInfo chatTokenTopLogProbabilityInfo = OpenAIChatModelFactory.ChatTokenTopLogProbabilityInfo(logProbability: logProbability); + + Assert.That(chatTokenTopLogProbabilityInfo.Token, Is.Null); + Assert.That(chatTokenTopLogProbabilityInfo.LogProbability, Is.EqualTo(logProbability)); + Assert.That(chatTokenTopLogProbabilityInfo.Utf8ByteValues, Is.Not.Null.And.Empty); + } + + [Test] + public void ChatTokenTopLogProbabilityInfoWithUtf8ByteValuesWorks() + { + IEnumerable utf8ByteValues = [104, 101, 108, 108, 111]; + ChatTokenTopLogProbabilityInfo chatTokenTopLogProbabilityInfo = OpenAIChatModelFactory.ChatTokenTopLogProbabilityInfo(utf8ByteValues: utf8ByteValues); + + Assert.That(chatTokenTopLogProbabilityInfo.Token, Is.Null); + Assert.That(chatTokenTopLogProbabilityInfo.LogProbability, Is.EqualTo(0f)); + Assert.That(chatTokenTopLogProbabilityInfo.Utf8ByteValues.SequenceEqual(utf8ByteValues), Is.True); + } + + [Test] + public void ChatTokenUsageWithNoPropertiesWorks() + { + ChatTokenUsage chatTokenUsage = OpenAIChatModelFactory.ChatTokenUsage(); + + Assert.That(chatTokenUsage.OutputTokens, Is.EqualTo(0)); + 
Assert.That(chatTokenUsage.InputTokens, Is.EqualTo(0)); + Assert.That(chatTokenUsage.TotalTokens, Is.EqualTo(0)); + } + + [Test] + public void ChatTokenUsageWithOutputTokensWorks() + { + int outputTokens = 271828; + ChatTokenUsage chatTokenUsage = OpenAIChatModelFactory.ChatTokenUsage(outputTokens: outputTokens); + + Assert.That(chatTokenUsage.OutputTokens, Is.EqualTo(outputTokens)); + Assert.That(chatTokenUsage.InputTokens, Is.EqualTo(0)); + Assert.That(chatTokenUsage.TotalTokens, Is.EqualTo(0)); + } + + [Test] + public void ChatTokenUsageWithInputTokensWorks() + { + int inputTokens = 271828; + ChatTokenUsage chatTokenUsage = OpenAIChatModelFactory.ChatTokenUsage(inputTokens: inputTokens); + + Assert.That(chatTokenUsage.OutputTokens, Is.EqualTo(0)); + Assert.That(chatTokenUsage.InputTokens, Is.EqualTo(inputTokens)); + Assert.That(chatTokenUsage.TotalTokens, Is.EqualTo(0)); + } + + [Test] + public void ChatTokenUsageWithTotalTokensWorks() + { + int totalTokens = 271828; + ChatTokenUsage chatTokenUsage = OpenAIChatModelFactory.ChatTokenUsage(totalTokens: totalTokens); + + Assert.That(chatTokenUsage.OutputTokens, Is.EqualTo(0)); + Assert.That(chatTokenUsage.InputTokens, Is.EqualTo(0)); + Assert.That(chatTokenUsage.TotalTokens, Is.EqualTo(totalTokens)); + } + + [Test] + public void StreamingChatCompletionUpdateWithNoPropertiesWorks() + { + StreamingChatCompletionUpdate streamingChatCompletionUpdate = OpenAIChatModelFactory.StreamingChatCompletionUpdate(); + + Assert.That(streamingChatCompletionUpdate.Id, Is.Null); + Assert.That(streamingChatCompletionUpdate.ContentUpdate, Is.Not.Null.And.Empty); + Assert.That(streamingChatCompletionUpdate.FunctionCallUpdate, Is.Null); + Assert.That(streamingChatCompletionUpdate.ToolCallUpdates, Is.Not.Null.And.Empty); + Assert.That(streamingChatCompletionUpdate.Role, Is.Null); + Assert.That(streamingChatCompletionUpdate.RefusalUpdate, Is.Null); + Assert.That(streamingChatCompletionUpdate.ContentTokenLogProbabilities, 
Is.Not.Null.And.Empty); + Assert.That(streamingChatCompletionUpdate.RefusalTokenLogProbabilities, Is.Not.Null.And.Empty); + Assert.That(streamingChatCompletionUpdate.FinishReason, Is.Null); + Assert.That(streamingChatCompletionUpdate.CreatedAt, Is.EqualTo(default(DateTimeOffset))); + Assert.That(streamingChatCompletionUpdate.Model, Is.Null); + Assert.That(streamingChatCompletionUpdate.SystemFingerprint, Is.Null); + Assert.That(streamingChatCompletionUpdate.Usage, Is.Null); + } + + [Test] + public void StreamingChatCompletionUpdateWithIdWorks() + { + string id = "chat_completion_id"; + StreamingChatCompletionUpdate streamingChatCompletionUpdate = OpenAIChatModelFactory.StreamingChatCompletionUpdate(id: id); + + Assert.That(streamingChatCompletionUpdate.Id, Is.EqualTo(id)); + Assert.That(streamingChatCompletionUpdate.ContentUpdate, Is.Not.Null.And.Empty); + Assert.That(streamingChatCompletionUpdate.FunctionCallUpdate, Is.Null); + Assert.That(streamingChatCompletionUpdate.ToolCallUpdates, Is.Not.Null.And.Empty); + Assert.That(streamingChatCompletionUpdate.Role, Is.Null); + Assert.That(streamingChatCompletionUpdate.RefusalUpdate, Is.Null); + Assert.That(streamingChatCompletionUpdate.ContentTokenLogProbabilities, Is.Not.Null.And.Empty); + Assert.That(streamingChatCompletionUpdate.RefusalTokenLogProbabilities, Is.Not.Null.And.Empty); + Assert.That(streamingChatCompletionUpdate.FinishReason, Is.Null); + Assert.That(streamingChatCompletionUpdate.CreatedAt, Is.EqualTo(default(DateTimeOffset))); + Assert.That(streamingChatCompletionUpdate.Model, Is.Null); + Assert.That(streamingChatCompletionUpdate.SystemFingerprint, Is.Null); + Assert.That(streamingChatCompletionUpdate.Usage, Is.Null); + } + + [Test] + public void StreamingChatCompletionUpdateWithContentUpdateWorks() + { + IEnumerable contentUpdate = [ + ChatMessageContentPart.CreateTextPart("first part"), + ChatMessageContentPart.CreateTextPart("second part") + ]; + StreamingChatCompletionUpdate 
streamingChatCompletionUpdate = OpenAIChatModelFactory.StreamingChatCompletionUpdate(contentUpdate: contentUpdate); + + Assert.That(streamingChatCompletionUpdate.Id, Is.Null); + Assert.That(streamingChatCompletionUpdate.ContentUpdate.SequenceEqual(contentUpdate), Is.True); + Assert.That(streamingChatCompletionUpdate.FunctionCallUpdate, Is.Null); + Assert.That(streamingChatCompletionUpdate.ToolCallUpdates, Is.Not.Null.And.Empty); + Assert.That(streamingChatCompletionUpdate.Role, Is.Null); + Assert.That(streamingChatCompletionUpdate.RefusalUpdate, Is.Null); + Assert.That(streamingChatCompletionUpdate.ContentTokenLogProbabilities, Is.Not.Null.And.Empty); + Assert.That(streamingChatCompletionUpdate.RefusalTokenLogProbabilities, Is.Not.Null.And.Empty); + Assert.That(streamingChatCompletionUpdate.FinishReason, Is.Null); + Assert.That(streamingChatCompletionUpdate.CreatedAt, Is.EqualTo(default(DateTimeOffset))); + Assert.That(streamingChatCompletionUpdate.Model, Is.Null); + Assert.That(streamingChatCompletionUpdate.SystemFingerprint, Is.Null); + Assert.That(streamingChatCompletionUpdate.Usage, Is.Null); + } + + [Test] + public void StreamingChatCompletionUpdateWithFunctionCallUpdateWorks() + { + StreamingChatFunctionCallUpdate functionCallUpdate = OpenAIChatModelFactory.StreamingChatFunctionCallUpdate(functionName: "get_recipte"); + StreamingChatCompletionUpdate streamingChatCompletionUpdate = OpenAIChatModelFactory.StreamingChatCompletionUpdate(functionCallUpdate: functionCallUpdate); + + Assert.That(streamingChatCompletionUpdate.Id, Is.Null); + Assert.That(streamingChatCompletionUpdate.ContentUpdate, Is.Not.Null.And.Empty); + Assert.That(streamingChatCompletionUpdate.FunctionCallUpdate, Is.EqualTo(functionCallUpdate)); + Assert.That(streamingChatCompletionUpdate.ToolCallUpdates, Is.Not.Null.And.Empty); + Assert.That(streamingChatCompletionUpdate.Role, Is.Null); + Assert.That(streamingChatCompletionUpdate.RefusalUpdate, Is.Null); + 
Assert.That(streamingChatCompletionUpdate.ContentTokenLogProbabilities, Is.Not.Null.And.Empty); + Assert.That(streamingChatCompletionUpdate.RefusalTokenLogProbabilities, Is.Not.Null.And.Empty); + Assert.That(streamingChatCompletionUpdate.FinishReason, Is.Null); + Assert.That(streamingChatCompletionUpdate.CreatedAt, Is.EqualTo(default(DateTimeOffset))); + Assert.That(streamingChatCompletionUpdate.Model, Is.Null); + Assert.That(streamingChatCompletionUpdate.SystemFingerprint, Is.Null); + Assert.That(streamingChatCompletionUpdate.Usage, Is.Null); + } + + [Test] + public void StreamingChatCompletionUpdateWithToolCallUpdatesWorks() + { + IEnumerable toolCallUpdates = [ + OpenAIChatModelFactory.StreamingChatToolCallUpdate(id: "id1"), + OpenAIChatModelFactory.StreamingChatToolCallUpdate(id: "id2") + ]; + StreamingChatCompletionUpdate streamingChatCompletionUpdate = OpenAIChatModelFactory.StreamingChatCompletionUpdate(toolCallUpdates: toolCallUpdates); + + Assert.That(streamingChatCompletionUpdate.Id, Is.Null); + Assert.That(streamingChatCompletionUpdate.ContentUpdate, Is.Not.Null.And.Empty); + Assert.That(streamingChatCompletionUpdate.FunctionCallUpdate, Is.Null); + Assert.That(streamingChatCompletionUpdate.ToolCallUpdates.SequenceEqual(toolCallUpdates), Is.True); + Assert.That(streamingChatCompletionUpdate.Role, Is.Null); + Assert.That(streamingChatCompletionUpdate.RefusalUpdate, Is.Null); + Assert.That(streamingChatCompletionUpdate.ContentTokenLogProbabilities, Is.Not.Null.And.Empty); + Assert.That(streamingChatCompletionUpdate.RefusalTokenLogProbabilities, Is.Not.Null.And.Empty); + Assert.That(streamingChatCompletionUpdate.FinishReason, Is.Null); + Assert.That(streamingChatCompletionUpdate.CreatedAt, Is.EqualTo(default(DateTimeOffset))); + Assert.That(streamingChatCompletionUpdate.Model, Is.Null); + Assert.That(streamingChatCompletionUpdate.SystemFingerprint, Is.Null); + Assert.That(streamingChatCompletionUpdate.Usage, Is.Null); + } + + [Test] + public void 
StreamingChatCompletionUpdateWithRoleWorks() + { + ChatMessageRole role = ChatMessageRole.Tool; + StreamingChatCompletionUpdate streamingChatCompletionUpdate = OpenAIChatModelFactory.StreamingChatCompletionUpdate(role: role); + + Assert.That(streamingChatCompletionUpdate.Id, Is.Null); + Assert.That(streamingChatCompletionUpdate.ContentUpdate, Is.Not.Null.And.Empty); + Assert.That(streamingChatCompletionUpdate.FunctionCallUpdate, Is.Null); + Assert.That(streamingChatCompletionUpdate.ToolCallUpdates, Is.Not.Null.And.Empty); + Assert.That(streamingChatCompletionUpdate.Role, Is.EqualTo(role)); + Assert.That(streamingChatCompletionUpdate.RefusalUpdate, Is.Null); + Assert.That(streamingChatCompletionUpdate.ContentTokenLogProbabilities, Is.Not.Null.And.Empty); + Assert.That(streamingChatCompletionUpdate.RefusalTokenLogProbabilities, Is.Not.Null.And.Empty); + Assert.That(streamingChatCompletionUpdate.FinishReason, Is.Null); + Assert.That(streamingChatCompletionUpdate.CreatedAt, Is.EqualTo(default(DateTimeOffset))); + Assert.That(streamingChatCompletionUpdate.Model, Is.Null); + Assert.That(streamingChatCompletionUpdate.SystemFingerprint, Is.Null); + Assert.That(streamingChatCompletionUpdate.Usage, Is.Null); + } + + [Test] + public void StreamingChatCompletionUpdateWithRefusalUpdateWorks() + { + string refusalUpdate = "This is a refusal update."; + StreamingChatCompletionUpdate streamingChatCompletionUpdate = OpenAIChatModelFactory.StreamingChatCompletionUpdate(refusalUpdate: refusalUpdate); + + Assert.That(streamingChatCompletionUpdate.Id, Is.Null); + Assert.That(streamingChatCompletionUpdate.ContentUpdate, Is.Not.Null.And.Empty); + Assert.That(streamingChatCompletionUpdate.FunctionCallUpdate, Is.Null); + Assert.That(streamingChatCompletionUpdate.ToolCallUpdates, Is.Not.Null.And.Empty); + Assert.That(streamingChatCompletionUpdate.Role, Is.Null); + Assert.That(streamingChatCompletionUpdate.RefusalUpdate, Is.EqualTo(refusalUpdate)); + 
Assert.That(streamingChatCompletionUpdate.ContentTokenLogProbabilities, Is.Not.Null.And.Empty); + Assert.That(streamingChatCompletionUpdate.RefusalTokenLogProbabilities, Is.Not.Null.And.Empty); + Assert.That(streamingChatCompletionUpdate.FinishReason, Is.Null); + Assert.That(streamingChatCompletionUpdate.CreatedAt, Is.EqualTo(default(DateTimeOffset))); + Assert.That(streamingChatCompletionUpdate.Model, Is.Null); + Assert.That(streamingChatCompletionUpdate.SystemFingerprint, Is.Null); + Assert.That(streamingChatCompletionUpdate.Usage, Is.Null); + } + + [Test] + public void StreamingChatCompletionUpdateWithContentTokenLogProbabilitiesWorks() + { + IEnumerable contentTokenLogProbabilities = [ + OpenAIChatModelFactory.ChatTokenLogProbabilityInfo(logProbability: 1f), + OpenAIChatModelFactory.ChatTokenLogProbabilityInfo(logProbability: 2f) + ]; + StreamingChatCompletionUpdate streamingChatCompletionUpdate = OpenAIChatModelFactory.StreamingChatCompletionUpdate(contentTokenLogProbabilities: contentTokenLogProbabilities); + + Assert.That(streamingChatCompletionUpdate.Id, Is.Null); + Assert.That(streamingChatCompletionUpdate.ContentUpdate, Is.Not.Null.And.Empty); + Assert.That(streamingChatCompletionUpdate.FunctionCallUpdate, Is.Null); + Assert.That(streamingChatCompletionUpdate.ToolCallUpdates, Is.Not.Null.And.Empty); + Assert.That(streamingChatCompletionUpdate.Role, Is.Null); + Assert.That(streamingChatCompletionUpdate.RefusalUpdate, Is.Null); + Assert.That(streamingChatCompletionUpdate.ContentTokenLogProbabilities.SequenceEqual(contentTokenLogProbabilities), Is.True); + Assert.That(streamingChatCompletionUpdate.RefusalTokenLogProbabilities, Is.Not.Null.And.Empty); + Assert.That(streamingChatCompletionUpdate.FinishReason, Is.Null); + Assert.That(streamingChatCompletionUpdate.CreatedAt, Is.EqualTo(default(DateTimeOffset))); + Assert.That(streamingChatCompletionUpdate.Model, Is.Null); + Assert.That(streamingChatCompletionUpdate.SystemFingerprint, Is.Null); + 
Assert.That(streamingChatCompletionUpdate.Usage, Is.Null); + } + + [Test] + public void StreamingChatCompletionUpdateWithRefusalTokenLogProbabilitiesWorks() + { + IEnumerable refusalTokenLogProbabilities = [ + OpenAIChatModelFactory.ChatTokenLogProbabilityInfo(logProbability: 1f), + OpenAIChatModelFactory.ChatTokenLogProbabilityInfo(logProbability: 2f) + ]; + StreamingChatCompletionUpdate streamingChatCompletionUpdate = OpenAIChatModelFactory.StreamingChatCompletionUpdate(refusalTokenLogProbabilities: refusalTokenLogProbabilities); + + Assert.That(streamingChatCompletionUpdate.Id, Is.Null); + Assert.That(streamingChatCompletionUpdate.ContentUpdate, Is.Not.Null.And.Empty); + Assert.That(streamingChatCompletionUpdate.FunctionCallUpdate, Is.Null); + Assert.That(streamingChatCompletionUpdate.ToolCallUpdates, Is.Not.Null.And.Empty); + Assert.That(streamingChatCompletionUpdate.Role, Is.Null); + Assert.That(streamingChatCompletionUpdate.RefusalUpdate, Is.Null); + Assert.That(streamingChatCompletionUpdate.ContentTokenLogProbabilities, Is.Not.Null.And.Empty); + Assert.That(streamingChatCompletionUpdate.RefusalTokenLogProbabilities.SequenceEqual(refusalTokenLogProbabilities), Is.True); + Assert.That(streamingChatCompletionUpdate.FinishReason, Is.Null); + Assert.That(streamingChatCompletionUpdate.CreatedAt, Is.EqualTo(default(DateTimeOffset))); + Assert.That(streamingChatCompletionUpdate.Model, Is.Null); + Assert.That(streamingChatCompletionUpdate.SystemFingerprint, Is.Null); + Assert.That(streamingChatCompletionUpdate.Usage, Is.Null); + } + + [Test] + public void StreamingChatCompletionUpdateWithFinishReasonWorks() + { + ChatFinishReason finishReason = ChatFinishReason.ToolCalls; + StreamingChatCompletionUpdate streamingChatCompletionUpdate = OpenAIChatModelFactory.StreamingChatCompletionUpdate(finishReason: finishReason); + + Assert.That(streamingChatCompletionUpdate.Id, Is.Null); + Assert.That(streamingChatCompletionUpdate.ContentUpdate, Is.Not.Null.And.Empty); + 
Assert.That(streamingChatCompletionUpdate.FunctionCallUpdate, Is.Null); + Assert.That(streamingChatCompletionUpdate.ToolCallUpdates, Is.Not.Null.And.Empty); + Assert.That(streamingChatCompletionUpdate.Role, Is.Null); + Assert.That(streamingChatCompletionUpdate.RefusalUpdate, Is.Null); + Assert.That(streamingChatCompletionUpdate.ContentTokenLogProbabilities, Is.Not.Null.And.Empty); + Assert.That(streamingChatCompletionUpdate.RefusalTokenLogProbabilities, Is.Not.Null.And.Empty); + Assert.That(streamingChatCompletionUpdate.FinishReason, Is.EqualTo(finishReason)); + Assert.That(streamingChatCompletionUpdate.CreatedAt, Is.EqualTo(default(DateTimeOffset))); + Assert.That(streamingChatCompletionUpdate.Model, Is.Null); + Assert.That(streamingChatCompletionUpdate.SystemFingerprint, Is.Null); + Assert.That(streamingChatCompletionUpdate.Usage, Is.Null); + } + + [Test] + public void StreamingChatCompletionUpdateWithCreatedAtWorks() + { + DateTimeOffset createdAt = DateTimeOffset.UtcNow; + StreamingChatCompletionUpdate streamingChatCompletionUpdate = OpenAIChatModelFactory.StreamingChatCompletionUpdate(createdAt: createdAt); + + Assert.That(streamingChatCompletionUpdate.Id, Is.Null); + Assert.That(streamingChatCompletionUpdate.ContentUpdate, Is.Not.Null.And.Empty); + Assert.That(streamingChatCompletionUpdate.FunctionCallUpdate, Is.Null); + Assert.That(streamingChatCompletionUpdate.ToolCallUpdates, Is.Not.Null.And.Empty); + Assert.That(streamingChatCompletionUpdate.Role, Is.Null); + Assert.That(streamingChatCompletionUpdate.RefusalUpdate, Is.Null); + Assert.That(streamingChatCompletionUpdate.ContentTokenLogProbabilities, Is.Not.Null.And.Empty); + Assert.That(streamingChatCompletionUpdate.RefusalTokenLogProbabilities, Is.Not.Null.And.Empty); + Assert.That(streamingChatCompletionUpdate.FinishReason, Is.Null); + Assert.That(streamingChatCompletionUpdate.CreatedAt, Is.EqualTo(createdAt)); + Assert.That(streamingChatCompletionUpdate.Model, Is.Null); + 
Assert.That(streamingChatCompletionUpdate.SystemFingerprint, Is.Null); + Assert.That(streamingChatCompletionUpdate.Usage, Is.Null); + } + + [Test] + public void StreamingChatCompletionUpdateWithModelWorks() + { + string model = "topmodel"; + StreamingChatCompletionUpdate streamingChatCompletionUpdate = OpenAIChatModelFactory.StreamingChatCompletionUpdate(model: model); + + Assert.That(streamingChatCompletionUpdate.Id, Is.Null); + Assert.That(streamingChatCompletionUpdate.ContentUpdate, Is.Not.Null.And.Empty); + Assert.That(streamingChatCompletionUpdate.FunctionCallUpdate, Is.Null); + Assert.That(streamingChatCompletionUpdate.ToolCallUpdates, Is.Not.Null.And.Empty); + Assert.That(streamingChatCompletionUpdate.Role, Is.Null); + Assert.That(streamingChatCompletionUpdate.RefusalUpdate, Is.Null); + Assert.That(streamingChatCompletionUpdate.ContentTokenLogProbabilities, Is.Not.Null.And.Empty); + Assert.That(streamingChatCompletionUpdate.RefusalTokenLogProbabilities, Is.Not.Null.And.Empty); + Assert.That(streamingChatCompletionUpdate.FinishReason, Is.Null); + Assert.That(streamingChatCompletionUpdate.CreatedAt, Is.EqualTo(default(DateTimeOffset))); + Assert.That(streamingChatCompletionUpdate.Model, Is.EqualTo(model)); + Assert.That(streamingChatCompletionUpdate.SystemFingerprint, Is.Null); + Assert.That(streamingChatCompletionUpdate.Usage, Is.Null); + } + + [Test] + public void StreamingChatCompletionUpdateWithSystemFingerprintWorks() + { + string systemFingerprint = "footprint"; + StreamingChatCompletionUpdate streamingChatCompletionUpdate = OpenAIChatModelFactory.StreamingChatCompletionUpdate(systemFingerprint: systemFingerprint); + + Assert.That(streamingChatCompletionUpdate.Id, Is.Null); + Assert.That(streamingChatCompletionUpdate.ContentUpdate, Is.Not.Null.And.Empty); + Assert.That(streamingChatCompletionUpdate.FunctionCallUpdate, Is.Null); + Assert.That(streamingChatCompletionUpdate.ToolCallUpdates, Is.Not.Null.And.Empty); + 
Assert.That(streamingChatCompletionUpdate.Role, Is.Null); + Assert.That(streamingChatCompletionUpdate.RefusalUpdate, Is.Null); + Assert.That(streamingChatCompletionUpdate.ContentTokenLogProbabilities, Is.Not.Null.And.Empty); + Assert.That(streamingChatCompletionUpdate.RefusalTokenLogProbabilities, Is.Not.Null.And.Empty); + Assert.That(streamingChatCompletionUpdate.FinishReason, Is.Null); + Assert.That(streamingChatCompletionUpdate.CreatedAt, Is.EqualTo(default(DateTimeOffset))); + Assert.That(streamingChatCompletionUpdate.Model, Is.Null); + Assert.That(streamingChatCompletionUpdate.SystemFingerprint, Is.EqualTo(systemFingerprint)); + Assert.That(streamingChatCompletionUpdate.Usage, Is.Null); + } + + [Test] + public void StreamingChatCompletionUpdateWithUsageWorks() + { + ChatTokenUsage usage = OpenAIChatModelFactory.ChatTokenUsage(outputTokens: 20); + StreamingChatCompletionUpdate streamingChatCompletionUpdate = OpenAIChatModelFactory.StreamingChatCompletionUpdate(usage: usage); + + Assert.That(streamingChatCompletionUpdate.Id, Is.Null); + Assert.That(streamingChatCompletionUpdate.ContentUpdate, Is.Not.Null.And.Empty); + Assert.That(streamingChatCompletionUpdate.FunctionCallUpdate, Is.Null); + Assert.That(streamingChatCompletionUpdate.ToolCallUpdates, Is.Not.Null.And.Empty); + Assert.That(streamingChatCompletionUpdate.Role, Is.Null); + Assert.That(streamingChatCompletionUpdate.RefusalUpdate, Is.Null); + Assert.That(streamingChatCompletionUpdate.ContentTokenLogProbabilities, Is.Not.Null.And.Empty); + Assert.That(streamingChatCompletionUpdate.RefusalTokenLogProbabilities, Is.Not.Null.And.Empty); + Assert.That(streamingChatCompletionUpdate.FinishReason, Is.Null); + Assert.That(streamingChatCompletionUpdate.CreatedAt, Is.EqualTo(default(DateTimeOffset))); + Assert.That(streamingChatCompletionUpdate.Model, Is.Null); + Assert.That(streamingChatCompletionUpdate.SystemFingerprint, Is.Null); + Assert.That(streamingChatCompletionUpdate.Usage, Is.EqualTo(usage)); + } + + 
[Test] + public void StreamingChatFunctionCallUpdateWithNoPropertiesWorks() + { + StreamingChatFunctionCallUpdate streamingChatFunctionCallUpdate = OpenAIChatModelFactory.StreamingChatFunctionCallUpdate(); + + Assert.That(streamingChatFunctionCallUpdate.FunctionName, Is.Null); + Assert.That(streamingChatFunctionCallUpdate.FunctionArgumentsUpdate, Is.Null); + } + + [Test] + public void StreamingChatFunctionCallUpdateWithFunctionNameWorks() + { + string functionName = "Margaret"; + StreamingChatFunctionCallUpdate streamingChatFunctionCallUpdate = OpenAIChatModelFactory.StreamingChatFunctionCallUpdate(functionName: functionName); + + Assert.That(streamingChatFunctionCallUpdate.FunctionName, Is.EqualTo(functionName)); + Assert.That(streamingChatFunctionCallUpdate.FunctionArgumentsUpdate, Is.Null); + } + + [Test] + public void StreamingChatFunctionCallUpdateWithFunctionArgumentsUpdateWorks() + { + string functionArgumentsUpdate = "arguments_update"; + StreamingChatFunctionCallUpdate streamingChatFunctionCallUpdate = OpenAIChatModelFactory.StreamingChatFunctionCallUpdate(functionArgumentsUpdate: functionArgumentsUpdate); + + Assert.That(streamingChatFunctionCallUpdate.FunctionName, Is.Null); + Assert.That(streamingChatFunctionCallUpdate.FunctionArgumentsUpdate, Is.EqualTo(functionArgumentsUpdate)); + } + + [Test] + public void StreamingChatToolCallUpdateWithNoPropertiesWorks() + { + StreamingChatToolCallUpdate streamingChatToolCallUpdate = OpenAIChatModelFactory.StreamingChatToolCallUpdate(); + + Assert.That(streamingChatToolCallUpdate.Index, Is.EqualTo(0)); + Assert.That(streamingChatToolCallUpdate.Id, Is.Null); + Assert.That(streamingChatToolCallUpdate.Kind, Is.EqualTo(default(ChatToolCallKind))); + Assert.That(streamingChatToolCallUpdate.FunctionName, Is.Null); + Assert.That(streamingChatToolCallUpdate.FunctionArgumentsUpdate, Is.Null); + } + + [Test] + public void StreamingChatToolCallUpdateWithIndexWorks() + { + int index = 31415; + StreamingChatToolCallUpdate 
streamingChatToolCallUpdate = OpenAIChatModelFactory.StreamingChatToolCallUpdate(index: index); + + Assert.That(streamingChatToolCallUpdate.Index, Is.EqualTo(index)); + Assert.That(streamingChatToolCallUpdate.Id, Is.Null); + Assert.That(streamingChatToolCallUpdate.Kind, Is.EqualTo(default(ChatToolCallKind))); + Assert.That(streamingChatToolCallUpdate.FunctionName, Is.Null); + Assert.That(streamingChatToolCallUpdate.FunctionArgumentsUpdate, Is.Null); + } + + [Test] + public void StreamingChatToolCallUpdateWithIdWorks() + { + string id = "tool_call_id"; + StreamingChatToolCallUpdate streamingChatToolCallUpdate = OpenAIChatModelFactory.StreamingChatToolCallUpdate(id: id); + + Assert.That(streamingChatToolCallUpdate.Index, Is.EqualTo(0)); + Assert.That(streamingChatToolCallUpdate.Id, Is.EqualTo(id)); + Assert.That(streamingChatToolCallUpdate.Kind, Is.EqualTo(default(ChatToolCallKind))); + Assert.That(streamingChatToolCallUpdate.FunctionName, Is.Null); + Assert.That(streamingChatToolCallUpdate.FunctionArgumentsUpdate, Is.Null); + } + + [Test] + public void StreamingChatToolCallUpdateWithKindWorks() + { + ChatToolCallKind kind = ChatToolCallKind.Function; + StreamingChatToolCallUpdate streamingChatToolCallUpdate = OpenAIChatModelFactory.StreamingChatToolCallUpdate(kind: kind); + + Assert.That(streamingChatToolCallUpdate.Index, Is.EqualTo(0)); + Assert.That(streamingChatToolCallUpdate.Id, Is.Null); + Assert.That(streamingChatToolCallUpdate.Kind, Is.EqualTo(kind)); + Assert.That(streamingChatToolCallUpdate.FunctionName, Is.Null); + Assert.That(streamingChatToolCallUpdate.FunctionArgumentsUpdate, Is.Null); + } + + [Test] + public void StreamingChatToolCallUpdateWithFunctionNameWorks() + { + string functionName = "Margaret"; + StreamingChatToolCallUpdate streamingChatToolCallUpdate = OpenAIChatModelFactory.StreamingChatToolCallUpdate(functionName: functionName); + + Assert.That(streamingChatToolCallUpdate.Index, Is.EqualTo(0)); + Assert.That(streamingChatToolCallUpdate.Id, 
Is.Null); + Assert.That(streamingChatToolCallUpdate.Kind, Is.EqualTo(default(ChatToolCallKind))); + Assert.That(streamingChatToolCallUpdate.FunctionName, Is.EqualTo(functionName)); + Assert.That(streamingChatToolCallUpdate.FunctionArgumentsUpdate, Is.Null); + } + + [Test] + public void StreamingChatToolCallUpdateWithFunctionArgumentsUpdateWorks() + { + string functionArgumentsUpdate = "arguments_update"; + StreamingChatToolCallUpdate streamingChatToolCallUpdate = OpenAIChatModelFactory.StreamingChatToolCallUpdate(functionArgumentsUpdate: functionArgumentsUpdate); + + Assert.That(streamingChatToolCallUpdate.Index, Is.EqualTo(0)); + Assert.That(streamingChatToolCallUpdate.Id, Is.Null); + Assert.That(streamingChatToolCallUpdate.Kind, Is.EqualTo(default(ChatToolCallKind))); + Assert.That(streamingChatToolCallUpdate.FunctionName, Is.Null); + Assert.That(streamingChatToolCallUpdate.FunctionArgumentsUpdate, Is.EqualTo(functionArgumentsUpdate)); + } +} diff --git a/.dotnet/tests/Embeddings/EmbeddingTests.cs b/.dotnet/tests/Embeddings/EmbeddingTests.cs index a20f6b9b2..d69719e2a 100644 --- a/.dotnet/tests/Embeddings/EmbeddingTests.cs +++ b/.dotnet/tests/Embeddings/EmbeddingTests.cs @@ -28,7 +28,7 @@ public enum EmbeddingsInputKind [Test] public async Task GenerateSingleEmbedding() { - EmbeddingClient client = new("text-embedding-3-small"); + EmbeddingClient client = new("text-embedding-3-small", Environment.GetEnvironmentVariable("OPENAI_API_KEY")); string input = "Hello, world!"; @@ -39,7 +39,7 @@ public async Task GenerateSingleEmbedding() Assert.That(embedding.Index, Is.EqualTo(0)); Assert.That(embedding.Vector, Is.Not.Null); Assert.That(embedding.Vector.Span.Length, Is.EqualTo(1536)); - + float[] array = embedding.Vector.ToArray(); Assert.That(array.Length, Is.EqualTo(1536)); } @@ -49,7 +49,7 @@ public async Task GenerateSingleEmbedding() [TestCase(EmbeddingsInputKind.UsingIntegers)] public async Task GenerateMultipleEmbeddings(EmbeddingsInputKind embeddingsInputKind) { 
- EmbeddingClient client = new("text-embedding-3-small"); + EmbeddingClient client = new("text-embedding-3-small", Environment.GetEnvironmentVariable("OPENAI_API_KEY")); const int Dimensions = 456; diff --git a/.dotnet/tests/Embeddings/OpenAIEmbeddingsModelFactoryTests.cs b/.dotnet/tests/Embeddings/OpenAIEmbeddingsModelFactoryTests.cs index b57e80f54..9a43a9e5e 100644 --- a/.dotnet/tests/Embeddings/OpenAIEmbeddingsModelFactoryTests.cs +++ b/.dotnet/tests/Embeddings/OpenAIEmbeddingsModelFactoryTests.cs @@ -31,7 +31,7 @@ public void EmbeddingWithIndexWorks() [Test] public void EmbeddingWithVectorWorks() { - IEnumerable vector = [ 1f, 2f, 3f ]; + IEnumerable vector = [1f, 2f, 3f]; Embedding embedding = OpenAIEmbeddingsModelFactory.Embedding(vector: vector); Assert.That(embedding.Index, Is.EqualTo(default(int))); diff --git a/.dotnet/tests/Images/ImageGenerationTests.cs b/.dotnet/tests/Images/ImageGenerationTests.cs index efc95c544..1816d5853 100644 --- a/.dotnet/tests/Images/ImageGenerationTests.cs +++ b/.dotnet/tests/Images/ImageGenerationTests.cs @@ -181,8 +181,8 @@ private void ValidateGeneratedImage(Uri imageUri, string expectedSubstring, stri ChatClient chatClient = GetTestClient(TestScenario.Chat); IEnumerable messages = [ new UserChatMessage( - ChatMessageContentPart.CreateTextMessageContentPart($"Describe this image for me. {descriptionHint}"), - ChatMessageContentPart.CreateImageMessageContentPart(imageUri)), + ChatMessageContentPart.CreateTextPart($"Describe this image for me. 
{descriptionHint}"), + ChatMessageContentPart.CreateImagePart(imageUri)), ]; ChatCompletionOptions chatOptions = new() { MaxTokens = 2048 }; ClientResult result = chatClient.CompleteChat(messages, chatOptions); @@ -195,8 +195,8 @@ private void ValidateGeneratedImage(BinaryData imageBytes, string expectedSubstr ChatClient chatClient = GetTestClient(TestScenario.Chat); IEnumerable messages = [ new UserChatMessage( - ChatMessageContentPart.CreateTextMessageContentPart($"Describe this image for me. {descriptionHint}"), - ChatMessageContentPart.CreateImageMessageContentPart(imageBytes, "image/png")), + ChatMessageContentPart.CreateTextPart($"Describe this image for me. {descriptionHint}"), + ChatMessageContentPart.CreateImagePart(imageBytes, "image/png")), ]; ChatCompletionOptions chatOptions = new() { MaxTokens = 2048 }; ClientResult result = chatClient.CompleteChat(messages, chatOptions); diff --git a/.dotnet/tests/Models/ModelTests.cs b/.dotnet/tests/Models/ModelTests.cs index 92bbaf887..aa5078490 100644 --- a/.dotnet/tests/Models/ModelTests.cs +++ b/.dotnet/tests/Models/ModelTests.cs @@ -4,6 +4,7 @@ using System; using System.Linq; using System.Threading.Tasks; +using static OpenAI.Tests.TestHelpers; namespace OpenAI.Tests.Models; @@ -20,7 +21,7 @@ public ModelTests(bool isAsync) : base(isAsync) [Test] public async Task ListModels() { - ModelClient client = new(); + ModelClient client = GetTestClient(TestScenario.Models); OpenAIModelInfoCollection allModels = IsAsync ? 
await client.GetModelsAsync() @@ -33,7 +34,7 @@ public async Task ListModels() [Test] public async Task GetModelInfo() { - ModelClient client = new(); + ModelClient client = GetTestClient(TestScenario.Models); string modelName = "gpt-4o-mini"; diff --git a/.dotnet/tests/Moderations/ModerationTests.cs b/.dotnet/tests/Moderations/ModerationTests.cs index 3c10fbfab..bb5b0e25c 100644 --- a/.dotnet/tests/Moderations/ModerationTests.cs +++ b/.dotnet/tests/Moderations/ModerationTests.cs @@ -3,6 +3,7 @@ using OpenAI.Tests.Utility; using System.Collections.Generic; using System.Threading.Tasks; +using static OpenAI.Tests.TestHelpers; namespace OpenAI.Tests.Moderations; @@ -19,7 +20,7 @@ public ModerationTests(bool isAsync) : base(isAsync) [Test] public async Task ClassifySingleInput() { - ModerationClient client = new("text-moderation-stable"); + ModerationClient client = GetTestClient(TestScenario.Moderations); const string input = "I am killing all my houseplants!"; @@ -35,7 +36,7 @@ public async Task ClassifySingleInput() [Test] public async Task ClassifyMultipleInputs() { - ModerationClient client = new("text-moderation-stable"); + ModerationClient client = GetTestClient(TestScenario.Moderations); List inputs = [ diff --git a/.dotnet/tests/OpenAI.Tests.csproj b/.dotnet/tests/OpenAI.Tests.csproj index c05ab9f8d..cdb183c95 100644 --- a/.dotnet/tests/OpenAI.Tests.csproj +++ b/.dotnet/tests/OpenAI.Tests.csproj @@ -1,8 +1,13 @@  net8.0 + $(NoWarn);CS1591 + + + $(NoWarn);OPENAI001; + latest diff --git a/.dotnet/tests/Telemetry/ChatTelemetryTests.cs b/.dotnet/tests/Telemetry/ChatTelemetryTests.cs index d3b043a7c..58d4cd64d 100644 --- a/.dotnet/tests/Telemetry/ChatTelemetryTests.cs +++ b/.dotnet/tests/Telemetry/ChatTelemetryTests.cs @@ -186,7 +186,7 @@ public async Task ChatTracingAndMetricsMultiple() var tasks = new Task[5]; int numberOfSuccessfulResponses = 3; int totalPromptTokens = 0, totalCompletionTokens = 0; - for (int i = 0; i < tasks.Length; i ++) + for (int i = 0; i < 
tasks.Length; i++) { int t = i; // don't let Activity.Current escape the scope diff --git a/.dotnet/tests/Telemetry/TestMeterListener.cs b/.dotnet/tests/Telemetry/TestMeterListener.cs index b918beb7f..a8a5cdc01 100644 --- a/.dotnet/tests/Telemetry/TestMeterListener.cs +++ b/.dotnet/tests/Telemetry/TestMeterListener.cs @@ -11,8 +11,8 @@ internal class TestMeterListener : IDisposable { public record TestMeasurement(object value, Dictionary tags); - private readonly ConcurrentDictionary> _measurements = new (); - private readonly ConcurrentDictionary _instruments = new (); + private readonly ConcurrentDictionary> _measurements = new(); + private readonly ConcurrentDictionary _instruments = new(); private readonly MeterListener _listener; public TestMeterListener(string meterName) { @@ -46,8 +46,8 @@ private void OnMeasurementRecorded(Instrument instrument, T measurement, Read _instruments.TryAdd(instrument.Name, instrument); var testMeasurement = new TestMeasurement(measurement, new Dictionary(tags.ToArray())); - _measurements.AddOrUpdate(instrument.Name, - k => new() { testMeasurement }, + _measurements.AddOrUpdate(instrument.Name, + k => new() { testMeasurement }, (k, l) => { l.Add(testMeasurement); diff --git a/.dotnet/tests/UserAgentTests.cs b/.dotnet/tests/UserAgentTests.cs index 3af362178..31b74e11d 100644 --- a/.dotnet/tests/UserAgentTests.cs +++ b/.dotnet/tests/UserAgentTests.cs @@ -1,5 +1,6 @@ using NUnit.Framework; using OpenAI.Chat; +using System; using System.ClientModel; using System.ClientModel.Primitives; using System.IO; @@ -29,7 +30,7 @@ private void UserAgentStringWorks(bool useApplicationId) } : new(); options.AddPolicy(policy, PipelinePosition.BeforeTransport); - ChatClient client = new("no-real-model-needed", options); + ChatClient client = new("no-real-model-needed", Environment.GetEnvironmentVariable("OPENAI_API_KEY"), options); RequestOptions noThrowOptions = new() { ErrorOptions = ClientErrorBehaviors.NoThrow, }; using BinaryContent 
emptyContent = BinaryContent.Create(new MemoryStream()); _ = client.CompleteChat(emptyContent, noThrowOptions); diff --git a/.dotnet/tests/Utility/TestHelpers.cs b/.dotnet/tests/Utility/TestHelpers.cs index 6ff1237a9..134540304 100644 --- a/.dotnet/tests/Utility/TestHelpers.cs +++ b/.dotnet/tests/Utility/TestHelpers.cs @@ -7,8 +7,11 @@ using OpenAI.Files; using OpenAI.FineTuning; using OpenAI.Images; +using OpenAI.Models; +using OpenAI.Moderations; using OpenAI.VectorStores; using System; +using System.ClientModel; using System.ClientModel.Primitives; using System.Collections.Generic; using System.IO; @@ -43,24 +46,27 @@ public enum TestScenario public static T GetTestClient(TestScenario scenario, string overrideModel = null) { OpenAIClientOptions options = new(); + ApiKeyCredential credential = Environment.GetEnvironmentVariable("OPENAI_API_KEY"); options.AddPolicy(GetDumpPolicy(), PipelinePosition.PerTry); object clientObject = scenario switch { #pragma warning disable OPENAI001 - TestScenario.Assistants => new AssistantClient(options), + TestScenario.Assistants => new AssistantClient(credential, options), #pragma warning restore OPENAI001 - TestScenario.Audio_TTS => new AudioClient(overrideModel ?? "tts-1", options), - TestScenario.Audio_Whisper => new AudioClient(overrideModel ?? "whisper-1", options), - TestScenario.Batch => new BatchClient(options), - TestScenario.Chat => new ChatClient(overrideModel ?? "gpt-4o-mini", options), - TestScenario.Embeddings => new EmbeddingClient(overrideModel ?? "text-embedding-3-small", options), - TestScenario.Files => new FileClient(options), + TestScenario.Audio_TTS => new AudioClient(overrideModel ?? "tts-1", credential, options), + TestScenario.Audio_Whisper => new AudioClient(overrideModel ?? "whisper-1", credential, options), + TestScenario.Batch => new BatchClient(credential, options), + TestScenario.Chat => new ChatClient(overrideModel ?? 
"gpt-4o-mini", credential, options), + TestScenario.Embeddings => new EmbeddingClient(overrideModel ?? "text-embedding-3-small", credential, options), + TestScenario.Files => new FileClient(credential, options), + TestScenario.Images => new ImageClient(overrideModel ?? "dall-e-3", credential, options), + TestScenario.Models => new ModelClient(credential, options), + TestScenario.Moderations => new ModerationClient(overrideModel ?? "text-moderation-stable", credential, options), TestScenario.FineTuning => new FineTuningClient(options), - TestScenario.Images => new ImageClient(overrideModel ?? "dall-e-3", options), #pragma warning disable OPENAI001 - TestScenario.VectorStores => new VectorStoreClient(options), + TestScenario.VectorStores => new VectorStoreClient(credential, options), #pragma warning restore OPENAI001 - TestScenario.TopLevel => new OpenAIClient(options), + TestScenario.TopLevel => new OpenAIClient(credential, options), _ => throw new NotImplementedException(), }; return (T)clientObject; diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index ba2c7d825..af321d330 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -15,12 +15,12 @@ jobs: version_suffix_args: ${{ format('--version-suffix="alpha.{0}"', github.run_number) }} steps: - name: Setup .NET - uses: actions/setup-dotnet@v1 + uses: actions/setup-dotnet@v4 with: dotnet-version: 8.x - name: Checkout code - uses: actions/checkout@v2 + uses: actions/checkout@v4 - name: Build run: dotnet build @@ -52,7 +52,7 @@ jobs: working-directory: .dotnet - name: Upload artifact - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@v4 with: name: build-artifacts path: ${{github.workspace}}/artifacts @@ -77,16 +77,52 @@ jobs: working-directory: .dotnet azure_build: # Development mirror only; validate AOAI compilation - runs-on: ubuntu-latest + strategy: + matrix: + # Builds on all supported .Net versions on all supported OS platforms. 
It also + # distributes the tests across these OSes. + os_net: + - runs-on: ubuntu-latest + test_framework: net8.0 + - runs-on: macos-latest + test_framework: net6.0 + - runs-on: windows-latest + test_framework: net462 + runs-on: ${{ matrix.os_net.runs-on }} steps: - - name: Setup .NET - uses: actions/setup-dotnet@v1 + - name: Setup .NET 6 and .Net 8 + uses: actions/setup-dotnet@v4 with: - dotnet-version: 8.x + # .Net Framework 4.6.2 is pre-installed on Windows 10 versions 1607 and newer + dotnet-version: | + 6.x + 8.x - name: Checkout code - uses: actions/checkout@v2 + uses: actions/checkout@v4 - name: Build - run: dotnet build - working-directory: .dotnet.azure \ No newline at end of file + run: dotnet build -p:Configuration=unsigned + working-directory: .dotnet.azure/sdk/openai + + - name: Test (${{ matrix.os_net.test_framework }}) + run: >- + dotnet test + --framework ${{ matrix.os_net.test_framework }} + --no-build + --filter "(TestCategory!=Live)" + --logger "trx;LogFileName=Azure.AI.OpenAI.Tests.trx" + --logger:"console;verbosity=quiet" + --blame-crash-dump-type full + --blame-hang-dump-type full + --blame-hang-timeout 15minutes + --results-directory "${{github.workspace}}/TestResults" + Azure.AI.OpenAI/tests/Azure.AI.OpenAI.Tests.csproj + working-directory: .dotnet.azure/sdk/openai + + - name: Publish test results + uses: actions/upload-artifact@v4 + with: + name: TestResults-${{ matrix.os_net.runs-on }}-${{ matrix.os_net.test_framework }} + path: TestResults + if: ${{ always() }} diff --git a/.openapi3/openapi3-openai.yaml b/.openapi3/openapi3-openai.yaml index 6149ecfa4..e387b346f 100644 --- a/.openapi3/openapi3-openai.yaml +++ b/.openapi3/openapi3-openai.yaml @@ -16,6 +16,7 @@ tags: - name: Models - name: Moderations - name: Vector Stores + - name: Uploads paths: /assistants: post: @@ -62,7 +63,10 @@ paths: Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` for descending order. 
schema: - $ref: '#/components/schemas/ListOrder' + type: string + enum: + - asc + - desc default: desc - name: after in: query @@ -1045,7 +1049,10 @@ paths: Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` for descending order. schema: - $ref: '#/components/schemas/ListOrder' + type: string + enum: + - asc + - desc default: desc - name: after in: query @@ -1219,7 +1226,10 @@ paths: Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` for descending order. schema: - $ref: '#/components/schemas/ListOrder' + type: string + enum: + - asc + - desc default: desc - name: after in: query @@ -1373,7 +1383,10 @@ paths: Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` for descending order. schema: - $ref: '#/components/schemas/ListOrder' + type: string + enum: + - asc + - desc default: desc - name: after in: query @@ -1473,6 +1486,123 @@ paths: application/json: schema: $ref: '#/components/schemas/SubmitToolOutputsRunRequest' + /uploads: + post: + tags: + - Uploads + operationId: createUpload + summary: |- + Creates an intermediate [Upload](/docs/api-reference/uploads/object) object that you can add [Parts](/docs/api-reference/uploads/part-object) to. Currently, an Upload can accept at most 8 GB in total and expires after an hour after you create it. + + Once you complete the Upload, we will create a [File](/docs/api-reference/files/object) object that contains all the parts you uploaded. This File is usable in the rest of our platform as a regular File object. + + For certain `purpose`s, the correct `mime_type` must be specified. Please refer to documentation for the supported MIME types for your use case: + - [Assistants](/docs/assistants/tools/file-search/supported-files) + + For guidance on the proper filename extensions for each purpose, please follow the documentation on [creating a File](/docs/api-reference/files/create). 
+ parameters: [] + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + anyOf: + - $ref: '#/components/schemas/Upload' + - $ref: '#/components/schemas/ErrorResponse' + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CreateUploadRequest' + /uploads/{upload_id}/cancel: + post: + tags: + - Uploads + operationId: cancelUpload + summary: Cancels the Upload. No Parts may be added after an Upload is cancelled. + parameters: + - name: upload_id + in: path + required: true + schema: + type: string + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + anyOf: + - $ref: '#/components/schemas/Upload' + - $ref: '#/components/schemas/ErrorResponse' + /uploads/{upload_id}/complete: + post: + tags: + - Uploads + operationId: completeUpload + summary: |- + Completes the [Upload](/docs/api-reference/uploads/object). + + Within the returned Upload object, there is a nested [File](/docs/api-reference/files/object) object that is ready to use in the rest of the platform. + + You can specify the order of the Parts by passing in an ordered list of the Part IDs. + + The number of bytes uploaded upon completion must match the number of bytes initially specified when creating the Upload object. No Parts may be added after an Upload is completed. + parameters: + - name: upload_id + in: path + required: true + schema: + type: string + responses: + '200': + description: The request has succeeded. 
+ content: + application/json: + schema: + anyOf: + - $ref: '#/components/schemas/Upload' + - $ref: '#/components/schemas/ErrorResponse' + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CompleteUploadRequest' + /uploads/{upload_id}/parts: + post: + tags: + - Uploads + operationId: addUploadPart + summary: |- + Adds a [Part](/docs/api-reference/uploads/part-object) to an [Upload](/docs/api-reference/uploads/object) object. A Part represents a chunk of bytes from the file you are trying to upload. + + Each Part can be at most 64 MB, and you can add Parts until you hit the Upload maximum of 8 GB. + + It is possible to add multiple Parts in parallel. You can decide the intended order of the Parts when you [complete the Upload](/docs/api-reference/uploads/complete). + parameters: + - name: upload_id + in: path + required: true + schema: + type: string + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + anyOf: + - $ref: '#/components/schemas/UploadPart' + - $ref: '#/components/schemas/ErrorResponse' + requestBody: + required: true + content: + multipart/form-data: + schema: + $ref: '#/components/schemas/AddUploadPartRequestMultiPart' /vector_stores: get: tags: @@ -1497,7 +1627,10 @@ paths: Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` for descending order. schema: - $ref: '#/components/schemas/ListOrder' + type: string + enum: + - asc + - desc default: desc - name: after in: query @@ -1737,7 +1870,10 @@ paths: Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` for descending order. schema: - $ref: '#/components/schemas/ListOrder' + type: string + enum: + - asc + - desc default: desc - name: after in: query @@ -1802,7 +1938,10 @@ paths: Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` for descending order. 
schema: - $ref: '#/components/schemas/ListOrder' + type: string + enum: + - asc + - desc default: desc - name: after in: query @@ -1923,6 +2062,14 @@ security: - BearerAuth: [] components: schemas: + AddUploadPartRequestMultiPart: + type: object + required: + - data + properties: + data: + type: string + format: binary AssistantObject: type: object required: @@ -1988,14 +2135,7 @@ components: description: A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter`` tool. There can be a maximum of 20 files associated with the tool. default: [] file_search: - type: object - properties: - vector_store_ids: - type: array - items: - type: string - maxItems: 1 - description: The ID of the [vector store](/docs/api-reference/vector-stores/object) attached to this assistant. There can be a maximum of 1 vector store attached to the assistant. + $ref: '#/components/schemas/ToolResourcesFileSearchIdsOnly' nullable: true description: A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. metadata: @@ -2031,6 +2171,72 @@ components: description: Represents an `assistant` that can call the model and use tools. 
AssistantResponseFormat: type: object + required: + - type + properties: + type: + type: string + discriminator: + propertyName: type + mapping: + text: '#/components/schemas/AssistantResponseFormatText' + json_object: '#/components/schemas/AssistantResponseFormatJsonObject' + json_schema: '#/components/schemas/AssistantResponseFormatJsonSchema' + AssistantResponseFormatJsonObject: + type: object + required: + - type + properties: + type: + type: string + enum: + - json_object + description: 'The type of response format being defined: `json_object`' + allOf: + - $ref: '#/components/schemas/AssistantResponseFormat' + AssistantResponseFormatJsonSchema: + type: object + required: + - type + - json_schema + properties: + type: + type: string + enum: + - json_schema + description: 'The type of response format being defined: `json_schema`' + json_schema: + type: object + properties: + description: + type: string + description: A description of what the response format is for, used by the model to determine how to respond in the format. + name: + type: string + description: The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. + schema: + $ref: '#/components/schemas/ResponseFormatJsonSchemaSchema' + strict: + type: boolean + nullable: true + description: Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the `schema` field. Only a subset of JSON Schema is supported when `strict` is `true`. To learn more, read the [Structured Outputs guide](/docs/guides/structured-outputs). 
+ default: false + required: + - name + allOf: + - $ref: '#/components/schemas/AssistantResponseFormat' + AssistantResponseFormatText: + type: object + required: + - type + properties: + type: + type: string + enum: + - text + description: 'The type of response format being defined: `text`' + allOf: + - $ref: '#/components/schemas/AssistantResponseFormat' AssistantToolDefinition: type: object required: @@ -2075,7 +2281,7 @@ components: minimum: 1 maximum: 50 description: |- - The maximum number of results the file search tool should output. The default is 20 for gpt-4* models and 5 for gpt-3.5-turbo. This number should be between 1 and 50 inclusive. + The maximum number of results the file search tool should output. The default is 20 for `gpt-4*` models and 5 for `gpt-3.5-turbo`. This number should be between 1 and 50 inclusive. Note that the file search tool may output fewer than `max_num_results` results. See the [file search tool documentation](/docs/assistants/tools/file-search/number-of-chunks-returned) for more information. description: Overrides for the file search tool. @@ -2106,27 +2312,19 @@ components: $ref: '#/components/schemas/FunctionObject' allOf: - $ref: '#/components/schemas/AssistantToolDefinition' - AssistantsApiResponseFormat: - type: object - properties: - type: - type: string - enum: - - text - - json_object - description: Must be one of `text` or `json_object`. - default: text - description: An object describing the expected output of the model. If `json_object` only `function` type `tools` are allowed to be passed to the Run. If `text` the model can return text or any value needed. 
AssistantsApiResponseFormatOption: anyOf: - type: string enum: - - none - auto - - $ref: '#/components/schemas/AssistantsApiResponseFormat' + - $ref: '#/components/schemas/ResponseFormatText' + - $ref: '#/components/schemas/ResponseFormatJsonObject' + - $ref: '#/components/schemas/ResponseFormatJsonSchema' description: |- Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs). + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. @@ -2167,92 +2365,431 @@ components: required: - name description: Specifies a tool the model should use. Use to force the model to call a specific tool. - AutoChunkingStrategyRequestParam: - type: object - required: - - type - properties: - type: - type: string - enum: - - auto - description: Always `auto`. - allOf: - - $ref: '#/components/schemas/FileChunkingStrategyRequestParam' - description: The default strategy. This strategy currently uses a `max_chunk_size_tokens` of `800` and `chunk_overlap_tokens` of `400`. 
- AutoChunkingStrategyResponseParam: - type: object - required: - - type - properties: - type: - type: string - enum: - - auto - allOf: - - $ref: '#/components/schemas/FileChunkingStrategyResponseParam' - Batch: + AuditLog: type: object required: - id - - object - - endpoint - - input_file_id - - completion_window - - status - - created_at + - type + - effective_at + - actor properties: id: type: string - object: - type: string - enum: - - batch - description: The object type, which is always `batch`. - endpoint: - type: string - description: The OpenAI API endpoint used by the batch. - errors: + description: The ID of this log. + type: + $ref: '#/components/schemas/AuditLogEventType' + effective_at: + type: integer + format: unixtime + description: The Unix timestamp (in seconds) of the event. + project: type: object properties: - object: + id: type: string - enum: - - list - description: The object type, which is always `list`. + description: The project ID. + name: + type: string + description: The project title. + description: The project that the action was scoped to. Absent for actions not scoped to projects. + actor: + $ref: '#/components/schemas/AuditLogActor' + api_key.created: + type: object + properties: + id: + type: string + description: The tracking ID of the API key. data: - type: array - items: - type: object - properties: - code: - type: string - description: An error code identifying the error type. - message: + type: object + properties: + scopes: + type: array + items: type: string - description: A human-readable message providing more details about the error. - param: + description: A list of scopes allowed for the API key, e.g. `["api.model.request"]` + description: The payload used to create the API key. + description: The details for events with this `type`. + api_key.updated: + type: object + properties: + id: + type: string + description: The tracking ID of the API key. 
+ changes_requested: + type: object + properties: + scopes: + type: array + items: type: string - nullable: true - description: The name of the parameter that caused the error, if applicable. - line: - type: integer - format: int32 - nullable: true - description: The line number of the input file where the error occurred, if applicable. - input_file_id: - type: string - description: The ID of the input file for the batch. - completion_window: - type: string - description: The time frame within which the batch should be processed. - status: - type: string - enum: - - validating - - failed - - in_progress - - finalizing + description: A list of scopes allowed for the API key, e.g. `["api.model.request"]` + description: The payload used to update the API key. + description: The details for events with this `type`. + api_key.deleted: + type: object + properties: + id: + type: string + description: The tracking ID of the API key. + description: The details for events with this `type`. + invite.sent: + type: object + properties: + id: + type: string + description: The ID of the invite. + data: + type: object + properties: + email: + type: string + description: The email invited to the organization. + role: + type: string + description: The role the email was invited to be. Is either `owner` or `member`. + description: The payload used to create the invite. + description: The details for events with this `type`. + invite.accepted: + type: object + properties: + id: + type: string + description: The ID of the invite. + description: The details for events with this `type`. + invite.deleted: + type: object + properties: + id: + type: string + description: The ID of the invite. + description: The details for events with this `type`. + login.failed: + type: object + properties: + error_code: + type: string + description: The error code of the failure. + error_message: + type: string + description: The error message of the failure. 
+ description: The details for events with this `type`. + logout.failed: + type: object + properties: + error_code: + type: string + description: The error code of the failure. + error_message: + type: string + description: The error message of the failure. + description: The details for events with this `type`. + organization.updated: + type: object + properties: + id: + type: string + description: The organization ID. + changes_requested: + type: object + properties: + title: + type: string + description: The organization title. + description: + type: string + description: The organization description. + name: + type: string + description: The organization name. + settings: + type: object + properties: + threads_ui_visibility: + type: string + description: Visibility of the threads page which shows messages created with the Assistants API and Playground. One of `ANY_ROLE`, `OWNERS`, or `NONE`. + usage_dashboard_visibility: + type: string + description: Visibility of the usage dashboard which shows activity and costs for your organization. One of `ANY_ROLE` or `OWNERS`. + description: The payload used to update the organization settings. + description: The details for events with this `type`. + project.created: + type: object + properties: + id: + type: string + description: The project ID. + data: + type: object + properties: + name: + type: string + description: The project name. + title: + type: string + description: The title of the project as seen on the dashboard. + description: The payload used to create the project. + description: The details for events with this `type`. + project.updated: + type: object + properties: + id: + type: string + description: The project ID. + changes_requested: + type: object + properties: + title: + type: string + description: The title of the project as seen on the dashboard. + description: The payload used to update the project. + description: The details for events with this `type`. 
+ project.archived: + type: object + properties: + id: + type: string + description: The project ID. + description: The details for events with this `type`. + service_account.created: + type: object + properties: + id: + type: string + description: The service account ID. + data: + type: object + properties: + role: + type: string + description: The role of the service account. Is either `owner` or `member`. + description: The payload used to create the service account. + description: The details for events with this `type`. + service_account.updated: + type: object + properties: + id: + type: string + description: The service account ID. + changes_requested: + type: object + properties: + role: + type: string + description: The role of the service account. Is either `owner` or `member`. + description: The payload used to updated the service account. + description: The details for events with this `type`. + service_account.deleted: + type: object + properties: + id: + type: string + description: The service account ID. + description: The details for events with this `type`. + user.added: + type: object + properties: + id: + type: string + description: The user ID. + data: + type: object + properties: + role: + type: string + description: The role of the user. Is either `owner` or `member`. + description: The payload used to add the user to the project. + description: The details for events with this `type`. + user.updated: + type: object + properties: + id: + type: string + description: The project ID. + changes_requested: + type: object + properties: + role: + type: string + description: The role of the user. Is either `owner` or `member`. + description: The payload used to update the user. + description: The details for events with this `type`. + user.deleted: + type: object + properties: + id: + type: string + description: The user ID. + description: The details for events with this `type`. 
+ description: A log of a user action or configuration change within this organization. + AuditLogActor: + type: object + properties: + type: + type: string + enum: + - session + - api_key + description: The type of actor. Is either `session` or `api_key`. + session: + $ref: '#/components/schemas/AuditLogActorSession' + api_key: + $ref: '#/components/schemas/AuditLogActorApiKey' + description: The actor who performed the audit logged action. + AuditLogActorApiKey: + type: object + properties: + id: + type: string + description: The tracking id of the API key. + type: + type: string + enum: + - user + - service_account + description: The type of API key. Can be either `user` or `service_account`. + user: + $ref: '#/components/schemas/AuditLogActorUser' + service_account: + $ref: '#/components/schemas/AuditLogActorServiceAccount' + description: The API Key used to perform the audit logged action. + AuditLogActorServiceAccount: + type: object + properties: + id: + type: string + description: The service account id. + description: The service account that performed the audit logged action. + AuditLogActorSession: + type: object + properties: + user: + $ref: '#/components/schemas/AuditLogActorUser' + ip_address: + type: string + description: The IP address from which the action was performed. + description: The session in which the audit logged action was performed. + AuditLogActorUser: + type: object + properties: + id: + type: string + description: The user id. + email: + type: string + description: The user email. + description: The user who performed the audit logged action. 
+ AuditLogEventType: + type: string + enum: + - api_key.created + - api_key.updated + - api_key.deleted + - invite.sent + - invite.accepted + - invite.deleted + - login.succeeded + - login.failed + - logout.succeeded + - logout.failed + - organization.updated + - project.created + - project.updated + - project.archived + - service_account.created + - service_account.updated + - service_account.deleted + - user.added + - user.updated + - user.deleted + description: The event type. + x-oaiExpandable: true + AutoChunkingStrategyRequestParam: + type: object + required: + - type + properties: + type: + type: string + enum: + - auto + description: Always `auto`. + allOf: + - $ref: '#/components/schemas/FileChunkingStrategyRequestParam' + description: The default strategy. This strategy currently uses a `max_chunk_size_tokens` of `800` and `chunk_overlap_tokens` of `400`. + AutoChunkingStrategyResponseParam: + type: object + required: + - type + properties: + type: + type: string + enum: + - auto + allOf: + - $ref: '#/components/schemas/FileChunkingStrategyResponseParam' + Batch: + type: object + required: + - id + - object + - endpoint + - input_file_id + - completion_window + - status + - created_at + properties: + id: + type: string + object: + type: string + enum: + - batch + description: The object type, which is always `batch`. + endpoint: + type: string + description: The OpenAI API endpoint used by the batch. + errors: + type: object + properties: + object: + type: string + enum: + - list + description: The object type, which is always `list`. + data: + type: array + items: + type: object + properties: + code: + type: string + description: An error code identifying the error type. + message: + type: string + description: A human-readable message providing more details about the error. + param: + type: string + nullable: true + description: The name of the parameter that caused the error, if applicable. 
+ line: + type: integer + format: int32 + nullable: true + description: The line number of the input file where the error occurred, if applicable. + input_file_id: + type: string + description: The ID of the input file for the batch. + completion_window: + type: string + description: The time frame within which the batch should be processed. + status: + type: string + enum: + - validating + - failed + - in_progress + - finalizing - completed - expired - cancelling @@ -2363,8 +2900,7 @@ components: description: An unique identifier for the OpenAI API request. Please include this request ID when contacting support. body: type: object - additionalProperties: - type: string + additionalProperties: {} description: The JSON body of the response x-oaiTypeLabel: map nullable: true @@ -2489,9 +3025,17 @@ components: - role properties: content: - type: string + anyOf: + - type: string + - type: array + items: + $ref: '#/components/schemas/ChatCompletionRequestAssistantMessageContentPart' nullable: true description: The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified. + refusal: + type: string + nullable: true + description: The refusal message by the assistant. 
role: type: string enum: @@ -2519,6 +3063,11 @@ components: deprecated: true allOf: - $ref: '#/components/schemas/ChatCompletionRequestMessage' + ChatCompletionRequestAssistantMessageContentPart: + anyOf: + - $ref: '#/components/schemas/ChatCompletionRequestMessageContentPartText' + - $ref: '#/components/schemas/ChatCompletionRequestMessageContentPartRefusal' + x-oaiExpandable: true ChatCompletionRequestFunctionMessage: type: object required: @@ -2558,11 +3107,6 @@ components: tool: '#/components/schemas/ChatCompletionRequestToolMessage' function: '#/components/schemas/ChatCompletionRequestFunctionMessage' x-oaiExpandable: true - ChatCompletionRequestMessageContentPart: - anyOf: - - $ref: '#/components/schemas/ChatCompletionRequestMessageContentPartText' - - $ref: '#/components/schemas/ChatCompletionRequestMessageContentPartImage' - x-oaiExpandable: true ChatCompletionRequestMessageContentPartImage: type: object required: @@ -2591,6 +3135,20 @@ components: default: auto required: - url + ChatCompletionRequestMessageContentPartRefusal: + type: object + required: + - type + - refusal + properties: + type: + type: string + enum: + - refusal + description: The type of the content part. + refusal: + type: string + description: The refusal message generated by the model. ChatCompletionRequestMessageContentPartText: type: object required: @@ -2612,7 +3170,11 @@ components: - role properties: content: - type: string + anyOf: + - type: string + - type: array + items: + $ref: '#/components/schemas/ChatCompletionRequestSystemMessageContentPart' description: The contents of the system message. role: type: string @@ -2624,6 +3186,11 @@ components: description: An optional name for the participant. Provides the model information to differentiate between participants of the same role. 
allOf: - $ref: '#/components/schemas/ChatCompletionRequestMessage' + ChatCompletionRequestSystemMessageContentPart: + type: object + allOf: + - $ref: '#/components/schemas/ChatCompletionRequestMessageContentPartText' + x-oaiExpandable: true ChatCompletionRequestToolMessage: type: object required: @@ -2637,13 +3204,22 @@ components: - tool description: The role of the messages author, in this case `tool`. content: - type: string + anyOf: + - type: string + - type: array + items: + $ref: '#/components/schemas/ChatCompletionRequestToolMessageContentPart' description: The contents of the tool message. tool_call_id: type: string description: Tool call that this message is responding to. allOf: - $ref: '#/components/schemas/ChatCompletionRequestMessage' + ChatCompletionRequestToolMessageContentPart: + type: object + allOf: + - $ref: '#/components/schemas/ChatCompletionRequestMessageContentPartText' + x-oaiExpandable: true ChatCompletionRequestUserMessage: type: object required: @@ -2655,7 +3231,7 @@ components: - type: string - type: array items: - $ref: '#/components/schemas/ChatCompletionRequestMessageContentPart' + $ref: '#/components/schemas/ChatCompletionRequestUserMessageContentPart' description: The contents of the user message. x-oaiExpandable: true role: @@ -2668,16 +3244,26 @@ components: description: An optional name for the participant. Provides the model information to differentiate between participants of the same role. allOf: - $ref: '#/components/schemas/ChatCompletionRequestMessage' + ChatCompletionRequestUserMessageContentPart: + anyOf: + - $ref: '#/components/schemas/ChatCompletionRequestMessageContentPartText' + - $ref: '#/components/schemas/ChatCompletionRequestMessageContentPartImage' + x-oaiExpandable: true ChatCompletionResponseMessage: type: object required: - content + - refusal - role properties: content: type: string nullable: true description: The contents of the message. 
+ refusal: + type: string + nullable: true + description: The refusal message generated by the model. tool_calls: $ref: '#/components/schemas/ChatCompletionMessageToolCallsItem' role: @@ -2746,6 +3332,10 @@ components: - assistant - tool description: The role of the author of this message. + refusal: + type: string + nullable: true + description: The refusal message generated by the model. description: A chat completion delta generated by streamed model responses. ChatCompletionTokenLogprob: type: object @@ -2831,12 +3421,93 @@ components: x-oaiExpandable: true ChatMessageContentPart: type: object - ChunkingStrategyRequestParam: - anyOf: - - $ref: '#/components/schemas/AutoChunkingStrategyRequestParam' - - $ref: '#/components/schemas/StaticChunkingStrategyRequestParam' - description: The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. + ChatResponseFormat: + type: object + required: + - type + properties: + type: + type: string + discriminator: + propertyName: type + mapping: + text: '#/components/schemas/ChatResponseFormatText' + json_object: '#/components/schemas/ChatResponseFormatJsonObject' + json_schema: '#/components/schemas/ChatResponseFormatJsonSchema' + ChatResponseFormatJsonObject: + type: object + required: + - type + properties: + type: + type: string + enum: + - json_object + description: 'The type of response format being defined: `json_object`' + allOf: + - $ref: '#/components/schemas/ChatResponseFormat' + ChatResponseFormatJsonSchema: + type: object + required: + - type + - json_schema + properties: + type: + type: string + enum: + - json_schema + description: 'The type of response format being defined: `json_schema`' + json_schema: + type: object + properties: + description: + type: string + description: A description of what the response format is for, used by the model to determine how to respond in the format. + name: + type: string + description: The name of the response format. 
Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. + schema: + $ref: '#/components/schemas/ResponseFormatJsonSchemaSchema' + strict: + type: boolean + nullable: true + description: Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the `schema` field. Only a subset of JSON Schema is supported when `strict` is `true`. To learn more, read the [Structured Outputs guide](/docs/guides/structured-outputs). + default: false + required: + - name + allOf: + - $ref: '#/components/schemas/ChatResponseFormat' + ChatResponseFormatText: + type: object + required: + - type + properties: + type: + type: string + enum: + - text + description: 'The type of response format being defined: `text`' + allOf: + - $ref: '#/components/schemas/ChatResponseFormat' + ChunkingStrategyRequestParam: + anyOf: + - $ref: '#/components/schemas/AutoChunkingStrategyRequestParam' + - $ref: '#/components/schemas/StaticChunkingStrategyRequestParam' + description: The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. x-oaiExpandable: true + CompleteUploadRequest: + type: object + required: + - part_ids + properties: + part_ids: + type: array + items: + type: string + description: The ordered list of Part IDs. + md5: + type: string + description: The optional md5 checksum for the file contents to verify if the bytes uploaded matches what you expect. CompletionUsage: type: object required: @@ -2868,7 +3539,10 @@ components: - type: string enum: - gpt-4o + - gpt-4o-2024-08-06 - gpt-4o-2024-05-13 + - gpt-4o-mini + - gpt-4o-mini-2024-07-18 - gpt-4-turbo - gpt-4-turbo-2024-04-09 - gpt-4-0125-preview @@ -2926,9 +3600,7 @@ components: description: A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. 
default: [] file_search: - anyOf: - - $ref: '#/components/schemas/CreateAssistantRequestToolResourcesFileSearchVectorStoreIdReferences' - - $ref: '#/components/schemas/CreateAssistantRequestToolResourcesFileSearchVectorStoreCreationHelpers' + $ref: '#/components/schemas/ToolResourcesFileSearch' nullable: true description: A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. metadata: @@ -2961,49 +3633,6 @@ components: oneOf: - $ref: '#/components/schemas/AssistantsApiResponseFormatOption' nullable: true - CreateAssistantRequestToolResourcesFileSearchBase: - type: object - CreateAssistantRequestToolResourcesFileSearchVectorStoreCreationHelpers: - type: object - properties: - vector_stores: - type: array - items: - type: object - properties: - file_ids: - type: array - items: - type: string - maxItems: 10000 - description: |- - A list of [file](/docs/api-reference/files) IDs to add to the vector store. There can be - a maximum of 10000 files in a vector store. - metadata: - type: object - additionalProperties: - type: string - description: |- - Set of 16 key-value pairs that can be attached to a vector store. This can be useful for - storing additional information about the vector store in a structured format. Keys can - be a maximum of 64 characters long and values can be a maxium of 512 characters long. - x-oaiTypeLabel: map - maxItems: 1 - description: |- - A helper to create a [vector store](/docs/api-reference/vector-stores/object) with - file_ids and attach it to this assistant. There can be a maximum of 1 vector store - attached to the assistant. 
- CreateAssistantRequestToolResourcesFileSearchVectorStoreIdReferences: - type: object - properties: - vector_store_ids: - type: array - items: - type: string - maxItems: 1 - description: |- - The [vector store](/docs/api-reference/vector-stores/object) attached to this assistant. - There can be a maximum of 1 vector store attached to the assistant. CreateChatCompletionFunctionResponse: type: object required: @@ -3080,6 +3709,10 @@ components: enum: - gpt-4o - gpt-4o-2024-05-13 + - gpt-4o-2024-08-06 + - chatgpt-4o-latest + - gpt-4o-mini + - gpt-4o-mini-2024-07-18 - gpt-4-turbo - gpt-4-turbo-2024-04-09 - gpt-4-0125-preview @@ -3164,21 +3797,17 @@ components: [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details) default: 0 response_format: - type: object - properties: - type: - type: string - enum: - - text - - json_object - description: Must be one of `text` or `json_object`. - default: text + allOf: + - $ref: '#/components/schemas/ChatResponseFormat' description: |- - An object specifying the format that the model must output. Compatible with [GPT-4 Turbo](/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + An object specifying the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4o mini](/docs/models/gpt-4o-mini), [GPT-4 Turbo](/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs). Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. 
Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + x-oaiExpandable: true seed: type: integer format: int64 @@ -3187,6 +3816,20 @@ components: This feature is in Beta. If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. + service_tier: + type: string + enum: + - auto + - default + nullable: true + description: |- + Specifies the latency tier to use for processing the request. This parameter is relevant for customers subscribed to the scale tier service: + - If set to 'auto', the system will utilize scale tier credits until they are exhausted. + - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee. + - When not set, the default behavior is 'auto'. + + When this parameter is set, the response body will include the `service_tier` utilized. + default: null stop: anyOf: - type: string @@ -3316,8 +3959,15 @@ components: $ref: '#/components/schemas/ChatCompletionTokenLogprob' nullable: true description: A list of message content tokens with log probability information. + refusal: + type: array + items: + $ref: '#/components/schemas/ChatCompletionTokenLogprob' + nullable: true + description: A list of message refusal tokens with log probability information. required: - content + - refusal nullable: true description: Log probability information for the choice. 
required: @@ -3333,6 +3983,13 @@ components: model: type: string description: The model used for the chat completion. + service_tier: + type: string + enum: + - scale + - default + nullable: true + description: The service tier used for processing the request. This field is only included if the `service_tier` parameter is specified in the request. system_fingerprint: type: string description: |- @@ -3375,8 +4032,15 @@ components: $ref: '#/components/schemas/ChatCompletionTokenLogprob' nullable: true description: A list of message content tokens with log probability information. + refusal: + type: array + items: + $ref: '#/components/schemas/ChatCompletionTokenLogprob' + nullable: true + description: A list of message refusal tokens with log probability information. required: - content + - refusal nullable: true description: Log probability information for the choice. finish_reason: @@ -3411,6 +4075,13 @@ components: model: type: string description: The model to generate the completion. + service_tier: + type: string + enum: + - scale + - default + nullable: true + description: The service tier used for processing the request. This field is only included if the `service_tier` parameter is specified in the request. system_fingerprint: type: string description: |- @@ -3833,9 +4504,10 @@ components: - babbage-002 - davinci-002 - gpt-3.5-turbo + - gpt-4o-mini description: |- The name of the model to fine-tune. You can select one of the - [supported models](/docs/guides/fine-tuning/what-models-can-be-fine-tuned). + [supported models](/docs/guides/fine-tuning/which-models-can-be-fine-tuned). x-oaiTypeLabel: string training_file: type: string @@ -3850,41 +4522,8 @@ components: See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. hyperparameters: - type: object - properties: - batch_size: - anyOf: - - type: string - enum: - - auto - - type: integer - format: int32 - description: |- - Number of examples in each batch. 
A larger batch size means that model parameters - are updated less frequently, but with lower variance. - default: auto - learning_rate_multiplier: - anyOf: - - type: string - enum: - - auto - - type: number - format: float - description: |- - Scaling factor for the learning rate. A smaller learning rate may be useful to avoid - overfitting. - default: auto - n_epochs: - anyOf: - - type: string - enum: - - auto - - type: integer - format: int32 - description: |- - The number of epochs to train the model for. An epoch refers to one full cycle - through the training dataset. - default: auto + allOf: + - $ref: '#/components/schemas/CreateFineTuningJobRequestHyperparameters' description: The hyperparameters used for the fine-tuning job. suffix: type: string @@ -3894,7 +4533,7 @@ components: description: |- A string of up to 18 characters that will be added to your fine-tuned model name. - For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`. + For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. default: null validation_file: type: string @@ -3925,6 +4564,53 @@ components: description: |- The seed controls the reproducibility of the job. Passing in the same seed and job parameters should produce the same results, but may differ in rare cases. If a seed is not specified, one will be generated for you. + CreateFineTuningJobRequestHyperparameters: + type: object + properties: + batch_size: + anyOf: + - $ref: '#/components/schemas/CreateFineTuningJobRequestHyperparametersBatchSizeChoiceEnum' + - type: integer + format: int32 + minimum: 1 + maximum: 256 + description: |- + Number of examples in each batch. A larger batch size means that model parameters + are updated less frequently, but with lower variance. 
+ default: auto + learning_rate_multiplier: + anyOf: + - $ref: '#/components/schemas/CreateFineTuningJobRequestHyperparametersLearningRateMultiplierChoiceEnum' + - type: number + format: float + minimum: 0 + description: |- + Scaling factor for the learning rate. A smaller learning rate may be useful to avoid + overfitting. + default: auto + n_epochs: + anyOf: + - $ref: '#/components/schemas/CreateFineTuningJobRequestHyperparametersNEpochsChoiceEnum' + - type: integer + format: int32 + minimum: 1 + maximum: 50 + description: |- + The number of epochs to train the model for. An epoch refers to one full cycle + through the training dataset. + default: auto + CreateFineTuningJobRequestHyperparametersBatchSizeChoiceEnum: + type: string + enum: + - auto + CreateFineTuningJobRequestHyperparametersLearningRateMultiplierChoiceEnum: + type: string + enum: + - auto + CreateFineTuningJobRequestHyperparametersNEpochsChoiceEnum: + type: string + enum: + - auto CreateFineTuningJobRequestIntegrations: type: array items: @@ -4359,7 +5045,10 @@ components: - type: string enum: - gpt-4o + - gpt-4o-2024-08-06 - gpt-4o-2024-05-13 + - gpt-4o-mini + - gpt-4o-mini-2024-07-18 - gpt-4-turbo - gpt-4-turbo-2024-04-09 - gpt-4-0125-preview @@ -4535,7 +5224,10 @@ components: - type: string enum: - gpt-4o + - gpt-4o-2024-08-06 - gpt-4o-2024-05-13 + - gpt-4o-mini + - gpt-4o-mini-2024-07-18 - gpt-4-turbo - gpt-4-turbo-2024-04-09 - gpt-4-0125-preview @@ -4581,14 +5273,7 @@ components: description: A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. default: [] file_search: - type: object - properties: - vector_store_ids: - type: array - items: - type: string - maxItems: 1 - description: The ID of the [vector store](/docs/api-reference/vector-stores/object) attached to this assistant. There can be a maximum of 1 vector store attached to the assistant. 
+ $ref: '#/components/schemas/ToolResourcesFileSearchIdsOnly' nullable: true description: A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. metadata: @@ -4676,9 +5361,7 @@ components: description: A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. default: [] file_search: - anyOf: - - $ref: '#/components/schemas/CreateThreadRequestToolResourcesFileSearchVectorStoreIdReferences' - - $ref: '#/components/schemas/CreateThreadRequestToolResourcesFileSearchVectorStoreCreationHelpers' + $ref: '#/components/schemas/ToolResourcesFileSearch' nullable: true description: A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. metadata: @@ -4900,6 +5583,38 @@ components: items: $ref: '#/components/schemas/TranscriptionSegment' description: Segments of the translated text and their corresponding details. + CreateUploadRequest: + type: object + required: + - filename + - purpose + - bytes + - mime_type + properties: + filename: + type: string + description: The name of the file to upload. + purpose: + type: string + enum: + - assistants + - batch + - fine-tune + - vision + description: |- + The intended purpose of the uploaded file. + + See the [documentation on File purposes](/docs/api-reference/files/create#files-create-purpose). + bytes: + type: integer + format: int32 + description: The number of bytes in the file you are uploading. + mime_type: + type: string + description: |- + The MIME type of the file. 
+ + This must fall within the supported MIME types for your file purpose. See the supported MIME types for assistants and vision. CreateVectorStoreFileBatchRequest: type: object required: @@ -4951,6 +5666,17 @@ components: nullable: true description: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. x-oaiTypeLabel: map + DefaultProjectErrorResponse: + type: object + required: + - code + - message + properties: + code: + type: integer + format: int32 + message: + type: string DeleteAssistantResponse: type: object required: @@ -5126,8 +5852,8 @@ components: discriminator: propertyName: type mapping: - auto: '#/components/schemas/AutoChunkingStrategyResponseParam' other: '#/components/schemas/OtherChunkingStrategyResponseParam' + auto: '#/components/schemas/AutoChunkingStrategyResponseParam' FineTuneChatCompletionRequestAssistantMessage: type: object allOf: @@ -5227,21 +5953,8 @@ components: nullable: true description: The Unix timestamp (in seconds) for when the fine-tuning job was finished. The value will be null if the fine-tuning job is still running. hyperparameters: - type: object - properties: - n_epochs: - anyOf: - - type: string - enum: - - auto - - type: integer - format: int32 - description: |- - The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset. - "auto" decides the optimal number of epochs based on the size of the dataset. If setting the number manually, we support any number between 1 and 50 epochs. - default: auto - required: - - n_epochs + allOf: + - $ref: '#/components/schemas/FineTuningJobHyperparameters' description: The hyperparameters used for the fine-tuning job. See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. 
model: type: string @@ -5383,6 +6096,32 @@ components: enum: - fine_tuning.job.event description: Fine-tuning job event object + FineTuningJobHyperparameters: + type: object + required: + - n_epochs + properties: + n_epochs: + anyOf: + - $ref: '#/components/schemas/FineTuningJobHyperparametersNEpochsChoiceEnum' + - type: integer + format: int32 + description: |- + The number of epochs to train the model for. An epoch refers to one full cycle + through the training dataset. + default: auto + FineTuningJobHyperparametersBatchSizeChoiceEnum: + type: string + enum: + - auto + FineTuningJobHyperparametersLearningRateMultiplierChoiceEnum: + type: string + enum: + - auto + FineTuningJobHyperparametersNEpochsChoiceEnum: + type: string + enum: + - auto FineTuningJobIntegrationsItem: type: array items: @@ -5443,6 +6182,11 @@ components: description: The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. parameters: $ref: '#/components/schemas/FunctionParameters' + strict: + type: boolean + nullable: true + description: Whether to enable strict schema adherence when generating the function call. If set to true, the model will follow the exact schema defined in the `parameters` field. Only a subset of JSON Schema is supported when `strict` is `true`. Learn more about Structured Outputs in the [function calling guide](docs/guides/function-calling). 
+ default: false FunctionParameters: type: object additionalProperties: {} @@ -5478,35 +6222,161 @@ components: type: array items: $ref: '#/components/schemas/Image' - ListAssistantsResponse: + Invite: type: object required: - object - - data - - first_id - - last_id - - has_more + - id + - email + - role + - status + - invited_at + - expires_at properties: object: type: string enum: - - list - data: - type: array - items: - $ref: '#/components/schemas/AssistantObject' - first_id: + - organization.invite + description: The object type, which is always `organization.invite` + id: type: string - last_id: + description: The identifier, which can be referenced in API endpoints + email: type: string - has_more: - type: boolean - ListBatchesResponse: - type: object - required: - - data - - has_more - - object + description: The email address of the individual to whom the invite was sent + role: + type: string + enum: + - owner + - reader + description: '`owner` or `reader`' + status: + type: string + enum: + - accepted + - expired + - pending + description: '`accepted`,`expired`, or `pending`' + invited_at: + type: integer + format: unixtime + description: The Unix timestamp (in seconds) of when the invite was sent. + expires_at: + type: integer + format: unixtime + description: The Unix timestamp (in seconds) of when the invite expires. + accepted_at: + type: integer + format: unixtime + description: The Unix timestamp (in seconds) of when the invite was accepted. + description: Represents an individual `invite` to the organization. 
+ InviteDeleteResponse: + type: object + required: + - object + - id + - deleted + properties: + object: + type: string + enum: + - organization.invite.deleted + description: The object type, which is always `organization.invite.deleted` + id: + type: string + deleted: + type: boolean + InviteListResponse: + type: object + required: + - object + - data + properties: + object: + type: string + enum: + - list + description: The object type, which is always `list` + data: + type: array + items: + $ref: '#/components/schemas/Invite' + first_id: + type: string + description: The first `invite_id` in the retrieved `list` + last_id: + type: string + description: The last `invite_id` in the retrieved `list` + has_more: + type: boolean + description: The `has_more` property is used for pagination to indicate there are additional results. + InviteRequest: + type: object + required: + - email + - role + properties: + email: + type: string + description: Send an email to this address + role: + type: string + enum: + - reader + - owner + description: '`owner` or `reader`' + ListAssistantsResponse: + type: object + required: + - object + - data + - first_id + - last_id + - has_more + properties: + object: + type: string + enum: + - list + data: + type: array + items: + $ref: '#/components/schemas/AssistantObject' + first_id: + type: string + last_id: + type: string + has_more: + type: boolean + ListAuditLogsResponse: + type: object + required: + - object + - data + - first_id + - last_id + - has_more + properties: + object: + type: string + enum: + - list + data: + type: array + items: + $ref: '#/components/schemas/AuditLog' + first_id: + type: string + last_id: + type: string + has_more: + type: boolean + ListBatchesResponse: + type: object + required: + - data + - has_more + - object properties: data: type: array @@ -5610,13 +6480,6 @@ components: type: array items: $ref: '#/components/schemas/Model' - ListOrder: - anyOf: - - type: string - - type: string - enum: - - asc - - 
desc ListPaginatedFineTuningJobsResponse: type: object required: @@ -5822,6 +6685,22 @@ components: allOf: - $ref: '#/components/schemas/MessageContent' description: References an image URL in the content of a message. + MessageContentRefusalObject: + type: object + required: + - type + - refusal + properties: + type: + type: string + enum: + - refusal + description: Always `refusal`. + refusal: + type: string + allOf: + - $ref: '#/components/schemas/MessageContent' + description: The refusal content generated by the assistant. MessageContentTextAnnotationsFileCitationObject: type: object required: @@ -5949,6 +6828,7 @@ components: image_file: '#/components/schemas/MessageDeltaContentImageFileObject' image_url: '#/components/schemas/MessageDeltaContentImageUrlObject' text: '#/components/schemas/MessageDeltaContentTextObject' + refusal: '#/components/schemas/MessageDeltaContentRefusalObject' description: Represents a single piece of incremental content in an Assistants API streaming response. MessageDeltaContentImageFileObject: type: object @@ -6015,6 +6895,26 @@ components: allOf: - $ref: '#/components/schemas/MessageDeltaContent' description: References an image URL in the content of a message. + MessageDeltaContentRefusalObject: + type: object + required: + - index + - type + properties: + index: + type: integer + format: int32 + description: The index of the refusal part in the message. + type: + type: string + enum: + - refusal + description: Always `refusal`. + refusal: + type: string + allOf: + - $ref: '#/components/schemas/MessageDeltaContent' + description: The refusal content that is part of a message. MessageDeltaContentTextAnnotationsFileCitationObject: type: object required: @@ -6362,14 +7262,7 @@ components: description: Overrides the list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. 
default: [] file_search: - type: object - properties: - vector_store_ids: - type: array - items: - type: string - maxItems: 1 - description: Overrides the [vector store](/docs/api-reference/vector-stores/object) attached to this assistant. There can be a maximum of 1 vector store attached to the assistant. + $ref: '#/components/schemas/ToolResourcesFileSearchIdsOnly' nullable: true description: A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. metadata: @@ -6439,14 +7332,7 @@ components: description: A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. default: [] file_search: - type: object - properties: - vector_store_ids: - type: array - items: - type: string - maxItems: 1 - description: The [vector store](/docs/api-reference/vector-stores/object) attached to this thread. There can be a maximum of 1 vector store attached to the thread. + $ref: '#/components/schemas/ToolResourcesFileSearchIdsOnly' nullable: true description: A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. metadata: @@ -6456,6 +7342,18 @@ components: nullable: true description: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. 
x-oaiTypeLabel: map + OmniTypedResponseFormat: + type: object + required: + - type + properties: + type: + type: string + discriminator: + propertyName: type + mapping: + json_object: '#/components/schemas/ResponseFormatJsonObject' + json_schema: '#/components/schemas/ResponseFormatJsonSchema' OpenAIFile: type: object required: @@ -6485,33 +7383,473 @@ components: object: type: string enum: - - file - description: The object type, which is always `file`. - purpose: + - file + description: The object type, which is always `file`. + purpose: + type: string + enum: + - assistants + - assistants_output + - batch + - batch_output + - fine-tune + - fine-tune-results + - vision + description: The intended purpose of the file. Supported values are `assistants`, `assistants_output`, `batch`, `batch_output`, `fine-tune`, `fine-tune-results` and `vision`. + status: + type: string + enum: + - uploaded + - processed + - error + description: Deprecated. The current status of the file, which can be either `uploaded`, `processed`, or `error`. + deprecated: true + status_details: + type: string + description: Deprecated. For details on why a fine-tuning training file failed validation, see the `error` field on `fine_tuning.job`. + deprecated: true + description: The `File` object represents a document that has been uploaded to OpenAI. + OtherChunkingStrategyResponseParam: + type: object + required: + - type + properties: + type: + type: string + enum: + - other + description: Always `other`. + allOf: + - $ref: '#/components/schemas/FileChunkingStrategyResponseParam' + description: This is returned when the chunking strategy is unknown. Typically, this is because the file was indexed before the `chunking_strategy` concept was introduced in the API. 
+ Project: + type: object + required: + - id + - object + - name + - created_at + - status + properties: + id: + type: string + description: The identifier, which can be referenced in API endpoints + object: + type: string + enum: + - organization.project + description: The object type, which is always `organization.project` + name: + type: string + description: The name of the project. This appears in reporting. + created_at: + type: integer + format: unixtime + description: The Unix timestamp (in seconds) of when the project was created. + archived_at: + type: integer + format: unixtime + nullable: true + description: The Unix timestamp (in seconds) of when the project was archived or `null`. + status: + type: string + enum: + - active + - archived + description: '`active` or `archived`' + description: Represents an individual project. + ProjectApiKey: + type: object + required: + - object + - redacted_value + - name + - created_at + - id + - owner + properties: + object: + type: string + enum: + - organization.project.api_key + description: The object type, which is always `organization.project.api_key` + redacted_value: + type: string + description: The redacted value of the API key + name: + type: string + description: The name of the API key + created_at: + type: integer + format: unixtime + description: The Unix timestamp (in seconds) of when the API key was created + id: + type: string + description: The identifier, which can be referenced in API endpoints + owner: + type: object + properties: + type: + type: string + enum: + - user + - service_account + description: '`user` or `service_account`' + user: + $ref: '#/components/schemas/ProjectUser' + service_account: + $ref: '#/components/schemas/ProjectServiceAccount' + description: Represents an individual API key in a project. 
+ ProjectApiKeyDeleteResponse: + type: object + required: + - object + - id + - deleted + properties: + object: + type: string + enum: + - organization.project.api_key.deleted + id: + type: string + deleted: + type: boolean + ProjectApiKeyListResponse: + type: object + required: + - object + - data + - first_id + - last_id + - has_more + properties: + object: + type: string + enum: + - list + data: + type: array + items: + $ref: '#/components/schemas/ProjectApiKey' + first_id: + type: string + last_id: + type: string + has_more: + type: boolean + ProjectCreateRequest: + type: object + required: + - name + properties: + name: + type: string + description: The friendly name of the project, this name appears in reports. + ProjectListResponse: + type: object + required: + - object + - data + - first_id + - last_id + - has_more + properties: + object: + type: string + enum: + - list + data: + type: array + items: + $ref: '#/components/schemas/Project' + first_id: + type: string + last_id: + type: string + has_more: + type: boolean + ProjectServiceAccount: + type: object + required: + - object + - id + - name + - role + - created_at + properties: + object: + type: string + enum: + - organization.project.service_account + description: The object type, which is always `organization.project.service_account` + id: + type: string + description: The identifier, which can be referenced in API endpoints + name: + type: string + description: The name of the service account + role: + type: string + enum: + - owner + - member + description: '`owner` or `member`' + created_at: + type: integer + format: unixtime + description: The Unix timestamp (in seconds) of when the service account was created + description: Represents an individual service account in a project. 
+ ProjectServiceAccountApiKey: + type: object + required: + - object + - value + - name + - created_at + - id + properties: + object: + type: string + enum: + - organization.project.service_account.api_key + description: The object type, which is always `organization.project.service_account.api_key` + value: + type: string + name: + type: string + created_at: + type: integer + format: unixtime + id: + type: string + ProjectServiceAccountCreateRequest: + type: object + required: + - name + properties: + name: + type: string + description: The name of the service account being created. + ProjectServiceAccountCreateResponse: + type: object + required: + - object + - id + - name + - role + - created_at + - api_key + properties: + object: + type: string + enum: + - organization.project.service_account + id: + type: string + name: + type: string + role: + type: string + enum: + - member + description: Service accounts can only have one role of type `member` + created_at: + type: integer + format: unixtime + api_key: + $ref: '#/components/schemas/ProjectServiceAccountApiKey' + ProjectServiceAccountDeleteResponse: + type: object + required: + - object + - id + - deleted + properties: + object: + type: string + enum: + - organization.project.service_account.deleted + id: + type: string + deleted: + type: boolean + ProjectServiceAccountListResponse: + type: object + required: + - object + - data + - first_id + - last_id + - has_more + properties: + object: + type: string + enum: + - list + data: + type: array + items: + $ref: '#/components/schemas/ProjectServiceAccount' + first_id: + type: string + last_id: + type: string + has_more: + type: boolean + ProjectUpdateRequest: + type: object + required: + - name + properties: + name: + type: string + description: The updated name of the project, this name appears in reports. 
+ ProjectUser: + type: object + required: + - object + - id + - name + - email + - role + - added_at + properties: + object: + type: string + enum: + - organization.project.user + description: The object type, which is always `organization.project.user` + id: + type: string + description: The identifier, which can be referenced in API endpoints + name: + type: string + description: The name of the user + email: + type: string + description: The email address of the user + role: + type: string + enum: + - owner + - member + description: '`owner` or `member`' + added_at: + type: integer + format: unixtime + description: The Unix timestamp (in seconds) of when the project was added. + description: Represents an individual user in a project. + ProjectUserCreateRequest: + type: object + required: + - user_id + - role + properties: + user_id: + type: string + description: The ID of the user. + role: + type: string + enum: + - owner + - member + description: '`owner` or `member`' + ProjectUserDeleteResponse: + type: object + required: + - object + - id + - deleted + properties: + object: + type: string + enum: + - organization.project.user.deleted + id: + type: string + deleted: + type: boolean + ProjectUserListResponse: + type: object + required: + - object + - data + - first_id + - last_id + - has_more + properties: + object: + type: string + data: + type: array + items: + $ref: '#/components/schemas/ProjectUser' + first_id: + type: string + last_id: + type: string + has_more: + type: boolean + ProjectUserUpdateRequest: + type: object + required: + - role + properties: + role: + type: string + enum: + - owner + - member + description: '`owner` or `member`' + ResponseFormatJsonObject: + type: object + required: + - type + properties: + type: type: string enum: - - assistants - - assistants_output - - batch - - batch_output - - fine-tune - - fine-tune-results - - vision - description: The intended purpose of the file. 
Supported values are `assistants`, `assistants_output`, `batch`, `batch_output`, `fine-tune`, `fine-tune-results` and `vision`. - status: + - json_object + description: 'The type of response format being defined: `json_object`' + allOf: + - $ref: '#/components/schemas/OmniTypedResponseFormat' + ResponseFormatJsonSchema: + type: object + required: + - type + - json_schema + properties: + type: type: string enum: - - uploaded - - processed - - error - description: Deprecated. The current status of the file, which can be either `uploaded`, `processed`, or `error`. - deprecated: true - status_details: - type: string - description: Deprecated. For details on why a fine-tuning training file failed validation, see the `error` field on `fine_tuning.job`. - deprecated: true - description: The `File` object represents a document that has been uploaded to OpenAI. - OtherChunkingStrategyResponseParam: + - json_schema + description: 'The type of response format being defined: `json_schema`' + json_schema: + type: object + properties: + description: + type: string + description: A description of what the response format is for, used by the model to determine how to respond in the format. + name: + type: string + description: The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. + schema: + $ref: '#/components/schemas/ResponseFormatJsonSchemaSchema' + strict: + type: boolean + nullable: true + description: Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the `schema` field. Only a subset of JSON Schema is supported when `strict` is `true`. To learn more, read the [Structured Outputs guide](/docs/guides/structured-outputs). 
+ default: false + required: + - name + allOf: + - $ref: '#/components/schemas/OmniTypedResponseFormat' + ResponseFormatJsonSchemaSchema: + type: object + additionalProperties: {} + description: The schema for the response format, described as a JSON Schema object. + ResponseFormatText: type: object required: - type @@ -6519,11 +7857,10 @@ components: type: type: string enum: - - other - description: Always `other`. + - text + description: 'The type of response format being defined: `text`' allOf: - - $ref: '#/components/schemas/FileChunkingStrategyResponseParam' - description: This is returned when the chunking strategy is unknown. Typically, this is because the file was indexed before the `chunking_strategy` concept was introduced in the API. + - $ref: '#/components/schemas/OmniTypedResponseFormat' RunCompletionUsage: type: object required: @@ -6937,8 +8274,7 @@ components: description: The type of tool call. This is always going to be `file_search` for this type of tool call. file_search: type: object - additionalProperties: - type: string + additionalProperties: {} description: For now, this is always going to be an empty object. x-oaiTypeLabel: map allOf: @@ -7136,8 +8472,7 @@ components: description: The type of tool call. This is always going to be `file_search` for this type of tool call. file_search: type: object - additionalProperties: - type: string + additionalProperties: {} description: For now, this is always going to be an empty object. x-oaiTypeLabel: map allOf: @@ -7489,6 +8824,97 @@ components: description: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. x-oaiTypeLabel: map description: Represents a thread that contains [messages](/docs/api-reference/messages). 
+ ToolResourcesFileSearch: + type: object + properties: + vector_store_ids: + type: array + items: + type: string + maxItems: 1 + description: |- + The [vector store](/docs/api-reference/vector-stores/object) attached to this assistant. + There can be a maximum of 1 vector store attached to the assistant. + vector_stores: + type: array + items: + type: object + properties: + file_ids: + type: array + items: + type: string + maxItems: 10000 + description: |- + A list of [file](/docs/api-reference/files) IDs to add to the vector store. There can be + a maximum of 10000 files in a vector store. + chunking_strategy: + anyOf: + - $ref: '#/components/schemas/AutoChunkingStrategyRequestParam' + - $ref: '#/components/schemas/StaticChunkingStrategyRequestParam' + description: The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. Only applicable if `file_ids` is non-empty. + x-oaiExpandable: true + metadata: + type: object + additionalProperties: + type: string + description: |- + Set of 16 key-value pairs that can be attached to a vector store. This can be useful for + storing additional information about the vector store in a structured format. Keys can + be a maximum of 64 characters long and values can be a maxium of 512 characters long. + x-oaiTypeLabel: map + maxItems: 1 + description: |- + A helper to create a [vector store](/docs/api-reference/vector-stores/object) with + file_ids and attach it to this assistant. There can be a maximum of 1 vector store + attached to the assistant. + ToolResourcesFileSearchIdsOnly: + type: object + properties: + vector_store_ids: + type: array + items: + type: string + maxItems: 1 + description: |- + The [vector store](/docs/api-reference/vector-stores/object) attached to this assistant. + There can be a maximum of 1 vector store attached to the assistant. 
+ ToolResourcesFileSearchVectorStoreCreationHelpers: + type: object + properties: + vector_stores: + type: array + items: + type: object + properties: + file_ids: + type: array + items: + type: string + maxItems: 10000 + description: |- + A list of [file](/docs/api-reference/files) IDs to add to the vector store. There can be + a maximum of 10000 files in a vector store. + chunking_strategy: + anyOf: + - $ref: '#/components/schemas/AutoChunkingStrategyRequestParam' + - $ref: '#/components/schemas/StaticChunkingStrategyRequestParam' + description: The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. Only applicable if `file_ids` is non-empty. + x-oaiExpandable: true + metadata: + type: object + additionalProperties: + type: string + description: |- + Set of 16 key-value pairs that can be attached to a vector store. This can be useful for + storing additional information about the vector store in a structured format. Keys can + be a maximum of 64 characters long and values can be a maxium of 512 characters long. + x-oaiTypeLabel: map + maxItems: 1 + description: |- + A helper to create a [vector store](/docs/api-reference/vector-stores/object) with + file_ids and attach it to this assistant. There can be a maximum of 1 vector store + attached to the assistant. TranscriptionSegment: type: object required: @@ -7599,6 +9025,166 @@ components: nullable: true description: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. x-oaiTypeLabel: map + Upload: + type: object + required: + - id + - created_at + - filename + - bytes + - purpose + - status + - expires_at + properties: + id: + type: string + description: The Upload unique identifier, which can be referenced in API endpoints. 
+ created_at: + type: integer + format: unixtime + description: The Unix timestamp (in seconds) for when the Upload was created. + filename: + type: string + description: The name of the file to be uploaded. + bytes: + type: integer + format: int32 + description: The intended number of bytes to be uploaded. + purpose: + type: string + description: The intended purpose of the file. [Please refer here](/docs/api-reference/files/object#files/object-purpose) for acceptable values. + status: + type: string + enum: + - pending + - completed + - cancelled + - expired + description: The status of the Upload. + expires_at: + type: integer + format: unixtime + description: The Unix timestamp (in seconds) for when the Upload was created. + object: + type: string + enum: + - upload + description: The object type, which is always "upload". + file: + type: object + allOf: + - $ref: '#/components/schemas/OpenAIFile' + nullable: true + description: The ready File object after the Upload is completed. + description: The Upload object can accept byte chunks in the form of Parts. + UploadPart: + type: object + required: + - id + - created_at + - upload_id + - object + properties: + id: + type: string + description: The upload Part unique identifier, which can be referenced in API endpoints. + created_at: + type: integer + format: unixtime + description: The Unix timestamp (in seconds) for when the Part was created. + upload_id: + type: string + description: The ID of the Upload object that this Part was added to. + object: + type: string + enum: + - upload.part + description: The object type, which is always `upload.part`. + description: The upload Part represents a chunk of bytes we can add to an Upload object. 
+ User: + type: object + required: + - object + - id + - name + - email + - role + - added_at + properties: + object: + type: string + enum: + - organization.user + description: The object type, which is always `organization.user` + id: + type: string + description: The identifier, which can be referenced in API endpoints + name: + type: string + description: The name of the user + email: + type: string + description: The email address of the user + role: + type: string + enum: + - owner + - reader + description: '`owner` or `reader`' + added_at: + type: integer + format: unixtime + description: The Unix timestamp (in seconds) of when the user was added. + description: Represents an individual `user` within an organization. + UserDeleteResponse: + type: object + required: + - object + - id + - deleted + properties: + object: + type: string + enum: + - organization.user.deleted + id: + type: string + deleted: + type: boolean + UserListResponse: + type: object + required: + - object + - data + - first_id + - last_id + - has_more + properties: + object: + type: string + enum: + - list + data: + type: array + items: + $ref: '#/components/schemas/User' + first_id: + type: string + last_id: + type: string + has_more: + type: boolean + UserRoleUpdateRequest: + type: object + required: + - role + properties: + role: + type: string + enum: + - owner + - reader + description: '`owner` or `reader`' VectorStoreExpirationAfter: type: object required: @@ -7724,10 +9310,9 @@ components: code: type: string enum: - - internal_error - - file_not_found - - parsing_error - - unhandled_mime_type + - server_error + - unsupported_file + - invalid_file description: One of `server_error` or `rate_limit_exceeded`. 
message: type: string diff --git a/.scripts/Edit-Deserialization.ps1 b/.scripts/Edit-Deserialization.ps1 index 4b06ed331..04ef67266 100644 --- a/.scripts/Edit-Deserialization.ps1 +++ b/.scripts/Edit-Deserialization.ps1 @@ -4,7 +4,7 @@ $generatedModelFolder = Join-Path $repoRoot .dotnet\src\Generated\Models $files = Get-ChildItem -Path $generatedModelFolder -Filter "*Serialization.cs" $editedFilesCount = 0 - + foreach ($file in $files) { $statusText = "{0:D3}/{1:D3} : Processing codegen fixup for response deserialization..." -f $editedFilesCount, $files.Count $percentComplete = [math]::Round(($editedFilesCount / $files.Count) * 100) @@ -17,4 +17,5 @@ foreach ($file in $files) { $editedFilesCount++ } -Write-Progress -Activity "Complete" -PercentComplete 100 +Write-Progress -Activity "Editing" -Status "Complete" -Completed +Write-Output "Complete: deserialization edited." \ No newline at end of file diff --git a/.scripts/Export-API.ps1 b/.scripts/Export-API.ps1 index d45b038df..2864817c8 100644 --- a/.scripts/Export-API.ps1 +++ b/.scripts/Export-API.ps1 @@ -19,10 +19,19 @@ Write-Output "" $net80ref = Get-ChildItem -Recurse ` -Path "$($env:ProgramFiles)\dotnet\packs\Microsoft.NETCore.App.Ref" ` -Include "net8.0" | Select-Object -Last 1 -$systemClientmodelRef = "$($env:UserProfile)\.nuget\packages\system.clientmodel\1.1.0-beta.7\lib\netstandard2.0" -$systemMemoryDataRef = "$($env:UserProfile)\.nuget\packages\system.memory.data\1.0.2\lib\netstandard2.0" -$systemDiagnosticsDiagnosticSourceRef = "$($env:UserProfile)\.nuget\packages\system.diagnostics.diagnosticsource\8.0.1\lib\netstandard2.0" -$microsoftBclAsyncinterfacesRef = "$($env:UserProfile)\.nuget\packages\microsoft.bcl.asyncinterfaces\1.1.0\lib\netstandard2.0" + +$systemClientModelRef = Get-ChildItem -Recurse ` + -Path "$($env:UserProfile)\.nuget\packages\system.clientmodel\1.1.0-beta.5" ` + -Include "netstandard2.0" | Select-Object -Last 1 +$systemMemoryDataRef = Get-ChildItem -Recurse ` + -Path 
"$($env:UserProfile)\.nuget\packages\system.memory.data\1.0.2" ` + -Include "netstandard2.0" | Select-Object -Last 1 +$systemDiagnosticsDiagnosticSourceRef = Get-ChildItem -Recurse ` + -Path "$($env:UserProfile)\.nuget\packages\system.diagnostics.diagnosticsource\6.0.1" ` + -Include "netstandard2.0" | Select-Object -Last 1 +$microsoftBclAsyncInterfacesRef = Get-ChildItem -Recurse ` + -Path "$($env:UserProfile)\.nuget\packages\microsoft.bcl.asyncinterfaces\1.1.0" ` + -Include "netstandard2.0" | Select-Object -Last 1 Write-Output "Assembly reference paths:" Write-Output "* NETCore:" diff --git a/.scripts/Invoke-CodeGen.ps1 b/.scripts/Invoke-CodeGen.ps1 index 827387ebc..1e999a5e7 100644 --- a/.scripts/Invoke-CodeGen.ps1 +++ b/.scripts/Invoke-CodeGen.ps1 @@ -7,6 +7,8 @@ function Invoke([scriptblock]$script) { & $script } +$scriptStartTime = Get-Date + Push-Location $repoRoot/.typespec try { Invoke { npm ci } @@ -19,3 +21,9 @@ try { finally { Pop-Location } + +$scriptElapsed = $(Get-Date) - $scriptStartTime +$scriptElapsedSeconds = [math]::Round($scriptElapsed.TotalSeconds, 1) +$scriptName = $MyInvocation.MyCommand.Name + +Write-Host "${scriptName} complete. Time: ${scriptElapsedSeconds}s" \ No newline at end of file diff --git a/.typespec/administration/main.tsp b/.typespec/administration/main.tsp new file mode 100644 index 000000000..5ad1d3a2b --- /dev/null +++ b/.typespec/administration/main.tsp @@ -0,0 +1 @@ +import "./models.tsp"; diff --git a/.typespec/administration/models.tsp b/.typespec/administration/models.tsp new file mode 100644 index 000000000..f429a8929 --- /dev/null +++ b/.typespec/administration/models.tsp @@ -0,0 +1,748 @@ +/* + * This file was automatically generated from an OpenAPI .yaml file. + * Edits made directly to this file will be lost. + */ + +using TypeSpec.OpenAPI; + +namespace OpenAI; + +/** The service account that performed the audit logged action. */ +model AuditLogActorServiceAccount { + /** The service account id. 
*/ + id?: string; +} + +/** The user who performed the audit logged action. */ +model AuditLogActorUser { + /** The user id. */ + id?: string; + + /** The user email. */ + email?: string; +} + +/** The API Key used to perform the audit logged action. */ +model AuditLogActorApiKey { + /** The tracking id of the API key. */ + id?: string; + + @doc(""" + The type of API key. Can be either `user` or `service_account`. + """) + type?: "user" | "service_account"; + + user?: AuditLogActorUser; + service_account?: AuditLogActorServiceAccount; +} + +/** The session in which the audit logged action was performed. */ +model AuditLogActorSession { + user?: AuditLogActorUser; + + /** The IP address from which the action was performed. */ + ip_address?: string; +} + +/** The actor who performed the audit logged action. */ +model AuditLogActor { + @doc(""" + The type of actor. Is either `session` or `api_key`. + """) + type?: "session" | "api_key"; + + session?: AuditLogActorSession; + api_key?: AuditLogActorApiKey; +} + +/** The event type. */ +@extension("x-oaiExpandable", true) +union AuditLogEventType { + "api_key.created", + "api_key.updated", + "api_key.deleted", + "invite.sent", + "invite.accepted", + "invite.deleted", + "login.succeeded", + "login.failed", + "logout.succeeded", + "logout.failed", + "organization.updated", + "project.created", + "project.updated", + "project.archived", + "service_account.created", + "service_account.updated", + "service_account.deleted", + "user.added", + "user.updated", + "user.deleted", +} + +/** A log of a user action or configuration change within this organization. */ +model AuditLog { + /** The ID of this log. */ + id: string; + + type: AuditLogEventType; + + // Tool customization: 'created' and fields ending in '_at' are Unix encoded utcDateTime + /** The Unix timestamp (in seconds) of the event. */ + @encode("unixTimestamp", int32) + effective_at: utcDateTime; + + /** The project that the action was scoped to. 
Absent for actions not scoped to projects. */ + project?: { + /** The project ID. */ + id?: string; + + /** The project title. */ + name?: string; + }; + + actor: AuditLogActor; + + @doc(""" + The details for events with this `type`. + """) + @encodedName("application/json", "api_key.created") + api_key_created?: { + /** The tracking ID of the API key. */ + id?: string; + + /** The payload used to create the API key. */ + data?: { + @doc(""" + A list of scopes allowed for the API key, e.g. `["api.model.request"]` + """) + scopes?: string[]; + }; + }; + + @doc(""" + The details for events with this `type`. + """) + @encodedName("application/json", "api_key.updated") + api_key_updated?: { + /** The tracking ID of the API key. */ + id?: string; + + /** The payload used to update the API key. */ + changes_requested?: { + @doc(""" + A list of scopes allowed for the API key, e.g. `["api.model.request"]` + """) + scopes?: string[]; + }; + }; + + @doc(""" + The details for events with this `type`. + """) + @encodedName("application/json", "api_key.deleted") + api_key_deleted?: { + /** The tracking ID of the API key. */ + id?: string; + }; + + @doc(""" + The details for events with this `type`. + """) + @encodedName("application/json", "invite.sent") + invite_sent?: { + /** The ID of the invite. */ + id?: string; + + /** The payload used to create the invite. */ + data?: { + /** The email invited to the organization. */ + email?: string; + + @doc(""" + The role the email was invited to be. Is either `owner` or `member`. + """) + role?: string; + }; + }; + + @doc(""" + The details for events with this `type`. + """) + @encodedName("application/json", "invite.accepted") + invite_accepted?: { + /** The ID of the invite. */ + id?: string; + }; + + @doc(""" + The details for events with this `type`. + """) + @encodedName("application/json", "invite.deleted") + invite_deleted?: { + /** The ID of the invite. 
*/ + id?: string; + }; + + @doc(""" + The details for events with this `type`. + """) + @encodedName("application/json", "login.failed") + login_failed?: { + /** The error code of the failure. */ + error_code?: string; + + /** The error message of the failure. */ + error_message?: string; + }; + + @doc(""" + The details for events with this `type`. + """) + @encodedName("application/json", "logout.failed") + logout_failed?: { + /** The error code of the failure. */ + error_code?: string; + + /** The error message of the failure. */ + error_message?: string; + }; + + @doc(""" + The details for events with this `type`. + """) + @encodedName("application/json", "organization.updated") + organization_updated?: { + /** The organization ID. */ + id?: string; + + /** The payload used to update the organization settings. */ + changes_requested?: { + /** The organization title. */ + title?: string; + + /** The organization description. */ + description?: string; + + /** The organization name. */ + name?: string; + + settings?: { + @doc(""" + Visibility of the threads page which shows messages created with the Assistants API and Playground. One of `ANY_ROLE`, `OWNERS`, or `NONE`. + """) + threads_ui_visibility?: string; + + @doc(""" + Visibility of the usage dashboard which shows activity and costs for your organization. One of `ANY_ROLE` or `OWNERS`. + """) + usage_dashboard_visibility?: string; + }; + }; + }; + + @doc(""" + The details for events with this `type`. + """) + @encodedName("application/json", "project.created") + project_created?: { + /** The project ID. */ + id?: string; + + /** The payload used to create the project. */ + data?: { + /** The project name. */ + name?: string; + + /** The title of the project as seen on the dashboard. */ + title?: string; + }; + }; + + @doc(""" + The details for events with this `type`. + """) + @encodedName("application/json", "project.updated") + project_updated?: { + /** The project ID. 
*/ + id?: string; + + /** The payload used to update the project. */ + changes_requested?: { + /** The title of the project as seen on the dashboard. */ + title?: string; + }; + }; + + @doc(""" + The details for events with this `type`. + """) + @encodedName("application/json", "project.archived") + project_archived?: { + /** The project ID. */ + id?: string; + }; + + @doc(""" + The details for events with this `type`. + """) + @encodedName("application/json", "service_account.created") + service_account_created?: { + /** The service account ID. */ + id?: string; + + /** The payload used to create the service account. */ + data?: { + @doc(""" + The role of the service account. Is either `owner` or `member`. + """) + role?: string; + }; + }; + + @doc(""" + The details for events with this `type`. + """) + @encodedName("application/json", "service_account.updated") + service_account_updated?: { + /** The service account ID. */ + id?: string; + + /** The payload used to updated the service account. */ + changes_requested?: { + @doc(""" + The role of the service account. Is either `owner` or `member`. + """) + role?: string; + }; + }; + + @doc(""" + The details for events with this `type`. + """) + @encodedName("application/json", "service_account.deleted") + service_account_deleted?: { + /** The service account ID. */ + id?: string; + }; + + @doc(""" + The details for events with this `type`. + """) + @encodedName("application/json", "user.added") + user_added?: { + /** The user ID. */ + id?: string; + + /** The payload used to add the user to the project. */ + data?: { + @doc(""" + The role of the user. Is either `owner` or `member`. + """) + role?: string; + }; + }; + + @doc(""" + The details for events with this `type`. + """) + @encodedName("application/json", "user.updated") + user_updated?: { + /** The project ID. */ + id?: string; + + /** The payload used to update the user. */ + changes_requested?: { + @doc(""" + The role of the user. 
Is either `owner` or `member`. + """) + role?: string; + }; + }; + + @doc(""" + The details for events with this `type`. + """) + @encodedName("application/json", "user.deleted") + user_deleted?: { + /** The user ID. */ + id?: string; + }; +} + +model ListAuditLogsResponse { + object: "list"; + data: AuditLog[]; + first_id: string; + last_id: string; + has_more: boolean; +} + +@doc(""" + Represents an individual `invite` to the organization. + """) +model Invite { + @doc(""" + The object type, which is always `organization.invite` + """) + object: "organization.invite"; + + /** The identifier, which can be referenced in API endpoints */ + id: string; + + /** The email address of the individual to whom the invite was sent */ + email: string; + + @doc(""" + `owner` or `reader` + """) + role: "owner" | "reader"; + + @doc(""" + `accepted`,`expired`, or `pending` + """) + status: "accepted" | "expired" | "pending"; + + // Tool customization: 'created' and fields ending in '_at' are Unix encoded utcDateTime + /** The Unix timestamp (in seconds) of when the invite was sent. */ + @encode("unixTimestamp", int32) + invited_at: utcDateTime; + + // Tool customization: 'created' and fields ending in '_at' are Unix encoded utcDateTime + /** The Unix timestamp (in seconds) of when the invite expires. */ + @encode("unixTimestamp", int32) + expires_at: utcDateTime; + + // Tool customization: 'created' and fields ending in '_at' are Unix encoded utcDateTime + /** The Unix timestamp (in seconds) of when the invite was accepted. 
*/ + @encode("unixTimestamp", int32) + accepted_at?: utcDateTime; +} + +model InviteListResponse { + @doc(""" + The object type, which is always `list` + """) + object: "list"; + + data: Invite[]; + + @doc(""" + The first `invite_id` in the retrieved `list` + """) + first_id?: string; + + @doc(""" + The last `invite_id` in the retrieved `list` + """) + last_id?: string; + + @doc(""" + The `has_more` property is used for pagination to indicate there are additional results. + """) + has_more?: boolean; +} + +model InviteRequest { + /** Send an email to this address */ + email: string; + + @doc(""" + `owner` or `reader` + """) + role: "reader" | "owner"; +} + +model InviteDeleteResponse { + @doc(""" + The object type, which is always `organization.invite.deleted` + """) + object: "organization.invite.deleted"; + + id: string; + deleted: boolean; +} + +@doc(""" + Represents an individual `user` within an organization. + """) +model User { + @doc(""" + The object type, which is always `organization.user` + """) + object: "organization.user"; + + /** The identifier, which can be referenced in API endpoints */ + id: string; + + /** The name of the user */ + name: string; + + /** The email address of the user */ + email: string; + + @doc(""" + `owner` or `reader` + """) + role: "owner" | "reader"; + + // Tool customization: 'created' and fields ending in '_at' are Unix encoded utcDateTime + /** The Unix timestamp (in seconds) of when the user was added. */ + @encode("unixTimestamp", int32) + added_at: utcDateTime; +} + +model UserListResponse { + object: "list"; + data: User[]; + first_id: string; + last_id: string; + has_more: boolean; +} + +model UserRoleUpdateRequest { + @doc(""" + `owner` or `reader` + """) + role: "owner" | "reader"; +} + +model UserDeleteResponse { + object: "organization.user.deleted"; + id: string; + deleted: boolean; +} + +/** Represents an individual project. 
*/ +model Project { + /** The identifier, which can be referenced in API endpoints */ + id: string; + + @doc(""" + The object type, which is always `organization.project` + """) + object: "organization.project"; + + /** The name of the project. This appears in reporting. */ + name: string; + + // Tool customization: 'created' and fields ending in '_at' are Unix encoded utcDateTime + /** The Unix timestamp (in seconds) of when the project was created. */ + @encode("unixTimestamp", int32) + created_at: utcDateTime; + + // Tool customization: 'created' and fields ending in '_at' are Unix encoded utcDateTime + @doc(""" + The Unix timestamp (in seconds) of when the project was archived or `null`. + """) + @encode("unixTimestamp", int32) + archived_at?: utcDateTime | null; + + @doc(""" + `active` or `archived` + """) + status: "active" | "archived"; +} + +model ProjectListResponse { + object: "list"; + data: Project[]; + first_id: string; + last_id: string; + has_more: boolean; +} + +model ProjectCreateRequest { + /** The friendly name of the project, this name appears in reports. */ + name: string; +} + +model ProjectUpdateRequest { + /** The updated name of the project, this name appears in reports. */ + name: string; +} + +model DefaultProjectErrorResponse { + code: int32; + message: string; +} + +/** Represents an individual user in a project. */ +model ProjectUser { + @doc(""" + The object type, which is always `organization.project.user` + """) + object: "organization.project.user"; + + /** The identifier, which can be referenced in API endpoints */ + id: string; + + /** The name of the user */ + name: string; + + /** The email address of the user */ + email: string; + + @doc(""" + `owner` or `member` + """) + role: "owner" | "member"; + + // Tool customization: 'created' and fields ending in '_at' are Unix encoded utcDateTime + /** The Unix timestamp (in seconds) of when the project was added. 
*/ + @encode("unixTimestamp", int32) + added_at: utcDateTime; +} + +model ProjectUserListResponse { + object: string; + data: ProjectUser[]; + first_id: string; + last_id: string; + has_more: boolean; +} + +model ProjectUserCreateRequest { + /** The ID of the user. */ + user_id: string; + + @doc(""" + `owner` or `member` + """) + role: "owner" | "member"; +} + +model ProjectUserUpdateRequest { + @doc(""" + `owner` or `member` + """) + role: "owner" | "member"; +} + +model ProjectUserDeleteResponse { + object: "organization.project.user.deleted"; + id: string; + deleted: boolean; +} + +/** Represents an individual service account in a project. */ +model ProjectServiceAccount { + @doc(""" + The object type, which is always `organization.project.service_account` + """) + object: "organization.project.service_account"; + + /** The identifier, which can be referenced in API endpoints */ + id: string; + + /** The name of the service account */ + name: string; + + @doc(""" + `owner` or `member` + """) + role: "owner" | "member"; + + // Tool customization: 'created' and fields ending in '_at' are Unix encoded utcDateTime + /** The Unix timestamp (in seconds) of when the service account was created */ + @encode("unixTimestamp", int32) + created_at: utcDateTime; +} + +model ProjectServiceAccountListResponse { + object: "list"; + data: ProjectServiceAccount[]; + first_id: string; + last_id: string; + has_more: boolean; +} + +model ProjectServiceAccountCreateRequest { + /** The name of the service account being created. 
*/ + name: string; +} + +model ProjectServiceAccountCreateResponse { + object: "organization.project.service_account"; + id: string; + name: string; + + @doc(""" + Service accounts can only have one role of type `member` + """) + role: "member"; + + // Tool customization: 'created' and fields ending in '_at' are Unix encoded utcDateTime + @encode("unixTimestamp", int32) + created_at: utcDateTime; + + api_key: ProjectServiceAccountApiKey; +} + +model ProjectServiceAccountApiKey { + @doc(""" + The object type, which is always `organization.project.service_account.api_key` + """) + object: "organization.project.service_account.api_key"; + + value: string; + name: string; + + // Tool customization: 'created' and fields ending in '_at' are Unix encoded utcDateTime + @encode("unixTimestamp", int32) + created_at: utcDateTime; + + id: string; +} + +model ProjectServiceAccountDeleteResponse { + object: "organization.project.service_account.deleted"; + id: string; + deleted: boolean; +} + +/** Represents an individual API key in a project. 
*/ +model ProjectApiKey { + @doc(""" + The object type, which is always `organization.project.api_key` + """) + object: "organization.project.api_key"; + + /** The redacted value of the API key */ + redacted_value: string; + + /** The name of the API key */ + name: string; + + // Tool customization: 'created' and fields ending in '_at' are Unix encoded utcDateTime + /** The Unix timestamp (in seconds) of when the API key was created */ + @encode("unixTimestamp", int32) + created_at: utcDateTime; + + /** The identifier, which can be referenced in API endpoints */ + id: string; + + owner: { + @doc(""" + `user` or `service_account` + """) + type?: "user" | "service_account"; + + user?: ProjectUser; + service_account?: ProjectServiceAccount; + }; +} + +model ProjectApiKeyListResponse { + object: "list"; + data: ProjectApiKey[]; + first_id: string; + last_id: string; + has_more: boolean; +} + +model ProjectApiKeyDeleteResponse { + object: "organization.project.api_key.deleted"; + id: string; + deleted: boolean; +} diff --git a/.typespec/assistants/client.tsp b/.typespec/assistants/client.tsp index 14b94df7b..1e238b5ec 100644 --- a/.typespec/assistants/client.tsp +++ b/.typespec/assistants/client.tsp @@ -8,5 +8,5 @@ using OpenAI; @@access(AssistantResponseFormat, Access.public); @@usage(AssistantResponseFormat, Usage.input); -@@access(CreateAssistantRequestToolResourcesFileSearchBase, Access.public); -@@usage(CreateAssistantRequestToolResourcesFileSearchBase, Usage.input); +@@access(ToolResourcesFileSearchVectorStoreCreationHelper, Access.public); +@@usage(ToolResourcesFileSearchVectorStoreCreationHelper, Usage.input); diff --git a/.typespec/assistants/custom.tsp b/.typespec/assistants/custom.tsp index 8b99ad448..e557bac28 100644 --- a/.typespec/assistants/custom.tsp +++ b/.typespec/assistants/custom.tsp @@ -1,18 +1,23 @@ import "@azure-tools/typespec-client-generator-core"; +import "@typespec/http"; +import "../common/models.tsp"; +import "../vector-stores/models.tsp"; 
using Azure.ClientGenerator.Core; using TypeSpec.OpenAPI; +using TypeSpec.Http; namespace OpenAI; // This customization allows us to concretely specify that the file_search object must provide // either ID references --or-- in-line creation helpers, but not both. -alias CreateAssistantRequestToolResourcesFileSearch = CreateAssistantRequestToolResourcesFileSearchVectorStoreIdReferences | CreateAssistantRequestToolResourcesFileSearchVectorStoreCreationHelpers; - -model CreateAssistantRequestToolResourcesFileSearchVectorStoreIdReferences { - ...CreateAssistantRequestToolResourcesFileSearchBase; +model ToolResourcesFileSearch { + ...ToolResourcesFileSearchIdsOnly; + ...ToolResourcesFileSearchVectorStoreCreationHelpers; +} +model ToolResourcesFileSearchIdsOnly { /** * The [vector store](/docs/api-reference/vector-stores/object) attached to this assistant. * There can be a maximum of 1 vector store attached to the assistant. @@ -21,23 +26,17 @@ model CreateAssistantRequestToolResourcesFileSearchVectorStoreIdReferences { vector_store_ids?: string[]; } -model CreateAssistantRequestToolResourcesFileSearchVectorStoreCreationHelpers { - ...CreateAssistantRequestToolResourcesFileSearchBase; - +model ToolResourcesFileSearchVectorStoreCreationHelpers { /** * A helper to create a [vector store](/docs/api-reference/vector-stores/object) with * file_ids and attach it to this assistant. There can be a maximum of 1 vector store * attached to the assistant. */ @maxItems(1) - vector_stores?: CreateAssistantRequestToolResourcesFileSearchVectorStoreCreationHelper[]; + vector_stores?: ToolResourcesFileSearchVectorStoreCreationHelper[]; } -model CreateAssistantRequestToolResourcesFileSearchBase { - // Common fields (currently none) -} - -alias CreateAssistantRequestToolResourcesFileSearchVectorStoreCreationHelper = { +alias ToolResourcesFileSearchVectorStoreCreationHelper = { /** * A list of [file](/docs/api-reference/files) IDs to add to the vector store. 
There can be * a maximum of 10000 files in a vector store. @@ -45,6 +44,12 @@ alias CreateAssistantRequestToolResourcesFileSearchVectorStoreCreationHelper = { @maxItems(10000) file_ids?: string[]; + @doc(""" + The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. Only applicable if `file_ids` is non-empty. + """) + @extension("x-oaiExpandable", true) + chunking_strategy?: AutoChunkingStrategyRequestParam | StaticChunkingStrategyRequestParam; + /** * Set of 16 key-value pairs that can be attached to a vector store. This can be useful for * storing additional information about the vector store in a structured format. Keys can @@ -59,4 +64,20 @@ model AssistantToolDefinition { type: string; } -model AssistantResponseFormat {} +@encodedName("application/json", "") +@discriminator("type") +model AssistantResponseFormat { + ...OmniTypedResponseFormat; +} + +model AssistantResponseFormatText extends AssistantResponseFormat { + ...ResponseFormatText; +} + +model AssistantResponseFormatJsonObject extends AssistantResponseFormat { + ...ResponseFormatJsonObject; +} + +model AssistantResponseFormatJsonSchema extends AssistantResponseFormat { + ...ResponseFormatJsonSchema; +} diff --git a/.typespec/assistants/models.tsp b/.typespec/assistants/models.tsp index b0a4e3852..e3e9737ef 100644 --- a/.typespec/assistants/models.tsp +++ b/.typespec/assistants/models.tsp @@ -13,24 +13,18 @@ namespace OpenAI; @doc(""" Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs). 
+ Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. """) @extension("x-oaiExpandable", true) union AssistantsApiResponseFormatOption { - "none" | "auto", - AssistantsApiResponseFormat, -} - -@doc(""" - An object describing the expected output of the model. If `json_object` only `function` type `tools` are allowed to be passed to the Run. If `text` the model can return text or any value needed. - """) -model AssistantsApiResponseFormat { - @doc(""" - Must be one of `text` or `json_object`. - """) - type?: "text" | "json_object" = "text"; + "auto", + ResponseFormatText, + ResponseFormatJsonObject, + ResponseFormatJsonSchema, } model CreateAssistantRequest { @@ -39,7 +33,10 @@ model CreateAssistantRequest { `model`: | string | "gpt-4o" + | "gpt-4o-2024-08-06" | "gpt-4o-2024-05-13" + | "gpt-4o-mini" + | "gpt-4o-mini-2024-07-18" | "gpt-4-turbo" | "gpt-4-turbo-2024-04-09" | "gpt-4-0125-preview" @@ -77,7 +74,7 @@ model CreateAssistantRequest { """) @maxItems(128) @extension("x-oaiExpandable", true) - tools?: AssistantToolDefinition[] = []; + tools?: AssistantToolDefinition[] = #[]; @doc(""" A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. 
@@ -88,13 +85,14 @@ model CreateAssistantRequest { A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. """) @maxItems(20) - file_ids?: string[] = []; + file_ids?: string[] = #[]; }; // Tool customization: use custom type for sophisticated union - file_search?: CreateAssistantRequestToolResourcesFileSearch; + file_search?: ToolResourcesFileSearch; } | null; + // Tool customization: specialize known metadata string maps /** Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. */ @extension("x-oaiTypeLabel", "map") metadata?: Record | null; @@ -138,7 +136,7 @@ model ModifyAssistantRequest { """) @maxItems(128) @extension("x-oaiExpandable", true) - tools?: AssistantToolDefinition[] = []; + tools?: AssistantToolDefinition[] = #[]; @doc(""" A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. @@ -149,15 +147,14 @@ model ModifyAssistantRequest { Overrides the list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. """) @maxItems(20) - file_ids?: string[] = []; - }; - file_search?: { - /** Overrides the [vector store](/docs/api-reference/vector-stores/object) attached to this assistant. There can be a maximum of 1 vector store attached to the assistant. 
*/ - @maxItems(1) - vector_store_ids?: string[]; + file_ids?: string[] = #[]; }; + + // Tool customization: use custom type for sophisticated union + file_search?: ToolResourcesFileSearchIdsOnly; } | null; + // Tool customization: specialize known metadata string maps /** Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. */ @extension("x-oaiTypeLabel", "map") metadata?: Record | null; @@ -213,7 +210,7 @@ model AssistantToolsFileSearch extends AssistantToolDefinition { /** Overrides for the file search tool. */ file_search?: { @doc(""" - The maximum number of results the file search tool should output. The default is 20 for gpt-4* models and 5 for gpt-3.5-turbo. This number should be between 1 and 50 inclusive. + The maximum number of results the file search tool should output. The default is 20 for `gpt-4*` models and 5 for `gpt-3.5-turbo`. This number should be between 1 and 50 inclusive. Note that the file search tool may output fewer than `max_num_results` results. See the [file search tool documentation](/docs/assistants/tools/file-search/number-of-chunks-returned) for more information. """) @@ -278,7 +275,7 @@ model AssistantObject { """) @maxItems(128) @extension("x-oaiExpandable", true) - tools: AssistantToolDefinition[] = []; + tools: AssistantToolDefinition[] = #[]; @doc(""" A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. @@ -289,15 +286,14 @@ model AssistantObject { A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter`` tool. There can be a maximum of 20 files associated with the tool. 
""") @maxItems(20) - file_ids?: string[] = []; - }; - file_search?: { - /** The ID of the [vector store](/docs/api-reference/vector-stores/object) attached to this assistant. There can be a maximum of 1 vector store attached to the assistant. */ - @maxItems(1) - vector_store_ids?: string[]; + file_ids?: string[] = #[]; }; + + // Tool customization: use custom type for sophisticated union + file_search?: ToolResourcesFileSearchIdsOnly; } | null; + // Tool customization: specialize known metadata string maps /** Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. */ @extension("x-oaiTypeLabel", "map") metadata: Record | null; diff --git a/.typespec/assistants/operations.tsp b/.typespec/assistants/operations.tsp index f18d38c1a..560dd92f2 100644 --- a/.typespec/assistants/operations.tsp +++ b/.typespec/assistants/operations.tsp @@ -34,7 +34,7 @@ interface Assistants { * Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` * for descending order. */ - @query order?: ListOrder = ListOrder.desc, + @query order?: "asc" | "desc" = "desc", /** * A cursor for use in pagination. `after` is an object ID that defines your place in the list. diff --git a/.typespec/audio/models.tsp b/.typespec/audio/models.tsp index 78b0f0d8e..46d50d08a 100644 --- a/.typespec/audio/models.tsp +++ b/.typespec/audio/models.tsp @@ -69,7 +69,7 @@ model CreateTranscriptionRequest { @doc(""" The timestamp granularities to populate for this transcription. `response_format` must be set `verbose_json` to use timestamp granularities. Either or both of these options are supported: `word`, or `segment`. Note: There is no additional latency for segment timestamps, but generating word timestamps incurs additional latency. 
""") - timestamp_granularities?: ("word" | "segment")[] = ["segment"]; + timestamp_granularities?: ("word" | "segment")[] = #["segment"]; } model CreateTranslationRequest { @@ -116,7 +116,7 @@ model CreateTranscriptionResponseVerboseJson { /** The language of the input audio. */ language: string; - // Tool customization: timespans are encoded durations + // Tool customization: correct erroneous spec representation of duration as string /** The duration of the input audio. */ @encode("seconds", float32) duration: duration; @@ -145,7 +145,7 @@ model CreateTranslationResponseVerboseJson { """) language: string; - // Tool customization: timespans are encoded durations + // Tool customization: correct erroneous spec representation of duration as string /** The duration of the input audio. */ @encode("seconds", float32) duration: duration; diff --git a/.typespec/batch/models.tsp b/.typespec/batch/models.tsp index b2fa2e7fe..a375bb0ca 100644 --- a/.typespec/batch/models.tsp +++ b/.typespec/batch/models.tsp @@ -120,6 +120,7 @@ model Batch { failed: int32; }; + // Tool customization: specialize known metadata string maps /** Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. */ @extension("x-oaiTypeLabel", "map") metadata?: Record | null; @@ -158,7 +159,7 @@ model BatchRequestOutput { /** The JSON body of the response */ @extension("x-oaiTypeLabel", "map") - body?: Record; + body?: Record; } | null; /** For requests that failed with a non-HTTP error, this will contain more information on the cause of the failure. 
*/ diff --git a/.typespec/chat/client.tsp b/.typespec/chat/client.tsp index efca74507..a0a6d9bc8 100644 --- a/.typespec/chat/client.tsp +++ b/.typespec/chat/client.tsp @@ -22,3 +22,6 @@ using OpenAI; @@access(CreateChatCompletionStreamResponse, Access.public); @@usage(CreateChatCompletionStreamResponse, Usage.output); + +@@access(ChatResponseFormatJsonSchema, Access.public); +@@usage(ChatResponseFormatJsonSchema, Usage.input); diff --git a/.typespec/chat/custom.tsp b/.typespec/chat/custom.tsp index 44347ce21..9e98e5c90 100644 --- a/.typespec/chat/custom.tsp +++ b/.typespec/chat/custom.tsp @@ -1,7 +1,27 @@ +import "../common"; + using TypeSpec.OpenAPI; namespace OpenAI; +@encodedName("application/json", "") +@discriminator("type") +model ChatResponseFormat { + ...OmniTypedResponseFormat; +} + +model ChatResponseFormatText extends ChatResponseFormat { + ...ResponseFormatText; +} + +model ChatResponseFormatJsonObject extends ChatResponseFormat { + ...ResponseFormatJsonObject; +} + +model ChatResponseFormatJsonSchema extends ChatResponseFormat { + ...ResponseFormatJsonSchema; +} + model ChatCompletionFunctionChoice {} model ChatCompletionToolChoice {} diff --git a/.typespec/chat/models.tsp b/.typespec/chat/models.tsp index 55428b5e4..84f8e2b39 100644 --- a/.typespec/chat/models.tsp +++ b/.typespec/chat/models.tsp @@ -24,6 +24,10 @@ model CreateChatCompletionRequest { | string | "gpt-4o" | "gpt-4o-2024-05-13" + | "gpt-4o-2024-08-06" + | "chatgpt-4o-latest" + | "gpt-4o-mini" + | "gpt-4o-mini-2024-07-18" | "gpt-4-turbo" | "gpt-4-turbo-2024-04-09" | "gpt-4-0125-preview" @@ -96,19 +100,18 @@ model CreateChatCompletionRequest { @maxValue(2) presence_penalty?: float32 | null = 0; + // Tool customization: apply a named union type @doc(""" - An object specifying the format that the model must output. Compatible with [GPT-4 Turbo](/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. 
+ An object specifying the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4o mini](/docs/models/gpt-4o-mini), [GPT-4 Turbo](/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs). Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. """) - response_format?: { - @doc(""" - Must be one of `text` or `json_object`. - """) - type?: "text" | "json_object" = "text"; - }; + @extension("x-oaiExpandable", true) + response_format?: ChatResponseFormat; @doc(""" This feature is in Beta. @@ -119,6 +122,16 @@ model CreateChatCompletionRequest { @maxValue(9223372036854775807) seed?: int64 | null; + @doc(""" + Specifies the latency tier to use for processing the request. This parameter is relevant for customers subscribed to the scale tier service: + - If set to 'auto', the system will utilize scale tier credits until they are exhausted. + - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee. + - When not set, the default behavior is 'auto'. + + When this parameter is set, the response body will include the `service_tier` utilized. 
+ """) + service_tier?: "auto" | "default" | null = null; + /** Up to 4 sequences where the API will stop generating further tokens. */ stop?: string | string[] | null = null; @@ -212,6 +225,9 @@ model CreateChatCompletionResponse { logprobs: { /** A list of message content tokens with log probability information. */ content: ChatCompletionTokenLogprob[] | null; + + /** A list of message refusal tokens with log probability information. */ + refusal: ChatCompletionTokenLogprob[] | null; } | null; }[]; @@ -223,6 +239,11 @@ model CreateChatCompletionResponse { /** The model used for the chat completion. */ `model`: string; + @doc(""" + The service tier used for processing the request. This field is only included if the `service_tier` parameter is specified in the request. + """) + service_tier?: "scale" | "default" | null; + @doc(""" This fingerprint represents the backend configuration that the model runs with. @@ -275,12 +296,6 @@ union ChatCompletionToolChoiceOption { ChatCompletionNamedToolChoice, } -@extension("x-oaiExpandable", true) -union ChatCompletionRequestMessageContentPart { - ChatCompletionRequestMessageContentPartText, - ChatCompletionRequestMessageContentPartImage, -} - model ChatCompletionRequestMessageContentPartText { /** The type of the content part. */ type: "text"; @@ -303,6 +318,14 @@ model ChatCompletionRequestMessageContentPartImage { }; } +model ChatCompletionRequestMessageContentPartRefusal { + /** The type of the content part. */ + type: "refusal"; + + /** The refusal message generated by the model. */ + refusal: string; +} + model ChatCompletionMessageToolCall { /** The ID of the tool call. 
*/ id: string; @@ -330,10 +353,32 @@ model ChatCompletionRequestMessage { role: string; } +@extension("x-oaiExpandable", true) +union ChatCompletionRequestSystemMessageContentPart { + ChatCompletionRequestMessageContentPartText, +} + +@extension("x-oaiExpandable", true) +union ChatCompletionRequestUserMessageContentPart { + ChatCompletionRequestMessageContentPartText, + ChatCompletionRequestMessageContentPartImage, +} + +@extension("x-oaiExpandable", true) +union ChatCompletionRequestAssistantMessageContentPart { + ChatCompletionRequestMessageContentPartText, + ChatCompletionRequestMessageContentPartRefusal, +} + +@extension("x-oaiExpandable", true) +union ChatCompletionRequestToolMessageContentPart { + ChatCompletionRequestMessageContentPartText, +} + // Tool customization: apply discriminated type base model ChatCompletionRequestSystemMessage extends ChatCompletionRequestMessage { /** The contents of the system message. */ - content: string; + content: string | ChatCompletionRequestSystemMessageContentPart[]; @doc(""" The role of the messages author, in this case `system`. @@ -348,7 +393,7 @@ model ChatCompletionRequestSystemMessage extends ChatCompletionRequestMessage { model ChatCompletionRequestUserMessage extends ChatCompletionRequestMessage { /** The contents of the user message. */ @extension("x-oaiExpandable", true) - content: string | ChatCompletionRequestMessageContentPart[]; + content: string | ChatCompletionRequestUserMessageContentPart[]; @doc(""" The role of the messages author, in this case `user`. @@ -365,7 +410,10 @@ model ChatCompletionRequestAssistantMessage @doc(""" The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified. """) - content?: string | null; + content?: string | ChatCompletionRequestAssistantMessageContentPart[] | null; + + /** The refusal message by the assistant. */ + refusal?: string | null; @doc(""" The role of the messages author, in this case `assistant`. 
@@ -398,7 +446,7 @@ model ChatCompletionRequestToolMessage extends ChatCompletionRequestMessage { role: "tool"; /** The contents of the tool message. */ - content: string; + content: string | ChatCompletionRequestToolMessageContentPart[]; /** Tool call that this message is responding to. */ tool_call_id: string; @@ -481,6 +529,9 @@ model ChatCompletionResponseMessage { /** The contents of the message. */ content: string | null; + /** The refusal message generated by the model. */ + refusal: string | null; + tool_calls?: ChatCompletionMessageToolCalls; /** The role of the author of this message. */ @@ -572,6 +623,9 @@ model ChatCompletionStreamResponseDelta { /** The role of the author of this message. */ role?: "system" | "user" | "assistant" | "tool"; + + /** The refusal message generated by the model. */ + refusal?: string | null; } model ChatCompletionMessageToolCallChunk { @@ -610,6 +664,9 @@ model CreateChatCompletionStreamResponse { logprobs?: { /** A list of message content tokens with log probability information. */ content: ChatCompletionTokenLogprob[] | null; + + /** A list of message refusal tokens with log probability information. */ + refusal: ChatCompletionTokenLogprob[] | null; } | null; @doc(""" @@ -638,6 +695,11 @@ model CreateChatCompletionStreamResponse { /** The model to generate the completion. */ `model`: string; + @doc(""" + The service tier used for processing the request. This field is only included if the `service_tier` parameter is specified in the request. + """) + service_tier?: "scale" | "default" | null; + @doc(""" This fingerprint represents the backend configuration that the model runs with. Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. 
diff --git a/.typespec/common/custom.tsp b/.typespec/common/custom.tsp index 18fc8c620..3efb2cfc2 100644 --- a/.typespec/common/custom.tsp +++ b/.typespec/common/custom.tsp @@ -2,8 +2,7 @@ using TypeSpec.OpenAPI; namespace OpenAI; -union ListOrder { - string, - asc: "asc", - desc: "desc", +@discriminator("type") +model OmniTypedResponseFormat { + type: string; } diff --git a/.typespec/common/models.tsp b/.typespec/common/models.tsp index d76d6302f..4afa9bdca 100644 --- a/.typespec/common/models.tsp +++ b/.typespec/common/models.tsp @@ -35,6 +35,53 @@ model FunctionObject { name: string; parameters?: FunctionParameters; + + @doc(""" + Whether to enable strict schema adherence when generating the function call. If set to true, the model will follow the exact schema defined in the `parameters` field. Only a subset of JSON Schema is supported when `strict` is `true`. Learn more about Structured Outputs in the [function calling guide](docs/guides/function-calling). + """) + strict?: boolean | null = false; +} + +// Tool customization: establish a common, discriminated union +model ResponseFormatText extends OmniTypedResponseFormat { + @doc(""" + The type of response format being defined: `text` + """) + type: "text"; +} + +// Tool customization: establish a common, discriminated union +model ResponseFormatJsonObject extends OmniTypedResponseFormat { + @doc(""" + The type of response format being defined: `json_object` + """) + type: "json_object"; +} + +/** The schema for the response format, described as a JSON Schema object. */ +model ResponseFormatJsonSchemaSchema is Record; + +// Tool customization: establish a common, discriminated union +model ResponseFormatJsonSchema extends OmniTypedResponseFormat { + @doc(""" + The type of response format being defined: `json_schema` + """) + type: "json_schema"; + + json_schema: { + /** A description of what the response format is for, used by the model to determine how to respond in the format. 
*/ + description?: string; + + /** The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. */ + name: string; + + schema?: ResponseFormatJsonSchemaSchema; + + @doc(""" + Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the `schema` field. Only a subset of JSON Schema is supported when `strict` is `true`. To learn more, read the [Structured Outputs guide](/docs/guides/structured-outputs). + """) + strict?: boolean | null = false; + }; } /** Whether to enable [parallel function calling](/docs/guides/function-calling/parallel-function-calling) during tool use. */ diff --git a/.typespec/fine-tuning/custom.tsp b/.typespec/fine-tuning/custom.tsp new file mode 100644 index 000000000..11dd998ac --- /dev/null +++ b/.typespec/fine-tuning/custom.tsp @@ -0,0 +1,56 @@ +using TypeSpec.OpenAPI; + +namespace OpenAI; + +model CreateFineTuningJobRequestHyperparameters { + /** + * Number of examples in each batch. A larger batch size means that model parameters + * are updated less frequently, but with lower variance. + */ + @minValue(1) + @maxValue(256) + batch_size?: CreateFineTuningJobRequestHyperparametersBatchSizeChoiceEnum | int32 = CreateFineTuningJobRequestHyperparametersBatchSizeChoiceEnum.auto; + + /** + * Scaling factor for the learning rate. A smaller learning rate may be useful to avoid + * overfitting. + */ + @minValue(0) + learning_rate_multiplier?: CreateFineTuningJobRequestHyperparametersLearningRateMultiplierChoiceEnum | float32 = CreateFineTuningJobRequestHyperparametersLearningRateMultiplierChoiceEnum.auto; + + /** + * The number of epochs to train the model for. An epoch refers to one full cycle + * through the training dataset. 
+ */ + @minValue(1) + @maxValue(50) + n_epochs?: CreateFineTuningJobRequestHyperparametersNEpochsChoiceEnum | int32 = CreateFineTuningJobRequestHyperparametersNEpochsChoiceEnum.auto; +} + +union CreateFineTuningJobRequestHyperparametersBatchSizeChoiceEnum { + auto: "auto", +} +union CreateFineTuningJobRequestHyperparametersLearningRateMultiplierChoiceEnum { + auto: "auto", +} +union CreateFineTuningJobRequestHyperparametersNEpochsChoiceEnum { + auto: "auto", +} + +model FineTuningJobHyperparameters { + /** + * The number of epochs to train the model for. An epoch refers to one full cycle + * through the training dataset. + */ + n_epochs: FineTuningJobHyperparametersNEpochsChoiceEnum | int32 = FineTuningJobHyperparametersNEpochsChoiceEnum.auto; +} + +union FineTuningJobHyperparametersBatchSizeChoiceEnum { + auto: "auto", +} +union FineTuningJobHyperparametersLearningRateMultiplierChoiceEnum { + auto: "auto", +} +union FineTuningJobHyperparametersNEpochsChoiceEnum { + auto: "auto", +} diff --git a/.typespec/fine-tuning/models.tsp b/.typespec/fine-tuning/models.tsp index 5de1315cb..e11a1e611 100644 --- a/.typespec/fine-tuning/models.tsp +++ b/.typespec/fine-tuning/models.tsp @@ -5,6 +5,7 @@ import "../chat"; import "../common"; +import "./custom.tsp"; using TypeSpec.OpenAPI; @@ -50,10 +51,15 @@ model FineTuningJobIntegrations is FineTuningIntegration[]; model CreateFineTuningJobRequest { /** * The name of the model to fine-tune. You can select one of the - * [supported models](/docs/guides/fine-tuning/what-models-can-be-fine-tuned). + * [supported models](/docs/guides/fine-tuning/which-models-can-be-fine-tuned). */ @extension("x-oaiTypeLabel", "string") - `model`: string | "babbage-002" | "davinci-002" | "gpt-3.5-turbo"; + `model`: + | string + | "babbage-002" + | "davinci-002" + | "gpt-3.5-turbo" + | "gpt-4o-mini"; @doc(""" The ID of an uploaded file that contains training data. 
@@ -68,31 +74,14 @@ model CreateFineTuningJobRequest { """) training_file: string; + // Tool customization: reflect observed wire truth (learning_rate_multiplier, n_epochs) for hyperparameters in ft responses /** The hyperparameters used for the fine-tuning job. */ - hyperparameters?: { - /** - * Number of examples in each batch. A larger batch size means that model parameters - * are updated less frequently, but with lower variance. - */ - batch_size?: "auto" | int32 = "auto"; - - /** - * Scaling factor for the learning rate. A smaller learning rate may be useful to avoid - * overfitting. - */ - learning_rate_multiplier?: "auto" | float32 = "auto"; - - /** - * The number of epochs to train the model for. An epoch refers to one full cycle - * through the training dataset. - */ - n_epochs?: "auto" | int32 = "auto"; - }; + hyperparameters?: CreateFineTuningJobRequestHyperparameters; @doc(""" A string of up to 18 characters that will be added to your fine-tuned model name. - For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`. + For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. """) @minLength(1) @maxLength(40) @@ -179,14 +168,9 @@ model FineTuningJob { @encode("unixTimestamp", int32) finished_at: utcDateTime | null; + // Tool customization: reflect observed wire truth (learning_rate_multiplier, n_epochs) for hyperparameters in ft responses /** The hyperparameters used for the fine-tuning job. See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. */ - hyperparameters: { - /** - * The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset. - * "auto" decides the optimal number of epochs based on the size of the dataset. If setting the number manually, we support any number between 1 and 50 epochs. 
- */ - n_epochs: "auto" | int32 = "auto"; - }; + hyperparameters: FineTuningJobHyperparameters; /** The base model that is being fine-tuned. */ `model`: string; diff --git a/.typespec/main.tsp b/.typespec/main.tsp index 1e51c25fe..4e9261637 100644 --- a/.typespec/main.tsp +++ b/.typespec/main.tsp @@ -2,6 +2,7 @@ import "@typespec/http"; import "@typespec/openapi3"; import "@typespec/openapi"; +import "./administration"; import "./audio"; import "./assistants"; import "./batch"; @@ -17,6 +18,7 @@ import "./moderations"; import "./runs"; import "./threads"; import "./vector-stores"; +import "./uploads"; using TypeSpec.Http; diff --git a/.typespec/messages/client.tsp b/.typespec/messages/client.tsp index bb59c0732..fdf340460 100644 --- a/.typespec/messages/client.tsp +++ b/.typespec/messages/client.tsp @@ -18,10 +18,12 @@ using OpenAI; @@access(MessageContentImageUrlObject, Access.public); @@usage(MessageContentImageUrlObject, Usage.input | Usage.output); +@@access(MessageContentRefusalObject, Access.public); +@@usage(MessageContentRefusalObject, Usage.input | Usage.output); + @@access(MessageRequestContentTextObject, Access.public); @@usage(MessageRequestContentTextObject, Usage.input | Usage.output); -// @@access(MessageContentTextObjectAnnotation, Access.public); @@usage(MessageContentTextObjectAnnotation, Usage.input | Usage.output); diff --git a/.typespec/messages/models.tsp b/.typespec/messages/models.tsp index aff04ca10..8529ca4f0 100644 --- a/.typespec/messages/models.tsp +++ b/.typespec/messages/models.tsp @@ -47,12 +47,14 @@ model CreateMessageRequest { /** A list of files attached to the message, and the tools they should be added to. */ attachments?: CreateMessageRequestAttachments | null; + // Tool customization: specialize known metadata string maps /** Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. 
Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. */ @extension("x-oaiTypeLabel", "map") metadata?: Record | null; } model ModifyMessageRequest { + // Tool customization: specialize known metadata string maps /** Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. */ @extension("x-oaiTypeLabel", "map") metadata?: Record | null; @@ -139,6 +141,7 @@ model MessageObject { /** A list of files attached to the message, and the tools they were added to. */ attachments: MessageObjectAttachments | null; + // Tool customization: specialize known metadata string maps /** Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. */ @extension("x-oaiTypeLabel", "map") metadata: Record | null; @@ -183,6 +186,17 @@ model MessageContentTextObject extends MessageContent { }; } +// Tool customization: apply a common model base for all assistants message content items +/** The refusal content generated by the assistant. */ +model MessageContentRefusalObject extends MessageContent { + @doc(""" + Always `refusal`. + """) + type: "refusal"; + + refusal: string; +} + // Tool customization: apply custom, common base type to union items /** A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. Generated when the assistant uses the "file_search" tool to search files. 
*/ model MessageContentTextAnnotationsFileCitationObject @@ -413,3 +427,17 @@ model MessageDeltaContentTextAnnotationsFilePathObject @minValue(0) end_index?: int32; } + +// Tool customization: apply custom, common base type to union items +/** The refusal content that is part of a message. */ +model MessageDeltaContentRefusalObject extends MessageDeltaContent { + /** The index of the refusal part in the message. */ + index: int32; + + @doc(""" + Always `refusal`. + """) + type: "refusal"; + + refusal?: string; +} diff --git a/.typespec/messages/operations.tsp b/.typespec/messages/operations.tsp index dd304362f..6c98d0118 100644 --- a/.typespec/messages/operations.tsp +++ b/.typespec/messages/operations.tsp @@ -9,7 +9,7 @@ using TypeSpec.OpenAPI; namespace OpenAI; -@route("threads/{thread_id}/messages") +@route("/threads/{thread_id}/messages") interface Messages { @post @operationId("createMessage") @@ -40,7 +40,7 @@ interface Messages { * Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` * for descending order. */ - @query order?: ListOrder = ListOrder.desc, + @query order?: "asc" | "desc" = "desc", /** * A cursor for use in pagination. `after` is an object ID that defines your place in the list. diff --git a/.typespec/runs/models.tsp b/.typespec/runs/models.tsp index e01ed0f40..7c037e38e 100644 --- a/.typespec/runs/models.tsp +++ b/.typespec/runs/models.tsp @@ -34,7 +34,10 @@ model CreateRunRequest { `model`?: | string | "gpt-4o" + | "gpt-4o-2024-08-06" | "gpt-4o-2024-05-13" + | "gpt-4o-mini" + | "gpt-4o-mini-2024-07-18" | "gpt-4-turbo" | "gpt-4-turbo-2024-04-09" | "gpt-4-0125-preview" @@ -68,6 +71,7 @@ model CreateRunRequest { /** Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. */ tools?: CreateRunRequestTools | null; + // Tool customization: specialize known metadata string maps /** Set of 16 key-value pairs that can be attached to an object. 
This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. */ @extension("x-oaiTypeLabel", "map") metadata?: Record | null; @@ -110,6 +114,7 @@ model CreateRunRequest { } model ModifyRunRequest { + // Tool customization: specialize known metadata string maps /** Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. */ @extension("x-oaiTypeLabel", "map") metadata?: Record | null; @@ -127,7 +132,10 @@ model CreateThreadAndRunRequest { `model`?: | string | "gpt-4o" + | "gpt-4o-2024-08-06" | "gpt-4o-2024-05-13" + | "gpt-4o-mini" + | "gpt-4o-mini-2024-07-18" | "gpt-4-turbo" | "gpt-4-turbo-2024-04-09" | "gpt-4-0125-preview" @@ -164,15 +172,14 @@ model CreateThreadAndRunRequest { A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. """) @maxItems(20) - file_ids?: string[] = []; - }; - file_search?: { - /** The ID of the [vector store](/docs/api-reference/vector-stores/object) attached to this assistant. There can be a maximum of 1 vector store attached to the assistant. */ - @maxItems(1) - vector_store_ids?: string[]; + file_ids?: string[] = #[]; }; + + // Tool customization: use custom type for sophisticated union + file_search?: ToolResourcesFileSearchIdsOnly; } | null; + // Tool customization: specialize known metadata string maps /** Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. 
*/ @extension("x-oaiTypeLabel", "map") metadata?: Record | null; @@ -348,7 +355,7 @@ model RunStepDetailsToolCallsFileSearchObject /** For now, this is always going to be an empty object. */ @extension("x-oaiTypeLabel", "map") - file_search: Record; + file_search: Record; } // Tool customization: apply custom, common base type to union items @@ -512,8 +519,9 @@ model RunObject { /** The list of tools that the [assistant](/docs/api-reference/assistants) used for this run. */ @maxItems(20) @extension("x-oaiExpandable", true) - tools: AssistantToolDefinition[] = []; + tools: AssistantToolDefinition[] = #[]; + // Tool customization: specialize known metadata string maps /** Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. */ @extension("x-oaiTypeLabel", "map") metadata: Record | null; @@ -612,6 +620,7 @@ model RunStepObject { @encode("unixTimestamp", int32) completed_at: utcDateTime | null; + // Tool customization: specialize known metadata string maps /** Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. */ @extension("x-oaiTypeLabel", "map") metadata: Record | null; @@ -767,7 +776,7 @@ model RunStepDeltaStepDetailsToolCallsFileSearchObject /** For now, this is always going to be an empty object. 
*/ @extension("x-oaiTypeLabel", "map") - file_search: Record; + file_search: Record; } // Tool customization: apply custom, common base type to union items diff --git a/.typespec/runs/operations.tsp b/.typespec/runs/operations.tsp index b8760fef0..ac488268d 100644 --- a/.typespec/runs/operations.tsp +++ b/.typespec/runs/operations.tsp @@ -9,7 +9,7 @@ using TypeSpec.OpenAPI; namespace OpenAI; -@route("threads") +@route("/threads") interface Runs { @route("runs") @post @@ -51,7 +51,7 @@ interface Runs { * Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` * for descending order. */ - @query order?: ListOrder = ListOrder.desc, + @query order?: "asc" | "desc" = "desc", /** * A cursor for use in pagination. `after` is an object ID that defines your place in the list. @@ -150,7 +150,7 @@ interface Runs { * Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` * for descending order. */ - @query order?: ListOrder = ListOrder.desc, + @query order?: "asc" | "desc" = "desc", /** * A cursor for use in pagination. `after` is an object ID that defines your place in the list. diff --git a/.typespec/threads/models.tsp b/.typespec/threads/models.tsp index e1837d740..71a42144e 100644 --- a/.typespec/threads/models.tsp +++ b/.typespec/threads/models.tsp @@ -23,13 +23,14 @@ model CreateThreadRequest { A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. """) @maxItems(20) - file_ids?: string[] = []; + file_ids?: string[] = #[]; }; // Tool customization: use custom type for sophisticated union - file_search?: CreateThreadRequestToolResourcesFileSearch; + file_search?: ToolResourcesFileSearch; } | null; + // Tool customization: specialize known metadata string maps /** Set of 16 key-value pairs that can be attached to an object. 
This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. */ @extension("x-oaiTypeLabel", "map") metadata?: Record | null; @@ -45,15 +46,14 @@ model ModifyThreadRequest { A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. """) @maxItems(20) - file_ids?: string[] = []; - }; - file_search?: { - /** The [vector store](/docs/api-reference/vector-stores/object) attached to this thread. There can be a maximum of 1 vector store attached to the thread. */ - @maxItems(1) - vector_store_ids?: string[]; + file_ids?: string[] = #[]; }; + + // Tool customization: use custom type for sophisticated union + file_search?: ToolResourcesFileSearchIdsOnly; } | null; + // Tool customization: specialize known metadata string maps /** Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. */ @extension("x-oaiTypeLabel", "map") metadata?: Record | null; @@ -89,7 +89,7 @@ model ThreadObject { A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. """) @maxItems(20) - file_ids?: string[] = []; + file_ids?: string[] = #[]; }; file_search?: { /** The [vector store](/docs/api-reference/vector-stores/object) attached to this thread. There can be a maximum of 1 vector store attached to the thread. */ @@ -98,6 +98,7 @@ model ThreadObject { }; } | null; + // Tool customization: specialize known metadata string maps /** Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. 
Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. */ @extension("x-oaiTypeLabel", "map") metadata: Record | null; diff --git a/.typespec/uploads/main.tsp b/.typespec/uploads/main.tsp new file mode 100644 index 000000000..144c4aeaf --- /dev/null +++ b/.typespec/uploads/main.tsp @@ -0,0 +1 @@ +import "./operations.tsp"; diff --git a/.typespec/uploads/models.tsp b/.typespec/uploads/models.tsp new file mode 100644 index 000000000..13fe13989 --- /dev/null +++ b/.typespec/uploads/models.tsp @@ -0,0 +1,100 @@ +/* + * This file was automatically generated from an OpenAPI .yaml file. + * Edits made directly to this file will be lost. + */ + +import "../files"; + +using TypeSpec.OpenAPI; + +namespace OpenAI; + +model CreateUploadRequest { + /** The name of the file to upload. */ + filename: string; + + /** + * The intended purpose of the uploaded file. + * + * See the [documentation on File purposes](/docs/api-reference/files/create#files-create-purpose). + */ + purpose: "assistants" | "batch" | "fine-tune" | "vision"; + + /** The number of bytes in the file you are uploading. */ + bytes: int32; + + /** + * The MIME type of the file. + * + * This must fall within the supported MIME types for your file purpose. See the supported MIME types for assistants and vision. + */ + mime_type: string; +} + +model AddUploadPartRequest { + /** The chunk of bytes for this Part. */ + data: bytes; +} + +model CompleteUploadRequest { + /** The ordered list of Part IDs. */ + part_ids: string[]; + + /** The optional md5 checksum for the file contents to verify if the bytes uploaded matches what you expect. */ + md5?: string; +} + +alias CancelUploadRequest = unknown; + +/** The Upload object can accept byte chunks in the form of Parts. */ +model Upload { + /** The Upload unique identifier, which can be referenced in API endpoints. 
*/ + id: string; + + // Tool customization: 'created' and fields ending in '_at' are Unix encoded utcDateTime + /** The Unix timestamp (in seconds) for when the Upload was created. */ + @encode("unixTimestamp", int32) + created_at: utcDateTime; + + /** The name of the file to be uploaded. */ + filename: string; + + /** The intended number of bytes to be uploaded. */ + bytes: int32; + + /** The intended purpose of the file. [Please refer here](/docs/api-reference/files/object#files/object-purpose) for acceptable values. */ + purpose: string; + + /** The status of the Upload. */ + status: "pending" | "completed" | "cancelled" | "expired"; + + // Tool customization: 'created' and fields ending in '_at' are Unix encoded utcDateTime + /** The Unix timestamp (in seconds) for when the Upload was created. */ + @encode("unixTimestamp", int32) + expires_at: utcDateTime; + + /** The object type, which is always "upload". */ + object?: "upload"; + + /** The ready File object after the Upload is completed. */ + file?: OpenAIFile | null; +} + +/** The upload Part represents a chunk of bytes we can add to an Upload object. */ +model UploadPart { + /** The upload Part unique identifier, which can be referenced in API endpoints. */ + id: string; + + // Tool customization: 'created' and fields ending in '_at' are Unix encoded utcDateTime + /** The Unix timestamp (in seconds) for when the Part was created. */ + @encode("unixTimestamp", int32) + created_at: utcDateTime; + + /** The ID of the Upload object that this Part was added to. */ + upload_id: string; + + @doc(""" + The object type, which is always `upload.part`. 
+ """) + object: "upload.part"; +} diff --git a/.typespec/uploads/operations.tsp b/.typespec/uploads/operations.tsp new file mode 100644 index 000000000..1da4a995a --- /dev/null +++ b/.typespec/uploads/operations.tsp @@ -0,0 +1,70 @@ +import "@typespec/http"; +import "@typespec/openapi"; + +import "../common"; +import "./models.tsp"; + +using TypeSpec.Http; +using TypeSpec.OpenAPI; + +namespace OpenAI; + +@route("/uploads") +interface Uploads { + @post + @operationId("createUpload") + @tag("Uploads") + @summary(""" + Creates an intermediate [Upload](/docs/api-reference/uploads/object) object that you can add [Parts](/docs/api-reference/uploads/part-object) to. Currently, an Upload can accept at most 8 GB in total and expires after an hour after you create it. + + Once you complete the Upload, we will create a [File](/docs/api-reference/files/object) object that contains all the parts you uploaded. This File is usable in the rest of our platform as a regular File object. + + For certain `purpose`s, the correct `mime_type` must be specified. Please refer to documentation for the supported MIME types for your use case: + - [Assistants](/docs/assistants/tools/file-search/supported-files) + + For guidance on the proper filename extensions for each purpose, please follow the documentation on [creating a File](/docs/api-reference/files/create). + """) + createUpload(@body requestBody: CreateUploadRequest): Upload | ErrorResponse; + + @route("{upload_id}/parts") + @post + @operationId("addUploadPart") + @tag("Uploads") + @summary(""" + Adds a [Part](/docs/api-reference/uploads/part-object) to an [Upload](/docs/api-reference/uploads/object) object. A Part represents a chunk of bytes from the file you are trying to upload. + + Each Part can be at most 64 MB, and you can add Parts until you hit the Upload maximum of 8 GB. + + It is possible to add multiple Parts in parallel. 
You can decide the intended order of the Parts when you [complete the Upload](/docs/api-reference/uploads/complete). + """) + addUploadPart( + @path upload_id: string, + @header contentType: "multipart/form-data", + @body requestBody: AddUploadPartRequest, + ): UploadPart | ErrorResponse; + + @route("{upload_id}/complete") + @post + @operationId("completeUpload") + @tag("Uploads") + @summary(""" + Completes the [Upload](/docs/api-reference/uploads/object). + + Within the returned Upload object, there is a nested [File](/docs/api-reference/files/object) object that is ready to use in the rest of the platform. + + You can specify the order of the Parts by passing in an ordered list of the Part IDs. + + The number of bytes uploaded upon completion must match the number of bytes initially specified when creating the Upload object. No Parts may be added after an Upload is completed. + """) + completeUpload( + @path upload_id: string, + @body requestBody: CompleteUploadRequest, + ): Upload | ErrorResponse; + + @route("{upload_id}/cancel") + @post + @operationId("cancelUpload") + @tag("Uploads") + @summary("Cancels the Upload. No Parts may be added after an Upload is cancelled.") + cancelUpload(@path upload_id: string): Upload | ErrorResponse; +} diff --git a/.typespec/vector-stores/models.tsp b/.typespec/vector-stores/models.tsp index 8f780a239..4d923b2ba 100644 --- a/.typespec/vector-stores/models.tsp +++ b/.typespec/vector-stores/models.tsp @@ -79,6 +79,7 @@ model VectorStoreObject { @encode("unixTimestamp", int32) last_active_at: utcDateTime | null; + // Tool customization: specialize known metadata string maps /** Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. 
*/ @extension("x-oaiTypeLabel", "map") metadata: Record | null; @@ -102,6 +103,7 @@ model CreateVectorStoreRequest { @extension("x-oaiExpandable", true) chunking_strategy?: AutoChunkingStrategyRequestParam | StaticChunkingStrategyRequestParam; + // Tool customization: specialize known metadata string maps /** Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. */ @extension("x-oaiTypeLabel", "map") metadata?: Record | null; @@ -113,6 +115,7 @@ model UpdateVectorStoreRequest { expires_after?: VectorStoreExpirationAfter | null; + // Tool customization: specialize known metadata string maps /** Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. */ @extension("x-oaiTypeLabel", "map") metadata?: Record | null; @@ -167,11 +170,7 @@ model VectorStoreFileObject { @doc(""" One of `server_error` or `rate_limit_exceeded`. """) - code: - | "internal_error" - | "file_not_found" - | "parsing_error" - | "unhandled_mime_type"; + code: "server_error" | "unsupported_file" | "invalid_file"; /** A human-readable description of the error. */ message: string; diff --git a/.typespec/vector-stores/operations.tsp b/.typespec/vector-stores/operations.tsp index 19d878781..faa11b48d 100644 --- a/.typespec/vector-stores/operations.tsp +++ b/.typespec/vector-stores/operations.tsp @@ -10,7 +10,7 @@ using TypeSpec.OpenAPI; namespace OpenAI; -@route("vector_stores") +@route("/vector_stores") interface VectorStores { @get @operationId("listVectorStores") @@ -27,7 +27,7 @@ interface VectorStores { * Sort order by the `created_at` timestamp of the objects. 
`asc` for ascending order and`desc` * for descending order. */ - @query order?: ListOrder = ListOrder.desc, + @query order?: "asc" | "desc" = "desc", /** * A cursor for use in pagination. `after` is an object ID that defines your place in the list. @@ -103,7 +103,7 @@ interface VectorStores { * Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` * for descending order. */ - @query order?: ListOrder = ListOrder.desc, + @query order?: "asc" | "desc" = "desc", /** * A cursor for use in pagination. `after` is an object ID that defines your place in the list. @@ -223,7 +223,7 @@ interface VectorStores { * Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc` * for descending order. */ - @query order?: ListOrder = ListOrder.desc, + @query order?: "asc" | "desc" = "desc", /** * A cursor for use in pagination. `after` is an object ID that defines your place in the list. diff --git a/README.md b/README.md index 1bba18c76..f9d6e9508 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # A conversion of the OpenAI OpenAPI to TypeSpec -Snapshot: https://raw.githubusercontent.com/openai/openai-openapi/dd73070b1d507645d24c249a63ebebd3ec38c0cb/openapi.yaml +Snapshot: https://raw.githubusercontent.com/openai/openai-openapi/3d5576596e5fe1cd3b88ddcd407dd1c5f3594f02/openapi.yaml There are some deltas: diff --git a/openapi3-original.yaml b/openapi3-original.yaml index 73ca4017f..81f8e0ed1 100644 --- a/openapi3-original.yaml +++ b/openapi3-original.yaml @@ -2,7 +2,7 @@ openapi: 3.0.0 info: title: OpenAI API description: The OpenAI REST API. Please see https://platform.openai.com/docs/api-reference for more details. - version: "2.0.0" + version: "2.3.0" termsOfService: https://openai.com/policies/terms-of-use contact: name: OpenAI Support @@ -29,12 +29,16 @@ tags: description: Create large batches of API requests to run asynchronously. 
- name: Files description: Files are used to upload documents that can be used with features like Assistants and Fine-tuning. + - name: Uploads + description: Use Uploads to upload large files in multiple parts. - name: Images description: Given a prompt and/or an input image, the model will generate a new image. - name: Models description: List and describe the various models available in the API. - name: Moderations description: Given a input text, outputs if the model classifies it as potentially harmful. + - name: Audit Logs + description: List user actions and configuration changes within this organization. paths: # Note: When adding an endpoint, make sure you also add it in the `groups` section, in the end of this file, # under the appropriate group @@ -117,7 +121,7 @@ paths: "id": "chatcmpl-123", "object": "chat.completion", "created": 1677652288, - "model": "gpt-3.5-turbo-0125", + "model": "gpt-4o-mini", "system_fingerprint": "fp_44709d6fcb", "choices": [{ "index": 0, @@ -141,7 +145,7 @@ paths: -H "Content-Type: application/json" \ -H "Authorization: Bearer $OPENAI_API_KEY" \ -d '{ - "model": "gpt-4-turbo", + "model": "gpt-4o", "messages": [ { "role": "user", @@ -167,7 +171,7 @@ paths: client = OpenAI() response = client.chat.completions.create( - model="gpt-4-turbo", + model="gpt-4o", messages=[ { "role": "user", @@ -191,7 +195,7 @@ paths: async function main() { const response = await openai.chat.completions.create({ - model: "gpt-4-turbo", + model: "gpt-4o", messages: [ { role: "user", @@ -214,7 +218,7 @@ paths: "id": "chatcmpl-123", "object": "chat.completion", "created": 1677652288, - "model": "gpt-3.5-turbo-0125", + "model": "gpt-4o-mini", "system_fingerprint": "fp_44709d6fcb", "choices": [{ "index": 0, @@ -289,13 +293,13 @@ paths: main(); response: &chat_completion_chunk_example | - {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-3.5-turbo-0125", "system_fingerprint": "fp_44709d6fcb", 
"choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]} + {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-4o-mini", "system_fingerprint": "fp_44709d6fcb", "choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]} - {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-3.5-turbo-0125", "system_fingerprint": "fp_44709d6fcb", "choices":[{"index":0,"delta":{"content":"Hello"},"logprobs":null,"finish_reason":null}]} + {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-4o-mini", "system_fingerprint": "fp_44709d6fcb", "choices":[{"index":0,"delta":{"content":"Hello"},"logprobs":null,"finish_reason":null}]} .... - {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-3.5-turbo-0125", "system_fingerprint": "fp_44709d6fcb", "choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]} + {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-4o-mini", "system_fingerprint": "fp_44709d6fcb", "choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]} - title: Functions request: curl: | @@ -303,7 +307,7 @@ paths: -H "Content-Type: application/json" \ -H "Authorization: Bearer $OPENAI_API_KEY" \ -d '{ - "model": "gpt-4-turbo", + "model": "gpt-4o", "messages": [ { "role": "user", @@ -397,7 +401,7 @@ paths: ]; const response = await openai.chat.completions.create({ - model: "gpt-4-turbo", + model: "gpt-4o", messages: messages, tools: tools, tool_choice: "auto", @@ -412,7 +416,7 @@ paths: "id": "chatcmpl-abc123", "object": "chat.completion", "created": 1699896916, - "model": "gpt-3.5-turbo-0125", + "model": "gpt-4o-mini", "choices": [ { "index": 0, @@ -494,7 +498,7 @@ paths: "id": "chatcmpl-123", "object": "chat.completion", "created": 1702685778, - "model": "gpt-3.5-turbo-0125", + 
"model": "gpt-4o-mini", "choices": [ { "index": 0, @@ -1721,7 +1725,219 @@ paths: } main(); + /uploads: + post: + operationId: createUpload + tags: + - Uploads + summary: | + Creates an intermediate [Upload](/docs/api-reference/uploads/object) object that you can add [Parts](/docs/api-reference/uploads/part-object) to. Currently, an Upload can accept at most 8 GB in total and expires after an hour after you create it. + + Once you complete the Upload, we will create a [File](/docs/api-reference/files/object) object that contains all the parts you uploaded. This File is usable in the rest of our platform as a regular File object. + + For certain `purpose`s, the correct `mime_type` must be specified. Please refer to documentation for the supported MIME types for your use case: + - [Assistants](/docs/assistants/tools/file-search/supported-files) + + For guidance on the proper filename extensions for each purpose, please follow the documentation on [creating a File](/docs/api-reference/files/create). + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateUploadRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/Upload" + x-oaiMeta: + name: Create upload + group: uploads + returns: The [Upload](/docs/api-reference/uploads/object) object with status `pending`. 
+ examples: + request: + curl: | + curl https://api.openai.com/v1/uploads \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "purpose": "fine-tune", + "filename": "training_examples.jsonl", + "bytes": 2147483648, + "mime_type": "text/jsonl" + }' + response: | + { + "id": "upload_abc123", + "object": "upload", + "bytes": 2147483648, + "created_at": 1719184911, + "filename": "training_examples.jsonl", + "purpose": "fine-tune", + "status": "pending", + "expires_at": 1719127296 + } + + /uploads/{upload_id}/parts: + post: + operationId: addUploadPart + tags: + - Uploads + summary: | + Adds a [Part](/docs/api-reference/uploads/part-object) to an [Upload](/docs/api-reference/uploads/object) object. A Part represents a chunk of bytes from the file you are trying to upload. + + Each Part can be at most 64 MB, and you can add Parts until you hit the Upload maximum of 8 GB. + + It is possible to add multiple Parts in parallel. You can decide the intended order of the Parts when you [complete the Upload](/docs/api-reference/uploads/complete). + parameters: + - in: path + name: upload_id + required: true + schema: + type: string + example: upload_abc123 + description: | + The ID of the Upload. + requestBody: + required: true + content: + multipart/form-data: + schema: + $ref: "#/components/schemas/AddUploadPartRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/UploadPart" + x-oaiMeta: + name: Add upload part + group: uploads + returns: The upload [Part](/docs/api-reference/uploads/part-object) object. + examples: + request: + curl: | + curl https://api.openai.com/v1/uploads/upload_abc123/parts + -F data="aHR0cHM6Ly9hcGkub3BlbmFpLmNvbS92MS91cGxvYWRz..." 
+ response: | + { + "id": "part_def456", + "object": "upload.part", + "created_at": 1719185911, + "upload_id": "upload_abc123" + } + + /uploads/{upload_id}/complete: + post: + operationId: completeUpload + tags: + - Uploads + summary: | + Completes the [Upload](/docs/api-reference/uploads/object). + + Within the returned Upload object, there is a nested [File](/docs/api-reference/files/object) object that is ready to use in the rest of the platform. + + You can specify the order of the Parts by passing in an ordered list of the Part IDs. + + The number of bytes uploaded upon completion must match the number of bytes initially specified when creating the Upload object. No Parts may be added after an Upload is completed. + parameters: + - in: path + name: upload_id + required: true + schema: + type: string + example: upload_abc123 + description: | + The ID of the Upload. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CompleteUploadRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/Upload" + x-oaiMeta: + name: Complete upload + group: uploads + returns: The [Upload](/docs/api-reference/uploads/object) object with status `completed` with an additional `file` property containing the created usable File object. 
+ examples: + request: + curl: | + curl https://api.openai.com/v1/uploads/upload_abc123/complete + -d '{ + "part_ids": ["part_def456", "part_ghi789"] + }' + response: | + { + "id": "upload_abc123", + "object": "upload", + "bytes": 2147483648, + "created_at": 1719184911, + "filename": "training_examples.jsonl", + "purpose": "fine-tune", + "status": "completed", + "expires_at": 1719127296, + "file": { + "id": "file-xyz321", + "object": "file", + "bytes": 2147483648, + "created_at": 1719186911, + "filename": "training_examples.jsonl", + "purpose": "fine-tune", + } + } + /uploads/{upload_id}/cancel: + post: + operationId: cancelUpload + tags: + - Uploads + summary: | + Cancels the Upload. No Parts may be added after an Upload is cancelled. + parameters: + - in: path + name: upload_id + required: true + schema: + type: string + example: upload_abc123 + description: | + The ID of the Upload. + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/Upload" + x-oaiMeta: + name: Cancel upload + group: uploads + returns: The [Upload](/docs/api-reference/uploads/object) object with status `cancelled`. 
+ examples: + request: + curl: | + curl https://api.openai.com/v1/uploads/upload_abc123/cancel + response: | + { + "id": "upload_abc123", + "object": "upload", + "bytes": 2147483648, + "created_at": 1719184911, + "filename": "training_examples.jsonl", + "purpose": "fine-tune", + "status": "cancelled", + "expires_at": 1719127296 + } + /fine_tuning/jobs: post: operationId: createFineTuningJob @@ -1759,7 +1975,7 @@ paths: -H "Authorization: Bearer $OPENAI_API_KEY" \ -d '{ "training_file": "file-BK7bzQj3FfZFXr7DbL6xJwfo", - "model": "gpt-3.5-turbo" + "model": "gpt-4o-mini" }' python: | from openai import OpenAI @@ -1767,7 +1983,7 @@ paths: client.fine_tuning.jobs.create( training_file="file-abc123", - model="gpt-3.5-turbo" + model="gpt-4o-mini" ) node.js: | import OpenAI from "openai"; @@ -1787,8 +2003,8 @@ paths: { "object": "fine_tuning.job", "id": "ftjob-abc123", - "model": "gpt-3.5-turbo-0125", - "created_at": 1614807352, + "model": "gpt-4o-mini-2024-07-18", + "created_at": 1721764800, "fine_tuned_model": null, "organization_id": "org-123", "result_files": [], @@ -1804,7 +2020,7 @@ paths: -H "Authorization: Bearer $OPENAI_API_KEY" \ -d '{ "training_file": "file-abc123", - "model": "gpt-3.5-turbo", + "model": "gpt-4o-mini", "hyperparameters": { "n_epochs": 2 } @@ -1815,7 +2031,7 @@ paths: client.fine_tuning.jobs.create( training_file="file-abc123", - model="gpt-3.5-turbo", + model="gpt-4o-mini", hyperparameters={ "n_epochs":2 } @@ -1828,7 +2044,7 @@ paths: async function main() { const fineTune = await openai.fineTuning.jobs.create({ training_file: "file-abc123", - model: "gpt-3.5-turbo", + model: "gpt-4o-mini", hyperparameters: { n_epochs: 2 } }); @@ -1840,8 +2056,8 @@ paths: { "object": "fine_tuning.job", "id": "ftjob-abc123", - "model": "gpt-3.5-turbo-0125", - "created_at": 1614807352, + "model": "gpt-4o-mini-2024-07-18", + "created_at": 1721764800, "fine_tuned_model": null, "organization_id": "org-123", "result_files": [], @@ -1859,7 +2075,7 @@ paths: -d '{ 
"training_file": "file-abc123", "validation_file": "file-abc123", - "model": "gpt-3.5-turbo" + "model": "gpt-4o-mini" }' python: | from openai import OpenAI @@ -1868,7 +2084,7 @@ paths: client.fine_tuning.jobs.create( training_file="file-abc123", validation_file="file-def456", - model="gpt-3.5-turbo" + model="gpt-4o-mini" ) node.js: | import OpenAI from "openai"; @@ -1889,8 +2105,8 @@ paths: { "object": "fine_tuning.job", "id": "ftjob-abc123", - "model": "gpt-3.5-turbo-0125", - "created_at": 1614807352, + "model": "gpt-4o-mini-2024-07-18", + "created_at": 1721764800, "fine_tuned_model": null, "organization_id": "org-123", "result_files": [], @@ -1907,7 +2123,7 @@ paths: -d '{ "training_file": "file-abc123", "validation_file": "file-abc123", - "model": "gpt-3.5-turbo", + "model": "gpt-4o-mini", "integrations": [ { "type": "wandb", @@ -1925,8 +2141,8 @@ paths: { "object": "fine_tuning.job", "id": "ftjob-abc123", - "model": "gpt-3.5-turbo-0125", - "created_at": 1614807352, + "model": "gpt-4o-mini-2024-07-18", + "created_at": 1721764800, "fine_tuned_model": null, "organization_id": "org-123", "result_files": [], @@ -2166,7 +2382,7 @@ paths: { "object": "fine_tuning.job.event", "id": "ft-event-ddTJfwuMVpfLXseO0Am0Gqjm", - "created_at": 1692407401, + "created_at": 1721764800, "level": "info", "message": "Fine tuning job successfully completed", "data": null, @@ -2175,9 +2391,9 @@ paths: { "object": "fine_tuning.job.event", "id": "ft-event-tyiGuB72evQncpH87xe505Sv", - "created_at": 1692407400, + "created_at": 1721764800, "level": "info", - "message": "New fine-tuned model created: ft:gpt-3.5-turbo:openai::7p4lURel", + "message": "New fine-tuned model created: ft:gpt-4o-mini:openai::7p4lURel", "data": null, "type": "message" } @@ -2236,8 +2452,8 @@ paths: { "object": "fine_tuning.job", "id": "ftjob-abc123", - "model": "gpt-3.5-turbo-0125", - "created_at": 1689376978, + "model": "gpt-4o-mini-2024-07-18", + "created_at": 1721764800, "fine_tuned_model": null, 
"organization_id": "org-123", "result_files": [], @@ -2300,8 +2516,8 @@ paths: { "object": "fine_tuning.job.checkpoint", "id": "ftckpt_zc4Q7MP6XxulcVzj4MZdwsAB", - "created_at": 1519129973, - "fine_tuned_model_checkpoint": "ft:gpt-3.5-turbo-0125:my-org:custom-suffix:96olL566:ckpt-step-2000", + "created_at": 1721764867, + "fine_tuned_model_checkpoint": "ft:gpt-4o-mini-2024-07-18:my-org:custom-suffix:96olL566:ckpt-step-2000", "metrics": { "full_valid_loss": 0.134, "full_valid_mean_token_accuracy": 0.874 @@ -2312,8 +2528,8 @@ paths: { "object": "fine_tuning.job.checkpoint", "id": "ftckpt_enQCFmOTGj3syEpYVhBRLTSy", - "created_at": 1519129833, - "fine_tuned_model_checkpoint": "ft:gpt-3.5-turbo-0125:my-org:custom-suffix:7q8mpxmy:ckpt-step-1000", + "created_at": 1721764800, + "fine_tuned_model_checkpoint": "ft:gpt-4o-mini-2024-07-18:my-org:custom-suffix:7q8mpxmy:ckpt-step-1000", "metrics": { "full_valid_loss": 0.167, "full_valid_mean_token_accuracy": 0.781 @@ -2405,7 +2621,7 @@ paths: schema: type: string # ideally this will be an actual ID, so this will always work from browser - example: gpt-3.5-turbo + example: gpt-4o-mini description: The ID of the model to use for this request responses: "200": @@ -2458,7 +2674,7 @@ paths: required: true schema: type: string - example: ft:gpt-3.5-turbo:acemeco:suffix:abc123 + example: ft:gpt-4o-mini:acemeco:suffix:abc123 description: The model to delete responses: "200": @@ -2474,28 +2690,28 @@ paths: examples: request: curl: | - curl https://api.openai.com/v1/models/ft:gpt-3.5-turbo:acemeco:suffix:abc123 \ + curl https://api.openai.com/v1/models/ft:gpt-4o-mini:acemeco:suffix:abc123 \ -X DELETE \ -H "Authorization: Bearer $OPENAI_API_KEY" python: | from openai import OpenAI client = OpenAI() - client.models.delete("ft:gpt-3.5-turbo:acemeco:suffix:abc123") + client.models.delete("ft:gpt-4o-mini:acemeco:suffix:abc123") node.js: |- import OpenAI from "openai"; const openai = new OpenAI(); async function main() { - const model = await 
openai.models.del("ft:gpt-3.5-turbo:acemeco:suffix:abc123"); + const model = await openai.models.del("ft:gpt-4o-mini:acemeco:suffix:abc123"); console.log(model); } main(); response: | { - "id": "ft:gpt-3.5-turbo:acemeco:suffix:abc123", + "id": "ft:gpt-4o-mini:acemeco:suffix:abc123", "object": "model", "deleted": true } @@ -2674,7 +2890,7 @@ paths: "created_at": 1698982736, "name": "Coding Tutor", "description": null, - "model": "gpt-4-turbo", + "model": "gpt-4o", "instructions": "You are a helpful assistant designed to make me better at coding!", "tools": [], "tool_resources": {}, @@ -2689,7 +2905,7 @@ paths: "created_at": 1698982718, "name": "My Assistant", "description": null, - "model": "gpt-4-turbo", + "model": "gpt-4o", "instructions": "You are a helpful assistant designed to make me better at coding!", "tools": [], "tool_resources": {}, @@ -2704,7 +2920,7 @@ paths: "created_at": 1698982643, "name": null, "description": null, - "model": "gpt-4-turbo", + "model": "gpt-4o", "instructions": null, "tools": [], "tool_resources": {}, @@ -2753,7 +2969,7 @@ paths: "instructions": "You are a personal math tutor. When asked a question, write and run Python code to answer the question.", "name": "Math Tutor", "tools": [{"type": "code_interpreter"}], - "model": "gpt-4-turbo" + "model": "gpt-4o" }' python: | @@ -2764,7 +2980,7 @@ paths: instructions="You are a personal math tutor. When asked a question, write and run Python code to answer the question.", name="Math Tutor", tools=[{"type": "code_interpreter"}], - model="gpt-4-turbo", + model="gpt-4o", ) print(my_assistant) node.js: |- @@ -2778,7 +2994,7 @@ paths: "You are a personal math tutor. 
When asked a question, write and run Python code to answer the question.", name: "Math Tutor", tools: [{ type: "code_interpreter" }], - model: "gpt-4-turbo", + model: "gpt-4o", }); console.log(myAssistant); @@ -2792,7 +3008,7 @@ paths: "created_at": 1698984975, "name": "Math Tutor", "description": null, - "model": "gpt-4-turbo", + "model": "gpt-4o", "instructions": "You are a personal math tutor. When asked a question, write and run Python code to answer the question.", "tools": [ { @@ -2815,7 +3031,7 @@ paths: "instructions": "You are an HR bot, and you have access to files to answer employee questions about company policies.", "tools": [{"type": "file_search"}], "tool_resources": {"file_search": {"vector_store_ids": ["vs_123"]}}, - "model": "gpt-4-turbo" + "model": "gpt-4o" }' python: | from openai import OpenAI @@ -2826,7 +3042,7 @@ paths: name="HR Helper", tools=[{"type": "file_search"}], tool_resources={"file_search": {"vector_store_ids": ["vs_123"]}}, - model="gpt-4-turbo" + model="gpt-4o" ) print(my_assistant) node.js: |- @@ -2845,7 +3061,7 @@ paths: vector_store_ids: ["vs_123"] } }, - model: "gpt-4-turbo" + model: "gpt-4o" }); console.log(myAssistant); @@ -2859,7 +3075,7 @@ paths: "created_at": 1699009403, "name": "HR Helper", "description": null, - "model": "gpt-4-turbo", + "model": "gpt-4o", "instructions": "You are an HR bot, and you have access to files to answer employee questions about company policies.", "tools": [ { @@ -2936,7 +3152,7 @@ paths: "created_at": 1699009709, "name": "HR Helper", "description": null, - "model": "gpt-4-turbo", + "model": "gpt-4o", "instructions": "You are an HR bot, and you have access to files to answer employee questions about company policies.", "tools": [ { @@ -2988,7 +3204,7 @@ paths: -d '{ "instructions": "You are an HR bot, and you have access to files to answer employee questions about company policies. 
Always response with info from either of the files.", "tools": [{"type": "file_search"}], - "model": "gpt-4-turbo" + "model": "gpt-4o" }' python: | from openai import OpenAI @@ -2999,7 +3215,7 @@ paths: instructions="You are an HR bot, and you have access to files to answer employee questions about company policies. Always response with info from either of the files.", name="HR Helper", tools=[{"type": "file_search"}], - model="gpt-4-turbo" + model="gpt-4o" ) print(my_updated_assistant) @@ -3016,7 +3232,7 @@ paths: "You are an HR bot, and you have access to files to answer employee questions about company policies. Always response with info from either of the files.", name: "HR Helper", tools: [{ type: "file_search" }], - model: "gpt-4-turbo" + model: "gpt-4o" } ); @@ -3031,7 +3247,7 @@ paths: "created_at": 1699009709, "name": "HR Helper", "description": null, - "model": "gpt-4-turbo", + "model": "gpt-4o", "instructions": "You are an HR bot, and you have access to files to answer employee questions about company policies. Always response with info from either of the files.", "tools": [ { @@ -3680,7 +3896,7 @@ paths: name: Retrieve message group: threads beta: true - returns: The [message](/docs/api-reference/threads/messages/object) object matching the specified ID. + returns: The [message](/docs/api-reference/messages/object) object matching the specified ID. examples: request: curl: | @@ -3768,7 +3984,7 @@ paths: name: Modify message group: threads beta: true - returns: The modified [message](/docs/api-reference/threads/messages/object) object. + returns: The modified [message](/docs/api-reference/messages/object) object. 
examples: request: curl: | @@ -3989,7 +4205,7 @@ paths: "completed_at": null, "required_action": null, "last_error": null, - "model": "gpt-4-turbo", + "model": "gpt-4o", "instructions": "You are a helpful assistant.", "tools": [], "tool_resources": {}, @@ -4068,13 +4284,13 @@ paths: data: {"id":"thread_123","object":"thread","created_at":1710348075,"metadata":{}} event: thread.run.created - data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"tool_resources":{},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} + data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"tool_resources":{},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} event: thread.run.queued - data: 
{"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"tool_resources":{},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} + data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"tool_resources":{},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} event: thread.run.in_progress - data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"tool_resources":{},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} + data: 
{"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"tool_resources":{},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} event: thread.run.step.created data: {"id":"step_001","object":"thread.run.step","created_at":1710348076,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710348675,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":null} @@ -4106,7 +4322,7 @@ paths: data: {"id":"step_001","object":"thread.run.step","created_at":1710348076,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"completed","cancelled_at":null,"completed_at":1710348077,"expires_at":1710348675,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31}} event: thread.run.completed - 
{"id":"run_123","object":"thread.run","created_at":1710348076,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1713226836,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1713226837,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":345,"completion_tokens":11,"total_tokens":356},"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} + {"id":"run_123","object":"thread.run","created_at":1710348076,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1713226836,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1713226837,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":345,"completion_tokens":11,"total_tokens":356},"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} event: done data: [DONE] @@ -4237,13 +4453,13 @@ paths: data: {"id":"thread_123","object":"thread","created_at":1710351818,"metadata":{}} event: thread.run.created - data: {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given 
location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + data: {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. 
San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} event: thread.run.queued - data: {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + data: {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. 
San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} event: thread.run.in_progress - data: {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710351818,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + data: {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710351818,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. 
San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} event: thread.run.step.created data: {"id":"step_001","object":"thread.run.step","created_at":1710351819,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"tool_calls","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710352418,"failed_at":null,"last_error":null,"step_details":{"type":"tool_calls","tool_calls":[]},"usage":null} @@ -4269,7 +4485,7 @@ paths: data: {"id":"step_001","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"\"}"}}]}}} event: thread.run.requires_action - data: {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"requires_action","started_at":1710351818,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":{"type":"submit_tool_outputs","submit_tool_outputs":{"tool_calls":[{"id":"call_XXNp8YGaFrjrSjgqxtC8JJ1B","type":"function","function":{"name":"get_current_weather","arguments":"{\"location\":\"San Francisco, CA\",\"unit\":\"fahrenheit\"}"}}]}},"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. 
San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":345,"completion_tokens":11,"total_tokens":356},"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + data: {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"requires_action","started_at":1710351818,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":{"type":"submit_tool_outputs","submit_tool_outputs":{"tool_calls":[{"id":"call_XXNp8YGaFrjrSjgqxtC8JJ1B","type":"function","function":{"name":"get_current_weather","arguments":"{\"location\":\"San Francisco, CA\",\"unit\":\"fahrenheit\"}"}}]}},"last_error":null,"model":"gpt-4o","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. 
San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":345,"completion_tokens":11,"total_tokens":356},"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} event: done data: [DONE] @@ -4370,7 +4586,7 @@ paths: "failed_at": null, "completed_at": 1699075073, "last_error": null, - "model": "gpt-4-turbo", + "model": "gpt-4o", "instructions": null, "incomplete_details": null, "tools": [ @@ -4417,7 +4633,7 @@ paths: "failed_at": null, "completed_at": 1699063291, "last_error": null, - "model": "gpt-4-turbo", + "model": "gpt-4o", "instructions": null, "incomplete_details": null, "tools": [ @@ -4536,7 +4752,7 @@ paths: "failed_at": null, "completed_at": 1699063291, "last_error": null, - "model": "gpt-4-turbo", + "model": "gpt-4o", "instructions": null, "incomplete_details": null, "tools": [ @@ -4600,13 +4816,13 @@ paths: main(); response: | event: thread.run.created - data: {"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710331240,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + data: 
{"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710331240,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} event: thread.run.queued - data: {"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710331240,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + data: {"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710331240,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} event: thread.run.in_progress - data: 
{"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710330641,"expires_at":1710331240,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + data: {"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710330641,"expires_at":1710331240,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} event: thread.run.step.created data: {"id":"step_001","object":"thread.run.step","created_at":1710330641,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710331240,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":null} @@ -4638,7 +4854,7 @@ paths: data: 
{"id":"step_001","object":"thread.run.step","created_at":1710330641,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"completed","cancelled_at":null,"completed_at":1710330642,"expires_at":1710331240,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31}} event: thread.run.completed - data: {"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1710330641,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1710330642,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31},"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + data: {"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1710330641,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1710330642,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31},"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} event: done data: [DONE] @@ -4755,13 +4971,13 @@ paths: main(); response: | event: thread.run.created - data: 
{"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} event: thread.run.queued - data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + data: 
{"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} event: thread.run.in_progress - data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710348075,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710348075,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} event: thread.run.step.created data: 
{"id":"step_001","object":"thread.run.step","created_at":1710348076,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710348675,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":null} @@ -4793,7 +5009,7 @@ paths: data: {"id":"step_001","object":"thread.run.step","created_at":1710348076,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"completed","cancelled_at":null,"completed_at":1710348077,"expires_at":1710348675,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31}} event: thread.run.completed - data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1710348075,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1710348077,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31},"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + data: 
{"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1710348075,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1710348077,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31},"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} event: done data: [DONE] @@ -4874,7 +5090,7 @@ paths: "failed_at": null, "completed_at": 1699075073, "last_error": null, - "model": "gpt-4-turbo", + "model": "gpt-4o", "instructions": null, "incomplete_details": null, "tools": [ @@ -4993,7 +5209,7 @@ paths: "failed_at": null, "completed_at": 1699075073, "last_error": null, - "model": "gpt-4-turbo", + "model": "gpt-4o", "instructions": null, "incomplete_details": null, "tools": [ @@ -5137,7 +5353,7 @@ paths: "failed_at": null, "completed_at": null, "last_error": null, - "model": "gpt-4-turbo", + "model": "gpt-4o", "instructions": null, "tools": [ { @@ -5241,10 +5457,10 @@ paths: data: {"id":"step_001","object":"thread.run.step","created_at":1710352449,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"tool_calls","status":"completed","cancelled_at":null,"completed_at":1710352475,"expires_at":1710353047,"failed_at":null,"last_error":null,"step_details":{"type":"tool_calls","tool_calls":[{"id":"call_iWr0kQ2EaYMaxNdl0v3KYkx7","type":"function","function":{"name":"get_current_weather","arguments":"{\"location\":\"San Francisco, CA\",\"unit\":\"fahrenheit\"}","output":"70 degrees and sunny."}}]},"usage":{"prompt_tokens":291,"completion_tokens":24,"total_tokens":315}} event: thread.run.queued - data: 
{"id":"run_123","object":"thread.run","created_at":1710352447,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":1710352448,"expires_at":1710353047,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + data: {"id":"run_123","object":"thread.run","created_at":1710352447,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":1710352448,"expires_at":1710353047,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. 
San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} event: thread.run.in_progress - data: {"id":"run_123","object":"thread.run","created_at":1710352447,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710352475,"expires_at":1710353047,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + data: {"id":"run_123","object":"thread.run","created_at":1710352447,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710352475,"expires_at":1710353047,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. 
San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} event: thread.run.step.created data: {"id":"step_002","object":"thread.run.step","created_at":1710352476,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710353047,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_002"}},"usage":null} @@ -5282,7 +5498,7 @@ paths: data: {"id":"step_002","object":"thread.run.step","created_at":1710352476,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"completed","cancelled_at":null,"completed_at":1710352477,"expires_at":1710353047,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_002"}},"usage":{"prompt_tokens":329,"completion_tokens":18,"total_tokens":347}} event: thread.run.completed - data: {"id":"run_123","object":"thread.run","created_at":1710352447,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1710352475,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1710352477,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. 
San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31},"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + data: {"id":"run_123","object":"thread.run","created_at":1710352447,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1710352475,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1710352477,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31},"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} event: done data: [DONE] @@ -5364,7 +5580,7 @@ paths: "failed_at": null, "completed_at": null, "last_error": null, - "model": "gpt-4-turbo", + "model": "gpt-4o", "instructions": "You summarize books.", "tools": [ { @@ -7013,877 +7229,1596 @@ paths: } } -components: - securitySchemes: - ApiKeyAuth: - type: http - scheme: "bearer" + # Organization + # Audit Logs List + /organization/audit_logs: + get: + summary: List user actions and configuration changes within this organization. 
+ operationId: list-audit-logs + tags: + - Audit Logs + parameters: + - name: effective_at + in: query + description: Return only events whose `effective_at` (Unix seconds) is in this range. + required: false + schema: + type: object + properties: + gt: + type: integer + description: Return only events whose `effective_at` (Unix seconds) is greater than this value. + gte: + type: integer + description: Return only events whose `effective_at` (Unix seconds) is greater than or equal to this value. + lt: + type: integer + description: Return only events whose `effective_at` (Unix seconds) is less than this value. + lte: + type: integer + description: Return only events whose `effective_at` (Unix seconds) is less than or equal to this value. + - name: project_ids[] + in: query + description: Return only events for these projects. + required: false + schema: + type: array + items: + type: string + - name: event_types[] + in: query + description: Return only events with a `type` in one of these values. For example, `project.created`. For all options, see the documentation for the [audit log object](/docs/api-reference/audit-logs/object). + required: false + schema: + type: array + items: + $ref: "#/components/schemas/AuditLogEventType" + - name: actor_ids[] + in: query + description: Return only events performed by these actors. Can be a user ID, a service account ID, or an api key tracking ID. + required: false + schema: + type: array + items: + type: string + - name: actor_emails[] + in: query + description: Return only events performed by users with these emails. + required: false + schema: + type: array + items: + type: string + - name: resource_ids[] + in: query + description: Return only events performed on these targets. For example, a project ID updated. 
+ required: false + schema: + type: array + items: + type: string + - name: limit + in: query + description: *pagination_limit_param_description + required: false + schema: + type: integer + default: 20 + - name: after + in: query + description: *pagination_after_param_description + schema: + type: string + - name: before + in: query + description: *pagination_before_param_description + schema: + type: string + responses: + "200": + description: Audit logs listed successfully. + content: + application/json: + schema: + $ref: "#/components/schemas/ListAuditLogsResponse" + x-oaiMeta: + name: List audit logs + group: audit-logs + returns: A list of paginated [Audit Log](/docs/api-reference/audit-logs/object) objects. + examples: + request: + curl: | + curl https://api.openai.com/v1/organization/audit_logs \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" \ + response: | + { + "object": "list", + "data": [ + { + "id": "audit_log-xxx_yyyymmdd", + "type": "project.archived", + "effective_at": 1722461446, + "actor": { + "type": "api_key", + "api_key": { + "type": "user", + "user": { + "id": "user-xxx", + "email": "user@example.com" + } + } + }, + "project.archived": { + "id": "proj_abc" + }, + }, + { + "id": "audit_log-yyy__20240101", + "type": "api_key.updated", + "effective_at": 1720804190, + "actor": { + "type": "session", + "session": { + "user": { + "id": "user-xxx", + "email": "user@example.com" + }, + "ip_address": "127.0.0.1", + "user_agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36" + } + }, + "api_key.updated": { + "id": "key_xxxx", + "data": { + "scopes": ["resource_2.operation_2"] + } + }, + } + ], + "first_id": "audit_log-xxx__20240101", + "last_id": "audit_log_yyy__20240101", + "has_more": true + } + /organization/invites: + get: + summary: Returns a list of invites in the organization. 
+ operationId: list-invites + tags: + - Invites + parameters: + - name: limit + in: query + description: *pagination_limit_param_description + required: false + schema: + type: integer + default: 20 + - name: after + in: query + description: *pagination_after_param_description + required: false + schema: + type: string + responses: + "200": + description: Invites listed successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/InviteListResponse' + x-oaiMeta: + name: List invites + group: administration + returns: A list of [Invite](/docs/api-reference/invite/object) objects. + examples: + request: + curl: | + curl https://api.openai.com/v1/organization/invites?after=invite-abc&limit=20 \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "list", + "data": [ + { + "object": "organization.invite", + "id": "invite-abc", + "email": "user@example.com", + "role": "owner", + "status": "accepted", + "invited_at": 1711471533, + "expires_at": 1711471533, + "accepted_at": 1711471533 + } + ], + "first_id": "invite-abc", + "last_id": "invite-abc", + "has_more": false + } - schemas: - Error: - type: object - properties: - code: - type: string - nullable: true - message: - type: string - nullable: false - param: - type: string - nullable: true - type: + post: + summary: Create an invite for a user to the organization. The invite must be accepted by the user before they have access to the organization. + operationId: inviteUser + tags: + - Invites + requestBody: + description: The invite request payload. + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/InviteRequest' + responses: + "200": + description: User invited successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/Invite' + x-oaiMeta: + name: Create invite + group: administration + returns: The created [Invite](/docs/api-reference/invite/object) object. 
+ examples: + request: + curl: | + curl -X POST https://api.openai.com/v1/organization/invites \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" \ + -d '{ + "email": "user@example.com", + "role": "owner" + }' + response: + content: | + { + "object": "organization.invite", + "id": "invite-abc", + "email": "user@example.com", + "role": "owner", + "invited_at": 1711471533, + "expires_at": 1711471533, + "accepted_at": null + } + + /organization/invites/{invite_id}: + get: + summary: Retrieves an invite. + operationId: retrieve-invite + tags: + - Invites + parameters: + - in: path + name: invite_id + required: true + schema: type: string - nullable: false - required: - - type - - message - - param - - code - ErrorResponse: - type: object - properties: - error: - $ref: "#/components/schemas/Error" - required: - - error - - ListModelsResponse: - type: object - properties: - object: - type: string - enum: [list] - data: - type: array - items: - $ref: "#/components/schemas/Model" - required: - - object - - data - DeleteModelResponse: - type: object - properties: - id: - type: string - deleted: - type: boolean - object: + description: The ID of the invite to retrieve. + responses: + "200": + description: Invite retrieved successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/Invite' + x-oaiMeta: + name: Retrieve invite + group: administration + returns: The [Invite](/docs/api-reference/invite/object) object matching the specified ID. + examples: + request: + curl: | + curl https://api.openai.com/v1/organization/invites/invite-abc \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "organization.invite", + "id": "invite-abc", + "email": "user@example.com", + "role": "owner", + "status": "accepted", + "invited_at": 1711471533, + "expires_at": 1711471533, + "accepted_at": 1711471533 + } + delete: + summary: Delete an invite. 
If the invite has already been accepted, it cannot be deleted. + operationId: delete-invite + tags: + - Invites + parameters: + - in: path + name: invite_id + required: true + schema: type: string - required: - - id - - object - - deleted + description: The ID of the invite to delete. + responses: + "200": + description: Invite deleted successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/InviteDeleteResponse' + x-oaiMeta: + name: Delete invite + group: administration + returns: Confirmation that the invite has been deleted + examples: + request: + curl: | + curl -X DELETE https://api.openai.com/v1/organization/invites/invite-abc \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "organization.invite.deleted", + "id": "invite-abc", + "deleted": true + } - CreateCompletionRequest: - type: object - properties: - model: - description: &model_description | - ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. - anyOf: - - type: string - - type: string - enum: ["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"] - x-oaiTypeLabel: string - prompt: - description: &completions_prompt_description | - The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. + /organization/users: + get: + summary: Lists all of the users in the organization. + operationId: list-users + tags: + - Users + parameters: + - name: limit + in: query + description: *pagination_limit_param_description + required: false + schema: + type: integer + default: 20 + - name: after + in: query + description: *pagination_after_param_description + required: false + schema: + type: string + responses: + "200": + description: Users listed successfully. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/UserListResponse' + x-oaiMeta: + name: List users + group: administration + returns: A list of [User](/docs/api-reference/users/object) objects. + examples: + request: + curl: | + curl https://api.openai.com/v1/organization/users?after=user_abc&limit=20 \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "list", + "data": [ + { + "object": "organization.user", + "id": "user_abc", + "name": "First Last", + "email": "user@example.com", + "role": "owner", + "added_at": 1711471533 + } + ], + "first_id": "user-abc", + "last_id": "user-xyz", + "has_more": false + } - Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document. - default: "<|endoftext|>" - nullable: true - oneOf: - - type: string - default: "" - example: "This is a test." - - type: array - items: - type: string - default: "" - example: "This is a test." - - type: array - minItems: 1 - items: - type: integer - example: "[1212, 318, 257, 1332, 13]" - - type: array - minItems: 1 - items: - type: array - minItems: 1 - items: - type: integer - example: "[[1212, 318, 257, 1332, 13]]" - best_of: - type: integer - default: 1 - minimum: 0 - maximum: 20 - nullable: true - description: &completions_best_of_description | - Generates `best_of` completions server-side and returns the "best" (the one with the highest log probability per token). Results cannot be streamed. + /organization/users/{user_id}: + get: + summary: Retrieves a user by their identifier. + operationId: retrieve-user + tags: + - Users + parameters: + - name: user_id + in: path + description: The ID of the user. + required: true + schema: + type: string + responses: + "200": + description: User retrieved successfully. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/User' + x-oaiMeta: + name: Retrieve user + group: administration + returns: The [User](/docs/api-reference/users/object) object matching the specified ID. + examples: + request: + curl: | + curl https://api.openai.com/v1/organization/users/user_abc \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "organization.user", + "id": "user_abc", + "name": "First Last", + "email": "user@example.com", + "role": "owner", + "added_at": 1711471533 + } - When used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return – `best_of` must be greater than `n`. + post: + summary: Modifies a user's role in the organization. + operationId: modify-user + tags: + - Users + requestBody: + description: The new user role to modify. This must be one of `owner` or `member`. + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/UserRoleUpdateRequest' + responses: + "200": + description: User role updated successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/User' + x-oaiMeta: + name: Modify user + group: administration + returns: The updated [User](/docs/api-reference/users/object) object. + examples: + request: + curl: | + curl -X POST https://api.openai.com/v1/organization/users/user_abc \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" \ + -d '{ + "role": "owner" + }' + response: + content: | + { + "object": "organization.user", + "id": "user_abc", + "name": "First Last", + "email": "user@example.com", + "role": "owner", + "added_at": 1711471533 + } - **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. 
- echo: + delete: + summary: Deletes a user from the organization. + operationId: delete-user + tags: + - Users + parameters: + - name: user_id + in: path + description: The ID of the user. + required: true + schema: + type: string + responses: + "200": + description: User deleted successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/UserDeleteResponse' + x-oaiMeta: + name: Delete user + group: administration + returns: Confirmation of the deleted user + examples: + request: + curl: | + curl -X DELETE https://api.openai.com/v1/organization/users/user_abc \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "organization.user.deleted", + "id": "user_abc", + "deleted": true + } + /organization/projects: + get: + summary: Returns a list of projects. + operationId: list-projects + tags: + - Projects + parameters: + - name: limit + in: query + description: *pagination_limit_param_description + required: false + schema: + type: integer + default: 20 + - name: after + in: query + description: *pagination_after_param_description + required: false + schema: + type: string + - name: include_archived + in: query + schema: type: boolean default: false - nullable: true - description: &completions_echo_description > - Echo back the prompt in addition to the completion - frequency_penalty: - type: number - default: 0 - minimum: -2 - maximum: 2 - nullable: true - description: &completions_frequency_penalty_description | - Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. + description: If `true` returns all projects including those that have been `archived`. Archived projects are not included by default. + responses: + "200": + description: Projects listed successfully. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/ProjectListResponse' + x-oaiMeta: + name: List projects + group: administration + returns: A list of [Project](/docs/api-reference/projects/object) objects. + examples: + request: + curl: | + curl https://api.openai.com/v1/organization/projects?after=proj_abc&limit=20&include_archived=false \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "list", + "data": [ + { + "id": "proj_abc", + "object": "organization.project", + "name": "Project example", + "created_at": 1711471533, + "archived_at": null, + "status": "active" + } + ], + "first_id": "proj-abc", + "last_id": "proj-xyz", + "has_more": false + } - [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details) - logit_bias: &completions_logit_bias - type: object - x-oaiTypeLabel: map - default: null - nullable: true - additionalProperties: - type: integer - description: &completions_logit_bias_description | - Modify the likelihood of specified tokens appearing in the completion. - - Accepts a JSON object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. - - As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token from being generated. 
- logprobs: &completions_logprobs_configuration - type: integer - minimum: 0 - maximum: 5 - default: null - nullable: true - description: &completions_logprobs_description | - Include the log probabilities on the `logprobs` most likely output tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. - - The maximum value for `logprobs` is 5. - max_tokens: - type: integer - minimum: 0 - default: 16 - example: 16 - nullable: true - description: &completions_max_tokens_description | - The maximum number of [tokens](/tokenizer) that can be generated in the completion. - - The token count of your prompt plus `max_tokens` cannot exceed the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. - n: - type: integer - minimum: 1 - maximum: 128 - default: 1 - example: 1 - nullable: true - description: &completions_completions_description | - How many completions to generate for each prompt. - - **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. - presence_penalty: - type: number - default: 0 - minimum: -2 - maximum: 2 - nullable: true - description: &completions_presence_penalty_description | - Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. + post: + summary: Create a new project in the organization. Projects can be created and archived, but cannot be deleted. + operationId: create-project + tags: + - Projects + requestBody: + description: The project create request payload. 
+ required: true + content: + application/json: + schema: + $ref: '#/components/schemas/ProjectCreateRequest' + responses: + "200": + description: Project created successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/Project' + x-oaiMeta: + name: Create project + group: administration + returns: The created [Project](/docs/api-reference/projects/object) object. + examples: + request: + curl: | + curl -X POST https://api.openai.com/v1/organization/projects \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" \ + -d '{ + "name": "Project ABC" + }' + response: + content: | + { + "id": "proj_abc", + "object": "organization.project", + "name": "Project ABC", + "created_at": 1711471533, + "archived_at": null, + "status": "active" + } - [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details) - seed: &completions_seed_param - type: integer - minimum: -9223372036854775808 - maximum: 9223372036854775807 - nullable: true - description: | - If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. + /organization/projects/{project_id}: + get: + summary: Retrieves a project. + operationId: retrieve-project + tags: + - Projects + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: + type: string + responses: + "200": + description: Project retrieved successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/Project' + x-oaiMeta: + name: Retrieve project + group: administration + description: Retrieve a project. + returns: The [Project](/docs/api-reference/projects/object) object matching the specified ID. 
+ examples: + request: + curl: | + curl https://api.openai.com/v1/organization/projects/proj_abc \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "id": "proj_abc", + "object": "organization.project", + "name": "Project example", + "created_at": 1711471533, + "archived_at": null, + "status": "active" + } - Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. - stop: - description: &completions_stop_description > - Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. - default: null - nullable: true - oneOf: - - type: string - default: <|endoftext|> - example: "\n" - nullable: true - - type: array - minItems: 1 - maxItems: 4 - items: - type: string - example: '["\n"]' - stream: - description: > - Whether to stream back partial progress. If set, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) - as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). - type: boolean - nullable: true - default: false - stream_options: - $ref: "#/components/schemas/ChatCompletionStreamOptions" - suffix: - description: | - The suffix that comes after a completion of inserted text. + post: + summary: Modifies a project in the organization. + operationId: modify-project + tags: + - Projects + requestBody: + description: The project update request payload. + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/ProjectUpdateRequest' + responses: + "200": + description: Project updated successfully. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/Project' + "400": + description: Error response when updating the default project. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + x-oaiMeta: + name: Modify project + group: administration + returns: The updated [Project](/docs/api-reference/projects/object) object. + examples: + request: + curl: | + curl -X POST https://api.openai.com/v1/organization/projects/proj_abc \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" \ + -d '{ + "name": "Project DEF" + }' - This parameter is only supported for `gpt-3.5-turbo-instruct`. - default: null - nullable: true + /organization/projects/{project_id}/archive: + post: + summary: Archives a project in the organization. Archived projects cannot be used or updated. + operationId: archive-project + tags: + - Projects + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: type: string - example: "test." - temperature: - type: number - minimum: 0 - maximum: 2 - default: 1 - example: 1 - nullable: true - description: &completions_temperature_description | - What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. - - We generally recommend altering this or `top_p` but not both. - top_p: - type: number - minimum: 0 - maximum: 1 - default: 1 - example: 1 - nullable: true - description: &completions_top_p_description | - An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + responses: + "200": + description: Project archived successfully. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/Project' + x-oaiMeta: + name: Archive project + group: administration + returns: The archived [Project](/docs/api-reference/projects/object) object. + examples: + request: + curl: | + curl -X POST https://api.openai.com/v1/organization/projects/proj_abc/archive \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "id": "proj_abc", + "object": "organization.project", + "name": "Project DEF", + "created_at": 1711471533, + "archived_at": 1711471533, + "status": "archived" + } + - We generally recommend altering this or `temperature` but not both. - user: &end_user_param_configuration + /organization/projects/{project_id}/users: + get: + summary: Returns a list of users in the project. + operationId: list-project-users + tags: + - Projects + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: type: string - example: user-1234 - description: | - A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). - required: - - model - - prompt + - name: limit + in: query + description: *pagination_limit_param_description + required: false + schema: + type: integer + default: 20 + - name: after + in: query + description: *pagination_after_param_description + required: false + schema: + type: string + responses: + "200": + description: Project users listed successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/ProjectUserListResponse' + "400": + description: Error response when project is archived. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + x-oaiMeta: + name: List project users + group: administration + returns: A list of [ProjectUser](/docs/api-reference/project-users/object) objects. 
+ examples: + request: + curl: | + curl https://api.openai.com/v1/organization/projects/proj_abc/users?after=user_abc&limit=20 \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "list", + "data": [ + { + "object": "organization.project.user", + "id": "user_abc", + "name": "First Last", + "email": "user@example.com", + "role": "owner", + "added_at": 1711471533 + } + ], + "first_id": "user-abc", + "last_id": "user-xyz", + "has_more": false + } + error_response: + content: | + { + "code": 400, + "message": "Project {name} is archived" + } - CreateCompletionResponse: - type: object - description: | - Represents a completion response from the API. Note: both the streamed and non-streamed response objects share the same shape (unlike the chat endpoint). - properties: - id: + post: + summary: Adds a user to the project. Users must already be members of the organization to be added to a project. + operationId: create-project-user + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: type: string - description: A unique identifier for the completion. - choices: - type: array - description: The list of completion choices the model generated for the input prompt. - items: - type: object - required: - - finish_reason - - index - - logprobs - - text - properties: - finish_reason: - type: string - description: &completion_finish_reason_description | - The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, - `length` if the maximum number of tokens specified in the request was reached, - or `content_filter` if content was omitted due to a flag from our content filters. 
- enum: ["stop", "length", "content_filter"] - index: - type: integer - logprobs: - type: object - nullable: true - properties: - text_offset: - type: array - items: - type: integer - token_logprobs: - type: array - items: - type: number - tokens: - type: array - items: - type: string - top_logprobs: - type: array - items: - type: object - additionalProperties: - type: number - text: - type: string - created: - type: integer - description: The Unix timestamp (in seconds) of when the completion was created. - model: - type: string - description: The model used for completion. - system_fingerprint: - type: string - description: | - This fingerprint represents the backend configuration that the model runs with. - - Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. - object: - type: string - description: The object type, which is always "text_completion" - enum: [text_completion] - usage: - $ref: "#/components/schemas/CompletionUsage" - required: - - id - - object - - created - - model - - choices + tags: + - Projects + requestBody: + description: The project user create request payload. + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/ProjectUserCreateRequest' + responses: + "200": + description: User added to project successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/ProjectUser' + "400": + description: Error response for various conditions. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' x-oaiMeta: - name: The completion object - legacy: true - example: | - { - "id": "cmpl-uqkvlQyYK7bGYrRHQ0eXlWi7", - "object": "text_completion", - "created": 1589478378, - "model": "gpt-4-turbo", - "choices": [ - { - "text": "\n\nThis is indeed a test", - "index": 0, - "logprobs": null, - "finish_reason": "length" - } - ], - "usage": { - "prompt_tokens": 5, - "completion_tokens": 7, - "total_tokens": 12 - } - } - - ChatCompletionRequestMessageContentPart: - oneOf: - - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartText" - - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartImage" - x-oaiExpandable: true - - ChatCompletionRequestMessageContentPartImage: - type: object - title: Image content part - properties: - type: - type: string - enum: ["image_url"] - description: The type of the content part. - image_url: - type: object - properties: - url: - type: string - description: Either a URL of the image or the base64 encoded image data. - format: uri - detail: - type: string - description: Specifies the detail level of the image. Learn more in the [Vision guide](/docs/guides/vision/low-or-high-fidelity-image-understanding). - enum: ["auto", "low", "high"] - default: "auto" - required: - - url - required: - - type - - image_url - - ChatCompletionRequestMessageContentPartText: - type: object - title: Text content part - properties: - type: - type: string - enum: ["text"] - description: The type of the content part. - text: - type: string - description: The text content. 
- required: - - type - - text - - ChatCompletionRequestMessage: - oneOf: - - $ref: "#/components/schemas/ChatCompletionRequestSystemMessage" - - $ref: "#/components/schemas/ChatCompletionRequestUserMessage" - - $ref: "#/components/schemas/ChatCompletionRequestAssistantMessage" - - $ref: "#/components/schemas/ChatCompletionRequestToolMessage" - - $ref: "#/components/schemas/ChatCompletionRequestFunctionMessage" - x-oaiExpandable: true + name: Create project user + group: administration + returns: The created [ProjectUser](/docs/api-reference/project-users/object) object. + examples: + request: + curl: | + curl -X POST https://api.openai.com/v1/organization/projects/proj_abc/users \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" \ + -d '{ + "user_id": "user_abc", + "role": "member" + }' + response: + content: | + { + "object": "organization.project.user", + "id": "user_abc", + "email": "user@example.com", + "role": "owner", + "added_at": 1711471533 + } + error_response: + content: | + { + "code": 400, + "message": "Project {name} is archived" + } - ChatCompletionRequestSystemMessage: - type: object - title: System message - properties: - content: - description: The contents of the system message. - type: string - role: + /organization/projects/{project_id}/users/{user_id}: + get: + summary: Retrieves a user in the project. + operationId: retrieve-project-user + tags: + - Projects + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: type: string - enum: ["system"] - description: The role of the messages author, in this case `system`. - name: + - name: user_id + in: path + description: The ID of the user. + required: true + schema: type: string - description: An optional name for the participant. Provides the model information to differentiate between participants of the same role. 
- required: - - content - - role + responses: + "200": + description: Project user retrieved successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/ProjectUser' + x-oaiMeta: + name: Retrieve project user + group: administration + returns: The [ProjectUser](/docs/api-reference/project-users/object) object matching the specified ID. + examples: + request: + curl: | + curl https://api.openai.com/v1/organization/projects/proj_abc/users/user_abc \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "organization.project.user", + "id": "user_abc", + "name": "First Last", + "email": "user@example.com", + "role": "owner", + "added_at": 1711471533 + } - ChatCompletionRequestUserMessage: - type: object - title: User message - properties: + post: + summary: Modifies a user's role in the project. + operationId: modify-project-user + tags: + - Projects + requestBody: + description: The project user update request payload. + required: true content: - description: | - The contents of the user message. - oneOf: - - type: string - description: The text contents of the message. - title: Text content - - type: array - description: An array of content parts with a defined type, each can be of type `text` or `image_url` when passing in images. You can pass multiple images by adding multiple `image_url` content parts. Image input is only supported when using the `gpt-4-visual-preview` model. - title: Array of content parts - items: - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPart" - minItems: 1 - x-oaiExpandable: true - role: - type: string - enum: ["user"] - description: The role of the messages author, in this case `user`. - name: - type: string - description: An optional name for the participant. Provides the model information to differentiate between participants of the same role. 
- required: - - content - - role + application/json: + schema: + $ref: '#/components/schemas/ProjectUserUpdateRequest' + responses: + "200": + description: Project user's role updated successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/ProjectUser' + "400": + description: Error response for various conditions. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + x-oaiMeta: + name: Modify project user + group: administration + returns: The updated [ProjectUser](/docs/api-reference/project-users/object) object. + examples: + request: + curl: | + curl -X POST https://api.openai.com/v1/organization/projects/proj_abc/users/user_abc \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" \ + -d '{ + "role": "owner" + }' + response: + content: | + { + "object": "organization.project.user", + "id": "user_abc", + "name": "First Last", + "email": "user@example.com", + "role": "owner", + "added_at": 1711471533 + } - ChatCompletionRequestAssistantMessage: - type: object - title: Assistant message - properties: - content: - nullable: true - type: string - description: | - The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified. - role: - type: string - enum: ["assistant"] - description: The role of the messages author, in this case `assistant`. - name: - type: string - description: An optional name for the participant. Provides the model information to differentiate between participants of the same role. - tool_calls: - $ref: "#/components/schemas/ChatCompletionMessageToolCalls" - function_call: - type: object - deprecated: true - description: "Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model." - nullable: true - properties: - arguments: - type: string - description: The arguments to call the function with, as generated by the model in JSON format. 
Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. - name: - type: string - description: The name of the function to call. - required: - - arguments - - name - required: - - role - - FineTuneChatCompletionRequestAssistantMessage: - allOf: - - type: object - title: Assistant message - deprecated: false - properties: - weight: - type: integer - enum: [0, 1] - description: "Controls whether the assistant message is trained against (0 or 1)" - - $ref: "#/components/schemas/ChatCompletionRequestAssistantMessage" - required: - - role + delete: + summary: Deletes a user from the project. + operationId: delete-project-user + tags: + - Projects + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: + type: string + - name: user_id + in: path + description: The ID of the user. + required: true + schema: + type: string + responses: + "200": + description: Project user deleted successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/ProjectUserDeleteResponse' + "400": + description: Error response for various conditions. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + x-oaiMeta: + name: Delete project user + group: administration + returns: Confirmation that project has been deleted or an error in case of an archived project, which has no users + examples: + request: + curl: | + curl -X DELETE https://api.openai.com/v1/organization/projects/proj_abc/users/user_abc \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "organization.project.user.deleted", + "id": "user_abc", + "deleted": true + } - ChatCompletionRequestToolMessage: - type: object - title: Tool message - properties: - role: - type: string - enum: ["tool"] - description: The role of the messages author, in this case `tool`. - content: - type: string - description: The contents of the tool message. - tool_call_id: - type: string - description: Tool call that this message is responding to. - required: - - role - - content - - tool_call_id + /organization/projects/{project_id}/service_accounts: + get: + summary: Returns a list of service accounts in the project. + operationId: list-project-service-accounts + tags: + - Projects + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: + type: string + - name: limit + in: query + description: *pagination_limit_param_description + required: false + schema: + type: integer + default: 20 + - name: after + in: query + description: *pagination_after_param_description + required: false + schema: + type: string + responses: + "200": + description: Project service accounts listed successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/ProjectServiceAccountListResponse' + "400": + description: Error response when project is archived. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + x-oaiMeta: + name: List project service accounts + group: administration + returns: A list of [ProjectServiceAccount](/docs/api-reference/project-service-accounts/object) objects. + examples: + request: + curl: | + curl https://api.openai.com/v1/organization/projects/proj_abc/service_accounts?after=custom_id&limit=20 \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "list", + "data": [ + { + "object": "organization.project.service_account", + "id": "svc_acct_abc", + "name": "Service Account", + "role": "owner", + "created_at": 1711471533 + } + ], + "first_id": "svc_acct_abc", + "last_id": "svc_acct_xyz", + "has_more": false + } - ChatCompletionRequestFunctionMessage: - type: object - title: Function message - deprecated: true - properties: - role: - type: string - enum: ["function"] - description: The role of the messages author, in this case `function`. + post: + summary: Creates a new service account in the project. This also returns an unredacted API key for the service account. + operationId: create-project-service-account + tags: + - Projects + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: + type: string + requestBody: + description: The project service account create request payload. + required: true content: - nullable: true - type: string - description: The contents of the function message. - name: - type: string - description: The name of the function to call. - required: - - role - - content - - name + application/json: + schema: + $ref: '#/components/schemas/ProjectServiceAccountCreateRequest' + responses: + "200": + description: Project service account created successfully. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/ProjectServiceAccountCreateResponse' + "400": + description: Error response when project is archived. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + x-oaiMeta: + name: Create project service account + group: administration + returns: The created [ProjectServiceAccount](/docs/api-reference/project-service-accounts/object) object. + examples: + request: + curl: | + curl -X POST https://api.openai.com/v1/organization/projects/proj_abc/service_accounts \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" \ + -d '{ + "name": "Production App" + }' + response: + content: | + { + "object": "organization.project.service_account", + "id": "svc_acct_abc", + "name": "Production App", + "role": "member", + "created_at": 1711471533, + "api_key": { + "object": "organization.project.service_account.api_key", + "value": "sk-abcdefghijklmnop123", + "name": "Secret Key", + "created_at": 1711471533, + "id": "key_abc" + } + } - FunctionParameters: - type: object - description: "The parameters the functions accepts, described as a JSON Schema object. See the [guide](/docs/guides/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. \n\nOmitting `parameters` defines a function with an empty parameter list." - additionalProperties: true + /organization/projects/{project_id}/service_accounts/{service_account_id}: + get: + summary: Retrieves a service account in the project. + operationId: retrieve-project-service-account + tags: + - Projects + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: + type: string + - name: service_account_id + in: path + description: The ID of the service account. 
+ required: true + schema: + type: string + responses: + "200": + description: Project service account retrieved successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/ProjectServiceAccount' + x-oaiMeta: + name: Retrieve project service account + group: administration + returns: The [ProjectServiceAccount](/docs/api-reference/project-service-accounts/object) object matching the specified ID. + examples: + request: + curl: | + curl https://api.openai.com/v1/organization/projects/proj_abc/service_accounts/svc_acct_abc \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "organization.project.service_account", + "id": "svc_acct_abc", + "name": "Service Account", + "role": "owner", + "created_at": 1711471533 + } - ChatCompletionFunctions: - type: object - deprecated: true - properties: - description: - type: string - description: A description of what the function does, used by the model to choose when and how to call the function. - name: - type: string - description: The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. - parameters: - $ref: "#/components/schemas/FunctionParameters" - required: - - name + delete: + summary: Deletes a service account from the project. + operationId: delete-project-service-account + tags: + - Projects + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: + type: string + - name: service_account_id + in: path + description: The ID of the service account. + required: true + schema: + type: string + responses: + "200": + description: Project service account deleted successfully. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/ProjectServiceAccountDeleteResponse' + x-oaiMeta: + name: Delete project service account + group: administration + returns: Confirmation of service account being deleted, or an error in case of an archived project, which has no service accounts + examples: + request: + curl: | + curl -X DELETE https://api.openai.com/v1/organization/projects/proj_abc/service_accounts/svc_acct_abc \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "organization.project.service_account.deleted", + "id": "svc_acct_abc", + "deleted": true + } - ChatCompletionFunctionCallOption: - type: object - description: > - Specifying a particular function via `{"name": "my_function"}` forces the model to call that function. - properties: - name: - type: string - description: The name of the function to call. - required: - - name + /organization/projects/{project_id}/api_keys: + get: + summary: Returns a list of API keys in the project. + operationId: list-project-api-keys + tags: + - Projects + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: + type: string + - name: limit + in: query + description: *pagination_limit_param_description + required: false + schema: + type: integer + default: 20 + - name: after + in: query + description: *pagination_after_param_description + required: false + schema: + type: string + responses: + "200": + description: Project API keys listed successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/ProjectApiKeyListResponse' - ChatCompletionTool: - type: object - properties: - type: - type: string - enum: ["function"] - description: The type of the tool. Currently, only `function` is supported. 
- function: - $ref: "#/components/schemas/FunctionObject" - required: - - type - - function + x-oaiMeta: + name: List project API keys + group: administration + returns: A list of [ProjectApiKey](/docs/api-reference/project-api-keys/object) objects. + examples: + request: + curl: | + curl https://api.openai.com/v1/organization/projects/proj_abc/api_keys?after=key_abc&limit=20 \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "list", + "data": [ + { + "object": "organization.project.api_key", + "redacted_value": "sk-abc...def", + "name": "My API Key", + "created_at": 1711471533, + "id": "key_abc", + "owner": { + "type": "user", + "user": { + "object": "organization.project.user", + "id": "user_abc", + "name": "First Last", + "email": "user@example.com", + "role": "owner", + "added_at": 1711471533 + } + } + } + ], + "first_id": "key_abc", + "last_id": "key_xyz", + "has_more": false + } + error_response: + content: | + { + "code": 400, + "message": "Project {name} is archived" + } - FunctionObject: - type: object - properties: - description: - type: string - description: A description of what the function does, used by the model to choose when and how to call the function. - name: - type: string - description: The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. - parameters: - $ref: "#/components/schemas/FunctionParameters" - required: - - name + /organization/projects/{project_id}/api_keys/{key_id}: + get: + summary: Retrieves an API key in the project. + operationId: retrieve-project-api-key + tags: + - Projects + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: + type: string + - name: key_id + in: path + description: The ID of the API key. 
+ required: true + schema: + type: string + responses: + "200": + description: Project API key retrieved successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/ProjectApiKey' + x-oaiMeta: + name: Retrieve project API key + group: administration + returns: The [ProjectApiKey](/docs/api-reference/project-api-keys/object) object matching the specified ID. + examples: + request: + curl: | + curl https://api.openai.com/v1/organization/projects/proj_abc/api_keys/key_abc \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "organization.project.api_key", + "redacted_value": "sk-abc...def", + "name": "My API Key", + "created_at": 1711471533, + "id": "key_abc", + "owner": { + "type": "user", + "user": { + "object": "organization.project.user", + "id": "user_abc", + "name": "First Last", + "email": "user@example.com", + "role": "owner", + "added_at": 1711471533 + } + } + } - ChatCompletionToolChoiceOption: - description: | - Controls which (if any) tool is called by the model. - `none` means the model will not call any tool and instead generates a message. - `auto` means the model can pick between generating a message or calling one or more tools. - `required` means the model must call one or more tools. - Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. + delete: + summary: Deletes an API key from the project. + operationId: delete-project-api-key + tags: + - Projects + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: + type: string + - name: key_id + in: path + description: The ID of the API key. + required: true + schema: + type: string + responses: + "200": + description: Project API key deleted successfully. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/ProjectApiKeyDeleteResponse' + "400": + description: Error response for various conditions. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + x-oaiMeta: + name: Delete project API key + group: administration + returns: Confirmation of the key's deletion or an error if the key belonged to a service account + examples: + request: + curl: | + curl -X DELETE https://api.openai.com/v1/organization/projects/proj_abc/api_keys/key_abc \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "organization.project.api_key.deleted", + "id": "key_abc", + "deleted": true + } + error_response: + content: | + { + "code": 400, + "message": "API keys cannot be deleted for service accounts, please delete the service account" + } - `none` is the default when no tools are present. `auto` is the default if tools are present. - oneOf: - - type: string - description: > - `none` means the model will not call any tool and instead generates a message. - `auto` means the model can pick between generating a message or calling one or more tools. - `required` means the model must call one or more tools. - enum: [none, auto, required] - - $ref: "#/components/schemas/ChatCompletionNamedToolChoice" - x-oaiExpandable: true +components: + securitySchemes: + ApiKeyAuth: + type: http + scheme: "bearer" - ChatCompletionNamedToolChoice: + schemas: + Error: type: object - description: Specifies a tool the model should use. Use to force the model to call a specific function. properties: + code: + type: string + nullable: true + message: + type: string + nullable: false + param: + type: string + nullable: true type: type: string - enum: ["function"] - description: The type of the tool. Currently, only `function` is supported. 
- function: - type: object - properties: - name: - type: string - description: The name of the function to call. - required: - - name + nullable: false required: - type - - function - - ParallelToolCalls: - description: Whether to enable [parallel function calling](/docs/guides/function-calling/parallel-function-calling) during tool use. - type: boolean - default: true - - ChatCompletionMessageToolCalls: - type: array - description: The tool calls generated by the model, such as function calls. - items: - $ref: "#/components/schemas/ChatCompletionMessageToolCall" + - message + - param + - code + ErrorResponse: + type: object + properties: + error: + $ref: "#/components/schemas/Error" + required: + - error - ChatCompletionMessageToolCall: + ListModelsResponse: type: object properties: - # TODO: index included when streaming - id: - type: string - description: The ID of the tool call. - type: + object: type: string - enum: ["function"] - description: The type of the tool. Currently, only `function` is supported. - function: - type: object - description: The function that the model called. - properties: - name: - type: string - description: The name of the function to call. - arguments: - type: string - description: The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. - required: - - name - - arguments + enum: [list] + data: + type: array + items: + $ref: "#/components/schemas/Model" required: - - id - - type - - function - - ChatCompletionMessageToolCallChunk: + - object + - data + DeleteModelResponse: type: object properties: - index: - type: integer id: type: string - description: The ID of the tool call. - type: + deleted: + type: boolean + object: type: string - enum: ["function"] - description: The type of the tool. 
Currently, only `function` is supported. - function: - type: object - properties: - name: - type: string - description: The name of the function to call. - arguments: - type: string - description: The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. required: - - index - - # Note, this isn't referenced anywhere, but is kept as a convenience to record all possible roles in one place. - ChatCompletionRole: - type: string - description: The role of the author of a message - enum: - - system - - user - - assistant - - tool - - function + - id + - object + - deleted - ChatCompletionStreamOptions: - description: | - Options for streaming response. Only set this when you set `stream: true`. + CreateCompletionRequest: type: object - nullable: true - default: null properties: - include_usage: - type: boolean - description: | - If set, an additional chunk will be streamed before the `data: [DONE]` message. The `usage` field on this chunk shows the token usage statistics for the entire request, and the `choices` field will always be an empty array. All other chunks will also include a `usage` field, but with a null value. + model: + description: &model_description | + ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. + anyOf: + - type: string + - type: string + enum: ["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"] + x-oaiTypeLabel: string + prompt: + description: &completions_prompt_description | + The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. 
- ChatCompletionResponseMessage: - type: object - description: A chat completion message generated by the model. - properties: - content: - type: string - description: The contents of the message. + Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document. + default: "<|endoftext|>" nullable: true - tool_calls: - $ref: "#/components/schemas/ChatCompletionMessageToolCalls" - role: - type: string - enum: ["assistant"] - description: The role of the author of this message. - function_call: - type: object - deprecated: true - description: "Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model." - properties: - arguments: - type: string - description: The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. - name: - type: string - description: The name of the function to call. - required: - - name - - arguments - required: - - role - - content - - ChatCompletionStreamResponseDelta: - type: object - description: A chat completion delta generated by streamed model responses. - properties: - content: - type: string - description: The contents of the chunk message. + oneOf: + - type: string + default: "" + example: "This is a test." + - type: array + items: + type: string + default: "" + example: "This is a test." 
+ - type: array + minItems: 1 + items: + type: integer + example: "[1212, 318, 257, 1332, 13]" + - type: array + minItems: 1 + items: + type: array + minItems: 1 + items: + type: integer + example: "[[1212, 318, 257, 1332, 13]]" + best_of: + type: integer + default: 1 + minimum: 0 + maximum: 20 nullable: true - function_call: - deprecated: true - type: object - description: "Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model." - properties: - arguments: - type: string - description: The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. - name: - type: string - description: The name of the function to call. - tool_calls: - type: array - items: - $ref: "#/components/schemas/ChatCompletionMessageToolCallChunk" - role: - type: string - enum: ["system", "user", "assistant", "tool"] - description: The role of the author of this message. + description: &completions_best_of_description | + Generates `best_of` completions server-side and returns the "best" (the one with the highest log probability per token). Results cannot be streamed. - CreateChatCompletionRequest: - type: object - properties: - messages: - description: A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). - type: array - minItems: 1 - items: - $ref: "#/components/schemas/ChatCompletionRequestMessage" - model: - description: ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. 
- example: "gpt-4-turbo" - anyOf: - - type: string - - type: string - enum: - [ - "gpt-4o", - "gpt-4o-2024-05-13", - "gpt-4-turbo", - "gpt-4-turbo-2024-04-09", - "gpt-4-0125-preview", - "gpt-4-turbo-preview", - "gpt-4-1106-preview", - "gpt-4-vision-preview", - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0301", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-16k-0613", - ] - x-oaiTypeLabel: string + When used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return – `best_of` must be greater than `n`. + + **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. + echo: + type: boolean + default: false + nullable: true + description: &completions_echo_description > + Echo back the prompt in addition to the completion frequency_penalty: type: number default: 0 minimum: -2 maximum: 2 nullable: true - description: *completions_frequency_penalty_description - logit_bias: + description: &completions_frequency_penalty_description | + Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. + + [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details) + logit_bias: &completions_logit_bias type: object x-oaiTypeLabel: map default: null nullable: true additionalProperties: type: integer - description: | + description: &completions_logit_bias_description | Modify the likelihood of specified tokens appearing in the completion. - Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. 
Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. - logprobs: - description: Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the `content` of `message`. - type: boolean - default: false - nullable: true - top_logprobs: - description: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. + Accepts a JSON object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. + + As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token from being generated. + logprobs: &completions_logprobs_configuration type: integer minimum: 0 - maximum: 20 + maximum: 5 + default: null nullable: true - max_tokens: - description: | - The maximum number of [tokens](/tokenizer) that can be generated in the chat completion. + description: &completions_logprobs_description | + Include the log probabilities on the `logprobs` most likely output tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. 
The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. - The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. + The maximum value for `logprobs` is 5. + max_tokens: type: integer + minimum: 0 + default: 16 + example: 16 nullable: true + description: &completions_max_tokens_description | + The maximum number of [tokens](/tokenizer) that can be generated in the completion. + + The token count of your prompt plus `max_tokens` cannot exceed the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. n: type: integer minimum: 1 @@ -7891,61 +8826,63 @@ components: default: 1 example: 1 nullable: true - description: How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. + description: &completions_completions_description | + How many completions to generate for each prompt. + + **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. presence_penalty: type: number default: 0 minimum: -2 maximum: 2 nullable: true - description: *completions_presence_penalty_description - response_format: - type: object - description: | - An object specifying the format that the model must output. Compatible with [GPT-4 Turbo](/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. - - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. 
+ description: &completions_presence_penalty_description | + Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. - **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. - properties: - type: - type: string - enum: ["text", "json_object"] - example: "json_object" - default: "text" - description: Must be one of `text` or `json_object`. - seed: + [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details) + seed: &completions_seed_param type: integer minimum: -9223372036854775808 maximum: 9223372036854775807 nullable: true description: | - This feature is in Beta. If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. + Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. - x-oaiMeta: - beta: true stop: - description: | - Up to 4 sequences where the API will stop generating further tokens. + description: &completions_stop_description > + Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. 
default: null + nullable: true oneOf: - type: string + default: <|endoftext|> + example: "\n" nullable: true - type: array minItems: 1 maxItems: 4 items: type: string + example: '["\n"]' stream: description: > - If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) + Whether to stream back partial progress. If set, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). type: boolean nullable: true default: false stream_options: $ref: "#/components/schemas/ChatCompletionStreamOptions" + suffix: + description: | + The suffix that comes after a completion of inserted text. + + This parameter is only supported for `gpt-3.5-turbo-instruct`. + default: null + nullable: true + type: string + example: "test." temperature: type: number minimum: 0 @@ -7953,7 +8890,10 @@ components: default: 1 example: 1 nullable: true - description: *completions_temperature_description + description: &completions_temperature_description | + What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + + We generally recommend altering this or `top_p` but not both. top_p: type: number minimum: 0 @@ -7961,111 +8901,77 @@ components: default: 1 example: 1 nullable: true - description: *completions_top_p_description - tools: - type: array - description: > - A list of tools the model may call. Currently, only functions are supported as a tool. - Use this to provide a list of functions the model may generate JSON inputs for. 
A max of 128 functions are supported. - items: - $ref: "#/components/schemas/ChatCompletionTool" - tool_choice: - $ref: "#/components/schemas/ChatCompletionToolChoiceOption" - parallel_tool_calls: - $ref: "#/components/schemas/ParallelToolCalls" - user: *end_user_param_configuration - function_call: - deprecated: true - description: | - Deprecated in favor of `tool_choice`. - - Controls which (if any) function is called by the model. - `none` means the model will not call a function and instead generates a message. - `auto` means the model can pick between generating a message or calling a function. - Specifying a particular function via `{"name": "my_function"}` forces the model to call that function. + description: &completions_top_p_description | + An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. - `none` is the default when no functions are present. `auto` is the default if functions are present. - oneOf: - - type: string - description: > - `none` means the model will not call a function and instead generates a message. - `auto` means the model can pick between generating a message or calling a function. - enum: [none, auto] - - $ref: "#/components/schemas/ChatCompletionFunctionCallOption" - x-oaiExpandable: true - functions: - deprecated: true + We generally recommend altering this or `temperature` but not both. + user: &end_user_param_configuration + type: string + example: user-1234 description: | - Deprecated in favor of `tools`. + A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). + required: + - model + - prompt - A list of functions the model may generate JSON inputs for. 
- type: array - minItems: 1 - maxItems: 128 - items: - $ref: "#/components/schemas/ChatCompletionFunctions" - - required: - - model - - messages - - CreateChatCompletionResponse: + CreateCompletionResponse: type: object - description: Represents a chat completion response returned by model, based on the provided input. + description: | + Represents a completion response from the API. Note: both the streamed and non-streamed response objects share the same shape (unlike the chat endpoint). properties: id: type: string - description: A unique identifier for the chat completion. + description: A unique identifier for the completion. choices: type: array - description: A list of chat completion choices. Can be more than one if `n` is greater than 1. + description: The list of completion choices the model generated for the input prompt. items: type: object required: - finish_reason - index - - message - logprobs + - text properties: finish_reason: type: string - description: &chat_completion_finish_reason_description | + description: &completion_finish_reason_description | The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, `length` if the maximum number of tokens specified in the request was reached, - `content_filter` if content was omitted due to a flag from our content filters, - `tool_calls` if the model called a tool, or `function_call` (deprecated) if the model called a function. - enum: - [ - "stop", - "length", - "tool_calls", - "content_filter", - "function_call", - ] + or `content_filter` if content was omitted due to a flag from our content filters. + enum: ["stop", "length", "content_filter"] index: type: integer - description: The index of the choice in the list of choices. - message: - $ref: "#/components/schemas/ChatCompletionResponseMessage" - logprobs: &chat_completion_response_logprobs - description: Log probability information for the choice. 
+ logprobs: type: object nullable: true properties: - content: - description: A list of message content tokens with log probability information. + text_offset: type: array items: - $ref: "#/components/schemas/ChatCompletionTokenLogprob" - nullable: true - required: - - content + type: integer + token_logprobs: + type: array + items: + type: number + tokens: + type: array + items: + type: string + top_logprobs: + type: array + items: + type: object + additionalProperties: + type: number + text: + type: string created: type: integer - description: The Unix timestamp (in seconds) of when the chat completion was created. + description: The Unix timestamp (in seconds) of when the completion was created. model: type: string - description: The model used for the chat completion. + description: The model used for completion. system_fingerprint: type: string description: | @@ -8074,3857 +8980,3686 @@ components: Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. object: type: string - description: The object type, which is always `chat.completion`. 
- enum: [chat.completion] + description: The object type, which is always "text_completion" + enum: [text_completion] usage: $ref: "#/components/schemas/CompletionUsage" required: - - choices - - created - id - - model - object + - created + - model + - choices x-oaiMeta: - name: The chat completion object - group: chat - example: *chat_completion_example + name: The completion object + legacy: true + example: | + { + "id": "cmpl-uqkvlQyYK7bGYrRHQ0eXlWi7", + "object": "text_completion", + "created": 1589478378, + "model": "gpt-4-turbo", + "choices": [ + { + "text": "\n\nThis is indeed a test", + "index": 0, + "logprobs": null, + "finish_reason": "length" + } + ], + "usage": { + "prompt_tokens": 5, + "completion_tokens": 7, + "total_tokens": 12 + } + } - CreateChatCompletionFunctionResponse: + ChatCompletionRequestMessageContentPartText: type: object - description: Represents a chat completion response returned by model, based on the provided input. + title: Text content part properties: - id: - type: string - description: A unique identifier for the chat completion. - choices: - type: array - description: A list of chat completion choices. Can be more than one if `n` is greater than 1. - items: - type: object - required: - - finish_reason - - index - - message - - logprobs - properties: - finish_reason: - type: string - description: - &chat_completion_function_finish_reason_description | - The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, `length` if the maximum number of tokens specified in the request was reached, `content_filter` if content was omitted due to a flag from our content filters, or `function_call` if the model called a function. - enum: - ["stop", "length", "function_call", "content_filter"] - index: - type: integer - description: The index of the choice in the list of choices. 
- message: - $ref: "#/components/schemas/ChatCompletionResponseMessage" - created: - type: integer - description: The Unix timestamp (in seconds) of when the chat completion was created. - model: - type: string - description: The model used for the chat completion. - system_fingerprint: + type: type: string - description: | - This fingerprint represents the backend configuration that the model runs with. - - Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. - object: + enum: ["text"] + description: The type of the content part. + text: type: string - description: The object type, which is always `chat.completion`. - enum: [chat.completion] - usage: - $ref: "#/components/schemas/CompletionUsage" + description: The text content. required: - - choices - - created - - id - - model - - object - x-oaiMeta: - name: The chat completion object - group: chat - example: *chat_completion_function_example + - type + - text - ChatCompletionTokenLogprob: + ChatCompletionRequestMessageContentPartImage: type: object + title: Image content part properties: - token: &chat_completion_response_logprobs_token - description: The token. + type: type: string - logprob: &chat_completion_response_logprobs_token_logprob - description: The log probability of this token, if it is within the top 20 most likely tokens. Otherwise, the value `-9999.0` is used to signify that the token is very unlikely. - type: number - bytes: &chat_completion_response_logprobs_bytes - description: A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token. 
- type: array - items: - type: integer - nullable: true - top_logprobs: - description: List of the most likely tokens and their log probability, at this token position. In rare cases, there may be fewer than the number of requested `top_logprobs` returned. - type: array - items: - type: object - properties: - token: *chat_completion_response_logprobs_token - logprob: *chat_completion_response_logprobs_token_logprob - bytes: *chat_completion_response_logprobs_bytes - required: - - token - - logprob - - bytes + enum: ["image_url"] + description: The type of the content part. + image_url: + type: object + properties: + url: + type: string + description: Either a URL of the image or the base64 encoded image data. + format: uri + detail: + type: string + description: Specifies the detail level of the image. Learn more in the [Vision guide](/docs/guides/vision/low-or-high-fidelity-image-understanding). + enum: ["auto", "low", "high"] + default: "auto" + required: + - url required: - - token - - logprob - - bytes - - top_logprobs + - type + - image_url - ListPaginatedFineTuningJobsResponse: + ChatCompletionRequestMessageContentPartRefusal: type: object + title: Refusal content part properties: - data: - type: array - items: - $ref: "#/components/schemas/FineTuningJob" - has_more: - type: boolean - object: + type: type: string - enum: [list] + enum: ["refusal"] + description: The type of the content part. + refusal: + type: string + description: The refusal message generated by the model. required: - - object - - data - - has_more + - type + - refusal - CreateChatCompletionStreamResponse: - type: object - description: Represents a streamed chunk of a chat completion response returned by model, based on the provided input. - properties: - id: - type: string - description: A unique identifier for the chat completion. Each chunk has the same ID. - choices: - type: array - description: | - A list of chat completion choices. 
Can contain more than one elements if `n` is greater than 1. Can also be empty for the - last chunk if you set `stream_options: {"include_usage": true}`. - items: - type: object - required: - - delta - - finish_reason - - index - properties: - delta: - $ref: "#/components/schemas/ChatCompletionStreamResponseDelta" - logprobs: *chat_completion_response_logprobs - finish_reason: - type: string - description: *chat_completion_finish_reason_description - enum: - [ - "stop", - "length", - "tool_calls", - "content_filter", - "function_call", - ] - nullable: true - index: - type: integer - description: The index of the choice in the list of choices. - created: - type: integer - description: The Unix timestamp (in seconds) of when the chat completion was created. Each chunk has the same timestamp. - model: - type: string - description: The model to generate the completion. - system_fingerprint: + ChatCompletionRequestMessage: + oneOf: + - $ref: "#/components/schemas/ChatCompletionRequestSystemMessage" + - $ref: "#/components/schemas/ChatCompletionRequestUserMessage" + - $ref: "#/components/schemas/ChatCompletionRequestAssistantMessage" + - $ref: "#/components/schemas/ChatCompletionRequestToolMessage" + - $ref: "#/components/schemas/ChatCompletionRequestFunctionMessage" + x-oaiExpandable: true + + ChatCompletionRequestSystemMessageContentPart: + oneOf: + - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartText" + x-oaiExpandable: true + + ChatCompletionRequestUserMessageContentPart: + oneOf: + - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartText" + - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartImage" + x-oaiExpandable: true + + ChatCompletionRequestAssistantMessageContentPart: + oneOf: + - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartText" + - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartRefusal" + x-oaiExpandable: true + + ChatCompletionRequestToolMessageContentPart: + 
oneOf: + - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartText" + x-oaiExpandable: true + + ChatCompletionRequestSystemMessage: + type: object + title: System message + properties: + content: + description: The contents of the system message. + oneOf: + - type: string + description: The contents of the system message. + title: Text content + - type: array + description: An array of content parts with a defined type. For system messages, only type `text` is supported. + title: Array of content parts + items: + $ref: "#/components/schemas/ChatCompletionRequestSystemMessageContentPart" + minItems: 1 + role: type: string - description: | - This fingerprint represents the backend configuration that the model runs with. - Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. - object: + enum: ["system"] + description: The role of the messages author, in this case `system`. + name: type: string - description: The object type, which is always `chat.completion.chunk`. - enum: [chat.completion.chunk] - usage: - type: object - description: | - An optional field that will only be present when you set `stream_options: {"include_usage": true}` in your request. - When present, it contains a null value except for the last chunk which contains the token usage statistics for the entire request. - properties: - completion_tokens: - type: integer - description: Number of tokens in the generated completion. - prompt_tokens: - type: integer - description: Number of tokens in the prompt. - total_tokens: - type: integer - description: Total number of tokens used in the request (prompt + completion). - required: - - prompt_tokens - - completion_tokens - - total_tokens + description: An optional name for the participant. Provides the model information to differentiate between participants of the same role. 
required: - - choices - - created - - id - - model - - object - x-oaiMeta: - name: The chat completion chunk object - group: chat - example: *chat_completion_chunk_example + - content + - role - CreateChatCompletionImageResponse: + ChatCompletionRequestUserMessage: type: object - description: Represents a streamed chunk of a chat completion response returned by model, based on the provided input. - x-oaiMeta: - name: The chat completion chunk object - group: chat - example: *chat_completion_image_example + title: User message + properties: + content: + description: | + The contents of the user message. + oneOf: + - type: string + description: The text contents of the message. + title: Text content + - type: array + description: An array of content parts with a defined type, each can be of type `text` or `image_url` when passing in images. You can pass multiple images by adding multiple `image_url` content parts. Image input is only supported when using the `gpt-4o` model. + title: Array of content parts + items: + $ref: "#/components/schemas/ChatCompletionRequestUserMessageContentPart" + minItems: 1 + x-oaiExpandable: true + role: + type: string + enum: ["user"] + description: The role of the messages author, in this case `user`. + name: + type: string + description: An optional name for the participant. Provides the model information to differentiate between participants of the same role. + required: + - content + - role - CreateImageRequest: + ChatCompletionRequestAssistantMessage: type: object + title: Assistant message properties: - prompt: - description: A text description of the desired image(s). The maximum length is 1000 characters for `dall-e-2` and 4000 characters for `dall-e-3`. 
- type: string - example: "A cute baby sea otter" - model: - anyOf: - - type: string - - type: string - enum: ["dall-e-2", "dall-e-3"] - x-oaiTypeLabel: string - default: "dall-e-2" - example: "dall-e-3" + content: nullable: true - description: The model to use for image generation. - n: &images_n - type: integer - minimum: 1 - maximum: 10 - default: 1 - example: 1 + oneOf: + - type: string + description: The contents of the assistant message. + title: Text content + - type: array + description: An array of content parts with a defined type. Can be one or more of type `text`, or exactly one of type `refusal`. + title: Array of content parts + items: + $ref: "#/components/schemas/ChatCompletionRequestAssistantMessageContentPart" + minItems: 1 + description: | + The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified. + refusal: nullable: true - description: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only `n=1` is supported. - quality: - type: string - enum: ["standard", "hd"] - default: "standard" - example: "standard" - description: The quality of the image that will be generated. `hd` creates images with finer details and greater consistency across the image. This param is only supported for `dall-e-3`. - response_format: &images_response_format type: string - enum: ["url", "b64_json"] - default: "url" - example: "url" - nullable: true - description: The format in which the generated images are returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the image has been generated. - size: &images_size + description: The refusal message by the assistant. + role: type: string - enum: ["256x256", "512x512", "1024x1024", "1792x1024", "1024x1792"] - default: "1024x1024" - example: "1024x1024" - nullable: true - description: The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`. 
Must be one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3` models. - style: + enum: ["assistant"] + description: The role of the messages author, in this case `assistant`. + name: type: string - enum: ["vivid", "natural"] - default: "vivid" - example: "vivid" + description: An optional name for the participant. Provides the model information to differentiate between participants of the same role. + tool_calls: + $ref: "#/components/schemas/ChatCompletionMessageToolCalls" + function_call: + type: object + deprecated: true + description: "Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model." nullable: true - description: The style of the generated images. Must be one of `vivid` or `natural`. Vivid causes the model to lean towards generating hyper-real and dramatic images. Natural causes the model to produce more natural, less hyper-real looking images. This param is only supported for `dall-e-3`. - user: *end_user_param_configuration + properties: + arguments: + type: string + description: The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. + name: + type: string + description: The name of the function to call. 
+ required: + - arguments + - name required: - - prompt - - ImagesResponse: - properties: - created: - type: integer - data: - type: array - items: - $ref: "#/components/schemas/Image" + - role + + FineTuneChatCompletionRequestAssistantMessage: + allOf: + - type: object + title: Assistant message + deprecated: false + properties: + weight: + type: integer + enum: [0, 1] + description: "Controls whether the assistant message is trained against (0 or 1)" + - $ref: "#/components/schemas/ChatCompletionRequestAssistantMessage" required: - - created - - data + - role - Image: + ChatCompletionRequestToolMessage: type: object - description: Represents the url or the content of an image generated by the OpenAI API. + title: Tool message properties: - b64_json: + role: type: string - description: The base64-encoded JSON of the generated image, if `response_format` is `b64_json`. - url: + enum: ["tool"] + description: The role of the messages author, in this case `tool`. + content: + oneOf: + - type: string + description: The contents of the tool message. + title: Text content + - type: array + description: An array of content parts with a defined type. For tool messages, only type `text` is supported. + title: Array of content parts + items: + $ref: "#/components/schemas/ChatCompletionRequestToolMessageContentPart" + minItems: 1 + description: The contents of the tool message. + tool_call_id: type: string - description: The URL of the generated image, if `response_format` is `url` (default). - revised_prompt: + description: Tool call that this message is responding to. + required: + - role + - content + - tool_call_id + + ChatCompletionRequestFunctionMessage: + type: object + title: Function message + deprecated: true + properties: + role: type: string - description: The prompt that was used to generate the image, if there was any revision to the prompt. - x-oaiMeta: - name: The image object - example: | - { - "url": "...", - "revised_prompt": "..." 
- } + enum: ["function"] + description: The role of the messages author, in this case `function`. + content: + nullable: true + type: string + description: The contents of the function message. + name: + type: string + description: The name of the function to call. + required: + - role + - content + - name - CreateImageEditRequest: + FunctionParameters: + type: object + description: "The parameters the functions accepts, described as a JSON Schema object. See the [guide](/docs/guides/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. \n\nOmitting `parameters` defines a function with an empty parameter list." + additionalProperties: true + + ChatCompletionFunctions: type: object + deprecated: true properties: - image: - description: The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask. + description: type: string - format: binary - prompt: - description: A text description of the desired image(s). The maximum length is 1000 characters. + description: A description of what the function does, used by the model to choose when and how to call the function. + name: type: string - example: "A cute baby sea otter wearing a beret" - mask: - description: An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where `image` should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as `image`. + description: The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. + parameters: + $ref: "#/components/schemas/FunctionParameters" + required: + - name + + ChatCompletionFunctionCallOption: + type: object + description: > + Specifying a particular function via `{"name": "my_function"}` forces the model to call that function. 
+ properties: + name: type: string - format: binary - model: - anyOf: - - type: string - - type: string - enum: ["dall-e-2"] - x-oaiTypeLabel: string - default: "dall-e-2" - example: "dall-e-2" - nullable: true - description: The model to use for image generation. Only `dall-e-2` is supported at this time. - n: - type: integer - minimum: 1 - maximum: 10 - default: 1 - example: 1 - nullable: true - description: The number of images to generate. Must be between 1 and 10. - size: &dalle2_images_size + description: The name of the function to call. + required: + - name + + ChatCompletionTool: + type: object + properties: + type: type: string - enum: ["256x256", "512x512", "1024x1024"] - default: "1024x1024" - example: "1024x1024" - nullable: true - description: The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. - response_format: *images_response_format - user: *end_user_param_configuration + enum: ["function"] + description: The type of the tool. Currently, only `function` is supported. + function: + $ref: "#/components/schemas/FunctionObject" required: - - prompt - - image + - type + - function - CreateImageVariationRequest: + FunctionObject: type: object properties: - image: - description: The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, and square. + description: type: string - format: binary - model: - anyOf: - - type: string - - type: string - enum: ["dall-e-2"] - x-oaiTypeLabel: string - default: "dall-e-2" - example: "dall-e-2" - nullable: true - description: The model to use for image generation. Only `dall-e-2` is supported at this time. - n: *images_n - response_format: *images_response_format - size: *dalle2_images_size - user: *end_user_param_configuration + description: A description of what the function does, used by the model to choose when and how to call the function. + name: + type: string + description: The name of the function to be called. 
Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. + parameters: + $ref: "#/components/schemas/FunctionParameters" + strict: + type: boolean + nullable: true + default: false + description: Whether to enable strict schema adherence when generating the function call. If set to true, the model will follow the exact schema defined in the `parameters` field. Only a subset of JSON Schema is supported when `strict` is `true`. Learn more about Structured Outputs in the [function calling guide](docs/guides/function-calling). required: - - image + - name - CreateModerationRequest: + ResponseFormatText: type: object properties: - input: - description: The input text to classify - oneOf: - - type: string - default: "" - example: "I want to kill them." - - type: array - items: - type: string - default: "" - example: "I want to kill them." - model: - description: | - Two content moderations models are available: `text-moderation-stable` and `text-moderation-latest`. + type: + type: string + description: "The type of response format being defined: `text`" + enum: ["text"] + required: + - type - The default is `text-moderation-latest` which will be automatically upgraded over time. This ensures you are always using our most accurate model. If you use `text-moderation-stable`, we will provide advanced notice before updating the model. Accuracy of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`. 
- nullable: false - default: "text-moderation-latest" - example: "text-moderation-stable" - anyOf: - - type: string - - type: string - enum: ["text-moderation-latest", "text-moderation-stable"] - x-oaiTypeLabel: string + ResponseFormatJsonObject: + type: object + properties: + type: + type: string + description: "The type of response format being defined: `json_object`" + enum: ["json_object"] required: - - input + - type - CreateModerationResponse: + ResponseFormatJsonSchemaSchema: + type: object + description: "The schema for the response format, described as a JSON Schema object." + additionalProperties: true + + ResponseFormatJsonSchema: + type: object + properties: + type: + type: string + description: 'The type of response format being defined: `json_schema`' + enum: ['json_schema'] + json_schema: + type: object + properties: + description: + type: string + description: A description of what the response format is for, used by the model to determine how to respond in the format. + name: + type: string + description: The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. + schema: + $ref: '#/components/schemas/ResponseFormatJsonSchemaSchema' + strict: + type: boolean + nullable: true + default: false + description: Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the `schema` field. Only a subset of JSON Schema is supported when `strict` is `true`. To learn more, read the [Structured Outputs guide](/docs/guides/structured-outputs). + required: + - type + - name + required: + - type + - json_schema + + ChatCompletionToolChoiceOption: + description: | + Controls which (if any) tool is called by the model. + `none` means the model will not call any tool and instead generates a message. + `auto` means the model can pick between generating a message or calling one or more tools. 
+ `required` means the model must call one or more tools. + Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. + + `none` is the default when no tools are present. `auto` is the default if tools are present. + oneOf: + - type: string + description: > + `none` means the model will not call any tool and instead generates a message. + `auto` means the model can pick between generating a message or calling one or more tools. + `required` means the model must call one or more tools. + enum: [none, auto, required] + - $ref: "#/components/schemas/ChatCompletionNamedToolChoice" + x-oaiExpandable: true + + ChatCompletionNamedToolChoice: + type: object + description: Specifies a tool the model should use. Use to force the model to call a specific function. + properties: + type: + type: string + enum: ["function"] + description: The type of the tool. Currently, only `function` is supported. + function: + type: object + properties: + name: + type: string + description: The name of the function to call. + required: + - name + required: + - type + - function + + ParallelToolCalls: + description: Whether to enable [parallel function calling](/docs/guides/function-calling/parallel-function-calling) during tool use. + type: boolean + default: true + + ChatCompletionMessageToolCalls: + type: array + description: The tool calls generated by the model, such as function calls. + items: + $ref: "#/components/schemas/ChatCompletionMessageToolCall" + + ChatCompletionMessageToolCall: type: object - description: Represents if a given text input is potentially harmful. properties: + # TODO: index included when streaming id: type: string - description: The unique identifier for the moderation request. - model: + description: The ID of the tool call. + type: type: string - description: The model used to generate the moderation results. - results: - type: array - description: A list of moderation objects. 
- items: - type: object - properties: - flagged: - type: boolean - description: Whether any of the below categories are flagged. - categories: - type: object - description: A list of the categories, and whether they are flagged or not. - properties: - hate: - type: boolean - description: Content that expresses, incites, or promotes hate based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste. Hateful content aimed at non-protected groups (e.g., chess players) is harassment. - hate/threatening: - type: boolean - description: Hateful content that also includes violence or serious harm towards the targeted group based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste. - harassment: - type: boolean - description: Content that expresses, incites, or promotes harassing language towards any target. - harassment/threatening: - type: boolean - description: Harassment content that also includes violence or serious harm towards any target. - self-harm: - type: boolean - description: Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting, and eating disorders. - self-harm/intent: - type: boolean - description: Content where the speaker expresses that they are engaging or intend to engage in acts of self-harm, such as suicide, cutting, and eating disorders. - self-harm/instructions: - type: boolean - description: Content that encourages performing acts of self-harm, such as suicide, cutting, and eating disorders, or that gives instructions or advice on how to commit such acts. - sexual: - type: boolean - description: Content meant to arouse sexual excitement, such as the description of sexual activity, or that promotes sexual services (excluding sex education and wellness). - sexual/minors: - type: boolean - description: Sexual content that includes an individual who is under 18 years old. 
- violence: - type: boolean - description: Content that depicts death, violence, or physical injury. - violence/graphic: - type: boolean - description: Content that depicts death, violence, or physical injury in graphic detail. - required: - - hate - - hate/threatening - - harassment - - harassment/threatening - - self-harm - - self-harm/intent - - self-harm/instructions - - sexual - - sexual/minors - - violence - - violence/graphic - category_scores: - type: object - description: A list of the categories along with their scores as predicted by model. - properties: - hate: - type: number - description: The score for the category 'hate'. - hate/threatening: - type: number - description: The score for the category 'hate/threatening'. - harassment: - type: number - description: The score for the category 'harassment'. - harassment/threatening: - type: number - description: The score for the category 'harassment/threatening'. - self-harm: - type: number - description: The score for the category 'self-harm'. - self-harm/intent: - type: number - description: The score for the category 'self-harm/intent'. - self-harm/instructions: - type: number - description: The score for the category 'self-harm/instructions'. - sexual: - type: number - description: The score for the category 'sexual'. - sexual/minors: - type: number - description: The score for the category 'sexual/minors'. - violence: - type: number - description: The score for the category 'violence'. - violence/graphic: - type: number - description: The score for the category 'violence/graphic'. - required: - - hate - - hate/threatening - - harassment - - harassment/threatening - - self-harm - - self-harm/intent - - self-harm/instructions - - sexual - - sexual/minors - - violence - - violence/graphic - required: - - flagged - - categories - - category_scores + enum: ["function"] + description: The type of the tool. Currently, only `function` is supported. 
+ function: + type: object + description: The function that the model called. + properties: + name: + type: string + description: The name of the function to call. + arguments: + type: string + description: The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. + required: + - name + - arguments required: - id - - model - - results - x-oaiMeta: - name: The moderation object - example: *moderation_example + - type + - function - ListFilesResponse: + ChatCompletionMessageToolCallChunk: type: object properties: - data: - type: array - items: - $ref: "#/components/schemas/OpenAIFile" - object: + index: + type: integer + id: type: string - enum: [list] + description: The ID of the tool call. + type: + type: string + enum: ["function"] + description: The type of the tool. Currently, only `function` is supported. + function: + type: object + properties: + name: + type: string + description: The name of the function to call. + arguments: + type: string + description: The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. required: - - object - - data + - index - CreateFileRequest: + # Note, this isn't referenced anywhere, but is kept as a convenience to record all possible roles in one place. + ChatCompletionRole: + type: string + description: The role of the author of a message + enum: + - system + - user + - assistant + - tool + - function + + ChatCompletionStreamOptions: + description: | + Options for streaming response. Only set this when you set `stream: true`. 
type: object - additionalProperties: false + nullable: true + default: null properties: - file: - description: | - The File object (not file name) to be uploaded. - type: string - format: binary - purpose: + include_usage: + type: boolean description: | - The intended purpose of the uploaded file. - - Use "assistants" for [Assistants](/docs/api-reference/assistants) and [Message](/docs/api-reference/messages) files, "vision" for Assistants image file inputs, "batch" for [Batch API](/docs/guides/batch), and "fine-tune" for [Fine-tuning](/docs/api-reference/fine-tuning). - type: string - enum: ["assistants", "batch", "fine-tune", "vision"] - required: - - file - - purpose + If set, an additional chunk will be streamed before the `data: [DONE]` message. The `usage` field on this chunk shows the token usage statistics for the entire request, and the `choices` field will always be an empty array. All other chunks will also include a `usage` field, but with a null value. - DeleteFileResponse: + ChatCompletionResponseMessage: type: object + description: A chat completion message generated by the model. properties: - id: + content: type: string - object: + description: The contents of the message. + nullable: true + refusal: type: string - enum: [file] - deleted: - type: boolean + description: The refusal message generated by the model. + nullable: true + tool_calls: + $ref: "#/components/schemas/ChatCompletionMessageToolCalls" + role: + type: string + enum: ["assistant"] + description: The role of the author of this message. + function_call: + type: object + deprecated: true + description: "Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model." + properties: + arguments: + type: string + description: The arguments to call the function with, as generated by the model in JSON format. 
Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. + name: + type: string + description: The name of the function to call. + required: + - name + - arguments required: - - id - - object - - deleted + - role + - content + - refusal - CreateFineTuningJobRequest: + ChatCompletionStreamResponseDelta: type: object + description: A chat completion delta generated by streamed model responses. properties: - model: - description: | - The name of the model to fine-tune. You can select one of the - [supported models](/docs/guides/fine-tuning/what-models-can-be-fine-tuned). - example: "gpt-3.5-turbo" - anyOf: - - type: string - - type: string - enum: ["babbage-002", "davinci-002", "gpt-3.5-turbo"] - x-oaiTypeLabel: string - training_file: - description: | - The ID of an uploaded file that contains training data. - - See [upload file](/docs/api-reference/files/create) for how to upload a file. - - Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose `fine-tune`. - - The contents of the file should differ depending on if the model uses the [chat](/docs/api-reference/fine-tuning/chat-input) or [completions](/docs/api-reference/fine-tuning/completions-input) format. - - See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. + content: type: string - example: "file-abc123" - hyperparameters: + description: The contents of the chunk message. + nullable: true + function_call: + deprecated: true type: object - description: The hyperparameters used for the fine-tuning job. + description: "Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model." properties: - batch_size: - description: | - Number of examples in each batch. 
A larger batch size means that model parameters - are updated less frequently, but with lower variance. - oneOf: - - type: string - enum: [auto] - - type: integer - minimum: 1 - maximum: 256 - default: auto - learning_rate_multiplier: - description: | - Scaling factor for the learning rate. A smaller learning rate may be useful to avoid - overfitting. - oneOf: - - type: string - enum: [auto] - - type: number - minimum: 0 - exclusiveMinimum: true - default: auto - n_epochs: - description: | - The number of epochs to train the model for. An epoch refers to one full cycle - through the training dataset. - oneOf: - - type: string - enum: [auto] - - type: integer - minimum: 1 - maximum: 50 - default: auto - suffix: - description: | - A string of up to 18 characters that will be added to your fine-tuned model name. - - For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`. - type: string - minLength: 1 - maxLength: 40 - default: null - nullable: true - validation_file: - description: | - The ID of an uploaded file that contains validation data. - - If you provide this file, the data is used to generate validation - metrics periodically during fine-tuning. These metrics can be viewed in - the fine-tuning results file. - The same data should not be present in both train and validation files. - - Your dataset must be formatted as a JSONL file. You must upload your file with the purpose `fine-tune`. - - See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. - type: string - nullable: true - example: "file-abc123" - integrations: - type: array - description: A list of integrations to enable for your fine-tuning job. - nullable: true - items: - type: object - required: - - type - - wandb - properties: - type: - description: | - The type of integration to enable. Currently, only "wandb" (Weights and Biases) is supported. 
- oneOf: - - type: string - enum: [wandb] - wandb: - type: object - description: | - The settings for your integration with Weights and Biases. This payload specifies the project that - metrics will be sent to. Optionally, you can set an explicit display name for your run, add tags - to your run, and set a default entity (team, username, etc) to be associated with your run. - required: - - project - properties: - project: - description: | - The name of the project that the new run will be created under. - type: string - example: "my-wandb-project" - name: - description: | - A display name to set for the run. If not set, we will use the Job ID as the name. - nullable: true - type: string - entity: - description: | - The entity to use for the run. This allows you to set the team or username of the WandB user that you would - like associated with the run. If not set, the default entity for the registered WandB API key is used. - nullable: true - type: string - tags: - description: | - A list of tags to be attached to the newly created run. These tags are passed through directly to WandB. Some - default tags are generated by OpenAI: "openai/finetune", "openai/{base-model}", "openai/{ftjob-abcdef}". - type: array - items: - type: string - example: "custom-tag" - - seed: - description: | - The seed controls the reproducibility of the job. Passing in the same seed and job parameters should produce the same results, but may differ in rare cases. - If a seed is not specified, one will be generated for you. 
- type: integer - nullable: true - minimum: 0 - maximum: 2147483647 - example: 42 - required: - - model - - training_file - - ListFineTuningJobEventsResponse: - type: object - properties: - data: - type: array - items: - $ref: "#/components/schemas/FineTuningJobEvent" - object: - type: string - enum: [list] - required: - - object - - data - - ListFineTuningJobCheckpointsResponse: - type: object - properties: - data: + arguments: + type: string + description: The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. + name: + type: string + description: The name of the function to call. + tool_calls: type: array items: - $ref: "#/components/schemas/FineTuningJobCheckpoint" - object: - type: string - enum: [list] - first_id: + $ref: "#/components/schemas/ChatCompletionMessageToolCallChunk" + role: type: string - nullable: true - last_id: + enum: ["system", "user", "assistant", "tool"] + description: The role of the author of this message. + refusal: type: string + description: The refusal message generated by the model. nullable: true - has_more: - type: boolean - required: - - object - - data - - has_more - CreateEmbeddingRequest: + CreateChatCompletionRequest: type: object - additionalProperties: false properties: - input: - description: | - Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. The input must not exceed the max input tokens for the model (8192 tokens for `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048 dimensions or less. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. 
- example: "The quick brown fox jumped over the lazy dog" - oneOf: - - type: string - title: string - description: The string that will be turned into an embedding. - default: "" - example: "This is a test." - - type: array - title: array - description: The array of strings that will be turned into an embedding. - minItems: 1 - maxItems: 2048 - items: - type: string - default: "" - example: "['This is a test.']" - - type: array - title: array - description: The array of integers that will be turned into an embedding. - minItems: 1 - maxItems: 2048 - items: - type: integer - example: "[1212, 318, 257, 1332, 13]" - - type: array - title: array - description: The array of arrays containing integers that will be turned into an embedding. - minItems: 1 - maxItems: 2048 - items: - type: array - minItems: 1 - items: - type: integer - example: "[[1212, 318, 257, 1332, 13]]" - x-oaiExpandable: true + messages: + description: A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). + type: array + minItems: 1 + items: + $ref: "#/components/schemas/ChatCompletionRequestMessage" model: - description: *model_description - example: "text-embedding-3-small" + description: ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. 
+ example: "gpt-4o" anyOf: - type: string - type: string enum: [ - "text-embedding-ada-002", - "text-embedding-3-small", - "text-embedding-3-large", + "gpt-4o", + "gpt-4o-2024-05-13", + "gpt-4o-2024-08-06", + "chatgpt-4o-latest", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", + "gpt-4-turbo", + "gpt-4-turbo-2024-04-09", + "gpt-4-0125-preview", + "gpt-4-turbo-preview", + "gpt-4-1106-preview", + "gpt-4-vision-preview", + "gpt-4", + "gpt-4-0314", + "gpt-4-0613", + "gpt-4-32k", + "gpt-4-32k-0314", + "gpt-4-32k-0613", + "gpt-3.5-turbo", + "gpt-3.5-turbo-16k", + "gpt-3.5-turbo-0301", + "gpt-3.5-turbo-0613", + "gpt-3.5-turbo-1106", + "gpt-3.5-turbo-0125", + "gpt-3.5-turbo-16k-0613", ] x-oaiTypeLabel: string - encoding_format: - description: "The format to return the embeddings in. Can be either `float` or [`base64`](https://pypi.org/project/pybase64/)." - example: "float" - default: "float" - type: string - enum: ["float", "base64"] - dimensions: - description: | - The number of dimensions the resulting output embeddings should have. Only supported in `text-embedding-3` and later models. - type: integer - minimum: 1 - user: *end_user_param_configuration - required: - - model - - input - - CreateEmbeddingResponse: - type: object - properties: - data: - type: array - description: The list of embeddings generated by the model. - items: - $ref: "#/components/schemas/Embedding" - model: - type: string - description: The name of the model used to generate the embedding. - object: - type: string - description: The object type, which is always "list". - enum: [list] - usage: + frequency_penalty: + type: number + default: 0 + minimum: -2 + maximum: 2 + nullable: true + description: *completions_frequency_penalty_description + logit_bias: type: object - description: The usage information for the request. - properties: - prompt_tokens: - type: integer - description: The number of tokens used by the prompt. 
- total_tokens: - type: integer - description: The total number of tokens used by the request. - required: - - prompt_tokens - - total_tokens - required: - - object - - model - - data - - usage - - CreateTranscriptionRequest: - type: object - additionalProperties: false - properties: - file: + x-oaiTypeLabel: map + default: null + nullable: true + additionalProperties: + type: integer description: | - The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. - type: string - x-oaiTypeLabel: file - format: binary - model: + Modify the likelihood of specified tokens appearing in the completion. + + Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. + logprobs: + description: Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the `content` of `message`. + type: boolean + default: false + nullable: true + top_logprobs: + description: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. + type: integer + minimum: 0 + maximum: 20 + nullable: true + max_tokens: description: | - ID of the model to use. Only `whisper-1` (which is powered by our open source Whisper V2 model) is currently available. 
- example: whisper-1 - anyOf: - - type: string - - type: string - enum: ["whisper-1"] - x-oaiTypeLabel: string - language: + The maximum number of [tokens](/tokenizer) that can be generated in the chat completion. + + The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. + type: integer + nullable: true + n: + type: integer + minimum: 1 + maximum: 128 + default: 1 + example: 1 + nullable: true + description: How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. + presence_penalty: + type: number + default: 0 + minimum: -2 + maximum: 2 + nullable: true + description: *completions_presence_penalty_description + response_format: description: | - The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency. - type: string - prompt: + An object specifying the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4o mini](/docs/models/gpt-4o-mini), [GPT-4 Turbo](/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs). + + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + + **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. 
Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + oneOf: + - $ref: "#/components/schemas/ResponseFormatText" + - $ref: "#/components/schemas/ResponseFormatJsonObject" + - $ref: "#/components/schemas/ResponseFormatJsonSchema" + x-oaiExpandable: true + seed: + type: integer + minimum: -9223372036854775808 + maximum: 9223372036854775807 + nullable: true description: | - An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language. - type: string - response_format: + This feature is in Beta. + If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. + Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. + x-oaiMeta: + beta: true + service_tier: + description: | + Specifies the latency tier to use for processing the request. This parameter is relevant for customers subscribed to the scale tier service: + - If set to 'auto', the system will utilize scale tier credits until they are exhausted. + - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee. + - When not set, the default behavior is 'auto'. + + When this parameter is set, the response body will include the `service_tier` utilized. 
+ type: string + enum: ["auto", "default"] + nullable: true + default: null + stop: description: | - The format of the transcript output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`. - type: string - enum: - - json - - text - - srt - - verbose_json - - vtt - default: json + Up to 4 sequences where the API will stop generating further tokens. + default: null + oneOf: + - type: string + nullable: true + - type: array + minItems: 1 + maxItems: 4 + items: + type: string + stream: + description: > + If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) + as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + type: boolean + nullable: true + default: false + stream_options: + $ref: "#/components/schemas/ChatCompletionStreamOptions" temperature: - description: | - The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit. type: number - default: 0 - timestamp_granularities[]: + minimum: 0 + maximum: 2 + default: 1 + example: 1 + nullable: true + description: *completions_temperature_description + top_p: + type: number + minimum: 0 + maximum: 1 + default: 1 + example: 1 + nullable: true + description: *completions_top_p_description + tools: + type: array + description: > + A list of tools the model may call. Currently, only functions are supported as a tool. + Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are supported. 
+ items: + $ref: "#/components/schemas/ChatCompletionTool" + tool_choice: + $ref: "#/components/schemas/ChatCompletionToolChoiceOption" + parallel_tool_calls: + $ref: "#/components/schemas/ParallelToolCalls" + user: *end_user_param_configuration + function_call: + deprecated: true description: | - The timestamp granularities to populate for this transcription. `response_format` must be set `verbose_json` to use timestamp granularities. Either or both of these options are supported: `word`, or `segment`. Note: There is no additional latency for segment timestamps, but generating word timestamps incurs additional latency. + Deprecated in favor of `tool_choice`. + + Controls which (if any) function is called by the model. + `none` means the model will not call a function and instead generates a message. + `auto` means the model can pick between generating a message or calling a function. + Specifying a particular function via `{"name": "my_function"}` forces the model to call that function. + + `none` is the default when no functions are present. `auto` is the default if functions are present. + oneOf: + - type: string + description: > + `none` means the model will not call a function and instead generates a message. + `auto` means the model can pick between generating a message or calling a function. + enum: [none, auto] + - $ref: "#/components/schemas/ChatCompletionFunctionCallOption" + x-oaiExpandable: true + functions: + deprecated: true + description: | + Deprecated in favor of `tools`. + + A list of functions the model may generate JSON inputs for. type: array + minItems: 1 + maxItems: 128 items: - type: string - enum: - - word - - segment - default: [segment] - required: - - file - - model + $ref: "#/components/schemas/ChatCompletionFunctions" - # Note: This does not currently support the non-default response format types. 
- CreateTranscriptionResponseJson: - type: object - description: Represents a transcription response returned by model, based on the provided input. - properties: - text: - type: string - description: The transcribed text. required: - - text - x-oaiMeta: - name: The transcription object (JSON) - group: audio - example: *basic_transcription_response_example + - model + - messages - TranscriptionSegment: + CreateChatCompletionResponse: type: object + description: Represents a chat completion response returned by model, based on the provided input. properties: id: - type: integer - description: Unique identifier of the segment. - seek: - type: integer - description: Seek offset of the segment. - start: - type: number - format: float - description: Start time of the segment in seconds. - end: - type: number - format: float - description: End time of the segment in seconds. - text: type: string - description: Text content of the segment. - tokens: + description: A unique identifier for the chat completion. + choices: type: array + description: A list of chat completion choices. Can be more than one if `n` is greater than 1. items: - type: integer - description: Array of token IDs for the text content. - temperature: - type: number - format: float - description: Temperature parameter used for generating the segment. - avg_logprob: - type: number - format: float - description: Average logprob of the segment. If the value is lower than -1, consider the logprobs failed. - compression_ratio: - type: number - format: float - description: Compression ratio of the segment. If the value is greater than 2.4, consider the compression failed. - no_speech_prob: - type: number - format: float - description: Probability of no speech in the segment. If the value is higher than 1.0 and the `avg_logprob` is below -1, consider this segment silent. 
- required: - - id - - seek - - start - - end - - text - - tokens - - temperature - - avg_logprob - - compression_ratio - - no_speech_prob + type: object + required: + - finish_reason + - index + - message + - logprobs + properties: + finish_reason: + type: string + description: &chat_completion_finish_reason_description | + The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, + `length` if the maximum number of tokens specified in the request was reached, + `content_filter` if content was omitted due to a flag from our content filters, + `tool_calls` if the model called a tool, or `function_call` (deprecated) if the model called a function. + enum: + [ + "stop", + "length", + "tool_calls", + "content_filter", + "function_call", + ] + index: + type: integer + description: The index of the choice in the list of choices. + message: + $ref: "#/components/schemas/ChatCompletionResponseMessage" + logprobs: &chat_completion_response_logprobs + description: Log probability information for the choice. + type: object + nullable: true + properties: + content: + description: A list of message content tokens with log probability information. + type: array + items: + $ref: "#/components/schemas/ChatCompletionTokenLogprob" + nullable: true + refusal: + description: A list of message refusal tokens with log probability information. + type: array + items: + $ref: "#/components/schemas/ChatCompletionTokenLogprob" + nullable: true + required: + - content + - refusal - TranscriptionWord: - type: object - properties: - word: + created: + type: integer + description: The Unix timestamp (in seconds) of when the chat completion was created. + model: type: string - description: The text content of the word. - start: - type: number - format: float - description: Start time of the word in seconds. - end: - type: number - format: float - description: End time of the word in seconds. 
- required: [word, start, end] - - CreateTranscriptionResponseVerboseJson: - type: object - description: Represents a verbose json transcription response returned by model, based on the provided input. - properties: - language: + description: The model used for the chat completion. + service_tier: + description: The service tier used for processing the request. This field is only included if the `service_tier` parameter is specified in the request. type: string - description: The language of the input audio. - duration: + enum: ["scale", "default"] + example: "scale" + nullable: true + system_fingerprint: type: string - description: The duration of the input audio. - text: + description: | + This fingerprint represents the backend configuration that the model runs with. + + Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. + object: type: string - description: The transcribed text. - words: - type: array - description: Extracted words and their corresponding timestamps. - items: - $ref: "#/components/schemas/TranscriptionWord" - segments: - type: array - description: Segments of the transcribed text and their corresponding details. - items: - $ref: "#/components/schemas/TranscriptionSegment" - required: [language, duration, text] + description: The object type, which is always `chat.completion`. + enum: [chat.completion] + usage: + $ref: "#/components/schemas/CompletionUsage" + required: + - choices + - created + - id + - model + - object x-oaiMeta: - name: The transcription object (Verbose JSON) - group: audio - example: *verbose_transcription_response_example + name: The chat completion object + group: chat + example: *chat_completion_example - CreateTranslationRequest: + CreateChatCompletionFunctionResponse: type: object - additionalProperties: false + description: Represents a chat completion response returned by model, based on the provided input. 
properties: - file: - description: | - The audio file object (not file name) translate, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. + id: type: string - x-oaiTypeLabel: file - format: binary + description: A unique identifier for the chat completion. + choices: + type: array + description: A list of chat completion choices. Can be more than one if `n` is greater than 1. + items: + type: object + required: + - finish_reason + - index + - message + - logprobs + properties: + finish_reason: + type: string + description: + &chat_completion_function_finish_reason_description | + The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, `length` if the maximum number of tokens specified in the request was reached, `content_filter` if content was omitted due to a flag from our content filters, or `function_call` if the model called a function. + enum: + ["stop", "length", "function_call", "content_filter"] + index: + type: integer + description: The index of the choice in the list of choices. + message: + $ref: "#/components/schemas/ChatCompletionResponseMessage" + created: + type: integer + description: The Unix timestamp (in seconds) of when the chat completion was created. model: - description: | - ID of the model to use. Only `whisper-1` (which is powered by our open source Whisper V2 model) is currently available. - example: whisper-1 - anyOf: - - type: string - - type: string - enum: ["whisper-1"] - x-oaiTypeLabel: string - prompt: - description: | - An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English. type: string - response_format: - description: | - The format of the transcript output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`. + description: The model used for the chat completion. 
+ system_fingerprint: type: string - default: json - temperature: description: | - The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit. - type: number - default: 0 + This fingerprint represents the backend configuration that the model runs with. + + Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. + object: + type: string + description: The object type, which is always `chat.completion`. + enum: [chat.completion] + usage: + $ref: "#/components/schemas/CompletionUsage" required: - - file + - choices + - created + - id - model + - object + x-oaiMeta: + name: The chat completion object + group: chat + example: *chat_completion_function_example - # Note: This does not currently support the non-default response format types. - CreateTranslationResponseJson: + ChatCompletionTokenLogprob: type: object properties: - text: + token: &chat_completion_response_logprobs_token + description: The token. type: string + logprob: &chat_completion_response_logprobs_token_logprob + description: The log probability of this token, if it is within the top 20 most likely tokens. Otherwise, the value `-9999.0` is used to signify that the token is very unlikely. + type: number + bytes: &chat_completion_response_logprobs_bytes + description: A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token. 
+ type: array + items: + type: integer + nullable: true + top_logprobs: + description: List of the most likely tokens and their log probability, at this token position. In rare cases, there may be fewer than the number of requested `top_logprobs` returned. + type: array + items: + type: object + properties: + token: *chat_completion_response_logprobs_token + logprob: *chat_completion_response_logprobs_token_logprob + bytes: *chat_completion_response_logprobs_bytes + required: + - token + - logprob + - bytes required: - - text + - token + - logprob + - bytes + - top_logprobs - CreateTranslationResponseVerboseJson: + ListPaginatedFineTuningJobsResponse: type: object properties: - language: - type: string - description: The language of the output translation (always `english`). - duration: - type: string - description: The duration of the input audio. - text: - type: string - description: The translated text. - segments: + data: type: array - description: Segments of the translated text and their corresponding details. items: - $ref: "#/components/schemas/TranscriptionSegment" - required: [language, duration, text] - - CreateSpeechRequest: - type: object - additionalProperties: false - properties: - model: - description: | - One of the available [TTS models](/docs/models/tts): `tts-1` or `tts-1-hd` - anyOf: - - type: string - - type: string - enum: ["tts-1", "tts-1-hd"] - x-oaiTypeLabel: string - input: - type: string - description: The text to generate audio for. The maximum length is 4096 characters. - maxLength: 4096 - voice: - description: The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the [Text to speech guide](/docs/guides/text-to-speech/voice-options). - type: string - enum: ["alloy", "echo", "fable", "onyx", "nova", "shimmer"] - response_format: - description: "The format to audio in. 
Supported formats are `mp3`, `opus`, `aac`, `flac`, `wav`, and `pcm`." - default: "mp3" + $ref: "#/components/schemas/FineTuningJob" + has_more: + type: boolean + object: type: string - enum: ["mp3", "opus", "aac", "flac", "wav", "pcm"] - speed: - description: "The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is the default." - type: number - default: 1.0 - minimum: 0.25 - maximum: 4.0 + enum: [list] required: - - model - - input - - voice + - object + - data + - has_more - Model: - title: Model - description: Describes an OpenAI model offering that can be used with the API. + CreateChatCompletionStreamResponse: + type: object + description: Represents a streamed chunk of a chat completion response returned by model, based on the provided input. properties: id: type: string - description: The model identifier, which can be referenced in the API endpoints. + description: A unique identifier for the chat completion. Each chunk has the same ID. + choices: + type: array + description: | + A list of chat completion choices. Can contain more than one elements if `n` is greater than 1. Can also be empty for the + last chunk if you set `stream_options: {"include_usage": true}`. + items: + type: object + required: + - delta + - finish_reason + - index + properties: + delta: + $ref: "#/components/schemas/ChatCompletionStreamResponseDelta" + logprobs: *chat_completion_response_logprobs + finish_reason: + type: string + description: *chat_completion_finish_reason_description + enum: + [ + "stop", + "length", + "tool_calls", + "content_filter", + "function_call", + ] + nullable: true + index: + type: integer + description: The index of the choice in the list of choices. created: type: integer - description: The Unix timestamp (in seconds) when the model was created. - object: + description: The Unix timestamp (in seconds) of when the chat completion was created. Each chunk has the same timestamp. 
+ model: type: string - description: The object type, which is always "model". - enum: [model] - owned_by: + description: The model to generate the completion. + service_tier: + description: The service tier used for processing the request. This field is only included if the `service_tier` parameter is specified in the request. type: string - description: The organization that owns the model. + enum: ["scale", "default"] + example: "scale" + nullable: true + system_fingerprint: + type: string + description: | + This fingerprint represents the backend configuration that the model runs with. + Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. + object: + type: string + description: The object type, which is always `chat.completion.chunk`. + enum: [chat.completion.chunk] + usage: + type: object + description: | + An optional field that will only be present when you set `stream_options: {"include_usage": true}` in your request. + When present, it contains a null value except for the last chunk which contains the token usage statistics for the entire request. + properties: + completion_tokens: + type: integer + description: Number of tokens in the generated completion. + prompt_tokens: + type: integer + description: Number of tokens in the prompt. + total_tokens: + type: integer + description: Total number of tokens used in the request (prompt + completion). + required: + - prompt_tokens + - completion_tokens + - total_tokens required: + - choices + - created - id + - model - object - - created - - owned_by x-oaiMeta: - name: The model object - example: *retrieve_model_response + name: The chat completion chunk object + group: chat + example: *chat_completion_chunk_example - OpenAIFile: - title: OpenAIFile - description: The `File` object represents a document that has been uploaded to OpenAI. 
+ CreateChatCompletionImageResponse: + type: object + description: Represents a streamed chunk of a chat completion response returned by model, based on the provided input. + x-oaiMeta: + name: The chat completion chunk object + group: chat + example: *chat_completion_image_example + + CreateImageRequest: + type: object properties: - id: + prompt: + description: A text description of the desired image(s). The maximum length is 1000 characters for `dall-e-2` and 4000 characters for `dall-e-3`. type: string - description: The file identifier, which can be referenced in the API endpoints. - bytes: - type: integer - description: The size of the file, in bytes. - created_at: + example: "A cute baby sea otter" + model: + anyOf: + - type: string + - type: string + enum: ["dall-e-2", "dall-e-3"] + x-oaiTypeLabel: string + default: "dall-e-2" + example: "dall-e-3" + nullable: true + description: The model to use for image generation. + n: &images_n type: integer - description: The Unix timestamp (in seconds) for when the file was created. - filename: - type: string - description: The name of the file. - object: + minimum: 1 + maximum: 10 + default: 1 + example: 1 + nullable: true + description: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only `n=1` is supported. + quality: type: string - description: The object type, which is always `file`. - enum: ["file"] - purpose: + enum: ["standard", "hd"] + default: "standard" + example: "standard" + description: The quality of the image that will be generated. `hd` creates images with finer details and greater consistency across the image. This param is only supported for `dall-e-3`. + response_format: &images_response_format type: string - description: The intended purpose of the file. Supported values are `assistants`, `assistants_output`, `batch`, `batch_output`, `fine-tune`, `fine-tune-results` and `vision`. 
- enum: - [ - "assistants", - "assistants_output", - "batch", - "batch_output", - "fine-tune", - "fine-tune-results", - "vision", - ] - status: + enum: ["url", "b64_json"] + default: "url" + example: "url" + nullable: true + description: The format in which the generated images are returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the image has been generated. + size: &images_size type: string - deprecated: true - description: Deprecated. The current status of the file, which can be either `uploaded`, `processed`, or `error`. - enum: ["uploaded", "processed", "error"] - status_details: + enum: ["256x256", "512x512", "1024x1024", "1792x1024", "1024x1792"] + default: "1024x1024" + example: "1024x1024" + nullable: true + description: The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3` models. + style: type: string - deprecated: true - description: Deprecated. For details on why a fine-tuning training file failed validation, see the `error` field on `fine_tuning.job`. + enum: ["vivid", "natural"] + default: "vivid" + example: "vivid" + nullable: true + description: The style of the generated images. Must be one of `vivid` or `natural`. Vivid causes the model to lean towards generating hyper-real and dramatic images. Natural causes the model to produce more natural, less hyper-real looking images. This param is only supported for `dall-e-3`. + user: *end_user_param_configuration required: - - id - - object - - bytes - - created_at - - filename - - purpose - - status - x-oaiMeta: - name: The file object - example: | - { - "id": "file-abc123", - "object": "file", - "bytes": 120000, - "created_at": 1677610602, - "filename": "salesOverview.pdf", - "purpose": "assistants", - } - Embedding: - type: object - description: | - Represents an embedding vector returned by embedding endpoint. 
+ - prompt + + ImagesResponse: properties: - index: + created: type: integer - description: The index of the embedding in the list of embeddings. - embedding: + data: type: array - description: | - The embedding vector, which is a list of floats. The length of vector depends on the model as listed in the [embedding guide](/docs/guides/embeddings). items: - type: number - object: - type: string - description: The object type, which is always "embedding". - enum: [embedding] + $ref: "#/components/schemas/Image" required: - - index - - object - - embedding + - created + - data + + Image: + type: object + description: Represents the url or the content of an image generated by the OpenAI API. + properties: + b64_json: + type: string + description: The base64-encoded JSON of the generated image, if `response_format` is `b64_json`. + url: + type: string + description: The URL of the generated image, if `response_format` is `url` (default). + revised_prompt: + type: string + description: The prompt that was used to generate the image, if there was any revision to the prompt. x-oaiMeta: - name: The embedding object + name: The image object example: | { - "object": "embedding", - "embedding": [ - 0.0023064255, - -0.009327292, - .... (1536 floats total for ada-002) - -0.0028842222, - ], - "index": 0 + "url": "...", + "revised_prompt": "..." } - FineTuningJob: + CreateImageEditRequest: type: object - title: FineTuningJob - description: | - The `fine_tuning.job` object represents a fine-tuning job that has been created through the API. properties: - id: - type: string - description: The object identifier, which can be referenced in the API endpoints. - created_at: - type: integer - description: The Unix timestamp (in seconds) for when the fine-tuning job was created. - error: - type: object - nullable: true - description: For fine-tuning jobs that have `failed`, this will contain more information on the cause of the failure. 
- properties: - code: - type: string - description: A machine-readable error code. - message: - type: string - description: A human-readable error message. - param: - type: string - description: The parameter that was invalid, usually `training_file` or `validation_file`. This field will be null if the failure was not parameter-specific. - nullable: true - required: - - code - - message - - param - fine_tuned_model: - type: string - nullable: true - description: The name of the fine-tuned model that is being created. The value will be null if the fine-tuning job is still running. - finished_at: - type: integer - nullable: true - description: The Unix timestamp (in seconds) for when the fine-tuning job was finished. The value will be null if the fine-tuning job is still running. - hyperparameters: - type: object - description: The hyperparameters used for the fine-tuning job. See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. - properties: - n_epochs: - oneOf: - - type: string - enum: [auto] - - type: integer - minimum: 1 - maximum: 50 - default: auto - description: - The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset. - - "auto" decides the optimal number of epochs based on the size of the dataset. If setting the number manually, we support any number between 1 and 50 epochs. - required: - - n_epochs - model: - type: string - description: The base model that is being fine-tuned. - object: + image: + description: The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask. type: string - description: The object type, which is always "fine_tuning.job". - enum: [fine_tuning.job] - organization_id: + format: binary + prompt: + description: A text description of the desired image(s). The maximum length is 1000 characters. type: string - description: The organization that owns the fine-tuning job. 
- result_files: - type: array - description: The compiled results file ID(s) for the fine-tuning job. You can retrieve the results with the [Files API](/docs/api-reference/files/retrieve-contents). - items: - type: string - example: file-abc123 - status: + example: "A cute baby sea otter wearing a beret" + mask: + description: An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where `image` should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as `image`. type: string - description: The current status of the fine-tuning job, which can be either `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`. - enum: - [ - "validating_files", - "queued", - "running", - "succeeded", - "failed", - "cancelled", - ] - trained_tokens: + format: binary + model: + anyOf: + - type: string + - type: string + enum: ["dall-e-2"] + x-oaiTypeLabel: string + default: "dall-e-2" + example: "dall-e-2" + nullable: true + description: The model to use for image generation. Only `dall-e-2` is supported at this time. + n: type: integer + minimum: 1 + maximum: 10 + default: 1 + example: 1 nullable: true - description: The total number of billable tokens processed by this fine-tuning job. The value will be null if the fine-tuning job is still running. - training_file: - type: string - description: The file ID used for training. You can retrieve the training data with the [Files API](/docs/api-reference/files/retrieve-contents). - validation_file: + description: The number of images to generate. Must be between 1 and 10. + size: &dalle2_images_size type: string + enum: ["256x256", "512x512", "1024x1024"] + default: "1024x1024" + example: "1024x1024" nullable: true - description: The file ID used for validation. You can retrieve the validation results with the [Files API](/docs/api-reference/files/retrieve-contents). 
- integrations: - type: array - nullable: true - description: A list of integrations to enable for this fine-tuning job. - maxItems: 5 - items: - oneOf: - - $ref: "#/components/schemas/FineTuningIntegration" - x-oaiExpandable: true - seed: - type: integer - description: The seed used for the fine-tuning job. - estimated_finish: - type: integer - nullable: true - description: The Unix timestamp (in seconds) for when the fine-tuning job is estimated to finish. The value will be null if the fine-tuning job is not running. + description: The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. + response_format: *images_response_format + user: *end_user_param_configuration required: - - created_at - - error - - finished_at - - fine_tuned_model - - hyperparameters - - id - - model - - object - - organization_id - - result_files - - status - - trained_tokens - - training_file - - validation_file - - seed - x-oaiMeta: - name: The fine-tuning job object - example: *fine_tuning_example + - prompt + - image - FineTuningIntegration: + CreateImageVariationRequest: type: object - title: Fine-Tuning Job Integration - required: - - type - - wandb properties: - type: + image: + description: The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, and square. type: string - description: "The type of the integration being enabled for the fine-tuning job" - enum: ["wandb"] - wandb: - type: object - description: | - The settings for your integration with Weights and Biases. This payload specifies the project that - metrics will be sent to. Optionally, you can set an explicit display name for your run, add tags - to your run, and set a default entity (team, username, etc) to be associated with your run. - required: - - project - properties: - project: - description: | - The name of the project that the new run will be created under. 
- type: string - example: "my-wandb-project" - name: - description: | - A display name to set for the run. If not set, we will use the Job ID as the name. - nullable: true - type: string - entity: - description: | - The entity to use for the run. This allows you to set the team or username of the WandB user that you would - like associated with the run. If not set, the default entity for the registered WandB API key is used. - nullable: true - type: string - tags: - description: | - A list of tags to be attached to the newly created run. These tags are passed through directly to WandB. Some - default tags are generated by OpenAI: "openai/finetune", "openai/{base-model}", "openai/{ftjob-abcdef}". - type: array - items: - type: string - example: "custom-tag" + format: binary + model: + anyOf: + - type: string + - type: string + enum: ["dall-e-2"] + x-oaiTypeLabel: string + default: "dall-e-2" + example: "dall-e-2" + nullable: true + description: The model to use for image generation. Only `dall-e-2` is supported at this time. + n: *images_n + response_format: *images_response_format + size: *dalle2_images_size + user: *end_user_param_configuration + required: + - image - FineTuningJobEvent: + CreateModerationRequest: type: object - description: Fine-tuning job event object properties: - id: - type: string - created_at: - type: integer - level: + input: + description: The input text to classify + oneOf: + - type: string + default: "" + example: "I want to kill them." + - type: array + items: + type: string + default: "" + example: "I want to kill them." + model: + description: | + Two content moderations models are available: `text-moderation-stable` and `text-moderation-latest`. + + The default is `text-moderation-latest` which will be automatically upgraded over time. This ensures you are always using our most accurate model. If you use `text-moderation-stable`, we will provide advanced notice before updating the model. 
Accuracy of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`. + nullable: false + default: "text-moderation-latest" + example: "text-moderation-stable" + anyOf: + - type: string + - type: string + enum: ["text-moderation-latest", "text-moderation-stable"] + x-oaiTypeLabel: string + required: + - input + + CreateModerationResponse: + type: object + description: Represents if a given text input is potentially harmful. + properties: + id: type: string - enum: ["info", "warn", "error"] - message: + description: The unique identifier for the moderation request. + model: type: string + description: The model used to generate the moderation results. + results: + type: array + description: A list of moderation objects. + items: + type: object + properties: + flagged: + type: boolean + description: Whether any of the below categories are flagged. + categories: + type: object + description: A list of the categories, and whether they are flagged or not. + properties: + hate: + type: boolean + description: Content that expresses, incites, or promotes hate based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste. Hateful content aimed at non-protected groups (e.g., chess players) is harassment. + hate/threatening: + type: boolean + description: Hateful content that also includes violence or serious harm towards the targeted group based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste. + harassment: + type: boolean + description: Content that expresses, incites, or promotes harassing language towards any target. + harassment/threatening: + type: boolean + description: Harassment content that also includes violence or serious harm towards any target. + self-harm: + type: boolean + description: Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting, and eating disorders. 
+ self-harm/intent: + type: boolean + description: Content where the speaker expresses that they are engaging or intend to engage in acts of self-harm, such as suicide, cutting, and eating disorders. + self-harm/instructions: + type: boolean + description: Content that encourages performing acts of self-harm, such as suicide, cutting, and eating disorders, or that gives instructions or advice on how to commit such acts. + sexual: + type: boolean + description: Content meant to arouse sexual excitement, such as the description of sexual activity, or that promotes sexual services (excluding sex education and wellness). + sexual/minors: + type: boolean + description: Sexual content that includes an individual who is under 18 years old. + violence: + type: boolean + description: Content that depicts death, violence, or physical injury. + violence/graphic: + type: boolean + description: Content that depicts death, violence, or physical injury in graphic detail. + required: + - hate + - hate/threatening + - harassment + - harassment/threatening + - self-harm + - self-harm/intent + - self-harm/instructions + - sexual + - sexual/minors + - violence + - violence/graphic + category_scores: + type: object + description: A list of the categories along with their scores as predicted by model. + properties: + hate: + type: number + description: The score for the category 'hate'. + hate/threatening: + type: number + description: The score for the category 'hate/threatening'. + harassment: + type: number + description: The score for the category 'harassment'. + harassment/threatening: + type: number + description: The score for the category 'harassment/threatening'. + self-harm: + type: number + description: The score for the category 'self-harm'. + self-harm/intent: + type: number + description: The score for the category 'self-harm/intent'. + self-harm/instructions: + type: number + description: The score for the category 'self-harm/instructions'. 
+ sexual: + type: number + description: The score for the category 'sexual'. + sexual/minors: + type: number + description: The score for the category 'sexual/minors'. + violence: + type: number + description: The score for the category 'violence'. + violence/graphic: + type: number + description: The score for the category 'violence/graphic'. + required: + - hate + - hate/threatening + - harassment + - harassment/threatening + - self-harm + - self-harm/intent + - self-harm/instructions + - sexual + - sexual/minors + - violence + - violence/graphic + required: + - flagged + - categories + - category_scores + required: + - id + - model + - results + x-oaiMeta: + name: The moderation object + example: *moderation_example + + ListFilesResponse: + type: object + properties: + data: + type: array + items: + $ref: "#/components/schemas/OpenAIFile" object: type: string - enum: [fine_tuning.job.event] + enum: [list] required: - - id - object - - created_at - - level - - message - x-oaiMeta: - name: The fine-tuning job event object - example: | - { - "object": "fine_tuning.job.event", - "id": "ftevent-abc123" - "created_at": 1677610602, - "level": "info", - "message": "Created fine-tuning job" - } + - data - FineTuningJobCheckpoint: + CreateFileRequest: type: object - title: FineTuningJobCheckpoint - description: | - The `fine_tuning.job.checkpoint` object represents a model checkpoint for a fine-tuning job that is ready to use. + additionalProperties: false properties: - id: + file: + description: | + The File object (not file name) to be uploaded. type: string - description: The checkpoint identifier, which can be referenced in the API endpoints. - created_at: - type: integer - description: The Unix timestamp (in seconds) for when the checkpoint was created. - fine_tuned_model_checkpoint: + format: binary + purpose: + description: | + The intended purpose of the uploaded file. 
+ + Use "assistants" for [Assistants](/docs/api-reference/assistants) and [Message](/docs/api-reference/messages) files, "vision" for Assistants image file inputs, "batch" for [Batch API](/docs/guides/batch), and "fine-tune" for [Fine-tuning](/docs/api-reference/fine-tuning). type: string - description: The name of the fine-tuned checkpoint model that is created. - step_number: - type: integer - description: The step number that the checkpoint was created at. - metrics: - type: object - description: Metrics at the step number during the fine-tuning job. - properties: - step: - type: number - train_loss: - type: number - train_mean_token_accuracy: - type: number - valid_loss: - type: number - valid_mean_token_accuracy: - type: number - full_valid_loss: - type: number - full_valid_mean_token_accuracy: - type: number - fine_tuning_job_id: + enum: ["assistants", "batch", "fine-tune", "vision"] + required: + - file + - purpose + + DeleteFileResponse: + type: object + properties: + id: type: string - description: The name of the fine-tuning job that this checkpoint was created from. object: type: string - description: The object type, which is always "fine_tuning.job.checkpoint". 
- enum: [fine_tuning.job.checkpoint] + enum: [file] + deleted: + type: boolean required: - - created_at - - fine_tuning_job_id - - fine_tuned_model_checkpoint - id - - metrics - object - - step_number - x-oaiMeta: - name: The fine-tuning job checkpoint object - example: | - { - "object": "fine_tuning.job.checkpoint", - "id": "ftckpt_qtZ5Gyk4BLq1SfLFWp3RtO3P", - "created_at": 1712211699, - "fine_tuned_model_checkpoint": "ft:gpt-3.5-turbo-0125:my-org:custom_suffix:9ABel2dg:ckpt-step-88", - "fine_tuning_job_id": "ftjob-fpbNQ3H1GrMehXRf8cO97xTN", - "metrics": { - "step": 88, - "train_loss": 0.478, - "train_mean_token_accuracy": 0.924, - "valid_loss": 10.112, - "valid_mean_token_accuracy": 0.145, - "full_valid_loss": 0.567, - "full_valid_mean_token_accuracy": 0.944 - }, - "step_number": 88 - } + - deleted - FinetuneChatRequestInput: - type: object - description: The per-line training example of a fine-tuning input file for chat models - properties: - messages: - type: array - minItems: 1 - items: - oneOf: - - $ref: "#/components/schemas/ChatCompletionRequestSystemMessage" - - $ref: "#/components/schemas/ChatCompletionRequestUserMessage" - - $ref: "#/components/schemas/FineTuneChatCompletionRequestAssistantMessage" - - $ref: "#/components/schemas/ChatCompletionRequestToolMessage" - - $ref: "#/components/schemas/ChatCompletionRequestFunctionMessage" - x-oaiExpandable: true - tools: - type: array - description: A list of tools the model may generate JSON inputs for. - items: - $ref: "#/components/schemas/ChatCompletionTool" - parallel_tool_calls: - $ref: "#/components/schemas/ParallelToolCalls" - functions: - deprecated: true - description: - A list of functions the model may generate JSON inputs for. - type: array - minItems: 1 - maxItems: 128 - items: - $ref: "#/components/schemas/ChatCompletionFunctions" - x-oaiMeta: - name: Training format for chat models - example: | - { - "messages": [ - { "role": "user", "content": "What is the weather in San Francisco?" 
}, - { - "role": "assistant", - "tool_calls": [ - { - "id": "call_id", - "type": "function", - "function": { - "name": "get_current_weather", - "arguments": "{\"location\": \"San Francisco, USA\", \"format\": \"celsius\"}" - } - } - ] - } - ], - "parallel_tool_calls": false, - "tools": [ - { - "type": "function", - "function": { - "name": "get_current_weather", - "description": "Get the current weather", - "parameters": { - "type": "object", - "properties": { - "location": { - "type": "string", - "description": "The city and country, eg. San Francisco, USA" - }, - "format": { "type": "string", "enum": ["celsius", "fahrenheit"] } - }, - "required": ["location", "format"] - } - } - } - ] - } - - FinetuneCompletionRequestInput: + CreateUploadRequest: type: object - description: The per-line training example of a fine-tuning input file for completions models + additionalProperties: false properties: - prompt: + filename: + description: | + The name of the file to upload. type: string - description: The input prompt for this training example. - completion: + purpose: + description: | + The intended purpose of the uploaded file. + + See the [documentation on File purposes](/docs/api-reference/files/create#files-create-purpose). type: string - description: The desired completion for this training example. - x-oaiMeta: - name: Training format for completions models - example: | - { - "prompt": "What is the answer to 2+2", - "completion": "4" - } - - CompletionUsage: - type: object - description: Usage statistics for the completion request. - properties: - completion_tokens: - type: integer - description: Number of tokens in the generated completion. - prompt_tokens: - type: integer - description: Number of tokens in the prompt. - total_tokens: + enum: ["assistants", "batch", "fine-tune", "vision"] + bytes: + description: | + The number of bytes in the file you are uploading. type: integer - description: Total number of tokens used in the request (prompt + completion). 
+ mime_type: + description: | + The MIME type of the file. + + This must fall within the supported MIME types for your file purpose. See the supported MIME types for assistants and vision. + type: string required: - - prompt_tokens - - completion_tokens - - total_tokens + - filename + - purpose + - bytes + - mime_type - RunCompletionUsage: + AddUploadPartRequest: type: object - description: Usage statistics related to the run. This value will be `null` if the run is not in a terminal state (i.e. `in_progress`, `queued`, etc.). + additionalProperties: false properties: - completion_tokens: - type: integer - description: Number of completion tokens used over the course of the run. - prompt_tokens: - type: integer - description: Number of prompt tokens used over the course of the run. - total_tokens: - type: integer - description: Total number of tokens used (prompt + completion). + data: + description: | + The chunk of bytes for this Part. + type: string + format: binary required: - - prompt_tokens - - completion_tokens - - total_tokens - nullable: true + - data - RunStepCompletionUsage: + CompleteUploadRequest: type: object - description: Usage statistics related to the run step. This value will be `null` while the run step's status is `in_progress`. + additionalProperties: false properties: - completion_tokens: - type: integer - description: Number of completion tokens used over the course of the run step. - prompt_tokens: - type: integer - description: Number of prompt tokens used over the course of the run step. - total_tokens: - type: integer - description: Total number of tokens used (prompt + completion). + part_ids: + type: array + description: | + The ordered list of Part IDs. + items: + type: string + md5: + description: | + The optional md5 checksum for the file contents to verify if the bytes uploaded matches what you expect. 
+ type: string required: - - prompt_tokens - - completion_tokens - - total_tokens - nullable: true - - AssistantsApiResponseFormatOption: - description: | - Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. - - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. - - **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. - oneOf: - - type: string - description: > - `auto` is the default value - enum: [none, auto] - - $ref: "#/components/schemas/AssistantsApiResponseFormat" - x-oaiExpandable: true + - part_ids - AssistantsApiResponseFormat: + CancelUploadRequest: type: object - description: | - An object describing the expected output of the model. If `json_object` only `function` type `tools` are allowed to be passed to the Run. If `text` the model can return text or any value needed. - properties: - type: - type: string - enum: ["text", "json_object"] - example: "json_object" - default: "text" - description: Must be one of `text` or `json_object`. + additionalProperties: false - AssistantObject: + CreateFineTuningJobRequest: type: object - title: Assistant - description: Represents an `assistant` that can call the model and use tools. properties: - id: - description: The identifier, which can be referenced in API endpoints. - type: string - object: - description: The object type, which is always `assistant`. 
- type: string - enum: [assistant] - created_at: - description: The Unix timestamp (in seconds) for when the assistant was created. - type: integer - name: - description: &assistant_name_param_description | - The name of the assistant. The maximum length is 256 characters. - type: string - maxLength: 256 - nullable: true - description: - description: &assistant_description_param_description | - The description of the assistant. The maximum length is 512 characters. - type: string - maxLength: 512 - nullable: true model: - description: *model_description - type: string - instructions: - description: &assistant_instructions_param_description | - The system instructions that the assistant uses. The maximum length is 256,000 characters. + description: | + The name of the model to fine-tune. You can select one of the + [supported models](/docs/guides/fine-tuning/which-models-can-be-fine-tuned). + example: "gpt-4o-mini" + anyOf: + - type: string + - type: string + enum: ["babbage-002", "davinci-002", "gpt-3.5-turbo", "gpt-4o-mini"] + x-oaiTypeLabel: string + training_file: + description: | + The ID of an uploaded file that contains training data. + + See [upload file](/docs/api-reference/files/create) for how to upload a file. + + Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose `fine-tune`. + + The contents of the file should differ depending on if the model uses the [chat](/docs/api-reference/fine-tuning/chat-input) or [completions](/docs/api-reference/fine-tuning/completions-input) format. + + See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. type: string - maxLength: 256000 - nullable: true - tools: - description: &assistant_tools_param_description | - A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. 
- default: [] - type: array - maxItems: 128 - items: - oneOf: - - $ref: "#/components/schemas/AssistantToolsCode" - - $ref: "#/components/schemas/AssistantToolsFileSearch" - - $ref: "#/components/schemas/AssistantToolsFunction" - x-oaiExpandable: true - tool_resources: + example: "file-abc123" + hyperparameters: type: object - description: | - A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. + description: The hyperparameters used for the fine-tuning job. properties: - code_interpreter: - type: object - properties: - file_ids: - type: array - description: | - A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter`` tool. There can be a maximum of 20 files associated with the tool. - default: [] - maxItems: 20 - items: - type: string - file_search: - type: object - properties: - vector_store_ids: - type: array - description: | - The ID of the [vector store](/docs/api-reference/vector-stores/object) attached to this assistant. There can be a maximum of 1 vector store attached to the assistant. - maxItems: 1 - items: - type: string + batch_size: + description: | + Number of examples in each batch. A larger batch size means that model parameters + are updated less frequently, but with lower variance. + oneOf: + - type: string + enum: [auto] + - type: integer + minimum: 1 + maximum: 256 + default: auto + learning_rate_multiplier: + description: | + Scaling factor for the learning rate. A smaller learning rate may be useful to avoid + overfitting. + oneOf: + - type: string + enum: [auto] + - type: number + minimum: 0 + exclusiveMinimum: true + default: auto + n_epochs: + description: | + The number of epochs to train the model for. An epoch refers to one full cycle + through the training dataset. 
+ oneOf: + - type: string + enum: [auto] + - type: integer + minimum: 1 + maximum: 50 + default: auto + suffix: + description: | + A string of up to 18 characters that will be added to your fine-tuned model name. + + For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. + type: string + minLength: 1 + maxLength: 40 + default: null nullable: true - metadata: - description: &metadata_description | - Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. - type: object - x-oaiTypeLabel: map + validation_file: + description: | + The ID of an uploaded file that contains validation data. + + If you provide this file, the data is used to generate validation + metrics periodically during fine-tuning. These metrics can be viewed in + the fine-tuning results file. + The same data should not be present in both train and validation files. + + Your dataset must be formatted as a JSONL file. You must upload your file with the purpose `fine-tune`. + + See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. + type: string nullable: true - temperature: - description: &run_temperature_description | - What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. - type: number - minimum: 0 - maximum: 2 - default: 1 - example: 1 + example: "file-abc123" + integrations: + type: array + description: A list of integrations to enable for your fine-tuning job. nullable: true - top_p: - type: number - minimum: 0 - maximum: 1 - default: 1 - example: 1 + items: + type: object + required: + - type + - wandb + properties: + type: + description: | + The type of integration to enable. 
Currently, only "wandb" (Weights and Biases) is supported. + oneOf: + - type: string + enum: [wandb] + wandb: + type: object + description: | + The settings for your integration with Weights and Biases. This payload specifies the project that + metrics will be sent to. Optionally, you can set an explicit display name for your run, add tags + to your run, and set a default entity (team, username, etc) to be associated with your run. + required: + - project + properties: + project: + description: | + The name of the project that the new run will be created under. + type: string + example: "my-wandb-project" + name: + description: | + A display name to set for the run. If not set, we will use the Job ID as the name. + nullable: true + type: string + entity: + description: | + The entity to use for the run. This allows you to set the team or username of the WandB user that you would + like associated with the run. If not set, the default entity for the registered WandB API key is used. + nullable: true + type: string + tags: + description: | + A list of tags to be attached to the newly created run. These tags are passed through directly to WandB. Some + default tags are generated by OpenAI: "openai/finetune", "openai/{base-model}", "openai/{ftjob-abcdef}". + type: array + items: + type: string + example: "custom-tag" + + seed: + description: | + The seed controls the reproducibility of the job. Passing in the same seed and job parameters should produce the same results, but may differ in rare cases. + If a seed is not specified, one will be generated for you. + type: integer nullable: true - description: &run_top_p_description | - An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. 
+ minimum: 0 + maximum: 2147483647 + example: 42 + required: + - model + - training_file - We generally recommend altering this or temperature but not both. - response_format: - $ref: "#/components/schemas/AssistantsApiResponseFormatOption" + ListFineTuningJobEventsResponse: + type: object + properties: + data: + type: array + items: + $ref: "#/components/schemas/FineTuningJobEvent" + object: + type: string + enum: [list] + required: + - object + - data + + ListFineTuningJobCheckpointsResponse: + type: object + properties: + data: + type: array + items: + $ref: "#/components/schemas/FineTuningJobCheckpoint" + object: + type: string + enum: [list] + first_id: + type: string + nullable: true + last_id: + type: string nullable: true + has_more: + type: boolean required: - - id - object - - created_at - - name - - description - - model - - instructions - - tools - - metadata - x-oaiMeta: - name: The assistant object - beta: true - example: *create_assistants_example + - data + - has_more - CreateAssistantRequest: + CreateEmbeddingRequest: type: object additionalProperties: false properties: + input: + description: | + Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. The input must not exceed the max input tokens for the model (8192 tokens for `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048 dimensions or less. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. + example: "The quick brown fox jumped over the lazy dog" + oneOf: + - type: string + title: string + description: The string that will be turned into an embedding. + default: "" + example: "This is a test." + - type: array + title: array + description: The array of strings that will be turned into an embedding. 
+ minItems: 1 + maxItems: 2048 + items: + type: string + default: "" + example: "['This is a test.']" + - type: array + title: array + description: The array of integers that will be turned into an embedding. + minItems: 1 + maxItems: 2048 + items: + type: integer + example: "[1212, 318, 257, 1332, 13]" + - type: array + title: array + description: The array of arrays containing integers that will be turned into an embedding. + minItems: 1 + maxItems: 2048 + items: + type: array + minItems: 1 + items: + type: integer + example: "[[1212, 318, 257, 1332, 13]]" + x-oaiExpandable: true model: description: *model_description - example: "gpt-4-turbo" + example: "text-embedding-3-small" anyOf: - type: string - type: string enum: [ - "gpt-4o", - "gpt-4o-2024-05-13", - "gpt-4-turbo", - "gpt-4-turbo-2024-04-09", - "gpt-4-0125-preview", - "gpt-4-turbo-preview", - "gpt-4-1106-preview", - "gpt-4-vision-preview", - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-16k-0613", + "text-embedding-ada-002", + "text-embedding-3-small", + "text-embedding-3-large", ] x-oaiTypeLabel: string - name: - description: *assistant_name_param_description - type: string - nullable: true - maxLength: 256 - description: - description: *assistant_description_param_description - type: string - nullable: true - maxLength: 512 - instructions: - description: *assistant_instructions_param_description + encoding_format: + description: "The format to return the embeddings in. Can be either `float` or [`base64`](https://pypi.org/project/pybase64/)." 
+ example: "float" + default: "float" type: string - nullable: true - maxLength: 256000 - tools: - description: *assistant_tools_param_description - default: [] + enum: ["float", "base64"] + dimensions: + description: | + The number of dimensions the resulting output embeddings should have. Only supported in `text-embedding-3` and later models. + type: integer + minimum: 1 + user: *end_user_param_configuration + required: + - model + - input + + CreateEmbeddingResponse: + type: object + properties: + data: type: array - maxItems: 128 + description: The list of embeddings generated by the model. items: - oneOf: - - $ref: "#/components/schemas/AssistantToolsCode" - - $ref: "#/components/schemas/AssistantToolsFileSearch" - - $ref: "#/components/schemas/AssistantToolsFunction" - x-oaiExpandable: true - tool_resources: + $ref: "#/components/schemas/Embedding" + model: + type: string + description: The name of the model used to generate the embedding. + object: + type: string + description: The object type, which is always "list". + enum: [list] + usage: type: object - description: | - A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. + description: The usage information for the request. properties: - code_interpreter: - type: object - properties: - file_ids: - type: array - description: | - A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. - default: [] - maxItems: 20 - items: - type: string - file_search: - type: object - properties: - vector_store_ids: - type: array - description: | - The [vector store](/docs/api-reference/vector-stores/object) attached to this assistant. There can be a maximum of 1 vector store attached to the assistant. 
- maxItems: 1 - items: - type: string - vector_stores: - type: array - description: | - A helper to create a [vector store](/docs/api-reference/vector-stores/object) with file_ids and attach it to this assistant. There can be a maximum of 1 vector store attached to the assistant. - maxItems: 1 - items: - type: object - properties: - file_ids: - type: array - description: | - A list of [file](/docs/api-reference/files) IDs to add to the vector store. There can be a maximum of 10000 files in a vector store. - maxItems: 10000 - items: - type: string - chunking_strategy: - # Ideally we'd reuse the chunking strategy schema here, but it doesn't expand properly - type: object - description: The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. - oneOf: - - type: object - title: Auto Chunking Strategy - description: The default strategy. This strategy currently uses a `max_chunk_size_tokens` of `800` and `chunk_overlap_tokens` of `400`. - additionalProperties: false - properties: - type: - type: string - description: Always `auto`. - enum: ["auto"] - required: - - type - - type: object - title: Static Chunking Strategy - additionalProperties: false - properties: - type: - type: string - description: Always `static`. - enum: ["static"] - static: - type: object - additionalProperties: false - properties: - max_chunk_size_tokens: - type: integer - minimum: 100 - maximum: 4096 - description: The maximum number of tokens in each chunk. The default value is `800`. The minimum value is `100` and the maximum value is `4096`. - chunk_overlap_tokens: - type: integer - description: | - The number of tokens that overlap between chunks. The default value is `400`. - - Note that the overlap must not exceed half of `max_chunk_size_tokens`. 
- required: - - max_chunk_size_tokens - - chunk_overlap_tokens - required: - - type - - static - x-oaiExpandable: true - metadata: - type: object - description: | - Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. - x-oaiTypeLabel: map - oneOf: - - required: [vector_store_ids] - - required: [vector_stores] - nullable: true - metadata: - description: *metadata_description - type: object - x-oaiTypeLabel: map - nullable: true - temperature: - description: &run_temperature_description | - What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. - type: number - minimum: 0 - maximum: 2 - default: 1 - example: 1 - nullable: true - top_p: - type: number - minimum: 0 - maximum: 1 - default: 1 - example: 1 - nullable: true - description: &run_top_p_description | - An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. - response_format: - $ref: "#/components/schemas/AssistantsApiResponseFormatOption" - nullable: true + prompt_tokens: + type: integer + description: The number of tokens used by the prompt. + total_tokens: + type: integer + description: The total number of tokens used by the request. 
+ required: + - prompt_tokens + - total_tokens required: + - object - model + - data + - usage - ModifyAssistantRequest: + CreateTranscriptionRequest: type: object additionalProperties: false properties: + file: + description: | + The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. + type: string + x-oaiTypeLabel: file + format: binary model: - description: *model_description + description: | + ID of the model to use. Only `whisper-1` (which is powered by our open source Whisper V2 model) is currently available. + example: whisper-1 anyOf: - type: string - name: - description: *assistant_name_param_description + - type: string + enum: ["whisper-1"] + x-oaiTypeLabel: string + language: + description: | + The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency. type: string - nullable: true - maxLength: 256 - description: - description: *assistant_description_param_description + prompt: + description: | + An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language. type: string - nullable: true - maxLength: 512 - instructions: - description: *assistant_instructions_param_description + response_format: + description: | + The format of the transcript output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`. type: string - nullable: true - maxLength: 256000 - tools: - description: *assistant_tools_param_description - default: [] + enum: + - json + - text + - srt + - verbose_json + - vtt + default: json + temperature: + description: | + The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit. + type: number + default: 0 + timestamp_granularities[]: + description: | + The timestamp granularities to populate for this transcription. `response_format` must be set `verbose_json` to use timestamp granularities. Either or both of these options are supported: `word`, or `segment`. Note: There is no additional latency for segment timestamps, but generating word timestamps incurs additional latency. type: array - maxItems: 128 items: - oneOf: - - $ref: "#/components/schemas/AssistantToolsCode" - - $ref: "#/components/schemas/AssistantToolsFileSearch" - - $ref: "#/components/schemas/AssistantToolsFunction" - x-oaiExpandable: true - tool_resources: - type: object - description: | - A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. - properties: - code_interpreter: - type: object - properties: - file_ids: - type: array - description: | - Overrides the list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. - default: [] - maxItems: 20 - items: - type: string - file_search: - type: object - properties: - vector_store_ids: - type: array - description: | - Overrides the [vector store](/docs/api-reference/vector-stores/object) attached to this assistant. There can be a maximum of 1 vector store attached to the assistant. 
- maxItems: 1 - items: - type: string - nullable: true - metadata: - description: *metadata_description - type: object - x-oaiTypeLabel: map - nullable: true - temperature: - description: *run_temperature_description - type: number - minimum: 0 - maximum: 2 - default: 1 - example: 1 - nullable: true - top_p: - type: number - minimum: 0 - maximum: 1 - default: 1 - example: 1 - nullable: true - description: &run_top_p_description | - An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. - response_format: - $ref: "#/components/schemas/AssistantsApiResponseFormatOption" - nullable: true + type: string + enum: + - word + - segment + default: [segment] + required: + - file + - model - DeleteAssistantResponse: + # Note: This does not currently support the non-default response format types. + CreateTranscriptionResponseJson: type: object + description: Represents a transcription response returned by model, based on the provided input. properties: - id: - type: string - deleted: - type: boolean - object: + text: type: string - enum: [assistant.deleted] + description: The transcribed text. required: - - id - - object - - deleted + - text + x-oaiMeta: + name: The transcription object (JSON) + group: audio + example: *basic_transcription_response_example - ListAssistantsResponse: + TranscriptionSegment: type: object properties: - object: + id: + type: integer + description: Unique identifier of the segment. + seek: + type: integer + description: Seek offset of the segment. + start: + type: number + format: float + description: Start time of the segment in seconds. + end: + type: number + format: float + description: End time of the segment in seconds. 
+ text: type: string - example: "list" - data: + description: Text content of the segment. + tokens: type: array items: - $ref: "#/components/schemas/AssistantObject" - first_id: - type: string - example: "asst_abc123" - last_id: - type: string - example: "asst_abc456" - has_more: - type: boolean - example: false + type: integer + description: Array of token IDs for the text content. + temperature: + type: number + format: float + description: Temperature parameter used for generating the segment. + avg_logprob: + type: number + format: float + description: Average logprob of the segment. If the value is lower than -1, consider the logprobs failed. + compression_ratio: + type: number + format: float + description: Compression ratio of the segment. If the value is greater than 2.4, consider the compression failed. + no_speech_prob: + type: number + format: float + description: Probability of no speech in the segment. If the value is higher than 1.0 and the `avg_logprob` is below -1, consider this segment silent. required: - - object - - data - - first_id - - last_id - - has_more - x-oaiMeta: - name: List assistants response object - group: chat - example: *list_assistants_example + - id + - seek + - start + - end + - text + - tokens + - temperature + - avg_logprob + - compression_ratio + - no_speech_prob - AssistantToolsCode: + TranscriptionWord: type: object - title: Code interpreter tool properties: - type: + word: type: string - description: "The type of tool being defined: `code_interpreter`" - enum: ["code_interpreter"] - required: - - type + description: The text content of the word. + start: + type: number + format: float + description: Start time of the word in seconds. + end: + type: number + format: float + description: End time of the word in seconds. 
+ required: [word, start, end] - AssistantToolsFileSearch: + CreateTranscriptionResponseVerboseJson: type: object - title: FileSearch tool + description: Represents a verbose json transcription response returned by model, based on the provided input. properties: - type: + language: type: string - description: "The type of tool being defined: `file_search`" - enum: ["file_search"] - file_search: - type: object - description: Overrides for the file search tool. - properties: - max_num_results: - type: integer - minimum: 1 - maximum: 50 - description: | - The maximum number of results the file search tool should output. The default is 20 for gpt-4* models and 5 for gpt-3.5-turbo. This number should be between 1 and 50 inclusive. - - Note that the file search tool may output fewer than `max_num_results` results. See the [file search tool documentation](/docs/assistants/tools/file-search/number-of-chunks-returned) for more information. - required: - - type - - AssistantToolsFileSearchTypeOnly: - type: object - title: FileSearch tool - properties: - type: + description: The language of the input audio. + duration: type: string - description: "The type of tool being defined: `file_search`" - enum: ["file_search"] - required: - - type - - AssistantToolsFunction: - type: object - title: Function tool - properties: - type: + description: The duration of the input audio. + text: type: string - description: "The type of tool being defined: `function`" - enum: ["function"] - function: - $ref: "#/components/schemas/FunctionObject" - required: - - type - - function + description: The transcribed text. + words: + type: array + description: Extracted words and their corresponding timestamps. + items: + $ref: "#/components/schemas/TranscriptionWord" + segments: + type: array + description: Segments of the transcribed text and their corresponding details. 
+ items: + $ref: "#/components/schemas/TranscriptionSegment" + required: [language, duration, text] + x-oaiMeta: + name: The transcription object (Verbose JSON) + group: audio + example: *verbose_transcription_response_example - TruncationObject: + CreateTranslationRequest: type: object - title: Thread Truncation Controls - description: Controls for how a thread will be truncated prior to the run. Use this to control the intial context window of the run. + additionalProperties: false properties: - type: + file: + description: | + The audio file object (not file name) to translate, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. type: string - description: The truncation strategy to use for the thread. The default is `auto`. If set to `last_messages`, the thread will be truncated to the n most recent messages in the thread. When set to `auto`, messages in the middle of the thread will be dropped to fit the context length of the model, `max_prompt_tokens`. - enum: ["auto", "last_messages"] - last_messages: - type: integer - description: The number of most recent messages from the thread when constructing the context for the run. - minimum: 1 - nullable: true + x-oaiTypeLabel: file + format: binary + model: + description: | + ID of the model to use. Only `whisper-1` (which is powered by our open source Whisper V2 model) is currently available. + example: whisper-1 + anyOf: + - type: string + - type: string + enum: ["whisper-1"] + x-oaiTypeLabel: string + prompt: + description: | + An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English. + type: string + response_format: + description: | + The format of the transcript output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`. + type: string + default: json + temperature: + description: | + The sampling temperature, between 0 and 1. 
Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit. + type: number + default: 0 required: - - type + - file + - model - AssistantsApiToolChoiceOption: - description: | - Controls which (if any) tool is called by the model. - `none` means the model will not call any tools and instead generates a message. - `auto` is the default value and means the model can pick between generating a message or calling one or more tools. - `required` means the model must call one or more tools before responding to the user. - Specifying a particular tool like `{"type": "file_search"}` or `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. + # Note: This does not currently support the non-default response format types. + CreateTranslationResponseJson: + type: object + properties: + text: + type: string + required: + - text - oneOf: - - type: string - description: > - `none` means the model will not call any tools and instead generates a message. - `auto` means the model can pick between generating a message or calling one or more tools. - `required` means the model must call one or more tools before responding to the user. - enum: [none, auto, required] - - $ref: "#/components/schemas/AssistantsNamedToolChoice" - x-oaiExpandable: true + CreateTranslationResponseVerboseJson: + type: object + properties: + language: + type: string + description: The language of the output translation (always `english`). + duration: + type: string + description: The duration of the input audio. + text: + type: string + description: The translated text. + segments: + type: array + description: Segments of the translated text and their corresponding details. 
+ items: + $ref: "#/components/schemas/TranscriptionSegment" + required: [language, duration, text] - AssistantsNamedToolChoice: + CreateSpeechRequest: type: object - description: Specifies a tool the model should use. Use to force the model to call a specific tool. + additionalProperties: false properties: - type: + model: + description: | + One of the available [TTS models](/docs/models/tts): `tts-1` or `tts-1-hd` + anyOf: + - type: string + - type: string + enum: ["tts-1", "tts-1-hd"] + x-oaiTypeLabel: string + input: type: string - enum: ["function", "code_interpreter", "file_search"] - description: The type of the tool. If type is `function`, the function name must be set - function: - type: object - properties: - name: - type: string - description: The name of the function to call. - required: - - name + description: The text to generate audio for. The maximum length is 4096 characters. + maxLength: 4096 + voice: + description: The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the [Text to speech guide](/docs/guides/text-to-speech/voice-options). + type: string + enum: ["alloy", "echo", "fable", "onyx", "nova", "shimmer"] + response_format: + description: "The format to return audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, `wav`, and `pcm`." + default: "mp3" + type: string + enum: ["mp3", "opus", "aac", "flac", "wav", "pcm"] + speed: + description: "The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is the default." + type: number + default: 1.0 + minimum: 0.25 + maximum: 4.0 required: - - type + - model + - input + - voice - RunObject: - type: object - title: A run on a thread - description: Represents an execution run on a [thread](/docs/api-reference/threads). + Model: + title: Model + description: Describes an OpenAI model offering that can be used with the API. 
properties: id: - description: The identifier, which can be referenced in API endpoints. type: string + description: The model identifier, which can be referenced in the API endpoints. + created: + type: integer + description: The Unix timestamp (in seconds) when the model was created. object: - description: The object type, which is always `thread.run`. type: string - enum: ["thread.run"] + description: The object type, which is always "model". + enum: [model] + owned_by: + type: string + description: The organization that owns the model. + required: + - id + - object + - created + - owned_by + x-oaiMeta: + name: The model object + example: *retrieve_model_response + + OpenAIFile: + title: OpenAIFile + description: The `File` object represents a document that has been uploaded to OpenAI. + properties: + id: + type: string + description: The file identifier, which can be referenced in the API endpoints. + bytes: + type: integer + description: The size of the file, in bytes. created_at: - description: The Unix timestamp (in seconds) for when the run was created. type: integer - thread_id: - description: The ID of the [thread](/docs/api-reference/threads) that was executed on as a part of this run. + description: The Unix timestamp (in seconds) for when the file was created. + filename: type: string - assistant_id: - description: The ID of the [assistant](/docs/api-reference/assistants) used for execution of this run. + description: The name of the file. + object: type: string - status: - description: The status of the run, which can be either `queued`, `in_progress`, `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`, `incomplete`, or `expired`. + description: The object type, which is always `file`. + enum: ["file"] + purpose: type: string + description: The intended purpose of the file. Supported values are `assistants`, `assistants_output`, `batch`, `batch_output`, `fine-tune`, `fine-tune-results` and `vision`. 
enum: [ - "queued", - "in_progress", - "requires_action", - "cancelling", - "cancelled", - "failed", - "completed", - "incomplete", - "expired", + "assistants", + "assistants_output", + "batch", + "batch_output", + "fine-tune", + "fine-tune-results", + "vision", ] - required_action: - type: object - description: Details on the action required to continue the run. Will be `null` if no action is required. - nullable: true - properties: - type: - description: For now, this is always `submit_tool_outputs`. - type: string - enum: ["submit_tool_outputs"] - submit_tool_outputs: - type: object - description: Details on the tool outputs needed for this run to continue. - properties: - tool_calls: - type: array - description: A list of the relevant tool calls. - items: - $ref: "#/components/schemas/RunToolCallObject" - required: - - tool_calls - required: - - type - - submit_tool_outputs - last_error: - type: object - description: The last error associated with this run. Will be `null` if there are no errors. - nullable: true - properties: - code: - type: string - description: One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`. - enum: - ["server_error", "rate_limit_exceeded", "invalid_prompt"] - message: - type: string - description: A human-readable description of the error. - required: - - code - - message - expires_at: - description: The Unix timestamp (in seconds) for when the run will expire. - type: integer - nullable: true - started_at: - description: The Unix timestamp (in seconds) for when the run was started. - type: integer - nullable: true - cancelled_at: - description: The Unix timestamp (in seconds) for when the run was cancelled. - type: integer - nullable: true - failed_at: - description: The Unix timestamp (in seconds) for when the run failed. - type: integer - nullable: true - completed_at: - description: The Unix timestamp (in seconds) for when the run was completed. 
- type: integer - nullable: true - incomplete_details: - description: Details on why the run is incomplete. Will be `null` if the run is not incomplete. - type: object - nullable: true - properties: - reason: - description: The reason why the run is incomplete. This will point to which specific token limit was reached over the course of the run. - type: string - enum: ["max_completion_tokens", "max_prompt_tokens"] - model: - description: The model that the [assistant](/docs/api-reference/assistants) used for this run. + status: type: string - instructions: - description: The instructions that the [assistant](/docs/api-reference/assistants) used for this run. + deprecated: true + description: Deprecated. The current status of the file, which can be either `uploaded`, `processed`, or `error`. + enum: ["uploaded", "processed", "error"] + status_details: type: string - tools: - description: The list of tools that the [assistant](/docs/api-reference/assistants) used for this run. - default: [] - type: array - maxItems: 20 - items: - oneOf: - - $ref: "#/components/schemas/AssistantToolsCode" - - $ref: "#/components/schemas/AssistantToolsFileSearch" - - $ref: "#/components/schemas/AssistantToolsFunction" - x-oaiExpandable: true - metadata: - description: *metadata_description - type: object - x-oaiTypeLabel: map - nullable: true - usage: - $ref: "#/components/schemas/RunCompletionUsage" - temperature: - description: The sampling temperature used for this run. If not set, defaults to 1. - type: number - nullable: true - top_p: - description: The nucleus sampling value used for this run. If not set, defaults to 1. - type: number - nullable: true - max_prompt_tokens: - type: integer - nullable: true - description: | - The maximum number of prompt tokens specified to have been used over the course of the run. 
- minimum: 256 - max_completion_tokens: - type: integer - nullable: true - description: | - The maximum number of completion tokens specified to have been used over the course of the run. - minimum: 256 - truncation_strategy: - $ref: "#/components/schemas/TruncationObject" - nullable: true - tool_choice: - $ref: "#/components/schemas/AssistantsApiToolChoiceOption" - nullable: true - parallel_tool_calls: - $ref: "#/components/schemas/ParallelToolCalls" - response_format: - $ref: "#/components/schemas/AssistantsApiResponseFormatOption" - nullable: true + deprecated: true + description: Deprecated. For details on why a fine-tuning training file failed validation, see the `error` field on `fine_tuning.job`. required: - id - object + - bytes - created_at - - thread_id - - assistant_id + - filename + - purpose - status - - required_action - - last_error + x-oaiMeta: + name: The file object + example: | + { + "id": "file-abc123", + "object": "file", + "bytes": 120000, + "created_at": 1677610602, + "filename": "salesOverview.pdf", + "purpose": "assistants", + } + Upload: + type: object + title: Upload + description: | + The Upload object can accept byte chunks in the form of Parts. + properties: + id: + type: string + description: The Upload unique identifier, which can be referenced in API endpoints. + created_at: + type: integer + description: The Unix timestamp (in seconds) for when the Upload was created. + filename: + type: string + description: The name of the file to be uploaded. + bytes: + type: integer + description: The intended number of bytes to be uploaded. + purpose: + type: string + description: The intended purpose of the file. [Please refer here](/docs/api-reference/files/object#files/object-purpose) for acceptable values. + status: + type: string + description: The status of the Upload. + enum: ["pending", "completed", "cancelled", "expired"] + expires_at: + type: integer + description: The Unix timestamp (in seconds) for when the Upload will expire. 
+ object: + type: string + description: The object type, which is always "upload". + enum: [upload] + file: + $ref: "#/components/schemas/OpenAIFile" + nullable: true + description: The ready File object after the Upload is completed. + required: + - bytes + - created_at - expires_at - - started_at - - cancelled_at - - failed_at - - completed_at - - model - - instructions - - tools - - metadata - - usage - - incomplete_details - - max_prompt_tokens - - max_completion_tokens - - truncation_strategy - - tool_choice - - parallel_tool_calls - - response_format + - filename + - id + - purpose + - status + - step_number x-oaiMeta: - name: The run object - beta: true + name: The upload object example: | { - "id": "run_abc123", - "object": "thread.run", - "created_at": 1698107661, - "assistant_id": "asst_abc123", - "thread_id": "thread_abc123", + "id": "upload_abc123", + "object": "upload", + "bytes": 2147483648, + "created_at": 1719184911, + "filename": "training_examples.jsonl", + "purpose": "fine-tune", "status": "completed", - "started_at": 1699073476, - "expires_at": null, - "cancelled_at": null, - "failed_at": null, - "completed_at": 1699073498, - "last_error": null, - "model": "gpt-4-turbo", - "instructions": null, - "tools": [{"type": "file_search"}, {"type": "code_interpreter"}], - "metadata": {}, - "incomplete_details": null, - "usage": { - "prompt_tokens": 123, - "completion_tokens": 456, - "total_tokens": 579 - }, - "temperature": 1.0, - "top_p": 1.0, - "max_prompt_tokens": 1000, - "max_completion_tokens": 1000, - "truncation_strategy": { - "type": "auto", - "last_messages": null - }, - "response_format": "auto", - "tool_choice": "auto", - "parallel_tool_calls": true + "expires_at": 1719127296, + "file": { + "id": "file-xyz321", + "object": "file", + "bytes": 2147483648, + "created_at": 1719186911, + "filename": "training_examples.jsonl", + "purpose": "fine-tune", + } } - CreateRunRequest: + UploadPart: type: object - additionalProperties: false + title: 
UploadPart + description: | + The upload Part represents a chunk of bytes we can add to an Upload object. properties: - assistant_id: - description: The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run. + id: type: string - model: - description: The ID of the [Model](/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used. - example: "gpt-4-turbo" - anyOf: - - type: string - - type: string - enum: - [ - "gpt-4o", - "gpt-4o-2024-05-13", - "gpt-4-turbo", - "gpt-4-turbo-2024-04-09", - "gpt-4-0125-preview", - "gpt-4-turbo-preview", - "gpt-4-1106-preview", - "gpt-4-vision-preview", - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-16k-0613", - ] - x-oaiTypeLabel: string - nullable: true - instructions: - description: Overrides the [instructions](/docs/api-reference/assistants/createAssistant) of the assistant. This is useful for modifying the behavior on a per-run basis. + description: The upload Part unique identifier, which can be referenced in API endpoints. + created_at: + type: integer + description: The Unix timestamp (in seconds) for when the Part was created. + upload_id: type: string - nullable: true - additional_instructions: - description: Appends additional instructions at the end of the instructions for the run. This is useful for modifying the behavior on a per-run basis without overriding other instructions. + description: The ID of the Upload object that this Part was added to. + object: type: string - nullable: true - additional_messages: - description: Adds additional messages to the thread before creating the run. 
- type: array - items: - $ref: "#/components/schemas/CreateMessageRequest" - nullable: true - tools: - description: Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. - nullable: true + description: The object type, which is always `upload.part`. + enum: ['upload.part'] + required: + - created_at + - id + - object + - upload_id + x-oaiMeta: + name: The upload part object + example: | + { + "id": "part_def456", + "object": "upload.part", + "created_at": 1719186911, + "upload_id": "upload_abc123" + } + Embedding: + type: object + description: | + Represents an embedding vector returned by embedding endpoint. + properties: + index: + type: integer + description: The index of the embedding in the list of embeddings. + embedding: type: array - maxItems: 20 + description: | + The embedding vector, which is a list of floats. The length of vector depends on the model as listed in the [embedding guide](/docs/guides/embeddings). items: - oneOf: - - $ref: "#/components/schemas/AssistantToolsCode" - - $ref: "#/components/schemas/AssistantToolsFileSearch" - - $ref: "#/components/schemas/AssistantToolsFunction" - x-oaiExpandable: true - metadata: - description: *metadata_description - type: object - x-oaiTypeLabel: map - nullable: true - temperature: - type: number - minimum: 0 - maximum: 2 - default: 1 - example: 1 - nullable: true - description: *run_temperature_description - top_p: - type: number - minimum: 0 - maximum: 1 - default: 1 - example: 1 - nullable: true - description: &run_top_p_description | - An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. 
- stream: - type: boolean - nullable: true - description: | - If `true`, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a `data: [DONE]` message. - max_prompt_tokens: - type: integer - nullable: true - description: | - The maximum number of prompt tokens that may be used over the course of the run. The run will make a best effort to use only the number of prompt tokens specified, across multiple turns of the run. If the run exceeds the number of prompt tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info. - minimum: 256 - max_completion_tokens: + type: number + object: + type: string + description: The object type, which is always "embedding". + enum: [embedding] + required: + - index + - object + - embedding + x-oaiMeta: + name: The embedding object + example: | + { + "object": "embedding", + "embedding": [ + 0.0023064255, + -0.009327292, + .... (1536 floats total for ada-002) + -0.0028842222, + ], + "index": 0 + } + + FineTuningJob: + type: object + title: FineTuningJob + description: | + The `fine_tuning.job` object represents a fine-tuning job that has been created through the API. + properties: + id: + type: string + description: The object identifier, which can be referenced in the API endpoints. + created_at: type: integer + description: The Unix timestamp (in seconds) for when the fine-tuning job was created. + error: + type: object nullable: true - description: | - The maximum number of completion tokens that may be used over the course of the run. The run will make a best effort to use only the number of completion tokens specified, across multiple turns of the run. If the run exceeds the number of completion tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info. 
- minimum: 256 - truncation_strategy: - $ref: "#/components/schemas/TruncationObject" - nullable: true - tool_choice: - $ref: "#/components/schemas/AssistantsApiToolChoiceOption" + description: For fine-tuning jobs that have `failed`, this will contain more information on the cause of the failure. + properties: + code: + type: string + description: A machine-readable error code. + message: + type: string + description: A human-readable error message. + param: + type: string + description: The parameter that was invalid, usually `training_file` or `validation_file`. This field will be null if the failure was not parameter-specific. + nullable: true + required: + - code + - message + - param + fine_tuned_model: + type: string nullable: true - parallel_tool_calls: - $ref: "#/components/schemas/ParallelToolCalls" - response_format: - $ref: "#/components/schemas/AssistantsApiResponseFormatOption" + description: The name of the fine-tuned model that is being created. The value will be null if the fine-tuning job is still running. + finished_at: + type: integer nullable: true - required: - - thread_id - - assistant_id - ListRunsResponse: - type: object - properties: + description: The Unix timestamp (in seconds) for when the fine-tuning job was finished. The value will be null if the fine-tuning job is still running. + hyperparameters: + type: object + description: The hyperparameters used for the fine-tuning job. See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. + properties: + n_epochs: + oneOf: + - type: string + enum: [auto] + - type: integer + minimum: 1 + maximum: 50 + default: auto + description: + The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset. + + "auto" decides the optimal number of epochs based on the size of the dataset. If setting the number manually, we support any number between 1 and 50 epochs. 
+ required: + - n_epochs + model: + type: string + description: The base model that is being fine-tuned. object: type: string - example: "list" - data: + description: The object type, which is always "fine_tuning.job". + enum: [fine_tuning.job] + organization_id: + type: string + description: The organization that owns the fine-tuning job. + result_files: type: array + description: The compiled results file ID(s) for the fine-tuning job. You can retrieve the results with the [Files API](/docs/api-reference/files/retrieve-contents). items: - $ref: "#/components/schemas/RunObject" - first_id: + type: string + example: file-abc123 + status: type: string - example: "run_abc123" - last_id: + description: The current status of the fine-tuning job, which can be either `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`. + enum: + [ + "validating_files", + "queued", + "running", + "succeeded", + "failed", + "cancelled", + ] + trained_tokens: + type: integer + nullable: true + description: The total number of billable tokens processed by this fine-tuning job. The value will be null if the fine-tuning job is still running. + training_file: + type: string + description: The file ID used for training. You can retrieve the training data with the [Files API](/docs/api-reference/files/retrieve-contents). + validation_file: type: string - example: "run_abc456" - has_more: - type: boolean - example: false - required: - - object - - data - - first_id - - last_id - - has_more - ModifyRunRequest: - type: object - additionalProperties: false - properties: - metadata: - description: *metadata_description - type: object - x-oaiTypeLabel: map nullable: true - SubmitToolOutputsRunRequest: - type: object - additionalProperties: false - properties: - tool_outputs: - description: A list of tools for which the outputs are being submitted. + description: The file ID used for validation. 
You can retrieve the validation results with the [Files API](/docs/api-reference/files/retrieve-contents). + integrations: type: array + nullable: true + description: A list of integrations to enable for this fine-tuning job. + maxItems: 5 items: - type: object - properties: - tool_call_id: - type: string - description: The ID of the tool call in the `required_action` object within the run object the output is being submitted for. - output: - type: string - description: The output of the tool call to be submitted to continue the run. - stream: - type: boolean + oneOf: + - $ref: "#/components/schemas/FineTuningIntegration" + x-oaiExpandable: true + seed: + type: integer + description: The seed used for the fine-tuning job. + estimated_finish: + type: integer nullable: true - description: | - If `true`, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a `data: [DONE]` message. + description: The Unix timestamp (in seconds) for when the fine-tuning job is estimated to finish. The value will be null if the fine-tuning job is not running. required: - - tool_outputs + - created_at + - error + - finished_at + - fine_tuned_model + - hyperparameters + - id + - model + - object + - organization_id + - result_files + - status + - trained_tokens + - training_file + - validation_file + - seed + x-oaiMeta: + name: The fine-tuning job object + example: *fine_tuning_example - RunToolCallObject: + FineTuningIntegration: type: object - description: Tool call objects + title: Fine-Tuning Job Integration + required: + - type + - wandb properties: - id: - type: string - description: The ID of the tool call. This ID must be referenced when you submit the tool outputs in using the [Submit tool outputs to run](/docs/api-reference/runs/submitToolOutputs) endpoint. type: type: string - description: The type of tool call the output is required for. For now, this is always `function`. 
- enum: ["function"] - function: + description: "The type of the integration being enabled for the fine-tuning job" + enum: ["wandb"] + wandb: type: object - description: The function definition. + description: | + The settings for your integration with Weights and Biases. This payload specifies the project that + metrics will be sent to. Optionally, you can set an explicit display name for your run, add tags + to your run, and set a default entity (team, username, etc) to be associated with your run. + required: + - project properties: + project: + description: | + The name of the project that the new run will be created under. + type: string + example: "my-wandb-project" name: + description: | + A display name to set for the run. If not set, we will use the Job ID as the name. + nullable: true type: string - description: The name of the function. - arguments: + entity: + description: | + The entity to use for the run. This allows you to set the team or username of the WandB user that you would + like associated with the run. If not set, the default entity for the registered WandB API key is used. + nullable: true type: string - description: The arguments that the model expects you to pass to the function. - required: - - name - - arguments - required: - - id - - type - - function + tags: + description: | + A list of tags to be attached to the newly created run. These tags are passed through directly to WandB. Some + default tags are generated by OpenAI: "openai/finetune", "openai/{base-model}", "openai/{ftjob-abcdef}". + type: array + items: + type: string + example: "custom-tag" - CreateThreadAndRunRequest: + FineTuningJobEvent: type: object - additionalProperties: false + description: Fine-tuning job event object properties: - assistant_id: - description: The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run. 
- type: string - thread: - $ref: "#/components/schemas/CreateThreadRequest" - description: If no thread is provided, an empty thread will be created. - model: - description: The ID of the [Model](/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used. - example: "gpt-4-turbo" - anyOf: - - type: string - - type: string - enum: - [ - "gpt-4o", - "gpt-4o-2024-05-13", - "gpt-4-turbo", - "gpt-4-turbo-2024-04-09", - "gpt-4-0125-preview", - "gpt-4-turbo-preview", - "gpt-4-1106-preview", - "gpt-4-vision-preview", - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-16k-0613", - ] - x-oaiTypeLabel: string - nullable: true - instructions: - description: Override the default system message of the assistant. This is useful for modifying the behavior on a per-run basis. + id: type: string - nullable: true - tools: - description: Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. - nullable: true - type: array - maxItems: 20 - items: - oneOf: - - $ref: "#/components/schemas/AssistantToolsCode" - - $ref: "#/components/schemas/AssistantToolsFileSearch" - - $ref: "#/components/schemas/AssistantToolsFunction" - tool_resources: - type: object - description: | - A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. 
- properties: - code_interpreter: - type: object - properties: - file_ids: - type: array - description: | - A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. - default: [] - maxItems: 20 - items: - type: string - file_search: - type: object - properties: - vector_store_ids: - type: array - description: | - The ID of the [vector store](/docs/api-reference/vector-stores/object) attached to this assistant. There can be a maximum of 1 vector store attached to the assistant. - maxItems: 1 - items: - type: string - nullable: true - metadata: - description: *metadata_description - type: object - x-oaiTypeLabel: map - nullable: true - temperature: - type: number - minimum: 0 - maximum: 2 - default: 1 - example: 1 - nullable: true - description: *run_temperature_description - top_p: - type: number - minimum: 0 - maximum: 1 - default: 1 - example: 1 - nullable: true - description: *run_top_p_description - stream: - type: boolean - nullable: true - description: | - If `true`, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a `data: [DONE]` message. - max_prompt_tokens: - type: integer - nullable: true - description: | - The maximum number of prompt tokens that may be used over the course of the run. The run will make a best effort to use only the number of prompt tokens specified, across multiple turns of the run. If the run exceeds the number of prompt tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info. - minimum: 256 - max_completion_tokens: + created_at: type: integer - nullable: true - description: | - The maximum number of completion tokens that may be used over the course of the run. The run will make a best effort to use only the number of completion tokens specified, across multiple turns of the run. 
If the run exceeds the number of completion tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info. - minimum: 256 - truncation_strategy: - $ref: "#/components/schemas/TruncationObject" - nullable: true - tool_choice: - $ref: "#/components/schemas/AssistantsApiToolChoiceOption" - nullable: true - parallel_tool_calls: - $ref: "#/components/schemas/ParallelToolCalls" - response_format: - $ref: "#/components/schemas/AssistantsApiResponseFormatOption" - nullable: true - required: - - thread_id - - assistant_id - - ThreadObject: - type: object - title: Thread - description: Represents a thread that contains [messages](/docs/api-reference/messages). - properties: - id: - description: The identifier, which can be referenced in API endpoints. + level: + type: string + enum: ["info", "warn", "error"] + message: type: string object: - description: The object type, which is always `thread`. type: string - enum: ["thread"] - created_at: - description: The Unix timestamp (in seconds) for when the thread was created. - type: integer - tool_resources: - type: object - description: | - A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. - properties: - code_interpreter: - type: object - properties: - file_ids: - type: array - description: | - A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. - default: [] - maxItems: 20 - items: - type: string - file_search: - type: object - properties: - vector_store_ids: - type: array - description: | - The [vector store](/docs/api-reference/vector-stores/object) attached to this thread. There can be a maximum of 1 vector store attached to the thread. 
- maxItems: 1 - items: - type: string - nullable: true - metadata: - description: *metadata_description - type: object - x-oaiTypeLabel: map - nullable: true + enum: [fine_tuning.job.event] required: - id - object - created_at - - tool_resources - - metadata + - level + - message x-oaiMeta: - name: The thread object - beta: true + name: The fine-tuning job event object example: | { - "id": "thread_abc123", - "object": "thread", - "created_at": 1698107661, - "metadata": {} + "object": "fine_tuning.job.event", + "id": "ftevent-abc123" + "created_at": 1677610602, + "level": "info", + "message": "Created fine-tuning job" } - CreateThreadRequest: + FineTuningJobCheckpoint: type: object - additionalProperties: false + title: FineTuningJobCheckpoint + description: | + The `fine_tuning.job.checkpoint` object represents a model checkpoint for a fine-tuning job that is ready to use. properties: - messages: - description: A list of [messages](/docs/api-reference/messages) to start the thread with. - type: array - items: - $ref: "#/components/schemas/CreateMessageRequest" - tool_resources: + id: + type: string + description: The checkpoint identifier, which can be referenced in the API endpoints. + created_at: + type: integer + description: The Unix timestamp (in seconds) for when the checkpoint was created. + fine_tuned_model_checkpoint: + type: string + description: The name of the fine-tuned checkpoint model that is created. + step_number: + type: integer + description: The step number that the checkpoint was created at. + metrics: type: object - description: | - A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. + description: Metrics at the step number during the fine-tuning job. 
properties: - code_interpreter: - type: object - properties: - file_ids: - type: array - description: | - A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. - default: [] - maxItems: 20 - items: - type: string - file_search: - type: object - properties: - vector_store_ids: - type: array - description: | - The [vector store](/docs/api-reference/vector-stores/object) attached to this thread. There can be a maximum of 1 vector store attached to the thread. - maxItems: 1 - items: - type: string - vector_stores: - type: array - description: | - A helper to create a [vector store](/docs/api-reference/vector-stores/object) with file_ids and attach it to this thread. There can be a maximum of 1 vector store attached to the thread. - maxItems: 1 - items: - type: object - properties: - file_ids: - type: array - description: | - A list of [file](/docs/api-reference/files) IDs to add to the vector store. There can be a maximum of 10000 files in a vector store. - maxItems: 10000 - items: - type: string - chunking_strategy: - # Ideally we'd reuse the chunking strategy schema here, but it doesn't expand properly - type: object - description: The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. - oneOf: - - type: object - title: Auto Chunking Strategy - description: The default strategy. This strategy currently uses a `max_chunk_size_tokens` of `800` and `chunk_overlap_tokens` of `400`. - additionalProperties: false - properties: - type: - type: string - description: Always `auto`. - enum: ["auto"] - required: - - type - - type: object - title: Static Chunking Strategy - additionalProperties: false - properties: - type: - type: string - description: Always `static`. 
- enum: ["static"] - static: - type: object - additionalProperties: false - properties: - max_chunk_size_tokens: - type: integer - minimum: 100 - maximum: 4096 - description: The maximum number of tokens in each chunk. The default value is `800`. The minimum value is `100` and the maximum value is `4096`. - chunk_overlap_tokens: - type: integer - description: | - The number of tokens that overlap between chunks. The default value is `400`. - - Note that the overlap must not exceed half of `max_chunk_size_tokens`. - required: - - max_chunk_size_tokens - - chunk_overlap_tokens - required: - - type - - static - x-oaiExpandable: true - metadata: - type: object - description: | - Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. - x-oaiTypeLabel: map - x-oaiExpandable: true - oneOf: - - required: [vector_store_ids] - - required: [vector_stores] - nullable: true - metadata: - description: *metadata_description - type: object - x-oaiTypeLabel: map - nullable: true - - ModifyThreadRequest: - type: object - additionalProperties: false - properties: - tool_resources: - type: object - description: | - A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. - properties: - code_interpreter: - type: object - properties: - file_ids: - type: array - description: | - A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. 
- default: [] - maxItems: 20 - items: - type: string - file_search: - type: object - properties: - vector_store_ids: - type: array - description: | - The [vector store](/docs/api-reference/vector-stores/object) attached to this thread. There can be a maximum of 1 vector store attached to the thread. - maxItems: 1 - items: - type: string - nullable: true - metadata: - description: *metadata_description - type: object - x-oaiTypeLabel: map - nullable: true - - DeleteThreadResponse: - type: object - properties: - id: + step: + type: number + train_loss: + type: number + train_mean_token_accuracy: + type: number + valid_loss: + type: number + valid_mean_token_accuracy: + type: number + full_valid_loss: + type: number + full_valid_mean_token_accuracy: + type: number + fine_tuning_job_id: type: string - deleted: - type: boolean + description: The name of the fine-tuning job that this checkpoint was created from. object: type: string - enum: [thread.deleted] + description: The object type, which is always "fine_tuning.job.checkpoint". 
+ enum: [fine_tuning.job.checkpoint] required: + - created_at + - fine_tuning_job_id + - fine_tuned_model_checkpoint - id + - metrics - object - - deleted + - step_number + x-oaiMeta: + name: The fine-tuning job checkpoint object + example: | + { + "object": "fine_tuning.job.checkpoint", + "id": "ftckpt_qtZ5Gyk4BLq1SfLFWp3RtO3P", + "created_at": 1712211699, + "fine_tuned_model_checkpoint": "ft:gpt-4o-mini-2024-07-18:my-org:custom_suffix:9ABel2dg:ckpt-step-88", + "fine_tuning_job_id": "ftjob-fpbNQ3H1GrMehXRf8cO97xTN", + "metrics": { + "step": 88, + "train_loss": 0.478, + "train_mean_token_accuracy": 0.924, + "valid_loss": 10.112, + "valid_mean_token_accuracy": 0.145, + "full_valid_loss": 0.567, + "full_valid_mean_token_accuracy": 0.944 + }, + "step_number": 88 + } - ListThreadsResponse: + FinetuneChatRequestInput: + type: object + description: The per-line training example of a fine-tuning input file for chat models properties: - object: - type: string - example: "list" - data: + messages: type: array + minItems: 1 items: - $ref: "#/components/schemas/ThreadObject" - first_id: - type: string - example: "asst_abc123" - last_id: - type: string - example: "asst_abc456" - has_more: - type: boolean - example: false - required: - - object - - data - - first_id - - last_id - - has_more - - MessageObject: - type: object - title: The message object - description: Represents a message within a [thread](/docs/api-reference/threads). - properties: - id: - description: The identifier, which can be referenced in API endpoints. - type: string - object: - description: The object type, which is always `thread.message`. - type: string - enum: ["thread.message"] - created_at: - description: The Unix timestamp (in seconds) for when the message was created. - type: integer - thread_id: - description: The [thread](/docs/api-reference/threads) ID that this message belongs to. 
- type: string - status: - description: The status of the message, which can be either `in_progress`, `incomplete`, or `completed`. - type: string - enum: ["in_progress", "incomplete", "completed"] - incomplete_details: - description: On an incomplete message, details about why the message is incomplete. - type: object - properties: - reason: - type: string - description: The reason the message is incomplete. - enum: - [ - "content_filter", - "max_tokens", - "run_cancelled", - "run_expired", - "run_failed", - ] - nullable: true - required: - - reason - completed_at: - description: The Unix timestamp (in seconds) for when the message was completed. - type: integer - nullable: true - incomplete_at: - description: The Unix timestamp (in seconds) for when the message was marked as incomplete. - type: integer - nullable: true - role: - description: The entity that produced the message. One of `user` or `assistant`. - type: string - enum: ["user", "assistant"] - content: - description: The content of the message in array of text and/or images. + oneOf: + - $ref: "#/components/schemas/ChatCompletionRequestSystemMessage" + - $ref: "#/components/schemas/ChatCompletionRequestUserMessage" + - $ref: "#/components/schemas/FineTuneChatCompletionRequestAssistantMessage" + - $ref: "#/components/schemas/ChatCompletionRequestToolMessage" + - $ref: "#/components/schemas/ChatCompletionRequestFunctionMessage" + x-oaiExpandable: true + tools: type: array + description: A list of tools the model may generate JSON inputs for. items: - oneOf: - - $ref: "#/components/schemas/MessageContentImageFileObject" - - $ref: "#/components/schemas/MessageContentImageUrlObject" - - $ref: "#/components/schemas/MessageContentTextObject" - x-oaiExpandable: true - assistant_id: - description: If applicable, the ID of the [assistant](/docs/api-reference/assistants) that authored this message. 
- type: string - nullable: true - run_id: - description: The ID of the [run](/docs/api-reference/runs) associated with the creation of this message. Value is `null` when messages are created manually using the create message or create thread endpoints. - type: string - nullable: true - attachments: + $ref: "#/components/schemas/ChatCompletionTool" + parallel_tool_calls: + $ref: "#/components/schemas/ParallelToolCalls" + functions: + deprecated: true + description: + A list of functions the model may generate JSON inputs for. type: array + minItems: 1 + maxItems: 128 items: - type: object - properties: - file_id: - type: string - description: The ID of the file to attach to the message. - tools: - description: The tools to add this file to. - type: array - items: - oneOf: - - $ref: "#/components/schemas/AssistantToolsCode" - - $ref: "#/components/schemas/AssistantToolsFileSearchTypeOnly" - x-oaiExpandable: true - description: A list of files attached to the message, and the tools they were added to. - nullable: true - metadata: - description: *metadata_description - type: object - x-oaiTypeLabel: map - nullable: true - required: - - id - - object - - created_at - - thread_id - - status - - incomplete_details - - completed_at - - incomplete_at - - role - - content - - assistant_id - - run_id - - attachments - - metadata + $ref: "#/components/schemas/ChatCompletionFunctions" x-oaiMeta: - name: The message object - beta: true + name: Training format for chat models example: | - { - "id": "msg_abc123", - "object": "thread.message", - "created_at": 1698983503, - "thread_id": "thread_abc123", - "role": "assistant", - "content": [ - { - "type": "text", - "text": { - "value": "Hi! How can I help you today?", - "annotations": [] + { + "messages": [ + { "role": "user", "content": "What is the weather in San Francisco?" 
}, + { + "role": "assistant", + "tool_calls": [ + { + "id": "call_id", + "type": "function", + "function": { + "name": "get_current_weather", + "arguments": "{\"location\": \"San Francisco, USA\", \"format\": \"celsius\"}" + } + } + ] + } + ], + "parallel_tool_calls": false, + "tools": [ + { + "type": "function", + "function": { + "name": "get_current_weather", + "description": "Get the current weather", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and country, eg. San Francisco, USA" + }, + "format": { "type": "string", "enum": ["celsius", "fahrenheit"] } + }, + "required": ["location", "format"] } } - ], - "assistant_id": "asst_abc123", - "run_id": "run_abc123", - "attachments": [], - "metadata": {} - } - - MessageDeltaObject: + } + ] + } + + FinetuneCompletionRequestInput: type: object - title: Message delta object - description: | - Represents a message delta i.e. any changed fields on a message during streaming. + description: The per-line training example of a fine-tuning input file for completions models properties: - id: - description: The identifier of the message, which can be referenced in API endpoints. + prompt: type: string - object: - description: The object type, which is always `thread.message.delta`. + description: The input prompt for this training example. + completion: type: string - enum: ["thread.message.delta"] - delta: - description: The delta containing the fields that have changed on the Message. - type: object - properties: - role: - description: The entity that produced the message. One of `user` or `assistant`. - type: string - enum: ["user", "assistant"] - content: - description: The content of the message in array of text and/or images. 
- type: array - items: - oneOf: - - $ref: "#/components/schemas/MessageDeltaContentImageFileObject" - - $ref: "#/components/schemas/MessageDeltaContentTextObject" - - $ref: "#/components/schemas/MessageDeltaContentImageUrlObject" - x-oaiExpandable: true - required: - - id - - object - - delta + description: The desired completion for this training example. x-oaiMeta: - name: The message delta object - beta: true + name: Training format for completions models example: | { - "id": "msg_123", - "object": "thread.message.delta", - "delta": { - "content": [ - { - "index": 0, - "type": "text", - "text": { "value": "Hello", "annotations": [] } - } - ] - } + "prompt": "What is the answer to 2+2", + "completion": "4" } - CreateMessageRequest: - type: object - additionalProperties: false - required: - - role - - content - properties: - role: - type: string - enum: ["user", "assistant"] - description: | - The role of the entity that is creating the message. Allowed values include: - - `user`: Indicates the message is sent by an actual user and should be used in most cases to represent user-generated messages. - - `assistant`: Indicates the message is generated by the assistant. Use this value to insert messages from the assistant into the conversation. - content: - oneOf: - - type: string - description: The text contents of the message. - title: Text content - - type: array - description: An array of content parts with a defined type, each can be of type `text` or images can be passed with `image_url` or `image_file`. Image types are only supported on [Vision-compatible models](/docs/models/overview). 
- title: Array of content parts - items: - oneOf: - - $ref: "#/components/schemas/MessageContentImageFileObject" - - $ref: "#/components/schemas/MessageContentImageUrlObject" - - $ref: "#/components/schemas/MessageRequestContentTextObject" - x-oaiExpandable: true - minItems: 1 - x-oaiExpandable: true - attachments: - type: array - items: - type: object - properties: - file_id: - type: string - description: The ID of the file to attach to the message. - tools: - description: The tools to add this file to. - type: array - items: - oneOf: - - $ref: "#/components/schemas/AssistantToolsCode" - - $ref: "#/components/schemas/AssistantToolsFileSearchTypeOnly" - x-oaiExpandable: true - description: A list of files attached to the message, and the tools they should be added to. - required: - - file_id - - tools - nullable: true - metadata: - description: *metadata_description - type: object - x-oaiTypeLabel: map - nullable: true - - ModifyMessageRequest: + CompletionUsage: type: object - additionalProperties: false + description: Usage statistics for the completion request. properties: - metadata: - description: *metadata_description - type: object - x-oaiTypeLabel: map - nullable: true + completion_tokens: + type: integer + description: Number of tokens in the generated completion. + prompt_tokens: + type: integer + description: Number of tokens in the prompt. + total_tokens: + type: integer + description: Total number of tokens used in the request (prompt + completion). + required: + - prompt_tokens + - completion_tokens + - total_tokens - DeleteMessageResponse: + RunCompletionUsage: type: object + description: Usage statistics related to the run. This value will be `null` if the run is not in a terminal state (i.e. `in_progress`, `queued`, etc.). 
properties: - id: - type: string - deleted: - type: boolean - object: - type: string - enum: [thread.message.deleted] - required: - - id - - object - - deleted - - ListMessagesResponse: - properties: - object: - type: string - example: "list" - data: - type: array - items: - $ref: "#/components/schemas/MessageObject" - first_id: - type: string - example: "msg_abc123" - last_id: - type: string - example: "msg_abc123" - has_more: - type: boolean - example: false + completion_tokens: + type: integer + description: Number of completion tokens used over the course of the run. + prompt_tokens: + type: integer + description: Number of prompt tokens used over the course of the run. + total_tokens: + type: integer + description: Total number of tokens used (prompt + completion). required: - - object - - data - - first_id - - last_id - - has_more + - prompt_tokens + - completion_tokens + - total_tokens + nullable: true - MessageContentImageFileObject: - title: Image file + RunStepCompletionUsage: type: object - description: References an image [File](/docs/api-reference/files) in the content of a message. + description: Usage statistics related to the run step. This value will be `null` while the run step's status is `in_progress`. properties: - type: - description: Always `image_file`. - type: string - enum: ["image_file"] - image_file: - type: object - properties: - file_id: - description: The [File](/docs/api-reference/files) ID of the image in the message content. Set `purpose="vision"` when uploading the File if you need to later display the file content. - type: string - detail: - type: string - description: Specifies the detail level of the image if specified by the user. `low` uses fewer tokens, you can opt in to high resolution using `high`. - enum: ["auto", "low", "high"] - default: "auto" - required: - - file_id + completion_tokens: + type: integer + description: Number of completion tokens used over the course of the run step. 
+ prompt_tokens: + type: integer + description: Number of prompt tokens used over the course of the run step. + total_tokens: + type: integer + description: Total number of tokens used (prompt + completion). required: - - type - - image_file + - prompt_tokens + - completion_tokens + - total_tokens + nullable: true - MessageDeltaContentImageFileObject: - title: Image file + AssistantsApiResponseFormatOption: + description: | + Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs). + + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + + **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + oneOf: + - type: string + description: > + `auto` is the default value + enum: [auto] + - $ref: '#/components/schemas/ResponseFormatText' + - $ref: '#/components/schemas/ResponseFormatJsonObject' + - $ref: '#/components/schemas/ResponseFormatJsonSchema' + x-oaiExpandable: true + + AssistantObject: type: object - description: References an image [File](/docs/api-reference/files) in the content of a message. 
+ title: Assistant + description: Represents an `assistant` that can call the model and use tools. properties: - index: + id: + description: The identifier, which can be referenced in API endpoints. + type: string + object: + description: The object type, which is always `assistant`. + type: string + enum: [assistant] + created_at: + description: The Unix timestamp (in seconds) for when the assistant was created. type: integer - description: The index of the content part in the message. - type: - description: Always `image_file`. + name: + description: &assistant_name_param_description | + The name of the assistant. The maximum length is 256 characters. type: string - enum: ["image_file"] - image_file: - type: object - properties: - file_id: - description: The [File](/docs/api-reference/files) ID of the image in the message content. Set `purpose="vision"` when uploading the File if you need to later display the file content. - type: string - detail: - type: string - description: Specifies the detail level of the image if specified by the user. `low` uses fewer tokens, you can opt in to high resolution using `high`. - enum: ["auto", "low", "high"] - default: "auto" - required: - - index - - type - - MessageContentImageUrlObject: - title: Image URL - type: object - description: References an image URL in the content of a message. - properties: - type: + maxLength: 256 + nullable: true + description: + description: &assistant_description_param_description | + The description of the assistant. The maximum length is 512 characters. type: string - enum: ["image_url"] - description: The type of the content part. - image_url: + maxLength: 512 + nullable: true + model: + description: *model_description + type: string + instructions: + description: &assistant_instructions_param_description | + The system instructions that the assistant uses. The maximum length is 256,000 characters. 
+ type: string + maxLength: 256000 + nullable: true + tools: + description: &assistant_tools_param_description | + A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. + default: [] + type: array + maxItems: 128 + items: + oneOf: + - $ref: "#/components/schemas/AssistantToolsCode" + - $ref: "#/components/schemas/AssistantToolsFileSearch" + - $ref: "#/components/schemas/AssistantToolsFunction" + x-oaiExpandable: true + tool_resources: type: object + description: | + A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. properties: - url: - type: string - description: "The external URL of the image, must be a supported image types: jpeg, jpg, png, gif, webp." - format: uri - detail: - type: string - description: Specifies the detail level of the image. `low` uses fewer tokens, you can opt in to high resolution using `high`. Default value is `auto` - enum: ["auto", "low", "high"] - default: "auto" - required: - - url + code_interpreter: + type: object + properties: + file_ids: + type: array + description: | + A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter`` tool. There can be a maximum of 20 files associated with the tool. + default: [] + maxItems: 20 + items: + type: string + file_search: + type: object + properties: + vector_store_ids: + type: array + description: | + The ID of the [vector store](/docs/api-reference/vector-stores/object) attached to this assistant. There can be a maximum of 1 vector store attached to the assistant. + maxItems: 1 + items: + type: string + nullable: true + metadata: + description: &metadata_description | + Set of 16 key-value pairs that can be attached to an object. 
This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + type: object + x-oaiTypeLabel: map + nullable: true + temperature: + description: &run_temperature_description | + What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + type: number + minimum: 0 + maximum: 2 + default: 1 + example: 1 + nullable: true + top_p: + type: number + minimum: 0 + maximum: 1 + default: 1 + example: 1 + nullable: true + description: &run_top_p_description | + An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. + response_format: + $ref: "#/components/schemas/AssistantsApiResponseFormatOption" + nullable: true required: - - type - - image_url + - id + - object + - created_at + - name + - description + - model + - instructions + - tools + - metadata + x-oaiMeta: + name: The assistant object + beta: true + example: *create_assistants_example - MessageDeltaContentImageUrlObject: - title: Image URL + CreateAssistantRequest: type: object - description: References an image URL in the content of a message. + additionalProperties: false properties: - index: - type: integer - description: The index of the content part in the message. - type: - description: Always `image_url`. 
+ model: + description: *model_description + example: "gpt-4o" + anyOf: + - type: string + - type: string + enum: + [ + "gpt-4o", + "gpt-4o-2024-08-06", + "gpt-4o-2024-05-13", + "gpt-4o-2024-08-06", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", + "gpt-4-turbo", + "gpt-4-turbo-2024-04-09", + "gpt-4-0125-preview", + "gpt-4-turbo-preview", + "gpt-4-1106-preview", + "gpt-4-vision-preview", + "gpt-4", + "gpt-4-0314", + "gpt-4-0613", + "gpt-4-32k", + "gpt-4-32k-0314", + "gpt-4-32k-0613", + "gpt-3.5-turbo", + "gpt-3.5-turbo-16k", + "gpt-3.5-turbo-0613", + "gpt-3.5-turbo-1106", + "gpt-3.5-turbo-0125", + "gpt-3.5-turbo-16k-0613", + ] + x-oaiTypeLabel: string + name: + description: *assistant_name_param_description type: string - enum: ["image_url"] - image_url: - type: object - properties: - url: - description: "The URL of the image, must be a supported image types: jpeg, jpg, png, gif, webp." - type: string - detail: - type: string - description: Specifies the detail level of the image. `low` uses fewer tokens, you can opt in to high resolution using `high`. - enum: ["auto", "low", "high"] - default: "auto" - required: - - index - - type - - MessageContentTextObject: - title: Text - type: object - description: The text content that is part of a message. - properties: - type: - description: Always `text`. 
+ nullable: true + maxLength: 256 + description: + description: *assistant_description_param_description type: string - enum: ["text"] - text: + nullable: true + maxLength: 512 + instructions: + description: *assistant_instructions_param_description + type: string + nullable: true + maxLength: 256000 + tools: + description: *assistant_tools_param_description + default: [] + type: array + maxItems: 128 + items: + oneOf: + - $ref: "#/components/schemas/AssistantToolsCode" + - $ref: "#/components/schemas/AssistantToolsFileSearch" + - $ref: "#/components/schemas/AssistantToolsFunction" + x-oaiExpandable: true + tool_resources: type: object + description: | + A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. properties: - value: - description: The data that makes up the text. - type: string - annotations: - type: array - items: - oneOf: - - $ref: "#/components/schemas/MessageContentTextAnnotationsFileCitationObject" - - $ref: "#/components/schemas/MessageContentTextAnnotationsFilePathObject" - x-oaiExpandable: true - required: - - value - - annotations + code_interpreter: + type: object + properties: + file_ids: + type: array + description: | + A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. + default: [] + maxItems: 20 + items: + type: string + file_search: + type: object + properties: + vector_store_ids: + type: array + description: | + The [vector store](/docs/api-reference/vector-stores/object) attached to this assistant. There can be a maximum of 1 vector store attached to the assistant. 
+ maxItems: 1 + items: + type: string + vector_stores: + type: array + description: | + A helper to create a [vector store](/docs/api-reference/vector-stores/object) with file_ids and attach it to this assistant. There can be a maximum of 1 vector store attached to the assistant. + maxItems: 1 + items: + type: object + properties: + file_ids: + type: array + description: | + A list of [file](/docs/api-reference/files) IDs to add to the vector store. There can be a maximum of 10000 files in a vector store. + maxItems: 10000 + items: + type: string + chunking_strategy: + # Ideally we'd reuse the chunking strategy schema here, but it doesn't expand properly + type: object + description: The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. + oneOf: + - type: object + title: Auto Chunking Strategy + description: The default strategy. This strategy currently uses a `max_chunk_size_tokens` of `800` and `chunk_overlap_tokens` of `400`. + additionalProperties: false + properties: + type: + type: string + description: Always `auto`. + enum: ["auto"] + required: + - type + - type: object + title: Static Chunking Strategy + additionalProperties: false + properties: + type: + type: string + description: Always `static`. + enum: ["static"] + static: + type: object + additionalProperties: false + properties: + max_chunk_size_tokens: + type: integer + minimum: 100 + maximum: 4096 + description: The maximum number of tokens in each chunk. The default value is `800`. The minimum value is `100` and the maximum value is `4096`. + chunk_overlap_tokens: + type: integer + description: | + The number of tokens that overlap between chunks. The default value is `400`. + + Note that the overlap must not exceed half of `max_chunk_size_tokens`. 
+ required: + - max_chunk_size_tokens + - chunk_overlap_tokens + required: + - type + - static + x-oaiExpandable: true + metadata: + type: object + description: | + Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + x-oaiTypeLabel: map + oneOf: + - required: [vector_store_ids] + - required: [vector_stores] + nullable: true + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true + temperature: + description: *run_temperature_description + type: number + minimum: 0 + maximum: 2 + default: 1 + example: 1 + nullable: true + top_p: + type: number + minimum: 0 + maximum: 1 + default: 1 + example: 1 + nullable: true + description: *run_top_p_description + response_format: + $ref: "#/components/schemas/AssistantsApiResponseFormatOption" + nullable: true required: - - type - - text + - model - MessageRequestContentTextObject: - title: Text + ModifyAssistantRequest: type: object - description: The text content that is part of a message. + additionalProperties: false properties: - type: - description: Always `text`. - type: string - enum: ["text"] - text: + model: + description: *model_description + anyOf: + - type: string + name: + description: *assistant_name_param_description type: string - description: Text content to be sent to the model - required: - - type - - text - - MessageContentTextAnnotationsFileCitationObject: - title: File citation - type: object - description: A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. Generated when the assistant uses the "file_search" tool to search files. - properties: - type: - description: Always `file_citation`. 
+ nullable: true + maxLength: 256 + description: + description: *assistant_description_param_description type: string - enum: ["file_citation"] - text: - description: The text in the message content that needs to be replaced. + nullable: true + maxLength: 512 + instructions: + description: *assistant_instructions_param_description type: string - file_citation: + nullable: true + maxLength: 256000 + tools: + description: *assistant_tools_param_description + default: [] + type: array + maxItems: 128 + items: + oneOf: + - $ref: "#/components/schemas/AssistantToolsCode" + - $ref: "#/components/schemas/AssistantToolsFileSearch" + - $ref: "#/components/schemas/AssistantToolsFunction" + x-oaiExpandable: true + tool_resources: type: object + description: | + A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. properties: - file_id: - description: The ID of the specific File the citation is from. - type: string - required: - - file_id - start_index: - type: integer + code_interpreter: + type: object + properties: + file_ids: + type: array + description: | + Overrides the list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. + default: [] + maxItems: 20 + items: + type: string + file_search: + type: object + properties: + vector_store_ids: + type: array + description: | + Overrides the [vector store](/docs/api-reference/vector-stores/object) attached to this assistant. There can be a maximum of 1 vector store attached to the assistant. 
+ maxItems: 1 + items: + type: string + nullable: true + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true + temperature: + description: *run_temperature_description + type: number minimum: 0 - end_index: - type: integer + maximum: 2 + default: 1 + example: 1 + nullable: true + top_p: + type: number minimum: 0 + maximum: 1 + default: 1 + example: 1 + nullable: true + description: *run_top_p_description + response_format: + $ref: "#/components/schemas/AssistantsApiResponseFormatOption" + nullable: true + + DeleteAssistantResponse: + type: object + properties: + id: + type: string + deleted: + type: boolean + object: + type: string + enum: [assistant.deleted] required: - - type - - text - - file_citation - - start_index - - end_index + - id + - object + - deleted - MessageContentTextAnnotationsFilePathObject: - title: File path + ListAssistantsResponse: type: object - description: A URL for the file that's generated when the assistant used the `code_interpreter` tool to generate a file. properties: - type: - description: Always `file_path`. + object: type: string - enum: ["file_path"] - text: - description: The text in the message content that needs to be replaced. + example: "list" + data: + type: array + items: + $ref: "#/components/schemas/AssistantObject" + first_id: type: string - file_path: - type: object - properties: - file_id: - description: The ID of the file that was generated. 
- type: string - required: - - file_id - start_index: - type: integer - minimum: 0 - end_index: - type: integer - minimum: 0 + example: "asst_abc123" + last_id: + type: string + example: "asst_abc456" + has_more: + type: boolean + example: false required: - - type - - text - - file_path - - start_index - - end_index + - object + - data + - first_id + - last_id + - has_more + x-oaiMeta: + name: List assistants response object + group: chat + example: *list_assistants_example - MessageDeltaContentTextObject: - title: Text + AssistantToolsCode: type: object - description: The text content that is part of a message. + title: Code interpreter tool properties: - index: - type: integer - description: The index of the content part in the message. type: - description: Always `text`. type: string - enum: ["text"] - text: - type: object - properties: - value: - description: The data that makes up the text. - type: string - annotations: - type: array - items: - oneOf: - - $ref: "#/components/schemas/MessageDeltaContentTextAnnotationsFileCitationObject" - - $ref: "#/components/schemas/MessageDeltaContentTextAnnotationsFilePathObject" - x-oaiExpandable: true + description: "The type of tool being defined: `code_interpreter`" + enum: ["code_interpreter"] required: - - index - type - MessageDeltaContentTextAnnotationsFileCitationObject: - title: File citation + AssistantToolsFileSearch: type: object - description: A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. Generated when the assistant uses the "file_search" tool to search files. + title: FileSearch tool properties: - index: - type: integer - description: The index of the annotation in the text content part. type: - description: Always `file_citation`. - type: string - enum: ["file_citation"] - text: - description: The text in the message content that needs to be replaced. 
type: string - file_citation: + description: "The type of tool being defined: `file_search`" + enum: ["file_search"] + file_search: type: object + description: Overrides for the file search tool. properties: - file_id: - description: The ID of the specific File the citation is from. - type: string - quote: - description: The specific quote in the file. - type: string - start_index: - type: integer - minimum: 0 - end_index: - type: integer - minimum: 0 + max_num_results: + type: integer + minimum: 1 + maximum: 50 + description: | + The maximum number of results the file search tool should output. The default is 20 for `gpt-4*` models and 5 for `gpt-3.5-turbo`. This number should be between 1 and 50 inclusive. + + Note that the file search tool may output fewer than `max_num_results` results. See the [file search tool documentation](/docs/assistants/tools/file-search/number-of-chunks-returned) for more information. required: - - index - type - MessageDeltaContentTextAnnotationsFilePathObject: - title: File path + AssistantToolsFileSearchTypeOnly: type: object - description: A URL for the file that's generated when the assistant used the `code_interpreter` tool to generate a file. + title: FileSearch tool properties: - index: - type: integer - description: The index of the annotation in the text content part. type: - description: Always `file_path`. type: string - enum: ["file_path"] - text: - description: The text in the message content that needs to be replaced. + description: "The type of tool being defined: `file_search`" + enum: ["file_search"] + required: + - type + + AssistantToolsFunction: + type: object + title: Function tool + properties: + type: type: string - file_path: - type: object - properties: - file_id: - description: The ID of the file that was generated. 
- type: string - start_index: - type: integer - minimum: 0 - end_index: - type: integer - minimum: 0 + description: "The type of tool being defined: `function`" + enum: ["function"] + function: + $ref: "#/components/schemas/FunctionObject" required: - - index - type + - function - RunStepObject: + TruncationObject: type: object - title: Run steps - description: | - Represents a step in execution of a run. + title: Thread Truncation Controls + description: Controls for how a thread will be truncated prior to the run. Use this to control the intial context window of the run. properties: - id: - description: The identifier of the run step, which can be referenced in API endpoints. + type: type: string - object: - description: The object type, which is always `thread.run.step`. + description: The truncation strategy to use for the thread. The default is `auto`. If set to `last_messages`, the thread will be truncated to the n most recent messages in the thread. When set to `auto`, messages in the middle of the thread will be dropped to fit the context length of the model, `max_prompt_tokens`. + enum: ["auto", "last_messages"] + last_messages: + type: integer + description: The number of most recent messages from the thread when constructing the context for the run. + minimum: 1 + nullable: true + required: + - type + + AssistantsApiToolChoiceOption: + description: | + Controls which (if any) tool is called by the model. + `none` means the model will not call any tools and instead generates a message. + `auto` is the default value and means the model can pick between generating a message or calling one or more tools. + `required` means the model must call one or more tools before responding to the user. + Specifying a particular tool like `{"type": "file_search"}` or `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. 
+ + oneOf: + - type: string + description: > + `none` means the model will not call any tools and instead generates a message. + `auto` means the model can pick between generating a message or calling one or more tools. + `required` means the model must call one or more tools before responding to the user. + enum: [none, auto, required] + - $ref: "#/components/schemas/AssistantsNamedToolChoice" + x-oaiExpandable: true + + AssistantsNamedToolChoice: + type: object + description: Specifies a tool the model should use. Use to force the model to call a specific tool. + properties: + type: type: string - enum: ["thread.run.step"] + enum: ["function", "code_interpreter", "file_search"] + description: The type of the tool. If type is `function`, the function name must be set + function: + type: object + properties: + name: + type: string + description: The name of the function to call. + required: + - name + required: + - type + + RunObject: + type: object + title: A run on a thread + description: Represents an execution run on a [thread](/docs/api-reference/threads). + properties: + id: + description: The identifier, which can be referenced in API endpoints. + type: string + object: + description: The object type, which is always `thread.run`. + type: string + enum: ["thread.run"] created_at: - description: The Unix timestamp (in seconds) for when the run step was created. + description: The Unix timestamp (in seconds) for when the run was created. type: integer - assistant_id: - description: The ID of the [assistant](/docs/api-reference/assistants) associated with the run step. - type: string thread_id: - description: The ID of the [thread](/docs/api-reference/threads) that was run. - type: string - run_id: - description: The ID of the [run](/docs/api-reference/runs) that this run step is a part of. + description: The ID of the [thread](/docs/api-reference/threads) that was executed on as a part of this run. 
type: string - type: - description: The type of run step, which can be either `message_creation` or `tool_calls`. + assistant_id: + description: The ID of the [assistant](/docs/api-reference/assistants) used for execution of this run. type: string - enum: ["message_creation", "tool_calls"] status: - description: The status of the run step, which can be either `in_progress`, `cancelled`, `failed`, `completed`, or `expired`. + description: The status of the run, which can be either `queued`, `in_progress`, `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`, `incomplete`, or `expired`. type: string - enum: ["in_progress", "cancelled", "failed", "completed", "expired"] - step_details: + enum: + [ + "queued", + "in_progress", + "requires_action", + "cancelling", + "cancelled", + "failed", + "completed", + "incomplete", + "expired", + ] + required_action: type: object - description: The details of the run step. - oneOf: - - $ref: "#/components/schemas/RunStepDetailsMessageCreationObject" - - $ref: "#/components/schemas/RunStepDetailsToolCallsObject" - x-oaiExpandable: true + description: Details on the action required to continue the run. Will be `null` if no action is required. + nullable: true + properties: + type: + description: For now, this is always `submit_tool_outputs`. + type: string + enum: ["submit_tool_outputs"] + submit_tool_outputs: + type: object + description: Details on the tool outputs needed for this run to continue. + properties: + tool_calls: + type: array + description: A list of the relevant tool calls. + items: + $ref: "#/components/schemas/RunToolCallObject" + required: + - tool_calls + required: + - type + - submit_tool_outputs last_error: type: object - description: The last error associated with this run step. Will be `null` if there are no errors. + description: The last error associated with this run. Will be `null` if there are no errors. 
nullable: true properties: code: type: string - description: One of `server_error` or `rate_limit_exceeded`. - enum: ["server_error", "rate_limit_exceeded"] + description: One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`. + enum: + ["server_error", "rate_limit_exceeded", "invalid_prompt"] message: type: string description: A human-readable description of the error. required: - code - message - expired_at: - description: The Unix timestamp (in seconds) for when the run step expired. A step is considered expired if the parent run is expired. + expires_at: + description: The Unix timestamp (in seconds) for when the run will expire. + type: integer + nullable: true + started_at: + description: The Unix timestamp (in seconds) for when the run was started. type: integer nullable: true cancelled_at: - description: The Unix timestamp (in seconds) for when the run step was cancelled. + description: The Unix timestamp (in seconds) for when the run was cancelled. type: integer nullable: true failed_at: - description: The Unix timestamp (in seconds) for when the run step failed. + description: The Unix timestamp (in seconds) for when the run failed. type: integer nullable: true completed_at: - description: The Unix timestamp (in seconds) for when the run step completed. + description: The Unix timestamp (in seconds) for when the run was completed. type: integer nullable: true + incomplete_details: + description: Details on why the run is incomplete. Will be `null` if the run is not incomplete. + type: object + nullable: true + properties: + reason: + description: The reason why the run is incomplete. This will point to which specific token limit was reached over the course of the run. + type: string + enum: ["max_completion_tokens", "max_prompt_tokens"] + model: + description: The model that the [assistant](/docs/api-reference/assistants) used for this run. 
+ type: string + instructions: + description: The instructions that the [assistant](/docs/api-reference/assistants) used for this run. + type: string + tools: + description: The list of tools that the [assistant](/docs/api-reference/assistants) used for this run. + default: [] + type: array + maxItems: 20 + items: + oneOf: + - $ref: "#/components/schemas/AssistantToolsCode" + - $ref: "#/components/schemas/AssistantToolsFileSearch" + - $ref: "#/components/schemas/AssistantToolsFunction" + x-oaiExpandable: true metadata: description: *metadata_description type: object x-oaiTypeLabel: map nullable: true usage: - $ref: "#/components/schemas/RunStepCompletionUsage" + $ref: "#/components/schemas/RunCompletionUsage" + temperature: + description: The sampling temperature used for this run. If not set, defaults to 1. + type: number + nullable: true + top_p: + description: The nucleus sampling value used for this run. If not set, defaults to 1. + type: number + nullable: true + max_prompt_tokens: + type: integer + nullable: true + description: | + The maximum number of prompt tokens specified to have been used over the course of the run. + minimum: 256 + max_completion_tokens: + type: integer + nullable: true + description: | + The maximum number of completion tokens specified to have been used over the course of the run. 
+ minimum: 256 + truncation_strategy: + $ref: "#/components/schemas/TruncationObject" + nullable: true + tool_choice: + $ref: "#/components/schemas/AssistantsApiToolChoiceOption" + nullable: true + parallel_tool_calls: + $ref: "#/components/schemas/ParallelToolCalls" + response_format: + $ref: "#/components/schemas/AssistantsApiResponseFormatOption" + nullable: true required: - id - object - created_at - - assistant_id - thread_id - - run_id - - type + - assistant_id - status - - step_details + - required_action - last_error - - expired_at + - expires_at + - started_at - cancelled_at - failed_at - completed_at + - model + - instructions + - tools - metadata - usage + - incomplete_details + - max_prompt_tokens + - max_completion_tokens + - truncation_strategy + - tool_choice + - parallel_tool_calls + - response_format x-oaiMeta: - name: The run step object - beta: true - example: *run_step_object_example - - RunStepDeltaObject: - type: object - title: Run step delta object - description: | - Represents a run step delta i.e. any changed fields on a run step during streaming. - properties: - id: - description: The identifier of the run step, which can be referenced in API endpoints. - type: string - object: - description: The object type, which is always `thread.run.step.delta`. - type: string - enum: ["thread.run.step.delta"] - delta: - description: The delta containing the fields that have changed on the run step. - type: object - properties: - step_details: - type: object - description: The details of the run step. 
- oneOf: - - $ref: "#/components/schemas/RunStepDeltaStepDetailsMessageCreationObject" - - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsObject" - x-oaiExpandable: true - required: - - id - - object - - delta - x-oaiMeta: - name: The run step delta object + name: The run object beta: true example: | { - "id": "step_123", - "object": "thread.run.step.delta", - "delta": { - "step_details": { - "type": "tool_calls", - "tool_calls": [ - { - "index": 0, - "id": "call_123", - "type": "code_interpreter", - "code_interpreter": { "input": "", "outputs": [] } - } - ] - } - } + "id": "run_abc123", + "object": "thread.run", + "created_at": 1698107661, + "assistant_id": "asst_abc123", + "thread_id": "thread_abc123", + "status": "completed", + "started_at": 1699073476, + "expires_at": null, + "cancelled_at": null, + "failed_at": null, + "completed_at": 1699073498, + "last_error": null, + "model": "gpt-4o", + "instructions": null, + "tools": [{"type": "file_search"}, {"type": "code_interpreter"}], + "metadata": {}, + "incomplete_details": null, + "usage": { + "prompt_tokens": 123, + "completion_tokens": 456, + "total_tokens": 579 + }, + "temperature": 1.0, + "top_p": 1.0, + "max_prompt_tokens": 1000, + "max_completion_tokens": 1000, + "truncation_strategy": { + "type": "auto", + "last_messages": null + }, + "response_format": "auto", + "tool_choice": "auto", + "parallel_tool_calls": true } - - ListRunStepsResponse: + CreateRunRequest: + type: object + additionalProperties: false + properties: + assistant_id: + description: The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run. + type: string + model: + description: The ID of the [Model](/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used. 
+ example: "gpt-4o" + anyOf: + - type: string + - type: string + enum: + [ + "gpt-4o", + "gpt-4o-2024-08-06", + "gpt-4o-2024-05-13", + "gpt-4o-2024-08-06", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", + "gpt-4-turbo", + "gpt-4-turbo-2024-04-09", + "gpt-4-0125-preview", + "gpt-4-turbo-preview", + "gpt-4-1106-preview", + "gpt-4-vision-preview", + "gpt-4", + "gpt-4-0314", + "gpt-4-0613", + "gpt-4-32k", + "gpt-4-32k-0314", + "gpt-4-32k-0613", + "gpt-3.5-turbo", + "gpt-3.5-turbo-16k", + "gpt-3.5-turbo-0613", + "gpt-3.5-turbo-1106", + "gpt-3.5-turbo-0125", + "gpt-3.5-turbo-16k-0613", + ] + x-oaiTypeLabel: string + nullable: true + instructions: + description: Overrides the [instructions](/docs/api-reference/assistants/createAssistant) of the assistant. This is useful for modifying the behavior on a per-run basis. + type: string + nullable: true + additional_instructions: + description: Appends additional instructions at the end of the instructions for the run. This is useful for modifying the behavior on a per-run basis without overriding other instructions. + type: string + nullable: true + additional_messages: + description: Adds additional messages to the thread before creating the run. + type: array + items: + $ref: "#/components/schemas/CreateMessageRequest" + nullable: true + tools: + description: Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. 
+ nullable: true + type: array + maxItems: 20 + items: + oneOf: + - $ref: "#/components/schemas/AssistantToolsCode" + - $ref: "#/components/schemas/AssistantToolsFileSearch" + - $ref: "#/components/schemas/AssistantToolsFunction" + x-oaiExpandable: true + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true + temperature: + type: number + minimum: 0 + maximum: 2 + default: 1 + example: 1 + nullable: true + description: *run_temperature_description + top_p: + type: number + minimum: 0 + maximum: 1 + default: 1 + example: 1 + nullable: true + description: *run_top_p_description + stream: + type: boolean + nullable: true + description: | + If `true`, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a `data: [DONE]` message. + max_prompt_tokens: + type: integer + nullable: true + description: | + The maximum number of prompt tokens that may be used over the course of the run. The run will make a best effort to use only the number of prompt tokens specified, across multiple turns of the run. If the run exceeds the number of prompt tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info. + minimum: 256 + max_completion_tokens: + type: integer + nullable: true + description: | + The maximum number of completion tokens that may be used over the course of the run. The run will make a best effort to use only the number of completion tokens specified, across multiple turns of the run. If the run exceeds the number of completion tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info. 
+ minimum: 256 + truncation_strategy: + $ref: "#/components/schemas/TruncationObject" + nullable: true + tool_choice: + $ref: "#/components/schemas/AssistantsApiToolChoiceOption" + nullable: true + parallel_tool_calls: + $ref: "#/components/schemas/ParallelToolCalls" + response_format: + $ref: "#/components/schemas/AssistantsApiResponseFormatOption" + nullable: true + required: + - thread_id + - assistant_id + ListRunsResponse: + type: object properties: object: type: string @@ -11932,13 +12667,13 @@ components: data: type: array items: - $ref: "#/components/schemas/RunStepObject" + $ref: "#/components/schemas/RunObject" first_id: type: string - example: "step_abc123" + example: "run_abc123" last_id: type: string - example: "step_abc456" + example: "run_abc456" has_more: type: boolean example: false @@ -11948,423 +12683,547 @@ components: - first_id - last_id - has_more - - RunStepDetailsMessageCreationObject: - title: Message creation + ModifyRunRequest: type: object - description: Details of the message creation by the run step. + additionalProperties: false properties: - type: - description: Always `message_creation`. - type: string - enum: ["message_creation"] - message_creation: + metadata: + description: *metadata_description type: object - properties: - message_id: - type: string - description: The ID of the message that was created by this run step. - required: - - message_id + x-oaiTypeLabel: map + nullable: true + SubmitToolOutputsRunRequest: + type: object + additionalProperties: false + properties: + tool_outputs: + description: A list of tools for which the outputs are being submitted. + type: array + items: + type: object + properties: + tool_call_id: + type: string + description: The ID of the tool call in the `required_action` object within the run object the output is being submitted for. + output: + type: string + description: The output of the tool call to be submitted to continue the run. 
+ stream: + type: boolean + nullable: true + description: | + If `true`, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a `data: [DONE]` message. required: - - type - - message_creation + - tool_outputs - RunStepDeltaStepDetailsMessageCreationObject: - title: Message creation + RunToolCallObject: type: object - description: Details of the message creation by the run step. + description: Tool call objects properties: + id: + type: string + description: The ID of the tool call. This ID must be referenced when you submit the tool outputs in using the [Submit tool outputs to run](/docs/api-reference/runs/submitToolOutputs) endpoint. type: - description: Always `message_creation`. type: string - enum: ["message_creation"] - message_creation: + description: The type of tool call the output is required for. For now, this is always `function`. + enum: ["function"] + function: type: object + description: The function definition. properties: - message_id: + name: type: string - description: The ID of the message that was created by this run step. + description: The name of the function. + arguments: + type: string + description: The arguments that the model expects you to pass to the function. + required: + - name + - arguments required: + - id - type + - function - RunStepDetailsToolCallsObject: - title: Tool calls + CreateThreadAndRunRequest: type: object - description: Details of the tool call. + additionalProperties: false properties: - type: - description: Always `tool_calls`. + assistant_id: + description: The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run. type: string - enum: ["tool_calls"] - tool_calls: - type: array - description: | - An array of tool calls the run step was involved in. These can be associated with one of three types of tools: `code_interpreter`, `file_search`, or `function`. 
- items: - oneOf: - - $ref: "#/components/schemas/RunStepDetailsToolCallsCodeObject" - - $ref: "#/components/schemas/RunStepDetailsToolCallsFileSearchObject" - - $ref: "#/components/schemas/RunStepDetailsToolCallsFunctionObject" - x-oaiExpandable: true - required: - - type - - tool_calls - - RunStepDeltaStepDetailsToolCallsObject: - title: Tool calls - type: object - description: Details of the tool call. - properties: - type: - description: Always `tool_calls`. + thread: + $ref: "#/components/schemas/CreateThreadRequest" + description: If no thread is provided, an empty thread will be created. + model: + description: The ID of the [Model](/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used. + example: "gpt-4o" + anyOf: + - type: string + - type: string + enum: + [ + "gpt-4o", + "gpt-4o-2024-08-06", + "gpt-4o-2024-05-13", + "gpt-4o-2024-08-06", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", + "gpt-4-turbo", + "gpt-4-turbo-2024-04-09", + "gpt-4-0125-preview", + "gpt-4-turbo-preview", + "gpt-4-1106-preview", + "gpt-4-vision-preview", + "gpt-4", + "gpt-4-0314", + "gpt-4-0613", + "gpt-4-32k", + "gpt-4-32k-0314", + "gpt-4-32k-0613", + "gpt-3.5-turbo", + "gpt-3.5-turbo-16k", + "gpt-3.5-turbo-0613", + "gpt-3.5-turbo-1106", + "gpt-3.5-turbo-0125", + "gpt-3.5-turbo-16k-0613", + ] + x-oaiTypeLabel: string + nullable: true + instructions: + description: Override the default system message of the assistant. This is useful for modifying the behavior on a per-run basis. type: string - enum: ["tool_calls"] - tool_calls: + nullable: true + tools: + description: Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. + nullable: true type: array - description: | - An array of tool calls the run step was involved in. 
These can be associated with one of three types of tools: `code_interpreter`, `file_search`, or `function`. + maxItems: 20 items: oneOf: - - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsCodeObject" - - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsFileSearchObject" - - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsFunctionObject" - x-oaiExpandable: true + - $ref: "#/components/schemas/AssistantToolsCode" + - $ref: "#/components/schemas/AssistantToolsFileSearch" + - $ref: "#/components/schemas/AssistantToolsFunction" + tool_resources: + type: object + description: | + A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. + properties: + code_interpreter: + type: object + properties: + file_ids: + type: array + description: | + A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. + default: [] + maxItems: 20 + items: + type: string + file_search: + type: object + properties: + vector_store_ids: + type: array + description: | + The ID of the [vector store](/docs/api-reference/vector-stores/object) attached to this assistant. There can be a maximum of 1 vector store attached to the assistant. 
+ maxItems: 1 + items: + type: string + nullable: true + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true + temperature: + type: number + minimum: 0 + maximum: 2 + default: 1 + example: 1 + nullable: true + description: *run_temperature_description + top_p: + type: number + minimum: 0 + maximum: 1 + default: 1 + example: 1 + nullable: true + description: *run_top_p_description + stream: + type: boolean + nullable: true + description: | + If `true`, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a `data: [DONE]` message. + max_prompt_tokens: + type: integer + nullable: true + description: | + The maximum number of prompt tokens that may be used over the course of the run. The run will make a best effort to use only the number of prompt tokens specified, across multiple turns of the run. If the run exceeds the number of prompt tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info. + minimum: 256 + max_completion_tokens: + type: integer + nullable: true + description: | + The maximum number of completion tokens that may be used over the course of the run. The run will make a best effort to use only the number of completion tokens specified, across multiple turns of the run. If the run exceeds the number of completion tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info. 
+ minimum: 256 + truncation_strategy: + $ref: "#/components/schemas/TruncationObject" + nullable: true + tool_choice: + $ref: "#/components/schemas/AssistantsApiToolChoiceOption" + nullable: true + parallel_tool_calls: + $ref: "#/components/schemas/ParallelToolCalls" + response_format: + $ref: "#/components/schemas/AssistantsApiResponseFormatOption" + nullable: true required: - - type + - thread_id + - assistant_id - RunStepDetailsToolCallsCodeObject: - title: Code Interpreter tool call + ThreadObject: type: object - description: Details of the Code Interpreter tool call the run step was involved in. + title: Thread + description: Represents a thread that contains [messages](/docs/api-reference/messages). properties: id: + description: The identifier, which can be referenced in API endpoints. type: string - description: The ID of the tool call. - type: + object: + description: The object type, which is always `thread`. type: string - description: The type of tool call. This is always going to be `code_interpreter` for this type of tool call. - enum: ["code_interpreter"] - code_interpreter: + enum: ["thread"] + created_at: + description: The Unix timestamp (in seconds) for when the thread was created. + type: integer + tool_resources: type: object - description: The Code Interpreter tool call definition. - required: - - input - - outputs + description: | + A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. properties: - input: - type: string - description: The input to the Code Interpreter tool call. - outputs: - type: array - description: The outputs from the Code Interpreter tool call. Code Interpreter can output one or more items, including text (`logs`) or images (`image`). Each of these are represented by a different object type. 
- items: - type: object - oneOf: - - $ref: "#/components/schemas/RunStepDetailsToolCallsCodeOutputLogsObject" - - $ref: "#/components/schemas/RunStepDetailsToolCallsCodeOutputImageObject" - x-oaiExpandable: true + code_interpreter: + type: object + properties: + file_ids: + type: array + description: | + A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. + default: [] + maxItems: 20 + items: + type: string + file_search: + type: object + properties: + vector_store_ids: + type: array + description: | + The [vector store](/docs/api-reference/vector-stores/object) attached to this thread. There can be a maximum of 1 vector store attached to the thread. + maxItems: 1 + items: + type: string + nullable: true + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true required: - id - - type - - code_interpreter + - object + - created_at + - tool_resources + - metadata + x-oaiMeta: + name: The thread object + beta: true + example: | + { + "id": "thread_abc123", + "object": "thread", + "created_at": 1698107661, + "metadata": {} + } - RunStepDeltaStepDetailsToolCallsCodeObject: - title: Code interpreter tool call + CreateThreadRequest: type: object - description: Details of the Code Interpreter tool call the run step was involved in. + additionalProperties: false properties: - index: - type: integer - description: The index of the tool call in the tool calls array. - id: - type: string - description: The ID of the tool call. - type: - type: string - description: The type of tool call. This is always going to be `code_interpreter` for this type of tool call. - enum: ["code_interpreter"] - code_interpreter: + messages: + description: A list of [messages](/docs/api-reference/messages) to start the thread with. 
+ type: array + items: + $ref: "#/components/schemas/CreateMessageRequest" + tool_resources: type: object - description: The Code Interpreter tool call definition. + description: | + A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. properties: - input: - type: string - description: The input to the Code Interpreter tool call. - outputs: - type: array - description: The outputs from the Code Interpreter tool call. Code Interpreter can output one or more items, including text (`logs`) or images (`image`). Each of these are represented by a different object type. - items: - type: object - oneOf: - - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject" - - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsCodeOutputImageObject" - x-oaiExpandable: true - required: - - index - - type + code_interpreter: + type: object + properties: + file_ids: + type: array + description: | + A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. + default: [] + maxItems: 20 + items: + type: string + file_search: + type: object + properties: + vector_store_ids: + type: array + description: | + The [vector store](/docs/api-reference/vector-stores/object) attached to this thread. There can be a maximum of 1 vector store attached to the thread. + maxItems: 1 + items: + type: string + vector_stores: + type: array + description: | + A helper to create a [vector store](/docs/api-reference/vector-stores/object) with file_ids and attach it to this thread. There can be a maximum of 1 vector store attached to the thread. 
+ maxItems: 1 + items: + type: object + properties: + file_ids: + type: array + description: | + A list of [file](/docs/api-reference/files) IDs to add to the vector store. There can be a maximum of 10000 files in a vector store. + maxItems: 10000 + items: + type: string + chunking_strategy: + # Ideally we'd reuse the chunking strategy schema here, but it doesn't expand properly + type: object + description: The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. + oneOf: + - type: object + title: Auto Chunking Strategy + description: The default strategy. This strategy currently uses a `max_chunk_size_tokens` of `800` and `chunk_overlap_tokens` of `400`. + additionalProperties: false + properties: + type: + type: string + description: Always `auto`. + enum: ["auto"] + required: + - type + - type: object + title: Static Chunking Strategy + additionalProperties: false + properties: + type: + type: string + description: Always `static`. + enum: ["static"] + static: + type: object + additionalProperties: false + properties: + max_chunk_size_tokens: + type: integer + minimum: 100 + maximum: 4096 + description: The maximum number of tokens in each chunk. The default value is `800`. The minimum value is `100` and the maximum value is `4096`. + chunk_overlap_tokens: + type: integer + description: | + The number of tokens that overlap between chunks. The default value is `400`. + + Note that the overlap must not exceed half of `max_chunk_size_tokens`. + required: + - max_chunk_size_tokens + - chunk_overlap_tokens + required: + - type + - static + x-oaiExpandable: true + metadata: + type: object + description: | + Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. 
+ x-oaiTypeLabel: map + x-oaiExpandable: true + oneOf: + - required: [vector_store_ids] + - required: [vector_stores] + nullable: true + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true - RunStepDetailsToolCallsCodeOutputLogsObject: - title: Code Interpreter log output + ModifyThreadRequest: type: object - description: Text output from the Code Interpreter tool call as part of a run step. + additionalProperties: false properties: - type: - description: Always `logs`. - type: string - enum: ["logs"] - logs: - type: string - description: The text output from the Code Interpreter tool call. - required: - - type - - logs + tool_resources: + type: object + description: | + A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. + properties: + code_interpreter: + type: object + properties: + file_ids: + type: array + description: | + A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. + default: [] + maxItems: 20 + items: + type: string + file_search: + type: object + properties: + vector_store_ids: + type: array + description: | + The [vector store](/docs/api-reference/vector-stores/object) attached to this thread. There can be a maximum of 1 vector store attached to the thread. + maxItems: 1 + items: + type: string + nullable: true + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true - RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject: - title: Code interpreter log output + DeleteThreadResponse: type: object - description: Text output from the Code Interpreter tool call as part of a run step. 
properties: - index: - type: integer - description: The index of the output in the outputs array. - type: - description: Always `logs`. + id: type: string - enum: ["logs"] - logs: + deleted: + type: boolean + object: type: string - description: The text output from the Code Interpreter tool call. + enum: [thread.deleted] required: - - index - - type + - id + - object + - deleted - RunStepDetailsToolCallsCodeOutputImageObject: - title: Code Interpreter image output - type: object + ListThreadsResponse: properties: - type: - description: Always `image`. + object: type: string - enum: ["image"] - image: - type: object - properties: - file_id: - description: The [file](/docs/api-reference/files) ID of the image. - type: string - required: - - file_id - required: - - type - - image - - RunStepDeltaStepDetailsToolCallsCodeOutputImageObject: - title: Code interpreter image output - type: object - properties: - index: - type: integer - description: The index of the output in the outputs array. - type: - description: Always `image`. + example: "list" + data: + type: array + items: + $ref: "#/components/schemas/ThreadObject" + first_id: type: string - enum: ["image"] - image: - type: object - properties: - file_id: - description: The [file](/docs/api-reference/files) ID of the image. - type: string + example: "asst_abc123" + last_id: + type: string + example: "asst_abc456" + has_more: + type: boolean + example: false required: - - index - - type + - object + - data + - first_id + - last_id + - has_more - RunStepDetailsToolCallsFileSearchObject: - title: File search tool call + MessageObject: type: object + title: The message object + description: Represents a message within a [thread](/docs/api-reference/threads). properties: id: + description: The identifier, which can be referenced in API endpoints. type: string - description: The ID of the tool call object. - type: + object: + description: The object type, which is always `thread.message`. 
type: string - description: The type of tool call. This is always going to be `file_search` for this type of tool call. - enum: ["file_search"] - file_search: + enum: ["thread.message"] + created_at: + description: The Unix timestamp (in seconds) for when the message was created. + type: integer + thread_id: + description: The [thread](/docs/api-reference/threads) ID that this message belongs to. + type: string + status: + description: The status of the message, which can be either `in_progress`, `incomplete`, or `completed`. + type: string + enum: ["in_progress", "incomplete", "completed"] + incomplete_details: + description: On an incomplete message, details about why the message is incomplete. type: object - description: For now, this is always going to be an empty object. - x-oaiTypeLabel: map - required: - - id - - type - - file_search - - RunStepDeltaStepDetailsToolCallsFileSearchObject: - title: File search tool call - type: object - properties: - index: - type: integer - description: The index of the tool call in the tool calls array. - id: - type: string - description: The ID of the tool call object. - type: - type: string - description: The type of tool call. This is always going to be `file_search` for this type of tool call. - enum: ["file_search"] - file_search: - type: object - description: For now, this is always going to be an empty object. - x-oaiTypeLabel: map - required: - - index - - type - - file_search - - RunStepDetailsToolCallsFunctionObject: - type: object - title: Function tool call - properties: - id: - type: string - description: The ID of the tool call object. - type: - type: string - description: The type of tool call. This is always going to be `function` for this type of tool call. - enum: ["function"] - function: - type: object - description: The definition of the function that was called. properties: - name: - type: string - description: The name of the function. 
- arguments: - type: string - description: The arguments passed to the function. - output: + reason: type: string - description: The output of the function. This will be `null` if the outputs have not been [submitted](/docs/api-reference/runs/submitToolOutputs) yet. - nullable: true + description: The reason the message is incomplete. + enum: + [ + "content_filter", + "max_tokens", + "run_cancelled", + "run_expired", + "run_failed", + ] + nullable: true required: - - name - - arguments - - output - required: - - id - - type - - function - - RunStepDeltaStepDetailsToolCallsFunctionObject: - type: object - title: Function tool call - properties: - index: + - reason + completed_at: + description: The Unix timestamp (in seconds) for when the message was completed. type: integer - description: The index of the tool call in the tool calls array. - id: - type: string - description: The ID of the tool call object. - type: - type: string - description: The type of tool call. This is always going to be `function` for this type of tool call. - enum: ["function"] - function: - type: object - description: The definition of the function that was called. - properties: - name: - type: string - description: The name of the function. - arguments: - type: string - description: The arguments passed to the function. - output: - type: string - description: The output of the function. This will be `null` if the outputs have not been [submitted](/docs/api-reference/runs/submitToolOutputs) yet. - nullable: true - required: - - index - - type - - VectorStoreExpirationAfter: - type: object - title: Vector store expiration policy - description: The expiration policy for a vector store. - properties: - anchor: - description: "Anchor timestamp after which the expiration policy applies. Supported anchors: `last_active_at`." - type: string - enum: ["last_active_at"] - days: - description: The number of days after the anchor time that the vector store will expire. 
+ nullable: true + incomplete_at: + description: The Unix timestamp (in seconds) for when the message was marked as incomplete. type: integer - minimum: 1 - maximum: 365 - required: - - anchor - - days - - VectorStoreObject: - type: object - title: Vector store - description: A vector store is a collection of processed files can be used by the `file_search` tool. - properties: - id: - description: The identifier, which can be referenced in API endpoints. - type: string - object: - description: The object type, which is always `vector_store`. + nullable: true + role: + description: The entity that produced the message. One of `user` or `assistant`. type: string - enum: ["vector_store"] - created_at: - description: The Unix timestamp (in seconds) for when the vector store was created. - type: integer - name: - description: The name of the vector store. + enum: ["user", "assistant"] + content: + description: The content of the message in array of text and/or images. + type: array + items: + oneOf: + - $ref: "#/components/schemas/MessageContentImageFileObject" + - $ref: "#/components/schemas/MessageContentImageUrlObject" + - $ref: "#/components/schemas/MessageContentTextObject" + - $ref: "#/components/schemas/MessageContentRefusalObject" + x-oaiExpandable: true + assistant_id: + description: If applicable, the ID of the [assistant](/docs/api-reference/assistants) that authored this message. type: string - usage_bytes: - description: The total number of bytes used by the files in the vector store. - type: integer - file_counts: - type: object - properties: - in_progress: - description: The number of files that are currently being processed. - type: integer - completed: - description: The number of files that have been successfully processed. - type: integer - failed: - description: The number of files that have failed to process. - type: integer - cancelled: - description: The number of files that were cancelled. 
- type: integer - total: - description: The total number of files. - type: integer - required: - - in_progress - - completed - - failed - - cancelled - - total - status: - description: The status of the vector store, which can be either `expired`, `in_progress`, or `completed`. A status of `completed` indicates that the vector store is ready for use. + nullable: true + run_id: + description: The ID of the [run](/docs/api-reference/runs) associated with the creation of this message. Value is `null` when messages are created manually using the create message or create thread endpoints. type: string - enum: ["expired", "in_progress", "completed"] - expires_after: - $ref: "#/components/schemas/VectorStoreExpirationAfter" - expires_at: - description: The Unix timestamp (in seconds) for when the vector store will expire. - type: integer nullable: true - last_active_at: - description: The Unix timestamp (in seconds) for when the vector store was last active. - type: integer + attachments: + type: array + items: + type: object + properties: + file_id: + type: string + description: The ID of the file to attach to the message. + tools: + description: The tools to add this file to. + type: array + items: + oneOf: + - $ref: "#/components/schemas/AssistantToolsCode" + - $ref: "#/components/schemas/AssistantToolsFileSearchTypeOnly" + x-oaiExpandable: true + description: A list of files attached to the message, and the tools they were added to. 
nullable: true metadata: description: *metadata_description @@ -12374,82 +13233,179 @@ components: required: - id - object - - usage_bytes - created_at + - thread_id - status - - last_active_at - - name - - file_counts + - incomplete_details + - completed_at + - incomplete_at + - role + - content + - assistant_id + - run_id + - attachments - metadata x-oaiMeta: - name: The vector store object + name: The message object beta: true example: | { - "id": "vs_123", - "object": "vector_store", - "created_at": 1698107661, - "usage_bytes": 123456, - "last_active_at": 1698107661, - "name": "my_vector_store", - "status": "completed", - "file_counts": { - "in_progress": 0, - "completed": 100, - "cancelled": 0, - "failed": 0, - "total": 100 - }, - "metadata": {}, - "last_used_at": 1698107661 - } - - CreateVectorStoreRequest: - type: object - additionalProperties: false - properties: - file_ids: - description: A list of [File](/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. - type: array - maxItems: 500 - items: - type: string - name: - description: The name of the vector store. + "id": "msg_abc123", + "object": "thread.message", + "created_at": 1698983503, + "thread_id": "thread_abc123", + "role": "assistant", + "content": [ + { + "type": "text", + "text": { + "value": "Hi! How can I help you today?", + "annotations": [] + } + } + ], + "assistant_id": "asst_abc123", + "run_id": "run_abc123", + "attachments": [], + "metadata": {} + } + + MessageDeltaObject: + type: object + title: Message delta object + description: | + Represents a message delta i.e. any changed fields on a message during streaming. + properties: + id: + description: The identifier of the message, which can be referenced in API endpoints. type: string - expires_after: - $ref: "#/components/schemas/VectorStoreExpirationAfter" - chunking_strategy: + object: + description: The object type, which is always `thread.message.delta`. 
+ type: string + enum: ["thread.message.delta"] + delta: + description: The delta containing the fields that have changed on the Message. type: object - description: The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. Only applicable if `file_ids` is non-empty. + properties: + role: + description: The entity that produced the message. One of `user` or `assistant`. + type: string + enum: ["user", "assistant"] + content: + description: The content of the message in array of text and/or images. + type: array + items: + oneOf: + - $ref: "#/components/schemas/MessageDeltaContentImageFileObject" + - $ref: "#/components/schemas/MessageDeltaContentTextObject" + - $ref: "#/components/schemas/MessageDeltaContentRefusalObject" + - $ref: "#/components/schemas/MessageDeltaContentImageUrlObject" + x-oaiExpandable: true + required: + - id + - object + - delta + x-oaiMeta: + name: The message delta object + beta: true + example: | + { + "id": "msg_123", + "object": "thread.message.delta", + "delta": { + "content": [ + { + "index": 0, + "type": "text", + "text": { "value": "Hello", "annotations": [] } + } + ] + } + } + + CreateMessageRequest: + type: object + additionalProperties: false + required: + - role + - content + properties: + role: + type: string + enum: ["user", "assistant"] + description: | + The role of the entity that is creating the message. Allowed values include: + - `user`: Indicates the message is sent by an actual user and should be used in most cases to represent user-generated messages. + - `assistant`: Indicates the message is generated by the assistant. Use this value to insert messages from the assistant into the conversation. + content: oneOf: - - $ref: "#/components/schemas/AutoChunkingStrategyRequestParam" - - $ref: "#/components/schemas/StaticChunkingStrategyRequestParam" + - type: string + description: The text contents of the message. 
+ title: Text content + - type: array + description: An array of content parts with a defined type, each can be of type `text` or images can be passed with `image_url` or `image_file`. Image types are only supported on [Vision-compatible models](/docs/models/overview). + title: Array of content parts + items: + oneOf: + - $ref: "#/components/schemas/MessageContentImageFileObject" + - $ref: "#/components/schemas/MessageContentImageUrlObject" + - $ref: "#/components/schemas/MessageRequestContentTextObject" + x-oaiExpandable: true + minItems: 1 x-oaiExpandable: true + attachments: + type: array + items: + type: object + properties: + file_id: + type: string + description: The ID of the file to attach to the message. + tools: + description: The tools to add this file to. + type: array + items: + oneOf: + - $ref: "#/components/schemas/AssistantToolsCode" + - $ref: "#/components/schemas/AssistantToolsFileSearchTypeOnly" + x-oaiExpandable: true + description: A list of files attached to the message, and the tools they should be added to. + required: + - file_id + - tools + nullable: true metadata: description: *metadata_description type: object x-oaiTypeLabel: map nullable: true - UpdateVectorStoreRequest: + ModifyMessageRequest: type: object additionalProperties: false properties: - name: - description: The name of the vector store. 
- type: string - nullable: true - expires_after: - $ref: "#/components/schemas/VectorStoreExpirationAfter" - nullable: true metadata: description: *metadata_description type: object x-oaiTypeLabel: map nullable: true - ListVectorStoresResponse: + DeleteMessageResponse: + type: object + properties: + id: + type: string + deleted: + type: boolean + object: + type: string + enum: [thread.message.deleted] + required: + - id + - object + - deleted + + ListMessagesResponse: properties: object: type: string @@ -12457,13 +13413,13 @@ components: data: type: array items: - $ref: "#/components/schemas/VectorStoreObject" + $ref: "#/components/schemas/MessageObject" first_id: type: string - example: "vs_abc123" + example: "msg_abc123" last_id: type: string - example: "vs_abc456" + example: "msg_abc123" has_more: type: boolean example: false @@ -12474,904 +13430,2977 @@ components: - last_id - has_more - DeleteVectorStoreResponse: + MessageContentImageFileObject: + title: Image file type: object + description: References an image [File](/docs/api-reference/files) in the content of a message. properties: - id: - type: string - deleted: - type: boolean - object: + type: + description: Always `image_file`. type: string - enum: [vector_store.deleted] + enum: ["image_file"] + image_file: + type: object + properties: + file_id: + description: The [File](/docs/api-reference/files) ID of the image in the message content. Set `purpose="vision"` when uploading the File if you need to later display the file content. + type: string + detail: + type: string + description: Specifies the detail level of the image if specified by the user. `low` uses fewer tokens, you can opt in to high resolution using `high`. 
+ enum: ["auto", "low", "high"] + default: "auto" + required: + - file_id required: - - id - - object - - deleted + - type + - image_file - VectorStoreFileObject: + MessageDeltaContentImageFileObject: + title: Image file type: object - title: Vector store files - description: A list of files attached to a vector store. + description: References an image [File](/docs/api-reference/files) in the content of a message. properties: - id: - description: The identifier, which can be referenced in API endpoints. - type: string - object: - description: The object type, which is always `vector_store.file`. - type: string - enum: ["vector_store.file"] - usage_bytes: - description: The total vector store usage in bytes. Note that this may be different from the original file size. - type: integer - created_at: - description: The Unix timestamp (in seconds) for when the vector store file was created. + index: type: integer - vector_store_id: - description: The ID of the [vector store](/docs/api-reference/vector-stores/object) that the [File](/docs/api-reference/files) is attached to. - type: string - status: - description: The status of the vector store file, which can be either `in_progress`, `completed`, `cancelled`, or `failed`. The status `completed` indicates that the vector store file is ready for use. + description: The index of the content part in the message. + type: + description: Always `image_file`. type: string - enum: ["in_progress", "completed", "cancelled", "failed"] - last_error: + enum: ["image_file"] + image_file: type: object - description: The last error associated with this vector store file. Will be `null` if there are no errors. - nullable: true properties: - code: + file_id: + description: The [File](/docs/api-reference/files) ID of the image in the message content. Set `purpose="vision"` when uploading the File if you need to later display the file content. type: string - description: One of `server_error` or `rate_limit_exceeded`. 
- enum: - [ - "internal_error", - "file_not_found", - "parsing_error", - "unhandled_mime_type", - ] - message: + detail: type: string - description: A human-readable description of the error. - required: - - code - - message - chunking_strategy: - type: object - description: The strategy used to chunk the file. - oneOf: - - $ref: "#/components/schemas/StaticChunkingStrategyResponseParam" - - $ref: "#/components/schemas/OtherChunkingStrategyResponseParam" - x-oaiExpandable: true + description: Specifies the detail level of the image if specified by the user. `low` uses fewer tokens, you can opt in to high resolution using `high`. + enum: ["auto", "low", "high"] + default: "auto" required: - - id - - object - - usage_bytes - - created_at - - vector_store_id - - status - - last_error - x-oaiMeta: - name: The vector store file object - beta: true - example: | - { - "id": "file-abc123", - "object": "vector_store.file", - "usage_bytes": 1234, - "created_at": 1698107661, - "vector_store_id": "vs_abc123", - "status": "completed", - "last_error": null, - "chunking_strategy": { - "type": "static", - "static": { - "max_chunk_size_tokens": 800, - "chunk_overlap_tokens": 400 - } - } - } + - index + - type - OtherChunkingStrategyResponseParam: + MessageContentImageUrlObject: + title: Image URL type: object - title: Other Chunking Strategy - description: This is returned when the chunking strategy is unknown. Typically, this is because the file was indexed before the `chunking_strategy` concept was introduced in the API. - additionalProperties: false + description: References an image URL in the content of a message. properties: type: type: string - description: Always `other`. - enum: ["other"] + enum: ["image_url"] + description: The type of the content part. + image_url: + type: object + properties: + url: + type: string + description: "The external URL of the image, must be a supported image types: jpeg, jpg, png, gif, webp." 
+ format: uri + detail: + type: string + description: Specifies the detail level of the image. `low` uses fewer tokens, you can opt in to high resolution using `high`. Default value is `auto` + enum: ["auto", "low", "high"] + default: "auto" + required: + - url required: - type + - image_url - StaticChunkingStrategyResponseParam: + MessageDeltaContentImageUrlObject: + title: Image URL type: object - title: Static Chunking Strategy - additionalProperties: false + description: References an image URL in the content of a message. properties: + index: + type: integer + description: The index of the content part in the message. type: + description: Always `image_url`. type: string - description: Always `static`. - enum: ["static"] - static: - $ref: "#/components/schemas/StaticChunkingStrategy" + enum: ["image_url"] + image_url: + type: object + properties: + url: + description: "The URL of the image, must be a supported image types: jpeg, jpg, png, gif, webp." + type: string + detail: + type: string + description: Specifies the detail level of the image. `low` uses fewer tokens, you can opt in to high resolution using `high`. + enum: ["auto", "low", "high"] + default: "auto" required: + - index - type - - static - - StaticChunkingStrategy: - type: object - additionalProperties: false - properties: - max_chunk_size_tokens: - type: integer - minimum: 100 - maximum: 4096 - description: The maximum number of tokens in each chunk. The default value is `800`. The minimum value is `100` and the maximum value is `4096`. - chunk_overlap_tokens: - type: integer - description: | - The number of tokens that overlap between chunks. The default value is `400`. - - Note that the overlap must not exceed half of `max_chunk_size_tokens`. - required: - - max_chunk_size_tokens - - chunk_overlap_tokens - AutoChunkingStrategyRequestParam: + MessageContentTextObject: + title: Text type: object - title: Auto Chunking Strategy - description: The default strategy. 
This strategy currently uses a `max_chunk_size_tokens` of `800` and `chunk_overlap_tokens` of `400`. - additionalProperties: false + description: The text content that is part of a message. properties: type: + description: Always `text`. type: string - description: Always `auto`. - enum: ["auto"] + enum: ["text"] + text: + type: object + properties: + value: + description: The data that makes up the text. + type: string + annotations: + type: array + items: + oneOf: + - $ref: "#/components/schemas/MessageContentTextAnnotationsFileCitationObject" + - $ref: "#/components/schemas/MessageContentTextAnnotationsFilePathObject" + x-oaiExpandable: true + required: + - value + - annotations required: - type + - text - StaticChunkingStrategyRequestParam: + MessageContentRefusalObject: + title: Refusal type: object - title: Static Chunking Strategy - additionalProperties: false + description: The refusal content generated by the assistant. properties: type: + description: Always `refusal`. type: string - description: Always `static`. - enum: ["static"] - static: - $ref: "#/components/schemas/StaticChunkingStrategy" + enum: ["refusal"] + refusal: + type: string + nullable: false required: - type - - static - - ChunkingStrategyRequestParam: - type: object - description: The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. - oneOf: - - $ref: "#/components/schemas/AutoChunkingStrategyRequestParam" - - $ref: "#/components/schemas/StaticChunkingStrategyRequestParam" - x-oaiExpandable: true + - refusal - CreateVectorStoreFileRequest: + MessageRequestContentTextObject: + title: Text type: object - additionalProperties: false + description: The text content that is part of a message. properties: - file_id: - description: A [File](/docs/api-reference/files) ID that the vector store should use. Useful for tools like `file_search` that can access files. + type: + description: Always `text`. 
type: string - chunking_strategy: - $ref: "#/components/schemas/ChunkingStrategyRequestParam" + enum: ["text"] + text: + type: string + description: Text content to be sent to the model required: - - file_id + - type + - text - ListVectorStoreFilesResponse: + MessageContentTextAnnotationsFileCitationObject: + title: File citation + type: object + description: A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. Generated when the assistant uses the "file_search" tool to search files. properties: - object: - type: string - example: "list" - data: - type: array - items: - $ref: "#/components/schemas/VectorStoreFileObject" - first_id: + type: + description: Always `file_citation`. type: string - example: "file-abc123" - last_id: + enum: ["file_citation"] + text: + description: The text in the message content that needs to be replaced. type: string - example: "file-abc456" - has_more: - type: boolean - example: false + file_citation: + type: object + properties: + file_id: + description: The ID of the specific File the citation is from. + type: string + required: + - file_id + start_index: + type: integer + minimum: 0 + end_index: + type: integer + minimum: 0 required: - - object - - data - - first_id - - last_id - - has_more + - type + - text + - file_citation + - start_index + - end_index - DeleteVectorStoreFileResponse: + MessageContentTextAnnotationsFilePathObject: + title: File path type: object + description: A URL for the file that's generated when the assistant used the `code_interpreter` tool to generate a file. properties: - id: + type: + description: Always `file_path`. type: string - deleted: - type: boolean - object: + enum: ["file_path"] + text: + description: The text in the message content that needs to be replaced. type: string - enum: [vector_store.file.deleted] + file_path: + type: object + properties: + file_id: + description: The ID of the file that was generated. 
+ type: string + required: + - file_id + start_index: + type: integer + minimum: 0 + end_index: + type: integer + minimum: 0 required: - - id - - object - - deleted + - type + - text + - file_path + - start_index + - end_index - VectorStoreFileBatchObject: + MessageDeltaContentTextObject: + title: Text type: object - title: Vector store file batch - description: A batch of files attached to a vector store. + description: The text content that is part of a message. properties: - id: - description: The identifier, which can be referenced in API endpoints. + index: + type: integer + description: The index of the content part in the message. + type: + description: Always `text`. type: string - object: - description: The object type, which is always `vector_store.file_batch`. + enum: ["text"] + text: + type: object + properties: + value: + description: The data that makes up the text. + type: string + annotations: + type: array + items: + oneOf: + - $ref: "#/components/schemas/MessageDeltaContentTextAnnotationsFileCitationObject" + - $ref: "#/components/schemas/MessageDeltaContentTextAnnotationsFilePathObject" + x-oaiExpandable: true + required: + - index + - type + + MessageDeltaContentRefusalObject: + title: Refusal + type: object + description: The refusal content that is part of a message. + properties: + index: + type: integer + description: The index of the refusal part in the message. + type: + description: Always `refusal`. type: string - enum: ["vector_store.files_batch"] + enum: ["refusal"] + refusal: + type: string + required: + - index + - type + + + MessageDeltaContentTextAnnotationsFileCitationObject: + title: File citation + type: object + description: A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. Generated when the assistant uses the "file_search" tool to search files. 
+ properties: + index: + type: integer + description: The index of the annotation in the text content part. + type: + description: Always `file_citation`. + type: string + enum: ["file_citation"] + text: + description: The text in the message content that needs to be replaced. + type: string + file_citation: + type: object + properties: + file_id: + description: The ID of the specific File the citation is from. + type: string + quote: + description: The specific quote in the file. + type: string + start_index: + type: integer + minimum: 0 + end_index: + type: integer + minimum: 0 + required: + - index + - type + + MessageDeltaContentTextAnnotationsFilePathObject: + title: File path + type: object + description: A URL for the file that's generated when the assistant used the `code_interpreter` tool to generate a file. + properties: + index: + type: integer + description: The index of the annotation in the text content part. + type: + description: Always `file_path`. + type: string + enum: ["file_path"] + text: + description: The text in the message content that needs to be replaced. + type: string + file_path: + type: object + properties: + file_id: + description: The ID of the file that was generated. + type: string + start_index: + type: integer + minimum: 0 + end_index: + type: integer + minimum: 0 + required: + - index + - type + + RunStepObject: + type: object + title: Run steps + description: | + Represents a step in execution of a run. + properties: + id: + description: The identifier of the run step, which can be referenced in API endpoints. + type: string + object: + description: The object type, which is always `thread.run.step`. + type: string + enum: ["thread.run.step"] created_at: - description: The Unix timestamp (in seconds) for when the vector store files batch was created. + description: The Unix timestamp (in seconds) for when the run step was created. 
type: integer - vector_store_id: - description: The ID of the [vector store](/docs/api-reference/vector-stores/object) that the [File](/docs/api-reference/files) is attached to. + assistant_id: + description: The ID of the [assistant](/docs/api-reference/assistants) associated with the run step. + type: string + thread_id: + description: The ID of the [thread](/docs/api-reference/threads) that was run. + type: string + run_id: + description: The ID of the [run](/docs/api-reference/runs) that this run step is a part of. type: string + type: + description: The type of run step, which can be either `message_creation` or `tool_calls`. + type: string + enum: ["message_creation", "tool_calls"] status: - description: The status of the vector store files batch, which can be either `in_progress`, `completed`, `cancelled` or `failed`. + description: The status of the run step, which can be either `in_progress`, `cancelled`, `failed`, `completed`, or `expired`. type: string - enum: ["in_progress", "completed", "cancelled", "failed"] - file_counts: + enum: ["in_progress", "cancelled", "failed", "completed", "expired"] + step_details: + type: object + description: The details of the run step. + oneOf: + - $ref: "#/components/schemas/RunStepDetailsMessageCreationObject" + - $ref: "#/components/schemas/RunStepDetailsToolCallsObject" + x-oaiExpandable: true + last_error: type: object + description: The last error associated with this run step. Will be `null` if there are no errors. + nullable: true properties: - in_progress: - description: The number of files that are currently being processed. - type: integer - completed: - description: The number of files that have been processed. - type: integer - failed: - description: The number of files that have failed to process. - type: integer - cancelled: - description: The number of files that where cancelled. - type: integer - total: - description: The total number of files. 
- type: integer + code: + type: string + description: One of `server_error` or `rate_limit_exceeded`. + enum: ["server_error", "rate_limit_exceeded"] + message: + type: string + description: A human-readable description of the error. required: - - in_progress - - completed - - cancelled - - failed - - total + - code + - message + expired_at: + description: The Unix timestamp (in seconds) for when the run step expired. A step is considered expired if the parent run is expired. + type: integer + nullable: true + cancelled_at: + description: The Unix timestamp (in seconds) for when the run step was cancelled. + type: integer + nullable: true + failed_at: + description: The Unix timestamp (in seconds) for when the run step failed. + type: integer + nullable: true + completed_at: + description: The Unix timestamp (in seconds) for when the run step completed. + type: integer + nullable: true + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true + usage: + $ref: "#/components/schemas/RunStepCompletionUsage" required: - id - object - created_at - - vector_store_id + - assistant_id + - thread_id + - run_id + - type - status - - file_counts + - step_details + - last_error + - expired_at + - cancelled_at + - failed_at + - completed_at + - metadata + - usage x-oaiMeta: - name: The vector store files batch object + name: The run step object beta: true - example: | - { - "id": "vsfb_123", - "object": "vector_store.files_batch", - "created_at": 1698107661, - "vector_store_id": "vs_abc123", - "status": "completed", - "file_counts": { - "in_progress": 0, - "completed": 100, - "failed": 0, - "cancelled": 0, - "total": 100 - } - } + example: *run_step_object_example - CreateVectorStoreFileBatchRequest: + RunStepDeltaObject: type: object - additionalProperties: false + title: Run step delta object + description: | + Represents a run step delta i.e. any changed fields on a run step during streaming. 
properties: - file_ids: - description: A list of [File](/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. - type: array - minItems: 1 - maxItems: 500 - items: - type: string - chunking_strategy: - $ref: "#/components/schemas/ChunkingStrategyRequestParam" + id: + description: The identifier of the run step, which can be referenced in API endpoints. + type: string + object: + description: The object type, which is always `thread.run.step.delta`. + type: string + enum: ["thread.run.step.delta"] + delta: + description: The delta containing the fields that have changed on the run step. + type: object + properties: + step_details: + type: object + description: The details of the run step. + oneOf: + - $ref: "#/components/schemas/RunStepDeltaStepDetailsMessageCreationObject" + - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsObject" + x-oaiExpandable: true required: - - file_ids + - id + - object + - delta + x-oaiMeta: + name: The run step delta object + beta: true + example: | + { + "id": "step_123", + "object": "thread.run.step.delta", + "delta": { + "step_details": { + "type": "tool_calls", + "tool_calls": [ + { + "index": 0, + "id": "call_123", + "type": "code_interpreter", + "code_interpreter": { "input": "", "outputs": [] } + } + ] + } + } + } - AssistantStreamEvent: - description: | - Represents an event emitted when streaming a Run. 
+ ListRunStepsResponse: + properties: + object: + type: string + example: "list" + data: + type: array + items: + $ref: "#/components/schemas/RunStepObject" + first_id: + type: string + example: "step_abc123" + last_id: + type: string + example: "step_abc456" + has_more: + type: boolean + example: false + required: + - object + - data + - first_id + - last_id + - has_more - Each event in a server-sent events stream has an `event` and `data` property: + RunStepDetailsMessageCreationObject: + title: Message creation + type: object + description: Details of the message creation by the run step. + properties: + type: + description: Always `message_creation`. + type: string + enum: ["message_creation"] + message_creation: + type: object + properties: + message_id: + type: string + description: The ID of the message that was created by this run step. + required: + - message_id + required: + - type + - message_creation - ``` - event: thread.created - data: {"id": "thread_123", "object": "thread", ...} - ``` + RunStepDeltaStepDetailsMessageCreationObject: + title: Message creation + type: object + description: Details of the message creation by the run step. + properties: + type: + description: Always `message_creation`. + type: string + enum: ["message_creation"] + message_creation: + type: object + properties: + message_id: + type: string + description: The ID of the message that was created by this run step. + required: + - type - We emit events whenever a new object is created, transitions to a new state, or is being - streamed in parts (deltas). For example, we emit `thread.run.created` when a new run - is created, `thread.run.completed` when a run completes, and so on. When an Assistant chooses - to create a message during a run, we emit a `thread.message.created event`, a - `thread.message.in_progress` event, many `thread.message.delta` events, and finally a - `thread.message.completed` event. 
+ RunStepDetailsToolCallsObject: + title: Tool calls + type: object + description: Details of the tool call. + properties: + type: + description: Always `tool_calls`. + type: string + enum: ["tool_calls"] + tool_calls: + type: array + description: | + An array of tool calls the run step was involved in. These can be associated with one of three types of tools: `code_interpreter`, `file_search`, or `function`. + items: + oneOf: + - $ref: "#/components/schemas/RunStepDetailsToolCallsCodeObject" + - $ref: "#/components/schemas/RunStepDetailsToolCallsFileSearchObject" + - $ref: "#/components/schemas/RunStepDetailsToolCallsFunctionObject" + x-oaiExpandable: true + required: + - type + - tool_calls - We may add additional events over time, so we recommend handling unknown events gracefully - in your code. See the [Assistants API quickstart](/docs/assistants/overview) to learn how to - integrate the Assistants API with streaming. - oneOf: - - $ref: "#/components/schemas/ThreadStreamEvent" - - $ref: "#/components/schemas/RunStreamEvent" - - $ref: "#/components/schemas/RunStepStreamEvent" - - $ref: "#/components/schemas/MessageStreamEvent" - - $ref: "#/components/schemas/ErrorEvent" - - $ref: "#/components/schemas/DoneEvent" - x-oaiMeta: - name: Assistant stream events - beta: true + RunStepDeltaStepDetailsToolCallsObject: + title: Tool calls + type: object + description: Details of the tool call. + properties: + type: + description: Always `tool_calls`. + type: string + enum: ["tool_calls"] + tool_calls: + type: array + description: | + An array of tool calls the run step was involved in. These can be associated with one of three types of tools: `code_interpreter`, `file_search`, or `function`. 
+ items: + oneOf: + - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsCodeObject" + - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsFileSearchObject" + - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsFunctionObject" + x-oaiExpandable: true + required: + - type - ThreadStreamEvent: - oneOf: - - type: object - properties: - event: - type: string - enum: ["thread.created"] - data: - $ref: "#/components/schemas/ThreadObject" - required: - - event - - data - description: Occurs when a new [thread](/docs/api-reference/threads/object) is created. - x-oaiMeta: - dataDescription: "`data` is a [thread](/docs/api-reference/threads/object)" + RunStepDetailsToolCallsCodeObject: + title: Code Interpreter tool call + type: object + description: Details of the Code Interpreter tool call the run step was involved in. + properties: + id: + type: string + description: The ID of the tool call. + type: + type: string + description: The type of tool call. This is always going to be `code_interpreter` for this type of tool call. + enum: ["code_interpreter"] + code_interpreter: + type: object + description: The Code Interpreter tool call definition. + required: + - input + - outputs + properties: + input: + type: string + description: The input to the Code Interpreter tool call. + outputs: + type: array + description: The outputs from the Code Interpreter tool call. Code Interpreter can output one or more items, including text (`logs`) or images (`image`). Each of these are represented by a different object type. 
+ items: + type: object + oneOf: + - $ref: "#/components/schemas/RunStepDetailsToolCallsCodeOutputLogsObject" + - $ref: "#/components/schemas/RunStepDetailsToolCallsCodeOutputImageObject" + x-oaiExpandable: true + required: + - id + - type + - code_interpreter - RunStreamEvent: - oneOf: - - type: object - properties: - event: - type: string - enum: ["thread.run.created"] - data: - $ref: "#/components/schemas/RunObject" - required: - - event - - data - description: Occurs when a new [run](/docs/api-reference/runs/object) is created. - x-oaiMeta: - dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" - - type: object - properties: - event: - type: string - enum: ["thread.run.queued"] - data: - $ref: "#/components/schemas/RunObject" - required: - - event - - data - description: Occurs when a [run](/docs/api-reference/runs/object) moves to a `queued` status. - x-oaiMeta: - dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" - - type: object - properties: - event: - type: string - enum: ["thread.run.in_progress"] - data: - $ref: "#/components/schemas/RunObject" - required: - - event - - data - description: Occurs when a [run](/docs/api-reference/runs/object) moves to an `in_progress` status. - x-oaiMeta: - dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" - - type: object - properties: - event: - type: string - enum: ["thread.run.requires_action"] - data: - $ref: "#/components/schemas/RunObject" - required: - - event + RunStepDeltaStepDetailsToolCallsCodeObject: + title: Code interpreter tool call + type: object + description: Details of the Code Interpreter tool call the run step was involved in. + properties: + index: + type: integer + description: The index of the tool call in the tool calls array. + id: + type: string + description: The ID of the tool call. + type: + type: string + description: The type of tool call. This is always going to be `code_interpreter` for this type of tool call. 
+ enum: ["code_interpreter"] + code_interpreter: + type: object + description: The Code Interpreter tool call definition. + properties: + input: + type: string + description: The input to the Code Interpreter tool call. + outputs: + type: array + description: The outputs from the Code Interpreter tool call. Code Interpreter can output one or more items, including text (`logs`) or images (`image`). Each of these are represented by a different object type. + items: + type: object + oneOf: + - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject" + - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsCodeOutputImageObject" + x-oaiExpandable: true + required: + - index + - type + + RunStepDetailsToolCallsCodeOutputLogsObject: + title: Code Interpreter log output + type: object + description: Text output from the Code Interpreter tool call as part of a run step. + properties: + type: + description: Always `logs`. + type: string + enum: ["logs"] + logs: + type: string + description: The text output from the Code Interpreter tool call. + required: + - type + - logs + + RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject: + title: Code interpreter log output + type: object + description: Text output from the Code Interpreter tool call as part of a run step. + properties: + index: + type: integer + description: The index of the output in the outputs array. + type: + description: Always `logs`. + type: string + enum: ["logs"] + logs: + type: string + description: The text output from the Code Interpreter tool call. + required: + - index + - type + + RunStepDetailsToolCallsCodeOutputImageObject: + title: Code Interpreter image output + type: object + properties: + type: + description: Always `image`. + type: string + enum: ["image"] + image: + type: object + properties: + file_id: + description: The [file](/docs/api-reference/files) ID of the image. 
+ type: string + required: + - file_id + required: + - type + - image + + RunStepDeltaStepDetailsToolCallsCodeOutputImageObject: + title: Code interpreter image output + type: object + properties: + index: + type: integer + description: The index of the output in the outputs array. + type: + description: Always `image`. + type: string + enum: ["image"] + image: + type: object + properties: + file_id: + description: The [file](/docs/api-reference/files) ID of the image. + type: string + required: + - index + - type + + RunStepDetailsToolCallsFileSearchObject: + title: File search tool call + type: object + properties: + id: + type: string + description: The ID of the tool call object. + type: + type: string + description: The type of tool call. This is always going to be `file_search` for this type of tool call. + enum: ["file_search"] + file_search: + type: object + description: For now, this is always going to be an empty object. + x-oaiTypeLabel: map + required: + - id + - type + - file_search + + RunStepDeltaStepDetailsToolCallsFileSearchObject: + title: File search tool call + type: object + properties: + index: + type: integer + description: The index of the tool call in the tool calls array. + id: + type: string + description: The ID of the tool call object. + type: + type: string + description: The type of tool call. This is always going to be `file_search` for this type of tool call. + enum: ["file_search"] + file_search: + type: object + description: For now, this is always going to be an empty object. + x-oaiTypeLabel: map + required: + - index + - type + - file_search + + RunStepDetailsToolCallsFunctionObject: + type: object + title: Function tool call + properties: + id: + type: string + description: The ID of the tool call object. + type: + type: string + description: The type of tool call. This is always going to be `function` for this type of tool call. 
+ enum: ["function"] + function: + type: object + description: The definition of the function that was called. + properties: + name: + type: string + description: The name of the function. + arguments: + type: string + description: The arguments passed to the function. + output: + type: string + description: The output of the function. This will be `null` if the outputs have not been [submitted](/docs/api-reference/runs/submitToolOutputs) yet. + nullable: true + required: + - name + - arguments + - output + required: + - id + - type + - function + + RunStepDeltaStepDetailsToolCallsFunctionObject: + type: object + title: Function tool call + properties: + index: + type: integer + description: The index of the tool call in the tool calls array. + id: + type: string + description: The ID of the tool call object. + type: + type: string + description: The type of tool call. This is always going to be `function` for this type of tool call. + enum: ["function"] + function: + type: object + description: The definition of the function that was called. + properties: + name: + type: string + description: The name of the function. + arguments: + type: string + description: The arguments passed to the function. + output: + type: string + description: The output of the function. This will be `null` if the outputs have not been [submitted](/docs/api-reference/runs/submitToolOutputs) yet. + nullable: true + required: + - index + - type + + VectorStoreExpirationAfter: + type: object + title: Vector store expiration policy + description: The expiration policy for a vector store. + properties: + anchor: + description: "Anchor timestamp after which the expiration policy applies. Supported anchors: `last_active_at`." + type: string + enum: ["last_active_at"] + days: + description: The number of days after the anchor time that the vector store will expire. 
+ type: integer + minimum: 1 + maximum: 365 + required: + - anchor + - days + + VectorStoreObject: + type: object + title: Vector store + description: A vector store is a collection of processed files can be used by the `file_search` tool. + properties: + id: + description: The identifier, which can be referenced in API endpoints. + type: string + object: + description: The object type, which is always `vector_store`. + type: string + enum: ["vector_store"] + created_at: + description: The Unix timestamp (in seconds) for when the vector store was created. + type: integer + name: + description: The name of the vector store. + type: string + usage_bytes: + description: The total number of bytes used by the files in the vector store. + type: integer + file_counts: + type: object + properties: + in_progress: + description: The number of files that are currently being processed. + type: integer + completed: + description: The number of files that have been successfully processed. + type: integer + failed: + description: The number of files that have failed to process. + type: integer + cancelled: + description: The number of files that were cancelled. + type: integer + total: + description: The total number of files. + type: integer + required: + - in_progress + - completed + - failed + - cancelled + - total + status: + description: The status of the vector store, which can be either `expired`, `in_progress`, or `completed`. A status of `completed` indicates that the vector store is ready for use. + type: string + enum: ["expired", "in_progress", "completed"] + expires_after: + $ref: "#/components/schemas/VectorStoreExpirationAfter" + expires_at: + description: The Unix timestamp (in seconds) for when the vector store will expire. + type: integer + nullable: true + last_active_at: + description: The Unix timestamp (in seconds) for when the vector store was last active. 
+ type: integer + nullable: true + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true + required: + - id + - object + - usage_bytes + - created_at + - status + - last_active_at + - name + - file_counts + - metadata + x-oaiMeta: + name: The vector store object + beta: true + example: | + { + "id": "vs_123", + "object": "vector_store", + "created_at": 1698107661, + "usage_bytes": 123456, + "last_active_at": 1698107661, + "name": "my_vector_store", + "status": "completed", + "file_counts": { + "in_progress": 0, + "completed": 100, + "cancelled": 0, + "failed": 0, + "total": 100 + }, + "metadata": {}, + "last_used_at": 1698107661 + } + + CreateVectorStoreRequest: + type: object + additionalProperties: false + properties: + file_ids: + description: A list of [File](/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. + type: array + maxItems: 500 + items: + type: string + name: + description: The name of the vector store. + type: string + expires_after: + $ref: "#/components/schemas/VectorStoreExpirationAfter" + chunking_strategy: + type: object + description: The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. Only applicable if `file_ids` is non-empty. + oneOf: + - $ref: "#/components/schemas/AutoChunkingStrategyRequestParam" + - $ref: "#/components/schemas/StaticChunkingStrategyRequestParam" + x-oaiExpandable: true + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true + + UpdateVectorStoreRequest: + type: object + additionalProperties: false + properties: + name: + description: The name of the vector store. 
+ type: string + nullable: true + expires_after: + $ref: "#/components/schemas/VectorStoreExpirationAfter" + nullable: true + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true + + ListVectorStoresResponse: + properties: + object: + type: string + example: "list" + data: + type: array + items: + $ref: "#/components/schemas/VectorStoreObject" + first_id: + type: string + example: "vs_abc123" + last_id: + type: string + example: "vs_abc456" + has_more: + type: boolean + example: false + required: + - object + - data + - first_id + - last_id + - has_more + + DeleteVectorStoreResponse: + type: object + properties: + id: + type: string + deleted: + type: boolean + object: + type: string + enum: [vector_store.deleted] + required: + - id + - object + - deleted + + VectorStoreFileObject: + type: object + title: Vector store files + description: A list of files attached to a vector store. + properties: + id: + description: The identifier, which can be referenced in API endpoints. + type: string + object: + description: The object type, which is always `vector_store.file`. + type: string + enum: ["vector_store.file"] + usage_bytes: + description: The total vector store usage in bytes. Note that this may be different from the original file size. + type: integer + created_at: + description: The Unix timestamp (in seconds) for when the vector store file was created. + type: integer + vector_store_id: + description: The ID of the [vector store](/docs/api-reference/vector-stores/object) that the [File](/docs/api-reference/files) is attached to. + type: string + status: + description: The status of the vector store file, which can be either `in_progress`, `completed`, `cancelled`, or `failed`. The status `completed` indicates that the vector store file is ready for use. 
+ type: string + enum: ["in_progress", "completed", "cancelled", "failed"] + last_error: + type: object + description: The last error associated with this vector store file. Will be `null` if there are no errors. + nullable: true + properties: + code: + type: string + description: One of `server_error` or `rate_limit_exceeded`. + enum: + [ + "server_error", + "unsupported_file", + "invalid_file", + ] + message: + type: string + description: A human-readable description of the error. + required: + - code + - message + chunking_strategy: + type: object + description: The strategy used to chunk the file. + oneOf: + - $ref: "#/components/schemas/StaticChunkingStrategyResponseParam" + - $ref: "#/components/schemas/OtherChunkingStrategyResponseParam" + x-oaiExpandable: true + required: + - id + - object + - usage_bytes + - created_at + - vector_store_id + - status + - last_error + x-oaiMeta: + name: The vector store file object + beta: true + example: | + { + "id": "file-abc123", + "object": "vector_store.file", + "usage_bytes": 1234, + "created_at": 1698107661, + "vector_store_id": "vs_abc123", + "status": "completed", + "last_error": null, + "chunking_strategy": { + "type": "static", + "static": { + "max_chunk_size_tokens": 800, + "chunk_overlap_tokens": 400 + } + } + } + + OtherChunkingStrategyResponseParam: + type: object + title: Other Chunking Strategy + description: This is returned when the chunking strategy is unknown. Typically, this is because the file was indexed before the `chunking_strategy` concept was introduced in the API. + additionalProperties: false + properties: + type: + type: string + description: Always `other`. + enum: ["other"] + required: + - type + + StaticChunkingStrategyResponseParam: + type: object + title: Static Chunking Strategy + additionalProperties: false + properties: + type: + type: string + description: Always `static`. 
+ enum: ["static"] + static: + $ref: "#/components/schemas/StaticChunkingStrategy" + required: + - type + - static + + StaticChunkingStrategy: + type: object + additionalProperties: false + properties: + max_chunk_size_tokens: + type: integer + minimum: 100 + maximum: 4096 + description: The maximum number of tokens in each chunk. The default value is `800`. The minimum value is `100` and the maximum value is `4096`. + chunk_overlap_tokens: + type: integer + description: | + The number of tokens that overlap between chunks. The default value is `400`. + + Note that the overlap must not exceed half of `max_chunk_size_tokens`. + required: + - max_chunk_size_tokens + - chunk_overlap_tokens + + AutoChunkingStrategyRequestParam: + type: object + title: Auto Chunking Strategy + description: The default strategy. This strategy currently uses a `max_chunk_size_tokens` of `800` and `chunk_overlap_tokens` of `400`. + additionalProperties: false + properties: + type: + type: string + description: Always `auto`. + enum: ["auto"] + required: + - type + + StaticChunkingStrategyRequestParam: + type: object + title: Static Chunking Strategy + additionalProperties: false + properties: + type: + type: string + description: Always `static`. + enum: ["static"] + static: + $ref: "#/components/schemas/StaticChunkingStrategy" + required: + - type + - static + + ChunkingStrategyRequestParam: + type: object + description: The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. + oneOf: + - $ref: "#/components/schemas/AutoChunkingStrategyRequestParam" + - $ref: "#/components/schemas/StaticChunkingStrategyRequestParam" + x-oaiExpandable: true + + CreateVectorStoreFileRequest: + type: object + additionalProperties: false + properties: + file_id: + description: A [File](/docs/api-reference/files) ID that the vector store should use. Useful for tools like `file_search` that can access files. 
+ type: string + chunking_strategy: + $ref: "#/components/schemas/ChunkingStrategyRequestParam" + required: + - file_id + + ListVectorStoreFilesResponse: + properties: + object: + type: string + example: "list" + data: + type: array + items: + $ref: "#/components/schemas/VectorStoreFileObject" + first_id: + type: string + example: "file-abc123" + last_id: + type: string + example: "file-abc456" + has_more: + type: boolean + example: false + required: + - object + - data + - first_id + - last_id + - has_more + + DeleteVectorStoreFileResponse: + type: object + properties: + id: + type: string + deleted: + type: boolean + object: + type: string + enum: [vector_store.file.deleted] + required: + - id + - object + - deleted + + VectorStoreFileBatchObject: + type: object + title: Vector store file batch + description: A batch of files attached to a vector store. + properties: + id: + description: The identifier, which can be referenced in API endpoints. + type: string + object: + description: The object type, which is always `vector_store.file_batch`. + type: string + enum: ["vector_store.files_batch"] + created_at: + description: The Unix timestamp (in seconds) for when the vector store files batch was created. + type: integer + vector_store_id: + description: The ID of the [vector store](/docs/api-reference/vector-stores/object) that the [File](/docs/api-reference/files) is attached to. + type: string + status: + description: The status of the vector store files batch, which can be either `in_progress`, `completed`, `cancelled` or `failed`. + type: string + enum: ["in_progress", "completed", "cancelled", "failed"] + file_counts: + type: object + properties: + in_progress: + description: The number of files that are currently being processed. + type: integer + completed: + description: The number of files that have been processed. + type: integer + failed: + description: The number of files that have failed to process. 
+ type: integer + cancelled: + description: The number of files that where cancelled. + type: integer + total: + description: The total number of files. + type: integer + required: + - in_progress + - completed + - cancelled + - failed + - total + required: + - id + - object + - created_at + - vector_store_id + - status + - file_counts + x-oaiMeta: + name: The vector store files batch object + beta: true + example: | + { + "id": "vsfb_123", + "object": "vector_store.files_batch", + "created_at": 1698107661, + "vector_store_id": "vs_abc123", + "status": "completed", + "file_counts": { + "in_progress": 0, + "completed": 100, + "failed": 0, + "cancelled": 0, + "total": 100 + } + } + + CreateVectorStoreFileBatchRequest: + type: object + additionalProperties: false + properties: + file_ids: + description: A list of [File](/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. + type: array + minItems: 1 + maxItems: 500 + items: + type: string + chunking_strategy: + $ref: "#/components/schemas/ChunkingStrategyRequestParam" + required: + - file_ids + + AssistantStreamEvent: + description: | + Represents an event emitted when streaming a Run. + + Each event in a server-sent events stream has an `event` and `data` property: + + ``` + event: thread.created + data: {"id": "thread_123", "object": "thread", ...} + ``` + + We emit events whenever a new object is created, transitions to a new state, or is being + streamed in parts (deltas). For example, we emit `thread.run.created` when a new run + is created, `thread.run.completed` when a run completes, and so on. When an Assistant chooses + to create a message during a run, we emit a `thread.message.created event`, a + `thread.message.in_progress` event, many `thread.message.delta` events, and finally a + `thread.message.completed` event. + + We may add additional events over time, so we recommend handling unknown events gracefully + in your code. 
See the [Assistants API quickstart](/docs/assistants/overview) to learn how to + integrate the Assistants API with streaming. + oneOf: + - $ref: "#/components/schemas/ThreadStreamEvent" + - $ref: "#/components/schemas/RunStreamEvent" + - $ref: "#/components/schemas/RunStepStreamEvent" + - $ref: "#/components/schemas/MessageStreamEvent" + - $ref: "#/components/schemas/ErrorEvent" + - $ref: "#/components/schemas/DoneEvent" + x-oaiMeta: + name: Assistant stream events + beta: true + + ThreadStreamEvent: + oneOf: + - type: object + properties: + event: + type: string + enum: ["thread.created"] + data: + $ref: "#/components/schemas/ThreadObject" + required: + - event + - data + description: Occurs when a new [thread](/docs/api-reference/threads/object) is created. + x-oaiMeta: + dataDescription: "`data` is a [thread](/docs/api-reference/threads/object)" + + RunStreamEvent: + oneOf: + - type: object + properties: + event: + type: string + enum: ["thread.run.created"] + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a new [run](/docs/api-reference/runs/object) is created. + x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + - type: object + properties: + event: + type: string + enum: ["thread.run.queued"] + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a [run](/docs/api-reference/runs/object) moves to a `queued` status. + x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + - type: object + properties: + event: + type: string + enum: ["thread.run.in_progress"] + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a [run](/docs/api-reference/runs/object) moves to an `in_progress` status. 
+ x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + - type: object + properties: + event: + type: string + enum: ["thread.run.requires_action"] + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a [run](/docs/api-reference/runs/object) moves to a `requires_action` status. + x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + - type: object + properties: + event: + type: string + enum: ["thread.run.completed"] + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a [run](/docs/api-reference/runs/object) is completed. + x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + - type: object + properties: + event: + type: string + enum: [ "thread.run.incomplete" ] + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a [run](/docs/api-reference/runs/object) ends with status `incomplete`. + x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + - type: object + properties: + event: + type: string + enum: ["thread.run.failed"] + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a [run](/docs/api-reference/runs/object) fails. + x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + - type: object + properties: + event: + type: string + enum: ["thread.run.cancelling"] + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a [run](/docs/api-reference/runs/object) moves to a `cancelling` status. 
+ x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + - type: object + properties: + event: + type: string + enum: ["thread.run.cancelled"] + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a [run](/docs/api-reference/runs/object) is cancelled. + x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + - type: object + properties: + event: + type: string + enum: ["thread.run.expired"] + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a [run](/docs/api-reference/runs/object) expires. + x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + + RunStepStreamEvent: + oneOf: + - type: object + properties: + event: + type: string + enum: ["thread.run.step.created"] + data: + $ref: "#/components/schemas/RunStepObject" + required: + - event + - data + description: Occurs when a [run step](/docs/api-reference/runs/step-object) is created. + x-oaiMeta: + dataDescription: "`data` is a [run step](/docs/api-reference/runs/step-object)" + - type: object + properties: + event: + type: string + enum: ["thread.run.step.in_progress"] + data: + $ref: "#/components/schemas/RunStepObject" + required: + - event + - data + description: Occurs when a [run step](/docs/api-reference/runs/step-object) moves to an `in_progress` state. + x-oaiMeta: + dataDescription: "`data` is a [run step](/docs/api-reference/runs/step-object)" + - type: object + properties: + event: + type: string + enum: ["thread.run.step.delta"] + data: + $ref: "#/components/schemas/RunStepDeltaObject" + required: + - event + - data + description: Occurs when parts of a [run step](/docs/api-reference/runs/step-object) are being streamed. 
+ x-oaiMeta: + dataDescription: "`data` is a [run step delta](/docs/api-reference/assistants-streaming/run-step-delta-object)" + - type: object + properties: + event: + type: string + enum: ["thread.run.step.completed"] + data: + $ref: "#/components/schemas/RunStepObject" + required: + - event + - data + description: Occurs when a [run step](/docs/api-reference/runs/step-object) is completed. + x-oaiMeta: + dataDescription: "`data` is a [run step](/docs/api-reference/runs/step-object)" + - type: object + properties: + event: + type: string + enum: ["thread.run.step.failed"] + data: + $ref: "#/components/schemas/RunStepObject" + required: + - event - data - description: Occurs when a [run](/docs/api-reference/runs/object) moves to a `requires_action` status. + description: Occurs when a [run step](/docs/api-reference/runs/step-object) fails. x-oaiMeta: - dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + dataDescription: "`data` is a [run step](/docs/api-reference/runs/step-object)" - type: object properties: event: type: string - enum: ["thread.run.completed"] + enum: ["thread.run.step.cancelled"] data: - $ref: "#/components/schemas/RunObject" + $ref: "#/components/schemas/RunStepObject" required: - event - data - description: Occurs when a [run](/docs/api-reference/runs/object) is completed. + description: Occurs when a [run step](/docs/api-reference/runs/step-object) is cancelled. x-oaiMeta: - dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + dataDescription: "`data` is a [run step](/docs/api-reference/runs/step-object)" - type: object properties: event: type: string - enum: [ "thread.run.incomplete" ] + enum: ["thread.run.step.expired"] data: - $ref: "#/components/schemas/RunObject" + $ref: "#/components/schemas/RunStepObject" required: - event - data - description: Occurs when a [run](/docs/api-reference/runs/object) ends with status `incomplete`. 
+ description: Occurs when a [run step](/docs/api-reference/runs/step-object) expires. x-oaiMeta: - dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + dataDescription: "`data` is a [run step](/docs/api-reference/runs/step-object)" + + MessageStreamEvent: + oneOf: - type: object properties: event: type: string - enum: ["thread.run.failed"] + enum: ["thread.message.created"] data: - $ref: "#/components/schemas/RunObject" + $ref: "#/components/schemas/MessageObject" required: - event - data - description: Occurs when a [run](/docs/api-reference/runs/object) fails. + description: Occurs when a [message](/docs/api-reference/messages/object) is created. x-oaiMeta: - dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + dataDescription: "`data` is a [message](/docs/api-reference/messages/object)" - type: object properties: event: type: string - enum: ["thread.run.cancelling"] + enum: ["thread.message.in_progress"] data: - $ref: "#/components/schemas/RunObject" + $ref: "#/components/schemas/MessageObject" required: - event - data - description: Occurs when a [run](/docs/api-reference/runs/object) moves to a `cancelling` status. + description: Occurs when a [message](/docs/api-reference/messages/object) moves to an `in_progress` state. x-oaiMeta: - dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + dataDescription: "`data` is a [message](/docs/api-reference/messages/object)" - type: object properties: event: type: string - enum: ["thread.run.cancelled"] + enum: ["thread.message.delta"] data: - $ref: "#/components/schemas/RunObject" + $ref: "#/components/schemas/MessageDeltaObject" required: - event - data - description: Occurs when a [run](/docs/api-reference/runs/object) is cancelled. + description: Occurs when parts of a [Message](/docs/api-reference/messages/object) are being streamed. 
x-oaiMeta: - dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + dataDescription: "`data` is a [message delta](/docs/api-reference/assistants-streaming/message-delta-object)" - type: object properties: event: type: string - enum: ["thread.run.expired"] + enum: ["thread.message.completed"] data: - $ref: "#/components/schemas/RunObject" + $ref: "#/components/schemas/MessageObject" required: - event - data - description: Occurs when a [run](/docs/api-reference/runs/object) expires. + description: Occurs when a [message](/docs/api-reference/messages/object) is completed. x-oaiMeta: - dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + dataDescription: "`data` is a [message](/docs/api-reference/messages/object)" + - type: object + properties: + event: + type: string + enum: ["thread.message.incomplete"] + data: + $ref: "#/components/schemas/MessageObject" + required: + - event + - data + description: Occurs when a [message](/docs/api-reference/messages/object) ends before it is completed. + x-oaiMeta: + dataDescription: "`data` is a [message](/docs/api-reference/messages/object)" + + ErrorEvent: + type: object + properties: + event: + type: string + enum: ["error"] + data: + $ref: "#/components/schemas/Error" + required: + - event + - data + description: Occurs when an [error](/docs/guides/error-codes/api-errors) occurs. This can happen due to an internal server error or a timeout. + x-oaiMeta: + dataDescription: "`data` is an [error](/docs/guides/error-codes/api-errors)" + + DoneEvent: + type: object + properties: + event: + type: string + enum: ["done"] + data: + type: string + enum: ["[DONE]"] + required: + - event + - data + description: Occurs when a stream ends. + x-oaiMeta: + dataDescription: "`data` is `[DONE]`" + + Batch: + type: object + properties: + id: + type: string + object: + type: string + enum: [batch] + description: The object type, which is always `batch`. 
+ endpoint: + type: string + description: The OpenAI API endpoint used by the batch. + + errors: + type: object + properties: + object: + type: string + description: The object type, which is always `list`. + data: + type: array + items: + type: object + properties: + code: + type: string + description: An error code identifying the error type. + message: + type: string + description: A human-readable message providing more details about the error. + param: + type: string + description: The name of the parameter that caused the error, if applicable. + nullable: true + line: + type: integer + description: The line number of the input file where the error occurred, if applicable. + nullable: true + input_file_id: + type: string + description: The ID of the input file for the batch. + completion_window: + type: string + description: The time frame within which the batch should be processed. + status: + type: string + description: The current status of the batch. + enum: + - validating + - failed + - in_progress + - finalizing + - completed + - expired + - cancelling + - cancelled + output_file_id: + type: string + description: The ID of the file containing the outputs of successfully executed requests. + error_file_id: + type: string + description: The ID of the file containing the outputs of requests with errors. + created_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch was created. + in_progress_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch started processing. + expires_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch will expire. + finalizing_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch started finalizing. + completed_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch was completed. 
+ failed_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch failed. + expired_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch expired. + cancelling_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch started cancelling. + cancelled_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch was cancelled. + request_counts: + type: object + properties: + total: + type: integer + description: Total number of requests in the batch. + completed: + type: integer + description: Number of requests that have been completed successfully. + failed: + type: integer + description: Number of requests that have failed. + required: + - total + - completed + - failed + description: The request counts for different statuses within the batch. + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true + required: + - id + - object + - endpoint + - input_file_id + - completion_window + - status + - created_at + x-oaiMeta: + name: The batch object + example: *batch_object + + BatchRequestInput: + type: object + description: The per-line object of the batch input file + properties: + custom_id: + type: string + description: A developer-provided per-request id that will be used to match outputs to inputs. Must be unique for each request in a batch. + method: + type: string + enum: ["POST"] + description: The HTTP method to be used for the request. Currently only `POST` is supported. + url: + type: string + description: The OpenAI API relative URL to be used for the request. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. 
+ x-oaiMeta: + name: The request input object + example: | + {"custom_id": "request-1", "method": "POST", "url": "/v1/chat/completions", "body": {"model": "gpt-4o-mini", "messages": [{"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "What is 2+2?"}]}} + + BatchRequestOutput: + type: object + description: The per-line object of the batch output and error files + properties: + id: + type: string + custom_id: + type: string + description: A developer-provided per-request id that will be used to match outputs to inputs. + response: + type: object + nullable: true + properties: + status_code: + type: integer + description: The HTTP status code of the response + request_id: + type: string + description: An unique identifier for the OpenAI API request. Please include this request ID when contacting support. + body: + type: object + x-oaiTypeLabel: map + description: The JSON body of the response + error: + type: object + nullable: true + description: For requests that failed with a non-HTTP error, this will contain more information on the cause of the failure. + properties: + code: + type: string + description: A machine-readable error code. + message: + type: string + description: A human-readable error message. 
+ x-oaiMeta: + name: The request output object + example: | + {"id": "batch_req_wnaDys", "custom_id": "request-2", "response": {"status_code": 200, "request_id": "req_c187b3", "body": {"id": "chatcmpl-9758Iw", "object": "chat.completion", "created": 1711475054, "model": "gpt-4o-mini", "choices": [{"index": 0, "message": {"role": "assistant", "content": "2 + 2 equals 4."}, "finish_reason": "stop"}], "usage": {"prompt_tokens": 24, "completion_tokens": 15, "total_tokens": 39}, "system_fingerprint": null}}, "error": null} + + ListBatchesResponse: + type: object + properties: + data: + type: array + items: + $ref: "#/components/schemas/Batch" + first_id: + type: string + example: "batch_abc123" + last_id: + type: string + example: "batch_abc456" + has_more: + type: boolean + object: + type: string + enum: [list] + required: + - object + - data + - has_more + + AuditLogActorServiceAccount: + type: object + description: The service account that performed the audit logged action. + properties: + id: + type: string + description: The service account id. + + AuditLogActorUser: + type: object + description: The user who performed the audit logged action. + properties: + id: + type: string + description: The user id. + email: + type: string + description: The user email. + + AuditLogActorApiKey: + type: object + description: The API Key used to perform the audit logged action. + properties: + id: + type: string + description: The tracking id of the API key. + type: + type: string + description: The type of API key. Can be either `user` or `service_account`. + enum: ["user", "service_account"] + user: + $ref: "#/components/schemas/AuditLogActorUser" + service_account: + $ref: "#/components/schemas/AuditLogActorServiceAccount" + + AuditLogActorSession: + type: object + description: The session in which the audit logged action was performed. 
+ properties: + user: + $ref: "#/components/schemas/AuditLogActorUser" + ip_address: + type: string + description: The IP address from which the action was performed. + + AuditLogActor: + type: object + description: The actor who performed the audit logged action. + properties: + type: + type: string + description: The type of actor. Is either `session` or `api_key`. + enum: ["session", "api_key"] + session: + type: object + $ref: "#/components/schemas/AuditLogActorSession" + api_key: + type: object + $ref: "#/components/schemas/AuditLogActorApiKey" + + + AuditLogEventType: + type: string + description: The event type. + x-oaiExpandable: true + enum: + - api_key.created + - api_key.updated + - api_key.deleted + - invite.sent + - invite.accepted + - invite.deleted + - login.succeeded + - login.failed + - logout.succeeded + - logout.failed + - organization.updated + - project.created + - project.updated + - project.archived + - service_account.created + - service_account.updated + - service_account.deleted + - user.added + - user.updated + - user.deleted + + AuditLog: + type: object + description: A log of a user action or configuration change within this organization. + properties: + id: + type: string + description: The ID of this log. + type: + $ref: "#/components/schemas/AuditLogEventType" + + effective_at: + type: integer + description: The Unix timestamp (in seconds) of the event. + project: + type: object + description: The project that the action was scoped to. Absent for actions not scoped to projects. + properties: + id: + type: string + description: The project ID. + name: + type: string + description: The project title. + actor: + $ref: "#/components/schemas/AuditLogActor" + api_key.created: + type: object + description: The details for events with this `type`. + properties: + id: + type: string + description: The tracking ID of the API key. + data: + type: object + description: The payload used to create the API key. 
+ properties: + scopes: + type: array + items: + type: string + description: A list of scopes allowed for the API key, e.g. `["api.model.request"]` + api_key.updated: + type: object + description: The details for events with this `type`. + properties: + id: + type: string + description: The tracking ID of the API key. + changes_requested: + type: object + description: The payload used to update the API key. + properties: + scopes: + type: array + items: + type: string + description: A list of scopes allowed for the API key, e.g. `["api.model.request"]` + api_key.deleted: + type: object + description: The details for events with this `type`. + properties: + id: + type: string + description: The tracking ID of the API key. + invite.sent: + type: object + description: The details for events with this `type`. + properties: + id: + type: string + description: The ID of the invite. + data: + type: object + description: The payload used to create the invite. + properties: + email: + type: string + description: The email invited to the organization. + role: + type: string + description: The role the email was invited to be. Is either `owner` or `member`. + invite.accepted: + type: object + description: The details for events with this `type`. + properties: + id: + type: string + description: The ID of the invite. + invite.deleted: + type: object + description: The details for events with this `type`. + properties: + id: + type: string + description: The ID of the invite. + login.failed: + type: object + description: The details for events with this `type`. + properties: + error_code: + type: string + description: The error code of the failure. + error_message: + type: string + description: The error message of the failure. + logout.failed: + type: object + description: The details for events with this `type`. + properties: + error_code: + type: string + description: The error code of the failure. 
+ error_message: + type: string + description: The error message of the failure. + organization.updated: + type: object + description: The details for events with this `type`. + properties: + id: + type: string + description: The organization ID. + changes_requested: + type: object + description: The payload used to update the organization settings. + properties: + title: + type: string + description: The organization title. + description: + type: string + description: The organization description. + name: + type: string + description: The organization name. + settings: + type: object + properties: + threads_ui_visibility: + type: string + description: Visibility of the threads page which shows messages created with the Assistants API and Playground. One of `ANY_ROLE`, `OWNERS`, or `NONE`. + usage_dashboard_visibility: + type: string + description: Visibility of the usage dashboard which shows activity and costs for your organization. One of `ANY_ROLE` or `OWNERS`. + project.created: + type: object + description: The details for events with this `type`. + properties: + id: + type: string + description: The project ID. + data: + type: object + description: The payload used to create the project. + properties: + name: + type: string + description: The project name. + title: + type: string + description: The title of the project as seen on the dashboard. + project.updated: + type: object + description: The details for events with this `type`. + properties: + id: + type: string + description: The project ID. + changes_requested: + type: object + description: The payload used to update the project. + properties: + title: + type: string + description: The title of the project as seen on the dashboard. + project.archived: + type: object + description: The details for events with this `type`. + properties: + id: + type: string + description: The project ID. + service_account.created: + type: object + description: The details for events with this `type`. 
+ properties: + id: + type: string + description: The service account ID. + data: + type: object + description: The payload used to create the service account. + properties: + role: + type: string + description: The role of the service account. Is either `owner` or `member`. + service_account.updated: + type: object + description: The details for events with this `type`. + properties: + id: + type: string + description: The service account ID. + changes_requested: + type: object + description: The payload used to updated the service account. + properties: + role: + type: string + description: The role of the service account. Is either `owner` or `member`. + service_account.deleted: + type: object + description: The details for events with this `type`. + properties: + id: + type: string + description: The service account ID. + user.added: + type: object + description: The details for events with this `type`. + properties: + id: + type: string + description: The user ID. + data: + type: object + description: The payload used to add the user to the project. + properties: + role: + type: string + description: The role of the user. Is either `owner` or `member`. + user.updated: + type: object + description: The details for events with this `type`. + properties: + id: + type: string + description: The project ID. + changes_requested: + type: object + description: The payload used to update the user. + properties: + role: + type: string + description: The role of the user. Is either `owner` or `member`. + user.deleted: + type: object + description: The details for events with this `type`. + properties: + id: + type: string + description: The user ID. 
+ required: + - id + - type + - effective_at + - actor + x-oaiMeta: + name: The audit log object + example: | + { + "id": "req_xxx_20240101", + "type": "api_key.created", + "effective_at": 1720804090, + "actor": { + "type": "session", + "session": { + "user": { + "id": "user-xxx", + "email": "user@example.com" + }, + "ip_address": "127.0.0.1", + "user_agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36" + } + }, + "api_key.created": { + "id": "key_xxxx", + "data": { + "scopes": ["resource.operation"] + } + } + } + + ListAuditLogsResponse: + type: object + properties: + object: + type: string + enum: [list] + data: + type: array + items: + $ref: "#/components/schemas/AuditLog" + first_id: + type: string + example: "audit_log-defb456h8dks" + last_id: + type: string + example: "audit_log-hnbkd8s93s" + has_more: + type: boolean + + required: + - object + - data + - first_id + - last_id + - has_more + + Invite: + type: object + description: Represents an individual `invite` to the organization. + properties: + object: + type: string + enum: [organization.invite] + description: The object type, which is always `organization.invite` + id: + type: string + description: The identifier, which can be referenced in API endpoints + email: + type: string + description: The email address of the individual to whom the invite was sent + role: + type: string + enum: [owner, reader] + description: "`owner` or `reader`" + status: + type: string + enum: [accepted, expired, pending] + description: "`accepted`,`expired`, or `pending`" + invited_at: + type: integer + description: The Unix timestamp (in seconds) of when the invite was sent. + expires_at: + type: integer + description: The Unix timestamp (in seconds) of when the invite expires. + accepted_at: + type: integer + description: The Unix timestamp (in seconds) of when the invite was accepted. 
+ + required: + - object + - id + - email + - role + - status + - invited_at + - expires_at + x-oaiMeta: + name: The invite object + example: | + { + "object": "organization.invite", + "id": "invite-abc", + "email": "user@example.com", + "role": "owner", + "status": "accepted", + "invited_at": 1711471533, + "expires_at": 1711471533, + "accepted_at": 1711471533 + } - RunStepStreamEvent: - oneOf: - - type: object - properties: - event: - type: string - enum: ["thread.run.step.created"] - data: - $ref: "#/components/schemas/RunStepObject" - required: - - event - - data - description: Occurs when a [run step](/docs/api-reference/runs/step-object) is created. - x-oaiMeta: - dataDescription: "`data` is a [run step](/docs/api-reference/runs/step-object)" - - type: object - properties: - event: - type: string - enum: ["thread.run.step.in_progress"] - data: - $ref: "#/components/schemas/RunStepObject" - required: - - event - - data - description: Occurs when a [run step](/docs/api-reference/runs/step-object) moves to an `in_progress` state. - x-oaiMeta: - dataDescription: "`data` is a [run step](/docs/api-reference/runs/step-object)" - - type: object - properties: - event: - type: string - enum: ["thread.run.step.delta"] - data: - $ref: "#/components/schemas/RunStepDeltaObject" - required: - - event - - data - description: Occurs when parts of a [run step](/docs/api-reference/runs/step-object) are being streamed. - x-oaiMeta: - dataDescription: "`data` is a [run step delta](/docs/api-reference/assistants-streaming/run-step-delta-object)" - - type: object - properties: - event: - type: string - enum: ["thread.run.step.completed"] - data: - $ref: "#/components/schemas/RunStepObject" - required: - - event - - data - description: Occurs when a [run step](/docs/api-reference/runs/step-object) is completed. 
- x-oaiMeta: - dataDescription: "`data` is a [run step](/docs/api-reference/runs/step-object)" - - type: object - properties: - event: - type: string - enum: ["thread.run.step.failed"] - data: - $ref: "#/components/schemas/RunStepObject" - required: - - event - - data - description: Occurs when a [run step](/docs/api-reference/runs/step-object) fails. - x-oaiMeta: - dataDescription: "`data` is a [run step](/docs/api-reference/runs/step-object)" - - type: object - properties: - event: - type: string - enum: ["thread.run.step.cancelled"] - data: - $ref: "#/components/schemas/RunStepObject" - required: - - event - - data - description: Occurs when a [run step](/docs/api-reference/runs/step-object) is cancelled. - x-oaiMeta: - dataDescription: "`data` is a [run step](/docs/api-reference/runs/step-object)" - - type: object - properties: - event: - type: string - enum: ["thread.run.step.expired"] - data: - $ref: "#/components/schemas/RunStepObject" - required: - - event - - data - description: Occurs when a [run step](/docs/api-reference/runs/step-object) expires. - x-oaiMeta: - dataDescription: "`data` is a [run step](/docs/api-reference/runs/step-object)" + InviteListResponse: + type: object + properties: + object: + type: string + enum: [list] + description: The object type, which is always `list` + data: + type: array + items: + $ref: '#/components/schemas/Invite' + first_id: + type: string + description: The first `invite_id` in the retrieved `list` + last_id: + type: string + description: The last `invite_id` in the retrieved `list` + has_more: + type: boolean + description: The `has_more` property is used for pagination to indicate there are additional results. 
+ required: + - object + - data - MessageStreamEvent: - oneOf: - - type: object - properties: - event: - type: string - enum: ["thread.message.created"] - data: - $ref: "#/components/schemas/MessageObject" - required: - - event - - data - description: Occurs when a [message](/docs/api-reference/messages/object) is created. - x-oaiMeta: - dataDescription: "`data` is a [message](/docs/api-reference/messages/object)" - - type: object - properties: - event: - type: string - enum: ["thread.message.in_progress"] - data: - $ref: "#/components/schemas/MessageObject" - required: - - event - - data - description: Occurs when a [message](/docs/api-reference/messages/object) moves to an `in_progress` state. - x-oaiMeta: - dataDescription: "`data` is a [message](/docs/api-reference/messages/object)" - - type: object - properties: - event: - type: string - enum: ["thread.message.delta"] - data: - $ref: "#/components/schemas/MessageDeltaObject" - required: - - event - - data - description: Occurs when parts of a [Message](/docs/api-reference/messages/object) are being streamed. - x-oaiMeta: - dataDescription: "`data` is a [message delta](/docs/api-reference/assistants-streaming/message-delta-object)" - - type: object - properties: - event: - type: string - enum: ["thread.message.completed"] - data: - $ref: "#/components/schemas/MessageObject" - required: - - event - - data - description: Occurs when a [message](/docs/api-reference/messages/object) is completed. - x-oaiMeta: - dataDescription: "`data` is a [message](/docs/api-reference/messages/object)" - - type: object - properties: - event: - type: string - enum: ["thread.message.incomplete"] - data: - $ref: "#/components/schemas/MessageObject" - required: - - event - - data - description: Occurs when a [message](/docs/api-reference/messages/object) ends before it is completed. 
- x-oaiMeta: - dataDescription: "`data` is a [message](/docs/api-reference/messages/object)" + InviteRequest: + type: object + properties: + email: + type: string + description: "Send an email to this address" + role: + type: string + enum: [reader, owner] + description: "`owner` or `reader`" + required: + - email + - role + + InviteDeleteResponse: + type: object + properties: + object: + type: string + enum: [organization.invite.deleted] + description: The object type, which is always `organization.invite.deleted` + id: + type: string + deleted: + type: boolean + required: + - object + - id + - deleted + + User: + type: object + description: Represents an individual `user` within an organization. + properties: + object: + type: string + enum: [organization.user] + description: The object type, which is always `organization.user` + id: + type: string + description: The identifier, which can be referenced in API endpoints + name: + type: string + description: The name of the user + email: + type: string + description: The email address of the user + role: + type: string + enum: [owner, reader] + description: "`owner` or `reader`" + added_at: + type: integer + description: The Unix timestamp (in seconds) of when the user was added. 
+ required: + - object + - id + - name + - email + - role + - added_at + x-oaiMeta: + name: The user object + example: | + { + "object": "organization.user", + "id": "user_abc", + "name": "First Last", + "email": "user@example.com", + "role": "owner", + "added_at": 1711471533 + } + + UserListResponse: + type: object + properties: + object: + type: string + enum: [list] + data: + type: array + items: + $ref: '#/components/schemas/User' + first_id: + type: string + last_id: + type: string + has_more: + type: boolean + required: + - object + - data + - first_id + - last_id + - has_more + + UserRoleUpdateRequest: + type: object + properties: + role: + type: string + enum: [owner,reader] + description: "`owner` or `reader`" + required: + - role + + UserDeleteResponse: + type: object + properties: + object: + type: string + enum: [organization.user.deleted] + id: + type: string + deleted: + type: boolean + required: + - object + - id + - deleted - ErrorEvent: + Project: type: object + description: Represents an individual project. properties: - event: + id: type: string - enum: ["error"] + description: The identifier, which can be referenced in API endpoints + object: + type: string + enum: [organization.project] + description: The object type, which is always `organization.project` + name: + type: string + description: The name of the project. This appears in reporting. + created_at: + type: integer + description: The Unix timestamp (in seconds) of when the project was created. + archived_at: + type: integer + nullable: true + description: The Unix timestamp (in seconds) of when the project was archived or `null`. 
+ status: + type: string + enum: [active, archived] + description: "`active` or `archived`" + required: + - id + - object + - name + - created_at + - status + x-oaiMeta: + name: The project object + example: | + { + "id": "proj_abc", + "object": "organization.project", + "name": "Project example", + "created_at": 1711471533, + "archived_at": null, + "status": "active" + } + + ProjectListResponse: + type: object + properties: + object: + type: string + enum: [list] data: - $ref: "#/components/schemas/Error" + type: array + items: + $ref: '#/components/schemas/Project' + first_id: + type: string + last_id: + type: string + has_more: + type: boolean required: - - event + - object - data - description: Occurs when an [error](/docs/guides/error-codes/api-errors) occurs. This can happen due to an internal server error or a timeout. + - first_id + - last_id + - has_more + + ProjectCreateRequest: + type: object + properties: + name: + type: string + description: The friendly name of the project, this name appears in reports. + required: + - name + + ProjectUpdateRequest: + type: object + properties: + name: + type: string + description: The updated name of the project, this name appears in reports. + required: + - name + + DefaultProjectErrorResponse: + type: object + properties: + code: + type: integer + message: + type: string + required: + - code + - message + + ProjectUser: + type: object + description: Represents an individual user in a project. 
+ properties: + object: + type: string + enum: [organization.project.user] + description: The object type, which is always `organization.project.user` + id: + type: string + description: The identifier, which can be referenced in API endpoints + name: + type: string + description: The name of the user + email: + type: string + description: The email address of the user + role: + type: string + enum: [owner, member] + description: "`owner` or `member`" + added_at: + type: integer + description: The Unix timestamp (in seconds) of when the project was added. + + required: + - object + - id + - name + - email + - role + - added_at x-oaiMeta: - dataDescription: "`data` is an [error](/docs/guides/error-codes/api-errors)" + name: The project user object + example: | + { + "object": "organization.project.user", + "id": "user_abc", + "name": "First Last", + "email": "user@example.com", + "role": "owner", + "added_at": 1711471533 + } - DoneEvent: + ProjectUserListResponse: type: object properties: - event: + object: type: string - enum: ["done"] data: + type: array + items: + $ref: '#/components/schemas/ProjectUser' + first_id: type: string - enum: ["[DONE]"] + last_id: + type: string + has_more: + type: boolean required: - - event + - object - data - description: Occurs when a stream ends. - x-oaiMeta: - dataDescription: "`data` is `[DONE]`" + - first_id + - last_id + - has_more - Batch: + ProjectUserCreateRequest: type: object properties: - id: + user_id: type: string - object: + description: The ID of the user. + role: type: string - enum: [batch] - description: The object type, which is always `batch`. - endpoint: + enum: [owner, member] + description: "`owner` or `member`" + required: + - user_id + - role + + ProjectUserUpdateRequest: + type: object + properties: + role: type: string - description: The OpenAI API endpoint used by the batch. 
+ enum: [owner, member] + description: "`owner` or `member`" + required: + - role - errors: - type: object - properties: - object: - type: string - description: The object type, which is always `list`. - data: - type: array - items: - type: object - properties: - code: - type: string - description: An error code identifying the error type. - message: - type: string - description: A human-readable message providing more details about the error. - param: - type: string - description: The name of the parameter that caused the error, if applicable. - nullable: true - line: - type: integer - description: The line number of the input file where the error occurred, if applicable. - nullable: true - input_file_id: + ProjectUserDeleteResponse: + type: object + properties: + object: type: string - description: The ID of the input file for the batch. - completion_window: + enum: [organization.project.user.deleted] + id: type: string - description: The time frame within which the batch should be processed. - status: + deleted: + type: boolean + required: + - object + - id + - deleted + + ProjectServiceAccount: + type: object + description: Represents an individual service account in a project. + properties: + object: type: string - description: The current status of the batch. - enum: - - validating - - failed - - in_progress - - finalizing - - completed - - expired - - cancelling - - cancelled - output_file_id: + enum: [organization.project.service_account] + description: The object type, which is always `organization.project.service_account` + id: type: string - description: The ID of the file containing the outputs of successfully executed requests. - error_file_id: + description: The identifier, which can be referenced in API endpoints + name: type: string - description: The ID of the file containing the outputs of requests with errors. 
+ description: The name of the service account + role: + type: string + enum: [owner, member] + description: "`owner` or `member`" created_at: type: integer - description: The Unix timestamp (in seconds) for when the batch was created. - in_progress_at: - type: integer - description: The Unix timestamp (in seconds) for when the batch started processing. - expires_at: - type: integer - description: The Unix timestamp (in seconds) for when the batch will expire. - finalizing_at: - type: integer - description: The Unix timestamp (in seconds) for when the batch started finalizing. - completed_at: - type: integer - description: The Unix timestamp (in seconds) for when the batch was completed. - failed_at: - type: integer - description: The Unix timestamp (in seconds) for when the batch failed. - expired_at: - type: integer - description: The Unix timestamp (in seconds) for when the batch expired. - cancelling_at: - type: integer - description: The Unix timestamp (in seconds) for when the batch started cancelling. - cancelled_at: + description: The Unix timestamp (in seconds) of when the service account was created + required: + - object + - id + - name + - role + - created_at + x-oaiMeta: + name: The project service account object + example: | + { + "object": "organization.project.service_account", + "id": "svc_acct_abc", + "name": "Service Account", + "role": "owner", + "created_at": 1711471533 + } + + ProjectServiceAccountListResponse: + type: object + properties: + object: + type: string + enum: [list] + data: + type: array + items: + $ref: '#/components/schemas/ProjectServiceAccount' + first_id: + type: string + last_id: + type: string + has_more: + type: boolean + required: + - object + - data + - first_id + - last_id + - has_more + + ProjectServiceAccountCreateRequest: + type: object + properties: + name: + type: string + description: The name of the service account being created. 
+ required: + - name + + ProjectServiceAccountCreateResponse: + type: object + properties: + object: + type: string + enum: [organization.project.service_account] + id: + type: string + name: + type: string + role: + type: string + enum: [member] + description: Service accounts can only have one role of type `member` + created_at: type: integer - description: The Unix timestamp (in seconds) for when the batch was cancelled. - request_counts: - type: object - properties: - total: - type: integer - description: Total number of requests in the batch. - completed: - type: integer - description: Number of requests that have been completed successfully. - failed: - type: integer - description: Number of requests that have failed. - required: - - total - - completed - - failed - description: The request counts for different statuses within the batch. - metadata: - description: *metadata_description - type: object - x-oaiTypeLabel: map - nullable: true + api_key: + $ref: '#/components/schemas/ProjectServiceAccountApiKey' required: - - id - object - - endpoint - - input_file_id - - completion_window - - status + - id + - name + - role - created_at - x-oaiMeta: - name: The batch object - example: *batch_object + - api_key - BatchRequestInput: + ProjectServiceAccountApiKey: type: object - description: The per-line object of the batch input file properties: - custom_id: + object: type: string - description: A developer-provided per-request id that will be used to match outputs to inputs. Must be unique for each request in a batch. - method: + enum: [organization.project.service_account.api_key] + description: The object type, which is always `organization.project.service_account.api_key` + + value: type: string - enum: ["POST"] - description: The HTTP method to be used for the request. Currently only `POST` is supported. - url: + name: type: string - description: The OpenAI API relative URL to be used for the request. 
Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. - x-oaiMeta: - name: The request input object - example: | - {"custom_id": "request-1", "method": "POST", "url": "/v1/chat/completions", "body": {"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "What is 2+2?"}]}} + created_at: + type: integer + id: + type: string + required: + - object + - value + - name + - created_at + - id - BatchRequestOutput: + ProjectServiceAccountDeleteResponse: type: object - description: The per-line object of the batch output and error files properties: + object: + type: string + enum: [organization.project.service_account.deleted] id: type: string - custom_id: + deleted: + type: boolean + required: + - object + - id + - deleted + + ProjectApiKey: + type: object + description: Represents an individual API key in a project. + properties: + object: type: string - description: A developer-provided per-request id that will be used to match outputs to inputs. - response: - type: object - nullable: true - properties: - status_code: - type: integer - description: The HTTP status code of the response - request_id: - type: string - description: An unique identifier for the OpenAI API request. Please include this request ID when contacting support. 
- body: - type: object - x-oaiTypeLabel: map - description: The JSON body of the response - error: + enum: [organization.project.api_key] + description: The object type, which is always `organization.project.api_key` + redacted_value: + type: string + description: The redacted value of the API key + name: + type: string + description: The name of the API key + created_at: + type: integer + description: The Unix timestamp (in seconds) of when the API key was created + id: + type: string + description: The identifier, which can be referenced in API endpoints + owner: type: object - nullable: true - description: For requests that failed with a non-HTTP error, this will contain more information on the cause of the failure. properties: - code: - type: string - description: A machine-readable error code. - message: + type: type: string - description: A human-readable error message. + enum: [user, service_account] + description: "`user` or `service_account`" + user: + $ref: '#/components/schemas/ProjectUser' + service_account: + $ref: '#/components/schemas/ProjectServiceAccount' + required: + - object + - redacted_value + - name + - created_at + - id + - owner x-oaiMeta: - name: The request output object + name: The project API key object example: | - {"id": "batch_req_wnaDys", "custom_id": "request-2", "response": {"status_code": 200, "request_id": "req_c187b3", "body": {"id": "chatcmpl-9758Iw", "object": "chat.completion", "created": 1711475054, "model": "gpt-3.5-turbo", "choices": [{"index": 0, "message": {"role": "assistant", "content": "2 + 2 equals 4."}, "finish_reason": "stop"}], "usage": {"prompt_tokens": 24, "completion_tokens": 15, "total_tokens": 39}, "system_fingerprint": null}}, "error": null} + { + "object": "organization.project.api_key", + "redacted_value": "sk-abc...def", + "name": "My API Key", + "created_at": 1711471533, + "id": "key_abc", + "owner": { + "type": "user", + "user": { + "object": "organization.project.user", + "id": "user_abc", + "name": 
"First Last", + "email": "user@example.com", + "role": "owner", + "added_at": 1711471533 + } + } + } - ListBatchesResponse: + ProjectApiKeyListResponse: type: object properties: + object: + type: string + enum: [list] data: type: array items: - $ref: "#/components/schemas/Batch" + $ref: '#/components/schemas/ProjectApiKey' first_id: type: string - example: "batch_abc123" last_id: type: string - example: "batch_abc456" has_more: type: boolean - object: - type: string - enum: [list] required: - object - data + - first_id + - last_id - has_more + ProjectApiKeyDeleteResponse: + type: object + properties: + object: + type: string + enum: [organization.project.api_key.deleted] + id: + type: string + deleted: + type: boolean + required: + - object + - id + - deleted + security: - ApiKeyAuth: [] @@ -13381,6 +16410,8 @@ x-oaiMeta: title: Endpoints - id: assistants title: Assistants + - id: administration + title: Administration - id: legacy title: Legacy groups: @@ -13553,6 +16584,30 @@ x-oaiMeta: - type: object key: OpenAIFile path: object + - id: uploads + title: Uploads + description: | + Allows you to upload large files in multiple parts. + navigationGroup: endpoints + sections: + - type: endpoint + key: createUpload + path: create + - type: endpoint + key: addUploadPart + path: add-part + - type: endpoint + key: completeUpload + path: complete + - type: endpoint + key: cancelUpload + path: cancel + - type: object + key: Upload + path: object + - type: object + key: UploadPart + path: part-object - id: images title: Images description: | @@ -13605,6 +16660,8 @@ x-oaiMeta: - type: object key: CreateModerationResponse path: object + + - id: assistants title: Assistants beta: true @@ -13832,6 +16889,175 @@ x-oaiMeta: - type: object key: AssistantStreamEvent path: events + + - id: administration + title: Overview + description: | + Programmatically manage your organization. 
+ + The Audit Logs endpoint provides a log of all actions taken in the + organization for security and monitoring purposes. + + To access these endpoints please generate an Admin API Key through the [API Platform Organization overview](/organization/admin-keys). Admin API keys cannot be used for non-administration endpoints. + + For best practices on setting up your organization, please refer to this [guide](/docs/guides/production-best-practices/setting-up-your-organization) + navigationGroup: administration + + - id: invite + title: Invites + description: Invite and manage invitations for an organization. Invited users are automatically added to the Default project. + navigationGroup: administration + sections: + - type: endpoint + key: list-invites + path: list + - type: endpoint + key: inviteUser + path: create + - type: endpoint + key: retrieve-invite + path: retrieve + - type: endpoint + key: delete-invite + path: delete + - type: object + key: Invite + path: object + + - id: users + title: Users + description: | + Manage users and their role in an organization. Users will be automatically added to the Default project. + navigationGroup: administration + sections: + - type: endpoint + key: list-users + path: list + - type: endpoint + key: modify-user + path: modify + - type: endpoint + key: retrieve-user + path: retrieve + - type: endpoint + key: delete-user + path: delete + - type: object + key: User + path: object + + - id: projects + title: Projects + description: | + Manage the projects within an organization. Includes creation, updating, and archiving of projects. + The Default project cannot be modified or archived.
+ navigationGroup: administration + sections: + - type: endpoint + key: list-projects + path: list + - type: endpoint + key: create-project + path: create + - type: endpoint + key: retrieve-project + path: retrieve + - type: endpoint + key: modify-project + path: modify + - type: endpoint + key: archive-project + path: archive + - type: object + key: Project + path: object + + - id: project-users + title: Project Users + description: | + Manage users within a project, including adding, updating roles, and removing users. + Users cannot be removed from the Default project, unless they are being removed from the organization. + navigationGroup: administration + sections: + - type: endpoint + key: list-project-users + path: list + - type: endpoint + key: create-project-user + path: create + - type: endpoint + key: retrieve-project-user + path: retrieve + - type: endpoint + key: modify-project-user + path: modify + - type: endpoint + key: delete-project-user + path: delete + - type: object + key: ProjectUser + path: object + + - id: project-service-accounts + title: Project Service Accounts + description: | + Manage service accounts within a project. A service account is a bot user that is not associated with a user. + If a user leaves an organization, their keys and membership in projects will no longer work. Service accounts + do not have this limitation. However, service accounts can also be deleted from a project. + navigationGroup: administration + sections: + - type: endpoint + key: list-project-service-accounts + path: list + - type: endpoint + key: create-project-service-account + path: create + - type: endpoint + key: retrieve-project-service-account + path: retrieve + - type: endpoint + key: delete-project-service-account + path: delete + - type: object + key: ProjectServiceAccount + path: object + + - id: project-api-keys + title: Project API Keys + description: | + Manage API keys for a given project. Supports listing and deleting keys for users.
+ This API does not allow issuing keys for users, as users need to authorize themselves to generate keys. + navigationGroup: administration + sections: + - type: endpoint + key: list-project-api-keys + path: list + - type: endpoint + key: retrieve-project-api-key + path: retrieve + - type: endpoint + key: delete-project-api-key + path: delete + - type: object + key: ProjectApiKey + path: object + + - id: audit-logs + title: Audit Logs + description: | + Logs of user actions and configuration changes within this organization. + + To log events, you must activate logging in the [Organization Settings](/settings/organization/general). + Once activated, for security reasons, logging cannot be deactivated. + navigationGroup: administration + sections: + - type: endpoint + key: list-audit-logs + path: list + - type: object + key: AuditLog + path: object + - id: completions title: Completions legacy: true