Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -139,8 +139,8 @@ model ChatCompletionsOptions {
""")
@projectedName("json", "model")
@projectedName("csharp", "InternalNonAzureModelName")
"model"?: string;
};
`model`?: string;
}

@doc("""
The representation of a single prompt completion as part of an overall chat completions request.
Expand All @@ -158,7 +158,7 @@ model ChatChoice {

@doc("The reason that this chat completions choice completed its generated.")
@projectedName("json", "finish_reason")
finishReason: CompletionsFinishReason | null;
finishReason?: CompletionsFinishReason;

@doc("The delta message content for a streaming response.")
@projectedName("json", "delta")
Expand Down Expand Up @@ -197,4 +197,4 @@ model ChatCompletions {
""")
@projectedName("json", "usage")
usage: CompletionsUsage;
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -136,8 +136,8 @@ model CompletionsOptions {
""")
@projectedName("json", "model")
@projectedName("csharp", "InternalNonAzureModelName")
"model"?: string;
};
`model`?: string;
}

@doc("""
Representation of the response data from a completions request.
Expand Down Expand Up @@ -189,11 +189,11 @@ model Choice {
@doc("The log probabilities model for tokens associated with this completions choice.")
@projectedName("json", "logprobs")
@projectedName("csharp", "LogProbabilityModel")
logprobs: CompletionsLogProbabilityModel | null;
logprobs?: CompletionsLogProbabilityModel;

@doc("Reason for finishing")
@projectedName("json", "finish_reason")
finishReason: CompletionsFinishReason | null;
finishReason?: CompletionsFinishReason;
}

alias NullableFloat = float32 | null;
Expand All @@ -220,4 +220,4 @@ model CompletionsLogProbabilityModel {
@projectedName("json", "text_offset")
@projectedName("csharp", "TextOffsets")
textOffset: int32[];
}
}
33 changes: 20 additions & 13 deletions specification/cognitiveservices/OpenAI.Inference/routes.tsp
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
import "@azure-tools/typespec-azure-core";
import "@typespec/rest";
import "@typespec/http";
import "@azure-tools/typespec-client-generator-core";
//import "@azure-tools/typespec-client-generator-core";

import "./models/completions.create.tsp";
import "./models/chat.completions.tsp";
Expand All @@ -10,34 +10,41 @@ import "./models/embeddings.create.tsp";
using TypeSpec.Rest;
using TypeSpec.Http;
using Azure.Core;
using Azure.ClientGenerator.Core;
//using Azure.ClientGenerator.Core;

namespace Azure.OpenAI;

@doc("Return the embeddings for a given prompt.")
@TypeSpec.Rest.actionSeparator("/")
@action("embeddings")
@convenientAPI(true)
op getEmbeddings is Azure.Core.ResourceAction<Deployment, EmbeddingsOptions, Embeddings>;

@action("embeddings") //@convenientAPI(true)
op getEmbeddings is Azure.Core.ResourceAction<
Deployment,
EmbeddingsOptions,
Embeddings
>;

@doc("""
Gets completions for the provided input prompts.
Completions support a wide variety of tasks and generate text that continues from or "completes"
provided prompt data.
""")
@TypeSpec.Rest.actionSeparator("/")
@action("completions")
@convenientAPI(true)
op getCompletions is Azure.Core.ResourceAction<Deployment, CompletionsOptions, Completions>;

@action("completions") //@convenientAPI(true)
op getCompletions is Azure.Core.ResourceAction<
Deployment,
CompletionsOptions,
Completions
>;

@doc("""
Gets chat completions for the provided chat messages.
Completions support a wide variety of tasks and generate text that continues from or "completes"
provided prompt data.
""")
@TypeSpec.Rest.actionSeparator("/")
@action("chat/completions")
@convenientAPI(true)
op getChatCompletions is Azure.Core.ResourceAction<Deployment, ChatCompletionsOptions, ChatCompletions>;
@action("chat/completions") //@convenientAPI(true)
op getChatCompletions is Azure.Core.ResourceAction<
Deployment,
ChatCompletionsOptions,
ChatCompletions
>;
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
emit:
- "@azure-tools/typespec-autorest"
- "@azure-tools/cadl-apiview"
# - "@azure-tools/typespec-autorest"
# - "@azure-tools/cadl-apiview"
Copy link
Copy Markdown
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

There is currently no config for typespec-autorest, so I don't think it will output any swagger? Once we add config, should it output to this folder?

https://github.com/Azure/azure-rest-api-specs/tree/main/specification/cognitiveservices/data-plane/AzureOpenAI/inference

Copy link
Copy Markdown
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

If this started with a new api-version, that would be the case. As it is, this older api-version is very unlikely to match the swagger in that folder (though it would hopefully be semantically equivalent)

Copy link
Copy Markdown
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This change causes the CI pipeline to "pass", but it's only because no emitter is run:

+ pushd specification/cognitiveservices/OpenAI.Inference
+ npx tsp compile .
TypeSpec compiler v0.43.0

Diagnostics were reported during compilation:

warning emitter-not-found: Emitter with name @azure-tools/typespec-csharp is not found.
warning emitter-not-found: Emitter with name @azure-tools/typespec-java is not found.

Found 2 warnings.

No emitter was configured, no output was generated. Use `--emit <emitterName>` to pick emitter or specify it in the typespec config.

Do we want specs in the repo with no emitters configured? What is the scenario for this?

Copy link
Copy Markdown
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

It looks like this is here purely for client generation. If we can agree on where to emit the swagger, it would be good to emit it there

Copy link
Copy Markdown
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I'm fine merging this PR as-is. Later we will need to decide what to do with specs that are client-only (don't emit swagger).

options:
# Uncomment this line and add "@azure-tools/cadl-python" to your package.json to generate Python code
# "@azure-tools/cadl-python":
Expand Down