Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

update openai client to 3.4.1 #388

Merged
merged 2 commits on Sep 5, 2023
Merged
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Next commit
update openai client to 3.4.1
raulraja committed Sep 5, 2023
commit eb53a94ceb33a04db17e548b6a5a06ebf0b86b62
4 changes: 2 additions & 2 deletions gradle/libs.versions.toml
Original file line number Diff line number Diff line change
@@ -33,7 +33,7 @@ pdfbox = "3.0.0"
mysql = "8.0.33"
semverGradle = "0.5.0-rc.5"
scala = "3.3.0"
openai-client-version = "3.3.2"
openai-client-version = "3.4.1"
gpt4all-java = "1.1.5"
ai-djl = "0.23.0"
jackson = "2.15.2"
@@ -144,4 +144,4 @@ semver-gradle = { id="com.javiersc.semver", version.ref="semverGradle" }
suspend-transform-plugin = { id="love.forte.plugin.suspend-transform", version.ref="suspend-transform" }
resources = { id="com.goncalossilva.resources", version.ref="resources-kmp" }
detekt = { id="io.gitlab.arturbosch.detekt", version.ref="detekt"}
node-gradle = { id = "com.github.node-gradle.node", version.ref = "node-gradle" }
node-gradle = { id = "com.github.node-gradle.node", version.ref = "node-gradle" }
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
package com.xebia.functional.xef.conversation.llm.openai

import com.aallam.openai.api.BetaOpenAI
import com.aallam.openai.api.LegacyOpenAI
import com.aallam.openai.api.chat.*
import com.aallam.openai.api.chat.ChatChunk as OpenAIChatChunk
import com.aallam.openai.api.chat.ChatCompletionChunk as OpenAIChatCompletionChunk
@@ -58,9 +59,10 @@ class OpenAIModel(
headers = mapOf("Authorization" to " Bearer $openAI.token")
)

@OptIn(LegacyOpenAI::class)
raulraja marked this conversation as resolved.
Show resolved Hide resolved
override suspend fun createCompletion(request: CompletionRequest): CompletionResult {
fun completionChoice(it: OpenAIChoice): CompletionChoice =
CompletionChoice(it.text, it.index, null, it.finishReason)
CompletionChoice(it.text, it.index, null, it.finishReason.value)

val response = client.completion(toCompletionRequest(request))
return CompletionResult(
@@ -73,7 +75,6 @@ class OpenAIModel(
)
}

@OptIn(BetaOpenAI::class)
override suspend fun createChatCompletion(
request: ChatCompletionRequest
): ChatCompletionResponse {
@@ -86,8 +87,8 @@ class OpenAIModel(

fun toChoice(choice: ChatChoice): Choice =
Choice(
message = choice.message?.let { chatMessage(it) },
finishReason = choice.finishReason,
message = chatMessage(choice.message),
finishReason = choice.finishReason.value,
index = choice.index,
)

@@ -102,7 +103,6 @@ class OpenAIModel(
)
}

@OptIn(BetaOpenAI::class)
override suspend fun createChatCompletions(
request: ChatCompletionRequest
): Flow<ChatCompletionChunk> {
@@ -114,7 +114,7 @@ class OpenAIModel(
)

fun chatChunk(chunk: OpenAIChatChunk): ChatChunk =
ChatChunk(chunk.index, chunk.delta?.let { chatDelta(it) }, chunk.finishReason)
ChatChunk(chunk.index, chatDelta(chunk.delta), chunk.finishReason?.value)

fun chatCompletionChunk(response: OpenAIChatCompletionChunk): ChatCompletionChunk =
ChatCompletionChunk(
@@ -128,7 +128,6 @@ class OpenAIModel(
return client.chatCompletions(toChatCompletionRequest(request)).map { chatCompletionChunk(it) }
}

@OptIn(BetaOpenAI::class)
override suspend fun createChatCompletionWithFunctions(
request: ChatCompletionRequest
): ChatCompletionResponseWithFunctions {
@@ -169,8 +168,8 @@ class OpenAIModel(

fun choiceWithFunctions(choice: ChatChoice): ChoiceWithFunctions =
ChoiceWithFunctions(
message = choice.message?.let { fromOpenAI(it) },
finishReason = choice.finishReason,
message = fromOpenAI(choice.message),
finishReason = choice.finishReason.value,
index = choice.index,
)

@@ -243,7 +242,6 @@ class OpenAIModel(
totalTokens = usage?.totalTokens,
)

@OptIn(BetaOpenAI::class)
private fun toRole(it: ChatRole?) =
when (it) {
ChatRole.User -> Role.USER
@@ -253,15 +251,13 @@ class OpenAIModel(
else -> Role.ASSISTANT
}

@OptIn(BetaOpenAI::class)
private fun fromRole(it: Role) =
when (it) {
Role.USER -> ChatRole.User
Role.ASSISTANT -> ChatRole.Assistant
Role.SYSTEM -> ChatRole.System
}

@OptIn(BetaOpenAI::class)
private fun toChatCompletionRequest(request: ChatCompletionRequest): OpenAIChatCompletionRequest =
chatCompletionRequest {
model = ModelId(request.model)