diff --git a/core/build.gradle.kts b/core/build.gradle.kts
index a1bf05ac9..924d68bfa 100644
--- a/core/build.gradle.kts
+++ b/core/build.gradle.kts
@@ -67,7 +67,7 @@ kotlin {
         api(libs.kotlinx.serialization.json)
         api(libs.ktor.utils)
         api(projects.xefTokenizer)
-
+        implementation(libs.bundles.ktor.client)
         implementation(libs.klogging)
         implementation(libs.uuid)
       }
@@ -87,10 +87,15 @@ kotlin {
         implementation(libs.logback)
         implementation(libs.skrape)
         implementation(libs.rss.reader)
+        api(libs.ktor.client.cio)
       }
     }

-    val jsMain by getting
+    val jsMain by getting {
+      dependencies {
+        api(libs.ktor.client.js)
+      }
+    }

     val jvmTest by getting {
       dependencies {
@@ -98,10 +103,26 @@
       }
     }

-    val linuxX64Main by getting
-    val macosX64Main by getting
-    val macosArm64Main by getting
-    val mingwX64Main by getting
+    val linuxX64Main by getting {
+      dependencies {
+        implementation(libs.ktor.client.cio)
+      }
+    }
+    val macosX64Main by getting {
+      dependencies {
+        implementation(libs.ktor.client.cio)
+      }
+    }
+    val macosArm64Main by getting {
+      dependencies {
+        implementation(libs.ktor.client.cio)
+      }
+    }
+    val mingwX64Main by getting {
+      dependencies {
+        implementation(libs.ktor.client.winhttp)
+      }
+    }
     val linuxX64Test by getting
     val macosX64Test by getting
     val macosArm64Test by getting
diff --git a/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/Chat.kt b/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/Chat.kt
index aa84df05f..c6611f85c 100644
--- a/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/Chat.kt
+++ b/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/Chat.kt
@@ -46,7 +46,7 @@
   ): Flow<String> = flow {
     val memories: List<Memory> = memories(conversationId, context, promptConfiguration)

-    val promptWithContext: String =
+    val promptWithContext: List<Message> =
       createPromptWithContextAwareOfTokens(
         memories = memories,
         ctxInfo = context.similaritySearch(prompt.message, promptConfiguration.docsInContext),
@@ -55,7 +55,7 @@
         minResponseTokens = promptConfiguration.minResponseTokens
       )

-    val messages: List<Message> = messages(memories, promptWithContext)
+    val messages: List<Message> = messagesFromMemory(memories) + promptWithContext

     fun checkTotalLeftChatTokens(): Int {
       val maxContextLength: Int = modelType.maxContextLength
@@ -138,7 +138,7 @@ interface Chat : LLM {

   @AiDsl
   suspend fun promptMessages(
-    prompt: Prompt,
+    messages: List<Message>,
     context: VectorStore,
     conversationId: ConversationId? = null,
    functions: List<CFunction> = emptyList(),
@@ -146,24 +146,14 @@
   ): List<String> {

     val memories: List<Memory> = memories(conversationId, context, promptConfiguration)
-
-    val promptWithContext: String =
-      createPromptWithContextAwareOfTokens(
-        memories = memories,
-        ctxInfo = context.similaritySearch(prompt.message, promptConfiguration.docsInContext),
-        modelType = modelType,
-        prompt = prompt.message,
-        minResponseTokens = promptConfiguration.minResponseTokens
-      )
-
-    val messages: List<Message> = messages(memories, promptWithContext)
+    val allMessages = messagesFromMemory(memories) + messages

     fun checkTotalLeftChatTokens(): Int {
       val maxContextLength: Int = modelType.maxContextLength
-      val messagesTokens: Int = tokensFromMessages(messages)
+      val messagesTokens: Int = tokensFromMessages(allMessages)
       val totalLeftTokens: Int = maxContextLength - messagesTokens
       if (totalLeftTokens < 0) {
-        throw AIError.MessagesExceedMaxTokenLength(messages, messagesTokens, maxContextLength)
+        throw AIError.MessagesExceedMaxTokenLength(allMessages, messagesTokens, maxContextLength)
       }
       return totalLeftTokens
     }
@@ -217,6 +207,29 @@ interface Chat : LLM {
     }
   }

+  @AiDsl
+  suspend fun promptMessages(
+    prompt: Prompt,
+    context: VectorStore,
+    conversationId: ConversationId? = null,
+    functions: List<CFunction> = emptyList(),
+    promptConfiguration: PromptConfiguration = PromptConfiguration.DEFAULTS
+  ): List<String> {
+
+    val memories: List<Memory> = memories(conversationId, context, promptConfiguration)
+
+    val promptWithContext: List<Message> =
+      createPromptWithContextAwareOfTokens(
+        memories = memories,
+        ctxInfo = context.similaritySearch(prompt.message, promptConfiguration.docsInContext),
+        modelType = modelType,
+        prompt = prompt.message,
+        minResponseTokens = promptConfiguration.minResponseTokens
+      )
+
+    return promptMessages(promptWithContext, context, conversationId, functions, promptConfiguration)
+  }
+
   private suspend fun List<ChoiceWithFunctions>.addChoiceWithFunctionsToMemory(
     request: ChatCompletionRequestWithFunctions,
     context: VectorStore,
@@ -274,8 +287,8 @@
     }
   }

-  private fun messages(memories: List<Memory>, promptWithContext: String): List<Message> =
-    memories.map { it.content } + listOf(Message(Role.USER, promptWithContext, Role.USER.name))
+  private fun messagesFromMemory(memories: List<Memory>): List<Message> =
+    memories.map { it.content }

   private suspend fun memories(
     conversationId: ConversationId?,
@@ -288,13 +301,13 @@
       emptyList()
     }

-  private fun createPromptWithContextAwareOfTokens(
+  private suspend fun createPromptWithContextAwareOfTokens(
     memories: List<Memory>,
     ctxInfo: List<String>,
     modelType: ModelType,
     prompt: String,
     minResponseTokens: Int,
-  ): String {
+  ): List<Message> {
     val maxContextLength: Int = modelType.maxContextLength
     val promptTokens: Int = modelType.encoding.countTokens(prompt)
     val memoryTokens = tokensFromMessages(memories.map { it.content })
@@ -311,17 +324,10 @@
       // alternatively we could summarize the context, but that's not implemented yet
       val ctxTruncated: String = modelType.encoding.truncateText(ctx, remainingTokens)

-      """|```Context
-         |${ctxTruncated}
-         |```
-         |The context is related to the question try to answer the `goal` as best as you can
-         |or provide information about the found content
-         |```goal
-         |${prompt}
-         |```
-         |ANSWER:
-         |"""
-        .trimMargin()
-    } else prompt
+      listOf(
+        Message.assistantMessage { "Context: $ctxTruncated" },
+        Message.userMessage { prompt }
+      )
+    } else listOf(Message.userMessage { prompt })
   }
 }
diff --git a/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/ChatWithFunctions.kt b/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/ChatWithFunctions.kt
index 160a42b6e..7284d053f 100644
--- a/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/ChatWithFunctions.kt
+++ b/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/ChatWithFunctions.kt
@@ -7,6 +7,7 @@ import com.xebia.functional.xef.auto.AiDsl
 import com.xebia.functional.xef.auto.PromptConfiguration
 import com.xebia.functional.xef.llm.models.chat.ChatCompletionRequestWithFunctions
 import com.xebia.functional.xef.llm.models.chat.ChatCompletionResponseWithFunctions
+import com.xebia.functional.xef.llm.models.chat.Message
 import com.xebia.functional.xef.llm.models.functions.CFunction
 import com.xebia.functional.xef.llm.models.functions.encodeJsonSchema
 import com.xebia.functional.xef.prompt.Prompt
@@ -45,6 +46,29 @@ interface ChatWithFunctions : Chat {
     promptConfiguration: PromptConfiguration = PromptConfiguration.DEFAULTS,
   ): A = prompt(prompt, context, conversationId, functions, serializer, promptConfiguration)

+  @AiDsl
+  suspend fun <A> prompt(
+    messages: List<Message>,
+    context: VectorStore,
+    serializer: KSerializer<A>,
+    conversationId: ConversationId? = null,
+    functions: List<CFunction> = generateCFunction(serializer.descriptor),
+    promptConfiguration: PromptConfiguration = PromptConfiguration.DEFAULTS,
+  ): A {
+    return tryDeserialize(
+      { json -> Json.decodeFromString(serializer, json) },
+      promptConfiguration.maxDeserializationAttempts
+    ) {
+      promptMessages(
+        messages = messages,
+        context = context,
+        conversationId = conversationId,
+        functions = functions,
+        promptConfiguration
+      )
+    }
+  }
+
   @AiDsl
   suspend fun <A> prompt(
     prompt: Prompt,
diff --git a/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/chat/Message.kt b/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/chat/Message.kt
index 9f6ef6ed6..2a78143bf 100644
--- a/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/chat/Message.kt
+++ b/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/models/chat/Message.kt
@@ -1,3 +1,14 @@
 package com.xebia.functional.xef.llm.models.chat

-data class Message(val role: Role, val content: String, val name: String)
+data class Message(val role: Role, val content: String, val name: String) {
+  companion object {
+    suspend fun systemMessage(message: suspend () -> String) =
+      Message(role = Role.SYSTEM, content = message(), name = Role.SYSTEM.name)
+
+    suspend fun userMessage(message: suspend () -> String) =
+      Message(role = Role.USER, content = message(), name = Role.USER.name)
+
+    suspend fun assistantMessage(message: suspend () -> String) =
+      Message(role = Role.ASSISTANT, content = message(), name = Role.ASSISTANT.name)
+  }
+}
diff --git a/core/src/commonMain/kotlin/com/xebia/functional/xef/prompt/expressions/Expression.kt b/core/src/commonMain/kotlin/com/xebia/functional/xef/prompt/expressions/Expression.kt
new file mode 100644
index 000000000..fce7eec71
--- /dev/null
+++ b/core/src/commonMain/kotlin/com/xebia/functional/xef/prompt/expressions/Expression.kt
@@ -0,0 +1,100 @@
+package com.xebia.functional.xef.prompt.expressions
+
+import com.xebia.functional.xef.auto.CoreAIScope
+import com.xebia.functional.xef.auto.PromptConfiguration
+import com.xebia.functional.xef.llm.ChatWithFunctions
+import com.xebia.functional.xef.llm.models.chat.Message
+import com.xebia.functional.xef.llm.models.chat.Role
+import com.xebia.functional.xef.prompt.experts.ExpertSystem
+import io.github.oshai.kotlinlogging.KLogger
+import io.github.oshai.kotlinlogging.KotlinLogging
+
+class Expression(
+  private val scope: CoreAIScope,
+  private val model: ChatWithFunctions,
+  val block: suspend Expression.() -> Unit
+) {
+
+  private val logger: KLogger = KotlinLogging.logger {}
+
+  private val messages: MutableList<Message> = mutableListOf()
+
+  private val generationKeys: MutableList<String> = mutableListOf()
+
+  suspend fun system(message: suspend () -> String) {
+    messages.add(Message.systemMessage(message))
+  }
+
+  suspend fun user(message: suspend () -> String) {
+    messages.add(Message.userMessage(message))
+  }
+
+  suspend fun assistant(message: suspend () -> String) {
+    messages.add(Message.assistantMessage(message))
+  }
+
+  fun prompt(key: String): String {
+    generationKeys.add(key)
+    return "{{$key}}"
+  }
+
+  suspend fun run(
+    promptConfiguration: PromptConfiguration = PromptConfiguration.DEFAULTS
+  ): ExpressionResult {
+    block()
+    val instructionMessage =
+      Message(
+        role = Role.USER,
+        content =
+          ExpertSystem(
+              system = "You are an expert in replacing variables in templates",
+              query =
+                """
+                |I want to replace the following variables in the following template:
+                |
+                |The variables are:
+                |${generationKeys.joinToString("\n") { it }}
+                """
+                  .trimMargin(),
+              instructions =
+                listOf(
+                  "Create a `ReplacedValues` object with the `replacements` where the keys are the variable names and the values are the values to replace them with.",
+                )
+            )
+            .message,
+        name = Role.USER.name
+      )
+    val values: ReplacedValues =
+      model.prompt(
+        messages = messages + instructionMessage,
+        context = scope.context,
+        serializer = ReplacedValues.serializer(),
+        conversationId = scope.conversationId,
+        promptConfiguration = promptConfiguration
+      )
+    logger.info { "replaced: ${values.replacements.joinToString { it.key }}" }
+    val replacedTemplate =
+      messages.fold("") { acc, message ->
+        val replacedMessage =
+          generationKeys.fold(message.content) { acc, key ->
+            acc.replace(
+              "{{$key}}",
+              values.replacements.firstOrNull { it.key == key }?.value ?: "{{$key}}"
+            )
+          }
+        acc + replacedMessage + "\n"
+      }
+    return ExpressionResult(messages = messages, result = replacedTemplate, values = values)
+  }
+
+  companion object {
+    suspend fun run(
+      scope: CoreAIScope,
+      model: ChatWithFunctions,
+      block: suspend Expression.() -> Unit
+    ): ExpressionResult = Expression(scope, model, block).run()
+
+  }
+}
diff --git a/core/src/commonMain/kotlin/com/xebia/functional/xef/prompt/expressions/ExpressionResult.kt b/core/src/commonMain/kotlin/com/xebia/functional/xef/prompt/expressions/ExpressionResult.kt
new file mode 100644
index 000000000..cc8dbad2f
--- /dev/null
+++ b/core/src/commonMain/kotlin/com/xebia/functional/xef/prompt/expressions/ExpressionResult.kt
@@ -0,0 +1,9 @@
+package com.xebia.functional.xef.prompt.expressions
+
+import com.xebia.functional.xef.llm.models.chat.Message
+
+data class ExpressionResult(
+  val messages: List<Message>,
+  val result: String,
+  val values: ReplacedValues,
+)
diff --git a/core/src/commonMain/kotlin/com/xebia/functional/xef/prompt/expressions/ReplacedValues.kt b/core/src/commonMain/kotlin/com/xebia/functional/xef/prompt/expressions/ReplacedValues.kt
new file mode 100644
index 000000000..9a73ac3b5
--- /dev/null
+++ b/core/src/commonMain/kotlin/com/xebia/functional/xef/prompt/expressions/ReplacedValues.kt
@@ -0,0 +1,10 @@
+package com.xebia.functional.xef.prompt.expressions
+
+import com.xebia.functional.xef.auto.Description
+import kotlinx.serialization.Serializable
+
+@Serializable
+data class ReplacedValues(
+  @Description(["The values that are generated for the template"])
+  val replacements: List<Replacement>
+)
diff --git a/core/src/commonMain/kotlin/com/xebia/functional/xef/prompt/expressions/Replacement.kt b/core/src/commonMain/kotlin/com/xebia/functional/xef/prompt/expressions/Replacement.kt
new file mode 100644
index 000000000..c341a1e95
--- /dev/null
+++ b/core/src/commonMain/kotlin/com/xebia/functional/xef/prompt/expressions/Replacement.kt
@@ -0,0 +1,12 @@
+package com.xebia.functional.xef.prompt.expressions
+
+import com.xebia.functional.xef.auto.Description
+import kotlinx.serialization.Serializable
+
+@Serializable
+data class Replacement(
+  @Description(["The key originally in {{key}} format that was going to get replaced"])
+  val key: String,
+  @Description(["The Assistant generated value that the `key` should be replaced with"])
+  val value: String
+)
diff --git a/examples/java/src/main/java/com/xebia/functional/xef/java/auto/jdk21/reasoning/ToolSelectionExample.java b/examples/java/src/main/java/com/xebia/functional/xef/java/auto/jdk21/reasoning/ToolSelectionExample.java
deleted file mode 100644
index 2899b31ca..000000000
--- a/examples/java/src/main/java/com/xebia/functional/xef/java/auto/jdk21/reasoning/ToolSelectionExample.java
+++ /dev/null
@@ -1,42 +0,0 @@
-package com.xebia.functional.xef.java.auto.jdk21.reasoning;
-
-import com.xebia.functional.xef.auto.llm.openai.OpenAI;
-import com.xebia.functional.xef.java.auto.AIScope;
-import com.xebia.functional.xef.java.auto.ExecutionContext;
-import com.xebia.functional.xef.reasoning.filesystem.Files;
-import com.xebia.functional.xef.reasoning.pdf.PDF;
-import com.xebia.functional.xef.reasoning.text.Text;
-import com.xebia.functional.xef.reasoning.tools.ToolSelection;
-import java.util.Collections;
-import java.util.List;
-import java.util.concurrent.Executors;
-
-public class ToolSelectionExample {
-
-    public static void main(String[] args) {
-        try (var scope = new AIScope(new ExecutionContext(Executors.newVirtualThreadPerTaskExecutor()))) {
-            var model = OpenAI.DEFAULT_CHAT;
-            var serialization = OpenAI.DEFAULT_SERIALIZATION;
-            var text = Text.create(model, scope.getScope());
-            var files = Files.create(serialization, scope.getScope(), Collections.emptyList());
-            var pdf = PDF.create(model, serialization, scope.getScope());
-
-            var toolSelection = new ToolSelection(
-                    serialization,
-                    scope.getScope(),
-                    List.of(
-                            text.summarize,
-                            pdf.readPDFFromUrl,
-                            files.readFile,
-                            files.writeToTextFile
-                    ),
-                    Collections.emptyList()
-            );
-
-            var inputText = "Extract information from https://arxiv.org/pdf/2305.10601.pdf";
-            var result = toolSelection.applyInferredToolsBlocking(inputText);
-            System.out.println(result);
-        }
-    }
-}
-
diff --git a/examples/java/src/main/java/com/xebia/functional/xef/java/auto/jdk21/tot/Problems.java b/examples/java/src/main/java/com/xebia/functional/xef/java/auto/jdk21/tot/Problems.java
index 370a3b489..44ca246e9 100644
--- a/examples/java/src/main/java/com/xebia/functional/xef/java/auto/jdk21/tot/Problems.java
+++ b/examples/java/src/main/java/com/xebia/functional/xef/java/auto/jdk21/tot/Problems.java
@@ -98,7 +98,7 @@ public Memory addResult(Solutions.Solution result) {

     private static void checkAIScope() {
         if(aiScope == null){
-            aiScope = new AIScope(new ExecutionContext(Executors.newVirtualThreadPerTaskExecutor()));
+            aiScope = new AIScope(new ExecutionContext(Executors.newSingleThreadExecutor()));
         }
     }

diff --git a/examples/java/src/main/java/com/xebia/functional/xef/java/auto/jdk8/reasoning/ToolSelectionExample.java b/examples/java/src/main/java/com/xebia/functional/xef/java/auto/jdk8/reasoning/ToolSelectionExample.java
deleted file mode 100644
index 4db9edce6..000000000
--- a/examples/java/src/main/java/com/xebia/functional/xef/java/auto/jdk8/reasoning/ToolSelectionExample.java
+++ /dev/null
@@ -1,42 +0,0 @@
-package com.xebia.functional.xef.java.auto.jdk8.reasoning;
-
-import com.xebia.functional.xef.auto.CoreAIScope;
-import com.xebia.functional.xef.auto.llm.openai.OpenAI;
-import com.xebia.functional.xef.auto.llm.openai.OpenAIEmbeddings;
-import com.xebia.functional.xef.auto.llm.openai.OpenAIModel;
-import com.xebia.functional.xef.reasoning.filesystem.Files;
-import com.xebia.functional.xef.reasoning.pdf.PDF;
-import com.xebia.functional.xef.reasoning.text.Text;
-import com.xebia.functional.xef.reasoning.tools.ToolSelection;
-import java.util.Collections;
-import java.util.List;
-
-public class ToolSelectionExample {
-
-    public static void main(String[] args) {
-        try (CoreAIScope scope = new CoreAIScope(new OpenAIEmbeddings(OpenAI.DEFAULT_EMBEDDING))) {
-            OpenAIModel model = OpenAI.DEFAULT_CHAT;
-            OpenAIModel serialization = OpenAI.DEFAULT_SERIALIZATION;
-            Text text = Text.create(model, scope);
-            Files files = Files.create(serialization, scope, Collections.emptyList());
-            PDF pdf = PDF.create(model, serialization, scope);
-
-            ToolSelection toolSelection = new ToolSelection(
-                    serialization,
-                    scope,
-                    List.of(
-                            text.summarize,
-                            pdf.readPDFFromUrl,
-                            files.readFile,
-                            files.writeToTextFile
-                    ),
-                    Collections.emptyList()
-            );
-
-            String inputText = "Extract information from https://arxiv.org/pdf/2305.10601.pdf";
-            var result = toolSelection.applyInferredToolsBlocking(inputText);
-            System.out.println(result);
-        }
-    }
-}
-
diff --git a/examples/kotlin/src/main/kotlin/com/xebia/functional/xef/auto/expressions/WorkoutPlanProgram.kt b/examples/kotlin/src/main/kotlin/com/xebia/functional/xef/auto/expressions/WorkoutPlanProgram.kt
new file mode 100644
index 000000000..311a280d4
--- /dev/null
+++ b/examples/kotlin/src/main/kotlin/com/xebia/functional/xef/auto/expressions/WorkoutPlanProgram.kt
@@ -0,0 +1,55 @@
+package com.xebia.functional.xef.auto.expressions
+
+import com.xebia.functional.xef.auto.CoreAIScope
+import com.xebia.functional.xef.auto.ai
+import com.xebia.functional.xef.auto.llm.openai.OpenAI
+import com.xebia.functional.xef.auto.llm.openai.getOrThrow
+import com.xebia.functional.xef.llm.ChatWithFunctions
+import com.xebia.functional.xef.prompt.expressions.Expression
+import com.xebia.functional.xef.prompt.expressions.ExpressionResult
+
+suspend fun workoutPlan(
+  scope: CoreAIScope,
+  model: ChatWithFunctions,
+  goal: String,
+  experienceLevel: String,
+  equipment: String,
+  timeAvailable: Int
+): ExpressionResult = Expression.run(scope = scope, model = model, block = {
+  system { "You are a personal fitness trainer" }
+  user {
+    """
+    |I want to achieve $goal.
+    |My experience level is $experienceLevel, and I have access to the following equipment: $equipment.
+    |I can dedicate $timeAvailable minutes per day.
+    |Can you create a workout plan for me?
+    """.trimMargin()
+  }
+  assistant {
+    """
+    |Sure! Based on your goal, experience level, equipment available, and time commitment, here's a customized workout plan:
+    |${prompt("workout_plan")}
+    """.trimMargin()
+  }
+})
+
+suspend fun main() {
+  val model = OpenAI.DEFAULT_SERIALIZATION
+  ai {
+    val plan = workoutPlan(
+      scope = this,
+      model = model,
+      goal = "building muscle",
+      experienceLevel = "intermediate",
+      equipment = "dumbbells, bench, resistance bands",
+      timeAvailable = 45
+    )
+    println("--------------------")
+    println("Workout Plan")
+    println("--------------------")
+    println("🤖 replaced: ${plan.values.replacements.joinToString { it.key }}")
+    println("--------------------")
+    println(plan.result)
+    println("--------------------")
+  }.getOrThrow()
+}
diff --git a/gradle/libs.versions.toml b/gradle/libs.versions.toml
index 2ebfb0e1c..c5cff0a41 100644
--- a/gradle/libs.versions.toml
+++ b/gradle/libs.versions.toml
@@ -2,7 +2,6 @@
 arrow = "1.2.0"
 arrowGradle = "0.12.0-rc.5"
 kotlin = "1.8.22"
-openai = "0.14.0"
 kotlinx-json = "1.5.1"
 ktor = "2.3.2"
 spotless = "6.20.0"
@@ -33,7 +32,7 @@ pdfbox = "2.0.29"
 mysql = "8.0.33"
 semverGradle = "0.5.0-rc.1"
 scala = "3.3.0"
-openai-client-version = "3.3.2"
+openai-client-version = "3.3.1"
 gpt4all-java = "1.1.5"
 ai-djl = "0.23.0"
 jackson = "2.15.2"
@@ -44,7 +43,6 @@ suspend-transform = "0.3.1"
 [libraries]
 arrow-core = { module = "io.arrow-kt:arrow-core", version.ref = "arrow" }
 arrow-fx-coroutines = { module = "io.arrow-kt:arrow-fx-coroutines", version.ref = "arrow" }
-open-ai = { module = "com.theokanning.openai-gpt3-java:service", version.ref = "openai" }
 kotlinx-serialization-json = { module = "org.jetbrains.kotlinx:kotlinx-serialization-json", version.ref = "kotlinx-json" }
 kotlinx-coroutines = { module = "org.jetbrains.kotlinx:kotlinx-coroutines-core", version.ref="kotlinx-coroutines" }
 kotlinx-coroutines-reactive = { module = "org.jetbrains.kotlinx:kotlinx-coroutines-reactive", version.ref="kotlinx-coroutines-reactive" }
diff --git a/reasoning/build.gradle.kts b/reasoning/build.gradle.kts
index 452dc534f..64db97aed 100644
--- a/reasoning/build.gradle.kts
+++ b/reasoning/build.gradle.kts
@@ -15,7 +15,6 @@ plugins {
   alias(libs.plugins.dokka)
   alias(libs.plugins.arrow.gradle.publish)
   alias(libs.plugins.semver.gradle)
-  alias(libs.plugins.suspend.transform.plugin)
   //id("com.xebia.asfuture").version("0.0.1")
 }

@@ -174,12 +173,6 @@
   }
 }

-suspendTransform {
-  enabled = true // default: true
-  includeRuntime = true // default: true
-  useJvmDefault()
-}
-
 tasks.withType<AbstractPublishToMaven> {
   dependsOn(tasks.withType<Sign>())
 }
diff --git a/reasoning/src/commonMain/kotlin/com/xebia/functional/xef/reasoning/tools/ToolSelection.kt b/reasoning/src/commonMain/kotlin/com/xebia/functional/xef/reasoning/tools/ToolSelection.kt
index 3d006c190..31f6b7674 100644
--- a/reasoning/src/commonMain/kotlin/com/xebia/functional/xef/reasoning/tools/ToolSelection.kt
+++ b/reasoning/src/commonMain/kotlin/com/xebia/functional/xef/reasoning/tools/ToolSelection.kt
@@ -4,8 +4,6 @@ import com.xebia.functional.xef.auto.CoreAIScope
 import com.xebia.functional.xef.llm.ChatWithFunctions
 import com.xebia.functional.xef.prompt.experts.ExpertSystem
 import io.github.oshai.kotlinlogging.KotlinLogging
-import love.forte.plugin.suspendtrans.annotation.JvmAsync
-import love.forte.plugin.suspendtrans.annotation.JvmBlocking

 class ToolSelection(
   private val model: ChatWithFunctions,
@@ -27,8 +25,6 @@
     }
   }

-  @JvmBlocking
-  @JvmAsync
   suspend fun applyInferredTools(task: String): ToolsExecutionTrace {
     logger.info { "🔍 Applying inferred tools for task: $task" }
     val plan = createExecutionPlan(task)