Commit 371067f

More small suggestions

- Replace blocks with equals / single expressions

1 parent: ef49ba7

File tree: 3 files changed, +28 -38 lines
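The one pattern behind all three files is Kotlin's expression-body syntax: when a function's block body is a single return, the braces and the return keyword can be replaced with =. A minimal before/after sketch of the shape this commit converts to (the greet function is invented for illustration, not taken from the diff):

    // Block body: braces plus an explicit return.
    fun greet(name: String): String {
      return "Hello, $name"
    }

    // Equivalent expression body, the form this commit converts to.
    fun greet(name: String): String = "Hello, $name"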

core/src/commonMain/kotlin/com/xebia/functional/xef/auto/CoreAIScope.kt (+2 -3)
@@ -99,15 +99,14 @@ class CoreAIScope(
     functions: List<CFunction>,
     serializer: (json: String) -> A,
     promptConfiguration: PromptConfiguration,
-  ): A {
-    return prompt(
+  ): A =
+    prompt(
       prompt = Prompt(prompt),
       context = context,
       functions = functions,
       serializer = serializer,
       promptConfiguration = promptConfiguration,
     )
-  }

   @AiDsl
   suspend fun Chat.promptMessage(

core/src/commonMain/kotlin/com/xebia/functional/xef/llm/ChatWithFunctions.kt (+4 -6)
@@ -24,16 +24,15 @@ interface ChatWithFunctions : Chat {
     functions: List<CFunction>,
     serializer: (json: String) -> A,
     promptConfiguration: PromptConfiguration,
-  ): A {
-    return tryDeserialize(serializer, promptConfiguration.maxDeserializationAttempts) {
+  ): A =
+    tryDeserialize(serializer, promptConfiguration.maxDeserializationAttempts) {
       promptMessage(
         prompt = Prompt(prompt),
         context = context,
         functions = functions,
         promptConfiguration
       )
     }
-  }

   @AiDsl
   suspend fun <A> prompt(

@@ -42,11 +41,10 @@ interface ChatWithFunctions : Chat {
     functions: List<CFunction>,
     serializer: (json: String) -> A,
     promptConfiguration: PromptConfiguration,
-  ): A {
-    return tryDeserialize(serializer, promptConfiguration.maxDeserializationAttempts) {
+  ): A =
+    tryDeserialize(serializer, promptConfiguration.maxDeserializationAttempts) {
       promptMessage(prompt = prompt, context = context, functions = functions, promptConfiguration)
     }
-  }

   private suspend fun <A> tryDeserialize(
     serializer: (json: String) -> A,
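For context on the call sites above: tryDeserialize reruns the prompt until the model's JSON output parses, giving up after maxDeserializationAttempts. A minimal sketch of that retry shape, with invented names (retryDeserialize, block) and simplified error handling rather than xef's actual private implementation:

    // Retry a prompt until its JSON output deserializes, up to maxAttempts.
    // Hypothetical standalone version of the tryDeserialize idea.
    suspend fun <A> retryDeserialize(
      serializer: (json: String) -> A,
      maxAttempts: Int,
      block: suspend () -> String
    ): A {
      var lastError: Throwable? = null
      repeat(maxAttempts) {
        val json = block() // ask the model again on each attempt
        try {
          return serializer(json) // parsed successfully: we are done
        } catch (e: Exception) {
          lastError = e // parse failure: remember it and retry
        }
      }
      throw IllegalStateException("deserialization failed after $maxAttempts attempts", lastError)
    }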

gpt4all-kotlin/src/jvmMain/kotlin/com/xebia/functional/gpt4all/GPT4All.kt (+22 -29)
@@ -46,7 +46,8 @@ interface GPT4All : AutoCloseable, Chat, Completion {

   override suspend fun createCompletion(request: CompletionRequest): CompletionResult =
     with(request) {
-      val response: String = generateCompletion(prompt, generationConfig)
+      val response: String =
+        gpt4allModel.prompt(messages.buildPrompt(), llmModelContext(generationConfig))
       return CompletionResult(
         UUID.randomUUID().toString(),
         path.name,

@@ -59,8 +60,8 @@ interface GPT4All : AutoCloseable, Chat, Completion {

   override suspend fun createChatCompletion(request: ChatCompletionRequest): ChatCompletionResponse =
     with(request) {
-      val prompt: String = messages.buildPrompt()
-      val response: String = generateCompletion(prompt, generationConfig)
+      val response: String =
+        gpt4allModel.prompt(messages.buildPrompt(), llmModelContext(generationConfig))
       return ChatCompletionResponse(
         UUID.randomUUID().toString(),
         path.name,

@@ -71,9 +72,7 @@ interface GPT4All : AutoCloseable, Chat, Completion {
       )
     }

-  override fun tokensFromMessages(messages: List<Message>): Int {
-    return 0
-  }
+  override fun tokensFromMessages(messages: List<Message>): Int = 0

   override val name: String = path.name

@@ -92,31 +91,25 @@ interface GPT4All : AutoCloseable, Chat, Completion {
       return "$messages\n### Response:"
     }

-    private fun generateCompletion(
-      prompt: String,
-      generationConfig: GenerationConfig
-    ): String {
-      val context = LLModelContext(
-        logits_size = LibCAPI.size_t(generationConfig.logitsSize.toLong()),
-        tokens_size = LibCAPI.size_t(generationConfig.tokensSize.toLong()),
-        n_past = generationConfig.nPast,
-        n_ctx = generationConfig.nCtx,
-        n_predict = generationConfig.nPredict,
-        top_k = generationConfig.topK,
-        top_p = generationConfig.topP.toFloat(),
-        temp = generationConfig.temp.toFloat(),
-        n_batch = generationConfig.nBatch,
-        repeat_penalty = generationConfig.repeatPenalty.toFloat(),
-        repeat_last_n = generationConfig.repeatLastN,
-        context_erase = generationConfig.contextErase.toFloat()
-      )
-
-      return gpt4allModel.prompt(prompt, context)
-    }
+    private fun llmModelContext(generationConfig: GenerationConfig): LLModelContext =
+      with(generationConfig) {
+        LLModelContext(
+          logits_size = LibCAPI.size_t(logitsSize.toLong()),
+          tokens_size = LibCAPI.size_t(tokensSize.toLong()),
+          n_past = nPast,
+          n_ctx = nCtx,
+          n_predict = nPredict,
+          top_k = topK,
+          top_p = topP.toFloat(),
+          temp = temp.toFloat(),
+          n_batch = nBatch,
+          repeat_penalty = repeatPenalty.toFloat(),
+          repeat_last_n = repeatLastN,
+          context_erase = contextErase.toFloat()
+        )
+      }
   }

-
   }
 }

-
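The extracted llmModelContext also leans on Kotlin's with scope function, which is why every generationConfig. prefix disappears inside the builder. A small sketch of the same idea using an invented Config type, not xef's GenerationConfig:

    data class Config(val topK: Int, val topP: Double)

    // Without with(): the receiver is repeated on every property access.
    fun describeVerbose(config: Config): String =
      "top_k=" + config.topK + ", top_p=" + config.topP

    // With with(): config's members come into scope inside the block.
    fun describe(config: Config): String =
      with(config) { "top_k=$topK, top_p=$topP" }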