CU-865ca0fvw Pass LLMModel through all chains #43

Merged: 6 commits, merged May 10, 2023
Changes from 1 commit
Fix broken tests
franciscodr committed May 9, 2023

Verified: this commit was signed with the committer's verified signature (sparrc, Cameron Sparr).
commit 076b7ab15bf05b67f3ead6f06d9ae833dfda5f77
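The three spec files below are all updated the same way: each chain now receives the LLMModel it should use as an explicit constructor argument (GPT_3_5_TURBO in the tests). A minimal, self-contained sketch of that pattern; MiniChain and FakeLLM are hypothetical stand-ins, not xef types:

```kotlin
// Sketch of the pattern this PR threads through the chains: the model choice is
// passed into the chain and forwarded to the LLM call, instead of being implied
// by the client. All names here are illustrative.
enum class LLMModel { GPT_3_5_TURBO }

fun interface FakeLLM {
    fun complete(model: LLMModel, prompt: String): String
}

class MiniChain(
    private val llm: FakeLLM,
    private val model: LLMModel // new: the model travels with the chain
) {
    fun run(prompt: String): String = llm.complete(model, prompt)
}

fun main() {
    val llm = FakeLLM { model, prompt -> "[$model] $prompt" }
    println(MiniChain(llm, LLMModel.GPT_3_5_TURBO).run("Tell me a joke."))
}
```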
@@ -2,6 +2,7 @@ package com.xebia.functional.chains
 
 import arrow.core.raise.either
 import com.xebia.functional.Document
+import com.xebia.functional.llm.openai.LLMModel
 import com.xebia.functional.prompt.PromptTemplate
 import io.kotest.assertions.arrow.core.shouldBeLeft
 import io.kotest.assertions.arrow.core.shouldBeRight
@@ -15,7 +16,14 @@ class CombineDocsChainSpec : StringSpec({
     either {
       val promptTemplate = PromptTemplate(testTemplate, listOf("context", "question"))
       val docs = listOf(Document("foo foo foo"), Document("bar bar bar"), Document("baz baz baz"))
-      val chain = CombineDocsChain(testLLM, promptTemplate, docs, documentVariableName, outputVariable)
+      val chain = CombineDocsChain(
+        testLLM,
+        promptTemplate,
+        LLMModel.GPT_3_5_TURBO,
+        docs,
+        documentVariableName,
+        outputVariable
+      )
       chain.combine(docs)
     } shouldBeRight testContextOutput
   }
@@ -24,7 +32,14 @@ class CombineDocsChainSpec : StringSpec({
     either {
       val promptTemplate = PromptTemplate(testTemplate, listOf("context", "question"))
       val docs = listOf(Document("foo foo foo"), Document("bar bar bar"), Document("baz baz baz"))
-      val chain = CombineDocsChain(testLLM, promptTemplate, docs, documentVariableName, outputVariable)
+      val chain = CombineDocsChain(
+        testLLM,
+        promptTemplate,
+        LLMModel.GPT_3_5_TURBO,
+        docs,
+        documentVariableName,
+        outputVariable
+      )
       chain.run("What do you think?").bind()
     } shouldBeRight testOutputIDK
   }
@@ -34,7 +49,14 @@ class CombineDocsChainSpec : StringSpec({
       val promptTemplate = PromptTemplate(testTemplateInputs, listOf("context", "name", "age"))
       val docs = listOf(Document("foo foo foo"), Document("bar bar bar"), Document("baz baz baz"))
       val chain = CombineDocsChain(
-        testLLM, promptTemplate, docs, documentVariableName, outputVariable, Chain.ChainOutput.InputAndOutput)
+        testLLM,
+        promptTemplate,
+        LLMModel.GPT_3_5_TURBO,
+        docs,
+        documentVariableName,
+        outputVariable,
+        Chain.ChainOutput.InputAndOutput
+      )
       chain.run(mapOf("name" to "Scala", "age" to "28")).bind()
     } shouldBeRight testOutputInputs + mapOf("context" to testContext, "name" to "Scala", "age" to "28")
   }
@@ -44,7 +66,14 @@ class CombineDocsChainSpec : StringSpec({
       val promptTemplate = PromptTemplate(testTemplateInputs, listOf("context", "name", "age"))
       val docs = listOf(Document("foo foo foo"), Document("bar bar bar"), Document("baz baz baz"))
       val chain = CombineDocsChain(
-        testLLM, promptTemplate, docs, documentVariableName, outputVariable, Chain.ChainOutput.InputAndOutput)
+        testLLM,
+        promptTemplate,
+        LLMModel.GPT_3_5_TURBO,
+        docs,
+        documentVariableName,
+        outputVariable,
+        Chain.ChainOutput.InputAndOutput
+      )
       chain.run(mapOf("name" to "Scala", "city" to "Seattle")).bind()
     } shouldBeLeft
       Chain.InvalidInputs(
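Judging from the updated call sites above, CombineDocsChain now takes the model as its third positional argument, between the prompt template and the documents, with chainOutput still optional at the end. A toy illustration of a combine-docs step that carries the chosen model along with the context it builds; apart from Document's shape, every name here is a hypothetical stand-in:

```kotlin
// Toy combine-docs step: join the documents into the single prompt variable the
// chain fills in, and keep the selected model next to it for the downstream call.
// Not the library's implementation.
enum class LLMModel { GPT_3_5_TURBO }

data class Document(val content: String)

fun combineDocs(
    model: LLMModel,
    docs: List<Document>,
    documentVariableName: String = "context"
): Pair<LLMModel, Map<String, String>> {
    val context = docs.joinToString("\n\n") { it.content }
    return model to mapOf(documentVariableName to context)
}

fun main() {
    val docs = listOf(Document("foo foo foo"), Document("bar bar bar"), Document("baz baz baz"))
    println(combineDocs(LLMModel.GPT_3_5_TURBO, docs))
}
```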
@@ -1,6 +1,7 @@
 package com.xebia.functional.chains
 
 import arrow.core.raise.either
+import com.xebia.functional.llm.openai.LLMModel
 import com.xebia.functional.prompt.PromptTemplate
 import io.kotest.assertions.arrow.core.shouldBeLeft
 import io.kotest.assertions.arrow.core.shouldBeRight
@@ -11,7 +12,7 @@ class LLMChainSpec : StringSpec({
     val template = "Tell me {foo}."
     either {
       val promptTemplate = PromptTemplate(template, listOf("foo"))
-      val chain = LLMChain(testLLM, promptTemplate, outputVariable = "answer")
+      val chain = LLMChain(testLLM, promptTemplate, LLMModel.GPT_3_5_TURBO, outputVariable = "answer")
       chain.run("a joke").bind()
     } shouldBeRight mapOf("answer" to "I'm not good at jokes")
   }
@@ -20,8 +21,10 @@ class LLMChainSpec : StringSpec({
     val template = "Tell me {foo}."
     either {
       val prompt = PromptTemplate(template, listOf("foo"))
-      val chain = LLMChain(testLLM, prompt, outputVariable = "answer",
-        chainOutput = Chain.ChainOutput.InputAndOutput)
+      val chain = LLMChain(
+        testLLM, prompt, LLMModel.GPT_3_5_TURBO, outputVariable = "answer",
+        chainOutput = Chain.ChainOutput.InputAndOutput
+      )
       chain.run("a joke").bind()
     } shouldBeRight mapOf("foo" to "a joke", "answer" to "I'm not good at jokes")
   }
@@ -30,8 +33,10 @@ class LLMChainSpec : StringSpec({
     val template = "My name is {name} and I'm {age} years old"
     either {
       val prompt = PromptTemplate(template, listOf("name", "age"))
-      val chain = LLMChain(testLLM, prompt, outputVariable = "answer",
-        chainOutput = Chain.ChainOutput.InputAndOutput)
+      val chain = LLMChain(
+        testLLM, prompt, LLMModel.GPT_3_5_TURBO, outputVariable = "answer",
+        chainOutput = Chain.ChainOutput.InputAndOutput
+      )
       chain.run(mapOf("age" to "28", "name" to "foo")).bind()
     } shouldBeRight mapOf("age" to "28", "name" to "foo", "answer" to "Hello there! Nice to meet you foo")
   }
@@ -40,8 +45,10 @@ class LLMChainSpec : StringSpec({
     val template = "My name is {name} and I'm {age} years old"
     either {
       val prompt = PromptTemplate(template, listOf("name", "age"))
-      val chain = LLMChain(testLLM, prompt, outputVariable = "answer",
-        chainOutput = Chain.ChainOutput.InputAndOutput)
+      val chain = LLMChain(
+        testLLM, prompt, LLMModel.GPT_3_5_TURBO, outputVariable = "answer",
+        chainOutput = Chain.ChainOutput.InputAndOutput
+      )
       chain.run(mapOf("age" to "28", "brand" to "foo")).bind()
     } shouldBeLeft
       Chain.InvalidInputs(
@@ -52,8 +59,10 @@ class LLMChainSpec : StringSpec({
     val template = "My name is {name} and I'm {age} years old"
     either {
       val prompt = PromptTemplate(template, listOf("name", "age"))
-      val chain = LLMChain(testLLM, prompt, outputVariable = "answer",
-        chainOutput = Chain.ChainOutput.InputAndOutput)
+      val chain = LLMChain(
+        testLLM, prompt, LLMModel.GPT_3_5_TURBO, outputVariable = "answer",
+        chainOutput = Chain.ChainOutput.InputAndOutput
+      )
       chain.run("foo").bind()
     } shouldBeLeft Chain.InvalidInputs("The expected inputs are more than one: {name}, {age}")
   }
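In LLMChainSpec the model is passed positionally after the prompt, while outputVariable and chainOutput stay as named arguments. A self-contained sketch of that call shape; ToyLLMChain and TestLLM are stand-ins inferred from the call sites, not the real com.xebia.functional.chains types:

```kotlin
// Toy chain mirroring the LLMChain call shape used in the tests: fill the
// {placeholders} in the template, ask the LLM with the chosen model, and wrap
// the answer under outputVariable.
enum class LLMModel { GPT_3_5_TURBO }

fun interface TestLLM {
    fun complete(model: LLMModel, prompt: String): String
}

class ToyLLMChain(
    private val llm: TestLLM,
    private val template: String,
    private val model: LLMModel, // the new positional parameter
    private val outputVariable: String
) {
    fun run(inputs: Map<String, String>): Map<String, String> {
        val prompt = inputs.entries.fold(template) { acc, (k, v) -> acc.replace("{$k}", v) }
        return mapOf(outputVariable to llm.complete(model, prompt))
    }
}

fun main() {
    val llm = TestLLM { _, _ -> "I'm not good at jokes" }
    val chain = ToyLLMChain(llm, "Tell me {foo}.", LLMModel.GPT_3_5_TURBO, outputVariable = "answer")
    println(chain.run(mapOf("foo" to "a joke"))) // {answer=I'm not good at jokes}
}
```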
@@ -4,6 +4,7 @@ import arrow.core.raise.either
 import arrow.fx.coroutines.resourceScope
 import com.xebia.functional.Document
 import com.xebia.functional.embeddings.Embedding
+import com.xebia.functional.llm.openai.LLMModel
 import com.xebia.functional.vectorstores.DocumentVectorId
 import com.xebia.functional.vectorstores.VectorStore
 import io.kotest.assertions.arrow.core.shouldBeLeft
@@ -20,7 +21,7 @@ class VectorQAChainSpec : StringSpec({
     resourceScope {
       either {
         val vectorStore = testVectorStore
-        val chain = VectorQAChain(testLLM, vectorStore, numOfDocs, outputVariable)
+        val chain = VectorQAChain(testLLM, LLMModel.GPT_3_5_TURBO, vectorStore, numOfDocs, outputVariable)
         chain.run("What do you think?").bind()
       } shouldBeRight testOutputIDK
     }
@@ -30,7 +31,7 @@ class VectorQAChainSpec : StringSpec({
     resourceScope {
       either {
         val vectorStore = testVectorStore
-        val chain = VectorQAChain(testLLM, vectorStore, numOfDocs, outputVariable)
+        val chain = VectorQAChain(testLLM, LLMModel.GPT_3_5_TURBO, vectorStore, numOfDocs, outputVariable)
         chain.run(mapOf("question" to "What do you think?")).bind()
       } shouldBeRight testOutputIDK
     }
@@ -40,7 +41,7 @@ class VectorQAChainSpec : StringSpec({
     resourceScope {
       either {
         val vectorStore = testVectorStore
-        val chain = VectorQAChain(testLLM, vectorStore, numOfDocs, outputVariable)
+        val chain = VectorQAChain(testLLM, LLMModel.GPT_3_5_TURBO, vectorStore, numOfDocs, outputVariable)
         chain.run(mapOf("question" to "What do you think?", "foo" to "bla bla bla")).bind()
       } shouldBeRight testOutputIDK
     }
@@ -50,7 +51,7 @@ class VectorQAChainSpec : StringSpec({
     resourceScope {
       either {
         val vectorStore = testVectorStore
-        val chain = VectorQAChain(testLLM, vectorStore, numOfDocs, outputVariable)
+        val chain = VectorQAChain(testLLM, LLMModel.GPT_3_5_TURBO, vectorStore, numOfDocs, outputVariable)
         chain.run(mapOf("foo" to "What do you think?")).bind()
       } shouldBeLeft
         Chain.InvalidInputs(
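VectorQAChain now receives the model as its second argument, right after the LLM client. A toy version showing that argument order and how the retrieved documents plus the selected model would reach the answering step; every type below is an illustrative stand-in:

```kotlin
// Toy question-answering chain over a "vector store": take the top numOfDocs
// documents (a real store would rank by embedding similarity) and answer with
// the selected model. Not the library's implementation.
enum class LLMModel { GPT_3_5_TURBO }

fun interface TestLLM {
    fun answer(model: LLMModel, question: String, context: List<String>): String
}

class ToyVectorQAChain(
    private val llm: TestLLM,
    private val model: LLMModel,           // new: second positional argument
    private val vectorStore: List<String>, // stand-in for a similarity-search store
    private val numOfDocs: Int,
    private val outputVariable: String
) {
    fun run(question: String): Map<String, String> {
        val context = vectorStore.take(numOfDocs)
        return mapOf(outputVariable to llm.answer(model, question, context))
    }
}

fun main() {
    val store = listOf("foo foo foo", "bar bar bar", "baz baz baz")
    val chain = ToyVectorQAChain(TestLLM { _, _, _ -> "I don't know" }, LLMModel.GPT_3_5_TURBO, store, numOfDocs = 2, outputVariable = "answer")
    println(chain.run("What do you think?")) // {answer=I don't know}
}
```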