diff --git a/docs/evaluation/capturing-feedback.mdx b/docs/evaluation/capturing-feedback.mdx
index aa434f51d6e7b..2b4c24eb5c6fc 100644
--- a/docs/evaluation/capturing-feedback.mdx
+++ b/docs/evaluation/capturing-feedback.mdx
@@ -11,6 +11,8 @@ import {
 # Working with feedback

+This guide will walk you through capturing feedback in LangSmith. For more end-to-end examples of incorporating feedback into a workflow, see the [LangSmith Cookbook](https://github.com/langchain-ai/langsmith-cookbook/tree/main/feedback-examples).
+
 Feedback comes in two forms: automated and human. Both are key to delivering a consistently high quality application experience.

 Automated feedback is generated any time you use the `run_on_dataset` method to test your component on a dataset or any time you call `client.evaluate_run(run_id, run_evaluator)`. This guide will focus on human feedback using a common workflow.

diff --git a/docs/tracing/tracing-faq.mdx b/docs/tracing/tracing-faq.mdx
index 7a7b719c36d6b..f496614cb8dfe 100644
--- a/docs/tracing/tracing-faq.mdx
+++ b/docs/tracing/tracing-faq.mdx
@@ -84,7 +84,7 @@ from langchain.prompts import PromptTemplate\n
 llm = ChatOpenAI(temperature=0, tags=["my-llm-tag"])
 prompt = PromptTemplate.from_template("Say {input}")
 chain = LLMChain(llm=llm, prompt=prompt, tags=["my-bash-tag", "another-tag"])\n
-chain("Hello, World!", tags=["shared-tags"])
+chain.invoke("Hello, World!", {"tags": ["shared-tags"]})
 `),
   TypeScriptBlock(`import { LLMChain } from "langchain/chains";
 import { PromptTemplate } from "langchain/prompts";
@@ -112,7 +112,7 @@ Similar to tags, LangSmith permits associating arbitrary key-value pairs as meta
 from langchain.chains import LLMChain\n
 chat_model = ChatOpenAI()
 chain = LLMChain.from_string(llm=chat_model, template="What's the answer to {input}?")\n
-chain({"input": "What is the meaning of life?"}, metadata={"my_key": "My Value"})`),
+chain.invoke({"input": "What is the meaning of life?"}, {"metadata": {"my_key": "My Value"}})`),
   TypeScriptBlock(`import { ChatOpenAI } from "langchain/chat_models/openai";
 import { LLMChain } from "langchain/chains";
 import { PromptTemplate } from "langchain/prompts";\n
@@ -147,7 +147,7 @@ Inject the experiment ID or testing variant(s) in as metadata values, then use t
 from langchain.chains import LLMChain\n
 chat_model = ChatOpenAI()
 chain = LLMChain.from_string(llm=chat_model, template="What's the answer to {input}?")\n
-chain({"input": "What is the meaning of life?"}, metadata={"variant": "abc123"})`),
+chain.invoke({"input": "What is the meaning of life?"}, {"metadata": {"variant": "abc123"}})`),
   TypeScriptBlock(`import { ChatOpenAI } from "langchain/chat_models/openai";
 import { LLMChain } from "langchain/chains";
 import { PromptTemplate } from "langchain/prompts";\n
diff --git a/tests/py_unit_tests/tracing/test_tracing-faq.py b/tests/py_unit_tests/tracing/test_tracing-faq.py
index aaab4482fe448..48ada365b6e2b 100644
--- a/tests/py_unit_tests/tracing/test_tracing-faq.py
+++ b/tests/py_unit_tests/tracing/test_tracing-faq.py
@@ -14,6 +14,7 @@ async def test_code_block_0():
 @pytest.mark.asyncio
 async def test_code_block_1():
     from langchain.callbacks.tracers import LangChainTracer
+
     tracer = LangChainTracer(project_name="My Project")
     chain.invoke({"query": "How many people live in canada as of 2023?"}, config={"callbacks": [tracer]})

@@ -28,7 +29,7 @@ async def test_code_block_2():
     prompt = PromptTemplate.from_template("Say {input}")
     chain = LLMChain(llm=llm, prompt=prompt, tags=["my-bash-tag", "another-tag"])

-    chain("Hello, World!", tags=["shared-tags"])
tags=["shared-tags"]) + chain.invoke("Hello, World!", {"tags": ["shared-tags"]}) @pytest.mark.asyncio @@ -39,7 +40,7 @@ async def test_code_block_3(): chat_model = ChatOpenAI() chain = LLMChain.from_string(llm=chat_model, template="What's the answer to {input}?") - chain({"input": "What is the meaning of life?"}, metadata={"my_key": "My Value"}) + chain.invoke({"input": "What is the meaning of life?"}, {"metadata": {"my_key": "My Value"}}) @pytest.mark.asyncio async def test_code_block_4(): @@ -49,7 +50,7 @@ async def test_code_block_4(): chat_model = ChatOpenAI() chain = LLMChain.from_string(llm=chat_model, template="What's the answer to {input}?") - chain({"input": "What is the meaning of life?"}, metadata={"variant": "abc123"}) + chain.invoke({"input": "What is the meaning of life?"}, {"metadata": {"variant": "abc123"}}) @pytest.mark.asyncio async def test_code_block_5():