Skip to content

Commit

Permalink
Merge pull request langchain-ai#11 from langchain-ai/wfh/syntax
Browse files Browse the repository at this point in the history
Rm raw calls
  • Loading branch information
hinthornw authored Aug 16, 2023
2 parents a8c3776 + cc04a5f commit dc00382
Show file tree
Hide file tree
Showing 3 changed files with 9 additions and 6 deletions.
2 changes: 2 additions & 0 deletions docs/evaluation/capturing-feedback.mdx
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,8 @@ import {

# Working with feedback

This guide will walk you through feedback in LangSmith. For more end-to-end examples incorporating feedback into a workflow, see the [LangSmith Cookbook](https://github.com/langchain-ai/langsmith-cookbook/tree/main/feedback-examples).

Feedback comes in two forms: automated and human. Both are key to delivering a consistently high-quality application experience.

Automated feedback is generated whenever you use the `run_on_dataset` method to test your component on a dataset, or whenever you call `client.evaluate_run(run_id, run_evaluator)`. This guide will focus on human feedback using a common workflow.
Expand Down
6 changes: 3 additions & 3 deletions docs/tracing/tracing-faq.mdx
Original file line number Diff line number Diff line change
Expand Up @@ -84,7 +84,7 @@ from langchain.prompts import PromptTemplate\n
llm = ChatOpenAI(temperature=0, tags=["my-llm-tag"])
prompt = PromptTemplate.from_template("Say {input}")
chain = LLMChain(llm=llm, prompt=prompt, tags=["my-bash-tag", "another-tag"])\n
chain("Hello, World!", tags=["shared-tags"])
chain.invoke("Hello, World!", {"tags": ["shared-tags"]})
`),
TypeScriptBlock(`import { LLMChain } from "langchain/chains";
import { PromptTemplate } from "langchain/prompts";
Expand Down Expand Up @@ -112,7 +112,7 @@ Similar to tags, LangSmith permits associating arbitrary key-value pairs as meta
from langchain.chains import LLMChain\n
chat_model = ChatOpenAI()
chain = LLMChain.from_string(llm=chat_model, template="What's the answer to {input}?")\n
chain({"input": "What is the meaning of life?"}, metadata={"my_key": "My Value"})`),
chain.invoke({"input": "What is the meaning of life?"}, {"metadata": {"my_key": "My Value"}})`),
TypeScriptBlock(`import { ChatOpenAI } from "langchain/chat_models/openai";
import { LLMChain } from "langchain/chains";
import { PromptTemplate } from "langchain/prompts";\n
Expand Down Expand Up @@ -147,7 +147,7 @@ Inject the experiment ID or testing variant(s) in as metadata values, then use t
from langchain.chains import LLMChain\n
chat_model = ChatOpenAI()
chain = LLMChain.from_string(llm=chat_model, template="What's the answer to {input}?")\n
chain({"input": "What is the meaning of life?"}, metadata={"variant": "abc123"})`),
chain.invoke({"input": "What is the meaning of life?"}, {"metadata": {"variant": "abc123"}})`),
TypeScriptBlock(`import { ChatOpenAI } from "langchain/chat_models/openai";
import { LLMChain } from "langchain/chains";
import { PromptTemplate } from "langchain/prompts";\n
Expand Down
7 changes: 4 additions & 3 deletions tests/py_unit_tests/tracing/test_tracing-faq.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@ async def test_code_block_0():
@pytest.mark.asyncio
async def test_code_block_1():
from langchain.callbacks.tracers import LangChainTracer

tracer = LangChainTracer(project_name="My Project")
chain.invoke({"query": "How many people live in canada as of 2023?"}, config={"callbacks": [tracer]})

Expand All @@ -28,7 +29,7 @@ async def test_code_block_2():
prompt = PromptTemplate.from_template("Say {input}")
chain = LLMChain(llm=llm, prompt=prompt, tags=["my-bash-tag", "another-tag"])

chain("Hello, World!", tags=["shared-tags"])
chain.invoke("Hello, World!", {"tags": ["shared-tags"]})


@pytest.mark.asyncio
Expand All @@ -39,7 +40,7 @@ async def test_code_block_3():
chat_model = ChatOpenAI()
chain = LLMChain.from_string(llm=chat_model, template="What's the answer to {input}?")

chain({"input": "What is the meaning of life?"}, metadata={"my_key": "My Value"})
chain.invoke({"input": "What is the meaning of life?"}, {"metadata": {"my_key": "My Value"}})

@pytest.mark.asyncio
async def test_code_block_4():
Expand All @@ -49,7 +50,7 @@ async def test_code_block_4():
chat_model = ChatOpenAI()
chain = LLMChain.from_string(llm=chat_model, template="What's the answer to {input}?")

chain({"input": "What is the meaning of life?"}, metadata={"variant": "abc123"})
chain.invoke({"input": "What is the meaning of life?"}, {"metadata": {"variant": "abc123"}})

@pytest.mark.asyncio
async def test_code_block_5():
Expand Down

0 comments on commit dc00382

Please sign in to comment.