diff --git a/.changeset/lovely-stingrays-fail.md b/.changeset/lovely-stingrays-fail.md
new file mode 100644
index 000000000000..933d9f01ebfc
--- /dev/null
+++ b/.changeset/lovely-stingrays-fail.md
@@ -0,0 +1,5 @@
+---
+'ai': patch
+---
+
+feat (ai/streams): add LangChainAdapter.toAIStream()
diff --git a/content/providers/04-adapters/index.mdx b/content/providers/04-adapters/index.mdx
new file mode 100644
index 000000000000..50650059461c
--- /dev/null
+++ b/content/providers/04-adapters/index.mdx
@@ -0,0 +1,14 @@
+---
+title: Adapters
+description: Learn how to use AI SDK Adapters.
+---
+
+# Adapters
+
+Adapters are lightweight integrations that enable you to use
+the Vercel AI SDK UI functions (`useChat` and `useCompletion`)
+with 3rd party libraries.
+
+The following adapters are currently available:
+
+- [LangChain](./langchain)
diff --git a/content/providers/04-adapters/langchain.mdx b/content/providers/04-adapters/langchain.mdx
new file mode 100644
index 000000000000..0b3bdd0b017d
--- /dev/null
+++ b/content/providers/04-adapters/langchain.mdx
@@ -0,0 +1,66 @@
+---
+title: LangChain
+description: Learn how to use LangChain with the Vercel AI SDK.
+---
+
+# LangChain
+
+[LangChain](https://js.langchain.com/docs/) is a framework for developing applications powered by language models.
+It provides tools and abstractions for working with AI models, agents, vector stores, and other data sources for retrieval augmented generation (RAG).
+However, LangChain does not provide a way to easily build UIs or a standard way to stream data to the client.
+
+## Example: Completion
+
+Here is a basic example that uses both Vercel AI SDK and LangChain together with the [Next.js](https://nextjs.org/docs) App Router.
+
+The AI SDK `LangChainAdapter` uses the result from [LangChain Expression Language streaming](https://js.langchain.com/docs/expression_language/streaming) to pipe text to the client.
+`LangChainAdapter.toAIStream()` is compatible with the LangChain Expression Language `.stream()` function response.
+
+```tsx filename="app/api/completion/route.ts" highlight={"17"}
+import { ChatOpenAI } from '@langchain/openai';
+import { LangChainAdapter, StreamingTextResponse } from 'ai';
+
+export const dynamic = 'force-dynamic';
+export const maxDuration = 60;
+
+export async function POST(req: Request) {
+ const { prompt } = await req.json();
+
+ const model = new ChatOpenAI({
+ model: 'gpt-3.5-turbo-0125',
+ temperature: 0,
+ });
+
+ const stream = await model.stream(prompt);
+
+ const aiStream = LangChainAdapter.toAIStream(stream);
+
+ return new StreamingTextResponse(aiStream);
+}
+```
+
+Then, we use the Vercel AI SDK's [`useCompletion`](/docs/ai-sdk-ui/completion) hook in the page component to handle the completion:
+
+```tsx filename="app/page.tsx"
+'use client';
+
+import { useCompletion } from 'ai/react';
+
+export default function Chat() {
+ const { completion, input, handleInputChange, handleSubmit } =
+ useCompletion();
+
+  return (
+    <form onSubmit={handleSubmit}>
+      {completion}
+      <input value={input} onChange={handleInputChange} />
+    </form>
+  );
+}
+```
+
+## More Examples
+
+You can find additional examples in the Vercel AI SDK [examples/next-langchain](https://github.com/vercel/ai/tree/main/examples/next-langchain) folder.
diff --git a/content/providers/04-legacy-providers/langchain.mdx b/content/providers/04-legacy-providers/langchain.mdx
deleted file mode 100644
index 9b9ea49c4631..000000000000
--- a/content/providers/04-legacy-providers/langchain.mdx
+++ /dev/null
@@ -1,134 +0,0 @@
----
-title: LangChain
-description: Learn how to use LangChain with the Vercel AI SDK.
----
-
-# LangChain
-
-[LangChain](https://js.langchain.com/docs/) is a framework for developing applications powered by language models.
-It provides tools and abstractions for working with AI models, agents, vector stores, and other data sources for retrieval augmented generation (RAG).
-However, LangChain does not provide a way to easily build UIs or a standard way to stream data to the client.
-
-## Example
-
-Here is an example implementation of a chat application that uses both Vercel AI SDK and a composed LangChain chain together with the
-[Next.js](https://nextjs.org/docs) App Router. It includes a LangChain [`PromptTemplate`](https://js.langchain.com/docs/modules/model_io/prompts/quick_start/)
-to pass input into a [`ChatOpenAI`](https://js.langchain.com/docs/modules/model_io/models/chat/integrations/openai) model wrapper,
-then streams the result through an encoding output parser.
-
-It takes this stream and uses Vercel AI SDK's [`StreamingTextResponse`](/docs/reference/stream-helpers/streaming-text-response)
-to pipe text to the client and then Vercel AI SDK's `useChat` to handle the chat UI.
-
-```tsx filename="app/api/chat/route.ts"
-import { NextRequest } from 'next/server';
-import { Message as VercelChatMessage, StreamingTextResponse } from 'ai';
-
-import { ChatOpenAI } from 'langchain/chat_models/openai';
-import { BytesOutputParser } from 'langchain/schema/output_parser';
-import { PromptTemplate } from 'langchain/prompts';
-
-/**
- * Basic memory formatter that stringifies and passes
- * message history directly into the model.
- */
-const formatMessage = (message: VercelChatMessage) => {
- return `${message.role}: ${message.content}`;
-};
-
-const TEMPLATE = `You are a pirate named Patchy. All responses must be extremely verbose and in pirate dialect.
-
-Current conversation:
-{chat_history}
-
-User: {input}
-AI:`;
-
-/*
- * This handler initializes and calls a simple chain with a prompt,
- * chat model, and output parser. See the docs for more information:
- *
- * https://js.langchain.com/docs/guides/expression_language/cookbook#prompttemplate--llm--outputparser
- */
-export async function POST(req: NextRequest) {
- const body = await req.json();
- const messages = body.messages ?? [];
- const formattedPreviousMessages = messages.slice(0, -1).map(formatMessage);
- const currentMessageContent = messages[messages.length - 1].content;
-
- const prompt = PromptTemplate.fromTemplate(TEMPLATE);
- /**
- * See a full list of supported models at:
- * https://js.langchain.com/docs/modules/model_io/models/
- */
- const model = new ChatOpenAI({
- temperature: 0.8,
- });
-
- /**
- * Chat models stream message chunks rather than bytes, so this
- * output parser handles serialization and encoding.
- */
- const outputParser = new BytesOutputParser();
-
- /*
- * Can also initialize as:
- *
- * import { RunnableSequence } from "langchain/schema/runnable";
- * const chain = RunnableSequence.from([prompt, model, outputParser]);
- */
- const chain = prompt.pipe(model).pipe(outputParser);
-
- const stream = await chain.stream({
- chat_history: formattedPreviousMessages.join('\n'),
- input: currentMessageContent,
- });
-
- return new StreamingTextResponse(stream);
-}
-```
-
-Then, we use the Vercel AI SDK's [`useChat`](/docs/reference/ai-sdk-ui/use-chat) method:
-
-```tsx filename="app/page.tsx"
-'use client';
-
-import { useChat } from 'ai/react';
-
-export default function Chat() {
- const { messages, input, handleInputChange, handleSubmit } = useChat();
-
- return (
-