diff --git a/llmexamples-app/src/pages/ExamplePage.tsx b/llmexamples-app/src/pages/ExamplePage.tsx
index b33f7ccf..78851afa 100644
--- a/llmexamples-app/src/pages/ExamplePage.tsx
+++ b/llmexamples-app/src/pages/ExamplePage.tsx
@@ -14,6 +14,8 @@ export function ExamplePage() {
// The latest reply from the LLM.
const [reply, setReply] = useState('');
+ const [useStream, setUseStream] = useState(false);
+
const [started, setStarted] = useState(false);
const [finished, setFinished] = useState(true);
@@ -30,32 +32,47 @@ export function ExamplePage() {
setStarted(true);
setFinished(false);
- // Stream the completions. Each element is the next stream chunk.
- const stream = llms.openai.streamChatCompletions({
- model: 'gpt-3.5-turbo',
- messages: [
- { role: 'system', content: 'You are a cynical assistant.' },
- { role: 'user', content: message },
- ],
- }).pipe(
- // Accumulate the stream content into a stream of strings, where each
- // element contains the accumulated message so far.
- llms.openai.accumulateContent(),
- // The stream is just a regular Observable, so we can use standard rxjs
- // functionality to update state, e.g. recording when the stream
- // has completed.
- // The operator decision tree on the rxjs website is a useful resource:
- // https://rxjs.dev/operator-decision-tree.
- finalize(() => {
- setStarted(false);
- setFinished(true);
- })
- );
- // Subscribe to the stream and update the state for each returned value.
- return {
- enabled,
- stream: stream.subscribe(setReply),
- };
+ if (!useStream) {
+ // Make a single request to the LLM.
+ const response = await llms.openai.chatCompletions({
+ model: 'gpt-3.5-turbo',
+ messages: [
+ { role: 'system', content: 'You are a cynical assistant.' },
+ { role: 'user', content: message },
+ ],
+ });
+ setReply(response.choices[0].message.content);
+ setStarted(false);
+ setFinished(true);
+ return { enabled, response };
+ } else {
+ // Stream the completions. Each element is the next stream chunk.
+ const stream = llms.openai.streamChatCompletions({
+ model: 'gpt-3.5-turbo',
+ messages: [
+ { role: 'system', content: 'You are a cynical assistant.' },
+ { role: 'user', content: message },
+ ],
+ }).pipe(
+ // Accumulate the stream content into a stream of strings, where each
+ // element contains the accumulated message so far.
+ llms.openai.accumulateContent(),
+ // The stream is just a regular Observable, so we can use standard rxjs
+ // functionality to update state, e.g. recording when the stream
+ // has completed.
+ // The operator decision tree on the rxjs website is a useful resource:
+ // https://rxjs.dev/operator-decision-tree.
+ finalize(() => {
+ setStarted(false);
+ setFinished(true);
+ })
+ );
+ // Subscribe to the stream and update the state for each returned value.
+ return {
+ enabled,
+ stream: stream.subscribe(setReply),
+ };
+ }
-  }, [message]);
+  }, [message, useStream]);
if (error) {
@@ -73,7 +90,8 @@ export function ExamplePage() {
placeholder="Enter a message"
/>
-
+
+