From ac755a699ca2d0fc8afdf468c58445a48354755b Mon Sep 17 00:00:00 2001
From: Edward Qian
Date: Tue, 26 Sep 2023 11:12:17 -0400
Subject: [PATCH 1/2] added an extra button for submitting non-streaming req

---
 llmexamples-app/src/pages/ExamplePage.tsx | 75 ++++++++++++++---------
 1 file changed, 46 insertions(+), 29 deletions(-)

diff --git a/llmexamples-app/src/pages/ExamplePage.tsx b/llmexamples-app/src/pages/ExamplePage.tsx
index b33f7ccf..ee654c51 100644
--- a/llmexamples-app/src/pages/ExamplePage.tsx
+++ b/llmexamples-app/src/pages/ExamplePage.tsx
@@ -14,6 +14,8 @@ export function ExamplePage() {
   // The latest reply from the LLM.
   const [reply, setReply] = useState('');
 
+  const [useStream, setUseStream] = useState(false);
+
   const [started, setStarted] = useState(false);
   const [finished, setFinished] = useState(true);
 
@@ -28,34 +30,48 @@ export function ExamplePage() {
       return { enabled };
     }
 
-    setStarted(true);
-    setFinished(false);
-    // Stream the completions. Each element is the next stream chunk.
-    const stream = llms.openai.streamChatCompletions({
-      model: 'gpt-3.5-turbo',
-      messages: [
-        { role: 'system', content: 'You are a cynical assistant.' },
-        { role: 'user', content: message },
-      ],
-    }).pipe(
-      // Accumulate the stream content into a stream of strings, where each
-      // element contains the accumulated message so far.
-      llms.openai.accumulateContent(),
-      // The stream is just a regular Observable, so we can use standard rxjs
-      // functionality to update state, e.g. recording when the stream
-      // has completed.
-      // The operator decision tree on the rxjs website is a useful resource:
-      // https://rxjs.dev/operator-decision-tree.
-      finalize(() => {
-        setStarted(false);
-        setFinished(true);
-      })
-    );
-    // Subscribe to the stream and update the state for each returned value.
-    return {
-      enabled,
-      stream: stream.subscribe(setReply),
-    };
+    if (!useStream) {
+      // Make a single request to the LLM.
+      const response = await llms.openai.chatCompletions({
+        model: 'gpt-3.5-turbo',
+        messages: [
+          { role: 'system', content: 'You are a cynical assistant.' },
+          { role: 'user', content: message },
+        ],
+      });
+      setReply(response.choices[0].message.content);
+      return { enabled, response };
+    } else {
+      setStarted(true);
+      setFinished(false);
+      // Stream the completions. Each element is the next stream chunk.
+      console.log("AAAAAAAAAAAAA")
+      const stream = llms.openai.streamChatCompletions({
+        model: 'gpt-3.5-turbo',
+        messages: [
+          { role: 'system', content: 'You are a cynical assistant.' },
+          { role: 'user', content: message },
+        ],
+      }).pipe(
+        // Accumulate the stream content into a stream of strings, where each
+        // element contains the accumulated message so far.
+        llms.openai.accumulateContent(),
+        // The stream is just a regular Observable, so we can use standard rxjs
+        // functionality to update state, e.g. recording when the stream
+        // has completed.
+        // The operator decision tree on the rxjs website is a useful resource:
+        // https://rxjs.dev/operator-decision-tree.
+        finalize(() => {
+          setStarted(false);
+          setFinished(true);
+        })
+      );
+      // Subscribe to the stream and update the state for each returned value.
+      return {
+        enabled,
+        stream: stream.subscribe(setReply),
+      };
+    }
   }, [message]);
 
   if (error) {
@@ -73,7 +89,8 @@
         placeholder="Enter a message"
       />
 
-
+
+
     </div>
     <div>{loading ? <Spinner /> : reply}</div>
     <div>{started ? "Response is started" : "Response is not started"}</div>
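
Note: with patch 1 applied, the non-stream path boils down to a single awaited
chatCompletions call. A minimal standalone sketch of that flow, assuming the
`llms` helper exported by '@grafana/experimental' (the import this app uses
elsewhere); the `fetchReply` wrapper is hypothetical, not part of the patch:

    // Hypothetical helper mirroring the non-stream branch added above.
    // Assumes `llms` comes from '@grafana/experimental', as in this app.
    import { llms } from '@grafana/experimental';

    async function fetchReply(message: string): Promise<string> {
      const response = await llms.openai.chatCompletions({
        model: 'gpt-3.5-turbo',
        messages: [
          { role: 'system', content: 'You are a cynical assistant.' },
          { role: 'user', content: message },
        ],
      });
      // A non-streaming completion returns the whole reply in one payload.
      return response.choices[0].message.content;
    }
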
From 04eee2e533f5b8e1e126ca98001d966885ce29ab Mon Sep 17 00:00:00 2001
From: Edward Qian
Date: Tue, 26 Sep 2023 11:22:59 -0400
Subject: [PATCH 2/2] added started and finished

---
 llmexamples-app/src/pages/ExamplePage.tsx | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/llmexamples-app/src/pages/ExamplePage.tsx b/llmexamples-app/src/pages/ExamplePage.tsx
index ee654c51..78851afa 100644
--- a/llmexamples-app/src/pages/ExamplePage.tsx
+++ b/llmexamples-app/src/pages/ExamplePage.tsx
@@ -30,6 +30,8 @@ export function ExamplePage() {
       return { enabled };
     }
 
+    setStarted(true);
+    setFinished(false);
     if (!useStream) {
       // Make a single request to the LLM.
       const response = await llms.openai.chatCompletions({
         model: 'gpt-3.5-turbo',
         messages: [
           { role: 'system', content: 'You are a cynical assistant.' },
           { role: 'user', content: message },
         ],
       });
       setReply(response.choices[0].message.content);
+      setStarted(false);
+      setFinished(true);
       return { enabled, response };
     } else {
-      setStarted(true);
-      setFinished(false);
       // Stream the completions. Each element is the next stream chunk.
-      console.log("AAAAAAAAAAAAA")
       const stream = llms.openai.streamChatCompletions({
         model: 'gpt-3.5-turbo',
         messages: [
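
Note: patch 2 hoists the setStarted(true)/setFinished(false) bookkeeping above
the branch so both paths report progress, resets the flags once the non-stream
response lands, and drops the leftover debug console.log. The streaming branch
itself is unchanged: `finalize` clears the flags when the Observable completes.
A minimal sketch of that branch in isolation, assuming `llms` from
'@grafana/experimental' and `finalize` from 'rxjs' (the `streamReply` wrapper
and its callbacks are illustrative, not part of the patch):

    // Hypothetical wrapper around the streaming branch left in place above.
    // Assumes `llms` from '@grafana/experimental' and `finalize` from 'rxjs'.
    import { llms } from '@grafana/experimental';
    import { finalize } from 'rxjs';

    function streamReply(
      message: string,
      onReply: (accumulated: string) => void,
      onDone: () => void
    ) {
      return llms.openai
        .streamChatCompletions({
          model: 'gpt-3.5-turbo',
          messages: [
            { role: 'system', content: 'You are a cynical assistant.' },
            { role: 'user', content: message },
          ],
        })
        .pipe(
          // Each emission is the accumulated reply so far, not a lone chunk.
          llms.openai.accumulateContent(),
          // Runs once, when the stream completes or errors.
          finalize(onDone)
        )
        .subscribe(onReply);
    }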