|
| 1 | +import * as webllm from "@mlc-ai/web-llm"; |
| 2 | +import { imageURLToBase64 } from "./utils"; |
| 3 | + |
| 4 | +function setLabel(id: string, text: string) { |
| 5 | + const label = document.getElementById(id); |
| 6 | + if (label == null) { |
| 7 | + throw Error("Cannot find label " + id); |
| 8 | + } |
| 9 | + label.innerText = text; |
| 10 | +} |
| 11 | + |
// Public CORS proxy prepended to image URLs so the browser can fetch them
// cross-origin. NOTE(review): cors-anywhere.herokuapp.com is a rate-limited
// demo service requiring opt-in — replace with your own proxy for real use.
const proxyUrl = "https://cors-anywhere.herokuapp.com/";
// Sample images used by the vision-model demo in main().
const url_https_street = "https://www.ilankelman.org/stopsigns/australia.jpg";
const url_https_tree = "https://www.ilankelman.org/sunset.jpg";
const url_https_sea =
  "https://www.islandvulnerability.org/index/silhouette.jpg";
| 17 | + |
| 18 | +async function main() { |
| 19 | + // can feed request with either base64 or http url |
| 20 | + const url_base64_street = await imageURLToBase64(proxyUrl + url_https_street); |
| 21 | + |
| 22 | + const initProgressCallback = (report: webllm.InitProgressReport) => { |
| 23 | + setLabel("init-label", report.text); |
| 24 | + }; |
| 25 | + const selectedModel = "Phi-3.5-vision-instruct-q4f16_1-MLC"; |
| 26 | + const engine: webllm.MLCEngineInterface = await webllm.CreateMLCEngine( |
| 27 | + selectedModel, |
| 28 | + { |
| 29 | + initProgressCallback: initProgressCallback, |
| 30 | + logLevel: "INFO", // specify the log level |
| 31 | + }, |
| 32 | + { |
| 33 | + context_window_size: 6144, |
| 34 | + }, |
| 35 | + ); |
| 36 | + |
| 37 | + // 1. Single image input (with choices) |
| 38 | + const messages: webllm.ChatCompletionMessageParam[] = [ |
| 39 | + { |
| 40 | + role: "user", |
| 41 | + content: [ |
| 42 | + { type: "text", text: "List the items in each image concisely." }, |
| 43 | + { |
| 44 | + type: "image_url", |
| 45 | + image_url: { |
| 46 | + url: url_base64_street, |
| 47 | + }, |
| 48 | + }, |
| 49 | + { |
| 50 | + type: "image_url", |
| 51 | + image_url: { |
| 52 | + url: proxyUrl + url_https_sea, |
| 53 | + }, |
| 54 | + }, |
| 55 | + ], |
| 56 | + }, |
| 57 | + ]; |
| 58 | + const request0: webllm.ChatCompletionRequest = { |
| 59 | + stream: false, // can be streaming, same behavior |
| 60 | + messages: messages, |
| 61 | + }; |
| 62 | + const reply0 = await engine.chat.completions.create(request0); |
| 63 | + const replyMessage0 = await engine.getMessage(); |
| 64 | + console.log(reply0); |
| 65 | + console.log(replyMessage0); |
| 66 | + console.log(reply0.usage); |
| 67 | + |
| 68 | + // 2. A follow up text-only question |
| 69 | + messages.push({ role: "assistant", content: replyMessage0 }); |
| 70 | + messages.push({ role: "user", content: "What is special about each image?" }); |
| 71 | + const request1: webllm.ChatCompletionRequest = { |
| 72 | + stream: false, // can be streaming, same behavior |
| 73 | + messages: messages, |
| 74 | + }; |
| 75 | + const reply1 = await engine.chat.completions.create(request1); |
| 76 | + const replyMessage1 = await engine.getMessage(); |
| 77 | + console.log(reply1); |
| 78 | + console.log(replyMessage1); |
| 79 | + console.log(reply1.usage); |
| 80 | + |
| 81 | + // 3. A follow up multi-image question |
| 82 | + messages.push({ role: "assistant", content: replyMessage1 }); |
| 83 | + messages.push({ |
| 84 | + role: "user", |
| 85 | + content: [ |
| 86 | + { type: "text", text: "What about this image? Answer concisely." }, |
| 87 | + { |
| 88 | + type: "image_url", |
| 89 | + image_url: { url: proxyUrl + url_https_tree }, |
| 90 | + }, |
| 91 | + ], |
| 92 | + }); |
| 93 | + const request2: webllm.ChatCompletionRequest = { |
| 94 | + stream: false, // can be streaming, same behavior |
| 95 | + messages: messages, |
| 96 | + }; |
| 97 | + const reply2 = await engine.chat.completions.create(request2); |
| 98 | + const replyMessage2 = await engine.getMessage(); |
| 99 | + console.log(reply2); |
| 100 | + console.log(replyMessage2); |
| 101 | + console.log(reply2.usage); |
| 102 | +} |
| 103 | + |
| 104 | +main(); |
0 commit comments