Skip to content

Commit c0b5cb0

Browse files
authored
[Example] Add simple-chat-js example (#434)
Add one more simple example chatbot project in vanilla JavaScript, and hosted it on both JSFiddle and CodePen. https://jsfiddle.net/neetnestor/4nmgvsa2/ https://codepen.io/neetnestor/pen/vYwgZaG
1 parent 3731fe6 commit c0b5cb0

16 files changed

+350
-35
lines changed

.eslintrc.cjs

+9-1
Original file line numberDiff line numberDiff line change
@@ -6,5 +6,13 @@ module.exports = {
66
rules: {
77
"@typescript-eslint/no-explicit-any": "off",
88
"@typescript-eslint/no-empty-function": "off"
9-
}
9+
},
10+
overrides: [
11+
{
12+
"files": ["examples/**/*.js"],
13+
"rules": {
14+
"no-undef": "off"
15+
}
16+
}
17+
]
1018
};

.lintstagedrc.json

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,3 @@
11
{
2-
"./**/*.{js,ts,jsx,tsx,json,css}": ["eslint --fix", "prettier --write"]
2+
"./**/*.{js,ts,jsx,tsx,json}": ["eslint --fix", "prettier --write"]
33
}

examples/README.md

+8-4
Original file line numberDiff line numberDiff line change
@@ -3,16 +3,20 @@
33
This page contains a curated list of examples, tutorials, and blogs about WebLLM use cases.
44
Please send a pull request if you find things that belong here.
55

6-
## Tutorial Examples
6+
## Example Projects
77

88
Note that all examples below run in-browser and use WebGPU as a backend.
99

10-
#### Basic Chat Completion
10+
#### Project List
1111
- [get-started](get-started): minimum get started example with chat completion.
1212
- [get-started-web-worker](get-started-web-worker): same as get-started, but using web worker.
13-
- [multi-round-chat](multi-round-chat): while APIs are functional, we internally optimize so that multi round chat usage can reuse KV cache
14-
- [simple-chat](simple-chat): a mininum and complete chat bot app.
13+
- [simple-chat-js](simple-chat-js): a minimal and complete chatbot app in vanilla JavaScript.
14+
15+
[![Open on JSFiddle](https://img.shields.io/badge/open-JSFiddle-blue?logo=jsfiddle&logoColor=white)](https://jsfiddle.net/neetnestor/4nmgvsa2/)
16+
[![Open on Codepen](https://img.shields.io/badge/open-codepen-black?logo=codepen)](https://codepen.io/neetnestor/pen/vYwgZaG)
17+
- [simple-chat-ts](simple-chat-ts): a minimal and complete chatbot app.
1518
- [next-simple-chat](next-simple-chat): a minimal and complete chatbot app with [Next.js](https://nextjs.org/).
19+
- [multi-round-chat](multi-round-chat): while APIs are functional, we internally optimize so that multi round chat usage can reuse KV cache
1620

1721
#### Advanced OpenAI API Capabilities
1822
These examples demonstrate various capabilities via WebLLM's OpenAI-like API.

examples/simple-chat-js/index.css

+104
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,104 @@
1+
body,
2+
html {
3+
font-family: Arial, sans-serif;
4+
padding: 10px 20px;
5+
}
6+
7+
.download-container {
8+
display: flex;
9+
justify-content: space-between;
10+
margin-bottom: 20px;
11+
}
12+
13+
#download-status {
14+
border: solid 1px black;
15+
box-shadow: 0 10px 15px -3px rgba(0, 0, 0, .1), 0 4px 6px -2px rgba(0, 0, 0, .05);
16+
padding: 10px;
17+
}
18+
19+
.chat-container {
20+
height: 400px;
21+
width: 100%;
22+
border: 2px solid black;
23+
display: flex;
24+
flex-direction: column;
25+
}
26+
27+
.chat-box {
28+
overflow-y: scroll;
29+
background-color: #c3c3c3;
30+
border: 1px solid #ccc;
31+
padding: 5px;
32+
flex: 1 1;
33+
}
34+
35+
.chat-stats {
36+
background-color: #D3ECEB;
37+
flex: 0 0;
38+
padding: 10px;
39+
font-size: 0.75rem;
40+
}
41+
42+
.message-container {
43+
width: 100%;
44+
display: flex;
45+
}
46+
47+
.message {
48+
padding: 10px;
49+
margin: 10px 0;
50+
border-radius: 10px;
51+
width: fit-content;
52+
}
53+
54+
.message-container.user {
55+
justify-content: end;
56+
}
57+
58+
.message-container.assistant {
59+
justify-content: start;
60+
}
61+
62+
.message-container.user .message {
63+
background: #007bff;
64+
color: #fff;
65+
}
66+
67+
.message-container.assistant .message {
68+
background: #f1f0f0;
69+
color: #333;
70+
}
71+
72+
.chat-input-container {
73+
min-height: 40px;
74+
flex: 0 0;
75+
display: flex;
76+
}
77+
78+
#user-input {
79+
width: 70%;
80+
padding: 10px;
81+
border: 1px solid #ccc;
82+
}
83+
84+
button {
85+
width: 25%;
86+
padding: 10px;
87+
border: none;
88+
background-color: #007bff;
89+
color: white;
90+
cursor: pointer;
91+
}
92+
93+
button:disabled {
94+
background-color: lightgray;
95+
cursor: not-allowed;
96+
}
97+
98+
button:hover:not(:disabled) {
99+
background-color: #0056b3;
100+
}
101+
102+
.hidden {
103+
display: none;
104+
}

examples/simple-chat-js/index.html

+38
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,38 @@
1+
<!DOCTYPE html>
2+
<html>
3+
4+
<head>
5+
<title>Simple Chatbot</title>
6+
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
7+
<meta charset="UTF-8" />
8+
<link rel="stylesheet" href="./index.css" />
9+
</head>
10+
11+
<body>
12+
<p>
13+
Step 1: Initialize WebLLM and Download Model
14+
</p>
15+
<div class="download-container">
16+
<select id="model-selection"></select>
17+
<button id="download">
18+
Download
19+
</button>
20+
</div>
21+
<p id="download-status" class="hidden"></p>
22+
23+
<p>
24+
Step 2: Chat
25+
</p>
26+
<div class="chat-container">
27+
<div id="chat-box" class="chat-box"></div>
28+
<div id="chat-stats" class="chat-stats hidden"></div>
29+
<div class="chat-input-container">
30+
<input type="text" id="user-input" placeholder="Type a message..." />
31+
<button id="send" disabled>Send</button>
32+
</div>
33+
</div>
34+
35+
<script src="./index.js" type="module"></script>
36+
</body>
37+
38+
</html>

examples/simple-chat-js/index.js

+142
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,142 @@
1+
import * as webllm from "https://esm.run/@mlc-ai/web-llm";
2+
3+
/*************** WebLLM logic ***************/
4+
const messages = [
5+
{
6+
content: "You are a helpful AI agent helping users.",
7+
role: "system",
8+
},
9+
];
10+
11+
const availableModels = webllm.prebuiltAppConfig.model_list.map(
12+
(m) => m.model_id,
13+
);
14+
let selectedModel = "TinyLlama-1.1B-Chat-v0.4-q4f32_1-1k";
15+
16+
// Callback function for initializing progress
17+
function updateEngineInitProgressCallback(report) {
18+
console.log("initialize", report.progress);
19+
document.getElementById("download-status").textContent = report.text;
20+
}
21+
22+
// Create engine instance
23+
const engine = new webllm.MLCEngine();
24+
engine.setInitProgressCallback(updateEngineInitProgressCallback);
25+
26+
async function initializeWebLLMEngine() {
27+
document.getElementById("download-status").classList.remove("hidden");
28+
selectedModel = document.getElementById("model-selection").value;
29+
const config = {
30+
temperature: 1.0,
31+
top_p: 1,
32+
};
33+
await engine.reload(selectedModel, config);
34+
}
35+
36+
async function streamingGenerating(messages, onUpdate, onFinish, onError) {
37+
try {
38+
let curMessage = "";
39+
const completion = await engine.chat.completions.create({
40+
stream: true,
41+
messages,
42+
});
43+
for await (const chunk of completion) {
44+
const curDelta = chunk.choices[0].delta.content;
45+
if (curDelta) {
46+
curMessage += curDelta;
47+
}
48+
onUpdate(curMessage);
49+
}
50+
const finalMessage = await engine.getMessage();
51+
onFinish(finalMessage);
52+
} catch (err) {
53+
onError(err);
54+
}
55+
}
56+
57+
/*************** UI logic ***************/
58+
function onMessageSend() {
59+
const input = document.getElementById("user-input").value.trim();
60+
const message = {
61+
content: input,
62+
role: "user",
63+
};
64+
if (input.length === 0) {
65+
return;
66+
}
67+
document.getElementById("send").disabled = true;
68+
69+
messages.push(message);
70+
appendMessage(message);
71+
72+
document.getElementById("user-input").value = "";
73+
document
74+
.getElementById("user-input")
75+
.setAttribute("placeholder", "Generating...");
76+
77+
const aiMessage = {
78+
content: "typing...",
79+
role: "assistant",
80+
};
81+
appendMessage(aiMessage);
82+
83+
const onFinishGenerating = (finalMessage) => {
84+
updateLastMessage(finalMessage);
85+
document.getElementById("send").disabled = false;
86+
engine.runtimeStatsText().then((statsText) => {
87+
document.getElementById("chat-stats").classList.remove("hidden");
88+
document.getElementById("chat-stats").textContent = statsText;
89+
});
90+
};
91+
92+
streamingGenerating(
93+
messages,
94+
updateLastMessage,
95+
onFinishGenerating,
96+
console.error,
97+
);
98+
}
99+
100+
function appendMessage(message) {
101+
const chatBox = document.getElementById("chat-box");
102+
const container = document.createElement("div");
103+
container.classList.add("message-container");
104+
const newMessage = document.createElement("div");
105+
newMessage.classList.add("message");
106+
newMessage.textContent = message.content;
107+
108+
if (message.role === "user") {
109+
container.classList.add("user");
110+
} else {
111+
container.classList.add("assistant");
112+
}
113+
114+
container.appendChild(newMessage);
115+
chatBox.appendChild(container);
116+
chatBox.scrollTop = chatBox.scrollHeight; // Scroll to the latest message
117+
}
118+
119+
function updateLastMessage(content) {
120+
const messageDoms = document
121+
.getElementById("chat-box")
122+
.querySelectorAll(".message");
123+
const lastMessageDom = messageDoms[messageDoms.length - 1];
124+
lastMessageDom.textContent = content;
125+
}
126+
127+
/*************** UI binding ***************/
128+
availableModels.forEach((modelId) => {
129+
const option = document.createElement("option");
130+
option.value = modelId;
131+
option.textContent = modelId;
132+
document.getElementById("model-selection").appendChild(option);
133+
});
134+
document.getElementById("model-selection").value = selectedModel;
135+
document.getElementById("download").addEventListener("click", function () {
136+
initializeWebLLMEngine().then(() => {
137+
document.getElementById("send").disabled = false;
138+
});
139+
});
140+
document.getElementById("send").addEventListener("click", function () {
141+
onMessageSend();
142+
});
File renamed without changes.
File renamed without changes.
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
import { prebuiltAppConfig } from "@mlc-ai/web-llm";
22

33
export default {
4-
"model_list": prebuiltAppConfig.model_list,
5-
"use_web_worker": true
6-
}
4+
model_list: prebuiltAppConfig.model_list,
5+
use_web_worker: true,
6+
};

0 commit comments

Comments
 (0)