Skip to content

Commit 0464a1a

Browse files
committed
ADD Chatbot UI example
1 parent fb62f92 commit 0464a1a

File tree

3 files changed

+115
-0
lines changed

3 files changed

+115
-0
lines changed

examples/chatbot-ui/Dockerfile

+70
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,70 @@
# syntax=docker/dockerfile:1

# Pin the base image; a bare `FROM ubuntu` floats with :latest and makes
# builds non-reproducible (hadolint DL3006/DL3007).
FROM ubuntu:22.04

# Node.js toolchain locations. ENV (not ARG) because NODE_DIR/NODE_PATH/PATH
# must also be visible at container runtime. Use key=value form; the legacy
# space-separated form is deprecated.
ENV NODE_VERSION=18.16.0
ENV NODE_DIR=/node
ENV NODE_PATH=${NODE_DIR}/lib/node_modules

# Git branches to build from (master by default).
ENV LLAMA_CPP_BRANCH=master
ENV GPT_LLAMA_CPP_BRANCH=master

ENV PATH=${NODE_DIR}/bin:${PATH}

# Fail RUN pipelines when the left-hand command fails, not only the last
# one (hadolint DL4006) — matters for the debconf pipe below.
SHELL ["/bin/bash", "-o", "pipefail", "-c"]

# Make sure apt is non-interactive
RUN set -x \
    && echo 'debconf debconf/frontend select Noninteractive' \
        | debconf-set-selections

# Install deps. Use apt-get (the `apt` CLI is not script-stable, DL3027),
# skip recommended packages, and remove the package lists in the same
# layer so they never bloat the image (DL3009/DL3015).
RUN set -x \
    && apt-get update --yes \
    && apt-get install --yes --no-install-recommends \
        ca-certificates \
        curl \
        g++ \
        gcc \
        git \
        make \
        python-is-python3 \
        python3-pip \
        xz-utils \
    && rm -rf /var/lib/apt/lists/*

# Install node from the official binary tarball. `--fail` makes curl exit
# non-zero on an HTTP error instead of saving the error page as the
# "tarball"; the download is removed in the same layer.
RUN set -x \
    && mkdir --parents "${NODE_DIR}" \
    && curl \
        --fail \
        --location \
        --output /tmp/node.tar.gz \
        "https://nodejs.org/dist/v${NODE_VERSION}/node-v${NODE_VERSION}-linux-x64.tar.gz" \
    && tar \
        --strip-components=1 \
        --ungzip \
        --extract \
        --file="/tmp/node.tar.gz" \
        --directory="${NODE_DIR}" \
    && rm -f /tmp/node.tar.gz

# Install LLaMA CPP: shallow-clone into /llama.cpp, build the native
# binaries, and install the Python conversion requirements without
# keeping pip's download cache (DL3042). WORKDIR replaces `cd` (DL3003).
WORKDIR /
RUN set -x \
    && git clone \
        --branch "${LLAMA_CPP_BRANCH}" \
        --depth 1 \
        https://github.com/ggerganov/llama.cpp
WORKDIR /llama.cpp
RUN set -x \
    && make -j \
    && python -m pip install --no-cache-dir -r requirements.txt \
    && mkdir -p models

# Install GPT LLaMA CPP (the OpenAI-compatible API bridge around llama.cpp).
WORKDIR /
RUN set -x \
    && git clone \
        --branch "${GPT_LLAMA_CPP_BRANCH}" \
        --depth 1 \
        https://github.com/keldenl/gpt-llama.cpp
WORKDIR /gpt-llama.cpp
RUN set -x \
    && npm install

# EXPOSE is documentation only; publishing is done by compose/docker run.
EXPOSE 443
WORKDIR /gpt-llama.cpp
# `bash -c` entrypoint lets a compose `command` supply one shell string,
# e.g. "npm start mlock threads 7". Default is a plain `npm start`.
ENTRYPOINT ["/bin/bash", "-c"]
CMD ["npm start"]

examples/chatbot-ui/README.md

+19
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,19 @@
1+
# Chatbot UI with local LLMs
2+
3+
This example makes use of three projects [llama.cpp](https://github.com/ggerganov/llama.cpp), [gpt-llama.cpp](https://github.com/keldenl/gpt-llama.cpp) and [Chatbot UI](https://github.com/mckaywrigley/chatbot-ui) to provide a ChatGPT UI like experience with llama.cpp.
4+
5+
## How to use
6+
1. Edit the volume bind in `compose.yaml` with the path to the model you wish to use
7+
8+
volumes:
9+
- type: bind
10+
source: /llm_models/something.ggml.q4_0.bin
11+
target: /llama.cpp/models/model.bin
12+
13+
1. Start services with `docker-compose`
14+
15+
docker-compose up --build
16+
17+
1. When updating use the following `docker-compose` command to make sure everything gets updated
18+
19+
    docker-compose build --no-cache --pull && docker-compose up --force-recreate

examples/chatbot-ui/compose.yaml

+26
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,26 @@
version: "3.9"

services:
  # gpt-llama.cpp API bridge, built from the local Dockerfile.
  gpt_llama:
    # NOTE(review): privileged mode looks unnecessary for a CPU inference
    # server and widens the attack surface — confirm before keeping it.
    privileged: true
    build: .
    volumes:
      # Bind-mount the local GGML model file where llama.cpp expects it.
      # Replace the source path with your actual model (see README).
      - type: bind
        source: /PATH/TO/MODEL.bin
        target: /llama.cpp/models/model.bin
    # Passed as a single shell string to the image's `bash -c` entrypoint
    # (trailing whitespace removed; it was harmless but sloppy).
    command: ["npm start mlock threads 7"]
    networks:
      - llocal

  # Chatbot UI frontend, pointed at gpt_llama instead of api.openai.com.
  chatbot_ui:
    image: ghcr.io/mckaywrigley/chatbot-ui:main
    ports:
      - "3000:3000"
    environment:
      DEFAULT_MODEL: gpt-3.5-turbo
      DEFAULT_SYSTEM_PROMPT: You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown.
      # gpt-llama.cpp repurposes the API-key field to carry the model path
      # inside the gpt_llama container — this is intentional, not a leak.
      OPENAI_API_KEY: /llama.cpp/models/model.bin
      # Service-name DNS on the shared network; port matches the EXPOSEd 443.
      OPENAI_API_HOST: http://gpt_llama:443
    networks:
      - llocal

networks:
  llocal: {}

0 commit comments

Comments
 (0)