Commit 588ccff

pinzonthrau and Thomas Rausch authored

add openai mock extension (#36)

Co-authored-by: Thomas Rausch <[email protected]>
1 parent 560a4ad commit 588ccff

11 files changed: +501 −0 lines changed

Diff for: openai/LICENSE.txt (+14)

@@ -0,0 +1,14 @@
Copyright (c) 2017+ LocalStack contributors
Copyright (c) 2016 Atlassian Pty Ltd

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

Diff for: openai/Makefile (+39)

@@ -0,0 +1,39 @@
VENV_BIN = python3 -m venv
VENV_DIR ?= .venv
VENV_ACTIVATE = $(VENV_DIR)/bin/activate
VENV_RUN = . $(VENV_ACTIVATE)

venv: $(VENV_ACTIVATE)

$(VENV_ACTIVATE): setup.py setup.cfg
	test -d .venv || $(VENV_BIN) .venv
	$(VENV_RUN); pip install --upgrade pip setuptools plux wheel
	$(VENV_RUN); pip install --upgrade black isort pyproject-flake8 flake8-black flake8-isort
	$(VENV_RUN); pip install -e .
	touch $(VENV_DIR)/bin/activate

clean:
	rm -rf .venv/
	rm -rf build/
	rm -rf .eggs/
	rm -rf *.egg-info/

lint:    ## Run code linter to check code style
	($(VENV_RUN); python -m pflake8 --show-source)

format:  ## Run black and isort code formatter
	$(VENV_RUN); python -m isort .; python -m black .

install: venv
	$(VENV_RUN); python setup.py develop

dist: venv
	$(VENV_RUN); python setup.py sdist bdist_wheel

publish: clean-dist venv dist
	$(VENV_RUN); pip install --upgrade twine; twine upload dist/*

clean-dist: clean
	rm -rf dist/

.PHONY: clean clean-dist dist install publish

Diff for: openai/README.md (+61)

@@ -0,0 +1,61 @@
# LocalStack OpenAI Extension

![GitHub license](https://img.shields.io/badge/license-Apache%202.0-blue.svg)
![Python version](https://img.shields.io/badge/python-3.11%2B-blue)
[![Build Status](https://travis-ci.com/yourusername/localstack-openai-mock.svg?branch=master)](https://travis-ci.com/yourusername/localstack-openai-mock)

This is a LocalStack extension that allows you to mock the OpenAI API for testing and development purposes. It provides a convenient way to interact with a mock OpenAI service locally using LocalStack.

## Installation

You can install this extension directly using the LocalStack extension manager:

```bash
localstack extensions install localstack-extension-openai
```

## Usage

Once installed, you can access the OpenAI Mock API through `localhost:4510/v1`.

### Example

```python
import openai

openai.organization = "org-test"
openai.api_key = "test"
openai.api_base = "http://localhost:4510/v1"

completion = openai.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Hello!"},
    ],
)
print(completion.choices)
```

## Coverage

- [x] Chat completion
- [x] Engines listing
- [x] Transcribe
- [x] Translate
- [x] Generate image URL
- [ ] Generate image base64
- [ ] Embeddings
- [ ] Fine-tuning
- [ ] Files
- [ ] Moderations

## Authors

**Cristopher Pinzon** [email protected]

## Licensing

* The extension code is licensed under the Apache 2.0 License.

### Thank you for using the LocalStack OpenAI Extension!
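The README example above covers only the non-streaming path. Since the extension also mocks streamed chat completions, here is a minimal sketch of consuming the stream, assuming the same pre-1.0 `openai` Python client used in the README example and the mock reachable at `localhost:4510/v1`:

```python
import openai

openai.organization = "org-test"
openai.api_key = "test"
openai.api_base = "http://localhost:4510/v1"

# stream=True makes the mock reply with server-sent events,
# one "chat.completion.chunk" object per generated word
stream = openai.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello!"}],
    stream=True,
)
for chunk in stream:
    delta = chunk["choices"][0]["delta"]
    # content deltas carry the text; the final "stop" chunk has an empty delta
    print(delta.get("content", ""), end="", flush=True)
```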

Diff for: openai/localstack_openai/__init__.py (+1)

@@ -0,0 +1 @@
__version__ = "0.1.0"

Diff for: openai/localstack_openai/extension.py (+38)

@@ -0,0 +1,38 @@
import logging

from localstack import config
from localstack.extensions.api import Extension, http
from rolo.router import RuleAdapter, WithHost
from werkzeug.routing import Submount

LOG = logging.getLogger(__name__)


class LocalstackOpenAIExtension(Extension):
    name = "openai"

    submount = "/_extension/openai"
    subdomain = "openai"

    def on_extension_load(self):
        logging.getLogger("localstack_openai").setLevel(
            logging.DEBUG if config.DEBUG else logging.INFO
        )

    def update_gateway_routes(self, router: http.Router[http.RouteHandler]):
        from localstack_openai.mock_openai import Api

        api = RuleAdapter(Api())

        # add path routes for localhost:4566/v1/chat/completion
        router.add(
            [
                Submount(self.submount, [api]),
                WithHost(f"{self.subdomain}.{config.LOCALSTACK_HOST.host}<__host__>", [api]),
            ]
        )

        LOG.info(
            "OpenAI mock available at %s%s", str(config.LOCALSTACK_HOST).rstrip("/"), self.submount
        )
        LOG.info("OpenAI mock available at %s", f"{self.subdomain}.{config.LOCALSTACK_HOST}")

Diff for: openai/localstack_openai/mock_openai.py (+239)

@@ -0,0 +1,239 @@
import json
import time

from faker import Faker
from rolo import Request, Response, route

faker = Faker()

res_len = 20


class ChunkReader:
    # holds the state of one simulated streaming chat completion
    def __init__(self, chunks, delay):
        self.ID = ""
        self.Created = 0
        self.Chunks = chunks
        self.SentFinished = False
        self.SentDone = False
        self.Delay = delay


def new_chunk_reader(cs, d):
    return ChunkReader(cs, d)


def done(r):
    return r.SentFinished and r.SentDone


def next_chunk(r):
    # emit the next SSE frame: one content delta per remaining chunk,
    # then a finish_reason="stop" frame, then the terminating "[DONE]"
    if r.SentDone:
        return None, None

    if r.SentFinished:
        b = b"data: [DONE]\n\n"
        r.SentDone = True
        return b, None

    if len(r.Chunks) == 0:
        d = {
            "id": r.ID,
            "object": "chat.completion.chunk",
            "created": r.Created,
            "model": "gpt-3.5-turbo",
            "choices": [
                {
                    "index": 0,
                    "delta": {},
                    "finish_reason": "stop",
                }
            ],
        }

        b = json.dumps(d).encode()
        r.SentFinished = True
        b = b"data: " + b + b"\n\n"
        return b, None

    c = r.Chunks[0] + " "
    d = {
        "id": r.ID,
        "object": "chat.completion.chunk",
        "created": r.Created,
        "model": "gpt-3.5-turbo",
        "choices": [
            {
                "index": 0,
                "delta": {
                    "content": c,
                },
                "finish_reason": None,
            }
        ],
    }
    b = json.dumps(d).encode()
    r.Chunks = r.Chunks[1:]
    b = b"data: " + b + b"\n\n"
    return b, None


def read(r, p):
    # buffer-based variant of next_chunk that writes into a caller-provided
    # buffer p; not used by the Api routes below
    if done(r):
        return 0, None

    if r.SentFinished:
        b = b"data: [DONE]\n\n"
        n = min(len(b), len(p))
        p[:n] = b[:n]
        r.SentDone = True
        return n, None

    if len(r.Chunks) == 0:
        d = {
            "id": r.ID,
            "object": "chat.completion.chunk",
            "created": r.Created,
            "model": "gpt-3.5-turbo",
            "choices": [
                {
                    "index": 0,
                    "delta": {},
                    "finish_reason": "stop",
                }
            ],
        }
        b = json.dumps(d).encode()
        b = b"data: " + b + b"\n\n"
        n = min(len(b), len(p))
        p[:n] = b[:n]
        r.SentFinished = True
        return n, None

    c = r.Chunks[0] + " "
    d = {
        "id": r.ID,
        "object": "chat.completion.chunk",
        "created": r.Created,
        "model": "gpt-3.5-turbo",
        "choices": [
            {
                "index": 0,
                "delta": {
                    "content": c,
                },
                "finish_reason": None,
            }
        ],
    }
    b = json.dumps(d).encode()
    b = b"data: " + b + b"\n\n"
    n = min(len(b), len(p))
    p[:n] = b[:n]
    r.Chunks = r.Chunks[1:]
    time.sleep(r.Delay)
    return n, None


class Api:

    @route("/v1/chat/completions", methods=["POST"])
    def chat_completions(self, request: Request):
        data = request.get_data()
        req = json.loads(data)

        # generate res_len random words as the mock completion
        ws = [faker.word() for _ in range(res_len)]
        ws = [" " + w if i > 0 else w for i, w in enumerate(ws)]

        if not req.get("stream"):
            m = "".join(ws)
            return {
                "choices": [
                    {
                        "index": 0,
                        "message": {
                            "role": "assistant",
                            "content": m,
                        },
                    }
                ]
            }

        id = faker.uuid4()
        ct = int(time.time())
        sd = 0.5

        def generate():
            for w in ws:
                b, _ = next_chunk(chunk_reader)
                if b is not None:
                    yield b
                time.sleep(sd)

            b, _ = next_chunk(chunk_reader)
            if b is not None:
                yield b

            yield b"[done]\n"

        chunk_reader = new_chunk_reader(ws, sd)
        return Response(generate(), content_type="text/event-stream")

    @route("/v1/audio/transcriptions", methods=["POST"])
    def transcribe(self, request: Request):
        return {
            "text": faker.sentence(),
        }

    @route("/v1/audio/translations", methods=["POST"])
    def translate(self, request: Request):
        return {
            "text": faker.sentence(),
        }

    @route("/v1/images/generations", methods=["POST"])
    def generate_image(self, request: Request):
        return {"created": int(time.time()), "data": [{"url": faker.image_url()}]}

    @route("/v1/engines", methods=["GET"])
    def list_engines(self, request: Request):
        return {
            "object": "list",
            "data": [
                {
                    "id": "model-id-0",
                    "object": "model",
                    "created": 1686935002,
                    "owned_by": "organization-owner",
                },
                {
                    "id": "model-id-1",
                    "object": "model",
                    "created": 1686935002,
                    "owned_by": "organization-owner",
                },
                {
                    "id": "model-id-2",
                    "object": "model",
                    "created": 1686935002,
                    "owned_by": "openai",
                },
            ],
        }


def run(port=1323):
    # run the mock standalone (outside LocalStack) with werkzeug's dev server
    from rolo import Router
    from rolo.dispatcher import handler_dispatcher
    from werkzeug import Request, run_simple

    r = Router(dispatcher=handler_dispatcher())
    r.add(Api())

    app = Request.application(r.dispatch)

    run_simple("0.0.0.0", port, app)


if __name__ == "__main__":
    run()
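Since `run()` above starts the mock standalone on port 1323 by default, a quick way to inspect the SSE framing produced by `next_chunk` is to consume the raw stream directly. A minimal sketch, assuming the third-party `requests` library and the dev server started via `python -m localstack_openai.mock_openai`:

```python
import json

import requests

# talk to the standalone dev server started by run() (default port 1323)
resp = requests.post(
    "http://localhost:1323/v1/chat/completions",
    json={
        "model": "gpt-3.5-turbo",
        "messages": [{"role": "user", "content": "Hello!"}],
        "stream": True,
    },
    stream=True,
)

# each SSE frame arrives as a line of the form b'data: {...chat.completion.chunk...}'
for line in resp.iter_lines():
    if not line or not line.startswith(b"data: "):
        continue  # skip blank separators and the trailing b"[done]" marker
    payload = line[len(b"data: "):]
    if payload == b"[DONE]":
        break
    delta = json.loads(payload)["choices"][0]["delta"]
    print(delta.get("content", ""), end="", flush=True)
```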
