Skip to content

Commit dff2007

Browse files
committed
remove flask dependency and serve extension through gateway
1 parent e7a49ba commit dff2007

File tree

6 files changed

+166
-119
lines changed

6 files changed

+166
-119
lines changed

openai/Makefile

+39
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,39 @@
1+
VENV_BIN = python3 -m venv
2+
VENV_DIR ?= .venv
3+
VENV_ACTIVATE = $(VENV_DIR)/bin/activate
4+
VENV_RUN = . $(VENV_ACTIVATE)
5+
6+
venv: $(VENV_ACTIVATE)
7+
8+
$(VENV_ACTIVATE): setup.py setup.cfg
9+
test -d .venv || $(VENV_BIN) .venv
10+
$(VENV_RUN); pip install --upgrade pip setuptools plux wheel
11+
$(VENV_RUN); pip install --upgrade black isort pyproject-flake8 flake8-black flake8-isort
12+
$(VENV_RUN); pip install -e .
13+
touch $(VENV_DIR)/bin/activate
14+
15+
clean:
16+
rm -rf .venv/
17+
rm -rf build/
18+
rm -rf .eggs/
19+
rm -rf *.egg-info/
20+
21+
lint: ## Run code linter to check code style
22+
($(VENV_RUN); python -m pflake8 --show-source)
23+
24+
format: ## Run black and isort code formatter
25+
$(VENV_RUN); python -m isort .; python -m black .
26+
27+
install: venv
28+
$(VENV_RUN); python setup.py develop
29+
30+
dist: venv
31+
$(VENV_RUN); python setup.py sdist bdist_wheel
32+
33+
publish: clean-dist venv dist
34+
$(VENV_RUN); pip install --upgrade twine; twine upload dist/*
35+
36+
clean-dist: clean
37+
rm -rf dist/
38+
39+
.PHONY: clean clean-dist dist install publish

openai/localstack_openai/__init__.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -1 +1 @@
1-
__version__ = "0.1.0"
1+
__version__ = "0.1.0"

openai/localstack_openai/extension.py

+23-18
Original file line numberDiff line numberDiff line change
@@ -1,33 +1,38 @@
1-
import atexit
21
import logging
32

4-
from localstack.extensions.api import Extension, http, services
3+
from localstack import config
4+
from localstack.extensions.api import Extension, http
5+
from rolo.router import RuleAdapter, WithHost
6+
from werkzeug.routing import Submount
57

68
LOG = logging.getLogger(__name__)
79

810

911
class LocalstackOpenAIExtension(Extension):
1012
name = "openai"
1113

12-
backend_url: str
14+
submount = "/_extension/openai"
15+
subdomain = "openai"
1316

14-
def on_platform_start(self):
15-
# start localstripe when localstack starts
16-
from . import mock_openai
17-
18-
port = services.external_service_ports.reserve_port()
19-
self.backend_url = f"http://localhost:{port}"
20-
21-
print(f"Starting mock OpenAI service on {self.backend_url}")
22-
mock_openai.run(port)
23-
atexit.register(mock_openai.stop)
17+
def on_extension_load(self):
18+
logging.getLogger("localstack_openai").setLevel(
19+
logging.DEBUG if config.DEBUG else logging.INFO
20+
)
2421

2522
def update_gateway_routes(self, router: http.Router[http.RouteHandler]):
26-
# a ProxyHandler forwards all incoming requests to the backend URL
27-
endpoint = http.ProxyHandler(self.backend_url)
23+
from localstack_openai.mock_openai import Api
24+
25+
api = RuleAdapter(Api())
2826

2927
# add path routes for localhost:4566/v1/chat/completion
3028
router.add(
31-
"/v1/chat/completion",
32-
endpoint=endpoint,
33-
)
29+
[
30+
Submount(self.submount, [api]),
31+
WithHost(f"{self.subdomain}.{config.LOCALSTACK_HOST.host}<__host__>", [api]),
32+
]
33+
)
34+
35+
LOG.info(
36+
"OpenAI mock available at %s%s", str(config.LOCALSTACK_HOST).rstrip("/"), self.submount
37+
)
38+
LOG.info("OpenAI mock available at %s", f"{self.subdomain}.{config.LOCALSTACK_HOST}")

openai/localstack_openai/mock_openai.py

+89-85
Original file line numberDiff line numberDiff line change
@@ -1,14 +1,14 @@
11
import json
22
import time
3-
from faker import Faker
4-
from flask import Flask, request, Response
53

4+
from faker import Faker
5+
from rolo import Request, Response, route
66

77
faker = Faker()
8-
app = Flask(__name__)
98

109
res_len = 20
1110

11+
1212
class ChunkReader:
1313
def __init__(self, chunks, delay):
1414
self.ID = ""
@@ -18,12 +18,15 @@ def __init__(self, chunks, delay):
1818
self.SentDone = False
1919
self.Delay = delay
2020

21+
2122
def new_chunk_reader(cs, d):
2223
return ChunkReader(cs, d)
2324

25+
2426
def done(r):
2527
return r.SentFinished and r.SentDone
2628

29+
2730
def next_chunk(r):
2831
if r.SentDone:
2932
return None, None
@@ -74,6 +77,7 @@ def next_chunk(r):
7477
b = b"data: " + b + b"\n\n"
7578
return b, None
7679

80+
7781
def read(r, p):
7882
if done(r):
7983
return 0, None
@@ -130,106 +134,106 @@ def read(r, p):
130134
time.sleep(r.Delay)
131135
return n, None
132136

133-
@app.route('/v1/chat/completions', methods=['POST'])
134-
def chat_completions():
135-
data = request.get_data()
136-
req = json.loads(data)
137137

138-
ws = [faker.word() for _ in range(res_len)]
139-
ws = [" " + w if i > 0 else w for i, w in enumerate(ws)]
138+
class Api:
139+
140+
@route("/v1/chat/completions", methods=["POST"])
141+
def chat_completions(self, request: Request):
142+
data = request.get_data()
143+
req = json.loads(data)
144+
145+
ws = [faker.word() for _ in range(res_len)]
146+
ws = [" " + w if i > 0 else w for i, w in enumerate(ws)]
147+
148+
if not req.get("stream"):
149+
m = "".join(ws)
150+
return {
151+
"choices": [
152+
{
153+
"index": 0,
154+
"message": {
155+
"role": "assistant",
156+
"content": m,
157+
},
158+
}
159+
]
160+
}
140161

141-
if not req.get("stream"):
142-
m = "".join(ws)
143-
return {
144-
"choices": [
145-
{
146-
"index": 0,
147-
"message": {
148-
"role": "assistant",
149-
"content": m,
150-
},
151-
}
152-
]
153-
}
162+
id = faker.uuid4()
163+
ct = int(time.time())
164+
sd = 0.5
154165

155-
id = faker.uuid4()
156-
ct = int(time.time())
157-
sd = 0.5
166+
def generate():
167+
for w in ws:
168+
b, _ = next_chunk(chunk_reader)
169+
if b is not None:
170+
yield b
171+
time.sleep(sd)
158172

159-
def generate():
160-
for w in ws:
161173
b, _ = next_chunk(chunk_reader)
162174
if b is not None:
163175
yield b
164-
time.sleep(sd)
165-
166-
b, _ = next_chunk(chunk_reader)
167-
if b is not None:
168-
yield b
169176

170-
yield b"[done]\n"
177+
yield b"[done]\n"
171178

172-
chunk_reader = new_chunk_reader(ws, sd)
173-
return Response(generate(), content_type='text/event-stream')
179+
chunk_reader = new_chunk_reader(ws, sd)
180+
return Response(generate(), content_type="text/event-stream")
174181

182+
@route("/v1/audio/transcriptions", methods=["POST"])
183+
def transcribe(self, request: Request):
184+
return {
185+
"text": faker.sentence(),
186+
}
175187

176-
@app.route('/v1/audio/transcriptions', methods=['POST'])
177-
def transcribe():
178-
return {
179-
"text": faker.sentence(),
180-
}
181-
182-
183-
@app.route('/v1/audio/translations', methods=['POST'])
184-
def translate():
185-
return {
186-
"text": faker.sentence(),
187-
}
188-
188+
@route("/v1/audio/translations", methods=["POST"])
189+
def translate(self, request: Request):
190+
return {
191+
"text": faker.sentence(),
192+
}
189193

190-
@app.route('/v1/images/generations', methods=['POST'])
191-
def generate_image():
192-
return {
193-
"created": int(time.time()),
194-
"data": [
195-
{"url": faker.image_url()}
196-
]
197-
}
194+
@route("/v1/images/generations", methods=["POST"])
195+
def generate_image(self, request: Request):
196+
return {"created": int(time.time()), "data": [{"url": faker.image_url()}]}
198197

199-
@app.route('/v1/engines', methods=['GET'])
200-
def list_engines():
201-
return {
202-
"object": "list",
203-
"data": [
204-
{
205-
"id": "model-id-0",
206-
"object": "model",
207-
"created": 1686935002,
208-
"owned_by": "organization-owner"
209-
},
210-
{
211-
"id": "model-id-1",
212-
"object": "model",
213-
"created": 1686935002,
214-
"owned_by": "organization-owner",
215-
},
216-
{
217-
"id": "model-id-2",
218-
"object": "model",
219-
"created": 1686935002,
220-
"owned_by": "openai"
221-
},
222-
],
223-
"object": "list"
224-
}
198+
@route("/v1/engines", methods=["GET"])
199+
def list_engines(self, request: Request):
200+
return {
201+
"object": "list",
202+
"data": [
203+
{
204+
"id": "model-id-0",
205+
"object": "model",
206+
"created": 1686935002,
207+
"owned_by": "organization-owner",
208+
},
209+
{
210+
"id": "model-id-1",
211+
"object": "model",
212+
"created": 1686935002,
213+
"owned_by": "organization-owner",
214+
},
215+
{
216+
"id": "model-id-2",
217+
"object": "model",
218+
"created": 1686935002,
219+
"owned_by": "openai",
220+
},
221+
],
222+
"object": "list",
223+
}
225224

226225

227226
def run(port=1323):
228-
app.run(host="0.0.0.0",port=port, debug=True)
227+
from rolo import Router
228+
from rolo.dispatcher import handler_dispatcher
229+
from werkzeug import Request, run_simple
230+
231+
r = Router(dispatcher=handler_dispatcher())
232+
r.add(Api())
229233

234+
app = Request.application(r.dispatch)
230235

231-
def stop():
232-
app.stop()
236+
run_simple("0.0.0.0", port, app)
233237

234238

235239
if __name__ == "__main__":

openai/setup.cfg

+4-3
Original file line numberDiff line numberDiff line change
@@ -23,16 +23,17 @@ classifiers =
2323
zip_safe = False
2424
packages = find:
2525
install_requires =
26-
flask>=2.0.1
2726
faker>=8.12.1
28-
localstack>=1.0
27+
localstack>=3.1
2928
plux>=1.3
29+
rolo>=0.3
3030
test_requires =
31-
openai>=0.10.2
31+
openai>=0.10.2,<1.0
3232
pytest>=6.2.4
3333

3434
[options.extras_require]
3535
dev =
36+
openai>=0.10.2,<1.0
3637
pytest>=6.2.4
3738
black==22.3.0
3839
isort==5.10.1
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,9 @@
11
import openai
2+
23
openai.organization = "org-test"
34
openai.api_key = "test"
45
# openai.api_base = "http://localhost:1323/v1"
5-
openai.api_base = "http://localhost:4510/v1"
6+
openai.api_base = "http://localhost:4566/_extension/openai/v1"
67

78

89
def test_list_models():
@@ -12,14 +13,15 @@ def test_list_models():
1213

1314
def test_chat_completion():
1415
completion = openai.ChatCompletion.create(
15-
model="gpt-3.5-turbo",
16-
messages=[
17-
{"role": "system", "content": "You are a helpful assistant."},
18-
{"role": "user", "content": "Hello!"}
19-
]
16+
model="gpt-3.5-turbo",
17+
messages=[
18+
{"role": "system", "content": "You are a helpful assistant."},
19+
{"role": "user", "content": "Hello!"},
20+
],
2021
)
2122
assert len(completion.choices) > 0
2223

24+
2325
def test_transcribe():
2426
transcript = openai.Audio.transcribe("whisper-1", open("sample.wav", "rb"))
2527
assert len(transcript.text) > 0
@@ -31,9 +33,5 @@ def test_translate():
3133

3234

3335
def test_generate_image():
34-
response = openai.Image.create(
35-
prompt="a white siamese cat",
36-
n=1,
37-
size="1024x1024"
38-
)
39-
assert response['data'][0]['url']
36+
response = openai.Image.create(prompt="a white siamese cat", n=1, size="1024x1024")
37+
assert response["data"][0]["url"]

0 commit comments

Comments (0)