
Commit e7a49ba

pinzonthrau authored and committed
new extension
1 parent 560a4ad commit e7a49ba

File tree

9 files changed, +437 -0 lines changed


openai/README.md

+57
@@ -0,0 +1,57 @@
# LocalStack OpenAI Extension

![GitHub license](https://img.shields.io/badge/license-Apache%202.0-blue.svg)
![Python version](https://img.shields.io/badge/python-3.11%2B-blue)
[![Build Status](https://travis-ci.com/yourusername/localstack-openai-mock.svg?branch=master)](https://travis-ci.com/yourusername/localstack-openai-mock)

This is a LocalStack extension that allows you to mock the OpenAI API for testing and development purposes. It provides a convenient way to interact with a mock OpenAI service locally using LocalStack.

## Installation

You can install this extension directly using the LocalStack extension manager:

```bash
localstack extensions install localstack-extension-openai
```

## Usage

Once installed, you can access the OpenAI Mock API through `localhost:4510/v1`.

### Example

```python
import openai

openai.organization = "org-test"
openai.api_key = "test"
openai.api_base = "http://localhost:4510/v1"

completion = openai.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Hello!"},
    ],
)
print(completion.choices)
```
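
The mock also covers streamed chat completions. A minimal sketch, assuming the same pre-1.0 `openai` Python client as above:

```python
import openai

openai.api_key = "test"
openai.api_base = "http://localhost:4510/v1"

# the mock emits server-sent events; the client yields one delta per chunk
stream = openai.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello!"}],
    stream=True,
)
for chunk in stream:
    delta = chunk["choices"][0]["delta"]
    print(delta.get("content", ""), end="", flush=True)
```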

## Coverage

- [x] Chat completion
- [x] Engines Listing
- [x] Transcribe
- [x] Translate
- [x] Generate Image URL
- [ ] Generate Image Base64
- [ ] Embeddings
- [ ] Fine Tuning
- [ ] Files
- [ ] Moderations
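
Each covered route can also be exercised directly over HTTP. A quick sketch against the image-generation endpoint (the returned URL is randomly generated fake data):

```bash
curl -s http://localhost:4510/v1/images/generations \
  -H "Content-Type: application/json" \
  -d '{"prompt": "a cat", "n": 1, "size": "512x512"}'
```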

## Authors

**Cristopher Pinzon** [email protected]

### Thank you for using the LocalStack OpenAI Extension!

openai/localstack_openai/__init__.py

+1
@@ -0,0 +1 @@
__version__ = "0.1.0"

openai/localstack_openai/extension.py

+33
@@ -0,0 +1,33 @@
import atexit
import logging
import threading

from localstack.extensions.api import Extension, http, services

LOG = logging.getLogger(__name__)


class LocalstackOpenAIExtension(Extension):
    name = "openai"

    backend_url: str

    def on_platform_start(self):
        # start the mock OpenAI backend when LocalStack starts
        from . import mock_openai

        port = services.external_service_ports.reserve_port()
        self.backend_url = f"http://localhost:{port}"

        LOG.info("Starting mock OpenAI service on %s", self.backend_url)
        # run the Flask app in a background thread so platform startup is not blocked
        threading.Thread(target=mock_openai.run, args=(port,), daemon=True).start()
        atexit.register(mock_openai.stop)

    def update_gateway_routes(self, router: http.Router[http.RouteHandler]):
        # a ProxyHandler forwards all incoming requests to the backend URL
        endpoint = http.ProxyHandler(self.backend_url)

        # add path route for localhost:4566/v1/chat/completions
        router.add(
            "/v1/chat/completions",
            endpoint=endpoint,
        )
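
Once the extension is running, the proxied route can be exercised through the LocalStack gateway. A quick sketch, assuming the default gateway port 4566 mentioned in the comment above:

```bash
curl -s http://localhost:4566/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Hello!"}]}'
```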

openai/localstack_openai/mock_openai.py

+236
@@ -0,0 +1,236 @@
import json
import time

from faker import Faker
from flask import Flask, request, Response

faker = Faker()
app = Flask(__name__)

# number of fake words returned per chat completion
res_len = 20


class ChunkReader:
    """Tracks the state of one streamed chat completion response."""

    def __init__(self, chunks, delay):
        self.ID = ""
        self.Created = 0
        self.Chunks = chunks
        self.SentFinished = False
        self.SentDone = False
        self.Delay = delay


def new_chunk_reader(cs, d):
    return ChunkReader(cs, d)


def done(r):
    return r.SentFinished and r.SentDone

def next_chunk(r):
    """Return the next SSE-encoded chunk of the stream, or (None, None) when done."""
    if r.SentDone:
        return None, None

    if r.SentFinished:
        # terminate the event stream
        b = b"data: [DONE]\n\n"
        r.SentDone = True
        return b, None

    if len(r.Chunks) == 0:
        # no content left: emit the final chunk with finish_reason "stop"
        d = {
            "id": r.ID,
            "object": "chat.completion.chunk",
            "created": r.Created,
            "model": "gpt-3.5-turbo",
            "choices": [
                {
                    "index": 0,
                    "delta": {},
                    "finish_reason": "stop",
                }
            ],
        }
        b = json.dumps(d).encode()
        r.SentFinished = True
        return b"data: " + b + b"\n\n", None

    # emit the next content chunk (the words are already space-prefixed by the caller)
    c = r.Chunks[0]
    d = {
        "id": r.ID,
        "object": "chat.completion.chunk",
        "created": r.Created,
        "model": "gpt-3.5-turbo",
        "choices": [
            {
                "index": 0,
                "delta": {
                    "content": c,
                },
                "finish_reason": None,
            }
        ],
    }
    b = json.dumps(d).encode()
    r.Chunks = r.Chunks[1:]
    return b"data: " + b + b"\n\n", None


@app.route('/v1/chat/completions', methods=['POST'])
def chat_completions():
    data = request.get_data()
    req = json.loads(data)

    # build a fake response from random words; prefix all but the first with a space
    ws = [faker.word() for _ in range(res_len)]
    ws = [" " + w if i > 0 else w for i, w in enumerate(ws)]

    if not req.get("stream"):
        m = "".join(ws)
        return {
            "choices": [
                {
                    "index": 0,
                    "message": {
                        "role": "assistant",
                        "content": m,
                    },
                }
            ]
        }

    sd = 0.5
    chunk_reader = new_chunk_reader(ws, sd)
    chunk_reader.ID = faker.uuid4()
    chunk_reader.Created = int(time.time())

    def generate():
        # drain the reader: content chunks, a final "stop" chunk, then [DONE]
        while not done(chunk_reader):
            b, _ = next_chunk(chunk_reader)
            if b is not None:
                yield b
            time.sleep(chunk_reader.Delay)

    return Response(generate(), content_type='text/event-stream')


@app.route('/v1/audio/transcriptions', methods=['POST'])
def transcribe():
    return {
        "text": faker.sentence(),
    }


@app.route('/v1/audio/translations', methods=['POST'])
def translate():
    return {
        "text": faker.sentence(),
    }


@app.route('/v1/images/generations', methods=['POST'])
def generate_image():
    return {
        "created": int(time.time()),
        "data": [
            {"url": faker.image_url()}
        ],
    }


@app.route('/v1/engines', methods=['GET'])
def list_engines():
    return {
        "object": "list",
        "data": [
            {
                "id": "model-id-0",
                "object": "model",
                "created": 1686935002,
                "owned_by": "organization-owner",
            },
            {
                "id": "model-id-1",
                "object": "model",
                "created": 1686935002,
                "owned_by": "organization-owner",
            },
            {
                "id": "model-id-2",
                "object": "model",
                "created": 1686935002,
                "owned_by": "openai",
            },
        ],
    }


def run(port=1323):
    # debug and reloader disabled so the server can run in a background thread
    app.run(host="0.0.0.0", port=port, debug=False)


def stop():
    # the Flask development server has no programmatic shutdown; the daemon
    # thread started by the extension exits together with the process
    pass


if __name__ == "__main__":
    run()
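
The mock can also be started standalone (it defaults to port 1323, per `run()` above) and its wire format inspected with a plain HTTP client. A sketch using `requests`, which is an assumed extra dependency the extension itself does not declare:

```python
import json

import requests

resp = requests.post(
    "http://localhost:1323/v1/chat/completions",
    json={"model": "gpt-3.5-turbo", "messages": [], "stream": True},
    stream=True,
)
# each event line looks like b"data: {...}"; the stream ends with b"data: [DONE]"
for line in resp.iter_lines():
    if not line or not line.startswith(b"data: "):
        continue
    payload = line[len(b"data: "):]
    if payload == b"[DONE]":
        break
    chunk = json.loads(payload)
    print(chunk["choices"][0]["delta"].get("content", ""), end="")
```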

openai/pyproject.toml

+12
@@ -0,0 +1,12 @@
[tool.black]
line_length = 100
include = '(localstack_openai|tests)/.*\.py$'

[tool.isort]
profile = 'black'
line_length = 100

[tool.flake8]
max-line-length = 100
ignore = 'E501'
exclude = './setup.py,.venv*,dist,build'
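
These settings are picked up when the formatters run from the extension directory; note that stock flake8 does not read pyproject.toml, so the `[tool.flake8]` table assumes a wrapper such as pyproject-flake8. A quick sketch:

```bash
black localstack_openai tests
isort localstack_openai tests
```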

openai/setup.cfg

+49
@@ -0,0 +1,49 @@
[metadata]
name = localstack-extension-openai
version = attr: localstack_openai.__version__
url = https://github.com/localstack/localstack-extensions/tree/main/openai
author = Cristopher Pinzon
author_email = [email protected]
description = LocalStack Extension: OpenAI
long_description = file: README.md
long_description_content_type = text/markdown; charset=UTF-8
license = Apache License 2.0
classifiers =
    Development Status :: 5 - Production/Stable
    License :: OSI Approved :: Apache Software License
    Operating System :: OS Independent
    Programming Language :: Python :: 3
    Programming Language :: Python :: 3.8
    Programming Language :: Python :: 3.9
    Topic :: Software Development :: Libraries
    Topic :: Utilities

[options]
zip_safe = False
packages = find:
install_requires =
    flask>=2.0.1
    faker>=8.12.1
    localstack>=1.0
    plux>=1.3
tests_require =
    openai>=0.10.2
    pytest>=6.2.4

[options.extras_require]
dev =
    pytest>=6.2.4
    black==22.3.0
    isort==5.10.1

[options.packages.find]
exclude =
    tests*

[options.package_data]
* = *.md

[options.entry_points]
localstack.extensions =
    localstack_openai = localstack_openai.extension:LocalstackOpenAIExtension
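
For local development, the `dev` extras defined above can be installed in editable mode (a standard pip workflow, assuming a checkout of the extension directory and the tests layout implied by the packaging excludes):

```bash
pip install -e ".[dev]"
pytest
```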
