
Commit 6e7a959

CalebCourier, Nefertiti Rogers, irgolic, and zsimjee authored
Validation Outcome (#431)
* start validation outcome changes
* fix gather_reasks for non-structured output
* lint fixes
* more lint fixes
* start test fixes, debug types
* fix tests
* fix types with overloads
* fix tests
* lint fixes
* lint fixes
* fix tests
* lint fixes
* switch to generics for ValidationOutcome
* allow destructuring
* remove None from generic type
* init commit, changes to handle error in guard
* handle error a layer deeper
* update return in text2sql
* remove extra fx in validation outcome
* use error instead of exception
* remove print statements plus lint
* fix type
* fix typing while maintaining type hinting
* fix other type issues
* autoformat
* lint fixes
* test fixes
* autoformat
* type fixes
* lint fix
* unused import
* uncomment test parameters
* merge/type fixes
* guard: Allow calling parse with preconfigured num_reasks (#423)
* Cron nb (#425)
* install deps + pkg for nb runs
* lock nb runner to 3.11.x
* use cohere api key from environ
* ref env vars for cohere + openai
* fix bad merge in code originally from validators.py (#427)
* fix bad merge in code originally from validators.py
* lint fixes
* bump version (#428)
* update notebooks
* Setup passed password (#429)
* use pypi pass from env
* upgrade pip before installing deps
* pass pypi pass explicitly
* use environ competently
* list -> List
* lint and test fixes
* autoformat
* lint and type fix
* fix test
* fix llm_output type
* ' -> "
* fix tests
* lint fixes
* fix notebooks again
* fix docs
* debug
* validated_response -> validated_output

---------

Co-authored-by: Nefertiti Rogers <[email protected]>
Co-authored-by: Nefertiti Rogers <[email protected]>
Co-authored-by: rafael <[email protected]>
Co-authored-by: zsimjee <[email protected]>
1 parent d8dfb2a commit 6e7a959
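
The commit log above captures the substance of the change: `guard(...)` now returns a generic ValidationOutcome that supports destructuring and carries an `error` field instead of raising. The class itself is not visible in this commit view, so the following is only a hypothetical sketch of what a generic, destructurable outcome type can look like; the name `OutcomeSketch` and its exact fields are illustrative, not guardrails' actual implementation:

```python
from dataclasses import dataclass
from typing import Generic, Iterator, Optional, TypeVar

T = TypeVar("T")


@dataclass
class OutcomeSketch(Generic[T]):
    """Hypothetical stand-in, not the real guardrails ValidationOutcome."""

    raw_llm_output: Optional[str] = None
    validated_output: Optional[T] = None
    error: Optional[str] = None

    def __iter__(self) -> Iterator:
        # Making the object iterable is what lets callers write
        # `raw, validated, *rest = guard(...)` as in the doc changes below.
        return iter((self.raw_llm_output, self.validated_output, self.error))
```

Iterable unpacking only needs `__iter__`, which is why the documentation below can switch to `raw, validated, *rest = guard(...)` without any other call-site changes.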


61 files changed: +1567 -1511 lines
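
The doc diffs that follow all make the same call-site change: the two-element unpacking of `guard()`'s return value becomes a three-part destructure with `*rest`, reflecting the new ValidationOutcome return type. A minimal sketch of the updated pattern, assuming an existing rail spec and the OpenAI completion API used throughout these docs (the engine and max_tokens values simply mirror the README example):

```python
import openai

from guardrails import Guard

# Any rail spec works here; "my_railspec.rail" is just the name used in the docs.
guard = Guard.from_rail("my_railspec.rail")

# The first two elements are the raw LLM text and the validated output;
# *rest absorbs whatever additional fields the outcome carries.
raw_llm_output, validated_output, *rest = guard(
    openai.Completion.create,
    engine="text-davinci-003",
    max_tokens=1024,
)

print(validated_output)
```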

README.md

Lines changed: 1 addition & 1 deletion
@@ -154,7 +154,7 @@ Call the `Guard` object with the LLM API call as the first argument and add any
 import openai

 # Wrap the OpenAI API call with the `guard` object
-raw_llm_output, validated_output = guard(
+raw_llm_output, validated_output, *rest = guard(
 openai.Completion.create,
 engine="text-davinci-003",
 max_tokens=1024,

docs/concepts/guard.md

Lines changed: 1 addition & 1 deletion
@@ -19,7 +19,7 @@ from guardrails import Guard

 guard = Guard.from_rail(...)

-raw_output, validated_output = guard(
+raw_output, validated_output, *rest = guard(
 openai.Completion.create,
 engine="text-davinci-003",
 max_tokens=1024,

docs/concepts/validators.md

Lines changed: 2 additions & 2 deletions
@@ -35,7 +35,7 @@ Sometimes validators need addtional parameters that are only availble during run
 ```python
 guard = Guard.from_rail("my_railspec.rail")

-raw_output, guarded_output = guard(
+raw_output, guarded_output, *rest = guard(
 llm_api=openai.ChatCompletion.create,
 model="gpt-3.5-turbo",
 num_reasks=3,
@@ -134,7 +134,7 @@ ${guardrails.complete_json_suffix}

 guard = Guard.from_rail_string(rail_string=rail_str)

-raw_output, guarded_output = guard(
+raw_output, guarded_output, *rest = guard(
 llm_api=openai.ChatCompletion.create,
 model="gpt-3.5-turbo"
 )

docs/defining_guards/pydantic.ipynb

Lines changed: 3 additions & 3 deletions
@@ -71,7 +71,7 @@
 "\"\"\"\n",
 "guard = Guard.from_pydantic(output_class=Pet, prompt=prompt)\n",
 "\n",
-"raw_llm_output, validated_output = guard(\n",
+"raw_llm_output, validated_output, *rest = guard(\n",
 " llm_api=openai.Completion.create,\n",
 " engine=\"text-davinci-003\"\n",
 ")\n",
@@ -378,7 +378,7 @@
 "\n",
 "guard = Guard.from_pydantic(output_class=Pet, prompt=prompt)\n",
 "\n",
-"raw_llm_output, validated_output = guard(\n",
+"raw_llm_output, validated_output, *rest = guard(\n",
 " llm_api=openai.Completion.create,\n",
 " engine=\"text-davinci-003\",\n",
 " max_tokens=1024,\n",
@@ -576,7 +576,7 @@
 "\"\"\"\n",
 "\n",
 "guard = Guard.from_pydantic(output_class=Pet, prompt=prompt)\n",
-"raw_llm_output, validated_output = guard(\n",
+"raw_llm_output, validated_output, *rest = guard(\n",
 " llm_api=openai.Completion.create,\n",
 " engine=\"text-davinci-003\",\n",
 " max_tokens=1024,\n",

docs/defining_guards/rail.md

Lines changed: 1 addition & 1 deletion
@@ -98,7 +98,7 @@ import guardrails as gd

 # Create a Guard object
 guard = gd.Guard.from_rail('path/to/rail/spec.xml') # (1)!
-validated_output = guard(
+_, validated_output, *rest = guard(
 openai.Completion.create, # (2)!
 **prompt_args,
 *args,

docs/defining_guards/strings.ipynb

Lines changed: 1 addition & 1 deletion
@@ -209,7 +209,7 @@
 " prompt=\"Generate a puppy name\"\n",
 ")\n",
 "\n",
-"raw_llm_output, validated_llm_response = guard(openai.Completion.create)\n",
+"raw_llm_output, validated_llm_response, *rest = guard(openai.Completion.create)\n",
 "print(validated_llm_response)\n",
 "print(guard.state.most_recent_call.tree)"
 ]

docs/examples/bug_free_python_code.ipynb

Lines changed: 19 additions & 25 deletions
@@ -38,7 +38,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 39,
+"execution_count": 1,
 "metadata": {},
 "outputs": [],
 "source": [
@@ -75,7 +75,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 40,
+"execution_count": 2,
 "metadata": {},
 "outputs": [],
 "source": [
@@ -113,7 +113,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 41,
+"execution_count": 3,
 "metadata": {},
 "outputs": [],
 "source": [
@@ -131,18 +131,9 @@
 },
 {
 "cell_type": "code",
-"execution_count": 42,
+"execution_count": 4,
 "metadata": {},
-"outputs": [
-{
-"name": "stderr",
-"output_type": "stream",
-"text": [
-"/Users/zaydsimjee/workspace/guardrails/guardrails/validatorsattr.py:285: UserWarning: Validator bug-free-python is not valid for element pythoncode.\n",
-" warnings.warn(\n"
-]
-}
-],
+"outputs": [],
 "source": [
 "guard = gd.Guard.from_rail_string(rail_str)"
 ]
@@ -156,7 +147,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 43,
+"execution_count": 5,
 "metadata": {},
 "outputs": [],
 "source": [
@@ -173,7 +164,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 44,
+"execution_count": 6,
 "metadata": {},
 "outputs": [
 {
@@ -254,7 +245,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 45,
+"execution_count": 7,
 "metadata": {},
 "outputs": [],
 "source": [
@@ -264,7 +255,7 @@
 "Given a string s, find the longest palindromic substring in s. You may assume that the maximum length of s is 1000.\n",
 "\"\"\"\n",
 "\n",
-"raw_llm_response, validated_response = guard(\n",
+"response = guard(\n",
 " openai.Completion.create,\n",
 " prompt_params={\"leetcode_problem\": leetcode_problem},\n",
 " engine=\"text-davinci-003\",\n",
@@ -285,7 +276,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 46,
+"execution_count": 8,
 "metadata": {},
 "outputs": [
 {
@@ -313,7 +304,7 @@
 }
 ],
 "source": [
-"print(validated_response)"
+"print(response.validated_output)"
 ]
 },
 {
@@ -326,7 +317,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 47,
+"execution_count": 9,
 "metadata": {},
 "outputs": [
 {
@@ -358,7 +349,10 @@
 }
 ],
 "source": [
-"print(validated_response[\"python_code\"])"
+"if response.validated_output is not None:\n",
+" print(response.validated_output[\"python_code\"])\n",
+"elif response.error is not None:\n",
+" print(response.error)"
 ]
 },
 {
@@ -371,7 +365,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 48,
+"execution_count": 10,
 "metadata": {},
 "outputs": [
 {
@@ -390,7 +384,7 @@
 ],
 "source": [
 "try:\n",
-" exec(validated_response[\"python_code\"])\n",
+" exec(response.validated_output[\"python_code\"])\n",
 " print(\"Success!\")\n",
 "except Exception as e:\n",
 " print(\"Failed!\")"
@@ -413,7 +407,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.11.5"
+"version": "3.11.6"
 },
 "orig_nbformat": 4,
 "vscode": {
