@@ -8,8 +8,8 @@
 import guardrails as gd
 from guardrails.guard import Guard
 from guardrails.utils.openai_utils import (
-    static_openai_chat_create_func,
-    static_openai_create_func,
+    get_static_openai_chat_create_func,
+    get_static_openai_create_func,
 )
 from guardrails.utils.reask_utils import FieldReAsk
 from guardrails.validators import FailResult, OneLine
@@ -140,7 +140,7 @@ def test_entity_extraction_with_reask(
     guard = guard_initializer(rail, prompt)
 
     _, final_output = guard(
-        llm_api=static_openai_create_func,
+        llm_api=get_static_openai_create_func(),
         prompt_params={"document": content[:6000]},
         num_reasks=1,
         max_tokens=2000,
@@ -219,7 +219,7 @@ def test_entity_extraction_with_noop(mocker, rail, prompt):
     content = gd.docs_utils.read_pdf("docs/examples/data/chase_card_agreement.pdf")
     guard = guard_initializer(rail, prompt)
     _, final_output = guard(
-        llm_api=static_openai_create_func,
+        llm_api=get_static_openai_create_func(),
         prompt_params={"document": content[:6000]},
         num_reasks=1,
     )
@@ -255,7 +255,7 @@ def test_entity_extraction_with_filter(mocker, rail, prompt):
     content = gd.docs_utils.read_pdf("docs/examples/data/chase_card_agreement.pdf")
     guard = guard_initializer(rail, prompt)
     _, final_output = guard(
-        llm_api=static_openai_create_func,
+        llm_api=get_static_openai_create_func(),
         prompt_params={"document": content[:6000]},
         num_reasks=1,
     )
@@ -290,7 +290,7 @@ def test_entity_extraction_with_fix(mocker, rail, prompt):
     content = gd.docs_utils.read_pdf("docs/examples/data/chase_card_agreement.pdf")
     guard = guard_initializer(rail, prompt)
     _, final_output = guard(
-        llm_api=static_openai_create_func,
+        llm_api=get_static_openai_create_func(),
         prompt_params={"document": content[:6000]},
         num_reasks=1,
     )
@@ -326,7 +326,7 @@ def test_entity_extraction_with_refrain(mocker, rail, prompt):
     content = gd.docs_utils.read_pdf("docs/examples/data/chase_card_agreement.pdf")
     guard = guard_initializer(rail, prompt)
     _, final_output = guard(
-        llm_api=static_openai_create_func,
+        llm_api=get_static_openai_create_func(),
         prompt_params={"document": content[:6000]},
         num_reasks=1,
     )
@@ -369,7 +369,7 @@ def test_entity_extraction_with_fix_chat_models(mocker, rail, prompt, instructio
     content = gd.docs_utils.read_pdf("docs/examples/data/chase_card_agreement.pdf")
     guard = guard_initializer(rail, prompt, instructions)
     _, final_output = guard(
-        llm_api=static_openai_chat_create_func,
+        llm_api=get_static_openai_chat_create_func(),
         prompt_params={"document": content[:6000]},
         num_reasks=1,
     )
@@ -399,7 +399,7 @@ def test_string_output(mocker):
 
     guard = gd.Guard.from_rail_string(string.RAIL_SPEC_FOR_STRING)
     _, final_output = guard(
-        llm_api=static_openai_create_func,
+        llm_api=get_static_openai_create_func(),
         prompt_params={"ingredients": "tomato, cheese, sour cream"},
         num_reasks=1,
     )
@@ -421,7 +421,7 @@ def test_string_reask(mocker):
 
     guard = gd.Guard.from_rail_string(string.RAIL_SPEC_FOR_STRING_REASK)
     _, final_output = guard(
-        llm_api=static_openai_create_func,
+        llm_api=get_static_openai_create_func(),
         prompt_params={"ingredients": "tomato, cheese, sour cream"},
         num_reasks=1,
         max_tokens=100,
@@ -454,7 +454,7 @@ def test_skeleton_reask(mocker):
     content = gd.docs_utils.read_pdf("docs/examples/data/chase_card_agreement.pdf")
     guard = gd.Guard.from_rail_string(entity_extraction.RAIL_SPEC_WITH_SKELETON_REASK)
     _, final_output = guard(
-        llm_api=static_openai_create_func,
+        llm_api=get_static_openai_create_func(),
         prompt_params={"document": content[:6000]},
         max_tokens=1000,
         num_reasks=1,
@@ -497,7 +497,7 @@ def test_skeleton_reask(mocker):
 
     guard = gd.Guard.from_rail_string(string.RAIL_SPEC_FOR_LIST)
     _, final_output = guard(
-        llm_api=static_openai_create_func,
+        llm_api=get_static_openai_create_func(),
         num_reasks=1,
     )
     assert final_output == string.LIST_LLM_OUTPUT
@@ -523,7 +523,7 @@ def test_skeleton_reask(mocker):
             entity_extraction.OPTIONAL_PROMPT_COMPLETION_MODEL,
             None,
             None,
-            static_openai_create_func,
+            get_static_openai_create_func(),
             entity_extraction.COMPILED_PROMPT,
             None,
             entity_extraction.COMPILED_PROMPT_REASK,
@@ -534,7 +534,7 @@ def test_skeleton_reask(mocker):
             entity_extraction.OPTIONAL_PROMPT_CHAT_MODEL,
             entity_extraction.OPTIONAL_INSTRUCTIONS_CHAT_MODEL,
             None,
-            static_openai_chat_create_func,
+            get_static_openai_chat_create_func(),
             entity_extraction.COMPILED_PROMPT_WITHOUT_INSTRUCTIONS,
             entity_extraction.COMPILED_INSTRUCTIONS,
             entity_extraction.COMPILED_PROMPT_REASK_WITHOUT_INSTRUCTIONS,
@@ -545,7 +545,7 @@ def test_skeleton_reask(mocker):
             None,
             None,
             entity_extraction.OPTIONAL_MSG_HISTORY,
-            static_openai_chat_create_func,
+            get_static_openai_chat_create_func(),
             None,
             None,
             entity_extraction.COMPILED_PROMPT_REASK_WITHOUT_INSTRUCTIONS,
@@ -566,7 +566,7 @@ def test_entity_extraction_with_reask_with_optional_prompts(
     expected_reask_instructions,
 ):
     """Test that the entity extraction works with re-asking."""
-    if llm_api == static_openai_create_func:
+    if llm_api == get_static_openai_create_func():
         mocker.patch("guardrails.llm_providers.OpenAICallable", new=MockOpenAICallable)
     else:
         mocker.patch(
@@ -653,7 +653,7 @@ def test_string_with_message_history_reask(mocker):
 
     guard = gd.Guard.from_rail_string(string.RAIL_SPEC_FOR_MSG_HISTORY)
     _, final_output = guard(
-        llm_api=static_openai_chat_create_func,
+        llm_api=get_static_openai_chat_create_func(),
         msg_history=string.MOVIE_MSG_HISTORY,
         temperature=0.0,
         model="gpt-3.5-turbo",
@@ -689,7 +689,7 @@ def test_pydantic_with_message_history_reask(mocker):
 
     guard = gd.Guard.from_pydantic(output_class=pydantic.WITH_MSG_HISTORY)
     raw_output, guarded_output = guard(
-        llm_api=static_openai_chat_create_func,
+        llm_api=get_static_openai_chat_create_func(),
         msg_history=string.MOVIE_MSG_HISTORY,
         temperature=0.0,
         model="gpt-3.5-turbo",
@@ -731,7 +731,7 @@ def test_sequential_validator_log_is_not_duplicated(mocker):
     )
 
     _, final_output = guard(
-        llm_api=static_openai_create_func,
+        llm_api=get_static_openai_create_func(),
         prompt_params={"document": content[:6000]},
         num_reasks=1,
     )
@@ -765,7 +765,7 @@ def test_in_memory_validator_log_is_not_duplicated(mocker):
     )
 
     _, final_output = guard(
-        llm_api=static_openai_create_func,
+        llm_api=get_static_openai_create_func(),
         prompt_params={"document": content[:6000]},
         num_reasks=1,
     )