Skip to content

Commit bb5bc27

Browse files
authored
Add support for o4-mini and o3 (#968)
- This adds support, and tests, for the o3 and o4-mini class of models
1 parent 4cccc6c commit bb5bc27

5 files changed

+114
-1
lines changed

chat_stream_test.go

+50
Original file line numberDiff line numberDiff line change
@@ -959,6 +959,56 @@ func TestCreateChatCompletionStreamReasoningValidatorFails(t *testing.T) {
959959
}
960960
}
961961

962+
func TestCreateChatCompletionStreamO3ReasoningValidatorFails(t *testing.T) {
963+
client, _, _ := setupOpenAITestServer()
964+
965+
stream, err := client.CreateChatCompletionStream(context.Background(), openai.ChatCompletionRequest{
966+
MaxTokens: 100, // This will trigger the validator to fail
967+
Model: openai.O3,
968+
Messages: []openai.ChatCompletionMessage{
969+
{
970+
Role: openai.ChatMessageRoleUser,
971+
Content: "Hello!",
972+
},
973+
},
974+
Stream: true,
975+
})
976+
977+
if stream != nil {
978+
t.Error("Expected nil stream when validation fails")
979+
stream.Close()
980+
}
981+
982+
if !errors.Is(err, openai.ErrReasoningModelMaxTokensDeprecated) {
983+
t.Errorf("Expected ErrReasoningModelMaxTokensDeprecated for O3, got: %v", err)
984+
}
985+
}
986+
987+
func TestCreateChatCompletionStreamO4MiniReasoningValidatorFails(t *testing.T) {
988+
client, _, _ := setupOpenAITestServer()
989+
990+
stream, err := client.CreateChatCompletionStream(context.Background(), openai.ChatCompletionRequest{
991+
MaxTokens: 100, // This will trigger the validator to fail
992+
Model: openai.O4Mini,
993+
Messages: []openai.ChatCompletionMessage{
994+
{
995+
Role: openai.ChatMessageRoleUser,
996+
Content: "Hello!",
997+
},
998+
},
999+
Stream: true,
1000+
})
1001+
1002+
if stream != nil {
1003+
t.Error("Expected nil stream when validation fails")
1004+
stream.Close()
1005+
}
1006+
1007+
if !errors.Is(err, openai.ErrReasoningModelMaxTokensDeprecated) {
1008+
t.Errorf("Expected ErrReasoningModelMaxTokensDeprecated for O4Mini, got: %v", err)
1009+
}
1010+
}
1011+
9621012
func compareChatStreamResponseChoices(c1, c2 openai.ChatCompletionStreamChoice) bool {
9631013
if c1.Index != c2.Index {
9641014
return false

completion.go

+8
Original file line numberDiff line numberDiff line change
@@ -16,8 +16,12 @@ const (
1616
O1Preview20240912 = "o1-preview-2024-09-12"
1717
O1 = "o1"
1818
O120241217 = "o1-2024-12-17"
19+
O3 = "o3"
20+
O320250416 = "o3-2025-04-16"
1921
O3Mini = "o3-mini"
2022
O3Mini20250131 = "o3-mini-2025-01-31"
23+
O4Mini = "o4-mini"
24+
O4Mini20250416 = "o4-mini-2025-04-16"
2125
GPT432K0613 = "gpt-4-32k-0613"
2226
GPT432K0314 = "gpt-4-32k-0314"
2327
GPT432K = "gpt-4-32k"
@@ -99,6 +103,10 @@ var disabledModelsForEndpoints = map[string]map[string]bool{
99103
O1Preview20240912: true,
100104
O3Mini: true,
101105
O3Mini20250131: true,
106+
O4Mini: true,
107+
O4Mini20250416: true,
108+
O3: true,
109+
O320250416: true,
102110
GPT3Dot5Turbo: true,
103111
GPT3Dot5Turbo0301: true,
104112
GPT3Dot5Turbo0613: true,

completion_test.go

+36
Original file line numberDiff line numberDiff line change
@@ -33,6 +33,42 @@ func TestCompletionsWrongModel(t *testing.T) {
3333
}
3434
}
3535

36+
// TestCompletionsWrongModelO3 Tests the completions endpoint with O3 model which is not supported.
37+
func TestCompletionsWrongModelO3(t *testing.T) {
38+
config := openai.DefaultConfig("whatever")
39+
config.BaseURL = "http://localhost/v1"
40+
client := openai.NewClientWithConfig(config)
41+
42+
_, err := client.CreateCompletion(
43+
context.Background(),
44+
openai.CompletionRequest{
45+
MaxTokens: 5,
46+
Model: openai.O3,
47+
},
48+
)
49+
if !errors.Is(err, openai.ErrCompletionUnsupportedModel) {
50+
t.Fatalf("CreateCompletion should return ErrCompletionUnsupportedModel for O3, but returned: %v", err)
51+
}
52+
}
53+
54+
// TestCompletionsWrongModelO4Mini Tests the completions endpoint with O4Mini model which is not supported.
55+
func TestCompletionsWrongModelO4Mini(t *testing.T) {
56+
config := openai.DefaultConfig("whatever")
57+
config.BaseURL = "http://localhost/v1"
58+
client := openai.NewClientWithConfig(config)
59+
60+
_, err := client.CreateCompletion(
61+
context.Background(),
62+
openai.CompletionRequest{
63+
MaxTokens: 5,
64+
Model: openai.O4Mini,
65+
},
66+
)
67+
if !errors.Is(err, openai.ErrCompletionUnsupportedModel) {
68+
t.Fatalf("CreateCompletion should return ErrCompletionUnsupportedModel for O4Mini, but returned: %v", err)
69+
}
70+
}
71+
3672
func TestCompletionWithStream(t *testing.T) {
3773
config := openai.DefaultConfig("whatever")
3874
client := openai.NewClientWithConfig(config)

models_test.go

+18
Original file line numberDiff line numberDiff line change
@@ -47,6 +47,24 @@ func TestGetModel(t *testing.T) {
4747
checks.NoError(t, err, "GetModel error")
4848
}
4949

50+
// TestGetModelO3 Tests the retrieve O3 model endpoint of the API using the mocked server.
51+
func TestGetModelO3(t *testing.T) {
52+
client, server, teardown := setupOpenAITestServer()
53+
defer teardown()
54+
server.RegisterHandler("/v1/models/o3", handleGetModelEndpoint)
55+
_, err := client.GetModel(context.Background(), "o3")
56+
checks.NoError(t, err, "GetModel error for O3")
57+
}
58+
59+
// TestGetModelO4Mini Tests the retrieve O4Mini model endpoint of the API using the mocked server.
60+
func TestGetModelO4Mini(t *testing.T) {
61+
client, server, teardown := setupOpenAITestServer()
62+
defer teardown()
63+
server.RegisterHandler("/v1/models/o4-mini", handleGetModelEndpoint)
64+
_, err := client.GetModel(context.Background(), "o4-mini")
65+
checks.NoError(t, err, "GetModel error for O4Mini")
66+
}
67+
5068
func TestAzureGetModel(t *testing.T) {
5169
client, server, teardown := setupAzureTestServer()
5270
defer teardown()

reasoning_validator.go

+2-1
Original file line numberDiff line numberDiff line change
@@ -40,8 +40,9 @@ func NewReasoningValidator() *ReasoningValidator {
4040
func (v *ReasoningValidator) Validate(request ChatCompletionRequest) error {
4141
o1Series := strings.HasPrefix(request.Model, "o1")
4242
o3Series := strings.HasPrefix(request.Model, "o3")
43+
o4Series := strings.HasPrefix(request.Model, "o4")
4344

44-
if !o1Series && !o3Series {
45+
if !o1Series && !o3Series && !o4Series {
4546
return nil
4647
}
4748

0 commit comments

Comments
 (0)