@@ -52,6 +52,199 @@ func TestChatCompletionsWrongModel(t *testing.T) {
	checks.ErrorIs(t, err, openai.ErrChatCompletionInvalidModel, msg)
}

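+// TestO1ModelsChatCompletionsDeprecatedFields checks that setting the
+// deprecated MaxTokens field on o1 models is rejected with
+// ErrO1MaxTokensDeprecated (BaseURL points at localhost with no server
+// running, so the error must come from client-side request validation).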
+func TestO1ModelsChatCompletionsDeprecatedFields(t *testing.T) {
+	tests := []struct {
+		name          string
+		in            openai.ChatCompletionRequest
+		expectedError error
+	}{
+		{
+			name: "o1-preview_MaxTokens_deprecated",
+			in: openai.ChatCompletionRequest{
+				MaxTokens: 5,
+				Model:     openai.O1Preview,
+			},
+			expectedError: openai.ErrO1MaxTokensDeprecated,
+		},
+		{
+			name: "o1-mini_MaxTokens_deprecated",
+			in: openai.ChatCompletionRequest{
+				MaxTokens: 5,
+				Model:     openai.O1Mini,
+			},
+			expectedError: openai.ErrO1MaxTokensDeprecated,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			config := openai.DefaultConfig("whatever")
+			config.BaseURL = "http://localhost/v1"
+			client := openai.NewClientWithConfig(config)
+			ctx := context.Background()
+
+			_, err := client.CreateChatCompletion(ctx, tt.in)
+			checks.HasError(t, err)
+			msg := fmt.Sprintf("CreateChatCompletion should return deprecated MaxTokens error, returned: %s", err)
+			checks.ErrorIs(t, err, tt.expectedError, msg)
+		})
+	}
+}
+
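+// TestO1ModelsChatCompletionsBetaLimitations checks that requests violating
+// the o1 beta limitations (logprobs, system messages, tools, and non-default
+// sampling parameters) are rejected with the matching ErrO1BetaLimitations*
+// error.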
+func TestO1ModelsChatCompletionsBetaLimitations(t *testing.T) {
+	tests := []struct {
+		name          string
+		in            openai.ChatCompletionRequest
+		expectedError error
+	}{
+		{
+			name: "log_probs_unsupported",
+			in: openai.ChatCompletionRequest{
+				MaxCompletionsTokens: 1000,
+				LogProbs:             true,
+				Model:                openai.O1Preview,
+			},
+			expectedError: openai.ErrO1BetaLimitationsLogprobs,
+		},
+		{
+			name: "message_type_unsupported",
+			in: openai.ChatCompletionRequest{
+				MaxCompletionsTokens: 1000,
+				Model:                openai.O1Mini,
+				Messages: []openai.ChatCompletionMessage{
+					{
+						Role: openai.ChatMessageRoleSystem,
+					},
+				},
+			},
+			expectedError: openai.ErrO1BetaLimitationsMessageTypes,
+		},
+		{
+			name: "tool_unsupported",
+			in: openai.ChatCompletionRequest{
+				MaxCompletionsTokens: 1000,
+				Model:                openai.O1Mini,
+				Messages: []openai.ChatCompletionMessage{
+					{
+						Role: openai.ChatMessageRoleUser,
+					},
+					{
+						Role: openai.ChatMessageRoleAssistant,
+					},
+				},
+				Tools: []openai.Tool{
+					{
+						Type: openai.ToolTypeFunction,
+					},
+				},
+			},
+			expectedError: openai.ErrO1BetaLimitationsTools,
+		},
+		{
+			name: "set_temperature_unsupported",
+			in: openai.ChatCompletionRequest{
+				MaxCompletionsTokens: 1000,
+				Model:                openai.O1Mini,
+				Messages: []openai.ChatCompletionMessage{
+					{
+						Role: openai.ChatMessageRoleUser,
+					},
+					{
+						Role: openai.ChatMessageRoleAssistant,
+					},
+				},
+				Temperature: float32(2),
+			},
+			expectedError: openai.ErrO1BetaLimitationsOther,
+		},
+		{
+			name: "set_top_unsupported",
+			in: openai.ChatCompletionRequest{
+				MaxCompletionsTokens: 1000,
+				Model:                openai.O1Mini,
+				Messages: []openai.ChatCompletionMessage{
+					{
+						Role: openai.ChatMessageRoleUser,
+					},
+					{
+						Role: openai.ChatMessageRoleAssistant,
+					},
+				},
+				Temperature: float32(1),
+				TopP:        float32(0.1),
+			},
+			expectedError: openai.ErrO1BetaLimitationsOther,
+		},
+		{
+			name: "set_n_unsupported",
+			in: openai.ChatCompletionRequest{
+				MaxCompletionsTokens: 1000,
+				Model:                openai.O1Mini,
+				Messages: []openai.ChatCompletionMessage{
+					{
+						Role: openai.ChatMessageRoleUser,
+					},
+					{
+						Role: openai.ChatMessageRoleAssistant,
+					},
+				},
+				Temperature: float32(1),
+				TopP:        float32(1),
+				N:           2,
+			},
+			expectedError: openai.ErrO1BetaLimitationsOther,
+		},
+		{
+			name: "set_presence_penalty_unsupported",
+			in: openai.ChatCompletionRequest{
+				MaxCompletionsTokens: 1000,
+				Model:                openai.O1Mini,
+				Messages: []openai.ChatCompletionMessage{
+					{
+						Role: openai.ChatMessageRoleUser,
+					},
+					{
+						Role: openai.ChatMessageRoleAssistant,
+					},
+				},
+				PresencePenalty: float32(1),
+			},
+			expectedError: openai.ErrO1BetaLimitationsOther,
+		},
+		{
+			name: "set_frequency_penalty_unsupported",
+			in: openai.ChatCompletionRequest{
+				MaxCompletionsTokens: 1000,
+				Model:                openai.O1Mini,
+				Messages: []openai.ChatCompletionMessage{
+					{
+						Role: openai.ChatMessageRoleUser,
+					},
+					{
+						Role: openai.ChatMessageRoleAssistant,
+					},
+				},
+				FrequencyPenalty: float32(0.1),
+			},
+			expectedError: openai.ErrO1BetaLimitationsOther,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			config := openai.DefaultConfig("whatever")
+			config.BaseURL = "http://localhost/v1"
+			client := openai.NewClientWithConfig(config)
+			ctx := context.Background()
+
+			_, err := client.CreateChatCompletion(ctx, tt.in)
+			checks.HasError(t, err)
+			msg := fmt.Sprintf("CreateChatCompletion should return beta limitations error, returned: %s", err)
+			checks.ErrorIs(t, err, tt.expectedError, msg)
+		})
+	}
+}
+
func TestChatRequestOmitEmpty(t *testing.T) {
	data, err := json.Marshal(openai.ChatCompletionRequest{
		// We set model b/c it's required, so omitempty doesn't make sense
@@ -97,6 +290,24 @@ func TestChatCompletions(t *testing.T) {
	checks.NoError(t, err, "CreateChatCompletion error")
}

+// TestO1ModelChatCompletions tests the chat completions endpoint with an o1
+// model using the mocked server.
+func TestO1ModelChatCompletions(t *testing.T) {
+	client, server, teardown := setupOpenAITestServer()
+	defer teardown()
+	server.RegisterHandler("/v1/chat/completions", handleChatCompletionEndpoint)
+	_, err := client.CreateChatCompletion(context.Background(), openai.ChatCompletionRequest{
+		Model:                openai.O1Preview,
+		MaxCompletionsTokens: 1000,
+		Messages: []openai.ChatCompletionMessage{
+			{
+				Role:    openai.ChatMessageRoleUser,
+				Content: "Hello!",
+			},
+		},
+	})
+	checks.NoError(t, err, "CreateChatCompletion error")
+}
+
// TestCompletions Tests the completions endpoint of the API using the mocked server.
func TestChatCompletionsWithHeaders(t *testing.T) {
	client, server, teardown := setupOpenAITestServer()