@@ -80,19 +80,22 @@ def prepare_sample_records_doc_bin(
     return prefixed_doc_bin
 
 
-def test_openai_llm_connection(api_key: str, model: str):
+def test_openai_llm_connection(api_key: str, model: str, is_o_series: bool = False):
     # more here: https://platform.openai.com/docs/api-reference/making-requests
     headers = {
         "Content-Type": "application/json",
         "Authorization": f"Bearer {api_key}",
     }
-
+    if is_o_series:
+        add_payload = {"max_completion_tokens": 5}
+    else:
+        add_payload = {"max_tokens": 5}
     payload = {
         "model": model,
         "messages": [
             {"role": "user", "content": [{"type": "text", "text": "only say 'hello'"}]},
         ],
-        "max_tokens": 5,
+        **add_payload,
     }
 
     response = requests.post(
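A note on the new branch: OpenAI's o-series (reasoning) models reject the long-standing `max_tokens` field and require `max_completion_tokens` instead, which also budgets the model's hidden reasoning tokens, so the old payload would fail with a 400 for those models. A standalone sketch of the same switch (the helper name is illustrative, not from this codebase):

from typing import Dict

def token_limit_field(is_o_series: bool, limit: int = 5) -> Dict[str, int]:
    # o-series models reject "max_tokens"; "max_completion_tokens" is the
    # replacement and also covers the hidden reasoning tokens.
    if is_o_series:
        return {"max_completion_tokens": limit}
    return {"max_tokens": limit}

# merged into the request body exactly like the **add_payload unpacking above
payload = {"model": "gpt-4o", **token_limit_field(is_o_series=False)}
assert payload["max_tokens"] == 5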
@@ -124,7 +127,11 @@ def test_azure_foundry_llm_connection(api_key: str, base_endpoint: str):
 
 
 def test_azure_llm_connection(
-    api_key: str, base_endpoint: str, api_version: str, model: str
+    api_key: str,
+    base_endpoint: str,
+    api_version: str,
+    model: str,
+    is_o_series: bool = False,
 ):
     # more here: https://learn.microsoft.com/en-us/azure/ai-services/openai/reference-preview
     base_endpoint = base_endpoint.rstrip("/")
@@ -146,11 +153,15 @@ def test_azure_llm_connection(
         "api-key": api_key,
     }
 
+    if is_o_series:
+        add_payload = {"max_completion_tokens": 5}
+    else:
+        add_payload = {"max_tokens": 5}
     payload = {
         "messages": [
             {"role": "user", "content": [{"type": "text", "text": "only say 'hello'"}]},
         ],
-        "max_tokens": 5,
+        **add_payload,
     }
 
     response = requests.post(final_endpoint, headers=headers, json=payload)
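The Azure variant applies the same switch. The hunk's context does not show how `final_endpoint` is assembled, but per the Azure reference linked in the code, a deployments-style chat-completions URL has the shape sketched below (all values are placeholders); the `rstrip("/")` above exists so a pasted trailing slash does not produce a doubled one:

# Assumed shape of the endpoint the hunk posts to, per the linked Azure docs;
# resource name, deployment, and api-version are illustrative placeholders.
base_endpoint = "https://my-resource.openai.azure.com/".rstrip("/")
model = "my-gpt4o-deployment"  # Azure routes by deployment name
api_version = "2024-08-01-preview"

final_endpoint = (
    f"{base_endpoint}/openai/deployments/{model}/chat/completions"
    f"?api-version={api_version}"
)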
@@ -190,13 +201,15 @@ def validate_llm_config(llm_config: Dict[str, Any]):
         test_openai_llm_connection(
             api_key=llm_config["apiKey"],
             model=llm_config["model"],
+            is_o_series=llm_config.get("openAioSeries", False),
         )
     elif llm_config["llmIdentifier"] == enums.LLMProvider.AZURE.value:
         test_azure_llm_connection(
             api_key=llm_config["apiKey"],
             model=llm_config["model"],
             base_endpoint=llm_config["apiBase"],
             api_version=llm_config["apiVersion"],
+            is_o_series=llm_config.get("openAioSeries", False),
         )
     elif llm_config["llmIdentifier"] == enums.LLMProvider.AZURE_FOUNDRY.value:
         test_azure_foundry_llm_connection(
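Both call sites read the flag with `llm_config.get("openAioSeries", False)`, so configs saved before this change validate exactly as before. A hypothetical config that exercises the new path (the enum member name and key values are assumptions, not taken from the diff):

# Hypothetical OpenAI config; only "openAioSeries" is new, and omitting it
# falls back to False via dict.get().
llm_config = {
    "llmIdentifier": enums.LLMProvider.OPEN_AI.value,  # assumed member name
    "apiKey": "sk-...",     # placeholder
    "model": "o3-mini",     # illustrative o-series model id
    "openAioSeries": True,  # routes the test request to max_completion_tokens
}
validate_llm_config(llm_config)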
@@ -291,6 +304,8 @@ async def ac(record):
             "@@CACHE_FILE_UPLOAD_LINK@@": llm_config.get(
                 "llmAcCacheFileUploadLink", ""
             ),
+            # string quotes are replaced since bool("False") == True
+            '"@@IS_O_SERIES@@"': str(llm_config.get("openAioSeries", False)),
         }
     except KeyError:
         raise LlmResponseError(
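The quoted key is the subtle part here: the placeholder is replaced together with its surrounding quotes, so the generated source ends up with a bare `True`/`False` literal. Replacing only the inner text would leave the string `"False"`, and `bool("False")` is `True` because any non-empty string is truthy. A self-contained illustration with a made-up one-line template:

# Stand-in for the generated code this replacement table feeds.
template = 'IS_O_SERIES = "@@IS_O_SERIES@@"'

# Keyed with the quotes, as in the diff, so they vanish with the placeholder.
replacements = {'"@@IS_O_SERIES@@"': str(False)}  # str(False) -> "False"
for needle, value in replacements.items():
    template = template.replace(needle, value)

print(template)               # IS_O_SERIES = False  -> a real bool literal
assert bool("False") is True  # the pitfall the quoted key avoids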