@@ -2,8 +2,9 @@ use crate::conversation::message::{Message, MessageContent};
 use crate::model::ModelConfig;
 use crate::providers::formats::anthropic::{thinking_effort, thinking_type, ThinkingType};
 use crate::providers::utils::{
-    convert_image, detect_image_path, is_valid_function_name, load_image_file, safely_parse_json,
-    sanitize_function_name, ImageFormat,
+    convert_image, detect_image_path, extract_reasoning_effort, is_openai_responses_model,
+    is_valid_function_name, load_image_file, safely_parse_json, sanitize_function_name,
+    ImageFormat,
 };
 use anyhow::{anyhow, Error};
 use rmcp::model::{
@@ -581,24 +582,8 @@ pub fn create_request(
         ));
     }
 
-    let is_openai_reasoning_model = model_config.is_openai_reasoning_model();
-    let (model_name, reasoning_effort) = if is_openai_reasoning_model {
-        let parts: Vec<&str> = model_config.model_name.split('-').collect();
-        let last_part = parts.last().unwrap();
-
-        match *last_part {
-            "low" | "medium" | "high" => {
-                let base_name = parts[..parts.len() - 1].join("-");
-                (base_name, Some(last_part.to_string()))
-            }
-            _ => (
-                model_config.model_name.to_string(),
-                Some("medium".to_string()),
-            ),
-        }
-    } else {
-        (model_config.model_name.to_string(), None)
-    };
+    let (model_name, reasoning_effort) = extract_reasoning_effort(&model_config.model_name);
+    let is_openai_reasoning_model = is_openai_responses_model(&model_name);
 
     let system_message = DatabricksMessage {
         role: "system".to_string(),
@@ -1073,6 +1058,63 @@ mod tests {
         Ok(())
     }
 
+    #[test]
+    fn test_create_request_reasoning_effort_xhigh() -> anyhow::Result<()> {
+        let model_config = ModelConfig {
+            model_name: "o3-xhigh".to_string(),
+            context_limit: Some(4096),
+            temperature: None,
+            max_tokens: Some(1024),
+            toolshim: false,
+            toolshim_model: None,
+            fast_model_config: None,
+            request_params: None,
+            reasoning: None,
+        };
+        let request = create_request(&model_config, "system", &[], &[], &ImageFormat::OpenAi)?;
+        assert_eq!(request["model"], "o3");
+        assert_eq!(request["reasoning_effort"], "xhigh");
+        Ok(())
+    }
+
+    #[test]
+    fn test_create_request_reasoning_effort_none() -> anyhow::Result<()> {
+        let model_config = ModelConfig {
+            model_name: "o3-none".to_string(),
+            context_limit: Some(4096),
+            temperature: None,
+            max_tokens: Some(1024),
+            toolshim: false,
+            toolshim_model: None,
+            fast_model_config: None,
+            request_params: None,
+            reasoning: None,
+        };
+        let request = create_request(&model_config, "system", &[], &[], &ImageFormat::OpenAi)?;
+        assert_eq!(request["model"], "o3");
+        assert_eq!(request["reasoning_effort"], "none");
+        Ok(())
+    }
+
+    #[test]
+    fn test_create_request_reasoning_effort_for_prefixed_gpt5_model() -> anyhow::Result<()> {
+        let model_config = ModelConfig {
+            model_name: "databricks-gpt-5.4-high".to_string(),
+            context_limit: Some(4096),
+            temperature: None,
+            max_tokens: Some(1024),
+            toolshim: false,
+            toolshim_model: None,
+            fast_model_config: None,
+            request_params: None,
+            reasoning: None,
+        };
+        let request = create_request(&model_config, "system", &[], &[], &ImageFormat::OpenAi)?;
+        assert_eq!(request["model"], "databricks-gpt-5.4");
+        assert_eq!(request["reasoning_effort"], "high");
+        Ok(())
+    }
+
     #[test]
     fn test_create_request_adaptive_thinking_for_46_models() -> anyhow::Result<()> {
         let _guard = env_lock::lock_env([
0 commit comments