@@ -54,16 +54,20 @@ def __init__(self, *, url: str, token: str | None, model: str, proxy: str | None
5454 )
5555
5656
57- default_llm = OpenAIClient
58- llm_id = os .environ .get ('LLM_API_TYPE' , default_llm .ID )
59- logger .debug ('Using LLM: %s' , llm_id )
60-
61- llm_kwargs = {
62- 'url' : os .environ .get ('LLM_API_URL' ),
63- 'token' : os .environ .get ('LLM_API_TOKEN' ) or None ,
64- 'model' : os .environ .get ('LLM_API_MODEL' ),
65- 'proxy' : os .environ .get ('LLM_API_PROXY' ) or None ,
66- }
67- logger .debug ('Using LLM args: %s' , llm_kwargs )
68-
69- llm = llms [llm_id ](** llm_kwargs ).chat_model
def chat_model_from_env() -> BaseChatModel:
    """Build and return a chat model configured from LLM_API_* env vars.

    Reads LLM_API_TYPE (defaulting to the OpenAI client's ID), looks the
    client class up in ``llms``, and instantiates it with the URL / token /
    model / proxy taken from the environment. Empty token/proxy strings are
    normalized to ``None``.
    """
    env = os.environ
    client_id = env.get('LLM_API_TYPE', OpenAIClient.ID)
    logger.debug('Using LLM: %s', client_id)
    # Empty-string env vars are treated the same as unset for token/proxy.
    client_kwargs = {
        'url': env.get('LLM_API_URL'),
        'token': env.get('LLM_API_TOKEN') or None,
        'model': env.get('LLM_API_MODEL'),
        'proxy': env.get('LLM_API_PROXY') or None,
    }
    logger.debug('Using LLM args: %s', client_kwargs)
    client = llms[client_id](**client_kwargs)
    return client.chat_model
69+
70+
# Construct the module-wide chat model only when an LLM backend is
# explicitly selected via LLM_API_TYPE; otherwise leave it unset.
llm: BaseChatModel | None = (
    chat_model_from_env() if 'LLM_API_TYPE' in os.environ else None
)
# Compare with `is None`, not truthiness: a falsy-but-valid model object
# must not trigger the "not configured" warning.
if llm is None:
    logger.warning('LLM is not configured')
0 commit comments