@@ -2,21 +2,18 @@
 import traceback
 
 from fastapi import Request
-from fastapi.params import Header, Body
-from osbot_prefect.flows.Flow_Events__To__Prefect_Server import Flow_Events__To__Prefect_Server
-from osbot_utils.helpers.Random_Guid import Random_Guid
-from osbot_utils.helpers.flows.Flow import Flow
-from osbot_utils.helpers.flows.decorators.flow import flow
-from osbot_utils.helpers.flows.models.Flow__Config import Flow__Config
-from osbot_utils.utils.Dev import pprint
+from fastapi.params import Header
+from osbot_utils.helpers.flows.Flow import Flow
+from osbot_utils.helpers.flows.decorators.flow import flow
+from osbot_utils.helpers.flows.models.Flow_Run__Config import Flow_Run__Config
 from starlette.responses import StreamingResponse
 from osbot_fast_api.api.Fast_API_Routes import Fast_API_Routes
 from osbot_utils.context_managers.capture_duration import capture_duration
 from osbot_llms.OSBot_LLMs__Shared_Objects import osbot_llms__shared_objects
 from osbot_llms.fast_api.routes.Routes__OpenAI import Routes__OpenAI
 from osbot_llms.llms.chats.LLM__Chat_Completion__Resolve_Engine import LLM__Chat_Completion__Resolve_Engine
 from osbot_llms.llms.storage.Chats_Storage__S3_Minio import Chats_Storage__S3_Minio
-from osbot_llms.models.LLMs__Chat_Completion import LLMs__Chat_Completion, SWAGGER_EXAMPLE__LLMs__Chat_Completion
+from osbot_llms.models.LLMs__Chat_Completion import LLMs__Chat_Completion, SWAGGER_EXAMPLE__LLMs__Chat_Completion
 
 ROUTES_PATHS__CONFIG = ['/config/status', '/config/version']
 HEADER_NAME__CHAT_ID = 'osbot-llms-chat-id'
@@ -36,38 +33,35 @@ def execute_llm_request(self, llm_chat_completion):
         return 'no engine'
 
     async def handle_other_llms(self, llm_chat_completion: LLMs__Chat_Completion, request: Request, request_id: str):
-        @flow(flow_config=Flow__Config(log_to_console=True))
+        @flow(flow_config=Flow_Run__Config(log_to_console=True))
         def handle_other_llms__streamer() -> Flow:
             print("in handle_other_llms__streamer")
             print(llm_chat_completion.json())
             return StreamingResponse(self.handle_other_llms__streamer(llm_chat_completion, request, request_id), media_type='text/event-stream; charset=utf-8')
 
         stream = llm_chat_completion.stream
         if stream:
-            with Flow_Events__To__Prefect_Server():
-                with handle_other_llms__streamer() as _:
-                    _.execute_flow()
-                    return _.flow_return_value
+            with handle_other_llms__streamer() as _:
+                _.execute_flow()
+                return _.flow_return_value
         else:
             return await self.handle_other_llms__no_stream(llm_chat_completion, request, request_id)
 
     async def handle_other_llms__no_stream(self, llm_chat_completion: LLMs__Chat_Completion, request: Request, request_id: str):
-        @flow(flow_config=Flow__Config(log_to_console=True))
+        @flow(flow_config=Flow_Run__Config(log_to_console=True))
         def flow_handle_other_llms__no_stream() -> Flow:
             print("in handle_other_llms__streamer")
             print(llm_chat_completion.json())
             complete_answer = self.execute_llm_request(llm_chat_completion)
             try:
-                #request_headers = {key: value for key, value in request.headers.items()}
                 llm_chat_completion.llm_answer = complete_answer
             except:
                 pass
             return complete_answer
 
-        with Flow_Events__To__Prefect_Server():
-            with flow_handle_other_llms__no_stream() as _:
-                _.execute_flow()
-                return _.flow_return_value
+        with flow_handle_other_llms__no_stream() as _:
+            _.execute_flow()
+            return _.flow_return_value
 
 
     async def handle_other_llms__streamer(self, llm_chat_completion: LLMs__Chat_Completion, request: Request, request_id: str):
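For context, below is a minimal sketch of the flow-execution pattern this diff converges on: Flow__Config is replaced by Flow_Run__Config, the Flow_Events__To__Prefect_Server wrapper is dropped, and the @flow-decorated function is run directly as a context manager. It assumes the osbot_utils flows API behaves exactly as the hunks above use it (the decorated call returns a Flow object exposing execute_flow() and flow_return_value); an_example_flow is a hypothetical function for illustration, not part of this codebase.

    # Sketch only: assumes @flow(...) returns a Flow usable as a context manager,
    # as the hunks above use it. Flow_Run__Config is the renamed Flow__Config.
    from osbot_utils.helpers.flows.Flow import Flow
    from osbot_utils.helpers.flows.decorators.flow import flow
    from osbot_utils.helpers.flows.models.Flow_Run__Config import Flow_Run__Config

    @flow(flow_config=Flow_Run__Config(log_to_console=True))
    def an_example_flow() -> Flow:          # hypothetical flow body
        return 'some value'

    with an_example_flow() as _:            # calling the decorated function yields a Flow
        _.execute_flow()                    # runs the decorated body
        result = _.flow_return_value        # value returned by the body

    assert result == 'some value'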