@@ -3,9 +3,6 @@

 from fastapi import Request
 from fastapi.params import Header
-from osbot_utils.helpers.flows.Flow import Flow
-from osbot_utils.helpers.flows.decorators.flow import flow
-from osbot_utils.helpers.flows.models.Flow_Run__Config import Flow_Run__Config
 from starlette.responses import StreamingResponse
 from osbot_fast_api.api.Fast_API_Routes import Fast_API_Routes
 from osbot_utils.context_managers.capture_duration import capture_duration
@@ -33,35 +30,26 @@ def execute_llm_request(self, llm_chat_completion):
         return 'no engine'

     async def handle_other_llms(self, llm_chat_completion: LLMs__Chat_Completion, request: Request, request_id: str):
-        @flow(flow_config=Flow_Run__Config(log_to_console=True))
-        def handle_other_llms__streamer() -> Flow:
+        def handle_other_llms__streamer():
             print("in handle_other_llms__streamer")
             print(llm_chat_completion.json())
             return StreamingResponse(self.handle_other_llms__streamer(llm_chat_completion, request, request_id), media_type='text/event-stream; charset=utf-8')

         stream = llm_chat_completion.stream
         if stream:
-            with handle_other_llms__streamer() as _:
-                _.execute_flow()
-                return _.flow_return_value
+            return handle_other_llms__streamer()
         else:
             return await self.handle_other_llms__no_stream(llm_chat_completion, request, request_id)

     async def handle_other_llms__no_stream(self, llm_chat_completion: LLMs__Chat_Completion, request: Request, request_id: str):
-        @flow(flow_config=Flow_Run__Config(log_to_console=True))
-        def flow_handle_other_llms__no_stream() -> Flow:
-            print("in handle_other_llms__streamer")
-            print(llm_chat_completion.json())
-            complete_answer = self.execute_llm_request(llm_chat_completion)
-            try:
-                llm_chat_completion.llm_answer = complete_answer
-            except:
-                pass
-            return complete_answer
-
-        with flow_handle_other_llms__no_stream() as _:
-            _.execute_flow()
-            return _.flow_return_value
+        print("in handle_other_llms__streamer")
+        print(llm_chat_completion.json())
+        complete_answer = self.execute_llm_request(llm_chat_completion)
+        try:
+            llm_chat_completion.llm_answer = complete_answer
+        except:
+            pass
+        return complete_answer


     async def handle_other_llms__streamer(self, llm_chat_completion: LLMs__Chat_Completion, request: Request, request_id: str):