55settings = settings .settings
66
77import time
8- import asyncio
9- import json
108import httpx
119import netaddr
1210import uvicorn
1311import requests
1412import traceback
1513import bittensor as bt
16- from starlette .responses import JSONResponse
1714from loguru import logger
18- from fastapi import APIRouter , Depends , FastAPI , Request , HTTPException
15+ from fastapi import APIRouter , FastAPI , Request , HTTPException
1916from starlette .background import BackgroundTask
2017from starlette .responses import StreamingResponse
2118from bittensor .subtensor import serve_extrinsic
# Default system prompt for the miner's model; fixes the "it's"/"its" typo
# so the possessive reads correctly in the prompt sent upstream.
SYSTEM_PROMPT = """You are a helpful agent that does its best to answer all questions!"""
3532
3633
class OpenAIMiner:
    """Miner that proxies chat-completion requests to the OpenAI API.

    Holds a pre-configured async HTTP client; request handlers are
    registered as routes by ``run()``.
    """

    def __init__(self):
        # Flag polled by the serving loop to trigger a clean shutdown.
        self.should_exit = False
        # Async client bound to the OpenAI REST API; the key is read from
        # application settings once at construction time.
        self.client = httpx.AsyncClient(
            base_url="https://api.openai.com/v1",
            headers={
                "Authorization": f"Bearer {settings.OPENAI_API_KEY}",
                "Content-Type": "application/json",
            },
        )
        # SECURITY: never print the raw API key (the original logged the
        # secret to stdout); only report whether one is configured.
        print("OpenAI Key configured:", bool(settings.OPENAI_API_KEY))
4945
5046 async def format_openai_query (self , request : Request ):
5147 # Read the JSON data once
5248 data = await request .json ()
53-
49+
5450 # Extract the required fields
5551 openai_request = {}
5652 for key in ["messages" , "model" , "stream" ]:
5753 if key in data :
5854 openai_request [key ] = data [key ]
5955 openai_request ["model" ] = MODEL_ID
60-
56+
6157 return openai_request
62-
58+
6359 async def create_chat_completion (self , request : Request ):
6460 bt .logging .info (
6561 "\u2713 " ,
6662 f"Getting Chat Completion request from { request .headers .get ('Epistula-Signed-By' , '' )[:8 ]} !" ,
6763 )
68- req = self .client .build_request (
69- "POST" , "chat/completions" , json = await self .format_openai_query (request )
70- )
64+ logger .debug ("Starting chat completion request..." )
65+ req = self .client .build_request ("POST" , "chat/completions" , json = await self .format_openai_query (request ))
7166 r = await self .client .send (req , stream = True )
72- return StreamingResponse (
73- r .aiter_raw (), background = BackgroundTask (r .aclose ), headers = r .headers
74- )
67+ logger .debug ("Chat completion request returning..." )
68+ return StreamingResponse (r .aiter_raw (), background = BackgroundTask (r .aclose ), headers = r .headers )
7569
7670 # async def create_chat_completion(self, request: Request):
7771 # bt.logging.info(
@@ -104,7 +98,7 @@ async def create_chat_completion(self, request: Request):
10498 # "\u2713",
10599 # f"Getting Chat Completion request from {request.headers.get('Epistula-Signed-By', '')[:8]}!",
106100 # )
107-
101+
108102 # async def word_stream():
109103 # words = "This is a test stream".split()
110104 # for word in words:
@@ -133,30 +127,27 @@ async def create_chat_completion(self, request: Request):
133127 # }
134128 # yield f"data: {json.dumps(data)}\n\n"
135129 # yield "data: [DONE]\n\n"
136-
130+
137131 # return StreamingResponse(word_stream(), media_type='text/event-stream')
138132
139133 async def check_availability (self , request : Request ):
140134 print ("Checking availability" )
141135 # Parse the incoming JSON request
142136 data = await request .json ()
143- task_availabilities = data .get (' task_availabilities' , {})
144- llm_model_availabilities = data .get (' llm_model_availabilities' , {})
145-
137+ task_availabilities = data .get (" task_availabilities" , {})
138+ llm_model_availabilities = data .get (" llm_model_availabilities" , {})
139+
146140 # Set all task availabilities to True
147141 task_response = {key : True for key in task_availabilities }
148-
142+
149143 # Set all model availabilities to False
150144 model_response = {key : False for key in llm_model_availabilities }
151-
145+
152146 # Construct the response dictionary
153- response = {
154- 'task_availabilities' : task_response ,
155- 'llm_model_availabilities' : model_response
156- }
157-
147+ response = {"task_availabilities" : task_response , "llm_model_availabilities" : model_response }
148+
158149 return response
159-
150+
160151 async def verify_request (
161152 self ,
162153 request : Request ,
@@ -170,18 +161,14 @@ async def verify_request(
170161 signed_by = request .headers .get ("Epistula-Signed-By" )
171162 signed_for = request .headers .get ("Epistula-Signed-For" )
172163 if signed_for != self .wallet .hotkey .ss58_address :
173- raise HTTPException (
174- status_code = 400 , detail = "Bad Request, message is not intended for self"
175- )
164+ raise HTTPException (status_code = 400 , detail = "Bad Request, message is not intended for self" )
176165 if signed_by not in self .metagraph .hotkeys :
177166 raise HTTPException (status_code = 401 , detail = "Signer not in metagraph" )
178167
179168 uid = self .metagraph .hotkeys .index (signed_by )
180169 stake = self .metagraph .S [uid ].item ()
181170 if not self .config .no_force_validator_permit and stake < 10000 :
182- bt .logging .warning (
183- f"Blacklisting request from { signed_by } [uid={ uid } ], not enough stake -- { stake } "
184- )
171+ bt .logging .warning (f"Blacklisting request from { signed_by } [uid={ uid } ], not enough stake -- { stake } " )
185172 raise HTTPException (status_code = 401 , detail = "Stake below minimum: {stake}" )
186173
187174 # If anything is returned here, we can throw
@@ -200,8 +187,7 @@ async def verify_request(
200187 raise HTTPException (status_code = 400 , detail = err )
201188
202189 def run (self ):
203-
204- external_ip = None #settings.EXTERNAL_IP
190+ external_ip = None # settings.EXTERNAL_IP
205191 if not external_ip or external_ip == "[::]" :
206192 try :
207193 external_ip = requests .get ("https://checkip.amazonaws.com" ).text .strip ()
@@ -232,7 +218,7 @@ def run(self):
232218 router .add_api_route (
233219 "/v1/chat/completions" ,
234220 self .create_chat_completion ,
235- #dependencies=[Depends(self.verify_request)],
221+ # dependencies=[Depends(self.verify_request)],
236222 methods = ["POST" ],
237223 )
238224 router .add_api_route (
@@ -244,7 +230,8 @@ def run(self):
244230 fast_config = uvicorn .Config (
245231 app ,
246232 host = "0.0.0.0" ,
247- port = settings .AXON_PORT ,
233+ # port=settings.AXON_PORT,
234+ port = 8008 ,
248235 log_level = "info" ,
249236 loop = "asyncio" ,
250237 )
0 commit comments