import os
import traceback
from functools import partial, wraps
-from pathlib import Path as Pathlib
from time import sleep
from typing import Any, Dict, List, Optional, Union

import asgi_correlation_id
import uvicorn
from fastapi import BackgroundTasks, Depends, FastAPI, Path, Query, Request
-from fastapi.responses import FileResponse, JSONResponse, RedirectResponse, Response
+from fastapi.responses import JSONResponse, RedirectResponse, Response
from fastapi.staticfiles import StaticFiles
from fastapi_cprofile.profiler import CProfileMiddleware
-from starlette.convertors import StringConvertor, register_url_convertor
from starlette.middleware.base import BaseHTTPMiddleware

from inference.core import logger
@@ -267,35 +265,40 @@ def with_route_exceptions(route):
    async def wrapped_route(*args, **kwargs):
        try:
            return await route(*args, **kwargs)
-        except ContentTypeInvalid:
+        except ContentTypeInvalid as error:
+            logger.error("%s: %s", type(error).__name__, error)
            resp = JSONResponse(
                status_code=400,
                content={
                    "message": "Invalid Content-Type header provided with request."
                },
            )
            traceback.print_exc()
-        except ContentTypeMissing:
+        except ContentTypeMissing as error:
+            logger.error("%s: %s", type(error).__name__, error)
            resp = JSONResponse(
                status_code=400,
                content={"message": "Content-Type header not provided with request."},
            )
            traceback.print_exc()
-        except InputImageLoadError as e:
+        except InputImageLoadError as error:
+            logger.error("%s: %s", type(error).__name__, error)
            resp = JSONResponse(
                status_code=400,
                content={
-                    "message": f"Could not load input image. Cause: {e.get_public_error_details()}"
+                    "message": f"Could not load input image. Cause: {error.get_public_error_details()}"
                },
            )
            traceback.print_exc()
-        except InvalidModelIDError:
+        except InvalidModelIDError as error:
+            logger.error("%s: %s", type(error).__name__, error)
            resp = JSONResponse(
                status_code=400,
                content={"message": "Invalid Model ID sent in request."},
            )
            traceback.print_exc()
-        except InvalidMaskDecodeArgument:
+        except InvalidMaskDecodeArgument as error:
+            logger.error("%s: %s", type(error).__name__, error)
            resp = JSONResponse(
                status_code=400,
                content={
@@ -304,7 +307,8 @@ async def wrapped_route(*args, **kwargs):
                },
            )
            traceback.print_exc()
-        except MissingApiKeyError:
+        except MissingApiKeyError as error:
+            logger.error("%s: %s", type(error).__name__, error)
            resp = JSONResponse(
                status_code=400,
                content={
@@ -319,6 +323,7 @@ async def wrapped_route(*args, **kwargs):
            ExecutionGraphStructureError,
            StepInputDimensionalityError,
        ) as error:
+            logger.error("%s: %s", type(error).__name__, error)
            content = WorkflowErrorResponse(
                message=str(error.public_message),
                error_type=error.__class__.__name__,
@@ -338,6 +343,7 @@ async def wrapped_route(*args, **kwargs):
            WorkflowExecutionEngineVersionError,
            NotSupportedExecutionEngineError,
        ) as error:
+            logger.error("%s: %s", type(error).__name__, error)
            resp = JSONResponse(
                status_code=400,
                content={
@@ -353,6 +359,7 @@ async def wrapped_route(*args, **kwargs):
            MalformedPayloadError,
            MessageToBigError,
        ) as error:
+            logger.error("%s: %s", type(error).__name__, error)
            resp = JSONResponse(
                status_code=400,
                content={
@@ -361,7 +368,11 @@ async def wrapped_route(*args, **kwargs):
                    "inner_error_type": error.inner_error_type,
                },
            )
-        except (RoboflowAPINotAuthorizedError, ProcessesManagerAuthorisationError):
+        except (
+            RoboflowAPINotAuthorizedError,
+            ProcessesManagerAuthorisationError,
+        ) as error:
+            logger.error("%s: %s", type(error).__name__, error)
            resp = JSONResponse(
                status_code=401,
                content={
@@ -371,7 +382,8 @@ async def wrapped_route(*args, **kwargs):
                },
            )
            traceback.print_exc()
-        except (RoboflowAPINotNotFoundError, InferenceModelNotFound):
+        except (RoboflowAPINotNotFoundError, InferenceModelNotFound) as error:
+            logger.error("%s: %s", type(error).__name__, error)
            resp = JSONResponse(
                status_code=404,
                content={
@@ -381,6 +393,7 @@ async def wrapped_route(*args, **kwargs):
            )
            traceback.print_exc()
        except ProcessesManagerNotFoundError as error:
+            logger.error("%s: %s", type(error).__name__, error)
            resp = JSONResponse(
                status_code=404,
                content={
@@ -394,28 +407,32 @@ async def wrapped_route(*args, **kwargs):
            InvalidEnvironmentVariableError,
            MissingServiceSecretError,
            ServiceConfigurationError,
-        ):
+        ) as error:
+            logger.error("%s: %s", type(error).__name__, error)
            resp = JSONResponse(
                status_code=500, content={"message": "Service misconfiguration."}
            )
            traceback.print_exc()
        except (
            PreProcessingError,
            PostProcessingError,
-        ):
+        ) as error:
+            logger.error("%s: %s", type(error).__name__, error)
            resp = JSONResponse(
                status_code=500,
                content={
                    "message": "Model configuration related to pre- or post-processing is invalid."
                },
            )
            traceback.print_exc()
-        except ModelArtefactError:
+        except ModelArtefactError as error:
+            logger.error("%s: %s", type(error).__name__, error)
            resp = JSONResponse(
                status_code=500, content={"message": "Model package is broken."}
            )
            traceback.print_exc()
-        except OnnxProviderNotAvailable:
+        except OnnxProviderNotAvailable as error:
+            logger.error("%s: %s", type(error).__name__, error)
            resp = JSONResponse(
                status_code=501,
                content={
@@ -429,21 +446,24 @@ async def wrapped_route(*args, **kwargs):
            RoboflowAPIUnsuccessfulRequestError,
            WorkspaceLoadError,
            MalformedWorkflowResponseError,
-        ):
+        ) as error:
+            logger.error("%s: %s", type(error).__name__, error)
            resp = JSONResponse(
                status_code=502,
                content={"message": "Internal error. Request to Roboflow API failed."},
            )
            traceback.print_exc()
-        except RoboflowAPIConnectionError:
+        except RoboflowAPIConnectionError as error:
+            logger.error("%s: %s", type(error).__name__, error)
            resp = JSONResponse(
                status_code=503,
                content={
                    "message": "Internal error. Could not connect to Roboflow API."
                },
            )
            traceback.print_exc()
-        except RoboflowAPITimeoutError:
+        except RoboflowAPITimeoutError as error:
+            logger.error("%s: %s", type(error).__name__, error)
            resp = JSONResponse(
                status_code=504,
                content={
@@ -452,6 +472,7 @@ async def wrapped_route(*args, **kwargs):
            )
            traceback.print_exc()
        except StepExecutionError as error:
+            logger.error("%s: %s", type(error).__name__, error)
            content = WorkflowErrorResponse(
                message=str(error.public_message),
                error_type=error.__class__.__name__,
@@ -471,6 +492,7 @@ async def wrapped_route(*args, **kwargs):
            )
            traceback.print_exc()
        except WorkflowError as error:
+            logger.error("%s: %s", type(error).__name__, error)
            resp = JSONResponse(
                status_code=500,
                content={
@@ -486,6 +508,7 @@ async def wrapped_route(*args, **kwargs):
            ProcessesManagerClientError,
            CommunicationProtocolError,
        ) as error:
+            logger.error("%s: %s", type(error).__name__, error)
            resp = JSONResponse(
                status_code=500,
                content={
@@ -495,7 +518,8 @@ async def wrapped_route(*args, **kwargs):
                },
            )
            traceback.print_exc()
-        except Exception:
+        except Exception as error:
+            logger.error("%s: %s", type(error).__name__, error)
            resp = JSONResponse(status_code=500, content={"message": "Internal error."})
            traceback.print_exc()
        return resp
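For orientation, every handler above lives inside the `with_route_exceptions` decorator, which turns exceptions into JSON error responses instead of letting them propagate. A minimal sketch of that wiring, assuming only what the diff shows; the `/example` route is illustrative, and the sketch keeps just the final catch-all branch:

```python
from functools import wraps

from fastapi import FastAPI
from fastapi.responses import JSONResponse

app = FastAPI()


def with_route_exceptions(route):
    @wraps(route)
    async def wrapped_route(*args, **kwargs):
        try:
            return await route(*args, **kwargs)
        except Exception as error:
            # The real chain logs type(error).__name__ and maps each known
            # exception type to a specific 4xx/5xx payload before this point.
            resp = JSONResponse(status_code=500, content={"message": "Internal error."})
        return resp

    return wrapped_route


@app.get("/example")
@with_route_exceptions
async def example():
    return {"ok": True}
```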
@@ -557,6 +581,17 @@ def __init__(
            root_path=root_path,
        )

+        app.mount(
+            "/static",
+            StaticFiles(directory="./inference/landing/out/static", html=True),
+            name="static",
+        )
+        app.mount(
+            "/_next/static",
+            StaticFiles(directory="./inference/landing/out/_next/static", html=True),
+            name="_next_static",
+        )
+
        @app.on_event("shutdown")
        async def on_shutdown():
            logger.info("Shutting down %s", description)
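The two new mounts sit in front of the catch-all `/` mount that this class registers at the end of `__init__` (renamed to `root` later in this diff). A sketch of the resulting layout, assuming the repo's landing-page build output exists on disk (StaticFiles checks the directory at construction time):

```python
from fastapi import FastAPI
from fastapi.staticfiles import StaticFiles

app = FastAPI()

# Specific asset prefixes are mounted first; Starlette matches mounts in
# registration order, so they must precede the catch-all.
app.mount(
    "/static",
    StaticFiles(directory="./inference/landing/out/static", html=True),
    name="static",
)
app.mount(
    "/_next/static",
    StaticFiles(directory="./inference/landing/out/_next/static", html=True),
    name="_next_static",
)
# ... API routes are registered in between ...
app.mount("/", StaticFiles(directory="./inference/landing/out", html=True), name="root")

# Mount names key reverse URL lookup, so the catch-all gets a distinct name
# rather than shadowing the new "static" asset mount:
print(app.url_path_for("static", path="app.css"))  # -> /static/app.css
```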
@@ -2194,19 +2229,9 @@ async def notebook_start(browserless: bool = False):
        app.include_router(builder_router, prefix="/build", tags=["builder"])

        if LEGACY_ROUTE_ENABLED:
-
-            class IntStringConvertor(StringConvertor):
-                """
-                Match digits but keep them as string.
-                """
-
-                regex = "\d+"
-
-            register_url_convertor("int_string", IntStringConvertor())
-
            # Legacy object detection inference path for backwards compatability
            @app.get(
-                "/{dataset_id}/{version_id:int_string}",
+                "/{dataset_id}/{version_id:str}",
                # Order matters in this response model Union. It will use the first matching model. For example, Object Detection Inference Response is a subset of Instance segmentation inference response, so instance segmentation must come first in order for the matching logic to work.
                response_model=Union[
                    InstanceSegmentationInferenceResponse,
@@ -2220,7 +2245,7 @@ class IntStringConvertor(StringConvertor):
                response_model_exclude_none=True,
            )
            @app.post(
-                "/{dataset_id}/{version_id:int_string}",
+                "/{dataset_id}/{version_id:str}",
                # Order matters in this response model Union. It will use the first matching model. For example, Object Detection Inference Response is a subset of Instance segmentation inference response, so instance segmentation must come first in order for the matching logic to work.
                response_model=Union[
                    InstanceSegmentationInferenceResponse,
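The deleted `IntStringConvertor` constrained `{version_id}` to digit-only segments (regex `\d+`); the stock `str` convertor matches any non-slash value, which is what allows the broadened descriptions below ("OR workspace ID" / "OR model ID"). A small illustration, with hypothetical path values:

```python
from fastapi import FastAPI

app = FastAPI()


@app.get("/{dataset_id}/{version_id:str}")
async def legacy_infer(dataset_id: str, version_id: str):
    # Previously only digit segments matched, e.g. /soccer-players/3;
    # now /my-workspace/my-model-id resolves to the same route.
    return {"dataset_id": dataset_id, "version_id": version_id}
```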
@@ -2239,10 +2264,10 @@ async def legacy_infer_from_request(
                background_tasks: BackgroundTasks,
                request: Request,
                dataset_id: str = Path(
-                    description="ID of a Roboflow dataset corresponding to the model to use for inference"
+                    description="ID of a Roboflow dataset corresponding to the model to use for inference OR workspace ID"
                ),
                version_id: str = Path(
-                    description="ID of a Roboflow dataset version corresponding to the model to use for inference"
+                    description="ID of a Roboflow dataset version corresponding to the model to use for inference OR model ID"
                ),
                api_key: Optional[str] = Query(
                    None,
@@ -2336,8 +2361,8 @@ async def legacy_infer_from_request(

                Args:
                    background_tasks: (BackgroundTasks) pool of fastapi background tasks
-                    dataset_id (str): ID of a Roboflow dataset corresponding to the model to use for inference.
-                    version_id (str): ID of a Roboflow dataset version corresponding to the model to use for inference.
+                    dataset_id (str): ID of a Roboflow dataset corresponding to the model to use for inference OR workspace ID
+                    version_id (str): ID of a Roboflow dataset version corresponding to the model to use for inference OR model ID
                    api_key (Optional[str], default None): Roboflow API Key passed to the model during initialization for artifact retrieval.
                    # Other parameters described in the function signature...
@@ -2390,6 +2415,7 @@ async def legacy_infer_from_request(
                    "Service secret is required to disable inference usage tracking"
                )
            if LAMBDA:
+                logger.debug("request.scope: %s", request.scope)
                request_model_id = (
                    request.scope["aws.event"]["requestContext"]["authorizer"][
                        "lambda"
@@ -2520,7 +2546,7 @@ async def model_add_legacy(
        app.mount(
            "/",
            StaticFiles(directory="./inference/landing/out", html=True),
-            name="static",
+            name="root",
        )

    def run(self):