@@ -405,11 +405,10 @@ def wrapped(fn: AsyncCron[R] | SyncCron[R]) -> RegisteredCron[C, R]:
             )
             if task.fn_name in self.registry:
                 raise StreaqError(
-                    f"A task named {task.fn_name!r} has already been registered!"
+                    f"A task named {task.fn_name} has already been registered!"
                 )
             self.cron_jobs[task.fn_name] = task
             self.registry[task.fn_name] = task
-            logger.debug(f"cron job {task.fn_name} registered in worker {self.id}")
             return task

         return wrapped  # type: ignore
@@ -460,10 +459,9 @@ def wrapped(
             )
             if task.fn_name in self.registry:
                 raise StreaqError(
-                    f"A task named {task.fn_name!r} has already been registered!"
+                    f"A task named {task.fn_name} has already been registered!"
                 )
             self.registry[task.fn_name] = task
-            logger.debug(f"task {task.fn_name} registered in worker {self.id}")
             return task

         return wrapped  # type: ignore
@@ -637,7 +635,9 @@ async def abort_tasks(self, tasks: set[str]) -> None:
                 and not self._cancel_scopes[task_id].cancel_called
             ):
                 self._cancel_scopes[task_id].cancel()
-                logger.debug(f"aborting task {task_id} in worker {self.id}")
+                logger.debug(
+                    f"task ⊘ {task_id} marked for abortion in worker {self.id}"
+                )

    async def schedule_cron_jobs(self) -> None:
        """
@@ -767,7 +767,7 @@ def key(mid: str) -> str:
         if len(output) > truncate_length:
             output = f"{output[:truncate_length]}…"
         if not silent:
-            logger.info(f"task {task_id} ← {output}")
+            logger.info(f"task {fn_name} ■ {task_id} ← {output}")
         if triggers:
             args = self.serialize(to_tuple(return_value))
             pipe.set(key(REDIS_PREVIOUS), args, ex=timedelta(minutes=5))
@@ -841,26 +841,26 @@ def key(mid: str) -> str:
         await pipe.execute()
         raw, task_try, abort, active = await asyncio.gather(*commands)
         if not raw:
-            logger.warning(f"task {task_id} expired †")
+            logger.warning(f"task † {task_id} expired")
             return await self.finish_failed_task(
                 msg, StreaqError("Task expired!"), task_try
             )
         if not active:
-            logger.warning(f"task {task_id} reclaimed ↩ from worker {self.id}")
+            logger.warning(f"task ↩ {task_id} reclaimed from worker {self.id}")
             self.counters["relinquished"] += 1
             return None

         try:
             data = self.deserialize(raw)
         except StreaqError as e:
-            logger.exception(f"Failed to deserialize task {task_id}!")
+            logger.error(f"task ☒ {task_id} failed to deserialize")
             return await self.finish_failed_task(msg, e, task_try)

         if (fn_name := data["f"]) not in self.registry:
-            logger.error(f"Missing function {fn_name}, can't execute task {task_id}!")
+            logger.error(f"task {fn_name} ⊘ {task_id} aborted, missing function")
             return await self.finish_failed_task(
                 msg,
-                StreaqError("Nonexistent function!"),
+                StreaqError(f"Missing function {fn_name}!"),
                 task_try,
                 enqueue_time=data["t"],
                 fn_name=data["f"],
@@ -869,27 +869,27 @@ def key(mid: str) -> str:
             )
         if abort:
             if not task.silent:
-                logger.info(f"task {task_id} aborted ⊘ prior to run")
+                logger.info(f"task {fn_name} ⊘ {task_id} aborted prior to run")
             return await self.finish_failed_task(
                 msg,
                 asyncio.CancelledError("Task aborted prior to run!"),
                 task_try,
                 enqueue_time=data["t"],
-                fn_name=data["f"],
+                fn_name=fn_name,
                 silent=task.silent,
                 ttl=task.ttl,
             )
         if task.max_tries and task_try > task.max_tries:
             if not task.silent:
                 logger.warning(
-                    f"task {task_id} failed × after {task.max_tries} retries"
+                    f"task {fn_name} × {task_id} failed after {task.max_tries} retries"
                 )
             return await self.finish_failed_task(
                 msg,
-                StreaqError(f"Max retry attempts reached for task {task_id}!"),
+                StreaqError("Max retry attempts reached for task!"),
                 task_try,
                 enqueue_time=data["t"],
-                fn_name=data["f"],
+                fn_name=fn_name,
                 silent=task.silent,
                 ttl=task.ttl,
             )
@@ -901,7 +901,7 @@ def key(mid: str) -> str:
         after = data.get("A")
         pipe = await self.redis.pipeline(transaction=True)
         if task.unique:
-            lock_key = self.prefix + REDIS_UNIQUE + task.fn_name
+            lock_key = self.prefix + REDIS_UNIQUE + fn_name
             locked = pipe.set(
                 lock_key, task_id, get=True, condition=PureToken.NX, pxat=timeout
             )
@@ -917,7 +917,8 @@ def key(mid: str) -> str:
         if existing and existing != task_id:
             if not task.silent:
                 logger.warning(
-                    f"unique task {task_id} clashed ↯ with running task {existing}"
+                    f"task {fn_name} ↯ {task_id} clashed with unique task "
+                    f"{existing}"
                 )
             return await self.finish_failed_task(
                 msg,
@@ -927,7 +928,7 @@ def key(mid: str) -> str:
                 ),
                 task_try,
                 enqueue_time=data["t"],
-                fn_name=data["f"],
+                fn_name=fn_name,
                 silent=task.silent,
                 ttl=task.ttl,
             )
@@ -959,7 +960,7 @@ async def _fn(*args: Any, **kwargs: Any) -> Any:
             return await task.fn(*args, **kwargs)

         if not task.silent:
-            logger.info(f"task {task_id} → worker {self.id}")
+            logger.info(f"task {task.fn_name} □ {task_id} → worker {self.id}")

         wrapped = _fn
         for middleware in reversed(self.middlewares):
@@ -978,7 +979,7 @@ async def _fn(*args: Any, **kwargs: Any) -> Any:
                 success = False
                 done = True
                 if not task.silent:
-                    logger.info(f"task {task_id} aborted ⊘")
+                    logger.info(f"task {task.fn_name} ⊘ {task_id} aborted")
                 self.counters["aborted"] += 1
                 self.counters["failed"] -= 1  # this will get incremented later
             except StreaqRetry as e:
@@ -988,22 +989,26 @@ async def _fn(*args: Any, **kwargs: Any) -> Any:
988989 schedule = datetime_ms (e .schedule )
989990 if not task .silent :
990991 logger .exception (f"Retrying task { task_id } !" )
991- logger .info (f"retrying ↻ task { task_id } at { schedule } " )
992+ logger .info (
993+ f"task { task .fn_name } ↻ { task_id } retrying at { schedule } "
994+ )
992995 else :
993996 delay = to_ms (e .delay ) if e .delay is not None else task_try ** 2 * 1000
994997 schedule = now_ms () + delay
995998 if not task .silent :
996999 logger .exception (f"Retrying task { task_id } !" )
997- logger .info (f"retrying ↻ task { task_id } in { delay } s" )
1000+ logger .info (f"task { task . fn_name } ↻ { task_id } retrying in { delay } s" )
9981001 except TimeoutError as e :
9991002 if not task .silent :
1000- logger .error (f"task { task_id } timed out … " )
1003+ logger .error (f"task { task . fn_name } … { task_id } timed out" )
10011004 result = e
10021005 success = False
10031006 done = True
10041007 except asyncio .CancelledError :
10051008 if not task .silent :
1006- logger .info (f"task { task_id } cancelled, will be retried ↻" )
1009+ logger .info (
1010+ f"task { task .fn_name } ↻ { task_id } cancelled, will be retried"
1011+ )
10071012 success = False
10081013 done = False
10091014 raise # best practice from anyio docs
@@ -1012,8 +1017,8 @@ async def _fn(*args: Any, **kwargs: Any) -> Any:
                 success = False
                 done = True
                 if not task.silent:
-                    logger.info(f"task {task_id} failed ×")
                     logger.exception(f"Task {task_id} failed!")
+                    logger.info(f"task {task.fn_name} × {task_id} failed")
             finally:
                 with CancelScope(shield=True):
                     finish_time = now_ms()
@@ -1057,7 +1062,7 @@ async def fail_task_dependents(self, dependents: list[str]) -> None:
         self.counters["failed"] += len(dependents)
         to_delete: list[KeyT] = []
         for dep_id in dependents:
-            logger.info(f"task {dep_id} dependency failed ×")
+            logger.info(f"task dependent × {dep_id} failed")
             to_delete.append(self.prefix + REDIS_TASK + dep_id)
             pipe.set(self.results_key + dep_id, result, ex=300)
             pipe.publish(self._channel_key + dep_id, result)
0 commit comments