@@ -367,8 +367,10 @@ def execute(args):
     # EVALUATION
     ###################################################################
 
-    def concept_retrieval(retriever_func, c) -> Tuple[Set[str], float]:
+    def concept_retrieval(retriever_func, c, timeout=None) -> Tuple[Set[str], float]:
         start_time = time.time()
+        if timeout is not None:
+            return {i.str for i in retriever_func.instances(c, timeout=timeout)}, time.time() - start_time
         return {i.str for i in retriever_func.instances(c)}, time.time() - start_time
 
     # Collect concepts for evaluation
@@ -411,6 +413,7 @@ def concept_retrieval(retriever_func, c) -> Tuple[Set[str], float]:
 
     data = []
     skipped_expressions = []  # CEs skipped due to reasoner bugs
+    timeout_expressions = []  # CEs skipped due to timeout
 
     # Iterate over OWL Class Expressions
     for expression in (tqdm_bar := tqdm(concepts, position=0, leave=True)):
@@ -421,8 +424,20 @@ def concept_retrieval(retriever_func, c) -> Tuple[Set[str], float]:
             print(f"\n>>> [{type(expression).__name__}] {dl_str}", flush=True)
             print(f" GT ...", end="", flush=True)
 
-        # Retrieve ground truth results
-        retrieval_y, runtime_y = concept_retrieval(symbolic_kb, expression)
+        # Retrieve ground truth results (with timeout)
+        retrieval_y, runtime_y = concept_retrieval(symbolic_kb, expression, timeout=args.timeout)
+
+        # Detect timeout: if runtime >= timeout, the reasoner likely timed out
+        if runtime_y >= args.timeout * 0.95:
+            timeout_expressions.append({
+                "Expression": dl_str,
+                "Type": type(expression).__name__,
+                "Runtime": runtime_y,
+            })
+            tqdm_bar.write(f"[TIMEOUT] GT reasoner timed out after {runtime_y:.1f}s for: {dl_str}")
+            # Reinitialize GT reasoner to clear zombie threads from the timed-out call
+            symbolic_kb = SyncReasoner(ontology=args.path_kg, reasoner=args.reasoner)
+            continue
 
         if args.verbose:
             print(f" {len(retrieval_y)} instances in {runtime_y:.3f}s", flush=True)
@@ -540,6 +555,24 @@ def concept_retrieval(retriever_func, c) -> Tuple[Set[str], float]:
         f.write(latex_output)
     print(f"\nLaTeX table saved to {latex_filename}")
 
+    # Report timed-out expressions
+    if timeout_expressions:
+        print("\n" + "=" * 70)
+        print(f"TIMED OUT EXPRESSIONS (>{args.timeout}s): {len(timeout_expressions)}")
+        print("=" * 70)
+        timeout_df = pd.DataFrame(timeout_expressions)
+        print(f"\nBy Type:")
+        print(timeout_df["Type"].value_counts().to_string())
+        print(f"\nAll timed-out CEs:")
+        for entry in timeout_expressions:
+            print(f" [{entry['Type']}] {entry['Expression']} ({entry['Runtime']:.1f}s)")
+        # Save timed-out expressions alongside the main report
+        timeout_path = args.path_report.replace(".csv", "_timeout.csv")
+        timeout_df.to_csv(timeout_path, index=False)
+        print(f"\nTimed-out expressions saved to {timeout_path}")
+    else:
+        print("\nNo expressions timed out.")
+
     # Report skipped expressions due to reasoner bugs
     if skipped_expressions:
         print("\n" + "=" * 70)
@@ -586,6 +619,9 @@ def get_default_arguments():
                              "None (default) means no sampling — all properties are used.")
     parser.add_argument("--min_jaccard_similarity", type=float, default=0.0,
                         help="Minimum mean Jaccard similarity threshold")
+    parser.add_argument("--timeout", type=float, default=10,
+                        help="Timeout in seconds for the ground truth reasoner per expression. "
+                             "Expressions that exceed this are skipped for both GT and DDP (default: 10).")
     parser.add_argument("--num_nominals", type=int, default=10,
                         help="Number of OWL named individuals to sample for nominals")
     parser.add_argument("--path_report", type=str, default="DDP_Reasoning_Eval_Results.csv",