|
13 | 13 | from ..process.process import Process, LoopItems |
14 | 14 | import asyncio |
15 | 15 | import uuid |
| 16 | +from enum import Enum |
| 17 | + |
| 18 | +# Task status constants |
| 19 | +class TaskStatus(Enum): |
| 20 | + """Enumeration for task status values to ensure consistency""" |
| 21 | + COMPLETED = "completed" |
| 22 | + IN_PROGRESS = "in progress" |
| 23 | + NOT_STARTED = "not started" |
| 24 | + FAILED = "failed" |
| 25 | + UNKNOWN = "unknown" |
16 | 26 |
|
17 | 27 | # Set up logger |
18 | 28 | logger = logging.getLogger(__name__) |
@@ -49,6 +59,55 @@ def process_video(video_path: str, seconds_per_frame=2): |
49 | 59 | video.release() |
50 | 60 | return base64_frames |
51 | 61 |
|
def process_task_context(context_item, verbose=0, user_id=None, fallback_query=""):
    """
    Build the context string for a single task-context item.

    Centralizes the context-formatting logic shared by the async and sync
    task-execution paths so the two methods cannot drift apart.

    Args:
        context_item: Item to format. Supported shapes:
            - str: included verbatim as input content
            - list: elements stringified and space-joined
            - object with a ``result`` attribute (a Task): its result is
              included only when its status is "completed"
            - dict containing a "vector_store" key: queried via Knowledge
            - anything else: stringified as a fallback
        verbose: Verbosity level forwarded to Knowledge.
        user_id: Optional user id used to filter vector-store searches.
        fallback_query: Query used for the vector-store search when the
            dict carries no "query" key (callers typically pass the task
            description; defaults to an empty search for compatibility).

    Returns:
        str: Formatted context string for this item.
    """
    if isinstance(context_item, str):
        return f"Input Content:\n{context_item}"

    if isinstance(context_item, list):
        return f"Input Content: {' '.join(str(x) for x in context_item)}"

    if hasattr(context_item, 'result'):  # Task object
        # Only surface results from tasks that actually completed, so the
        # prompt is not polluted with stale or missing data.
        task_status = getattr(context_item, 'status', None)
        task_name = context_item.name if context_item.name else context_item.description

        if task_status == TaskStatus.COMPLETED.value:
            if context_item.result:
                return f"Result of previous task {task_name}:\n{context_item.result.raw}"
            return f"Previous task {task_name} completed but produced no result."
        return (
            f"Previous task {task_name} is not yet completed "
            f"(status: {task_status or TaskStatus.UNKNOWN.value})."
        )

    if isinstance(context_item, dict) and "vector_store" in context_item:
        from ..knowledge.knowledge import Knowledge
        try:
            # Accept both a JSON string and an already-parsed dict config.
            cfg = context_item["vector_store"]
            if isinstance(cfg, str):
                cfg = json.loads(cfg)

            knowledge = Knowledge(config={"vector_store": cfg}, verbose=verbose)

            # Prefer an explicit query on the context item; otherwise use the
            # caller-supplied fallback (e.g. the task description). The old
            # `.get("query", "")` silently searched with an empty string.
            query = context_item.get("query") or fallback_query

            # Only use user_id as filter (empty/None means unfiltered).
            db_results = knowledge.search(query, user_id=user_id or None)
            return f"[DB Context]: {db_results}"
        except Exception as e:
            # Vector-store lookups are best-effort: degrade to an error
            # marker instead of failing the whole task prompt.
            return f"[Vector DB Error]: {e}"

    return str(context_item)  # Fallback for unknown types
| 110 | + |
52 | 111 | class PraisonAIAgents: |
53 | 112 | def __init__(self, agents, tasks=None, verbose=0, completion_checker=None, max_retries=5, process="sequential", manager_llm=None, memory=False, memory_config=None, embedder=None, user_id=None, max_iter=10, stream=True, name: Optional[str] = None): |
54 | 113 | # Add check at the start if memory is requested |
@@ -250,44 +309,20 @@ async def aexecute_task(self, task_id): |
250 | 309 | if task.context: |
251 | 310 | context_results = [] # Use list to avoid duplicates |
252 | 311 | for context_item in task.context: |
253 | | - if isinstance(context_item, str): |
254 | | - context_results.append(f"Input Content:\n{context_item}") |
255 | | - elif isinstance(context_item, list): |
256 | | - context_results.append(f"Input Content: {' '.join(str(x) for x in context_item)}") |
257 | | - elif hasattr(context_item, 'result'): # Task object |
258 | | - if context_item.result: |
259 | | - context_results.append( |
260 | | - f"Result of previous task {context_item.name if context_item.name else context_item.description}:\n{context_item.result.raw}" |
261 | | - ) |
262 | | - else: |
263 | | - context_results.append( |
264 | | - f"Previous task {context_item.name if context_item.name else context_item.description} has no result yet." |
265 | | - ) |
266 | | - elif isinstance(context_item, dict) and "vector_store" in context_item: |
267 | | - from ..knowledge.knowledge import Knowledge |
268 | | - try: |
269 | | - # Handle both string and dict configs |
270 | | - cfg = context_item["vector_store"] |
271 | | - if isinstance(cfg, str): |
272 | | - cfg = json.loads(cfg) |
273 | | - |
274 | | - knowledge = Knowledge(config={"vector_store": cfg}, verbose=self.verbose) |
275 | | - |
276 | | - # Only use user_id as filter |
277 | | - db_results = knowledge.search( |
278 | | - task.description, |
279 | | - user_id=self.user_id if self.user_id else None |
280 | | - ) |
281 | | - context_results.append(f"[DB Context]: {str(db_results)}") |
282 | | - except Exception as e: |
283 | | - context_results.append(f"[Vector DB Error]: {e}") |
| 312 | + # Use the centralized helper function |
| 313 | + context_str = process_task_context(context_item, self.verbose, self.user_id) |
| 314 | + context_results.append(context_str) |
284 | 315 |
|
285 | | - # Join unique context results |
| 316 | + # Join unique context results with proper formatting |
286 | 317 | unique_contexts = list(dict.fromkeys(context_results)) # Remove duplicates |
| 318 | + if self.verbose >= 3: |
| 319 | + logger.info(f"Task {task_id} context items: {len(unique_contexts)}") |
| 320 | + for i, ctx in enumerate(unique_contexts): |
| 321 | + logger.info(f"Context {i+1}: {ctx[:100]}...") |
# Blank-line-separate the context entries. Built with plain string
# concatenation because a backslash inside an f-string expression
# (f"{'\n\n'.join(...)}") is a SyntaxError before Python 3.12 (PEP 701).
task_prompt += "\nContext:\n\n" + "\n\n".join(unique_contexts) + "\n"
292 | 327 | task_prompt += "Please provide only the final result of your work. Do not add any conversation or extra explanation." |
293 | 328 |
|
@@ -573,44 +608,20 @@ def execute_task(self, task_id): |
573 | 608 | if task.context: |
574 | 609 | context_results = [] # Use list to avoid duplicates |
575 | 610 | for context_item in task.context: |
576 | | - if isinstance(context_item, str): |
577 | | - context_results.append(f"Input Content:\n{context_item}") |
578 | | - elif isinstance(context_item, list): |
579 | | - context_results.append(f"Input Content: {' '.join(str(x) for x in context_item)}") |
580 | | - elif hasattr(context_item, 'result'): # Task object |
581 | | - if context_item.result: |
582 | | - context_results.append( |
583 | | - f"Result of previous task {context_item.name if context_item.name else context_item.description}:\n{context_item.result.raw}" |
584 | | - ) |
585 | | - else: |
586 | | - context_results.append( |
587 | | - f"Previous task {context_item.name if context_item.name else context_item.description} has no result yet." |
588 | | - ) |
589 | | - elif isinstance(context_item, dict) and "vector_store" in context_item: |
590 | | - from ..knowledge.knowledge import Knowledge |
591 | | - try: |
592 | | - # Handle both string and dict configs |
593 | | - cfg = context_item["vector_store"] |
594 | | - if isinstance(cfg, str): |
595 | | - cfg = json.loads(cfg) |
596 | | - |
597 | | - knowledge = Knowledge(config={"vector_store": cfg}, verbose=self.verbose) |
598 | | - |
599 | | - # Only use user_id as filter |
600 | | - db_results = knowledge.search( |
601 | | - task.description, |
602 | | - user_id=self.user_id if self.user_id else None |
603 | | - ) |
604 | | - context_results.append(f"[DB Context]: {str(db_results)}") |
605 | | - except Exception as e: |
606 | | - context_results.append(f"[Vector DB Error]: {e}") |
| 611 | + # Use the centralized helper function |
| 612 | + context_str = process_task_context(context_item, self.verbose, self.user_id) |
| 613 | + context_results.append(context_str) |
607 | 614 |
|
608 | | - # Join unique context results |
| 615 | + # Join unique context results with proper formatting |
609 | 616 | unique_contexts = list(dict.fromkeys(context_results)) # Remove duplicates |
| 617 | + if self.verbose >= 3: |
| 618 | + logger.info(f"Task {task_id} context items: {len(unique_contexts)}") |
| 619 | + for i, ctx in enumerate(unique_contexts): |
| 620 | + logger.info(f"Context {i+1}: {ctx[:100]}...") |
# Blank-line-separate the context entries. Built with plain string
# concatenation because a backslash inside an f-string expression
# (f"{'\n\n'.join(...)}") is a SyntaxError before Python 3.12 (PEP 701).
task_prompt += "\nContext:\n\n" + "\n\n".join(unique_contexts) + "\n"
615 | 626 |
|
616 | 627 | # Add memory context if available |
|
0 commit comments