diff --git a/adala/agents/base.py b/adala/agents/base.py
index 8d688f21..9a70ce27 100644
--- a/adala/agents/base.py
+++ b/adala/agents/base.py
@@ -260,7 +260,7 @@ async def arun(
                 "When using asynchronous run with `agent.arun()`, the runtime must be an AsyncRuntime."
             )
         else:
-            print(f"Using runtime {type(runtime)}")
+            logger.info("Using runtime %s", type(runtime))
 
         if input is None:
             if self.environment is None:
@@ -276,7 +276,7 @@ async def arun(
                     batch_size=runtime.batch_size
                 )
                 if data_batch.empty:
-                    print_text("No more data in the environment. Exiting.")
+                    logger.info("No more data in the environment. Exiting.")
                     break
             except Exception as e:
                 # TODO: environment should raise a specific exception + log error
diff --git a/adala/skills/skillset.py b/adala/skills/skillset.py
index 4a9ed84d..ff316793 100644
--- a/adala/skills/skillset.py
+++ b/adala/skills/skillset.py
@@ -1,3 +1,4 @@
+import logging
 from pydantic import BaseModel, model_validator, field_validator
 from abc import ABC, abstractmethod
 from typing import List, Union, Dict, Any, Optional, Mapping, Type
@@ -18,6 +19,8 @@
     SynthesisSkill,
 )
 
+logger = logging.getLogger(__name__)
+
 
 class SkillSet(BaseModel, ABC):
     """
@@ -199,7 +202,7 @@ def apply(
         for i, skill_name in enumerate(skill_sequence):
             skill = self.skills[skill_name]
             # use input dataset for the first node in the pipeline
-            print_text(f"Applying skill: {skill_name}")
+            logger.info("Applying skill: %s", skill_name)
             skill_output = skill.apply(skill_input, runtime)
 
             # Commented out to not log customer data. Can be used when debugging if needed
@@ -248,7 +251,7 @@ async def aapply(
         for i, skill_name in enumerate(skill_sequence):
             skill = self.skills[skill_name]
             # use input dataset for the first node in the pipeline
-            print_text(f"Applying skill: {skill_name}")
+            logger.info("Applying skill: %s", skill_name)
             skill_output = await skill.aapply(skill_input, runtime)
 
             # Commented out to not log customer data. Can be used when debugging if needed
@@ -322,7 +325,7 @@ def apply(
         for i, skill_name in enumerate(skill_sequence):
             skill = self.skills[skill_name]
             # use input dataset for the first node in the pipeline
-            print_text(f"Applying skill: {skill_name}")
+            logger.info("Applying skill: %s", skill_name)
             skill_output = skill.apply(input, runtime)
             skill_outputs.append(skill_output)
         if not skill_outputs:
diff --git a/server/log_middleware.py b/server/log_middleware.py
index 73f427b5..24cbf7cb 100644
--- a/server/log_middleware.py
+++ b/server/log_middleware.py
@@ -1,5 +1,6 @@
 import json
 import logging
+import os
 from logging import Formatter
 
 from starlette.middleware.base import BaseHTTPMiddleware
@@ -20,11 +21,13 @@ def format(self, record):
         return json.dumps(json_record)
 
 
+LOG_LEVEL = os.environ.get("LOG_LEVEL", "INFO").upper()
+
 logger = logging.root
 handler = logging.StreamHandler()
 handler.setFormatter(JsonFormatter())
 logger.handlers = [handler]
-logger.setLevel(logging.DEBUG)
+logger.setLevel(getattr(logging, LOG_LEVEL, logging.INFO))
 
 logging.getLogger("uvicorn.access").disabled = True