diff --git a/src/llmcompressor/entrypoints/oneshot.py b/src/llmcompressor/entrypoints/oneshot.py
index 67c1e2ba2..bedca7392 100644
--- a/src/llmcompressor/entrypoints/oneshot.py
+++ b/src/llmcompressor/entrypoints/oneshot.py
@@ -1,5 +1,8 @@
+import os
+from datetime import datetime
 from typing import Optional
 
+from loguru import logger
 from torch.utils.data import DataLoader
 from transformers import PreTrainedModel
 
@@ -82,6 +85,7 @@ class Oneshot:
 
     def __init__(
         self,
+        log_dir: Optional[str] = "sparse_logs",
         **kwargs,
     ):
         """
@@ -100,6 +104,16 @@ def __init__(
         :param output_dir: Path to save the output model after carrying out oneshot
+        :param log_dir: directory to write a timestamped DEBUG log file to;
+            set to None to disable file logging
         """
 
+        # Set up logging
+        if log_dir:
+            os.makedirs(log_dir, exist_ok=True)
+            date_str = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
+            logger.add(
+                f"{log_dir}/oneshot_{date_str}.log",
+                level="DEBUG",
+            )
+
         model_args, dataset_args, recipe_args, _, output_dir = parse_args(**kwargs)
 
         self.model_args = model_args
diff --git a/src/llmcompressor/modifiers/stage.py b/src/llmcompressor/modifiers/stage.py
index fb160955b..def52ad8b 100644
--- a/src/llmcompressor/modifiers/stage.py
+++ b/src/llmcompressor/modifiers/stage.py
@@ -67,7 +67,6 @@ def initialize(self, state: "State", **kwargs):
             modifier.initialize(state, **kwargs)
             if accelerator:
                 accelerator.wait_for_everyone()
-        state.loggers.system.info(tag="stage", string="Modifiers initialized")
 
     def finalize(self, state: "State", **kwargs):
         """
@@ -88,7 +87,6 @@ def finalize(self, state: "State", **kwargs):
             accelerator.wait_for_everyone()
 
         self.applied = True
-        state.loggers.system.info(tag="stage", string="Modifiers finalized")
 
     def update_event(self, state: "State", event: "Event", **kwargs):
         """