diff --git a/README.md b/README.md
index d539c089b..52c3789f4 100644
--- a/README.md
+++ b/README.md
@@ -62,6 +62,25 @@ If you are looking for a simple challenge configuration that you can replicate t
 11. To update the challenge on EvalAI, make changes in the repository and push on `challenge` branch and wait for the build to complete.
 
+### Printing and Logging in Evaluation Script
+
+`print` statements will show up on the console directly.
+To get `logging` statements from the evaluation script as well, make sure the logger has a handler that writes to `stdout`; we redirect the evaluation script's `stdout` to the submission worker's console.
+An example logger can be created like so:
+
+```python
+import logging
+import sys
+
+eval_script_logger = logging.getLogger("eval_script")
+eval_script_logger.setLevel(logging.DEBUG)
+
+handler = logging.StreamHandler(sys.stdout)
+handler.setLevel(logging.DEBUG)
+eval_script_logger.addHandler(handler)
+```
+
+This logger can then be used anywhere in the script, and messages at or above the configured level will show up in the output.
 
 ## Create challenge using config
 
 1. Fork this repository.
diff --git a/evaluation_script/main.py b/evaluation_script/main.py
index 61c73d9b5..6aba54de0 100644
--- a/evaluation_script/main.py
+++ b/evaluation_script/main.py
@@ -1,8 +1,20 @@
 import random
-
+import logging
+import time
+import sys
 
 def evaluate(test_annotation_file, user_submission_file, phase_codename, **kwargs):
     print("Starting Evaluation.....")
+
+    eval_script_logger = logging.getLogger("eval_script")
+    eval_script_logger.setLevel(logging.DEBUG)
+
+    handler = logging.StreamHandler(sys.stdout)
+    handler.setLevel(logging.DEBUG)
+    formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
+    handler.setFormatter(formatter)
+    eval_script_logger.addHandler(handler)
+
     """
     Evaluates the submission for a particular challenge phase and returns score
     Arguments:
@@ -54,7 +66,7 @@ def evaluate(test_annotation_file, user_submission_file, phase_codename, **kwarg
         ]
         # To display the results in the result file
         output["submission_result"] = output["result"][0]["train_split"]
-        print("Completed evaluation for Dev Phase")
+        eval_script_logger.info("Completed evaluation for Dev Phase")
     elif phase_codename == "test":
         print("Evaluating for Test Phase")
         output["result"] = [
@@ -77,5 +89,5 @@ def evaluate(test_annotation_file, user_submission_file, phase_codename, **kwarg
         ]
         # To display the results in the result file
         output["submission_result"] = output["result"][0]
-        print("Completed evaluation for Test Phase")
+        eval_script_logger.info("Completed evaluation for Test Phase")
     return output
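
One caveat about the change above: the handler is attached inside `evaluate()`, and `logging.getLogger("eval_script")` returns the same logger object on every call, so if the worker evaluates several submissions in the same process, each call adds another `StreamHandler` and every log line is printed once per handler. Below is a minimal sketch of one way to guard against that; the helper name `get_eval_logger` is illustrative and not part of this PR.

```python
import logging
import sys


def get_eval_logger(name="eval_script"):
    """Return a logger that writes to stdout, attaching its handler only once."""
    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)

    # getLogger() returns the same object on every call, so only add a
    # handler when none is present; otherwise each call to evaluate()
    # would stack another handler and duplicate every message.
    if not logger.handlers:
        handler = logging.StreamHandler(sys.stdout)
        handler.setLevel(logging.DEBUG)
        handler.setFormatter(
            logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
        )
        logger.addHandler(handler)
    return logger
```

`evaluate()` could then start with `eval_script_logger = get_eval_logger()` instead of building the handler inline, leaving the rest of the logging calls unchanged.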