Skip to content

Commit 46b652c

Browse files
committed
Revert "Subclassed logging.Logger to add the bcdebug, verbose, and success methods directly to the logger instance"
This reverts commit a8be713.
1 parent b273603 commit 46b652c

File tree

1 file changed

+88
-95
lines changed

1 file changed

+88
-95
lines changed

bidscoin/bcoin.py

Lines changed: 88 additions & 95 deletions
Original file line numberDiff line numberDiff line change
@@ -31,10 +31,7 @@
3131
yaml = YAML()
3232
yaml.representer.ignore_aliases = lambda *data: True # Expand aliases (https://stackoverflow.com/questions/58091449/disabling-alias-for-yaml-file-in-python)
3333

34-
# Define custom logging levels
35-
BCDEBUG, BCDEBUG_LEVEL = 'BCDEBUG', 11 # NB: using the standard debug mode will generate many debug messages from imports
36-
VERBOSE, VERBOSE_LEVEL = 'VERBOSE', 15
37-
SUCCESS, SUCCESS_LEVEL = 'SUCCESS', 25
34+
LOGGER = logging.getLogger(__name__)
3835

3936

4037
class TqdmUpTo(tqdm):
@@ -53,39 +50,76 @@ def update_to(self, b=1, bsize=1, tsize=None):
5350
self.update(b * bsize - self.n) # will also set self.n = b * bsize
5451

5552

56-
class CustomLogger(logging.Logger):
57-
"""Extend the Logger class to add custom methods for the new levels"""
53+
def drmaa_nativespec(specs: str, session) -> str:
54+
"""
55+
Converts (CLI default) native Torque walltime and memory specifications to the DRMAA implementation (currently only Slurm is supported)
5856
59-
def bcdebug(self, message, *args, **kwargs):
60-
"""Custom BIDSCOIN DEBUG messages"""
61-
if self.isEnabledFor(BCDEBUG_LEVEL):
62-
self._log(BCDEBUG_LEVEL, message, args, **kwargs)
57+
:param specs: Native Torque walltime and memory specifications, e.g. '-l walltime=00:10:00,mem=2gb'
58+
:param session: The DRMAA session
59+
:return: The converted native specifications
60+
"""
6361

64-
def verbose(self, message, *args, **kwargs):
65-
"""Custom BIDSCOIN VERBOSE messages"""
66-
if self.isEnabledFor(VERBOSE_LEVEL):
67-
self._log(VERBOSE_LEVEL, message, args, **kwargs)
62+
jobmanager: str = session.drmaaImplementation
63+
64+
if '-l ' in specs and 'pbs' not in jobmanager.lower():
6865

69-
def success(self, message, *args, **kwargs):
70-
"""Custom BIDSCOIN SUCCESS messages"""
71-
if self.isEnabledFor(SUCCESS_LEVEL):
72-
self._log(SUCCESS_LEVEL, message, args, **kwargs)
66+
if 'slurm' in jobmanager.lower():
67+
specs = (specs.replace('-l ', '')
68+
.replace(',', ' ')
69+
.replace('walltime', '--time')
70+
.replace('mem', '--mem')
71+
.replace('gb','000'))
72+
else:
73+
LOGGER.warning(f"Default `--cluster` native specifications are not (yet) provided for {jobmanager}. Please add them to your command if you get DRMAA errors")
74+
specs = ''
7375

76+
return specs.strip()
7477

75-
# Get a logger from the custom logger class
76-
logging.setLoggerClass(CustomLogger)
77-
LOGGER = logging.getLogger(__name__)
78+
79+
def synchronize(pbatch, jobids: list, wait: int=15):
80+
"""
81+
Shows tqdm progress bars for queued and running DRMAA jobs. Waits until all jobs have finished +
82+
some extra wait time to give NAS systems the opportunity to fully synchronize
83+
84+
:param pbatch: The DRMAA session
85+
:param jobids: The job ids
86+
:param wait: The extra wait time for the NAS
87+
:return:
88+
"""
89+
90+
with logging_redirect_tqdm():
91+
92+
qbar = tqdm(total=len(jobids), desc='Queued ', unit='job', leave=False, bar_format='{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}]')
93+
rbar = tqdm(total=len(jobids), desc='Running', unit='job', leave=False, bar_format='{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}]', colour='green')
94+
done = 0
95+
while done < len(jobids):
96+
jobs = [pbatch.jobStatus(jobid) for jobid in jobids]
97+
done = sum(status in ('done', 'failed', 'undetermined') for status in jobs)
98+
qbar.n = sum(status == 'queued_active' for status in jobs)
99+
rbar.n = sum(status == 'running' for status in jobs)
100+
qbar.refresh(), rbar.refresh()
101+
time.sleep(2)
102+
qbar.close(), rbar.close()
103+
104+
failedjobs = [jobid for jobid in jobids if pbatch.jobStatus(jobid)=='failed']
105+
if failedjobs:
106+
LOGGER.error(f"{len(failedjobs)} HPC jobs failed to run:\n{failedjobs}\nThis may well be due to an underspecified `--cluster` input option (e.g. not enough memory)")
107+
108+
# Give NAS systems some time to fully synchronize
109+
for t in tqdm(range(wait*100), desc='synchronizing', leave=False, bar_format='{l_bar}{bar}| [{elapsed}]'):
110+
time.sleep(.01)
78111

79112

80113
def setup_logging(logfile: Path=Path()):
81114
"""
82115
Set up the logging framework:
83-
1) Add custom logging levels: 'bcdebug', 'verbose', and 'success'.
84-
2) Add a console stream handler for generating terminal output.
85-
3) Optionally add file handlers for normal log and warning/error log if logfile is provided.
116+
1) Add a 'bcdebug', 'verbose' and a 'success' logging level
117+
2) Add a console streamhandler
118+
3) If logfile then add a normal log and a warning/error filehandler
86119
87-
:param logfile: Path to the logfile. If none, logging is console-only
88-
"""
120+
:param logfile: Name of the logfile
121+
:return:
122+
"""
89123

90124
# Set the default formats
91125
if DEBUG:
@@ -96,17 +130,36 @@ def setup_logging(logfile: Path=Path()):
96130
cfmt = '%(levelname)s | %(message)s'
97131
datefmt = '%Y-%m-%d %H:%M:%S'
98132

99-
# Add custom log levels to logging
100-
logging.addLevelName(BCDEBUG_LEVEL, BCDEBUG)
101-
logging.addLevelName(VERBOSE_LEVEL, VERBOSE)
102-
logging.addLevelName(SUCCESS_LEVEL, SUCCESS)
103-
104-
# Get the root logger and set the appropriate level
133+
# Add a BIDScoin debug logging level = 11 (NB: using the standard debug mode will generate many debug messages from imports)
134+
logging.BCDEBUG = 11
135+
logging.addLevelName(logging.BCDEBUG, 'BCDEBUG')
136+
logging.__all__ += ['BCDEBUG'] if 'BCDEBUG' not in logging.__all__ else []
137+
def bcdebug(self, message, *args, **kws):
138+
if self.isEnabledFor(logging.BCDEBUG): self._log(logging.BCDEBUG, message, args, **kws)
139+
logging.Logger.bcdebug = bcdebug
140+
141+
# Add a verbose logging level = 15
142+
logging.VERBOSE = 15
143+
logging.addLevelName(logging.VERBOSE, 'VERBOSE')
144+
logging.__all__ += ['VERBOSE'] if 'VERBOSE' not in logging.__all__ else []
145+
def verbose(self, message, *args, **kws):
146+
if self.isEnabledFor(logging.VERBOSE): self._log(logging.VERBOSE, message, args, **kws)
147+
logging.Logger.verbose = verbose
148+
149+
# Add a success logging level = 25
150+
logging.SUCCESS = 25
151+
logging.addLevelName(logging.SUCCESS, 'SUCCESS')
152+
logging.__all__ += ['SUCCESS'] if 'SUCCESS' not in logging.__all__ else []
153+
def success(self, message, *args, **kws):
154+
if self.isEnabledFor(logging.SUCCESS): self._log(logging.SUCCESS, message, args, **kws)
155+
logging.Logger.success = success
156+
157+
# Set the root logging level
105158
logger = logging.getLogger()
106-
logger.setLevel(BCDEBUG_LEVEL if DEBUG else VERBOSE_LEVEL)
159+
logger.setLevel('BCDEBUG' if DEBUG else 'VERBOSE')
107160

108161
# Add the console streamhandler and bring some color to those boring logs! :-)
109-
coloredlogs.install(level=BCDEBUG if DEBUG else VERBOSE if not logfile.name else 'INFO', fmt=cfmt, datefmt=datefmt) # NB: Using tqdm sets the streamhandler level to 0, see: https://github.com/tqdm/tqdm/pull/1235
162+
coloredlogs.install(level='BCDEBUG' if DEBUG else 'VERBOSE' if not logfile.name else 'INFO', fmt=cfmt, datefmt=datefmt) # NB: Using tqdm sets the streamhandler level to 0, see: https://github.com/tqdm/tqdm/pull/1235
110163
coloredlogs.DEFAULT_LEVEL_STYLES['verbose']['color'] = 245 # = Gray
111164

112165
if logfile.name:
@@ -115,7 +168,7 @@ def setup_logging(logfile: Path=Path()):
115168
logfile.parent.mkdir(parents=True, exist_ok=True) # Create the log dir if it does not exist
116169
formatter = logging.Formatter(fmt=fmt, datefmt=datefmt)
117170
loghandler = logging.FileHandler(logfile)
118-
loghandler.setLevel(BCDEBUG)
171+
loghandler.setLevel('BCDEBUG')
119172
loghandler.setFormatter(formatter)
120173
loghandler.set_name('loghandler')
121174
logger.addHandler(loghandler)
@@ -166,66 +219,6 @@ def reporterrors() -> str:
166219
return errors
167220

168221

169-
def drmaa_nativespec(specs: str, session) -> str:
    """
    Converts (CLI default) native Torque walltime and memory specifications to the DRMAA implementation (currently only Slurm is supported)

    :param specs:   Native Torque walltime and memory specifications, e.g. '-l walltime=00:10:00,mem=2gb'
    :param session: The DRMAA session
    :return:        The converted native specifications
    """

    implementation: str = session.drmaaImplementation

    # Torque/PBS specs (or specs without '-l ') can be passed through unchanged
    if '-l ' not in specs or 'pbs' in implementation.lower():
        return specs.strip()

    # For anything other than Slurm we have no translation table
    if 'slurm' not in implementation.lower():
        LOGGER.warning(f"Default `--cluster` native specifications are not (yet) provided for {implementation}. Please add them to your command if you get DRMAA errors")
        return ''

    # Translate the Torque tokens to their Slurm equivalents (order matters, e.g. 'gb'->'000' must come last)
    for torque_token, slurm_token in (('-l ', ''), (',', ' '), ('walltime', '--time'), ('mem', '--mem'), ('gb', '000')):
        specs = specs.replace(torque_token, slurm_token)

    return specs.strip()
193-
194-
195-
def synchronize(pbatch, jobids: list, wait: int=15):
    """
    Shows tqdm progress bars for queued and running DRMAA jobs. Waits until all jobs have finished +
    some extra wait time to give NAS systems the opportunity to fully synchronize

    :param pbatch: The DRMAA session
    :param jobids: The job ids
    :param wait:   The extra wait time for the NAS
    :return:
    """

    njobs = len(jobids)

    with logging_redirect_tqdm():

        barformat = '{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}]'
        queuedbar  = tqdm(total=njobs, desc='Queued ', unit='job', leave=False, bar_format=barformat)
        runningbar = tqdm(total=njobs, desc='Running', unit='job', leave=False, bar_format=barformat, colour='green')

        # Poll the scheduler until every job has reached a terminal state
        finished = 0
        while finished < njobs:
            statuses   = [pbatch.jobStatus(jobid) for jobid in jobids]
            finished   = sum(status in ('done', 'failed', 'undetermined') for status in statuses)
            queuedbar.n  = sum(status == 'queued_active' for status in statuses)
            runningbar.n = sum(status == 'running' for status in statuses)
            queuedbar.refresh()
            runningbar.refresh()
            time.sleep(2)
        queuedbar.close()
        runningbar.close()

        failedjobs = [jobid for jobid in jobids if pbatch.jobStatus(jobid) == 'failed']
        if failedjobs:
            LOGGER.error(f"{len(failedjobs)} HPC jobs failed to run:\n{failedjobs}\nThis may well be due to an underspecified `--cluster` input option (e.g. not enough memory)")

        # Give NAS systems some time to fully synchronize
        for _ in tqdm(range(wait*100), desc='synchronizing', leave=False, bar_format='{l_bar}{bar}| [{elapsed}]'):
            time.sleep(.01)
227-
228-
229222
def list_executables(show: bool=False) -> list:
230223
"""
231224
:param show: Print the installed console scripts if True

0 commit comments

Comments
 (0)