diff --git a/pyomo/common/log.py b/pyomo/common/log.py
index 4416bec3482..aab019908e4 100644
--- a/pyomo/common/log.py
+++ b/pyomo/common/log.py
@@ -21,6 +21,7 @@
 import inspect
 import io
 import logging
+import os
 import re
 import sys
 import textwrap
@@ -286,14 +287,22 @@ class LoggingIntercept(object):
     ----------
     output: io.TextIOBase
         the file stream to send log messages to
+
     module: str
-        the target logger name to intercept
+        the target logger name to intercept. `logger` and `module` are
+        mutually exclusive.
+
     level: int
         the logging level to intercept
+
     formatter: logging.Formatter
         the formatter to use when rendering the log messages. If not
         specified, uses `'%(message)s'`
 
+    logger: logging.Logger
+        the target logger to intercept. `logger` and `module` are
+        mutually exclusive.
+
     Examples
     --------
     >>> import io, logging
@@ -306,10 +315,24 @@ class LoggingIntercept(object):
 
     """
 
-    def __init__(self, output=None, module=None, level=logging.WARNING, formatter=None):
+    def __init__(
+        self,
+        output=None,
+        module=None,
+        level=logging.WARNING,
+        formatter=None,
+        logger=None,
+    ):
         self.handler = None
         self.output = output
-        self.module = module
+        if logger is not None:
+            if module is not None:
+                raise ValueError(
+                    "LoggingIntercept: only one of 'module' and 'logger' is allowed"
+                )
+            self._logger = logger
+        else:
+            self._logger = logging.getLogger(module)
         self._level = level
         if formatter is None:
             formatter = logging.Formatter('%(message)s')
@@ -317,6 +340,11 @@ def __init__(self, output=None, module=None, level=logging.WARNING, formatter=No
         self._save = None
 
     def __enter__(self):
+        # Get the logger for the scope we will be overriding
+        logger = self._logger
+        self._save = logger.level, logger.propagate, logger.handlers
+        if self._level is None:
+            self._level = logger.getEffectiveLevel()
         # Set up the handler
         output = self.output
         if output is None:
@@ -326,16 +354,14 @@ def __enter__(self):
         self.handler.setFormatter(self._formatter)
         self.handler.setLevel(self._level)
         # Register the handler with the appropriate module scope
-        logger = logging.getLogger(self.module)
-        self._save = logger.level, logger.propagate, logger.handlers
         logger.handlers = []
-        logger.propagate = 0
+        logger.propagate = False
         logger.setLevel(self.handler.level)
         logger.addHandler(self.handler)
         return output
 
     def __exit__(self, et, ev, tb):
-        logger = logging.getLogger(self.module)
+        logger = self._logger
         logger.removeHandler(self.handler)
         self.handler = None
         logger.setLevel(self._save[0])
@@ -343,6 +369,10 @@ def __exit__(self, et, ev, tb):
         assert not logger.handlers
         logger.handlers.extend(self._save[2])
 
+    @property
+    def module(self):
+        return self._logger.name
+
 
 class LogStream(io.TextIOBase):
     """
@@ -357,6 +387,8 @@ def __init__(self, level, logger):
         self._buffer = ''
 
     def write(self, s: str) -> int:
+        if not s:
+            return 0
         res = len(s)
         if self._buffer:
             s = self._buffer + s
@@ -369,3 +401,76 @@ def write(self, s: str) -> int:
     def flush(self):
         if self._buffer:
             self.write('\n')
+
+    def redirect_streams(self, redirects):
+        """Redirect StreamHandler objects to the original file descriptors
+
+        This utility method for :py:class:`~pyomo.common.tee.capture_output`
+        locates any StreamHandlers that would process messages from the
+        logger assigned to this :py:class:`LogStream` that would write
+        to the file descriptors redirected by `capture_output` and
+        yields context managers that will redirect those StreamHandlers
+        back to duplicates of the original file descriptors.
+
+        """
+        found = 0
+        logger = self._logger
+        while logger:
+            for handler in logger.handlers:
+                found += 1
+                if not isinstance(handler, logging.StreamHandler):
+                    continue
+                try:
+                    fd = handler.stream.fileno()
+                except (AttributeError, OSError):
+                    fd = None
+                if fd not in redirects:
+                    continue
+                yield _StreamRedirector(handler, redirects[fd].original_fd)
+            if not logger.propagate:
+                break
+            else:
+                logger = logger.parent
+        if not found:
+            fd = logging.lastResort.stream.fileno()
+            if not redirects:
+                yield _LastResortRedirector(fd)
+            elif fd in redirects:
+                yield _LastResortRedirector(redirects[fd].original_fd)
+
+
+class _StreamRedirector(object):
+    def __init__(self, handler, fd):
+        self.handler = handler
+        self.fd = fd
+        self.orig_stream = None
+
+    def __enter__(self):
+        self.orig_stream = self.handler.stream
+        self.handler.stream = os.fdopen(
+            os.dup(self.fd), mode="w", closefd=True
+        ).__enter__()
+
+    def __exit__(self, et, ev, tb):
+        try:
+            self.handler.stream.__exit__(et, ev, tb)
+        finally:
+            self.handler.stream = self.orig_stream
+
+
+class _LastResortRedirector(object):
+    def __init__(self, fd):
+        self.fd = fd
+        self.orig = None
+
+    def __enter__(self):
+        self.orig = logging.lastResort
+        logging.lastResort = logging.StreamHandler(
+            os.fdopen(os.dup(self.fd), mode="w", closefd=True).__enter__()
+        )
+
+    def __exit__(self, et, ev, tb):
+        try:
+            logging.lastResort.stream.close()
+        finally:
+            logging.lastResort = self.orig
diff --git a/pyomo/common/tee.py b/pyomo/common/tee.py
index 022db56390d..3ba59135f6d 100644
--- a/pyomo/common/tee.py
+++ b/pyomo/common/tee.py
@@ -16,13 +16,15 @@
 #  the U.S. Government retains certain rights in this software.
 #  ___________________________________________________________________________
 #
+import collections.abc
 import io
 import logging
 import os
 import sys
 import threading
 import time
-from io import StringIO
+
+from pyomo.common.log import LoggingIntercept, LogStream
 
 _poll_interval = 0.0001
 _poll_rampup_limit = 0.099
@@ -93,13 +95,15 @@ class redirect_fd(object):
     fd: int
         The file descriptor to redirect
 
-    output: int or str
+    output: int or str or None
        The new output target for `fd`: either another valid file
-        descriptor (int) or a string with the file to open.
+        descriptor (int) or a string with the file to open. If `None`,
+        then the fd is redirected to `os.devnull`.
 
     synchronize: bool
         If True, and `fd` is 1 or 2, then update `sys.stdout` or
         `sys.stderr` to also point to the new file descriptor
+
     """
 
     def __init__(self, fd=1, output=None, synchronize=True):
@@ -130,39 +134,22 @@ def __enter__(self):
             out_fd = os.open(self.target, os.O_WRONLY)
 
         # Duplicate the file descriptor for the opened file, closing and
-        # overwriting the value for stdout (file descriptor 1). Only
-        # make the new FD inheritable if it is stdout/stderr
+        # overwriting/replacing the original fd. Only make the new FD
+        # inheritable if it is stdout/stderr
         os.dup2(out_fd, self.fd, inheritable=bool(self.std))
         # We no longer need this original file descriptor
         if not isinstance(self.target, int):
             os.close(out_fd)
-        if self.std:
-            if self.synchronize:
-                # Cause Python's stdout to point to our new file
-                fd = self.fd
-            else:
-                # IF we are not synchronizing the std file object with
-                # the redirected file descriptor, and IF the current
-                # file object is pointing to the original file
-                # descriptor that we just redirected, then we want to
-                # retarget the std file to the original (duplicated)
-                # target file descriptor. This allows, e.g. Python to
-                # still write to stdout when we redirect fd=1 to
-                # /dev/null
-                try:
-                    old_std_fd = getattr(sys, self.std).fileno()
-                    fd = self.original_fd if old_std_fd == self.fd else None
-                except (io.UnsupportedOperation, AttributeError):
-                    fd = None
-            if fd is not None:
-                self.target_file = os.fdopen(fd, 'w', closefd=False)
-                setattr(sys, self.std, self.target_file)
+        if self.synchronize and self.std:
+            # Cause Python's stdout to point to our new file
+            self.target_file = os.fdopen(self.fd, 'w', closefd=False)
+            setattr(sys, self.std, self.target_file)
         return self
 
-    def __exit__(self, t, v, traceback):
+    def __exit__(self, et, ev, tb):
         # Close output: this either closes the new file that we opened,
         # or else the new file that points to the original (duplicated)
         # file descriptor
@@ -185,7 +172,7 @@ class capture_output(object):
 
     Parameters
     ----------
-    output : io.TextIOBase, TeeStream, str, or None
+    output : io.TextIOBase, Sequence[io.TextIOBase], TeeStream, str, or None
         Output stream where all captured stdout/stderr data is sent.
 
         If a ``str`` is provided, it is used as a file name and opened
@@ -201,6 +188,26 @@ class capture_output(object):
         directly to the process stdout / stderr by external compiled
         modules.
 
+        Capturing and redirecting the file descriptors can cause loops
+        in the output stream (where one of the `output` streams points
+        to a file descriptor we just captured).
+        :py:class:`capture_output` will attempt to locate
+        :py:class:`io.IOBase` streams in `output` that point to file
+        descriptors that we just captured and replace them with
+        temporary streams that point to (copies of) the original file
+        descriptor. In addition, :py:class:`capture_output` will look
+        for :py:class:`~pyomo.common.log.LogStream` objects and will
+        attempt to locate :py:class:`logging.StreamHandler` objects that
+        would output to a redirected file descriptor and temporarily
+        redirect those handlers to (copies of) the original file
+        descriptor.
+
+        Note that this process will cover the most common cases, but is
+        by no means perfect. Use of other output classes or customized
+        log handlers may still result in output loops (usually
+        manifesting in an error message about text being left in the
+        output buffer).
+
     Returns
     -------
     io.TextIOBase
@@ -211,100 +218,166 @@ class capture_output(object):
 
     def __init__(self, output=None, capture_fd=False):
         if output is None:
-            output = StringIO()
+            output = io.StringIO()
         self.output = output
-        self.output_file = None
         self.old = None
         self.tee = None
         self.capture_fd = capture_fd
-        self.fd_redirect = None
+        self.context_stack = []
+
+    def _enter_context(self, cm, prior_to=None):
+        """Add the context manager to the context stack and return the result
+        from calling the context manager's `__enter__()`
+
+        """
+        if prior_to is None:
+            self.context_stack.append(cm)
+        else:
+            self.context_stack.insert(self.context_stack.index(prior_to), cm)
+        return cm.__enter__()
+
+    def _exit_context_stack(self, et, ev, tb):
+        """Flush the context stack, calling __exit__() on all context managers
+
+        One would usually use the contextlib.ExitStack to implement/manage
+        the collection of context managers we are putting together. The
+        problem is that ExitStack will only call the __exit__ handlers
+        up to the first one that raises an exception. As we are
+        expecting the possibility that one of the CMs here will raise an
+        exception (usually from TeeStream when joining the reader
+        threads), we will explicitly implement the stack management here
+        so that we will guarantee that all __exit__ handlers will always
+        be called.
+
+        """
+        FAIL = []
+        while self.context_stack:
+            try:
+                self.context_stack.pop().__exit__(et, ev, tb)
+            except:
+                FAIL.append(str(sys.exc_info()[1]))
+        return FAIL
 
     def __enter__(self):
         self.old = (sys.stdout, sys.stderr)
-        if isinstance(self.output, str):
-            self.output_stream = open(self.output, 'w')
-        else:
-            self.output_stream = self.output
-        if isinstance(self.output, TeeStream):
-            self.tee = self.output
-        else:
-            self.tee = TeeStream(self.output_stream)
-        self.tee.__enter__()
-        sys.stdout = self.tee.STDOUT
-        sys.stderr = self.tee.STDERR
-        if self.capture_fd:
-            tee_fd = (self.tee.STDOUT.fileno(), self.tee.STDERR.fileno())
-            self.fd_redirect = []
-            for i in range(2):
-                # Redirect the standard process file descriptor (1 or 2)
-                self.fd_redirect.append(
-                    redirect_fd(i + 1, tee_fd[i], synchronize=False)
+        old_fd = []
+        for stream in self.old:
+            stream.flush()
+            try:
+                old_fd.append(stream.fileno())
+            except (AttributeError, OSError):
+                old_fd.append(None)
+        try:
+            # We have an issue where we are (very aggressively)
+            # commandeering the terminal. This is what we intend, but the
+            # side effect is that any errors generated by this module (e.g.,
+            # because the user gave us an invalid output stream) get
+            # completely suppressed. So, we will make an exception to the
+            # output that we are catching and let messages logged to THIS
+            # logger still be emitted to the original stderr.
+            if self.capture_fd:
+                # Because we are also commandeering the FD that underlies
+                # sys.stderr, we cannot just write to that stream and
+                # instead will open a new stream to the "original" FD
+                # (Note that we need to duplicate that FD, as we will
+                # overwrite it when we get to redirect_fd below). If
+                # sys.stderr doesn't have a file descriptor, we will
+                # fall back on the process stderr (FD=2).
+                log_stream = self._enter_context(
+                    os.fdopen(os.dup(old_fd[1] or 2), mode="w", closefd=True)
                 )
-                # Redirect the file descriptor currently associated with
-                # sys.stdout / sys.stderr
-                try:
-                    fd = self.old[i].fileno()
-                except (AttributeError, OSError):
-                    pass
-                else:
-                    if fd != i + 1:
-                        self.fd_redirect.append(
+            else:
+                log_stream = self.old[1]
+            self._enter_context(LoggingIntercept(log_stream, logger=logger, level=None))
+
+            if isinstance(self.output, str):
+                self.output_stream = self._enter_context(open(self.output, 'w'))
+            else:
+                self.output_stream = self.output
+            if isinstance(self.output, TeeStream):
+                self.tee = self._enter_context(self.output)
+            elif isinstance(self.output_stream, collections.abc.Sequence):
+                self.tee = self._enter_context(TeeStream(*self.output_stream))
+            else:
+                self.tee = self._enter_context(TeeStream(self.output_stream))
+            fd_redirect = {}
+            if self.capture_fd:
+                tee_fd = (self.tee.STDOUT.fileno(), self.tee.STDERR.fileno())
+                for i in range(2):
+                    # Redirect the standard process file descriptor (1 or 2)
+                    fd_redirect[i + 1] = self._enter_context(
+                        redirect_fd(i + 1, tee_fd[i], synchronize=False)
+                    )
+                    # Redirect the file descriptor currently associated with
+                    # sys.stdout / sys.stderr
+                    fd = old_fd[i]
+                    if fd and fd not in fd_redirect:
+                        fd_redirect[fd] = self._enter_context(
                             redirect_fd(fd, tee_fd[i], synchronize=False)
                         )
-            for fdr in self.fd_redirect:
-                fdr.__enter__()
-        # We have an issue where we are (very aggressively)
-        # commandeering the terminal. This is what we intend, but the
-        # side effect is that any errors generated by this module (e.g.,
-        # because the user gave us an invalid output stream) get
-        # completely suppressed. So, we will make an exception to the
-        # output that we are catching and let messages logged to THIS
-        # logger to still be emitted.
-        if self.capture_fd:
-            # Because we are also comandeering the FD that underlies
-            # self.old[1], we cannot just write to that stream and
-            # instead open a new stream to the original FD.
-            #
-            # Note that we need to duplicate the FD from the redirector,
-            # as it will close the (temporary) `original_fd` descriptor
-            # when it restores the actual original descriptor
-            self.temp_log_stream = os.fdopen(
-                os.dup(self.fd_redirect[-1].original_fd), mode="w", closefd=True
-            )
-        else:
-            self.temp_log_stream = self.old[1]
-        self.temp_log_handler = logging.StreamHandler(self.temp_log_stream)
-        logger.addHandler(self.temp_log_handler)
-        self._propagate = logger.propagate
-        logger.propagate = False
-        return self.output_stream
+            # We need to make sure that we didn't just capture the FD
+            # that underlies a stream that we are outputting to. Note
+            # that when capture_fd==False, normal streams will be left
+            # alone, but the lastResort _StderrHandler() will still be
+            # replaced (needed because that handler uses the *current*
+            # value of sys.stderr)
+            ostreams = []
+            for stream in self.tee.ostreams:
+                if isinstance(stream, LogStream):
+                    for handler_redirect in stream.redirect_streams(fd_redirect):
+                        self._enter_context(handler_redirect, prior_to=self.tee)
+                else:
+                    try:
+                        fd = stream.fileno()
+                    except (AttributeError, OSError):
+                        fd = None
+                    if fd in fd_redirect:
+                        # We just redirected this file descriptor so
+                        # we can capture the output. This makes a
+                        # loop that we really want to break. Undo
+                        # the redirect by pointing our output stream
+                        # back to the original file descriptor.
+                        stream = self._enter_context(
+                            os.fdopen(
+                                os.dup(fd_redirect[fd].original_fd),
+                                mode="w",
+                                closefd=True,
+                            ),
+                            prior_to=self.tee,
+                        )
+                ostreams.append(stream)
+            self.tee.ostreams = ostreams
+        except:
+            # Note: we will ignore any exceptions raised while exiting
+            # the context managers and just reraise the original
+            # exception.
+            self._exit_context_stack(*sys.exc_info())
+            raise
+        sys.stdout = self.tee.STDOUT
+        sys.stderr = self.tee.STDERR
+        buf = self.tee.ostreams
+        if len(buf) == 1:
+            buf = buf[0]
+        return buf
 
     def __exit__(self, et, ev, tb):
-        # Restore any file descriptors we comandeered
-        if self.fd_redirect is not None:
-            for fdr in reversed(self.fd_redirect):
-                fdr.__exit__(et, ev, tb)
-            self.fd_redirect = None
-        # Check and restore sys.stderr / sys.stdout
-        FAIL = self.tee.STDOUT is not sys.stdout
-        self.tee.__exit__(et, ev, tb)
-        if self.output_stream is not self.output:
-            self.output_stream.close()
+        # Check that we were nested correctly
+        FAIL = []
+        if self.tee.STDOUT is not sys.stdout:
+            FAIL.append('Captured output does not match sys.stdout.')
+        if self.tee.STDERR is not sys.stderr:
+            FAIL.append('Captured output does not match sys.stderr.')
+        # Exit all context managers. This includes
+        #  - Restore any file descriptors we commandeered
+        #  - Close / join the TeeStream
+        #  - Close any opened files
+        FAIL.extend(self._exit_context_stack(et, ev, tb))
         sys.stdout, sys.stderr = self.old
         self.old = None
         self.tee = None
         self.output_stream = None
-        # Clean up our temporary override of the local logger
-        self.temp_log_handler.flush()
-        logger.removeHandler(self.temp_log_handler)
-        if self.capture_fd:
-            self.temp_log_stream.flush()
-            self.temp_log_stream.close()
-        logger.propagate = self._propagate
-        self.temp_log_stream = None
-        self.temp_log_handler = None
         if FAIL:
-            raise RuntimeError('Captured output does not match sys.stdout.')
+            raise RuntimeError("\n".join(FAIL))
 
     def __del__(self):
         if self.tee is not None:
@@ -354,9 +427,6 @@ def __init__(self, mode, buffering, encoding, newline):
         else:
             self.output_buffer = b''
 
-    def __repr__(self):
-        return "%s(%s)" % (self.buffering, id(self))
-
     def fileno(self):
         return self.read_pipe
 
@@ -540,6 +610,7 @@ def close(self, in_exception=False):
 
         # Join all stream processing threads
        _poll = _poll_interval
+        FAIL = False
         while True:
             for th in self._threads:
                 th.join(_poll)
@@ -557,9 +628,11 @@ def close(self, in_exception=False):
                         "threads, possible output stream deadlock"
                     )
                 elif _poll >= _poll_timeout_deadlock:
-                    raise RuntimeError(
-                        "TeeStream: deadlock observed joining reader threads"
-                    )
+                    logger.error("TeeStream: deadlock observed joining reader threads")
+                    # Defer raising the exception until after we have
+                    # cleaned things up
+                    FAIL = True
+                    break
 
         for h in list(self._handles):
             h.finalize(self.ostreams)
@@ -569,6 +642,8 @@
         self._active_handles.clear()
         self._stdout = None
         self._stderr = None
+        if FAIL:
+            raise RuntimeError("TeeStream: deadlock observed joining reader threads")
 
     def __enter__(self):
         return self
diff --git a/pyomo/common/tests/test_log.py b/pyomo/common/tests/test_log.py
index f5057066e37..7ef8a51d57f 100644
--- a/pyomo/common/tests/test_log.py
+++ b/pyomo/common/tests/test_log.py
@@ -526,6 +526,12 @@ def test_log_stream(self):
             ls.write("line 1\nline 2\n")
             self.assertEqual(OUT.getvalue(), "INFO: line 1\nINFO: line 2\n")
 
+        with LI as OUT:
+            # empty writes do not generate log records
+            ls.write("")
+            ls.flush()
+            self.assertEqual(OUT.getvalue(), "")
+
         with LI as OUT:
             ls.write("line 1\nline 2")
             self.assertEqual(OUT.getvalue(), "INFO: line 1\n")
@@ -558,3 +564,46 @@ def test_preformatted_api(self):
         self.assertIs(msg.msg, ref)
         self.assertEqual(str(msg), '2')
         self.assertEqual(repr(msg), "Preformatted(2)")
+
+
+class TestLoggingIntercept(unittest.TestCase):
+    def test_init(self):
+        li = LoggingIntercept()
+        self.assertEqual(li.module, 'root')
+
+        li = LoggingIntercept(module='pyomo.core')
+        self.assertEqual(li.module, 'pyomo.core')
+
+        li = LoggingIntercept(logger=logger)
+        self.assertEqual(li.module, 'pyomo.common.log.testing')
+
+        with self.assertRaisesRegex(
+            ValueError, "LoggingIntercept: only one of 'module' and 'logger' is allowed"
+        ):
+            li = LoggingIntercept(module='pyomo', logger=logger)
+
+    def test_propagate(self):
+        self.assertEqual(logger.propagate, True)
+        with LoggingIntercept(logger=logger):
+            self.assertEqual(logger.propagate, False)
+        self.assertEqual(logger.propagate, True)
+
+    def test_level(self):
+        self.assertEqual(logger.level, 30)
+        try:
+            with LoggingIntercept(logger=logger, level=None):
+                self.assertEqual(logger.level, 30)
+            self.assertEqual(logger.level, 30)
+            with LoggingIntercept(logger=logger, level=40):
+                self.assertEqual(logger.level, 40)
+            self.assertEqual(logger.level, 30)
+
+            logger.setLevel(40)
+            with LoggingIntercept(logger=logger, level=None):
+                self.assertEqual(logger.level, 40)
+            self.assertEqual(logger.level, 40)
+            with LoggingIntercept(logger=logger, level=30):
+                self.assertEqual(logger.level, 30)
+            self.assertEqual(logger.level, 40)
+        finally:
+            logger.setLevel(30)
diff --git a/pyomo/common/tests/test_tee.py b/pyomo/common/tests/test_tee.py
index 31efb526b0a..34200403722 100644
--- a/pyomo/common/tests/test_tee.py
+++ b/pyomo/common/tests/test_tee.py
@@ -20,7 +20,7 @@
 
 from io import StringIO, BytesIO
 
-from pyomo.common.log import LoggingIntercept
+from pyomo.common.log import LoggingIntercept, LogStream
 import pyomo.common.unittest as unittest
 from pyomo.common.tempfiles import TempfileManager
 import pyomo.common.tee as tee
@@ -99,6 +99,28 @@ def test_err_and_out_are_different(self):
         self.assertIs(err, t.STDERR)
         self.assertIsNot(out, err)
 
+    def test_signal_flush(self):
+        a = StringIO()
+        with tee.TeeStream(a) as t:
+            out = t.STDOUT
+            self.assertIs(type(out), tee._SignalFlush)
+            out.write("out1\n")
+            out.writelines(["out2\n", "out3\n"])
+        self.assertEqual(a.getvalue(), "out1\nout2\nout3\n")
+        with tee.TeeStream(a) as t:
+            err = t.STDERR
+            self.assertIs(type(err), tee._AutoFlush)
+            err.write("err1\n")
+            err.writelines(["err2\n", "err3\n"])
+        self.assertEqual(a.getvalue(), "out1\nout2\nout3\nerr1\nerr2\nerr3\n")
+        # Note: 'is not writable' appears to work for all platforms
+        # except PyPy, where the exception is "readonly attribute
+        # 'name'"
+        with self.assertRaisesRegex(
+            AttributeError, '.*(is not writable)|(readonly attribute)'
+        ):
+            tee.TeeStream().STDOUT.name = 'foo'
+
     @unittest.skipIf(
         not tee._peek_available,
         "Requires the _mergedReader, but _peek_available==False",
     )
@@ -259,6 +281,25 @@ def write(self, data):
             r"\nThe following was left in the output buffer:\n    'i\\n'\n$",
         )
 
+
+class TestCapture(unittest.TestCase):
+    def setUp(self):
+        self.streams = sys.stdout, sys.stderr
+        self.reenable_gc = gc.isenabled()
+        gc.disable()
+        gc.collect()
+        # Set a short switch interval so that the threading tests behave
+        # as expected
+        self.switchinterval = sys.getswitchinterval()
+        sys.setswitchinterval(tee._poll_interval / 100)
+
+    def tearDown(self):
+        sys.stdout, sys.stderr = self.streams
+        sys.setswitchinterval(self.switchinterval)
+        if self.reenable_gc:
+            gc.enable()
+        gc.collect()
+
     def test_capture_output(self):
         out = StringIO()
         with tee.capture_output(out) as OUT:
@@ -287,10 +328,129 @@ def test_capture_output_logfile_string(self):
             result = f.read()
         self.assertEqual('HELLO WORLD\n', result)
 
+        # Check that an exception when entering the context will cleanly
+        # unroll the context stack
+        logfile = os.path.join('path', 'to', 'nonexisting', 'file.txt')
+        T = tee.capture_output(logfile)
+        with self.assertRaises(FileNotFoundError):
+            T.__enter__()
+        self.assertEqual(T.context_stack, [])
+
+    def test_capture_to_logger(self):
+        logger = logging.getLogger('_pyomo_no_logger')
+        lstream = LogStream(logging.WARNING, logger)
+        orig = logger.propagate, logger.handlers
+        try:
+            logger.propagate = False
+            logger.handlers = []
+            with LoggingIntercept(module='_pyomo_no_logger') as LOG:
+                with tee.capture_output(lstream, capture_fd=False):
+                    sys.stderr.write("hi!\n")
+                    sys.stderr.flush()
+            self.assertEqual(LOG.getvalue(), "hi!\n")
+
+            # test that we handle the lastResort logger correctly
+            _lastResort = logging.lastResort
+            with tee.capture_output() as OUT:
+                with tee.capture_output(lstream, capture_fd=False):
+                    self.assertIsNot(_lastResort, logging.lastResort)
+                    sys.stderr.write("hi?\n")
+            self.assertEqual(OUT.getvalue(), "hi?\n")
+
+            # test that we allow redirect-to-logger out
+            with tee.capture_output() as OUT:
+                logger.addHandler(logging.NullHandler())
+                logger.addHandler(logging.StreamHandler(sys.stderr))
+                with tee.capture_output(lstream, capture_fd=False):
+                    sys.stderr.write("hi.\n")
+            self.assertEqual(OUT.getvalue(), "hi.\n")
+            logger.handlers.clear()
+
+            # test a sub-logger
+            lstream = LogStream(
+                logging.WARNING, logging.getLogger('_pyomo_no_logger.foo')
+            )
+            with tee.capture_output() as OUT:
+                logger.addHandler(logging.NullHandler())
+                logger.addHandler(logging.StreamHandler(sys.stderr))
+                with tee.capture_output(lstream, capture_fd=False):
+                    sys.stderr.write("hi,\n")
+            self.assertEqual(OUT.getvalue(), "hi,\n")
+        finally:
+            logger.propagate, logger.handlers = orig
+
+    def test_capture_fd_to_logger(self):
+        logger = logging.getLogger('_pyomo_no_logger')
+        lstream = LogStream(logging.WARNING, logger)
+        orig = logger.propagate, logger.handlers
+        try:
+            logger.propagate = False
+            logger.handlers = []
+            with LoggingIntercept(module='_pyomo_no_logger') as LOG:
+                with tee.capture_output(lstream, capture_fd=True):
+                    sys.stderr.write("hi!\n")
+                    sys.stderr.flush()
+            self.assertEqual(LOG.getvalue(), "hi!\n")
+
+            # test that we handle the lastResort logger correctly
+            _lastResort = logging.lastResort
+            with tee.capture_output() as OUT:
+                with tee.capture_output(lstream, capture_fd=True):
+                    self.assertIsNot(_lastResort, logging.lastResort)
+                    sys.stderr.write("hi?\n")
+            self.assertEqual(OUT.getvalue(), "hi?\n")
+
+            # test that we allow redirect-to-logger out
+            with tee.capture_output() as OUT:
+                logger.addHandler(logging.NullHandler())
+                logger.addHandler(logging.StreamHandler(sys.stderr))
+                with tee.capture_output(lstream, capture_fd=True):
+                    sys.stderr.write("hi.\n")
+            self.assertEqual(OUT.getvalue(), "hi.\n")
+            logger.handlers.clear()
+
+            # test a sub-logger
+            lstream = LogStream(
+                logging.WARNING, logging.getLogger('_pyomo_no_logger.foo')
+            )
+            with tee.capture_output() as OUT:
+                logger.addHandler(logging.NullHandler())
+                logger.addHandler(logging.StreamHandler(sys.stderr))
+                with tee.capture_output(lstream, capture_fd=True):
+                    sys.stderr.write("hi,\n")
+            self.assertEqual(OUT.getvalue(), "hi,\n")
+        finally:
+            logger.propagate, logger.handlers = orig
+
+    def test_no_fileno_stdout(self):
+        T = tee.capture_output()
+        with T:
+            self.assertEqual(len(T.context_stack), 2)
+        T = tee.capture_output(capture_fd=True)
+        # out & err point to something other than fd 1 and 2
+        sys.stdout = os.fdopen(os.dup(1), closefd=True)
+        sys.stderr = os.fdopen(os.dup(2), closefd=True)
+        with sys.stdout, sys.stderr:
+            with T:
+                self.assertEqual(len(T.context_stack), 7)
+        # out & err point to fd 1 and 2
+        sys.stdout = os.fdopen(1, closefd=False)
+        sys.stderr = os.fdopen(2, closefd=False)
+        with sys.stdout, sys.stderr:
+            with T:
+                self.assertEqual(len(T.context_stack), 5)
+        # out & err have no fileno
+        sys.stdout = StringIO()
+        sys.stderr = StringIO()
+        with sys.stdout, sys.stderr:
+            with T:
+                self.assertEqual(len(T.context_stack), 5)
+
     def test_capture_output_stack_error(self):
         OUT1 = StringIO()
         OUT2 = StringIO()
         old = (sys.stdout, sys.stderr)
+        old_fd = os.dup(1), os.dup(2)
         try:
             a = tee.capture_output(OUT1)
             a.setup()
@@ -302,7 +462,10 @@ def test_capture_output_stack_error(self):
             a.reset()
             b.tee = None
         finally:
+            os.dup2(old_fd[0], 1)
+            os.dup2(old_fd[1], 2)
             sys.stdout, sys.stderr = old
+            logging.getLogger('pyomo.common.tee').handlers.clear()
 
     def test_capture_output_invalid_ostream(self):
         # Test that capture_output does not suppress errors from the tee
@@ -330,6 +493,20 @@ def test_capture_output_invalid_ostream(self):
             "The following was left in the output buffer:\n    'hi\\n'\n",
         )
 
+    def test_exit_on_del(self):
+        # This is a weird "feature", but because things like the pyomo
+        # script will create and "enter" a capture_output object without
+        # using a context manager, it is possible that the object can be
+        # deleted without calling __exit__. Check that the context
+        # stack is correctly unwound
+        T = tee.capture_output()
+        T.__enter__()
+        stack = T.context_stack
+        self.assertGreater(len(stack), 0)
+        del T
+        gc.collect()
+        self.assertEqual(len(stack), 0)
+
     def test_deadlock(self):
         class MockStream(object):
             def write(self, data):
@@ -349,11 +526,11 @@ def flush(self):
                 RuntimeError, 'deadlock'
             ):
                 with tee.TeeStream(MockStream()) as t:
-                    err = t.STDERR
-                    err.write('*')
+                    t.STDERR.write('*')
             self.assertEqual(
                 'Significant delay observed waiting to join reader '
-                'threads, possible output stream deadlock\n',
+                'threads, possible output stream deadlock\n'
+                'TeeStream: deadlock observed joining reader threads\n',
                 LOG.getvalue(),
             )
         finally:
@@ -617,7 +794,7 @@ def test_redirect_no_synchronize_stdout(self):
         with os.fdopen(r, 'r') as FILE:
             os.close(w)
             os.close(1)
-            self.assertEqual(FILE.read(), "to_stdout_1\nto_stdout_2\nto_fd1_2\n")
+            self.assertEqual(FILE.read(), "to_stdout_2\nto_fd1_2\n")
 
     # Pytest's default capture method causes failures for the following
    # two tests. This re-implementation of the capfd fixture allows
@@ -708,6 +885,38 @@ def test_capture_output_fd(self):
             os.close(w)
         self.assertEqual(FILE.read(), "to_stdout_2\nto_fd1_2\n")
 
+    def test_nested_capture_output(self):
+        OUT2 = StringIO()
+        r, w = os.pipe()
+        os.dup2(w, 1)
+        sys.stdout = stdout0 = os.fdopen(1, 'w', closefd=False)
+        with tee.capture_output((sys.stdout, StringIO()), capture_fd=True) as (_, OUT1):
+            stdout1 = sys.stdout
+            self.assertIsNot(stdout0, stdout1)
+            with tee.capture_output((sys.stdout, OUT2), capture_fd=True):
+                stdout2 = sys.stdout
+                self.assertIsNot(stdout1, stdout2)
+                sys.stdout.write("to_stdout_1\n")
+                sys.stdout.flush()
+                with os.fdopen(1, 'w', closefd=False) as F:
+                    F.write("to_fd1_1\n")
+                    F.flush()
+
+            sys.stdout.write("to_stdout_2\n")
+            sys.stdout.flush()
+            with os.fdopen(1, 'w', closefd=False) as F:
+                F.write("to_fd1_2\n")
+                F.flush()
+
+        self.assertEqual(OUT1.getvalue(), "to_stdout_1\nto_fd1_1\n")
+        self.assertEqual(OUT2.getvalue(), "to_stdout_1\nto_fd1_1\n")
+        with os.fdopen(r, 'r') as FILE:
+            os.close(1)
+            os.close(w)
+            self.assertEqual(
+                FILE.read(), "to_stdout_1\nto_fd1_1\nto_stdout_2\nto_fd1_2\n"
+            )
+
 
 if __name__ == '__main__':
     unittest.main()
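The sketches below are reviewer-added usage notes, not part of the patch. First, the new `logger=` argument to `LoggingIntercept` (mutually exclusive with `module=`) and the new `level=None` behavior of intercepting at the logger's current effective level; the logger name `'pyomo.core'` is purely illustrative:

    import logging

    from pyomo.common.log import LoggingIntercept

    log = logging.getLogger('pyomo.core')  # any logger works

    # Target the logger by name (the pre-existing API)...
    with LoggingIntercept(module='pyomo.core') as OUT:
        log.warning('captured by name')

    # ...or pass the logger object itself; level=None intercepts at the
    # logger's current effective level instead of logging.WARNING.
    with LoggingIntercept(logger=log, level=None) as OUT:
        log.warning('captured by object')

    print(OUT.getvalue())  # -> captured by object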
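Next, a sketch of the `redirect_fd` contract documented by this patch: `output=None` sends the descriptor to `os.devnull`, and `synchronize=True` repoints `sys.stdout` at the redirected descriptor. This assumes a POSIX-like process where fd 1 is the real stdout:

    import sys

    from pyomo.common.tee import redirect_fd

    # Send fd 1 to os.devnull and keep sys.stdout in sync with it
    with redirect_fd(fd=1, output=None, synchronize=True):
        print("suppressed: both sys.stdout and fd 1 go to devnull")
    print("visible again: the original descriptor is restored on exit")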
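The revised `capture_output` also accepts a sequence of streams and tees the captured output to all of them; because `__enter__` now returns `self.tee.ostreams` when more than one stream is in play, the result can be unpacked directly (mirroring `test_nested_capture_output`):

    from io import StringIO

    from pyomo.common.tee import capture_output

    a, b = StringIO(), StringIO()
    with capture_output((a, b)) as (out_a, out_b):
        print("hello")  # tee'd to both buffers
    assert a.getvalue() == b.getvalue() == "hello\n"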
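Finally, the output loop that `LogStream.redirect_streams` exists to break, in miniature: stderr is captured into a logger whose own `StreamHandler` writes back to stderr. During the capture, `capture_output` temporarily points that handler at a duplicate of the original file descriptor, so the message is emitted once on the real stderr instead of recursing (the logger name `'example'` is arbitrary):

    import logging
    import sys

    from pyomo.common.log import LogStream
    from pyomo.common.tee import capture_output

    log = logging.getLogger('example')
    log.propagate = False
    log.addHandler(logging.StreamHandler(sys.stderr))

    # Everything written to stderr is rerouted through `log`; the
    # handler above is redirected out of the capture to avoid a loop.
    with capture_output(LogStream(logging.WARNING, log), capture_fd=True):
        sys.stderr.write("rerouted through the logger\n")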