Description
The test failed, and then an error occurred during teardown as well:
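For context, the failing test reduces to roughly the following (reconstructed and abridged from the traceback below; `popen` is the subprocess helper from `distributed.utils_test`, and `loop`/`requires_default_ports` are test fixtures):

```python
# Reconstructed from the traceback below, not the verbatim test source.
from distributed import Client, Scheduler
from distributed.utils_test import popen


def test_no_dashboard(loop, requires_default_ports):
    # Start a scheduler subprocess without the diagnostic dashboard.
    with popen(["dask-scheduler", "--no-dashboard"]):
        # Client.__init__ -> _ensure_connected waits (asyncio.wait_for,
        # timeout=5) for the scheduler's handshake reply; this is where
        # the TimeoutError below surfaces.
        with Client(f"127.0.0.1:{Scheduler.default_port}", loop=loop):
            pass  # actual assertions elided
```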
____________________ ERROR at teardown of test_no_dashboard ____________________
cleanup = None
@pytest.fixture
def loop(cleanup):
> with check_instances():
distributed/utils_test.py:148:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
../../../miniconda3/envs/dask-distributed/lib/python3.10/contextlib.py:142: in __exit__
next(self.gen)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
@contextmanager
def check_instances():
Client._instances.clear()
Worker._instances.clear()
Scheduler._instances.clear()
SpecCluster._instances.clear()
Worker._initialized_clients.clear()
SchedulerTaskState._instances.clear()
WorkerTaskState._instances.clear()
Nanny._instances.clear()
_global_clients.clear()
Comm._instances.clear()
yield
start = time()
while set(_global_clients):
sleep(0.1)
> assert time() < start + 10
E assert 1657286310.2876859 < (1657286300.189965 + 10)
E + where 1657286310.2876859 = time()
distributed/utils_test.py:1844: AssertionError
----------------------------- Captured stderr call -----------------------------
2022-07-08 13:18:06,190 - distributed.scheduler - INFO - -----------------------------------------------
2022-07-08 13:18:06,202 - distributed.http.proxy - INFO - To route to workers diagnostics web server please install jupyter-server-proxy: python -m pip install jupyter-server-proxy
2022-07-08 13:18:06,209 - distributed.scheduler - INFO - State start
2022-07-08 13:18:06,214 - distributed.scheduler - INFO - -----------------------------------------------
2022-07-08 13:18:06,215 - distributed.scheduler - INFO - Clear task state
2022-07-08 13:18:06,215 - distributed.scheduler - INFO - Scheduler at: tcp://10.213.0.59:8786
2022-07-08 13:18:06,216 - distributed.scheduler - INFO - dashboard at: :8787
2022-07-08 13:18:08,557 - distributed.scheduler - INFO - Receive client connection: Client-668a589a-fec0-11ec-b0c3-0050568c2328
2022-07-08 13:18:18,667 - distributed.core - INFO - Starting established connection
2022-07-08 13:18:18,668 - distributed.scheduler - INFO - Remove client Client-668a589a-fec0-11ec-b0c3-0050568c2328
2022-07-08 13:18:18,669 - distributed.core - INFO - Event loop was unresponsive in Scheduler for 10.11s. This is often caused by long-running GIL-holding functions or moving large chunks of data. This can cause timeouts and instability.
2022-07-08 13:18:18,670 - distributed._signals - INFO - Received signal SIGINT (2)
2022-07-08 13:18:18,671 - distributed.scheduler - INFO - Close client connection: Client-668a589a-fec0-11ec-b0c3-0050568c2328
2022-07-08 13:18:18,671 - distributed.scheduler - INFO - Scheduler closing...
2022-07-08 13:18:18,672 - distributed.scheduler - INFO - Scheduler closing all comms
2022-07-08 13:18:18,673 - distributed.scheduler - INFO - Stopped scheduler at 'tcp://10.213.0.59:8786'
2022-07-08 13:18:18,674 - distributed.scheduler - INFO - End scheduler
=================================== FAILURES ===================================
______________________________ test_no_dashboard _______________________________
self = <TCP (closed) Client->Scheduler local=tcp://127.0.0.1:49495 remote=tcp://127.0.0.1:8786>
deserializers = None
async def read(self, deserializers=None):
stream = self.stream
if stream is None:
raise CommClosedError()
fmt = "Q"
fmt_size = struct.calcsize(fmt)
try:
> frames_nbytes = await stream.read_bytes(fmt_size)
E asyncio.exceptions.CancelledError
distributed/comm/tcp.py:223: CancelledError
During handling of the above exception, another exception occurred:
fut = <Task cancelled name='Task-131' coro=<TCP.read() done, defined at /Users/runner/work/distributed/distributed/distributed/comm/tcp.py:214>>
timeout = 5
async def wait_for(fut, timeout):
"""Wait for the single Future or coroutine to complete, with timeout.
Coroutine will be wrapped in Task.
Returns result of the Future or coroutine. When a timeout occurs,
it cancels the task and raises TimeoutError. To avoid the task
cancellation, wrap it in shield().
If the wait is cancelled, the task is also cancelled.
This function is a coroutine.
"""
loop = events.get_running_loop()
if timeout is None:
return await fut
if timeout <= 0:
fut = ensure_future(fut, loop=loop)
if fut.done():
return fut.result()
await _cancel_and_wait(fut, loop=loop)
try:
return fut.result()
except exceptions.CancelledError as exc:
raise exceptions.TimeoutError() from exc
waiter = loop.create_future()
timeout_handle = loop.call_later(timeout, _release_waiter, waiter)
cb = functools.partial(_release_waiter, waiter)
fut = ensure_future(fut, loop=loop)
fut.add_done_callback(cb)
try:
# wait until the future completes or the timeout
try:
await waiter
except exceptions.CancelledError:
if fut.done():
return fut.result()
else:
fut.remove_done_callback(cb)
# We must ensure that the task is not running
# after wait_for() returns.
# See https://bugs.python.org/issue32751
await _cancel_and_wait(fut, loop=loop)
raise
if fut.done():
return fut.result()
else:
fut.remove_done_callback(cb)
# We must ensure that the task is not running
# after wait_for() returns.
# See https://bugs.python.org/issue32751
await _cancel_and_wait(fut, loop=loop)
# In case task cancellation failed with some
# exception, we should re-raise it
# See https://bugs.python.org/issue40607
try:
> return fut.result()
E asyncio.exceptions.CancelledError
../../../miniconda3/envs/dask-distributed/lib/python3.10/asyncio/tasks.py:456: CancelledError
The above exception was the direct cause of the following exception:
loop = <tornado.platform.asyncio.AsyncIOLoop object at 0x13d34cb20>
requires_default_ports = None
def test_no_dashboard(loop, requires_default_ports):
with popen(["dask-scheduler", "--no-dashboard"]):
> with Client(f"127.0.0.1:{Scheduler.default_port}", loop=loop):
distributed/cli/tests/test_dask_scheduler.py:69:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
distributed/client.py:940: in __init__
self.start(timeout=timeout)
distributed/client.py:1098: in start
sync(self.loop, self._start, **kwargs)
distributed/utils.py:405: in sync
raise exc.with_traceback(tb)
distributed/utils.py:378: in f
result = yield future
../../../miniconda3/envs/dask-distributed/lib/python3.10/site-packages/tornado/gen.py:762: in run
value = future.result()
distributed/client.py:1178: in _start
await self._ensure_connected(timeout=timeout)
distributed/client.py:1265: in _ensure_connected
msg = await asyncio.wait_for(comm.read(), timeout)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
fut = <Task cancelled name='Task-131' coro=<TCP.read() done, defined at /Users/runner/work/distributed/distributed/distributed/comm/tcp.py:214>>
timeout = 5
async def wait_for(fut, timeout):
"""Wait for the single Future or coroutine to complete, with timeout.
Coroutine will be wrapped in Task.
Returns result of the Future or coroutine. When a timeout occurs,
it cancels the task and raises TimeoutError. To avoid the task
cancellation, wrap it in shield().
If the wait is cancelled, the task is also cancelled.
This function is a coroutine.
"""
loop = events.get_running_loop()
if timeout is None:
return await fut
if timeout <= 0:
fut = ensure_future(fut, loop=loop)
if fut.done():
return fut.result()
await _cancel_and_wait(fut, loop=loop)
try:
return fut.result()
except exceptions.CancelledError as exc:
raise exceptions.TimeoutError() from exc
waiter = loop.create_future()
timeout_handle = loop.call_later(timeout, _release_waiter, waiter)
cb = functools.partial(_release_waiter, waiter)
fut = ensure_future(fut, loop=loop)
fut.add_done_callback(cb)
try:
# wait until the future completes or the timeout
try:
await waiter
except exceptions.CancelledError:
if fut.done():
return fut.result()
else:
fut.remove_done_callback(cb)
# We must ensure that the task is not running
# after wait_for() returns.
# See https://bugs.python.org/issue32751
await _cancel_and_wait(fut, loop=loop)
raise
if fut.done():
return fut.result()
else:
fut.remove_done_callback(cb)
# We must ensure that the task is not running
# after wait_for() returns.
# See https://bugs.python.org/issue32751
await _cancel_and_wait(fut, loop=loop)
# In case task cancellation failed with some
# exception, we should re-raise it
# See https://bugs.python.org/issue40607
try:
return fut.result()
except exceptions.CancelledError as exc:
> raise exceptions.TimeoutError() from exc
E asyncio.exceptions.TimeoutError
../../../miniconda3/envs/dask-distributed/lib/python3.10/asyncio/tasks.py:458: TimeoutError
----------------------------- Captured stderr call -----------------------------
2022-07-08 13:18:06,190 - distributed.scheduler - INFO - -----------------------------------------------
2022-07-08 13:18:06,202 - distributed.http.proxy - INFO - To route to workers diagnostics web server please install jupyter-server-proxy: python -m pip install jupyter-server-proxy
2022-07-08 13:18:06,209 - distributed.scheduler - INFO - State start
2022-07-08 13:18:06,214 - distributed.scheduler - INFO - -----------------------------------------------
2022-07-08 13:18:06,215 - distributed.scheduler - INFO - Clear task state
2022-07-08 13:18:06,215 - distributed.scheduler - INFO - Scheduler at: tcp://10.213.0.59:8786
2022-07-08 13:18:06,216 - distributed.scheduler - INFO - dashboard at: :8787
2022-07-08 13:18:08,557 - distributed.scheduler - INFO - Receive client connection: Client-668a589a-fec0-11ec-b0c3-0050568c2328
2022-07-08 13:18:18,667 - distributed.core - INFO - Starting established connection
2022-07-08 13:18:18,668 - distributed.scheduler - INFO - Remove client Client-668a589a-fec0-11ec-b0c3-0050568c2328
2022-07-08 13:18:18,669 - distributed.core - INFO - Event loop was unresponsive in Scheduler for 10.11s. This is often caused by long-running GIL-holding functions or moving large chunks of data. This can cause timeouts and instability.
2022-07-08 13:18:18,670 - distributed._signals - INFO - Received signal SIGINT (2)
2022-07-08 13:18:18,671 - distributed.scheduler - INFO - Close client connection: Client-668a589a-fec0-11ec-b0c3-0050568c2328
2022-07-08 13:18:18,671 - distributed.scheduler - INFO - Scheduler closing...
2022-07-08 13:18:18,672 - distributed.scheduler - INFO - Scheduler closing all comms
2022-07-08 13:18:18,673 - distributed.scheduler - INFO - Stopped scheduler at 'tcp://10.213.0.59:8786'
2022-07-08 13:18:18,674 - distributed.scheduler - INFO - End scheduler
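The captured scheduler log suggests the sequence of events: the client connects at 13:18:08, the scheduler's event loop then goes unresponsive for 10.11s, and the client's 5-second `asyncio.wait_for(comm.read(), timeout)` in `_ensure_connected` fires before the handshake reply ever arrives. A minimal standalone sketch of that failure mode, using plain asyncio rather than any dask code:

```python
# Minimal sketch (no dask involved) of the failure mode the log suggests:
# the server side stalls for longer than the client's handshake timeout.
import asyncio


async def slow_server(reader, writer):
    try:
        await asyncio.sleep(10)  # stand-in for the unresponsive event loop
        writer.write(b"hello")
        await writer.drain()
    except ConnectionError:
        pass
    finally:
        writer.close()


async def main():
    server = await asyncio.start_server(slow_server, "127.0.0.1", 0)
    port = server.sockets[0].getsockname()[1]
    reader, writer = await asyncio.open_connection("127.0.0.1", port)
    try:
        # Same pattern as Client._ensure_connected: give up after 5 seconds.
        await asyncio.wait_for(reader.read(8), timeout=5)
    except asyncio.TimeoutError:
        print("TimeoutError, as in the traceback above")
    finally:
        writer.close()
        server.close()


asyncio.run(main())
```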