
Commit 0a1cbb0

fruch authored and avelanarius committed
scylla_node: disable logging for watch_rest_for_alive
Disable `urllib3.connectionpool` logging for this function; it does lots of retries and we don't need to see each request that is being sent out in the log. Ref: scylladb#477
1 parent d890599 commit 0a1cbb0

File tree

1 file changed: +35 -30 lines changed


ccmlib/scylla_node.py

Lines changed: 35 additions & 30 deletions
@@ -13,6 +13,7 @@
 import threading
 from pathlib import Path
 from collections import OrderedDict
+import logging
 
 import psutil
 import yaml
@@ -1347,36 +1348,40 @@ def watch_rest_for_alive(self, nodes, timeout=120):
         instead of the log file and waits for the node to be really useable,
         not just "UP" (see issue #461)
         """
-        tofind = nodes if isinstance(nodes, list) else [nodes]
-        tofind = set([node.address() for node in tofind])
-        url_live = f"http://{self.address()}:10000/gossiper/endpoint/live"
-        url_joining = f"http://{self.address()}:10000/storage_service/nodes/joining"
-        url_tokens = f"http://{self.address()}:10000/storage_service/tokens/"
-        endtime = time.time() + timeout
-        while time.time() < endtime:
-            live = set()
-            response = requests.get(url=url_live)
-            if response.text:
-                live = set(response.json())
-            response = requests.get(url=url_joining)
-            if response.text:
-                live = live - set(response.json())
-            # Verify that node knows not only about the existance of the
-            # other node, but also its tokens:
-            if tofind.issubset(live):
-                # This node thinks that all given nodes are alive and not
-                # "joining", we're almost done, but still need to verify
-                # that the node knows the others' tokens.
-                check = tofind
-                tofind = set()
-                for n in check:
-                    response = requests.get(url=url_tokens+n)
-                    if response.text == '[]':
-                        tofind.add(n)
-            if not tofind:
-                return
-            time.sleep(0.1)
-        raise TimeoutError(f"watch_rest_for_alive() timeout after {timeout} seconds")
+        logging.getLogger('urllib3.connectionpool').disabled = True
+        try:
+            tofind = nodes if isinstance(nodes, list) else [nodes]
+            tofind = set([node.address() for node in tofind])
+            url_live = f"http://{self.address()}:10000/gossiper/endpoint/live"
+            url_joining = f"http://{self.address()}:10000/storage_service/nodes/joining"
+            url_tokens = f"http://{self.address()}:10000/storage_service/tokens/"
+            endtime = time.time() + timeout
+            while time.time() < endtime:
+                live = set()
+                response = requests.get(url=url_live)
+                if response.text:
+                    live = set(response.json())
+                response = requests.get(url=url_joining)
+                if response.text:
+                    live = live - set(response.json())
+                # Verify that node knows not only about the existance of the
+                # other node, but also its tokens:
+                if tofind.issubset(live):
+                    # This node thinks that all given nodes are alive and not
+                    # "joining", we're almost done, but still need to verify
+                    # that the node knows the others' tokens.
+                    check = tofind
+                    tofind = set()
+                    for n in check:
+                        response = requests.get(url=url_tokens+n)
+                        if response.text == '[]':
+                            tofind.add(n)
+                if not tofind:
+                    return
+                time.sleep(0.1)
+            raise TimeoutError(f"watch_rest_for_alive() timeout after {timeout} seconds")
+        finally:
+            logging.getLogger('urllib3.connectionpool').disabled = False
 
     @property
     def gnutls_config_file(self):
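
The change flips logger.disabled on before the polling loop and restores it in a finally block, so the per-request urllib3 noise is suppressed only for the duration of this call. The same on/off pattern could also be factored into a small reusable context manager; the sketch below is illustrative only and not part of this commit or of ccm (silenced_logger and the commented usage are made-up names), relying only on the standard library.

import logging
from contextlib import contextmanager

@contextmanager
def silenced_logger(name):
    # Temporarily disable the named logger, restoring its previous state even
    # if the wrapped code raises (the same try/finally idea as in the diff).
    logger = logging.getLogger(name)
    previous = logger.disabled
    logger.disabled = True
    try:
        yield logger
    finally:
        logger.disabled = previous

# Illustrative usage (hypothetical polling helper, not part of ccm):
# with silenced_logger('urllib3.connectionpool'):
#     poll_rest_until_alive()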
