Skip to content

Commit

Permalink
scylla_node: disable logging for watch_rest_for_alive
Browse files Browse the repository at this point in the history
disable `urllib3.connectionpool` logging for this function,
since it performs many retries and we don't need to see each
request that is sent out in the log

Ref: scylladb#477
  • Loading branch information
fruch authored and avelanarius committed Jan 12, 2024
1 parent d890599 commit 0a1cbb0
Showing 1 changed file with 35 additions and 30 deletions.
65 changes: 35 additions & 30 deletions ccmlib/scylla_node.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@
import threading
from pathlib import Path
from collections import OrderedDict
import logging

import psutil
import yaml
Expand Down Expand Up @@ -1347,36 +1348,40 @@ def watch_rest_for_alive(self, nodes, timeout=120):
instead of the log file and waits for the node to be really useable,
not just "UP" (see issue #461)
"""
tofind = nodes if isinstance(nodes, list) else [nodes]
tofind = set([node.address() for node in tofind])
url_live = f"http://{self.address()}:10000/gossiper/endpoint/live"
url_joining = f"http://{self.address()}:10000/storage_service/nodes/joining"
url_tokens = f"http://{self.address()}:10000/storage_service/tokens/"
endtime = time.time() + timeout
while time.time() < endtime:
live = set()
response = requests.get(url=url_live)
if response.text:
live = set(response.json())
response = requests.get(url=url_joining)
if response.text:
live = live - set(response.json())
# Verify that node knows not only about the existance of the
# other node, but also its tokens:
if tofind.issubset(live):
# This node thinks that all given nodes are alive and not
# "joining", we're almost done, but still need to verify
# that the node knows the others' tokens.
check = tofind
tofind = set()
for n in check:
response = requests.get(url=url_tokens+n)
if response.text == '[]':
tofind.add(n)
if not tofind:
return
time.sleep(0.1)
raise TimeoutError(f"watch_rest_for_alive() timeout after {timeout} seconds")
logging.getLogger('urllib3.connectionpool').disabled = True
try:
tofind = nodes if isinstance(nodes, list) else [nodes]
tofind = set([node.address() for node in tofind])
url_live = f"http://{self.address()}:10000/gossiper/endpoint/live"
url_joining = f"http://{self.address()}:10000/storage_service/nodes/joining"
url_tokens = f"http://{self.address()}:10000/storage_service/tokens/"
endtime = time.time() + timeout
while time.time() < endtime:
live = set()
response = requests.get(url=url_live)
if response.text:
live = set(response.json())
response = requests.get(url=url_joining)
if response.text:
live = live - set(response.json())
# Verify that node knows not only about the existance of the
# other node, but also its tokens:
if tofind.issubset(live):
# This node thinks that all given nodes are alive and not
# "joining", we're almost done, but still need to verify
# that the node knows the others' tokens.
check = tofind
tofind = set()
for n in check:
response = requests.get(url=url_tokens+n)
if response.text == '[]':
tofind.add(n)
if not tofind:
return
time.sleep(0.1)
raise TimeoutError(f"watch_rest_for_alive() timeout after {timeout} seconds")
finally:
logging.getLogger('urllib3.connectionpool').disabled = False

@property
def gnutls_config_file(self):
Expand Down

0 comments on commit 0a1cbb0

Please sign in to comment.