Skip to content

Commit 3243cf8

Browse files
authored
Encoding errors (#1519)
* Wherever check_output is used, add an encoding: even if we do not read stdout directly, the call might still fail with a UnicodeDecodeError on a timeout or during exception handling when stdout is parsed. * Pass errors='replace' everywhere when calling .run so undecodable bytes are substituted instead of raising.
1 parent 07d082f commit 3243cf8

File tree

10 files changed

+29
-22
lines changed

10 files changed

+29
-22
lines changed

cron/client.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -124,9 +124,9 @@ def validate_temperature():
124124
validate_temperature.temperature_errors += 1
125125

126126
# stress all cores with constant yes operation
127-
subprocess.check_output('for i in $(seq $(nproc)); do yes > /dev/null & done', shell=True)
127+
subprocess.check_output('for i in $(seq $(nproc)); do yes > /dev/null & done', shell=True, encoding='UTF-8', errors='replace')
128128
time.sleep(300)
129-
subprocess.check_output(['killall', 'yes'])
129+
subprocess.check_output(['killall', 'yes'], encoding='UTF-8', errors='replace')
130130

131131
return False
132132

@@ -287,9 +287,9 @@ def do_measurement_control():
287287
else:
288288
set_status('job_no')
289289
if config['cluster']['client']['shutdown_on_job_no']:
290-
subprocess.check_output(['sync'])
290+
subprocess.check_output(['sync'], encoding='UTF-8', errors='replace')
291291
time.sleep(60) # sleep for 60 before going to suspend to allow logins to cluster when systems are fresh rebooted for maintenance
292-
subprocess.check_output(['sudo', 'systemctl', config['cluster']['client']['shutdown_on_job_no']])
292+
subprocess.check_output(['sudo', 'systemctl', config['cluster']['client']['shutdown_on_job_no']], encoding='UTF-8', errors='replace')
293293

294294
if not args.testing:
295295
time.sleep(config['cluster']['client']['sleep_time_no_job'])

lib/scenario_runner.py

Lines changed: 11 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -273,12 +273,12 @@ def _save_notes_runner(self):
273273
self.__notes_helper.save_to_db(self._run_id)
274274

275275
def _clear_caches(self):
276-
subprocess.check_output(['sync'])
276+
subprocess.check_output(['sync'], encoding='UTF-8', errors='replace')
277277

278278
if platform.system() == 'Darwin':
279279
return
280280
# 3 instructs kernel to drops page caches AND inode caches
281-
subprocess.check_output(['sudo', '/usr/sbin/sysctl', '-w', 'vm.drop_caches=3'])
281+
subprocess.check_output(['sudo', '/usr/sbin/sysctl', '-w', 'vm.drop_caches=3'], encoding='UTF-8', errors='replace')
282282

283283
def _check_system(self, mode='start'):
284284
print(TerminalColors.HEADER, '\nChecking system', TerminalColors.ENDC)
@@ -315,6 +315,7 @@ def _checkout_repository(self):
315315
check=True,
316316
capture_output=True,
317317
encoding='UTF-8',
318+
errors='replace'
318319
)
319320

320321
if problematic_symlink := utils.find_outside_symlinks(self._repo_folder):
@@ -376,6 +377,7 @@ def _checkout_relations(self):
376377
check=True,
377378
capture_output=True,
378379
encoding='UTF-8',
380+
errors='replace'
379381
)
380382

381383
if 'commit_hash' in relation:
@@ -384,6 +386,7 @@ def _checkout_relations(self):
384386
check=True,
385387
capture_output=True,
386388
encoding='UTF-8',
389+
errors='replace',
387390
cwd=relation_path,
388391
)
389392

@@ -616,13 +619,15 @@ def _check_container_is_running(self, container_name: str, step_description: str
616619
['docker', 'logs', container_name],
617620
check=False,
618621
capture_output=True,
619-
encoding='UTF-8'
622+
encoding='UTF-8',
623+
errors='replace',
620624
)
621625
inspect_ps = subprocess.run(
622626
['docker', 'inspect', '--format={{.State.ExitCode}}', container_name],
623627
check=False,
624628
capture_output=True,
625-
encoding='UTF-8'
629+
encoding='UTF-8',
630+
errors='replace',
626631
)
627632
exit_code = inspect_ps.stdout.strip() if inspect_ps.returncode == 0 else "unknown"
628633

@@ -1119,9 +1124,9 @@ def _setup_networks(self):
11191124
subprocess.run(['docker', 'network', 'rm', network], stderr=subprocess.DEVNULL, check=False)
11201125

11211126
if self.__usage_scenario['networks'][network] and self.__usage_scenario['networks'][network].get('internal', False):
1122-
subprocess.check_output(['docker', 'network', 'create', '--internal', network])
1127+
subprocess.check_output(['docker', 'network', 'create', '--internal', network], encoding='UTF-8', errors='replace')
11231128
else:
1124-
subprocess.check_output(['docker', 'network', 'create', network])
1129+
subprocess.check_output(['docker', 'network', 'create', network], encoding='UTF-8', errors='replace')
11251130

11261131
self.__networks.append(network)
11271132
else:

lib/utils.py

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -204,8 +204,7 @@ def get_metric_providers_names(config):
204204
return [(m.split('.')[-1]) for m in metric_providers_keys]
205205

206206
def get_architecture():
207-
ps = subprocess.run(['uname', '-s'],
208-
check=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE, encoding='UTF-8', errors='replace')
207+
ps = subprocess.run(['uname', '-s'], check=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE, encoding='UTF-8', errors='replace')
209208
output = ps.stdout.strip().lower()
210209

211210
if output == 'darwin':

metric_providers/base.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -56,7 +56,7 @@ def check_system(self, check_command="default", check_error_message=None, check_
5656
call_string = f"{self._current_dir}/{call_string}"
5757
check_command = [f"{call_string}", '-c']
5858

59-
ps = subprocess.run(check_command, capture_output=True, encoding='UTF-8', check=False)
59+
ps = subprocess.run(check_command, capture_output=True, encoding='UTF-8', errors='replace', check=False)
6060
if ps.returncode != 0:
6161
if check_error_message is None:
6262
check_error_message = ps.stderr

metric_providers/lmsensors/abstract_provider.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -43,7 +43,7 @@ def check_system(self, check_command="default", check_error_message=None, check_
4343
super().check_system(check_command=None)
4444

4545
# Run 'sensors' command and capture the output
46-
ps = subprocess.run(['sensors'], capture_output=True, text=True, check=False)
46+
ps = subprocess.run(['sensors'], capture_output=True, encoding='UTF-8', errors='replace', check=False)
4747
if ps.returncode != 0:
4848
raise MetricProviderConfigurationError(f"{self._metric_name} provider could not be started.\nCannot run the 'sensors' command. Did you install lm-sensors?.\n\nAre you running in a VM / cloud / shared hosting?\nIf so please disable the {self._metric_name} provider in the config.yml")
4949

metric_providers/network/connections/proxy/container/provider.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -50,8 +50,8 @@ def get_docker_params(self, no_proxy_list=''):
5050
elif platform.system() == 'Linux':
5151
# Under Linux there is no way to directly link to the host
5252
cmd = "ip addr show dev $(ip route | grep default | awk '{print $5}') | grep 'inet '| awk '{print $2}'| cut -f1 -d'/'"
53-
ps = subprocess.run(cmd, shell=True, check=True, text=True, capture_output=True)
54-
proxy_addr = ps.stdout.strip()
53+
output = subprocess.check_output(cmd, shell=True, encoding='UTF-8', errors='replace')
54+
proxy_addr = output.strip()
5555
else:
5656
proxy_addr = 'host.docker.internal'
5757

metric_providers/powermetrics/provider.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -74,7 +74,7 @@ def stop_profiling(self):
7474
# There is really no better way of doing this as of now. Keeping the process id for instance in a hash and
7575
# killing only that would also fail du to root permissions missing. If we add an /etc/sudoers entry with a
7676
# wildcard for a PID we open up a security hole. Happy to take suggestions on this one!
77-
subprocess.check_output('sudo /usr/bin/killall powermetrics', shell=True)
77+
subprocess.check_output('sudo /usr/bin/killall powermetrics', shell=True, encoding='UTF-8', errors='replace')
7878
print('Killed powermetrics process with killall!')
7979
if self._pm_process_count > 0:
8080
print('-----------------------------------------------------------------------------------------------------------------')
@@ -89,7 +89,7 @@ def stop_profiling(self):
8989
time.sleep(1)
9090
count += 1
9191
if count >= 60:
92-
subprocess.check_output('sudo /usr/bin/killall -9 powermetrics', shell=True)
92+
subprocess.check_output('sudo /usr/bin/killall -9 powermetrics', shell=True, encoding='UTF-8', errors='replace')
9393
raise RuntimeError('powermetrics had to be killed with kill -9. Values can not be trusted!')
9494

9595
self._ps = None

tests/metric_providers/test_metric_providers.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -72,9 +72,9 @@ def test_splitting_by_group():
7272

7373
@pytest.mark.skipif(utils.get_architecture() == 'macos', reason="macOS does not support disk used capturing atm")
7474
def test_disk_statvfs_providers():
75-
disk_id_docker = subprocess.check_output("stat -c '%d' $(docker info --format '{{.DockerRootDir}}')", shell=True, encoding='UTF-8')
75+
disk_id_docker = subprocess.check_output("stat -c '%d' $(docker info --format '{{.DockerRootDir}}')", shell=True, encoding='UTF-8', errors='replace')
7676

77-
disk_id_root = subprocess.check_output(['stat', '-c', '%d', '/'], encoding='UTF-8')
77+
disk_id_root = subprocess.check_output(['stat', '-c', '%d', '/'], encoding='UTF-8', errors='replace')
7878

7979
if disk_id_docker != disk_id_root:
8080
pytest.skip('Docker data root is not on same disk and thus cannot determine disk use through standard provider')

tools/cluster/cleanup_original.py

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -46,13 +46,13 @@ def check_timers(data):
4646
## Update time
4747
# may throw exception, but we need to check if time sync calls work, as we do not know what the actual time is
4848
# Typically in cluster installations port 123 is blocked and a local time server is available. Thus the guard function here
49-
subprocess.check_output(['sudo', 'timedatectl', 'set-ntp', 'true']) # this will trigger immediate update
49+
subprocess.check_output(['sudo', 'timedatectl', 'set-ntp', 'true'], encoding='UTF-8', errors='replace') # this will trigger immediate update
5050
time.sleep(5)
5151
ntp_status = subprocess.check_output(['timedatectl', '-a'], encoding='UTF-8', errors='replace')
5252
if 'System clock synchronized: yes' not in ntp_status or 'NTP service: active' not in ntp_status:
5353
raise RuntimeError('System clock could not be synchronized', ntp_status)
5454

55-
subprocess.check_output(['sudo', 'timedatectl', 'set-ntp', 'false']) # we want NTP always off in clusters
55+
subprocess.check_output(['sudo', 'timedatectl', 'set-ntp', 'false'], encoding='UTF-8', errors='replace') # we want NTP always off in clusters
5656
time.sleep(2)
5757
ntp_status = subprocess.check_output(['timedatectl', '-a'], encoding='UTF-8', errors='replace')
5858
if 'System clock synchronized: yes' not in ntp_status:
@@ -93,6 +93,7 @@ def check_timers(data):
9393
stdout=subprocess.PIPE,
9494
stderr=subprocess.STDOUT, # put both in one stream
9595
encoding='UTF-8',
96+
errors='replace'
9697
)
9798
if ps.returncode != 0:
9899
raise RuntimeError(f"sudo apt update failed: {ps.stdout}")
@@ -116,6 +117,7 @@ def check_timers(data):
116117
stdout=subprocess.PIPE,
117118
stderr=subprocess.STDOUT, # put both in one stream
118119
encoding='UTF-8',
120+
errors='replace'
119121
)
120122
if ps.returncode != 0:
121123
raise RuntimeError(f"sudo apt full-upgrade -y failed: {ps.stdout}")

tools/update_commit_data.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -43,6 +43,7 @@
4343
check=True,
4444
capture_output=True,
4545
encoding='UTF-8',
46+
errors='replace',
4647
cwd=args.folder
4748
)
4849
commit_timestamp = commit_timestamp.stdout.strip("\n")

0 commit comments

Comments
 (0)