@@ -208,16 +208,12 @@ def communicate():
208208def start_scheduler(
209209    logdir, addr, port, ssh_username, ssh_port, ssh_private_key, remote_python=None
210210):
211-    cmd = "{python} -m distributed.cli.dask_scheduler --port {port}".format(
212-        python=remote_python or sys.executable, port=port
213-    )
211+    cmd = f"{remote_python or sys.executable} -m distributed.cli.dask_scheduler --port {port}"
214212
215213    # Optionally re-direct stdout and stderr to a logfile
216214    if logdir is not None:
217215        cmd = f"mkdir -p {logdir} && {cmd} "
218-        cmd += "&> {logdir}/dask_scheduler_{addr}:{port}.log".format(
219-            addr=addr, port=port, logdir=logdir
220-        )
216+        cmd += f"&> {logdir}/dask_scheduler_{addr}:{port}.log"
221217
222218    # Format output labels we can prepend to each line of output, and create
223219    # a 'status' key to keep track of jobs that terminate prematurely.
@@ -297,16 +293,12 @@ def start_worker(
297293    )
298294
299295    if local_directory is not None:
300-        cmd += " --local-directory {local_directory}".format(
301-            local_directory=local_directory
302-        )
296+        cmd += f" --local-directory {local_directory}"
303297
304298    # Optionally redirect stdout and stderr to a logfile
305299    if logdir is not None:
306300        cmd = f"mkdir -p {logdir} && {cmd} "
307-        cmd += "&> {logdir}/dask_scheduler_{addr}.log".format(
308-            addr=worker_addr, logdir=logdir
309-        )
301+        cmd += f"&> {logdir}/dask_scheduler_{worker_addr}.log"
310302
311303    label = f"worker {worker_addr}"
312304
@@ -402,9 +394,7 @@ def __init__(
402394        )
403395        print(
404396            bcolors.WARNING + "Output will be redirected to logfiles "
405-            'stored locally on individual worker nodes under "{logdir}".'.format(
406-                logdir=logdir
407-            )
397+            f'stored locally on individual worker nodes under "{logdir}".'
408398            + bcolors.ENDC
409399        )
410400        self.logdir = logdir
0 commit comments