from dpdispatcher.JobStatus import JobStatus
from dpdispatcher import dlog
from dpdispatcher.machine import Machine
from dpdispatcher.utils import run_cmd_with_all_output
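
# Machine backend that runs dpdispatcher jobs on a Hadoop YARN cluster through
# the hadoop-yarn-applications-distributedshell client. Job inputs and results
# are staged through HDFS, and the job script executes inside a Docker
# container on the cluster.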

shell_script_header_template = """
#!/bin/bash -l
set -x
"""
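
# Environment/setup section of the generated job script: it records the job's
# working directory, resets the per-job failure flag, fetches the uploaded
# *.tgz archives from HDFS (remote_root) if they are not already present
# locally, and unpacks them before any task runs.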
script_env_template = """
{module_unload_part}
{module_load_part}
{source_files_part}
{export_envs_part}

REMOTE_ROOT=`pwd`
echo 0 > {flag_if_job_task_fail}
test $? -ne 0 && exit 1

if ! ls {submission_hash}_upload.tgz 1>/dev/null 2>&1; then
    hadoop fs -get {remote_root}/*.tgz .
fi
for TGZ in `ls *.tgz`; do tar xvf $TGZ; done

"""
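
# Closing section of the generated job script: if no task flipped the failure
# flag, pack every task directory into a single download tarball, push it back
# to HDFS, and create the finish tag that check_finish_tag() looks for.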
script_end_template = """
cd $REMOTE_ROOT
test $? -ne 0 && exit 1

wait
FLAG_IF_JOB_TASK_FAIL=$(cat {flag_if_job_task_fail})
if test $FLAG_IF_JOB_TASK_FAIL -eq 0; then
    tar czf {submission_hash}_{job_hash}_download.tar.gz {all_task_dirs}
    hadoop fs -put -f {submission_hash}_{job_hash}_download.tar.gz {remote_root}
    hadoop fs -touchz {remote_root}/{job_tag_finished}
else
    exit 1
fi
"""


class DistributedShell(Machine):
    def gen_script_env(self, job):
        source_files_part = ""

        module_unload_part = ""
        module_unload_list = job.resources.module_unload_list
        for ii in module_unload_list:
            module_unload_part += f"module unload {ii}\n"

        module_load_part = ""
        module_list = job.resources.module_list
        for ii in module_list:
            module_load_part += f"module load {ii}\n"

        source_list = job.resources.source_list
        for ii in source_list:
            source_files_part += "{ source %s; } \n" % ii

        export_envs_part = ""
        envs = job.resources.envs
        for k, v in envs.items():
            export_envs_part += f"export {k}={v}\n"

        flag_if_job_task_fail = job.job_hash + '_flag_if_job_task_fail'

        script_env = script_env_template.format(
            flag_if_job_task_fail=flag_if_job_task_fail,
            module_unload_part=module_unload_part,
            module_load_part=module_load_part,
            source_files_part=source_files_part,
            export_envs_part=export_envs_part,
            remote_root=self.context.remote_root,
            submission_hash=self.context.submission.submission_hash,
        )
        return script_env

    def gen_script_end(self, job):
        all_task_dirs = ""
        for task in job.job_task_list:
            all_task_dirs += "%s " % task.task_work_path
        job_tag_finished = job.job_hash + '_job_tag_finished'
        flag_if_job_task_fail = job.job_hash + '_flag_if_job_task_fail'
        script_end = script_end_template.format(
            job_tag_finished=job_tag_finished,
            flag_if_job_task_fail=flag_if_job_task_fail,
            all_task_dirs=all_task_dirs,
            remote_root=self.context.remote_root,
            submission_hash=self.context.submission.submission_hash,
            job_hash=job.job_hash,
        )
        return script_end

    def gen_script_header(self, job):
        return shell_script_header_template

    def do_submit(self, job):
        """Submit the job to YARN using the DistributedShell application.

        Parameters
        ----------
        job : Job
            the job to be submitted

        Returns
        -------
        job_id : int
            the PID of the detached submit process
        """
        script_str = self.gen_script(job)
        script_file_name = job.script_file_name
        job_id_name = job.job_hash + '_job_id'
        output_name = job.job_hash + '.out'
        self.context.write_file(fname=script_file_name, write_str=script_str)

        resources = job.resources
        submit_command = 'hadoop jar %s/hadoop-yarn-applications-distributedshell-*.jar ' \
            'org.apache.hadoop.yarn.applications.distributedshell.Client ' \
            '-jar %s/hadoop-yarn-applications-distributedshell-*.jar ' \
            '-queue %s -appname "distributedshell_dpgen_%s" ' \
            '-shell_env YARN_CONTAINER_RUNTIME_TYPE=docker ' \
            '-shell_env YARN_CONTAINER_RUNTIME_DOCKER_IMAGE=%s ' \
            '-shell_env ENV_DOCKER_CONTAINER_SHM_SIZE=\'600m\' ' \
            '-master_memory 1024 -master_vcores 2 -num_containers 1 ' \
            '-container_resources memory-mb=%s,vcores=%s ' \
            '-shell_script /tmp/%s' % (resources.kwargs.get('yarn_path', ''),
                resources.kwargs.get('yarn_path', ''), resources.queue_name, job.job_hash,
                resources.kwargs.get('img_name', ''), resources.kwargs.get('mem_limit', 1) * 1024,
                resources.cpu_per_node, script_file_name)

        # Detach the submit command with nohup and capture its PID; both stdout
        # and stderr go to the per-job output file.
        cmd = '{ nohup %s 1>%s 2>&1 & } && echo $!' % (submit_command, output_name)
        ret, stdout, stderr = run_cmd_with_all_output(cmd)

        if ret != 0:
            err_str = stderr.decode('utf-8')
            raise RuntimeError(
                "Command %s fails to execute, error message: %s\nreturn code: %d\n"
                % (submit_command, err_str, ret))
        job_id = int(stdout.decode('utf-8').strip())

        self.context.write_file(job_id_name, str(job_id))
        return job_id
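
    # For illustration (hypothetical values): with kwargs
    # {'yarn_path': '/opt/hadoop/share/hadoop/yarn', 'img_name': 'ubuntu:20.04',
    #  'mem_limit': 8}, queue 'default' and cpu_per_node=4, submit_command
    # expands roughly to:
    #
    #   hadoop jar /opt/hadoop/share/hadoop/yarn/hadoop-yarn-applications-distributedshell-*.jar \
    #       org.apache.hadoop.yarn.applications.distributedshell.Client \
    #       -jar /opt/hadoop/share/hadoop/yarn/hadoop-yarn-applications-distributedshell-*.jar \
    #       -queue default -appname "distributedshell_dpgen_<job_hash>" \
    #       -shell_env YARN_CONTAINER_RUNTIME_TYPE=docker \
    #       -shell_env YARN_CONTAINER_RUNTIME_DOCKER_IMAGE=ubuntu:20.04 \
    #       -shell_env ENV_DOCKER_CONTAINER_SHM_SIZE='600m' \
    #       -master_memory 1024 -master_vcores 2 -num_containers 1 \
    #       -container_resources memory-mb=8192,vcores=4 \
    #       -shell_script /tmp/<job_hash>.sub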

    def check_status(self, job):
        job_id = job.job_id
        if job_id == '':
            return JobStatus.unsubmitted

        # The submit process runs locally in the background, so liveness is
        # checked through its PID rather than through YARN.
        ret, stdout, stderr = run_cmd_with_all_output(
            f"if ps -p {job_id} > /dev/null; then echo 1; fi")
        if ret != 0:
            err_str = stderr.decode('utf-8')
            raise RuntimeError(
                "Command fails to execute, error message: %s\nreturn code: %d\n"
                % (err_str, ret))

        if_job_exists = bool(stdout.decode('utf-8').strip())
        if self.check_finish_tag(job=job):
            dlog.info(f"job: {job.job_hash} {job.job_id} finished")
            return JobStatus.finished

        if if_job_exists:
            return JobStatus.running
        else:
            return JobStatus.terminated

    def check_finish_tag(self, job):
        job_tag_finished = job.job_hash + '_job_tag_finished'
        return self.context.check_file_exists(job_tag_finished)
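
# A minimal usage sketch (hedged: the dict keys follow the general
# dpdispatcher Machine/Resources pattern, and 'yarn_path', 'img_name' and
# 'mem_limit' are the kwargs this class reads; all paths, queue and image
# names below are placeholders):
#
#     from dpdispatcher import Machine, Resources
#
#     machine = Machine.load_from_dict({
#         'batch_type': 'DistributedShell',
#         'context_type': 'HDFSContext',
#         'local_root': './',
#         'remote_root': '/user/hadoop/dpdispatcher_work_dir',
#     })
#     resources = Resources.load_from_dict({
#         'number_node': 1, 'cpu_per_node': 4, 'gpu_per_node': 0,
#         'queue_name': 'default', 'group_size': 1,
#         'kwargs': {
#             'yarn_path': '/opt/hadoop/share/hadoop/yarn',
#             'img_name': 'ubuntu:20.04',
#             'mem_limit': 8,  # in GB; converted to memory-mb by do_submit
#         },
#     })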