|
| 1 | +import communication_helpers as ch |
| 2 | +import numpy as np |
| 3 | +from scipy.constants import c, e |
| 4 | +import share_segments as shs |
| 5 | +import time |
| 6 | + |
# ---------------------------------------------------------------------------
# Simulation parameters (module-level; read by the Simulation class below and,
# presumably, by the external driver that imports this file -- do not rename).
# ---------------------------------------------------------------------------

n_segments = 15#79      # number of e-cloud interaction points around the ring (79 was a previous/alternative value)
N_turns = 512           # number of machine turns to simulate

Dh_sc = .2e-3           # space-charge PIC grid spacing passed to PyECLOUD [m] -- TODO confirm units


intensity = 1.300000e+11    # bunch intensity [particles per bunch]
epsn_x = 2.5e-6             # normalized horizontal emittance -- presumably [m rad], confirm against LHC_custom
epsn_y = 2.5e-6             # normalized vertical emittance

machine_configuration = 'HLLHC-injection'   # optics preset name understood by LHC_custom.LHC

init_unif_edens_flag = 1        # 1 -> initialize e-cloud with a uniform electron density
init_unif_edens = 1.5e+12       # initial uniform electron density -- presumably [e-/m^3], confirm with PyECLOUD docs
N_MP_ele_init = 100000          # initial number of electron macroparticles
N_mp_max = N_MP_ele_init*4.     # cap on electron macroparticles (NOTE(review): this is a float, 400000.0 -- confirm PyECLOUD accepts non-int)

# Initial transverse beam displacement, in units of the rms beam size
# (converted to meters in Simulation.init_master).
x_kick_in_sigmas = 0.1
y_kick_in_sigmas = 0.1


# Chamber geometry: polygonal cross-section loaded from file, with the
# rectangular half-apertures below used for MP sizing and reference.
chamb_type = 'polyg'
x_aper = 2.300000e-02           # horizontal half-aperture [m]
y_aper = 1.800000e-02           # vertical half-aperture [m]
filename_chm = 'LHC_chm_ver.mat'    # polygon chamber definition (MATLAB file)

# Magnetic multipole strength per eV of beam energy; scaled by the actual
# beam momentum in Simulation.init_all (B_multip_per_eV * p0/e * c).
B_multip_per_eV = [1.190000e-12]
B_multip_per_eV = np.array(B_multip_per_eV)

Dt_ref=10e-12                           # PyECLOUD reference time step [s]
pyecl_input_folder='./pyecloud_config'  # folder with the PyECLOUD input files

n_macroparticles=300000     # beam macroparticles in the PyHEADTAIL bunch
sigma_z=1.35e-9/4*c         # rms bunch length [m], from a 1.35 ns (4 sigma) bunch duration

n_slices = 64       # longitudinal slices per bunch
n_sigma_z = 2.      # slicing region extent in units of sigma_z
| 44 | + |
class Simulation(object):
    """Master/worker simulation of an e-cloud-driven instability.

    The one-turn map of the machine is split across a ring of CPUs: each
    worker tracks bunch slices through its share of the parallelizable
    segments (with an e-cloud kick after each transverse-map element),
    while the master keeps the non-parallelizable elements (RF and
    aperture), re-merges the slices each turn, monitors the bunch and
    re-slices for the next turn.

    NOTE(review): ``self.ring_of_CPUs`` is never assigned in this class;
    presumably it is attached by the external parallelization framework
    (which also calls init_all / init_master / init_worker / treat_piece /
    finalize_turn_on_master in that protocol order) -- confirm against the
    driver that instantiates Simulation.
    """

    def __init__(self):
        # Number of turns to track; read by the external driver via self.N_turns.
        self.N_turns = N_turns

    def init_all(self):
        """Build the machine, the e-cloud template and this node's map part.

        Runs on every node (master and workers). Side effects: sets
        self.machine, self.mypart, self.my_list_eclouds and, on the
        master only, self.non_parallel_part.
        """

        self.n_slices = n_slices
        self.n_segments = n_segments

        # define the machine
        from LHC_custom import LHC
        self.machine = LHC(n_segments = n_segments, machine_configuration = machine_configuration)

        # define MP size
        # Electrons per macroparticle so that N_MP_ele_init MPs fill the
        # rectangular bounding box (area 4*x_aper*y_aper) at init_unif_edens.
        nel_mp_ref_0 = init_unif_edens*4*x_aper*y_aper/N_MP_ele_init

        # prepare e-cloud
        # Template cloud; per-segment clouds are cloned from it below so they
        # share the space-charge solver. L_ecloud spreads the full
        # circumference evenly over the n_segments interaction points.
        import PyECLOUD.PyEC4PyHT as PyEC4PyHT
        ecloud = PyEC4PyHT.Ecloud(slice_by_slice_mode=True,
                        L_ecloud=self.machine.circumference/n_segments, slicer=None ,
                        Dt_ref=Dt_ref, pyecl_input_folder=pyecl_input_folder,
                        chamb_type = chamb_type,
                        x_aper=x_aper, y_aper=y_aper,
                        filename_chm=filename_chm, Dh_sc=Dh_sc,
                        init_unif_edens_flag=init_unif_edens_flag,
                        init_unif_edens=init_unif_edens,
                        N_mp_max=N_mp_max,
                        nel_mp_ref_0=nel_mp_ref_0,
                        B_multip=B_multip_per_eV*self.machine.p0/e*c)

        # setup transverse losses (to "protect" the ecloud)
        # Particles outside the chamber ellipse are removed before they can
        # reach the e-cloud grid; apertures taken from the chamber just built.
        import PyHEADTAIL.aperture.aperture as aperture
        apt_xy = aperture.EllipticalApertureXY(x_aper=ecloud.impact_man.chamb.x_aper, y_aper=ecloud.impact_man.chamb.y_aper)
        self.machine.one_turn_map.append(apt_xy)

        n_non_parallelizable = 2 #rf and aperture

        # We suppose that all the object that cannot be slice parallelized are at the end of the ring
        i_end_parallel = len(self.machine.one_turn_map)-n_non_parallelizable

        # split the machine
        # Each node gets a contiguous chunk [i_start_part, i_end_part) of the
        # parallelizable portion of the one-turn map.
        sharing = shs.ShareSegments(i_end_parallel, self.ring_of_CPUs.N_nodes)
        myid = self.ring_of_CPUs.myid
        i_start_part, i_end_part = sharing.my_part(myid)
        self.mypart = self.machine.one_turn_map[i_start_part:i_end_part]
        if self.ring_of_CPUs.I_am_a_worker:
            print 'I am id=%d/%d (worker) and my part is %d long'%(myid, self.ring_of_CPUs.N_nodes, len(self.mypart))
        elif self.ring_of_CPUs.I_am_the_master:
            # Master additionally keeps the trailing non-parallel elements
            # (RF and aperture) to apply once per turn on the merged bunch.
            self.non_parallel_part = self.machine.one_turn_map[i_end_parallel:]
            print 'I am id=%d/%d (master) and my part is %d long'%(myid, self.ring_of_CPUs.N_nodes, len(self.mypart))

        #install eclouds in my part
        # A twin cloud (sharing the template's space-charge objects) is
        # inserted right after every transverse-map segment in this node's part.
        my_new_part = []
        self.my_list_eclouds = []
        for ele in self.mypart:
            my_new_part.append(ele)
            if ele in self.machine.transverse_map:
                ecloud_new = ecloud.generate_twin_ecloud_with_shared_space_charge()
                my_new_part.append(ecloud_new)
                self.my_list_eclouds.append(ecloud_new)
        self.mypart = my_new_part

    def init_master(self):
        """Master-only setup: create the bunch, slicer and monitor.

        Returns the initial list of slice objects to be distributed to the
        workers for the first turn.
        """

        # generate a bunch
        bunch = self.machine.generate_6D_Gaussian_bunch_matched(
                                n_macroparticles=n_macroparticles, intensity=intensity,
                                epsn_x=epsn_x, epsn_y=epsn_y, sigma_z=sigma_z)
        print 'Bunch initialized.'

        # initial slicing
        from PyHEADTAIL.particles.slicing import UniformBinSlicer
        self.slicer = UniformBinSlicer(n_slices = n_slices, n_sigma_z = n_sigma_z)

        # compute initial displacements
        # sigma = sqrt(beta * eps_geometric), with eps_geom = epsn/betagamma.
        inj_opt = self.machine.transverse_map.get_injection_optics()
        sigma_x = np.sqrt(inj_opt['beta_x']*epsn_x/self.machine.betagamma)
        sigma_y = np.sqrt(inj_opt['beta_y']*epsn_y/self.machine.betagamma)
        x_kick = x_kick_in_sigmas*sigma_x
        y_kick = y_kick_in_sigmas*sigma_y

        # apply initial displacement
        # Rigid offset to seed the instability.
        bunch.x += x_kick
        bunch.y += y_kick

        # define a bunch monitor
        from PyHEADTAIL.monitors.monitors import BunchMonitor
        self.bunch_monitor = BunchMonitor('bunch_evolution.h5', N_turns, {'Comment':'PyHDTL simulation'},
                            write_buffer_every = 8)


        #slice for the first turn
        slice_obj_list = bunch.extract_slices(self.slicer)

        pieces_to_be_treated = slice_obj_list

        print 'N_turns', self.N_turns

        return pieces_to_be_treated

    def init_worker(self):
        # Workers need no extra setup beyond init_all.
        pass

    def treat_piece(self, piece):
        """Track one bunch slice through this node's part of the one-turn map."""
        for ele in self.mypart:
            ele.track(piece)

    def finalize_turn_on_master(self, pieces_treated):
        """End-of-turn bookkeeping on the master.

        Merges the treated slices back into a bunch, applies the
        non-parallel elements (e.g. RF), records monitor data and
        re-slices for the next turn. Returns (orders_to_pass,
        new_pieces_to_be_treated) for the driving framework.
        """

        # re-merge bunch
        # NOTE(review): relies on slice objects supporting __add__ via sum() --
        # presumably a PyHEADTAIL Particles feature; confirm.
        bunch = sum(pieces_treated)

        #finalize present turn (with non parallel part, e.g. synchrotron motion)
        for ele in self.non_parallel_part:
            ele.track(bunch)

        # save results
        #print '%s Turn %d'%(time.strftime("%d/%m/%Y %H:%M:%S", time.localtime()), i_turn)
        self.bunch_monitor.dump(bunch)

        # prepare next turn (re-slice)
        new_pieces_to_be_treated = bunch.extract_slices(self.slicer)
        # Instruct workers to reset their clouds before the next turn.
        orders_to_pass = ['reset_clouds']

        return orders_to_pass, new_pieces_to_be_treated


    def execute_orders_from_master(self, orders_from_master):
        """Worker-side handler for per-turn orders broadcast by the master."""
        if 'reset_clouds' in orders_from_master:
            # Restore each cloud's initial electron distribution for the new turn.
            for ec in self.my_list_eclouds: ec.finalize_and_reinitialize()



    def finalize_simulation(self):
        # No teardown required.
        pass

    def piece_to_buffer(self, piece):
        """Serialize a slice object into a raw buffer for inter-node transfer."""
        buf = ch.beam_2_buffer(piece)
        return buf

    def buffer_to_piece(self, buf):
        """Deserialize a raw buffer received from another node back into a slice."""
        piece = ch.buffer_2_beam(buf)
        return piece
| 189 | + |
| 190 | + |
| 191 | + |
| 192 | + |
0 commit comments