From b9dd995fca517dd85efdd4f299926aa5d7f8105c Mon Sep 17 00:00:00 2001
From: Xavier Buffat
Date: Mon, 7 Apr 2025 13:39:35 +0200
Subject: [PATCH 1/5] New test for fake multibunch tracking in xfields

---
 tests/test_fakemultibunch_vs_sacharer.py | 127 +++++++++++++++++++++++
 1 file changed, 127 insertions(+)
 create mode 100644 tests/test_fakemultibunch_vs_sacharer.py

diff --git a/tests/test_fakemultibunch_vs_sacharer.py b/tests/test_fakemultibunch_vs_sacharer.py
new file mode 100644
index 00000000..db5a3946
--- /dev/null
+++ b/tests/test_fakemultibunch_vs_sacharer.py
@@ -0,0 +1,127 @@
+import time, h5py, os
+import numpy as np
+from matplotlib import pyplot as plt
+from scipy import constants as cst
+from scipy.signal import hilbert
+from scipy.stats import linregress
+
+import xobjects as xo
+import xtrack as xt
+import xfields as xf
+import xwakes as xw
+
+context = xo.ContextCpu()
+
+energy = 7E3
+qx = 62.31
+qy = 60.32
+momentumCompaction = 3.483575072011584e-04
+basic_slot_time = 25E-9
+n_bunch_max = 3000
+n_bunch = 100
+bunch_intensity = 5E12
+RF_voltage = 16.0E6
+emit_norm = 2.5E-6
+sigma_z = 0.08
+selected_bunch = n_bunch-1
+
+assert n_bunch_max%n_bunch == 0
+circumference = n_bunch_max * basic_slot_time * cst.c
+bunch_spacing_zeta = circumference/n_bunch
+beta_x = circumference/(2.0*np.pi*qx)
+beta_y = circumference/(2.0*np.pi*qy)
+mass0 = cst.value('proton mass energy equivalent in MeV')*1E-3
+gamma = energy/mass0
+RF_freq = 10.0/basic_slot_time
+sigma_x = np.sqrt(emit_norm*beta_x/gamma)
+sigma_px = np.sqrt(emit_norm/beta_x/gamma)
+sigma_y = np.sqrt(emit_norm*beta_y/gamma)
+sigma_py = np.sqrt(emit_norm/beta_y/gamma)
+bucket_length = cst.c/RF_freq
+bucket_center = -1*selected_bunch*bunch_spacing_zeta
+betar = np.sqrt(1 - 1 / gamma ** 2)
+p0 = cst.m_p * betar * gamma * cst.c
+harmonic_number = circumference / bucket_length
+eta = 1/gamma**2-momentumCompaction
+qs = np.sqrt(cst.e * RF_voltage * np.abs(eta) * harmonic_number / (2 * np.pi * betar * cst.c * p0))
+averageRadius = circumference / (2.0*np.pi)
+sigma_pz = qs*sigma_z/(averageRadius*np.abs(eta))
+zeta_range=(-3*sigma_z, 3*sigma_z)
+filling_scheme = np.ones(n_bunch,dtype=int)
+omegarev= 2.0*np.pi*cst.c/circumference
+
+n_part = int(2E4)
+n_turns_wake = 10
+num_slices = 21
+n_turn = int(1E4)
+coupled_bunch_mode_number = 90
+
+wf = xw.wit.ComponentClassicThickWall(kind = 'dipole_x',
+                layer=xw.wit.Layer(thickness=None,dc_resistivity=1E-8),
+                radius=0.02, length = circumference, zero_rel_tol = 0.0)
+
+tune_shift_nx, tune_shift_m0, effective_impedance = xw.wit.sacherer_formula.sacherer_formula(
+                qp = 0.0,
+                nx_array = np.array([coupled_bunch_mode_number]),
+                bunch_intensity = bunch_intensity,
+                omegas = qs*omegarev,
+                n_bunches = n_bunch,
+                omega_rev = omegarev,
+                tune=qx,
+                gamma=gamma,
+                eta = eta,
+                bunch_length_seconds = sigma_z*4/cst.c,
+                m_max = 0,
+                impedance_function = wf.impedance)
+
+particles = xt.Particles(
+            _context = context,
+            q0 = 1,
+            p0c = energy*1E9,
+            mass0 = mass0*1E9,
+            x = sigma_x*(np.random.randn(n_part)+2),
+            px = sigma_px*(np.random.randn(n_part)),
+            y = sigma_y*np.random.randn(n_part),
+            py = sigma_py*np.random.randn(n_part),
+            zeta = sigma_z*np.random.randn(n_part) + bucket_center,
+            delta = sigma_pz*np.random.randn(n_part),
+            weight = bunch_intensity / n_part
+            )
+
+arc = xt.LineSegmentMap(length=None, qx=qx, qy=qy,
+                        betx=beta_x, bety=beta_y,
+                        longitudinal_mode='linear_fixed_rf',
+                        voltage_rf = RF_voltage,
+                        frequency_rf = RF_freq,
+                        lag_rf = 180.0,
+                        slippage_length = circumference,
+                        momentum_compaction_factor = momentumCompaction)
+
+wf.configure_for_tracking(zeta_range=zeta_range,
+                          num_slices=num_slices,
+                          bunch_spacing_zeta=bunch_spacing_zeta,
+                          num_turns=n_turns_wake,
+                          circumference=circumference,
+                          filling_scheme = filling_scheme,
+                          bunch_selection = [selected_bunch],
+                          fake_coupled_bunch_phase_x = 2.0*np.pi*coupled_bunch_mode_number/n_bunch,
+                          beta_x = beta_x,
+                          )
+
+longitudinal_acceptance = xt.LongitudinalLimitRect(min_zeta=bucket_center-bucket_length, max_zeta=bucket_center+bucket_length)
+
+log = xt.Log(mean_x=lambda l,p:p.x.mean())
+
+line = xt.Line([arc,longitudinal_acceptance,wf])
+line.build_tracker(_context=context)
+line.track(particles,num_turns=n_turn,log=log)
+
+ampl = np.abs(hilbert(line.log_last_track['mean_x']))
+turns = np.arange(len(ampl))
+start_fit = 100
+end_fit = len(ampl)-100
+fit = linregress(turns[start_fit:end_fit],np.log(ampl[start_fit:end_fit]))
+
+assert np.isclose(fit.slope,-2.0*np.pi*np.imag(tune_shift_nx[0][0]),rtol=5E-2)
+
+

From e982cccfdccb0cb3065ca94ef42592947ca0b748 Mon Sep 17 00:00:00 2001
From: Xavier Buffat
Date: Mon, 14 Apr 2025 16:58:10 +0200
Subject: [PATCH 2/5] Test fake multibunch with single pass through tracker

---
 tests/test_fakemultibunch.py | 129 +++++++++++++++++++++++++++++++++++
 1 file changed, 129 insertions(+)
 create mode 100644 tests/test_fakemultibunch.py

diff --git a/tests/test_fakemultibunch.py b/tests/test_fakemultibunch.py
new file mode 100644
index 00000000..9c04b437
--- /dev/null
+++ b/tests/test_fakemultibunch.py
@@ -0,0 +1,129 @@
+import numpy as np
+from matplotlib import pyplot as plt
+from scipy import constants as cst
+
+import xobjects as xo
+import xtrack as xt
+import xfields as xf
+import xwakes as xw
+
+context = xo.ContextCpu()
+
+energy = 7E3
+qx = 62.31
+qy = 60.32
+momentumCompaction = 3.48e-04
+basic_slot_time = 25E-9
+n_bunch_max = 3000
+n_bunch = 100
+bunch_intensity = 5E12
+emit_norm = 2.5E-6
+sigma_z = 0.08
+selected_bunch = n_bunch-1
+r_over_q = 1806
+q = 5E3
+coupled_bunch_number_x = 16
+coupled_bunch_number_y = 73
+n_turns_wake = 1
+num_slices = 11
+
+assert n_bunch_max%n_bunch == 0
+circumference = n_bunch_max * basic_slot_time * cst.c
+bunch_spacing_zeta = circumference/n_bunch
+beta_x = circumference/(2.0*np.pi*qx)
+beta_y = circumference/(2.0*np.pi*qy)
+mass0 = cst.value('proton mass energy equivalent in MeV')*1E-3
+gamma = energy/mass0
+RF_freq = 10.0/basic_slot_time
+sigma_x = np.sqrt(emit_norm*beta_x/gamma)
+sigma_y = np.sqrt(emit_norm*beta_y/gamma)
+betar = np.sqrt(1 - 1 / gamma ** 2)
+zeta_range=(-3*sigma_z, 3*sigma_z)
+filling_scheme = np.ones(n_bunch,dtype=int)
+
+n_part = num_slices
+slice_intensity = bunch_intensity/num_slices
+x0 = np.ones(n_part)
+z_edges = np.linspace(zeta_range[0],zeta_range[1],num_slices+1)
+z0 = z_edges[:-1]+(z_edges[1]-z_edges[0])/2
+zeta0 = z0-1*(n_bunch-1)*bunch_spacing_zeta
+particles = xt.Particles(
+            _context = context,
+            q0 = 1,
+            p0c = energy*1E9,
+            mass0 = mass0*1E9,
+            x = sigma_x*x0,
+            y = sigma_y*x0,
+            zeta = zeta0,
+            weight = bunch_intensity / n_part
+            )
+
+wfx = xw.wit.ComponentResonator(kind = 'dipole_x',
+                                r=r_over_q*q, q=q, f_r=RF_freq+3E5)
+wfy = xw.wit.ComponentResonator(kind = 'dipole_y',
+                                r=r_over_q*q, q=q, f_r=RF_freq-3E5)
+
+
+coupled_bunch_phase_x = 2*np.pi*coupled_bunch_number_x/n_bunch
+wfx.configure_for_tracking(zeta_range=zeta_range,
+                           num_slices=num_slices,
+                           bunch_spacing_zeta=bunch_spacing_zeta,
+                           num_turns=n_turns_wake,
+                           circumference=circumference,
+                           filling_scheme = np.ones(n_bunch,dtype=int),
+                           bunch_selection = [selected_bunch],
+                           fake_coupled_bunch_phase_x = coupled_bunch_phase_x,
+                           beta_x = beta_x,
+                           )
+coupled_bunch_phase_y = 2*np.pi*coupled_bunch_number_y/n_bunch
+wfy.configure_for_tracking(zeta_range=zeta_range,
+                           num_slices=num_slices,
+                           bunch_spacing_zeta=bunch_spacing_zeta,
+                           num_turns=n_turns_wake,
+                           circumference=circumference,
+                           filling_scheme = np.ones(n_bunch,dtype=int),
+                           bunch_selection = [selected_bunch],
+                           fake_coupled_bunch_phase_y = coupled_bunch_phase_y,
+                           beta_y = beta_y,
+                           )
+
+
+for turn in range(n_turns_wake):
+    px0 = np.copy(particles.px)
+    wfx.track(particles)
+    py0 = np.copy(particles.py)
+    wfy.track(particles)
+
+moments_data = wfx._xfields_wf.moments_data
+z,x = moments_data.get_moment_profile('x',0)
+moments_data = wfy._xfields_wf.moments_data
+z,y = moments_data.get_moment_profile('y',0)
+zetas = np.array([])
+positions_x = np.array([])
+positions_y = np.array([])
+for slot in np.arange(n_bunch):
+    zetas = np.hstack([zetas,z0-bunch_spacing_zeta*slot])
+    positions_x = np.hstack([positions_x,x0*np.cos(coupled_bunch_phase_x*(selected_bunch-slot))])
+    positions_y = np.hstack([positions_y,x0*np.cos(coupled_bunch_phase_y*(selected_bunch-slot))])
+indices = np.argsort(zetas)
+zetas = zetas[indices]
+positions_x = positions_x[indices]
+positions_y = positions_y[indices]
+
+assert np.allclose(z,zetas)
+assert np.allclose(x/sigma_x,positions_x)
+assert np.allclose(y/sigma_y,positions_y)
+
+
+for i_slice in range(num_slices):
+    zetas_slice = zeta0[i_slice]-zetas
+
+    scaling_constant = particles.q0**2 * cst.e**2 / (particles.p0c[0] * particles.beta0[0] * cst.e)
+    kicks = positions_x*sigma_x*scaling_constant*slice_intensity*wfx.function_vs_zeta(zetas_slice,beta0=betar,dzeta=moments_data.dz)
+    kick_from_track = particles.px[i_slice]-px0[i_slice]
+    assert np.isclose(np.sum(kicks),kick_from_track)
+    kicks = positions_x*sigma_x*scaling_constant*slice_intensity*wfy.function_vs_zeta(zetas_slice,beta0=betar,dzeta=moments_data.dz)
+    kick_from_track = particles.py[i_slice]-py0[i_slice]
+    assert np.isclose(np.sum(kicks),kick_from_track)
+
+

From b38ef00d8809eec578c5fe1a0726a639df86eabb Mon Sep 17 00:00:00 2001
From: Xavier Buffat
Date: Mon, 14 Apr 2025 17:02:29 +0200
Subject: [PATCH 3/5] small refactoring of the test

---
 tests/test_fakemultibunch.py | 21 +++++++++------------
 1 file changed, 9 insertions(+), 12 deletions(-)

diff --git a/tests/test_fakemultibunch.py b/tests/test_fakemultibunch.py
index 9c04b437..73404064 100644
--- a/tests/test_fakemultibunch.py
+++ b/tests/test_fakemultibunch.py
@@ -12,7 +12,6 @@
 energy = 7E3
 qx = 62.31
 qy = 60.32
-momentumCompaction = 3.48e-04
 basic_slot_time = 25E-9
 n_bunch_max = 3000
 n_bunch = 100
 bunch_intensity = 5E12
@@ -88,11 +87,12 @@
                            )
 
 
-for turn in range(n_turns_wake):
-    px0 = np.copy(particles.px)
-    wfx.track(particles)
-    py0 = np.copy(particles.py)
-    wfy.track(particles)
+px0 = np.copy(particles.px)
+wfx.track(particles)
+kicks_x_from_track = particles.px-px0
+py0 = np.copy(particles.py)
+wfy.track(particles)
+kicks_y_from_track = particles.py-py0
 
 moments_data = wfx._xfields_wf.moments_data
 z,x = moments_data.get_moment_profile('x',0)
@@ -115,15 +115,12 @@
 assert np.allclose(y/sigma_y,positions_y)
 
 
+scaling_constant = particles.q0**2 * cst.e**2 / (particles.p0c[0] * particles.beta0[0] * cst.e)
 for i_slice in range(num_slices):
     zetas_slice = zeta0[i_slice]-zetas
-
-    scaling_constant = particles.q0**2 * cst.e**2 / (particles.p0c[0] * particles.beta0[0] * cst.e)
     kicks = positions_x*sigma_x*scaling_constant*slice_intensity*wfx.function_vs_zeta(zetas_slice,beta0=betar,dzeta=moments_data.dz)
-    kick_from_track = particles.px[i_slice]-px0[i_slice]
-    assert np.isclose(np.sum(kicks),kick_from_track)
+    assert np.isclose(np.sum(kicks),kicks_x_from_track[i_slice])
     kicks = positions_x*sigma_x*scaling_constant*slice_intensity*wfy.function_vs_zeta(zetas_slice,beta0=betar,dzeta=moments_data.dz)
-    kick_from_track = particles.py[i_slice]-py0[i_slice]
-    assert np.isclose(np.sum(kicks),kick_from_track)
+    assert np.isclose(np.sum(kicks),kicks_y_from_track[i_slice])
 
 

From 6e2eaf71843924b2a71eb073b876f106a7a98167 Mon Sep 17 00:00:00 2001
From: Xavier Buffat
Date: Wed, 16 Apr 2025 15:24:58 +0200
Subject: [PATCH 4/5] Fix issue with too relaxed absolute tolerance in test

---
 tests/test_fakemultibunch.py | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/tests/test_fakemultibunch.py b/tests/test_fakemultibunch.py
index 73404064..59d18abd 100644
--- a/tests/test_fakemultibunch.py
+++ b/tests/test_fakemultibunch.py
@@ -110,17 +110,16 @@
 positions_x = positions_x[indices]
 positions_y = positions_y[indices]
 
-assert np.allclose(z,zetas)
+assert np.allclose(z/sigma_z,zetas/sigma_z)
 assert np.allclose(x/sigma_x,positions_x)
 assert np.allclose(y/sigma_y,positions_y)
 
-
 scaling_constant = particles.q0**2 * cst.e**2 / (particles.p0c[0] * particles.beta0[0] * cst.e)
 for i_slice in range(num_slices):
     zetas_slice = zeta0[i_slice]-zetas
     kicks = positions_x*sigma_x*scaling_constant*slice_intensity*wfx.function_vs_zeta(zetas_slice,beta0=betar,dzeta=moments_data.dz)
-    assert np.isclose(np.sum(kicks),kicks_x_from_track[i_slice])
-    kicks = positions_x*sigma_x*scaling_constant*slice_intensity*wfy.function_vs_zeta(zetas_slice,beta0=betar,dzeta=moments_data.dz)
-    assert np.isclose(np.sum(kicks),kicks_y_from_track[i_slice])
+    assert np.isclose(np.sum(kicks),kicks_x_from_track[i_slice],atol=1E-15)
+    kicks = positions_y*sigma_y*scaling_constant*slice_intensity*wfy.function_vs_zeta(zetas_slice,beta0=betar,dzeta=moments_data.dz)
+    assert np.isclose(np.sum(kicks),kicks_y_from_track[i_slice],atol=1E-15)
 
 

From be01ba6c5477729a6e5a76b1e97ecb51157afac5 Mon Sep 17 00:00:00 2001
From: Xavier Buffat
Date: Wed, 7 May 2025 14:41:01 +0200
Subject: [PATCH 5/5] Propagate context properly

---
 xwakes/beam_elements/collective_monitor.py | 13 +++++++++----
 xwakes/beam_elements/transverse_damper.py  |  4 +++-
 xwakes/wit/component.py                    |  2 +-
 3 files changed, 13 insertions(+), 6 deletions(-)

diff --git a/xwakes/beam_elements/collective_monitor.py b/xwakes/beam_elements/collective_monitor.py
index f136151d..360645b6 100644
--- a/xwakes/beam_elements/collective_monitor.py
+++ b/xwakes/beam_elements/collective_monitor.py
@@ -121,7 +121,10 @@ def __init__(self,
                  stats_to_store=None,
                  stats_to_store_particles=None,
                  backend='hdf5',
-                 _flatten=False):
+                 _flatten=False,
+                 **kwargs):
+
+        self.xoinitialize(**kwargs)
 
         slicer_moments = []
         if stats_to_store is not None:
@@ -194,7 +197,8 @@ def __init__(self,
            bunch_spacing_zeta=bunch_spacing_zeta, # This is P in the paper
            filling_scheme=filling_scheme,
            bunch_selection=bunch_selection,
-           with_compressed_profile=False
+           with_compressed_profile=False,
+           _context=self._context
         )
 
    def _reconfigure_for_parallel(self, n_procs, my_rank):
@@ -213,7 +217,8 @@ def _reconfigure_for_parallel(self, n_procs, my_rank):
            zeta_range=self.slicer.zeta_range,
            num_slices=self.slicer.num_slices,
            bunch_spacing_zeta=self.slicer.bunch_spacing_zeta,
-           moments=self.slicer.moments
+           moments=self.slicer.moments,
+           _context=self._context
        )
 
    def track(self, particles, _slice_result=None, _other_bunch_slicers=None):
@@ -326,7 +331,7 @@ def _update_bunch_buffer(self, particles):
 
                # here we need to use self.i_turn - 1 because the turn is
                # incremented before calling this method
-               self.bunch_buffer[bid][stat][(self.i_turn - 1) %
+               self.bunch_buffer[int(bid)][stat][(self.i_turn - 1) %
                                             self.flush_data_every] = val
 
    def _update_slice_buffer(self, particles):
diff --git a/xwakes/beam_elements/transverse_damper.py b/xwakes/beam_elements/transverse_damper.py
index 476bc710..2737ac10 100644
--- a/xwakes/beam_elements/transverse_damper.py
+++ b/xwakes/beam_elements/transverse_damper.py
@@ -49,6 +49,8 @@ def __init__(self, gain_x, gain_y, zeta_range, num_slices,
            'px': gain_x,
            'py': gain_y,
        }
+
+        self.xoinitialize(**kwargs)
 
        self.slicer = xf.UniformBinSlicer(
            filling_scheme=filling_scheme,
@@ -56,7 +58,7 @@ def __init__(self, gain_x, gain_y, zeta_range, num_slices,
            zeta_range=zeta_range,
            num_slices=num_slices,
            bunch_spacing_zeta=bunch_spacing_zeta,
-           moments=['px', 'py']
+           moments=['px', 'py'],_context=self._context
        )
 
        if filling_scheme is not None:
diff --git a/xwakes/wit/component.py b/xwakes/wit/component.py
index e0e2de4f..e084f578 100644
--- a/xwakes/wit/component.py
+++ b/xwakes/wit/component.py
@@ -233,7 +233,7 @@ def configure_for_tracking(self, zeta_range: Tuple[float, float],
                               **kwargs # for multibuunch compatibility
                               ) -> None:
        import xfields as xf
-        self._xfields_wf = xf.Wakefield(components=[self], zeta_range=zeta_range,
+        self._xfields_wf = xf.WakeTracker(components=[self], zeta_range=zeta_range,
                                          num_slices=num_slices, **kwargs)
 
    def track(self, particles):
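
For reference, a minimal sketch of the cross-check performed in tests/test_fakemultibunch_vs_sacharer.py (patch 1/5), assuming mean_x is the per-turn horizontal centroid recorded by the tracking log and tune_shift_nx is the output of xw.wit.sacherer_formula.sacherer_formula used in the test; the helper name fitted_growth_rate is illustrative only. The fitted slope of the logarithm of the centroid envelope, per turn, should match -2*pi*Im(DeltaQ) predicted by the Sacherer formula.

# Illustrative sketch only, not part of the patch series.
import numpy as np
from scipy.signal import hilbert
from scipy.stats import linregress

def fitted_growth_rate(mean_x, start_fit=100, end_fit=-100):
    # Envelope of the coherent oscillation via the Hilbert transform
    ampl = np.abs(hilbert(mean_x))
    turns = np.arange(len(ampl))
    # Exponential growth appears as a linear trend in log(amplitude)
    fit = linregress(turns[start_fit:end_fit], np.log(ampl[start_fit:end_fit]))
    return fit.slope  # growth rate per turn

# expected = -2.0 * np.pi * np.imag(tune_shift_nx[0][0])
# assert np.isclose(fitted_growth_rate(mean_x), expected, rtol=5E-2)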