diff --git a/.gitignore b/.gitignore index 64a33c5f..a89f03b4 100644 --- a/.gitignore +++ b/.gitignore @@ -16,3 +16,5 @@ scripts/ .DS_Store *.__afs* *.egg-info +2to3.out +PyHEADTAIL/gpu/thrust.so diff --git a/2to3.out b/2to3.out new file mode 100644 index 00000000..d2cb4bb2 --- /dev/null +++ b/2to3.out @@ -0,0 +1,707 @@ +--- ../PyHEADTAIL/setup.py (original) ++++ ../PyHEADTAIL/setup.py (refactored) +@@ -20,7 +20,7 @@ + "may have to install with the following line:\n\n" + "$ CC=gcc-4.9 ./install\n" + "(or any equivalent version of gcc)") +- input('Hit any key to continue...') ++ eval(input('Hit any key to continue...')) + + + args = sys.argv[1:] +--- ../PyHEADTAIL/PyHEADTAIL/elens/elens.py (original) ++++ ../PyHEADTAIL/PyHEADTAIL/elens/elens.py (refactored) +@@ -2,7 +2,7 @@ + @authors: Vadim Gubaidulin, Adrian Oeftiger + @date: 18.02.2020 + ''' +-from __future__ import division ++ + + from PyHEADTAIL.general.element import Element + from PyHEADTAIL.particles import slicing +--- ../PyHEADTAIL/PyHEADTAIL/feedback/feedback.py (original) ++++ ../PyHEADTAIL/PyHEADTAIL/feedback/feedback.py (refactored) +@@ -1,10 +1,10 @@ + import numpy as np + import collections + from PyHEADTAIL.mpi import mpi_data +-from core import get_processor_variables, process, Parameters +-from core import z_bins_to_bin_edges, append_bin_edges +-from processors.register import VectorSumCombiner, CosineSumCombiner +-from processors.register import HilbertCombiner, DummyCombiner ++from .core import get_processor_variables, process, Parameters ++from .core import z_bins_to_bin_edges, append_bin_edges ++from .processors.register import VectorSumCombiner, CosineSumCombiner ++from .processors.register import HilbertCombiner, DummyCombiner + from scipy.constants import c + """ + This file contains objecst, which can be used as transverse feedback +@@ -609,7 +609,7 @@ + If True, data from multiple bunches are gathered by using MPI + """ + +- if isinstance(combiner, (str,unicode)): ++ if isinstance(combiner, str): + if combiner == 'vector_sum': + self._combiner_x = VectorSumCombiner(registers_x, + location_x, beta_x, +--- ../PyHEADTAIL/PyHEADTAIL/feedback/processors/addition.py (original) ++++ ../PyHEADTAIL/PyHEADTAIL/feedback/processors/addition.py (refactored) +@@ -11,8 +11,7 @@ + @date: 11/10/2017 + """ + +-class Addition(object): +- __metaclass__ = ABCMeta ++class Addition(object, metaclass=ABCMeta): + """ An abstract class which adds an array to the input signal. The addend array is produced by taking + a slice property (determined by the input parameter 'seed') and passing it through the abstract method + addend_function(seed). 
+@@ -77,7 +76,7 @@ + np.copyto(self._addend, ((parameters.bin_edges[:,1]+parameters.bin_edges[:,0])/2.)) + elif self._seed == 'normalized_bin_midpoint': + +- for i in xrange(parameters.n_segments): ++ for i in range(parameters.n_segments): + i_from = i * parameters.n_bins_per_segment + i_to = (i + 1) * parameters.n_bins_per_segment + +@@ -110,7 +109,7 @@ + + elif self._normalization == 'segment_sum': + norm_coeff = np.ones(len(self._addend)) +- for i in xrange(parameters.n_segments): ++ for i in range(parameters.n_segments): + i_from = i*parameters.n_bins_per_segment + i_to = (i+1)*parameters.n_bins_per_segment + norm_coeff[i_from:i_to] = norm_coeff[i_from:i_to]*float(np.sum(self._addend[i_from:i_to])) +@@ -120,7 +119,7 @@ + + elif self._normalization == 'segment_average': + norm_coeff = np.ones(len(self._addend)) +- for i in xrange(parameters.n_segments): ++ for i in range(parameters.n_segments): + i_from = i*parameters.n_bins_per_segment + i_to = (i+1)*parameters.n_bins_per_segment + norm_coeff[i_from:i_to] = norm_coeff[i_from:i_to]*float(np.sum(self._addend[i_from:i_to]))/float(parameters.n_bins_per_segment) +@@ -132,7 +131,7 @@ + elif self._normalization == 'segment_integral': + bin_widths = parameters.bin_edges[:,1] - parameters.bin_edges[:,0] + norm_coeff = np.ones(len(self._addend)) +- for i in xrange(parameters.n_segments): ++ for i in range(parameters.n_segments): + i_from = i*parameters.n_bins_per_segment + i_to = (i+1)*parameters.n_bins_per_segment + norm_coeff[i_from:i_to] = norm_coeff[i_from:i_to]*float(np.sum(self._addend[i_from:i_to]*bin_widths[i_from:i_to])) +@@ -142,7 +141,7 @@ + + elif self._normalization == 'segment_min': + norm_coeff = np.ones(len(self._addend)) +- for i in xrange(parameters.n_segments): ++ for i in range(parameters.n_segments): + i_from = i*parameters.n_bins_per_segment + i_to = (i+1)*parameters.n_bins_per_segment + norm_coeff[i_from:i_to] = norm_coeff[i_from:i_to]*float(np.min(self._addend[i_from:i_to])) +@@ -152,7 +151,7 @@ + + elif self._normalization == 'segment_max': + norm_coeff = np.ones(len(self._addend)) +- for i in xrange(parameters.n_segments): ++ for i in range(parameters.n_segments): + i_from = i*parameters.n_bins_per_segment + i_to = (i+1)*parameters.n_bins_per_segment + norm_coeff[i_from:i_to] = norm_coeff[i_from:i_to]*float(np.max(self._addend[i_from:i_to])) +--- ../PyHEADTAIL/PyHEADTAIL/feedback/processors/convolution.py (original) ++++ ../PyHEADTAIL/PyHEADTAIL/feedback/processors/convolution.py (refactored) +@@ -6,7 +6,7 @@ + from scipy.constants import pi + import scipy.integrate as integrate + from scipy.interpolate import UnivariateSpline +-import abstract_filter_responses ++from . import abstract_filter_responses + + """Signal processors based on convolution operation. + +@@ -14,9 +14,7 @@ + @date: 11/10/2017 + """ + +-class Convolution(object): +- __metaclass__ = ABCMeta +- ++class Convolution(object, metaclass=ABCMeta): + def __init__(self,**kwargs): + + self._dashed_impulse_responses = None +@@ -54,12 +52,12 @@ + + # List of impulses to the corresponding segments + self._impulses_to_segments = [] +- for i in xrange(self._n_seg): ++ for i in range(self._n_seg): + self._impulses_to_segments.append([]) + + ref_points = [] + +- for i in xrange(self._n_seg): ++ for i in range(self._n_seg): + i_from = i*self._n_bins + i_to = (i+1)*self._n_bins + +@@ -126,7 +124,7 @@ + # response is zero. 
+ n_bins_per_segment = self._n_bins + 2*extra_bins + +- for k in xrange(self._n_seg): ++ for k in range(self._n_seg): + + i_from = k * n_bins_per_segment + i_to = (k+1) * n_bins_per_segment +@@ -159,7 +157,7 @@ + self._init_convolution(parameters) + + # calculates the impulses caused by the segments +- for i in xrange(self._n_seg): ++ for i in range(self._n_seg): + i_from = i*self._n_bins + i_to = (i+1)*self._n_bins + np.copyto(self._impulses_from_segments[i][:len(self._dashed_impulse_responses[i])], +@@ -168,7 +166,7 @@ + + # gathers the output signal + output_signal = np.zeros(len(signal)) +- for i in xrange(self._n_seg): ++ for i in range(self._n_seg): + + i_from = i*self._n_bins + i_to = (i+1)*self._n_bins +@@ -294,7 +292,7 @@ + bin_spacing = np.mean(impulse_ref_edges[:,1]-impulse_ref_edges[:,0]) + impulse_values = np.zeros(len(impulse_bin_mids)) + +- for i in xrange(self._i_from,(self._i_to+1)): ++ for i in range(self._i_from,(self._i_to+1)): + copy_mid = i*self._spacing + copy_from = copy_mid - 0.5 * bin_spacing + copy_to = copy_mid + 0.5 * bin_spacing +@@ -345,10 +343,8 @@ + # else: + # raise ValueError('Unknown value in ConvolutionFromFile._calc_type') + +-class ConvolutionFilter(Convolution): ++class ConvolutionFilter(Convolution, metaclass=ABCMeta): + """ An abstract class for the filtes based on convolution.""" +- +- __metaclass__ = ABCMeta + + def __init__(self,scaling, zero_bin_value=None, normalization=None, + f_cutoff_2nd=None, **kwargs): +@@ -400,7 +396,7 @@ + ref_points = [] + mids = bin_mids(impulse_ref_edges) + n_bins_per_segment = int(len(impulse)/n_segments) +- for i in xrange(n_segments): ++ for i in range(n_segments): + i_from = i * n_bins_per_segment + i_to = (i + 1) * n_bins_per_segment + ref_points.append(np.mean(mids[i_from:i_to])) +@@ -424,7 +420,7 @@ + f_h = self._normalization[1] + + norm_coeff = 0. +- for i in xrange(-1000,1000): ++ for i in range(-1000,1000): + x = float(i)* (1./f_h) * self._scaling + norm_coeff += self._impulse_response(x) + #print norm_coeff +--- ../PyHEADTAIL/PyHEADTAIL/feedback/processors/linear_transform.py (original) ++++ ../PyHEADTAIL/PyHEADTAIL/feedback/processors/linear_transform.py (refactored) +@@ -6,7 +6,7 @@ + from scipy import linalg + from cython_hacks import cython_matrix_product + from ..core import default_macros +-import abstract_filter_responses ++from . import abstract_filter_responses + + """Signal processors based on linear transformation. + +@@ -14,8 +14,7 @@ + @date: 11/10/2017 + """ + +-class LinearTransform(object): +- __metaclass__ = ABCMeta ++class LinearTransform(object, metaclass=ABCMeta): + """ An abstract class for signal processors which are based on linear transformation. The signal is processed by + calculating a dot product of a transfer matrix and a signal. 
The transfer matrix is produced with an abstract + method, namely response_function(*args), which returns an elements of the matrix (an effect of +@@ -93,7 +92,7 @@ + elif self._mode == 'bunch_by_bunch': + output_signal = np.zeros(len(signal)) + +- for i in xrange(self._n_segments): ++ for i in range(self._n_segments): + idx_from = i * self._n_bins_per_segment + idx_to = (i+1) * self._n_bins_per_segment + np.copyto(output_signal[idx_from:idx_to],cython_matrix_product(self._matrix, signal[idx_from:idx_to])) +@@ -111,10 +110,10 @@ + + def print_matrix(self): + for row in self._matrix: +- print "[", ++ print("[", end=' ') + for element in row: +- print "{:6.3f}".format(element), +- print "]" ++ print("{:6.3f}".format(element), end=' ') ++ print("]") + + def __generate_matrix(self,parameters, bin_edges, bin_midpoints): + +@@ -217,8 +216,7 @@ + return np.interp(bin_mid - ref_bin_mid, self._data[:, 0], self._data[:, 1]) + + +-class LinearTransformFilter(LinearTransform): +- __metaclass__ = ABCMeta ++class LinearTransformFilter(LinearTransform, metaclass=ABCMeta): + """ A general class for (analog) filters. Impulse response of the filter must be determined by overwriting + the function raw_impulse_response. + +@@ -273,7 +271,7 @@ + f_h = self._filter_normalization[1] + + norm_coeff = 0. +- for i in xrange(-1000,1000): ++ for i in range(-1000,1000): + x = float(i)* (1./f_h) * self._scaling + norm_coeff += self._impulse_response(x) + +--- ../PyHEADTAIL/PyHEADTAIL/feedback/processors/misc.py (original) ++++ ../PyHEADTAIL/PyHEADTAIL/feedback/processors/misc.py (refactored) +@@ -39,7 +39,7 @@ + output_signal = np.zeros(len(signal)) + ones = np.ones(n_slices_per_segment) + +- for i in xrange(n_segments): ++ for i in range(n_segments): + idx_from = i * n_slices_per_segment + idx_to = (i + 1) * n_slices_per_segment + np.copyto(output_signal[idx_from:idx_to], ones * np.mean(signal[idx_from:idx_to])) +--- ../PyHEADTAIL/PyHEADTAIL/feedback/processors/multiplication.py (original) ++++ ../PyHEADTAIL/PyHEADTAIL/feedback/processors/multiplication.py (refactored) +@@ -9,8 +9,7 @@ + @date: 11/10/2017 + """ + +-class Multiplication(object): +- __metaclass__ = ABCMeta ++class Multiplication(object, metaclass=ABCMeta): + """ An abstract class which multiplies the input signal by an array. The multiplier array is produced by taking + a slice property (determined by the input parameter 'seed') and passing it through the abstract method + multiplication_function(seed). 
+@@ -74,7 +73,7 @@ + np.copyto(self._multiplier, ((parameters['bin_edges'][:,1]+parameters['bin_edges'][:,0])/2.)) + elif self._seed == 'normalized_bin_midpoint': + +- for i in xrange(parameters['n_segments']): ++ for i in range(parameters['n_segments']): + i_from = i * parameters['n_bins_per_segment'] + i_to = (i + 1) * parameters['n_bins_per_segment'] + +@@ -107,7 +106,7 @@ + + elif self._normalization == 'segment_sum': + norm_coeff = np.ones(len(self._multiplier)) +- for i in xrange(parameters['n_segments']): ++ for i in range(parameters['n_segments']): + i_from = i*parameters['n_bins_per_segment'] + i_to = (i+1)*parameters['n_bins_per_segment'] + norm_coeff[i_from:i_to] = norm_coeff[i_from:i_to]*float(np.sum(self._multiplier[i_from:i_to])) +@@ -117,7 +116,7 @@ + + elif self._normalization == 'segment_average': + norm_coeff = np.ones(len(self._multiplier)) +- for i in xrange(parameters['n_segments']): ++ for i in range(parameters['n_segments']): + i_from = i*parameters['n_bins_per_segment'] + i_to = (i+1)*parameters['n_bins_per_segment'] + norm_coeff[i_from:i_to] = norm_coeff[i_from:i_to]*float(np.sum(self._multiplier[i_from:i_to]))/float(parameters['n_bins_per_segment']) +@@ -129,7 +128,7 @@ + elif self._normalization == 'segment_integral': + bin_widths = parameters['bin_edges'][:,1] - parameters['bin_edges'][:,0] + norm_coeff = np.ones(len(self._multiplier)) +- for i in xrange(parameters['n_segments']): ++ for i in range(parameters['n_segments']): + i_from = i*parameters['n_bins_per_segment'] + i_to = (i+1)*parameters['n_bins_per_segment'] + norm_coeff[i_from:i_to] = norm_coeff[i_from:i_to]*float(np.sum(self._multiplier[i_from:i_to]*bin_widths[i_from:i_to])) +@@ -139,7 +138,7 @@ + + elif self._normalization == 'segment_min': + norm_coeff = np.ones(len(self._multiplier)) +- for i in xrange(parameters['n_segments']): ++ for i in range(parameters['n_segments']): + i_from = i*parameters['n_bins_per_segment'] + i_to = (i+1)*parameters['n_bins_per_segment'] + norm_coeff[i_from:i_to] = norm_coeff[i_from:i_to]*float(np.min(self._multiplier[i_from:i_to])) +@@ -149,7 +148,7 @@ + + elif self._normalization == 'segment_max': + norm_coeff = np.ones(len(self._multiplier)) +- for i in xrange(parameters['n_segments']): ++ for i in range(parameters['n_segments']): + i_from = i*parameters['n_bins_per_segment'] + i_to = (i+1)*parameters['n_bins_per_segment'] + norm_coeff[i_from:i_to] = norm_coeff[i_from:i_to]*float(np.max(self._multiplier[i_from:i_to])) +--- ../PyHEADTAIL/PyHEADTAIL/feedback/processors/register.py (original) ++++ ../PyHEADTAIL/PyHEADTAIL/feedback/processors/register.py (refactored) +@@ -75,7 +75,7 @@ + + return self + +- def next(self): ++ def __next__(self): + if self._n_iter_left < 1: + raise StopIteration + +@@ -127,9 +127,7 @@ + + + +-class Combiner(object): +- __metaclass__ = ABCMeta +- ++class Combiner(object, metaclass=ABCMeta): + def __init__(self, registers, target_location, target_beta=None, + additional_phase_advance=0., beta_conversion = '0_deg', **kwargs): + """ +@@ -359,7 +357,7 @@ + + coefficients = np.zeros(self._n_taps) + +- for i in xrange(self._n_taps): ++ for i in range(self._n_taps): + n = self._n_taps-i-1 + n -= self._n_taps/2 + h = 0. 
+@@ -518,7 +516,7 @@ + if self._warning_printed == False: + if (readings_phase_difference%(-1.*np.pi) > 0.2) or (readings_phase_difference%np.pi < 0.2): + self._warning_printed = True +- print "WARNING: It is recommended that the angle between the readings is at least 12 deg" ++ print("WARNING: It is recommended that the angle between the readings is at least 12 deg") + + target_location_difference = target_location - signal_1_location + if target_location_difference < 0.: +@@ -729,7 +727,7 @@ + target_beta = parameters['beta'] + extra_phase = self._additional_phase_advance + +- if isinstance(self._combiner_type, (str,unicode)): ++ if isinstance(self._combiner_type, str): + if self._combiner_type == 'vector_sum': + self._combiner = VectorSumCombiner(registers, target_location, + target_beta, extra_phase) +--- ../PyHEADTAIL/PyHEADTAIL/feedback/processors/resampling.py (original) ++++ ../PyHEADTAIL/PyHEADTAIL/feedback/processors/resampling.py (refactored) +@@ -123,21 +123,21 @@ + + bin_edges = None + +- for i in xrange(self._n_extras): ++ for i in range(self._n_extras): + offset = start_mid - (self._n_extras-i)*segment_length + if bin_edges is None: + bin_edges = np.copy(segment_bin_edges+offset) + else: + bin_edges = append_bin_edges(bin_edges, segment_bin_edges+offset) + +- for i in xrange(n_sampled_sequencies): ++ for i in range(n_sampled_sequencies): + offset = i*segment_length + start_mid + if bin_edges is None: + bin_edges = np.copy(segment_bin_edges+offset) + else: + bin_edges = append_bin_edges(bin_edges, segment_bin_edges+offset) + +- for i in xrange(self._n_extras): ++ for i in range(self._n_extras): + offset = start_mid + (i+n_sampled_sequencies)*segment_length + if bin_edges is None: + bin_edges = np.copy(segment_bin_edges+offset) +@@ -218,7 +218,7 @@ + + temp_edges = np.zeros((multiplier, 2)) + +- for i in xrange(multiplier): ++ for i in range(multiplier): + temp_edges[i,0] = edges[0] + i * new_bin_width + temp_edges[i,1] = edges[0] + (i + 1) * new_bin_width + +@@ -254,8 +254,8 @@ + n_bins_per_segment = int(np.floor(original_n_bins_per_segment/multiplier)) + new_edges = None + +- for j in xrange(parameters['n_segments']): +- for i in xrange(n_bins_per_segment): ++ for j in range(parameters['n_segments']): ++ for i in range(n_bins_per_segment): + first_edge = j * original_n_bins_per_segment + i * multiplier + last_edge = j * original_n_bins_per_segment + (i + 1) * multiplier -1 + +@@ -291,7 +291,7 @@ + input_bin_mids = bin_mids(parameters['bin_edges']) + output_bin_mids = bin_mids(self._output_parameters['bin_edges']) + +- for i in xrange(parameters['n_segments']): ++ for i in range(parameters['n_segments']): + i_min = i * parameters['n_bins_per_segment'] + i_max = (i + 1) * parameters['n_bins_per_segment'] - 1 + segment_min_z = input_bin_mids[i_min] +--- ../PyHEADTAIL/PyHEADTAIL/field_maps/efields_funcs.py (original) ++++ ../PyHEADTAIL/PyHEADTAIL/field_maps/efields_funcs.py (refactored) +@@ -2,7 +2,7 @@ + @authors: Vadim Gubaidulin, Adrian Oeftiger + @date: 18.02.2020 + ''' +-from __future__ import division ++ + + from PyHEADTAIL.general.element import Element + from PyHEADTAIL.particles.slicing import clean_slices +--- ../PyHEADTAIL/PyHEADTAIL/gpu/gpu_wrap.py (original) ++++ ../PyHEADTAIL/PyHEADTAIL/gpu/gpu_wrap.py (refactored) +@@ -565,9 +565,9 @@ + elif dtype.itemsize == 4 and dtype.kind is 'i': + thrust.get_sort_perm_int(to_sort.copy(), permutation) + else: +- print(to_sort.dtype) +- print(to_sort.dtype.itemsize) +- print(to_sort.dtype.kind) ++ print((to_sort.dtype)) ++ 
print((to_sort.dtype.itemsize)) ++ print((to_sort.dtype.kind)) + raise TypeError('Currently only float64 and int32 types can be sorted') + return permutation + +@@ -603,9 +603,9 @@ + elif dtype.itemsize == 4 and dtype.kind is 'i': + thrust.apply_sort_perm_int(array, tmp, permutation) + else: +- print(array.dtype) +- print(array.dtype.itemsize) +- print(array.dtype.kind) ++ print((array.dtype)) ++ print((array.dtype.itemsize)) ++ print((array.dtype.kind)) + raise TypeError('Currently only float64 and int32 types can be sorted') + return tmp + +--- ../PyHEADTAIL/PyHEADTAIL/impedances/wakes.py (original) ++++ ../PyHEADTAIL/PyHEADTAIL/impedances/wakes.py (refactored) +@@ -69,7 +69,7 @@ + lgd += ['Bin edges'] + ax2.legend(lgd) + +- print('\n--> Resulting number of slices: {:g}'.format(len(ss))) ++ print(('\n--> Resulting number of slices: {:g}'.format(len(ss)))) + + return ax1 + +--- ../PyHEADTAIL/PyHEADTAIL/particles/slicing.py (original) ++++ ../PyHEADTAIL/PyHEADTAIL/particles/slicing.py (refactored) +@@ -105,7 +105,7 @@ + self._pidx_begin = None + self._pidx_end = None + +- for p_name, p_value in beam_parameters.items(): ++ for p_name, p_value in list(beam_parameters.items()): + if hasattr(self, p_name): + raise ValueError('SliceSet.' + p_name + ' already exists!' + + 'Do not overwrite existing SliceSet ' + +--- ../PyHEADTAIL/PyHEADTAIL/testing/script-tests/test_radiation_damping_time_and_equilibrum_values.py (original) ++++ ../PyHEADTAIL/PyHEADTAIL/testing/script-tests/test_radiation_damping_time_and_equilibrum_values.py (refactored) +@@ -58,7 +58,7 @@ + sx, sy, sz, sdp = [], [], [], [] + epsx, epsy, epsz = [], [], [] + for i_turn in range(n_turns): +- print('Turn %d/%d'%(i_turn, n_turns)) ++ print(('Turn %d/%d'%(i_turn, n_turns))) + machine.track(bunch) + + beam_x.append(bunch.mean_x()) +--- ../PyHEADTAIL/PyHEADTAIL/testing/script-tests/test_radiation_energy_loss.py (original) ++++ ../PyHEADTAIL/PyHEADTAIL/testing/script-tests/test_radiation_energy_loss.py (refactored) +@@ -41,6 +41,6 @@ + SynchrotronRadiationLongitudinal.track(bunch) + dp_after = bunch.mean_dp() + +-print('Energy loss\nEvaluated :%.6e [eV]\nExpected :%.6e [eV]\nERROR :%.2f'%((dp_before-dp_after)*machine.p0*c/np.abs(machine.charge), +- E_loss_eV,(E_loss_eV-((dp_before-dp_after)*machine.p0*c/np.abs(machine.charge)))*100/E_loss_eV)+'%') ++print(('Energy loss\nEvaluated :%.6e [eV]\nExpected :%.6e [eV]\nERROR :%.2f'%((dp_before-dp_after)*machine.p0*c/np.abs(machine.charge), ++ E_loss_eV,(E_loss_eV-((dp_before-dp_after)*machine.p0*c/np.abs(machine.charge)))*100/E_loss_eV)+'%')) + +--- ../PyHEADTAIL/PyHEADTAIL/testing/script-tests/test_radiation_with_non_linear_bucket.py (original) ++++ ../PyHEADTAIL/PyHEADTAIL/testing/script-tests/test_radiation_with_non_linear_bucket.py (refactored) +@@ -79,7 +79,7 @@ + for i in range(n_turns): + + machine.track(bunch) +- print('Turn %d/%d'%(i, n_turns)) ++ print(('Turn %d/%d'%(i, n_turns))) + sigma_x[i] = bunch.sigma_x() + mean_x[i] = bunch.mean_x() + epsn_x[i] = bunch.epsn_x() +--- ../PyHEADTAIL/PyHEADTAIL/testing/script-tests/test_synchrotron_LHC.py (original) ++++ ../PyHEADTAIL/PyHEADTAIL/testing/script-tests/test_synchrotron_LHC.py (refactored) +@@ -44,7 +44,7 @@ + beam_alpha_y = [] + beam_beta_y = [] + for i_ele, m in enumerate(machine.one_turn_map): +- print('Element %d/%d'%(i_ele, len(machine.one_turn_map))) ++ print(('Element %d/%d'%(i_ele, len(machine.one_turn_map)))) + beam_alpha_x.append(bunch.alpha_Twiss_x()) + beam_beta_x.append(bunch.beta_Twiss_x()) + 
beam_alpha_y.append(bunch.alpha_Twiss_y()) +@@ -92,7 +92,7 @@ + sx, sy, sz = [], [], [] + epsx, epsy, epsz = [], [], [] + for i_turn in range(n_turns): +- print('Turn %d/%d'%(i_turn, n_turns)) ++ print(('Turn %d/%d'%(i_turn, n_turns))) + machine.track(bunch) + + beam_x.append(bunch.mean_x()) +@@ -161,15 +161,15 @@ + + LHC_with_octupole_injection = LHC(machine_configuration='Injection', n_segments=5, octupole_knob = -1.5) + print('450GeV:') +-print('i_octupole_focusing =',LHC_with_octupole_injection.i_octupole_focusing) +-print('i_octupole_defocusing =',LHC_with_octupole_injection.i_octupole_defocusing) ++print(('i_octupole_focusing =',LHC_with_octupole_injection.i_octupole_focusing)) ++print(('i_octupole_defocusing =',LHC_with_octupole_injection.i_octupole_defocusing)) + print('in the machine we get 19.557') + print(' ') + LHC_with_octupole_flattop = LHC(machine_configuration='Injection', n_segments=5, p0=6.5e12*e/c, octupole_knob = -2.9) + + print('6.5TeV:') +-print('i_octupole_focusing =',LHC_with_octupole_flattop.i_octupole_focusing) +-print('i_octupole_defocusing =',LHC_with_octupole_flattop.i_octupole_defocusing) ++print(('i_octupole_focusing =',LHC_with_octupole_flattop.i_octupole_focusing)) ++print(('i_octupole_defocusing =',LHC_with_octupole_flattop.i_octupole_defocusing)) + print('in the machine we get 546.146') + + plt.show() +--- ../PyHEADTAIL/PyHEADTAIL/testing/script-tests/test_synchrotron_electrons_CLIC_DR.py (original) ++++ ../PyHEADTAIL/PyHEADTAIL/testing/script-tests/test_synchrotron_electrons_CLIC_DR.py (refactored) +@@ -36,7 +36,7 @@ + beam_alpha_y = [] + beam_beta_y = [] + for i_ele, m in enumerate(machine.one_turn_map): +- print('Element %d/%d'%(i_ele, len(machine.one_turn_map))) ++ print(('Element %d/%d'%(i_ele, len(machine.one_turn_map)))) + beam_alpha_x.append(bunch.alpha_Twiss_x()) + beam_beta_x.append(bunch.beta_Twiss_x()) + beam_alpha_y.append(bunch.alpha_Twiss_y()) +@@ -84,7 +84,7 @@ + sx, sy, sz = [], [], [] + epsx, epsy, epsz = [], [], [] + for i_turn in range(n_turns): +- print('Turn %d/%d'%(i_turn, n_turns)) ++ print(('Turn %d/%d'%(i_turn, n_turns))) + machine.track(bunch) + + beam_x.append(bunch.mean_x()) +--- ../PyHEADTAIL/PyHEADTAIL/testing/unittests/test_cobra.py (original) ++++ ../PyHEADTAIL/PyHEADTAIL/testing/unittests/test_cobra.py (refactored) +@@ -46,7 +46,7 @@ + """ + v_cobra = cf.cov(self.data1, self.data2) + v_numpy = np.cov(self.data1, self.data2)[0,1] +- self.assertAlmostEquals(v_cobra, v_numpy, places=self.tolerance, ++ self.assertAlmostEqual(v_cobra, v_numpy, places=self.tolerance, + msg='cobra cov() yields a different result ' + + 'than numpy.cov()') + +@@ -58,10 +58,10 @@ + bunch = self.generate_gaussian6dBunch(1000000, 0, 0, 1, 1, 5, 100) + eta_prime_x = cf.dispersion(bunch.xp, bunch.dp) + weak_tol = 2 +- self.assertAlmostEquals(eta_prime_x, 0., places=weak_tol, ++ self.assertAlmostEqual(eta_prime_x, 0., places=weak_tol, + msg='eta_prime_x is not zero but ' + str(eta_prime_x)) + eta_prime_y = cf.dispersion(bunch.yp, bunch.dp) +- self.assertAlmostEquals(eta_prime_y, 0., places=weak_tol, ++ self.assertAlmostEqual(eta_prime_y, 0., places=weak_tol, + msg='eta_prime_y is not zero but ' + str(eta_prime_y)) + + +@@ -71,7 +71,7 @@ + """ + d1 = np.random.normal(100, 2., self.n_samples) + d2 = np.random.normal(200, 0.2, self.n_samples) +- self.assertAlmostEquals(cf.cov(d1, d2), 0.0, ++ self.assertAlmostEqual(cf.cov(d1, d2), 0.0, + places=self.tolerance, + msg='cobra cov() of two uncorrelated ' + + 'Gaussians != 0') +--- 
../PyHEADTAIL/PyHEADTAIL/testing/unittests/test_particles.py (original) ++++ ../PyHEADTAIL/PyHEADTAIL/testing/unittests/test_particles.py (refactored) +@@ -122,7 +122,7 @@ + + def test_means(self): + ''' Tests the mean() method of the Particle class ''' +- self.assertAlmostEquals(self.bunch.mean_xp(), np.mean(self.bunch.xp), ++ self.assertAlmostEqual(self.bunch.mean_xp(), np.mean(self.bunch.xp), + places=5, msg='np.mean() and bunch.mean_xp() ' + 'yield different results') + +@@ -130,7 +130,7 @@ + '''Test the sigma_z() method of the Particle class + Only check the first 3 digits because the sample is small (2048) + ''' +- self.assertAlmostEquals(self.bunch.sigma_z(), np.std(self.bunch.z), ++ self.assertAlmostEqual(self.bunch.sigma_z(), np.std(self.bunch.z), + places=3, msg='np.std() and bunch.sigma_z() ' + 'yield different results') + +@@ -164,7 +164,7 @@ + emittance for a transverse-only beam. + ''' + beam_transverse = self.create_transverse_only_bunch() +- self.assertAlmostEquals( ++ self.assertAlmostEqual( + beam_transverse.epsn_x(), + beam_transverse.effective_normalized_emittance_x(), + places = 5, +@@ -173,7 +173,7 @@ + 'for a transverse only beam.' + ) + +- self.assertAlmostEquals( ++ self.assertAlmostEqual( + beam_transverse.epsn_y(), + beam_transverse.effective_normalized_emittance_y(), + places = 5, +@@ -200,7 +200,7 @@ + old[attr] = getattr(bunch, attr).copy() + bunch.sort_for('z') + new_idx = bunch.id - 1 +- for attr, oldarray in old.items(): ++ for attr, oldarray in list(old.items()): + self.assertTrue(np.all(oldarray[new_idx] == getattr(bunch, attr)), + msg="beam.sort_for('z') should reorder all beam " + "particle arrays, but beam." + str(attr) + " is " +--- ../PyHEADTAIL/PyHEADTAIL/testing/unittests/autoruntests/SlicingTest.py (original) ++++ ../PyHEADTAIL/PyHEADTAIL/testing/unittests/autoruntests/SlicingTest.py (refactored) +@@ -235,7 +235,7 @@ + + beam_parameters = slicer.extract_beam_parameters(bunch) + +- for p_name, p_value in beam_parameters.items(): ++ for p_name, p_value in list(beam_parameters.items()): + pass + + # In[14]: diff --git a/PyHEADTAIL/cobra_functions/pdf_integrators_2d.py b/PyHEADTAIL/cobra_functions/pdf_integrators_2d.py index 92437b2e..58592edf 100644 --- a/PyHEADTAIL/cobra_functions/pdf_integrators_2d.py +++ b/PyHEADTAIL/cobra_functions/pdf_integrators_2d.py @@ -62,7 +62,7 @@ def compute_mean_quad(psi, ylimit_min, ylimit_max, xmin, xmax, direction='x'): M, error = dblquad(f, xmin, xmax, ylimit_min, ylimit_max) - return M/Q + return M/Q def compute_var_quad(psi, ylimit_min, ylimit_max, xmin, xmax, direction='x'): '''Compute the second moment (variance) with respect to the x or diff --git a/PyHEADTAIL/elens/.vscode/.ropeproject/config.py b/PyHEADTAIL/elens/.vscode/.ropeproject/config.py new file mode 100644 index 00000000..dee2d1ae --- /dev/null +++ b/PyHEADTAIL/elens/.vscode/.ropeproject/config.py @@ -0,0 +1,114 @@ +# The default ``config.py`` +# flake8: noqa + + +def set_prefs(prefs): + """This function is called before opening the project""" + + # Specify which files and folders to ignore in the project. + # Changes to ignored resources are not added to the history and + # VCSs. Also they are not returned in `Project.get_files()`. + # Note that ``?`` and ``*`` match all characters but slashes. 
+ # '*.pyc': matches 'test.pyc' and 'pkg/test.pyc' + # 'mod*.pyc': matches 'test/mod1.pyc' but not 'mod/1.pyc' + # '.svn': matches 'pkg/.svn' and all of its children + # 'build/*.o': matches 'build/lib.o' but not 'build/sub/lib.o' + # 'build//*.o': matches 'build/lib.o' and 'build/sub/lib.o' + prefs['ignored_resources'] = ['*.pyc', '*~', '.ropeproject', + '.hg', '.svn', '_svn', '.git', '.tox'] + + # Specifies which files should be considered python files. It is + # useful when you have scripts inside your project. Only files + # ending with ``.py`` are considered to be python files by + # default. + # prefs['python_files'] = ['*.py'] + + # Custom source folders: By default rope searches the project + # for finding source folders (folders that should be searched + # for finding modules). You can add paths to that list. Note + # that rope guesses project source folders correctly most of the + # time; use this if you have any problems. + # The folders should be relative to project root and use '/' for + # separating folders regardless of the platform rope is running on. + # 'src/my_source_folder' for instance. + # prefs.add('source_folders', 'src') + + # You can extend python path for looking up modules + # prefs.add('python_path', '~/python/') + + # Should rope save object information or not. + prefs['save_objectdb'] = True + prefs['compress_objectdb'] = False + + # If `True`, rope analyzes each module when it is being saved. + prefs['automatic_soa'] = True + # The depth of calls to follow in static object analysis + prefs['soa_followed_calls'] = 0 + + # If `False` when running modules or unit tests "dynamic object + # analysis" is turned off. This makes them much faster. + prefs['perform_doa'] = True + + # Rope can check the validity of its object DB when running. + prefs['validate_objectdb'] = True + + # How many undos to hold? + prefs['max_history_items'] = 32 + + # Shows whether to save history across sessions. + prefs['save_history'] = True + prefs['compress_history'] = False + + # Set the number spaces used for indenting. According to + # :PEP:`8`, it is best to use 4 spaces. Since most of rope's + # unit-tests use 4 spaces it is more reliable, too. + prefs['indent_size'] = 4 + + # Builtin and c-extension modules that are allowed to be imported + # and inspected by rope. + prefs['extension_modules'] = [] + + # Add all standard c-extensions to extension_modules list. + prefs['import_dynload_stdmods'] = True + + # If `True` modules with syntax errors are considered to be empty. + # The default value is `False`; When `False` syntax errors raise + # `rope.base.exceptions.ModuleSyntaxError` exception. + prefs['ignore_syntax_errors'] = False + + # If `True`, rope ignores unresolvable imports. Otherwise, they + # appear in the importing namespace. + prefs['ignore_bad_imports'] = False + + # If `True`, rope will insert new module imports as + # `from import ` by default. + prefs['prefer_module_from_imports'] = False + + # If `True`, rope will transform a comma list of imports into + # multiple separate import statements when organizing + # imports. + prefs['split_imports'] = False + + # If `True`, rope will remove all top-level import statements and + # reinsert them at the top of the module when making changes. + prefs['pull_imports_to_top'] = True + + # If `True`, rope will sort imports alphabetically by module name instead + # of alphabetically by import statement, with from imports after normal + # imports. 
+    prefs['sort_imports_alphabetically'] = False
+
+    # Location of implementation of
+    # rope.base.oi.type_hinting.interfaces.ITypeHintingFactory In general
+    # case, you don't have to change this value, unless you're an rope expert.
+    # Change this value to inject you own implementations of interfaces
+    # listed in module rope.base.oi.type_hinting.providers.interfaces
+    # For example, you can add you own providers for Django Models, or disable
+    # the search type-hinting in a class hierarchy, etc.
+    prefs['type_hinting_factory'] = (
+        'rope.base.oi.type_hinting.factory.default_type_hinting_factory')
+
+
+def project_opened(project):
+    """This function is called after opening the project"""
+    # Do whatever you like here!
diff --git a/PyHEADTAIL/elens/__init__.py b/PyHEADTAIL/elens/__init__.py
new file mode 100644
index 00000000..d2d966e2
--- /dev/null
+++ b/PyHEADTAIL/elens/__init__.py
@@ -0,0 +1 @@
+from .. import __version__
diff --git a/PyHEADTAIL/elens/elens.py b/PyHEADTAIL/elens/elens.py
new file mode 100644
index 00000000..9ccf2534
--- /dev/null
+++ b/PyHEADTAIL/elens/elens.py
@@ -0,0 +1,312 @@
+'''
+@authors: Vadim Gubaidulin, Adrian Oeftiger
+@date: 18.02.2020
+'''
+from __future__ import division
+
+from PyHEADTAIL.general.element import Element
+from PyHEADTAIL.particles import slicing
+import numpy as np
+from scipy.constants import c, m_e, e, pi
+from scipy.interpolate import splrep, splev
+from scipy.integrate import quad
+from scipy.special import i0e, i1e, i0, i1, ive
+from functools import wraps
+
+from PyHEADTAIL.general import pmath as pm
+from PyHEADTAIL.field_maps import efields_funcs as efields
+from PyHEADTAIL.trackers.detuners import DetunerCollection
+
+
+class PulsedLensDetuner(DetunerCollection):
+    def __init__(self, dQmax, sigma_ratio, beta_z, static, dynamic):
+        self.dQmax = dQmax
+        self.segment_detuners = []
+        self.sigma_ratio = sigma_ratio
+        self.beta_z = beta_z
+        self.static = static
+        self.dynamic = dynamic
+
+    def generate_segment_detuner(self, dmu_x, dmu_y, **kwargs):
+        dapp_xz = self.dQmax
+        dapp_yz = self.dQmax
+        dapp_xz *= dmu_x
+        dapp_yz *= dmu_y
+        detuner = PulsedLensSegmentDetuner(
+            dapp_xz, dapp_yz, self.sigma_ratio, self.beta_z, self.static,
+            self.dynamic)
+        self.segment_detuners.append(detuner)
+
+
+class PulsedLensSegmentDetuner(object):
+    def __init__(self, dapp_xz, dapp_yz, sigma_ratio, beta_z, static, dynamic):
+        self.dapp_xz = dapp_xz
+        self.dapp_yz = dapp_yz
+        self.sigma_ratio = sigma_ratio
+        self.beta_z = beta_z
+        self.static = static
+        self.dynamic = dynamic
+
+    def detune(self, beam):
+        J_z = (beam.z**2 + (beam.dp*self.beta_z)**2)/(2*self.beta_z)
+        phi_z = np.arctan2(beam.dp*self.beta_z, beam.z)
+        eps_z = beam.sigma_z()**2/self.beta_z
+        arg = 0.5*J_z/eps_z*self.sigma_ratio
+        bessel_term_Z_static = i0e(arg)
+        bessel_term_Z_dynamic = (np.cos(2*phi_z)*ive(2, arg)
+                                 + np.cos(4*phi_z)*ive(4, arg)
+                                 + np.cos(6*phi_z)*ive(6, arg)
+                                 + np.cos(8*phi_z)*ive(8, arg)
+                                 + np.cos(10*phi_z)*ive(10, arg))
+        dQx = self.dapp_xz*(bessel_term_Z_static*self.static
+                            + 2*bessel_term_Z_dynamic*self.dynamic)
+        dQy = self.dapp_yz*(bessel_term_Z_static*self.static
+                            + 2*bessel_term_Z_dynamic*self.dynamic)
+        return dQx, dQy
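In formula form, the detune() method above evaluates (this is a restatement of the code, not an addition to the patch; here $a = \sigma_{\mathrm{ratio}}\, J_z / (2\,\varepsilon_z)$, $I_n$ are the modified Bessel functions, and the exponentially scaled i0e/ive calls supply the factor $e^{-a}$):

$$
\Delta Q_{x,y}(J_z, \phi_z) = \Delta Q^{\max}_{x,y}\, e^{-a}
\left[\, s\, I_0(a) \;+\; 2\, d \sum_{k=1}^{5} \cos(2k\,\phi_z)\, I_{2k}(a) \right],
$$

where $s$ and $d$ are the `static` and `dynamic` weights and the sum over the even harmonics is truncated at $k = 5$ in the code.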
+
+
+class ElectronLensDetuner(DetunerCollection):
+    def __init__(self, dQmax, r, beta_x, beta_y):
+        self.dQmax = dQmax
+        self.r = r
+        self.beta_x = beta_x
+        self.beta_y = beta_y
+        self.segment_detuners = []
+
+    def generate_segment_detuner(self, dmu_x, dmu_y, **kwargs):
+        dapp_xz = self.dQmax
+        dapp_yz = self.dQmax
+        dapp_xz *= dmu_x
+        dapp_yz *= dmu_y
+        detuner = ElectronLensSegmentDetuner(
+            dapp_xz, dapp_yz, self.r, self.beta_x, self.beta_y)
+        self.segment_detuners.append(detuner)
+
+
+class ElectronLensSegmentDetuner(object):
+    def __init__(self, dapp_xz, dapp_yz, r, beta_x, beta_y):
+        self.dapp_xz = dapp_xz
+        self.dapp_yz = dapp_yz
+        self.beta_x = beta_x
+        self.beta_y = beta_y
+        self.r = r
+
+    def detune(self, beam):
+        def _bessel_term(u, kx, ky):
+            return (i0(kx*u)-i1(kx*u))*i0(ky*u)*np.exp((kx+ky)*u)
+        Jx = 0.5*(1/self.beta_x*beam.x**2+self.beta_x*beam.xp**2)
+        Jy = 0.5*(1/self.beta_y*beam.y**2+self.beta_y*beam.yp**2)
+        # proper implementation through integration
+        # Kx = Jx/beam.epsn_x()*self.r**2
+        # Ky = Jy/beam.epsn_y()*self.r**2
+        # K = tuple(zip(Kx, Ky))
+        # bessel_term_X = np.array([quad(_bessel_term, 0, 1, args=(kx, ky))[0] for (kx, ky) in K])
+        # bessel_term_Y = np.array([quad(_bessel_term, 0, 1, args=(ky, kx))[0] for (kx, ky) in K])
+        # approximate formula from Burov
+        ax = np.sqrt(2.0*Jx/(beam.epsn_x()/beam.betagamma))
+        ay = np.sqrt(2.0*Jy/(beam.epsn_y()/beam.betagamma))
+        bessel_term_X = (192.0-11.0*ax-18.0*np.sqrt(ax*ay)+3.0*ax**2) / \
+            (192.0-11.0*ax-18.0*np.sqrt(ax*ay)+3.0*ax**2+36.0*ax**2+21.0*ay**2)
+        bessel_term_Y = (192.0-11.0*ay-18.0*np.sqrt(ax*ay)+3.0*ay**2) / \
+            (192.0-11.0*ay-18.0*np.sqrt(ax*ay)+3.0*ay**2+36.0*ay**2+21.0*ax**2)
+        dQx = self.dapp_xz*bessel_term_X
+        dQy = self.dapp_yz*bessel_term_Y
+        return dQx, dQy
+
+
+class ElectronLens(Element):
+    '''
+    Contains the implementation of the electromagnetic field generated by an
+    electron lens acting on a particle collection. Acts as a localized kick
+    of a thin element. This implementation assumes that the electromagnetic
+    field of the electron lens beam is not affected by the interaction with
+    the bunch.
+    '''
+    '''
+    Alfven current, used in the expression for the maximum tune shift from an
+    electron lens as defined here:
+    Landau Damping of Beam Instabilities by Electron Lenses
+    V. Shiltsev, Y. Alexahin, A. Burov, and A. Valishev
+    Phys. Rev. Lett. 119, 134802 (2017)
+    '''
+    I_a = 17e3
+    '''Threshold for relative transverse bunch size difference
+    below which the bunch is assumed to be round:
+    abs(1 - sig_y / sig_x) < ratio_threshold ==> round bunch
+    '''
+    ratio_threshold = 1e-3
+    '''Threshold for absolute transverse bunch size difference
+    below which the bunch is assumed to be round:
+    abs(sig_y - sig_x) < absolute_threshold ==> round bunch
+    '''
+    absolute_threshold = 1e-10
+
+    def __init__(self,
+                 L_e,
+                 I_e,
+                 sigma_x,
+                 sigma_y,
+                 beta_e,
+                 dist,
+                 offset_x=0,
+                 offset_y=0,
+                 sig_check=True):
+        '''Arguments:
+        L_e: the length of the interaction region between the electron lens
+            beam and the bunch
+        I_e: a list of floats defining the slicewise electron lens current
+        sigma_x: transverse horizontal rms size of the electron beam
+        sigma_y: transverse vertical rms size of the electron beam
+        beta_e: relativistic beta of the electron bunch. A negative value
+            means the electron beam counterpropagates with respect to the
+            accelerator bunch.
+        dist: electron beam transverse distribution from the list
+            ['GS', 'WB', 'KV', 'LN']
+        sig_check: exchanges x and y quantities for sigma_x < sigma_y
+            and applies the round bunch formulae for sigma_x == sigma_y.
+            sig_check defaults to True and should not usually be False.
+        offset_x: horizontal offset of the electron lens beam with respect
+            to the nominal beam. Defaults to zero, i.e. the electron lens is
+            ideally matched to the beam.
+        offset_y: vertical offset of the electron lens beam with respect
+            to the nominal beam. Defaults to zero, i.e. the electron lens is
+            ideally matched to the beam.
+        '''
+        self.slicer = slicing.UniformBinSlicer(n_slices=len(I_e), n_sigma_z=4)
+        self.L_e = L_e
+        self.I_e = I_e
+        self.sigma_x = sigma_x
+        self.sigma_y = sigma_y
+        self.beta_e = beta_e
+        self.dist = dist
+        self.offset_x = offset_x
+        self.offset_y = offset_y
+
+        assert dist in ['GS', 'WB', 'KV', 'LN'], (
+            'The given distribution type is not understood.')
+        if self.dist == 'GS':
+            self._efieldn = efields._efieldn_mit
+            if sig_check:
+                self._efieldn = efields.add_sigma_check(
+                    self._efieldn, self.dist)
+        elif self.dist == 'WB':
+            self._efieldn = efields._efieldn_wb
+        elif self.dist == 'KV':
+            self._efieldn = efields._efieldn_kv_b
+            if sig_check:
+                self._efieldn = efields.add_sigma_check(
+                    self._efieldn, self.dist)
+        elif self.dist == 'LN':
+            self._efieldn = efields._efieldn_linearized
+
+    @classmethod
+    def RoundDCElectronLens(cls, L_e, dQ_max, ratio, beta_e, dist, bunch):
+        '''
+        Returns a round electron lens matched with a given ratio of the
+        electron lens beam size to the nominal beam size.
+        Arguments:
+            L_e: the length of the interaction region between the electron
+                lens beam and the bunch
+            dQ_max: the maximum tune shift from the electron lens kick
+            ratio: the ratio of the electron lens beam size to the nominal
+                beam size
+            beta_e: relativistic beta of the electron bunch. A negative value
+                means the electron beam counterpropagates with respect to the
+                accelerator bunch.
+            dist: electron beam transverse distribution from the list
+                ['GS', 'WB', 'KV', 'LN']
+            bunch: the nominal bunch
+        '''
+        absolute_threshold = 1e-10
+        I_a = 17e3
+
+        assert np.abs(bunch.sigma_x() - bunch.sigma_y()
+                      ) < absolute_threshold, ('The given bunch is not round')
+        if dist == 'GS':
+            I_e = e/bunch.charge*dQ_max * I_a * (bunch.mass / m_e) * (
+                4 * pi *
+                bunch.epsn_x()) / L_e * ratio**2 * beta_e * bunch.beta / (
+                    1 + pm.abs(beta_e) * bunch.beta)
+        elif dist == 'WB':
+            I_e = e/bunch.charge*3 / 4 * dQ_max * I_a * (bunch.mass / m_e) * (
+                4 * pi *
+                bunch.epsn_x()) / L_e * ratio**2 * beta_e * bunch.beta / (
+                    1 + pm.abs(beta_e) * bunch.beta)
+        elif dist == 'KV':
+            I_e = e/bunch.charge*4*dQ_max * I_a * (bunch.mass / m_e) * (
+                4 * pi *
+                bunch.epsn_x()) / L_e * ratio**2 * beta_e * bunch.beta / (
+                    1 + np.abs(beta_e) * bunch.beta)
+        elif dist == 'LN':
+            I_e = e/bunch.charge*4*dQ_max * I_a * (bunch.mass / m_e) * (
+                4 * pi *
+                bunch.epsn_x()) / L_e * ratio**2 * beta_e * bunch.beta / (
+                    1 + np.abs(beta_e) * bunch.beta)
+        else:
+            I_e = 0
+        return ElectronLens(L_e, [I_e],
+                            ratio * bunch.sigma_x(),
+                            ratio * bunch.sigma_x(),
+                            beta_e,
+                            offset_x=bunch.mean_x(),
+                            offset_y=bunch.mean_y(),
+                            dist=dist,
+                            sig_check=True)
+
+    def get_max_tune_shift(self, bunch):
+        '''Returns the maximum tune shift which the electron lens induces
+        on the given bunch.
+        '''
+        # ensure array arithmetic also for a plain list of slice currents
+        I_e = np.asarray(self.I_e)
+        if self.dist == 'GS':
+            [
+                dQmax,
+            ] = bunch.charge/e*I_e / self.I_a * m_e / bunch.mass * self.L_e / (
+                4 * pi *
+                bunch.epsn_x()) * (bunch.sigma_x() / self.sigma_x)**2 * (
+                    1 + self.beta_e * bunch.beta) / (np.abs(self.beta_e) *
+                                                     bunch.beta)
+        elif self.dist == 'WB':
+            [
+                dQmax,
+            ] = bunch.charge/e*4 * I_e / self.I_a * m_e / bunch.mass * self.L_e / (
+                4 * pi * bunch.epsn_x()) * (1 / 3) * (
+                    bunch.sigma_x() / self.sigma_x)**2 * (
+                        1 + self.beta_e * bunch.beta) / (np.abs(self.beta_e) *
+                                                         bunch.beta)
+        elif self.dist == 'KV':
+            [
+                dQmax,
+            ] = bunch.charge/e*I_e / self.I_a * m_e / bunch.mass * self.L_e / (
+                4 * pi * bunch.epsn_x()) * (1 / 3) * (
+                    bunch.sigma_x() / self.sigma_x)**2 * (
+                        1 + self.beta_e * bunch.beta) / (np.abs(self.beta_e) *
+                                                         bunch.beta)
+
+        return dQmax
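For orientation, a minimal sketch of how this class would be used (not part of the patch; `machine` and `bunch` are assumed to be an existing PyHEADTAIL synchrotron and particle bunch, and all numbers are made up):

```python
# Create a matched round DC electron lens and add it to the one-turn map.
elens = ElectronLens.RoundDCElectronLens(
    L_e=3.,        # interaction region length [m] (made-up value)
    dQ_max=0.01,   # desired maximum tune shift (made-up value)
    ratio=1.,      # electron beam size / bunch size
    beta_e=-0.5,   # negative: counter-propagating electron beam
    dist='GS',     # Gaussian transverse profile
    bunch=bunch)
machine.one_turn_map.append(elens)

# Each call to machine.track(bunch) now applies the lens kick once per turn.
```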
+    def track(self, bunch):
+        '''Apply the kick from the electron lens's electromagnetic field to
+        the bunch's transverse momenta.
+        '''
+        slices = bunch.get_slices(
+            self.slicer, statistics=['mean_x', 'mean_y', 'sigma_x', 'sigma_y'])
+        # Prefactor for round Gaussian bunch from theory:
+        prefactor = -bunch.charge * self.L_e * (
+            1 + self.beta_e * bunch.beta) * 1. / (bunch.gamma * bunch.mass *
+                                                  (bunch.beta * c)**2)
+        # Nlambda_i is the line density [Coul/m] for the current slice
+        for s_i, I_i in enumerate(self.I_e):
+            p_id = slices.particle_indices_of_slice(s_i)
+            if len(p_id) == 0:
+                continue
+            Nlambda_i = I_i / (self.beta_e * c)
+            # Offset for an electron lens
+            en_x, en_y = self.get_efieldn(pm.take(bunch.x, p_id),
+                                          pm.take(bunch.y, p_id),
+                                          self.offset_x, self.offset_y,
+                                          self.sigma_x, self.sigma_y)
+            kicks_x = (en_x * Nlambda_i) * prefactor
+            kicks_y = (en_y * Nlambda_i) * prefactor
+
+            kicked_xp = pm.take(bunch.xp, p_id) + kicks_x
+            kicked_yp = pm.take(bunch.yp, p_id) + kicks_y
+
+            pm.put(bunch.xp, p_id, kicked_xp)
+            pm.put(bunch.yp, p_id, kicked_yp)
+
+    def get_efieldn(self, xr, yr, mean_x, mean_y, sig_x, sig_y):
+        '''The charge-normalised electric field components of a
+        two-dimensional Gaussian charge distribution according to
+        M. Bassetti and G. A. Erskine in CERN-ISR-TH/80-06.
+
+        Return (E_x / Q, E_y / Q).
+        '''
+        x = xr - mean_x
+        y = yr - mean_y
+
+        # absolute values for convergence reasons of erfc
+        en_x, en_y = self._efieldn(pm.abs(x), pm.abs(y), sig_x, sig_y)
+        en_x = pm.abs(en_x) * pm.sign(x)
+        en_y = pm.abs(en_y) * pm.sign(y)
+
+        return en_x, en_y
diff --git a/PyHEADTAIL/feedback/Changelog.txt b/PyHEADTAIL/feedback/Changelog.txt
new file mode 100644
index 00000000..5354f9c1
--- /dev/null
+++ b/PyHEADTAIL/feedback/Changelog.txt
@@ -0,0 +1,5 @@
+# Changelog
+
+## v0.1.x -> v0.2.x
+ * almost completely rewritten
+ * significant API changes
\ No newline at end of file
diff --git a/PyHEADTAIL/feedback/__init__.py b/PyHEADTAIL/feedback/__init__.py
index e69de29b..755dc727 100644
--- a/PyHEADTAIL/feedback/__init__.py
+++ b/PyHEADTAIL/feedback/__init__.py
@@ -0,0 +1,2 @@
+from PyHEADTAIL.feedback.core import version
+# print ('PyHEADTAIL_feedback v' + version + '\n\n')
\ No newline at end of file
diff --git a/PyHEADTAIL/feedback/core.py b/PyHEADTAIL/feedback/core.py
new file mode 100644
index 00000000..4fca7bbd
--- /dev/null
+++ b/PyHEADTAIL/feedback/core.py
@@ -0,0 +1,470 @@
+import numpy as np
+import types
+import copy
+version = '0.2'
+
+"""
+@author Jani Komppula
+@date: 11/10/2017
+
+This file contains the core functions and variables for the signal processing
+framework.
+
+The basic concept is that a signal compatible with the framework
+is generated by using interfaces (e.g. see feedback.py for PyHEADTAIL) which
+utilize the basic functions and tools from the core (this file). A signal
+from the interfaces is processed by passing it through a list of signal
+processors, which represents, for example, a model of a transverse feedback
+system. The signal processors in the list represent elementary analog and
+digital signal processing steps, e.g. from the pickup plate to the kicker.
+The signal processing model of the system itself is independent of the
+interfaces, i.e. it can be used with any signal source (e.g. PyHEADTAIL or
+signal tools for testing).
+
+Both interfaces and signal processors can be developed separately and
+dynamically without interfering with each other. This file contains an
+example of a signal processor. More signal processors can be found in
+the folder 'processors'.
+
+This file has been divided into three sections.
+The sections "SIGNALS AND
+PARAMETERS" and "SIGNAL PROCESSORS" contain specifications, generators and
+examples for signals and signal processors. The core functions for processing
+signals with the signal processors, which can be used in programming
+interfaces and signal processors, are presented in the section "TOOLS".
+"""
+
+
+"""
+## SIGNALS AND PARAMETERS
+=========================
+The definition of a signal depends on context. For example, in one context, a
+signal might be a continuous, time-varying quantity, i.e. an analog signal. In
+another context, it might be a list of numbers which represent values of the
+signal at equally spaced moments in time, i.e. a digital signal. There are
+also situations where signals are more complex.
+
+The framework should, however, work with signals in all of the mentioned
+contexts. Because it is impractical, if not impossible, to develop all signal
+processors to work with all types of signals, different definitions of a
+signal can be used in the framework.
+
+The basic definition of a signal is that it consists of discrete numbers
+in the time domain. More specifically, a signal is a numpy array of
+floating point numbers in the time domain, i.e.
+"""
+
+
+def Signal(signal=[]):
+    """ Returns a prototype for a signal."""
+    return np.array(signal)
+
+
+"""
+Each number in the array corresponds to the signal value in a
+specific time interval, a *bin*. By default, the value of the bin
+is a time average of the signal over the bin, but this is not
+guaranteed because the normalization of the signal processors depends
+on the studied case.
+
+In order to simplify the development of signal processors, signals are
+categorized into three classes based on the assumptions which can be made
+about a signal. Thus, signal processors can be specified to receive and
+transmit signals of specific classes. Due to the hierarchy of the signal
+classes, a signal processor designed for lower class signals is also able to
+process signals from higher classes.
+
+    ### Class 0 signals
+    -------------------
+    There are no limitations for Class 0 signals, i.e. bin spacing and bin
+    length might vary randomly. If the signal can be divided into segments,
+    each segment must have an equal number of bins with equal bin spacing and
+    bin lengths.
+
+    A Class 0 signal gives large freedom to use any kind of signal as an
+    input for the signal processors. In particular, it means that a single
+    array of the slice values from multiple bunches can be used directly as
+    a signal.
+
+    ### Class 1 signals
+    -------------------
+    In this class, it is assumed that the signal can be divided into equal
+    length sequences which are separated by empty spaces. Bin spacing and
+    width must be constant and equal in each segment.
+
+    In practice, this means that the signals from each bunch have an equal
+    number of equally spaced slices/samples.
+
+    ### Class 2 signals
+    -------------------
+    The signal is equally spaced and continuous in time.
+
+    In practice, this means that the signal is continuously sliced/sampled
+    over all bunches including the empty spaces between bunches. This also
+    limits the slicing/sampling rate to be a fraction of the bunch spacing in
+    the case of multi-bunch simulations.
+
+The signal itself does not contain any information about the signal class or
+how the bins are located in physical space. Thus, this information is given
+to the signal processors in parallel with the signal by using a dictionary
+*parameters* (a sketch of such a signal's binning follows below).
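As a concrete illustration (not part of the patch, and with made-up numbers), the bin edges of a Class 1 signal covering two identically binned bunches could be laid out like this:

```python
import numpy as np

# Hypothetical layout: two bunches (segments), four 0.1 s bins each,
# segment reference points 25.0 s apart.
n_segments = 2
n_bins_per_segment = 4
bin_width = 0.1
segment_spacing = 25.0

edges = []
for i in range(n_segments):
    offset = i * segment_spacing
    for j in range(n_bins_per_segment):
        edges.append([offset + j * bin_width, offset + (j + 1) * bin_width])
bin_edges = np.array(edges)   # shape (8, 2): one [left, right] row per bin

# The matching signal carries one floating point value per bin:
signal = np.zeros(n_segments * n_bins_per_segment)
```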
+
+The standard (minimal) prototype for the parameter dictionary is the
+following, but it is also allowed to add additional information to be carried
+between the signal processors:
+"""
+
+def Parameters(signal_class=0, bin_edges=np.array([]), n_segments=0,
+               n_bins_per_segment=0, segment_ref_points=np.array([]),
+               previous_parameters=[], location=0, beta=1.):
+    """
+    Returns a prototype for signal parameters.
+
+    Parameters
+    ----------
+    signal_class : int
+        A signal class
+    bin_edges : NumPy array
+        A 2D numpy array, which has the same length as the signal. Each row
+        includes two floating point numbers, the edge positions of
+        the bin in the physical space (time [s]).
+    n_segments : int
+        The number of equal length and equally binned segments into which
+        the signal can be divided
+    n_bins_per_segment : int
+        The number of bins per segment, `len(bin_edges)/n_segments`
+    segment_ref_points : NumPy array
+        A numpy array of the reference points for the segments
+    previous_parameters : list
+        A list of Parameters objects, which tracks how the sampling is
+        changed during the signal processing
+    location : float
+        The location of the signal in betatron phase.
+    beta : float
+        The value of the beta function at the source of the signal. The
+        value 1 is neutral for signal processing
+    """
+    return {'class': signal_class,
+            'bin_edges': bin_edges,
+            'n_segments': n_segments,
+            'n_bins_per_segment': n_bins_per_segment,
+            'segment_ref_points': segment_ref_points,
+            'previous_parameters': previous_parameters,
+            'location': location,
+            'beta': beta
+            }
+
+
+"""
+In principle, the framework can also be extended to work
+in other domains (e.g. f, s or z, symbolic calculations or even influences
+from functional programming), if the definition of the signal is extended and
+specific processors for the signal conversion between the different domains
+are programmed.
+
+"""
+
+"""
+## SIGNAL PROCESSORS
+====================
+A signal processor is a Python object which processes/modifies signals. The
+signal is processed in the method process(parameters, signal, *args, **kwargs),
+which takes the arguments *parameters* and *signal* and returns (possibly)
+modified versions of them.
+
+The constructor of a signal processor must include the following lines:
+    * Signal classes for the input and output signals:
+        self.signal_classes = (int, int)
+    * A list of possible extensions used in the processor (an empty list by
+      default):
+        self.extensions = []
+    * Default macros, which help debugging and assist future development:
+        self._macros = [] + default_macros(self, 'ProcessorName', **kwargs)
+
+Example code for a minimal signal processor, which only bypasses the signal,
+is the following:
+"""
+class Bypass(object):
+    def __init__(self, **kwargs):
+        self.signal_classes = (0, 0)
+        self.extensions = []
+        # a list of macros
+        self._macros = [] + default_macros(self, 'Bypass', **kwargs)
+
+    def process(self, parameters, signal, *args, **kwargs):
+
+        return parameters, signal
+
+"""
+The framework supports extensions to the minimal layout. For example,
+extensions can be used to provide extra simulation data to signal processors
+(bunch extension), implement more complex signal transfer between simulation
+objects (registers and combiners) or provide extra data for debugging and
+data visualization. Names of the extensions supported by the core are listed
+in the variable extensions.
+
+The following extensions are supported by the core of the
+framework.
+
+    ### Bunch extension
+    -------------------
+    The bunch extension allows the use of additional slice set data from
+    PyHEADTAIL in the processor. Because calculations of the statistical
+    variables of the slice sets require a lot of computing power, the names
+    of the required variables are listed in the variable required_variables.
+
+    The slice set data can be found from kwargs['slice_sets'], which is
+    a list of slice set objects (emulations) of the simulated bunches. Note
+    that it is not guaranteed that the bin set of the signal corresponds to
+    the bin set used in the slice set. This can be checked by checking the
+    number of items in the element 'previous_parameters' of the input
+    parameters.
+
+"""
+class MinimalChargeWeighter(object):
+    def __init__(self, **kwargs):
+        # Signal classes for incoming and outgoing signals
+        self.signal_classes = (0, 0)
+
+        # A list of extensions supported by the processor
+        self.extensions = ['bunch']
+
+        # A list of PyHEADTAIL slice set variables required by the processor
+        self.required_variables = ['n_macroparticles_per_slice']
+
+    def process(self, parameters, signal, *args, **kwargs):
+
+        slice_sets = kwargs['slice_sets']
+
+        output_signal = np.copy(signal)
+
+        for i, slice_set in enumerate(slice_sets):
+            n_macroparticles = np.sum(slice_set.n_macroparticles_per_slice)
+
+            j_from = i * parameters['n_bins_per_segment']
+            j_to = (i + 1) * parameters['n_bins_per_segment']
+            output_signal[j_from:j_to] *= slice_set.n_macroparticles_per_slice/n_macroparticles
+
+        # The signal or the parameters could be modified here
+        return parameters, output_signal
+
+
+"""
+    ### Register and combiner extensions
+    ----------------------
+    Register and combiner processors are designed to save and combine data
+    from multiple turns. The basic principle of a register is that it is an
+    iterable object, i.e. data from the register can be read by iterating
+    over it in a for loop. As a part of the signal processor list, a register
+    bypasses the signal without modifications. A combiner uses a register as
+    a signal source and returns a combined signal, i.e. it reads the values
+    from the register(s) and combines the signals by applying possible
+    betatron phase advance correction algorithms.
+
+    Details of these processors can be found in the
+    file processors/register.py.
+"""
+
+
+"""
+## TOOLS
+========
+"""
+
+def process(parameters, signal, processors, **kwargs):
+    """
+    Processes a signal through the given signal processors
+
+    Parameters
+    ----------
+    parameters : dict
+        A standardized dict of the additional parameters describing the signal
+    signal : NumPy array
+        The signal
+    processors : list
+        A list of signal processors.
+    **kwargs : -
+        Other arguments which will be passed to the signal processors
+
+    Returns
+    -------
+    dict
+        Possibly modified dict of the signal parameters
+    NumPy array
+        The processed signal
+    """
+
+    for processor in processors:
+        parameters, signal = processor.process(parameters, signal, **kwargs)
+#        if signal is None:
+#            print 'None signal!'
+#            break
+
+    return parameters, signal
+
+
+def bin_widths(bin_edges):
+    return (bin_edges[:, 1]-bin_edges[:, 0])
+
+
+def bin_mids(bin_edges):
+    return (bin_edges[:, 0]+bin_edges[:, 1])/2.
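Tying the pieces together, here is a minimal sketch (not part of the patch; the numbers are arbitrary) of pushing a signal through a processor chain with the tools above:

```python
import numpy as np

# One segment of eight equally spaced bins (arbitrary numbers); the helper
# functions, Parameters and the Bypass processor are the ones defined in
# this file.
z_bins = np.linspace(0.0, 0.8, 9)
bin_edges = z_bins_to_bin_edges(z_bins)

parameters = Parameters(signal_class=2, bin_edges=bin_edges, n_segments=1,
                        n_bins_per_segment=8,
                        segment_ref_points=np.array(
                            [np.mean(bin_mids(bin_edges))]))
signal = np.ones(8)

# A chain consisting of a single Bypass leaves the signal untouched.
parameters_out, signal_out = process(parameters, signal, [Bypass()])
assert np.all(signal_out == signal)
```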
+
+
+def bin_edges_to_z_bins(bin_edges):
+    return np.append(bin_edges[:, 0], bin_edges[-1, 1])
+
+
+def z_bins_to_bin_edges(z_bins):
+    return np.transpose(np.array([z_bins[:-1], z_bins[1:]]))
+
+
+def append_bin_edges(bin_edges_1, bin_edges_2):
+    return np.concatenate((bin_edges_1, bin_edges_2), axis=0)
+
+
+def get_processor_extensions(processors, external_extensions=None):
+    """
+    A function which checks the available extensions from the processors.
+
+    Parameters
+    ----------
+    processors : list
+        A list of signal processors.
+    external_extensions : list
+        A list of external extensions, which will be added to the list
+
+    Returns
+    -------
+    list
+        A list of the found extensions
+    """
+
+    if external_extensions is None:
+        available_extensions = []
+    else:
+        available_extensions = external_extensions
+
+    for processor in processors:
+        if processor.extensions is not None:
+            available_extensions.extend(processor.extensions)
+
+    available_extensions = list(set(available_extensions))
+
+    return available_extensions
+
+"""
+### Extension specific functions
+================================
+"""
+def get_processor_variables(processors, required_variables=None):
+    """
+    A function which checks the required PyHEADTAIL slice set variables
+    from the signal processors.
+
+    Parameters
+    ----------
+    processors : list
+        A list of signal processors.
+    required_variables : list
+        A list of external variables, which will be added to the list
+
+    Returns
+    -------
+    list
+        A list of the found statistical variables
+    """
+
+    if required_variables is None:
+        required_variables = []
+
+    for processor in processors:
+        if 'bunch' in processor.extensions:
+            required_variables.extend(processor.required_variables)
+
+    required_variables = list(set(required_variables))
+
+    if 'z_bins' in required_variables:
+        required_variables.remove('z_bins')
+
+    return required_variables
+
+
+"""
+### MACROS
+==========
+"""
+def default_macros(obj, label=None, **kwargs):
+    func_list = []
+
+    func_list = func_list + debug_macro(obj, label=label, **kwargs)
+    func_list = func_list + label_macro(obj, label=label, **kwargs)
+    func_list = func_list + init_vatiables_macro(obj, **kwargs)
+
+    return func_list
+
+
+def label_macro(obj, label=None, **kwargs):
+    setattr(obj, 'label', label)
+    return []
+
+
+def init_vatiables_macro(obj, **kwargs):
+    setattr(obj, 'time_scale', 0)
+    return []
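As a usage sketch of the debug extension wired up by default_macros (again not part of the patch): constructing a processor with debug=True makes the decorated process() method keep copies of the last input and output, which the store_data macro below exposes as attributes.

```python
import numpy as np

# Bypass, Parameters and process are the definitions in this file;
# the signal and parameters here are dummies.
bypass = Bypass(debug=True)
parameters, signal_out = process(Parameters(), np.ones(8), [bypass])

# store_data kept copies of what the processor saw on the last call:
assert np.all(bypass.input_signal == bypass.output_signal)
```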
+ """ + def decorated_process(self, parameters, signal, *args, **kwargs): + input_parameters = parameters + input_signal = np.copy(signal) + + output_parameters, output_signal = self.process_org(parameters, signal, + *args, **kwargs) + + for macro in self._macros: + macro(self, input_parameters, input_signal, output_parameters, + output_signal, *args, **kwargs) + + return output_parameters, output_signal + + def store_data(target_object, input_parameters, input_signal, + output_parameters, output_signal, *args, **kwargs): + if target_object.debug: + target_object.input_parameters = copy.copy(input_parameters) + target_object.input_signal = np.copy(input_signal) + target_object.output_parameters = copy.copy(output_parameters) + target_object.output_signal = np.copy(output_signal) + + if 'debug' in kwargs: + obj.extensions.append('debug') + + setattr(obj, 'debug', kwargs['debug']) + setattr(obj, 'input_parameters', None) + setattr(obj, 'input_signal', None) + setattr(obj, 'output_parameters', None) + setattr(obj, 'output_signal', None) + + obj.process_org = obj.process + obj.process = types.MethodType(decorated_process, obj) + + return [store_data] + return [] diff --git a/PyHEADTAIL/feedback/feedback.py b/PyHEADTAIL/feedback/feedback.py new file mode 100644 index 00000000..ec7c3e41 --- /dev/null +++ b/PyHEADTAIL/feedback/feedback.py @@ -0,0 +1,683 @@ +import numpy as np +import collections +from PyHEADTAIL.mpi import mpi_data +from core import get_processor_variables, process, Parameters +from core import z_bins_to_bin_edges, append_bin_edges +from processors.register import VectorSumCombiner, CosineSumCombiner +from processors.register import HilbertCombiner, DummyCombiner +from scipy.constants import c +""" + This file contains objecst, which can be used as transverse feedback + systems in the one turn map in PyHEADTAIL. The signal processing in the + feedback systems can be modelled by giving a list of the necessary signal + processors describing the system to the objects. + + @author Jani Komppula + @date 11/10/2017 +""" + + +class IdealBunchFeedback(object): + """ The simplest possible feedback. It corrects a gain fraction of a mean xp/yp value of the bunch. 
+ """ + def __init__(self,gain, multi_bunch=False): + if isinstance(gain, collections.Container): + self._gain_x = gain[0] + self._gain_y = gain[1] + else: + self._gain_x = gain + self._gain_y = gain + + self.multi_bunch = multi_bunch + + def track(self,bunch): + + if self.multi_bunch: + bunch_list = bunch.split_to_views() + + for b in bunch_list: + b.xp -= self._gain_x *b.mean_xp() + b.yp -= self._gain_y*b.mean_yp() + else: + bunch.xp -= self._gain_x *bunch.mean_xp() + bunch.yp -= self._gain_y*bunch.mean_yp() + + +class IdealSliceFeedback(object): + """Corrects a gain fraction of a mean xp/yp value of each slice in the bunch.""" + def __init__(self,gain,slicer, multi_bunch=False): + if isinstance(gain, collections.Container): + self._gain_x = gain[0] + self._gain_y = gain[1] + else: + self._gain_x = gain + self._gain_y = gain + + self.multi_bunch = multi_bunch + + self._slicer = slicer + + def track(self,bunch): + + if self.multi_bunch: + bunch_list = bunch.split_to_views() + + for b in bunch_list: + slice_set = b.get_slices(self._slicer, statistics = ['mean_xp', 'mean_yp']) + p_idx = slice_set.particles_within_cuts + s_idx = slice_set.slice_index_of_particle.take(p_idx) + + b.xp[p_idx] -= self._gain_x * slice_set.mean_xp[s_idx] + b.yp[p_idx] -= self._gain_y * slice_set.mean_yp[s_idx] + + else: + slice_set = bunch.get_slices(self._slicer, statistics = ['mean_xp', 'mean_yp']) + + # Reads a particle index and a slice index for each macroparticle + p_idx = slice_set.particles_within_cuts + s_idx = slice_set.slice_index_of_particle.take(p_idx) + + bunch.xp[p_idx] -= self._gain_x * slice_set.mean_xp[s_idx] + bunch.yp[p_idx] -= self._gain_y * slice_set.mean_yp[s_idx] + + + +class GenericOneTurnMapObject(object): + def __init__(self, gain, slicer, processors_x, processors_y=None, + pickup_axis='divergence', kicker_axis=None, mpi=False, + phase_x=None, phase_y=None, location_x=0., location_y=0., + beta_x=1., beta_y=1., **kwargs): + + if isinstance(gain, collections.Container): + self._gain_x = gain[0] + self._gain_y = gain[1] + else: + self._gain_x = gain + self._gain_y = gain + + self._slicer = slicer + + self._processors_x = processors_x + self._processors_y = processors_y + + # beam parameters + self._pickup_axis = pickup_axis + self._kicker_axis = kicker_axis + self._phase_x = phase_x + self._phase_y = phase_y + self._location_x = location_x + self._location_y = location_y + self._beta_x = beta_x + self._beta_y = beta_y + + self._local_sets = None + self._signal_sets_x = None + self._signal_sets_y = None + self._loc_signal_sets_x = None + self._loc_signal_sets_y = None + self._required_variables = [] + + if (self._pickup_axis == 'divergence') or (phase_x is not None): + self._required_variables.append('mean_xp') + if (self._pickup_axis == 'displacement') or (phase_x is not None): + self._required_variables.append('mean_x') + + self._required_variables = get_processor_variables(self._processors_x, + self._required_variables) + if self._processors_y is not None: + if (self._pickup_axis == 'divergence') or (phase_y is not None): + self._required_variables.append('mean_yp') + if (self._pickup_axis == 'displacement') or (phase_y is not None): + self._required_variables.append('mean_y') + + self._required_variables = get_processor_variables(self._processors_y, + self._required_variables) +# # TODO: Normally n_macroparticles_per_slice is removed from +# # the statistical variables. Check if it is not necessary. 
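+
+        # For example (an illustrative note, not from the source): with the
+        # default 'divergence' pickup in both planes and processors that
+        # declare the 'bunch' extension, the list collected above could
+        # contain 'mean_xp', 'mean_yp' and 'n_macroparticles_per_slice'.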
+ + self._mpi = mpi + if self._mpi: + self._mpi_gatherer = mpi_data.MpiGatherer(self._slicer, + self._required_variables) + self._parameters_x = None + self._signal_x = None + + self._parameters_y = None + self._signal_y = None + + + def _init_signals(self, bunch_list, signal_slice_sets_x, signal_slice_sets_y): + + self._parameters_x = self._generate_parameters(signal_slice_sets_x, + self._location_x, + self._beta_x) + + n_segments = self._parameters_x['n_segments'] + n_bins_per_segment = self._parameters_x['n_bins_per_segment'] + self._signal_x = np.zeros(n_segments * n_bins_per_segment) + + + if self._processors_y is not None: + self._parameters_y = self._generate_parameters(signal_slice_sets_y, + self._location_y, + self._beta_y) + + n_segments = self._parameters_y['n_segments'] + n_bins_per_segment = self._parameters_y['n_bins_per_segment'] + self._signal_y = np.zeros(n_segments * n_bins_per_segment) + + def _get_slice_sets(self, superbunch): + if self._mpi: + self._mpi_gatherer.gather(superbunch) + all_slice_sets = self._mpi_gatherer.bunch_by_bunch_data + local_slice_sets = self._mpi_gatherer.slice_set_list + bunch_list = self._mpi_gatherer.bunch_list + self._local_sets = self._mpi_gatherer.local_bunch_indexes + else: + all_slice_sets = [superbunch.get_slices(self._slicer, + statistics=self._required_variables)] + local_slice_sets = all_slice_sets + bunch_list = [superbunch] + self._local_sets = [0] + + if self._signal_sets_x is None: + indexes = self._parse_relevant_bunches(local_slice_sets, + all_slice_sets, + self._processors_x) + self._signal_sets_x = indexes[0] + self._loc_signal_sets_x = indexes[1] + + if self._processors_y is not None: + indexes = self._parse_relevant_bunches(local_slice_sets, + all_slice_sets, + self._processors_y) + self._signal_sets_y = indexes[0] + self._loc_signal_sets_y = indexes[1] + + signal_slice_sets_x = [] + for idx in self._signal_sets_x: + signal_slice_sets_x.append(all_slice_sets[idx]) + + if self._processors_y is not None: + signal_slice_sets_y = [] + for idx in self._signal_sets_y: + signal_slice_sets_y.append(all_slice_sets[idx]) + else: + signal_slice_sets_y = None + + return bunch_list, local_slice_sets, signal_slice_sets_x, signal_slice_sets_y + + def _generate_parameters(self, signal_slice_sets, location=0., beta=1.): + + bin_edges = None + segment_ref_points = [] + + if len(signal_slice_sets) > 1: + circumference = signal_slice_sets[0].circumference + h_bunch = signal_slice_sets[0].h_bunch + else: + circumference=None + h_bunch=None + + for slice_set in signal_slice_sets: + z_bins = np.copy(slice_set.z_bins) + if circumference is not None: + z_bins -= slice_set.bucket_id*circumference/float(h_bunch) + + edges = -1.*z_bins_to_bin_edges(z_bins)/c + segment_ref_points.append(-1.*np.mean(z_bins)/c) + if bin_edges is None: + bin_edges = np.copy(edges) + else: + bin_edges = append_bin_edges(bin_edges, edges) + + bin_edges = bin_edges[::-1] + bin_edges = np.fliplr(bin_edges) + segment_ref_points = segment_ref_points[::-1] + + n_bins_per_segment = len(bin_edges)/len(signal_slice_sets) + segment_ref_points = np.array(segment_ref_points) + + parameters = Parameters() + parameters['class'] = 0 + parameters['bin_edges'] = bin_edges + parameters['n_segments'] = len(signal_slice_sets) + parameters['n_bins_per_segment'] = n_bins_per_segment + parameters['segment_ref_points'] = segment_ref_points + parameters['location'] = location + parameters['beta'] = beta + + return parameters + + def _parse_relevant_bunches(self, local_slice_sets, all_slice_sets, 
processors): + circumference = all_slice_sets[0].circumference + h_bunch = all_slice_sets[0].h_bunch + + time_scale = 0. + + for processor in processors: + if processor.time_scale > time_scale: + time_scale = processor.time_scale + local_set_edges = np.zeros((len(local_slice_sets), 2)) + + included_sets = [] + set_is_included = np.zeros(len(all_slice_sets), dtype=int) + set_counter = np.zeros(len(all_slice_sets), dtype=int) + + for i, slice_set in enumerate(local_slice_sets): + local_set_edges[i,0] = np.min(slice_set.z_bins-slice_set.bucket_id*circumference/float(h_bunch))/c + local_set_edges[i,1] = np.max(slice_set.z_bins-slice_set.bucket_id*circumference/float(h_bunch))/c + + + local_min = np.min(local_set_edges) + local_max = np.max(local_set_edges) + + counter = 0 + for i, slice_set in enumerate(all_slice_sets): + set_min = np.min(slice_set.z_bins-slice_set.bucket_id*circumference/float(h_bunch))/c + set_max = np.max(slice_set.z_bins-slice_set.bucket_id*circumference/float(h_bunch))/c + # print 'set_min ' + str(set_min) + ' and (local_min - time_scale)' + str((local_min - time_scale)) + if (set_max > (local_min - time_scale)) and (set_min < (local_max + time_scale)): + included_sets.append(i) + set_is_included[i] = 1 + set_counter[i] = counter + counter += 1 + else: + pass + # print('skip!!!') + + local_sets = [] + for idx in self._local_sets: + if set_is_included[idx] != 1: + raise ValueError('All local bunches are not included!') + else: + local_sets.append(set_counter[idx]) + + + return included_sets, local_sets + + + def _read_signal(self, signal, signal_slice_sets, plane, betatron_phase, + beta_value): + if self._mpi: + n_slices_per_bunch = signal_slice_sets[0]._n_slices + else: + n_slices_per_bunch = signal_slice_sets[0].n_slices + + total_length = len(signal_slice_sets) * n_slices_per_bunch + + if (signal is None) or (len(signal) != total_length): + raise ValueError('Wrong signal length') + + for idx, slice_set in enumerate(signal_slice_sets): + idx_from = idx * n_slices_per_bunch + idx_to = (idx + 1) * n_slices_per_bunch + + + if plane == 'x': + if self._pickup_axis == 'displacement' or (betatron_phase is not None): + x_values = np.copy(slice_set.mean_x) + if (self._pickup_axis == 'divergence') or (betatron_phase is not None): + xp_values = np.copy(slice_set.mean_xp) + elif plane == 'y': + if self._pickup_axis == 'displacement' or (betatron_phase is not None): + x_values = np.copy(slice_set.mean_y) + if (self._pickup_axis == 'divergence') or (betatron_phase is not None): + xp_values = np.copy(slice_set.mean_yp) + else: + raise ValueError('Unknown plane') + + if self._pickup_axis == 'divergence': + if betatron_phase is None: + np.copyto(signal[idx_from:idx_to], xp_values) + else: + np.copyto(signal[idx_from:idx_to], (-np.sin(betatron_phase)*x_values/beta_value + + np.cos(betatron_phase)*xp_values)) + elif self._pickup_axis == 'displacement': + if betatron_phase is None: + np.copyto(signal[idx_from:idx_to], x_values) + else: + np.copyto(signal[idx_from:idx_to], (np.cos(betatron_phase)*x_values + + beta_value*np.sin(betatron_phase)*xp_values)) + else: + raise ValueError('Unknown axis') + + if signal is not None: + np.copyto(signal, signal[::-1]) + + + def _kick_bunches(self, signal, plane, local_slice_sets, bunch_list, local_sets): + + if signal is not None: + np.copyto(signal, signal[::-1]) + + n_slices_per_bunch = local_slice_sets[0].n_slices + + for slice_set, bunch_idx, bunch in zip(local_slice_sets, + local_sets, bunch_list): + idx_from = bunch_idx * n_slices_per_bunch + 
idx_to = (bunch_idx + 1) * n_slices_per_bunch
+
+            p_idx = slice_set.particles_within_cuts
+            s_idx = slice_set.slice_index_of_particle.take(p_idx)
+
+            if self._kicker_axis == 'divergence':
+                if plane == 'x':
+                    correction_x = np.array(signal[idx_from:idx_to], copy=False)
+                    bunch.xp[p_idx] -= correction_x[s_idx]
+                elif plane == 'y':
+                    correction_y = np.array(signal[idx_from:idx_to], copy=False)
+                    bunch.yp[p_idx] -= correction_y[s_idx]
+                else:
+                    raise ValueError('Unknown plane')
+
+            elif self._kicker_axis == 'displacement':
+                if plane == 'x':
+                    correction_x = np.array(signal[idx_from:idx_to], copy=False)
+                    bunch.x[p_idx] -= correction_x[s_idx]
+                elif plane == 'y':
+                    correction_y = np.array(signal[idx_from:idx_to], copy=False)
+                    bunch.y[p_idx] -= correction_y[s_idx]
+                else:
+                    raise ValueError('Unknown plane')
+            else:
+                raise ValueError('Unknown axis')
+
+class OneboxFeedback(GenericOneTurnMapObject):
+    """ A transverse feedback object for the one turn map in PyHEADTAIL.
+
+    By using this object, the pickup and the kicker are in the same location
+    of the accelerator. Bandwidth limitations, turn delays, noise, etc. can be
+    applied by using signal processors. The axes for the pickup signal and
+    the correction are the same by default, but they can also be specified to
+    be different (e.g. displacement and divergence).
+    """
+
+    def __init__(self, gain, slicer, processors_x, processors_y,
+                 pickup_axis='divergence', kicker_axis=None, mpi=False,
+                 phase_x=None, phase_y=None, beta_x=1., beta_y=1., **kwargs):
+        """
+        Parameters
+        ----------
+        gain : float or tuple
+            A fraction of the oscillations which is corrected when the
+            perfectly betatron-motion-corrected pickup signal passes the
+            signal processors without modifications, i.e.
+            2/(damping time [turns]). Separate values can be set for the
+            x and y planes by giving two values in a tuple.
+        slicer : PyHEADTAIL slicer object
+        processors_x : list
+            A list of signal processors for the x-plane
+        processors_y : list
+            A list of signal processors for the y-plane
+        pickup_axis : str
+            The axis whose values are used as the pickup signal
+        kicker_axis : str
+            The axis to which the correction is applied.
If None, the axis is
+            the same as the pickup axis
+        mpi : bool
+            If True, data from multiple bunches are gathered by using MPI
+        phase_x : float
+            Initial betatron phase rotation for the signal in the x-plane in
+            units of radians
+        phase_y : float
+            Initial betatron phase rotation for the signal in the y-plane in
+            units of radians
+        beta_x : float
+            The value of the x-plane beta function at the feedback location
+        beta_y : float
+            The value of the y-plane beta function at the feedback location
+        """
+
+        if kicker_axis is None:
+            kicker_axis = pickup_axis
+
+        super(self.__class__, self).__init__(gain, slicer, processors_x,
+              processors_y=processors_y, pickup_axis=pickup_axis,
+              kicker_axis=kicker_axis, mpi=mpi, phase_x=phase_x,
+              phase_y=phase_y, beta_x=beta_x, beta_y=beta_y, **kwargs)
+
+    def track(self, bunch):
+
+        bunch_list, local_slice_sets, signal_slice_sets_x, signal_slice_sets_y = self._get_slice_sets(bunch)
+
+        if self._signal_x is None:
+            self._init_signals(bunch_list, signal_slice_sets_x, signal_slice_sets_y)
+
+        self._read_signal(self._signal_x, signal_slice_sets_x, 'x',
+                          self._phase_x, self._beta_x)
+
+        kick_parameters_x, kick_signal_x = process(self._parameters_x,
+                                                   self._signal_x,
+                                                   self._processors_x,
+                                                   slice_sets=signal_slice_sets_x)
+
+        if kick_signal_x is not None:
+            kick_signal_x = kick_signal_x * self._gain_x
+
+            if self._pickup_axis == 'displacement' and self._kicker_axis == 'divergence':
+                kick_signal_x = kick_signal_x / self._beta_x
+            elif self._pickup_axis == 'divergence' and self._kicker_axis == 'displacement':
+                kick_signal_x = kick_signal_x * self._beta_x
+
+            self._kick_bunches(kick_signal_x, 'x', local_slice_sets, bunch_list,
+                               self._loc_signal_sets_x)
+
+        if self._processors_y is not None:
+
+            self._read_signal(self._signal_y, signal_slice_sets_y, 'y',
+                              self._phase_y, self._beta_y)
+
+            kick_parameters_y, kick_signal_y = process(self._parameters_y,
+                                                       self._signal_y,
+                                                       self._processors_y,
+                                                       slice_sets=signal_slice_sets_y)
+
+            if kick_signal_y is not None:
+                kick_signal_y = kick_signal_y * self._gain_y
+
+                if self._pickup_axis == 'displacement' and self._kicker_axis == 'divergence':
+                    kick_signal_y = kick_signal_y / self._beta_y
+                elif self._pickup_axis == 'divergence' and self._kicker_axis == 'displacement':
+                    kick_signal_y = kick_signal_y * self._beta_y
+
+                self._kick_bunches(kick_signal_y, 'y', local_slice_sets, bunch_list,
+                                   self._loc_signal_sets_y)
+
+
+class PickUp(GenericOneTurnMapObject):
+    """ A pickup object for the one turn map in PyHEADTAIL.
+
+    This object can be used as a pickup in the transverse feedback systems
+    consisting of separate pickup(s) and kicker(s). A model for signal
+    processing (including, for example, bandwidth limitations and noise) can be
+    implemented by using signal processors. The signal can be transferred to
+    the kicker(s) by putting registers into the signal processor chains.
+    """
+
+    def __init__(self, slicer, processors_x, processors_y, location_x, beta_x,
+                 location_y, beta_y, mpi=False, phase_x=None, phase_y=None,
+                 **kwargs):
+        """
+        Parameters
+        ----------
+        slicer : PyHEADTAIL slicer object
+        processors_x : list
+            A list of signal processors for the x-plane
+        processors_y : list
+            A list of signal processors for the y-plane
+        location_x : float
+            The location of the pickup in the x-plane in units of betatron
+            phase advance from a chosen reference point
+        beta_x : float
+            The value of the x-plane beta function at the pickup location
+        location_y : float
+            The location of the pickup in the y-plane in units of betatron
+            phase advance from a chosen reference point
+        beta_y : float
+            The value of the y-plane beta function at the pickup location
+        mpi : bool
+            If True, data from multiple bunches are gathered by using MPI
+        phase_x : float
+            Initial betatron phase rotation of the signal in the x-plane in
+            units of radians
+        phase_y : float
+            Initial betatron phase rotation of the signal in the y-plane in
+            units of radians
+        """
+
+        super(self.__class__, self).__init__(0, slicer, processors_x,
+              processors_y=processors_y, pickup_axis='displacement',
+              kicker_axis=None, mpi=mpi, phase_x=phase_x, location_x=location_x,
+              location_y=location_y, phase_y=phase_y, beta_x=beta_x,
+              beta_y=beta_y, **kwargs)
+
+    def track(self, bunch):
+
+        bunch_list, local_slice_sets, signal_slice_sets_x, signal_slice_sets_y = self._get_slice_sets(bunch)
+
+        if self._signal_x is None:
+            self._init_signals(bunch_list, signal_slice_sets_x, signal_slice_sets_y)
+
+        self._read_signal(self._signal_x, signal_slice_sets_x, 'x',
+                          self._phase_x, self._beta_x)
+
+        end_parameters_x, end_signal_x = process(self._parameters_x,
+                                                 self._signal_x,
+                                                 self._processors_x,
+                                                 slice_sets=signal_slice_sets_x)
+
+        if self._processors_y is not None:
+
+            self._read_signal(self._signal_y, signal_slice_sets_y, 'y',
+                              self._phase_y, self._beta_y)
+
+            end_parameters_y, end_signal_y = process(self._parameters_y,
+                                                     self._signal_y,
+                                                     self._processors_y,
+                                                     slice_sets=signal_slice_sets_y)
+
+
+class Kicker(GenericOneTurnMapObject):
+    """ A Kicker object for the one turn map in PyHEADTAIL.
+
+    This object can be used as a kicker in the transverse feedback systems
+    consisting of separate pickup(s) and kicker(s). A model for signal
+    processing (including, for example, bandwidth limitations and noise) can be
+    implemented by using signal processors. The input signals for the kicker
+    are the lists of register objects given as input parameters.
+    """
+    def __init__(self, gain, slicer, processors_x, processors_y,
+                 registers_x, registers_y, location_x, beta_x,
+                 location_y, beta_y, combiner='vector_sum', mpi=False,
+                 **kwargs):
+        """
+        Parameters
+        ----------
+        gain : float or tuple
+            A fraction of the oscillations which is corrected when the
+            perfectly betatron-motion-corrected pickup signal passes the
+            signal processors without modifications, i.e.
+            2/(damping time [turns]). Separate values can be set for the
+            x and y planes by giving two values in a tuple.
+        slicer : PyHEADTAIL slicer object
+        processors_x : list
+            A list of signal processors for the x-plane
+        processors_y : list
+            A list of signal processors for the y-plane
+        registers_x : list
+            A list of register object(s) (from the pickup processor chain(s))
+            used as a signal source in the x-plane
+        registers_y : list
+            A list of register object(s) (from the pickup processor chain(s))
+            used as a signal source in the y-plane
+        location_x : float
+            The location of the kicker in the x-plane in units of betatron
+            phase advance from a chosen reference point
+        beta_x : float
+            The value of the x-plane beta function at the kicker location
+        location_y : float
+            The location of the kicker in the y-plane in units of betatron
+            phase advance from a chosen reference point
+        beta_y : float
+            The value of the y-plane beta function at the kicker location
+        combiner : string or object
+            The combiner which is used for combining the signals from
+            the registers.
+        mpi : bool
+            If True, data from multiple bunches are gathered by using MPI
+        """
+
+        if isinstance(combiner, (str,unicode)):
+            if combiner == 'vector_sum':
+                self._combiner_x = VectorSumCombiner(registers_x,
+                                                     location_x, beta_x,
+                                                     beta_conversion = '90_deg')
+                self._combiner_y = VectorSumCombiner(registers_y,
+                                                     location_y, beta_y,
+                                                     beta_conversion = '90_deg')
+            elif combiner == 'cosine_sum':
+                self._combiner_x = CosineSumCombiner(registers_x,
+                                                     location_x, beta_x,
+                                                     beta_conversion = '90_deg')
+                self._combiner_y = CosineSumCombiner(registers_y,
+                                                     location_y, beta_y,
+                                                     beta_conversion = '90_deg')
+
+            elif combiner == 'hilbert':
+                self._combiner_x = HilbertCombiner(registers_x,
+                                                   location_x, beta_x,
+                                                   beta_conversion = '90_deg')
+                self._combiner_y = HilbertCombiner(registers_y,
+                                                   location_y, beta_y,
+                                                   beta_conversion = '90_deg')
+
+            elif combiner == 'dummy':
+                self._combiner_x = DummyCombiner(registers_x,
+                                                 location_x, beta_x,
+                                                 beta_conversion = '90_deg')
+                self._combiner_y = DummyCombiner(registers_y,
+                                                 location_y, beta_y,
+                                                 beta_conversion = '90_deg')
+            else:
+                raise ValueError('Unknown combiner type')
+        else:
+            self._combiner_x = combiner(registers_x, location_x, beta_x,
+                                        beta_conversion = '90_deg')
+            self._combiner_y = combiner(registers_y, location_y, beta_y,
+                                        beta_conversion = '90_deg')
+
+        super(self.__class__, self).__init__(gain, slicer, processors_x,
+              processors_y=processors_y, pickup_axis='divergence',
+              kicker_axis='divergence', mpi=mpi, location_x=location_x,
+              location_y=location_y, beta_x=beta_x, beta_y=beta_y, **kwargs)
+
+    def track(self, bunch):
+
+        bunch_list, local_slice_sets, signal_slice_sets_x, signal_slice_sets_y = self._get_slice_sets(bunch)
+
+        if self._signal_x is None:
+            self._init_signals(bunch_list, signal_slice_sets_x, signal_slice_sets_y)
+
+        parameters_x, signal_x = self._combiner_x.process()
+        parameters_x, signal_x = process(parameters_x,
+                                         signal_x,
+                                         self._processors_x,
+                                         slice_sets=signal_slice_sets_x)
+        if signal_x is not None:
+
+            signal_x = signal_x * self._gain_x
+            self._kick_bunches(signal_x, 'x', local_slice_sets,
+                               bunch_list, self._loc_signal_sets_x)
+
+        if self._processors_y is not None:
+            self._parameters_y, self._signal_y = self._combiner_y.process()
+            kick_parameters_y, kick_signal_y = process(self._parameters_y,
+                                                       self._signal_y,
+                                                       self._processors_y,
+                                                       slice_sets=signal_slice_sets_y)
+            if kick_signal_y is not None:
+                kick_signal_y = kick_signal_y * self._gain_y
+                self._kick_bunches(kick_signal_y, 'y', local_slice_sets,
+                                   bunch_list, self._loc_signal_sets_y)
diff --git
a/PyHEADTAIL/feedback/original_feedback.py b/PyHEADTAIL/feedback/original_feedback.py new file mode 100644 index 00000000..988f8ef8 --- /dev/null +++ b/PyHEADTAIL/feedback/original_feedback.py @@ -0,0 +1,23 @@ +import numpy as np + + +class TransverseDamper(object): + + def __init__(self, dampingrate_x, dampingrate_y, phase_x_deg=90, phase_y_deg=90, + beta_x=1, beta_y=1): + self.gain_x = 2./dampingrate_x + self.gain_y = 2./dampingrate_y + + self.phase_x = phase_x_deg * np.pi/180 + self.phase_y = phase_y_deg * np.pi/180 + + self.beta_x = beta_x + self.beta_y = beta_y + + def track(self, beam): + beam.xp -= self.gain_x * (np.cos(self.phase_x)*beam.mean_x()/self.beta_x + + np.sin(self.phase_x)*beam.mean_xp()) + beam.yp -= self.gain_y * (np.cos(self.phase_y)*beam.mean_y()/self.beta_y + + np.sin(self.phase_y)*beam.mean_yp()) + # beam.xp -= self.gain_x * beam.mean_xp() + # beam.yp -= self.gain_y * beam.mean_yp() diff --git a/PyHEADTAIL/feedback/processors/__init__.py b/PyHEADTAIL/feedback/processors/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/PyHEADTAIL/feedback/processors/abstract_filter_responses.py b/PyHEADTAIL/feedback/processors/abstract_filter_responses.py new file mode 100644 index 00000000..d95473e3 --- /dev/null +++ b/PyHEADTAIL/feedback/processors/abstract_filter_responses.py @@ -0,0 +1,82 @@ +import numpy as np +import scipy.special as special + +""" This file contains dimensionless impulse responses function for different +analog filters, which can be used in different signal processor implementations +(e.g. based on convolution and linear transformation). + +@author Jani Komppula +@date: 11/10/2017 +""" + +def normalized_lowpass(max_impulse_length): + + def response_function(x): + if x < 0.: + return 0. + elif x > max_impulse_length: + return 0. + else: + return np.exp(-1. * x) + + return response_function + +def normalized_highpass(max_impulse_length): + + def response_function(x): + if x < 0.: + return 0. + elif x > max_impulse_length: + return 0. + else: + return np.exp(-1. * x) + + return response_function + +def normalized_phase_linearized_lowpass(max_impulse_length): + """Phase linearized version of the normal lowpass (RC, + singe poll roll-off) filter. Formula derived by Gerd Kotzian.""" + + def response_function(x): + if x == 0.: + return 0. + elif x < -max_impulse_length: + return 0. + elif x > max_impulse_length: + return 0. + else: + return special.k0(abs(x)) + + return response_function + +def normalized_Gaussian(max_impulse_length): + + def response_function(x): + if x < -max_impulse_length: + return 0. + elif x > max_impulse_length: + return 0. + else: + return np.exp(-x ** 2. / 2.* (2.*np.log(2))) / np.sqrt(2. * np.pi) + + return response_function + +def normalized_sinc(window_type, window_width): + + def blackman_window(x): + return 0.42-0.5*np.cos(2.*np.pi*(x/np.pi+window_width)/(2.*window_width))\ + +0.08*np.cos(4.*np.pi*(x/np.pi+window_width)/(2.*window_width)) + + def hamming_window(x): + return 0.54-0.46*np.cos(2.*np.pi*(x/np.pi+window_width)/(2.*window_width)) + + def response_function(x): + if np.abs(x/np.pi) > window_width: + return 0. 
+ else: + if window_type == 'blackman': + return np.sinc(x/np.pi)*blackman_window(x) + elif window_type == 'hamming': + return np.sinc(x/np.pi)*hamming_window(x) + + return response_function \ No newline at end of file diff --git a/PyHEADTAIL/feedback/processors/addition.py b/PyHEADTAIL/feedback/processors/addition.py new file mode 100644 index 00000000..d3a1f0e3 --- /dev/null +++ b/PyHEADTAIL/feedback/processors/addition.py @@ -0,0 +1,229 @@ +from abc import ABCMeta, abstractmethod, abstractproperty +import numpy as np +from scipy.constants import c, pi +from ..core import default_macros + +""" This file contains dimensionless impulse responses function for different +analog filters, which can be used in different signal processor implementations +(e.g. based on convolution and linear transformation). + +@author Jani Komppula +@date: 11/10/2017 +""" + +class Addition(object): + __metaclass__ = ABCMeta + """ An abstract class which adds an array to the input signal. The addend array is produced by taking + a slice property (determined by the input parameter 'seed') and passing it through the abstract method + addend_function(seed). + """ + + def __init__(self, seed, normalization = None, recalculate_addend = False, label='Addition', **kwargs): + """ + :param seed: a seed for the addend, which can be 'bin_length', 'bin_midpoint', 'signal' or any slice + property found from slice_set + :param normalization: + None: + 'total_sum': The sum over the addend is equal to 1 + 'segment_sum': The sum of the addend over each signal segment is equal to 1 + 'total_average': The total average of the addend is equal to 1 + 'segment_average': The average addend of each signal segment is equal to 1 + 'total_integral': The total integral over the addend is equal to 1 + 'segment_integral': The integral of the addend over each signal segment is equal to 1 + 'total_min': The minimum of the addend is equal to 1 + 'segment_min': The minimum of the addend in each signal segment is equal to 1 + 'total_max': The minimum of the addend is equal to 1 + 'segment_max': The minimum of the addend in each signal segment is equal to 1 + :param: recalculate_addend: if True, the weight is recalculated every time when process() is called + """ + + self._seed = seed + self._normalization = normalization + self._recalculate_addend = recalculate_addend + + self._addend = None + + self.signal_classes = (0,0) + + self.extensions = [] + self._macros = [] + default_macros(self, 'Addition', **kwargs) + + if self._seed not in ['bin_length','bin_midpoint','signal']: + self.extensions.append('bunch') + self.required_variables = [self._seed] + + @abstractmethod + def addend_function(self, seed): + pass + + def process(self,parameters, signal, slice_sets = None, *args, **kwargs): + + if (self._addend is None) or self._recalculate_addend: + self.__calculate_addend(parameters, signal, slice_sets) + + output_signal = signal + self._addend + + # process the signal + return parameters, output_signal + + def __calculate_addend(self,parameters, signal, slice_sets): + self._addend = np.zeros(len(signal)) + + if self._seed == 'ones': + self._addend = self._addend + 1. 
+ elif self._seed == 'bin_length': + np.copyto(self._addend, (parameters.bin_edges[:,1]-parameters.bin_edges[:,0])) + elif self._seed == 'bin_midpoint': + np.copyto(self._addend, ((parameters.bin_edges[:,1]+parameters.bin_edges[:,0])/2.)) + elif self._seed == 'normalized_bin_midpoint': + + for i in xrange(parameters.n_segments): + i_from = i * parameters.n_bins_per_segment + i_to = (i + 1) * parameters.n_bins_per_segment + + np.copyto(self._addend[i_from:i_to], ((parameters.bin_edges[i_from:i_to,1]+ + parameters.bin_edges[i_from:i_to,0])/2. + -parameters.original_z_mids[i])) + + elif self._seed == 'signal': + np.copyto(self._addend,signal) + else: + if len(signal) == len(slice_sets) * (len(slice_sets[0].z_bins) - 1): + start_idx = 0 + for slice_set in slice_sets: + seed = getattr(slice_set,self._seed) + np.copyto(self._addend[start_idx:(start_idx+len(seed))],seed) + start_idx += len(seed) + np.copyto(self._addend, self._addend[::-1]) + else: + raise ValueError('Signal length does not correspond to the original signal length ' + 'from the slice sets in the method Addition') + + self._addend = self.addend_function(self._addend) + + # NOTE: add options for average bin integrals? + if self._normalization is None: + norm_coeff = 1. + + elif self._normalization == 'total_sum': + norm_coeff = float(np.sum(self._addend)) + + elif self._normalization == 'segment_sum': + norm_coeff = np.ones(len(self._addend)) + for i in xrange(parameters.n_segments): + i_from = i*parameters.n_bins_per_segment + i_to = (i+1)*parameters.n_bins_per_segment + norm_coeff[i_from:i_to] = norm_coeff[i_from:i_to]*float(np.sum(self._addend[i_from:i_to])) + + elif self._normalization == 'total_average': + norm_coeff = float(np.sum(self._addend))/float(len(self._addend)) + + elif self._normalization == 'segment_average': + norm_coeff = np.ones(len(self._addend)) + for i in xrange(parameters.n_segments): + i_from = i*parameters.n_bins_per_segment + i_to = (i+1)*parameters.n_bins_per_segment + norm_coeff[i_from:i_to] = norm_coeff[i_from:i_to]*float(np.sum(self._addend[i_from:i_to]))/float(parameters.n_bins_per_segment) + + elif self._normalization == 'total_integral': + bin_widths = parameters.bin_edges[:,1] - parameters.bin_edges[:,0] + norm_coeff = np.sum(self._addend*bin_widths) + + elif self._normalization == 'segment_integral': + bin_widths = parameters.bin_edges[:,1] - parameters.bin_edges[:,0] + norm_coeff = np.ones(len(self._addend)) + for i in xrange(parameters.n_segments): + i_from = i*parameters.n_bins_per_segment + i_to = (i+1)*parameters.n_bins_per_segment + norm_coeff[i_from:i_to] = norm_coeff[i_from:i_to]*float(np.sum(self._addend[i_from:i_to]*bin_widths[i_from:i_to])) + + elif self._normalization == 'total_min': + norm_coeff = float(np.min(self._addend)) + + elif self._normalization == 'segment_min': + norm_coeff = np.ones(len(self._addend)) + for i in xrange(parameters.n_segments): + i_from = i*parameters.n_bins_per_segment + i_to = (i+1)*parameters.n_bins_per_segment + norm_coeff[i_from:i_to] = norm_coeff[i_from:i_to]*float(np.min(self._addend[i_from:i_to])) + + elif self._normalization == 'total_max': + norm_coeff = float(np.max(self._addend)) + + elif self._normalization == 'segment_max': + norm_coeff = np.ones(len(self._addend)) + for i in xrange(parameters.n_segments): + i_from = i*parameters.n_bins_per_segment + i_to = (i+1)*parameters.n_bins_per_segment + norm_coeff[i_from:i_to] = norm_coeff[i_from:i_to]*float(np.max(self._addend[i_from:i_to])) + else: + raise ValueError('Unknown value in 
Addition._normalization') + + # TODO: try to figure out why this can not be written as + # TODO: self._addend /= norm_coeff + self._addend = self._addend / norm_coeff + + def clear(self): + self._addend = None + +class AdditionFromFile(Addition): + """ Adds an array to the signal, which is produced by interpolation from the external data file. Note the seed + (unit) for the interpolation can be any of those available for the seed. + (i.e. location, sigma, or a number of macroparticles per slice, etc.) + """ + + def __init__(self,filename, x_axis='time', seed='bin_midpoint', **kwargs): + super(self.__class__, self).__init__(seed, label = 'Addition from file', **kwargs) + + self._filename = filename + self._x_axis = x_axis + self._data = np.loadtxt(self._filename) + + if self._x_axis == 'time': + pass + elif self._x_axis == 'position': + self._data[:, 0] = self._data[:, 0] / c + else: + raise ValueError('Unknown value in AdditionFromFile._x_axis') + + def addend_function(self, seed): + return np.interp(seed, self._data[:, 0], self._data[:, 1]) + + +class NoiseGenerator(Addition): + """ Adds noise. The noise level is given as an absolute RMS noise level in the units of signal + (reference_level = 'absolute') or a relative RMS level from the maximum value of the signal + (reference_level = 'maximum'). Options for the noise distribution are a Gaussian (normal) distribution + (distribution = 'normal') or an uniform distribution (distribution = 'uniform') + """ + + def __init__(self,RMS_noise_level,reference_level = 'absolute', distribution = 'normal', **kwargs): + super(self.__class__, self).__init__('signal', recalculate_addend=True, + label = 'Noise generator', **kwargs) + + self._RMS_noise_level = RMS_noise_level + self._reference_level = reference_level + self._distribution = distribution + + def signal_classes(self): + return (0,0) + + def addend_function(self,seed): + + if self._distribution == 'normal' or self._distribution is None: + randoms = np.random.randn(len(seed)) + elif self._distribution == 'uniform': + randoms = 1./0.577263*(-1.+2.*np.random.rand(len(seed))) + else: + raise ValueError('Unknown value in NoiseGenerator._distribution') + + if self._reference_level == 'absolute': + addend = self._RMS_noise_level*randoms + elif self._reference_level == 'maximum': + addend = self._RMS_noise_level*np.max(seed)*randoms + elif self._reference_level == 'local': + addend = seed*self._RMS_noise_level*randoms + else: + raise ValueError('Unknown value in NoiseGenerator._reference_level') + + return addend diff --git a/PyHEADTAIL/feedback/processors/convolution.py b/PyHEADTAIL/feedback/processors/convolution.py new file mode 100644 index 00000000..d7dbd7f8 --- /dev/null +++ b/PyHEADTAIL/feedback/processors/convolution.py @@ -0,0 +1,657 @@ +import numpy as np +from abc import ABCMeta, abstractmethod + +from ..core import bin_widths, bin_mids, bin_edges_to_z_bins +from ..core import default_macros, Parameters +from scipy.constants import pi +import scipy.integrate as integrate +from scipy.interpolate import UnivariateSpline +import abstract_filter_responses + +"""Signal processors based on convolution operation. 
+ +@author Jani Komppula +@date: 11/10/2017 +""" + +class Convolution(object): + __metaclass__ = ABCMeta + + def __init__(self,**kwargs): + + self._dashed_impulse_responses = None + self._impulses_from_segments = None + self._impulses_to_segments = None + + self._n_seg = None + self._n_bins = None + + self.extensions = [] + self._macros = [] + default_macros(self, 'Convolution', **kwargs) + + def _init_convolution(self, parameters): + + + # the parameters of the input signal + self._n_seg = parameters['n_segments'] + self._n_bins = parameters['n_bins_per_segment'] + bin_edges = parameters['bin_edges'] + + original_segment_length = bin_edges[self._n_bins-1,1] - bin_edges[0,0] + + # a number of impulse values added to the both side of the segments + extra_bins = int(np.ceil(self._n_bins/2.)) +# extra_bins = 0 + + # Reference bin edges for one segment + impulse_ref_edges = None + + # ipulse responses for individual segments + self._dashed_impulse_responses = [] + + # impulses caused by the segments + self._impulses_from_segments = [] + + # List of impulses to the corresponding segments + self._impulses_to_segments = [] + for i in xrange(self._n_seg): + self._impulses_to_segments.append([]) + + ref_points = [] + + for i in xrange(self._n_seg): + i_from = i*self._n_bins + i_to = (i+1)*self._n_bins + + # original bins corresponing to the signal + org_edges = bin_edges[i_from:i_to, :] + offset = org_edges[-1,1] - org_edges[0,0] + edges = np.concatenate((org_edges[-extra_bins:]-offset,org_edges),axis=0) + edges = np.concatenate((edges,org_edges[:extra_bins]+offset),axis=0) + + # extra bins before the original bins +# prefix_offset = org_edges[(extra_bins-1), 1]-org_edges[0, 0] +# # extra bins after the original bins +# postfix_offset = org_edges[-extra_bins, 0]-org_edges[-1, 1] + +# edges = np.copy(org_edges) +# edges = np.concatenate(((org_edges[:extra_bins]-prefix_offset), org_edges), axis=0) +# edges = np.concatenate((edges, org_edges[extra_bins:]-postfix_offset), axis=0) + + # reference points of the segments, which correspond to midpoint of + # the bin sets in this case. + ref_points.append(np.mean(bin_edges_to_z_bins(org_edges))) + + if impulse_ref_edges is None: + impulse_ref_edges = edges + else: + impulse_ref_edges = np.concatenate((impulse_ref_edges, edges), axis=0) + + # calculats the impulse response values for each segment + for i, ref_point in enumerate(ref_points): + # sets the zero point of the bin set to be in the middle of the segment + impulse_edges = impulse_ref_edges-ref_point + + # sets the midpoint of the closest bin to the zero to be zero + mids = bin_mids(impulse_edges) + min_max = np.min(mids[mids>=0]) + max_min = np.min(-1.*mids[mids<0]) + mean_width = np.mean(bin_widths(impulse_edges)) + + mid_offset = 0. + idx_offset = 0 + + if min(min_max, max_min) < mean_width/10.: + pass + + elif min_max < max_min: + if min_max < mean_width: + mid_offset = min_max + idx_offset = 1 + else: + if max_min < mean_width: + mid_offset = -1 * max_min + + impulse_edges = impulse_edges - mid_offset + + # calculates impulse response for the determined bin set + dashed_impulse_response = self.response_function(impulse_edges, self._n_seg, + original_segment_length) + + cleaned_impulse = np.array([]) + # a list of segment indexes where impulse response is non zero + target_segments = [] + + # cleans the calculated impulse response, i.e. removes the segments where + # response is zero. 
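+            # (An illustrative note: for a filter whose impulse response is
+            # short compared to the bunch spacing, only the segments nearest
+            # to the source segment survive this cleaning, which keeps the
+            # convolution below cheap for large numbers of bunches.)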
+ n_bins_per_segment = self._n_bins + 2*extra_bins + + for k in xrange(self._n_seg): + + i_from = k * n_bins_per_segment + i_to = (k+1) * n_bins_per_segment + +# target_segments.append(k) +# cleaned_impulse = np.append(cleaned_impulse, dashed_impulse_response[i_from:i_to]) + + if np.sum(np.abs(dashed_impulse_response[i_from:i_to])) > 0.: + target_segments.append(k) + cleaned_impulse = np.append(cleaned_impulse, dashed_impulse_response[i_from:i_to]) + + self._dashed_impulse_responses.append(cleaned_impulse) + + self._impulses_from_segments.append(np.zeros(len(cleaned_impulse)+idx_offset)) + + for idx, target_idx in enumerate(target_segments): + i_from = idx * n_bins_per_segment + idx_offset + extra_bins + i_to = i_from + self._n_bins + self._impulses_to_segments[target_idx].append(np.array(self._impulses_from_segments[-1][i_from:i_to], copy=False)) + + @abstractmethod + def response_function(self, impulse_ref_edges, n_seg, original_segment_length): + # A function which calculates the impulse response values for the + # the given bin set + pass + + def _apply_convolution(self, parameters, signal): + + if self._dashed_impulse_responses is None: + self._init_convolution(parameters) + + # calculates the impulses caused by the segments + for i in xrange(self._n_seg): + i_from = i*self._n_bins + i_to = (i+1)*self._n_bins + np.copyto(self._impulses_from_segments[i][:len(self._dashed_impulse_responses[i])], + np.convolve(self._dashed_impulse_responses[i], + signal[i_from:i_to], mode='same')) + + # gathers the output signal + output_signal = np.zeros(len(signal)) + for i in xrange(self._n_seg): + + i_from = i*self._n_bins + i_to = (i+1)*self._n_bins +# print np.sum(self._impulses_to_segments[i], axis=0) +# print len(np.sum(self._impulses_to_segments[i], axis=0)) +# print output_signal[i_from:i_to] +# print len(output_signal[i_from:i_to]) + np.copyto(output_signal[i_from:i_to], np.sum(self._impulses_to_segments[i], axis=0)) + + return output_signal + + def process(self, parameters, signal, *args, **kwargs): + + output_signal = self._apply_convolution(parameters, signal) + + return parameters, output_signal + +class Delay(Convolution): + """ Delays signal in the units of time + """ + + def __init__(self,delay, **kwargs): + + self._delay = delay + + super(self.__class__, self).__init__(**kwargs) + self.label = 'Delay' + + def response_function(self, impulse_ref_edges, n_segments, original_segment_length): + impulse_values = np.zeros(len(impulse_ref_edges)) + bin_spacing = np.mean(impulse_ref_edges[:,1]-impulse_ref_edges[:,0]) + + ref_bin_from = -0.5*bin_spacing+self._delay + ref_bin_to = 0.5*bin_spacing+self._delay + + for i, edges in enumerate(impulse_ref_edges): + impulse_values[i] = self._CDF(edges[1],ref_bin_from,ref_bin_to) - self._CDF(edges[0],ref_bin_from,ref_bin_to) + + return impulse_values + + def _CDF(self,x,ref_bin_from, ref_bin_to): + # FIXME: this is not gonna work for nagative delays? + + if x <= ref_bin_from: + return 0. + elif x < ref_bin_to: + return (x-ref_bin_from)/float(ref_bin_to-ref_bin_from) + else: + return 1. 
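+
+
+# Usage sketch (an illustrative example, not part of the source): the
+# processors in this module plug into the same process(...) chain as the
+# other signal processors, e.g.
+#
+#     from PyHEADTAIL.feedback.processors.convolution import Delay, Gaussian
+#     processors = [Delay(25e-9), Gaussian(1e9)]
+#     parameters, signal = process(parameters, signal, processors)
+#
+# would first shift the signal by 25 ns and then apply a Gaussian lowpass
+# with a 1 GHz cutoff frequency.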
+ + +class MovingAverage(Convolution): + """ Calculates a moving average + """ + + def __init__(self,window_length, **kwargs): + """ + Parameters + ---------- + window_length : float + Window width in the units of time [t] + n_copies : int + A number of copies + """ + + self._window = (-0.5 * window_length, 0.5 * window_length) + + super(self.__class__, self).__init__(**kwargs) + self.label = 'Average' + + def response_function(self, impulse_ref_edges, n_segments, original_segment_length): + impulse_values = np.zeros(len(impulse_ref_edges)) + + for i, edges in enumerate(impulse_ref_edges): + impulse_values[i] = self._CDF(edges[1], self._window[0], self._window[1]) \ + - self._CDF(edges[0], self._window[0], self._window[1]) + + return impulse_values + + def _CDF(self, x, ref_bin_from, ref_bin_to): + if x <= ref_bin_from: + return 0. + elif x < ref_bin_to: + return (x - ref_bin_from) / float(ref_bin_to - ref_bin_from) + else: + return 1. + + +class WaveletGenerator(Convolution): + """ Makes copies from the signal. + """ + + def __init__(self,spacing,n_copies, **kwargs): + """ + Parameters + ---------- + spacing : float + Gap between the copies [s] + n_copies : int + A number of copies + """ + + self._spacing = spacing + self._n_copies = n_copies + + if isinstance(self._n_copies,tuple): + self._i_from = self._n_copies[0] + self._i_to = self._n_copies[1] + + else: + self._i_from = min(self._n_copies,0) + self._i_to = max(self._n_copies,0) + + self._window = (self._i_from*self._spacing,self._i_to*self._spacing) + + super(self.__class__, self).__init__(**kwargs) + self.label = 'Wavelet generator' + + + def response_function(self, impulse_ref_edges, n_segments, original_segment_length): +# def calculate_response(self, impulse_bin_mids, impulse_bin_edges): + impulse_bin_mids = bin_mids(impulse_ref_edges) + bin_spacing = np.mean(impulse_ref_edges[:,1]-impulse_ref_edges[:,0]) + impulse_values = np.zeros(len(impulse_bin_mids)) + + for i in xrange(self._i_from,(self._i_to+1)): + copy_mid = i*self._spacing + copy_from = copy_mid - 0.5 * bin_spacing + copy_to = copy_mid + 0.5 * bin_spacing + + for j, edges in enumerate(impulse_ref_edges): + impulse_values[j] += (self._CDF(edges[1],copy_from,copy_to)-self._CDF(edges[0],copy_from,copy_to)) + + return impulse_values + + + def _CDF(self, x, ref_bin_from, ref_bin_to): + if x <= ref_bin_from: + return 0. + elif x < ref_bin_to: + return (x - ref_bin_from) / float(ref_bin_to - ref_bin_from) + else: + return 1. +# +#class ConvolutionFromFile(Convolution): +# """ Interpolates matrix columns by using inpulse response data from a file. 
""" +# +# def __init__(self,filename, x_axis = 'time', calc_type = 'mean', **kwargs): +# self._filename = filename +# self._x_axis = x_axis +# self._calc_type = calc_type +# +# self._data = np.loadtxt(self._filename) +# if self._x_axis == 'time': +# self._data[:, 0]=self._data[:, 0]*c +# +# impulse_range = (self._data[0,0],self._data[-1,0]) +# +# super(self.__class__, self).__init__(impulse_range, **kwargs) +# self.label = 'Convolution from external data' +# +# def calculate_response(self, impulse_response_bin_mid, impulse_response_bin_edges): +# +# if self._calc_type == 'mean': +# return np.interp(impulse_response_bin_mid, self._data[:, 0], self._data[:, 1]) +# elif self._calc_type == 'integral': +# s = UnivariateSpline(self._data[:, 0], self._data[:, 1]) +# response_values = np.zeros(len(impulse_response_bin_mid)) +# +# for i, edges in enumerate(impulse_response_bin_edges): +# response_values[i], _ = s.integral(edges[0],edges[1]) +# return response_values +# +# else: +# raise ValueError('Unknown value in ConvolutionFromFile._calc_type') + +class ConvolutionFilter(Convolution): + """ An abstract class for the filtes based on convolution.""" + + __metaclass__ = ABCMeta + + def __init__(self,scaling, zero_bin_value=None, normalization=None, + f_cutoff_2nd=None, **kwargs): + + self._f_cutoff_2nd = f_cutoff_2nd + + self._scaling = scaling + self._normalization = normalization + + self._zero_bin_value = zero_bin_value + super(ConvolutionFilter, self).__init__(**kwargs) + self.label='ConvolutionFilter' + # NOTE: is the tip cut needed? How to work with the sharp tips of the ideal filters? + + def response_function(self, impulse_ref_edges, n_segments, original_segment_length): + impulse = np.zeros(len(impulse_ref_edges)) + + for i, edges in enumerate(impulse_ref_edges): + # normalizes the edges to dimensioles units + integral_from = edges[0] * self._scaling + integral_to = edges[1] * self._scaling + + # calculates the impulse value for the bin by integrating the impulse + # response over the normalized bin + if (self._impulse_response(integral_from) == 0) and (self._impulse_response(integral_to) == 0) and (self._impulse_response((integral_from+integral_to)/2.) == 0): + # gives zero value if impulse response values are zero on the edges and middle of the bin + # (optimization for the FCC simulations) + impulse[i] = 0. + else: + impulse[i], _ = integrate.quad(self._impulse_response, integral_from, integral_to) + + # normalizes the impulse response + norm_coeff = self. _normalization_coefficient(impulse_ref_edges, impulse, original_segment_length) + impulse = impulse/norm_coeff + + if self._f_cutoff_2nd is not None: + impulse = self._filter_2nd_cutoff(impulse,impulse_ref_edges, n_segments, original_segment_length) + + # searches the zero bin and adds it the set zero bin value if it is + # determined + if self._zero_bin_value is not None: + for i, edges in enumerate(impulse_ref_edges): + if (edges[0] <= 0.) and (0. 
< edges[1]): + impulse[i] = impulse_ref_edges[i] + self._zero_bin_value + + return impulse + + def _filter_2nd_cutoff(self, impulse,impulse_ref_edges, n_segments, original_segment_length): + ref_points = [] + mids = bin_mids(impulse_ref_edges) + n_bins_per_segment = int(len(impulse)/n_segments) + for i in xrange(n_segments): + i_from = i * n_bins_per_segment + i_to = (i + 1) * n_bins_per_segment + ref_points.append(np.mean(mids[i_from:i_to])) + parameters = Parameters(signal_class=1, bin_edges=impulse_ref_edges, n_segments=n_segments, + n_bins_per_segment=n_bins_per_segment, segment_ref_points=ref_points, + previous_parameters=[], location=0, beta=1.) + + impulse_filter = Gaussian(self._f_cutoff_2nd) + + output_parameters, output_signal = impulse_filter.process(parameters,impulse) + return output_signal + + def _normalization_coefficient(self, impulse_ref_edges, impulse, segment_length): + + if self._normalization is None: + pass + elif isinstance(self._normalization, tuple): + if self._normalization[0] == 'integral': + norm_coeff, _ = integrate.quad(self._impulse_response, self._normalization[1][0], self._normalization[1][1]) + elif self._normalization[0] == 'bunch_by_bunch': + f_h = self._normalization[1] + + norm_coeff = 0. + for i in xrange(-1000,1000): + x = float(i)* (1./f_h) * self._scaling + norm_coeff += self._impulse_response(x) + #print norm_coeff + #print x + #print self._normalization[1] * self._scaling * c + #print self._normalization[1] * c + + + norm_coeff = norm_coeff*(segment_length * self._scaling) + else: + raise ValueError('Unknown normalization method!') + elif self._normalization == 'sum': + norm_coeff = np.sum(impulse) + + else: + raise ValueError('Unknown normalization method!') + + return norm_coeff +# +# if self._normalization is None: +# pass +# elif isinstance(self._normalization, float): +# impulse_values = impulse_values/self._normalization +# elif isinstance(self._normalization, tuple): +# if self._normalization[0] == 'bunch_by_bunch': +# bunch_spacing = self._normalization[1] * c +# +# bunch_locations = np.array([]) +# if (impulse_bin_edges[0,0] < 0): +# bunch_locations = np.append(bunch_locations, -1.*np.arange(0.,-1.*impulse_bin_edges[0,0],bunch_spacing)) +# if (impulse_bin_edges[-1,1] > 0): +# bunch_locations = np.append(bunch_locations, np.arange(0.,impulse_bin_edges[-1,1],bunch_spacing)) +# +# bunch_locations = np.unique(bunch_locations) +# +# min_mask = (bunch_locations >= impulse_bin_edges[0,0]) +# max_mask = (bunch_locations <= impulse_bin_edges[-1,1]) +# +# bunch_locations = bunch_locations[min_mask*max_mask] +# +# total_sum = 0. +# +# # TODO: check, which is the best way to calculate the normalization coefficient +# total_sum = np.sum(np.interp([bunch_locations], impulse_bin_mids, impulse_values)) +## for location in bunch_locations: +## min_mask = (impulse_bin_mids > (location - bunch_length/2.)) +## max_mask = (impulse_bin_mids < (location + bunch_length/2.)) +## +## total_sum += np.mean(impulse_values[min_mask*max_mask]) +# +# impulse_values = impulse_values/total_sum +# +# else: +# raise ValueError('Unknown normalization method') +# +# elif self._normalization == 'max': +# impulse_values = impulse_values/np.max(impulse_values) +# elif self._normalization == 'min': +# impulse_values = impulse_values/np.min(impulse_values) +# elif self._normalization == 'average': +# impulse_values = impulse_values/np.abs(np.mean(impulse_values)) +# elif self._normalization == 'sum': +# # TODO: check naming, this is not a sum, but an integral? 
+# impulse_values = impulse_values/np.abs(np.sum(impulse_values)) +# elif self._normalization == 'integral': +# bin_widths = impulse_bin_edges[:,1]-impulse_bin_edges[:,0] +# impulse_values = impulse_values / np.abs(np.sum(impulse_values*bin_widths)) +# else: +# raise ValueError('Unknown normalization method') +# +# if self._zero_bin_value is not None: +# for i, edges in enumerate(impulse_bin_edges): +# if (edges[0] <= 0.) and (0. < edges[1]): +# impulse_values[i] = impulse_values[i] + self._zero_bin_value +# +# return impulse_values + + def _impulse_response(x): + """ Impulse response of the filter. + :param x: normalized time (t*2.*pi*f_c) + :return: response at the given time + """ + pass + + + +class Lowpass(ConvolutionFilter): + """ A classical lowpass filter, which is also known as a RC-filter or one + poll roll off. + """ + def __init__(self,f_cutoff, normalization=None, max_impulse_length = 5., **kwargs): + scaling = 2. * pi * f_cutoff + + if normalization is None: + normalization=('integral',(-max_impulse_length,max_impulse_length)) + + self._impulse_response = abstract_filter_responses.normalized_lowpass(max_impulse_length) + + super(self.__class__, self).__init__(scaling, normalization=normalization,**kwargs) + self.label = 'Lowpass filter' + self.time_scale = max_impulse_length/scaling + + +class Highpass(ConvolutionFilter): + """ A high pass version of the lowpass filter, which is constructed by + multiplying the lowpass filter by a factor of -1 and adding to the first + bin 1 + """ + def __init__(self,f_cutoff, normalization=None, max_impulse_length = 5., **kwargs): + scaling = 2. * pi * f_cutoff + + if normalization is None: + normalization=('integral',(-max_impulse_length,max_impulse_length)) + + self._impulse_response = abstract_filter_responses.normalized_highpass(max_impulse_length) + + super(self.__class__, self).__init__( scaling, zero_bin_value= 1., normalization=normalization, **kwargs) + self.label = 'Highpass filter' + self.time_scale = max_impulse_length/scaling + +class PhaseLinearizedLowpass(ConvolutionFilter): + """ A phase linearized 1st order lowpass filter. Note that the narrow and + sharp peak of the impulse response makes the filter to be sensitive + to the bin width and may yield an unrealistically good response for the + short signals. Thus it is recommended to set a second order cut off + frequency, which smooths the impulse response by using a Gaussian filter. + """ + + def __init__(self,f_cutoff, normalization=None, max_impulse_length = 5., **kwargs): + scaling = 2. * pi * f_cutoff + + if normalization is None: + normalization=('integral',(-max_impulse_length,max_impulse_length)) + + self._impulse_response = abstract_filter_responses.normalized_phase_linearized_lowpass(max_impulse_length) + + super(self.__class__, self).__init__( scaling, normalization=normalization, **kwargs) + self.label = 'Phaselinearized lowpass filter' + self.time_scale = max_impulse_length/scaling + + +class Gaussian(ConvolutionFilter): + """ A Gaussian low pass filter, which impulse response is a Gaussian function. + """ + def __init__(self,f_cutoff, normalization=None, max_impulse_length = 5., **kwargs): + scaling = 2. 
* pi * f_cutoff + + if normalization is None: + normalization=('integral',(-max_impulse_length,max_impulse_length)) + + self._impulse_response = abstract_filter_responses.normalized_Gaussian(max_impulse_length) + + super(self.__class__, self).__init__( scaling, normalization=normalization, **kwargs) + self.label = 'Gaussian lowpass filter' + self.time_scale = 1*max_impulse_length/scaling + + +class Sinc(ConvolutionFilter): + """ A nearly ideal lowpass filter, i.e. a windowed Sinc filter. The impulse response of the ideal lowpass filter + is Sinc function, but because it is infinite length in both positive and negative time directions, it can not be + used directly. Thus, the length of the impulse response is limited by using windowing. Properties of the filter + depend on the width of the window and the type of the windows and must be written down. Too long window causes + ripple to the signal in the time domain and too short window decreases the slope of the filter in the frequency + domain. The default values are a good compromise. More details about windowing can be found from + http://www.dspguide.com/ch16.htm and different options for the window can be visualized, for example, by using + code in example/test 004_analog_signal_processors.ipynb + """ + + def __init__(self, f_cutoff, window_width = 3., window_type = 'blackman', normalization=None, + **kwargs): + """ + :param f_cutoff: a cutoff frequency of the filter + :param delay: a delay of the filter [s] + :param window_width: a (half) width of the window in the units of zeros of Sinc(x) [2*pi*f_c] + :param window_type: a shape of the window, blackman or hamming + :param norm_type: see class LinearTransform + :param norm_range: see class LinearTransform + """ + scaling = 2. * pi * f_cutoff + + if normalization is None: + normalization=('integral',(-window_width,window_width)) + + self._impulse_response = abstract_filter_responses.normalized_sinc(window_type, window_width) + + super(self.__class__, self).__init__(scaling,normalization=normalization, **kwargs) + self.label = 'Sinc filter' + self.time_scale = window_width/scaling + + +class FIRFilter(Convolution): + """ Calculates a convolution over the signal by using the given coefficients as a kernel. 
+ """ + + def __init__(self, coefficients, zero_tap = 0, **kwargs): + """ + Parameters + ---------- + coefficients : array + A list of the filter coefficients + zero_tap : int + A list index for the coefficient at + """ + + self._zero_tap = zero_tap + + self._input_coefficients = coefficients + + super(FIRFilter, self).__init__(**kwargs) + self.label = 'FIR filter' + + + def response_function(self, impulse_ref_edges, n_segments, original_segment_length): + impulse = np.zeros(len(impulse_ref_edges)) + impulse_bin_widths = bin_widths(impulse_ref_edges) + impulse_bin_width = np.mean(impulse_bin_widths) + impulse_bin_mids = bin_mids(impulse_ref_edges) + + n_coefficients = len(self._input_coefficients) + min_filter_idx = -1*self._zero_tap + max_filter_idx = min_filter_idx + n_coefficients -1 + + for i, mid in enumerate(impulse_bin_mids): + filter_idx = mid/impulse_bin_width + filter_idx = int(np.round(filter_idx)) + + if (filter_idx >= min_filter_idx) and (filter_idx <= max_filter_idx): + impulse[i] = self._input_coefficients[filter_idx+self._zero_tap] + + return impulse \ No newline at end of file diff --git a/PyHEADTAIL/feedback/processors/cython_hacks.pyx b/PyHEADTAIL/feedback/processors/cython_hacks.pyx new file mode 100644 index 00000000..9a1140b0 --- /dev/null +++ b/PyHEADTAIL/feedback/processors/cython_hacks.pyx @@ -0,0 +1,26 @@ +#TODO: maybe this could be simplified/avoided by using cimport scipy.linalg.cython_blas + +import numpy as np +cimport numpy as np +cimport cython + +""" The functions in this file have been written, because the dot product function of NumPy slowed down PyHEADTAIL + simulation in the CERN batch system by a factor of two or more. The only working solution which was found was to + write a new function for matrix product in Cython. +""" + +@cython.boundscheck(False) +@cython.wraparound(False) + +def cython_matrix_product(double[:, ::1] matrix not None, double[::1] vector not None): + + cdef np.intp_t i, j, dim_0, dim_1 + dim_0 = matrix.shape[0] + dim_1 = matrix.shape[1] + cdef double[::1] D = np.zeros(dim_0) + + for i in range(dim_0): + for j in range(dim_1): + D[i] += matrix[i,j]* vector[j] + + return D \ No newline at end of file diff --git a/PyHEADTAIL/feedback/processors/linear_transform.py b/PyHEADTAIL/feedback/processors/linear_transform.py new file mode 100644 index 00000000..dfe60969 --- /dev/null +++ b/PyHEADTAIL/feedback/processors/linear_transform.py @@ -0,0 +1,391 @@ +from collections import deque +from abc import ABCMeta, abstractmethod +import numpy as np +from scipy.constants import c, pi +import scipy.integrate as integrate +from scipy import linalg +from cython_hacks import cython_matrix_product +from ..core import default_macros +import abstract_filter_responses + +"""Signal processors based on linear transformation. + +@author Jani Komppula +@date: 11/10/2017 +""" + +class LinearTransform(object): + __metaclass__ = ABCMeta + """ An abstract class for signal processors which are based on linear transformation. The signal is processed by + calculating a dot product of a transfer matrix and a signal. 
diff --git a/PyHEADTAIL/feedback/processors/linear_transform.py b/PyHEADTAIL/feedback/processors/linear_transform.py
new file mode 100644
index 00000000..dfe60969
--- /dev/null
+++ b/PyHEADTAIL/feedback/processors/linear_transform.py
@@ -0,0 +1,391 @@
+from collections import deque
+from abc import ABCMeta, abstractmethod
+import numpy as np
+from scipy.constants import c, pi
+import scipy.integrate as integrate
+from scipy import linalg
+from .cython_hacks import cython_matrix_product
+from ..core import default_macros
+from . import abstract_filter_responses
+
+"""Signal processors based on linear transformation.
+
+@author Jani Komppula
+@date: 11/10/2017
+"""
+
+class LinearTransform(object, metaclass=ABCMeta):
+    """ An abstract class for signal processors which are based on linear transformation. The signal is processed by
+        calculating a dot product of a transfer matrix and the signal. The transfer matrix is produced with an
+        abstract method, namely response_function(*args), which returns the elements of the matrix (the effect of
+        the ref_bin on the bin).
+    """
+
+    def __init__(self, mode = 'bunch_by_bunch', normalization=None, bin_middle = 'bin', **kwargs):
+        """
+        :param normalization: describes the normalization method for the transfer matrix
+            'bunch_average': an average value over the bunch is equal to 1
+            'fixed_average': an average value over a range given in the parameter norm_range is equal to 1
+            'bunch_integral': an integral over the bunch is equal to 1
+            'fixed_integral': an integral over a fixed range given in the parameter norm_range is equal to 1
+            'matrix_sum': a sum over elements in the middle column of the matrix is equal to 1
+            None: no normalization
+        :param norm_range: normalization length in the cases of 'fixed_average' or 'fixed_integral'
+        :param bin_check: if True, a change of the bin_set is checked every time process() is called and the matrix
+            is recalculated if any change is found
+        :param bin_middle: defines if the middle points of the bins are determined by the middle point of the bin
+            (bin_middle = 'bin') or the average position of the macroparticles (bin_middle = 'particles')
+        """
+
+        self._mode = mode
+
+        self._normalization = normalization
+        self._bin_middle = bin_middle
+
+        self._z_bin_set = None
+        self._matrix = None
+
+        self._recalculate_matrix = True
+
+        self.signal_classes = (0,0)
+
+        self._n_segments = None
+        self._n_bins_per_segment = None
+        self._mid_bunch = None
+
+        self.extensions = []
+        self._macros = []
+        default_macros(self, 'LinearTransform', **kwargs)
+
+        if bin_middle == 'particles':
+            self.extensions.append('bunch')
+            self.required_variables = ['mean_z']
+
+    @abstractmethod
+    def response_function(self, parameters, ref_bin_mid, ref_bin_from, ref_bin_to, bin_mid, bin_from, bin_to):
+        # Impulse response function of the processor
+        pass
+
+    def process(self,parameters, signal, slice_sets = None, *args, **kwargs):
+
+        if self._matrix is None:
+
+            if self._bin_middle == 'particles':
+                bin_midpoints = np.array([])
+                for slice_set in slice_sets:
+                    bin_midpoints = np.append(bin_midpoints, slice_set.mean_z)
+            elif self._bin_middle == 'bin':
+                bin_midpoints = (parameters['bin_edges'][:, 1] + parameters['bin_edges'][:, 0]) / 2.
+            else:
+                raise ValueError('Unknown value for LinearTransform._bin_middle')
+
+            self._n_segments = parameters['n_segments']
+            self._n_bins_per_segment = parameters['n_bins_per_segment']
+
+            self.__generate_matrix(parameters, parameters['bin_edges'],bin_midpoints)
+
+        if self._mode == 'total':
+            output_signal = np.array(cython_matrix_product(self._matrix, signal))
+        elif self._mode == 'bunch_by_bunch':
+            output_signal = np.zeros(len(signal))
+
+            for i in range(self._n_segments):
+                idx_from = i * self._n_bins_per_segment
+                idx_to = (i+1) * self._n_bins_per_segment
+                np.copyto(output_signal[idx_from:idx_to],cython_matrix_product(self._matrix, signal[idx_from:idx_to]))
+        else:
+            raise ValueError('Unknown value for LinearTransform._mode')
+
+        return parameters, output_signal
+
+        # np.dot can't be used, because it slows down the calculations in LSF by a factor of two or more
+        # return np.dot(self._matrix,signal)
+
+    def clear(self):
+        self._matrix = np.array([])
+        self._recalculate_matrix = True
+
+    def print_matrix(self):
+        for row in self._matrix:
+            print("[", end=" ")
+            for element in row:
+                print("{:6.3f}".format(element), end=" ")
+            print("]")
+
+    def __generate_matrix(self,parameters, bin_edges, bin_midpoints):
+
+        self._mid_bunch = int(self._n_segments/2)
+
+        bunch_mid = (bin_edges[0,0]+bin_edges[(self._n_bins_per_segment - 1),1]) / 2.
+
+        norm_bunch_midpoints = bin_midpoints[:self._n_bins_per_segment]
+        norm_bunch_midpoints = norm_bunch_midpoints - bunch_mid
+        norm_bin_edges = bin_edges[:self._n_bins_per_segment]
+        norm_bin_edges = norm_bin_edges - bunch_mid
+
+        if self._mode == 'bunch_by_bunch':
+
+            self._matrix = np.identity(len(norm_bunch_midpoints))
+
+            for i, midpoint_i in enumerate(norm_bunch_midpoints):
+                for j, midpoint_j in enumerate(norm_bunch_midpoints):
+                    self._matrix[j][i] = self.response_function(parameters,
+                            midpoint_i,norm_bin_edges[i,0],norm_bin_edges[i,1],
+                            midpoint_j,norm_bin_edges[j,0],norm_bin_edges[j,1])
+
+        elif self._mode == 'total':
+            self._matrix = np.identity(len(bin_midpoints))
+            for i, midpoint_i in enumerate(bin_midpoints):
+                for j, midpoint_j in enumerate(bin_midpoints):
+                    self._matrix[j][i] = self.response_function(parameters,
+                            midpoint_i, bin_edges[i, 0], bin_edges[i, 1],
+                            midpoint_j, bin_edges[j, 0], bin_edges[j, 1])
+
+        else:
+            raise ValueError('Unrecognized value in LinearTransform._mode')
+
+        total_impulse = np.append(self._matrix[:,-1],self._matrix[1:,0])
+        bin_widths = bin_edges[:, 1]-bin_edges[:, 0]
+        total_bin_widths = np.append(bin_widths,bin_widths[1:])
+
+        if self._normalization is None:
+            pass
+        elif self._normalization == 'max':
+            self._matrix = self._matrix/np.max(total_impulse)
+        elif self._normalization == 'min':
+            self._matrix = self._matrix/np.min(total_impulse)
+        elif self._normalization == 'average':
+            self._matrix = self._matrix/np.abs(np.mean(total_impulse))
+        elif self._normalization == 'sum':
+            self._matrix = self._matrix/np.abs(np.sum(total_impulse))
+        elif self._normalization == 'column_sum':
+            self._matrix = self._matrix/np.abs(np.sum(self._matrix[:,0]))
+        elif self._normalization == 'integral':
+            self._matrix = self._matrix / np.abs(np.sum(total_impulse* total_bin_widths))
+        else:
+            raise ValueError('Unrecognized value in LinearTransform._normalization')
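
To make the transfer-matrix mechanics above concrete, here is a hedged, standalone sketch of what an Averager-style matrix (see the class below) does under 'column_sum' normalization; all names and values are illustrative, nothing here is part of the patch:

```python
# Standalone sketch: an Averager-style transfer matrix. response_function
# returns 1 for every (ref_bin, bin) pair, and 'column_sum' normalization
# divides by the sum of the first column, so each output bin becomes the
# mean of the input segment.
import numpy as np

n_bins = 4
matrix = np.ones((n_bins, n_bins))          # response_function(...) == 1
matrix /= np.abs(np.sum(matrix[:, 0]))      # 'column_sum' normalization

signal = np.array([1., 2., 3., 6.])
out = matrix @ signal                       # stands in for cython_matrix_product
assert np.allclose(out, np.mean(signal))    # every bin holds the segment mean
```
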
+ """ + + def __init__(self, mode = 'bunch_by_bunch', normalization = 'column_sum', **kwargs): + super(self.__class__, self).__init__(mode, normalization, **kwargs) + self.label = 'Averager' + + def response_function(self, parameters, ref_bin_mid, ref_bin_from, ref_bin_to, bin_mid, bin_from, bin_to): + return 1 + +class Delay(LinearTransform): + """ Delays signal in the units of [second]. + """ + def __init__(self,delay, **kwargs): + self._delay = delay + super(self.__class__, self).__init__( **kwargs) + self.label = 'Delay' + + def response_function(self, parameters, ref_bin_mid, ref_bin_from, ref_bin_to, bin_mid, bin_from, bin_to): + + return self.__CDF(bin_to, ref_bin_from, ref_bin_to) - self.__CDF(bin_from, ref_bin_from, ref_bin_to) + + def __CDF(self,x,ref_bin_from, ref_bin_to): + if (x-self._delay) <= ref_bin_from: + return 0. + elif (x-self._delay) < ref_bin_to: + return ((x-self._delay)-ref_bin_from)/float(ref_bin_to-ref_bin_from) + else: + return 1. + +class LinearTransformFromFile(LinearTransform): + """ Interpolates matrix columns by using inpulse response data from a file. """ + + def __init__(self,filename, x_axis = 'time', **kwargs): + self._filename = filename + self._x_axis = x_axis + self._data = np.loadtxt(self._filename) + if self._x_axis == 'time': + self._data[:, 0]=self._data[:, 0] + + super(self.__class__, self).__init__( **kwargs) + self.label = 'LT from file' + + def response_function(self, parameters, ref_bin_mid, ref_bin_from, ref_bin_to, bin_mid, bin_from, bin_to): + return np.interp(bin_mid - ref_bin_mid, self._data[:, 0], self._data[:, 1]) + + +class LinearTransformFilter(LinearTransform): + __metaclass__ = ABCMeta + """ A general class for (analog) filters. Impulse response of the filter must be determined by overwriting + the function raw_impulse_response. + + This processor includes two additional properties. + + """ + + def __init__(self, scaling, zero_bin_value=None, normalization=None, **kwargs): + + self._scaling = scaling + + if normalization == 'sum': + self._filter_normalization = None + matrix_normalization = normalization + else: + self._filter_normalization = normalization + matrix_normalization = None + + self._zero_bin_value = zero_bin_value + super(LinearTransformFilter, self).__init__(normalization = matrix_normalization, **kwargs) + self.label='LinearTransformFilter' + + self._norm_coeff = None + + def response_function(self, parameters, ref_bin_mid, ref_bin_from, ref_bin_to, bin_mid, bin_from, bin_to): + # Frequency scaling must be done by scaling integral limits, because integration by substitution doesn't work + # with np.quad (see quad_problem.ipynbl). An ugly way, which could be fixed. + + temp, _ = integrate.quad(self._impulse_response, self._scaling * (bin_from - (ref_bin_mid)), + self._scaling * (bin_to - (ref_bin_mid))) + +# temp, _ = integrate.quad(self._impulse_response, self._scaling * (bin_from - (ref_bin_mid + self._delay_z)), +# self._scaling * (bin_to - (ref_bin_mid + self._delay_z))) + + if ref_bin_mid == bin_mid: + if self._zero_bin_value is not None: + temp += self._zero_bin_value + + if self._norm_coeff is None: + self._norm_coeff = self._normalization_coefficient(parameters) + + return temp/self._norm_coeff + + def _normalization_coefficient(self, parameters): + + if self._filter_normalization is None: + norm_coeff = 1. 
+        elif isinstance(self._filter_normalization, tuple):
+            if self._filter_normalization[0] == 'integral':
+                norm_coeff, _ = integrate.quad(self._impulse_response, self._filter_normalization[1][0],
+                                               self._filter_normalization[1][1])
+            elif self._filter_normalization[0] == 'bunch_by_bunch':
+                f_h = self._filter_normalization[1]
+
+                norm_coeff = 0.
+                for i in range(-1000,1000):
+                    x = float(i)* (1./f_h) * self._scaling
+                    norm_coeff += self._impulse_response(x)
+
+                bin_edges = parameters['bin_edges']
+                n_bins_per_segment = parameters['n_bins_per_segment']
+                segment_length = bin_edges[n_bins_per_segment-1,1] - bin_edges[0,0]
+
+                norm_coeff = norm_coeff*(segment_length * self._scaling)
+            else:
+                raise ValueError('Unknown normalization method!')
+        else:
+            raise ValueError('Unknown normalization method!')
+
+        return norm_coeff
+
+class Lowpass(LinearTransformFilter):
+    """ A classical lowpass filter, which is also known as an RC filter or
+        one-pole roll-off.
+    """
+    def __init__(self,f_cutoff, normalization=None, max_impulse_length = 5., **kwargs):
+        scaling = 2. * pi * f_cutoff
+
+        if normalization is None:
+            normalization=('integral',(-max_impulse_length,max_impulse_length))
+
+        self._impulse_response = abstract_filter_responses.normalized_lowpass(max_impulse_length)
+
+        super(self.__class__, self).__init__(scaling, normalization=normalization,**kwargs)
+        self.label = 'Lowpass filter'
+
+
+class Highpass(LinearTransformFilter):
+    """ A highpass version of the lowpass filter, which is constructed by
+        multiplying the lowpass impulse response by a factor of -1 and adding 1
+        to the bin at t = 0.
+    """
+    def __init__(self,f_cutoff, normalization=None, max_impulse_length = 5., **kwargs):
+        scaling = 2. * pi * f_cutoff
+
+        if normalization is None:
+            normalization=('integral',(-max_impulse_length,max_impulse_length))
+
+        self._impulse_response = abstract_filter_responses.normalized_highpass(max_impulse_length)
+
+        super(self.__class__, self).__init__( scaling, zero_bin_value= 1., normalization=normalization, **kwargs)
+        self.label = 'Highpass filter'
+
+class PhaseLinearizedLowpass(LinearTransformFilter):
+    """ A phase-linearized 1st order lowpass filter. Note that the narrow and
+        sharp peak of the impulse response makes the filter sensitive
+        to the bin width and may yield an unrealistically good response for
+        short signals. Thus, it is recommended to use a higher bandwidth Gaussian
+        filter together with this filter.
+    """
+
+    def __init__(self,f_cutoff, normalization=None, max_impulse_length = 5., **kwargs):
+        scaling = 2. * pi * f_cutoff
+
+        if normalization is None:
+            normalization=('integral',(-max_impulse_length,max_impulse_length))
+
+        self._impulse_response = abstract_filter_responses.normalized_phase_linearized_lowpass(max_impulse_length)
+
+        super(self.__class__, self).__init__( scaling, normalization=normalization, **kwargs)
+        self.label = 'Phaselinearized lowpass filter'
+
+
+class Gaussian(LinearTransformFilter):
+    """ A Gaussian lowpass filter, whose impulse response is a Gaussian function.
+    """
+    def __init__(self,f_cutoff, normalization=None, max_impulse_length = 5., **kwargs):
+        scaling = 2. * pi * f_cutoff
+
+        if normalization is None:
+            normalization=('integral',(-max_impulse_length,max_impulse_length))
+
+        self._impulse_response = abstract_filter_responses.normalized_Gaussian(max_impulse_length)
+
+        super(self.__class__, self).__init__( scaling, normalization=normalization, **kwargs)
+        self.label = 'Gaussian lowpass filter'
+
+
+class Sinc(LinearTransformFilter):
+    """ A nearly ideal lowpass filter, i.e. a windowed sinc filter. The impulse response of the ideal lowpass filter
+        is a sinc function, but because it has infinite length in both positive and negative time directions, it
+        cannot be used directly. Thus, the length of the impulse response is limited by using windowing. Properties
+        of the filter depend on the width of the window and on the window type. Too long a window causes ripple in
+        the signal in the time domain, and too short a window decreases the slope of the filter in the frequency
+        domain. The default values are a good compromise. More details about windowing can be found at
+        http://www.dspguide.com/ch16.htm, and the different options for the window can be visualized, for example,
+        by using the code in example/test 004_analog_signal_processors.ipynb
+    """
+
+    def __init__(self, f_cutoff, window_width = 3., window_type = 'blackman', normalization=None,
+                 **kwargs):
+        """
+        :param f_cutoff: a cutoff frequency of the filter
+        :param window_width: a (half) width of the window in the units of zeros of sinc(x) [2*pi*f_c]
+        :param window_type: a shape of the window, 'blackman' or 'hamming'
+        :param normalization: see class LinearTransform
+        """
+        scaling = 2. * pi * f_cutoff
+
+        if normalization is None:
+            normalization=('integral',(-window_width,window_width))
+
+        self._impulse_response = abstract_filter_responses.normalized_sinc(window_type, window_width)
+
+        super(self.__class__, self).__init__(scaling,normalization=normalization, **kwargs)
+        self.label = 'Sinc filter'
\ No newline at end of file
diff --git a/PyHEADTAIL/feedback/processors/misc.py b/PyHEADTAIL/feedback/processors/misc.py
new file mode 100644
index 00000000..25336424
--- /dev/null
+++ b/PyHEADTAIL/feedback/processors/misc.py
@@ -0,0 +1,53 @@
+import numpy as np
+from ..core import default_macros
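
Bypass (below) is the simplest instance of the processor contract used throughout these files: process(parameters, signal, ...) returns a (parameters, signal) pair, so processors can be chained. A minimal, self-contained sketch of that contract (hypothetical classes, not the patch's implementations):

```python
# Sketch of the processor contract: every processor returns a
# (parameters, signal) tuple, so a feedback chain is a fold over the list.
import numpy as np

class Bypass(object):
    def process(self, parameters, signal, *args, **kwargs):
        return parameters, signal

class Gain(object):
    def __init__(self, gain):
        self._gain = gain
    def process(self, parameters, signal, *args, **kwargs):
        return parameters, self._gain * signal

def run_chain(processors, parameters, signal):
    for processor in processors:
        parameters, signal = processor.process(parameters, signal)
    return parameters, signal

_, out = run_chain([Bypass(), Gain(0.5)], {}, np.ones(8))
assert np.allclose(out, 0.5)
```
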
+ """ + + def __init__(self, **kwargs): + self.signal_classes = (0, 0) + + self.extensions = [] + self._macros = [] + default_macros(self, 'Bypass', **kwargs) + + def process(self, parameters, signal, *args, **kwargs): + + return parameters, signal + + +class Average(object): + + def __init__(self, avg_type = 'bunch', **kwargs): + self.label = 'Average' + self._avg_type = avg_type + + + self.signal_classes = (0, 0) + + self.extensions = [] + self._macros = [] + default_macros(self, 'Average', **kwargs) + + + def process(self, parameters, signal, *args, **kwargs): + + if self._avg_type == 'bunch': + n_segments = parameters.n_segments + n_slices_per_segment = parameters.n_slices_per_segment + + output_signal = np.zeros(len(signal)) + ones = np.ones(n_slices_per_segment) + + for i in xrange(n_segments): + idx_from = i * n_slices_per_segment + idx_to = (i + 1) * n_slices_per_segment + np.copyto(output_signal[idx_from:idx_to], ones * np.mean(signal[idx_from:idx_to])) + + elif self._avg_type == 'total': + output_signal = np.ones(len(signal))*np.mean(signal) + + else: + raise ValueError('Unknown value in Average._avg_type') + + return parameters, output_signal \ No newline at end of file diff --git a/PyHEADTAIL/feedback/processors/multiplication.py b/PyHEADTAIL/feedback/processors/multiplication.py new file mode 100644 index 00000000..c58ee422 --- /dev/null +++ b/PyHEADTAIL/feedback/processors/multiplication.py @@ -0,0 +1,292 @@ +from abc import ABCMeta, abstractmethod +import numpy as np +from scipy.constants import c, pi +from ..core import default_macros + +"""Signal processors based on multiplication operation. + +@author Jani Komppula +@date: 11/10/2017 +""" + +class Multiplication(object): + __metaclass__ = ABCMeta + """ An abstract class which multiplies the input signal by an array. The multiplier array is produced by taking + a slice property (determined by the input parameter 'seed') and passing it through the abstract method + multiplication_function(seed). 
+ """ + def __init__(self, seed, normalization = None, recalculate_multiplier = False, **kwargs): + """ + :param seed: a seed for the multiplier, which can be 'bin_length', 'bin_midpoint', 'signal' or any slice + property found from slice_set + :param normalization: normalization of the multiplier + 'total_sum': The sum over the multiplier is equal to 1 + 'segment_sum': The sum of the multiplier over each signal segment is equal to 1 + 'total_average': The average of the multiplier is equal to 1 + 'segment_average': The average multiplier of each signal segment is equal to 1 + 'total_integral': The total integral over the multiplier is equal to 1 + 'segment_integral': The integral of the multiplier over each signal segment is equal to 1 + 'total_min': The minimum of the multiplier is equal to 1 + 'segment_min': The minimum of the multiplier in each signal segment is equal to 1 + 'total_max': The minimum of the multiplier is equal to 1 + 'segment_max': The minimum of the multiplier in each signal segment is equal to 1 + :param recalculate_weight: if True, the weight is recalculated every time when process() is called + """ + + self._seed = seed + self._normalization = normalization + self._recalculate_multiplier = recalculate_multiplier + + self._multiplier = None + + self.signal_classes = (0,0) + + self.extensions = [] + self._macros = [] + default_macros(self, 'Multiplication', **kwargs) + + if self._seed not in ['bin_length','bin_midpoint','signal','ones']: + self.extensions.append('bunch') + self.required_variables = [self._seed] + + + @abstractmethod + def multiplication_function(self, seed): + pass + + def process(self,parameters, signal, slice_sets = None, *args, **kwargs): + + if (self._multiplier is None) or self._recalculate_multiplier: + self.__calculate_multiplier(parameters, signal, slice_sets) + + output_signal = self._multiplier*signal + + # process the signal + return parameters, output_signal + + def __calculate_multiplier(self,parameters, signal, slice_sets): + self._multiplier = np.zeros(len(signal)) + + if self._seed == 'ones': + self._multiplier = self._multiplier + 1. + elif self._seed == 'bin_width': + np.copyto(self._multiplier, (parameters['bin_edges'][:,1]-parameters['bin_edges'][:,0])) + elif self._seed == 'bin_midpoint': + np.copyto(self._multiplier, ((parameters['bin_edges'][:,1]+parameters['bin_edges'][:,0])/2.)) + elif self._seed == 'normalized_bin_midpoint': + + for i in xrange(parameters['n_segments']): + i_from = i * parameters['n_bins_per_segment'] + i_to = (i + 1) * parameters['n_bins_per_segment'] + + np.copyto(self._multiplier[i_from:i_to], ((parameters['bin_edges'][i_from:i_to,1]+ + parameters['bin_edges'][i_from:i_to,0])/2. + -parameters['segment_midpoints'][i])) + + elif self._seed == 'signal': + np.copyto(self._multiplier,signal) + else: + if len(signal) == len(slice_sets) * (len(slice_sets[0].z_bins) - 1): + start_idx = 0 + for slice_set in slice_sets: + seed = getattr(slice_set,self._seed) + np.copyto(self._multiplier[start_idx:(start_idx+len(seed))],seed) + start_idx += len(seed) + np.copyto(self._multiplier, self._multiplier[::-1]) + else: + raise ValueError('Signal length does not correspond to the original signal length ' + 'from the slice sets in the method Multiplication') + + self._multiplier = self.multiplication_function(self._multiplier) + + # NOTE: add options for average bin integrals? + if self._normalization is None: + norm_coeff = 1. 
+
+        elif self._normalization == 'total_sum':
+            norm_coeff = float(np.sum(self._multiplier))
+
+        elif self._normalization == 'segment_sum':
+            norm_coeff = np.ones(len(self._multiplier))
+            for i in range(parameters['n_segments']):
+                i_from = i*parameters['n_bins_per_segment']
+                i_to = (i+1)*parameters['n_bins_per_segment']
+                norm_coeff[i_from:i_to] = norm_coeff[i_from:i_to]*float(np.sum(self._multiplier[i_from:i_to]))
+
+        elif self._normalization == 'total_average':
+            norm_coeff = float(np.sum(self._multiplier))/float(len(self._multiplier))
+
+        elif self._normalization == 'segment_average':
+            norm_coeff = np.ones(len(self._multiplier))
+            for i in range(parameters['n_segments']):
+                i_from = i*parameters['n_bins_per_segment']
+                i_to = (i+1)*parameters['n_bins_per_segment']
+                norm_coeff[i_from:i_to] = norm_coeff[i_from:i_to]*float(np.sum(self._multiplier[i_from:i_to]))/float(parameters['n_bins_per_segment'])
+
+        elif self._normalization == 'total_integral':
+            bin_widths = parameters['bin_edges'][:,1] - parameters['bin_edges'][:,0]
+            norm_coeff = np.sum(self._multiplier*bin_widths)
+
+        elif self._normalization == 'segment_integral':
+            bin_widths = parameters['bin_edges'][:,1] - parameters['bin_edges'][:,0]
+            norm_coeff = np.ones(len(self._multiplier))
+            for i in range(parameters['n_segments']):
+                i_from = i*parameters['n_bins_per_segment']
+                i_to = (i+1)*parameters['n_bins_per_segment']
+                norm_coeff[i_from:i_to] = norm_coeff[i_from:i_to]*float(np.sum(self._multiplier[i_from:i_to]*bin_widths[i_from:i_to]))
+
+        elif self._normalization == 'total_min':
+            norm_coeff = float(np.min(self._multiplier))
+
+        elif self._normalization == 'segment_min':
+            norm_coeff = np.ones(len(self._multiplier))
+            for i in range(parameters['n_segments']):
+                i_from = i*parameters['n_bins_per_segment']
+                i_to = (i+1)*parameters['n_bins_per_segment']
+                norm_coeff[i_from:i_to] = norm_coeff[i_from:i_to]*float(np.min(self._multiplier[i_from:i_to]))
+
+        elif self._normalization == 'total_max':
+            norm_coeff = float(np.max(self._multiplier))
+
+        elif self._normalization == 'segment_max':
+            norm_coeff = np.ones(len(self._multiplier))
+            for i in range(parameters['n_segments']):
+                i_from = i*parameters['n_bins_per_segment']
+                i_to = (i+1)*parameters['n_bins_per_segment']
+                norm_coeff[i_from:i_to] = norm_coeff[i_from:i_to]*float(np.max(self._multiplier[i_from:i_to]))
+        else:
+            raise ValueError('Unknown value in Multiplication._normalization')
+
+        # TODO: try to figure out why this can not be written as
+        # TODO: self._multiplier /= norm_coeff
+        self._multiplier = self._multiplier / norm_coeff
+
+    def clear(self):
+        self._multiplier = None
+
+
+class ChargeWeighter(Multiplication):
+    """ The signal is weighted by charge (the number of macroparticles per slice).
+    """
+
+    def __init__(self, normalization = 'segment_max', **kwargs):
+        super(self.__class__, self).__init__('n_macroparticles_per_slice', normalization,
+                                             recalculate_multiplier = True, **kwargs)
+        self.label = 'Charge weighter'
+
+    def multiplication_function(self,weight):
+        return weight
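
For intuition, a numeric sketch of the inverse Fermi-Dirac weight that EdgeWeighter (below) applies, assuming illustrative parameter values:

```python
# Numeric sketch of the EdgeWeighter weight (standalone, made-up parameters):
# w(z) = exp((|z| - bunch_length/2) / bunch_decay_length) + 1, clipped to
# [1, maximum_weight], i.e. an inverse Fermi-Dirac profile rising at the edges.
import numpy as np

bunch_length, bunch_decay_length, maximum_weight = 1.0, 0.1, 10.0
z = np.linspace(-0.75, 0.75, 7)
w = np.exp((np.absolute(z) - bunch_length / 2.) / bunch_decay_length) + 1.
w = np.clip(w, 1., maximum_weight)

print(w.round(2))  # ~1-2 in the core, rising toward 10 at the edges
```
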
+
+class EdgeWeighter(Multiplication):
+    """ Uses an inverse of the Fermi-Dirac distribution function to increase the signal strength on the edges
+        of the bunch.
+    """
+
+    def __init__(self,bunch_length,bunch_decay_length,maximum_weight = 10., **kwargs):
+        """
+        :param bunch_length: estimated width of the bunch
+        :param bunch_decay_length: slope of the function on the edge of the bunch. Smaller value, steeper slope.
+        :param maximum_weight: maximum value of the weight
+        """
+        self._bunch_length = bunch_length
+        self._bunch_decay_length = bunch_decay_length
+        self._maximum_weight=maximum_weight
+        # note: the original passed 'minimum' here, which is not a recognized normalization option;
+        # 'segment_min' appears to be the intended choice
+        super(self.__class__, self).__init__('bin_midpoint', 'segment_min', **kwargs)
+        self.label = 'Edge weighter'
+
+    def multiplication_function(self,weight):
+        weight = np.exp((np.absolute(weight)-self._bunch_length/2.)/float(self._bunch_decay_length)) + 1.
+        weight = np.clip(weight,1.,self._maximum_weight)
+        return weight
+
+
+class NoiseGate(Multiplication):
+    """ Passes the parts of the signal which are greater/less than the threshold level.
+    """
+
+    def __init__(self,threshold, operator = 'greater', threshold_ref = 'amplitude', **kwargs):
+
+        self._threshold = threshold
+        self._operator = operator
+        self._threshold_ref = threshold_ref
+        super(self.__class__, self).__init__('signal',recalculate_multiplier = True, **kwargs)
+        self.label = 'Noise gate'
+
+    def multiplication_function(self, seed):
+        multiplier = np.zeros(len(seed))
+
+        if self._threshold_ref == 'amplitude':
+            comparable = np.abs(seed)
+        elif self._threshold_ref == 'absolute':
+            comparable = seed
+
+        if self._operator == 'greater':
+            multiplier[comparable > self._threshold] = 1
+        elif self._operator == 'less':
+            multiplier[comparable < self._threshold] = 1
+
+        return multiplier
+
+
+class SignalMixer(Multiplication):
+    """ Multiplies the signal with a sine wave. The phase is locked to the midpoint of each bunch and shifted by
+        the value of phase_shift [radians]."""
+    def __init__(self,frequency,phase_shift, **kwargs):
+
+        self._frequency = frequency
+        self._phase_shift = phase_shift
+
+        super(self.__class__, self).__init__('normalized_bin_midpoint', **kwargs)
+        self.label = 'Signal mixer'
+
+    def multiplication_function(self, seed):
+        multiplier = np.sin(2.*pi*self._frequency*seed + self._phase_shift)
+        return multiplier
+
+
+class IdealAmplifier(Multiplication):
+    """ An ideal amplifier/attenuator, which multiplies the signal by a constant value of gain."""
+
+    def __init__(self,gain, **kwargs):
+
+        self._gain = gain
+
+        super(self.__class__, self).__init__('ones', **kwargs)
+        self.label = 'IdealAmplifier'
+
+    def multiplication_function(self, seed):
+        return seed * self._gain
+
+
+class SegmentAverage(Multiplication):
+    """ The average of each signal segment is set equal to 1. """
+    def __init__(self,**kwargs):
+
+        super(self.__class__, self).__init__('ones',normalization = 'segment_sum', **kwargs)
+        self.label = 'SegmentAverage'
+
+    def multiplication_function(self, seed):
+        return seed
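
A small sketch of the interpolation step used by MultiplicationFromFile (below); the two-column array stands in for np.loadtxt(filename) and the values are made up:

```python
# Sketch of the interpolation used by MultiplicationFromFile (hypothetical
# data: first column seed values, second column multipliers):
import numpy as np

data = np.array([[0.0, 1.0],
                 [1.0, 2.0],
                 [2.0, 4.0]])           # stands in for np.loadtxt(filename)
seed = np.array([0.5, 1.5])             # e.g. 'bin_midpoint' values
multiplier = np.interp(seed, data[:, 0], data[:, 1])
assert np.allclose(multiplier, [1.5, 3.0])
```
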
+ """ + + def __init__(self,filename, x_axis='time', seed='bin_midpoint', **kwargs): + super(self.__class__, self).__init__(seed, **kwargs) + self.label = 'Multiplication from file' + + self._filename = filename + self._x_axis = x_axis + self._data = np.loadtxt(self._filename) + + if self._x_axis == 'time': + pass + elif self._x_axis == 'position': + self._data[:, 0] = self._data[:, 0] / c + else: + raise ValueError('Unknown value in MultiplicationFromFile._x_axis') + + def multiplication_function(self, seed): + return np.interp(seed, self._data[:, 0], self._data[:, 1]) diff --git a/PyHEADTAIL/feedback/processors/register.py b/PyHEADTAIL/feedback/processors/register.py new file mode 100644 index 00000000..2087f246 --- /dev/null +++ b/PyHEADTAIL/feedback/processors/register.py @@ -0,0 +1,753 @@ +import math, copy +from collections import deque +from abc import ABCMeta, abstractmethod +import numpy as np +from scipy.constants import pi + +from ..core import Parameters, default_macros + +"""Signal processors based on registers and combiners. + +@author Jani Komppula +@date: 11/10/2017 +""" + +class Register(object): + """ + Stores signals to the register. The obejct is iterable, i.e. iteration + returns the stored signals after the given delay. + """ + def __init__(self, n_values, tune, delay=0, **kwargs): + """ + Parameters + ---------- + n_values : number + A maximum number of signals stored and returned (in addition to + the delay) + tune : number + A real number value of a betatron tune + delay : number + A number of turns the signal kept in the register before returning it + + """ + + self._n_values = n_values + self._delay = delay + self._phase_advance_per_turn = 2. * np.pi * tune + + self._n_iter_left = 0 + self._signal_register = deque(maxlen=(n_values + delay)) + self._parameter_register = deque(maxlen=(n_values + delay)) + + self.extensions = ['register'] + self._macros = [] + default_macros(self, 'Register', **kwargs) + + @property + def parameters(self): + if len(self._parameter_register) > 0: + return self._parameter_register[0] + else: + return None + + @property + def phase_advance_per_turn(self): + return self._phase_advance_per_turn + + @property + def delay(self): + return self._delay + + @property + def maxlen(self): + return self._n_values + + def __len__(self): + """ + Returns a number of signals in the register after the delay. + """ + return max((len(self._signal_register) - self._delay), 0) + + def __iter__(self): + """ + Calculates how many iterations are required + """ + self._n_iter_left = len(self) + + return self + + def next(self): + if self._n_iter_left < 1: + raise StopIteration + + else: + delay = -1. 
+            self._n_iter_left -= 1
+
+            return (self._parameter_register[self._n_iter_left],
+                    self._signal_register[self._n_iter_left], delay)
+
+    def process(self, parameters, signal, *args, **kwargs):
+        self._parameter_register.append(parameters)
+        self._signal_register.append(signal)
+
+        return parameters, signal
+
+
+class UncorrectedDelay(object):
+    """ Delays the signal in units of turns without any betatron phase
+        advance correction
+    """
+    def __init__(self, delay, **kwargs):
+
+        self._delay = delay
+        self._register = Register(n_values=1, tune=1., delay=self._delay)
+
+        self.extensions = ['register']
+        self._macros = []
+        default_macros(self, 'UncorrectedDelay', **kwargs)
+
+    @property
+    def delay(self):
+        return self._delay
+
+    def process(self, parameters, signal, *args, **kwargs):
+        self._register.process(parameters, signal, *args, **kwargs)
+        output_parameters = None
+        output_signal = None
+
+        for (parameters_i, signal_i, delay_i) in self._register:
+            output_parameters = parameters_i
+            output_signal = signal_i
+
+        if output_parameters is None:
+            output_parameters = parameters
+            output_signal = np.zeros(len(signal))
+
+        return output_parameters, output_signal
+
+
+class Combiner(object, metaclass=ABCMeta):
+
+    def __init__(self, registers, target_location, target_beta=None,
+                 additional_phase_advance=0., beta_conversion = '0_deg', **kwargs):
+        """
+        Parameters
+        ----------
+        registers : list
+            A list of registers, which are a source for the signal
+        target_location : number
+            A target phase advance in radians of betatron motion
+        additional_phase_advance : number
+            Additional phase advance for the target location.
+            For example, np.pi/2. for the shift from displacement in the pickup
+            to divergence in the kicker
+        """
+
+        self._registers = registers
+        self._target_location = target_location
+        self._target_beta = target_beta
+        self._additional_phase_advance = additional_phase_advance
+        self._beta_conversion = beta_conversion
+
+        if self._beta_conversion == '0_deg':
+            pass
+        elif self._beta_conversion == '90_deg':
+            self._additional_phase_advance += pi/2.
+        else:
+            raise ValueError('Unknown beta conversion type.')
+
+        self._combined_parameters = None
+
+        self.extensions = ['combiner']
+        self._macros = []
+        default_macros(self, 'Combiner', **kwargs)
+
+    @abstractmethod
+    def combine(self, registers, target_location, target_beta, additional_phase_advance, beta_conversion):
+        pass
+
+    def process(self, parameters=None, signal=None, *args, **kwargs):
+
+        output_signal = self.combine(self._registers,
+                                     self._target_location,
+                                     self._target_beta,
+                                     self._additional_phase_advance,
+                                     self._beta_conversion)
+
+        if self._combined_parameters is None:
+            self._combined_parameters = copy.copy(self._registers[0].parameters)
+            self._combined_parameters['location'] = self._target_location
+            self._combined_parameters['beta'] = self._target_beta
+
+        return self._combined_parameters, output_signal
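
The cosine-sum statement in the docstring below can be checked numerically: for readings x_i = A*cos(phi_i) at uniformly random phases, averaging 2*cos(phi_i - phi_target)*x_i converges to A*cos(phi_target). A hedged, standalone demonstration with made-up numbers:

```python
# Numeric sketch of the cosine-sum idea (illustration only):
import numpy as np

rng = np.random.default_rng(0)
A, phi_target = 1.7, 0.3
phases = rng.uniform(0., 2. * np.pi, 100000)
readings = A * np.cos(phases)

estimate = np.mean(2. * np.cos(phases - phi_target) * readings)
assert abs(estimate - A * np.cos(phi_target)) < 0.05
```
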
+ """ + + def __init__(self, *args, **kwargs): + """ + Parameters + ---------- + registers : list + A list of registers, which are a source for the signal + target_location : number + A target phase advance in radians of betatron motion + additional_phase_advance : number + Additional phase advance for the target location. + For example, np.pi/2. for shift from displacement in the pick up to + divergenve in the kicker + """ + super(self.__class__, self).__init__(*args, **kwargs) + self.label = 'Cosine sum combiner' + + def combine(self, registers, target_location, target_beta, additional_phase_advance, beta_conversion): + combined_signal = None + n_signals = 0 + + for register in registers: + for (parameters, signal, delay) in register: + if combined_signal is None: + combined_signal = np.zeros(len(signal)) + delta_position = parameters['location'] \ + - target_location + + if delta_position > 0: + delta_position -= register.phase_advance_per_turn + + delta_phi = delay + delta_position - additional_phase_advance + n_signals += 1 + + if target_beta is not None: + beta_correction = 1. / np.sqrt(parameters['beta'] * target_beta) + else: + beta_correction = 1. + + combined_signal += beta_correction * 2. * math.cos(delta_phi) * signal + + if combined_signal is not None: + combined_signal = combined_signal/float(n_signals) + + return combined_signal + +class DummyCombiner(Combiner): + """ A combiner, which by passes the signal without any corrections + """ + def __init__(self, *args, **kwargs): + """ + Parameters + ---------- + registers : list + A list of registers, which are a source for the signal + target_location : number + A target phase advance in radians of betatron motion + additional_phase_advance : number + Additional phase advance for the target location. + For example, np.pi/2. for shift from displacement in the pick up to + divergenve in the kicker + """ + super(self.__class__, self).__init__(*args, **kwargs) + self.label = 'Dummy combiner' + + def combine(self, registers, target_location, target_beta, additional_phase_advance, beta_conversion): + combined_signal = None + + if len(registers[0]) > 0: + + for (parameters, signal, delay) in registers[0]: + combined_signal = signal + + if target_beta is not None: + beta_correction = 1. / np.sqrt(parameters['beta'] * target_beta) + else: + beta_correction = 1. + + return beta_correction*combined_signal + + else: + return combined_signal + +class HilbertCombiner(Combiner): + """ A combiner, which utilizes a algorithm based on the Hilbert transform. + """ + def __init__(self, *args, **kwargs): + """ + Parameters + ---------- + registers : list + A list of registers, which are a source for the signal + target_location : number + A target phase advance in radians of betatron motion + additional_phase_advance : number + Additional phase advance for the target location. + For example, np.pi/2. 
+        if 'n_taps' in kwargs:
+            self._n_taps = kwargs['n_taps']
+        else:
+            self._n_taps = None
+
+        self._coefficients = None
+        super(self.__class__, self).__init__(*args, **kwargs)
+        self.label = 'Hilbert combiner'
+
+    @property
+    def n_taps(self):
+        return self._n_taps
+
+    @n_taps.setter
+    def n_taps(self, value):
+        self._n_taps = value
+
+    def combine(self, registers, target_location, target_beta, additional_phase_advance, beta_conversion):
+        if self._coefficients is None:
+            if self._n_taps is None:
+                self._n_taps = registers[0].maxlen
+            self._coefficients = [None]*len(registers)
+
+        combined_signal = None
+
+        for i, register in enumerate(registers):
+            if len(register) >= len(self._coefficients):
+                if self._coefficients[i] is None:
+                    self._coefficients[i] = self.__generate_coefficients(
+                        register, target_location, target_beta,
+                        additional_phase_advance)
+
+                for j, (parameters, signal, delay) in enumerate(register):
+
+                    if target_beta is not None:
+                        beta_correction = 1. / np.sqrt(parameters['beta'] * target_beta)
+                    else:
+                        beta_correction = 1.
+
+                    if combined_signal is None:
+                        combined_signal = np.zeros(len(signal))
+
+                    combined_signal += beta_correction * self._coefficients[i][j] * signal
+
+        if combined_signal is not None:
+            combined_signal = combined_signal/float(len(registers))
+
+        return combined_signal
+
+    def __generate_coefficients(self, register, target_location, target_beta, additional_phase_advance):
+        parameters = register.parameters
+
+        delta_phi = -1. * float(register.delay) \
+                * register.phase_advance_per_turn
+
+        # integer division: the taps are centered on the middle of the register
+        delta_phi -= float(self._n_taps//2) * register.phase_advance_per_turn
+
+        delta_position = parameters['location'] - target_location
+        delta_phi += delta_position
+        if delta_position > 0:
+            delta_phi -= register.phase_advance_per_turn
+
+        delta_phi -= additional_phase_advance
+
+        coefficients = np.zeros(self._n_taps)
+
+        for i in range(self._n_taps):
+            n = self._n_taps-i-1
+            n -= self._n_taps//2
+            h = 0.
+
+            if n == 0:
+                h = np.cos(delta_phi)
+            elif n % 2 == 1:
+                h = -2. * np.sin(delta_phi) / (pi * float(n))
+            coefficients[i] = h
+        return coefficients
+
+
+class VectorSumCombiner(Combiner):
+    """ A combiner, which utilizes vector calculus for the correction.
+
+        It can be proven that if the oscillation amplitude doesn't change
+        turn by turn (e.g. the damper gain is low), the correction is
+        ideal if signals from two different phase advances (e.g. turns or
+        pickups) are available.
+    """
+    def __init__(self, *args, **kwargs):
+        """
+        Parameters
+        ----------
+        registers : list
+            A list of registers, which are a source for the signal
+        target_location : number
+            A target phase advance in radians of betatron motion
+        additional_phase_advance : number
+            Additional phase advance for the target location.
+            For example, np.pi/2. for the shift from displacement in the pickup
+            to divergence in the kicker
+        """
+        super(self.__class__, self).__init__(*args, **kwargs)
+        self.label = 'Vector sum combiner'
+        self._warning_printed = False
+
+    def combine(self, registers, target_location, target_beta,
+                additional_phase_advance, beta_conversion):
+
+        combined_signal = None
+        n_signals = 0
+
+        if len(registers) == 1:
+            prev_parameters = None
+            prev_signal = None
+            prev_delay = None
+
+            if len(registers[0]) > 1:
+
+                for i, (parameters, signal, delay) in enumerate(registers[0]):
+                    if i == 0:
+                        combined_signal = np.zeros(len(signal))
+                        prev_signal = np.zeros(len(signal))
+                    else:
+                        phase_advance_per_turn = (
+                            registers[0].phase_advance_per_turn)
+                        location_1 = prev_parameters['location']
+                        beta_1 = prev_parameters['beta']
+                        delay_1 = prev_delay
+                        location_2 = parameters['location']
+                        # note: the original read beta_2 from prev_parameters, which appears
+                        # to be a copy-paste slip; the second reading's beta is meant here
+                        beta_2 = parameters['beta']
+                        delay_2 = delay
+
+                        combined_signal = combined_signal + \
+                            self.__combine_signals(prev_signal, delay_1,
+                                                   location_1, beta_1,
+                                                   signal, delay_2,
+                                                   location_2, beta_2,
+                                                   target_location,
+                                                   target_beta,
+                                                   beta_conversion,
+                                                   phase_advance_per_turn,
+                                                   additional_phase_advance)
+                        n_signals += 1
+
+                    np.copyto(prev_signal,signal)
+                    prev_parameters = parameters
+                    prev_delay = delay
+
+        elif len(registers) > 1:
+            prev_register = registers[0]
+
+            for register in registers[1:]:
+                for (parameters_1, signal_1, delay_1), (parameters_2, signal_2, delay_2) in zip(prev_register,register):
+                    if combined_signal is None:
+                        combined_signal = np.zeros(len(signal_1))
+
+                    phase_advance_per_turn = (
+                        prev_register.phase_advance_per_turn)
+                    location_1 = parameters_1['location']
+                    beta_1 = parameters_1['beta']
+                    location_2 = parameters_2['location']
+                    beta_2 = parameters_2['beta']
+
+                    combined_signal = combined_signal + \
+                        self.__combine_signals(signal_1, delay_1,
+                                               location_1, beta_1,
+                                               signal_2, delay_2,
+                                               location_2, beta_2,
+                                               target_location,
+                                               target_beta,
+                                               beta_conversion,
+                                               phase_advance_per_turn,
+                                               additional_phase_advance
+                                               )
+                    n_signals += 1
+
+                prev_register = register
+        else:
+            raise ValueError('At least one register must be given.')
+
+        if combined_signal is not None:
+            combined_signal = combined_signal / float(n_signals)
+
+        return combined_signal
+
+    def __combine_signals(self, signal_1, delay_1, location_1, beta_1,
+                          signal_2, delay_2, location_2, beta_2,
+                          target_location, target_beta, beta_conversion,
+                          phase_advance_per_turn, additional_phase_advance):
+
+        readings_angle_diff, final_rotation_angle = (
+            self.__determine_angles(target_location, phase_advance_per_turn,
+                                    location_1, delay_1, location_2, delay_2)
+        )
+        final_rotation_angle += additional_phase_advance
+
+        re, im = self.__determine_vector(signal_1, beta_1, signal_2, beta_2,
+                                         readings_angle_diff)
+
+        calculated_signal = self.__rotate_vector(re, im, final_rotation_angle)
+
+        if target_beta is not None:
+            if beta_conversion == '90_deg':
+                beta_correction = 1./np.sqrt(beta_1*target_beta)
+            elif beta_conversion == '0_deg':
+                beta_correction = np.sqrt(target_beta/beta_1)
+        else:
+            beta_correction = 1.
+
+        return beta_correction * calculated_signal
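
A numeric sketch of the two-reading reconstruction performed by __determine_vector and __rotate_vector below, assuming equal beta at both readings for simplicity (all values illustrative):

```python
# From x1 = A*cos(phi) and x2 = A*cos(phi + d), recover the quadrature
# component and propagate the oscillation forward by a rotation angle r.
import numpy as np

A, phi, d, r = 2.0, 0.4, 1.1, 0.7
x1 = A * np.cos(phi)
x2 = A * np.cos(phi + d)

re = x1
im = (1. / np.sin(d)) * x2 - (np.cos(d) / np.sin(d)) * x1   # equals -A*sin(phi)

reconstructed = np.cos(r) * re + np.sin(r) * im
assert np.isclose(reconstructed, A * np.cos(phi + r))
```
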
+
+    def __determine_angles(self, target_location, phase_advance_per_turn,
+                           signal_1_location, signal_1_delay,
+                           signal_2_location, signal_2_delay):
+
+        readings_location_difference = signal_2_location - signal_1_location
+        if readings_location_difference < 0.:
+            # wrap a negative location difference by one turn (the original added the
+            # difference to itself here, which appears to be a copy-paste slip)
+            readings_location_difference += phase_advance_per_turn
+
+        readings_delay_difference = signal_2_delay - signal_1_delay
+        readings_phase_difference = readings_location_difference \
+            + readings_delay_difference
+
+        if not self._warning_printed:
+            # warn when the readings are nearly (anti)parallel, i.e. within ~0.2 rad
+            # (~12 deg) of a multiple of pi (the original modulo test against -pi
+            # could never trigger and appears to be a slip)
+            if (readings_phase_difference%np.pi > (np.pi - 0.2)) or (readings_phase_difference%np.pi < 0.2):
+                self._warning_printed = True
+                print("WARNING: It is recommended that the angle between the readings is at least 12 deg")
+
+        target_location_difference = target_location - signal_1_location
+        if target_location_difference < 0.:
+            target_location_difference += phase_advance_per_turn
+
+        target_delay_difference = -1. * signal_1_delay
+        target_phase_difference = target_location_difference \
+            + target_delay_difference
+
+        return readings_phase_difference, target_phase_difference
+
+    def __determine_vector(self, signal_1, beta_1, signal_2, beta_2,
+                           angle_difference):
+        # decomposes the two readings into in-phase and quadrature components
+        s = np.sin(angle_difference)
+        c = np.cos(angle_difference)
+
+        re = signal_1
+        im = (1./s) * np.sqrt(beta_1/beta_2) * signal_2 - (c/s) * signal_1
+        return re, im
+
+    def __rotate_vector(self, re, im, rotation_angle):
+
+        s = np.sin(rotation_angle)
+        c = np.cos(rotation_angle)
+
+        return c*re+s*im
+
+
+class FIRCombiner(Combiner):
+    """ A combiner object, which corrects the betatron phase advance by using
+        the given coefficients.
+    """
+
+    def __init__(self, coefficients, *args, **kwargs):
+        """
+        Parameters
+        ----------
+        coefficients: list
+            A list of FIR coefficients
+        """
+        self._coefficients = coefficients
+        super(FIRCombiner, self).__init__(*args, **kwargs)
+        self.label = 'FIR combiner'
+
+    def combine(self, registers, target_location, target_beta,
+                additional_phase_advance, beta_conversion):
+        combined_signal = None
+
+        for register in registers:
+            if len(register) >= len(self._coefficients):
+                for i, (parameters, signal, delay) in enumerate(register):
+                    if combined_signal is None:
+                        combined_signal = np.zeros(len(signal))
+                    if i < len(self._coefficients):
+                        combined_signal += self._coefficients[i] * signal
+
+        return combined_signal
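
The DC-removal property of the three-tap combiner defined below can be verified directly: the taps sum to zero for any tune and delay, so a constant offset maps to zero output. A hedged check that reuses the same algebra as calculate_coefficients:

```python
# Quick check (illustration only) that the three taps remove a DC offset.
import numpy as np

def dc_removed_coefficients(tune, delay):
    # same algebra as calculate_coefficients in DCRemovedVectorSumCombiner
    ppt = -tune * 2. * np.pi
    c12, s12 = np.cos(1. * ppt), np.sin(1. * ppt)
    c13, s13 = np.cos(2. * ppt), np.sin(2. * ppt)
    c14, s14 = np.cos((2 + delay) * ppt), np.sin((2 + delay) * ppt)
    divider = -1. * (-c12 * s13 + c13 * s12 - s12 + s13)
    cx1 = c14 * (1 - (c12 * s13 - c13 * s12) / divider) + s14 * (-c12 + c13) / divider
    cx2 = (c14 * s13 + s14 * (-c13 + 1)) / divider
    cx3 = (c14 * (-s12) + s14 * (c12 - 1)) / divider
    return [cx3, cx2, cx1]

coeffs = dc_removed_coefficients(tune=0.31, delay=1)
assert np.isclose(sum(coeffs), 0.)   # a constant offset maps to zero output
```
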
+class DCRemovedVectorSumCombiner(FIRCombiner):
+    """ A 'notch filtered', i.e. DC-level removed, version of the vector sum
+        combiner. It is a three-tap FIR filter, which has been derived by using
+        the assumptions that the beam is a rotating vector in the (x, xp)-plane
+        and that x-values can be measured on different turns but contain an
+        unknown constant DC offset.
+
+        This version gives a mathematically exact correction when the tune
+        is well known. When a tune error exists, this version induces only low
+        noise in comparison to the other types of combiners.
+
+        Developed by J. Komppula @ 2017.
+    """
+    def __init__(self, tune, delay=0, *args, **kwargs):
+        def calculate_coefficients(tune, delay):
+            ppt = -tune * 2.* np.pi
+            c12 = np.cos(1.*ppt)
+            s12 = np.sin(1.*ppt)
+            c13 = np.cos(2.*ppt)
+            s13 = np.sin(2.*ppt)
+            c14 = np.cos((2+delay)*ppt)
+            s14 = np.sin((2+delay)*ppt)
+
+            divider = -1.*(-c12*s13+c13*s12-s12+s13)
+
+            cx1 = c14*(1-(c12*s13-c13*s12)/divider)+s14*(-c12+c13)/divider
+            cx2 = (c14*(-(-s13))+s14*(-c13+1))/divider
+            cx3 = (c14*(-(s12))+s14*(c12-1))/divider
+
+            return [cx3, cx2, cx1]
+
+        coefficients = calculate_coefficients(tune, delay)
+
+        super(DCRemovedVectorSumCombiner, self).__init__(coefficients,*args, **kwargs)
+        self.label = 'DC-removed vector sum combiner'
+
+
+class TurnFIRFilter(object):
+    """ A signal processor, which can be used as a FIR filter in the turn domain.
+    """
+
+    def __init__(self, coefficients, tune, delay = 0, additional_phase_advance = 0., **kwargs):
+        """
+        Parameters
+        ----------
+        coefficients: list
+            A list of FIR coefficients
+        tune: float
+            A betatron tune of the plane
+        delay: int
+            A delay of the signal in units of turns before the filter
+        additional_phase_advance: float
+            An additional betatron phase advance in radians to be taken into
+            account in the betatron phase correction.
+        """
+        self._coefficients = coefficients
+        self._tune = tune
+        self._additional_phase_advance = additional_phase_advance
+        self._register = Register(len(self._coefficients), self._tune, delay)
+        self._combiner = None
+
+        self.extensions = []
+        self._macros = []
+        default_macros(self, 'TurnFIRFilter', **kwargs)
+
+    def process(self, parameters, signal, *args, **kwargs):
+        self._register.process(parameters, signal, *args, **kwargs)
+        if self._combiner is None:
+            self.__init_combiner(parameters)
+
+        output_parameters, output_signal = self._combiner.process(parameters,
+                                                                  signal,
+                                                                  *args,
+                                                                  **kwargs)
+
+        if output_signal is None:
+            output_parameters = parameters
+            output_signal = np.zeros(len(signal))
+
+        return output_parameters, output_signal
+
+    def __init_combiner(self, parameters):
+        registers = [self._register]
+        target_location = parameters['location']
+        target_beta = parameters['beta']
+        extra_phase = self._additional_phase_advance
+        self._combiner = FIRCombiner(self._coefficients,registers, target_location,
+                                     target_beta, extra_phase)
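
TurnDelay (below) and TurnFIRFilter both combine one stored signal per turn. A hedged, standalone sketch of a turn-domain FIR filter y[n] = sum_k b[k]*x[n-k], with a deque standing in for the Register (the taps are illustrative, not from the patch):

```python
# Standalone sketch of a turn-domain FIR filter applied to slice signals.
import numpy as np
from collections import deque

coefficients = [0.25, 0.5, 0.25]              # example taps
history = deque(maxlen=len(coefficients))     # newest turn first

def process_turn(signal):
    history.appendleft(np.asarray(signal, dtype=float))
    if len(history) < len(coefficients):
        return np.zeros_like(signal, dtype=float)   # register not yet full
    return sum(b * x for b, x in zip(coefficients, history))

for turn in range(5):
    out = process_turn(np.ones(4) * turn)     # turn-by-turn slice signal

assert np.allclose(out, (0.25 * 4 + 0.5 * 3 + 0.25 * 2) * np.ones(4))
```
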
+ + """ + + self._delay = delay + self._tune = tune + self._n_taps = n_taps + self._combiner_type = combiner + self._additional_phase_advance = additional_phase_advance + + self._register = Register(self._n_taps, self._tune, self._delay) + self._combiner = None + + self.extensions = [] + self._macros = [] + default_macros(self, 'TurnDelay', **kwargs) + + def process(self, parameters, signal, *args, **kwargs): + self._register.process(parameters, signal, *args, **kwargs) + + if self._combiner is None: + self.__init_combiner(parameters) + + output_parameters, output_signal = self._combiner.process(parameters, + signal, + *args, + **kwargs) +# print output_signal + if output_signal is None: + output_parameters = parameters + output_signal = np.zeros(len(signal)) + + return output_parameters, output_signal + + def __init_combiner(self, parameters): + registers = [self._register] + target_location = parameters['location'] + target_beta = parameters['beta'] + extra_phase = self._additional_phase_advance + + if isinstance(self._combiner_type, (str,unicode)): + if self._combiner_type == 'vector_sum': + self._combiner = VectorSumCombiner(registers, target_location, + target_beta, extra_phase) + elif self._combiner_type == 'cosine_sum': + self._combiner = CosineSumCombiner(registers, target_location, + target_beta, extra_phase) + elif self._combiner_type == 'hilbert': + self._combiner = HilbertCombiner(registers, target_location, + target_beta, extra_phase) + elif self._combiner_type == 'DCrem_vector_sum': + self._combiner = DCRemovedVectorSumCombiner(self._tune, + self._delay, + registers, + target_location, + target_beta, + extra_phase) + else: + raise ValueError('Unknown combiner type') + else: + self._combiner = self._combiner_type(registers, target_location, + extra_phase) diff --git a/PyHEADTAIL/feedback/processors/resampling.py b/PyHEADTAIL/feedback/processors/resampling.py new file mode 100644 index 00000000..586e06ac --- /dev/null +++ b/PyHEADTAIL/feedback/processors/resampling.py @@ -0,0 +1,714 @@ +import numpy as np +import copy +from scipy import interpolate +from scipy.sparse import csr_matrix + +from ..core import Parameters, bin_edges_to_z_bins, z_bins_to_bin_edges +from ..core import append_bin_edges, bin_mids, default_macros + +"""Signal processors for resampling a signal. + +@author Jani Komppula +@date: 11/10/2017 +""" + +class Resampler(object): + + def __init__(self, method, n_samples=None, offset=0., data_conversion='sum', + n_extras = 0, **kwargs): + """ + Resamples the input signal into a new bin set + + Parameters + ---------- + method : tuple + Resampling method. Possible options are: + ('harmonic', double) + The input signal is converted into one continously sampled + segment. The given number corresponds to the segment spacing + frequency of the input signal (e.g. the harmonic or bunch + frequency of the accelerator). + ('sequenced', double) + Each segment of the signal is resampled by using a given + sampling frequency. + ('previous', int) + The signal is resampled into the previous bin set. The given + number corresponds to an index of the previous parameters in + the input signal paramters. + ('upsampling', int) + Multiplies the original sampling rate by the given number + ('downsampling', int) + Reduces the original sampling rate by the given number. If + the given number is not an harmonic of the number of bins + per segment, the last bins of the segments are skipped. 
+        n_samples : int
+            A number of samples per input segment when the options 'harmonic' or
+            'sequenced' are used. If the given value is None, the number
+            of samples corresponds to ceil(segment_length*f_sampling)
+        offset : double
+            By default, the midpoints of the new bin set for the segments are
+            set to the segment reference points found in the input signal
+            parameters. The given value corresponds to the midpoint offset from
+            the reference points, in units of bins.
+        data_conversion : string
+            A method how the data of the input signal is converted to the output
+            bin set. The output signal can be converted by using:
+            'interpolation'
+                interpolates from the input data.
+            'sum'
+                calculates a sum of the bin values over the overlapping bins
+            'integral'
+                integrates the input signal over an output bin
+            'average'
+                calculates a bin width weighted average of the overlapping bins
+            'average_bin_value'
+                calculates an average value of the overlapping bins
+            'value'
+                returns a value of the overlapping bin
+            ('upsampler_kernel', list)
+                uses a kernel to map an old value to a corresponding
+                section of upsampled bins
+        n_extras : int
+            A number of extra samples added before the first segment and after
+            the last segment
+        """
+
+        self._method = method
+        self._n_samples = n_samples
+        self._offset = offset
+
+        self._n_extras = n_extras
+
+        self._data_conversion = data_conversion
+
+        self._output_parameters = None
+        self._output_signal = None
+
+        self._convert_signal = None
+
+        self.extensions = []
+        self._macros = []
+        default_macros(self, 'Resampler', **kwargs)
+        self.signal_classes = None
+
+    def _init_harmonic_bins(self, parameters, signal):
+        self.signal_classes = (1,2)
+        base_frequency = self._method[1]
+
+        if parameters['n_segments'] > 1:
+            min_ref_point = np.min(parameters['segment_ref_points'])
+            max_ref_point = np.max(parameters['segment_ref_points'])
+            start_mid = parameters['segment_ref_points'][0]
+        else:
+            mids = bin_mids(parameters['bin_edges'])
+            min_ref_point = np.min(mids)
+            max_ref_point = np.max(mids)
+            start_mid = mids[0]
+
+        if self._n_samples is not None:
+            n_bins_per_segment = self._n_samples
+        else:
+            n_bins_per_segment = 1
+
+        segment_length = 1./base_frequency
+        bin_width = segment_length/float(n_bins_per_segment)
+
+        n_sampled_sequencies = (max_ref_point-min_ref_point) / segment_length + 1
+        n_sampled_sequencies = int(np.round(n_sampled_sequencies))
+
+        total_n_samples = int((n_sampled_sequencies + 2*self._n_extras) * n_bins_per_segment)
+
+        segment_z_bins = np.linspace(0, segment_length, n_bins_per_segment+1)
+        segment_z_bins = segment_z_bins + (self._offset - np.floor(n_bins_per_segment/2.)-0.5)*bin_width
+        segment_bin_edges = z_bins_to_bin_edges(segment_z_bins)
+
+        bin_edges = None
+
+        for i in range(self._n_extras):
+            offset = start_mid - (self._n_extras-i)*segment_length
+            if bin_edges is None:
+                bin_edges = np.copy(segment_bin_edges+offset)
+            else:
+                bin_edges = append_bin_edges(bin_edges, segment_bin_edges+offset)
+
+        for i in range(n_sampled_sequencies):
+            offset = i*segment_length + start_mid
+            if bin_edges is None:
+                bin_edges = np.copy(segment_bin_edges+offset)
+            else:
+                bin_edges = append_bin_edges(bin_edges, segment_bin_edges+offset)
+
+        for i in range(self._n_extras):
+            offset = start_mid + (i+n_sampled_sequencies)*segment_length
+            if bin_edges is None:
+                bin_edges = np.copy(segment_bin_edges+offset)
+            else:
+                bin_edges = append_bin_edges(bin_edges, segment_bin_edges+offset)
+
+        signal_class = 2
+        n_segments = 1
+        n_bins_per_segment = total_n_samples
+
+ def _init_harmonic_bins(self, parameters, signal):
+ self.signal_classes = (1,2)
+ base_frequency = self._method[1]
+
+ if parameters['n_segments'] > 1:
+ min_ref_point = np.min(parameters['segment_ref_points'])
+ max_ref_point = np.max(parameters['segment_ref_points'])
+ start_mid = parameters['segment_ref_points'][0]
+ else:
+ mids = bin_mids(parameters['bin_edges'])
+ min_ref_point = np.min(mids)
+ max_ref_point = np.max(mids)
+ start_mid = mids[0]
+
+ if self._n_samples is not None:
+ n_bins_per_segment = self._n_samples
+ else:
+ n_bins_per_segment = 1
+
+ segment_length = 1./base_frequency
+ bin_width = segment_length/float(n_bins_per_segment)
+
+ n_sampled_sequencies = (max_ref_point-min_ref_point) / segment_length + 1
+ n_sampled_sequencies = int(np.round(n_sampled_sequencies))
+
+ total_n_samples = int((n_sampled_sequencies + 2*self._n_extras) * n_bins_per_segment)
+
+ segment_z_bins = np.linspace(0, segment_length, n_bins_per_segment+1)
+ segment_z_bins = segment_z_bins + (self._offset - np.floor(n_bins_per_segment/2.)-0.5)*bin_width
+ segment_bin_edges = z_bins_to_bin_edges(segment_z_bins)
+
+ bin_edges = None
+
+ for i in range(self._n_extras):
+ offset = start_mid - (self._n_extras-i)*segment_length
+ if bin_edges is None:
+ bin_edges = np.copy(segment_bin_edges+offset)
+ else:
+ bin_edges = append_bin_edges(bin_edges, segment_bin_edges+offset)
+
+ for i in range(n_sampled_sequencies):
+ offset = i*segment_length + start_mid
+ if bin_edges is None:
+ bin_edges = np.copy(segment_bin_edges+offset)
+ else:
+ bin_edges = append_bin_edges(bin_edges, segment_bin_edges+offset)
+
+ for i in range(self._n_extras):
+ offset = start_mid + (i+n_sampled_sequencies)*segment_length
+ if bin_edges is None:
+ bin_edges = np.copy(segment_bin_edges+offset)
+ else:
+ bin_edges = append_bin_edges(bin_edges, segment_bin_edges+offset)
+
+ signal_class = 2
+ n_segments = 1
+ n_bins_per_segment = total_n_samples
+ segment_ref_points = [np.mean(bin_edges_to_z_bins(bin_edges))]
+ previous_parameters = []
+ location = parameters['location']
+ beta = parameters['beta']
+
+ self._output_parameters = Parameters(signal_class, bin_edges, n_segments,
+ n_bins_per_segment, segment_ref_points,
+ previous_parameters, location, beta)
+ temp_parameters = copy.deepcopy(parameters)
+ temp_parameters['previous_parameters'] = []
+ self._output_parameters['previous_parameters'] = copy.deepcopy(parameters['previous_parameters'])
+ self._output_parameters['previous_parameters'].append(temp_parameters)
+ self._output_signal = np.zeros(total_n_samples)
+
+
+ def _init_sequenced_bins(self, parameters, signal):
+ self.signal_classes = (0,1)
+ bin_width = 1./self._method[1]
+ if self._n_samples is not None:
+ n_bins_per_segment = self._n_samples
+ else:
+ segment_from = parameters['bin_edges'][0,0]
+ segment_to = parameters['bin_edges'][parameters['n_bins_per_segment']-1,1]
+ raw_segment_length = segment_to - segment_from
+ n_bins_per_segment = int(np.ceil(raw_segment_length/bin_width))
+
+ segment_z_bins = np.linspace(0, n_bins_per_segment/self._method[1], n_bins_per_segment+1)
+ segment_z_bins = segment_z_bins - np.mean(segment_z_bins) + self._offset*bin_width
+ segment_bin_edges = z_bins_to_bin_edges(segment_z_bins)
+
+ bin_edges = None
+ for offset in parameters['segment_ref_points']:
+ if bin_edges is None:
+ temp = (segment_bin_edges+offset)
+ bin_edges = temp
+ else:
+ bin_edges = append_bin_edges(bin_edges, segment_bin_edges+offset)
+ signal_class = 1
+ n_segments = parameters['n_segments']
+ segment_ref_points = parameters['segment_ref_points']
+ previous_parameters = []
+ location = parameters['location']
+ beta = parameters['beta']
+ self._output_parameters = Parameters(signal_class, bin_edges, n_segments,
+ n_bins_per_segment, segment_ref_points,
+ previous_parameters, location, beta)
+ temp_parameters = copy.deepcopy(parameters)
+ temp_parameters['previous_parameters'] = []
+ self._output_parameters['previous_parameters'] = copy.deepcopy(parameters['previous_parameters'])
+ self._output_parameters['previous_parameters'].append(temp_parameters)
+ self._output_signal = np.zeros(self._output_parameters['n_segments'] * self._output_parameters['n_bins_per_segment'])
+
+ def _init_previous_bins(self, parameters, signal):
+ self.signal_classes = (0,0)
+ self._output_parameters = copy.deepcopy(parameters['previous_parameters'][self._method[1]])
+ self._output_parameters['previous_parameters'] = copy.deepcopy(parameters['previous_parameters'][0:self._method[1]])
+
+ self._output_signal = np.zeros(self._output_parameters['n_segments'] * self._output_parameters['n_bins_per_segment'])
+
+ def _init_upsampling(self, parameters, signal):
+ self.signal_classes = (0,0)
+ multiplier = self._method[1]
+
+ original_edges = parameters['bin_edges']
+ new_edges = None
+
+ for edges in original_edges:
+ new_bin_width = (edges[1]-edges[0])/float(multiplier)
+
+ temp_edges = np.zeros((multiplier, 2))
+
+ for i in range(multiplier):
+ temp_edges[i,0] = edges[0] + i * new_bin_width
+ temp_edges[i,1] = edges[0] + (i + 1) * new_bin_width
+
+ if new_edges is None:
+ new_edges = temp_edges
+ else:
+ new_edges = append_bin_edges(new_edges,temp_edges)
+
+
+ signal_class = parameters['class']
+ n_segments = parameters['n_segments']
+ n_bins_per_segment = parameters['n_bins_per_segment']*multiplier
+ segment_ref_points = parameters['segment_ref_points']
+ previous_parameters = []
+ location = parameters['location']
+ beta = parameters['beta']
+ self._output_parameters = Parameters(signal_class, new_edges, n_segments,
+ n_bins_per_segment, segment_ref_points,
+ previous_parameters, location, beta)
+ temp_parameters = copy.deepcopy(parameters)
+ temp_parameters['previous_parameters'] = []
+ self._output_parameters['previous_parameters'] = copy.deepcopy(parameters['previous_parameters'])
+ self._output_parameters['previous_parameters'].append(temp_parameters)
+ self._output_signal = np.zeros(len(signal)*multiplier)
+
+ def _init_downsampling(self, parameters, signal):
+ self.signal_classes = (0,0)
+ multiplier = self._method[1]
+
+ original_edges = parameters['bin_edges']
+ original_n_bins_per_segment = parameters['n_bins_per_segment']
+
+ n_bins_per_segment = int(np.floor(original_n_bins_per_segment/multiplier))
+ new_edges = None
+
+ for j in range(parameters['n_segments']):
+ for i in range(n_bins_per_segment):
+ first_edge = j * original_n_bins_per_segment + i * multiplier
+ last_edge = j * original_n_bins_per_segment + (i + 1) * multiplier -1
+
+ temp_edges = np.zeros((1, 2))
+ temp_edges[0,0] = original_edges[first_edge,0]
+ temp_edges[0,1] = original_edges[last_edge,1]
+
+ if new_edges is None:
+ new_edges = temp_edges
+ else:
+ new_edges = append_bin_edges(new_edges,temp_edges)
+
+
+ signal_class = parameters['class']
+ n_segments = parameters['n_segments']
+ segment_ref_points = parameters['segment_ref_points']
+ previous_parameters = []
+ location = parameters['location']
+ beta = parameters['beta']
+ self._output_parameters = Parameters(signal_class, new_edges, n_segments,
+ n_bins_per_segment, segment_ref_points,
+ previous_parameters, location, beta)
+ temp_parameters = copy.deepcopy(parameters)
+ temp_parameters['previous_parameters'] = []
+ self._output_parameters['previous_parameters'] = copy.deepcopy(parameters['previous_parameters'])
+ self._output_parameters['previous_parameters'].append(temp_parameters)
+ self._output_signal = np.zeros(n_bins_per_segment*n_segments)
+
+
+ def _init_interp_conversion(self, parameters, signal):
+ conversion_map = np.zeros(len(self._output_signal), dtype=bool)
+
+ input_bin_mids = bin_mids(parameters['bin_edges'])
+ output_bin_mids = bin_mids(self._output_parameters['bin_edges'])
+
+ for i in range(parameters['n_segments']):
+ i_min = i * parameters['n_bins_per_segment']
+ i_max = (i + 1) * parameters['n_bins_per_segment'] - 1
+ segment_min_z = input_bin_mids[i_min]
+ segment_max_z = input_bin_mids[i_max]
+
+ map_below_max = (output_bin_mids < segment_max_z)
+ map_above_min = (output_bin_mids > segment_min_z)
+
+ conversion_map = conversion_map + map_below_max*map_above_min
+
+ def convert_signal(input_signal):
+ output_signal = np.zeros(len(output_bin_mids))
+ tck = interpolate.splrep(input_bin_mids, input_signal, s=0)
+ output_signal[conversion_map] = interpolate.splev(output_bin_mids[conversion_map], tck, der=0)
+ return output_signal
+
+ return convert_signal
+
+ def _init_sum_conversion(self, parameters, signal):
+ def CDF(x, ref_edges):
+ if x <= ref_edges[0]:
+ return 0.
+ elif x < ref_edges[1]:
+ return (x-ref_edges[0])/float(ref_edges[1]-ref_edges[0])
+ else:
+ return 1.
+
+ big_matrix = np.zeros((len(self._output_signal), len(signal)))
+ for i, output_edges in enumerate(self._output_parameters['bin_edges']):
+ for j, input_edges in enumerate(parameters['bin_edges']):
+ big_matrix[i, j] = CDF(output_edges[1], input_edges) - CDF(output_edges[0], input_edges)
+ sparse_matrix = csr_matrix(big_matrix)
+
+ def convert_signal(input_signal):
+ return sparse_matrix.dot(input_signal)
+
+ return convert_signal
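(Editorial note, not part of the patch: CDF(x, edges) above is the fraction of an input bin lying below x, so big_matrix[i, j] is the fraction of input bin j that overlaps output bin i, and the conversion reduces to one sparse matrix-vector product. A tiny numeric sketch with invented bin edges:)

    import numpy as np

    def cdf(x, edges):
        # fraction of the bin [edges[0], edges[1]] lying below x
        if x <= edges[0]:
            return 0.
        elif x < edges[1]:
            return (x - edges[0]) / float(edges[1] - edges[0])
        return 1.

    in_edges = [(0., 1.), (1., 2.)]      # two input bins
    out_edges = [(0., 0.5), (0.5, 2.)]   # two output bins
    M = np.array([[cdf(hi, e) - cdf(lo, e) for e in in_edges]
                  for (lo, hi) in out_edges])
    # M == [[0.5, 0.], [0.5, 1.]]: each column sums to 1, so the 'sum'
    # conversion redistributes the bin values while conserving the total.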
+
+ def _init_upsampler_kernel_conversion(self, parameters, signal):
+ kernel = self._data_conversion[1]
+ big_matrix = np.zeros((len(self._output_signal), len(signal)))
+ for j, input_edges in enumerate(parameters['bin_edges']):
+ for k in range(len(kernel)):
+ i = j*len(kernel) + k
+ big_matrix[i, j] = kernel[k]
+ sparse_matrix = csr_matrix(big_matrix)
+
+ def convert_signal(input_signal):
+ return sparse_matrix.dot(input_signal)
+
+ return convert_signal
+
+ def _init_integral_conversion(self, parameters, signal):
+ def CDF(x, ref_edges):
+ if x <= ref_edges[0]:
+ return 0.
+ elif x < ref_edges[1]:
+ return (x-ref_edges[0])/float(ref_edges[1]-ref_edges[0])
+ else:
+ return 1.
+
+ big_matrix = np.zeros((len(self._output_signal), len(signal)))
+
+ for i, output_edges in enumerate(self._output_parameters['bin_edges']):
+ for j, input_edges in enumerate(parameters['bin_edges']):
+ bin_width = input_edges[1] - input_edges[0]
+ big_matrix[i, j] = (CDF(output_edges[1], input_edges) - CDF(output_edges[0], input_edges))*bin_width
+
+ sparse_matrix = csr_matrix(big_matrix)
+
+ def convert_signal(input_signal):
+ return sparse_matrix.dot(input_signal)
+
+ return convert_signal
+
+ def _init_avg_conversion(self, parameters, signal):
+ def CDF(x, ref_edges):
+ if x <= ref_edges[0]:
+ return 0.
+ elif x < ref_edges[1]:
+ return (x-ref_edges[0])/float(ref_edges[1]-ref_edges[0])
+ else:
+ return 1.
+
+ big_matrix = np.zeros((len(self._output_signal), len(signal)))
+
+ for i, output_edges in enumerate(self._output_parameters['bin_edges']):
+ for j, input_edges in enumerate(parameters['bin_edges']):
+ width_coeff = (input_edges[1]-input_edges[0])/(output_edges[1]-output_edges[0])
+ big_matrix[i, j] = (CDF(output_edges[1], input_edges) - CDF(output_edges[0], input_edges))*width_coeff
+
+ sparse_matrix = csr_matrix(big_matrix)
+
+ def convert_signal(input_signal):
+ return sparse_matrix.dot(input_signal)
+
+ return convert_signal
+
+ def _init_avg_bin_conversion(self, parameters, signal):
+ def CDF(x, ref_edges):
+ if x <= ref_edges[0]:
+ return 0.
+ elif x < ref_edges[1]:
+ return (x-ref_edges[0])/float(ref_edges[1]-ref_edges[0])
+ else:
+ return 1.
+
+ big_matrix = np.zeros((len(self._output_signal), len(signal)))
+
+ for i, output_edges in enumerate(self._output_parameters['bin_edges']):
+ for j, input_edges in enumerate(parameters['bin_edges']):
+ big_matrix[i, j] = (CDF(output_edges[1], input_edges) - CDF(output_edges[0], input_edges))
+ if np.sum(big_matrix[i, :]) != 0.:
+ big_matrix[i, :] = big_matrix[i, :]/np.sum(big_matrix[i, :])
+
+ sparse_matrix = csr_matrix(big_matrix)
+
+ def convert_signal(input_signal):
+ return sparse_matrix.dot(input_signal)
+
+ return convert_signal
+
+
+ def _init_value_conversion(self, parameters, signal):
+ def CDF(x, ref_edges):
+ if x <= ref_edges[0]:
+ return 0.
+ elif x < ref_edges[1]:
+ return (x-ref_edges[0])/float(ref_edges[1]-ref_edges[0])
+ else:
+ return 1.
+
+ big_matrix = np.zeros((len(self._output_signal), len(signal)))
+ output_bin_mids = bin_mids(self._output_parameters['bin_edges'])
+
+ for i, mid in enumerate(output_bin_mids):
+ for j, edges in enumerate(parameters['bin_edges']):
+ if (mid >= edges[0]) and (mid < edges[1]):
+ big_matrix[i, j] = 1
+
+ sparse_matrix = csr_matrix(big_matrix)
+
+ def convert_signal(input_signal):
+ return sparse_matrix.dot(input_signal)
+
+ return convert_signal
+
+ def _init_extremum_conversion(self, parameters, signal):
+ # use np.split etc
+ pass
+
+ def _init_variables(self, parameters, signal):
+ if isinstance(self._method, tuple):
+ if self._method[0] == 'harmonic':
+ self._init_harmonic_bins(parameters, signal)
+ elif self._method[0] == 'sequenced':
+ self._init_sequenced_bins(parameters, signal)
+ elif self._method[0] == 'previous':
+ self._init_previous_bins(parameters, signal)
+ elif self._method[0] == 'upsampling':
+ self._init_upsampling(parameters, signal)
+ elif self._method[0] == 'downsampling':
+ self._init_downsampling(parameters, signal)
+ else:
+ raise ValueError('Unknown sampling method')
+ else:
+ raise ValueError('Unknown sampling method')
+
+ if self._data_conversion == 'interpolation':
+ self._convert_signal = self._init_interp_conversion(parameters, signal)
+ elif self._data_conversion == 'sum':
+ self._convert_signal = self._init_sum_conversion(parameters, signal)
+ elif self._data_conversion == 'integral':
+ self._convert_signal = self._init_integral_conversion(parameters, signal)
+ elif self._data_conversion == 'average':
+ self._convert_signal = self._init_avg_conversion(parameters, signal)
+ elif self._data_conversion == 'average_bin_value':
+ self._convert_signal = self._init_avg_bin_conversion(parameters, signal)
+ elif self._data_conversion == 'value':
+ self._convert_signal = self._init_value_conversion(parameters, signal)
+ elif isinstance(self._data_conversion, tuple):
+ if self._data_conversion[0] == 'upsampler_kernel':
+ self._convert_signal = self._init_upsampler_kernel_conversion(parameters, signal)
+ else:
+ raise ValueError('Unknown data conversion method')
+ else:
+ raise ValueError('Unknown data conversion method')
+
+ def process(self, parameters, signal, *args, **kwargs):
+ if self._convert_signal is None:
+ self._init_variables(parameters,signal)
+
+ output_signal = self._convert_signal(signal)
+
+ return self._output_parameters, output_signal
+
+class Quantizer(object):
+ def __init__(self, n_bits, input_range, **kwargs):
+ """
+ Quantizes the input signal into discrete levels
+
+ Parameters
+ ----------
+ n_bits : int
+ The number of bits in the output signal. In other words, the
+ signal is rounded into 2^n_bits levels.
+ input_range : tuple
+ The range which is divided into the 2^n_bits levels. Signal
+ values exceeding the range are clipped to the range limits.
+ """
+
+ self._n_bits = n_bits
+ self._n_steps = np.power(2,self._n_bits)-1.
+ self._input_range = input_range
+ self._step_size = (self._input_range[1]-self._input_range[0])/float(self._n_steps)
+
+ self.signal_classes = (0, 0)
+
+ self.extensions = []
+ self._macros = []
+ default_macros(self, 'Quantizer', **kwargs)
+
+ def process(self, parameters, signal, *args, **kwargs):
+ output_signal = self._step_size*np.floor(signal/self._step_size+0.5)
+
+ output_signal[output_signal < self._input_range[0]] = self._input_range[0]
+ output_signal[output_signal > self._input_range[1]] = self._input_range[1]
+
+ return parameters, output_signal
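(Editorial note, not part of the patch: the quantizer rounds to the nearest of 2^n_bits levels and clips to input_range. A short numeric sketch of the same arithmetic, with invented values:)

    import numpy as np

    n_bits, lo, hi = 8, -1., 1.
    step = (hi - lo) / (2**n_bits - 1.)        # ~7.8e-3 for 8 bits
    signal = np.array([-2.0, -0.001, 0.004, 0.999])
    quantized = step * np.floor(signal / step + 0.5)
    quantized = np.clip(quantized, lo, hi)     # same effect as the masks above
    # -2.0 is clipped to -1.0, -0.001 rounds to 0.0,
    # 0.004 rounds to one step (~0.0078), 0.999 to ~0.996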
+
+
+class ADC(object):
+ def __init__(self, sampling_rate, n_bits=None, input_range=None, n_samples=None,
+ data_conversion='sum', **kwargs):
+ """
+ A model for an analog to digital converter. The input signal is
+ resampled segment by segment by using a given sampling rate.
+ If both n_bits and input_range have been given, the output signal is
+ also quantized.
+
+
+ Parameters
+ ----------
+ sampling_rate : float
+ The number of samples per second.
+ n_bits : int
+ The number of bits for the quantizer
+ input_range : tuple
+ A range for the quantizer
+ n_samples : int
+ The number of bins per segment. If None, the number
+ of samples corresponds to ceil(segment_length*f_sampling)
+ """
+
+ self.signal_classes = (0, 1)
+ self._resampler = Resampler(('sequenced', sampling_rate) , n_samples,
+ data_conversion=data_conversion, **kwargs)
+
+ self._digitizer = None
+ if (n_bits is not None) and (input_range is not None):
+ self._digitizer = Quantizer(n_bits,input_range, **kwargs)
+ elif (n_bits is not None) or (input_range is not None):
+ raise ValueError('Both n_bits and input_range are required for the Quantizer.')
+
+ self.extensions = []
+ self._macros = []
+ default_macros(self, 'ADC', **kwargs)
+
+ def process(self, parameters, signal, *args, **kwargs):
+ output_parameters, output_signal = self._resampler.process(parameters, signal, *args, **kwargs)
+
+ if self._digitizer is not None:
+ output_parameters, output_signal = self._digitizer.process(output_parameters, output_signal
+ , *args, **kwargs)
+
+ return output_parameters, output_signal
+
+class HarmonicADC(object):
+ def __init__(self, base_frequency, n_bits=None, input_range=None,
+ multiplier = 1, data_conversion='average_bin_value', **kwargs):
+ """
+ A model for an analog to digital converter, which is similar to the
+ regular ADC object except that the input signal is continuously
+ resampled over the segments. If both n_bits and input_range have
+ been given, the output signal is also quantized.
+
+ Parameters
+ ----------
+ base_frequency : float
+ A base frequency, which corresponds to segment spacing (e.g.
+ a harmonic frequency of the accelerator)
+ n_bits : int
+ The number of bits for the quantizer
+ input_range : tuple
+ A range for the quantizer
+ multiplier : int
+ A multiplier for the base frequency, which together define
+ the sampling rate, i.e. f_sampling = f_base * multiplier
+ """
+ self.signal_classes = (0, 2)
+ self._resampler = Resampler(('harmonic', (base_frequency)) , multiplier,
+ data_conversion=data_conversion, **kwargs)
+
+ self._digitizer = None
+ if (n_bits is not None) and (input_range is not None):
+ self._digitizer = Quantizer(n_bits,input_range, **kwargs)
+ elif (n_bits is not None) or (input_range is not None):
+ raise ValueError('Both n_bits and input_range are required for the Quantizer.')
+
+ self.extensions = []
+ self._macros = []
+ default_macros(self, 'HarmonicADC', **kwargs)
+
+ def process(self, parameters, signal, *args, **kwargs):
+ output_parameters, output_signal = self._resampler.process(parameters, signal, *args, **kwargs)
+
+ if self._digitizer is not None:
+ output_parameters, output_signal = self._digitizer.process(output_parameters, output_signal
+ , *args, **kwargs)
+
+ return output_parameters, output_signal
+
+
+class DAC(object):
+ def __init__(self, n_bits = None, output_range = None, method = ('upsampling', 4),
+ data_conversion='value', **kwargs):
+ """
+ A model for a digital to analog converter, which by default quantizes
+ and upsamples the signal. The bin set is upsampled by default, because
+ the sampling rate is often minimized in real-life applications, but
+ after the DAC the signal is reprocessed by using analog electronics.
+ An analog signal is continuous, and modelling it requires a higher
+ sampling rate.
+
+ Parameters
+ ----------
+ n_bits : int
+ The number of bits for the quantizer
+ output_range : tuple
+ A range for the quantizer
+ method : tuple
+ Resampling method. Possible options are:
+ ('upsampling', int)
+ Multiplies the original sampling rate by the given number
+ ('previous', int)
+ Returns the previous bin set whose index is given
+ ('downsampling', int)
+ Reduces the sampling rate by the given factor
+
+ """
+ self._resampler = Resampler(method,
+ data_conversion=data_conversion, **kwargs)
+ self.signal_classes = self._resampler.signal_classes
+
+ self._digitizer = None
+ if (n_bits is not None) and (output_range is not None):
+ self._digitizer = Quantizer(n_bits,output_range, **kwargs)
+ elif (n_bits is not None) or (output_range is not None):
+ raise ValueError('Both n_bits and output_range are required for the Quantizer.')
+
+ self.extensions = []
+ self._macros = []
+ default_macros(self, 'DAC', **kwargs)
+
+ def process(self, parameters, signal, *args, **kwargs):
+ output_parameters, output_signal = self._resampler.process(parameters, signal, *args, **kwargs)
+
+ if self._digitizer is not None:
+ output_parameters, output_signal = self._digitizer.process(output_parameters, output_signal,
+ *args, **kwargs)
+
+
+ return output_parameters, output_signal
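(Editorial sketch, not part of the patch: the ADC, intermediate processing and DAC stages above are meant to be chained. Constructor values are illustrative only.)

    adc = ADC(40e6, n_bits=12, input_range=(-1e-3, 1e-3))
    dac = DAC(n_bits=12, output_range=(-1e-3, 1e-3),
              method=('upsampling', 4))

    # parameters, signal = adc.process(parameters, signal)
    # ... gain/filter signal processors in between ...
    # parameters, signal = dac.process(parameters, signal)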
""" + Returns signal to the original bin set. + Parameters + ---------- + data_conversion : str + The method how the input signal values are converted + taget_binset : int + Index of the target bin set. Index 0 correspons to the first bin + set used. + """ + super(self.__class__, self).__init__(('previous',target_binset), + data_conversion=data_conversion, **kwargs) + self.label='BackToOriginalBins' + + +class BunchByBunchSampler(Resampler): + def __init__(self,f_harmonic, multiplier=1, data_conversion='average_bin_value', **kwargs): + super(self.__class__, self).__init__(('harmonic', (f_harmonic)) , multiplier, + data_conversion=data_conversion, **kwargs) + self.label = 'Bunch by bunch sampler' + + diff --git a/PyHEADTAIL/feedback/transverse_damper.py b/PyHEADTAIL/feedback/transverse_damper.py index 7ebbd80c..3712a579 100644 --- a/PyHEADTAIL/feedback/transverse_damper.py +++ b/PyHEADTAIL/feedback/transverse_damper.py @@ -5,17 +5,18 @@ ''' - import numpy as np from scipy.special import k0 from scipy.constants import c, e from PyHEADTAIL.general.element import Element +from PyHEADTAIL.general import pmath as pm + class TransverseDamper(Element): def __init__(self, dampingrate_x, dampingrate_y, phase=90, - local_beta_function=None, *args, **kwargs): + local_beta_function=None, verbose=True, *args, **kwargs): '''Ideal transverse damper with an in-place "measurement" (transverse "pick-up") of the transverse dipole moment. Note: a single bunch in the beam is assumed, i.e. this works on @@ -36,22 +37,26 @@ def __init__(self, dampingrate_x, dampingrate_y, phase=90, assumed to be the same for both transverse planes, otherwise use two instances of the TransverseDamper. ''' - + self.verbose = verbose if dampingrate_x and not dampingrate_y: self.gain_x = 2/dampingrate_x self.track = self.track_horizontal - self.prints('Damper in horizontal plane active') + if self.verbose == True: + self.prints('Damper in horizontal plane active') elif not dampingrate_x and dampingrate_y: self.gain_y = 2/dampingrate_y self.track = self.track_vertical - self.prints('Damper in vertical plane active') + if self.verbose == True: + self.prints('Damper in vertical plane active') elif not dampingrate_x and not dampingrate_y: - self.prints('Dampers not active') + if self.verbose == True: + self.prints('Dampers not active') else: self.gain_x = 2/dampingrate_x self.gain_y = 2/dampingrate_y self.track = self.track_all - self.prints('Dampers active') + if self.verbose == True: + self.prints('Dampers active') if phase != 90 and phase != 270 and not local_beta_function: raise TypeError( 'TransverseDamper: numeric local_beta_function value at ' @@ -59,8 +64,8 @@ def __init__(self, dampingrate_x, dampingrate_y, phase=90, 'reactive damper component.)') self.phase_in_2pi = phase / 360. * 2*np.pi self.local_beta_function = local_beta_function - # will be overwritten at initialisation + def track(self, beam): pass @@ -92,3 +97,100 @@ def horizontal(cls, dampingrate_x, *args, **kwargs): @classmethod def vertical(cls, dampingrate_y, *args, **kwargs): return cls(0, dampingrate_y, *args, **kwargs) + + +class NonRigidTransverseDamper(Element): + + def __init__(self, slicer, dampingrate_x, dampingrate_y, phase=90, + local_beta_function=None, verbose=True, *args, **kwargs): + '''Ideal transverse damper with an in-place "measurement" + (transverse "pick-up") of the transverse dipole moment. + Note: a single bunch in the beam is assumed, i.e. this works on + the entire beam's moments. 
+
+ Arguments:
+ - dampingrate_x, dampingrate_y: horizontal and vertical
+ damping rates in turns (e.g. 50 turns for a typical 2018
+ LHC ADT set-up)
+ - phase: phase of the damper kick in degrees with respect to
+ the transverse position "pick-up". The default value of
+ 90 degrees corresponds to a typical resistive damper.
+ - local_beta_function: the optics beta function at the
+ transverse position "pick-up" (e.g. in the local place
+ of this Element). This is required if the damper is not
+ a purely resistive damper (or exciter), i.e. if the
+ phase is not 90 (or 270) degrees. The beta function is
+ assumed to be the same for both transverse planes,
+ otherwise use two instances of the TransverseDamper.
+ '''
+ self.verbose = verbose
+ self.slicer = slicer
+ if dampingrate_x and not dampingrate_y:
+ self.gain_x = 2/dampingrate_x
+ self.track = self.track_horizontal
+ if self.verbose == True:
+ self.prints('Damper in horizontal plane active')
+ elif not dampingrate_x and dampingrate_y:
+ self.gain_y = 2/dampingrate_y
+ self.track = self.track_vertical
+ if self.verbose == True:
+ self.prints('Damper in vertical plane active')
+ elif not dampingrate_x and not dampingrate_y:
+ if self.verbose == True:
+ self.prints('Dampers not active')
+ else:
+ self.gain_x = 2/dampingrate_x
+ self.gain_y = 2/dampingrate_y
+ self.track = self.track_all
+ if self.verbose == True:
+ self.prints('Dampers active')
+ if phase != 90 and phase != 270 and not local_beta_function:
+ raise TypeError(
+ 'TransverseDamper: numeric local_beta_function value at '
+ 'position of damper missing! (Required because of non-zero '
+ 'reactive damper component.)')
+ self.phase_in_2pi = phase / 360. * 2*np.pi
+ self.local_beta_function = local_beta_function
+ # will be overwritten at initialisation
+
+ def track(self, beam):
+ pass
+
+ def track_horizontal(self, beam):
+ slices = beam.get_slices(
+ self.slicer, statistics=["mean_x", "mean_y", "mean_z"])
+ for s_i, (mean_x, mean_y, mean_z) in enumerate(zip(
+ slices.mean_x, slices.mean_y, slices.mean_z)):
+ p_id = slices.particle_indices_of_slice(s_i)
+ if len(p_id) == 0:
+ continue
+ kicks_x = -(self.gain_x*np.sin(self.phase_in_2pi) *
+ beam.mean_xp())*np.sin(np.pi*mean_z/(3*beam.sigma_z()))
+ kicked_xp = pm.take(beam.xp, p_id) + kicks_x
+ pm.put(beam.xp, p_id, kicked_xp)
+ if self.local_beta_function:
+ beam.xp -= (self.gain_x * np.cos(self.phase_in_2pi) *
+ beam.mean_x() / self.local_beta_function)*np.sin(np.pi*mean_z/(3*beam.sigma_z()))
+
+ def track_vertical(self, beam):
+ beam.yp -= self.gain_y * np.sin(self.phase_in_2pi) * beam.mean_yp()
+ if self.local_beta_function:
+ beam.yp -= (self.gain_y * np.cos(self.phase_in_2pi) *
+ beam.mean_y() / self.local_beta_function)
+
+ def track_all(self, beam):
+ beam.xp -= self.gain_x * np.sin(self.phase_in_2pi) * beam.mean_xp()
+ beam.yp -= self.gain_y * np.sin(self.phase_in_2pi) * beam.mean_yp()
+ if self.local_beta_function:
+ beam.xp -= (self.gain_x * np.cos(self.phase_in_2pi) *
+ beam.mean_x() / self.local_beta_function)
+ beam.yp -= (self.gain_y * np.cos(self.phase_in_2pi) *
+ beam.mean_y() / self.local_beta_function)
+
+ @classmethod
+ def horizontal(cls, slicer, dampingrate_x, *args, **kwargs):
+ return cls(slicer, dampingrate_x, 0, *args, **kwargs)
+
+ @classmethod
+ def vertical(cls, slicer, dampingrate_y, *args, **kwargs):
+ return cls(slicer, 0, dampingrate_y, *args, **kwargs)
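(Editorial note, not part of the patch: unlike the rigid damper, track_horizontal above weights the centroid kick slice by slice with sin(pi*mean_z/(3*sigma_z)). A small sketch of that weight, with invented numbers:)

    import numpy as np

    sigma_z = 0.08                                  # bunch length [m], made up
    mean_z = np.linspace(-2*sigma_z, 2*sigma_z, 5)  # slice centres
    weight = np.sin(np.pi * mean_z / (3 * sigma_z))
    # the weight is zero at the bunch centre and has opposite signs for
    # head and tail, so the kick acts on intra-bunch (head-tail) motion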
diff --git a/PyHEADTAIL/field_maps/efields_funcs.py b/PyHEADTAIL/field_maps/efields_funcs.py
new file mode 100644
index 00000000..b62445a7
--- /dev/null
+++ b/PyHEADTAIL/field_maps/efields_funcs.py
@@ -0,0 +1,269 @@
+'''
+@authors: Vadim Gubaidulin, Adrian Oeftiger
+@date: 18.02.2020
+'''
+from __future__ import division
+
+from PyHEADTAIL.general.element import Element
+from PyHEADTAIL.particles.slicing import clean_slices
+
+import numpy as np
+from scipy.constants import c, epsilon_0, pi, m_e, m_p, e
+
+from scipy.interpolate import splrep, splev
+from functools import wraps
+
+from PyHEADTAIL.general import pmath as pm
+
+
+def _sig_sqrt(sig_x, sig_y):
+ return pm.sqrt(2 * (sig_x**2 - sig_y**2))
+
+
+def _efieldn_mit(x, y, sig_x, sig_y):
+ '''The charge-normalised electric field components of a
+ two-dimensional Gaussian charge distribution according to
+ M. Bassetti and G. A. Erskine in CERN-ISR-TH/80-06.
+
+ Return (E_x / Q, E_y / Q).
+
+ Assumes sig_x > sig_y and mean_x == 0 as well as mean_y == 0.
+ For convergence reasons of the erfc, use only x > 0 and y > 0.
+
+ Uses FADDEEVA C++ implementation from MIT (via SciPy >= 0.13.0).
+ '''
+ sig_sqrt = _sig_sqrt(sig_x, sig_y)
+ w1re, w1im = pm.wofz(x / sig_sqrt, y / sig_sqrt)
+ ex = pm.exp(-x*x / (2 * sig_x*sig_x) +
+ -y*y / (2 * sig_y*sig_y))
+ w2re, w2im = pm.wofz(x * sig_y/(sig_x*sig_sqrt),
+ y * sig_x/(sig_y*sig_sqrt))
+ denom = 2. * epsilon_0 * np.sqrt(pi) * sig_sqrt
+ return (w1im - ex * w2im) / denom, (w1re - ex * w2re) / denom
+
+
+def _efieldn_mitmod(x, y, sig_x, sig_y):
+ '''The charge-normalised electric field components of a
+ two-dimensional Gaussian charge distribution according to
+ M. Bassetti and G. A. Erskine in CERN-ISR-TH/80-06.
+
+ Return (E_x / Q, E_y / Q).
+
+ Assumes sig_x > sig_y and mean_x == 0 as well as mean_y == 0.
+ For convergence reasons of the erfc, use only x > 0 and y > 0.
+
+ Uses erfc C++ implementation from MIT (via SciPy >= 0.13.0)
+ and calculates wofz (FADDEEVA function) explicitly.
+ '''
+ # timing was ~1.01ms for same situation as _efieldn_mit
+ sig_sqrt = _sig_sqrt(sig_x, sig_y)
+ w1 = pm._errfadd((x + 1j * y) / sig_sqrt)
+ ex = pm.exp(-x*x / (2 * sig_x*sig_x) +
+ -y*y / (2 * sig_y*sig_y))
+ w2 = pm._errfadd(x * sig_y/(sig_x*sig_sqrt) +
+ y * sig_x/(sig_y*sig_sqrt) * 1j)
+ val = (w1 - ex * w2) / (2 * epsilon_0 * np.sqrt(pi) * sig_sqrt)
+ return val.imag, val.real
+
+
+def _efieldn_koelbig(x, y, sig_x, sig_y):
+ '''The charge-normalised electric field components of a
+ two-dimensional Gaussian charge distribution according to
+ M. Bassetti and G. A. Erskine in CERN-ISR-TH/80-06.
+
+ Return (E_x / Q, E_y / Q).
+
+ Assumes sig_x > sig_y and mean_x == 0 as well as mean_y == 0.
+ For convergence reasons of the erfc, use only x > 0 and y > 0.
+
+ Uses CERN library from K. Koelbig.
+ '''
+ # timing was ~3.35ms for same situation as _efieldn_mit
+ if not pm._errf:
+ raise ImportError('errfff cannot be imported for using ' +
+ 'TransverseSpaceCharge._efield_koelbig .' +
+ 'Did you call make (or f2py general/errfff.f)?')
+ sig_sqrt = _sig_sqrt(sig_x, sig_y)
+ w1re, w1im = pm._errf(x/sig_sqrt, y/sig_sqrt)
+ ex = pm.exp(-x*x / (2 * sig_x*sig_x) +
+ -y*y / (2 * sig_y*sig_y))
+ w2re, w2im = pm._errf(x * sig_y/(sig_x*sig_sqrt),
+ y * sig_x/(sig_y*sig_sqrt))
+ pref = 1. / (2 * epsilon_0 * np.sqrt(pi) * sig_sqrt)
+ return pref * (w1im - ex * w2im), pref * (w1re - ex * w2re)
+
+
+def wfun(z):
+ '''FADDEEVA function as implemented in PyECLOUD, vectorised.'''
+ x = z.real
+ y = z.imag
+ if not pm._errf:
+ raise ImportError('errfff cannot be imported for using ' +
+ 'TransverseSpaceCharge._efield_pyecloud .'
+ + 'Did you f2py errfff.f?')
+ wx, wy = pm._errf(x, y) # in PyECLOUD only pm._errf_f (not vectorised)
+ return wx+1j*wy
+
+
+def _efieldn_pyecloud(xin, yin, sigmax, sigmay):
+ '''The charge-normalised electric field components of a
+ two-dimensional Gaussian charge distribution according to
+ M. Bassetti and G. A. Erskine in CERN-ISR-TH/80-06.
+
+ Return (E_x / Q, E_y / Q).
+
+ Effective copy of PyECLOUD.BassErsk.BassErsk implementation.
+ '''
+ x = abs(xin)
+ y = abs(yin)
+ eps0 = 8.854187817620e-12
+ if sigmax > sigmay:
+ S = np.sqrt(2*(sigmax*sigmax-sigmay*sigmay))
+ factBE = 1/(2*eps0*np.sqrt(pi)*S)
+ etaBE = sigmay/sigmax*x+1j*sigmax/sigmay*y
+ zetaBE = x+1j*y
+ val = factBE*(wfun(zetaBE/S) -
+ np.exp(-x*x/(2*sigmax*sigmax)-y*y/(2*sigmay*sigmay)) *
+ wfun(etaBE/S))
+ Ex = abs(val.imag)*np.sign(xin)
+ Ey = abs(val.real)*np.sign(yin)
+ else:
+ S = np.sqrt(2*(sigmay*sigmay-sigmax*sigmax))
+ factBE = 1/(2*eps0*np.sqrt(pi)*S)
+ etaBE = sigmax/sigmay*y+1j*sigmay/sigmax*x
+ yetaBE = y+1j*x
+ val = factBE*(wfun(yetaBE/S) -
+ np.exp(-y*y/(2*sigmay*sigmay)-x*x/(2*sigmax*sigmax)) *
+ wfun(etaBE/S))
+ Ey = abs(val.imag)*np.sign(yin)
+ Ex = abs(val.real)*np.sign(xin)
+ return Ex, Ey
+
+
+@np.vectorize
+def _efieldn_kv_a(x, y, sigma_x, sigma_y):
+ '''
+ Field of a KV distribution calculated as in https://cds.cern.ch/record/258225/files/P00020427.pdf
+ '''
+ a = sigma_x*pm.sqrt(2)
+ b = sigma_y*pm.sqrt(2)
+ if (x/a)**2+(y/b)**2 <= 1:
+ efield_x = 4.0/(a+b)*x/a
+ efield_y = 4.0/(a+b)*y/b
+ else:
+ uxy = (x)**2-(y)**2 - (a)**2+(b)**2
+ vxy = uxy**2+(2.0*x*y)**2
+ efield_x = 4.0/(a**2-b**2)*(x-pm.sign(x) /
+ pm.sqrt(2.0)*pm.sqrt(uxy+pm.sqrt(vxy)))
+ uxy = (y)**2-(x)**2 - (b)**2+(a)**2
+ efield_y = 4.0/(b**2-a**2)*(y - pm.sign(y) /
+ pm.sqrt(2.0)*pm.sqrt(uxy+pm.sqrt(vxy)))
+ denom = 4*np.pi*epsilon_0
+ return efield_x/denom, efield_y/denom
+# vectorize is bad for cuda
+
+
+@np.vectorize
+def _efieldn_kv_b(x, y, sigma_x, sigma_y):
+ '''
+ Field of a KV distribution calculated as in https://cds.cern.ch/record/258225/files/P00020427.pdf
+ '''
+ a = sigma_x*pm.sqrt(2)
+ b = sigma_y*pm.sqrt(2)
+ if x == 0 and y == 0:
+ return 0, 0
+ elif (x/a)**2+(y/b)**2 <= 1:
+ efield_x = 4.0/(a+b)*x/a
+ efield_y = 4.0/(a+b)*y/b
+ else:
+ zbar = x-1j*y
+ efield = 4.0/(zbar+pm.sqrt(zbar*zbar-a*a+b*b))
+ efield_x = efield.real
+ efield_y = -efield.imag
+ denom = 4*np.pi*epsilon_0
+ return efield_x/denom, efield_y/denom
+
+
+@np.vectorize
+def _efieldn_wb(x, y, sigma_x, sigma_y):
+ a = sigma_x*pm.sqrt(3)
+ b = sigma_y*pm.sqrt(3)
+ zs = x-1j*y
+ # if x**2/(a)**2+y**2/(b)**2 <= 1:
+ chi = x/a+1j*y/b
+ omegs = b*x/a-1j*a*y/b
+ efield = 8.0*chi/(a+b) * \
+ (1.0-(2.0*zs+omegs)*chi/(3.0*(a+b)))
+ # else:
+ # zs = pm.abs(x)+1j*pm.abs(y)
+ # sqrt_diff = pm.sqrt(zs**2-a**2+b**2)
+ # first_term = 2.0*zs/(zs+sqrt_diff)
+ # efield = 2.0/zs*first_term*(zs+2.0*sqrt_diff)/(3.0*zs)
+ # efield = efield.real*pm.sign(x) - 1.0j*efield.imag*pm.sign(y)
+ denom = 4.*np.pi*epsilon_0
+ return efield.real/denom, efield.imag/denom
+
+
+def _efieldn_gauss_round(x, y, sig_x, sig_y):
+ '''Return (E_x / Q, E_y / Q) for a round distribution
+ with sigma_x == sigma_y == sig_r .
+ '''
+ r2 = x*x + y*y
+ sig_r = sig_x
+ amplitude = (1 - pm.exp(-r2/(2*sig_r*sig_r))) / (2*pi*epsilon_0 * r2)
+ return x * amplitude, y * amplitude
+
+
+def _efieldn_linearized(x, y, sig_x, sig_y):
+ '''
+ Returns the linearized field
+ '''
+ a = pm.sqrt(2)*sig_x
+ b = pm.sqrt(2)*sig_y
+ amplitude = 1./(np.pi*epsilon_0*(a+b))
+ return x/a * amplitude, y/b * amplitude
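(Editorial check, not part of the patch: for r much smaller than sig_r the round-Gaussian field above reduces to the linear field r/(4*pi*epsilon_0*sig_r**2), the expected small-amplitude limit:)

    import numpy as np
    from scipy.constants import epsilon_0, pi

    sig_r, x = 1e-3, 1e-6                      # invented values, y = 0
    full = (1 - np.exp(-x*x/(2*sig_r**2))) / (2*pi*epsilon_0*x*x) * x
    linear = x / (4*pi*epsilon_0*sig_r**2)
    # full/linear -> 1 as x -> 0; here they agree to better than 1e-6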
+
+
+def add_sigma_check(efieldn, dist):
+ '''Wrapper for a normalised electric field function.
+
+ Adds the following actions before calculating the field:
+ - exchange x and y quantities if sigma_x < sigma_y
+ - apply round beam field formula when sigma_x close to sigma_y
+ '''
+
+ '''Threshold for relative transverse beam size difference
+ below which the beam is assumed to be round:
+ abs(1 - sig_y / sig_x) < ratio_threshold ==> round beam
+ '''
+ ratio_threshold = 1e-3
+
+ '''Threshold for absolute transverse beam size difference
+ below which the beam is assumed to be round:
+ abs(sig_y - sig_x) < absolute_threshold ==> round beam
+ '''
+ absolute_threshold = 1e-10
+ if dist == 'GS':
+ efieldn_round = _efieldn_gauss_round
+ elif dist == 'KV':
+ efieldn_round = _efieldn_linearized
+ elif dist == 'LN':
+ efieldn_round = _efieldn_linearized
+
+ @wraps(efieldn)
+ def efieldn_checked(x, y, sig_x, sig_y, *args, **kwargs):
+ tol_kwargs = dict(
+ rtol=ratio_threshold,
+ atol=absolute_threshold
+ )
+ if pm.allclose(sig_y, sig_x, **tol_kwargs):
+ if pm.almost_zero(sig_y, **tol_kwargs):
+ en_x = en_y = pm.zeros(x.shape, dtype=x.dtype)
+ else:
+ en_x, en_y = efieldn_round(x, y, sig_x, sig_y, *args, **kwargs)
+ elif pm.all(sig_x < sig_y):
+ en_y, en_x = efieldn(y, x, sig_y, sig_x, *args, **kwargs)
+ else:
+ en_x, en_y = efieldn(x, y, sig_x, sig_y, *args, **kwargs)
+ return en_x, en_y
+ return efieldn_checked
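(Editorial sketch of how the wrapper is meant to be used; ion_cloud.py below calls it the same way. Values are invented.)

    import numpy as np

    efieldn = add_sigma_check(_efieldn_mit, 'GS')
    x, y = np.array([1e-4]), np.array([5e-5])
    # nearly round beam -> dispatched to the round-Gaussian formula
    en_x, en_y = efieldn(x, y, 1e-3, 1e-3*(1. - 1e-5))
    # flat beam with sig_x < sig_y -> x and y are swapped internally
    en_x, en_y = efieldn(x, y, 0.5e-3, 1e-3)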
diff --git a/PyHEADTAIL/gpu/gpu_wrap.py b/PyHEADTAIL/gpu/gpu_wrap.py
index 6d43597d..b45708f7 100644
--- a/PyHEADTAIL/gpu/gpu_wrap.py
+++ b/PyHEADTAIL/gpu/gpu_wrap.py
@@ -17,7 +17,7 @@
import pycuda.compiler
import pycuda.driver as drv
import pycuda.elementwise
- import PyHEADTAIL.gpu.thrust_interface
+ import PyHEADTAIL.gpu.thrust_interface as thrust_interface
# if pycuda is there, try to compile things. If no context available,
# throw error to tell the user that he should import pycuda.autoinit
diff --git a/PyHEADTAIL/gpu/pypic.py b/PyHEADTAIL/gpu/pypic.py
index 0af75c53..ea8f8b8a 100644
--- a/PyHEADTAIL/gpu/pypic.py
+++ b/PyHEADTAIL/gpu/pypic.py
@@ -8,7 +8,7 @@ class depending on the current context (in pmath).
'''
from PyHEADTAIL.general import pmath as pm
-from pm import UnknownContextManagerError
+from PyHEADTAIL.general.pmath import UnknownContextManagerError
from PyPIC.GPU import pypic
diff --git a/PyHEADTAIL/ion_cloud/__init__.py b/PyHEADTAIL/ion_cloud/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/PyHEADTAIL/ion_cloud/ion_cloud.py b/PyHEADTAIL/ion_cloud/ion_cloud.py
new file mode 100644
index 00000000..d80fa712
--- /dev/null
+++ b/PyHEADTAIL/ion_cloud/ion_cloud.py
@@ -0,0 +1,342 @@
+import PyHEADTAIL.aperture.aperture as aperture
+from PyHEADTAIL.field_maps import efields_funcs as efields
+from PyHEADTAIL.general import pmath as pm
+from PyHEADTAIL.general.element import Element
+from PyHEADTAIL.general.printers import SilentPrinter
+from PyHEADTAIL.monitors.monitors import BunchMonitor, ParticleMonitor
+from PyHEADTAIL.particles import particles, generators
+from scipy.constants import m_p, e, c
+from numpy import linspace, int64
+import PyPIC.geom_impact_ellip as ell
+import PyPIC.FFT_OpenBoundary as PIC_FFT
+
+
+H_RF = 416
+CIRCUMFERENCE = 354
+N_SEGMENTS = 500
+N_TURNS = 1000
+
+
+class BeamIonElement(Element):
+ '''
+ This element holds the attributes and methods to initialize the ion beam
+ properties, to create an instance of the `Particles` class which
+ represents the ion beam, to create an instance of the `BunchMonitor` or
+ `ParticleMonitor` classes to monitor the ion beam, and to track the
+ interaction between the electron bunch and the ion beam.
+
+ Attributes:
+ ion_beam (Particles): An instance of the `Particles` class that represents the ion beam
+ dist (str): The distribution of ions in the beam (default is 'GS')
+ dist_func_z (func): A function that generates the z distribution of ions
+ _efieldn (func): A function that generates the electric field of the electron bunch
+ sig_check (bool): A boolean to specify if the sigma check for the electron field is activated
+ dist_func_x (func): A function that generates the x distribution of ions
+ dist_func_y (func): A function that generates the y distribution of ions
+ interaction_model (str): A string that sets the interaction model between the electron and the ion bunches
+ set_aperture (bool): A boolean to specify if the aperture should be set
+ L_sep (float): A scalar value that gives the distance between the electron and the ion bunches
+ N_MACROPARTICLES (int): The number of macroparticles in the ion beam
+ N_MACROPARTICLES_MAX (int): The maximum number of macroparticles in the ion beam
+ CIRCUMFERENCE (float): The circumference of the ion beam
+ N_SEGMENTS (int): The number of segments for the ion beam
+ L_SEG (float): The length of each segment of the ion beam
+ n_g (float): The residual gas density in the vacuum chamber
+ sigma_i (float): Ionization cross-section of ions
+ A (float): The mass number of the ions
+ n_steps (int): The number of tracking steps for the monitor
+ charge_state (int): The charge state of the ions
+ ions_monitor (Union[BunchMonitor, ParticleMonitor, None]): An instance of the `BunchMonitor` or `ParticleMonitor`
+ classes that monitor the ion beam
+ '''
+
+ def __init__(self, sig_check=True,
+ dist_ions='GS',
+ monitor_name=None,
+ use_particle_monitor=False,
+ L_sep=0.85,
+ n_macroparticles_max=int(2e4),
+ set_aperture=True,
+ n_segments=500,
+ ring_circumference=354,
+ n_steps=None,
+ interaction_model='weak',
+ interaction_model_ions='strong',
+ n_g = 3.9e12,
+ A = 28,
+ sigma_i = 1.78e-22,
+ ):
+ self.use_particle_monitor = use_particle_monitor
+ self.dist = dist_ions
+ self.monitor_name = monitor_name
+ self.L_sep = L_sep
+ self.N_MACROPARTICLES_MAX = n_macroparticles_max
+ self.set_aperture = set_aperture
+ self.n_segments = n_segments
+ self.ring_circumference = ring_circumference
+ self.n_steps = n_steps
+ self.interaction_model = interaction_model
+ self.interaction_model_ions = interaction_model_ions
+ self._set_distribution_for_particle_generation()
+ self.N_MACROPARTICLES = 30
+ self.L_SEG = self.ring_circumference/self.n_segments
+ self.n_g = n_g # value from old simulations: 2.4e13 (m**-3)
+ self.sigma_i = sigma_i # (m**2)
+ self.A = A
+ self.charge_state = 1
+ self.ion_beam = particles.Particles(
+ macroparticlenumber=1,
+ particlenumber_per_mp=1,
+ charge=self.charge_state*e,
+ mass=self.A*m_p,
+ circumference=self.ring_circumference,
+ gamma=1.0001,
+ coords_n_momenta_dict={
+ 'x': [0, ],
+ 'xp': [0, ],
+ 'y': [0, ],
+ 'yp': [0, ],
+ 'z': [0, ],
+ 'dp': [0, ]
+ }
+ )
+ self._add_monitors()
+
+ def _set_distribution_for_particle_generation(self):
+ self.dist_func_z = generators.uniform2D
+ if self.dist == 'GS':
+ self._efieldn = efields._efieldn_mit
+ self.dist_func_x = generators.gaussian2D_asymmetrical
+ self.dist_func_y = generators.gaussian2D_asymmetrical
+ elif self.dist == 'LN':
+ self._efieldn = efields._efieldn_linearized
+ self.dist_func_x = generators.uniform2D
+ self.dist_func_y = generators.uniform2D
+ else:
+ raise ValueError(
+ "The ion distribution '{:}' is not implemented".format(self.dist))
+ self._efieldn = efields.add_sigma_check(
+ self._efieldn, self.dist)
+
+ def _add_monitors(self):
+ if self.monitor_name is not None:
+ if self.use_particle_monitor:
+ self.ions_monitor = ParticleMonitor(self.monitor_name,
+ stride=1,
+ parameters_dict=None
+ )
+ else:
+ self.ions_monitor = BunchMonitor(self.monitor_name,
+ n_steps=self.n_steps,
+ parameters_dict=None,
+ write_buffer_every=50,
+ buffer_size=100,
+ )
+ else:
+ self.ions_monitor = None
+
+ def get_ion_beam(self):
+ """
+ A method to access the ion beam object
+ """
+ return self.ion_beam
+
+ def clear_ions(self):
+ self.ion_beam = particles.Particles(
+ macroparticlenumber=1,
+ particlenumber_per_mp=1,
+ charge=self.charge_state*e,
+ mass=self.A*m_p,
+ circumference=self.ring_circumference,
+ gamma=1.0001,
+ coords_n_momenta_dict={
+ 'x': [0, ],
+ 'xp': [0, ],
+ 'y': [0, ],
+ 'yp': [0, ],
+ 'z': [0, ],
+ 'dp': [0, ]
+ })
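(Editorial note, not part of the patch: the next method generates macroparticles in mirrored +/- pairs. A two-line sketch of why this suppresses centroid noise:)

    import numpy as np

    half = np.random.normal(size=500)
    x = np.concatenate((half, -half))
    # x.mean() is exactly 0, whereas half.mean() fluctuates as 1/sqrt(500),
    # so the generated ion cloud carries no artificial dipole moment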
+
+ def _generate_ions(self, electron_bunch, ion_intensity):
+ '''
+ Particles are generated in pairs -x, -y and +x, +y to avoid
+ numerical noise. The idea comes from Blaskiewicz, M. (2019)
+ https://doi.org/10.18429/JACoW-NAPAC2019-TUPLM11
+ '''
+ assert (self.dist in ['LN', 'GS']), (
+ 'The implementation for the required distribution {:} is not found'.format(self.dist))
+ if self.dist == 'LN':
+ a_x, b_x = -2*electron_bunch.sigma_x(), 2*electron_bunch.sigma_x()
+ a_y, b_y = -2*electron_bunch.sigma_y(), 2*electron_bunch.sigma_y()
+ elif self.dist == 'GS':
+ a_x, b_x = electron_bunch.sigma_x(), electron_bunch.sigma_xp()
+ a_y, b_y = electron_bunch.sigma_y(), electron_bunch.sigma_yp()
+ new_particles = generators.ParticleGenerator(
+ macroparticlenumber=self.N_MACROPARTICLES//2,
+ intensity=int(ion_intensity)//2,
+ charge=self.charge_state*e,
+ gamma=1.0001,
+ mass=self.A*m_p,
+ circumference=self.ring_circumference,
+ distribution_x=self.dist_func_x(a_x, b_x),
+ distribution_y=self.dist_func_y(a_y, b_y),
+ distribution_z=self.dist_func_z(
+ -self.L_SEG/2, self.L_SEG/2),
+ limit_n_rms_x=3.,
+ limit_n_rms_y=3.,
+ printer=SilentPrinter()
+ ).generate()
+ new_particles_twin = particles.Particles(
+ macroparticlenumber=self.N_MACROPARTICLES//2,
+ particlenumber_per_mp=ion_intensity/self.N_MACROPARTICLES,
+ charge=self.charge_state*e,
+ gamma=1.0001,
+ mass=self.A*m_p,
+ circumference=self.ring_circumference,
+ coords_n_momenta_dict={
+ 'x': -new_particles.x,
+ 'xp': -new_particles.xp,
+ 'y': -new_particles.y,
+ 'yp': -new_particles.yp,
+ 'z': -new_particles.z,
+ 'dp': -new_particles.dp
+ },
+ printer=SilentPrinter()
+ )
+ new_particles += new_particles_twin
+ # Apply initial conditions
+ new_particles.x[:] += electron_bunch.mean_x()
+ new_particles.y[:] += electron_bunch.mean_y()
+ new_particles.xp[:] = 0
+ new_particles.yp[:] = 0
+ self.ion_beam += new_particles
+ self.ion_beam.particlenumber_per_mp = new_particles.particlenumber_per_mp
+ self.ions_aperture = aperture.EllipticalApertureXY(
+ x_aper=5*electron_bunch.sigma_x(),
+ y_aper=5*electron_bunch.sigma_y())
+
+ def track_ions_in_drift(self, p_id_ions):
+ drifted_ions_x = pm.take(
+ self.ion_beam.xp, p_id_ions)*self.L_sep + pm.take(self.ion_beam.x, p_id_ions)
+ drifted_ions_y = pm.take(
+ self.ion_beam.yp, p_id_ions)*self.L_sep + pm.take(self.ion_beam.y, p_id_ions)
+ pm.put(self.ion_beam.x, p_id_ions, drifted_ions_x)
+ pm.put(self.ion_beam.y, p_id_ions, drifted_ions_y)
+
+ def get_updated_ion_positions(self, electron_bunch):
+ pass
+
+ def _get_efields(self, first_beam, second_beam, p_id_first_beam, interaction_model='weak'):
+ assert (interaction_model in ['weak', 'strong', 'PIC']), (
+ 'The required beam-ion interaction model {:} is not implemented'.format(interaction_model))
+ if interaction_model == 'weak':
+ en_x, en_y = self.get_efieldn(
+ pm.take(first_beam.x, p_id_first_beam),
+ pm.take(first_beam.y, p_id_first_beam),
+ second_beam.mean_x(), second_beam.mean_y(),
+ second_beam.sigma_x(), second_beam.sigma_y())
+ elif interaction_model == 'strong':
+ en_x, en_y = self.get_efieldn(
+ first_beam.mean_x(),
+ first_beam.mean_y(),
+ second_beam.mean_x(), second_beam.mean_y(),
+ second_beam.sigma_x(), second_beam.sigma_y())
+ elif interaction_model == 'PIC':
+ qe = 1.602176565e-19
+ eps0 = 8.8541878176e-12
+ Dx = 0.1*second_beam.sigma_x()
+ Dy = 0.1*second_beam.sigma_y()
+ x_aper = 10*second_beam.sigma_x()
+ y_aper = 10*second_beam.sigma_y()
+ chamber = ell.ellip_cham_geom_object(x_aper=x_aper, y_aper=y_aper)
+ picFFT = PIC_FFT.FFT_OpenBoundary(
+ x_aper=chamber.x_aper, y_aper=chamber.y_aper, dx=Dx, dy=Dy, fftlib='pyfftw')
+ nel_part = 0*second_beam.x+1.
+ picFFT.scatter(second_beam.x, second_beam.y, nel_part)
+ picFFT.solve()
+ en_x, en_y = picFFT.gather(
+ first_beam.x, first_beam.y)
+ en_x /= qe*second_beam.x.shape[0]
+ en_y /= qe*second_beam.x.shape[0]
+ return en_x, en_y
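(Editorial note, not part of the patch: the three models above differ in where the field is evaluated, not in the kick formula applied afterwards: 'weak' evaluates the counterpart's Gaussian field at every particle, 'strong' only at the centroid, and 'PIC' gathers it from a grid solution. A toy comparison, using tanh as a stand-in nonlinear field:)

    import numpy as np

    x = np.random.normal(0., 1e-3, 1000)    # invented particle positions
    weak_kicks = np.tanh(x / 1e-3)          # one kick per particle
    strong_kick = np.tanh(x.mean() / 1e-3)  # single centroid kick
    # for a nonlinear field, weak_kicks.mean() != strong_kick in general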
+
+ def track(self, electron_bunch):
+ '''Tracking method for the interaction between an electron bunch
+ and an ion beam (2D electromagnetic field).
+ The kicks are applied both to the electron beam slice and to the
+ ion beam. The ion beam is tracked through drifts between the
+ space-charge sections of the electron bunches.
+
+ The interaction is computed via Eqs. (17, 18) of
+
+ Tian, S. K.; Wang, N. (2018). Ion instability in the HEPS storage ring.
+ FLS 2018 - Proceedings of the 60th ICFA Advanced Beam Dynamics Workshop on Future Light Sources,
+ 34-38. https://doi.org/10.18429/JACoW-FLS2018-TUA2WB04
+ '''
+ self.ION_INTENSITY_PER_ELECTRON_BUNCH = electron_bunch.intensity * \
+ self.sigma_i*self.n_g*self.L_SEG
+ self._generate_ions(electron_bunch, ion_intensity=self.ION_INTENSITY_PER_ELECTRON_BUNCH)
+ # if self.ion_beam.macroparticlenumber < self.N_MACROPARTICLES_MAX:
+ # self._generate_ions(electron_bunch)
+ # else:
+ # self.ion_beam.intensity += self.ION_INTENSITY_PER_ELECTRON_BUNCH
+
+ if self.set_aperture:
+ self.ions_aperture.track(self.ion_beam)
+
+ if self.ions_monitor is not None:
+ self.ions_monitor.dump(self.ion_beam)
+ prefactor_kick_ion_field = -(self.ion_beam.intensity *
+ self.ion_beam.charge*electron_bunch.charge /
+ (electron_bunch.p0*electron_bunch.beta*c))
+ prefactor_kick_electron_field = -(electron_bunch.intensity *
+ electron_bunch.charge*self.ion_beam.charge /
+ (self.ion_beam.mass*c**2))
+ p_id_electrons = electron_bunch.id-1
+ p_id_ions = linspace(
+ 0, self.ion_beam.y.shape[0]-1, self.ion_beam.y.shape[0], dtype=int64)
+ en_ions_x, en_ions_y = self._get_efields(first_beam=electron_bunch,
+ second_beam=self.ion_beam,
+ p_id_first_beam=p_id_electrons,
+ interaction_model='strong')
+ if self.interaction_model == 'PIC':
+ en_electrons_x, en_electrons_y = self._get_efields(first_beam=self.ion_beam,
+ second_beam=electron_bunch,
+ p_id_first_beam=p_id_ions,
+ interaction_model='PIC')
+ else:
+ en_electrons_x, en_electrons_y = self._get_efields(first_beam=self.ion_beam,
+ second_beam=electron_bunch,
+ p_id_first_beam=p_id_ions,
+ interaction_model='weak')
+
+ kicks_electrons_x = en_ions_x * prefactor_kick_ion_field
+ kicks_electrons_y = en_ions_y * prefactor_kick_ion_field
+ kicks_ions_x = en_electrons_x * prefactor_kick_electron_field
+ kicks_ions_y = en_electrons_y * prefactor_kick_electron_field
+ kicked_electrons_xp = pm.take(
+ electron_bunch.xp, p_id_electrons) + kicks_electrons_x
+ kicked_electrons_yp = pm.take(
+ electron_bunch.yp, p_id_electrons) + kicks_electrons_y
+ kicked_ions_xp = pm.take(self.ion_beam.xp, p_id_ions) + kicks_ions_x
+ kicked_ions_yp = pm.take(self.ion_beam.yp, p_id_ions) + kicks_ions_y
+
+ pm.put(electron_bunch.xp, p_id_electrons, kicked_electrons_xp)
+ pm.put(electron_bunch.yp, p_id_electrons, kicked_electrons_yp)
+
+ pm.put(self.ion_beam.xp, p_id_ions, kicked_ions_xp)
+ pm.put(self.ion_beam.yp, p_id_ions, kicked_ions_yp)
+ self.track_ions_in_drift(p_id_ions)
+
+ def get_efieldn(self, xr, yr, mean_x, mean_y, sig_x, sig_y):
+ '''The charge-normalised electric field components of a
+ two-dimensional Gaussian charge distribution according to
+ M. Bassetti and G. A. Erskine in CERN-ISR-TH/80-06.
+
+ Return (E_x / Q, E_y / Q).
+ '''
+ x = xr - mean_x
+ y = yr - mean_y
+
+ # absolute values for convergence reasons of erfc
+ en_x, en_y = self._efieldn(pm.abs(x), pm.abs(y), sig_x, sig_y)
+ en_x = pm.abs(en_x) * pm.sign(x)
+ en_y = pm.abs(en_y) * pm.sign(y)
+
+ return en_x, en_y
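(A minimal usage sketch for the element above; editorial, with invented names and values, cf. the class docstring.)

    beam_ion = BeamIonElement(dist_ions='GS', interaction_model='weak',
                              n_segments=500, ring_circumference=354.)

    # for turn in range(n_turns):
    #     for segment in one_turn_map:    # transverse tracking segments
    #         segment.track(bunch)
    #         beam_ion.track(bunch)       # ion kick plus drift to next segment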
diff --git a/PyHEADTAIL/monitors/monitors.py b/PyHEADTAIL/monitors/monitors.py
index b703a860..bdc3b0eb 100644
--- a/PyHEADTAIL/monitors/monitors.py
+++ b/PyHEADTAIL/monitors/monitors.py
@@ -7,7 +7,6 @@
"""
-
import h5py as hp
import numpy as np
import sys
@@ -42,6 +41,7 @@ class BunchMonitor(Monitor):
operations to file. This also helps to avoid IO errors and loss of
data when writing to a file that may become temporarily unavailable
(e.g. if file is located on network) during the simulation. """
+
def __init__(self, filename, n_steps, parameters_dict=None,
write_buffer_every=512, buffer_size=4096,
*args, **kwargs):
@@ -65,7 +65,7 @@ def __init__(self, filename, n_steps, parameters_dict=None,
stats_to_store = [
'mean_x', 'mean_xp', 'mean_y', 'mean_yp', 'mean_z', 'mean_dp',
'sigma_x', 'sigma_y', 'sigma_z', 'sigma_dp', 'epsn_x', 'epsn_y',
- 'epsn_z', 'macroparticlenumber' ]
+ 'epsn_z', 'macroparticlenumber']
self.stats_to_store = kwargs.pop('stats_to_store', stats_to_store)
self.filename = filename
self.n_steps = n_steps
@@ -143,13 +143,15 @@ def _write_data_to_buffer(self, bunch):
# (macroparticlenumber) of the bunch.
write_pos = self.i_steps % self.buffer_size
try:
- if pm.device is 'is_.2slowerwiththis':#'GPU':
- #val_bf[stat]
+ if pm.device is 'is_.2slowerwiththis': # 'GPU':
+ # val_bf[stat]
st = next(gpu_utils.stream_pool)
val_buf[stats] = evaluate_stats(stream=st)
- p_write[stats] = int(self.buffer[stats].gpudata) + write_pos*self.buffer[stats].strides[0]
- sze = 8#val.nbytes
- gpu_utils.driver.memcpy_dtod_async(dest=p_write[stats], src=val_buf[stats].gpudata, size=sze, stream=st)
+ p_write[stats] = int(
+ self.buffer[stats].gpudata) + write_pos*self.buffer[stats].strides[0]
+ sze = 8 # val.nbytes
+ gpu_utils.driver.memcpy_dtod_async(
+ dest=p_write[stats], src=val_buf[stats].gpudata, size=sze, stream=st)
else:
self.buffer[stats][write_pos] = evaluate_stats()
except TypeError:
@@ -163,15 +165,15 @@ def _write_buffer_to_file(self):
self.buffer is on the GPU, copy the data to buffer_tmp and
write the result to the file."""
-
buffer_tmp = {} # always on CPU
shift = - (self.i_steps + 1 % self.buffer_size)
for stats in self.stats_to_store:
try:
buffer_tmp[stats] = np.roll(self.buffer[stats].get(),
- shift=shift, axis=0)
+ shift=shift, axis=0)
except:
buffer_tmp[stats] = np.roll(self.buffer[stats].copy(),
- shift=shift, axis=0)
+ shift=shift, axis=0)
n_entries_in_buffer = min(self.i_steps+1, self.buffer_size)
low_pos_in_buffer = self.buffer_size - n_entries_in_buffer
low_pos_in_file = self.i_steps + 1 - n_entries_in_buffer
@@ -226,16 +228,16 @@ def __init__(self, filename, n_steps, slicer, parameters_dict=None,
bunch_stats_to_store = [
'mean_x', 'mean_xp', 'mean_y', 'mean_yp', 'mean_z', 'mean_dp',
'sigma_x', 'sigma_y', 'sigma_z', 'sigma_dp', 'epsn_x', 'epsn_y',
- 'epsn_z', 'macroparticlenumber' ]
+ 'epsn_z', 'macroparticlenumber']
slice_stats_to_store = [
'mean_x', 'mean_xp', 'mean_y', 'mean_yp', 'mean_z', 'mean_dp',
'sigma_x', 'sigma_y', 'sigma_z', 'sigma_dp', 'epsn_x', 'epsn_y',
- 'epsn_z', 'n_macroparticles_per_slice' ]
+ 'epsn_z', 'n_macroparticles_per_slice']
self.bunch_stats_to_store = kwargs.pop('bunch_stats_to_store',
- bunch_stats_to_store)
+ bunch_stats_to_store)
self.slice_stats_to_store = kwargs.pop('slice_stats_to_store',
- slice_stats_to_store)
+ slice_stats_to_store)
self.filename = filename
self.n_steps = n_steps
@@ -251,9 +253,9 @@ def __init__(self, filename, n_steps, slicer, parameters_dict=None,
def _init_buffer(self, bunch, slice_set):
self.buffer_bunch = pm.init_bunch_buffer(bunch,
- self.bunch_stats_to_store, self.buffer_size)
+ self.bunch_stats_to_store, self.buffer_size)
self.buffer_slice = pm.init_slice_buffer(slice_set,
- self.slice_stats_to_store, self.buffer_size)
+ self.slice_stats_to_store, self.buffer_size)
def dump(self, bunch):
""" Evaluate the statistics like mean and standard deviation for
@@ -293,10 +295,10 @@ def _create_file_structure(self, parameters_dict):
for stats in self.bunch_stats_to_store:
h5group_bunch.create_dataset(stats, shape=(self.n_steps,),
- compression='gzip', compression_opts=9)
+ compression='gzip', compression_opts=9)
for stats in self.slice_stats_to_store:
h5group_slice.create_dataset(stats, shape=(self.slicer.n_slices,
- self.n_steps), compression='gzip', compression_opts=9)
+ self.n_steps), compression='gzip', compression_opts=9)
h5file.close()
except Exception as err:
self.warns('Problem occurred during Slice monitor creation.')
@@ -335,23 +337,23 @@ def _write_buffer_to_file(self):
""" Write buffer contents to the HDF5 file. The file is opened and
closed each time the buffer is written to file to prevent from loss
of data in case of a crash. """
- buffer_tmp_bunch = {} # always on CPU
+ buffer_tmp_bunch = {} # always on CPU
buffer_tmp_slice = {}
shift = - (self.i_steps + 1 % self.buffer_size)
for stats in self.bunch_stats_to_store:
try:
buffer_tmp_bunch[stats] = np.roll(self.buffer_bunch[stats].get(),
- shift=shift, axis=0)
+ shift=shift, axis=0)
except:
buffer_tmp_bunch[stats] = np.roll(self.buffer_bunch[stats].copy(),
- shift=shift, axis=0)
+ shift=shift, axis=0)
for stats in self.slice_stats_to_store:
try:
buffer_tmp_slice[stats] = np.roll(self.buffer_slice[stats].get(),
- shift=shift, axis=1)
+ shift=shift, axis=1)
except:
buffer_tmp_slice[stats] = np.roll(self.buffer_slice[stats].copy(),
- shift=shift, axis=1)
+ shift=shift, axis=1)
# Keep track of where to read from buffers and where to store
# data in file.
@@ -370,11 +372,11 @@ def _write_buffer_to_file(self):
for stats in self.bunch_stats_to_store:
h5group_bunch[stats][low_pos_in_file:up_pos_in_file] = \
buffer_tmp_bunch[stats][low_pos_in_buffer:]
- #self.buffer_bunch[stats][low_pos_in_buffer:]
+ # self.buffer_bunch[stats][low_pos_in_buffer:]
for stats in self.slice_stats_to_store:
- h5group_slice[stats][:,low_pos_in_file:up_pos_in_file] = \
- buffer_tmp_slice[stats][:,low_pos_in_buffer:]
- #self.buffer_slice[stats][:,low_pos_in_buffer:]
+ h5group_slice[stats][:, low_pos_in_file:up_pos_in_file] = \
+ buffer_tmp_slice[stats][:, low_pos_in_buffer:]
+ # self.buffer_slice[stats][:,low_pos_in_buffer:]
h5file.close()
except IOError:
self.warns('Slice monitor file is temporarily unavailable. \n')
@@ -401,7 +403,7 @@ def __init__(self, filename, stride=1, parameters_dict=None,
Optionally pass a list called quantities_to_store which
specifies which members of the bunch will be called/stored.
"""
- quantities_to_store = [ 'x', 'xp', 'y', 'yp', 'z', 'dp', 'id' ]
+ quantities_to_store = ['x', 'xp', 'y', 'yp', 'z', 'dp', 'id']
self.quantities_to_store = kwargs.pop('quantities_to_store',
quantities_to_store)
self.filename = filename
@@ -421,6 +423,7 @@ def _create_file_structure(self, parameters_dict):
contents of the parameters_dict as metadata (attributes) to the
file.
Maximum file compression is activated. """ try: + print(self.filename) h5file = hp.File(self.filename + '.h5part', 'w') if parameters_dict: for key in parameters_dict: @@ -443,7 +446,8 @@ def _write_data_to_file(self, bunch, arrays_dict): h5file = hp.File(self.filename + '.h5part', 'a') h5group = h5file.create_group('Step#' + str(self.i_steps)) dims = (bunch.macroparticlenumber // self.stride,) - dims = list(bunch.get_coords_n_momenta_dict().values())[0][::self.stride].shape # more robust implementation + dims = list(bunch.get_coords_n_momenta_dict().values())[ + 0][::self.stride].shape # more robust implementation # resorting_indices = np.argsort(bunch.id)[::self.stride] all_quantities = {} @@ -463,7 +467,7 @@ def _write_data_to_file(self, bunch, arrays_dict): for quant in list(all_quantities.keys()): quant_values = all_quantities[quant] h5group.create_dataset(quant, shape=dims, compression='gzip', - compression_opts=9, dtype=quant_values.dtype) + compression_opts=9, dtype=quant_values.dtype) h5group[quant][:] = quant_values[::self.stride] h5file.close() @@ -587,13 +591,13 @@ def _write_data_to_buffer(self, bunch): self.beta_z, self.radial_cut, self.n_radial_slices, self.n_azimuthal_slices) - self.buffer_cell['mean_x'][:,:,0] = x_cl[:,:] - self.buffer_cell['mean_xp'][:,:,0] = xp_cl[:,:] - self.buffer_cell['mean_y'][:,:,0] = y_cl[:,:] - self.buffer_cell['mean_yp'][:,:,0] = yp_cl[:,:] - self.buffer_cell['mean_z'][:,:,0] = z_cl[:,:] - self.buffer_cell['mean_dp'][:,:,0] = dp_cl[:,:] - self.buffer_cell['macroparticlenumber'][:,:,0] = n_cl[:,:] + self.buffer_cell['mean_x'][:, :, 0] = x_cl[:, :] + self.buffer_cell['mean_xp'][:, :, 0] = xp_cl[:, :] + self.buffer_cell['mean_y'][:, :, 0] = y_cl[:, :] + self.buffer_cell['mean_yp'][:, :, 0] = yp_cl[:, :] + self.buffer_cell['mean_z'][:, :, 0] = z_cl[:, :] + self.buffer_cell['mean_dp'][:, :, 0] = dp_cl[:, :] + self.buffer_cell['macroparticlenumber'][:, :, 0] = n_cl[:, :] for stats in self.stats_to_store: self.buffer_cell[stats] = np.roll( @@ -618,8 +622,8 @@ def _write_buffer_to_file(self): h5group_cells = h5file['Cells'] for stats in self.stats_to_store: - h5group_cells[stats][:,:,low_pos_in_file:up_pos_in_file] = \ - self.buffer_cell[stats][:,:,low_pos_in_buffer:] + h5group_cells[stats][:, :, low_pos_in_file:up_pos_in_file] = \ + self.buffer_cell[stats][:, :, low_pos_in_buffer:] h5file.close() except Exception as err: self.warns(err.message) diff --git a/PyHEADTAIL/multipoles/multipoles.py b/PyHEADTAIL/multipoles/multipoles.py index 6eac3064..5d5a8436 100644 --- a/PyHEADTAIL/multipoles/multipoles.py +++ b/PyHEADTAIL/multipoles/multipoles.py @@ -72,7 +72,6 @@ def __init__(self, k3l, *args, **kwargs): ''' self.kL = k3l self.kL6 = k3l / 6. - def track(self, beam): beam.xp -= self.kL6 * (beam.x*beam.x*beam.x - 3*beam.x*beam.y*beam.y) beam.yp -= self.kL6 * (beam.y*beam.y*beam.y - 3*beam.x*beam.x*beam.y) diff --git a/PyHEADTAIL/particles/generators.py b/PyHEADTAIL/particles/generators.py index 1b3dd5be..7eb07443 100644 --- a/PyHEADTAIL/particles/generators.py +++ b/PyHEADTAIL/particles/generators.py @@ -542,11 +542,14 @@ def _kv2d(n_particles): '''Create a two-dimensional phase space (u, up) Kapchinski-Vladimirski-type uniform distribution. 
diff --git a/PyHEADTAIL/multipoles/multipoles.py b/PyHEADTAIL/multipoles/multipoles.py
index 6eac3064..5d5a8436 100644
--- a/PyHEADTAIL/multipoles/multipoles.py
+++ b/PyHEADTAIL/multipoles/multipoles.py
@@ -72,7 +72,6 @@ def __init__(self, k3l, *args, **kwargs):
         '''
         self.kL = k3l
         self.kL6 = k3l / 6.
-
     def track(self, beam):
         beam.xp -= self.kL6 * (beam.x*beam.x*beam.x - 3*beam.x*beam.y*beam.y)
         beam.yp -= self.kL6 * (beam.y*beam.y*beam.y - 3*beam.x*beam.x*beam.y)
diff --git a/PyHEADTAIL/particles/generators.py b/PyHEADTAIL/particles/generators.py
index 1b3dd5be..7eb07443 100644
--- a/PyHEADTAIL/particles/generators.py
+++ b/PyHEADTAIL/particles/generators.py
@@ -542,11 +542,14 @@ def _kv2d(n_particles):
         '''Create a two-dimensional phase space (u, up)
         Kapchinski-Vladimirski-type uniform distribution.
         '''
-        rand = np.random.uniform(low=-0.5, high=0.5, size=n_particles)
-        u = np.sin(2 * np.pi * rand)
+        t = 2 * np.pi * np.random.uniform(low=-0.5, high=0.5, size=n_particles)
+        u = (np.random.uniform(low=0, high=1, size=n_particles) +
+             np.random.uniform(low=0, high=1, size=n_particles))
         r = np.where(u > 1, 2 - u, u)
-        sign = (-1)**np.random.randint(2, size=n_particles)
-        up = sign * np.sqrt(1. - r**2)
+        u = r_u * r * np.cos(t)
+        t = 2 * np.pi * np.random.uniform(low=-0.5, high=0.5, size=n_particles)
+        rp = np.sqrt(1. - r**2)
+        up = r_up * rp * np.cos(t)
         return [u, up]
     return _kv2d
diff --git a/PyHEADTAIL/particles/particles.py b/PyHEADTAIL/particles/particles.py
index fcf5e6b3..7e9bd4de 100644
--- a/PyHEADTAIL/particles/particles.py
+++ b/PyHEADTAIL/particles/particles.py
@@ -276,12 +276,11 @@ def __add__(self, other):
             particlenumber_per_mp=self.particlenumber_per_mp,
             charge=self.charge, mass=self.mass,
             circumference=self.circumference, gamma=self.gamma,
             coords_n_momenta_dict={})
-
         for coord in list(self_coords_n_momenta_dict.keys()):
             #setattr(result, coord, np.concatenate((self_coords_n_momenta_dict[coord].copy(), other_coords_n_momenta_dict[coord].copy())))
             result.update({coord: np.concatenate((self_coords_n_momenta_dict[coord].copy(), other_coords_n_momenta_dict[coord].copy()))})
-        result.id = np.concatenate((self.id.copy(), other.id.copy()))
+        # result.id = np.concatenate((self.id.copy(), other.id.copy()))
         return result
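[Editor's note] The refactored _kv2d above references r_u and r_up, which are not defined anywhere in the hunk, so the generator as shown would raise a NameError. For reference, a self-contained sketch of a KV-type 2D phase-space sampler; this is an illustration, not the PyHEADTAIL implementation:

    import numpy as np

    def kv2d(n_particles):
        # Kapchinskij-Vladimirskij in 2D: every particle carries the same
        # single-particle emittance, i.e. (u, up) is uniform on the unit
        # circle of normalised phase space.
        theta = 2 * np.pi * np.random.uniform(size=n_particles)
        return [np.cos(theta), np.sin(theta)]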
diff --git a/PyHEADTAIL/particles/rfbucket_matching.py b/PyHEADTAIL/particles/rfbucket_matching.py
index 559c9ee8..c8ac9e46 100644
--- a/PyHEADTAIL/particles/rfbucket_matching.py
+++ b/PyHEADTAIL/particles/rfbucket_matching.py
@@ -89,8 +89,8 @@ def error_from_target_epsn(ec):
             if np.isnan(emittance):
                 raise ValueError

-            self.prints('... distance to target emittance: ' +
-                        '{:.2e}'.format(emittance-epsn_z))
+            # self.prints('... distance to target emittance: ' +
+            #             '{:.2e}'.format(emittance-epsn_z))

             return emittance-epsn_z
@@ -106,9 +106,9 @@ def error_from_target_epsn(ec):
         self.psi_object.H0 = self.rfbucket.guess_H0(
             ec_bar, from_variable='epsn')
         emittance = self._compute_emittance(self.rfbucket, self.psi)
-        self.prints('--> Emittance: ' + str(emittance))
+        # self.prints('--> Emittance: ' + str(emittance))
         sigma = self._compute_sigma(self.rfbucket, self.psi)
-        self.prints('--> Bunch length: ' + str(sigma))
+        # self.prints('--> Bunch length: ' + str(sigma))

     def psi_for_bunchlength_newton_method(self, sigma):
         # Maximum bunch length
@@ -120,7 +120,7 @@ def psi_for_bunchlength_newton_method(self, sigma):
                 'Using (maximum) full bucket RMS bunch length ' +
                 str(sigma_max*0.99) + 'm instead.')
             sigma = sigma_max*0.99
-        self.prints('*** Maximum RMS bunch length ' + str(sigma_max) + 'm.')
+        # self.prints('*** Maximum RMS bunch length ' + str(sigma_max) + 'm.')

         def error_from_target_sigma(sc):
             '''Width for bunch length'''
@@ -147,9 +147,9 @@ def error_from_target_sigma(sc):
         self.psi_object.H0 = self.rfbucket.guess_H0(
             sc_bar, from_variable='sigma')
         sigma = self._compute_sigma(self.rfbucket, self.psi)
-        self.prints('--> Bunch length: ' + str(sigma))
+        # self.prints('--> Bunch length: ' + str(sigma))
         emittance = self._compute_emittance(self.rfbucket, self.psi)
-        self.prints('--> Emittance: ' + str(emittance))
+        # self.prints('--> Emittance: ' + str(emittance))

     def linedensity(self, xx, quad_type=fixed_quad):
         L = []
diff --git a/PyHEADTAIL/rfq/rfq.py b/PyHEADTAIL/rfq/rfq.py
index 592e3a81..a2575c87 100644
--- a/PyHEADTAIL/rfq/rfq.py
+++ b/PyHEADTAIL/rfq/rfq.py
@@ -172,7 +172,6 @@ def track(self, beam):
         """
         cos_term = (2. * e * self.v_2 / self.omega *
                     pm.cos(self.omega / (beam.beta * c) * beam.z + self.phi_0))
-
         beam.xp += -beam.x * cos_term / beam.p0
         beam.yp += beam.y * cos_term / beam.p0
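[Editor's note] The rfbucket_matching hunks silence the Newton-iteration progress reports by commenting them out. An alternative that keeps the diagnostics recoverable would be to gate them behind a flag; a sketch, where verbose is a hypothetical attribute and not part of the API shown in this diff:

    # Hypothetical: gate matching diagnostics instead of deleting them.
    if getattr(self, 'verbose', False):
        self.prints('--> Bunch length: ' + str(sigma))
        self.prints('--> Emittance: ' + str(emittance))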
diff --git a/PyHEADTAIL/spacecharge/spacecharge.py b/PyHEADTAIL/spacecharge/spacecharge.py
index 7a68e618..4ea1ed12 100644
--- a/PyHEADTAIL/spacecharge/spacecharge.py
+++ b/PyHEADTAIL/spacecharge/spacecharge.py
@@ -3,16 +3,17 @@
 @date: 17/04/2015
 '''
-
+from PyHEADTAIL.general.element import Element
+from PyHEADTAIL.particles.slicing import clean_slices
+from PyHEADTAIL.field_maps import efields_funcs as efields
 import numpy as np
 from scipy.constants import c, epsilon_0, pi
+
 from scipy.interpolate import splrep, splev
 from functools import wraps

-from PyHEADTAIL.general.element import Element
 from PyHEADTAIL.general import pmath as pm
-from PyHEADTAIL.particles.slicing import clean_slices


 class LongSpaceCharge(Element):
@@ -127,6 +128,7 @@ def potential(z):

         return potential

+
 class TransverseGaussianSpaceCharge(Element):
     '''Contains transverse space charge for a Gaussian configuration.
     Applies the Bassetti-Erskine electric field expression slice-wise
@@ -165,11 +167,11 @@ def __init__(self, slicer, length, sig_check=True, other_efieldn=None):
         self.slicer = slicer
         self.length = length
         if other_efieldn is None:
-            self._efieldn = self._efieldn_mit
+            self._efieldn = efields._efieldn_mit
         else:
             self._efieldn = other_efieldn
         if sig_check:
-            self._efieldn = self.add_sigma_check(self._efieldn)
+            self._efieldn = efields.add_sigma_check(self._efieldn, 'GS')

     def track(self, beam):
         '''Add the transverse space charge contribution to the beam's
@@ -179,7 +181,6 @@ def track(self, beam):
             self.slicer, statistics=["mean_x", "mean_y", "sigma_x", "sigma_y"])
         prefactor = (beam.charge * self.length /
                      (beam.p0 * beam.betagamma * beam.gamma * c))
-
         # Nlambda_i is the line density [Coul/m] for the current slice
         for s_i, (Nlambda_i, mean_x, mean_y, sig_x, sig_y) in enumerate(zip(
                 slices.lambda_bins(smoothen=False)/slices.slice_widths,
@@ -192,7 +193,6 @@ def track(self, beam):
             en_x, en_y = self.get_efieldn(
                 pm.take(beam.x, p_id), pm.take(beam.y, p_id),
                 mean_x, mean_y, sig_x, sig_y)
-
             kicks_x = (en_x * Nlambda_i) * prefactor
             kicks_y = (en_y * Nlambda_i) * prefactor

@@ -219,170 +219,51 @@ def get_efieldn(self, xr, yr, mean_x, mean_y, sig_x, sig_y):
         en_y = pm.abs(en_y) * pm.sign(y)

         return en_x, en_y

+class TransverseLinearSpaceCharge(TransverseGaussianSpaceCharge):
+    '''Contains transverse space charge for a Gaussian configuration.
+    Applies the Bassetti-Erskine electric field expression slice-wise
+    for each particle centred around the slice centre.
+    '''
-    @staticmethod
-    def _sig_sqrt(sig_x, sig_y):
-        return pm.sqrt(2 * (sig_x**2 - sig_y**2))
-
-    @staticmethod
-    def _efieldn_mit(x, y, sig_x, sig_y):
-        '''The charge-normalised electric field components of a
-        two-dimensional Gaussian charge distribution according to
-        M. Bassetti and G. A. Erskine in CERN-ISR-TH/80-06.
-
-        Return (E_x / Q, E_y / Q).
+    '''Threshold for relative transverse beam size difference
+    below which the beam is assumed to be round:
+    abs(1 - sig_y / sig_x) < ratio_threshold ==> round beam
+    '''
+    ratio_threshold = 1e-3

-        Assumes sig_x > sig_y and mean_x == 0 as well as mean_y == 0.
-        For convergence reasons of the erfc, use only x > 0 and y > 0.
+    '''Threshold for absolute transverse beam size difference
+    below which the beam is assumed to be round:
+    abs(sig_y - sig_x) < absolute_threshold ==> round beam
+    '''
+    absolute_threshold = 1e-10

-        Uses FADDEEVA C++ implementation from MIT (via SciPy >= 0.13.0).
+    def __init__(self, slicer, length, sig_check=True):
+        '''Arguments:
+        - slicer determines the slicing parameters for the slices over
+          which the KV electric field expression is applied,
+          given a slicer with n_slices == 1, you can apply a
+          longitudinally averaged kick over the whole beam.
+        - length is an s interval along which the space charge force
+          is integrated.
+        - sig_check exchanges x and y quantities for sigma_x < sigma_y
+          and applies the round beam formula for sigma_x == sigma_y .
+          sig_check defaults to True and should not usually be False.
         '''
-        # timing was ~0.522 ms for:
-        # x = np.arange(-1e-5, 1e-5, 1e-8)
-        # y = np.empty(len(x))
-        # sig_x = 1.2e-6
-        # sig_y = 1e-6
-        sig_sqrt = TransverseGaussianSpaceCharge._sig_sqrt(sig_x, sig_y)
-        w1re, w1im = pm.wofz(x / sig_sqrt, y / sig_sqrt)
-        ex = pm.exp(-x*x / (2 * sig_x*sig_x) +
-                    -y*y / (2 * sig_y*sig_y))
-        w2re, w2im = pm.wofz(x * sig_y/(sig_x*sig_sqrt),
-                             y * sig_x/(sig_y*sig_sqrt))
-        denom = 2. * epsilon_0 * np.sqrt(pi) * sig_sqrt
-        return (w1im - ex * w2im) / denom, (w1re - ex * w2re) / denom
+        self.slicer = slicer
+        self.length = length
+        self._efieldn = efields._efieldn_linearized
+        if sig_check:
+            self._efieldn = efields.add_sigma_check(self._efieldn, 'KV')

-    @staticmethod
-    def _efieldn_mitmod(x, y, sig_x, sig_y):
-        '''The charge-normalised electric field components of a
-        two-dimensional Gaussian charge distribution according to
-        M. Bassetti and G. A. Erskine in CERN-ISR-TH/80-06.
-
-        Return (E_x / Q, E_y / Q).
-
-        Assumes sig_x > sig_y and mean_x == 0 as well as mean_y == 0.
-        For convergence reasons of the erfc, use only x > 0 and y > 0.
+    def track(self, beam):
+        '''Add the transverse space charge contribution to the beam's
+        transverse kicks.
+        '''
-        Uses erfc C++ implementation from MIT (via SciPy >= 0.13.0)
-        and calculates wofz (FADDEEVA function) explicitely.
-        '''
-        # timing was ~1.01ms for same situation as _efieldn_mit
-        sig_sqrt = TransverseGaussianSpaceCharge._sig_sqrt(sig_x, sig_y)
-        w1 = pm._errfadd((x + 1j * y) / sig_sqrt)
-        ex = pm.exp(-x*x / (2 * sig_x*sig_x) +
-                    -y*y / (2 * sig_y*sig_y))
-        w2 = pm._errfadd(x * sig_y/(sig_x*sig_sqrt) +
-                         y * sig_x/(sig_y*sig_sqrt) * 1j)
-        val = (w1 - ex * w2) / (2 * epsilon_0 * np.sqrt(pi) * sig_sqrt)
-        return val.imag, val.real
-
-    @staticmethod
-    def _efieldn_koelbig(x, y, sig_x, sig_y):
-        '''The charge-normalised electric field components of a
-        two-dimensional Gaussian charge distribution according to
-        M. Bassetti and G. A. Erskine in CERN-ISR-TH/80-06.
+        return super().track(beam)

-        Return (E_x / Q, E_y / Q).
-        Assumes sig_x > sig_y and mean_x == 0 as well as mean_y == 0.
-        For convergence reasons of the erfc, use only x > 0 and y > 0.
-
-        Uses CERN library from K. Koelbig.
+    def get_efieldn(self, xr, yr, mean_x, mean_y, sig_x, sig_y):
         '''
-        # timing was ~3.35ms for same situation as _efieldn_mit
-        if not pm._errf:
-            raise ImportError('errfff cannot be imported for using ' +
-                              'TransverseSpaceCharge._efield_koelbig .' +
-                              'Did you call make (or f2py general/errfff.f)?')
-        sig_sqrt = TransverseGaussianSpaceCharge._sig_sqrt(sig_x, sig_y)
-        w1re, w1im = pm._errf(x/sig_sqrt, y/sig_sqrt)
-        ex = pm.exp(-x*x / (2 * sig_x*sig_x) +
-                    -y*y / (2 * sig_y*sig_y))
-        w2re, w2im = pm._errf(x * sig_y/(sig_x*sig_sqrt),
-                              y * sig_x/(sig_y*sig_sqrt))
-        pref = 1. / (2 * epsilon_0 * np.sqrt(pi) * sig_sqrt)
-        return pref * (w1im - ex * w2im), pref * (w1re - ex * w2re)
-
-    @staticmethod
-    def wfun(z):
-        '''FADDEEVA function as implemented in PyECLOUD, vectorised.'''
-        x=z.real
-        y=z.imag
-        if not pm._errf:
-            raise ImportError('errfff cannot be imported for using ' +
-                              'TransverseSpaceCharge._efield_pyecloud .' +
-                              'Did you f2py errfff.f?')
-        wx,wy=pm._errf(x,y) # in PyECLOUD only pm._errf_f (not vectorised)
-        return wx+1j*wy
-
-    @staticmethod
-    def _efieldn_pyecloud(xin, yin, sigmax, sigmay):
-        '''The charge-normalised electric field components of a
-        two-dimensional Gaussian charge distribution according to
-        M. Bassetti and G. A. Erskine in CERN-ISR-TH/80-06.
-        Return (E_x / Q, E_y / Q).
-
-        Effective copy of PyECLOUD.BassErsk.BassErsk implementation.
-        '''
-        # timing was ~3.52ms for same situation as _efieldn_mit
-        wfun = TransverseGaussianSpaceCharge.wfun
-        x=abs(xin);
-        y=abs(yin);
-        eps0=8.854187817620e-12;
-        if sigmax>sigmay:
-            S=np.sqrt(2*(sigmax*sigmax-sigmay*sigmay));
-            factBE=1/(2*eps0*np.sqrt(pi)*S);
-            etaBE=sigmay/sigmax*x+1j*sigmax/sigmay*y;
-            zetaBE=x+1j*y;
-            val=factBE*(wfun(zetaBE/S)-
-                        np.exp( -x*x/(2*sigmax*sigmax)-y*y/(2*sigmay*sigmay))*
-                        wfun(etaBE/S) );
-            Ex=abs(val.imag)*np.sign(xin);
-            Ey=abs(val.real)*np.sign(yin);
-        else:
-            S=np.sqrt(2*(sigmay*sigmay-sigmax*sigmax));
-            factBE=1/(2*eps0*np.sqrt(pi)*S);
-            etaBE=sigmax/sigmay*y+1j*sigmay/sigmax*x;
-            yetaBE=y+1j*x;
-            val=factBE*(wfun(yetaBE/S)-
-                        np.exp( -y*y/(2*sigmay*sigmay)-x*x/(2*sigmax*sigmax))*
-                        wfun(etaBE/S) );
-            Ey=abs(val.imag)*np.sign(yin);
-            Ex=abs(val.real)*np.sign(xin);
-        return Ex, Ey
-
-    @staticmethod
-    def _efieldn_round(x, y, sig_r):
-        '''Return (E_x / Q, E_y / Q) for a round distribution
-        with sigma_x == sigma_y == sig_r .
-        '''
-        r2 = x*x + y*y
-        amplitude = (1 - pm.exp(-r2/(2*sig_r*sig_r))) / (2*pi*epsilon_0 * r2)
-        return x * amplitude, y * amplitude
-
-    @staticmethod
-    def add_sigma_check(efieldn):
-        '''Wrapper for a normalised electric field function.
-
-        Adds the following actions before calculating the field:
-        - exchange x and y quantities if sigma_x < sigma_y
-        - apply round beam field formula when sigma_x close to sigma_y
         '''
-        efieldn_round = TransverseGaussianSpaceCharge._efieldn_round
-        @wraps(efieldn)
-        def efieldn_checked(x, y, sig_x, sig_y, *args, **kwargs):
-            tol_kwargs = dict(
-                rtol=TransverseGaussianSpaceCharge.ratio_threshold,
-                atol=TransverseGaussianSpaceCharge.absolute_threshold
-            )
-            if pm.allclose(sig_y, sig_x, **tol_kwargs):
-                if pm.almost_zero(sig_y, **tol_kwargs):
-                    en_x = en_y = pm.zeros(x.shape, dtype=x.dtype)
-                else:
-                    en_x, en_y = efieldn_round(x, y, sig_x, *args, **kwargs)
-            elif pm.all(sig_x < sig_y):
-                en_y, en_x = efieldn(y, x, sig_y, sig_x, *args, **kwargs)
-            else:
-                en_x, en_y = efieldn(x, y, sig_x, sig_y, *args, **kwargs)
-            return en_x, en_y
-        return efieldn_checked
+        return super().get_efieldn(xr, yr, mean_x, mean_y, sig_x, sig_y)
\ No newline at end of file
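[Editor's note] The spacecharge.py hunk above moves the Bassetti-Erskine field routines into PyHEADTAIL.field_maps.efields_funcs. For readers without that module at hand, here is a standalone sketch of the deleted _efieldn_mit, using scipy.special.wofz directly instead of the pm.wofz dispatcher; as in the original docstring, it assumes sig_x > sig_y, zero means and x, y >= 0:

    import numpy as np
    from scipy.constants import epsilon_0, pi
    from scipy.special import wofz  # Faddeeva function, SciPy >= 0.13.0

    def efieldn_bassetti_erskine(x, y, sig_x, sig_y):
        '''Return (E_x / Q, E_y / Q) of a 2D Gaussian charge distribution
        (M. Bassetti and G. A. Erskine, CERN-ISR-TH/80-06).'''
        sig_sqrt = np.sqrt(2 * (sig_x**2 - sig_y**2))
        w1 = wofz((x + 1j * y) / sig_sqrt)
        ex = np.exp(-x*x / (2 * sig_x*sig_x) - y*y / (2 * sig_y*sig_y))
        w2 = wofz((x * sig_y / sig_x + 1j * y * sig_x / sig_y) / sig_sqrt)
        denom = 2. * epsilon_0 * np.sqrt(pi) * sig_sqrt
        return (w1.imag - ex * w2.imag) / denom, (w1.real - ex * w2.real) / denom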
diff --git a/docs/PyHEADTAIL.aperture.html b/docs/PyHEADTAIL.aperture.html
deleted file mode 100644
index 853731bd..00000000
--- a/docs/PyHEADTAIL.aperture.html
+++ /dev/null
@@ -1,714 +0,0 @@
-[714 lines of generated Sphinx HTML removed: the rendered API reference for
-the PyHEADTAIL.aperture package. It documented the abstract base class
-aperture.Aperture (tag_lost_particles, relocate_lost_particles, track) and
-its concrete subclasses CircularApertureXY(radius),
-EllipticalApertureXY(x_aper, y_aper) and RectangularApertureX/Y/Z(low, high),
-the cython pendants in aperture_cython with the faster relocation algorithm,
-and the tagging helpers tag_lost_circular, tag_lost_ellipse and
-tag_lost_rectangular.]
\ No newline at end of file
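[Editor's note] A hedged usage sketch based on the signatures in the deleted aperture reference; beam and one_turn_map are hypothetical stand-ins:

    from PyHEADTAIL.aperture.aperture import CircularApertureXY

    aperture = CircularApertureXY(radius=2e-2)  # 2 cm transverse aperture
    for element in one_turn_map:                # hypothetical tracking loop
        element.track(beam)
    # tags particles with x**2 + y**2 > radius**2 as lost and removes them:
    aperture.track(beam)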
diff --git a/docs/PyHEADTAIL.cobra_functions.html b/docs/PyHEADTAIL.cobra_functions.html
deleted file mode 100644
index 6eafda81..00000000
--- a/docs/PyHEADTAIL.cobra_functions.html
+++ /dev/null
@@ -1,329 +0,0 @@
-[329 lines of generated Sphinx HTML removed: the rendered API reference for
-the PyHEADTAIL.cobra_functions package, i.e. the cython helpers c_sin_cos
-(cm_sin, cm_cos), curve_tools (extrema, zero_crossings), interp_sin_cos
-(interpolated_mod2pi), pdf_integrators_2d, and the stats module with
-single-pass statistics: cov, std, dispersion, emittance, the Twiss
-estimators get_alpha/get_beta/get_gamma, their *_per_slice variants,
-calc_cell_stats and sort_particle_indices_by_slice.]
\ No newline at end of file
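[Editor's note] Based on the signatures in the deleted cobra_functions reference, a hedged usage sketch of the statistics helpers; the arrays must be contiguous float64 buffers (the double[::1] memoryviews above) with at least two samples:

    import numpy as np
    from PyHEADTAIL.cobra_functions import stats

    u = np.random.normal(size=100000)
    up = np.random.normal(size=100000)
    dp = np.random.normal(scale=1e-3, size=100000)

    c = stats.cov(u, up)              # single-pass covariance, not the matrix
    eps = stats.emittance(u, up, dp)  # statistical emittance of the (u, up) pair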
diff --git a/docs/PyHEADTAIL.feedback.html b/docs/PyHEADTAIL.feedback.html
deleted file mode 100644
index c4808751..00000000
--- a/docs/PyHEADTAIL.feedback.html
+++ /dev/null
@@ -1,362 +0,0 @@
-[362 lines of generated Sphinx HTML removed: the rendered API reference for
-the PyHEADTAIL.feedback package. It documented
-transverse_damper.TransverseDamper(dampingrate_x, dampingrate_y, phase=90,
-local_beta_function=None), an ideal single-bunch damper with horizontal()
-and vertical() factory classmethods, and the widebandfeedback module with
-Pickup, Kicker (FIR/IIR controllers) and TransferFunction plus the one_pole
-roll-off helpers.]
\ No newline at end of file
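[Editor's note] A hedged usage sketch of the TransverseDamper documented in the deleted page, using the 50-turn damping rates quoted there for a typical LHC ADT set-up; phase=90 (the default) keeps it purely resistive, so no beta function is needed. one_turn_map, bunch and n_turns are hypothetical stand-ins:

    from PyHEADTAIL.feedback.transverse_damper import TransverseDamper

    damper = TransverseDamper(dampingrate_x=50, dampingrate_y=50)
    one_turn_map.append(damper)
    for turn in range(n_turns):
        for element in one_turn_map:
            element.track(bunch)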
diff --git a/docs/PyHEADTAIL.field_maps.html b/docs/PyHEADTAIL.field_maps.html
deleted file mode 100644
index b4c27eea..00000000
--- a/docs/PyHEADTAIL.field_maps.html
+++ /dev/null
@@ -1,126 +0,0 @@
-[126 lines of generated Sphinx HTML removed: the stub reference page for the
-PyHEADTAIL.field_maps package, listing the Transverse_Efield_map and
-field_map submodules without further docstrings.]
\ No newline at end of file
-
-
-
- -
-

PyHEADTAIL.general package

-
-

Submodules

-
-
-

PyHEADTAIL.general.contextmanager module

-

Context manager classes -@author Stefan Hegglin -@data 30.09.2015

-
-
-class PyHEADTAIL.general.contextmanager.CPU(bunch)
-

Dummy class to run the code on the CPU. -Does nothing but has the same interface as the GPU contextmanager

-
-
-__dict__ = mappingproxy({'__module__': 'PyHEADTAIL.general.contextmanager', '__doc__': '\n Dummy class to run the code on the CPU.\n Does nothing but has the same interface as the GPU contextmanager\n ', '__init__': <function CPU.__init__>, '__enter__': <function CPU.__enter__>, '__exit__': <function CPU.__exit__>, '__dict__': <attribute '__dict__' of 'CPU' objects>, '__weakref__': <attribute '__weakref__' of 'CPU' objects>})
-
- -
-
-__enter__()
-

Remove slice records from bunch.

-
- -
-
-__exit__(exc_type, exc_value, traceback)
-

Remove slice records from bunch.

-
- -
-
-__init__(bunch)
-

Initialize self. See help(type(self)) for accurate signature.

-
- -
-
-__module__ = 'PyHEADTAIL.general.contextmanager'
-
- -
-
-__weakref__
-

list of weak references to the object (if defined)

-
- -
- -
-
-class PyHEADTAIL.general.contextmanager.Context
-

Example contextmanager class providing enter and exit methods

-
-
-__dict__ = mappingproxy({'__module__': 'PyHEADTAIL.general.contextmanager', '__doc__': '\n Example contextmanager class providing enter and exit methods\n ', '__init__': <function Context.__init__>, '__enter__': <function Context.__enter__>, '__exit__': <function Context.__exit__>, '__dict__': <attribute '__dict__' of 'Context' objects>, '__weakref__': <attribute '__weakref__' of 'Context' objects>})
-
- -
-
-__enter__()
-
- -
-
-__exit__(exc_type, exc_value, traceback)
-
- -
-
-__init__()
-

Initialize self. See help(type(self)) for accurate signature.

-
- -
-
-__module__ = 'PyHEADTAIL.general.contextmanager'
-
- -
-
-__weakref__
-

list of weak references to the object (if defined)

-
- -
- -
-
-class PyHEADTAIL.general.contextmanager.GPU(bunch)
-

Class providing enter/exit methods to move/get data from/to the gpu or -provide a general base framework for all decorated function calls -All data after must be in the same state after exiting as before entering -this context!

-
-
-__dict__ = mappingproxy({'__module__': 'PyHEADTAIL.general.contextmanager', '__doc__': '\n Class providing enter/exit methods to move/get data from/to the gpu or\n provide a general base framework for all decorated function calls\n All data after must be in the same state after exiting as before entering\n this context!\n ', '__init__': <function GPU.__init__>, '__enter__': <function GPU.__enter__>, '__exit__': <function GPU.__exit__>, '__dict__': <attribute '__dict__' of 'GPU' objects>, '__weakref__': <attribute '__weakref__' of 'GPU' objects>})
-
- -
-
-__enter__()
-

Move all data to the GPU (and monkey patch methods?) -Returns self (eg. to provide info about gpu/status/…)

-

Remove slice records from bunch.

-
- -
-
-__exit__(exc_type, exc_value, traceback)
-

Move all data back to the CPU (and un-patch the methods?) -Reestablish state of everything as it was before entering

-

Remove slice records from bunch.

-
- -
-
-__init__(bunch)
-

Pass the bunch to the context s.t. the context knows what to copy -to the gpu. The problem with this approach: not very nice for the user: -with GPU(bunch) as context:

-
- -
-
-__module__ = 'PyHEADTAIL.general.contextmanager'
-
- -
-
-__weakref__
-

list of weak references to the object (if defined)

-
- -
- -
-
-

PyHEADTAIL.general.decorators module

-

@authors: Adrian Oeftiger -@date: 02/10/2014

-

Provide useful decorators for PyHEADTAIL.

-
-
-PyHEADTAIL.general.decorators.deprecated(message)
-

Deprecation warning as described in warnings documentation.

-
- -
-
-PyHEADTAIL.general.decorators.memoize(function)
-

Memoizes the output of a function for given arguments (no keyword arguments) -and returns the correspondingly saved value after the first evaluation.

-
- -
-
-PyHEADTAIL.general.decorators.synchronize_gpu_streams_after(func)
-

Use this decorator if you need the results of all the streams -synchronized after this function is called

-
- -
-
-PyHEADTAIL.general.decorators.synchronize_gpu_streams_before(func)
-

Use this decorator if you need the results of all the streams -synchronized before this function is called

-
- -
-
-

PyHEADTAIL.general.element module

-

@authors: Adrian Oeftiger -@date: 12/09/2014

-

Provide abstract element as part of the tracking layout (e.g. circular -accelerator) for PyHeadtail. All implemented elements derive from this. -Can be used for implementing general features that every derived element -in PyHEADTAIL should have.

-
-
-class PyHEADTAIL.general.element.Element(*args, **kwargs)
-

Abstract element as part of the tracking layout. Guarantees -to fulfil its tracking contract via the method track(beam).

-
-
-__abstractmethods__ = frozenset({'track'})
-
- -
-
-__module__ = 'PyHEADTAIL.general.element'
-
- -
-
-_abc_impl = <_abc_data object>
-
- -
-
-abstract track(beam)
-

Perform tracking of beam through this Element.

-
- -
- -
-
-class PyHEADTAIL.general.element.Printing(*args, **kwargs)
-

Provides prints(output) method in order to communicate any output -to the user. Use for instance

-
>>> self.prints("Example message to console.")
-
-
-

instead of

-
>>> print ("Example message to console.")
-
-
-

in order to obtain full flexibility over output channels.

-
-
-__dict__ = mappingproxy({'__module__': 'PyHEADTAIL.general.element', '__doc__': 'Provides prints(output) method in order to communicate any output\n to the user. Use for instance\n\n >>> self.prints("Example message to console.")\n\n instead of\n\n >>> print ("Example message to console.")\n\n in order to obtain full flexibility over output channels.\n ', '__init__': <function Printing.__init__>, '__new__': <staticmethod object>, 'prints': <function Printing.prints>, 'warns': <function Printing.warns>, '__dict__': <attribute '__dict__' of 'Printing' objects>, '__weakref__': <attribute '__weakref__' of 'Printing' objects>})
-
- -
-
-__init__(*args, **kwargs)
-

Initialize self. See help(type(self)) for accurate signature.

-
- -
-
-__module__ = 'PyHEADTAIL.general.element'
-
- -
-
-static __new__(cls, *args, **kwargs)
-

Factory method makes sure that inheriting elements always -have a Printer available for output redirection. -If an inheriting element constructor gets the keyword argument -‘printer’, an individual Printer as defined in the -PyHEADTAIL.general.printers module can be attached to this -instance. Standard is console output, i.e. ConsolePrinter.

-
- -
-
-__weakref__
-

list of weak references to the object (if defined)

-
- -
-
-prints(output)
-

Communicate any output to the user. Use for instance

-
>>> self.prints("Example message to console.")
-
-
-

instead of

-
>>> print ("Example message to console.")
-
-
-

in order to obtain full flexibility over output channels.

-
- -
-
-warns(output)
-

Communicate warnings to the user. Use for instance

-
>>> self.warns("Example warning to console.")
-
-
-

instead of

-
>>> print ("Example message to console.")
-
-
-
- -
- -
-
-

PyHEADTAIL.general.pmath module

-

PyHEADTAIL math functions -Dispatches for CPU/GPU versions -@author Stefan Hegglin -@date 05.10.2015

-
-
-exception PyHEADTAIL.general.pmath.UnknownContextManagerError(message='Failed to determine current context, e.g. whether pmath.device is "CPU" or "GPU".')
-

Raise if context manager is not found, e.g. cannot determine -whether on CPU or on GPU.

-
-
-__init__(message='Failed to determine current context, e.g. whether pmath.device is "CPU" or "GPU".')
-

Initialize self. See help(type(self)) for accurate signature.

-
- -
-
-__module__ = 'PyHEADTAIL.general.pmath'
-
- -
-
-__weakref__
-

list of weak references to the object (if defined)

-
- -
- -
-
-PyHEADTAIL.general.pmath._count_macroparticles_per_slice_cpu(sliceset)
-
- -
-
-PyHEADTAIL.general.pmath._emittance_per_slice_cpu(sliceset, u, up, dp=None, **kwargs)
-

CPU Wrapper for the emittance per slice function. -TODO: Find a good spot where to put this function (equiv to gpu_wrap) -–> Directly into cobra_functions/stats.pyx?

-
- -
-
-PyHEADTAIL.general.pmath._errfadd(z)
-
- -
-
-PyHEADTAIL.general.pmath._init_bunch_buffer(bunch_stats, buffer_size)
-
- -
-
-PyHEADTAIL.general.pmath._init_slice_buffer(slice_stats, n_slices, buffer_size)
-
- -
-
-PyHEADTAIL.general.pmath._mean_per_slice_cpu(sliceset, u, **kwargs)
-

CPU Wrapper for the mean per slice function. -TODO: Find a good spot where to put this function (equiv to gpu_wrap) -–> Directly into cobra_functions/stats.pyx?

-
- -
-
-PyHEADTAIL.general.pmath._searchsortedleft(array, values, dest_array=None)
-
- -
-
-PyHEADTAIL.general.pmath._searchsortedright(array, values, dest_array=None)
-
- -
-
-PyHEADTAIL.general.pmath._slice_to_particles(sliceset, slice_array, particle_array=None)
-

Convert slice_array with entries for each slice to a -particle array with the respective entry of each particle -given by its slice_array value via the slice that the -particle belongs to. If provided, particle_array should be a -zero-filled destination array.

-
- -
-
-PyHEADTAIL.general.pmath._std_per_slice_cpu(sliceset, u, **kwargs)
-

CPU Wrapper for the cov per slice function. -TODO: Find a good spot where to put this function (equiv to gpu_wrap) -–> Directly into cobra_functions/stats.pyx?

-
- -
-
-PyHEADTAIL.general.pmath._wofz(x, y)
-
- -
-
-PyHEADTAIL.general.pmath.almost_zero(array, *args, **kwargs)
-
- -
-
-PyHEADTAIL.general.pmath.apply_permutation(array, permutation)
-
- -
-
-PyHEADTAIL.general.pmath.emittance(*args, **kwargs)
-
- -
-
-PyHEADTAIL.general.pmath.emittance_per_slice(sliceset, u, up, dp=None, **kwargs)
-

CPU Wrapper for the emittance per slice function. -TODO: Find a good spot where to put this function (equiv to gpu_wrap) -–> Directly into cobra_functions/stats.pyx?

-
- -
-
-PyHEADTAIL.general.pmath.ensure_CPU(array)
-

Accept a GPUArray or a NumPy.ndarray and return a NumPy.ndarray.

-
- -
-
-PyHEADTAIL.general.pmath.ensure_same_device(array)
-

Accept a GPUarray or a NumPy.ndarray, check which device we -are currently running on in the context manager, and return -a possibly transferred GPUarray or a NumPy.ndarray accordingly.

-
- -
-
-PyHEADTAIL.general.pmath.indexify(array)
-
- -
-
-PyHEADTAIL.general.pmath.init_bunch_buffer(bunch, bunch_stats, buffer_size)
-
- -
-
-PyHEADTAIL.general.pmath.init_slice_buffer(slice_set, slice_stats, buffer_size)
-
- -
-
-PyHEADTAIL.general.pmath.macroparticles_per_slice(sliceset)
-
- -
-
-PyHEADTAIL.general.pmath.mean_per_slice(sliceset, u, **kwargs)
-

CPU Wrapper for the mean per slice function. -TODO: Find a good spot where to put this function (equiv to gpu_wrap) -–> Directly into cobra_functions/stats.pyx?

-
- -
-
-PyHEADTAIL.general.pmath.np_sincos(x)
-
- -
-
-PyHEADTAIL.general.pmath.particles_outside_cuts(sliceset)
-
- -
-
-PyHEADTAIL.general.pmath.particles_within_cuts(sliceset)
-
- -
-
-PyHEADTAIL.general.pmath.searchsortedleft(array, values, dest_array=None)
-
- -
-
-PyHEADTAIL.general.pmath.searchsortedright(array, values, dest_array=None)
-
- -
-
-PyHEADTAIL.general.pmath.seq(stop)
-
- -
-
-PyHEADTAIL.general.pmath.sincos(x)
-
- -
-
-PyHEADTAIL.general.pmath.slice_to_particles(sliceset, slice_array, particle_array=None)
-

Convert slice_array with entries for each slice to a -particle array with the respective entry of each particle -given by its slice_array value via the slice that the -particle belongs to. If provided, particle_array should be a -zero-filled destination array.

-
- -
-
-PyHEADTAIL.general.pmath.std_per_slice(sliceset, u, **kwargs)
-

CPU Wrapper for the cov per slice function. -TODO: Find a good spot where to put this function (equiv to gpu_wrap) -–> Directly into cobra_functions/stats.pyx?

-
- -
-
-PyHEADTAIL.general.pmath.update_active_dict(new_dict)
-

Update the currently active dictionary. Removes the keys of the currently -active dictionary from globals() and spills the keys -from new_dict to globals() -:param new_dict A dictionary which contents will be spilled to globals():

-
- -
-
-PyHEADTAIL.general.pmath.wofz(x, y)
-
- -
-
-

PyHEADTAIL.general.printers module

-

@authors: Adrian Oeftiger -@date: 12/09/2014

-

Printer functionality provides different means to control the flow of -output streams.

-
-
-class PyHEADTAIL.general.printers.AccumulatorPrinter(*args, **kwargs)
-

Accumulates all calls to prints in a list ‘log’

-
-
-__abstractmethods__ = frozenset({})
-
- -
-
-__init__(*args, **kwargs)
-

Initialize self. See help(type(self)) for accurate signature.

-
- -
-
-__module__ = 'PyHEADTAIL.general.printers'
-
- -
-
-_abc_impl = <_abc_data object>
-
- -
-
-prints(output)
-

Stores warnings in list log

-
- -
- -
-
-class PyHEADTAIL.general.printers.ConsolePrinter
-

Redirects to console, equivalent to the print statement

-
>>> print (output)
-
-
-
-
-__abstractmethods__ = frozenset({})
-
- -
-
-__module__ = 'PyHEADTAIL.general.printers'
-
- -
-
-_abc_impl = <_abc_data object>
-
- -
-
-prints(output)
-

Directs the output to console.

-
- -
- -
-
-class PyHEADTAIL.general.printers.Printer
-

A generic printer knows where to redirect text for print. -Use Printer.prints(output) to print the output instead of -using the standard keyword

-
>>> print (output)
-
-
-

in order to gain flexibility in redirecting output centrally. -E.g. instead of directing output to console one could specify -a file or use different streams for errors, warnings and content -related output etc.

-
-
-__abstractmethods__ = frozenset({'prints'})
-
- -
-
-__dict__ = mappingproxy({'__module__': 'PyHEADTAIL.general.printers', '__doc__': '\n A generic printer knows where to redirect text for print.\n Use Printer.prints(output) to print the output instead of\n using the standard keyword\n\n >>> print (output)\n\n in order to gain flexibility in redirecting output centrally.\n E.g. instead of directing output to console one could specify\n a file or use different streams for errors, warnings and content\n related output etc.\n ', 'prints': <function Printer.prints>, '__dict__': <attribute '__dict__' of 'Printer' objects>, '__weakref__': <attribute '__weakref__' of 'Printer' objects>, '__abstractmethods__': frozenset({'prints'}), '_abc_impl': <_abc_data object>})
-
- -
-
-__module__ = 'PyHEADTAIL.general.printers'
-
- -
-
-__weakref__
-

list of weak references to the object (if defined)

-
- -
-
-_abc_impl = <_abc_data object>
-
- -
-
-abstract prints(output)
-

Direct the output to the internally defined printing stream.

-
- -
- -
-
class PyHEADTAIL.general.printers.SilentPrinter

    Mutes output.

    prints(output)
        Accepts output and does nothing.
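Since prints is the only abstract method of Printer, redirecting output elsewhere takes a few lines. A minimal sketch of a hypothetical file-based printer (FilePrinter is not part of the module):

    >>> from PyHEADTAIL.general.printers import Printer
    >>> class FilePrinter(Printer):
    ...     '''Hypothetical printer appending each message to a log file.'''
    ...     def __init__(self, path):
    ...         self.path = path
    ...     def prints(self, output):
    ...         # append instead of printing to the console
    ...         with open(self.path, 'a') as f:
    ...             f.write(str(output) + '\n')
    >>> FilePrinter('run.log').prints('tracking turn 1')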

PyHEADTAIL.general.utils module

@authors: Kevin Li, Adrian Oeftiger
@date: 10/02/2015

Provide useful conceptual classes and logics for PyHEADTAIL.
class PyHEADTAIL.general.utils.ListProxy(list_of_objects, attr_name)

    Is a list of object attributes. Accessing ListProxy entries evaluates the object attributes each time it is accessed, i.e. this list "proxies" the object attributes.

    Attention: if accessed via slicing, e.g.

    >>> original = ListProxy(...)
    >>> part = original[2:5]

    then part creates a new list of references, a new _list_of_objects. Consequently, any change to the direct contents of original._list_of_objects (such as popping or adding elements) is not reflected in part.

    __getitem__(index)
        Return a ListProxy for slice arguments, otherwise return the requested value at the given index.

    __init__(list_of_objects, attr_name)
        Provide a list of object instances and the name of a commonly shared attribute that should be proxied by this ListProxy instance.

    __len__()

    __repr__()
        Return repr(self).

    __setitem__(index, value)

    _rewritable_setitem(index, value)
        This setter method may be overwritten.

    pop(index)
        Remove the object from the internal list and return the corresponding attribute, analogous to list.pop.
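A short usage sketch: the Segment objects below are hypothetical stand-ins; only the documented constructor ListProxy(list_of_objects, attr_name) is assumed.

    >>> from PyHEADTAIL.general.utils import ListProxy
    >>> class Segment(object):
    ...     def __init__(self, beta_x):
    ...         self.beta_x = beta_x
    >>> segments = [Segment(1.0), Segment(2.0)]
    >>> betas = ListProxy(segments, 'beta_x')
    >>> list(betas)
    [1.0, 2.0]
    >>> segments[0].beta_x = 5.0  # attribute access is proxied, not copied
    >>> betas[0]
    5.0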
class PyHEADTAIL.general.utils.MutableNumber(value)

    Documentation for MutableNumber

    __init__(value)
        Initialize self. See help(type(self)) for accurate signature.
\ No newline at end of file
diff --git a/docs/PyHEADTAIL.gpu.html b/docs/PyHEADTAIL.gpu.html
deleted file mode 100644
index cd6d4d60..00000000
--- a/docs/PyHEADTAIL.gpu.html
+++ /dev/null
@@ -1,376 +0,0 @@
PyHEADTAIL.gpu package

Submodules
PyHEADTAIL.gpu.gpu_utils module

GPU Utils: memory pool, ...
This could also be the place to store the context, device, streams, etc.
The module is automatically a singleton.
@author Stefan Hegglin
PyHEADTAIL.gpu.gpu_wrap module

@author Stefan Hegglin, Adrian Oeftiger
@date 20.10.2015
Python functions which wrap GPU functionality, used in the dispatch of general/pmath. All functions assume GPU arrays as arguments!

PyHEADTAIL.gpu.gpu_wrap._add_bounds_to_sliceset(sliceset)

    Adds the lower_bounds and upper_bounds members to the sliceset. They must not be present before the function call, otherwise the behaviour is undefined.
PyHEADTAIL.gpu.gpu_wrap._empty_like(gpuarray)

PyHEADTAIL.gpu.gpu_wrap._inplace_pow(x_gpu, p, stream=None)

    Perform an in-place x_gpu = x_gpu ** p.
    Courtesy: scikits.cuda

PyHEADTAIL.gpu.gpu_wrap.apply_permutation(array, permutation)

    Permute the entries in array according to the permutation array. Return a new (permuted) array which is equal to array[permutation].
    :param array: gpuarray to be permuted, either float64 or int32
    :param permutation: permutation array, must be np.int32 (or int32), is asserted

PyHEADTAIL.gpu.gpu_wrap.argsort(to_sort)

    Return the permutation required to sort the array.
    :param to_sort: gpuarray for which the permutation array that sorts it is returned
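argsort and apply_permutation are designed to be chained: first obtain the sorting permutation, then apply it. A sketch, assuming a working pycuda/thrust setup (the numeric data are illustrative):

    >>> import numpy as np
    >>> import pycuda.autoinit                      # assumed CUDA context setup
    >>> import pycuda.gpuarray as gpuarray
    >>> from PyHEADTAIL.gpu import gpu_wrap
    >>> z = gpuarray.to_gpu(np.array([3., 1., 2.]))
    >>> perm = gpu_wrap.argsort(z)                  # int32 permutation array
    >>> gpu_wrap.apply_permutation(z, perm).get()
    array([1., 2., 3.])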
PyHEADTAIL.gpu.gpu_wrap.atleast_1d(*arrays)

    Return input arrays unless they are scalars. Ensure the results have ndim >= 1.

PyHEADTAIL.gpu.gpu_wrap.convolve(a, v, mode='full')

    Compute the convolution of the two arrays a, v. See np.convolve.
PyHEADTAIL.gpu.gpu_wrap.covariance(a, b, stream=None)

    Covariance (not covariance matrix).
    :param a: pycuda.GPUArray
    :param b: pycuda.GPUArray

PyHEADTAIL.gpu.gpu_wrap.covariance_old(a, b)

    Covariance (not covariance matrix).
    :param a: pycuda.GPUArray
    :param b: pycuda.GPUArray

PyHEADTAIL.gpu.gpu_wrap.cumsum(array, dest=None)

    Return the cumulative sum of 1-dimensional GPUArray data. Works for dtypes np.int32 and np.float64. Wrapper for the thrust prefix sum via thrust::inclusive_scan.
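A sketch of the thrust-backed prefix sum, under the same pycuda assumptions as above:

    >>> import numpy as np
    >>> import pycuda.autoinit
    >>> import pycuda.gpuarray as gpuarray
    >>> from PyHEADTAIL.gpu import gpu_wrap
    >>> counts = gpuarray.to_gpu(np.array([3, 1, 2], dtype=np.int32))
    >>> gpu_wrap.cumsum(counts).get()               # inclusive scan
    array([3, 4, 6], dtype=int32)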
PyHEADTAIL.gpu.gpu_wrap.emittance(u, up, dp, stream=None)

    Compute the emittance of GPU arrays. Check the algorithm above for a more readable version; this one has been 'optimized', e.g. mean->sum and multiplication at the end to minimize kernel calls/inits of gpuarrays.
    :param u: coordinate array
    :param up: conjugate momentum array
    :param dp: longitudinal momentum variation (can be None)
    :param stream: in which cuda stream to perform the computations

PyHEADTAIL.gpu.gpu_wrap.emittance_(u, up, dp)

    Compute the emittance of GPU arrays. Check the algorithm above for a more readable version; this one has been 'optimized', e.g. mean->sum and multiplication at the end to minimize kernel calls/inits of gpuarrays.
    :param u: coordinate array
    :param up: conjugate momentum array
    :param dp: longitudinal momentum variation

PyHEADTAIL.gpu.gpu_wrap.emittance_reference(u, up, dp)

    Compute the emittance of GPU arrays. Reference implementation, slow but readable.
    :param u: coordinate array
    :param up: conjugate momentum array
    :param dp: longitudinal momentum variation
PyHEADTAIL.gpu.gpu_wrap.init_bunch_buffer(bunch, bunch_stats, buffer_size)

    Call bunch.[stats], match the buffer type with the returned type.

PyHEADTAIL.gpu.gpu_wrap.init_slice_buffer(slice_set, slice_stats, buffer_size)

    Call sliceset.['stats'], match the buffer type with the returned type.

PyHEADTAIL.gpu.gpu_wrap.macroparticles_per_slice(sliceset)

    Return the number of macroparticles per slice. Assumes a sorted beam!
PyHEADTAIL.gpu.gpu_wrap.mean(a, stream=None)

    Compute the mean of the gpuarray a. Replacement for skcuda.misc.mean(), which does not allow to specify the stream (because gpuarray.__div__ does not have a stream argument).

PyHEADTAIL.gpu.gpu_wrap.particles_outside_cuts(sliceset)

    Return np.where((array < minimum) and (array > maximum)). Assumes a sorted beam!

PyHEADTAIL.gpu.gpu_wrap.particles_within_cuts(sliceset)

    Return np.where((array >= minimum) and (array <= maximum)). Assumes a sorted beam!
PyHEADTAIL.gpu.gpu_wrap.searchsortedleft(array, values, dest_array=None)

PyHEADTAIL.gpu.gpu_wrap.searchsortedright(array, values, dest_array=None)

PyHEADTAIL.gpu.gpu_wrap.sincos(array)

    Return a tuple with the sin and the cos of the input array.
PyHEADTAIL.gpu.gpu_wrap.sorted_cov_per_slice(sliceset, u, v, stream=None)

    Computes the covariance of the quantities u, v per slice.
    :param sliceset: specifying slices
    :param u, v: the arrays of which to compute the covariance

PyHEADTAIL.gpu.gpu_wrap.sorted_emittance_per_slice(sliceset, u, up, dp=None, stream=None)

    Computes the emittance per slice. If dp is None, the effective emittance is computed.
    :param sliceset: specifying slices
    :param u, up: the quantities of which to compute the emittance, e.g. x, xp

PyHEADTAIL.gpu.gpu_wrap.sorted_emittance_per_slice_slow(sliceset, u, up, dp=None, stream=None)

    Computes the emittance per slice. If dp is None, the effective emittance is computed.
    :param sliceset: specifying slices
    :param u, up: the quantities of which to compute the emittance, e.g. x, xp

PyHEADTAIL.gpu.gpu_wrap.sorted_mean_per_slice(sliceset, u, stream=None)

    Computes the mean per slice of the array u.
    :param sliceset: specifying slices, has .n_slices and .slice_index_of_particle
    :param u: the array of which to compute the mean
    Returns an array res, where res[i] stores the mean of slice i.

PyHEADTAIL.gpu.gpu_wrap.sorted_std_per_slice(sliceset, u, stream=None)

    Computes the cov per slice of the array u.
    :param sliceset: specifying slices
    :param u: the array of which to compute the cov
    Returns an array res, where res[i] stores the cov of slice i.

PyHEADTAIL.gpu.gpu_wrap.std(a, stream=None)

    Std of a vector.
PyHEADTAIL.gpu.oldinit module

PyHEADTAIL.gpu.particles module

PyHEADTAIL.gpu.pypic module

PyHEADTAIL.gpu.slicing module

PyHEADTAIL.gpu.thrust_interface module

PyHEADTAIL.gpu.wrapper module
\ No newline at end of file
diff --git a/docs/PyHEADTAIL.html b/docs/PyHEADTAIL.html
deleted file mode 100644
index cc395bc9..00000000
--- a/docs/PyHEADTAIL.html
+++ /dev/null
@@ -1,228 +0,0 @@
PyHEADTAIL package

Subpackages
\ No newline at end of file
diff --git a/docs/PyHEADTAIL.impedances.html b/docs/PyHEADTAIL.impedances.html
deleted file mode 100644
index ec1bee2e..00000000
--- a/docs/PyHEADTAIL.impedances.html
+++ /dev/null
@@ -1,1064 +0,0 @@
PyHEADTAIL.impedances package

Submodules
PyHEADTAIL.impedances.wake_kicks module

@class WakeKick
@author Kevin Li, Michael Schenk
@date July 2014
@brief Implementation of the wake kicks, i.e. of the elementary objects describing the effects of a wake field.
@copyright CERN
class PyHEADTAIL.impedances.wake_kicks.ConstantWakeKickX(wake_function, slicer, n_turns_wake, *args, **kwargs)

    apply(bunch, slice_set_list, slice_set_age_list)
        Calculate and apply a constant wake kick to bunch.xp using the given slice_set. Only particles within the slicing region, i.e. particles_within_cuts (defined by the slice_set), experience the kick.

class PyHEADTAIL.impedances.wake_kicks.ConstantWakeKickY(wake_function, slicer, n_turns_wake, *args, **kwargs)

    apply(bunch, slice_set_list, slice_set_age_list)
        Calculate and apply a constant wake kick to bunch.yp using the given slice_set. Only particles within the slicing region, i.e. particles_within_cuts (defined by the slice_set), experience the kick.

class PyHEADTAIL.impedances.wake_kicks.ConstantWakeKickZ(wake_function, slicer, n_turns_wake, *args, **kwargs)

    apply(bunch, slice_set_list, slice_set_age_list)
        Calculate and apply a constant wake kick to bunch.dp using the given slice_set. Only particles within the slicing region, i.e. particles_within_cuts (defined by the slice_set), experience the kick.
class PyHEADTAIL.impedances.wake_kicks.DipoleWakeKickX(wake_function, slicer, n_turns_wake, *args, **kwargs)

    apply(bunch, slice_set_list, slice_set_age_list)
        Calculate and apply a dipolar wake kick to bunch.xp using the given slice_set. Only particles within the slicing region, i.e. particles_within_cuts (defined by the slice_set), experience the kick.

class PyHEADTAIL.impedances.wake_kicks.DipoleWakeKickXY(wake_function, slicer, n_turns_wake, *args, **kwargs)

    apply(bunch, slice_set_list, slice_set_age_list)
        Calculate and apply a dipolar (cross term x-y) wake kick to bunch.xp using the given slice_set. Only particles within the slicing region, i.e. particles_within_cuts (defined by the slice_set), experience the kick.

class PyHEADTAIL.impedances.wake_kicks.DipoleWakeKickY(wake_function, slicer, n_turns_wake, *args, **kwargs)

    apply(bunch, slice_set_list, slice_set_age_list)
        Calculate and apply a dipolar wake kick to bunch.yp using the given slice_set. Only particles within the slicing region, i.e. particles_within_cuts (defined by the slice_set), experience the kick.

class PyHEADTAIL.impedances.wake_kicks.DipoleWakeKickYX(wake_function, slicer, n_turns_wake, *args, **kwargs)

    apply(bunch, slice_set_list, slice_set_age_list)
        Calculate and apply a dipolar (cross term y-x) wake kick to bunch.yp using the given slice_set. Only particles within the slicing region, i.e. particles_within_cuts (defined by the slice_set), experience the kick.
class PyHEADTAIL.impedances.wake_kicks.QuadrupoleWakeKickX(wake_function, slicer, n_turns_wake, *args, **kwargs)

    apply(bunch, slice_set_list, slice_set_age_list)
        Calculate and apply a quadrupolar wake kick to bunch.xp using the given slice_set. Only particles within the slicing region, i.e. particles_within_cuts (defined by the slice_set), experience the kick.

class PyHEADTAIL.impedances.wake_kicks.QuadrupoleWakeKickXY(wake_function, slicer, n_turns_wake, *args, **kwargs)

    apply(bunch, slice_set_list, slice_set_age_list)
        Calculate and apply a quadrupolar (cross term x-y) wake kick to bunch.xp using the given slice_set. Only particles within the slicing region, i.e. particles_within_cuts (defined by the slice_set), experience the kick.

class PyHEADTAIL.impedances.wake_kicks.QuadrupoleWakeKickY(wake_function, slicer, n_turns_wake, *args, **kwargs)

    apply(bunch, slice_set_list, slice_set_age_list)
        Calculate and apply a quadrupolar wake kick to bunch.yp using the given slice_set. Only particles within the slicing region, i.e. particles_within_cuts (defined by the slice_set), experience the kick.

class PyHEADTAIL.impedances.wake_kicks.QuadrupoleWakeKickYX(wake_function, slicer, n_turns_wake, *args, **kwargs)

    apply(bunch, slice_set_list, slice_set_age_list)
        Calculate and apply a quadrupolar (cross term y-x) wake kick to bunch.yp using the given slice_set. Only particles within the slicing region, i.e. particles_within_cuts (defined by the slice_set), experience the kick.
class PyHEADTAIL.impedances.wake_kicks.WakeKick(wake_function, slicer, n_turns_wake, *args, **kwargs)

    Abstract base class for wake kick classes, like e.g. the DipoleWakeKickX. Provides the basic and universal methods to calculate the strength of a wake kick. Two implementations of the convolution are available. Based on which slicer mode (uniform_bin, uniform_charge) is used, the self._convolution method is bound to one or the other. The self.apply(bunch, slice_set) method calculates and applies the corresponding kick to the particles of the bunch that are located inside the slicing region defined by a slice_set. This should be the only method to be implemented for a child class inheriting from the WakeKick class.

    __init__(wake_function, slicer, n_turns_wake, *args, **kwargs)
        Universal constructor for WakeKick objects. The slicer_mode is passed only to decide which of the two implementations of the convolution the self._convolution method is bound to.

    _accumulate_source_signal(bunch, times_list, ages_list, moments_list, betas_list)
        Accumulate (multiturn-)wake signals left by source slices. Takes a list of slice set attributes and adds up all convolutions weighted by the respective moments. Also updates the age of each slice set.

    _convolution_dot_product(target_times, source_times, source_moments, source_beta)
        Implementation of the convolution of wake and source_moments (beam profile) using the numpy dot product. To be used with the 'uniform_charge' slicer mode.

    _convolution_numpy(target_times, source_times, source_moments, source_beta)
        Implementation of the convolution of wake and source_moments (longitudinal beam profile) using the built-in numpy.convolve method. Recommended for use with the 'uniform_bin' slicer mode (in the case of multiturn wakes, additional conditions must be fulfilled: fixed z_cuts and no acceleration!) for higher performance. Question: how about interpolation to avoid the expensive dot product in most cases?

    static _wake_factor(bunch)
        Universal scaling factor for the strength of a wake field kick.

    abstract apply(bunch, slice_set_list, slice_set_age_list)
        Calculate and apply the corresponding wake kick to the bunch conjugate momenta using the given slice_set. Only particles within the slicing region, i.e. particles_within_cuts (defined by the slice_set), experience a kick.
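To make the dot-product convolution concrete, the following plain-numpy sketch computes the same kind of quantity – the kick on each target slice as a wake-weighted sum over source slices. It is an illustration of the idea, not the class's internal code:

    import numpy as np

    def kick_per_slice(wake, target_times, source_times, source_moments):
        """Dot-product convolution: kick[i] = sum_j wake(t_i - t_j) * m_j."""
        # matrix of time differences between every target and source slice
        dt = target_times[:, np.newaxis] - source_times[np.newaxis, :]
        return np.dot(wake(dt), source_moments)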

PyHEADTAIL.impedances.wakes module

This module includes the description of a (multiturn) WakeField as well as the implementation of the WakeSource objects.

A WakeField is defined as a composition of the elementary WakeKick objects (see the .wake_kicks module). They originate from WakeSources, e.g. a WakeTable, Resonator and/or a ResistiveWall. The WakeField does not directly accept the WakeKick objects, but takes a list of WakeSources first (which can be of different kinds), each of which knows how to generate its WakeKick objects via the factory method WakeSource.get_wake_kicks(..). The collection of WakeKicks from all the WakeSources defines the WakeField; these are the elementary objects that are stored (i.e. the WakeField forgets about the origin of the WakeKicks once they have been created).

@author Hannes Bartosik, Kevin Li, Giovanni Rumolo, Michael Schenk
@date March 2014
@brief Implementation of a WakeField as a composition of WakeKicks originating from different WakeSources.
@copyright CERN
class PyHEADTAIL.impedances.wakes.CircularResistiveWall(pipe_radius, resistive_wall_length, conductivity, dt_min, n_turns_wake=1, *args, **kwargs)

    Circular resistive wall.

    __init__(pipe_radius, resistive_wall_length, conductivity, dt_min, n_turns_wake=1, *args, **kwargs)
        Special case of a circular resistive wall.
class PyHEADTAIL.impedances.wakes.CircularResonator(R_shunt, frequency, Q, n_turns_wake=1, *args, **kwargs)

    Circular Resonator.

    __init__(R_shunt, frequency, Q, n_turns_wake=1, *args, **kwargs)
        Special case of a circular resonator.
class PyHEADTAIL.impedances.wakes.ParallelHorizontalPlatesResistiveWall(pipe_radius, resistive_wall_length, conductivity, dt_min, n_turns_wake=1, *args, **kwargs)

    Resistive wall impedance for horizontal parallel plates.

    __init__(pipe_radius, resistive_wall_length, conductivity, dt_min, n_turns_wake=1, *args, **kwargs)
        Special case of a parallel plates resistive wall.

class PyHEADTAIL.impedances.wakes.ParallelHorizontalPlatesResonator(R_shunt, frequency, Q, n_turns_wake=1, *args, **kwargs)

    Broad-band resonator for horizontal parallel plates.

    __init__(R_shunt, frequency, Q, n_turns_wake=1, *args, **kwargs)
        Special case of a parallel plates resonator.
PyHEADTAIL.impedances.wakes.ParallelPlatesResistiveWall(*args, **kwargs)

PyHEADTAIL.impedances.wakes.ParallelPlatesResonator(*args, **kwargs)
class PyHEADTAIL.impedances.wakes.ParallelVerticalPlatesResistiveWall(pipe_radius, resistive_wall_length, conductivity, dt_min, n_turns_wake=1, *args, **kwargs)

    Resistive wall impedance for vertical parallel plates.

    __init__(pipe_radius, resistive_wall_length, conductivity, dt_min, n_turns_wake=1, *args, **kwargs)
        Special case of a parallel plates resistive wall.

class PyHEADTAIL.impedances.wakes.ParallelVerticalPlatesResonator(R_shunt, frequency, Q, n_turns_wake=1, *args, **kwargs)

    Broad-band resonator for vertical parallel plates.

    __init__(R_shunt, frequency, Q, n_turns_wake=1, *args, **kwargs)
        Special case of a parallel plates resonator.
class PyHEADTAIL.impedances.wakes.ResistiveWall(pipe_radius, resistive_wall_length, conductivity, dt_min, Yokoya_X1, Yokoya_Y1, Yokoya_X2, Yokoya_Y2, n_turns_wake=1, *args, **kwargs)

    Class to describe the wake functions originating from a resistive wall impedance.

    __init__(pipe_radius, resistive_wall_length, conductivity, dt_min, Yokoya_X1, Yokoya_Y1, Yokoya_X2, Yokoya_Y2, n_turns_wake=1, *args, **kwargs)
        General constructor to create a ResistiveWall WakeSource object describing the wake functions of a resistive wall impedance. The parameter 'n_turns_wake' defines how many turns are considered for the multiturn wakes. It is 1 by default, i.e. multiturn wakes are off.

    function_transverse(Yokoya_factor)
        Define the wake function (transverse) of a resistive wall with the given parameters.

    get_wake_kicks(slicer)
        Factory method. Creates instances of the appropriate WakeKick objects for the ResistiveWall WakeSource with the specified parameters. A WakeKick object is instantiated only if the corresponding Yokoya factor is non-zero. The WakeKick objects are returned as a list wake_kicks.
class PyHEADTAIL.impedances.wakes.Resonator(R_shunt, frequency, Q, Yokoya_X1, Yokoya_Y1, Yokoya_X2, Yokoya_Y2, switch_Z, n_turns_wake=1, *args, **kwargs)

    Class to describe the wake functions originating from a resonator impedance. Alex Chao's resonator model (Eq. 2.82) is used as well as the definitions from HEADTAIL.

    __init__(R_shunt, frequency, Q, Yokoya_X1, Yokoya_Y1, Yokoya_X2, Yokoya_Y2, switch_Z, n_turns_wake=1, *args, **kwargs)
        General constructor to create a Resonator WakeSource object describing the wake functions of a resonator impedance. Alex Chao's resonator model (Eq. 2.82) is used as well as definitions from HEADTAIL. Note that it is no longer allowed to pass a LIST of parameters to generate a number of resonators with different parameters within the same Resonator object. Instead, create the Resonator objects and pass all of them to the WakeField constructor. The parameter 'n_turns_wake' defines how many turns are considered for the multiturn wakes. It is 1 by default, i.e. multiturn wakes are off.

    function_longitudinal()
        Define the wake function (longitudinal) of a resonator with the given parameters according to Alex Chao's resonator model (Eq. 2.82) and the definitions of the resonator in HEADTAIL.

    function_transverse(Yokoya_factor)
        Define the wake function (transverse) of a resonator with the given parameters according to Alex Chao's resonator model (Eq. 2.82) and the definitions of the resonator in HEADTAIL.

    get_wake_kicks(slicer)
        Factory method. Creates instances of the appropriate WakeKick objects for a Resonator WakeSource with the specified parameters. A WakeKick object is instantiated only if the corresponding Yokoya factor is non-zero. The WakeKick objects are returned as a list wake_kicks.
class PyHEADTAIL.impedances.wakes.WakeField(slicer, *wake_sources)

    A WakeField is defined by elementary WakeKick objects that may originate from different WakeSource objects. Usually, there is no need for the user to define more than one instance of the WakeField class in a simulation - except if one wants to use different slicing configurations (one WakeField object is allowed to have exactly one slicing configuration, i.e. only one instance of the Slicer class). A WakeField is also able to calculate the wake forces coming from earlier turns (multiturn wakes) by archiving the longitudinal bunch distribution (SliceSet instances) a number of turns back.

    __init__(slicer, *wake_sources)
        Accepts a list of WakeSource objects. Each WakeSource object knows how to generate its corresponding WakeKick objects. The collection of all the WakeKick objects of each of the passed WakeSource objects defines the WakeField. When instantiating the WakeField object, the WakeKick objects for each WakeSource defined in wake_sources are requested. The returned WakeKick lists are all stored in the WakeField.wake_kicks list. The WakeField itself forgets about the origin (WakeSource) of the kicks as soon as they have been generated. Exactly one instance of the Slicer class must be passed to the WakeField constructor. All the wake field components (kicks) hence use the same slicing and thus the same slice_set to calculate the strength of the kicks. To calculate the contributions from multiturn wakes, the longitudinal beam distributions (SliceSet instances) are archived in a deque. In parallel to the slice_set_deque, there is a slice_set_age_deque to keep track of the age of each of the SliceSet instances.

    track(bunch)
        Calls the WakeKick.apply(bunch, slice_set) method of each of the WakeKick objects stored in self.wake_kicks. A slice_set is necessary to perform this operation. It is requested from the bunch (instance of the Particles class) using the Particles.get_slices(self.slicer) method, where self.slicer is the instance of the Slicer class used for this particular WakeField object. A slice_set is returned according to the self.slicer configuration. The statistics mean_x and mean_y are requested to be calculated and saved in the SliceSet instance, too, s.t. the first moments x, y can be calculated by the WakeKick instances.
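A minimal sketch of the intended composition. It assumes the slicer from PyHEADTAIL.particles.slicing and a bunch (Particles instance) already set up; the numeric parameters are illustrative:

    >>> from PyHEADTAIL.particles.slicing import UniformBinSlicer
    >>> from PyHEADTAIL.impedances.wakes import CircularResonator, WakeField
    >>> slicer = UniformBinSlicer(50, n_sigma_z=3)
    >>> resonator = CircularResonator(R_shunt=10e6, frequency=1.3e9, Q=1.,
    ...                               n_turns_wake=1)
    >>> wake_field = WakeField(slicer, resonator)
    >>> wake_field.track(bunch)   # applies all generated WakeKicks to the bunch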
class PyHEADTAIL.impedances.wakes.WakeSource(*args, **kwargs)

    Abstract base class for wake sources, such as WakeTable, Resonator or ResistiveWall.

    abstract get_wake_kicks(slicer_mode)
        Factory method. Creates instances of the WakeKick objects for the given WakeSource and returns them as a list wake_kicks. This method is usually only called by a WakeField object to collect and create all the WakeKick objects originating from the different sources. (The slicer mode Slicer.mode must be passed at instantiation of a WakeKick object only to set the appropriate convolution method. See the docstrings of the WakeKick class.)
class PyHEADTAIL.impedances.wakes.WakeTable(wake_file, wake_file_columns, n_turns_wake=1, *args, **kwargs)

    Class to define wake functions and WakeKick objects using wake data from a table.

    __init__(wake_file, wake_file_columns, n_turns_wake=1, *args, **kwargs)
        Load data from the wake_file and store them in the dictionary self.wake_table. Keys are the names specified by the user in wake_file_columns and describe the names of the wake field components (e.g. dipole_x or dipole_yx). The dict values are given by the corresponding data read from the table. The nomenclature of the wake components must be strictly obeyed. Valid names for wake components are:

            'constant_x', 'constant_y', 'dipole_x', 'dipole_y', 'dipole_xy', 'dipole_yx', 'quadrupole_x', 'quadrupole_y', 'quadrupole_xy', 'quadrupole_yx', 'longitudinal'.

        The order of wake_file_columns is relevant and must correspond to the one in the wake_file. There is no way to check this here and it is the responsibility of the user to ensure it is correct. The two checks made here are whether the length of wake_file_columns corresponds to the number of columns in the wake_file and whether a column 'time' is specified.

        The units and signs of the wake table data are assumed to follow the HEADTAIL conventions, i.e.

            time: [ns]
            transverse wake components: [V/pC/mm]
            longitudinal wake component: [V/pC].

        The parameter 'n_turns_wake' defines how many turns are considered for the multiturn wakes. It is 1 by default, i.e. multiturn wakes are off.

    _is_provided(wake_component)
        Check whether wake_component is a valid name and available in the wake table data. Return True if yes and False if no.

    function_longitudinal()
        Defines and returns the wake(dt) function for the given wake_component (longitudinal). Data from the wake table are used, but first converted to SI units assuming that time is specified in [ns] and the longitudinal wake field strength in [V/pC]. Sign conventions are applied (HEADTAIL conventions). The wake(dt) uses scipy.interpolate.interp1d linear interpolation to calculate the wake strength at an arbitrary value of dt (provided it is in the valid range). The valid range of dt is given by the time range from the wake table. If values of wake(dt) are requested for dt outside the valid range, a ValueError is raised by interp1d. The beam loading theorem is respected and applied for dt=0.

    function_transverse(wake_component)
        Defines and returns the wake(beta, dz) function for the given wake_component (transverse). Data from the wake table are used, but first converted to SI units assuming that time is specified in [ns] and transverse wake field strengths in [V/pC/mm]. Sign conventions are applied (HEADTAIL conventions). dz is related to the wake table time data by dz = beta c dt (dz < 0 for the ultrarelativistic case). The wake(dt) uses scipy.interpolate.interp1d linear interpolation to calculate the wake strength at an arbitrary value of dt (provided it is in the valid range). The valid range of dt is given by the time range from the wake table. If values of wake(dt) are requested for dt outside the valid range, a ValueError is raised by interp1d. Very basic conformity checks for the wake table data are already performed at definition time of the wake(dt) method, e.g. whether the specified wake is valid only for ultrarelativistic cases or low beta cases. In the former case, the wake strength at time 0 must be defined by the user!

    get_wake_kicks(slicer)
        Factory method. Creates instances of the appropriate WakeKick objects for all the wake components provided by the user (and the wake table data). The WakeKick objects are returned as a list wake_kicks.
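A sketch of loading a wake table, using only the documented column names; the file name is a placeholder and the slicer is assumed to be set up as in the resonator example above:

    >>> from PyHEADTAIL.impedances.wakes import WakeTable, WakeField
    >>> wake_table = WakeTable('wakefile.dat',                # placeholder path
    ...                        ['time', 'dipole_x', 'dipole_y'],
    ...                        n_turns_wake=1)
    >>> wake_field = WakeField(slicer, wake_table)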
PyHEADTAIL.impedances.wakes.check_wake_sampling(bunch, slicer, wakes, beta=1, wake_column=None, bins=False)

    Handy function for a quick visual check of the sampling of the wake functions. For now only implemented for wake table type wakes.
\ No newline at end of file
diff --git a/docs/PyHEADTAIL.machines.html b/docs/PyHEADTAIL.machines.html
deleted file mode 100644
index 128545ad..00000000
--- a/docs/PyHEADTAIL.machines.html
+++ /dev/null
@@ -1,459 +0,0 @@
PyHEADTAIL.machines package

Submodules

PyHEADTAIL.machines.synchrotron module
class PyHEADTAIL.machines.synchrotron.BasicSynchrotron(*args, **kwargs)

    __init__(*args, **kwargs)
        Creates a synchrotron. The constructor parameters are identical to those documented for Synchrotron.__init__ below.
class PyHEADTAIL.machines.synchrotron.Synchrotron(optics_mode, charge=None, mass=None, p0=None, circumference=None, n_segments=None, name=None, s=None, alpha_x=None, beta_x=None, D_x=None, alpha_y=None, beta_y=None, D_y=None, accQ_x=None, accQ_y=None, Qp_x=0, Qp_y=0, app_x=0, app_y=0, app_xy=0, longitudinal_mode=None, Q_s=None, alpha_mom_compaction=None, h_RF=None, V_RF=None, dphi_RF=None, p_increment=None, RF_at='middle', wrap_z=False, other_detuners=[], use_cython=False)

    property Q_s

    property Q_x

    property Q_y

    __init__(optics_mode, charge=None, mass=None, p0=None, circumference=None, n_segments=None, name=None, s=None, alpha_x=None, beta_x=None, D_x=None, alpha_y=None, beta_y=None, D_y=None, accQ_x=None, accQ_y=None, Qp_x=0, Qp_y=0, app_x=0, app_y=0, app_xy=0, longitudinal_mode=None, Q_s=None, alpha_mom_compaction=None, h_RF=None, V_RF=None, dphi_RF=None, p_increment=None, RF_at='middle', wrap_z=False, other_detuners=[], use_cython=False)

        Creates a synchrotron.

        Parameters

        • optics_mode ('smooth', 'non-smooth') –
          'smooth': the optics parameters are the same for all segments;
          'non-smooth': the optics parameters are different for each segment.

        • charge (C) – reference particle charge in Coulomb.

        • mass (kg) – reference particle mass in kg.

        • p0 (kg m/s) – reference particle momentum.

        • circumference (m) – ring circumference (to be provided only if optics_mode is 'smooth').

        • n_segments – number of segments in the machine (to be provided if optics_mode is 'smooth', otherwise it is inferred from the length of s).

        • name – names of the locations in between segments. The length of the array should be n_segments + 1.

        • s (m, array) – longitudinal positions at which the machine is cut in segments. The length of the array should be n_segments + 1. The last value in the array provides the ring circumference.

        • alpha_x – horizontal alpha twiss parameter at each segment (cannot be provided if optics_mode is 'smooth'). In this case, the length of the array should be n_segments + 1. The last point of the array should be equal to the first (periodic condition).

        • beta_x (m) – horizontal beta twiss parameter at each segment. It has to be a scalar if optics_mode is 'smooth', an array if optics_mode is 'non-smooth'. In this case, the length of the array should be n_segments + 1. The last point of the array should be equal to the first (periodic condition).

        • D_x (m) – horizontal dispersion at each segment. Same conventions as beta_x.

        • alpha_y – vertical alpha twiss parameter at each segment. Same conventions as alpha_x.

        • beta_y (m) – vertical beta twiss parameter at each segment. Same conventions as beta_x.

        • D_y (m) – vertical dispersion at each segment. Same conventions as beta_x.

        • accQ_x – horizontal tune or phase advance:
          for optics_mode = 'smooth' this is the horizontal tune;
          for optics_mode = 'non-smooth' this is the horizontal phase advance per segment in units of 2*pi. The length of the array should be n_segments + 1. The last point of the array should be the horizontal tune.

        • accQ_y – vertical tune or phase advance:
          for optics_mode = 'smooth' this is the vertical tune;
          for optics_mode = 'non-smooth' this is the vertical phase advance per segment in units of 2*pi. The length of the array should be n_segments + 1. The last point of the array should be the vertical tune.

        • Qp_x, Qp_y – horizontal and vertical chromaticity (dQ/dp); the detuning is shared over segments.

        • app_x, app_y, app_xy – amplitude detuning coefficients (anharmonicities).

        • longitudinal_mode ('linear', 'non-linear') –
          'linear': linear longitudinal force (RF cavity);
          'non-linear': sinusoidal longitudinal force (RF cavities). Multiple harmonics can be defined in this case.

        • Q_s – synchrotron tune. It can be defined only if longitudinal_mode is 'linear'. If Q_s is provided, V_RF cannot be provided.

        • alpha_mom_compaction – momentum compaction factor (dL/L)/(dp).

        • h_RF – harmonic number. For multiple-harmonic RF systems this can be an array.

        • V_RF (V) – RF voltage. For multiple-harmonic RF systems this can be an array.

        • dphi_RF (rad) – phase of the RF system with respect to the reference particle (z=0). For a single harmonic, in the absence of acceleration or energy losses:
          above transition, z = 0 is the stable fixed-point if dphi_RF = 0;
          below transition, z = 0 is the stable fixed-point if dphi_RF = pi.

        • p_increment (kg m/s) – acceleration, reference particle momentum change per turn.

        • RF_at ('middle', 'end_of_transverse') – position of the longitudinal map in the ring.

        • wrap_z (True, False) – wrap the longitudinal position using the accelerator length.

        • other_detuners – list of other detuners to be applied (default is other_detuners = []).

    _add_wrapper_and_buncher()
        Add longitudinal z wrapping around the circumference as well as a UniformBinSlicer for bunching the beam.

    _construct_longitudinal_map(longitudinal_mode=None, h_RF=None, V_RF=None, dphi_RF=None, alpha_mom_compaction=None, Q_s=None, p_increment=None, RF_at=None)

    _construct_transverse_map(optics_mode=None, circumference=None, n_segments=None, s=None, name=None, alpha_x=None, beta_x=None, D_x=None, alpha_y=None, beta_y=None, D_y=None, accQ_x=None, accQ_y=None, Qp_x=None, Qp_y=None, app_x=None, app_y=None, app_xy=None, other_detuners=None, use_cython=None)

    property beta

    property betagamma

    property gamma

    property p0

    generate_6D_Gaussian_bunch(n_macroparticles, intensity, epsn_x, epsn_y, sigma_z)
        Generate a 6D Gaussian distribution of particles which is transversely matched to the Synchrotron. Longitudinally, the distribution is matched only in terms of linear focusing. For a non-linear bucket, the Gaussian distribution is cut along the separatrix (with some margin); it will gradually filament into the bucket, which will change the specified bunch length.

    generate_6D_Gaussian_bunch_matched(n_macroparticles, intensity, epsn_x, epsn_y, sigma_z=None, epsn_z=None)
        Generate a 6D Gaussian distribution of particles which is transversely as well as longitudinally matched. The distribution is found iteratively to exactly yield the given bunch length while at the same time being stationary in the non-linear bucket. Thus, the bunch length should amount to the one specified and should not change significantly during the synchrotron motion.
        Requires self.longitudinal_mode == 'non-linear' for the bucket.

    install_after_each_transverse_segment(element_to_add)
        Attention: do not add any elements which update the dispersion!

    track(bunch, verbose=False)
        Perform tracking of the beam through this Element.
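A smooth-optics sketch using only the parameters documented above; the numeric values are illustrative, not a real machine configuration:

    >>> from scipy.constants import c, e, m_p
    >>> from PyHEADTAIL.machines.synchrotron import Synchrotron
    >>> machine = Synchrotron(
    ...     optics_mode='smooth', circumference=6911., n_segments=1,
    ...     charge=e, mass=m_p, p0=26e9 * e / c,
    ...     beta_x=54., D_x=0., beta_y=54., D_y=0.,
    ...     accQ_x=20.13, accQ_y=20.18,
    ...     longitudinal_mode='linear', Q_s=0.017,
    ...     alpha_mom_compaction=0.0031, p_increment=0)
    >>> bunch = machine.generate_6D_Gaussian_bunch(
    ...     n_macroparticles=10000, intensity=1e11,
    ...     epsn_x=2e-6, epsn_y=2e-6, sigma_z=0.23)
    >>> machine.track(bunch)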
\ No newline at end of file
diff --git a/docs/PyHEADTAIL.monitors.html b/docs/PyHEADTAIL.monitors.html
deleted file mode 100644
index 87226cc3..00000000
--- a/docs/PyHEADTAIL.monitors.html
+++ /dev/null
@@ -1,527 +0,0 @@
PyHEADTAIL.monitors package

Submodules

PyHEADTAIL.monitors.monitors module

@author Kevin Li, Michael Schenk, Stefan Hegglin
@date 11. February 2014
@brief Implementation of monitors to store bunch-, slice- or particle-specific data to a HDF5 file.
@copyright CERN
class PyHEADTAIL.monitors.monitors.BunchMonitor(filename, n_steps, parameters_dict=None, write_buffer_every=512, buffer_size=4096, *args, **kwargs)

    Class to store bunch-specific data to an HDF5 file. This monitor uses a buffer (a shift register) to reduce the number of writing operations to file. This also helps to avoid IO errors and loss of data when writing to a file that may become temporarily unavailable (e.g. if the file is located on a network) during the simulation.

    __init__(filename, n_steps, parameters_dict=None, write_buffer_every=512, buffer_size=4096, *args, **kwargs)
        Create an instance of a BunchMonitor class. Apart from initializing the HDF5 file, a self.buffer dictionary is prepared to buffer the data before writing them to file.

        filename:           path and name of the HDF5 file, without file extension.
        n_steps:            number of entries to be reserved for each of the quantities in self.stats_to_store.
        parameters_dict:    metadata for the HDF5 file containing the main simulation parameters.
        write_buffer_every: number of steps after which buffer contents are actually written to file.
        buffer_size:        number of steps to be buffered.

        Optionally pass a list called stats_to_store which specifies which members/methods of the bunch will be called/stored.

    _create_file_structure(parameters_dict)
        Initialize the HDF5 file and create its basic structure (groups and datasets). One group is created for bunch-specific data. One dataset for each of the quantities defined in self.stats_to_store is generated. If specified by the user, write the contents of the parameters_dict as metadata (attributes) to the file. Maximum file compression is activated.

    _init_buffer(bunch)
        Init the correct buffer type (np.zeros, gpuarrays.zeros).

    _write_buffer_to_file()
        Write buffer contents to the HDF5 file. The file is opened and closed each time the buffer is written to file to prevent loss of data in case of a crash. buffer_tmp is an extra buffer which is always on the CPU. If self.buffer is on the GPU, copy the data to buffer_tmp and write the result to the file.

    _write_data_to_buffer(**kwargs)

    dump(bunch)
        Evaluate the statistics like mean and standard deviation for the given bunch and write the data to the HDF5 file. Makes use of a buffer to reduce the number of writing operations to file. This helps to avoid IO errors and loss of data when writing to a file that may become temporarily unavailable (e.g. if the file is on a network) during the simulation. Buffer contents are written to file only every self.write_buffer_every steps. The buffer gets initialized in the first dump() call; this allows for dynamic creation of the buffer memory on either CPU or GPU.
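A sketch of the usual tracking loop with a monitor; machine and bunch are assumed to be set up as in the synchrotron example above:

    >>> from PyHEADTAIL.monitors.monitors import BunchMonitor
    >>> n_turns = 1000
    >>> monitor = BunchMonitor('bunchmonitor', n_steps=n_turns,
    ...                        parameters_dict={'Q_x': 20.13})  # illustrative metadata
    >>> for turn in range(n_turns):
    ...     machine.track(bunch)
    ...     monitor.dump(bunch)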
class PyHEADTAIL.monitors.monitors.CellMonitor(filename, n_steps, n_azimuthal_slices, n_radial_slices, radial_cut, beta_z, parameters_dict=None, write_buffer_every=512, buffer_size=4096, *args, **kwargs)

    Class to store cell (z, dp) specific data (for the moment only mean_x, mean_y, mean_z, mean_dp and n_particles_in_cell) to an HDF5 file. This monitor uses a buffer (shift register) to reduce the number of writing operations to file. This also helps to avoid IO errors and loss of data when writing to a file that may become temporarily unavailable (e.g. if the file is located on a network) during the simulation.

    __init__(filename, n_steps, n_azimuthal_slices, n_radial_slices, radial_cut, beta_z, parameters_dict=None, write_buffer_every=512, buffer_size=4096, *args, **kwargs)
        Create an instance of a CellMonitor class. Apart from initializing the HDF5 file, a buffer self.buffer_cell is prepared to buffer the cell-specific data before writing them to file.

        filename:           path and name of the HDF5 file, without file extension.
        n_steps:            number of entries to be reserved for each of the quantities in self.stats_to_store.
        n_azimuthal_slices: number of pizza slices (azimuthal slicing).
        n_radial_slices:    number of rings (radial slicing).
        radial_cut:         'radius' of the outermost ring in longitudinal phase space (using beta_z*dp).
        parameters_dict:    metadata for the HDF5 file containing the main simulation parameters.
        write_buffer_every: number of steps after which buffer contents are actually written to file.
        buffer_size:        number of steps to be buffered.

    _create_file_structure(parameters_dict)
        Initialize the HDF5 file and create its basic structure (groups and datasets). One dataset for each of the quantities defined in self.stats_to_store is generated. If specified by the user, write the contents of the parameters_dict as metadata (attributes) to the file. Maximum file compression is activated.

    _write_buffer_to_file()
        Write buffer contents to the HDF5 file. The file is opened and closed each time the buffer is written to file to prevent loss of data in case of a crash.

    _write_data_to_buffer(bunch)
        Store the data in the self.buffer dictionary before writing them to file. The buffer is implemented as a shift register. The cell-specific data are computed by a cython function.

    dump(bunch)
        Evaluate the statistics for the given cells and write the data to the buffer and/or to the HDF5 file. The buffer is used to reduce the number of writing operations to file. This helps to avoid IO errors and loss of data when writing to a file that may become temporarily unavailable (e.g. if the file is on a network) during the simulation. Buffer contents are written to file only every self.write_buffer_every steps.
class PyHEADTAIL.monitors.monitors.Monitor(*args, **kwargs)

    Abstract base class for monitors. A monitor can request statistics data such as mean value and standard deviation and store the results in an HDF5 file.

    abstract dump()
        Write particle data given by bunch (instance of the Particles class) to buffer and/or file at the specific time the method is called. Data can e.g. be bunch-specific, slice_set-specific or particle-specific.
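Since dump is the only abstract method, a custom monitor is a small subclass. A hypothetical sketch (PrintMonitor is not part of the module; bunch.mean_x()/mean_y() assume the Particles statistics API):

    >>> from PyHEADTAIL.monitors.monitors import Monitor
    >>> class PrintMonitor(Monitor):
    ...     '''Hypothetical monitor printing instead of writing to HDF5.'''
    ...     def dump(self, bunch):
    ...         print(bunch.mean_x(), bunch.mean_y())
    >>> PrintMonitor().dump(bunch)   # bunch as set up above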
class PyHEADTAIL.monitors.monitors.ParticleMonitor(filename, stride=1, parameters_dict=None, *args, **kwargs)

    Class to store particle-specific data to an HDF5 file, i.e. the coordinates and conjugate momenta as well as the id of individual macroparticles of a bunch.

    __init__(filename, stride=1, parameters_dict=None, *args, **kwargs)
        Create an instance of a ParticleMonitor class. The HDF5 file is initialized, and if specified, the parameters_dict is written to file.

        filename:        path and name of the HDF5 file, without file extension.
        stride:          only store data of macroparticles for which id % stride == 0.
        parameters_dict: metadata for the HDF5 file containing the main simulation parameters.

        Optionally pass a list called quantities_to_store which specifies which members of the bunch will be called/stored.

    _create_file_structure(parameters_dict)
        Initialize the HDF5 file. If specified by the user, write the contents of the parameters_dict as metadata (attributes) to the file. Maximum file compression is activated.

    _write_data_to_file(bunch, arrays_dict)
        Write macroparticle data (x, xp, y, yp, z, dp, id) of a selection of particles to the HDF5 file. Optionally, data in additional_quantities can also be added if provided in the constructor. The file is opened and closed every time to prevent loss of data in case of a crash. For each simulation step, a new group with name 'Step#..' is created. It contains one dataset for each of the quantities given in self.quantities_to_store.

    dump(bunch, arrays_dict=None)
        Write particle data to file. See the docstring of method self._write_data_to_file.
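A sketch using the stride parameter to thin the stored macroparticles:

    >>> from PyHEADTAIL.monitors.monitors import ParticleMonitor
    >>> pmonitor = ParticleMonitor('particles', stride=10)  # every 10th macroparticle
    >>> pmonitor.dump(bunch)                                # bunch as set up above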
-class PyHEADTAIL.monitors.monitors.SliceMonitor(filename, n_steps, slicer, parameters_dict=None, write_buffer_every=512, buffer_size=4096, *args, **kwargs)
-

Class to store bunch- and slice_set-specific data to a HDF5 -file. This monitor uses two buffers (shift registers) to reduce the -number of writing operations to file. This also helps to avoid IO -errors and loss of data when writing to a file that may become -temporarily unavailable (e.g. if file is located on network) during -the simulation.

-
-
-__abstractmethods__ = frozenset({})
-
- -
-
-__init__(filename, n_steps, slicer, parameters_dict=None, write_buffer_every=512, buffer_size=4096, *args, **kwargs)
-

Create an instance of a SliceMonitor class. Apart from -initializing the HDF5 file, two buffers self.buffer_bunch and -self.buffer_slice are prepared to buffer the bunch-specific and -slice_set-specific data before writing them to file.

-
-
-
filename: Path and name of HDF5 file, without file extension.
n_steps: Number of entries to be reserved for each of the quantities in self.stats_to_store.
slicer: Instance of the Slicer class containing the configuration defining a slice_set.
parameters_dict: Metadata for HDF5 file containing main simulation parameters.
write_buffer_every: Number of steps after which buffer contents are actually written to file.
buffer_size: Number of steps to be buffered.

-

Optionally pass a list called bunch_stats_to_store or slice_stats_to_store which specifies which members/methods of the bunch will be called/stored.
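A corresponding sketch for the slice monitor, using the default buffer sizes documented above (bunch, n_turns and one_turn_map assumed to exist):

    from PyHEADTAIL.monitors.monitors import SliceMonitor
    from PyHEADTAIL.particles.slicing import UniformBinSlicer

    slicer = UniformBinSlicer(n_slices=50, n_sigma_z=3)
    monitor = SliceMonitor(filename='slices', n_steps=n_turns, slicer=slicer,
                           write_buffer_every=512, buffer_size=4096)
    for turn in range(n_turns):
        for element in one_turn_map:
            element.track(bunch)
        monitor.dump(bunch)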

-
-
- -
-
-__module__ = 'PyHEADTAIL.monitors.monitors'
-
- -
-
-_abc_impl = <_abc_data object>
-
- -
-
-_create_file_structure(parameters_dict)
-

Initialize HDF5 file and create its basic structure (groups and datasets). Two groups are created, one for slice_set-specific and one for bunch-specific data. One dataset for each of the quantities defined in self.bunch_stats_to_store and self.slice_stats_to_store resp. is generated. If specified by the user, write the contents of the parameters_dict as metadata (attributes) to the file. Maximum file compression is activated.

-
- -
-
-_init_buffer(bunch, slice_set)
-
- -
-
-_write_buffer_to_file()
-

Write buffer contents to the HDF5 file. The file is opened and closed each time the buffer is written to file to prevent loss of data in case of a crash.

-
- -
-
-_write_data_to_buffer(bunch)
-

Store the data in the self.buffer dictionary before writing them to file. The buffer is implemented as a shift register. To find the slice_set-specific data, a slice_set, defined by the slicing configuration self.slicer, must be requested from the bunch (instance of the Particles class), including all the statistics that are to be saved.

-
- -
-
-dump(bunch)
-

Evaluate the statistics like mean and standard deviation for the given slice_set and the bunch and write the data to the buffers and/or to the HDF5 file. The buffers are used to reduce the number of writing operations to file. This helps to avoid IO errors and loss of data when writing data to a file that may become temporarily unavailable (e.g. if the file is on a network) during the simulation. Buffer contents are written to file only every self.write_buffer_every steps.

-
- -
- -
-
- - -
-
-
- -
-
\ No newline at end of file
diff --git a/docs/PyHEADTAIL.multipoles.html b/docs/PyHEADTAIL.multipoles.html
deleted file mode 100644
index a65d21d4..00000000
--- a/docs/PyHEADTAIL.multipoles.html
+++ /dev/null
@@ -1,347 +0,0 @@
-
-
-
- -
-

PyHEADTAIL.multipoles package

-
-

Submodules

-
-
-

PyHEADTAIL.multipoles.multipoles module

-

Collection of localised thin multipole maps. For formulae see e.g. SIXTRACK:

-

SixTrack Physics Manual, R. De Maria and M. Fjellstrom, August 18, 2015

-

or, likewise,

-

A Symplectic Six-Dimensional Thin-Lens Formalism for Tracking, G. Ripken and F. Schmidt, April 5, 1995

-

@authors: Adrian Oeftiger
@date: 23/03/2016

-
-
-class PyHEADTAIL.multipoles.multipoles.ThinMultipole(knl, ksl=[], *args, **kwargs)
-

Implements the Horner scheme to efficiently calculate the polynomials for any order multipole maps.

-
-
-__abstractmethods__ = frozenset({})
-
- -
-
-__init__(knl, ksl=[], *args, **kwargs)
-

MAD-style counting of normal and skew multipole strengths: [dipolar, quadrupolar, sextupolar, octupolar, …] components.
:param knl: list of normalised normal strengths times the length of the multipole magnet [1/m^order] in ascending order

Optional arguments:
:param ksl: list of normalised skew strengths times the length of the multipole magnet [1/m^order] in ascending order

N.B.: If knl and ksl have different lengths, zeros are appended until they are equally long.

-
- -
-
-__module__ = 'PyHEADTAIL.multipoles.multipoles'
-
- -
-
-_abc_impl = <_abc_data object>
-
- -
-
-static ctaylor(x, y, kn, ks)
-

Efficient Horner scheme.

-
- -
-
-track(beam)
-

Perform tracking of beam through this Element.

-
- -
-
-static ztaylor(x, y, kn, ks)
-

Same as ctaylor but using complex numbers; slower but more readable – added for the sake of clarity.
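For illustration, a hedged stand-alone sketch of the complex-number evaluation (not the library code itself; it only evaluates the multipole polynomial sum_n (knl[n] + i*ksl[n]) * (x + i*y)^n / n! with a Horner-style recursion, and leaves out whatever sign/charge factors the actual kick applies):

    import numpy as np

    def ztaylor_sketch(x, y, kn, ks):
        # Horner-style evaluation of sum_n (kn[n] + 1j*ks[n]) * z**n / n!
        # with z = x + 1j*y; kn and ks are assumed equally long
        # (cf. the zero-padding note in __init__ above).
        z = x + 1j * y
        c = np.asarray(kn) + 1j * np.asarray(ks)
        res = c[-1] * np.ones_like(z, dtype=complex)
        for n in range(len(c) - 2, -1, -1):
            res = c[n] + res * z / (n + 1)
        return res.real, res.imag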

-
- -
- -
-
-class PyHEADTAIL.multipoles.multipoles.ThinOctupole(k3l, *args, **kwargs)
-

Thin octupolar map.

-
-
-__abstractmethods__ = frozenset({})
-
- -
-
-__init__(k3l, *args, **kwargs)
-

Arguments:
- k3l: normalised strength times the length of the octupole magnet [1/m^3]

-
-
- -
-
-__module__ = 'PyHEADTAIL.multipoles.multipoles'
-
- -
-
-_abc_impl = <_abc_data object>
-
- -
-
-track(beam)
-

Perform tracking of beam through this Element.

-
- -
- -
-
-class PyHEADTAIL.multipoles.multipoles.ThinQuadrupole(k1l, *args, **kwargs)
-

Thin quadrupolar map.

-
-
-__abstractmethods__ = frozenset({})
-
- -
-
-__init__(k1l, *args, **kwargs)
-

Arguments:
- k1l: normalised strength times the length of the quadrupole magnet [1/m]

-
-
- -
-
-__module__ = 'PyHEADTAIL.multipoles.multipoles'
-
- -
-
-_abc_impl = <_abc_data object>
-
- -
-
-track(beam)
-

Perform tracking of beam through this Element.

-
- -
- -
-
-class PyHEADTAIL.multipoles.multipoles.ThinSextupole(k2l, *args, **kwargs)
-

Thin sextupolar map.

-
-
-__abstractmethods__ = frozenset({})
-
- -
-
-__init__(k2l, *args, **kwargs)
-

Arguments:
- k2l: normalised strength times the length of the sextupole magnet [1/m^2]

-
-
- -
-
-__module__ = 'PyHEADTAIL.multipoles.multipoles'
-
- -
-
-_abc_impl = <_abc_data object>
-
- -
-
-track(beam)
-

Perform tracking of beam through this Element.

-
- -
- -
-
-class PyHEADTAIL.multipoles.multipoles.ThinSkewQuadrupole(k1sl, *args, **kwargs)
-

Thin skew quadrupolar map.

-
-
-__abstractmethods__ = frozenset({})
-
- -
-
-__init__(k1sl, *args, **kwargs)
-

Arguments:
- k1sl: normalised strength times the length of the skew quadrupole magnet [1/m]

-
-
- -
-
-__module__ = 'PyHEADTAIL.multipoles.multipoles'
-
- -
-
-_abc_impl = <_abc_data object>
-
- -
-
-track(beam)
-

Perform tracking of beam through this Element.

-
- -
- -
-
- - -
-
-
- -
-
\ No newline at end of file
diff --git a/docs/PyHEADTAIL.particles.html b/docs/PyHEADTAIL.particles.html
deleted file mode 100644
index ef0e7886..00000000
--- a/docs/PyHEADTAIL.particles.html
+++ /dev/null
@@ -1,1523 +0,0 @@
-
-
-
- -
-

PyHEADTAIL.particles package

-
-

Submodules

-
-
-

PyHEADTAIL.particles.generators module

-

@author Kevin Li, Michael Schenk, Adrian Oeftiger, Stefan Hegglin
@date 30.03.2015
@brief module for generating & matching particle distributions

-
-
-class PyHEADTAIL.particles.generators.HEADTAILcoords
-

The classic HEADTAIL phase space.

-
-
-__dict__ = mappingproxy({'__module__': 'PyHEADTAIL.particles.generators', '__doc__': 'The classic HEADTAIL phase space.', 'coordinates': ('x', 'xp', 'y', 'yp', 'z', 'dp'), 'transverse': ('x', 'xp', 'y', 'yp'), 'longitudinal': ('z', 'dp'), '__dict__': <attribute '__dict__' of 'HEADTAILcoords' objects>, '__weakref__': <attribute '__weakref__' of 'HEADTAILcoords' objects>})
-
- -
-
-__module__ = 'PyHEADTAIL.particles.generators'
-
- -
-
-__weakref__
-

list of weak references to the object (if defined)

-
- -
-
-coordinates = ('x', 'xp', 'y', 'yp', 'z', 'dp')
-
- -
-
-longitudinal = ('z', 'dp')
-
- -
-
-transverse = ('x', 'xp', 'y', 'yp')
-
- -
- -
-
-class PyHEADTAIL.particles.generators.ParticleGenerator(macroparticlenumber, intensity, charge, mass, circumference, gamma, distribution_x=None, alpha_x=0.0, beta_x=1.0, D_x=None, distribution_y=None, alpha_y=0.0, beta_y=1.0, D_y=None, distribution_z=None, Qs=None, eta=None, *args, **kwargs)
-

Factory to generate Particle instances according to distributions specified by the parameters in the initializer. The Particle instance can be generated via the .generate() method.

-
-
-__init__(macroparticlenumber, intensity, charge, mass, circumference, gamma, distribution_x=None, alpha_x=0.0, beta_x=1.0, D_x=None, distribution_y=None, alpha_y=0.0, beta_y=1.0, D_y=None, distribution_z=None, Qs=None, eta=None, *args, **kwargs)
-

Specify the distribution for each phase space separately. Only the phase spaces for which a distribution has been specified will be generated. The transverse phase space can be matched by specifying the Twiss parameters alpha and/or beta. The dispersion will be taken into account after the beam has been matched longitudinally (if matched). The longitudinal phase space will only get matched if both Qs and eta are specified.
:param distribution_[x,y,z]: a function which takes the n_particles as a parameter and returns a list-like object containing a 2D phase space. result[0] should stand for the spatial, result[1] for the momentum coordinate.

-
-
-
Parameters:
• alpha_[x,y] – Twiss parameter. The corresponding transverse phase space gets matched to (alpha_[], beta_[]).
• beta_[x,y] – Twiss parameter. The corresponding transverse phase space gets matched to (alpha_[], beta_[]).
• D_[x,y] – Dispersion. Only valid in combination with a longitudinal phase space.
• Qs – Synchrotron tune. If Qs and eta are specified, the longitudinal phase space gets matched to these parameters.
• eta – Slippage factor (zeroth order). If Qs and eta are specified, the longitudinal phase space gets matched to these parameters.
-
-
-
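A hedged construction sketch (all numerical values are placeholders, not recommendations):

    from scipy.constants import e, m_p
    from PyHEADTAIL.particles import generators

    gen = generators.ParticleGenerator(
        macroparticlenumber=10000, intensity=1e11, charge=e, mass=m_p,
        circumference=26659., gamma=479.6,                     # placeholders
        distribution_x=generators.gaussian2D(2e-9), beta_x=92.7,
        distribution_y=generators.gaussian2D(2e-9), beta_y=93.2,
        distribution_z=generators.gaussian2D(1e-3), Qs=2.1e-3, eta=3.2e-4)
    beam = gen.generate()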
- -
-
-__module__ = 'PyHEADTAIL.particles.generators'
-
- -
-
-_create_phase_space()
-
- -
-
-_linear_match_phase_space(beam)
-
- -
-
-generate()
-

Returns a particle object with the parameters specified in the constructor of the Generator object.

-
- -
-
-update(beam)
-

Updates the beam coordinates specified in the constructor of the Generator object. Existing coordinates will be overridden, new ones will be added. Calls beam.update().

-
- -
- -
-
-PyHEADTAIL.particles.generators.RF_bucket_distribution(rfbucket, sigma_z=None, epsn_z=None, margin=0, distribution_type=<class 'PyHEADTAIL.particles.rfbucket_matching.ThermalDistribution'>, *args, **kwargs)
-

Return a distribution function which generates particles which are matched to the specified bucket and target emittance or std. Specify only one of sigma_z, epsn_z.
:param rfbucket: An object of type RFBucket
:param sigma_z: target std
:param epsn_z: target normalized emittance in z-direction
:param margin: relative margin from the separatrix towards the inner stable fix point in which particles are avoided
inner stable fix point in which particles are avoided

-
-
-
Parameters
-

distribution_type – longitudinal distribution type from rfbucket_matching (default is ThermalDistribution which produces a Gaussian-like matched Boltzmann distribution)

-
-
Returns
-

A matcher with the specified bucket properties (closure)

-
-
Raises
-

ValueError – If neither or both of sigma_z, epsn_z are specified

-
-
-
- -
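Sketch of the intended use (rfbucket is assumed to be an existing RFBucket instance; the value of sigma_z is a placeholder):

    from PyHEADTAIL.particles.generators import RF_bucket_distribution

    matched_z = RF_bucket_distribution(rfbucket, sigma_z=0.23)
    # pass as distribution_z=matched_z to ParticleGenerator; specifying both
    # sigma_z and epsn_z (or neither) raises ValueError as documented above.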
-
-PyHEADTAIL.particles.generators.cut_distribution(distribution, is_accepted)
-

Generate coordinates according to some distribution inside the region specified by where the function is_accepted returns 1. (Wrapper for distributions, based on RF_cut.)
:param distribution: a function which takes the n_particles as a parameter and returns a list-like object containing a 2D phase space. result[0] should stand for the spatial, result[1] for the momentum coordinate.

-
-
-
Parameters
-

is_accepted – function taking two parameters (z, dp) [vectorised as arrays] and returning a boolean specifying whether the coordinate lies inside the desired phase space volume. A possible source to provide such an is_accepted function is RFBucket.make_is_accepted or generators.make_is_accepted_within_n_sigma.

-
-
Returns
-

A matcher with the specified bucket properties (closure)

-
-
-
- -
-
-PyHEADTAIL.particles.generators.gaussian2D(emittance_geo)
-

Closure which generates a gaussian distribution with the given geometrical emittance. Uncorrelated and symmetrical.
:param emittance_geo: geometrical emittance (normalized emittance/betagamma for transverse, emittance*e/(4*pi*p0) for longitudinal)

-
-
-
Returns
-

A function generating a 2d gaussian with the desired parameters

-
-
-
- -
-
-PyHEADTAIL.particles.generators.gaussian2D_asymmetrical(sigma_u, sigma_up)
-

Closure which generates a gaussian distribution with the given standard deviations. No correlation between u and up.
:param sigma_u: standard deviation of the marginal spatial distribution
:param sigma_up: standard deviation of the marginal momentum distribution

-
-
Returns
-

A function generating a 2d gaussian with the desired parameters

-
-
-
- -
-
-PyHEADTAIL.particles.generators.generate_Gaussian6DTwiss(macroparticlenumber, intensity, charge, mass, circumference, gamma, alpha_x, alpha_y, beta_x, beta_y, beta_z, epsn_x, epsn_y, epsn_z, dispersion_x=None, dispersion_y=None, limit_n_rms_x=None, limit_n_rms_y=None, limit_n_rms_z=None)
-

Convenience wrapper generating a 6D Gaussian phase space distribution of macro-particles with the specified parameters:

-
-
Parameters:
• macroparticlenumber – number of macro-particles in the beam
• intensity – number of represented beam particles
• charge – charge per particle [SI unit Coul]
• mass – mass per particle [SI unit kg]
• circumference – ring circumference (needed for effective models)
• gamma – relativistic Lorentz factor
• alpha_[x,y] – Twiss parameter. The corresponding transverse phase space gets matched to (alpha_[], beta_[])
• beta_[x,y] – Twiss parameter. The corresponding transverse phase space gets matched to (alpha_[], beta_[])
• beta_z – corresponding longitudinal Twiss parameter amounting to |eta| * circumference / (2 * pi * Qs)
• epsn_x – horizontal normalised RMS emittance [m.rad]
• epsn_y – vertical normalised RMS emittance [m.rad]
• epsn_z – longitudinal 90% emittance (4x the RMS emittance) [eV.s]
-
-
-
-
Optional args:
dispersion_x: horizontal optics dispersion value for matching
dispersion_y: vertical optics dispersion value for matching
limit_n_rms_[x,y]: number of RMS amplitudes to cut distribution
limit_n_rms_z: longitudinal number of RMS amplitudes to cut distribution (remember that epsn_z is already 4x the RMS value, i.e. 2 amplitudes)

-
-
-
-

Return a Particles instance with the phase space matched to the arguments.
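A usage sketch with placeholder values:

    from scipy.constants import e, m_p
    from PyHEADTAIL.particles.generators import generate_Gaussian6DTwiss

    beam = generate_Gaussian6DTwiss(
        macroparticlenumber=10000, intensity=1e11, charge=e, mass=m_p,
        circumference=26659., gamma=479.6,
        alpha_x=0., alpha_y=0., beta_x=92.7, beta_y=93.2, beta_z=95.,
        epsn_x=2e-6, epsn_y=2e-6, epsn_z=2.5)      # placeholder optics values
    print(beam.sigma_z(), beam.epsn_x())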

-
- -
-
-PyHEADTAIL.particles.generators.import_distribution2D(coords)
-

Return a closure which generates the phase space specified by the coords list.
:param coords: list containing the coordinates to use; coords[0] is the space, coords[1] the momentum coordinate.

-
-
- -
-
-PyHEADTAIL.particles.generators.kv2D(r_u, r_up)
-

Closure which generates a Kapchinski-Vladimirski-type uniform distribution in 2D. The extent is determined by the arguments.

-
-
Parameters:
• r_u – envelope edge radius for the spatial axis
• r_up – envelope edge angle for the momentum axis
-
-
-
- -
-
-PyHEADTAIL.particles.generators.kv4D(r_x, r_xp, r_y, r_yp)
-

Closure which generates a Kapchinski-Vladimirski-type uniform distribution in 4D. The extent of the phase space ellipses is determined by the arguments.

-
-
Parameters:
• r_x – envelope edge radius for the horizontal spatial axis
• r_xp – envelope edge angle for the horizontal momentum axis
• r_y – envelope edge radius for the vertical spatial axis
• r_yp – envelope edge angle for the vertical momentum axis
-
-
-
- -
-
-PyHEADTAIL.particles.generators.longitudinal_linear_matcher(Qs, eta, C)
-

Return a simple longitudinal matcher. Internally calls the transverse linear matcher with beta = beta_z and alpha = 0, where beta_z = |eta| * C / (2*pi*Qs).
:param Qs: synchrotron tune
:param eta: slippage factor (zeroth order), i.e. alpha_c - gamma^-2 (alpha_c = momentum compaction factor)

-
-
-
Parameters
-

C – circumference

-
-
Returns
-

A matcher with the specified Qs, eta (closure)

-
-
-
- -
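A quick numerical check of the beta_z relation above (illustrative placeholder numbers):

    import numpy as np

    Qs, eta, C = 2.1e-3, 3.2e-4, 26659.   # placeholder tune, slippage, circumference
    beta_z = abs(eta) * C / (2 * np.pi * Qs)
    print(beta_z)   # ~646 m; the matcher then uses beta=beta_z, alpha=0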
-
-PyHEADTAIL.particles.generators.make_is_accepted_within_n_sigma(rms_amplitude=None, limit_n_rms=None, epsn_rms=None)
-

Closure creating an is_accepted function (e.g. for cut_distribution). The is_accepted function will return whether the canonical coordinate and momentum pair lies within the phase space region limited by the action value limit_n_rms * rms_amplitude. The closure acts on normalised Floquet space, i.e. do apply this function to the particles before matching to the optics values.

-

Coordinate u and momentum up are squared to give the action amplitude J = u^2 + up^2. The amplitude is required to be below the limit to be accepted, J < limit_n_rms * rms_amplitude. The usual use case will be generating u and up in normalised Floquet space (i.e. before the normalised phase space coordinates get matched to the optics or longitudinal eta and Qs). Consequently, the 1 sigma RMS reference value epsn_rms corresponds to the normalised 1 sigma RMS emittance (i.e. amounting to beam.epsn_x() and beam.epsn_y() in the transverse plane, and beam.epsn_z()/4 in the longitudinal plane).
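A hedged sketch combining this closure with cut_distribution (the keyword combination is one plausible choice among those allowed by the signature above; values are placeholders):

    from PyHEADTAIL.particles import generators

    is_accepted = generators.make_is_accepted_within_n_sigma(
        epsn_rms=2e-6, limit_n_rms=3.)             # placeholder values
    dist_x = generators.cut_distribution(generators.gaussian2D(2e-9),
                                         is_accepted)
    # pass as distribution_x=dist_x to ParticleGenerator; the cut acts in
    # normalised Floquet space, i.e. before matching, as explained above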

-
- -
-
-PyHEADTAIL.particles.generators.transverse_linear_matcher(alpha, beta, dispersion=None)
-

Return a transverse matcher with the desired parameters.
:param alpha: Twiss parameter
:param beta: Twiss parameter
:param dispersion: (optional) only use in combination with a longitudinal phase space

-
-

Returns: Matcher(closure) taking two parameters: coords and direction

-
- -
-
-PyHEADTAIL.particles.generators.uniform2D(low, high)
-

Closure which generates a uniform distribution for the space coords. All momenta are 0.

-
- -
-
-

PyHEADTAIL.particles.particles module

-

Created on 17.10.2014
@author: Kevin Li, Michael Schenk, Adrian Oeftiger
@copyright CERN

-
-
-class PyHEADTAIL.particles.particles.Particles(macroparticlenumber, particlenumber_per_mp, charge, mass, circumference, gamma, coords_n_momenta_dict={}, *args, **kwargs)
-

Contains the basic properties of a particle ensemble with their coordinate and conjugate momentum arrays, energy and the like. Designed to describe beams, electron clouds, …

-
-
-__add__(other)
-

Merges two beams.

-
- -
-
-__init__(macroparticlenumber, particlenumber_per_mp, charge, mass, circumference, gamma, coords_n_momenta_dict={}, *args, **kwargs)
-

The dictionary coords_n_momenta_dict contains the coordinate and conjugate momenta names and assigns to each the corresponding array, e.g.: coords_n_momenta_dict = {'x': array(..), 'xp': array(..)}.

-
- -
-
-__module__ = 'PyHEADTAIL.particles.particles'
-
- -
-
-__radd__(other)
-
- -
-
-_slice_sets = None
-

Set of coordinate and momentum attributes of this Particles instance.

-
- -
-
-add(coords_n_momenta_dict)
-

Add the coordinates and momenta with their corresponding arrays to the attributes of the Particles instance (via self.update(coords_n_momenta_dict)). Does not allow existing coordinate or momentum attributes to be overwritten.

-
- -
-
-alpha_Twiss_x()
-
- -
-
-alpha_Twiss_y()
-
- -
-
-property beta
-
- -
-
-beta_Twiss_x()
-
- -
-
-beta_Twiss_y()
-
- -
-
-property betagamma
-
- -
-
-property charge_per_mp
-
- -
-
-clean_slices()
-

Erases the SliceSet records of this Particles instance. Any longitudinal trackers (or otherwise modifying elements) should use this method to clean the recorded SliceSet objects.

-
- -
-
-coords_n_momenta = None
-

ID of particles in order to keep track of single entries in the coordinate and momentum arrays.

-
- -
-
-dispersion_x()
-
- -
-
-dispersion_y()
-
- -
-
-effective_normalized_emittance_x(**kwargs)
-
- -
-
-effective_normalized_emittance_y(**kwargs)
-
- -
-
-effective_normalized_emittance_z(**kwargs)
-
- -
-
-epsn_x(**kwargs)
-
- -
-
-epsn_y(**kwargs)
-
- -
-
-epsn_z(**kwargs)
-
- -
-
-extract_slices(slicer, include_non_sliced='if_any', *args, **kwargs)
-

Return a list of Particles objects with the different slices. The last element of the list contains particles not assigned to any slice.

-

include_non_sliced : {'always', 'never', 'if_any'}, optional
'always': an extra element with particles not belonging to any slice is always inserted in the list (it can be empty).
'never': an extra element with particles not belonging to any slice is never inserted in the list.
'if_any': an extra element with particles not belonging to any slice is inserted in the list only if such particles exist.

-
-
-
- -
-
-property gamma
-

Dictionary of SliceSet objects which are retrieved via self.get_slices(slicer) by a client. Each SliceSet is recorded only once for a specific longitudinal state of Particles. Any longitudinal trackers (or otherwise modifying elements) should clean the saved SliceSet dictionary via self.clean_slices().

-
- -
-
-gamma_Twiss_x()
-
- -
-
-gamma_Twiss_y()
-
- -
-
-get_coords_n_momenta_dict()
-

Return a dictionary containing the coordinate and conjugate momentum arrays.

-
- -
-
-get_slices(slicer, *args, **kwargs)
-

For the given Slicer, the last SliceSet is returned. If there is no SliceSet recorded (i.e. the longitudinal state has changed), a new SliceSet is requested from the Slicer via Slicer.slice(self) and stored for future reference.

-

Arguments:
- statistics=True attaches mean values, standard deviations and emittances to the SliceSet for all planes.
- statistics=['mean_x', 'sigma_dp', 'epsn_z'] only adds the listed statistics values (can be used to save time). Valid list entries are all statistics functions of Particles.

-

Note: Requesting statistics after calling get_slices w/o the statistics keyword results in creating a new SliceSet!
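Usage sketch (slicer and bunch assumed defined):

    slice_set = bunch.get_slices(slicer, statistics=['mean_x', 'sigma_dp'])
    print(slice_set.mean_x)    # per-slice horizontal centroids
    # asking for further statistics later creates a new SliceSet (see note)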

-
- -
-
-property intensity
-
- -
-
-mean_dp(**kwargs)
-
- -
-
-mean_x(**kwargs)
-
- -
-
-mean_xp(**kwargs)
-
- -
-
-mean_y(**kwargs)
-
- -
-
-mean_yp(**kwargs)
-
- -
-
-mean_z(**kwargs)
-
- -
-
-property p0
-
- -
-
-reorder(permutation, except_for_attrs=[])
-

Reorder all particle coordinate and momentum arrays (in self.coords_n_momenta) and ids except for except_for_attrs according to the given index array permutation.

-
- -
-
-sigma_dp(**kwargs)
-
- -
-
-sigma_x(**kwargs)
-
- -
-
-sigma_xp(**kwargs)
-
- -
-
-sigma_y(**kwargs)
-
- -
-
-sigma_yp(**kwargs)
-
- -
-
-sigma_z(**kwargs)
-
- -
-
-sort_for(attr)
-

Sort the named particle attribute (coordinate / momentum) array and reorder all particles accordingly.

-
- -
-
-update(coords_n_momenta_dict)
-

Assigns the keys of the dictionary coords_n_momenta_dict as attributes to this Particles instance and puts the corresponding values. Pretty much the same as dict.update({…}). Attention: overwrites existing coordinate / momentum attributes.

-
- -
-
-property z_beamframe
-
- -
- -
-
-

PyHEADTAIL.particles.rfbucket_matching module

-

@author Kevin Li, Adrian Oeftiger, Stefan Hegglin
@date 16.06.2017
@brief module for matching longitudinal particle distributions to an RFBucket instance

-
-
-class PyHEADTAIL.particles.rfbucket_matching.ParabolicDistribution(H, Hmax=None, Hcut=0, H0=1)
-

The parabolic profile distribution is a specific case of the present implementation of the q-Gaussian distribution for n = 1/2, psi ~ sqrt(1 - H/H0). For a quadratic harmonic oscillator Hamiltonian this distribution provides a parabolic line density.

-
-
-__module__ = 'PyHEADTAIL.particles.rfbucket_matching'
-
- -
-
-_psi(H)
-

Define the distribution value for the given H, the output lies in the interval [0,1]. This is the central function to be implemented by stationary distributions.

-
- -
- -
-
-class PyHEADTAIL.particles.rfbucket_matching.QGaussianDistribution(H, Hmax=None, Hcut=0, H0=1)
-

Specific Tsallis q-Gaussian distribution for q=3/5 for now, leading to psi ~ (1 - H/H0)^2; this may be generalised.

-
-
-__module__ = 'PyHEADTAIL.particles.rfbucket_matching'
-
- -
-
-_psi(H)
-

Define the distribution value for the given H, the output lies in the interval [0,1]. This is the central function to be implemented by stationary distributions.

-
- -
-
-n = 2
-
- -
- -
-
-class PyHEADTAIL.particles.rfbucket_matching.RFBucketMatcher(rfbucket, distribution_type=None, sigma_z=None, epsn_z=None, verbose_regeneration=False, psi=None, *args, **kwargs)
-
-
-__init__(rfbucket, distribution_type=None, sigma_z=None, epsn_z=None, verbose_regeneration=False, psi=None, *args, **kwargs)
-

Initialize self. See help(type(self)) for accurate signature.

-
- -
-
-__module__ = 'PyHEADTAIL.particles.rfbucket_matching'
-
- -
-
-_compute_emittance(rfbucket, psi)
-
- -
-
-_compute_sigma(rfbucket, psi)
-
- -
-
-generate(macroparticlenumber, cutting_margin=0)
-

Generate a 2d phase space of n_particles particles randomly distributed according to the particle distribution function psi within the region [xmin, xmax, ymin, ymax].

-
- -
-
-get_moment_integrators()
-

Return moment integrators from cobra_functions.pdf_integrators_2d according to the chosen self.integrationmethod. Allows to change the integration method for RFBucket matching.

-
- -
-
-integrationmethod = 'quad'
-
- -
-
-linedensity(xx, quad_type=<function fixed_quad>)
-
- -
-
-psi_for_bunchlength_newton_method(sigma)
-
- -
-
-psi_for_emittance_newton_method(epsn_z)
-
- -
- -
-
-class PyHEADTAIL.particles.rfbucket_matching.StationaryDistribution(H, Hmax=None, Hcut=0, H0=1)
-
-
-__dict__ = mappingproxy({'__module__': 'PyHEADTAIL.particles.rfbucket_matching', '__init__': <function StationaryDistribution.__init__>, '_psi': <function StationaryDistribution._psi>, 'function': <function StationaryDistribution.function>, '__dict__': <attribute '__dict__' of 'StationaryDistribution' objects>, '__weakref__': <attribute '__weakref__' of 'StationaryDistribution' objects>, '__doc__': None})
-
- -
-
-__init__(H, Hmax=None, Hcut=0, H0=1)
-

Initialize self. See help(type(self)) for accurate signature.

-
- -
-
-__module__ = 'PyHEADTAIL.particles.rfbucket_matching'
-
- -
-
-__weakref__
-

list of weak references to the object (if defined)

-
- -
-
-abstract _psi(H)
-

Define the distribution value for the given H, the output lies in the interval [0,1]. This is the central function to be implemented by stationary distributions.

-
- -
-
-function(z, dp)
-
- -
- -
-
-class PyHEADTAIL.particles.rfbucket_matching.ThermalDistribution(H, Hmax=None, Hcut=0, H0=1)
-

Thermal Boltzmann distribution psi ~ exp(-H/H0). For a quadratic harmonic oscillator Hamiltonian this gives the bi-Gaussian phase space distribution.

-
-
-__module__ = 'PyHEADTAIL.particles.rfbucket_matching'
-
- -
-
-_psi(H)
-

Define the distribution value for the given H, the output lies in the interval [0,1]. This is the central function to be implemented by stationary distributions.

-
- -
- -
-
-class PyHEADTAIL.particles.rfbucket_matching.WaterbagDistribution(H, Hmax=None, Hcut=0, H0=1)
-

The waterbag distribution has a constant Hamiltonian distribution until a cutoff, psi ~ Theta(H - H0) with Theta the Heaviside step function.

-
-
-__module__ = 'PyHEADTAIL.particles.rfbucket_matching'
-
- -
-
-_psi(H)
-

Define the distribution value for the given H, the output lies in the interval [0,1]. This is the central function to be implemented by stationary distributions.

-
- -
- -
-
-

PyHEADTAIL.particles.slicing module

-
-
@authors: Hannes Bartosik, Stefan Hegglin, Giovanni Iadarola, Kevin Li, Adrian Oeftiger, Michael Schenk
@date: 01/10/2014
@copyright CERN

-
-
-exception PyHEADTAIL.particles.slicing.ModeIsUniformCharge(message)
-
-
-__init__(message)
-

Initialize self. See help(type(self)) for accurate signature.

-
- -
-
-__module__ = 'PyHEADTAIL.particles.slicing'
-
- -
-
-__str__()
-

Return str(self).

-
- -
-
-__weakref__
-

list of weak references to the object (if defined)

-
- -
- -
-
-class PyHEADTAIL.particles.slicing.SliceSet(z_bins, slice_index_of_particle, mode, n_macroparticles_per_slice=None, beam_parameters={})
-

Defines a set of longitudinal slices. It’s a blueprint or photo of a beam’s longitudinal profile. It knows where the slices are located, how many and which particles there are in which slice. All its attributes refer to the state of the beam at creation time of the SliceSet. Hence, it must never be updated with new distributions; rather, a new SliceSet needs to be created.

-
-
-__init__(z_bins, slice_index_of_particle, mode, n_macroparticles_per_slice=None, beam_parameters={})
-

Is intended to be created by the Slicer factory method. A SliceSet is given a set of intervals defining the slicing region and the histogram over the thereby defined slices.

-

beam_parameters is a dictionary containing certain beam parameters to be recorded with this SliceSet (e.g. beta being saved via beam_parameters['beta'] = beam.beta).

-
- -
-
-__module__ = 'PyHEADTAIL.particles.slicing'
-
- -
-
-age = None
-

Array of z values of each bin, goes from the left bin edge of the first bin to the right bin edge of the last bin.

-
- -
-
-property charge_per_slice
-

Array of slice charges, i.e. summing up all the particle charges for each slice.

-
- -
-
-convert_to_particles(slice_array, empty_particles=None)
-

Convert slice_array with entries for each slice to a particle array with the respective entry of each particle given by its slice_array value via the slice that the particle belongs to.

-
- -
-
-convert_to_time(z)
-

Convert longitudinal quantity from length to time units using the relativistic beta saved at creation time of the SliceSet.

-
- -
-
-lambda_bins(sigma=None, smoothen=True)
-

Line charge density with respect to bins along the slices.

-
- -
-
-lambda_prime_bins(sigma=None, smoothen_before=True, smoothen_after=True)
-

Return array of length (n_slices - 1) containing the derivative of the line charge density lambda w.r.t. the slice bins while smoothing via a Gaussian filter (i.e. the smoothened derivative of the n_macroparticles array times the macroparticle charge).

-
- -
-
-lambda_prime_z(z, sigma=None, smoothen_before=True, smoothen_after=True)
-

Line charge density derivative with respect to z along the slices.

-
- -
-
-lambda_z(z=None, sigma=None, smoothen=True)
-

Line charge density with respect to z along the slices. If z is None, return the line charge density along the slice centres.

-
- -
-
-mode = None
-

Numpy array containing the number of macroparticles in each slice.

-
- -
-
-property n_macroparticles_per_slice
-

Slice distribution, i.e. number of macroparticles in each slice.

-
- -
-
-property n_slices
-
- -
-
-property particle_indices_by_slice
-

Array of particle indices arranged / sorted according to their slice affiliation.

-
- -
-
-particle_indices_of_slice(slice_index)
-

Return an array of particle indices which are located in the slice defined by the given slice_index.

-
- -
-
-property particles_outside_cuts
-

All particle indices which are situated outside the slicing region defined by [z_cut_tail, z_cut_head).

-
- -
-
-property particles_within_cuts
-

All particle indices which are situated within the slicing region defined by [z_cut_tail, z_cut_head).

-
- -
-
-property particles_within_cuts_slice
-

Returns a continuous slice(first, last, 1) of particle indices between [z_cut_tail, z_cut_head]. Required for gpuarrays slicing syntax, i.e. x[slice] += 2. Only use when the beam is sorted!

-
- -
-
-property pidx_begin
-

particle index of the first particle within the sliceset region

-
- -
-
-property pidx_end
-

particle index of the last+1 particle within the sliceset region

-
- -
-
-slice_index_of_particle = None
-

How is the slicing done? For the moment it is either 'uniform_charge' or 'uniform_bin'.

-
- -
-
-property slice_positions
-

Position of the respective slice start within the array self.particle_indices_by_slice.

-
- -
-
-property slice_widths
-

Array of the widths of the slices.

-
- -
-
-property smoothing_sigma
-
- -
-
-z_bins = None
-

Array of slice indices for each particle, positions (indices) are the same as in beam.z.

-
- -
-
-property z_centers
-
- -
-
-property z_cut_head
-
- -
-
-property z_cut_tail
-
- -
- -
-
-class PyHEADTAIL.particles.slicing.Slicer(*args, **kwargs)
-

Slicer class that controls longitudinal binning of a beam. Factory for SliceSet objects.

-
-
-__abstractmethods__ = frozenset({'compute_sliceset_kwargs'})
-
- -
-
-__eq__(other)
-

Return self==value.

-
- -
-
-__hash__()
-

Identifies different instantiations of Slicer objects via their configuration (instead of their instance ID).

-
- -
-
-__module__ = 'PyHEADTAIL.particles.slicing'
-
- -
-
-__ne__(other)
-

Return self!=value.

-
- -
-
-_abc_impl = <_abc_data object>
-
- -
-
-_eff_epsn_x(sliceset, beam, **kwargs)
-
- -
-
-_eff_epsn_y(sliceset, beam, **kwargs)
-
- -
-
-_epsn(sliceset, u, up, dp, **kwargs)
-
- -
-
-_epsn_x(sliceset, beam, **kwargs)
-
- -
-
-_epsn_y(sliceset, beam, **kwargs)
-
- -
-
-_epsn_z(sliceset, beam, **kwargs)
-
- -
-
-_mean(sliceset, u, **kwargs)
-
- -
-
-_mean_dp(sliceset, beam, **kwargs)
-
- -
-
-_mean_x(sliceset, beam, **kwargs)
-
- -
-
-_mean_xp(sliceset, beam, **kwargs)
-
- -
-
-_mean_y(sliceset, beam, **kwargs)
-
- -
-
-_mean_yp(sliceset, beam, **kwargs)
-
- -
-
-_mean_z(sliceset, beam, **kwargs)
-
- -
-
-_sigma(sliceset, u, **kwargs)
-
- -
-
-_sigma_dp(sliceset, beam, **kwargs)
-
- -
-
-_sigma_x(sliceset, beam, **kwargs)
-
- -
-
-_sigma_y(sliceset, beam, **kwargs)
-
- -
-
-_sigma_z(sliceset, beam, **kwargs)
-
- -
-
-add_statistics(**kwargs)
-
- -
-
-abstract compute_sliceset_kwargs(beam)
-

Return argument dictionary to create a new SliceSet object according to the saved configuration. This method defines the slicing behaviour of inheriting Slicer implementations.

-
- -
-
-property config
-
- -
-
-static extract_beam_parameters(beam)
-

Return a dictionary of beam parameters to be stored in a SliceSet instance (such as beam.beta etc.).

-
- -
-
-get_long_cuts(beam)
-

Return boundaries of the slicing region, (z_cut_tail, z_cut_head). If they have been set at instantiation, self.z_cuts is returned. If n_sigma_z is given, a cut of n_sigma_z * beam.sigma_z to the left and to the right respectively is applied; otherwise the longitudinally first and last particle define the full region.

-
- -
-
-slice(beam, *args, **kwargs)
-

Return a SliceSet object according to the saved configuration. Generate it using the keywords of the self.compute_sliceset_kwargs(beam) method. Defines the interface to create SliceSet instances (factory method).

-

Arguments:
- statistics=True attaches mean values, standard deviations and emittances to the SliceSet for all planes.
- statistics=['mean_x', 'sigma_dp', 'epsn_z'] only adds the listed statistics values (can be used to save time). Valid list entries are all statistics functions of Particles.

-
- -
-
-update_slices(beam)
-

non-existent anymore!

-
- -
- -
-
-class PyHEADTAIL.particles.slicing.UniformBinSlicer(n_slices, n_sigma_z=None, z_cuts=None, z_sample_points=None, *args, **kwargs)
-

Slices with respect to uniform bins along the slicing region.

-
-
-__abstractmethods__ = frozenset({})
-
- -
-
-__init__(n_slices, n_sigma_z=None, z_cuts=None, z_sample_points=None, *args, **kwargs)
-

Return a UniformBinSlicer object. Set and store the corresponding slicing configuration in self.config. Note that either n_sigma_z or z_cuts and/or z_sample_points can be set. If both are given, a ValueError will be raised.
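Usage sketch (bunch assumed defined):

    from PyHEADTAIL.particles.slicing import UniformBinSlicer

    slicer = UniformBinSlicer(n_slices=64, n_sigma_z=3)  # or z_cuts=(-0.3, 0.3)
    slice_set = bunch.get_slices(slicer, statistics=True)
    print(slice_set.n_macroparticles_per_slice)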

-
- -
-
-__module__ = 'PyHEADTAIL.particles.slicing'
-
- -
-
-_abc_impl = <_abc_data object>
-
- -
-
-_get_slicing_from_z_sample_points(z_sample_points, z_cuts=None)
-

Alternative slicing function for UniformBinSlicer. The function takes a given array of sampling points and ensures that the slice centers lie at those sampling points. If z_cuts is given and is beyond the sampling points, it furthermore extends the given sampling points at the same sampling frequency to include the range given by z_cuts. Very useful if one wants to ensure that certain points or regions of a wakefield are included or correctly sampled.

-
- -
-
-compute_sliceset_kwargs(beam)
-

Return argument dictionary to create a new SliceSet according to the saved configuration for uniformly binned SliceSet objects.

-
- -
- -
-
-class PyHEADTAIL.particles.slicing.UniformChargeSlicer(n_slices, n_sigma_z=None, z_cuts=None, *args, **kwargs)
-

Slices with respect to uniform charge for each bin along the slicing region.

-
-
-__abstractmethods__ = frozenset({})
-
- -
-
-__init__(n_slices, n_sigma_z=None, z_cuts=None, *args, **kwargs)
-

Return a UniformChargeSlicer object. Set and store the corresponding slicing configuration in self.config. Note that either n_sigma_z or z_cuts can be set. If both are given, a ValueError will be raised.

-
- -
-
-__module__ = 'PyHEADTAIL.particles.slicing'
-
- -
-
-_abc_impl = <_abc_data object>
-
- -
-
-compute_sliceset_kwargs(beam)
-

Return argument dictionary to create a new SliceSet according to the saved configuration for a uniform charge distribution along the bins.

-
- -
- -
-
-PyHEADTAIL.particles.slicing.clean_slices(long_track_method)
-

Adds the beam.clean_slices() to any track(beam) method of longitudinal elements (elements that change beam.z, the longitudinal position of any particles).

-
- -
-
-PyHEADTAIL.particles.slicing.make_int32(array)
-
- -
-
- - -
-
-
- -
-
\ No newline at end of file
diff --git a/docs/PyHEADTAIL.radiation.html b/docs/PyHEADTAIL.radiation.html
deleted file mode 100644
index 5b2743ab..00000000
--- a/docs/PyHEADTAIL.radiation.html
+++ /dev/null
@@ -1,190 +0,0 @@
-
-
-
- -
-

PyHEADTAIL.radiation package

-
-

Submodules

-
-
-

PyHEADTAIL.radiation.radiation module

-

@author Andrea Passarelli
@date 23. February 2016
@brief Synchrotron radiation damping effect in transverse and longitudinal planes.
@copyright CERN

-
-
-class PyHEADTAIL.radiation.radiation.SynchrotronRadiationLongitudinal(eq_sig_dp, damping_time_z_turns, E_loss_eV)
-
-
-__dict__ = mappingproxy({'__module__': 'PyHEADTAIL.radiation.radiation', '__init__': <function SynchrotronRadiationLongitudinal.__init__>, 'track': <function SynchrotronRadiationLongitudinal.track>, '__dict__': <attribute '__dict__' of 'SynchrotronRadiationLongitudinal' objects>, '__weakref__': <attribute '__weakref__' of 'SynchrotronRadiationLongitudinal' objects>, '__doc__': None})
-
- -
-
-__init__(eq_sig_dp, damping_time_z_turns, E_loss_eV)
-

We are assuming no alpha, no dispersion, etc.

-
- -
-
-__module__ = 'PyHEADTAIL.radiation.radiation'
-
- -
-
-__weakref__
-

list of weak references to the object (if defined)

-
- -
-
-track(bunch)
-
- -
- -
-
-class PyHEADTAIL.radiation.radiation.SynchrotronRadiationTransverse(eq_emit_x, eq_emit_y, damping_time_x_turns, damping_time_y_turns, beta_x, beta_y)
-
-
-__dict__ = mappingproxy({'__module__': 'PyHEADTAIL.radiation.radiation', '__init__': <function SynchrotronRadiationTransverse.__init__>, 'track': <function SynchrotronRadiationTransverse.track>, '__dict__': <attribute '__dict__' of 'SynchrotronRadiationTransverse' objects>, '__weakref__': <attribute '__weakref__' of 'SynchrotronRadiationTransverse' objects>, '__doc__': None})
-
- -
-
-__init__(eq_emit_x, eq_emit_y, damping_time_x_turns, damping_time_y_turns, beta_x, beta_y)
-

We are assuming no alpha, no dispersion, etc.

-
- -
-
-__module__ = 'PyHEADTAIL.radiation.radiation'
-
- -
-
-__weakref__
-

list of weak references to the object (if defined)

-
- -
-
-track(bunch)
-
- -
- -
-
- - -
-
-
- -
-
\ No newline at end of file
diff --git a/docs/PyHEADTAIL.rfq.html b/docs/PyHEADTAIL.rfq.html
deleted file mode 100644
index c0b7ec40..00000000
--- a/docs/PyHEADTAIL.rfq.html
+++ /dev/null
@@ -1,419 +0,0 @@
-
-
-
- -
-

PyHEADTAIL.rfq package

-
-

Submodules

-
-
-

PyHEADTAIL.rfq.rfq module

-

This module contains the Python implementation of a pillbox-cavity RF quadrupole - referred to as the RFQ - as it was proposed by Alexej Grudiev in 'Radio frequency quadrupole for Landau damping in accelerators', Phys. Rev. Special Topics - Accelerators and Beams 17, 011001 (2014) [1]. Similar to a 'Landau' octupole magnet, this device is intended to introduce an incoherent tune spread such that Landau damping can prevent the growth of transverse collective instabilities.

-

The formulae that are used are based on [1] and make use of the thin-lens approximation. On the one hand, the RFQ introduces a longitudinal spread of the betatron frequency and, on the other hand, a transverse spread of the synchrotron frequency.

-

The effect in the transverse plane is modelled in two different ways:
(I) RFQ as a detuner acting directly on each particle's betatron tunes,
(II) RFQ as a localized kick acting on each particle's momenta xp and yp.
-

The effect in the longitudinal plane is always modelled as a localized kick, i.e. a change in a particle's normalized momentum dp. For model (II), the incoherent betatron detuning is not applied directly, but is a consequence of the change in momenta xp and yp.

-

@author Michael Schenk, Adrian Oeftiger
@date July, 10th 2014
@brief Python implementation of a pillbox cavity RF quadrupole for Landau damping.

-
-

@copyright CERN

-
-
-class PyHEADTAIL.rfq.rfq.RFQKick
-

Python base class to describe the RFQ element in the localized kick model for both the transverse and the longitudinal coordinates.

-
-
-__abstractmethods__ = frozenset({'track'})
-
- -
-
-__dict__ = mappingproxy({'__module__': 'PyHEADTAIL.rfq.rfq', '__doc__': 'Python base class to describe the RFQ element in the\n localized kick model for both the transverse and the\n longitudinal coordinates.\n ', 'track': <function RFQKick.track>, '__dict__': <attribute '__dict__' of 'RFQKick' objects>, '__weakref__': <attribute '__weakref__' of 'RFQKick' objects>, '__abstractmethods__': frozenset({'track'}), '_abc_impl': <_abc_data object>})
-
- -
-
-__module__ = 'PyHEADTAIL.rfq.rfq'
-
- -
-
-__weakref__
-

list of weak references to the object (if defined)

-
- -
-
-_abc_impl = <_abc_data object>
-
- -
-
-abstract track(beam)
-
- -
- -
-
-class PyHEADTAIL.rfq.rfq.RFQLongitudinalKick(v_2, omega, phi_0)
-

Python implementation of the RFQ element acting on the particles’ longitudinal coordinate dp.

-
-
-__abstractmethods__ = frozenset({})
-
- -
-
-__init__(v_2, omega, phi_0)
-

An RFQ element is fully characterized by the parameters
v_2: quadrupolar expansion coefficient of the accelerating voltage (~strength of the RFQ), in [V/m^2].
omega: angular frequency of the RF wave, in [rad/s].
phi_0: constant phase offset wrt. bunch center (z=0), in [rad].

-
-
- -
-
-__module__ = 'PyHEADTAIL.rfq.rfq'
-
- -
-
-_abc_impl = <_abc_data object>
-
- -
-
-track(beam)
-

The formula used to describe the longitudinal kick is given by

Delta p_z = -(x^2 - y^2) * (e v_2 / (beta c)) * sin(omega z / (beta c) + phi_0).
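A stand-alone numpy sketch of this kick (not the library implementation; the division by beam.p0 to obtain the normalized dp coordinate is an assumption of this sketch):

    import numpy as np
    from scipy.constants import c, e

    def rfq_longitudinal_kick_sketch(beam, v_2, omega, phi_0):
        # Delta p_z per the formula above, normalised to the reference
        # momentum p0 so that it can be applied to the dp coordinate
        # (normalisation by p0 is an assumption of this sketch).
        amplitude = e * v_2 / (beam.beta * c)
        beam.dp += (-(beam.x**2 - beam.y**2) * amplitude *
                    np.sin(omega * beam.z / (beam.beta * c) + phi_0)
                    / beam.p0)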

-
-
-
-
- -
- -
-
-class PyHEADTAIL.rfq.rfq.RFQTransverseDetuner(v_2, omega, phi_0, beta_x_RFQ, beta_y_RFQ)
-

Collection class to contain/manage the segment-wise defined RFQ elements RFQTransverseDetunerSegment acting on the betatron tunes (detuner model of the RFQ). This is a pure Python class and it derives from the DetunerCollection class defined in the module PyHEADTAIL.trackers.detuners.

-
-
-__abstractmethods__ = frozenset({})
-
- -
-
-__init__(v_2, omega, phi_0, beta_x_RFQ, beta_y_RFQ)
-
-
An RFQ element is fully characterized by the parameters
v_2: quadrupolar expansion coefficient of the accelerating voltage (~strength of the RFQ), in [V/m^2]. One-turn value.
omega: angular frequency of the RF wave, in [rad/s].
phi_0: constant phase offset wrt. bunch center (z=0), in [rad].

beta_x_RFQ and beta_y_RFQ are the beta functions at the position of the RFQ, although in the detuner model of the RFQ, the RFQ should not actually be understood as being localized.

-
- -
-
-__module__ = 'PyHEADTAIL.rfq.rfq'
-
- -
-
-_abc_impl = <_abc_data object>
-
- -
-
-generate_segment_detuner(dmu_x, dmu_y, **kwargs)
-

Instantiate a RFQTransverseSegmentDetuner for the specified segment of the accelerator ring. Note that the bare betatron phase advances over the current segment, dmu_x and dmu_y, are given as relative values, i.e. in units of the overall phase advance around the whole accelerator (the betatron tune). The method is called by the TransverseMap object which manages the creation of a detuner for every defined segment.

-
- -
- -
-
-class PyHEADTAIL.rfq.rfq.RFQTransverseDetunerSegment(dapp_xz, dapp_yz, omega, phi_0)
-

Python implementation of the RFQ element acting directly on the particles’ betatron tunes (i.e. RFQ detuner model).

-
-
-__dict__ = mappingproxy({'__module__': 'PyHEADTAIL.rfq.rfq', '__doc__': "Python implementation of the RFQ element acting directly on the\n particles' betatron tunes (i.e. RFQ detuner model).\n ", '__init__': <function RFQTransverseDetunerSegment.__init__>, 'detune': <function RFQTransverseDetunerSegment.detune>, '__dict__': <attribute '__dict__' of 'RFQTransverseDetunerSegment' objects>, '__weakref__': <attribute '__weakref__' of 'RFQTransverseDetunerSegment' objects>})
-
- -
-
-__init__(dapp_xz, dapp_yz, omega, phi_0)
-

Creates an instance of the RFQTransverseDetunerSegment class. The RFQ is characterized by
omega: angular frequency of the RF wave, in [rad/s].
phi_0: constant phase offset wrt. bunch center (z=0), in [rad].
dapp_xz: strength of detuning in the horizontal plane, scaled to the relative bare betatron phase advance in x.
dapp_yz: strength of detuning in the vertical plane, scaled to the relative bare betatron phase advance in y.

-
-
-
-
- -
-
-__module__ = 'PyHEADTAIL.rfq.rfq'
-
- -
-
-__weakref__
-

list of weak references to the object (if defined)

-
- -
-
-detune(beam)
-

Calculates for each particle its betatron detuning dQ_x, dQ_y according to formulae taken from [1] (see above):

dQ_x = dapp_xz / p * cos(omega z / (beta c) + phi_0)
dQ_y = dapp_yz / p * cos(omega z / (beta c) + phi_0)

with

dapp_xz = beta_x_RFQ * v_2 * e / (2 Pi * omega)
dapp_yz = -beta_y_RFQ * v_2 * e / (2 Pi * omega)

and p the particle momentum, p = (1 + dp) p0. (Probably, it would make sense to approximate p by p0 for better performance.)

-
- -
- -
-
-class PyHEADTAIL.rfq.rfq.RFQTransverseKick(v_2, omega, phi_0)
-

Python implementation of the RFQ element acting on the particles’ transverse coordinates (i.e. localized kick model).

-
-
-__abstractmethods__ = frozenset({})
-
- -
-
-__init__(v_2, omega, phi_0)
-

An RFQ element is fully characterized by the parameters
v_2: quadrupolar expansion coefficient of the accelerating voltage (~strength of the RFQ), in [V/m^2].
omega: angular frequency of the RF wave, in [rad/s].
phi_0: constant phase offset wrt. bunch center (z=0), in [rad].

-
-
- -
-
-__module__ = 'PyHEADTAIL.rfq.rfq'
-
- -
-
-_abc_impl = <_abc_data object>
-
- -
-
-track(beam)
-

The formula that describes the transverse kick experienced by an ultra-relativistic particle traversing the RFQ longitudinally is based on the thin-lens approximation:

Delta p_x = -x * (2 e v_2 / omega) * cos(omega z / (beta c) + phi_0),
Delta p_y =  y * (2 e v_2 / omega) * cos(omega z / (beta c) + phi_0).

-
-
-
-
- -
- -
-
- - -
-
-
- -
-
\ No newline at end of file
diff --git a/docs/PyHEADTAIL.spacecharge.html b/docs/PyHEADTAIL.spacecharge.html
deleted file mode 100644
index 180385fd..00000000
--- a/docs/PyHEADTAIL.spacecharge.html
+++ /dev/null
@@ -1,400 +0,0 @@
-
-
-
- -
-

PyHEADTAIL.spacecharge package

-
-

Submodules

-
-
-

PyHEADTAIL.spacecharge.pypic_factory module

-
-
-

PyHEADTAIL.spacecharge.pypic_spacecharge module

-
-
-

PyHEADTAIL.spacecharge.spacecharge module

-

@authors: Adrian Oeftiger
@date: 17/04/2015

-
-
-class PyHEADTAIL.spacecharge.spacecharge.LongSpaceCharge(slicer, pipe_radius, length, n_slice_sigma=3, *args, **kwargs)
-

Contains longitudinal space charge (SC) via Chao’s expression:

-

dp’ = - e^2 * g * lambda’(z) / (2 * pi * eps_0 * gamma^2 * p_0)

-

cf. the original HEADTAIL version.

-
-
-__abstractmethods__ = frozenset({})
-
- -
-
-__init__(slicer, pipe_radius, length, n_slice_sigma=3, *args, **kwargs)
-

Arguments:
- pipe_radius is the radius of the vacuum pipe in metres.
- length is an s interval (in metres) along which the SC force is integrated. Usually you want to set this to circumference in conjunction with the LongitudinalOneTurnMap RFSystems.
- n_slice_sigma indicates the number of slices taken as a sigma for the Gaussian kernel that smoothens the line charge density derivative (see SliceSet.lambda_prime_bins for more info).

-
- -
-
-__module__ = 'PyHEADTAIL.spacecharge.spacecharge'
-
- -
-
-_abc_impl = <_abc_data object>
-
- -
-
-_gfactor0(beam)
-

Geometry factor for circular vacuum pipe.

-
- -
-
-static _prefactor(beam)
-
- -
-
-directSC = 0.67
-
- -
-
-make_force(beam)
-

Return the electric force field due to space charge of the given SliceSet instance as a function of z in units of Coul*Volt/metre.

-
- -
-
-make_potential(beam)
-

Return the electric potential energy due to space charge of the given SliceSet instance as a function of z in units of Coul*Volt.

-
- -
-
-track(beam)
-

Add the longitudinal space charge contribution to the beam’s dp kick.

-
- -
- -
-
-class PyHEADTAIL.spacecharge.spacecharge.TransverseGaussianSpaceCharge(slicer, length, sig_check=True, other_efieldn=None)
-

Contains transverse space charge for a Gaussian configuration. Applies the Bassetti-Erskine electric field expression slice-wise for each particle centred around the slice centre.

-
-
-__abstractmethods__ = frozenset({})
-
- -
-
-__init__(slicer, length, sig_check=True, other_efieldn=None)
-

Arguments:
- slicer determines the slicing parameters for the slices over which the Bassetti-Erskine electric field expression is applied; given a slicer with n_slices == 1, you can apply a longitudinally averaged kick over the whole beam.
- length is an s interval along which the space charge force is integrated.
- sig_check exchanges x and y quantities for sigma_x < sigma_y and applies the round beam formula for sigma_x == sigma_y. sig_check defaults to True and should not usually be False.
- other_efieldn can be used to use a different implementation of the charge-normalised electric field expression (there are four different implementations to choose from in this class: _efieldn_mit, _efieldn_mitmod, _efieldn_koelbig, _efieldn_pyecloud; in order of computational time consumption).

-
- -
-
-__module__ = 'PyHEADTAIL.spacecharge.spacecharge'
-
- -
-
-_abc_impl = <_abc_data object>
-
- -
-
-static _efieldn_koelbig(x, y, sig_x, sig_y)
-

The charge-normalised electric field components of a two-dimensional Gaussian charge distribution according to M. Bassetti and G. A. Erskine in CERN-ISR-TH/80-06.

-

Return (E_x / Q, E_y / Q).

-

Assumes sig_x > sig_y and mean_x == 0 as well as mean_y == 0. For convergence reasons of the erfc, use only x > 0 and y > 0.

-

Uses CERN library from K. Koelbig.

-
- -
-
-static _efieldn_mit(x, y, sig_x, sig_y)
-

The charge-normalised electric field components of a two-dimensional Gaussian charge distribution according to M. Bassetti and G. A. Erskine in CERN-ISR-TH/80-06.

-

Return (E_x / Q, E_y / Q).

-

Assumes sig_x > sig_y and mean_x == 0 as well as mean_y == 0. For convergence reasons of the erfc, use only x > 0 and y > 0.

-

Uses FADDEEVA C++ implementation from MIT (via SciPy >= 0.13.0).
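For reference, a self-contained sketch of the Bassetti-Erskine expression using scipy.special.wofz (the Faddeeva function); it mirrors the stated assumptions (sig_x > sig_y, evaluated for x > 0 and y > 0) and is not the optimised library code:

    import numpy as np
    from scipy.constants import epsilon_0
    from scipy.special import wofz

    def efieldn_sketch(x, y, sig_x, sig_y):
        # (E_x/Q, E_y/Q) of a 2D Gaussian charge distribution,
        # Bassetti & Erskine, CERN-ISR-TH/80-06; assumes sig_x > sig_y
        # and x > 0, y > 0.
        s = np.sqrt(2 * (sig_x**2 - sig_y**2))
        factor = 1. / (2 * epsilon_0 * np.sqrt(np.pi) * s)
        w1 = wofz((x + 1j * y) / s)
        w2 = wofz((x * sig_y / sig_x + 1j * y * sig_x / sig_y) / s)
        expo = np.exp(-x**2 / (2 * sig_x**2) - y**2 / (2 * sig_y**2))
        E = factor * (w1 - expo * w2)     # E = E_y + 1j * E_x
        return E.imag, E.real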

-
- -
-
-static _efieldn_mitmod(x, y, sig_x, sig_y)
-

The charge-normalised electric field components of a two-dimensional Gaussian charge distribution according to M. Bassetti and G. A. Erskine in CERN-ISR-TH/80-06.

-

Return (E_x / Q, E_y / Q).

-

Assumes sig_x > sig_y and mean_x == 0 as well as mean_y == 0. For convergence reasons of the erfc, use only x > 0 and y > 0.

-

Uses erfc C++ implementation from MIT (via SciPy >= 0.13.0) and calculates wofz (FADDEEVA function) explicitly.

-
- -
-
-static _efieldn_pyecloud(xin, yin, sigmax, sigmay)
-

The charge-normalised electric field components of a two-dimensional Gaussian charge distribution according to M. Bassetti and G. A. Erskine in CERN-ISR-TH/80-06.

-

Return (E_x / Q, E_y / Q).

-

Effective copy of PyECLOUD.BassErsk.BassErsk implementation.

-
- -
-
-static _efieldn_round(x, y, sig_r)
-

Return (E_x / Q, E_y / Q) for a round distribution with sigma_x == sigma_y == sig_r.

-
- -
-
-static _sig_sqrt(sig_x, sig_y)
-
- -
-
-absolute_threshold = 1e-10
-
- -
-
-static add_sigma_check(efieldn)
-

Wrapper for a normalised electric field function.

-

Adds the following actions before calculating the field:
- exchange x and y quantities if sigma_x < sigma_y
- apply round beam field formula when sigma_x is close to sigma_y

-
- -
-
-get_efieldn(xr, yr, mean_x, mean_y, sig_x, sig_y)
-

The charge-normalised electric field components of a two-dimensional Gaussian charge distribution according to M. Bassetti and G. A. Erskine in CERN-ISR-TH/80-06.

-

Return (E_x / Q, E_y / Q).

-
- -
-
-ratio_threshold = 0.001
-

Threshold for absolute transverse beam size difference below which the beam is assumed to be round: abs(sig_y - sig_x) < absolute_threshold ==> round beam

-
- -
-
-track(beam)
-

Add the transverse space charge contribution to the beam’s transverse kicks.

-
- -
-
-static wfun(z)
-

FADDEEVA function as implemented in PyECLOUD, vectorised.

-
- -
- -
-
-

PyHEADTAIL.spacecharge.transverse_spacecharge module

-
-
-class PyHEADTAIL.spacecharge.transverse_spacecharge.TransverseSpaceCharge(L_interaction, slicer, pyPICsolver, flag_clean_slices=False, *args, **kwargs)
-
-
-__abstractmethods__ = frozenset({})
-
- -
-
-__init__(L_interaction, slicer, pyPICsolver, flag_clean_slices=False, *args, **kwargs)
-

Initialize self. See help(type(self)) for accurate signature.

-
- -
-
-__module__ = 'PyHEADTAIL.spacecharge.transverse_spacecharge'
-
- -
-
-_abc_impl = <_abc_data object>
-
- -
-
-get_beam_x(beam)
-
- -
-
-get_beam_y(beam)
-
- -
-
-track(beam)
-

Perform tracking of beam through this Element.

-
- -
- -
-
- - -
-
-
- -
-
\ No newline at end of file
diff --git a/docs/PyHEADTAIL.trackers.html b/docs/PyHEADTAIL.trackers.html
deleted file mode 100644
index 121dd02d..00000000
--- a/docs/PyHEADTAIL.trackers.html
+++ /dev/null
@@ -1,1659 +0,0 @@
-
-
-
- -
-

PyHEADTAIL.trackers package

-
-

Submodules

-
-
-

PyHEADTAIL.trackers.detuners module

-

Module to describe devices/effects, such as chromaticity or octupole magnets, leading to an incoherent detuning of the particles in the beam. A detuner is (in general) present along the full circumference of the accelerator and the detuning is applied proportionally along the ring.

-

The structure of this module is such that there is a DetunerCollection object for each type of detuning effect present in the accelerator. It provides a description of the detuning along the full circumference. The accelerator is divided into segments (1 or more) and the DetunerCollection can create and store a SegmentDetuner object of the given type of detuning for each of these segments. A SegmentDetuner object has a detune(beam) method that defines how the phase advance of each particle in the beam is changed according to the formula describing the effect.

-

@author Kevin Li, Michael Schenk, Adrian Oeftiger
@date 23. June 2014
@brief Module to describe elements/effects in an accelerator leading to an incoherent detuning.

-
-

@copyright CERN

-
-
-class PyHEADTAIL.trackers.detuners.AmplitudeDetuning(app_x, app_y, app_xy)
-

Collection class to contain/manage the segment-wise defined amplitude detuning elements (octupoles). They are stored in the self.segment_detuners list.

-
-
-__abstractmethods__ = frozenset({})
-
- -
-
-__init__(app_x, app_y, app_xy)
-

Return an instance of the AmplitudeDetuning DetunerCollection class. The coefficients app_x, app_y, app_xy are the detuning strengths (one-turn values). Note that the convention used here is such that they are NOT normalized to the reference momentum beam.p0. The normalization to beam.p0 is done only in the detune(beam) method of the AmplitudeDetuningSegment.

-
- -
-
-__module__ = 'PyHEADTAIL.trackers.detuners'
-
- -
-
-_abc_impl = <_abc_data object>
-
- -
-
-classmethod from_octupole_currents_LHC(i_focusing, i_defocusing)
-

Calculate the constants of proportionality app_x, app_y and -app_xy (== app_yx) for the amplitude detuning introduced by the -LHC octupole magnets (aka. LHC Landau octupoles) from the -electric currents i_focusing [A] and i_defocusing [A] flowing -through the magnets. The maximum current is given by -i_max = +/- 550 [A]. The values app_x, app_y, app_xy obtained -from the formulae are proportional to the strength of detuning -for one complete turn around the accelerator, i.e. one-turn -values.

-

The calculation is based on formulae (3.6) taken from ‘The LHC -transverse coupled-bunch instability’ by N. Mounet, EPFL PhD -Thesis, 2012. Values (hard-coded numbers below) are valid for -LHC Landau octupoles before LS1. Beta functions in x and y are -correctly taken into account. Note that here, the values of -app_x, app_y and app_xy are not normalized to the reference -momentum p0. This is done only during the calculation of the -detuning in the corresponding detune method of the -AmplitudeDetuningSegment.

-

More detailed explanations and references on how the formulae -were obtained are given in the PhD thesis (pg. 85ff) cited -above.

-
- -
-
-generate_segment_detuner(dmu_x, dmu_y, **kwargs)
-

Instantiate an AmplitudeDetuningSegment for the specified -segment of the accelerator ring. Note that the bare betatron -phase advances over the current segment, dmu_x and dmu_y, are -given as relative values, i.e. in units of the overall phase -advance around the whole accelerator (the betatron tune).

-
- -
- -
class PyHEADTAIL.trackers.detuners.AmplitudeDetuningSegment(dapp_x, dapp_y, dapp_xy, dapp_yx, alpha_x, beta_x, alpha_y, beta_y)

Detuning object for a segment of the accelerator ring to describe amplitude detuning (introduced by octupoles).

__init__(dapp_x, dapp_y, dapp_xy, dapp_yx, alpha_x, beta_x, alpha_y, beta_y)

Return an instance of an AmplitudeDetuningSegment by passing the coefficients of detuning strength dapp_x, dapp_y, dapp_xy, dapp_yx (scaled to the segment length, NOT normalized to beam.p0 yet). Note that beta_{x,y} are only used to correctly calculate the transverse actions J_{x,y}. Although they have an influence on the strength of detuning, they have no actual effect on the integrated strength of the octupoles (dapp_x, dapp_y, dapp_xy, dapp_yx).

detune(beam)

Linear amplitude detuning formula, usually used for detuning introduced by octupoles. The normalization of dapp_x, dapp_y, dapp_xy, dapp_yx to the reference momentum is done here (compare the documentation of the AmplitudeDetuning class). J_x and J_y denote the horizontal and vertical action of a specific particle, respectively.
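To make the conventions of detune(beam) concrete, a hedged numpy sketch of the linear amplitude detuning step; the action formula J = (u^2 + (alpha*u + beta*u')^2) / (2*beta) and the exact place of the p0 normalization are assumptions read off the docstrings, not the verified source.

    import numpy as np

    def detune_sketch(x, xp, y, yp, p0,
                      dapp_x, dapp_y, dapp_xy, dapp_yx,
                      alpha_x, beta_x, alpha_y, beta_y):
        # single-particle actions J_{x,y}
        Jx = (x**2 + (alpha_x * x + beta_x * xp)**2) / (2. * beta_x)
        Jy = (y**2 + (alpha_y * y + beta_y * yp)**2) / (2. * beta_y)
        # normalization to the reference momentum p0 happens here
        dQ_x = (dapp_x * Jx + dapp_xy * Jy) / p0
        dQ_y = (dapp_y * Jy + dapp_yx * Jx) / p0
        return dQ_x, dQ_y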
class PyHEADTAIL.trackers.detuners.Chromaticity(Qp_x, Qp_y)

Collection class to contain/manage the segment-wise defined elements that introduce detuning as a result of chromaticity effects. They are stored in the self.segment_detuners list.

__init__(Qp_x, Qp_y)

Return an instance of a Chromaticity DetunerCollection class. The Qp_{x,y} are scalars or lists (or tuples, numpy arrays) containing the first, second, third, ... order chromaticity coefficients (one-turn values), aka Q'_{x,y}, Q''_{x,y} (Q-prime, Q-double-prime), ...

generate_segment_detuner(dmu_x, dmu_y, **kwargs)

Instantiate a ChromaticitySegment for the specified segment of the accelerator ring. Note that the bare betatron phase advances over the current segment, dmu_x and dmu_y, are given as relative values, i.e. in units of the overall phase advance around the whole accelerator (the betatron tune).
class PyHEADTAIL.trackers.detuners.ChromaticitySegment(dQp_x, dQp_y)

Detuning object for a segment of the accelerator ring to describe the detuning introduced by chromaticity effects.

__init__(dQp_x, dQp_y)

Return an instance of a ChromaticitySegment. The dQp_{x,y} are scalars or lists (or tuples, numpy arrays) containing the first, second, third, ... order chromaticity coefficients scaled to the relative bare phase advance.

static _make_calc_detuning(Qp)

Define the polynomial used to calculate the chromaticity up to higher orders. The polynomials are explicitly defined up to order 3 for performance reasons (order 3 is the highest usually used). Above order 3, numpy's polyval is used to evaluate the polynomial. np.polynomial's polyval is considerably slower for low-order polynomials.

detune(beam)

Calculate for every particle the change in phase advance (detuning) dQ_{x,y} caused by chromaticity effects.
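A sketch of the order-dependent evaluation strategy described for _make_calc_detuning: explicit (Horner) polynomials up to order 3, np.polyval beyond. Whether the chromaticity coefficients carry additional factors (e.g. factorials) is not specified by the docstring, so the plain power series below is an assumption.

    import numpy as np

    def make_calc_detuning(Qp):
        '''Return a function dp -> dQ for the chromaticity
        coefficients Qp = (Q', Q'', Q''', ...).'''
        if len(Qp) == 1:
            return lambda dp: Qp[0] * dp
        elif len(Qp) == 2:
            return lambda dp: dp * (Qp[0] + dp * Qp[1])
        elif len(Qp) == 3:
            return lambda dp: dp * (Qp[0] + dp * (Qp[1] + dp * Qp[2]))
        else:
            # np.polyval wants the highest order first; constant term is 0
            return lambda dp: np.polyval(list(Qp[::-1]) + [0.], dp)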
class PyHEADTAIL.trackers.detuners.DetunerCollection

Abstract base class for a collection of SegmentDetuner objects (see above). A detuner collection object defines the detuning for one complete turn around the accelerator ring for the given detuning element. Hence, the strength of detuning must be specified by the user as integrated over one turn. The accelerator ring is divided into a number of segments (often there is just 1). To apply the detuning segment-wise, a SegmentDetuner object is instantiated for each of the accelerator segments and the detuning strength is chosen to be proportional to the bare betatron phase advance per segment (normalized to the respective tunes of the accelerator). The instantiation of SegmentDetuner objects is handled by the generate_segment_detuner method. This method is called by the TransverseSegmentMap object as it contains the information of how the segments of the accelerator are defined by the user. The SegmentDetuner objects are stored in the segment_detuners list (in order of segments along the ring) within the DetunerCollection.

Since the DetunerCollection is implemented as a sequence, the individual SegmentDetuner objects stored by a DetunerCollection can be accessed via square brackets [i] where i is the index of the segment.

__getitem__(key)

__len__()

abstract generate_segment_detuner(dmu_x, dmu_y, **kwargs)

Instantiate a SegmentDetuner of the given type for a segment of the accelerator ring. Note that the bare betatron phase advances over the current segment, dmu_x and dmu_y, are given as relative values, i.e. in units of the overall phase advance around the whole accelerator (the betatron tune). The method is called by the TransverseMap object which manages the creation of a detuner for every defined segment. The kwargs are used e.g. to pass the beta functions from the TransverseMap where necessary (e.g. for AmplitudeDetuning).
class PyHEADTAIL.trackers.detuners.SegmentDetuner

Abstract base class for detuning elements and effects defined only for a segment of the accelerator ring (NB the segment can also be given by the full circumference). Every detuner element/effect inheriting from this class must implement the detune(beam) method to describe the change in phase advance for each particle of the beam.

abstract detune(beam)

PyHEADTAIL.trackers.libTPSA module

Copyright CERN 2014
Author: Adrian Oeftiger, oeftiger@cern.ch

This module provides a two-dimensional Truncated Power Series up to first order, as suited for algebraic Jacobian determination for two given variables.

The TPS class supports elementary operations such as +, -, /, * (and true division according to "from __future__ import division").

Functions such as sin, cos, exp, log etc. are envisaged to be implemented in a later version.
class PyHEADTAIL.trackers.libTPSA.TPS(vector=array([0, 1, 0]))

Truncated Power Series which obeys a TPS algebra, cf. "DIFFERENTIAL ALGEBRAIC DESCRIPTION OF BEAM DYNAMICS TO VERY HIGH ORDERS" by M. Berz, Particle Accelerators, 1989, Vol. 24, pp. 109-124.

__init__(vector=array([0, 1, 0]))

classmethod get_instance(vector)

__add__(other)

this TPS + (other TPS or scalar)

__radd__(other)

(other TPS or scalar) + this TPS

__sub__(other)

this TPS - other TPS

__rsub__(other)

other TPS - this TPS

__mul__(other)

this TPS * (other TPS or scalar)

__rmul__(other)

(other TPS or scalar) * this TPS

__div__(other)

this TPS / (other TPS or scalar)

__rdiv__(other)

(other TPS or scalar) / this TPS

__truediv__(other)

this TPS / (other TPS or scalar)

__rtruediv__(other)

(other TPS or scalar) / this TPS

__neg__()

-(this TPS)

__eq__(other)

this TPS == other TPS, or this TPS real value == other scalar

__ne__(other)

this TPS != other TPS

invert()

1 / (this TPS)

property real

Zero-order entry, the main value.

property diff

First-order entries, the first differential values.

getvector()

Returns all TPS coefficients in an np.ndarray.
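A hedged usage sketch of first-order TPS arithmetic for Jacobian entries. The vector layout [value, d/dx, d/dy] is inferred from the default vector=array([0, 1, 0]) and is an assumption:

    import numpy as np
    from PyHEADTAIL.trackers.libTPSA import TPS

    x = TPS(np.array([2., 1., 0.]))  # value 2, unit derivative w.r.t. x
    y = TPS(np.array([3., 0., 1.]))  # value 3, unit derivative w.r.t. y

    f = x * y + 2. * x               # f(x, y) = x*y + 2*x
    print(f.real)                    # value: 2*3 + 2*2 = 10
    print(f.diff)                    # gradient [df/dx, df/dy] = [5, 2]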
class PyHEADTAIL.trackers.libTPSA.TPS4(vector=array([0, 1, 0, 0, 0]))

1D4: 1st order and 4 variables.

__init__(vector=array([0, 1, 0, 0, 0]))

__mul__(other)

this TPS * (other TPS or scalar)

property diff

Returns the first-order entries, the first differential values.

invert()

1 / (this TPS)

PyHEADTAIL.trackers.libintegrators module

Copyright CERN 2014
Author: Adrian Oeftiger, oeftiger@cern.ch

This module provides various numerical integration methods for Hamiltonian vector fields on (currently two-dimensional) phase space. The integrators are separated according to symplecticity. The method is_symple() is provided to check for symplecticity of a given integration method; it may be used generically for any integration method with the described signature.

PyHEADTAIL.trackers.libintegrators.is_symple(integrator)

Returns whether the given integrator is symplectic w.r.t. a certain numerical tolerance (fixed by numpy.allclose). The decision is taken on whether the Jacobian determinant remains 1 (after a time step of 1 while modelling a harmonic oscillator). The integrator input should be a function taking the argument signature (x_initial, p_initial, timestep, H_p, H_x), where the first three arguments are numbers and H_p(p) and H_x(x) are functions taking one argument.
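A sketch of the is_symple check described above, using central finite differences for the Jacobian instead of the TPS algebra the module advertises; that the integrator returns the pair (x_new, p_new) is an assumption.

    import numpy as np

    def is_symple_sketch(integrator, eps=1e-6):
        # harmonic oscillator: H = p^2/2 + x^2/2, so H_p = p and H_x = x
        H_p = lambda p: p
        H_x = lambda x: x
        step = lambda x, p: integrator(x, p, 1., H_p, H_x)

        x0, p0 = 0.3, -0.2
        # Jacobian of the time-1 map by central differences
        dxdx = (step(x0 + eps, p0)[0] - step(x0 - eps, p0)[0]) / (2 * eps)
        dpdx = (step(x0 + eps, p0)[1] - step(x0 - eps, p0)[1]) / (2 * eps)
        dxdp = (step(x0, p0 + eps)[0] - step(x0, p0 - eps)[0]) / (2 * eps)
        dpdp = (step(x0, p0 + eps)[1] - step(x0, p0 - eps)[1]) / (2 * eps)
        return np.allclose(dxdx * dpdp - dxdp * dpdx, 1.)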
class PyHEADTAIL.trackers.libintegrators.non_symple

Contains non-symplectic integrator algorithms. The integrator input should be a function taking the argument signature (x_initial, p_initial, timestep, H_p, H_x). H_x(x) = dH/dx is a function of x only, while H_p(p) = dH/dp is a function of p only.

static Euler(x_initial, p_initial, timestep, H_p, H_x)

Non-symplectic one-dimensional Euler O(T^2) algorithm.

static RK2(x_initial, p_initial, timestep, H_p, H_x)

Non-symplectic one-dimensional Runge-Kutta 2 O(T^3) algorithm.

static RK4(x_initial, p_initial, timestep, H_p, H_x)

Non-symplectic one-dimensional Runge-Kutta 4 O(T^5) algorithm.
class PyHEADTAIL.trackers.libintegrators.symple

Contains symplectic integrator algorithms. The integrator input should be a function taking the argument signature (x_initial, p_initial, timestep, H_p, H_x). It is assumed that the Hamiltonian is separable into a kinetic part T(p) (giving rise to H_p(p) = dH/dp, which only depends on the conjugate momentum p) and a potential part V(x) (giving rise to H_x(x) = dH/dx, which only depends on the spatial coordinate x).

static Euler_Cromer(x_initial, p_initial, timestep, H_p, H_x)

Symplectic one-dimensional Euler-Cromer O(T^2) algorithm. This Euler-Cromer is explicit! Keyword: drift-kick mechanism.

static Ruth(x_initial, p_initial, timestep, H_p, H_x)

Symplectic one-dimensional Ruth and Forest O(T^5) algorithm. Harvard: 1992IAUS..152..407Y

static Verlet(x_initial, p_initial, timestep, H_p, H_x)

Symplectic one-dimensional (velocity) Verlet O(T^3) algorithm. Keyword: leapfrog mechanism.
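For illustration, a minimal sketch of a symplectic Euler-Cromer step in the documented signature (kick first, then drift; the ordering in the actual implementation is an assumption):

    def euler_cromer(x_initial, p_initial, timestep, H_p, H_x):
        # kick: momentum update with the force -dH/dx at the old position
        p_new = p_initial - timestep * H_x(x_initial)
        # drift: position update with the velocity dH/dp at the new momentum
        x_new = x_initial + timestep * H_p(p_new)
        return x_new, p_new

Plugged into the is_symple sketch above, this step yields a unit Jacobian determinant.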

PyHEADTAIL.trackers.longitudinal_tracking module

@author Kevin Li, Adrian Oeftiger, Michael Schenk
@date 03.10.2014
@copyright CERN
class PyHEADTAIL.trackers.longitudinal_tracking.Drift(alpha_array, length, shrinkage_p_increment=0, *args, **kwargs)

The drift (i.e. Delta z) of the particle's z coordinate is given by the (separable) Hamiltonian derived by dp (defined by (p - p0) / p0).

self.length is the drift length. self.shrinkage_p_increment being non-zero includes the shrinking ratio eta_{n+1} / eta_n (see MacLachlan 1989 in FN-529); it is usually neglected. [Otherwise it may continuously be adapted by the user according to the total momentum increment.] If it is not neglected, the beta factor ratio would yield

    (eta + Delta eta) / eta = 1 - Delta gamma / (eta^2 * gamma^2)
                            = 1 - p_increment / (gamma^3 * p0)

since p_increment = gamma * m * c / eta * Delta gamma.

__init__(alpha_array, length, shrinkage_p_increment=0, *args, **kwargs)

The length of the momentum compaction factor array /alpha_array/ defines the order of the slippage factor expansion.

track(beam)

Should be decorated by @clean_slices for any inheriting classes changing beam.z.
class PyHEADTAIL.trackers.longitudinal_tracking.Kick(alpha_array, circumference, harmonic, voltage, phi_offset=0, p_increment=0, D_x=0, D_y=0, *args, **kwargs)

The Kick class represents the kick by a single RF element in a ring! The kick (i.e. dp_{n+1} - dp_n) of the particle's dp coordinate is given by the (separable) Hamiltonian derived by z, i.e. the force.

self.p_increment is the momentum step per turn added by this Kick; it can be continuously adjusted externally by the user to reflect different slopes in the dipole field ramp.

self.phi_offset reflects an offset of the cavity's reference system; this can be tweaked externally by the user for simulating RF system ripple and the like. Include the change of flank of the sine curve here explicitly (i.e. pi below transition and 0 above transition).

(self._phi_lock adds to the offset as well but should be used internally in the module (e.g. by RFSystems) for acceleration purposes. It may be used for synchronisation with the momentum updating by self.p_increment via self.calc_phi_0(beam), thus readjusting the zero-crossing of this sinusoidal kick. This requires a convention how to mutually displace the Kick phases to each other w.r.t. their contribution to acceleration.)

__init__(alpha_array, circumference, harmonic, voltage, phi_offset=0, p_increment=0, D_x=0, D_y=0, *args, **kwargs)

D_x, D_y: horizontal and vertical dispersion.

!! Attention !! The user is responsible for making sure the dispersions match the dispersions of the beam which were added in the track() of the last map. This corresponds to the dispersion of the transverse/longitudinal map !following! this Kick (D_x_s1 of the preceding transverse map).

Example:

    dx = np.array([1, 2., 5])  # the dispersions at the TransverseSegmentMaps
    trans_map = TransverseMap(C, segments, ax, bx, dx, ay, by, Q_x, Q_y)
    map_ = [m for m in trans_map] + [LinearMap(alpha_0, C, Q_s, D_x=???)]
    D_x = 1.  # if we place the LinearMap after the transverse maps, we
              # need to make sure the dispersion matches the dispersion
              # added in this transverse map. This corresponds to the
              # dispersion of the first segment of the transverse map!

Or simply: match the dispersion of this Element with the dispersion of the following transverse element.

track(beam)

Should be decorated by @clean_slices for any inheriting classes changing beam.z.

track_with_dispersion(beam)

Subtract the dispersion before computing a new dp, then add the dispersion using the new dp.

track_without_dispersion(beam)
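A hedged sketch of the thin sinusoidal RF kick on dp; the amplitude charge*voltage/(p0*beta*c), the phase convention and the subtraction of the synchronous momentum step are assumptions consistent with the docstrings above, not verified source code.

    import numpy as np
    from scipy.constants import c

    def rf_kick_sketch(z, dp, p0, beta, charge, circumference,
                       harmonic, voltage, phi_offset, p_increment=0.):
        # phase seen by a particle at longitudinal position z
        phi = 2 * np.pi * harmonic * z / circumference + phi_offset
        # energy kick q*V*sin(phi) converted to a relative momentum kick,
        # minus the synchronous step so that dp stays centred
        return dp + charge * voltage * np.sin(phi) / (p0 * beta * c) \
            - p_increment / p0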
class PyHEADTAIL.trackers.longitudinal_tracking.LinearMap(alpha_array, circumference, Q_s, D_x=0, D_y=0, *args, **kwargs)

Linear map represented by a Courant-Snyder transfer matrix. Makes use only of the linear first-order slippage factor eta. Higher orders are manifestly neglected:

    eta(delta = 0) = sum_i eta_i * delta^i === eta_0

where

    eta_0 := 1 / gamma_{tr}^2 - 1 / gamma^2

property Qs

__init__(alpha_array, circumference, Q_s, D_x=0, D_y=0, *args, **kwargs)

Q_s is the synchrotron tune. D_x, D_y are the dispersions in the horizontal and vertical direction.

!! Attention !! The user is responsible for making sure the dispersions match the dispersions of the beam which were added in the track() of the last map. This corresponds to the dispersion of the transverse/longitudinal map !following! this LinearMap (D_x_s1 of the preceding transverse map).

Example:

    dx = np.array([1, 2., 5])  # the dispersions at the TransverseSegmentMaps
    trans_map = TransverseMap(C, segments, ax, bx, dx, ay, by, Q_x, Q_y)
    map_ = [m for m in trans_map] + [LinearMap(alpha_0, C, Q_s, D_x=???)]
    D_x = 1.  # if we place the LinearMap after the transverse maps, we
              # need to make sure the dispersion matches the dispersion
              # added in this transverse map. This corresponds to the
              # dispersion of the first segment of the transverse map!

Or simply: match the dispersion of this Element with the dispersion of the following transverse element.

track(beam)

Contract: advances the longitudinal coordinates of the beam over a full turn / circumference.

track_with_dispersion(beam)

Subtract the dispersion before computing a new dp, then add the dispersion using the new dp.

track_without_dispersion(beam)
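A sketch of the linear one-turn rotation implied above, with the longitudinal 'beta function' beta_z = |eta_0| * C / (2*pi*Q_s) coupling z and dp; the sign conventions are assumptions, and only the symplectic structure (unit determinant) is guaranteed by construction.

    import numpy as np

    def linear_map_sketch(z, dp, Q_s, eta0, circumference):
        omega = 2 * np.pi * Q_s              # phase advance per turn
        beta_z = abs(eta0) * circumference / omega
        c_, s_ = np.cos(omega), np.sin(omega)
        z_new = c_ * z - beta_z * s_ * dp
        dp_new = s_ / beta_z * z + c_ * dp
        return z_new, dp_new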
class PyHEADTAIL.trackers.longitudinal_tracking.LongitudinalMap(alpha_array, *args, **kwargs)

A longitudinal map represents a longitudinal dynamical element (e.g. a kick or a drift...), i.e. an abstraction of a cavity of an RF system etc. Any track method of a longitudinal element should clean the slices from the beam -- use @clean_slices! LongitudinalMap objects can compose a longitudinal one-turn map! Definitions of various orders of the slippage factor eta(delta) for delta = (p - p0) / p0 should be implemented in this class. Any derived objects will access self.eta(delta, gamma).

Note: the momentum compaction factors are defined by the change of radius

    Delta R / R0 = sum_i alpha_i * delta^(i + 1)

hence yielding expressions for the higher slippage factor orders

    Delta w / w0 = sum_i eta_i * delta^(i + 1)

(for the revolution frequency w).

__init__(alpha_array, *args, **kwargs)

The length of the momentum compaction factor array /alpha_array/ defines the order of the slippage factor expansion.

static _eta0(alpha_array, gamma)

eta(dp, gamma)

Depending on the number of entries in self.alpha_array, the according order of eta = sum_i eta_i * delta^i where delta = Delta p / p0 will be included in this gathering function.

Note: Please implement higher slippage factor orders as static methods with name _eta<N> where <N> is the order of delta in eta(delta) and with signature (alpha_array, gamma).

abstract track(beam)

Should be decorated by @clean_slices for any inheriting classes changing beam.z.
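The zeroth order implied by the conventions above reduces to a one-liner (that _eta0 is exactly this expression is an assumption, consistent with the LinearMap docstring):

    def _eta0(alpha_array, gamma):
        # eta_0 = alpha_0 - 1/gamma^2 = 1/gamma_tr^2 - 1/gamma^2
        return alpha_array[0] - gamma**-2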
class PyHEADTAIL.trackers.longitudinal_tracking.LongitudinalOneTurnMap(alpha_array, circumference, *args, **kwargs)

A longitudinal one-turn map tracks over a complete turn. Any inheriting classes guarantee to provide a self.track(beam) method that tracks around the whole ring!

LongitudinalOneTurnMap classes possibly comprise several LongitudinalMap objects.

__init__(alpha_array, circumference, *args, **kwargs)

LongitudinalOneTurnMap objects know their circumference.

abstract track(beam)

Contract: advances the longitudinal coordinates of the beam over a full turn / circumference.
class PyHEADTAIL.trackers.longitudinal_tracking.RFBox(z_left, z_right, alpha_array, length, shrinkage_p_increment=0)

Represents a longitudinal square-well potential.

Particles drift freely along z within the interval (z_left, z_right) according to their momentum. When they hit the box boundary they are reflected with their momentum inverted.

NB: dispersion subtraction is not implemented yet!

__init__(z_left, z_right, alpha_array, length, shrinkage_p_increment=0)

The length of the momentum compaction factor array /alpha_array/ defines the order of the slippage factor expansion.

reflect(beam)

track(beam)

Should be decorated by @clean_slices for any inheriting classes changing beam.z.
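A sketch of the square-well reflection; a single mirror fold per call is assumed (particles overshooting by more than the box width would need repeated folding):

    import numpy as np

    def reflect_sketch(z, dp, z_left, z_right):
        too_left = z < z_left
        too_right = z > z_right
        z = np.where(too_left, 2 * z_left - z, z)
        z = np.where(too_right, 2 * z_right - z, z)
        dp = np.where(too_left | too_right, -dp, dp)
        return z, dp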
class PyHEADTAIL.trackers.longitudinal_tracking.RFSystems(circumference, harmonic_list, voltage_list, phi_offset_list, alpha_array, gamma_reference, p_increment=0, phase_lock=True, shrink_transverse=True, shrink_longitudinal=False, D_x=0, D_y=0, charge=None, mass=None, *args, **kwargs)

With one RFSystems object in the ring layout (with all Kick objects located at the same longitudinal position), the longitudinal separatrix function is exact and makes a valid local statement about stability!

property Q_s

__init__(circumference, harmonic_list, voltage_list, phi_offset_list, alpha_array, gamma_reference, p_increment=0, phase_lock=True, shrink_transverse=True, shrink_longitudinal=False, D_x=0, D_y=0, charge=None, mass=None, *args, **kwargs)

The first entry in harmonic_list, voltage_list and phi_offset_list defines the parameters for the one accelerating Kick object (i.e. the accelerating RF system).

For several accelerating Kick objects one would have to extend this class and settle for the relative phases between the Kick objects! (For one accelerating Kick object, all the other Kick objects' zero crossings are displaced by the negative phase shift induced by the accelerating Kick.)

The length of the momentum compaction factor array alpha_array defines the order of the slippage factor expansion. (See the LongitudinalMap class for further details.)

RFSystems comprises a half-circumference drift, then all the kicks by the RF systems in one location, then the remaining half-circumference drift. This Verlet algorithm ("leap-frog", featuring O(n_turn^2) as opposed to symplectic Euler-Cromer with O(n_turn)) makes sure that the longitudinal phase space is read out in a symmetric way (otherwise phase space would have to be tilted at the entrance or exit of the cavity / kick location! cf. discussions with Christian Carli).

The boolean parameter shrink_longitudinal determines whether the shrinkage ratio beta_{n+1} / beta_n should be taken into account during the second Drift. (See the Drift class for further details.)

The boolean parameter shrink_transverse allows for transverse emittance cooling from acceleration.

Arguments:
- self.p_increment is the momentum step per turn of the synchronous particle; it can be continuously adjusted to reflect different slopes in the dipole magnet strength ramp. (See the Kick class for further details.)
- phase_lock == True means all phi_offsets are given w.r.t. the fundamental kick, adjusted at set-up time.
- phase_lock == False means all phi_offsets are absolute. In this case take care about all Kick.p_increment attributes -- highly non-trivial, as all other p_increment functionality in RFSystems is broken. So take care, you're on your own! :-)
- D_x, D_y: horizontal and vertical dispersion. These arguments are passed to the Kick class. Because both kicks are applied consecutively, the dispersion will be the same for both kicks and it is therefore sufficient to specify only one dispersion. The dispersion must match the dispersion of the following transverse map. See the docstring of the Kick class for a more detailed description.

_phaselock(gamma, charge)

Put all _kicks other than the accelerating kick to zero phase difference w.r.t. the accelerating kick. Attention: Make sure the p_increment of each non-accelerating kick is set to 0 (assuming phi_offset == 0, otherwise adapt!).

static _shrink_transverse_emittance(beam, geo_emittance_factor)

Account for the transverse geometrical emittance shrinking due to acceleration cooling.

property accelerating_kick

non-existent anymore!

clean_buckets()

Erases all RFBucket records of this RFSystems instance. Any change of the Kick parameters should entail calling clean_buckets in order to update the Hamiltonian etc.

property elements

non-existent anymore!

property fundamental_kick

non-existent anymore!

get_bucket(bunch=None, gamma=None, mass=None, charge=None, *args, **kwargs)

Return an RFBucket instance which contains all information and all physical parameters of the current longitudinal RF configuration. (Factory method)

Use for plotting or obtaining the Hamiltonian etc.

Attention: For the moment it is assumed that only the accelerating kick (defined by the first entry in the parameter lists) has a non-zero p_increment. (See RFSystems.p_increment.)

Arguments: either give a bunch or the three parameters (gamma, mass, charge) explicitly to return a bucket defined by these.

property harmonics

List of Kick harmonics. ONLY use this interface in RFSystems to access and modify any Kick harmonic. (Otherwise the get_bucket functionality is broken: clean_buckets will not be called if not using this interface.)

property kicks

non-existent anymore!

property p_increment

The increment in momentum of the accelerating Kick, i.e. defined by the first entry in the RF parameter lists. ONLY use this interface in RFSystems to access and modify the accelerating Kick.p_increment. (Otherwise the get_bucket functionality is broken: clean_buckets will not be called if not using this interface.)

property phi_offsets

List of Kick phi_offsets. ONLY use this interface in RFSystems to access and modify any Kick phi_offset. (Otherwise the get_bucket functionality is broken: clean_buckets will not be called if not using this interface.)

phi_s(gamma, charge)

pop_kick(index)

Remove a Kick instance from this RFSystems instance and return the removed Kick instance.
:param index: the index according to the defining lists voltages, harmonics, phi_offsets.

Note: can only remove kicks with index != 0. The accelerating / fundamental kick cannot be removed.

property rfbucket

non-existent anymore!

set_harmonic_list(harmonic_list)

non-existent anymore!

set_phi_offset_list(phi_offset_list)

non-existent anymore!

set_voltage_list(voltage_list)

non-existent anymore!

track(beam)

Contract: advances the longitudinal coordinates of the beam over a full turn / circumference.

track_no_transverse_shrinking(beam)

track_transverse_shrinking(beam)

property voltages

List of Kick voltages. ONLY use this interface in RFSystems to access and modify any Kick voltage. (Otherwise the get_bucket functionality is broken: clean_buckets will not be called if not using this interface.)
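A hedged usage sketch of RFSystems for a single-RF ring; all numbers below are made-up illustration values, not machine parameters.

    from scipy.constants import e, m_p
    from PyHEADTAIL.trackers.longitudinal_tracking import RFSystems

    C = 6911.  # circumference [m], illustrative
    rf = RFSystems(circumference=C,
                   harmonic_list=[4620], voltage_list=[4.5e6],
                   phi_offset_list=[0.], alpha_array=[1.9e-3],
                   gamma_reference=27.7, p_increment=0.,
                   charge=e, mass=m_p)

    # bucket = rf.get_bucket(gamma=27.7, mass=m_p, charge=e)
    # rf.track(beam)  # advances beam.z and beam.dp by one turn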
PyHEADTAIL.trackers.rf_bucket module

PyHEADTAIL.trackers.simple_long_tracking module
PyHEADTAIL.trackers.transverse_tracking module

@author Kevin Li, Michael Schenk, Stefan Hegglin
@date 07. January 2014
@brief Description of the transport of transverse phase spaces.
@copyright CERN

class PyHEADTAIL.trackers.transverse_tracking.TransverseMap(s, alpha_x, beta_x, D_x, alpha_y, beta_y, D_y, accQ_x, accQ_y, detuners=[], *args, **kwargs)

Collection class for TransverseSegmentMap objects. This class is used to define a one-turn map for transverse particle tracking. An accelerator ring is divided into segments (1 or more). They are defined by the user with the array s containing the positions of all the segment boundaries. The TransverseMap stores all the relevant parameters (optics) at each segment boundary. The first boundary of the first segment is referred to as the injection point. At instantiation of the TransverseMap, a TransverseSegmentMap object is created for each segment of the accelerator and appended to the list self.segment_maps. When generating the TransverseSegmentMaps, the influence of incoherent detuning by effects defined in the trackers.detuners module is included and the corresponding SegmentDetuner objects are generated on the fly. Their strength of detuning is distributed proportionally along the accelerator circumference. Note that the TransverseMap only knows all the relevant optics parameters needed to generate the TransverseSegmentMaps. It is not capable of tracking particles. The transport mechanism of particles in the transverse plane is entirely implemented in the TransverseSegmentMap class. Since the TransverseMap is implemented to act as a sequence, the instances of the TransverseSegmentMap objects (stored in self.segment_maps) can be accessed using the notation TransverseMap(...)[i] (with i the index of the accelerator segment).
__getitem__(key)

__init__(s, alpha_x, beta_x, D_x, alpha_y, beta_y, D_y, accQ_x, accQ_y, detuners=[], *args, **kwargs)

Create a one-turn map that manages the transverse tracking for each of the accelerator segments defined by s.

- s is the array of positions defining the boundaries of the segments for one turn. The first element in s must be zero and the last element must be equal to the accelerator circumference C.
- accQ_{x,y} are arrays with the accumulating phase advance in units of 2 pi (i.e. mu_{x,y} / 2 pi) at each segment boundary. The respective last entry gives the betatron tune Q_{x,y}. Note: instead of arrays of length len(s) it is possible to provide solely the scalar one-turn betatron tune Q_{x,y} directly. Then the phase advances are smoothly distributed over the segments (proportional to the respective s length).
- alpha_{x,y}, beta_{x,y} are the TWISS parameters alpha and beta. They are arrays of size len(s) as these parameters must be defined at every segment boundary of the accelerator.
- D_{x,y} are the dispersion coefficients. They are arrays of size len(s) as these parameters must be defined at every segment boundary of the accelerator.
- detuner_collections is a list of DetunerCollection objects that are present in the accelerator. Each DetunerCollection knows how to generate and store its SegmentDetuner objects to 'distribute' the detuning proportionally along the accelerator circumference.
__len__()

_generate_segment_maps()

This method is called at instantiation of a TransverseMap object. For each segment of the accelerator ring (defined by the array self.s), a TransverseSegmentMap object is instantiated and appended to the list self.segment_maps. The creation of the TransverseSegmentMaps includes the instantiation of the SegmentDetuner objects, which is achieved by calling the self.detuner_collections.generate_segment_detuner(...) method. The detuning strength given in a DetunerCollection is valid for one complete turn around the accelerator. To determine the detuning strength of a SegmentDetuner, the one-turn detuning strength is scaled to the segment_length. Note that this quantity is given in relative units (i.e. it is normalized to the accelerator circumference s[-1]).

detuner_collections = None

List to store DetunerCollection instances.

get_injection_optics()

Return a dict with the transverse TWISS parameters alpha_x, beta_x, D_x, alpha_y, beta_y, D_y from the beginning of the first segment (injection point).
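To tie the interface above together, a hedged smooth-approximation example with a single segment and scalar tunes; the numbers are illustrative only.

    import numpy as np
    from PyHEADTAIL.trackers.transverse_tracking import TransverseMap
    from PyHEADTAIL.trackers.detuners import Chromaticity

    C = 6911.                       # circumference [m], illustrative
    s = np.array([0., C])           # one segment covering the full ring
    alpha = np.array([0., 0.])
    beta = np.array([70., 70.])
    D = np.array([0., 0.])

    trans_map = TransverseMap(s, alpha, beta, D, alpha, beta, D,
                              accQ_x=20.13, accQ_y=20.18,
                              detuners=[Chromaticity(5., 5.)])
    # one turn corresponds to tracking through each segment map in order:
    # for segment_map in trans_map:
    #     segment_map.track(beam)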
class PyHEADTAIL.trackers.transverse_tracking.TransverseSegmentMap(alpha_x_s0, beta_x_s0, D_x_s0, alpha_x_s1, beta_x_s1, D_x_s1, alpha_y_s0, beta_y_s0, D_y_s0, alpha_y_s1, beta_y_s1, D_y_s1, dQ_x, dQ_y, *args, **kwargs)

Class to transport/track the particles of the beam in the transverse plane through an accelerator ring segment defined by its boundaries [s0, s1]. To calculate the transverse linear transport matrix M that transports each particle's transverse phase space coordinates (x, xp, y, yp) from position s0 to position s1 in the accelerator, the TWISS parameters alpha and beta at positions s0 and s1 must be provided. The betatron phase advance of each particle in the present segment is given by their betatron tune Q_{x,y} (phase advance) and possibly by an incoherent tune shift introduced e.g. by amplitude detuning or chromaticity effects (see the trackers.detuners module).

Dispersion is added in the horizontal and vertical planes. Care needs to be taken that dispersive effects were taken into account upon beam creation. Then, before each linear tracking step, the dispersion is removed, linear tracking is performed via the linear periodic map, and dispersion is added back so that any subsequent collective effect has dispersion taken into account.

__init__(alpha_x_s0, beta_x_s0, D_x_s0, alpha_x_s1, beta_x_s1, D_x_s1, alpha_y_s0, beta_y_s0, D_y_s0, alpha_y_s1, beta_y_s1, D_y_s1, dQ_x, dQ_y, *args, **kwargs)

Return an instance of the TransverseSegmentMap class. The values of the TWISS parameters alpha_{x,y} and beta_{x,y} as well as of the dispersion coefficients D_{x,y} (not yet implemented) are given at the beginning s0 and at the end s1 of the corresponding segment. The dQ_{x,y} denote the betatron tune advance over the current segment (phase advance divided by 2 pi). The SegmentDetuner objects present in this segment are passed as a list via the keyword argument 'segment_detuners'. The matrices self.I and self.J are constant and are calculated only once at instantiation of the TransverseSegmentMap.

_build_segment_map(alpha_x_s0, beta_x_s0, alpha_x_s1, beta_x_s1, alpha_y_s0, beta_y_s0, alpha_y_s1, beta_y_s1)

Calculate the matrices I and J which are decoupled from the phase advance dependency and only depend on the TWISS parameters at the boundaries of the accelerator segment. These matrices are constant and hence need to be calculated only once at instantiation of the TransverseSegmentMap.

_track_with_dispersion(beam, M00, M01, M10, M11, M22, M23, M32, M33)

This method gets bound to the self._track() method if there are dispersion effects, i.e. any of the 4 dispersion parameters is != 0. It computes the transverse tracking given the matrix elements Mij:
1) subtract the dispersion using dp,
2) change the positions and momenta using the matrix elements,
3) add the dispersion effects back using dp.

_track_without_dispersion(beam, M00, M01, M10, M11, M22, M23, M32, M33)

This method gets bound to the self._track() method if there are no dispersive effects, i.e. all of the 4 dispersion parameters are close to 0 (within 1e-3). It computes the transverse tracking given the matrix elements Mij.

track(beam)

The dphi_{x,y} denote the phase advance in the horizontal and vertical plane respectively for the given accelerator segment. They are composed of the betatron tunes dQ_{x,y} and a possible incoherent tune shift introduced by detuner elements / effects defined in the list self.segment_detuners (they are all instances of the SegmentDetuner child classes). The transport matrix is defined by the coefficients M_{ij}.
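A sketch of the subtract/transport/add pattern of _track_with_dispersion, shown for the horizontal plane only; handling only D (and not D') in the position coordinate is an assumption.

    def track_x_with_dispersion(x, xp, dp, D_x_s0, D_x_s1,
                                M00, M01, M10, M11):
        # 1) remove the dispersive contribution at s0
        x_beta = x - D_x_s0 * dp
        # 2) linear betatron transport from s0 to s1
        x_new = M00 * x_beta + M01 * xp
        xp_new = M10 * x_beta + M11 * xp
        # 3) add the dispersive contribution back at s1
        return x_new + D_x_s1 * dp, xp_new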
PyHEADTAIL.trackers.wrapper module

class PyHEADTAIL.trackers.wrapper.LongWrapper(circumference, z0=0, *args, **kwargs)

Wrap particles that go out of the z range covering the circumference.

__init__(circumference, z0=0, *args, **kwargs)

Arguments:
- circumference: the interval length in z in [m]
- z0: the central value of z

Particles outside of the interval [z0 - circumference / 2, z0 + circumference / 2] will be folded back into the interval.

track(beam)

Perform tracking of beam through this Element.

track_numpy(beam)

Explicitly uses numpy functions on the beam.
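A one-line sketch of the periodic folding into [z0 - C/2, z0 + C/2), assuming a plain modulo wrap (works elementwise on numpy arrays):

    def wrap_z(z, circumference, z0=0.):
        C = circumference
        return (z - z0 + C / 2.) % C - C / 2. + z0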
diff --git a/docs/genindex.html b/docs/genindex.html
deleted file mode 100644

Index

diff --git a/docs/index.html b/docs/index.html
deleted file mode 100644

Welcome to PyHEADTAIL's API description!

diff --git a/docs/py-modindex.html b/docs/py-modindex.html
deleted file mode 100644
Python Module Index
PyHEADTAIL
    PyHEADTAIL.aperture
    PyHEADTAIL.aperture.aperture
    PyHEADTAIL.aperture.aperture_cython
    PyHEADTAIL.cobra_functions
    PyHEADTAIL.cobra_functions.c_sin_cos
    PyHEADTAIL.cobra_functions.curve_tools
    PyHEADTAIL.cobra_functions.interp_sin_cos
    PyHEADTAIL.cobra_functions.stats
    PyHEADTAIL.feedback
    PyHEADTAIL.feedback.transverse_damper
    PyHEADTAIL.feedback.widebandfeedback
    PyHEADTAIL.field_maps
    PyHEADTAIL.general
    PyHEADTAIL.general.contextmanager
    PyHEADTAIL.general.decorators
    PyHEADTAIL.general.element
    PyHEADTAIL.general.pmath
    PyHEADTAIL.general.printers
    PyHEADTAIL.general.utils
    PyHEADTAIL.gpu
    PyHEADTAIL.gpu.gpu_utils
    PyHEADTAIL.gpu.gpu_wrap
    PyHEADTAIL.impedances
    PyHEADTAIL.impedances.wake_kicks
    PyHEADTAIL.impedances.wakes
    PyHEADTAIL.machines
    PyHEADTAIL.machines.synchrotron
    PyHEADTAIL.monitors
    PyHEADTAIL.monitors.monitors
    PyHEADTAIL.multipoles
    PyHEADTAIL.multipoles.multipoles
    PyHEADTAIL.particles
    PyHEADTAIL.particles.generators
    PyHEADTAIL.particles.particles
    PyHEADTAIL.particles.rfbucket_matching
    PyHEADTAIL.particles.slicing
    PyHEADTAIL.radiation
    PyHEADTAIL.radiation.radiation
    PyHEADTAIL.rfq
    PyHEADTAIL.rfq.rfq
    PyHEADTAIL.spacecharge
    PyHEADTAIL.spacecharge.spacecharge
    PyHEADTAIL.spacecharge.transverse_spacecharge
    PyHEADTAIL.trackers
    PyHEADTAIL.trackers.detuners
    PyHEADTAIL.trackers.libintegrators
    PyHEADTAIL.trackers.libTPSA
    PyHEADTAIL.trackers.longitudinal_tracking
    PyHEADTAIL.trackers.simple_long_tracking
    PyHEADTAIL.trackers.transverse_tracking
    PyHEADTAIL.trackers.wrapper
diff --git a/docs/search.html b/docs/search.html
deleted file mode 100644