Skip to content

Commit 38e435a

Browse files
committed
release-script: Merge branch 'release/v1.13.0'
2 parents f9c94b1 + 9ce0ef2 commit 38e435a

File tree

15 files changed

+285
-71
lines changed

15 files changed

+285
-71
lines changed

LICENSE.txt

Lines changed: 33 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,33 @@
1+
BSD 4-Clause License
2+
3+
Copyright (c) 2018, CERN
4+
All rights reserved.
5+
6+
Redistribution and use in source and binary forms, with or without
7+
modification, are permitted provided that the following conditions are met:
8+
9+
* Redistributions of source code must retain the above copyright notice, this
10+
list of conditions and the following disclaimer.
11+
12+
* Redistributions in binary form must reproduce the above copyright notice,
13+
this list of conditions and the following disclaimer in the documentation
14+
and/or other materials provided with the distribution.
15+
16+
* Neither the name of the copyright holder nor the names of its
17+
contributors may be used to endorse or promote products derived from
18+
this software without specific prior written permission.
19+
20+
* Neither the name of the copyright holder nor the names of its contributors
21+
may be used to endorse or promote products derived from this software
22+
without specific prior written permission.
23+
24+
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25+
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26+
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
27+
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
28+
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29+
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
30+
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
31+
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
32+
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
33+
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

PyHEADTAIL/_version.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1 +1 @@
1-
__version__ = '1.12.4'
1+
__version__ = '1.13.0'

PyHEADTAIL/cobra_functions/stats.pyx

Lines changed: 11 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -418,8 +418,11 @@ cpdef emittance_per_slice(int[::1] slice_index_of_particle,
418418
@cython.boundscheck(False)
419419
@cython.cdivision(True)
420420
@cython.wraparound(False)
421-
cpdef calc_cell_stats(bunch, double beta_z, double radial_cut,
422-
int n_rings, int n_azim_slices):
421+
cpdef calc_cell_stats(
422+
double[::1] x, double[::1] xp, double[::1] y,
423+
double[::1] yp, double[::1] z, double[::1] dp,
424+
double beta_z, double radial_cut,
425+
int n_rings, int n_azim_slices):
423426

424427
# Prepare arrays to store cell statistics.
425428
cdef int[:,::1] n_particles_cell = np.zeros((n_azim_slices, n_rings),
@@ -438,12 +441,12 @@ cpdef calc_cell_stats(bunch, double beta_z, double radial_cut,
438441
dtype=np.double)
439442

440443
# Declare datatypes of bunch coords.
441-
cdef double[::1] x = bunch.x
442-
cdef double[::1] xp = bunch.xp
443-
cdef double[::1] y = bunch.y
444-
cdef double[::1] yp = bunch.yp
445-
cdef double[::1] z = bunch.z
446-
cdef double[::1] dp = bunch.dp
444+
# cdef double[::1] x = bunch.x
445+
# cdef double[::1] xp = bunch.xp
446+
# cdef double[::1] y = bunch.y
447+
# cdef double[::1] yp = bunch.yp
448+
# cdef double[::1] z = bunch.z
449+
# cdef double[::1] dp = bunch.dp
447450
cdef unsigned int n_particles = x.shape[0]
448451

449452
cdef double ring_width = radial_cut / <double>n_rings

PyHEADTAIL/feedback/transverse_damper.py

Lines changed: 50 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -14,42 +14,81 @@
1414

1515
class TransverseDamper(Element):
1616

17-
def __init__(self, dampingrate_x, dampingrate_y, *args, **kwargs):
17+
def __init__(self, dampingrate_x, dampingrate_y, phase=90,
18+
local_beta_function=None, *args, **kwargs):
19+
'''Ideal transverse damper with an in-place "measurement"
20+
(transverse "pick-up") of the transverse dipole moment.
21+
Note: a single bunch in the beam is assumed, i.e. this works on
22+
the entire beam's moments.
23+
24+
Arguments:
25+
- dampingrate_x, dampingrate_y: horizontal and vertical
26+
damping rates in turns (e.g. 50 turns for a typical 2018
27+
LHC ADT set-up)
28+
- phase: phase of the damper kick in degrees with respect to
29+
the transverse position "pick-up". The default value of
30+
90 degrees corresponds to a typical resistive damper.
31+
- local_beta_function: the optics beta function at the
32+
transverse position "pick-up" (e.g. in the local place
33+
of this Element). This is required if the damper is not
34+
a purely resistive damper (or exciter), i.e. if the
35+
phase is not 90 (or 270) degrees. The beta function is
36+
assumed to be the same for both transverse planes,
37+
otherwise use two instances of the TransverseDamper.
38+
'''
1839

1940
if dampingrate_x and not dampingrate_y:
2041
self.gain_x = 2/dampingrate_x
2142
self.track = self.track_horizontal
22-
self.prints('Damper in V active')
43+
self.prints('Damper in horizontal plane active')
2344
elif not dampingrate_x and dampingrate_y:
2445
self.gain_y = 2/dampingrate_y
2546
self.track = self.track_vertical
26-
self.prints('Damper in Y active')
47+
self.prints('Damper in vertical plane active')
2748
elif not dampingrate_x and not dampingrate_y:
2849
self.prints('Dampers not active')
2950
else:
3051
self.gain_x = 2/dampingrate_x
3152
self.gain_y = 2/dampingrate_y
3253
self.track = self.track_all
3354
self.prints('Dampers active')
55+
if phase != 90 and phase != 270 and not local_beta_function:
56+
raise TypeError(
57+
'TransverseDamper: numeric local_beta_function value at '
58+
'position of damper missing! (Required because of non-zero '
59+
'reactive damper component.)')
60+
self.phase_in_2pi = phase / 360. * 2*np.pi
61+
self.local_beta_function = local_beta_function
3462

3563
# will be overwritten at initialisation
3664
def track(self, beam):
3765
pass
3866

3967
def track_horizontal(self, beam):
40-
beam.xp -= self.gain_x * beam.mean_xp()
68+
beam.xp -= self.gain_x * np.sin(self.phase_in_2pi) * beam.mean_xp()
69+
if self.local_beta_function:
70+
beam.xp -= (self.gain_x * np.cos(self.phase_in_2pi) *
71+
beam.mean_x() / self.local_beta_function)
4172

4273
def track_vertical(self, beam):
43-
beam.yp -= self.gain_y * beam.mean_yp()
74+
beam.yp -= self.gain_y * np.sin(self.phase_in_2pi) * beam.mean_yp()
75+
if self.local_beta_function:
76+
beam.yp -= (self.gain_y * np.cos(self.phase_in_2pi) *
77+
beam.mean_y() / self.local_beta_function)
4478

4579
def track_all(self, beam):
46-
beam.xp -= self.gain_x * beam.mean_xp()
47-
beam.yp -= self.gain_y * beam.mean_yp()
80+
beam.xp -= self.gain_x * np.sin(self.phase_in_2pi) * beam.mean_xp()
81+
beam.yp -= self.gain_y * np.sin(self.phase_in_2pi) * beam.mean_yp()
82+
if self.local_beta_function:
83+
beam.xp -= (self.gain_x * np.cos(self.phase_in_2pi) *
84+
beam.mean_x() / self.local_beta_function)
85+
beam.yp -= (self.gain_y * np.cos(self.phase_in_2pi) *
86+
beam.mean_y() / self.local_beta_function)
4887

4988
@classmethod
50-
def horizontal(cls, dampingrate_x):
51-
return cls(dampingrate_x, 0)
89+
def horizontal(cls, dampingrate_x, *args, **kwargs):
90+
return cls(dampingrate_x, 0, *args, **kwargs)
5291

5392
@classmethod
54-
def vertical(cls, dampingrate_y):
55-
return cls(0, dampingrate_y)
93+
def vertical(cls, dampingrate_y, *args, **kwargs):
94+
return cls(0, dampingrate_y, *args, **kwargs)

PyHEADTAIL/gpu/gpu_wrap.py

Lines changed: 26 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -47,6 +47,11 @@
4747
# 'No GPU capabilities available')
4848
has_pycuda = False
4949

50+
def _empty_like(gpuarray):
51+
return pycuda.gpuarray.empty(
52+
shape=gpuarray.shape, dtype=gpuarray.dtype,
53+
allocator=gpu_utils.memory_pool.allocate)
54+
5055

5156
if has_pycuda:
5257
# define all compilation depending functions (e.g. ElementwiseKernel)
@@ -57,7 +62,7 @@
5762
)
5863
def sub_scalar(gpuarr, scalar, out=None, stream=None):
5964
if out is None:
60-
out = pycuda.gpuarray.empty_like(gpuarr)
65+
out = _empty_like(gpuarr)
6166
_sub_1dgpuarr(out, gpuarr, scalar, stream=stream)
6267
return out
6368

@@ -71,14 +76,14 @@ def _mul_scalar(gpuarr, scalar, out=None, stream=None):
7176
to specify a stream
7277
'''
7378
if out is None:
74-
out = pycuda.gpuarray.empty_like(gpuarr)
79+
out = _empty_like(gpuarr)
7580
_mul_with_factor(out, gpuarr, scalar, stream=stream)
7681

7782
def _multiply(a, b, out=None, stream=None):
7883
'''Elementwise multiply of two gpuarray specifying a stream
7984
Required because gpuarray.__mul__ has no stream argument'''
8085
if out is None:
81-
out = pycuda.gpuarray.empty_like(a)
86+
out = _empty_like(a)
8287
func = pycuda.elementwise.get_binary_op_kernel(a.dtype, b.dtype,
8388
out.dtype, "*")
8489
func.prepared_async_call(a._grid, a._block, stream, a.gpudata,
@@ -136,7 +141,7 @@ def _compute_sigma(a, b, c, d, out=None, stream=None):
136141
'''Computes elementwise a - b*c/d as required in compute sigma for
137142
the emittance '''
138143
if out is None:
139-
out = pycuda.gpuarray.empty_like(a)
144+
out = _empty_like(a)
140145
_comp_sigma(out, a, b, c, d, stream=stream)
141146
return out
142147

@@ -160,7 +165,7 @@ def _emittance_dispersion(
160165
n, cov_u2, cov_u_up, cov_up2, cov_u_dp, cov_up_dp,
161166
cov_dp2, out=None, stream=None):
162167
if out is None:
163-
out = pycuda.gpuarray.empty_like(cov_u2)
168+
out = _empty_like(cov_u2)
164169
_emitt_disp(out, cov_u2, cov_u_up, cov_up2, cov_u_dp, cov_up_dp,
165170
cov_dp2, np.float64(n), stream=stream)
166171
return out
@@ -176,7 +181,7 @@ def _emittance_dispersion(
176181
def _emittance_no_dispersion(
177182
n, cov_u2, cov_u_up, cov_up2, out=None, stream=None):
178183
if out is None:
179-
out = pycuda.gpuarray.empty_like(cov_u2)
184+
out = _empty_like(cov_u2)
180185
_emitt_nodisp(out, cov_u2, cov_u_up, cov_up2, np.float64(n),
181186
stream=stream)
182187
return out
@@ -194,9 +199,9 @@ def wofz(in_real, in_imag, out_real=None, out_imag=None, stream=None):
194199
part of z.
195200
'''
196201
if out_real is None:
197-
out_real = pycuda.gpuarray.empty_like(in_real)
202+
out_real = _empty_like(in_real)
198203
if out_imag is None:
199-
out_imag = pycuda.gpuarray.empty_like(in_imag)
204+
out_imag = _empty_like(in_imag)
200205
_wofz(in_real, in_imag, out_real, out_imag, stream=stream)
201206
return out_real, out_imag
202207

@@ -207,7 +212,7 @@ def wofz(in_real, in_imag, out_real=None, out_imag=None, stream=None):
207212
)
208213
def sign(array, out=None, stream=None):
209214
if out is None:
210-
out = pycuda.gpuarray.empty_like(array)
215+
out = _empty_like(array)
211216
_sign(array, out, stream=stream)
212217
return out
213218

@@ -260,13 +265,9 @@ def thrust_mean_and_std_per_slice(sliceset, u, stream=None):
260265
p_sids = sliceset.slice_index_of_particle
261266
# slice_index_of_particle may have slice indices outside of slicing area,
262267
# the following arrays therefore can comprise non valid slice entries
263-
slice_ids_noncontained = pycuda.gpuarray.empty(
264-
p_sids.shape, dtype=p_sids.dtype,
265-
allocator=gpu_utils.memory_pool.allocate)
266-
slice_means_noncontained = pycuda.gpuarray.empty(
267-
u.shape, dtype=u.dtype, allocator=gpu_utils.memory_pool.allocate)
268-
slice_stds_noncontained = pycuda.gpuarray.empty(
269-
u.shape, dtype=u.dtype, allocator=gpu_utils.memory_pool.allocate)
268+
slice_ids_noncontained = _empty_like(p_sids)
269+
slice_means_noncontained = _empty_like(u)
270+
slice_stds_noncontained = _empty_like(u)
270271

271272
(_, _, _, new_end) = thrust.thrust_stats_per_slice(
272273
p_sids, u, slice_ids_noncontained, slice_means_noncontained,
@@ -368,8 +369,8 @@ def covariance(a,b, stream=None):
368369
b: pycuda.GPUArray
369370
'''
370371
n = len(a)
371-
x = pycuda.gpuarray.empty_like(a)
372-
y = pycuda.gpuarray.empty_like(b)
372+
x = _empty_like(a)
373+
y = _empty_like(b)
373374
mean_a = skcuda.misc.mean(a)
374375
#x -= mean_a
375376
_sub_1dgpuarr(x, a, mean_a, stream=stream)
@@ -397,7 +398,7 @@ def std(a, stream=None):
397398
#return skcuda.misc.std(a, ddof=1)
398399
n = len(a)
399400
#mean_a = skcuda.misc.mean(a)
400-
x = pycuda.gpuarray.empty_like(a)
401+
x = _empty_like(a)
401402
mean_a = mean(a, stream=stream)
402403
_sub_1dgpuarr(x, a, mean_a, stream=stream)
403404
_inplace_pow(x, 2, stream=stream)
@@ -523,7 +524,7 @@ def emittance(u, up, dp, stream=None):
523524
n = len(u)
524525
mean_u = mean(u, stream=stream)
525526
mean_up = mean(up, stream=stream)
526-
out = pycuda.gpuarray.empty_like(mean_u)
527+
out = _empty_like(mean_u)
527528
tmp_u = sub_scalar(u, mean_u, stream=stream)
528529
tmp_up = sub_scalar(up, mean_up, stream=stream)
529530
tmp_space = _multiply(tmp_u, tmp_u, stream=stream)
@@ -568,7 +569,7 @@ def emittance_multistream(u, up, dp, stream=None):
568569
tmp_u = sub_scalar(u, mean_u, stream=streams[0])
569570
tmp_space = _multiply(tmp_u, tmp_u, stream=streams[0])
570571
cov_u2 = pycuda.gpuarray.sum(tmp_space, stream=streams[0])
571-
out = pycuda.gpuarray.empty_like(mean_u)
572+
out = _empty_like(mean_u)
572573
tmp_up = sub_scalar(up, mean_up, stream=streams[1])
573574
streams[0].synchronize()
574575
streams[1].synchronize()
@@ -601,11 +602,11 @@ def cumsum(array, dest=None):
601602
'''
602603
if array.dtype == np.int32:
603604
if dest is None:
604-
dest = pycuda.gpuarray.empty_like(array)
605+
dest = _empty_like(array)
605606
thrust_interface.thrust_cumsum_int(array, dest)
606607
elif array.dtype == np.float64:
607608
if dest is None:
608-
dest = pycuda.gpuarray.empty_like(array)
609+
dest = _empty_like(array)
609610
thrust_interface.thrust_cumsum_double(array, dest)
610611
else:
611612
dest = array.copy()
@@ -660,7 +661,7 @@ def apply_permutation(array, permutation):
660661
permutation permutation array: must be np.int32 (or int32), is asserted
661662
'''
662663
assert(permutation.dtype.itemsize == 4 and permutation.dtype.kind is 'i')
663-
tmp = pycuda.gpuarray.empty_like(array)
664+
tmp = _empty_like(array)
664665
dtype = array.dtype
665666
if dtype.itemsize == 8 and dtype.kind is 'f':
666667
thrust.apply_sort_perm_double(array, tmp, permutation)
@@ -832,7 +833,7 @@ def sorted_emittance_per_slice(sliceset, u, up, dp=None, stream=None):
832833
cov_u2 = sorted_cov_per_slice(sliceset, u, u, stream=streams[0])
833834
cov_up2= sorted_cov_per_slice(sliceset, up, up, stream=streams[1])
834835
cov_u_up = sorted_cov_per_slice(sliceset, u, up, stream=streams[2])
835-
out = pycuda.gpuarray.empty_like(cov_u2)
836+
out = _empty_like(cov_u2)
836837
# use this factor in emitt_disp: the code has a 1/(n*n+n) factor which is not
837838
# required here since the scaling is done in the cov_per_slice
838839
# --> 1/(n*n + n) must be 1. ==> n = sqrt(5)/2 -0.5

PyHEADTAIL/impedances/wake_kicks.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -105,7 +105,7 @@ def _convolution_numpy(self, target_times, source_times,
105105
try:
106106
source_times = source_times.get()
107107
except AttributeError:
108-
pass #is already on GPU
108+
pass #is already on CPU
109109
dt_to_target_slice = np.concatenate(
110110
(target_times - source_times[-1],
111111
(target_times - source_times[0])[1:]))

0 commit comments

Comments (0)