67 changes: 67 additions & 0 deletions tests/test_monitor.py
@@ -52,6 +52,7 @@ def test_constructor(test_context):
#xt.ParticlesMonitor(_context=test_context, start_at_turn=0, stop_at_turn=10, num_particles=42),
#xt.LastTurnsMonitor(_context=test_context, n_last_turns=5, num_particles=42),
xt.BeamPositionMonitor(_context=test_context, stop_at_turn=10),
xt.BunchMonitor(_context=test_context, stop_at_turn=10),
]

# test to_dict / from_dict
@@ -492,3 +493,69 @@ def test_beam_position_monitor(test_context):
expected_x_centroid[18:20] = np.nan
assert_allclose(monitor.x_cen, expected_x_centroid, err_msg="Monitor x centroid does not match expected values")
assert_allclose(monitor.y_cen, -expected_x_centroid, err_msg="Monitor y centroid does not match expected values")

@for_all_test_contexts
def test_bunch_monitor(test_context):

npart = 512 # must be even and >= 512
zeta = np.empty(npart+4) # generate a few more than we record to test "num_particles"
zeta[0:npart:2] = zeta1 = np.linspace(-0.2, 0.2, npart//2) # first bunch centered around 0
zeta[1:npart:2] = zeta2 = -0.5 + zeta1 # second bunch centered around -1/harmonic
zeta[npart:] = np.linspace(2,4,4)

particles = xp.Particles(
p0c=6.5e12,
zeta=zeta,
delta=1e-3*zeta,
_context=test_context,
)

monitor = xt.BunchMonitor(
num_particles=npart,
start_at_turn=0,
stop_at_turn=10,
frev=2.99792458e8,
harmonic=2,
_context=test_context,
)

line = xt.Line([monitor])
line.build_tracker(_context=test_context)

for turn in range(11): # track a few more than we record to test "stop_at_turn"
# Note that indices are re-ordered upon particle loss on CPU contexts,
# so sort before manipulation
if isinstance(test_context, xo.ContextCpu):
particles.sort(interleave_lost_particles=True)

# manipulate particles for testing
particles.zeta[0] += 0.005
particles.delta[0] += 1e-3*0.005
if turn == 8:
particles.state[256:] = 0
if turn == 9:
particles.state[:] = 0

if isinstance(test_context, xo.ContextCpu):
particles.reorganize()

# track and monitor
line.track(particles, num_turns=1)

# Check against expected values
expected_count = np.tile([npart//2, npart//2], 10)
expected_count[16:18] = 128
expected_count[18:20] = 0
assert_equal(monitor.count, expected_count, err_msg="Monitor count does not match expected particle count")

expected_zeta_sum = np.zeros(20)
expected_zeta_sum[0::2] = np.sum(zeta1) + 0.005*np.arange(1, 11)
expected_zeta_sum[1::2] = np.sum(zeta2)
expected_zeta_sum[16] -= np.sum(zeta1[128:])
expected_zeta_sum[17] -= np.sum(zeta2[128:])
expected_zeta_sum[18:] = 0
assert_allclose(monitor.zeta_sum, expected_zeta_sum, err_msg="Monitor zeta sum does not match expected values")
assert_allclose(monitor.delta_sum, 1e-3*expected_zeta_sum, err_msg="Monitor delta sum does not match expected values")

# TODO: test zeta_mean, delta_mean, zeta2_sum, delta2_sum, zeta_std, delta_std, zeta_var, delta_var
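For reference, the slot layout behind the expected arrays above: frev is set to the speed of light and beta0 is essentially 1 at 6.5 TeV, so one revolution corresponds to roughly 1 m of zeta, and harmonic=2 gives two recording slots per turn (bucket spacing of about 0.5 m). A small sketch of that bookkeeping using only the test parameters (illustrative, not part of the monitor API):

# Slot bookkeeping implied by the test parameters (sketch only)
frev, harmonic = 2.99792458e8, 2
start_at_turn, stop_at_turn = 0, 10
circumference = 2.99792458e8 / frev                    # ~1 m, since frev == c and beta0 ~ 1
bucket_spacing = circumference / harmonic              # ~0.5 m, matching the -0.5 offset of zeta2
n_slots = (stop_at_turn - start_at_turn) * harmonic    # 20 entries per recorded property
# even slots (0, 2, ...) collect the bunch around zeta ~  0.0 (zeta1)
# odd  slots (1, 3, ...) collect the bunch around zeta ~ -0.5 (zeta2)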

1 change: 1 addition & 0 deletions xtrack/monitors/__init__.py
@@ -4,5 +4,6 @@
from .beam_position_monitor import *
from .beam_size_monitor import *
from .beam_profile_monitor import *
from .bunch_monitor import *

monitor_classes = tuple(v for v in globals().values() if isinstance(v, type) and issubclass(v, BeamElement))
36 changes: 18 additions & 18 deletions xtrack/monitors/bunch_monitor.h
@@ -1,33 +1,32 @@
// ##################################
// Bunch Monitor
//
// Author: Philipp Niedermayer
// Author: Philipp Niedermayer, Cristopher Cortes
// Date: 2023-08-14
// Edit: 2024-06-12
// ##################################


#ifndef XTRACK_BEAM_SIZE_MONITOR_H
#define XTRACK_BEAM_SIZE_MONITOR_H
#ifndef XTRACK_BUNCH_MONITOR_H
#define XTRACK_BUNCH_MONITOR_H

#if !defined( C_LIGHT )
#define C_LIGHT ( 299792458.0 )
#endif /* !defined( C_LIGHT ) */

/*gpufun*/
void BeamSizeMonitor_track_local_particle(BeamSizeMonitorData el, LocalParticle* part0){
void BunchMonitor_track_local_particle(BunchMonitorData el, LocalParticle* part0){

// get parameters
int64_t const start_at_turn = BeamSizeMonitorData_get_start_at_turn(el);
int64_t particle_id_start = BeamSizeMonitorData_get_particle_id_start(el);
int64_t particle_id_stop = particle_id_start + BeamSizeMonitorData_get_num_particles(el);
// Should we do explicit casting to float (?)
int64_t const harmonic = BeamSizeMonitorData_get_harmonic(el);
double const frev = BeamSizeMonitorData_get_frev(el);
int64_t const start_at_turn = BunchMonitorData_get_start_at_turn(el);
int64_t particle_id_start = BunchMonitorData_get_particle_id_start(el);
int64_t particle_id_stop = particle_id_start + BunchMonitorData_get_num_particles(el);
int64_t const harmonic = BunchMonitorData_get_harmonic(el);
double const frev = BunchMonitorData_get_frev(el);

BeamSizeMonitorRecord record = BeamSizeMonitorData_getp_data(el);
BunchMonitorRecord record = BunchMonitorData_getp_data(el);

int64_t max_slot = BeamSizeMonitorRecord_len_count(record);
int64_t max_slot = BunchMonitorRecord_len_count(record);

//start_per_particle_block(part0->part)

@@ -46,24 +45,25 @@ void BeamSizeMonitor_track_local_particle(BeamSizeMonitorData el, LocalParticle*

if (slot >= 0 && slot < max_slot){

double delta = LocalParticle_get_delta(part);
double const delta = LocalParticle_get_delta(part);

/*gpuglmem*/ double* count = BeamSizeMonitorRecord_getp1_count(record, slot);
/*gpuglmem*/ double* count = BunchMonitorRecord_getp1_count(record, slot);
atomicAdd(count, 1);

/*gpuglmem*/ double * zeta_sum = BeamSizeMonitorRecord_getp1_zeta_sum(record, slot);
/*gpuglmem*/ double * zeta_sum = BunchMonitorRecord_getp1_zeta_sum(record, slot);
atomicAdd(zeta_sum, zeta);

/*gpuglmem*/ double * delta_sum = BeamSizeMonitorRecord_getp1_delta_sum(record, slot);
/*gpuglmem*/ double * delta_sum = BunchMonitorRecord_getp1_delta_sum(record, slot);
atomicAdd(delta_sum, delta);

/*gpuglmem*/ double * zeta2_sum = BeamSizeMonitorRecord_getp1_zeta2_sum(record, slot);
/*gpuglmem*/ double * zeta2_sum = BunchMonitorRecord_getp1_zeta2_sum(record, slot);
atomicAdd(zeta2_sum, zeta*zeta);

/*gpuglmem*/ double * delta2_sum = BeamSizeMonitorRecord_getp1_delta2_sum(record, slot);
/*gpuglmem*/ double * delta2_sum = BunchMonitorRecord_getp1_delta2_sum(record, slot);
atomicAdd(delta2_sum, delta*delta);
}
}

//end_per_particle_block
}
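
The kernel above only accumulates raw per-slot moments (count, zeta_sum, zeta2_sum, delta_sum, delta2_sum) via atomicAdd; the derived quantities named in the test TODO (zeta_mean, zeta_std, ...) follow from these sums. A minimal Python sketch of that relation — the helper name and the NaN handling for empty slots are assumptions, not the monitor's actual post-processing:

import numpy as np

def moments_to_stats(count, x_sum, x2_sum):
    # Per-slot mean, variance and std from accumulated count, sum and sum of squares
    # (assumed relation; empty slots yield NaN here).
    count = np.asarray(count, dtype=float)
    with np.errstate(invalid='ignore', divide='ignore'):
        mean = x_sum / count                 # NaN for empty slots (0/0)
        var = x2_sum / count - mean**2       # E[x^2] - E[x]^2
    return mean, var, np.sqrt(np.clip(var, 0.0, None))  # clip guards tiny negative rounding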

18 changes: 9 additions & 9 deletions xtrack/monitors/bunch_monitor.py
@@ -1,7 +1,7 @@
"""
Beam Size Monitor

Author: Philipp Niedermayer
Author: Philipp Niedermayer, Cristopher Cortes
Date: 2023-08-14
Edit: 2024-06-12
"""
@@ -15,14 +15,14 @@
from ..internal_record import RecordIndex
from ..general import _pkg_root

class BeamSizeMonitorRecord(xo.Struct):
class BunchMonitorRecord(xo.Struct):
count = xo.Float64[:]
zeta_sum = xo.Float64[:]
zeta2_sum = xo.Float64[:]
delta_sum = xo.Float64[:]
delta2_sum = xo.Float64[:]

class BeamSizeMonitor(BeamElement):
class BunchMonitor(BeamElement):

_xofields={
'particle_id_start': xo.Int64,
@@ -32,13 +32,13 @@ class BeamSizeMonitor(BeamElement):
'frev': xo.Float64,
'harmonic': xo.Int64,
'_index': RecordIndex,
'data': BeamSizeMonitorRecord,
'data': BunchMonitorRecord,
}

behaves_like_drift = True
allow_loss_refinement = True

properties = [field.name for field in BeamSizeMonitorRecord._fields]
properties = [field.name for field in BunchMonitorRecord._fields]

_extra_c_sources = [
_pkg_root.joinpath('headers/atomicadd.h'),
@@ -49,7 +49,7 @@ def __init__(self, *, particle_id_range=None, particle_id_start=None, num_partic
start_at_turn=None, stop_at_turn=None, frev=None,
harmonic=None, _xobject=None, **kwargs):
"""
Monitor to save the longitudinal beam size (standard deviation of the tracked particle positions)
Monitor to save the longitudinal bunch position and size (mean and std of zeta) as well as mean and std of momentum spread (delta)


The monitor allows for arbitrary sampling rate and can thus not only be used to monitor
@@ -117,18 +117,18 @@ def __init__(self, *, particle_id_range=None, particle_id_start=None, num_partic

if "data" not in kwargs:
# explicitly init with zeros (instead of size only) to have consistent initial values
size = int(round(( stop_at_turn - start_at_turn ) * sampling_frequency / frev))
size = int(round(( stop_at_turn - start_at_turn ) * harmonic))
kwargs["data"] = {prop: np.zeros(size) for prop in self.properties}

super().__init__(particle_id_start=particle_id_start, num_particles=num_particles,
start_at_turn=start_at_turn, stop_at_turn=stop_at_turn, frev=frev,
sampling_frequency=sampling_frequency, **kwargs)
harmonic=harmonic, **kwargs)

def __repr__(self):
return (
f"{type(self).__qualname__}(start_at_turn={self.start_at_turn}, stop_at_turn={self.stop_at_turn}, "
f"particle_id_start={self.particle_id_start}, num_particles={self.num_particles}, frev={self.frev}, "
f"harmonic={self.sampling_frequency}) at {hex(id(self))}"
f"harmonic={self.harmonic}) at {hex(id(self))}"
)

def __getattr__(self, attr):
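
Taken together, a minimal usage sketch for the new element, mirroring the test above (the parameter values are illustrative):

import numpy as np
import xpart as xp
import xtrack as xt

monitor = xt.BunchMonitor(num_particles=512, start_at_turn=0, stop_at_turn=10,
                          frev=2.99792458e8, harmonic=2)
line = xt.Line([monitor])
line.build_tracker()
particles = xp.Particles(p0c=6.5e12, zeta=np.linspace(-0.2, 0.2, 512))
line.track(particles, num_turns=10)
# One entry per bunch and turn: (stop_at_turn - start_at_turn) * harmonic = 20
print(monitor.count, monitor.zeta_sum)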