From bc9e635127996f7824a00592d147d3d2bbf555ba Mon Sep 17 00:00:00 2001 From: Joshua Gray <61458311+jgray-19@users.noreply.github.com> Date: Fri, 31 Oct 2025 11:15:20 +0100 Subject: [PATCH 01/37] Refactor file path handling in LHC model creator and response MAD-X scripts for improved clarity and consistency --- omc3/correction/response_madx.py | 20 ++++++--- omc3/model/accelerators/lhc.py | 3 +- .../model/model_creators/lhc_model_creator.py | 44 ++++++++++--------- 3 files changed, 38 insertions(+), 29 deletions(-) diff --git a/omc3/correction/response_madx.py b/omc3/correction/response_madx.py index b51f2c6f..150ff007 100644 --- a/omc3/correction/response_madx.py +++ b/omc3/correction/response_madx.py @@ -127,20 +127,28 @@ def _generate_madx_jobs( if var_idx >= len(no_dpp_vars): break var = no_dpp_vars[var_idx] + outfile = temp_dir / f"twiss.{var}" + incr_dict[var] = delta_k current_job += f"{var} = {var}{delta_k:+.15e};\n" - current_job += f"twiss, file='{str(temp_dir / f'twiss.{var}')}'{deltap_twiss};\n" + current_job += f"twiss, file='{str(outfile.absolute())}'{deltap_twiss};\n" current_job += f"{var} = {var}{-delta_k:+.15e};\n\n" if proc_idx == num_proc - 1: - current_job += f"twiss, file='{str(temp_dir / 'twiss.0')}'{deltap_twiss};\n" + outfile = temp_dir / "twiss.0" + current_job += f"twiss, file='{str(outfile.absolute())}'{deltap_twiss};\n" - if compute_deltap: # If ORBIT_DPP is in variables, we run this in the last iteration - # Due to the match and correction of the orbit, this needs to be run at the end of the process + if compute_deltap: + # If ORBIT_DPP is in variables, we run this in the last iteration + # Due to the matching and correction of the orbit, this needs to be run at the end of the process incr_dict[ORBIT_DPP] = delta_k current_job += f"{ORBIT_DPP} = {ORBIT_DPP}{delta_k:+.15e};\n" - current_job += creator.get_update_deltap_script(deltap=ORBIT_DPP) # Do twiss, correct, match - current_job += f"twiss, deltap={ORBIT_DPP}, 
file='{str(temp_dir/f'twiss.{ORBIT_DPP}')}';\n" + # Do twiss, correct, match + current_job += creator.get_update_deltap_script(deltap=ORBIT_DPP) + outfile = temp_dir / f"twiss.{ORBIT_DPP}" + current_job += ( + f"twiss, deltap={ORBIT_DPP}, file='{str(outfile.absolute())}';\n" + ) jobfile_path.write_text(current_job) return incr_dict diff --git a/omc3/model/accelerators/lhc.py b/omc3/model/accelerators/lhc.py index 1cd5097e..bcf04a22 100644 --- a/omc3/model/accelerators/lhc.py +++ b/omc3/model/accelerators/lhc.py @@ -330,8 +330,7 @@ def important_phase_advances(self) -> list[list[str]] | None: return None def get_accel_file(self, filename: Path | str) -> Path: - return LHC_DIR / self.year / filename - + return Path(LHC_DIR / self.year / filename).absolute() # Private Methods ############################################################## def _get_corrector_elems(self) -> Path: diff --git a/omc3/model/model_creators/lhc_model_creator.py b/omc3/model/model_creators/lhc_model_creator.py index 7ccbe3fc..4a5056ba 100644 --- a/omc3/model/model_creators/lhc_model_creator.py +++ b/omc3/model/model_creators/lhc_model_creator.py @@ -131,14 +131,12 @@ def prepare_run(self) -> None: shutil.copy(OMC3_MADX_MACROS_DIR / LHC_MACROS, macros_path / LHC_MACROS) shutil.copy(OMC3_MADX_MACROS_DIR / LHC_MACROS_RUN3, macros_path / LHC_MACROS_RUN3) - if accel.energy is not None: core = f"{int(accel.energy):04d}" LOGGER.debug("Copying error defs for analytical N-BPM method") error_dir_path = accel.get_lhc_error_dir() - shutil.copy(error_dir_path / - f"{core}GeV.tfs", accel.model_dir / ERROR_DEFFS_TXT) + shutil.copy(error_dir_path / f"{core}GeV.tfs", accel.model_dir / ERROR_DEFFS_TXT) def check_accelerator_instance(self) -> None: accel = self.accel @@ -159,8 +157,7 @@ def check_accelerator_instance(self) -> None: # hint: if modifiers are given as absolute paths: `path / abs_path` returns `abs_path` (jdilly) # hint: modifiers were at this point probably already checked (and adapted) in 
`prepare_run()`. - inexistent_modifiers = [ - m for m in accel.modifiers if not (accel.model_dir / m).exists()] + inexistent_modifiers = [m for m in accel.modifiers if not (accel.model_dir / m).exists()] if len(inexistent_modifiers): raise AcceleratorDefinitionError( "The following modifier files do not exist: " @@ -334,9 +331,9 @@ def _get_madx_main_sequence_loading(self) -> str: accel: Lhc = self.accel if accel.acc_model_path is not None: - main_call = f'call, file = \'{accel.acc_model_path / "lhc.seq"}\';' - if accel.year.startswith('hl'): - main_call += f'\ncall, file = \'{accel.acc_model_path / "hllhc_sequence.madx"}\';' + main_call = f"call, file = '{accel.acc_model_path / 'lhc.seq'}';" + if accel.year.startswith("hl"): + main_call += f"\ncall, file = '{accel.acc_model_path / 'hllhc_sequence.madx'}';" return main_call try: return self._get_call_main() @@ -388,8 +385,8 @@ def _get_call_main(self) -> str: return call_main - -class LhcBestKnowledgeCreator(LhcModelCreator): # --------------------------------------------------------------------- +# --------------------------------------------------------------------- +class LhcBestKnowledgeCreator(LhcModelCreator): EXTRACTED_MQTS_FILENAME: str = "extracted_mqts.str" CORRECTIONS_FILENAME: str = "corrections.madx" jobfile: str = JOB_MODEL_MADX_BEST_KNOWLEDGE @@ -432,9 +429,7 @@ def check_accelerator_instance(self): ) if accel.excitation is not AccExcitationMode.FREE: - raise AcceleratorDefinitionError( - "Don't set ACD or ADT for best knowledge model." - ) + raise AcceleratorDefinitionError("Don't set ACD or ADT for best knowledge model.") def prepare_run(self): accel: Lhc = self.accel @@ -455,7 +450,7 @@ def prepare_run(self): # copy errors table --- b2_table = tfs.read(b2_error_path, index=NAME) gen_df = pd.DataFrame( - data=0., + data=0.0, index=b2_table.index, columns=_b2_columns(), ) @@ -474,7 +469,7 @@ def get_madx_script(self) -> str: madx_script += "\n! 
----- Load MQTs -----\n" mqts_file = accel.model_dir / self.EXTRACTED_MQTS_FILENAME if mqts_file.exists(): - madx_script += f"call, file = '{mqts_file}';\n" + madx_script += f"call, file = '{str(mqts_file.absolute())}';\n" madx_script += "\n! ----- Output Files -----\n" madx_script += ( @@ -522,20 +517,24 @@ def __init__(self, accel: Lhc, twiss_out: Path | str, corr_files: Sequence[Path super().__init__(accel, twiss_out, corr_files, update_dpp) def get_madx_script(self) -> str: - """ Get the madx script for the correction model creator, which updates the model after correcion. """ + """Get the madx script for the correction model creator, which updates the model after correcion.""" accel: Lhc = self.accel - madx_script = self.get_base_madx_script() # do not super().get_madx_script as we don't need the uncorrected output. + + # do not super().get_madx_script as we don't need the uncorrected output. + madx_script = self.get_base_madx_script() # First set the dpp to the value in the accelerator model madx_script += f"{ORBIT_DPP} = {accel.dpp};\n" for corr_file in self.corr_files: # Load the corrections, can also update ORBIT_DPP - madx_script += f"call, file = '{corr_file!s}';\n" + file = Path(corr_file).absolute() + madx_script += f"call, file = '{file!s}';\n" - if self.update_dpp: # If we are doing orbit correction, we need to ensure that a correct and a match is done + # If we are doing orbit correction, we need to ensure that a correct and a match is done + if self.update_dpp: madx_script += self.get_update_deltap_script(deltap=ORBIT_DPP) - madx_script += f'exec, do_twiss_elements(LHCB{accel.beam}, "{self.twiss_out!s}", {ORBIT_DPP});\n' + madx_script += f'exec, do_twiss_elements(LHCB{accel.beam}, "{self.twiss_out.absolute()!s}", {ORBIT_DPP});\n' return madx_script def prepare_run(self) -> None: @@ -545,7 +544,9 @@ def prepare_run(self) -> None: LOGGER.debug("Preparing model creation structure") macros_path = self.accel.model_dir / MACROS_DIR if not 
macros_path.exists(): - raise AcceleratorDefinitionError(f"Folder for the macros does not exist at {macros_path!s}.") + raise AcceleratorDefinitionError( + f"Folder for the macros does not exist at {macros_path!s}." + ) @property def files_to_check(self) -> list[str]: @@ -554,6 +555,7 @@ def files_to_check(self) -> list[str]: # LHC Segment Creator ---------------------------------------------------------- + class LhcSegmentCreator(SegmentCreator, LhcModelCreator): def get_madx_script(self): From 719a33e7308c747a07332b76e92c76bc6fbfb8a4 Mon Sep 17 00:00:00 2001 From: Joshua Gray <61458311+jgray-19@users.noreply.github.com> Date: Fri, 31 Oct 2025 17:27:12 +0100 Subject: [PATCH 02/37] Add MQT extraction functionality and related tests - Implemented a new module `mqt_extraction.py` for retrieving MQT (Quadrupole Trim) knob values from NXCALS for specified beams and timestamps. - Added helper functions to generate MAD-X MQT variable names and retrieve corresponding values. - Introduced `DateOrStr` class in `iotools.py` for flexible datetime and string parsing. - Created sample output files for MQT extraction tests: `extracted_mqts_b1.str` and `extracted_mqts_b2.str`. - Developed unit tests for MQT extraction functionality in `test_nxcals_mqt_extraction.py`, ensuring correct retrieval and formatting of MQT values. - Added a utility script `generate_mqt_references.py` to copy reference MQT files from CERN data paths for testing purposes. 
--- omc3/correction/response_madx.py | 147 ++++---- omc3/correction/sequence_evaluation.py | 78 +++-- omc3/model/accelerators/lhc.py | 57 ++-- .../model_creators/abstract_model_creator.py | 245 ++++++++----- .../model/model_creators/lhc_model_creator.py | 322 +++++++++++------- omc3/model/model_creators/ps_model_creator.py | 40 ++- .../model_creators/psbooster_model_creator.py | 24 +- .../model/model_creators/sps_model_creator.py | 68 ++-- omc3/nxcals/__init__.py | 2 + omc3/nxcals/constants.py | 1 + omc3/nxcals/knob_extraction.py | 274 +++++++++++++++ omc3/nxcals/mqt_extraction.py | 126 +++++++ omc3/utils/iotools.py | 57 +++- .../knob_extractor/extracted_mqts_b1.str | 16 + .../knob_extractor/extracted_mqts_b2.str | 16 + tests/unit/test_model_creator.py | 72 ++-- tests/unit/test_nxcals_mqt_extraction.py | 99 ++++++ tests/utils/generate_mqt_references.py | 38 +++ 18 files changed, 1236 insertions(+), 446 deletions(-) create mode 100644 omc3/nxcals/__init__.py create mode 100644 omc3/nxcals/constants.py create mode 100644 omc3/nxcals/knob_extraction.py create mode 100644 omc3/nxcals/mqt_extraction.py create mode 100644 tests/inputs/knob_extractor/extracted_mqts_b1.str create mode 100644 tests/inputs/knob_extractor/extracted_mqts_b2.str create mode 100644 tests/unit/test_nxcals_mqt_extraction.py create mode 100644 tests/utils/generate_mqt_references.py diff --git a/omc3/correction/response_madx.py b/omc3/correction/response_madx.py index 150ff007..c2461a8c 100644 --- a/omc3/correction/response_madx.py +++ b/omc3/correction/response_madx.py @@ -12,6 +12,7 @@ :author: Lukas Malina, Joschua Dilly, Jaime (...) 
Coello de Portugal """ + from __future__ import annotations import copy @@ -59,17 +60,17 @@ def create_fullresponse( variable_categories: Sequence[str], delta_k: float = 2e-5, num_proc: int = multiprocessing.cpu_count(), - temp_dir: Path | None = None + temp_dir: Path | None = None, ) -> dict[str, pd.DataFrame]: - """ Generate a dictionary containing response matrices for - beta, phase, dispersion, tune and coupling and saves it to a file. - - Args: - accel_inst : Accelerator Instance. - variable_categories (list): Categories of the variables/knobs to use. (from .json) - delta_k (float): delta K1L to be applied to quads for sensitivity matrix - num_proc (int): Number of processes to use in parallel. - temp_dir (str): temporary directory. If ``None``, uses folder of original_jobfile. + """Generate a dictionary containing response matrices for + beta, phase, dispersion, tune and coupling and saves it to a file. + + Args: + accel_inst : Accelerator Instance. + variable_categories (list): Categories of the variables/knobs to use. (from .json) + delta_k (float): delta K1L to be applied to quads for sensitivity matrix + num_proc (int): Number of processes to use in parallel. + temp_dir (str): temporary directory. If ``None``, uses folder of original_jobfile. 
""" LOG.debug("Generating Fullresponse via Mad-X.") with timeit(lambda t: LOG.debug(f" Total time generating fullresponse: {t} s")): @@ -96,11 +97,11 @@ def _generate_madx_jobs( variables: Sequence[str], delta_k: float, num_proc: int, - temp_dir: Path + temp_dir: Path, ) -> dict[str, float]: - """ Generates madx job-files """ + """Generates madx job-files""" LOG.debug("Generating MAD-X jobfiles.") - incr_dict = {'0': 0.0} + incr_dict = {"0": 0.0} compute_deltap: bool = ORBIT_DPP in variables no_dpp_vars = [var for var in variables if var != ORBIT_DPP] vars_per_proc = int(np.ceil(len(no_dpp_vars) / num_proc)) @@ -112,7 +113,7 @@ def _generate_madx_jobs( # This is here only for multiple iteration of the global correction # By including dpp here, it means that if deltap is in variables and dpp is not 0, the orbit and tune magnets change # We have to be very careful that DELTAP_NAME is not used ANYWHERE else in MAD-X - madx_job += f"{ORBIT_DPP} = {accel_inst.dpp};\n" # Set deltap to 0 + madx_job += f"{ORBIT_DPP} = {accel_inst.dpp};\n" # Set deltap to 0 # get update deltap setup from model creator madx_job += creator.get_update_deltap_script(deltap=ORBIT_DPP) @@ -146,33 +147,27 @@ def _generate_madx_jobs( # Do twiss, correct, match current_job += creator.get_update_deltap_script(deltap=ORBIT_DPP) outfile = temp_dir / f"twiss.{ORBIT_DPP}" - current_job += ( - f"twiss, deltap={ORBIT_DPP}, file='{str(outfile.absolute())}';\n" - ) + current_job += f"twiss, deltap={ORBIT_DPP}, file='{str(outfile.absolute())}';\n" jobfile_path.write_text(current_job) return incr_dict def _get_madx_job(accel_inst: Accelerator) -> str: # use relative paths as we use model_dir as cwd - model_dir_backup = accel_inst.model_dir - accel_inst.model_dir = Path() - # get nominal setup from creator creator = _get_nominal_model_creator(accel_inst) - job_content = creator.get_base_madx_script() + job_content = creator.get_base_madx_script(accel_inst.model_dir) job_content += ( "select, flag=twiss, 
clear;\n" f"select, flag=twiss, pattern='{accel_inst.RE_DICT[AccElementTypes.BPMS]}', " - "column=NAME,S,BETX,ALFX,BETY,ALFY,DX,DY,DPX,DPY,X,Y,K1L,MUX,MUY,R11,R12,R21,R22;\n\n") + "column=NAME,S,BETX,ALFX,BETY,ALFY,DX,DY,DPX,DPY,X,Y,K1L,MUX,MUY,R11,R12,R21,R22;\n\n" + ) - # restore model_dir - accel_inst.model_dir = model_dir_backup return job_content def _get_nominal_model_creator(accel_inst: Accelerator) -> ModelCreator: - """ Get the nominal model creator, to which we can add the change of parameters. + """Get the nominal model creator, to which we can add the change of parameters. This is always done on the nominal model, not the best knowledge model, to ensure that the response matrix is in the most linear regime and therefore most accurate @@ -182,8 +177,8 @@ def _get_nominal_model_creator(accel_inst: Accelerator) -> ModelCreator: return creator_class(accel_inst) -def _call_madx(process_pool: multiprocessing.Pool, temp_dir: str, num_proc: int) -> None: # type: ignore - """ Call madx in parallel """ +def _call_madx(process_pool: multiprocessing.Pool, temp_dir: str, num_proc: int) -> None: # type: ignore + """Call madx in parallel""" LOG.debug(f"Starting {num_proc:d} MAD-X jobs...") madx_jobs = [_get_jobfiles(temp_dir, index) for index in range(num_proc)] process_pool.map(_launch_single_job, madx_jobs) @@ -191,7 +186,7 @@ def _call_madx(process_pool: multiprocessing.Pool, temp_dir: str, num_proc: int) def _clean_up(temp_dir: Path, num_proc: int) -> None: - """ Merge Logfiles and clean temporary outputfiles """ + """Merge Logfiles and clean temporary outputfiles""" LOG.debug("Cleaning output and building log...") full_log = "" for index in range(num_proc): @@ -205,23 +200,21 @@ def _clean_up(temp_dir: Path, num_proc: int) -> None: # write compressed full log file full_log_name = "response_madx_full.log" zipfile.ZipFile( - temp_dir / f"{full_log_name}.zip", - mode="w", - compression=zipfile.ZIP_DEFLATED + temp_dir / f"{full_log_name}.zip", mode="w", 
compression=zipfile.ZIP_DEFLATED ).writestr(full_log_name, full_log) def _load_madx_results( - variables: list[str], - process_pool, - incr_dict: dict, - temp_dir: Path + variables: list[str], process_pool, incr_dict: dict, temp_dir: Path ) -> dict[str, tfs.TfsDataFrame]: - """ Load the madx results in parallel and return var-tfs dictionary """ + """Load the madx results in parallel and return var-tfs dictionary""" LOG.debug("Loading Madx Results.") vars_and_paths = [] - for value in variables + ['0']: + for value in variables + ["0"]: vars_and_paths.append((value, temp_dir)) + LOG.debug("Retrieving Twiss files in parallel...") + for var, path in vars_and_paths: + LOG.debug(f" Variable: {var}, Path: {path}") var_to_twiss = {} for var, tfs_data in process_pool.map(_load_and_remove_twiss, vars_and_paths): tfs_data[INCR] = incr_dict[var] @@ -229,13 +222,28 @@ def _load_madx_results( return var_to_twiss -def _create_fullresponse_from_dict(var_to_twiss: dict[str, tfs.TfsDataFrame]) -> dict[str, pd.DataFrame]: - """ Convert var-tfs dictionary to fullresponse dictionary. 
""" +def _create_fullresponse_from_dict( + var_to_twiss: dict[str, tfs.TfsDataFrame], +) -> dict[str, pd.DataFrame]: + """Convert var-tfs dictionary to fullresponse dictionary.""" var_to_twiss = _add_coupling(var_to_twiss) keys = list(var_to_twiss.keys()) - columns = [f"{PHASE_ADV}X", f"{PHASE_ADV}Y", f"{BETA}X", f"{BETA}Y", f"{DISPERSION}X", f"{DISPERSION}Y", - f"{F1001}R", f"{F1001}I", f"{F1010}R", f"{F1010}I", f"{TUNE}1", f"{TUNE}2", INCR] + columns = [ + f"{PHASE_ADV}X", + f"{PHASE_ADV}Y", + f"{BETA}X", + f"{BETA}Y", + f"{DISPERSION}X", + f"{DISPERSION}Y", + f"{F1001}R", + f"{F1001}I", + f"{F1010}R", + f"{F1010}I", + f"{TUNE}1", + f"{TUNE}2", + INCR, + ] bpms = var_to_twiss["0"].index resp = np.empty((len(keys), bpms.size, len(columns))) @@ -247,34 +255,45 @@ def _create_fullresponse_from_dict(var_to_twiss: dict[str, tfs.TfsDataFrame]) -> model_index = list(keys).index("0") # Create normalized dispersion and dividing BET by nominal model - NDX_arr = np.divide(resp[columns.index(f"{DISPERSION}X")], np.sqrt(resp[columns.index(f"{BETA}X")])) # noqa: N806 - NDY_arr = np.divide(resp[columns.index(f"{DISPERSION}Y")], np.sqrt(resp[columns.index(f"{BETA}Y")])) # noqa: N806 + normalised_dispersion_x = np.divide( + resp[columns.index(f"{DISPERSION}X")], np.sqrt(resp[columns.index(f"{BETA}X")]) + ) # noqa: N806 + normalised_dispersion_y = np.divide( + resp[columns.index(f"{DISPERSION}Y")], np.sqrt(resp[columns.index(f"{BETA}Y")]) + ) # noqa: N806 resp[columns.index(f"{BETA}X")] = np.divide( resp[columns.index(f"{BETA}X")], - resp[columns.index(f"{BETA}X"), :, model_index][:, np.newaxis] + resp[columns.index(f"{BETA}X"), :, model_index][:, np.newaxis], ) resp[columns.index(f"{BETA}Y")] = np.divide( resp[columns.index(f"{BETA}Y")], - resp[columns.index(f"{BETA}Y"), :, model_index][:, np.newaxis] + resp[columns.index(f"{BETA}Y"), :, model_index][:, np.newaxis], ) # Subtracting nominal model from data resp = np.subtract(resp, resp[:, :, model_index][:, :, np.newaxis]) - 
NDX_arr = np.subtract(NDX_arr, NDX_arr[:, model_index][:, np.newaxis]) # noqa: N806 - NDY_arr = np.subtract(NDY_arr, NDY_arr[:, model_index][:, np.newaxis]) # noqa: N806 + normalised_dispersion_x = np.subtract( + normalised_dispersion_x, normalised_dispersion_x[:, model_index][:, np.newaxis] + ) # noqa: N806 + normalised_dispersion_y = np.subtract( + normalised_dispersion_y, normalised_dispersion_y[:, model_index][:, np.newaxis] + ) # noqa: N806 # Remove difference of nominal model with itself (bunch of zeros) and divide by increment resp = np.delete(resp, model_index, axis=2) - NDX_arr = np.delete(NDX_arr, model_index, axis=1) # noqa: N806 - NDY_arr = np.delete(NDY_arr, model_index, axis=1) # noqa: N806 + normalised_dispersion_x = np.delete(normalised_dispersion_x, model_index, axis=1) # noqa: N806 + normalised_dispersion_y = np.delete(normalised_dispersion_y, model_index, axis=1) # noqa: N806 keys.remove("0") # Divide by increment - NDX_arr = np.divide(NDX_arr, resp[columns.index(f"{INCR}")]) # noqa: N806 - NDY_arr = np.divide(NDY_arr, resp[columns.index(f"{INCR}")]) # noqa: N806 + normalised_dispersion_x = np.divide(normalised_dispersion_x, resp[columns.index(f"{INCR}")]) # noqa: N806 + normalised_dispersion_y = np.divide(normalised_dispersion_y, resp[columns.index(f"{INCR}")]) # noqa: N806 resp = np.divide(resp, resp[columns.index(f"{INCR}")]) - Q_arr = np.column_stack((resp[columns.index(f"{TUNE}1"), 0, :], resp[columns.index(f"{TUNE}2"), 0, :])).T # noqa: N806 + tune_arr = np.column_stack( + (resp[columns.index(f"{TUNE}1"), 0, :], resp[columns.index(f"{TUNE}2"), 0, :]) + ).T # noqa: N806 + # fmt: off with suppress_warnings(ComplexWarning): # raised as everything is complex-type now return { f"{PHASE_ADV}X": pd.DataFrame(data=resp[columns.index(f"{PHASE_ADV}X")], index=bpms, columns=keys).astype(np.float64), @@ -283,29 +302,32 @@ def _create_fullresponse_from_dict(var_to_twiss: dict[str, tfs.TfsDataFrame]) -> f"{BETA}Y": 
pd.DataFrame(data=resp[columns.index(f"{BETA}Y")], index=bpms, columns=keys).astype(np.float64), f"{DISPERSION}X": pd.DataFrame(data=resp[columns.index(f"{DISPERSION}X")], index=bpms, columns=keys).astype(np.float64), f"{DISPERSION}Y": pd.DataFrame(data=resp[columns.index(f"{DISPERSION}Y")], index=bpms, columns=keys).astype(np.float64), - f"{NORM_DISPERSION}X": pd.DataFrame(data=NDX_arr, index=bpms, columns=keys).astype(np.float64), - f"{NORM_DISPERSION}Y": pd.DataFrame(data=NDY_arr, index=bpms, columns=keys).astype(np.float64), + f"{NORM_DISPERSION}X": pd.DataFrame(data=normalised_dispersion_x, index=bpms, columns=keys).astype(np.float64), + f"{NORM_DISPERSION}Y": pd.DataFrame(data=normalised_dispersion_y, index=bpms, columns=keys).astype(np.float64), f"{F1001}R": pd.DataFrame(data=resp[columns.index(f"{F1001}R")], index=bpms, columns=keys).astype(np.float64), f"{F1001}I": pd.DataFrame(data=resp[columns.index(f"{F1001}I")], index=bpms, columns=keys).astype(np.float64), f"{F1010}R": pd.DataFrame(data=resp[columns.index(f"{F1010}R")], index=bpms, columns=keys).astype(np.float64), f"{F1010}I": pd.DataFrame(data=resp[columns.index(f"{F1010}I")], index=bpms, columns=keys).astype(np.float64), - f"{TUNE}": pd.DataFrame(data=Q_arr, index=[f"{TUNE}1", f"{TUNE}2"], columns=keys).astype(np.float64), + f"{TUNE}": pd.DataFrame(data=tune_arr, index=[f"{TUNE}1", f"{TUNE}2"], columns=keys).astype(np.float64), } + # fmt: on def _get_jobfiles(temp_dir: Path, index: int) -> Path: - """ Return names for jobfile and iterfile according to index """ + """Return names for jobfile and iterfile according to index""" return temp_dir / f"job.iterate.{index:d}.madx" def _launch_single_job(inputfile_path: Path) -> None: - """ Function for pool to start a single madx job """ + """Function for pool to start a single madx job""" log_file = inputfile_path.with_name(inputfile_path.name + ".log") madx_wrapper.run_file(inputfile_path, log_file=log_file, cwd=inputfile_path.parent) -def 
_load_and_remove_twiss(var_and_path: tuple[str, Path]) -> tuple[str, tfs.TfsDataFrame]: - """ Function for pool to retrieve results """ +def _load_and_remove_twiss( + var_and_path: tuple[str, Path], +) -> tuple[str, tfs.TfsDataFrame]: + """Function for pool to retrieve results""" (var, path) = var_and_path twissfile = path / f"twiss.{var}" tfs_data = tfs.read(twissfile, index=NAME) @@ -315,7 +337,9 @@ def _load_and_remove_twiss(var_and_path: tuple[str, Path]) -> tuple[str, tfs.Tfs return var, tfs_data -def _add_coupling(dict_of_tfs: dict[str, tfs.TfsDataFrame]) -> dict[str, tfs.TfsDataFrame]: +def _add_coupling( + dict_of_tfs: dict[str, tfs.TfsDataFrame], +) -> dict[str, tfs.TfsDataFrame]: """ For each TfsDataFrame in the input dictionary, computes the coupling RDTs and adds a column for the real and imaginary parts of the computed coupling RDTs. Returns a copy of the input dictionary with @@ -329,7 +353,8 @@ def _add_coupling(dict_of_tfs: dict[str, tfs.TfsDataFrame]) -> dict[str, tfs.Tfs """ result_dict_of_tfs = copy.deepcopy(dict_of_tfs) with timeit(lambda elapsed: LOG.debug(f" Time adding coupling: {elapsed} s")): - for var, tfs_dframe in result_dict_of_tfs.items(): # already copies, so it's safe to act on them + # already copies, so it's safe to act on them + for var, tfs_dframe in result_dict_of_tfs.items(): coupling_rdts_df = coupling_via_cmatrix(tfs_dframe) tfs_dframe[f"{F1001}R"] = np.real(coupling_rdts_df[f"{F1001}"]).astype(np.float64) tfs_dframe[f"{F1001}I"] = np.imag(coupling_rdts_df[f"{F1001}"]).astype(np.float64) diff --git a/omc3/correction/sequence_evaluation.py b/omc3/correction/sequence_evaluation.py index a407206e..83f79602 100644 --- a/omc3/correction/sequence_evaluation.py +++ b/omc3/correction/sequence_evaluation.py @@ -9,6 +9,7 @@ Compare results with case all==0. 
""" + from __future__ import annotations import multiprocessing @@ -44,17 +45,17 @@ def evaluate_for_variables( variable_categories: Sequence[str], order: int = 4, num_proc: int = multiprocessing.cpu_count(), - temp_dir: Path | None = None + temp_dir: Path | None = None, ) -> dict: - """ Generate a dictionary containing response matrices for - beta, phase, dispersion, tune and coupling and saves it to a file. - - Args: - accel_inst (Accelerator): Accelerator Instance. - variable_categories (list): Categories of the variables/knobs to use. (from .json) - order (int or tuple): Max or [min, max] of K-value order to use. - num_proc (int): Number of processes to use in parallel. - temp_dir (Path): temporary directory. If ``None``, uses model_dir. + """Generate a dictionary containing response matrices for + beta, phase, dispersion, tune and coupling and saves it to a file. + + Args: + accel_inst (Accelerator): Accelerator Instance. + variable_categories (list): Categories of the variables/knobs to use. (from .json) + order (int or tuple): Max or [min, max] of K-value order to use. + num_proc (int): Number of processes to use in parallel. + temp_dir (Path): temporary directory. If ``None``, uses model_dir. 
""" LOG.debug("Generating Fullresponse via Mad-X.") with timeit(lambda elapsed: LOG.debug(f" Total time generating fullresponse: {elapsed}s")): @@ -87,7 +88,8 @@ def _generate_madx_jobs( num_proc: int, temp_dir: Path, ) -> None: - """ Generates madx job-files """ + """Generates madx job-files""" + def _assign(var, value): return f"{var:s} = {value:d};\n" @@ -120,14 +122,14 @@ def _do_macro(var): job_content += "\n" # last thing to do: get baseline - if proc_index+1 == num_proc: + if proc_index + 1 == num_proc: job_content += _do_macro("0") _get_jobfile(temp_dir, proc_index).write_text(job_content) -def _call_madx(process_pool: multiprocessing.Pool, temp_dir: str, num_proc: int) -> None: # type: ignore - """ Call madx in parallel """ +def _call_madx(process_pool: multiprocessing.Pool, temp_dir: str, num_proc: int) -> None: # type: ignore + """Call madx in parallel""" LOG.debug(f"Starting {num_proc:d} MAD-X jobs...") madx_jobs = [_get_jobfile(temp_dir, index) for index in range(num_proc)] failed = [LOG.error(fail) for fail in process_pool.map(_launch_single_job, madx_jobs) if fail] @@ -137,9 +139,9 @@ def _call_madx(process_pool: multiprocessing.Pool, temp_dir: str, num_proc: int) def _clean_up(variables: list[str], temp_dir: Path, num_proc: int) -> None: - """ Merge Logfiles and clean temporary outputfiles """ + """Merge Logfiles and clean temporary outputfiles""" LOG.debug("Cleaning output and printing log...") - for var in (variables + ["0"]): + for var in variables + ["0"]: table_file = _get_tablefile(temp_dir, var) with suppress(FileNotFoundError): # py3.8 missing_ok=True table_file.unlink() @@ -166,10 +168,10 @@ def _clean_up(variables: list[str], temp_dir: Path, num_proc: int) -> None: def _load_madx_results( variables: Sequence[str], k_values: list[float], - process_pool: multiprocessing.Pool, # type: ignore + process_pool: multiprocessing.Pool, # type: ignore temp_dir: str, ) -> dict: - """ Load the madx results in parallel and return var-tfs dictionary """ 
+ """Load the madx results in parallel and return var-tfs dictionary""" LOG.debug("Loading Madx Results.") path_and_vars = [] for value in variables: @@ -179,7 +181,7 @@ def _load_madx_results( mapping = dict([(order, {}) for order in k_values] + [(f"{order}L", {}) for order in k_values]) for var, tfs_data in process_pool.map(_load_and_remove_twiss, path_and_vars): for order in k_values: - diff = (tfs_data[order] - base_tfs[order]) + diff = tfs_data[order] - base_tfs[order] mask = diff != 0 # drop zeros, maybe abs(diff) < eps ? k_list = diff.loc[mask] mapping[order][var] = k_list @@ -191,7 +193,7 @@ def _load_madx_results( def _get_orders(order: int) -> Sequence[str]: - """ Returns a list of strings with K-values to be used """ + """Returns a list of strings with K-values to be used""" try: return [f"K{i:d}{s:s}" for i in range(3) for s in ["", "S"]] except TypeError: @@ -199,22 +201,22 @@ def _get_orders(order: int) -> Sequence[str]: def _get_jobfile(folder: Path, index: int) -> Path: - """ Return names for jobfile and iterfile according to index """ + """Return names for jobfile and iterfile according to index""" return folder / f"job.varmap.{index:d}.madx" def _get_tablefile(folder: Path, var: str) -> Path: - """ Return name of the variable-specific table file """ + """Return name of the variable-specific table file""" return folder / f"table.{var}" def _get_surveyfile(folder: Path, index: int) -> Path: - """ Returns the name of the temporary survey file """ + """Returns the name of the temporary survey file""" return folder / f"survey.{index:d}.tmp" def _launch_single_job(inputfile_path: Path): - """ Function for pool to start a single madx job """ + """Function for pool to start a single madx job""" log_file = inputfile_path.with_name(f"{inputfile_path.name}.log") try: madx_wrapper.run_file(inputfile_path, log_file=log_file, cwd=inputfile_path.parent) @@ -225,21 +227,23 @@ def _launch_single_job(inputfile_path: Path): def _load_and_remove_twiss(path_and_var: 
tuple[Path, str]) -> tuple[str, tfs.TfsDataFrame]: - """ Function for pool to retrieve results """ + """Function for pool to retrieve results""" path, var = path_and_var twissfile = _get_tablefile(path, var) tfs_data = tfs.read(twissfile, index="NAME") return var, tfs_data -def _create_basic_job(accel_inst: Accelerator, k_values: list[str], variables: Sequence[str]) -> str: - """ Create the madx-job basics needed - TEMPFILE needs to be replaced in the returned string. +def _create_basic_job( + accel_inst: Accelerator, k_values: list[str], variables: Sequence[str] +) -> str: + """Create the madx-job basics needed + TEMPFILE needs to be replaced in the returned string. """ # get nominal setup from creator creator_class = get_model_creator_class(accel_inst, CreatorType.NOMINAL) creator: ModelCreator = creator_class(accel_inst) - job_content = creator.get_base_madx_script() + job_content = creator.get_base_madx_script(accel_inst.model_dir) # create a survey and save it to a temporary file job_content += ( @@ -297,16 +301,18 @@ def _create_basic_job(accel_inst: Accelerator, k_values: list[str], variables: S def check_varmap_file(accel_inst: Accelerator, vars_categories): - """ Checks on varmap file and creates it if not in model folder. - """ + """Checks on varmap file and creates it if not in model folder.""" if accel_inst.modifiers is None: - raise ValueError("Optics not defined. Please provide modifiers.madx. " - "Otherwise MADX evaluation might be unstable.") + raise ValueError( + "Optics not defined. Please provide modifiers.madx. " + "Otherwise MADX evaluation might be unstable." + ) - varmap_path = Path(accel_inst.model_dir) / f"varmap_{'_'.join(sorted(set(vars_categories)))}.{EXT}" + varmap_path = ( + Path(accel_inst.model_dir) / f"varmap_{'_'.join(sorted(set(vars_categories)))}.{EXT}" + ) if not varmap_path.exists(): - LOG.info(f"Variable mapping '{str(varmap_path):s}' not found. 
" - "Evaluating it via madx.") + LOG.info(f"Variable mapping '{str(varmap_path):s}' not found. Evaluating it via madx.") mapping = evaluate_for_variables(accel_inst, vars_categories) write_varmap(varmap_path, mapping) diff --git a/omc3/model/accelerators/lhc.py b/omc3/model/accelerators/lhc.py index bcf04a22..a39646e0 100644 --- a/omc3/model/accelerators/lhc.py +++ b/omc3/model/accelerators/lhc.py @@ -77,6 +77,7 @@ """ + from __future__ import annotations from pathlib import Path @@ -109,6 +110,7 @@ class Lhc(Accelerator): """ Accelerator class for the Large Hadron Collider. """ + NAME: str = "lhc" LOCAL_REPO_NAME: str = "acc-models-lhc" RE_DICT: dict[str, str] = { @@ -126,11 +128,7 @@ class Lhc(Accelerator): def get_parameters(): params = super(Lhc, Lhc).get_parameters() params.add_parameter( - name="beam", - type=int, - choices=(1, 2), - required=True, - help="Beam to use." + name="beam", type=int, choices=(1, 2), required=True, help="Beam to use." ) params.add_parameter( name="year", @@ -174,16 +172,12 @@ def verify_object(self) -> None: # TODO: Maybe more checks? raise AcceleratorDefinitionError("Crossing on or off not set.") # TODO: write more output prints - LOGGER.debug( - "... verification passed. \nSome information about the accelerator:" - ) + LOGGER.debug("... verification passed. 
\nSome information about the accelerator:") LOGGER.debug(f"Class name {self.__class__.__name__}") LOGGER.debug(f"Beam {self.beam}") LOGGER.debug(f"Beam direction {self.beam_direction}") if self.modifiers: - LOGGER.debug( - f"Modifiers {', '.join([str(m) for m in self.modifiers])}" - ) + LOGGER.debug(f"Modifiers {', '.join([str(m) for m in self.modifiers])}") @property def beam(self) -> int: @@ -204,7 +198,12 @@ def beam(self, value) -> None: def get_lhc_error_dir() -> Path: return LHC_DIR / "systematic_errors" - def get_variables(self, frm: float | None = None, to: float | None = None, classes: Iterable[str] | None = None): + def get_variables( + self, + frm: float | None = None, + to: float | None = None, + classes: Iterable[str] | None = None, + ): corrector_beam_dir = Path(f"correctors_b{self.beam}") all_vars_by_class = load_multiple_jsons( *self._get_corrector_files(corrector_beam_dir / "beta_correctors.json"), @@ -217,8 +216,10 @@ def get_variables(self, frm: float | None = None, to: float | None = None, class # Sort variables by S (nice for comparing different files) return self.sort_variables_by_location(variables, frm, to) - def sort_variables_by_location(self, variables: Iterable[str], frm: float | None = None, to: str | None = None) -> list[str]: - """ Sorts the variables by location and filters them between `frm` and `to`. + def sort_variables_by_location( + self, variables: Iterable[str], frm: float | None = None, to: str | None = None + ) -> list[str]: + """Sorts the variables by location and filters them between `frm` and `to`. If `frm` is larger than `to` it loops back around to the start the accelerator. This is a useful function for the LHC that's why it is "public" but it is not part of the Accelerator-Class Interface. 
@@ -277,8 +278,7 @@ def log_status(self) -> None: LOGGER.info(f"Natural Tune X [{self.nat_tunes[0]:10.3f}]") LOGGER.info(f"Natural Tune Y [{self.nat_tunes[1]:10.3f}]") LOGGER.info( - f"Best Knowledge Model " - f"[{'NO' if self.model_best_knowledge is None else 'OK':>10s}]" + f"Best Knowledge Model [{'NO' if self.model_best_knowledge is None else 'OK':>10s}]" ) if self.excitation == AccExcitationMode.FREE: @@ -291,10 +291,9 @@ def log_status(self) -> None: LOGGER.info(f"> Driven Tune X [{self.drv_tunes[0]:10.3f}]") LOGGER.info(f"> Driven Tune Y [{self.drv_tunes[1]:10.3f}]") - def get_exciter_bpm(self, plane: str, commonbpms: list[str]) -> tuple[str, str]: - """ Returns the name of the BPM closest to the exciter (i.e. ADT or AC-Dipole) - as well as the name of the exciter element. """ + """Returns the name of the BPM closest to the exciter (i.e. ADT or AC-Dipole) + as well as the name of the exciter element.""" beam = self.beam adt = "H.C" if plane == "X" else "V.B" l_r = "L" if ((beam == 1) != (plane == "Y")) else "R" @@ -334,12 +333,12 @@ def get_accel_file(self, filename: Path | str) -> Path: # Private Methods ############################################################## def _get_corrector_elems(self) -> Path: - """ Return the corrector elements file, either from the instance's specific directory, - if it exists, or the default directory. """ + """Return the corrector elements file, either from the instance's specific directory, + if it exists, or the default directory.""" return self._get_corrector_files(f"corrector_elems_b{self.beam}.tfs")[-1] def _get_corrector_files(self, file_name: Path | str) -> list[Path]: - """ Get the corrector files from the default directory AND + """Get the corrector files from the default directory AND the instance's specific directory if it exists AND the model directroy if it exists, in that order. See also discussion in https://github.com/pylhc/omc3/pull/458#discussion_r1764829247 . 
@@ -347,13 +346,13 @@ def _get_corrector_files(self, file_name: Path | str) -> list[Path]: # add file from the default directory (i.e. "model/accelerators/lhc/correctors") default_file = Lhc.DEFAULT_CORRECTORS_DIR / file_name if not default_file.exists(): - msg = (f"Could not find {file_name} in {Lhc.DEFAULT_CORRECTORS_DIR}." - "Something went wrong with the variables getting logic.") + msg = ( + f"Could not find {file_name} in {Lhc.DEFAULT_CORRECTORS_DIR}." + "Something went wrong with the variables getting logic." + ) raise FileNotFoundError(msg) - LOGGER.debug( - f"Default corrector file {file_name} found in {default_file.parent}." - ) + LOGGER.debug(f"Default corrector file {file_name} found in {default_file.parent}.") corrector_files = [default_file] # add file from the accelerator directory (e.g. "model/accelerators/lhc/2024/correctors") @@ -378,7 +377,7 @@ def _get_corrector_files(self, file_name: Path | str) -> list[Path]: return corrector_files def find_modifier(self, modifier: Path | str): - """ Try to find a modifier file, which might be given only by its name. + """Try to find a modifier file, which might be given only by its name. This is looking for full-path, model-dir and in the acc-models-path's optics-dir., """ dirs = [] @@ -390,8 +389,10 @@ def find_modifier(self, modifier: Path | str): return find_file(modifier, dirs=dirs) + # General functions ########################################################## + def _flatten_list(my_list: Iterable) -> list: return [item for sublist in my_list for item in sublist] diff --git a/omc3/model/model_creators/abstract_model_creator.py b/omc3/model/model_creators/abstract_model_creator.py index 2fa81fed..a06e805f 100644 --- a/omc3/model/model_creators/abstract_model_creator.py +++ b/omc3/model/model_creators/abstract_model_creator.py @@ -4,9 +4,11 @@ This module provides the template for all model creators. 
""" + from __future__ import annotations import contextlib +import os import shutil from abc import ABC, abstractmethod from pathlib import Path @@ -49,6 +51,7 @@ from omc3.segment_by_segment.propagables import Propagable from omc3.segment_by_segment.segments import Segment + MADXInputType = Path | str | dict[str, str] | None LOGGER = logging_tools.get_logger(__file__) @@ -59,6 +62,7 @@ class ModelCreator(ABC): Abstract class for the implementation of a model creator. All mandatory methods and convenience functions are defined here. """ + jobfile: str = JOB_MODEL_MADX_NOMINAL # lowercase as it might be changed in subclasses __init__ def __init__(self, accel: Accelerator, logfile: Path = None, acc_models_path: Path = None): @@ -93,54 +97,83 @@ def prepare_options(self, opt): pass def full_run(self): - """ Does the full run: preparation, running madx, post_run. """ + """Does the full run: preparation, running madx, post_run.""" # Prepare model-dir output directory self.prepare_run() # get madx-script with relative output-paths # (model_dir is base-path in get_madx_script) - self.accel.model_dir = Path() - madx_script = self.get_madx_script() - self.accel.model_dir = self.output_dir + madx_script = self.get_madx_script(cwd=self.accel.model_dir) # Run madx to create model run_string( madx_script, output_file=self.accel.model_dir / self.jobfile, log_file=self.logfile, - cwd=self.accel.model_dir + cwd=self.accel.model_dir, ) # Check output and return accelerator instance self.post_run() @abstractmethod - def get_madx_script(self) -> str: + def get_madx_script(self, cwd: Path | str) -> str: """ Returns the ``MAD-X`` script used to create the model (directory). + Args: + cwd (Path | str): The current working directory where the madx script will be run. + Returns: The string of the ``MAD-X`` script used to used to create the model (directory). 
""" pass @abstractmethod - def get_base_madx_script(self) -> str: + def get_base_madx_script(self, cwd: Path | str) -> str: """ Returns the ``MAD-X`` script used to set-up the basic accelerator in MAD-X, without actually creating the twiss-output, as some modifications to the accelerator may come afterwards (depending on which model-creator is calling this). + Args: + cwd (Path | str): The current working directory where the madx script will be run. + Returns: The string of the ``MAD-X`` script used to used to set-up the machine. """ pass + def _madx_path(self, path: Path | str, cwd: Path | str) -> str: + """Return a MAD-X friendly path. + + Absolute paths are returned unchanged so MAD-X can consume them directly. If the provided + path is not absolute, it is expressed relative to the working directory ``cwd``. This + keeps the generated scripts consistent regardless of the caller's current directory. + """ + + target = Path(path) + if target.is_absolute(): + return target.as_posix() + + cwd_path = Path(cwd) + cwd_norm = os.path.normpath(str(cwd_path)) + target_norm = os.path.normpath(str(cwd_path / target)) + + try: + rel_path = os.path.relpath(target_norm, cwd_norm) + except ValueError as exc: # pragma: no cover - platform dependent edge case + raise AcceleratorDefinitionError( + f"Path '{target}' cannot be expressed relative to working directory '{cwd_path}'." + ) from exc + + return Path(rel_path).as_posix() + def get_update_deltap_script(self, deltap: float | str) -> str: - """ Get the madx snippet that updates the dpp in the machine. + """Get the madx snippet that updates the dpp in the machine. Args: deltap (float | str): The dpp to update the machine to. 
- """ + """ raise NotImplementedError("Update dpp not implemented for this model creator.") def prepare_run(self) -> None: @@ -184,8 +217,11 @@ def post_run(self) -> None: for filename in self.files_to_check: with contextlib.suppress(KeyError): # KeyError if just a file to check, not a file with attribute - setattr(self.accel, attribute_map[filename], tfs.read(self.accel.model_dir / filename, index=NAME)) - + setattr( + self.accel, + attribute_map[filename], + tfs.read(self.accel.model_dir / filename, index=NAME), + ) @property def files_to_check(self) -> list[str]: @@ -202,7 +238,6 @@ def files_to_check(self) -> list[str]: } return check_files + excitation_map[self.accel.excitation] - @staticmethod def _check_files_exist(dir_: Path | str, files: Sequence[str]) -> None: """ @@ -268,16 +303,16 @@ def prepare_symlink(self): ] def prepare_modifiers(self): - """ Loop over the modifiers and make them full paths if found. """ + """Loop over the modifiers and make them full paths if found.""" accel: Accelerator = self.accel if accel.modifiers is not None: accel.modifiers = [accel.find_modifier(m) for m in accel.modifiers] @staticmethod def _get_select_command(pattern: str | None = None, indent: int = 0): - """ Returns a basic select command with the given pattern, the default columns and correct indentation. """ + """Returns a basic select command with the given pattern, the default columns and correct indentation.""" space = " " * indent - pattern_str = f" pattern=\"{pattern}\"," if pattern is not None else "" + pattern_str = f' pattern="{pattern}",' if pattern is not None else "" return ( f"{space}select, flag=twiss, clear;\n" f"{space}select, flag=twiss,{pattern_str} column=" @@ -288,7 +323,7 @@ def _get_select_command(pattern: str | None = None, indent: int = 0): class SegmentCreator(ModelCreator, ABC): - """ Model creator for Segments, to be used in the Segment-by-Segment algorithm. + """Model creator for Segments, to be used in the Segment-by-Segment algorithm. 
These segments propagate the measured values from the beginning of the segment to the end. This only handles the MAD-X part of things. @@ -298,12 +333,20 @@ class SegmentCreator(ModelCreator, ABC): The output is stored in the `twiss_forward` and `twiss_backward` files, which in turn can be used for further processing by the implemented Propagables. """ + jobfile = None # set in init _sequence_name: str = None # to be set by any accelerator using the default `get_madx_script` - def __init__(self, accel: Accelerator, segment: Segment, measurables: Iterable[Propagable], - corrections: MADXInputType = None, *args, **kwargs): - """ Creates Segment of a model. """ + def __init__( + self, + accel: Accelerator, + segment: Segment, + measurables: Iterable[Propagable], + corrections: MADXInputType = None, + *args, + **kwargs, + ): + """Creates Segment of a model.""" LOGGER.debug("Initializing Segment Creator") super().__init__(accel, *args, **kwargs) self.segment = segment @@ -335,8 +378,13 @@ def _create_general_macros(self): shutil.copy(OMC3_MADX_MACROS_DIR / GENERAL_MACROS, general_macros_path) def _clean_models(self): - """ Remove models from previous runs. """ - for twiss_file in (self.twiss_forward, self.twiss_forward_corrected, self.twiss_backward, self.twiss_backward_corrected): + """Remove models from previous runs.""" + for twiss_file in ( + self.twiss_forward, + self.twiss_forward_corrected, + self.twiss_backward, + self.twiss_backward_corrected, + ): output_twiss: Path = self.output_dir / twiss_file output_twiss.unlink(missing_ok=True) @@ -372,9 +420,13 @@ def _create_corrections_file(self): raise NotImplementedError("Could not determine type of corrections. 
Aborting.") - def get_madx_script(self): - accel: Accelerator = self.accel - madx_script = self.get_base_madx_script() + def get_madx_script(self, cwd: Path | str) -> str: + madx_script = self.get_base_madx_script(cwd) + + macros_path = self._madx_path(self.output_dir / MACROS_DIR / GENERAL_MACROS, cwd) + measurement_path = self._madx_path(self.output_dir / self.measurement_madx, cwd) + twiss_forward_path = self._madx_path(self.output_dir / self.twiss_forward, cwd) + twiss_backward_path = self._madx_path(self.output_dir / self.twiss_backward, cwd) if self._sequence_name is None: raise ValueError( @@ -382,57 +434,71 @@ def get_madx_script(self): f"the derived class '{self.__class__.__name__}'" " must set the '_sequence_name' attribute.\n" "This error should only be encountered during development. " - "If you encounter it later, please open an issue!") - - madx_script += "\n".join([ - "", - f"! ----- Segment-by-Segment propagation for {self.segment.name} -----", - "", - f"call, file = '{accel.model_dir / MACROS_DIR / GENERAL_MACROS}';", - "", - "! 
Cycle the sequence to avoid negative length.", - f"seqedit, sequence={self._sequence_name};", - " flatten;", - f" cycle, start={self.segment.start};", - "endedit;", - "", - f"use, sequence = {self._sequence_name};", - "", - "twiss;", - "exec, save_initial_and_final_values(", - f" {self._sequence_name},", - f" {self.segment.start},", - f" {self.segment.end}, ", - f" \"{accel.model_dir / self.measurement_madx!s}\",", - " biniSbSParams,", - " bendSbSParams", - ");", - "", - "exec, extract_segment_sequence(", - f" {self._sequence_name},", - " forward_SbSSEQ,", - " backward_SbSSEQ,", - f" {self.segment.start},", - f" {self.segment.end},", - ");", - "", - "beam, particle = proton, sequence=forward_SbSSEQ;", - "beam, particle = proton, sequence=backward_SbSSEQ;", - "", - f"exec, twiss_segment(forward_SbSSEQ, \"{self.twiss_forward!s}\", biniSbSParams);", - f"exec, twiss_segment(backward_SbSSEQ, \"{self.twiss_backward!s}\", bendSbSParams);", - "", - ]) + "If you encounter it later, please open an issue!" + ) - if self.corrections is not None: - madx_script += "\n".join([ - f"call, file=\"{self.corrections_madx!s}\";", - f"exec, twiss_segment(forward_SbSSEQ, " - f"\"{self.twiss_forward_corrected}\", biniSbSParams);", - f"exec, twiss_segment(backward_SbSSEQ, " - f"\"{self.twiss_backward_corrected}\", bendSbSParams);", + madx_script += "\n".join( + [ + "", + f"! ----- Segment-by-Segment propagation for {self.segment.name} -----", + "", + f"call, file = '{macros_path}';", + "", + "! 
Cycle the sequence to avoid negative length.", + f"seqedit, sequence={self._sequence_name};", + " flatten;", + f" cycle, start={self.segment.start};", + "endedit;", + "", + f"use, sequence = {self._sequence_name};", + "", + "twiss;", + "exec, save_initial_and_final_values(", + f" {self._sequence_name},", + f" {self.segment.start},", + f" {self.segment.end}, ", + f' "{measurement_path}",', + " biniSbSParams,", + " bendSbSParams", + ");", + "", + "exec, extract_segment_sequence(", + f" {self._sequence_name},", + " forward_SbSSEQ,", + " backward_SbSSEQ,", + f" {self.segment.start},", + f" {self.segment.end},", + ");", + "", + "beam, particle = proton, sequence=forward_SbSSEQ;", + "beam, particle = proton, sequence=backward_SbSSEQ;", + "", + f'exec, twiss_segment(forward_SbSSEQ, "{twiss_forward_path}", biniSbSParams);', + f'exec, twiss_segment(backward_SbSSEQ, "{twiss_backward_path}", bendSbSParams);', "", - ]) + ] + ) + + if self.corrections is not None: + corrections_path = self._madx_path(self.output_dir / self.corrections_madx, cwd) + twiss_forward_corr_path = self._madx_path( + self.output_dir / self.twiss_forward_corrected, + cwd, + ) + twiss_backward_corr_path = self._madx_path( + self.output_dir / self.twiss_backward_corrected, + cwd, + ) + madx_script += "\n".join( + [ + f'call, file="{corrections_path}";', + f"exec, twiss_segment(forward_SbSSEQ, " + f'"{twiss_forward_corr_path}", biniSbSParams);', + f"exec, twiss_segment(backward_SbSSEQ, " + f'"{twiss_backward_corr_path}", bendSbSParams);', + "", + ] + ) return madx_script @@ -447,7 +513,13 @@ def files_to_check(self) -> list[str]: class CorrectionModelCreator(ModelCreator): jobfile = None # set in __init__ - def __init__(self, accel: Accelerator, twiss_out: Path | str, corr_files: Sequence[Path | str], update_dpp: bool = False): + def __init__( + self, + accel: Accelerator, + twiss_out: Path | str, + corr_files: Sequence[Path | str], + update_dpp: bool = False, + ): """Model creator for the corrected/matched 
model of the LHC. Args: @@ -462,12 +534,12 @@ def __init__(self, accel: Accelerator, twiss_out: Path | str, corr_files: Sequen # use absolute paths to force files into twiss_out directory instead of model-dir self.jobfile = self.twiss_out.parent.absolute() / f"job.create_{self.twiss_out.stem}.madx" - self.logfile= self.twiss_out.parent.absolute() / f"job.create_{self.twiss_out.stem}.log" + self.logfile = self.twiss_out.parent.absolute() / f"job.create_{self.twiss_out.stem}.log" self.corr_files = corr_files self.update_dpp = update_dpp - def get_madx_script(self) -> str: - """ Get the madx script for the correction model creator, which updates the model after correcion. + def get_madx_script(self, cwd: Path | str) -> str: + """Get the madx script for the correction model creator, which updates the model after correcion. This is a basic implementation which does not update the dpp, but should work for generic accelerators. """ @@ -476,18 +548,16 @@ def get_madx_script(self) -> str: f"Updating the dpp is not implemented for correction model creator of {self.accel.NAME}." ) - madx_script = self.get_base_madx_script() # do not get_madx_script as we don't need the uncorrected output. + # do not get_madx_script as we don't need the uncorrected output. 
+ madx_script = self.get_base_madx_script(cwd) for corr_file in self.corr_files: # Load the corrections - madx_script += f"call, file = '{corr_file!s}';\n" + madx_script += f"call, file = '{self._madx_path(corr_file, cwd)}';\n" - madx_script += ( - f"{self._get_select_command()}\n" - f"twiss, file = {self.twiss_out!s};\n" - ) + twiss_out_path = self._madx_path(self.twiss_out, cwd) + madx_script += f"{self._get_select_command()}\ntwiss, file = {twiss_out_path};\n" return madx_script - @property def files_to_check(self) -> list[str]: return [self.twiss_out, self.jobfile, self.logfile] @@ -495,6 +565,7 @@ def files_to_check(self) -> list[str]: # Helper functions ------------------------------------------------------------- + def check_folder_choices( parent: Path, msg: str, @@ -502,7 +573,7 @@ def check_folder_choices( list_choices: bool = False, predicate=iotools.always_true, stem_only: bool = False, - ) -> Path: +) -> Path: """ A helper function that scans a selected folder for children, which will then be displayed as possible choices. This funciton allows the model-creator to get only the file/folder names, check @@ -542,4 +613,6 @@ def check_folder_choices( if list_choices: for choice in choices: print(choice) - raise AcceleratorDefinitionError(f"{msg}.\nSelected: '{selection}'.\nChoices: [{', '.join(choices)}]") + raise AcceleratorDefinitionError( + f"{msg}.\nSelected: '{selection}'.\nChoices: [{', '.join(choices)}]" + ) diff --git a/omc3/model/model_creators/lhc_model_creator.py b/omc3/model/model_creators/lhc_model_creator.py index 4a5056ba..bbabb3f8 100644 --- a/omc3/model/model_creators/lhc_model_creator.py +++ b/omc3/model/model_creators/lhc_model_creator.py @@ -4,6 +4,7 @@ This module provides convenience functions for model creation of the ``LHC``. 
""" + from __future__ import annotations import logging @@ -47,6 +48,7 @@ SegmentCreator, check_folder_choices, ) +from omc3.nxcals.constants import EXTRACTED_MQTS_FILENAME from omc3.optics_measurements.constants import NAME from omc3.utils.iotools import create_dirs, get_check_by_regex_func, get_check_suffix_func @@ -59,29 +61,42 @@ def _b2_columns() -> list[str]: - cols_outer = [f"{KP}{num}{S}L" for KP in ( - "K", "P") for num in range(21) for S in ("", "S")] - cols_middle = ["DX", "DY", "DS", "DPHI", "DTHETA", "DPSI", "MREX", "MREY", "MREDX", "MREDY", - "AREX", "AREY", "MSCALX", "MSCALY", "RFM_FREQ", "RFM_HARMON", "RFM_LAG"] + cols_outer = [f"{KP}{num}{S}L" for KP in ("K", "P") for num in range(21) for S in ("", "S")] + cols_middle = [ + "DX", + "DY", + "DS", + "DPHI", + "DTHETA", + "DPSI", + "MREX", + "MREY", + "MREDX", + "MREDY", + "AREX", + "AREY", + "MSCALX", + "MSCALY", + "RFM_FREQ", + "RFM_HARMON", + "RFM_LAG", + ] return cols_outer[:42] + cols_middle + cols_outer[42:] class LhcModelCreator(ModelCreator): - def __init__(self, accel: Lhc, *args, **kwargs): LOGGER.debug("Initializing LHC Model Creator") super().__init__(accel, *args, **kwargs) def prepare_options(self, opt): - """ Use the fetcher to list choices if requested. """ + """Use the fetcher to list choices if requested.""" accel: Lhc = self.accel # Set the fetcher paths --- if opt.fetch == Fetcher.PATH: if opt.path is None: - raise AcceleratorDefinitionError( - "Path fetcher chosen, but no path provided." 
- ) + raise AcceleratorDefinitionError("Path fetcher chosen, but no path provided.") acc_model_path = Path(opt.path) elif opt.fetch == Fetcher.AFS: @@ -91,7 +106,7 @@ def prepare_options(self, opt): msg="No optics tag (flag --year) given", selection=accel.year, list_choices=opt.list_choices, - predicate=Path.is_dir + predicate=Path.is_dir, ) # raises AcceleratorDefintionError if not valid choice else: LOGGER.warning( @@ -103,13 +118,15 @@ def prepare_options(self, opt): return # list optics choices --- - if opt.list_choices: # assumes we only want to list optics. Invoked even if modifiers are given! + if ( + opt.list_choices + ): # assumes we only want to list optics. Invoked even if modifiers are given! check_folder_choices( acc_model_path / OPTICS_SUBDIR, msg="No modifier given", selection=None, # TODO: could check if user made valid choice list_choices=opt.list_choices, - predicate=get_check_suffix_func(".madx") + predicate=get_check_suffix_func(".madx"), ) # raises AcceleratorDefinitionError # Set acc model path to be used in model creator --- @@ -164,35 +181,41 @@ def check_accelerator_instance(self) -> None: f"{', '.join([str(accel.model_dir / modifier) for modifier in inexistent_modifiers])}" ) - def get_madx_script(self) -> str: # nominal - """ Get madx script to create a LHC model.""" + def get_madx_script(self, cwd: Path | str) -> str: # nominal + """Get madx script to create a LHC model.""" accel: Lhc = self.accel + cwd = Path(cwd) - madx_script = self.get_base_madx_script() + madx_script = self.get_base_madx_script(cwd) + twiss_dat_path = self._madx_path(accel.model_dir / TWISS_DAT, cwd) + twiss_elements_path = self._madx_path(accel.model_dir / TWISS_ELEMENTS_DAT, cwd) madx_script += ( - f"exec, do_twiss_monitors(LHCB{accel.beam}, '{accel.model_dir / TWISS_DAT}', {accel.dpp});\n" - f"exec, do_twiss_elements(LHCB{accel.beam}, '{accel.model_dir / TWISS_ELEMENTS_DAT}', {accel.dpp});\n" + f"exec, do_twiss_monitors(LHCB{accel.beam}, '{twiss_dat_path}', 
{accel.dpp});\n" + f"exec, do_twiss_elements(LHCB{accel.beam}, '{twiss_elements_path}', {accel.dpp});\n" ) if accel.excitation != AccExcitationMode.FREE or accel.drv_tunes is not None: # allow user to modify script and enable excitation, if driven tunes are given use_acd = accel.excitation == AccExcitationMode.ACD use_adt = accel.excitation == AccExcitationMode.ADT + twiss_ac_path = self._madx_path(accel.model_dir / TWISS_AC_DAT, cwd) + twiss_adt_path = self._madx_path(accel.model_dir / TWISS_ADT_DAT, cwd) madx_script += ( f"use_acd={use_acd:d};\n" f"use_adt={use_adt:d};\n" f"if(use_acd == 1){{\n" - f"exec, twiss_ac_dipole({accel.nat_tunes[0]}, {accel.nat_tunes[1]}, {accel.drv_tunes[0]}, {accel.drv_tunes[1]}, {accel.beam}, '{accel.model_dir / TWISS_AC_DAT}', {accel.dpp});\n" + f"exec, twiss_ac_dipole({accel.nat_tunes[0]}, {accel.nat_tunes[1]}, {accel.drv_tunes[0]}, {accel.drv_tunes[1]}, {accel.beam}, '{twiss_ac_path}', {accel.dpp});\n" f"}}else if(use_adt == 1){{\n" - f"exec, twiss_adt({accel.nat_tunes[0]}, {accel.nat_tunes[1]}, {accel.drv_tunes[0]}, {accel.drv_tunes[1]}, {accel.beam}, '{accel.model_dir / TWISS_ADT_DAT}', {accel.dpp});\n" + f"exec, twiss_adt({accel.nat_tunes[0]}, {accel.nat_tunes[1]}, {accel.drv_tunes[0]}, {accel.drv_tunes[1]}, {accel.beam}, '{twiss_adt_path}', {accel.dpp});\n" f"}}\n" ) return madx_script - def get_base_madx_script(self) -> str: - """ Returns the base LHC madx script.""" + def get_base_madx_script(self, cwd: Path | str) -> str: + """Returns the base LHC madx script.""" accel: Lhc = self.accel + cwd = Path(cwd) - madx_script = self._get_sequence_initialize_script() + madx_script = self._get_sequence_initialize_script(cwd) if self._uses_ats_knobs(): madx_script += "\n! 
----- Matching Knobs -----\n" LOGGER.debug( @@ -201,38 +224,41 @@ def get_base_madx_script(self) -> str: madx_script += f"exec, match_tunes_ats({accel.nat_tunes[0]}, {accel.nat_tunes[1]}, {accel.beam});\n" madx_script += f"exec, coupling_knob_ats({accel.beam});\n" else: - madx_script += f"exec, match_tunes({accel.nat_tunes[0]}, {accel.nat_tunes[1]}, {accel.beam});\n" + madx_script += ( + f"exec, match_tunes({accel.nat_tunes[0]}, {accel.nat_tunes[1]}, {accel.beam});\n" + ) madx_script += f"exec, coupling_knob({accel.beam});\n" return madx_script - def _get_sequence_initialize_script(self) -> str: - """ Returns the LHC sequence initialization script. + def _get_sequence_initialize_script(self, cwd: Path) -> str: + """Returns the LHC sequence initialization script. This is split up here from the matching (in the base-script), to accompany the needs of the Best Knowledge Model Creator, see below. """ accel: Lhc = self.accel + acc_model_path = None + if accel.acc_model_path is not None: + acc_model_path = Path(self._madx_path(accel.acc_model_path, cwd)) madx_script = ( f"{self._get_madx_script_info_comments()}\n\n" - f"call, file = '{accel.model_dir / MACROS_DIR / GENERAL_MACROS}';\n" - f"call, file = '{accel.model_dir / MACROS_DIR / LHC_MACROS}';\n" + f"call, file = '{self._madx_path(accel.model_dir / MACROS_DIR / GENERAL_MACROS, cwd)}';\n" + f"call, file = '{self._madx_path(accel.model_dir / MACROS_DIR / LHC_MACROS, cwd)}';\n" ) madx_script += f"{MADX_ENERGY_VAR} = {accel.energy};\n" madx_script += "exec, define_nominal_beams();\n\n" if self._uses_run3_macros(): - LOGGER.debug( - "According to the optics year, Run 3 versions of the macros will be used" - ) - madx_script += ( - f"call, file = '{accel.model_dir / MACROS_DIR / LHC_MACROS_RUN3}';\n" - ) + LOGGER.debug("According to the optics year, Run 3 versions of the macros will be used") + madx_script += f"call, file = '{self._madx_path(accel.model_dir / MACROS_DIR / LHC_MACROS_RUN3, cwd)}';\n" madx_script += "\n! 
----- Calling Sequence -----\n" - madx_script += "option, -echo; ! suppress output from base sequence loading to keep the log small\n" - madx_script += self._get_madx_main_sequence_loading() + madx_script += ( + "option, -echo; ! suppress output from base sequence loading to keep the log small\n" + ) + madx_script += self._get_madx_main_sequence_loading(acc_model_path) madx_script += "\noption, echo; ! re-enable output to see the optics settings\n" madx_script += "\n! ---- Call optics and other modifiers ----\n" @@ -240,11 +266,11 @@ def _get_sequence_initialize_script(self) -> str: if accel.modifiers is not None: # if modifier is an absolute path, go there, otherwise use the path refers from model_dir madx_script += "".join( - f"call, file = '{accel.model_dir / modifier}'; {MODIFIER_TAG}\n" + f"call, file = '{self._madx_path(modifier, cwd)}'; {MODIFIER_TAG}\n" for modifier in accel.modifiers ) - if accel.year in ['2012', '2015', '2016', '2017', '2018', '2021', 'hllhc1.3']: + if accel.year in ["2012", "2015", "2016", "2017", "2018", "2021", "hllhc1.3"]: # backwards compatibility with pre acc-models optics # WARNING: This might override values extracted via the knob-extractor. madx_script += ( @@ -258,12 +284,15 @@ def _get_sequence_initialize_script(self) -> str: ) if accel.acc_model_path is not None: - remove_symmetry_knob_madx = accel.acc_model_path / LHC_REMOVE_TRIPLET_SYMMETRY_RELPATH - if remove_symmetry_knob_madx.exists(): # alternatively check if year != 2018/2021 + remove_symmetry_knob_abs = ( + Path(accel.acc_model_path) / LHC_REMOVE_TRIPLET_SYMMETRY_RELPATH + ) + if remove_symmetry_knob_abs.exists(): # alternatively check if year != 2018/2021 + remove_symmetry_knob_path = self._madx_path(remove_symmetry_knob_abs, cwd) madx_script += ( - "\n! ----- Remove IR symmetry definitions -----\n" - f"\ncall, file=\"{remove_symmetry_knob_madx!s}\"; " - "! removes 'ktqx.r1 := -ktqx.l1'-type issues\n" + "\n! 
----- Remove IR symmetry definitions -----\n" + f'\ncall, file="{remove_symmetry_knob_path}"; ' + "! removes 'ktqx.r1 := -ktqx.l1'-type issues\n" ) madx_script += ( @@ -274,7 +303,7 @@ def _get_sequence_initialize_script(self) -> str: return madx_script def get_update_deltap_script(self, deltap: float | str) -> str: - """ Update the dpp in the LHC. + """Update the dpp in the LHC. Args: deltap (float | str): The dpp to update the LHC to. @@ -286,11 +315,10 @@ def get_update_deltap_script(self, deltap: float | str) -> str: madx_script = ( f"twiss, deltap={deltap};\n" "correct, mode=svd;\n\n" - "! The same as match_tunes, but include deltap in the matching\n" f"exec, find_complete_tunes({accel.nat_tunes[0]}, {accel.nat_tunes[1]}, {accel.beam});\n" f"match, deltap={deltap};\n" - ) # Works better when split up + ) # Works better when split up madx_script += "\n".join([f"vary, name={knob};" for knob in self.get_tune_knobs()]) + "\n" madx_script += ( "constraint, range=#E, mux=total_qx, muy=total_qy;\n" @@ -302,23 +330,19 @@ def get_update_deltap_script(self, deltap: float | str) -> str: def _get_madx_script_info_comments(self) -> str: accel: Lhc = self.accel info_comments = ( - f'title, "LHC Model created by omc3";\n' - f"! Model directory {Path(accel.model_dir).absolute()}\n" + f'title, "LHC Model created by omc3";\n' + f"! Model directory {Path(accel.model_dir).absolute()}\n" ) if accel.acc_model_path is not None: - info_comments += ( - f"! Acc-Models {Path(accel.acc_model_path).absolute()}\n" - ) + info_comments += f"! Acc-Models {Path(accel.acc_model_path).absolute()}\n" info_comments += ( - f"! LHC year {accel.year}\n" - f"! Natural Tune X {accel.nat_tunes[0]:10.3f}\n" - f"! Natural Tune Y {accel.nat_tunes[1]:10.3f}\n" - f"! Best Knowledge {'NO' if accel.model_best_knowledge is None else 'YES':>10s}\n" + f"! LHC year {accel.year}\n" + f"! Natural Tune X {accel.nat_tunes[0]:10.3f}\n" + f"! Natural Tune Y {accel.nat_tunes[1]:10.3f}\n" + f"! 
Best Knowledge {'NO' if accel.model_best_knowledge is None else 'YES':>10s}\n" ) if accel.excitation == AccExcitationMode.FREE: - info_comments += ( - f"! Excitation {'NO':>10s}\n" - ) + info_comments += f"! Excitation {'NO':>10s}\n" else: info_comments += ( f"! Excitation {'ACD' if accel.excitation == AccExcitationMode.ACD else 'ADT':>10s}\n" @@ -327,13 +351,15 @@ def _get_madx_script_info_comments(self) -> str: ) return info_comments - def _get_madx_main_sequence_loading(self) -> str: + def _get_madx_main_sequence_loading(self, acc_model_path: Path) -> str: accel: Lhc = self.accel - if accel.acc_model_path is not None: - main_call = f"call, file = '{accel.acc_model_path / 'lhc.seq'}';" + if acc_model_path is not None: + main_seq_rel = (acc_model_path / "lhc.seq").as_posix() + main_call = f"call, file = '{main_seq_rel}';" if accel.year.startswith("hl"): - main_call += f"\ncall, file = '{accel.acc_model_path / 'hllhc_sequence.madx'}';" + hllhc_rel = (acc_model_path / "hllhc_sequence.madx").as_posix() + main_call += f"\ncall, file = '{hllhc_rel}';" return main_call try: return self._get_call_main() @@ -387,7 +413,6 @@ def _get_call_main(self) -> str: # --------------------------------------------------------------------- class LhcBestKnowledgeCreator(LhcModelCreator): - EXTRACTED_MQTS_FILENAME: str = "extracted_mqts.str" CORRECTIONS_FILENAME: str = "corrections.madx" jobfile: str = JOB_MODEL_MADX_BEST_KNOWLEDGE files_to_check: tuple[str] = (TWISS_BEST_KNOWLEDGE_DAT, TWISS_ELEMENTS_BEST_KNOWLEDGE_DAT) @@ -439,7 +464,9 @@ def prepare_run(self): b2_stem_path = Path(f"{accel.b2_errors!s}.fakesuffix") # makes `with_suffix`` work # hint: if b2_stem_path is absolute, the following AFS parts are ignored - b2_error_path = AFS_B2_ERRORS_ROOT / f"Beam{accel.beam}" / b2_stem_path.with_suffix(".errors") + b2_error_path = ( + AFS_B2_ERRORS_ROOT / f"Beam{accel.beam}" / b2_stem_path.with_suffix(".errors") + ) b2_madx_path = AFS_B2_ERRORS_ROOT / f"Beam{accel.beam}" / 
b2_stem_path.with_suffix(".madx") # copy madx --- @@ -462,45 +489,61 @@ def prepare_run(self): save_index=NAME, ) - def get_madx_script(self) -> str: + def get_madx_script(self, cwd: Path | str) -> str: accel: Lhc = self.accel - madx_script = self.get_base_madx_script() + cwd = Path(cwd) + madx_script = self.get_base_madx_script(cwd) madx_script += "\n! ----- Load MQTs -----\n" - mqts_file = accel.model_dir / self.EXTRACTED_MQTS_FILENAME + mqts_file = accel.model_dir / EXTRACTED_MQTS_FILENAME if mqts_file.exists(): - madx_script += f"call, file = '{str(mqts_file.absolute())}';\n" + mqts_path = self._madx_path(mqts_file, cwd) + madx_script += f"call, file = '{mqts_path}';\n" madx_script += "\n! ----- Output Files -----\n" + twiss_bk_path = self._madx_path(accel.model_dir / TWISS_BEST_KNOWLEDGE_DAT, cwd) + twiss_bk_elements_path = self._madx_path( + accel.model_dir / TWISS_ELEMENTS_BEST_KNOWLEDGE_DAT, + cwd, + ) madx_script += ( - f"exec, do_twiss_monitors(LHCB{accel.beam}, '{accel.model_dir / TWISS_BEST_KNOWLEDGE_DAT}', {accel.dpp});\n" - f"exec, do_twiss_elements(LHCB{accel.beam}, '{accel.model_dir / TWISS_ELEMENTS_BEST_KNOWLEDGE_DAT}', {accel.dpp});\n" + f"exec, do_twiss_monitors(LHCB{accel.beam}, '{twiss_bk_path}', {accel.dpp});\n" + f"exec, do_twiss_elements(LHCB{accel.beam}, '{twiss_bk_elements_path}', {accel.dpp});\n" ) return madx_script - def get_base_madx_script(self) -> str: + def get_base_madx_script(self, cwd: Path | str) -> str: accel: Lhc = self.accel + cwd = Path(cwd) # don't load the super().get_base_madx_script as this also matches the tunes at the end, # which we skip here as we are using the data from the machine. - madx_script = self._get_sequence_initialize_script() + madx_script = self._get_sequence_initialize_script(cwd) madx_script += ( f"\n! 
----- For Best Knowledge Model -----\n" - f"readmytable, file = '{accel.model_dir / B2_ERRORS_TFS}', table=errtab;\n" + f"readmytable, file = '{self._madx_path(accel.model_dir / B2_ERRORS_TFS, cwd)}', table=errtab;\n" f"seterr, table=errtab;\n" - f"call, file = '{accel.model_dir / B2_SETTINGS_MADX}';\n" + f"call, file = '{self._madx_path(accel.model_dir / B2_SETTINGS_MADX, cwd)}';\n" ) return madx_script -class LhcCorrectionModelCreator(CorrectionModelCreator, LhcModelCreator): # ------------------------------------------- +class LhcCorrectionModelCreator( + CorrectionModelCreator, LhcModelCreator +): # ------------------------------------------- """ Creates an updated model from multiple changeparameters inputs (used in iterative correction). """ - def __init__(self, accel: Lhc, twiss_out: Path | str, corr_files: Sequence[Path | str], update_dpp: bool = False): + def __init__( + self, + accel: Lhc, + twiss_out: Path | str, + corr_files: Sequence[Path | str], + update_dpp: bool = False, + ): """Model creator for the corrected/matched model of the LHC. Inheritance (i.e. __init__ calls) from here should be as follows: @@ -516,25 +559,28 @@ def __init__(self, accel: Lhc, twiss_out: Path | str, corr_files: Sequence[Path LOGGER.debug("Initializing LHC Correction Model Creator") super().__init__(accel, twiss_out, corr_files, update_dpp) - def get_madx_script(self) -> str: + def get_madx_script(self, cwd: Path | str) -> str: """Get the madx script for the correction model creator, which updates the model after correcion.""" accel: Lhc = self.accel + cwd = Path(cwd) # do not super().get_madx_script as we don't need the uncorrected output. 
- madx_script = self.get_base_madx_script() + madx_script = self.get_base_madx_script(cwd) # First set the dpp to the value in the accelerator model madx_script += f"{ORBIT_DPP} = {accel.dpp};\n" for corr_file in self.corr_files: # Load the corrections, can also update ORBIT_DPP - file = Path(corr_file).absolute() - madx_script += f"call, file = '{file!s}';\n" + madx_script += f"call, file = '{self._madx_path(corr_file, cwd)}';\n" # If we are doing orbit correction, we need to ensure that a correct and a match is done if self.update_dpp: madx_script += self.get_update_deltap_script(deltap=ORBIT_DPP) - madx_script += f'exec, do_twiss_elements(LHCB{accel.beam}, "{self.twiss_out.absolute()!s}", {ORBIT_DPP});\n' + twiss_out_path = self._madx_path(self.output_dir / self.twiss_out, cwd) + madx_script += ( + f'exec, do_twiss_elements(LHCB{accel.beam}, "{twiss_out_path}", {ORBIT_DPP});\n' + ) return madx_script def prepare_run(self) -> None: @@ -557,59 +603,75 @@ def files_to_check(self) -> list[str]: class LhcSegmentCreator(SegmentCreator, LhcModelCreator): - - def get_madx_script(self): + def get_madx_script(self, cwd: Path | str) -> str: accel: Lhc = self.accel - madx_script = self.get_base_madx_script() - - madx_script += "\n".join([ - "", - f"! ----- Segment-by-Segment propagation for {self.segment.name} -----", - "", - "! 
Cycle the sequence to avoid negative length.", - f"seqedit, sequence=LHCB{accel.beam};", - "flatten;", - f"cycle, start={self.segment.start};", - "endedit;", - "", - f"use, period = LHCB{accel.beam};", - "option, echo;", - "", - "twiss;", - "exec, save_initial_and_final_values(", - f" LHCB{accel.beam},", - f" {self.segment.start},", - f" {self.segment.end}, ", - f" \"{accel.model_dir / self.measurement_madx!s}\",", - f" biniLHCB{accel.beam},", - f" bendLHCB{accel.beam}", - ");", - "", - "exec, extract_segment_sequence(", - f" LHCB{accel.beam},", - f" forward_LHCB{accel.beam},", - f" backward_LHCB{accel.beam},", - f" {self.segment.start},", - f" {self.segment.end},", - ");", - "", - f"beam, particle = proton, sequence=forward_LHCB{accel.beam}, energy = {MADX_ENERGY_VAR}, bv={accel.beam_direction:d};", - f"beam, particle = proton, sequence=backward_LHCB{accel.beam}, energy = {MADX_ENERGY_VAR}, bv={accel.beam_direction:d};", - "", - f"exec, twiss_segment(forward_LHCB{accel.beam}, \"{self.twiss_forward!s}\", biniLHCB{accel.beam});", - f"exec, twiss_segment(backward_LHCB{accel.beam}, \"{self.twiss_backward!s}\", bendLHCB{accel.beam});", - "", - ]) + cwd = Path(cwd) + madx_script = self.get_base_madx_script(cwd) + measurement_path = self._madx_path(self.output_dir / self.measurement_madx, cwd) + twiss_forward_path = self._madx_path(self.output_dir / self.twiss_forward, cwd) + twiss_backward_path = self._madx_path(self.output_dir / self.twiss_backward, cwd) + + madx_script += "\n".join( + [ + "", + f"! ----- Segment-by-Segment propagation for {self.segment.name} -----", + "", + "! 
Cycle the sequence to avoid negative length.", + f"seqedit, sequence=LHCB{accel.beam};", + "flatten;", + f"cycle, start={self.segment.start};", + "endedit;", + "", + f"use, period = LHCB{accel.beam};", + "option, echo;", + "", + "twiss;", + "exec, save_initial_and_final_values(", + f" LHCB{accel.beam},", + f" {self.segment.start},", + f" {self.segment.end}, ", + f' "{measurement_path}",', + f" biniLHCB{accel.beam},", + f" bendLHCB{accel.beam}", + ");", + "", + "exec, extract_segment_sequence(", + f" LHCB{accel.beam},", + f" forward_LHCB{accel.beam},", + f" backward_LHCB{accel.beam},", + f" {self.segment.start},", + f" {self.segment.end},", + ");", + "", + f"beam, particle = proton, sequence=forward_LHCB{accel.beam}, energy = {MADX_ENERGY_VAR}, bv={accel.beam_direction:d};", + f"beam, particle = proton, sequence=backward_LHCB{accel.beam}, energy = {MADX_ENERGY_VAR}, bv={accel.beam_direction:d};", + "", + f'exec, twiss_segment(forward_LHCB{accel.beam}, "{twiss_forward_path}", biniLHCB{accel.beam});', + f'exec, twiss_segment(backward_LHCB{accel.beam}, "{twiss_backward_path}", bendLHCB{accel.beam});', + "", + ] + ) if self.corrections is not None: - madx_script += "\n".join([ - f"call, file=\"{self.corrections_madx!s}\";", - f"exec, twiss_segment(forward_LHCB{accel.beam}, " - f"\"{self.twiss_forward_corrected}\", biniLHCB{accel.beam});", - f"exec, twiss_segment(backward_LHCB{accel.beam}, " - f"\"{self.twiss_backward_corrected}\", bendLHCB{accel.beam});", - "", - ]) + corrections_path = self._madx_path(self.output_dir / self.corrections_madx, cwd) + twiss_forward_corr_path = self._madx_path( + self.output_dir / self.twiss_forward_corrected, + cwd, + ) + twiss_backward_corr_path = self._madx_path( + self.output_dir / self.twiss_backward_corrected, + cwd, + ) + madx_script += "\n".join( + [ + f'call, file = "{corrections_path}";', + f"exec, twiss_segment(forward_LHCB{accel.beam}, " + f'"{twiss_forward_corr_path}", biniLHCB{accel.beam});', + f"exec, 
twiss_segment(backward_LHCB{accel.beam}, " + f'"{twiss_backward_corr_path}", bendLHCB{accel.beam});', + "", + ] + ) return madx_script diff --git a/omc3/model/model_creators/ps_model_creator.py b/omc3/model/model_creators/ps_model_creator.py index 15d4cfbf..72d1e569 100644 --- a/omc3/model/model_creators/ps_model_creator.py +++ b/omc3/model/model_creators/ps_model_creator.py @@ -4,10 +4,12 @@ This module provides convenience functions for model creation of the ``PS``. """ + from __future__ import annotations import logging import shutil +from typing import TYPE_CHECKING from omc3.model.accelerators.accelerator import AccExcitationMode from omc3.model.accelerators.ps import Ps @@ -16,49 +18,61 @@ LOGGER = logging.getLogger(__name__) +if TYPE_CHECKING: + from pathlib import Path + class PsModelCreator(PsBaseModelCreator): acc_model_name: str = Ps.NAME - def get_madx_script(self) -> str: + def get_madx_script(self, cwd: Path) -> str: accel: Ps = self.accel - if (accel.energy is None): - raise RuntimeError("PS model creation currently relies on manual Energy management. Please provide the --energy ENERGY flag") - madx_script = self.get_base_madx_script() + if accel.energy is None: + raise RuntimeError( + "PS model creation currently relies on manual Energy management. 
Please provide the --energy ENERGY flag" + ) + madx_script = self.get_base_madx_script(cwd) replace_dict = { "USE_ACD": str(int(accel.excitation == AccExcitationMode.ACD)), "DPP": accel.dpp, - "OUTPUT": str(accel.model_dir), + "OUTPUT": self._madx_path(self.output_dir, cwd), } madx_template = accel.get_file("twiss.mask").read_text() madx_script += madx_template % replace_dict return madx_script - def get_base_madx_script(self): + def get_base_madx_script(self, cwd: Path | str) -> str: accel: Ps = self.accel use_acd = accel.excitation == AccExcitationMode.ACD replace_dict = { - "FILES_DIR": str(accel.get_dir()), + "FILES_DIR": self._madx_path(accel.get_dir(), cwd), "USE_ACD": str(int(use_acd)), "NAT_TUNE_X": accel.nat_tunes[0], "NAT_TUNE_Y": accel.nat_tunes[1], "KINETICENERGY": 0 if accel.energy is None else accel.energy, "USE_CUSTOM_PC": "0" if accel.energy is None else "1", - "ACC_MODELS_DIR": accel.acc_model_path, - "BEAM_FILE": accel.beam_file, - "STR_FILE": accel.str_file, + "ACC_MODELS_DIR": self._madx_path(accel.acc_model_path, cwd), + "BEAM_FILE": self._madx_path(accel.beam_file, cwd), + "STR_FILE": self._madx_path(accel.str_file, cwd), "DRV_TUNE_X": "0", "DRV_TUNE_Y": "0", "MODIFIERS": "", - "USE_MACROS": "0" if accel.year == "2018" else "1", # 2018 doesn't provide a macros file + "USE_MACROS": "0" + if accel.year == "2018" + else "1", # 2018 doesn't provide a macros file "PS_TUNE_METHOD": accel.tune_method, } if accel.modifiers: - replace_dict["MODIFIERS"] = '\n'.join([f" call, file = '{m}'; {MODIFIER_TAG}" for m in accel.modifiers]) + replace_dict["MODIFIERS"] = "\n".join( + [ + f" call, file = '{self._madx_path(modifier, cwd)}'; {MODIFIER_TAG}" + for modifier in accel.modifiers + ] + ) if use_acd: replace_dict["DRV_TUNE_X"] = accel.drv_tunes[0] replace_dict["DRV_TUNE_Y"] = accel.drv_tunes[1] - mask = accel.get_file('base.madx').read_text() + mask = accel.get_file("base.madx").read_text() return mask % replace_dict def prepare_run(self) -> None: diff 
--git a/omc3/model/model_creators/psbooster_model_creator.py b/omc3/model/model_creators/psbooster_model_creator.py index 67433660..1c91c8ee 100644 --- a/omc3/model/model_creators/psbooster_model_creator.py +++ b/omc3/model/model_creators/psbooster_model_creator.py @@ -4,6 +4,7 @@ This module provides convenience functions for model creation of the ``PSB``. """ + from __future__ import annotations import shutil @@ -14,50 +15,53 @@ from omc3.model.model_creators.ps_base_model_creator import PsBaseModelCreator if TYPE_CHECKING: + from pathlib import Path + from omc3.model.accelerators.psbooster import Psbooster class PsboosterModelCreator(PsBaseModelCreator): acc_model_name: str = "psb" - def get_madx_script(self) -> str: + def get_madx_script(self, cwd: Path | str) -> str: accel: Psbooster = self.accel - madx_script = self.get_base_madx_script() + madx_script = self.get_base_madx_script(cwd) replace_dict = { "USE_ACD": str(int(accel.excitation == AccExcitationMode.ACD)), "RING": accel.ring, "DPP": accel.dpp, - "OUTPUT": str(accel.model_dir), + "OUTPUT": self._madx_path(self.output_dir, cwd), } madx_template = accel.get_file("twiss.mask").read_text() madx_script += madx_template % replace_dict return madx_script - def get_base_madx_script(self): + def get_base_madx_script(self, cwd: Path | str) -> str: accel: Psbooster = self.accel use_acd = accel.excitation == AccExcitationMode.ACD replace_dict = { - "FILES_DIR": str(accel.get_dir()), + "FILES_DIR": self._madx_path(accel.get_dir(), cwd), "USE_ACD": str(int(use_acd)), "RING": str(accel.ring), "NAT_TUNE_X": accel.nat_tunes[0], "NAT_TUNE_Y": accel.nat_tunes[1], "KINETICENERGY": 0 if accel.energy is None else accel.energy, "USE_CUSTOM_PC": "0" if accel.energy is None else "1", - "ACC_MODELS_DIR": accel.acc_model_path, - "BEAM_FILE": accel.beam_file, - "STR_FILE": accel.str_file, + "ACC_MODELS_DIR": self._madx_path(accel.acc_model_path, cwd), + "BEAM_FILE": self._madx_path(accel.beam_file, cwd), + "STR_FILE": 
self._madx_path(accel.str_file, cwd), "DRV_TUNE_X": "", "DRV_TUNE_Y": "", } if use_acd: replace_dict["DRV_TUNE_X"] = accel.drv_tunes[0] replace_dict["DRV_TUNE_Y"] = accel.drv_tunes[1] - mask = accel.get_file('base.mask').read_text() + mask = accel.get_file("base.mask").read_text() return mask % replace_dict def prepare_run(self): super().prepare_run() shutil.copy( - self.accel.get_file(f"error_deff_ring{self.accel.ring}.txt"), self.accel.model_dir / ERROR_DEFFS_TXT + self.accel.get_file(f"error_deff_ring{self.accel.ring}.txt"), + self.accel.model_dir / ERROR_DEFFS_TXT, ) diff --git a/omc3/model/model_creators/sps_model_creator.py b/omc3/model/model_creators/sps_model_creator.py index b11cede1..f8c53742 100644 --- a/omc3/model/model_creators/sps_model_creator.py +++ b/omc3/model/model_creators/sps_model_creator.py @@ -2,6 +2,7 @@ SPS Model Creator ----------------- """ + from __future__ import annotations from abc import ABC @@ -66,8 +67,7 @@ def check_accelerator_instance(self) -> None: # hint: if modifiers are given as absolute paths: `path / abs_path` returns `abs_path` (jdilly) # hint: modifiers were at this point probably already checked (and adapted) in `prepare_run()`. - inexistent_modifiers = [ - m for m in accel.modifiers if not (accel.model_dir / m).exists()] + inexistent_modifiers = [m for m in accel.modifiers if not (accel.model_dir / m).exists()] if len(inexistent_modifiers): raise AcceleratorDefinitionError( "The following modifier files do not exist: " @@ -83,14 +83,12 @@ def check_accelerator_instance(self) -> None: ) def prepare_options(self, opt) -> bool: - """ Use the fetcher to list choices if requested. """ + """Use the fetcher to list choices if requested.""" accel: Sps = self.accel if opt.fetch == Fetcher.PATH: if opt.path is None: - raise AcceleratorDefinitionError( - "Path fetcher chosen, but no path proivided." 
-            )
+            raise AcceleratorDefinitionError("Path fetcher chosen, but no path provided.")
 
             acc_model_path = Path(opt.path)
 
         elif opt.fetch == Fetcher.AFS:
@@ -100,7 +98,7 @@ def prepare_options(self, opt) -> bool:
                 msg="No optics tag (flag --year) given",
                 selection=accel.year,
                 list_choices=opt.list_choices,
-                predicate=Path.is_dir
+                predicate=Path.is_dir,
             )  # raises AcceleratorDefintionError if not valid choice
         else:
             LOGGER.warning(
@@ -119,29 +117,32 @@ def prepare_options(self, opt) -> bool:
                 msg="No/Unknown strength file (flag --str_file) selected",
                 selection=None,
                 list_choices=opt.list_choices,
-                predicate=get_check_suffix_func(".str")
+                predicate=get_check_suffix_func(".str"),
             )
 
         # Set the found paths ---
         accel.acc_model_path = acc_model_path
 
-    def get_base_madx_script(self):
+    def get_base_madx_script(self, cwd: Path | str) -> str:
         accel: Sps = self.accel
         use_excitation = accel.excitation != AccExcitationMode.FREE
+        seq_path = self._madx_path(accel.acc_model_path / "sps.seq", cwd)
 
         # The very basics ---
         madx_script = (
             "! Load Base Sequence and Strengths/Modifiers ---\n"
             "option, -echo; ! suppress output from base sequence loading to keep the log small\n\n"
-            f"call, file = '{accel.acc_model_path / 'sps.seq'!s}';\n"
+            f"call, file = '{seq_path}';\n"
         )
 
         if accel.modifiers is not None:  # includes the strengths file
             for modifier in accel.modifiers:
-                madx_script += f"call, file = '{accel.acc_model_path / modifier!s}'; {MODIFIER_TAG}\n"
+                modifier_path = self._madx_path(modifier, cwd)
+                madx_script += f"call, file = '{modifier_path}'; {MODIFIER_TAG}\n"
 
+        toolkit_path = self._madx_path(accel.acc_model_path / "toolkit" / "macro.madx", cwd)
        madx_script += (
-            f"call, file ='{accel.acc_model_path / 'toolkit' / 'macro.madx'!s}';\n"
+            f"call, file ='{toolkit_path}';\n"
             "option, echo;\n\n"
             "! 
Create Beam ---\n" "beam;\n\n" @@ -154,8 +155,8 @@ def get_base_madx_script(self): if use_excitation or accel.drv_tunes is not None: # allow user to modify script and enable excitation, if driven tunes are given madx_script += ( - f"qxd={accel.drv_tunes[0]%1:.3f};\n" - f"qyd={accel.drv_tunes[1]%1:.3f};\n\n" + f"qxd={accel.drv_tunes[0] % 1:.3f};\n" + f"qyd={accel.drv_tunes[1] % 1:.3f};\n\n" "! Prepare ACDipole Elements ---\n" f"use_acd={use_excitation:d}; ! Switch the use of AC dipole\n\n" "hacmap21 = 0;\n" @@ -166,11 +167,7 @@ def get_base_madx_script(self): "ZKV_MARKER: marker;\n\n" ) - madx_script += ( - "! Cycle Sequence ---\n" - "seqedit, sequence=sps;\n" - " flatten;\n" - ) + madx_script += "! Cycle Sequence ---\nseqedit, sequence=sps;\n flatten;\n" if use_excitation or accel.drv_tunes is not None: # allow user to modify script and enable excitation, if driven tunes are given @@ -194,48 +191,45 @@ def get_base_madx_script(self): # f" install, element={marker_name}, at=-{self._start_bpm}->L, from={self._start_bpm};\n" # f" cycle, start = {marker_name};\n" # ) - madx_script += ( - f" cycle, start = {self._start_bpm};\n" - ) + madx_script += f" cycle, start = {self._start_bpm};\n" - madx_script += ( - "endedit;\n" - "use, sequence=sps;\n\n" - ) + madx_script += "endedit;\nuse, sequence=sps;\n\n" # Match tunes --- madx_script += ( - "! Match Tunes ---\n" - "exec, sps_match_tunes(qx0,qy0);\n\n" + "! Match Tunes ---\nexec, sps_match_tunes(qx0,qy0);\n\n" # "twiss, file = 'sps.tfs';\n" # also not sure if needed ) return madx_script - def get_madx_script(self): + def get_madx_script(self, cwd: Path | str) -> str: accel: Sps = self.accel use_excitation = accel.excitation != AccExcitationMode.FREE - madx_script = self.get_base_madx_script() + madx_script = self.get_base_madx_script(cwd) + twiss_path = self._madx_path(self.output_dir / TWISS_DAT, cwd) + twiss_elements_path = self._madx_path(self.output_dir / TWISS_ELEMENTS_DAT, cwd) madx_script += ( - "! 
Create twiss data files ---\n" + "! Create twiss data files ---\n" f"{self._get_select_command(pattern=accel.RE_DICT[AccElementTypes.BPMS])}" - f"twiss, file = {accel.model_dir / TWISS_DAT};\n" + f"twiss, file = '{twiss_path}';\n" "\n" f"{self._get_select_command()}" - f"twiss, file = {accel.model_dir / TWISS_ELEMENTS_DAT};\n" + f"twiss, file = '{twiss_elements_path}';\n" ) if use_excitation or accel.drv_tunes is not None: # allow user to modify script and enable excitation, if driven tunes are given + twiss_ac_path = self._madx_path(self.output_dir / TWISS_AC_DAT, cwd) madx_script += ( f"if(use_acd == 1){{\n" - " betxac = table(twiss, hacmap, betx);\n" - " betyac = table(twiss, vacmap, bety);\n" + " betxac = table(twiss, hacmap, betx);\n" + " betyac = table(twiss, vacmap, bety);\n" f" hacmap21 := 2*(cos(2*pi*qxd)-cos(2*pi*qx0))/(betxac*sin(2*pi*qx0));\n" f" vacmap43 := 2*(cos(2*pi*qyd)-cos(2*pi*qy0))/(betyac*sin(2*pi*qy0));\n" "\n" f"{self._get_select_command(pattern=accel.RE_DICT[AccElementTypes.BPMS], indent=4)}" - f" twiss, file = {accel.model_dir / TWISS_AC_DAT};\n" + f" twiss, file = '{twiss_ac_path}';\n" f"}}\n" ) return madx_script @@ -246,11 +240,13 @@ class SpsCorrectionModelCreator(CorrectionModelCreator, SpsModelCreator): Creates an updated model from multiple changeparameters inputs (used in iterative correction). """ + def prepare_run(self) -> None: # As the matched/corrected model is created in the same directory as the original model, # we do not need to prepare as much. 
self.check_accelerator_instance()
 
+
 class SpsSegmentCreator(SegmentCreator, SpsModelCreator):
     _sequence_name = "sps"
     _start_bpm = None  # prohibits first cycling (which inserts an additional element leading to negative drifts)
diff --git a/omc3/nxcals/__init__.py b/omc3/nxcals/__init__.py
new file mode 100644
index 00000000..6e60abe9
--- /dev/null
+++ b/omc3/nxcals/__init__.py
@@ -0,0 +1,2 @@
+# No names are re-exported from this package; wildcard imports are intentionally a no-op.
+__all__ = []
diff --git a/omc3/nxcals/constants.py b/omc3/nxcals/constants.py
new file mode 100644
index 00000000..43616f7a
--- /dev/null
+++ b/omc3/nxcals/constants.py
@@ -0,0 +1 @@
+EXTRACTED_MQTS_FILENAME: str = "extracted_mqts.str"
diff --git a/omc3/nxcals/knob_extraction.py b/omc3/nxcals/knob_extraction.py
new file mode 100644
index 00000000..85a1dbbb
--- /dev/null
+++ b/omc3/nxcals/knob_extraction.py
@@ -0,0 +1,274 @@
+from __future__ import annotations
+
+import logging
+from dataclasses import dataclass
+from datetime import datetime, timedelta, timezone
+
+import jpype
+import pandas as pd
+from pyspark.sql import SparkSession
+from pyspark.sql import functions as f
+from pyspark.sql.window import Window
+
+from omc3.utils.mock import cern_network_import
+
+pjlsa = cern_network_import("pjlsa")
+builders = cern_network_import("nxcals.api.extraction.data.builders")
+
+logger = logging.getLogger(__name__)
+
+
+@dataclass
+class NXCalResult:
+    name: str
+    value: float
+    timestamp: pd.Timestamp
+    pc_name: str
+
+
+def get_raw_vars(
+    spark: SparkSession, time: datetime, var_name: str, delta_days: int = 1
+) -> list[NXCalResult]:
+    """
+    Retrieve raw variable values from NXCALS. 
+
+    Parameters
+    ----------
+    spark : SparkSession
+        Active Spark session
+    time : datetime
+        Python datetime (timezone-aware recommended)
+    var_name : str
+        Name or pattern of the variable(s) to retrieve
+
+    Returns
+    -------
+    list[NXCalResult]
+        One NXCalResult (name, value, timestamp, pc_name) per matching variable,
+        holding the latest sample of that variable at the given time
+    """
+
+    # Ensure time is in UTC
+    if time.tzinfo is None:
+        time = time.replace(tzinfo=timezone.utc)
+    else:
+        time = time.astimezone(timezone.utc)
+
+    # Look back 1 day to be sure we get at least one sample
+    start_time = time - timedelta(days=delta_days)
+    end_time = time
+    logger.info(f"Retrieving raw variables {var_name} from {start_time} to {end_time}")
+
+    df = (
+        builders.DataQuery.builder(spark)
+        .variables()
+        .system("CMW")
+        .nameLike(var_name)
+        .timeWindow(start_time, end_time)
+        .build()
+    )
+
+    # Avoid full count() – just check if we have at least one row
+    if df is None or not df.take(1):
+        raise RuntimeError(f"No data found for {var_name} in the given interval.")
+
+    logger.info(f"Raw variables {var_name} retrieved successfully.")
+
+    # Get the latest sample for each variable
+    window_spec = Window.partitionBy("nxcals_variable_name").orderBy(
+        f.col("nxcals_timestamp").desc()
+    )
+    latest_df = (
+        df.withColumn("row_num", f.row_number().over(window_spec))
+        .filter(f.col("row_num") == 1)
+        .select("nxcals_variable_name", "nxcals_value", "nxcals_timestamp")
+    )
+
+    results = []
+    for row in latest_df.collect():
+        full_varname = row["nxcals_variable_name"]
+        raw_val = float(row["nxcals_value"])
+        ts = pd.to_datetime(row["nxcals_timestamp"], unit="ns", utc=True)
+        results.append(NXCalResult(full_varname, raw_val, ts, strip_i_meas(full_varname)))
+        logger.info(f"LHC value retrieved: {full_varname} = {raw_val:.2f} at {ts}")
+
+    return results
+
+
+def get_energy(spark: SparkSession, time: datetime) -> tuple[float, 
pd.Timestamp]: + """ + + Args: + spark (SparkSession): Active Spark session + time (datetime): Python datetime (timezone-aware recommended) + + Returns: + (energy_gev, timestamp): Beam energy in GeV and its timestamp + """ + scale = 0.120 + raw_vars = get_raw_vars(spark, time, "HX:ENG") + if not raw_vars: + raise RuntimeError("No energy data found.") + return raw_vars[0].value * scale, raw_vars[0].timestamp + + +def map_pc_name_to_madx(pc_name: str) -> str: + """ + Convert a power converter name or circuit name to its corresponding MAD-X name. + + This function processes the input name by removing the ':I_MEAS' suffix if present, + extracting the circuit name from full power converter names (starting with 'RPMBB' or 'RPL'), + applying specific string replacements for MAD-X compatibility, and converting the result to lowercase. + + Args: + pc_name (str): The power converter or circuit name to convert. + + Returns: + str: The converted MAD-X name. + + Examples: + >>> map_pc_name_to_madx("RPMBB.UJ33.RCBH10.L1B1:I_MEAS") + 'acb10.l1b1' + >>> map_pc_name_to_madx("RQT12.L5B1") + 'kqt12.l5b1' + """ + # Remove the ':I_MEAS' suffix if it exists + pc_name = strip_i_meas(pc_name) + + # Extract circuit name from full power converter names + if "." in pc_name: + parts = pc_name.split(".") + # For full names like 'RPMBB.UJ33.RCBH10.L1B1', take from the third part onward, else use pc_name + circuit_name = ".".join(parts[2:]) if len(parts) > 2 else pc_name + else: + circuit_name = pc_name + + # Apply MAD-X specific replacements + replacements = {"RQT": "KQT", "RCB": "ACB"} + for old, new in replacements.items(): + circuit_name = circuit_name.replace(old, new) + + # Return in lowercase as required by MAD-X + return circuit_name.lower() + + +def strip_i_meas(text: str) -> str: + """ + Remove the I_MEAS suffix from a variable name. 
+    """
+    return text.removesuffix(":I_MEAS")
+
+
+def calc_k_from_iref(lsa_client, currents: dict[str, float], energy: float) -> dict[str, float]:
+    """
+    Calculate K values from IREF using the LSA service.
+
+    Args:
+        lsa_client: The LSA client instance
+        currents: A dictionary of current values keyed by variable name
+        energy: The beam energy in GeV
+
+    Returns:
+        A dictionary of K values keyed by variable name
+    """
+    # 1) Use the **instance** PJLSA already created
+    lhc_service = lsa_client._lhcService  # <-- INSTANCE (do NOT use self._LhcService)
+
+    # 2) Build a java.util.HashMap (not a Python dict)
+    j_hash_map = jpype.JClass("java.util.HashMap")
+    j_double = jpype.JClass("java.lang.Double")
+
+    jmap = j_hash_map()
+    for k, v in currents.items():
+        if v is None:
+            raise ValueError(f"Current for {k} is None")
+        # ensure primitive-compatible double values
+        jmap.put(k, j_double.valueOf(v))  # boxed; Java unboxes internally
+
+    # 3) Call: Map calculateKfromIREF(Map, double)
+    out = lhc_service.calculateKfromIREF(jmap, float(energy))
+
+    # 4) Convert java.util.Map -> Python dict
+    res = {}
+    it = out.entrySet().iterator()
+    while it.hasNext():
+        e = it.next()
+        res[str(e.getKey())] = float(e.getValue())
+    return res
+
+
+def get_knob_vals(
+    spark: SparkSession,
+    time: datetime,
+    beam: int,
+    patterns: list[str],
+    expected_knobs: set[str] | None = None,
+    log_prefix: str = "",
+    delta_days: int = 1,
+) -> list[NXCalResult]:
+    """
+    Retrieve knob values for a given beam and time using specified patterns.
+
+    Args:
+        spark: Spark session
+        time: Time to retrieve data for
+        beam: Beam number
+        patterns: List of variable patterns to retrieve
+        expected_knobs: Set of expected MAD-X knob names. If None, returns all found knobs. 
+ log_prefix: Prefix for logging messages + + Returns: + List of NXCalResult with knob values + """ + logger.info(f"{log_prefix}Starting data retrieval for beam {beam} at time {time}") + + # Retrieve raw current measurements from NXCALS + combined_vars: list[NXCalResult] = [] + for pattern in patterns: + logger.info(f"{log_prefix}Getting currents for pattern {pattern} at {time}") + raw_vars = get_raw_vars(spark, time, pattern, delta_days) + combined_vars.extend(raw_vars) + + # Get beam energy for K-value calculations + energy, _ = get_energy(spark, time) + + # Prepare currents dict for LSA + currents = {strip_i_meas(var.name): var.value for var in combined_vars} + + logger.info( + f"{log_prefix}Calculating K values for {len(currents)} power converters with energy {energy} GeV" + ) + + # Calculate K values using LSA + lsa_client = pjlsa.LSAClient() + k_values = calc_k_from_iref(lsa_client, currents, energy) + + # Transform K values keys to MAD-X format + k_values_madx = {map_pc_name_to_madx(key): value for key, value in k_values.items()} + + found_keys = set(k_values_madx.keys()) + + if expected_knobs is None: + expected_knobs = found_keys + + if missing_keys := expected_knobs - found_keys: + logger.warning(f"{log_prefix}Missing K-values for knobs: {missing_keys}") + + if unknown_keys := found_keys - expected_knobs: + logger.warning(f"{log_prefix}Unknown K-values found: {unknown_keys}") + + # Build result list with timestamps + timestamps = {map_pc_name_to_madx(var.name): var.timestamp for var in combined_vars} + pc_names = {map_pc_name_to_madx(var.name): var.pc_name for var in combined_vars} + results = [] + for madx_name in expected_knobs: + value = k_values_madx.get(madx_name) + timestamp = timestamps.get(madx_name) + pc_name = pc_names.get(madx_name) + + if value is not None and timestamp is not None and pc_name is not None: + results.append(NXCalResult(madx_name, value, timestamp, pc_name)) + else: + logger.warning(f"{log_prefix}Missing data for {madx_name}") + 
+ return results diff --git a/omc3/nxcals/mqt_extraction.py b/omc3/nxcals/mqt_extraction.py new file mode 100644 index 00000000..797a7f45 --- /dev/null +++ b/omc3/nxcals/mqt_extraction.py @@ -0,0 +1,126 @@ +""" +Extraction of MQT (Quadrupole Trim) knob values from NXCALS. +------------------------------------------------------------ + +This module provides functions to retrieve MQT knob values for the LHC for a specified beam +and time using NXCALS and LSA. + +**Arguments:** + +*--Required--* + +- **time** *(datetime|str)*: + + The timestamp for which to retrieve the data (timezone-aware recommended). If a string is provided, it will be parsed to a datetime object, assuming UTC timezone. + +- **beam** *(int)*: + + The beam number (1 or 2). + +- **output_dir** *(str|Path)*: + Path to the output directory where the MQT knob values will be saved in. +""" + +import logging +from datetime import datetime +from pathlib import Path + +from generic_parser import DotDict, EntryPointParameters, entrypoint + +# import jpype +from pyspark.sql import SparkSession + +from omc3.nxcals.constants import EXTRACTED_MQTS_FILENAME +from omc3.nxcals.knob_extraction import NXCalResult, get_knob_vals +from omc3.utils.iotools import DateOrStr, PathOrStr +from omc3.utils.mock import cern_network_import + +spark_session_builder = cern_network_import("nxcals.spark_session_builder") + +logger = logging.getLogger(__name__) + + +def _get_params() -> EntryPointParameters: + """ + Define the parameters for the MQT retrieval entry point. 
+ """ + return EntryPointParameters( + time={ + "type": DateOrStr, + "help": "The timestamp for which to retrieve the data (timezone-aware recommended).", + }, + beam={"type": int, "help": "The beam number (1 or 2)."}, + output_dir={ + "type": PathOrStr, + "help": "Path to the output directory where the MQT knob values will be saved in.", + }, + ) + + +@entrypoint(_get_params(), strict=True) +def retrieve_mqts(opt: DotDict) -> None: + """ + Retrieve MQT (Quadrupole Trim) knob values from NXCALS for a specific time and beam, + and save them to a file in the specified output directory. + """ + spark = spark_session_builder.get_or_create() + mqt_vals = get_mqt_vals(spark, opt.time, opt.beam) + output_path = Path(opt.output_dir) / EXTRACTED_MQTS_FILENAME + with output_path.open("w") as f: + for result in mqt_vals: + timestamp_str = f"{result.timestamp:%Y-%m-%d %H:%M:%S%z}" + value_str = f"{result.value:.10E}".replace("E+", "E") + f.write( + f"{result.name:<15}= {value_str}; ! powerconverter: {result.pc_name} at {timestamp_str}\n" + ) + + +def get_mqts(beam: int) -> set[str]: + """ + Generate the set of MAD-X MQT (Quadrupole Trim) variable names for a given beam. + + Args: + beam (int): The beam number (1 or 2). + + Returns: + set[str]: A set of MAD-X variable names for MQT magnets, e.g., 'kqt12.a12b1'. + + Raises: + ValueError: If beam is not 1 or 2. + + Examples: + >>> get_mqts(1) + {'kqt12.a12b1', 'kqt12.a23b1', ..., 'kqtd.a81b1'} + """ + if beam not in (1, 2): + raise ValueError("Beam must be 1 or 2") + + types = ["f", "d"] + arcs = [12, 23, 34, 45, 56, 67, 78, 81] + return {f"kqt{t}.a{a}b{beam}" for t in types for a in arcs} + + +def get_mqt_vals(spark: SparkSession, time: datetime, beam: int) -> list[NXCalResult]: + """ + Retrieve MQT (Quadrupole Trim) knob values from NXCALS for a specific time and beam. 
+ + This function queries NXCALS for current measurements of MQT power converters, + calculates the corresponding K-values (integrated quadrupole strengths) using LSA, + and returns them in MAD-X format with timestamps. + + Args: + spark (SparkSession): Active Spark session for NXCALS queries. + time (datetime): The timestamp for which to retrieve the data (timezone-aware recommended). + beam (int): The beam number (1 or 2). + + Returns: + list[NXCalResult]: List of NXCalResult objects containing the MAD-X knob names, K-values, and timestamps. + + Raises: + ValueError: If beam is not 1 or 2 (propagated from get_mqts). + RuntimeError: If no data is found in NXCALS or LSA calculations fail. + """ + madx_mqts = get_mqts(beam) + pattern = f"RPMBB.UA%.RQT%.A%B{beam}:I_MEAS" + patterns = [pattern] + return get_knob_vals(spark, time, beam, patterns, madx_mqts, "MQT: ") diff --git a/omc3/utils/iotools.py b/omc3/utils/iotools.py index a5a325ae..b23e1a83 100644 --- a/omc3/utils/iotools.py +++ b/omc3/utils/iotools.py @@ -4,15 +4,18 @@ Helper functions for input/output issues. """ + from __future__ import annotations import json import re import shutil import sys +from datetime import datetime from pathlib import Path from typing import TYPE_CHECKING, Any +import pandas as pd from generic_parser.entry_datatypes import DictAsString, get_instance_faker_meta, get_multi_class from generic_parser.entrypoint_parser import save_options_to_config from tfs import TfsDataFrame @@ -65,7 +68,7 @@ def copy_item(src_item: Path, dst_item: Path): def glob_regex(path: Path, pattern: str) -> Iterator[str]: - """ Do a glob on the given `path` based on the regular expression `pattern`. + """Do a glob on the given `path` based on the regular expression `pattern`. Returns only the matching filenames (as strings). 
Args: @@ -80,6 +83,7 @@ def glob_regex(path: Path, pattern: str) -> Iterator[str]: class PathOrStr(metaclass=get_instance_faker_meta(Path, str)): """A class that behaves like a Path when possible, otherwise like a string.""" + def __new__(cls, value): value = strip_quotes(value) try: @@ -90,6 +94,7 @@ def __new__(cls, value): class PathOrStrOrDataFrame(metaclass=get_instance_faker_meta(TfsDataFrame, Path, str)): """A class that behaves like a Path when possible, otherwise maybe a TfsDataFrame, otherwise like a string.""" + def __new__(cls, value): value = strip_quotes(value) try: @@ -108,6 +113,7 @@ def __new__(cls, value): class PathOrStrOrDict(metaclass=get_instance_faker_meta(dict, Path, str)): """A class that tries to parse/behaves like a dict when possible, otherwise either like a Path or like a string.""" + def __new__(cls, value): value = strip_quotes(value) try: @@ -123,8 +129,20 @@ def __new__(cls, value): return value +class DateOrStr(metaclass=get_instance_faker_meta(datetime, str)): + """A class that can be used as datetime and string parser input""" + + def __new__(cls, value): + value = strip_quotes(value) + if isinstance(value, datetime): + return value + # assume string, rely on pandas to parse and throw if not possible + return pd.to_datetime(value, utc=True) + + class UnionPathStr(metaclass=get_instance_faker_meta(Path, str)): """A class that can be used as Path and string parser input, but does not convert to path.""" + def __new__(cls, value): return strip_quotes(value) @@ -132,6 +150,7 @@ def __new__(cls, value): class UnionPathStrInt(metaclass=get_instance_faker_meta(Path, str, int)): """A class that can be used as Path, string, int parser input, but does not convert. Very special and used e.g. in the BBQ Input.""" + def __new__(cls, value): return strip_quotes(value) @@ -139,6 +158,7 @@ def __new__(cls, value): class OptionalStr(metaclass=get_instance_faker_meta(str, type(None))): """A class that allows `str` or `None`. 
Can be used in string-lists when individual entries can be `None`.""" + def __new__(cls, value): value = strip_quotes(value) if isinstance(value, str) and value.lower() == "none": @@ -169,7 +189,7 @@ def strip_quotes(value: Any) -> Any: """ if isinstance(value, str): - value = value.strip("\'\"") # behavior like dict-parser, IMPORTANT FOR EVERY STRING-FAKER + value = value.strip("'\"") # behavior like dict-parser, IMPORTANT FOR EVERY STRING-FAKER return value @@ -196,7 +216,7 @@ def convert_paths_in_dict_to_strings(dict_: dict) -> dict: def replace_in_path(path: Path, old: Path | str, new: Path | str) -> Path: - """ Replace a part of a path with a new path. + """Replace a part of a path with a new path. Useful for example to replace the original path with a path to a symlink or vice versa. Args: @@ -219,7 +239,7 @@ def remove_none_dict_entries(dict_: dict) -> dict: def maybe_add_command(opt: dict, script: str) -> dict: - """ Add a comment ';command' to the opt-dict, + """Add a comment ';command' to the opt-dict, which is the command used to run the script gotten from sys.argv, but only if the executed file (the file that run with the `python` command) equals ``script``, i.e. the script for which you are saving the parameters @@ -234,12 +254,13 @@ def maybe_add_command(opt: dict, script: str) -> dict: if the script names were different, just the original opt. """ if script == sys.argv[0]: - opt[";command"] = " ".join([sys.executable] + sys.argv) # will be sorted to the beginning below + opt[";command"] = " ".join( + [sys.executable] + sys.argv + ) # will be sorted to the beginning below return opt -def save_config(output_dir: Path, opt: dict, script: str, - unknown_opt: dict | list = None): +def save_config(output_dir: Path, opt: dict, script: str, unknown_opt: dict | list = None): """ Quick wrapper for ``save_options_to_config``. 
@@ -257,32 +278,36 @@ def save_config(output_dir: Path, opt: dict, script: str, save_options_to_config( output_dir / formats.get_config_filename(script), dict(sorted(opt.items())), - unknown=unknown_opt + unknown=unknown_opt, ) def always_true(*args, **kwargs) -> bool: - """ A function that is always True. """ + """A function that is always True.""" return True -def get_check_suffix_func(suffix: str) -> Callable[[Path],bool]: - """ Returns a function that checks the suffix of a given path against the suffix. """ +def get_check_suffix_func(suffix: str) -> Callable[[Path], bool]: + """Returns a function that checks the suffix of a given path against the suffix.""" + def check_suffix(path: Path) -> bool: return path.suffix == suffix + return check_suffix -def get_check_by_regex_func(pattern: str) -> Callable[[Path],bool]: - """ Returns a function that checks the name of a given path against the pattern. """ +def get_check_by_regex_func(pattern: str) -> Callable[[Path], bool]: + """Returns a function that checks the name of a given path against the pattern.""" + def check(path: Path) -> bool: return re.match(pattern, path.name) is not None + return check def load_multiple_jsons(*files) -> dict: - """ Load multiple json files into a single dict. - In case of duplicate keys, later files overwrite the earlier ones. """ + """Load multiple json files into a single dict. + In case of duplicate keys, later files overwrite the earlier ones.""" full_dict = {} for json_file in files: with Path(json_file).open() as json_data: @@ -291,7 +316,7 @@ def load_multiple_jsons(*files) -> dict: def find_file(file_name: Path | str, dirs: Iterable[Path | str]) -> Path: - """ Tries to find out if the given file exists, either on its own, or in the given directories. + """Tries to find out if the given file exists, either on its own, or in the given directories. Returns then the full path of the found file. If not found, raises a ``FileNotFoundError``. 
Args: diff --git a/tests/inputs/knob_extractor/extracted_mqts_b1.str b/tests/inputs/knob_extractor/extracted_mqts_b1.str new file mode 100644 index 00000000..38fa5bba --- /dev/null +++ b/tests/inputs/knob_extractor/extracted_mqts_b1.str @@ -0,0 +1,16 @@ +kqtf.a78b1 = 5.2952467864E-04; ! powerconverter: RPMBB.UA83.RQTF.A78B1 at 2025-04-24 16:08:00.58 +kqtf.a67b1 = 6.5856960835E-04; ! powerconverter: RPMBB.UA67.RQTF.A67B1 at 2025-04-24 16:08:00.6 +kqtf.a34b1 = -4.7499507672E-04; ! powerconverter: RPMBB.UA43.RQTF.A34B1 at 2025-04-24 16:08:00.6 +kqtf.a45b1 = 1.9335982122E-04; ! powerconverter: RPMBB.UA47.RQTF.A45B1 at 2025-04-24 16:08:00.6 +kqtf.a56b1 = -2.9329827751E-04; ! powerconverter: RPMBB.UA63.RQTF.A56B1 at 2025-04-24 16:08:00.6 +kqtf.a23b1 = -5.4437426272E-04; ! powerconverter: RPMBB.UA27.RQTF.A23B1 at 2025-04-24 16:08:00.58 +kqtf.a12b1 = -1.0715019208E-04; ! powerconverter: RPMBB.UA23.RQTF.A12B1 at 2025-04-24 16:08:00.58 +kqtf.a81b1 = 1.7714130878E-04; ! powerconverter: RPMBB.UA87.RQTF.A81B1 at 2025-04-24 16:08:00.58 +kqtd.a12b1 = -1.1114670233E-04; ! powerconverter: RPMBB.UA23.RQTD.A12B1 at 2025-04-24 16:08:00.58 +kqtd.a67b1 = -1.0210970110E-03; ! powerconverter: RPMBB.UA67.RQTD.A67B1 at 2025-04-24 16:08:00.6 +kqtd.a56b1 = -3.0490031092E-04; ! powerconverter: RPMBB.UA63.RQTD.A56B1 at 2025-04-24 16:08:00.58 +kqtd.a45b1 = 1.9346274762E-04; ! powerconverter: RPMBB.UA47.RQTD.A45B1 at 2025-04-24 16:08:00.6 +kqtd.a34b1 = 1.2378239916E-03; ! powerconverter: RPMBB.UA43.RQTD.A34B1 at 2025-04-24 16:08:00.6 +kqtd.a23b1 = 1.1549226785E-03; ! powerconverter: RPMBB.UA27.RQTD.A23B1 at 2025-04-24 16:08:00.58 +kqtd.a78b1 = -1.1778641812E-03; ! powerconverter: RPMBB.UA83.RQTD.A78B1 at 2025-04-24 16:08:00.58 +kqtd.a81b1 = 1.7725039489E-04; ! 
powerconverter: RPMBB.UA87.RQTD.A81B1 at 2025-04-24 16:08:00.58 diff --git a/tests/inputs/knob_extractor/extracted_mqts_b2.str b/tests/inputs/knob_extractor/extracted_mqts_b2.str new file mode 100644 index 00000000..e0f43733 --- /dev/null +++ b/tests/inputs/knob_extractor/extracted_mqts_b2.str @@ -0,0 +1,16 @@ +kqtf.a12b2 = 8.6361503057E-05; ! powerconverter: RPMBB.UA23.RQTF.A12B2 at 2025-04-21 16:58:33.58 +kqtf.a23b2 = 6.8126690391E-04; ! powerconverter: RPMBB.UA27.RQTF.A23B2 at 2025-04-21 16:58:33.58 +kqtf.a34b2 = 6.3296929603E-04; ! powerconverter: RPMBB.UA43.RQTF.A34B2 at 2025-04-21 16:58:33.6 +kqtf.a45b2 = -4.2242762402E-04; ! powerconverter: RPMBB.UA47.RQTF.A45B2 at 2025-04-21 16:58:33.6 +kqtf.a56b2 = 2.2150181418E-04; ! powerconverter: RPMBB.UA63.RQTF.A56B2 at 2025-04-21 16:58:33.6 +kqtf.a78b2 = -2.2045892703E-04; ! powerconverter: RPMBB.UA83.RQTF.A78B2 at 2025-04-21 16:58:33.58 +kqtf.a67b2 = -5.1946843266E-04; ! powerconverter: RPMBB.UA67.RQTF.A67B2 at 2025-04-21 16:58:33.6 +kqtf.a81b2 = -2.9612713717E-04; ! powerconverter: RPMBB.UA87.RQTF.A81B2 at 2025-04-21 16:58:33.58 +kqtd.a12b2 = 6.8279938819E-05; ! powerconverter: RPMBB.UA23.RQTD.A12B2 at 2025-04-21 16:58:33.58 +kqtd.a78b2 = 1.1512298098E-03; ! powerconverter: RPMBB.UA83.RQTD.A78B2 at 2025-04-21 16:58:33.58 +kqtd.a23b2 = -1.3344184688E-03; ! powerconverter: RPMBB.UA27.RQTD.A23B2 at 2025-04-21 16:58:33.58 +kqtd.a34b2 = -1.3840291975E-03; ! powerconverter: RPMBB.UA43.RQTD.A34B2 at 2025-04-21 16:58:33.6 +kqtd.a67b2 = 8.6325133483E-04; ! powerconverter: RPMBB.UA67.RQTD.A67B2 at 2025-04-21 16:58:33.6 +kqtd.a45b2 = -4.3636175393E-04; ! powerconverter: RPMBB.UA47.RQTD.A45B2 at 2025-04-21 16:58:33.6 +kqtd.a56b2 = 2.1367291615E-04; ! powerconverter: RPMBB.UA63.RQTD.A56B2 at 2025-04-21 16:58:33.58 +kqtd.a81b2 = -3.0417310517E-04; ! 
powerconverter: RPMBB.UA87.RQTD.A81B2 at 2025-04-21 16:58:33.58 diff --git a/tests/unit/test_model_creator.py b/tests/unit/test_model_creator.py index 6cc058df..7ea55cb0 100644 --- a/tests/unit/test_model_creator.py +++ b/tests/unit/test_model_creator.py @@ -29,6 +29,7 @@ LhcModelCreator, ) from omc3.model_creator import create_instance_and_model +from omc3.nxcals.constants import EXTRACTED_MQTS_FILENAME from omc3.optics_measurements.constants import NAME from tests.conftest import assert_frame_equal @@ -42,6 +43,7 @@ # ---- creation tests ------------------------------------------------------------------------------ + @pytest.mark.basic def test_booster_creation_nominal_driven(tmp_path, acc_models_psb_2021): accel_opt = { @@ -70,7 +72,10 @@ def test_booster_creation_nominal_driven(tmp_path, acc_models_psb_2021): accel_opt_duplicate["scenario"] = None with pytest.raises(AcceleratorDefinitionError) as e: create_instance_and_model( - type="nominal", outputdir=tmp_path, logfile=tmp_path / "madx_log.txt", **accel_opt_duplicate + type="nominal", + outputdir=tmp_path, + logfile=tmp_path / "madx_log.txt", + **accel_opt_duplicate, ) assert "flag --scenario" in str(e.value) assert "Selected: 'None'" in str(e.value) @@ -79,7 +84,10 @@ def test_booster_creation_nominal_driven(tmp_path, acc_models_psb_2021): accel_opt_duplicate["str_file"] = None with pytest.raises(AcceleratorDefinitionError) as e: create_instance_and_model( - type="nominal", outputdir=tmp_path, logfile=tmp_path / "madx_log.txt", **accel_opt_duplicate + type="nominal", + outputdir=tmp_path, + logfile=tmp_path / "madx_log.txt", + **accel_opt_duplicate, ) assert "flag --str_file" in str(e.value) assert "Selected: 'None'" in str(e.value) @@ -105,6 +113,7 @@ def test_booster_creation_nominal_free(tmp_path, acc_models_psb_2021): ) check_accel_from_dir_vs_options(tmp_path, accel_opt, accel) + # ps tune matching fails for 2018 optics # The magnets used for the different tune matching methods in > 2018 were installed 
in LS2. # TODO: check with PS expert a) if model creation <= 2018 is desired and b) how it worked @@ -135,7 +144,7 @@ def test_booster_creation_nominal_free(tmp_path, acc_models_psb_2021): def test_ps_creation_nominal_free_2018(tmp_path, acc_models_ps_2021): accel_opt = { "accel": "ps", - "nat_tunes": [0.21, 0.323], # from madx_job file in acc_models + "nat_tunes": [0.21, 0.323], # from madx_job file in acc_models "dpp": 0.0, "energy": 1.4, "year": "2018", @@ -158,7 +167,10 @@ def test_ps_creation_nominal_free_2018(tmp_path, acc_models_ps_2021): with pytest.raises(RuntimeError) as excinfo: create_instance_and_model( - type="nominal", outputdir=tmp_path, logfile=tmp_path / "madx_log.txt", **accel_opt_duplicate + type="nominal", + outputdir=tmp_path, + logfile=tmp_path / "madx_log.txt", + **accel_opt_duplicate, ) assert "Please provide the --energy ENERGY flag" in str(excinfo.value) @@ -220,7 +232,7 @@ def test_lhc_creation_nominal_free_high_beta(tmp_path, acc_models_lhc_2018): "energy": 6500.0, "fetch": Fetcher.PATH, "path": acc_models_lhc_2018, - "modifiers": LHC_2018_HIGH_BETA_MODIFIERS + "modifiers": LHC_2018_HIGH_BETA_MODIFIERS, } accel = create_instance_and_model( outputdir=tmp_path, type="nominal", logfile=tmp_path / "madx_log.txt", **accel_opt @@ -239,7 +251,7 @@ def test_lhc_creation_nominal_free(tmp_path, acc_models_lhc_2025): "energy": 6800.0, "fetch": Fetcher.PATH, "path": acc_models_lhc_2025, - "modifiers": LHC_2025_30CM_MODIFIERS + "modifiers": LHC_2025_30CM_MODIFIERS, } accel = create_instance_and_model( outputdir=tmp_path, type="nominal", logfile=tmp_path / "madx_log.txt", **accel_opt @@ -256,7 +268,7 @@ def test_lhc_creation_nominal_2016(tmp_path): "nat_tunes": [0.31, 0.32], "dpp": 0.0, "energy": 6500.0, - "modifiers": [INPUTS / "models" / "modifiers_2016" / "opt_400_10000_400_3000.madx"] + "modifiers": [INPUTS / "models" / "modifiers_2016" / "opt_400_10000_400_3000.madx"], } accel = create_instance_and_model( outputdir=tmp_path, type="nominal", 
logfile=tmp_path / "madx_log.txt", **accel_opt @@ -266,7 +278,7 @@ def test_lhc_creation_nominal_2016(tmp_path): @pytest.mark.basic def test_lhc_creation_best_knowledge(tmp_path, acc_models_lhc_2025): - (tmp_path / LhcBestKnowledgeCreator.EXTRACTED_MQTS_FILENAME).write_text("\n") + (tmp_path / EXTRACTED_MQTS_FILENAME).write_text("\n") corrections = tmp_path / "other_corrections.madx" corrections_str = "! just a comment to test the corrections file is actually loaded in madx. whfifhkdskjfshkdhfswojeorijr" @@ -284,7 +296,7 @@ def test_lhc_creation_best_knowledge(tmp_path, acc_models_lhc_2025): "energy": 6800.0, "fetch": Fetcher.PATH, "path": acc_models_lhc_2025, - "modifiers": LHC_2025_30CM_MODIFIERS + [corrections] + "modifiers": LHC_2025_30CM_MODIFIERS + [corrections], } # like from the GUI, dump best knowledge on top of nominal @@ -318,12 +330,14 @@ def test_lhc_creation_absolute_modifier_path(tmp_path: Path, acc_models_lhc_2022 "energy": 6800.0, "fetch": Fetcher.PATH, "path": acc_models_lhc_2022, - "modifiers": [(acc_models_lhc_2022 / rel_path).absolute()] + "modifiers": [(acc_models_lhc_2022 / rel_path).absolute()], } accel = create_instance_and_model( outputdir=tmp_path, type="nominal", logfile=tmp_path / "madx_log.txt", **accel_opt ) - absolute_path = tmp_path / f"{ACC_MODELS_PREFIX}-{accel.NAME}" / rel_path # replaced in model creation + absolute_path = ( + tmp_path / f"{ACC_MODELS_PREFIX}-{accel.NAME}" / rel_path + ) # replaced in model creation madx_string = f"call, file = '{absolute_path!s}" assert madx_string in (tmp_path / JOB_MODEL_MADX_NOMINAL).read_text() assert madx_string in (tmp_path / "madx_log.txt").read_text() @@ -343,7 +357,7 @@ def test_lhc_creation_modifier_nonexistent(tmp_path, acc_models_lhc_2018): "energy": 6800.0, "fetch": Fetcher.PATH, "path": acc_models_lhc_2018, - "modifiers": [non_existent] + "modifiers": [non_existent], } with pytest.raises(FileNotFoundError) as creation_error: create_instance_and_model( @@ -363,7 +377,10 @@ def 
test_lhc_creation_relative_modeldir_path(request, tmp_path, acc_models_lhc_2 model_dir_relpath.mkdir() optics_file_relpath = Path("R2022a_A30cmC30cmA10mL200cm.madx") - shutil.copy(acc_models_lhc_2022 / "operation/optics" / optics_file_relpath, model_dir_relpath / optics_file_relpath.name) + shutil.copy( + acc_models_lhc_2022 / "operation/optics" / optics_file_relpath, + model_dir_relpath / optics_file_relpath.name, + ) accel_opt = { "accel": "lhc", @@ -390,8 +407,8 @@ def test_lhc_creation_relative_modeldir_path(request, tmp_path, acc_models_lhc_2 @pytest.mark.basic def test_lhc_creation_nominal_driven_check_output(model_25cm_beam1): - """ Checks if the post_run() method succeeds on an already existing given model (dir), - and then checks that it failes when removing individual files from that model. """ + """Checks if the post_run() method succeeds on an already existing given model (dir), + and then checks that it failes when removing individual files from that model.""" accel = get_accelerator(**model_25cm_beam1) LhcModelCreator(accel).post_run() @@ -411,11 +428,12 @@ def test_lhc_creation_nominal_driven_check_output(model_25cm_beam1): if file_path_moved.exists(): shutil.move(file_path_moved, file_path) + # ---- cli tests ----------------------------------------------------------------------------------- + @pytest.mark.basic def test_lhc_creator_cli(tmp_path, acc_models_lhc_2025, capsys): - accel_opt = { "accel": "lhc", "year": "2025", @@ -444,6 +462,7 @@ def test_lhc_creator_cli(tmp_path, acc_models_lhc_2025, capsys): for m in modifiers: assert m.endswith(".madx") + @pytest.mark.basic def test_booster_creator_cli(tmp_path, acc_models_psb_2021, capsys): accel_opt = { @@ -469,14 +488,7 @@ def test_booster_creator_cli(tmp_path, acc_models_psb_2021, capsys): scenarios = [c for c in scenarios if len(c) > 0] # remove empty lines scenarios.sort() - assert scenarios == [ - "ad", - "east", - "isolde", - "lhc", - "sftpro", - "tof" - ] + assert scenarios == ["ad", "east", 
"isolde", "lhc", "sftpro", "tof"] accel_opt["scenario"] = "lhc" @@ -496,11 +508,12 @@ def test_booster_creator_cli(tmp_path, acc_models_psb_2021, capsys): "3_extraction", ] + @pytest.mark.basic def test_ps_creation_cli(tmp_path, acc_models_ps_2021, capsys): accel_opt = { "accel": "ps", - "nat_tunes": [0.21, 0.323], # from madx_job file in acc_models + "nat_tunes": [0.21, 0.323], # from madx_job file in acc_models "dpp": 0.0, "energy": 1.4, "year": "2018", @@ -527,14 +540,13 @@ def test_ps_creation_cli(tmp_path, acc_models_ps_2021, capsys): "4_flat_bottom_wo_QDN90", ] + # ---- helper -------------------------------------------------------------------------------------- + def check_accel_from_dir_vs_options( - model_dir: Path, - accel_options: DotDict, - accel_from_opt: Accelerator, - best_knowledge=False - ): + model_dir: Path, accel_options: DotDict, accel_from_opt: Accelerator, best_knowledge=False +): # creation via model_from_dir tests that all files are in place: accel_from_dir: Accelerator = get_accelerator( accel=accel_options["accel"], diff --git a/tests/unit/test_nxcals_mqt_extraction.py b/tests/unit/test_nxcals_mqt_extraction.py new file mode 100644 index 00000000..dd7a96df --- /dev/null +++ b/tests/unit/test_nxcals_mqt_extraction.py @@ -0,0 +1,99 @@ +from __future__ import annotations + +import re +from pathlib import Path + +import pandas as pd +import pytest + +from omc3.nxcals import mqt_extraction +from omc3.nxcals.constants import EXTRACTED_MQTS_FILENAME +from omc3.nxcals.knob_extraction import NXCalResult + +SAMPLE_DIR = Path(__file__).parent.parent / "inputs" / "knob_extractor" +TEST_CASES = ( + (1, SAMPLE_DIR / "extracted_mqts_b1.str"), + (2, SAMPLE_DIR / "extracted_mqts_b2.str"), +) + + +def _parse_mqt_line(line: str, tz: str = "Europe/Zurich") -> tuple[str, float, str, pd.Timestamp]: + pattern = re.compile( + r"([a-zA-Z0-9_.]+)\s*=\s*([0-9.eE+-]+)\s*;\s*! 
powerconverter:\s*([a-zA-Z0-9_.]+)\s*at\s*(.+)" + ) + match = pattern.match(line.strip()) + if not match: + raise ValueError(f"Unexpected line format: {line}") + name, value_str, pc_name, timestamp_raw = match.groups() + value = float(value_str) + timestamp = pd.Timestamp(timestamp_raw.strip(), tz=tz).replace(microsecond=0) + return name, value, pc_name, timestamp + + +def _load_results_from_file(file_path: Path, tz: str = "Europe/Zurich") -> list[NXCalResult]: + results: list[NXCalResult] = [] + for raw_line in file_path.read_text().splitlines(): + line = raw_line.strip() + if not line or line.startswith("!"): + continue + name, value, pc_name, timestamp = _parse_mqt_line(line, tz=tz) + results.append( + NXCalResult( + name=name, + value=value, + timestamp=timestamp, + pc_name=pc_name, + ) + ) + return results + + +@pytest.mark.cern_network +@pytest.mark.parametrize("beam, sample_file", TEST_CASES) +def test_get_mqts_matches_sample(beam: int, sample_file: Path): + sample_results = _load_results_from_file(sample_file) + expected_names = {result.name for result in sample_results} + assert expected_names == mqt_extraction.get_mqts(beam=beam) + + +@pytest.mark.cern_network +@pytest.mark.parametrize("beam, sample_file", TEST_CASES) +def test_main_reproduces_reference_output(tmp_path, beam: int, sample_file: Path): + sample_results = _load_results_from_file(sample_file) + + latest_sample_time = max(result.timestamp for result in sample_results) + query_time = latest_sample_time.to_pydatetime() + + output_dir = tmp_path / f"beam{beam}" + output_dir.mkdir() + + mqt_extraction.retrieve_mqts(time=query_time, beam=beam, output_dir=output_dir) + + output_path = output_dir / EXTRACTED_MQTS_FILENAME + + expected_results = [ + NXCalResult( + name=r.name, + value=r.value, + pc_name=r.pc_name, + timestamp=r.timestamp.tz_convert("UTC"), + ) + for r in sample_results + ] + + actual_results = _load_results_from_file(output_path, tz="UTC") + + expected_sorted = 
sorted(expected_results, key=lambda x: x.name) + actual_sorted = sorted(actual_results, key=lambda x: x.name) + + for exp, act in zip(expected_sorted, actual_sorted): + assert exp.name == act.name + assert exp.pc_name == act.pc_name + assert exp.value == pytest.approx(act.value, rel=1e-4) + assert abs((exp.timestamp - act.timestamp).total_seconds()) <= 1 + + +@pytest.mark.cern_network +def test_get_mqts_invalid_beam(): + with pytest.raises(ValueError): + mqt_extraction.get_mqts(beam=3) diff --git a/tests/utils/generate_mqt_references.py b/tests/utils/generate_mqt_references.py new file mode 100644 index 00000000..bf316b8c --- /dev/null +++ b/tests/utils/generate_mqt_references.py @@ -0,0 +1,38 @@ +#!/usr/bin/env python3 +""" +Copy reference MQT extraction files for testing. + +This script copies the extracted_mqts_b1.str and extracted_mqts_b2.str files +from CERN data paths to the test inputs directory. +""" + +import shutil +from pathlib import Path + +# Source paths for MQT reference files +B1_SOURCE = Path( + "/user/slops/data/LHC_DATA/OP_DATA/Betabeat/2025-04-24/LHCB1/Models/B1_60cm_checks/extracted_mqts.str" +) +B2_SOURCE = Path( + "/user/slops/data/LHC_DATA/OP_DATA/Betabeat/2025-04-21/LHCB2/Models/b2_120cm_correct_knobs/extracted_mqts.str" +) + +# Output directory for test inputs +OUTPUT_DIR = Path(__file__).parent.parent / "inputs" / "knob_extractor" + + +def main(): + """Copy MQT reference files for both beams.""" + OUTPUT_DIR.mkdir(parents=True, exist_ok=True) + + print("Copying MQT reference for Beam 1...") + shutil.copy(B1_SOURCE, OUTPUT_DIR / "extracted_mqts_b1.str") + + print("Copying MQT reference for Beam 2...") + shutil.copy(B2_SOURCE, OUTPUT_DIR / "extracted_mqts_b2.str") + + print(f"Reference files copied to {OUTPUT_DIR}") + + +if __name__ == "__main__": + main() From 484bdb16800b87ea318ac6d3902e2f2bff3918ed Mon Sep 17 00:00:00 2001 From: Joshua Gray <61458311+jgray-19@users.noreply.github.com> Date: Fri, 31 Oct 2025 17:31:32 +0100 Subject: [PATCH 
03/37] Add NXCALS and PySpark as optional dependencies for enhanced functionality --- pyproject.toml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pyproject.toml b/pyproject.toml index 49cfeeeb..1fc8a16d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -63,6 +63,7 @@ dependencies = [ [project.optional-dependencies] cern = [ "jpype1 >= 1.3", + "nxcals >= 1.6.6", # Direct NXCALS access "pytimber >= 3.0", # NXCALS support "pylogbook >= 3.4", "kerberos >= 1.3.1", # requires having krb-5config installed on the system @@ -74,6 +75,7 @@ optional = [ ] test = [ "jpype1 >= 1.3", + "pyspark >= 3.5", # needed only for cern tests "pytest >= 7.0", "pytest-cov >= 2.9", "pytest-timeout >= 1.4", From e091de877c9d0fd7d89c641e20617ccaf95ec85a Mon Sep 17 00:00:00 2001 From: Joshua Gray <61458311+jgray-19@users.noreply.github.com> Date: Mon, 3 Nov 2025 11:21:09 +0100 Subject: [PATCH 04/37] Refactor path handling in ModelCreator to return Path objects instead of POSIX strings --- omc3/model/model_creators/abstract_model_creator.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/omc3/model/model_creators/abstract_model_creator.py b/omc3/model/model_creators/abstract_model_creator.py index a06e805f..e58ea857 100644 --- a/omc3/model/model_creators/abstract_model_creator.py +++ b/omc3/model/model_creators/abstract_model_creator.py @@ -153,7 +153,7 @@ def _madx_path(self, path: Path | str, cwd: Path | str) -> str: target = Path(path) if target.is_absolute(): - return target.as_posix() + return target cwd_path = Path(cwd) cwd_norm = os.path.normpath(str(cwd_path)) @@ -166,7 +166,7 @@ def _madx_path(self, path: Path | str, cwd: Path | str) -> str: f"Path '{target}' cannot be expressed relative to working directory '{cwd_path}'." ) from exc - return Path(rel_path).as_posix() + return Path(rel_path) def get_update_deltap_script(self, deltap: float | str) -> str: """Get the madx snippet that updates the dpp in the machine. 
From 1d280f9dd722019b212e6c2eb70b7cc68af64613 Mon Sep 17 00:00:00 2001 From: Joshua Gray <61458311+jgray-19@users.noreply.github.com> Date: Tue, 4 Nov 2025 11:18:44 +0100 Subject: [PATCH 05/37] Refactor MAD-X script methods to remove cwd parameter and ensure paths are relative to model_dir --- omc3/correction/response_madx.py | 1 - .../model_creators/abstract_model_creator.py | 79 +++++++----------- .../model/model_creators/lhc_model_creator.py | 81 +++++++++---------- omc3/model/model_creators/ps_model_creator.py | 18 ++--- .../model_creators/psbooster_model_creator.py | 16 ++-- .../model/model_creators/sps_model_creator.py | 18 ++--- tests/unit/test_model_creator.py | 9 ++- 7 files changed, 98 insertions(+), 124 deletions(-) diff --git a/omc3/correction/response_madx.py b/omc3/correction/response_madx.py index c2461a8c..decc7b70 100644 --- a/omc3/correction/response_madx.py +++ b/omc3/correction/response_madx.py @@ -153,7 +153,6 @@ def _generate_madx_jobs( def _get_madx_job(accel_inst: Accelerator) -> str: - # use relative paths as we use model_dir as cwd # get nominal setup from creator creator = _get_nominal_model_creator(accel_inst) job_content = creator.get_base_madx_script(accel_inst.model_dir) diff --git a/omc3/model/model_creators/abstract_model_creator.py b/omc3/model/model_creators/abstract_model_creator.py index e58ea857..a68b04fe 100644 --- a/omc3/model/model_creators/abstract_model_creator.py +++ b/omc3/model/model_creators/abstract_model_creator.py @@ -8,7 +8,6 @@ from __future__ import annotations import contextlib -import os import shutil from abc import ABC, abstractmethod from pathlib import Path @@ -103,7 +102,7 @@ def full_run(self): # get madx-script with relative output-paths # (model_dir is base-path in get_madx_script) - madx_script = self.get_madx_script(cwd=self.accel.model_dir) + madx_script = self.get_madx_script() # Run madx to create model run_string( @@ -117,56 +116,36 @@ def full_run(self): self.post_run() @abstractmethod - def 
get_madx_script(self, cwd: Path | str) -> str: + def get_madx_script(self) -> str: """ Returns the ``MAD-X`` script used to create the model (directory). - - Args: - cwd (Path | str): The current working directory where the madx script will be run. - - Returns: - The string of the ``MAD-X`` script used to used to create the model (directory). """ pass @abstractmethod - def get_base_madx_script(self, cwd: Path | str) -> str: + def get_base_madx_script(self) -> str: """ Returns the ``MAD-X`` script used to set-up the basic accelerator in MAD-X, without actually creating the twiss-output, as some modifications to the accelerator may come afterwards (depending on which model-creator is calling this). - - Args: - cwd (Path | str): The current working directory where the madx script will be run. - - Returns: - The string of the ``MAD-X`` script used to used to set-up the machine. """ pass - def _madx_path(self, path: Path | str, cwd: Path | str) -> str: + def _madx_path(self, path: Path | str) -> str: """Return a MAD-X friendly path. - Absolute paths are returned unchanged so MAD-X can consume them directly. If the provided - path is not absolute, it is expressed relative to the working directory ``cwd``. This - keeps the generated scripts consistent regardless of the caller's current directory. + Args: + path (Path | str): The path to convert. + Returns: + str: The path as a string, relative to the model dir if possible. """ - - target = Path(path) - if target.is_absolute(): - return target - - cwd_path = Path(cwd) - cwd_norm = os.path.normpath(str(cwd_path)) - target_norm = os.path.normpath(str(cwd_path / target)) - try: - rel_path = os.path.relpath(target_norm, cwd_norm) - except ValueError as exc: # pragma: no cover - platform dependent edge case - raise AcceleratorDefinitionError( - f"Path '{target}' cannot be expressed relative to working directory '{cwd_path}'." 
- ) from exc - - return Path(rel_path) + # Try converting to a relative path if it is inside the cwd (model_dir) + return path.relative_to(self.accel.model_dir) + except ValueError: + LOGGER.debug( + f"Path {path} is not relative to the model dir {self.accel.model_dir}, using absolute path." + ) + return path.absolute() def get_update_deltap_script(self, deltap: float | str) -> str: """Get the madx snippet that updates the dpp in the machine. @@ -420,13 +399,13 @@ def _create_corrections_file(self): raise NotImplementedError("Could not determine type of corrections. Aborting.") - def get_madx_script(self, cwd: Path | str) -> str: - madx_script = self.get_base_madx_script(cwd) + def get_madx_script(self) -> str: + madx_script = self.get_base_madx_script() - macros_path = self._madx_path(self.output_dir / MACROS_DIR / GENERAL_MACROS, cwd) - measurement_path = self._madx_path(self.output_dir / self.measurement_madx, cwd) - twiss_forward_path = self._madx_path(self.output_dir / self.twiss_forward, cwd) - twiss_backward_path = self._madx_path(self.output_dir / self.twiss_backward, cwd) + macros_path = self._madx_path(self.output_dir / MACROS_DIR / GENERAL_MACROS) + measurement_path = self._madx_path(self.output_dir / self.measurement_madx) + twiss_forward_path = self._madx_path(self.output_dir / self.twiss_forward) + twiss_backward_path = self._madx_path(self.output_dir / self.twiss_backward) if self._sequence_name is None: raise ValueError( @@ -480,14 +459,12 @@ def get_madx_script(self, cwd: Path | str) -> str: ) if self.corrections is not None: - corrections_path = self._madx_path(self.output_dir / self.corrections_madx, cwd) + corrections_path = self._madx_path(self.output_dir / self.corrections_madx) twiss_forward_corr_path = self._madx_path( - self.output_dir / self.twiss_forward_corrected, - cwd, + self.output_dir / self.twiss_forward_corrected ) twiss_backward_corr_path = self._madx_path( - self.output_dir / self.twiss_backward_corrected, - cwd, + 
self.output_dir / self.twiss_backward_corrected ) madx_script += "\n".join( [ @@ -538,7 +515,7 @@ def __init__( self.corr_files = corr_files self.update_dpp = update_dpp - def get_madx_script(self, cwd: Path | str) -> str: + def get_madx_script(self) -> str: """Get the madx script for the correction model creator, which updates the model after correcion. This is a basic implementation which does not update the dpp, but should work for generic accelerators. @@ -549,12 +526,12 @@ def get_madx_script(self, cwd: Path | str) -> str: ) # do not get_madx_script as we don't need the uncorrected output. - madx_script = self.get_base_madx_script(cwd) + madx_script = self.get_base_madx_script() for corr_file in self.corr_files: # Load the corrections - madx_script += f"call, file = '{self._madx_path(corr_file, cwd)}';\n" + madx_script += f"call, file = '{self._madx_path(corr_file)}';\n" - twiss_out_path = self._madx_path(self.twiss_out, cwd) + twiss_out_path = self._madx_path(self.twiss_out) madx_script += f"{self._get_select_command()}\ntwiss, file = {twiss_out_path};\n" return madx_script diff --git a/omc3/model/model_creators/lhc_model_creator.py b/omc3/model/model_creators/lhc_model_creator.py index bbabb3f8..37842ce4 100644 --- a/omc3/model/model_creators/lhc_model_creator.py +++ b/omc3/model/model_creators/lhc_model_creator.py @@ -181,14 +181,13 @@ def check_accelerator_instance(self) -> None: f"{', '.join([str(accel.model_dir / modifier) for modifier in inexistent_modifiers])}" ) - def get_madx_script(self, cwd: Path | str) -> str: # nominal + def get_madx_script(self) -> str: # nominal """Get madx script to create a LHC model.""" accel: Lhc = self.accel - cwd = Path(cwd) - madx_script = self.get_base_madx_script(cwd) - twiss_dat_path = self._madx_path(accel.model_dir / TWISS_DAT, cwd) - twiss_elements_path = self._madx_path(accel.model_dir / TWISS_ELEMENTS_DAT, cwd) + madx_script = self.get_base_madx_script() + twiss_dat_path = self._madx_path(accel.model_dir / 
TWISS_DAT) + twiss_elements_path = self._madx_path(accel.model_dir / TWISS_ELEMENTS_DAT) madx_script += ( f"exec, do_twiss_monitors(LHCB{accel.beam}, '{twiss_dat_path}', {accel.dpp});\n" f"exec, do_twiss_elements(LHCB{accel.beam}, '{twiss_elements_path}', {accel.dpp});\n" @@ -197,8 +196,8 @@ def get_madx_script(self, cwd: Path | str) -> str: # nominal # allow user to modify script and enable excitation, if driven tunes are given use_acd = accel.excitation == AccExcitationMode.ACD use_adt = accel.excitation == AccExcitationMode.ADT - twiss_ac_path = self._madx_path(accel.model_dir / TWISS_AC_DAT, cwd) - twiss_adt_path = self._madx_path(accel.model_dir / TWISS_ADT_DAT, cwd) + twiss_ac_path = self._madx_path(accel.model_dir / TWISS_AC_DAT) + twiss_adt_path = self._madx_path(accel.model_dir / TWISS_ADT_DAT) madx_script += ( f"use_acd={use_acd:d};\n" f"use_adt={use_adt:d};\n" @@ -210,12 +209,11 @@ def get_madx_script(self, cwd: Path | str) -> str: # nominal ) return madx_script - def get_base_madx_script(self, cwd: Path | str) -> str: + def get_base_madx_script(self) -> str: """Returns the base LHC madx script.""" accel: Lhc = self.accel - cwd = Path(cwd) - madx_script = self._get_sequence_initialize_script(cwd) + madx_script = self._get_sequence_initialize_script() if self._uses_ats_knobs(): madx_script += "\n! ----- Matching Knobs -----\n" LOGGER.debug( @@ -231,7 +229,7 @@ def get_base_madx_script(self, cwd: Path | str) -> str: return madx_script - def _get_sequence_initialize_script(self, cwd: Path) -> str: + def _get_sequence_initialize_script(self) -> str: """Returns the LHC sequence initialization script. 
This is split up here from the matching (in the base-script), @@ -241,18 +239,18 @@ def _get_sequence_initialize_script(self, cwd: Path) -> str: accel: Lhc = self.accel acc_model_path = None if accel.acc_model_path is not None: - acc_model_path = Path(self._madx_path(accel.acc_model_path, cwd)) + acc_model_path = Path(self._madx_path(accel.acc_model_path)) madx_script = ( f"{self._get_madx_script_info_comments()}\n\n" - f"call, file = '{self._madx_path(accel.model_dir / MACROS_DIR / GENERAL_MACROS, cwd)}';\n" - f"call, file = '{self._madx_path(accel.model_dir / MACROS_DIR / LHC_MACROS, cwd)}';\n" + f"call, file = '{self._madx_path(accel.model_dir / MACROS_DIR / GENERAL_MACROS)}';\n" + f"call, file = '{self._madx_path(accel.model_dir / MACROS_DIR / LHC_MACROS)}';\n" ) madx_script += f"{MADX_ENERGY_VAR} = {accel.energy};\n" madx_script += "exec, define_nominal_beams();\n\n" if self._uses_run3_macros(): LOGGER.debug("According to the optics year, Run 3 versions of the macros will be used") - madx_script += f"call, file = '{self._madx_path(accel.model_dir / MACROS_DIR / LHC_MACROS_RUN3, cwd)}';\n" + madx_script += f"call, file = '{self._madx_path(accel.model_dir / MACROS_DIR / LHC_MACROS_RUN3)}';\n" madx_script += "\n! 
----- Calling Sequence -----\n" madx_script += ( @@ -266,7 +264,7 @@ def _get_sequence_initialize_script(self, cwd: Path) -> str: if accel.modifiers is not None: # if modifier is an absolute path, go there, otherwise use the path refers from model_dir madx_script += "".join( - f"call, file = '{self._madx_path(modifier, cwd)}'; {MODIFIER_TAG}\n" + f"call, file = '{self._madx_path(modifier)}'; {MODIFIER_TAG}\n" for modifier in accel.modifiers ) @@ -288,7 +286,7 @@ def _get_sequence_initialize_script(self, cwd: Path) -> str: Path(accel.acc_model_path) / LHC_REMOVE_TRIPLET_SYMMETRY_RELPATH ) if remove_symmetry_knob_abs.exists(): # alternatively check if year != 2018/2021 - remove_symmetry_knob_path = self._madx_path(remove_symmetry_knob_abs, cwd) + remove_symmetry_knob_path = self._madx_path(remove_symmetry_knob_abs) madx_script += ( "\n! ----- Remove IR symmetry definitions -----\n" f'\ncall, file="{remove_symmetry_knob_path}"; ' @@ -489,22 +487,20 @@ def prepare_run(self): save_index=NAME, ) - def get_madx_script(self, cwd: Path | str) -> str: + def get_madx_script(self) -> str: accel: Lhc = self.accel - cwd = Path(cwd) - madx_script = self.get_base_madx_script(cwd) + madx_script = self.get_base_madx_script() madx_script += "\n! ----- Load MQTs -----\n" mqts_file = accel.model_dir / EXTRACTED_MQTS_FILENAME if mqts_file.exists(): - mqts_path = self._madx_path(mqts_file, cwd) + mqts_path = self._madx_path(mqts_file) madx_script += f"call, file = '{mqts_path}';\n" madx_script += "\n! 
----- Output Files -----\n" - twiss_bk_path = self._madx_path(accel.model_dir / TWISS_BEST_KNOWLEDGE_DAT, cwd) + twiss_bk_path = self._madx_path(accel.model_dir / TWISS_BEST_KNOWLEDGE_DAT) twiss_bk_elements_path = self._madx_path( - accel.model_dir / TWISS_ELEMENTS_BEST_KNOWLEDGE_DAT, - cwd, + accel.model_dir / TWISS_ELEMENTS_BEST_KNOWLEDGE_DAT ) madx_script += ( f"exec, do_twiss_monitors(LHCB{accel.beam}, '{twiss_bk_path}', {accel.dpp});\n" @@ -512,19 +508,18 @@ def get_madx_script(self, cwd: Path | str) -> str: ) return madx_script - def get_base_madx_script(self, cwd: Path | str) -> str: + def get_base_madx_script(self) -> str: accel: Lhc = self.accel - cwd = Path(cwd) # don't load the super().get_base_madx_script as this also matches the tunes at the end, # which we skip here as we are using the data from the machine. - madx_script = self._get_sequence_initialize_script(cwd) + madx_script = self._get_sequence_initialize_script() madx_script += ( f"\n! ----- For Best Knowledge Model -----\n" - f"readmytable, file = '{self._madx_path(accel.model_dir / B2_ERRORS_TFS, cwd)}', table=errtab;\n" + f"readmytable, file = '{self._madx_path(accel.model_dir / B2_ERRORS_TFS)}', table=errtab;\n" f"seterr, table=errtab;\n" - f"call, file = '{self._madx_path(accel.model_dir / B2_SETTINGS_MADX, cwd)}';\n" + f"call, file = '{self._madx_path(accel.model_dir / B2_SETTINGS_MADX)}';\n" ) return madx_script @@ -559,25 +554,24 @@ def __init__( LOGGER.debug("Initializing LHC Correction Model Creator") super().__init__(accel, twiss_out, corr_files, update_dpp) - def get_madx_script(self, cwd: Path | str) -> str: + def get_madx_script(self) -> str: """Get the madx script for the correction model creator, which updates the model after correcion.""" accel: Lhc = self.accel - cwd = Path(cwd) # do not super().get_madx_script as we don't need the uncorrected output. 
- madx_script = self.get_base_madx_script(cwd) + madx_script = self.get_base_madx_script() # First set the dpp to the value in the accelerator model madx_script += f"{ORBIT_DPP} = {accel.dpp};\n" for corr_file in self.corr_files: # Load the corrections, can also update ORBIT_DPP - madx_script += f"call, file = '{self._madx_path(corr_file, cwd)}';\n" + madx_script += f"call, file = '{self._madx_path(corr_file)}';\n" # If we are doing orbit correction, we need to ensure that a correct and a match is done if self.update_dpp: madx_script += self.get_update_deltap_script(deltap=ORBIT_DPP) - twiss_out_path = self._madx_path(self.output_dir / self.twiss_out, cwd) + twiss_out_path = self._madx_path(self.output_dir / self.twiss_out) madx_script += ( f'exec, do_twiss_elements(LHCB{accel.beam}, "{twiss_out_path}", {ORBIT_DPP});\n' ) @@ -603,13 +597,12 @@ def files_to_check(self) -> list[str]: class LhcSegmentCreator(SegmentCreator, LhcModelCreator): - def get_madx_script(self, cwd: Path | str) -> str: + def get_madx_script(self) -> str: accel: Lhc = self.accel - cwd = Path(cwd) - madx_script = self.get_base_madx_script(cwd) - measurement_path = self._madx_path(self.output_dir / self.measurement_madx, cwd) - twiss_forward_path = self._madx_path(self.output_dir / self.twiss_forward, cwd) - twiss_backward_path = self._madx_path(self.output_dir / self.twiss_backward, cwd) + madx_script = self.get_base_madx_script() + measurement_path = self._madx_path(self.output_dir / self.measurement_madx) + twiss_forward_path = self._madx_path(self.output_dir / self.twiss_forward) + twiss_backward_path = self._madx_path(self.output_dir / self.twiss_backward) madx_script += "\n".join( [ @@ -653,14 +646,12 @@ def get_madx_script(self, cwd: Path | str) -> str: ) if self.corrections is not None: - corrections_path = self._madx_path(self.output_dir / self.corrections_madx, cwd) + corrections_path = self._madx_path(self.output_dir / self.corrections_madx) twiss_forward_corr_path = self._madx_path( - 
self.output_dir / self.twiss_forward_corrected, - cwd, + self.output_dir / self.twiss_forward_corrected ) twiss_backward_corr_path = self._madx_path( - self.output_dir / self.twiss_backward_corrected, - cwd, + self.output_dir / self.twiss_backward_corrected ) madx_script += "\n".join( [ diff --git a/omc3/model/model_creators/ps_model_creator.py b/omc3/model/model_creators/ps_model_creator.py index 72d1e569..2bfe06d8 100644 --- a/omc3/model/model_creators/ps_model_creator.py +++ b/omc3/model/model_creators/ps_model_creator.py @@ -25,35 +25,35 @@ class PsModelCreator(PsBaseModelCreator): acc_model_name: str = Ps.NAME - def get_madx_script(self, cwd: Path) -> str: + def get_madx_script(self) -> str: accel: Ps = self.accel if accel.energy is None: raise RuntimeError( "PS model creation currently relies on manual Energy management. Please provide the --energy ENERGY flag" ) - madx_script = self.get_base_madx_script(cwd) + madx_script = self.get_base_madx_script() replace_dict = { "USE_ACD": str(int(accel.excitation == AccExcitationMode.ACD)), "DPP": accel.dpp, - "OUTPUT": self._madx_path(self.output_dir, cwd), + "OUTPUT": self._madx_path(self.output_dir), } madx_template = accel.get_file("twiss.mask").read_text() madx_script += madx_template % replace_dict return madx_script - def get_base_madx_script(self, cwd: Path | str) -> str: + def get_base_madx_script(self) -> str: accel: Ps = self.accel use_acd = accel.excitation == AccExcitationMode.ACD replace_dict = { - "FILES_DIR": self._madx_path(accel.get_dir(), cwd), + "FILES_DIR": self._madx_path(accel.get_dir()), "USE_ACD": str(int(use_acd)), "NAT_TUNE_X": accel.nat_tunes[0], "NAT_TUNE_Y": accel.nat_tunes[1], "KINETICENERGY": 0 if accel.energy is None else accel.energy, "USE_CUSTOM_PC": "0" if accel.energy is None else "1", - "ACC_MODELS_DIR": self._madx_path(accel.acc_model_path, cwd), - "BEAM_FILE": self._madx_path(accel.beam_file, cwd), - "STR_FILE": self._madx_path(accel.str_file, cwd), + "ACC_MODELS_DIR": 
self._madx_path(accel.acc_model_path), + "BEAM_FILE": self._madx_path(accel.beam_file), + "STR_FILE": self._madx_path(accel.str_file), "DRV_TUNE_X": "0", "DRV_TUNE_Y": "0", "MODIFIERS": "", @@ -65,7 +65,7 @@ def get_base_madx_script(self, cwd: Path | str) -> str: if accel.modifiers: replace_dict["MODIFIERS"] = "\n".join( [ - f" call, file = '{self._madx_path(modifier, cwd)}'; {MODIFIER_TAG}" + f" call, file = '{self._madx_path(modifier)}'; {MODIFIER_TAG}" for modifier in accel.modifiers ] ) diff --git a/omc3/model/model_creators/psbooster_model_creator.py b/omc3/model/model_creators/psbooster_model_creator.py index 1c91c8ee..16c76726 100644 --- a/omc3/model/model_creators/psbooster_model_creator.py +++ b/omc3/model/model_creators/psbooster_model_creator.py @@ -23,33 +23,33 @@ class PsboosterModelCreator(PsBaseModelCreator): acc_model_name: str = "psb" - def get_madx_script(self, cwd: Path | str) -> str: + def get_madx_script(self) -> str: accel: Psbooster = self.accel - madx_script = self.get_base_madx_script(cwd) + madx_script = self.get_base_madx_script() replace_dict = { "USE_ACD": str(int(accel.excitation == AccExcitationMode.ACD)), "RING": accel.ring, "DPP": accel.dpp, - "OUTPUT": self._madx_path(self.output_dir, cwd), + "OUTPUT": self._madx_path(self.output_dir), } madx_template = accel.get_file("twiss.mask").read_text() madx_script += madx_template % replace_dict return madx_script - def get_base_madx_script(self, cwd: Path | str) -> str: + def get_base_madx_script(self) -> str: accel: Psbooster = self.accel use_acd = accel.excitation == AccExcitationMode.ACD replace_dict = { - "FILES_DIR": self._madx_path(accel.get_dir(), cwd), + "FILES_DIR": self._madx_path(accel.get_dir()), "USE_ACD": str(int(use_acd)), "RING": str(accel.ring), "NAT_TUNE_X": accel.nat_tunes[0], "NAT_TUNE_Y": accel.nat_tunes[1], "KINETICENERGY": 0 if accel.energy is None else accel.energy, "USE_CUSTOM_PC": "0" if accel.energy is None else "1", - "ACC_MODELS_DIR": 
self._madx_path(accel.acc_model_path, cwd), - "BEAM_FILE": self._madx_path(accel.beam_file, cwd), - "STR_FILE": self._madx_path(accel.str_file, cwd), + "ACC_MODELS_DIR": self._madx_path(accel.acc_model_path), + "BEAM_FILE": self._madx_path(accel.beam_file), + "STR_FILE": self._madx_path(accel.str_file), "DRV_TUNE_X": "", "DRV_TUNE_Y": "", } diff --git a/omc3/model/model_creators/sps_model_creator.py b/omc3/model/model_creators/sps_model_creator.py index f8c53742..8b2093b0 100644 --- a/omc3/model/model_creators/sps_model_creator.py +++ b/omc3/model/model_creators/sps_model_creator.py @@ -123,10 +123,10 @@ def prepare_options(self, opt) -> bool: # Set the found paths --- accel.acc_model_path = acc_model_path - def get_base_madx_script(self, cwd: Path | str) -> str: + def get_base_madx_script(self) -> str: accel: Sps = self.accel use_excitation = accel.excitation != AccExcitationMode.FREE - seq_path = self._madx_path(accel.acc_model_path / "sps.seq", cwd) + seq_path = self._madx_path(accel.acc_model_path / "sps.seq") # The very basics --- madx_script = ( @@ -137,10 +137,10 @@ def get_base_madx_script(self, cwd: Path | str) -> str: if accel.modifiers is not None: # includes the strengths file for modifier in accel.modifiers: - modifier_path = self._madx_path(modifier, cwd) + modifier_path = self._madx_path(modifier) madx_script += f"call, file = '{modifier_path}'; {MODIFIER_TAG}\n" - toolkit_path = self._madx_path(accel.acc_model_path / "toolkit" / "macro.madx", cwd) + toolkit_path = self._madx_path(accel.acc_model_path / "toolkit" / "macro.madx") madx_script += ( f"call, file ='{toolkit_path}';\n" "option, echo;\n\n" @@ -202,13 +202,13 @@ def get_base_madx_script(self, cwd: Path | str) -> str: ) return madx_script - def get_madx_script(self, cwd: Path | str) -> str: + def get_madx_script(self) -> str: accel: Sps = self.accel use_excitation = accel.excitation != AccExcitationMode.FREE - madx_script = self.get_base_madx_script(cwd) - twiss_path = 
self._madx_path(self.output_dir / TWISS_DAT, cwd) - twiss_elements_path = self._madx_path(self.output_dir / TWISS_ELEMENTS_DAT, cwd) + madx_script = self.get_base_madx_script() + twiss_path = self._madx_path(self.output_dir / TWISS_DAT) + twiss_elements_path = self._madx_path(self.output_dir / TWISS_ELEMENTS_DAT) madx_script += ( "! Create twiss data files ---\n" f"{self._get_select_command(pattern=accel.RE_DICT[AccElementTypes.BPMS])}" @@ -220,7 +220,7 @@ def get_madx_script(self, cwd: Path | str) -> str: if use_excitation or accel.drv_tunes is not None: # allow user to modify script and enable excitation, if driven tunes are given - twiss_ac_path = self._madx_path(self.output_dir / TWISS_AC_DAT, cwd) + twiss_ac_path = self._madx_path(self.output_dir / TWISS_AC_DAT) madx_script += ( f"if(use_acd == 1){{\n" " betxac = table(twiss, hacmap, betx);\n" diff --git a/tests/unit/test_model_creator.py b/tests/unit/test_model_creator.py index 7ea55cb0..d2ac2a85 100644 --- a/tests/unit/test_model_creator.py +++ b/tests/unit/test_model_creator.py @@ -556,7 +556,14 @@ def check_accel_from_dir_vs_options( _check_arrays(accel_from_opt.nat_tunes, accel_from_dir.nat_tunes, eps=1e-4, tunes=True) _check_arrays(accel_from_opt.drv_tunes, accel_from_dir.drv_tunes, eps=1e-4, tunes=True) - _check_arrays(accel_from_opt.modifiers, accel_from_dir.modifiers) + # Check that each modifier from dir is relative to model_dir if possible + for mod_opt, mod_dir in zip(accel_from_opt.modifiers, accel_from_dir.modifiers): + try: + mod_dir_rel = mod_dir.relative_to(model_dir) + except ValueError: + mod_dir_rel = mod_dir + assert mod_opt == mod_dir_rel + assert accel_from_opt.excitation == accel_from_dir.excitation assert accel_from_opt.model_dir == accel_from_dir.model_dir From fe9c2a5b7ad12f3c07f8c7ca831f2c500aab1dad Mon Sep 17 00:00:00 2001 From: Joshua Gray <61458311+jgray-19@users.noreply.github.com> Date: Tue, 4 Nov 2025 11:27:21 +0100 Subject: [PATCH 06/37] Remove unused TYPE_CHECKING imports 
from model creator files --- omc3/model/model_creators/ps_model_creator.py | 4 ---- omc3/model/model_creators/psbooster_model_creator.py | 2 -- tests/unit/test_model_creator.py | 2 +- 3 files changed, 1 insertion(+), 7 deletions(-) diff --git a/omc3/model/model_creators/ps_model_creator.py b/omc3/model/model_creators/ps_model_creator.py index 2bfe06d8..3826a2cb 100644 --- a/omc3/model/model_creators/ps_model_creator.py +++ b/omc3/model/model_creators/ps_model_creator.py @@ -9,7 +9,6 @@ import logging import shutil -from typing import TYPE_CHECKING from omc3.model.accelerators.accelerator import AccExcitationMode from omc3.model.accelerators.ps import Ps @@ -18,9 +17,6 @@ LOGGER = logging.getLogger(__name__) -if TYPE_CHECKING: - from pathlib import Path - class PsModelCreator(PsBaseModelCreator): acc_model_name: str = Ps.NAME diff --git a/omc3/model/model_creators/psbooster_model_creator.py b/omc3/model/model_creators/psbooster_model_creator.py index 16c76726..b4aecf78 100644 --- a/omc3/model/model_creators/psbooster_model_creator.py +++ b/omc3/model/model_creators/psbooster_model_creator.py @@ -15,8 +15,6 @@ from omc3.model.model_creators.ps_base_model_creator import PsBaseModelCreator if TYPE_CHECKING: - from pathlib import Path - from omc3.model.accelerators.psbooster import Psbooster diff --git a/tests/unit/test_model_creator.py b/tests/unit/test_model_creator.py index d2ac2a85..352aaa9c 100644 --- a/tests/unit/test_model_creator.py +++ b/tests/unit/test_model_creator.py @@ -25,7 +25,7 @@ ) from omc3.model.manager import get_accelerator from omc3.model.model_creators.lhc_model_creator import ( - LhcBestKnowledgeCreator, + # LhcBestKnowledgeCreator, LhcModelCreator, ) from omc3.model_creator import create_instance_and_model From 492e6c7ee736a14a227bdd21a47cff38b4f97bb4 Mon Sep 17 00:00:00 2001 From: Joshua Gray <61458311+jgray-19@users.noreply.github.com> Date: Tue, 4 Nov 2025 12:04:44 +0100 Subject: [PATCH 07/37] Fix comparison of modifier paths in 
check_accel_from_dir_vs_options function --- tests/unit/test_model_creator.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/unit/test_model_creator.py b/tests/unit/test_model_creator.py index 352aaa9c..b4061f82 100644 --- a/tests/unit/test_model_creator.py +++ b/tests/unit/test_model_creator.py @@ -559,10 +559,10 @@ def check_accel_from_dir_vs_options( # Check that each modifier from dir is relative to model_dir if possible for mod_opt, mod_dir in zip(accel_from_opt.modifiers, accel_from_dir.modifiers): try: - mod_dir_rel = mod_dir.relative_to(model_dir) + mod_opt_rel = mod_opt.relative_to(model_dir) except ValueError: - mod_dir_rel = mod_dir - assert mod_opt == mod_dir_rel + mod_opt_rel = mod_opt + assert mod_dir == mod_opt_rel assert accel_from_opt.excitation == accel_from_dir.excitation assert accel_from_opt.model_dir == accel_from_dir.model_dir From 59c57ec20c765b5bbcb867fadeba50c082aabafe Mon Sep 17 00:00:00 2001 From: Joshua Gray <61458311+jgray-19@users.noreply.github.com> Date: Tue, 4 Nov 2025 14:16:49 +0100 Subject: [PATCH 08/37] Add MQT extraction script and update retrieval function to support output path --- doc/entrypoints/scripts.rst | 5 ++ omc3/correction/response_madx.py | 2 +- omc3/correction/sequence_evaluation.py | 2 +- omc3/nxcals/mqt_extraction.py | 78 ++++++----------------- omc3/scripts/mqt_extraction.py | 79 ++++++++++++++++++++++++ tests/unit/test_nxcals_mqt_extraction.py | 3 +- 6 files changed, 108 insertions(+), 61 deletions(-) create mode 100644 omc3/scripts/mqt_extraction.py diff --git a/doc/entrypoints/scripts.rst b/doc/entrypoints/scripts.rst index 5d2575ae..84097467 100644 --- a/doc/entrypoints/scripts.rst +++ b/doc/entrypoints/scripts.rst @@ -49,3 +49,8 @@ Scripts .. automodule:: omc3.scripts.lhc_corrector_list_check :members: :noindex: + + +.. 
automodule:: omc3.scripts.mqt_extraction + :members: + :noindex: diff --git a/omc3/correction/response_madx.py b/omc3/correction/response_madx.py index decc7b70..969db9aa 100644 --- a/omc3/correction/response_madx.py +++ b/omc3/correction/response_madx.py @@ -155,7 +155,7 @@ def _generate_madx_jobs( def _get_madx_job(accel_inst: Accelerator) -> str: # get nominal setup from creator creator = _get_nominal_model_creator(accel_inst) - job_content = creator.get_base_madx_script(accel_inst.model_dir) + job_content = creator.get_base_madx_script() job_content += ( "select, flag=twiss, clear;\n" f"select, flag=twiss, pattern='{accel_inst.RE_DICT[AccElementTypes.BPMS]}', " diff --git a/omc3/correction/sequence_evaluation.py b/omc3/correction/sequence_evaluation.py index 83f79602..8e58e79c 100644 --- a/omc3/correction/sequence_evaluation.py +++ b/omc3/correction/sequence_evaluation.py @@ -243,7 +243,7 @@ def _create_basic_job( # get nominal setup from creator creator_class = get_model_creator_class(accel_inst, CreatorType.NOMINAL) creator: ModelCreator = creator_class(accel_inst) - job_content = creator.get_base_madx_script(accel_inst.model_dir) + job_content = creator.get_base_madx_script() # create a survey and save it to a temporary file job_content += ( diff --git a/omc3/nxcals/mqt_extraction.py b/omc3/nxcals/mqt_extraction.py index 797a7f45..6211e154 100644 --- a/omc3/nxcals/mqt_extraction.py +++ b/omc3/nxcals/mqt_extraction.py @@ -4,77 +4,19 @@ This module provides functions to retrieve MQT knob values for the LHC for a specified beam and time using NXCALS and LSA. - -**Arguments:** - -*--Required--* - -- **time** *(datetime|str)*: - - The timestamp for which to retrieve the data (timezone-aware recommended). If a string is provided, it will be parsed to a datetime object, assuming UTC timezone. - -- **beam** *(int)*: - - The beam number (1 or 2). - -- **output_dir** *(str|Path)*: - Path to the output directory where the MQT knob values will be saved in. 
""" import logging from datetime import datetime -from pathlib import Path - -from generic_parser import DotDict, EntryPointParameters, entrypoint # import jpype from pyspark.sql import SparkSession -from omc3.nxcals.constants import EXTRACTED_MQTS_FILENAME from omc3.nxcals.knob_extraction import NXCalResult, get_knob_vals -from omc3.utils.iotools import DateOrStr, PathOrStr -from omc3.utils.mock import cern_network_import - -spark_session_builder = cern_network_import("nxcals.spark_session_builder") logger = logging.getLogger(__name__) -def _get_params() -> EntryPointParameters: - """ - Define the parameters for the MQT retrieval entry point. - """ - return EntryPointParameters( - time={ - "type": DateOrStr, - "help": "The timestamp for which to retrieve the data (timezone-aware recommended).", - }, - beam={"type": int, "help": "The beam number (1 or 2)."}, - output_dir={ - "type": PathOrStr, - "help": "Path to the output directory where the MQT knob values will be saved in.", - }, - ) - - -@entrypoint(_get_params(), strict=True) -def retrieve_mqts(opt: DotDict) -> None: - """ - Retrieve MQT (Quadrupole Trim) knob values from NXCALS for a specific time and beam, - and save them to a file in the specified output directory. - """ - spark = spark_session_builder.get_or_create() - mqt_vals = get_mqt_vals(spark, opt.time, opt.beam) - output_path = Path(opt.output_dir) / EXTRACTED_MQTS_FILENAME - with output_path.open("w") as f: - for result in mqt_vals: - timestamp_str = f"{result.timestamp:%Y-%m-%d %H:%M:%S%z}" - value_str = f"{result.value:.10E}".replace("E+", "E") - f.write( - f"{result.name:<15}= {value_str}; ! powerconverter: {result.pc_name} at {timestamp_str}\n" - ) - - def get_mqts(beam: int) -> set[str]: """ Generate the set of MAD-X MQT (Quadrupole Trim) variable names for a given beam. 
@@ -124,3 +66,23 @@ def get_mqt_vals(spark: SparkSession, time: datetime, beam: int) -> list[NXCalRe pattern = f"RPMBB.UA%.RQT%.A%B{beam}:I_MEAS" patterns = [pattern] return get_knob_vals(spark, time, beam, patterns, madx_mqts, "MQT: ") + + +def knobs_to_madx(mqt_vals: list[NXCalResult]) -> str: + """ + Convert a list of NXCalResult objects to a MAD-X script string. + + Args: + mqt_vals: List of NXCalResult objects containing knob values. + + Returns: + A string containing the MAD-X script with knob assignments. + """ + lines = [] + for result in mqt_vals: + timestamp_str = f"{result.timestamp:%Y-%m-%d %H:%M:%S%z}" + value_str = f"{result.value:.10E}".replace("E+", "E") + lines.append( + f"{result.name:<15}= {value_str}; ! powerconverter: {result.pc_name} at {timestamp_str}\n" + ) + return "".join(lines) diff --git a/omc3/scripts/mqt_extraction.py b/omc3/scripts/mqt_extraction.py new file mode 100644 index 00000000..558cc055 --- /dev/null +++ b/omc3/scripts/mqt_extraction.py @@ -0,0 +1,79 @@ +""" +MQT Extraction +--------------- + +Extract MQT (Quadrupole Trim) knob values from NXCALS for a specific time and beam. + +**Arguments:** + +*--Required--* + +- **time** *(datetime|str)*: + + The timestamp for which to retrieve the data (timezone-aware recommended). If a string is provided, it will be parsed to a datetime object, assuming UTC timezone. + +- **beam** *(int)*: + + The beam number (1 or 2). + +*--Optional--* + +- **output_path** *(str|Path)*: + Path to the output file or directory. If a directory is given, the file will be named 'extracted_mqts.str'. If not provided, no output will be written. 
+""" + +from pathlib import Path + +from generic_parser import DotDict, EntryPointParameters, entrypoint + +from omc3.nxcals.constants import EXTRACTED_MQTS_FILENAME +from omc3.nxcals.knob_extraction import NXCalResult +from omc3.nxcals.mqt_extraction import get_mqt_vals, knobs_to_madx +from omc3.utils.iotools import DateOrStr, PathOrStr +from omc3.utils.mock import cern_network_import + +spark_session_builder = cern_network_import("nxcals.spark_session_builder") + + +def _get_params() -> EntryPointParameters: + """ + Define the parameters for the MQT retrieval entry point. + """ + return EntryPointParameters( + time={ + "type": DateOrStr, + "help": "The timestamp for which to retrieve the data (timezone-aware recommended).", + }, + beam={"type": int, "help": "The beam number (1 or 2)."}, + output_path={ + "type": PathOrStr, + "default": EXTRACTED_MQTS_FILENAME, + "help": f"Path to the output file or directory. Defaults to '{EXTRACTED_MQTS_FILENAME}'. If a directory is given, the file will be named '{EXTRACTED_MQTS_FILENAME}'.", + }, + ) + + +@entrypoint(_get_params(), strict=True) +def retrieve_mqts(opt: DotDict) -> list[NXCalResult]: + """ + Retrieve MQT (Quadrupole Trim) knob values from NXCALS for a specific time and beam, + and optionally save them to a file. + + Returns: + The list of retrieved MQT knob values. 
+ """ + spark = spark_session_builder.get_or_create() + mqt_vals = get_mqt_vals(spark, opt.time, opt.beam) + + if opt.output_path is not None: + output_path = Path(opt.output_path) + if output_path.is_dir(): + output_path = output_path / EXTRACTED_MQTS_FILENAME + with output_path.open("w") as f: + f.write(knobs_to_madx(mqt_vals)) + + return mqt_vals + + +if __name__ == "__main__": + retrieve_mqts() diff --git a/tests/unit/test_nxcals_mqt_extraction.py b/tests/unit/test_nxcals_mqt_extraction.py index dd7a96df..4644d4c2 100644 --- a/tests/unit/test_nxcals_mqt_extraction.py +++ b/tests/unit/test_nxcals_mqt_extraction.py @@ -9,6 +9,7 @@ from omc3.nxcals import mqt_extraction from omc3.nxcals.constants import EXTRACTED_MQTS_FILENAME from omc3.nxcals.knob_extraction import NXCalResult +from omc3.scripts import mqt_extraction as mqt_script SAMPLE_DIR = Path(__file__).parent.parent / "inputs" / "knob_extractor" TEST_CASES = ( @@ -67,7 +68,7 @@ def test_main_reproduces_reference_output(tmp_path, beam: int, sample_file: Path output_dir = tmp_path / f"beam{beam}" output_dir.mkdir() - mqt_extraction.retrieve_mqts(time=query_time, beam=beam, output_dir=output_dir) + mqt_script.retrieve_mqts(time=query_time, beam=beam, output_path=output_dir) output_path = output_dir / EXTRACTED_MQTS_FILENAME From 4afd69b6a8c14ae44a637ffc39f9cd0309c0995c Mon Sep 17 00:00:00 2001 From: Joshua Gray <61458311+jgray-19@users.noreply.github.com> Date: Wed, 5 Nov 2025 16:36:43 +0100 Subject: [PATCH 09/37] Add checks for None modifiers in check_accel_from_dir_vs_options function --- tests/unit/test_model_creator.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/tests/unit/test_model_creator.py b/tests/unit/test_model_creator.py index b4061f82..345a30fd 100644 --- a/tests/unit/test_model_creator.py +++ b/tests/unit/test_model_creator.py @@ -557,12 +557,15 @@ def check_accel_from_dir_vs_options( _check_arrays(accel_from_opt.nat_tunes, accel_from_dir.nat_tunes, eps=1e-4, 
tunes=True) _check_arrays(accel_from_opt.drv_tunes, accel_from_dir.drv_tunes, eps=1e-4, tunes=True) # Check that each modifier from dir is relative to model_dir if possible - for mod_opt, mod_dir in zip(accel_from_opt.modifiers, accel_from_dir.modifiers): - try: - mod_opt_rel = mod_opt.relative_to(model_dir) - except ValueError: - mod_opt_rel = mod_opt - assert mod_dir == mod_opt_rel + if accel_from_opt.modifiers is not None and accel_from_dir.modifiers is not None: + for mod_opt, mod_dir in zip(accel_from_opt.modifiers, accel_from_dir.modifiers): + try: + mod_opt_rel = mod_opt.relative_to(model_dir) + except ValueError: + mod_opt_rel = mod_opt + assert mod_dir == mod_opt_rel + else: + assert accel_from_opt.modifiers == accel_from_dir.modifiers assert accel_from_opt.excitation == accel_from_dir.excitation assert accel_from_opt.model_dir == accel_from_dir.model_dir From ca8fdcf033253a3258e6b365decbbd06c3694d7c Mon Sep 17 00:00:00 2001 From: Joshua Gray <61458311+jgray-19@users.noreply.github.com> Date: Fri, 14 Nov 2025 10:40:43 +0100 Subject: [PATCH 10/37] Apply suggestion from @fsoubelet Co-authored-by: Felix Soubelet <19598248+fsoubelet@users.noreply.github.com> --- omc3/model/accelerators/lhc.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/omc3/model/accelerators/lhc.py b/omc3/model/accelerators/lhc.py index a39646e0..8d258cb5 100644 --- a/omc3/model/accelerators/lhc.py +++ b/omc3/model/accelerators/lhc.py @@ -278,7 +278,7 @@ def log_status(self) -> None: LOGGER.info(f"Natural Tune X [{self.nat_tunes[0]:10.3f}]") LOGGER.info(f"Natural Tune Y [{self.nat_tunes[1]:10.3f}]") LOGGER.info( - f"Best Knowledge Model [{'NO' if self.model_best_knowledge is None else 'OK':>10s}]" + f"Best Knowledge Model: [{'NO' if self.model_best_knowledge is None else 'OK':>10s}]" ) if self.excitation == AccExcitationMode.FREE: From 4298a31f046ba1cbebe775315704cb848da6ea25 Mon Sep 17 00:00:00 2001 From: Joshua Gray <61458311+jgray-19@users.noreply.github.com> Date: 
Fri, 14 Nov 2025 11:16:13 +0100 Subject: [PATCH 11/37] Refactor knob_extraction.py: Improve docstrings and logging consistency --- omc3/nxcals/knob_extraction.py | 110 +++++++++++++++++++++------------ 1 file changed, 69 insertions(+), 41 deletions(-) diff --git a/omc3/nxcals/knob_extraction.py b/omc3/nxcals/knob_extraction.py index 85a1dbbb..7ca9641c 100644 --- a/omc3/nxcals/knob_extraction.py +++ b/omc3/nxcals/knob_extraction.py @@ -1,21 +1,36 @@ +""" +Knob Extraction +--------------- + +This module provides functionality to extract knob values from NXCALS and convert them to +MAD-X compatible format using LSA services. + +It handles retrieval of raw variable data from NXCALS, conversion of power converter +currents to K-values, and mapping of power converter names to MAD-X naming +conventions. +""" + from __future__ import annotations import logging from dataclasses import dataclass from datetime import datetime, timedelta, timezone +from typing import TYPE_CHECKING import jpype import pandas as pd -from pyspark.sql import SparkSession from pyspark.sql import functions as f from pyspark.sql.window import Window from omc3.utils.mock import cern_network_import +if TYPE_CHECKING: + from pyspark.sql import SparkSession + pjlsa = cern_network_import("pjlsa") builders = cern_network_import("nxcals.api.extraction.data.builders") -logger = logging.getLogger(__name__) +LOGGER = logging.getLogger(__name__) @dataclass @@ -32,20 +47,19 @@ def get_raw_vars( """ Retrieve raw variable values from NXCALS. 
- Parameters - ---------- - spark : SparkSession - Active Spark session - time : datetime - Python datetime (timezone-aware recommended) - var_name : str - Name or pattern of the variable(s) to retrieve - - Returns - ------- - dict[str, tuple[float, pd.Timestamp, str]] - Dictionary with full variable name as key, and (value, timestamp, full variable name) as value - for the latest sample of each matching variable at the given time + Args: + spark (SparkSession): Active Spark session. + time (datetime): Python datetime (timezone-aware recommended). + var_name (str): Name or pattern of the variable(s) to retrieve. + delta_days (int): Number of days to look back for data. Default is 1. + + Returns: + list[NXCalResult]: List of NXCalResult containing variable name, value, timestamp, + and power converter name for the latest sample of each matching variable at the given time. + + Raises: + RuntimeError: If no data is found for the variable in the given interval. + You may need to increase the delta_days if necessary. """ # Ensure time is in UTC @@ -54,10 +68,10 @@ def get_raw_vars( else: time = time.astimezone(timezone.utc) - # Look back 1 day to be sure we get at least one sample + # Look back delta_days (1 for mqts) to be sure we get at least one sample start_time = time - timedelta(days=delta_days) end_time = time - logger.info(f"Retrieving raw variables {var_name} from {start_time} to {end_time}") + LOGGER.info(f"Retrieving raw variables {var_name} from {start_time} to {end_time}") df = ( builders.DataQuery.builder(spark) @@ -70,9 +84,12 @@ def get_raw_vars( # Avoid full count() – just check if we have at least one row if df is None or not df.take(1): - raise RuntimeError(f"No data found for {var_name} in the given interval.") + raise RuntimeError( + f"No data found for {var_name} in {start_time} to {end_time}. " + f"You may need to increase the delta_days from {delta_days} days if necessary." 
+ ) - logger.info(f"Raw variables {var_name} retrieved successfully.") + LOGGER.info(f"Raw variables {var_name} retrieved successfully.") # Get the latest sample for each variable window_spec = Window.partitionBy("nxcals_variable_name").orderBy( @@ -90,20 +107,24 @@ def get_raw_vars( raw_val = float(row["nxcals_value"]) ts = pd.to_datetime(row["nxcals_timestamp"], unit="ns", utc=True) results.append(NXCalResult(full_varname, raw_val, ts, strip_i_meas(full_varname))) - logger.info(f"LHC value retrieved: {full_varname} = {raw_val:.2f} at {ts}") + LOGGER.info(f"LHC value retrieved: {full_varname} = {raw_val:.2f} at {ts}") return results def get_energy(spark: SparkSession, time: datetime) -> tuple[float, pd.Timestamp]: """ + Retrieve the beam energy from NXCALS. Args: - spark (SparkSession): Active Spark session - time (datetime): Python datetime (timezone-aware recommended) + spark (SparkSession): Active Spark session. + time (datetime): Python datetime (timezone-aware recommended). Returns: - (energy_gev, timestamp): Beam energy in GeV and its timestamp + tuple[float, pd.Timestamp]: Beam energy in GeV and its timestamp. + + Raises: + RuntimeError: If no energy data is found. """ scale = 0.120 raw_vars = get_raw_vars(spark, time, "HX:ENG") @@ -155,6 +176,12 @@ def map_pc_name_to_madx(pc_name: str) -> str: def strip_i_meas(text: str) -> str: """ Remove the I_MEAS suffix from a variable name. + + Args: + text (str): The variable name possibly ending with ':I_MEAS'. + + Returns: + str: The variable name without the ':I_MEAS' suffix. """ return text.removesuffix(":I_MEAS") @@ -164,12 +191,12 @@ def calc_k_from_iref(lsa_client, currents: dict[str, float], energy: float) -> d Calculate K values from IREF using the LSA service. Args: - lsa: The LSA service instance - currents: A dictionary of current values keyed by variable name - energy: The beam energy in GeV + lsa_client: The LSA client instance. 
+ currents (dict[str, float]): Dictionary of current values keyed by variable name. + energy (float): The beam energy in GeV. Returns: - A dictionary of K values keyed by variable name + dict[str, float]: Dictionary of K values keyed by variable name. """ # 1) Use the **instance** PJLSA already created lhc_service = lsa_client._lhcService # <-- INSTANCE (do NOT use self._LhcService) @@ -210,22 +237,23 @@ def get_knob_vals( Retrieve knob values for a given beam and time using specified patterns. Args: - spark: Spark session - time: Time to retrieve data for - beam: Beam number - patterns: List of variable patterns to retrieve - expected_knobs: Set of expected MAD-X knob names. If None, returns all found knobs. - log_prefix: Prefix for logging messages + spark (SparkSession): Active Spark session. + time (datetime): Time to retrieve data for. + beam (int): Beam number. + patterns (list[str]): List of variable patterns to retrieve. + expected_knobs (set[str] | None): Set of expected MAD-X knob names. If None, returns all found knobs. + log_prefix (str): Prefix for logging messages. + delta_days (int): Number of days to look back for data. Default is 1. Returns: - List of NXCalResult with knob values + list[NXCalResult]: List of NXCalResult objects with knob values. 
""" - logger.info(f"{log_prefix}Starting data retrieval for beam {beam} at time {time}") + LOGGER.info(f"{log_prefix}Starting data retrieval for beam {beam} at time {time}") # Retrieve raw current measurements from NXCALS combined_vars: list[NXCalResult] = [] for pattern in patterns: - logger.info(f"{log_prefix}Getting currents for pattern {pattern} at {time}") + LOGGER.info(f"{log_prefix}Getting currents for pattern {pattern} at {time}") raw_vars = get_raw_vars(spark, time, pattern, delta_days) combined_vars.extend(raw_vars) @@ -235,7 +263,7 @@ def get_knob_vals( # Prepare currents dict for LSA currents = {strip_i_meas(var.name): var.value for var in combined_vars} - logger.info( + LOGGER.info( f"{log_prefix}Calculating K values for {len(currents)} power converters with energy {energy} GeV" ) @@ -252,10 +280,10 @@ def get_knob_vals( expected_knobs = found_keys if missing_keys := expected_knobs - found_keys: - logger.warning(f"{log_prefix}Missing K-values for knobs: {missing_keys}") + LOGGER.warning(f"{log_prefix}Missing K-values for knobs: {missing_keys}") if unknown_keys := found_keys - expected_knobs: - logger.warning(f"{log_prefix}Unknown K-values found: {unknown_keys}") + LOGGER.warning(f"{log_prefix}Unknown K-values found: {unknown_keys}") # Build result list with timestamps timestamps = {map_pc_name_to_madx(var.name): var.timestamp for var in combined_vars} @@ -269,6 +297,6 @@ def get_knob_vals( if value is not None and timestamp is not None and pc_name is not None: results.append(NXCalResult(madx_name, value, timestamp, pc_name)) else: - logger.warning(f"{log_prefix}Missing data for {madx_name}") + LOGGER.warning(f"{log_prefix}Missing data for {madx_name}") return results From 141c7c33840bf5d76057861cfc61900622f55ebf Mon Sep 17 00:00:00 2001 From: Joshua Gray <61458311+jgray-19@users.noreply.github.com> Date: Fri, 14 Nov 2025 11:16:24 +0100 Subject: [PATCH 12/37] Refactor model creators to use resolve_madx_path for path handling --- 
.../model_creators/abstract_model_creator.py | 46 +++++++------ .../model/model_creators/lhc_model_creator.py | 68 ++++++++++--------- omc3/model/model_creators/ps_model_creator.py | 12 ++-- .../model_creators/psbooster_model_creator.py | 10 +-- .../model/model_creators/sps_model_creator.py | 12 ++-- 5 files changed, 77 insertions(+), 71 deletions(-) diff --git a/omc3/model/model_creators/abstract_model_creator.py b/omc3/model/model_creators/abstract_model_creator.py index a68b04fe..72f4539c 100644 --- a/omc3/model/model_creators/abstract_model_creator.py +++ b/omc3/model/model_creators/abstract_model_creator.py @@ -100,8 +100,7 @@ def full_run(self): # Prepare model-dir output directory self.prepare_run() - # get madx-script with relative output-paths - # (model_dir is base-path in get_madx_script) + # get madx-script with paths relative to model-dir if possible, otherwise absolute madx_script = self.get_madx_script() # Run madx to create model @@ -130,14 +129,15 @@ def get_base_madx_script(self) -> str: """ pass - def _madx_path(self, path: Path | str) -> str: - """Return a MAD-X friendly path. + def resolve_madx_path(self, path: Path | str) -> Path: + """Converts a given path to a path relative to the model dir if possible, otherwise returns the absolute path. Args: path (Path | str): The path to convert. Returns: - str: The path as a string, relative to the model dir if possible. + madx_path (Path): The converted path for MAD-X usage. 
""" + path = Path(path) try: # Try converting to a relative path if it is inside the cwd (model_dir) return path.relative_to(self.accel.model_dir) @@ -402,10 +402,10 @@ def _create_corrections_file(self): def get_madx_script(self) -> str: madx_script = self.get_base_madx_script() - macros_path = self._madx_path(self.output_dir / MACROS_DIR / GENERAL_MACROS) - measurement_path = self._madx_path(self.output_dir / self.measurement_madx) - twiss_forward_path = self._madx_path(self.output_dir / self.twiss_forward) - twiss_backward_path = self._madx_path(self.output_dir / self.twiss_backward) + macros_path = self.resolve_madx_path(self.output_dir / MACROS_DIR / GENERAL_MACROS) + measurement_path = self.resolve_madx_path(self.output_dir / self.measurement_madx) + twiss_forward_path = self.resolve_madx_path(self.output_dir / self.twiss_forward) + twiss_backward_path = self.resolve_madx_path(self.output_dir / self.twiss_backward) if self._sequence_name is None: raise ValueError( @@ -459,11 +459,11 @@ def get_madx_script(self) -> str: ) if self.corrections is not None: - corrections_path = self._madx_path(self.output_dir / self.corrections_madx) - twiss_forward_corr_path = self._madx_path( + corrections_path = self.resolve_madx_path(self.output_dir / self.corrections_madx) + twiss_forward_corr_path = self.resolve_madx_path( self.output_dir / self.twiss_forward_corrected ) - twiss_backward_corr_path = self._madx_path( + twiss_backward_corr_path = self.resolve_madx_path( self.output_dir / self.twiss_backward_corrected ) madx_script += "\n".join( @@ -507,12 +507,16 @@ def __init__( """ LOGGER.debug("Initializing Correction Model Creator Base Attributes") super().__init__(accel) - self.twiss_out = Path(twiss_out) + self.twiss_out = self.resolve_madx_path(twiss_out) - # use absolute paths to force files into twiss_out directory instead of model-dir - self.jobfile = self.twiss_out.parent.absolute() / f"job.create_{self.twiss_out.stem}.madx" - self.logfile = 
self.twiss_out.parent.absolute() / f"job.create_{self.twiss_out.stem}.log" - self.corr_files = corr_files + # Take the directory of the twiss output as output dir + self.jobfile = self.resolve_madx_path( + self.twiss_out.parent / f"job.create_{self.twiss_out.stem}.madx" + ) + self.logfile = self.resolve_madx_path( + self.twiss_out.parent / f"job.create_{self.twiss_out.stem}.log" + ) + self.corr_files = [self.resolve_madx_path(f) for f in corr_files] self.update_dpp = update_dpp def get_madx_script(self) -> str: @@ -525,14 +529,14 @@ def get_madx_script(self) -> str: f"Updating the dpp is not implemented for correction model creator of {self.accel.NAME}." ) - # do not get_madx_script as we don't need the uncorrected output. + # use only base-part and not the full madx-script as we don't need the uncorrected output. madx_script = self.get_base_madx_script() + # We assume for the following that the correction files have already been resolved to madx paths. for corr_file in self.corr_files: # Load the corrections - madx_script += f"call, file = '{self._madx_path(corr_file)}';\n" + madx_script += f"call, file = '{corr_file}';\n" - twiss_out_path = self._madx_path(self.twiss_out) - madx_script += f"{self._get_select_command()}\ntwiss, file = {twiss_out_path};\n" + madx_script += f"{self._get_select_command()}\ntwiss, file = {self.twiss_out};\n" return madx_script @property diff --git a/omc3/model/model_creators/lhc_model_creator.py b/omc3/model/model_creators/lhc_model_creator.py index 37842ce4..1b17c399 100644 --- a/omc3/model/model_creators/lhc_model_creator.py +++ b/omc3/model/model_creators/lhc_model_creator.py @@ -118,9 +118,8 @@ def prepare_options(self, opt): return # list optics choices --- - if ( - opt.list_choices - ): # assumes we only want to list optics. Invoked even if modifiers are given! + if opt.list_choices: + # assumes we only want to list optics. Invoked even if modifiers are given! 
check_folder_choices( acc_model_path / OPTICS_SUBDIR, msg="No modifier given", @@ -186,8 +185,8 @@ def get_madx_script(self) -> str: # nominal accel: Lhc = self.accel madx_script = self.get_base_madx_script() - twiss_dat_path = self._madx_path(accel.model_dir / TWISS_DAT) - twiss_elements_path = self._madx_path(accel.model_dir / TWISS_ELEMENTS_DAT) + twiss_dat_path = self.resolve_madx_path(accel.model_dir / TWISS_DAT) + twiss_elements_path = self.resolve_madx_path(accel.model_dir / TWISS_ELEMENTS_DAT) madx_script += ( f"exec, do_twiss_monitors(LHCB{accel.beam}, '{twiss_dat_path}', {accel.dpp});\n" f"exec, do_twiss_elements(LHCB{accel.beam}, '{twiss_elements_path}', {accel.dpp});\n" @@ -196,8 +195,8 @@ def get_madx_script(self) -> str: # nominal # allow user to modify script and enable excitation, if driven tunes are given use_acd = accel.excitation == AccExcitationMode.ACD use_adt = accel.excitation == AccExcitationMode.ADT - twiss_ac_path = self._madx_path(accel.model_dir / TWISS_AC_DAT) - twiss_adt_path = self._madx_path(accel.model_dir / TWISS_ADT_DAT) + twiss_ac_path = self.resolve_madx_path(accel.model_dir / TWISS_AC_DAT) + twiss_adt_path = self.resolve_madx_path(accel.model_dir / TWISS_ADT_DAT) madx_script += ( f"use_acd={use_acd:d};\n" f"use_adt={use_adt:d};\n" @@ -239,18 +238,18 @@ def _get_sequence_initialize_script(self) -> str: accel: Lhc = self.accel acc_model_path = None if accel.acc_model_path is not None: - acc_model_path = Path(self._madx_path(accel.acc_model_path)) + acc_model_path = Path(self.resolve_madx_path(accel.acc_model_path)) madx_script = ( f"{self._get_madx_script_info_comments()}\n\n" - f"call, file = '{self._madx_path(accel.model_dir / MACROS_DIR / GENERAL_MACROS)}';\n" - f"call, file = '{self._madx_path(accel.model_dir / MACROS_DIR / LHC_MACROS)}';\n" + f"call, file = '{self.resolve_madx_path(accel.model_dir / MACROS_DIR / GENERAL_MACROS)}';\n" + f"call, file = '{self.resolve_madx_path(accel.model_dir / MACROS_DIR / LHC_MACROS)}';\n" ) 
madx_script += f"{MADX_ENERGY_VAR} = {accel.energy};\n" madx_script += "exec, define_nominal_beams();\n\n" if self._uses_run3_macros(): LOGGER.debug("According to the optics year, Run 3 versions of the macros will be used") - madx_script += f"call, file = '{self._madx_path(accel.model_dir / MACROS_DIR / LHC_MACROS_RUN3)}';\n" + madx_script += f"call, file = '{self.resolve_madx_path(accel.model_dir / MACROS_DIR / LHC_MACROS_RUN3)}';\n" madx_script += "\n! ----- Calling Sequence -----\n" madx_script += ( @@ -262,9 +261,9 @@ def _get_sequence_initialize_script(self) -> str: madx_script += "\n! ---- Call optics and other modifiers ----\n" if accel.modifiers is not None: - # if modifier is an absolute path, go there, otherwise use the path refers from model_dir + # if the modifier can be found in the model dir, use relative path madx_script += "".join( - f"call, file = '{self._madx_path(modifier)}'; {MODIFIER_TAG}\n" + f"call, file = '{self.resolve_madx_path(modifier)}'; {MODIFIER_TAG}\n" for modifier in accel.modifiers ) @@ -286,7 +285,7 @@ def _get_sequence_initialize_script(self) -> str: Path(accel.acc_model_path) / LHC_REMOVE_TRIPLET_SYMMETRY_RELPATH ) if remove_symmetry_knob_abs.exists(): # alternatively check if year != 2018/2021 - remove_symmetry_knob_path = self._madx_path(remove_symmetry_knob_abs) + remove_symmetry_knob_path = self.resolve_madx_path(remove_symmetry_knob_abs) madx_script += ( "\n! 
----- Remove IR symmetry definitions -----\n" f'\ncall, file="{remove_symmetry_knob_path}"; ' @@ -349,14 +348,14 @@ def _get_madx_script_info_comments(self) -> str: ) return info_comments - def _get_madx_main_sequence_loading(self, acc_model_path: Path) -> str: + def _get_madx_main_sequence_loading(self, acc_model_path: Path | None) -> str: accel: Lhc = self.accel if acc_model_path is not None: - main_seq_rel = (acc_model_path / "lhc.seq").as_posix() + main_seq_rel = self.resolve_madx_path(acc_model_path / "lhc.seq") main_call = f"call, file = '{main_seq_rel}';" if accel.year.startswith("hl"): - hllhc_rel = (acc_model_path / "hllhc_sequence.madx").as_posix() + hllhc_rel = self.resolve_madx_path(acc_model_path / "hllhc_sequence.madx") main_call += f"\ncall, file = '{hllhc_rel}';" return main_call try: @@ -494,12 +493,16 @@ def get_madx_script(self) -> str: madx_script += "\n! ----- Load MQTs -----\n" mqts_file = accel.model_dir / EXTRACTED_MQTS_FILENAME if mqts_file.exists(): - mqts_path = self._madx_path(mqts_file) + mqts_path = self.resolve_madx_path(mqts_file) madx_script += f"call, file = '{mqts_path}';\n" + else: + LOGGER.warning( + f"MQT file for best knowledge model not found at {mqts_file!s}, skipping." + ) madx_script += "\n! ----- Output Files -----\n" - twiss_bk_path = self._madx_path(accel.model_dir / TWISS_BEST_KNOWLEDGE_DAT) - twiss_bk_elements_path = self._madx_path( + twiss_bk_path = self.resolve_madx_path(accel.model_dir / TWISS_BEST_KNOWLEDGE_DAT) + twiss_bk_elements_path = self.resolve_madx_path( accel.model_dir / TWISS_ELEMENTS_BEST_KNOWLEDGE_DAT ) madx_script += ( @@ -517,16 +520,14 @@ def get_base_madx_script(self) -> str: madx_script += ( f"\n! 
----- For Best Knowledge Model -----\n" - f"readmytable, file = '{self._madx_path(accel.model_dir / B2_ERRORS_TFS)}', table=errtab;\n" + f"readmytable, file = '{self.resolve_madx_path(accel.model_dir / B2_ERRORS_TFS)}', table=errtab;\n" f"seterr, table=errtab;\n" - f"call, file = '{self._madx_path(accel.model_dir / B2_SETTINGS_MADX)}';\n" + f"call, file = '{self.resolve_madx_path(accel.model_dir / B2_SETTINGS_MADX)}';\n" ) return madx_script -class LhcCorrectionModelCreator( - CorrectionModelCreator, LhcModelCreator -): # ------------------------------------------- +class LhcCorrectionModelCreator(CorrectionModelCreator, LhcModelCreator): # ----------------------- """ Creates an updated model from multiple changeparameters inputs (used in iterative correction). @@ -565,13 +566,14 @@ def get_madx_script(self) -> str: madx_script += f"{ORBIT_DPP} = {accel.dpp};\n" for corr_file in self.corr_files: # Load the corrections, can also update ORBIT_DPP - madx_script += f"call, file = '{self._madx_path(corr_file)}';\n" + # Should be the correct path already from the init + madx_script += f"call, file = '{corr_file}';\n" # If we are doing orbit correction, we need to ensure that a correct and a match is done if self.update_dpp: madx_script += self.get_update_deltap_script(deltap=ORBIT_DPP) - twiss_out_path = self._madx_path(self.output_dir / self.twiss_out) + twiss_out_path = self.resolve_madx_path(self.output_dir / self.twiss_out) madx_script += ( f'exec, do_twiss_elements(LHCB{accel.beam}, "{twiss_out_path}", {ORBIT_DPP});\n' ) @@ -600,9 +602,9 @@ class LhcSegmentCreator(SegmentCreator, LhcModelCreator): def get_madx_script(self) -> str: accel: Lhc = self.accel madx_script = self.get_base_madx_script() - measurement_path = self._madx_path(self.output_dir / self.measurement_madx) - twiss_forward_path = self._madx_path(self.output_dir / self.twiss_forward) - twiss_backward_path = self._madx_path(self.output_dir / self.twiss_backward) + measurement_path = 
self.resolve_madx_path(self.output_dir / self.measurement_madx) + twiss_forward_path = self.resolve_madx_path(self.output_dir / self.twiss_forward) + twiss_backward_path = self.resolve_madx_path(self.output_dir / self.twiss_backward) madx_script += "\n".join( [ @@ -646,11 +648,11 @@ def get_madx_script(self) -> str: ) if self.corrections is not None: - corrections_path = self._madx_path(self.output_dir / self.corrections_madx) - twiss_forward_corr_path = self._madx_path( + corrections_path = self.resolve_madx_path(self.output_dir / self.corrections_madx) + twiss_forward_corr_path = self.resolve_madx_path( self.output_dir / self.twiss_forward_corrected ) - twiss_backward_corr_path = self._madx_path( + twiss_backward_corr_path = self.resolve_madx_path( self.output_dir / self.twiss_backward_corrected ) madx_script += "\n".join( diff --git a/omc3/model/model_creators/ps_model_creator.py b/omc3/model/model_creators/ps_model_creator.py index 3826a2cb..8d398589 100644 --- a/omc3/model/model_creators/ps_model_creator.py +++ b/omc3/model/model_creators/ps_model_creator.py @@ -31,7 +31,7 @@ def get_madx_script(self) -> str: replace_dict = { "USE_ACD": str(int(accel.excitation == AccExcitationMode.ACD)), "DPP": accel.dpp, - "OUTPUT": self._madx_path(self.output_dir), + "OUTPUT": self.resolve_madx_path(self.output_dir), } madx_template = accel.get_file("twiss.mask").read_text() madx_script += madx_template % replace_dict @@ -41,15 +41,15 @@ def get_base_madx_script(self) -> str: accel: Ps = self.accel use_acd = accel.excitation == AccExcitationMode.ACD replace_dict = { - "FILES_DIR": self._madx_path(accel.get_dir()), + "FILES_DIR": self.resolve_madx_path(accel.get_dir()), "USE_ACD": str(int(use_acd)), "NAT_TUNE_X": accel.nat_tunes[0], "NAT_TUNE_Y": accel.nat_tunes[1], "KINETICENERGY": 0 if accel.energy is None else accel.energy, "USE_CUSTOM_PC": "0" if accel.energy is None else "1", - "ACC_MODELS_DIR": self._madx_path(accel.acc_model_path), - "BEAM_FILE": 
self._madx_path(accel.beam_file), - "STR_FILE": self._madx_path(accel.str_file), + "ACC_MODELS_DIR": self.resolve_madx_path(accel.acc_model_path), + "BEAM_FILE": self.resolve_madx_path(accel.beam_file), + "STR_FILE": self.resolve_madx_path(accel.str_file), "DRV_TUNE_X": "0", "DRV_TUNE_Y": "0", "MODIFIERS": "", @@ -61,7 +61,7 @@ def get_base_madx_script(self) -> str: if accel.modifiers: replace_dict["MODIFIERS"] = "\n".join( [ - f" call, file = '{self._madx_path(modifier)}'; {MODIFIER_TAG}" + f" call, file = '{self.resolve_madx_path(modifier)}'; {MODIFIER_TAG}" for modifier in accel.modifiers ] ) diff --git a/omc3/model/model_creators/psbooster_model_creator.py b/omc3/model/model_creators/psbooster_model_creator.py index b4aecf78..3c2ee973 100644 --- a/omc3/model/model_creators/psbooster_model_creator.py +++ b/omc3/model/model_creators/psbooster_model_creator.py @@ -28,7 +28,7 @@ def get_madx_script(self) -> str: "USE_ACD": str(int(accel.excitation == AccExcitationMode.ACD)), "RING": accel.ring, "DPP": accel.dpp, - "OUTPUT": self._madx_path(self.output_dir), + "OUTPUT": self.resolve_madx_path(self.output_dir), } madx_template = accel.get_file("twiss.mask").read_text() madx_script += madx_template % replace_dict @@ -38,16 +38,16 @@ def get_base_madx_script(self) -> str: accel: Psbooster = self.accel use_acd = accel.excitation == AccExcitationMode.ACD replace_dict = { - "FILES_DIR": self._madx_path(accel.get_dir()), + "FILES_DIR": self.resolve_madx_path(accel.get_dir()), "USE_ACD": str(int(use_acd)), "RING": str(accel.ring), "NAT_TUNE_X": accel.nat_tunes[0], "NAT_TUNE_Y": accel.nat_tunes[1], "KINETICENERGY": 0 if accel.energy is None else accel.energy, "USE_CUSTOM_PC": "0" if accel.energy is None else "1", - "ACC_MODELS_DIR": self._madx_path(accel.acc_model_path), - "BEAM_FILE": self._madx_path(accel.beam_file), - "STR_FILE": self._madx_path(accel.str_file), + "ACC_MODELS_DIR": self.resolve_madx_path(accel.acc_model_path), + "BEAM_FILE": 
self.resolve_madx_path(accel.beam_file), + "STR_FILE": self.resolve_madx_path(accel.str_file), "DRV_TUNE_X": "", "DRV_TUNE_Y": "", } diff --git a/omc3/model/model_creators/sps_model_creator.py b/omc3/model/model_creators/sps_model_creator.py index 8b2093b0..8f592ea8 100644 --- a/omc3/model/model_creators/sps_model_creator.py +++ b/omc3/model/model_creators/sps_model_creator.py @@ -126,7 +126,7 @@ def prepare_options(self, opt) -> bool: def get_base_madx_script(self) -> str: accel: Sps = self.accel use_excitation = accel.excitation != AccExcitationMode.FREE - seq_path = self._madx_path(accel.acc_model_path / "sps.seq") + seq_path = self.resolve_madx_path(accel.acc_model_path / "sps.seq") # The very basics --- madx_script = ( @@ -137,10 +137,10 @@ def get_base_madx_script(self) -> str: if accel.modifiers is not None: # includes the strengths file for modifier in accel.modifiers: - modifier_path = self._madx_path(modifier) + modifier_path = self.resolve_madx_path(modifier) madx_script += f"call, file = '{modifier_path}'; {MODIFIER_TAG}\n" - toolkit_path = self._madx_path(accel.acc_model_path / "toolkit" / "macro.madx") + toolkit_path = self.resolve_madx_path(accel.acc_model_path / "toolkit" / "macro.madx") madx_script += ( f"call, file ='{toolkit_path}';\n" "option, echo;\n\n" @@ -207,8 +207,8 @@ def get_madx_script(self) -> str: use_excitation = accel.excitation != AccExcitationMode.FREE madx_script = self.get_base_madx_script() - twiss_path = self._madx_path(self.output_dir / TWISS_DAT) - twiss_elements_path = self._madx_path(self.output_dir / TWISS_ELEMENTS_DAT) + twiss_path = self.resolve_madx_path(self.output_dir / TWISS_DAT) + twiss_elements_path = self.resolve_madx_path(self.output_dir / TWISS_ELEMENTS_DAT) madx_script += ( "! 
Create twiss data files ---\n" f"{self._get_select_command(pattern=accel.RE_DICT[AccElementTypes.BPMS])}" @@ -220,7 +220,7 @@ def get_madx_script(self) -> str: if use_excitation or accel.drv_tunes is not None: # allow user to modify script and enable excitation, if driven tunes are given - twiss_ac_path = self._madx_path(self.output_dir / TWISS_AC_DAT) + twiss_ac_path = self.resolve_madx_path(self.output_dir / TWISS_AC_DAT) madx_script += ( f"if(use_acd == 1){{\n" " betxac = table(twiss, hacmap, betx);\n" From 88b1d296ac360a6853d4fe6c0ce3ac962d778faa Mon Sep 17 00:00:00 2001 From: Joshua Gray <61458311+jgray-19@users.noreply.github.com> Date: Fri, 14 Nov 2025 11:16:32 +0100 Subject: [PATCH 13/37] Fix MAD-X job generation logic for last process index check --- omc3/correction/response_madx.py | 18 +++++++----------- omc3/correction/sequence_evaluation.py | 2 +- 2 files changed, 8 insertions(+), 12 deletions(-) diff --git a/omc3/correction/response_madx.py b/omc3/correction/response_madx.py index 969db9aa..24bb1b36 100644 --- a/omc3/correction/response_madx.py +++ b/omc3/correction/response_madx.py @@ -128,16 +128,16 @@ def _generate_madx_jobs( if var_idx >= len(no_dpp_vars): break var = no_dpp_vars[var_idx] - outfile = temp_dir / f"twiss.{var}" incr_dict[var] = delta_k current_job += f"{var} = {var}{delta_k:+.15e};\n" - current_job += f"twiss, file='{str(outfile.absolute())}'{deltap_twiss};\n" + current_job += f"twiss, file='{creator.resolve_madx_path(temp_dir / f'twiss.{var}')}'{deltap_twiss};\n" current_job += f"{var} = {var}{-delta_k:+.15e};\n\n" if proc_idx == num_proc - 1: - outfile = temp_dir / "twiss.0" - current_job += f"twiss, file='{str(outfile.absolute())}'{deltap_twiss};\n" + current_job += ( + f"twiss, file='{creator.resolve_madx_path(temp_dir / 'twiss.0')}'{deltap_twiss};\n" + ) if compute_deltap: # If ORBIT_DPP is in variables, we run this in the last iteration @@ -146,8 +146,7 @@ def _generate_madx_jobs( current_job += f"{ORBIT_DPP} = 
{ORBIT_DPP}{delta_k:+.15e};\n" # Do twiss, correct, match current_job += creator.get_update_deltap_script(deltap=ORBIT_DPP) - outfile = temp_dir / f"twiss.{ORBIT_DPP}" - current_job += f"twiss, deltap={ORBIT_DPP}, file='{str(outfile.absolute())}';\n" + current_job += f"twiss, deltap={ORBIT_DPP}, file='{creator.resolve_madx_path(temp_dir / f'twiss.{ORBIT_DPP}')}';\n" jobfile_path.write_text(current_job) return incr_dict @@ -211,9 +210,7 @@ def _load_madx_results( vars_and_paths = [] for value in variables + ["0"]: vars_and_paths.append((value, temp_dir)) - LOG.debug("Retrieving Twiss files in parallel...") - for var, path in vars_and_paths: - LOG.debug(f" Variable: {var}, Path: {path}") + LOG.debug(f"Retrieving Twiss files for variables: {vars_and_paths}") var_to_twiss = {} for var, tfs_data in process_pool.map(_load_and_remove_twiss, vars_and_paths): tfs_data[INCR] = incr_dict[var] @@ -352,8 +349,7 @@ def _add_coupling( """ result_dict_of_tfs = copy.deepcopy(dict_of_tfs) with timeit(lambda elapsed: LOG.debug(f" Time adding coupling: {elapsed} s")): - # already copies, so it's safe to act on them - for var, tfs_dframe in result_dict_of_tfs.items(): + for _, tfs_dframe in result_dict_of_tfs.items(): coupling_rdts_df = coupling_via_cmatrix(tfs_dframe) tfs_dframe[f"{F1001}R"] = np.real(coupling_rdts_df[f"{F1001}"]).astype(np.float64) tfs_dframe[f"{F1001}I"] = np.imag(coupling_rdts_df[f"{F1001}"]).astype(np.float64) diff --git a/omc3/correction/sequence_evaluation.py b/omc3/correction/sequence_evaluation.py index 8e58e79c..b032f01b 100644 --- a/omc3/correction/sequence_evaluation.py +++ b/omc3/correction/sequence_evaluation.py @@ -122,7 +122,7 @@ def _do_macro(var): job_content += "\n" # last thing to do: get baseline - if proc_index + 1 == num_proc: + if proc_index == num_proc - 1: job_content += _do_macro("0") _get_jobfile(temp_dir, proc_index).write_text(job_content) From 896a97fb249f9406a9c6d216f19c1e1ccf6334c6 Mon Sep 17 00:00:00 2001 From: Joshua Gray 
<61458311+jgray-19@users.noreply.github.com> Date: Fri, 14 Nov 2025 11:19:55 +0100 Subject: [PATCH 14/37] Reorder functions --- omc3/nxcals/knob_extraction.py | 236 +++++++++++++++++---------------- 1 file changed, 124 insertions(+), 112 deletions(-) diff --git a/omc3/nxcals/knob_extraction.py b/omc3/nxcals/knob_extraction.py index 7ca9641c..29aeda2b 100644 --- a/omc3/nxcals/knob_extraction.py +++ b/omc3/nxcals/knob_extraction.py @@ -41,6 +41,90 @@ class NXCalResult: pc_name: str +# High-level Knob Extraction --------------------------------------------------- + + +def get_knob_vals( + spark: SparkSession, + time: datetime, + beam: int, + patterns: list[str], + expected_knobs: set[str] | None = None, + log_prefix: str = "", + delta_days: int = 1, +) -> list[NXCalResult]: + """ + Retrieve knob values for a given beam and time using specified patterns. + + Args: + spark (SparkSession): Active Spark session. + time (datetime): Time to retrieve data for. + beam (int): Beam number. + patterns (list[str]): List of variable patterns to retrieve. + expected_knobs (set[str] | None): Set of expected MAD-X knob names. If None, returns all found knobs. + log_prefix (str): Prefix for logging messages. + delta_days (int): Number of days to look back for data. Default is 1. + + Returns: + list[NXCalResult]: List of NXCalResult objects with knob values. 
+ """ + LOGGER.info(f"{log_prefix}Starting data retrieval for beam {beam} at time {time}") + + # Retrieve raw current measurements from NXCALS + combined_vars: list[NXCalResult] = [] + for pattern in patterns: + LOGGER.info(f"{log_prefix}Getting currents for pattern {pattern} at {time}") + raw_vars = get_raw_vars(spark, time, pattern, delta_days) + combined_vars.extend(raw_vars) + + # Get beam energy for K-value calculations + energy, _ = get_energy(spark, time) + + # Prepare currents dict for LSA + currents = {strip_i_meas(var.name): var.value for var in combined_vars} + + LOGGER.info( + f"{log_prefix}Calculating K values for {len(currents)} power converters with energy {energy} GeV" + ) + + # Calculate K values using LSA + lsa_client = pjlsa.LSAClient() + k_values = calc_k_from_iref(lsa_client, currents, energy) + + # Transform K values keys to MAD-X format + k_values_madx = {map_pc_name_to_madx(key): value for key, value in k_values.items()} + + found_keys = set(k_values_madx.keys()) + + if expected_knobs is None: + expected_knobs = found_keys + + if missing_keys := expected_knobs - found_keys: + LOGGER.warning(f"{log_prefix}Missing K-values for knobs: {missing_keys}") + + if unknown_keys := found_keys - expected_knobs: + LOGGER.warning(f"{log_prefix}Unknown K-values found: {unknown_keys}") + + # Build result list with timestamps + timestamps = {map_pc_name_to_madx(var.name): var.timestamp for var in combined_vars} + pc_names = {map_pc_name_to_madx(var.name): var.pc_name for var in combined_vars} + results = [] + for madx_name in expected_knobs: + value = k_values_madx.get(madx_name) + timestamp = timestamps.get(madx_name) + pc_name = pc_names.get(madx_name) + + if value is not None and timestamp is not None and pc_name is not None: + results.append(NXCalResult(madx_name, value, timestamp, pc_name)) + else: + LOGGER.warning(f"{log_prefix}Missing data for {madx_name}") + + return results + + +# NXCALS Data Retrieval 
-------------------------------------------------------- + + def get_raw_vars( spark: SparkSession, time: datetime, var_name: str, delta_days: int = 1 ) -> list[NXCalResult]: @@ -133,57 +217,7 @@ def get_energy(spark: SparkSession, time: datetime) -> tuple[float, pd.Timestamp return raw_vars[0].value * scale, raw_vars[0].timestamp -def map_pc_name_to_madx(pc_name: str) -> str: - """ - Convert a power converter name or circuit name to its corresponding MAD-X name. - - This function processes the input name by removing the ':I_MEAS' suffix if present, - extracting the circuit name from full power converter names (starting with 'RPMBB' or 'RPL'), - applying specific string replacements for MAD-X compatibility, and converting the result to lowercase. - - Args: - pc_name (str): The power converter or circuit name to convert. - - Returns: - str: The converted MAD-X name. - - Examples: - >>> map_pc_name_to_madx("RPMBB.UJ33.RCBH10.L1B1:I_MEAS") - 'acb10.l1b1' - >>> map_pc_name_to_madx("RQT12.L5B1") - 'kqt12.l5b1' - """ - # Remove the ':I_MEAS' suffix if it exists - pc_name = strip_i_meas(pc_name) - - # Extract circuit name from full power converter names - if "." in pc_name: - parts = pc_name.split(".") - # For full names like 'RPMBB.UJ33.RCBH10.L1B1', take from the third part onward, else use pc_name - circuit_name = ".".join(parts[2:]) if len(parts) > 2 else pc_name - else: - circuit_name = pc_name - - # Apply MAD-X specific replacements - replacements = {"RQT": "KQT", "RCB": "ACB"} - for old, new in replacements.items(): - circuit_name = circuit_name.replace(old, new) - - # Return in lowercase as required by MAD-X - return circuit_name.lower() - - -def strip_i_meas(text: str) -> str: - """ - Remove the I_MEAS suffix from a variable name. - - Args: - text (str): The variable name possibly ending with ':I_MEAS'. - - Returns: - str: The variable name without the ':I_MEAS' suffix. 
- """ - return text.removesuffix(":I_MEAS") +# LSA K-value Calculation ------------------------------------------------------ def calc_k_from_iref(lsa_client, currents: dict[str, float], energy: float) -> dict[str, float]: @@ -224,79 +258,57 @@ def calc_k_from_iref(lsa_client, currents: dict[str, float], energy: float) -> d return res -def get_knob_vals( - spark: SparkSession, - time: datetime, - beam: int, - patterns: list[str], - expected_knobs: set[str] | None = None, - log_prefix: str = "", - delta_days: int = 1, -) -> list[NXCalResult]: +# Utility Functions ------------------------------------------------------------ + + +def strip_i_meas(text: str) -> str: """ - Retrieve knob values for a given beam and time using specified patterns. + Remove the I_MEAS suffix from a variable name. Args: - spark (SparkSession): Active Spark session. - time (datetime): Time to retrieve data for. - beam (int): Beam number. - patterns (list[str]): List of variable patterns to retrieve. - expected_knobs (set[str] | None): Set of expected MAD-X knob names. If None, returns all found knobs. - log_prefix (str): Prefix for logging messages. - delta_days (int): Number of days to look back for data. Default is 1. + text (str): The variable name possibly ending with ':I_MEAS'. Returns: - list[NXCalResult]: List of NXCalResult objects with knob values. + str: The variable name without the ':I_MEAS' suffix. 
""" - LOGGER.info(f"{log_prefix}Starting data retrieval for beam {beam} at time {time}") - - # Retrieve raw current measurements from NXCALS - combined_vars: list[NXCalResult] = [] - for pattern in patterns: - LOGGER.info(f"{log_prefix}Getting currents for pattern {pattern} at {time}") - raw_vars = get_raw_vars(spark, time, pattern, delta_days) - combined_vars.extend(raw_vars) - - # Get beam energy for K-value calculations - energy, _ = get_energy(spark, time) - - # Prepare currents dict for LSA - currents = {strip_i_meas(var.name): var.value for var in combined_vars} - - LOGGER.info( - f"{log_prefix}Calculating K values for {len(currents)} power converters with energy {energy} GeV" - ) + return text.removesuffix(":I_MEAS") - # Calculate K values using LSA - lsa_client = pjlsa.LSAClient() - k_values = calc_k_from_iref(lsa_client, currents, energy) - # Transform K values keys to MAD-X format - k_values_madx = {map_pc_name_to_madx(key): value for key, value in k_values.items()} +def map_pc_name_to_madx(pc_name: str) -> str: + """ + Convert a power converter name or circuit name to its corresponding MAD-X name. - found_keys = set(k_values_madx.keys()) + This function processes the input name by removing the ':I_MEAS' suffix if present, + extracting the circuit name from full power converter names (starting with 'RPMBB' or 'RPL'), + applying specific string replacements for MAD-X compatibility, and converting the result to lowercase. - if expected_knobs is None: - expected_knobs = found_keys + Args: + pc_name (str): The power converter or circuit name to convert. - if missing_keys := expected_knobs - found_keys: - LOGGER.warning(f"{log_prefix}Missing K-values for knobs: {missing_keys}") + Returns: + str: The converted MAD-X name. 
- if unknown_keys := found_keys - expected_knobs: - LOGGER.warning(f"{log_prefix}Unknown K-values found: {unknown_keys}") + Examples: + >>> map_pc_name_to_madx("RPMBB.UJ33.RCBH10.L1B1:I_MEAS") + 'acb10.l1b1' + >>> map_pc_name_to_madx("RQT12.L5B1") + 'kqt12.l5b1' + """ + # Remove the ':I_MEAS' suffix if it exists + pc_name = strip_i_meas(pc_name) - # Build result list with timestamps - timestamps = {map_pc_name_to_madx(var.name): var.timestamp for var in combined_vars} - pc_names = {map_pc_name_to_madx(var.name): var.pc_name for var in combined_vars} - results = [] - for madx_name in expected_knobs: - value = k_values_madx.get(madx_name) - timestamp = timestamps.get(madx_name) - pc_name = pc_names.get(madx_name) + # Extract circuit name from full power converter names + if "." in pc_name: + parts = pc_name.split(".") + # For full names like 'RPMBB.UJ33.RCBH10.L1B1', take from the third part onward, else use pc_name + circuit_name = ".".join(parts[2:]) if len(parts) > 2 else pc_name + else: + circuit_name = pc_name - if value is not None and timestamp is not None and pc_name is not None: - results.append(NXCalResult(madx_name, value, timestamp, pc_name)) - else: - LOGGER.warning(f"{log_prefix}Missing data for {madx_name}") + # Apply MAD-X specific replacements + replacements = {"RQT": "KQT", "RCB": "ACB"} + for old, new in replacements.items(): + circuit_name = circuit_name.replace(old, new) - return results + # Return in lowercase as required by MAD-X + return circuit_name.lower() From 54a56e634dc54d386ec63a01a8db6d03bb6b2559 Mon Sep 17 00:00:00 2001 From: Joshua Gray <61458311+jgray-19@users.noreply.github.com> Date: Fri, 14 Nov 2025 11:54:27 +0100 Subject: [PATCH 15/37] Adjust further based on comments --- .../mqt_extraction.py => mqt_extractor.py} | 0 omc3/nxcals/knob_extraction.py | 32 ++++- omc3/nxcals/mqt_extraction.py | 20 ++- omc3/utils/iotools.py | 15 +-- .../generate_mqt_references.py | 2 +- tests/unit/test_nxcals_mqt_extraction.py | 120 +++++++++++++++++- 
6 files changed, 158 insertions(+), 31 deletions(-) rename omc3/{scripts/mqt_extraction.py => mqt_extractor.py} (100%) rename tests/{utils => inputs/knob_extractor}/generate_mqt_references.py (93%) diff --git a/omc3/scripts/mqt_extraction.py b/omc3/mqt_extractor.py similarity index 100% rename from omc3/scripts/mqt_extraction.py rename to omc3/mqt_extractor.py diff --git a/omc3/nxcals/knob_extraction.py b/omc3/nxcals/knob_extraction.py index 29aeda2b..ad927bfc 100644 --- a/omc3/nxcals/knob_extraction.py +++ b/omc3/nxcals/knob_extraction.py @@ -56,17 +56,37 @@ def get_knob_vals( """ Retrieve knob values for a given beam and time using specified patterns. + This is the main entry point for extracting magnet knob values from NXCALS. The function + performs a complete workflow: + + 1. Queries NXCALS for power converter current measurements (I_MEAS) using variable patterns + 2. Retrieves the beam energy at the specified time + 3. Converts currents to K-values (integrated quadrupole strengths) using LSA + 4. Maps power converter names to MAD-X naming conventions + 5. Returns knob values with their timestamps + + The difference between patterns and knob names: + - **patterns**: NXCALS variable patterns (e.g., "RPMBB.UA%.RQT%.A%B1:I_MEAS") used to query + raw power converter current measurements. These follow CERN naming conventions and may + include wildcards (%). + - **expected_knobs**: MAD-X element names (e.g., "kqt12.a12b1") representing the final + knob names as used in MAD-X scripts. These are lowercase, simplified names. + Args: - spark (SparkSession): Active Spark session. + spark (SparkSession): Active Spark session for NXCALS queries. time (datetime): Time to retrieve data for. - beam (int): Beam number. - patterns (list[str]): List of variable patterns to retrieve. - expected_knobs (set[str] | None): Set of expected MAD-X knob names. If None, returns all found knobs. - log_prefix (str): Prefix for logging messages. + beam (int): Beam number (1 or 2). 
+ patterns (list[str]): List of NXCALS variable patterns to query for power converter + currents. Patterns can include SQL-like wildcards (%). Example: + "RPMBB.UA%.RQT%.A%B1:I_MEAS" matches all MQT quadrupole trim magnets for beam 1. + expected_knobs (set[str] | None): Set of expected MAD-X knob names to validate and filter + results. If None, returns all found knobs without validation. + log_prefix (str): Prefix for logging messages to distinguish different extraction runs. delta_days (int): Number of days to look back for data. Default is 1. Returns: - list[NXCalResult]: List of NXCalResult objects with knob values. + list[NXCalResult]: List of NXCalResult objects containing MAD-X knob names, K-values, + timestamps, and power converter names. """ LOGGER.info(f"{log_prefix}Starting data retrieval for beam {beam} at time {time}") diff --git a/omc3/nxcals/mqt_extraction.py b/omc3/nxcals/mqt_extraction.py index 6211e154..21beec64 100644 --- a/omc3/nxcals/mqt_extraction.py +++ b/omc3/nxcals/mqt_extraction.py @@ -1,9 +1,14 @@ """ -Extraction of MQT (Quadrupole Trim) knob values from NXCALS. ------------------------------------------------------------- +Extraction of MQT knobs from NXCALS. +------------------------------------ -This module provides functions to retrieve MQT knob values for the LHC for a specified beam -and time using NXCALS and LSA. +This module provides functions to retrieve MQT (Quadrupole Trim) knob values for the LHC +for a specified beam and time using NXCALS and LSA. + +There are two types per arc (focusing 'f' and defocusing 'd'), resulting in 16 MQT circuits +per beam (8 arcs * 2 planes). + +The extraction uses the underlying `knob_extraction` module with MQT-specific patterns. 
""" import logging @@ -42,7 +47,9 @@ def get_mqts(beam: int) -> set[str]: return {f"kqt{t}.a{a}b{beam}" for t in types for a in arcs} -def get_mqt_vals(spark: SparkSession, time: datetime, beam: int) -> list[NXCalResult]: +def get_mqt_vals( + spark: SparkSession, time: datetime, beam: int, delta_days: int = 1 +) -> list[NXCalResult]: """ Retrieve MQT (Quadrupole Trim) knob values from NXCALS for a specific time and beam. @@ -54,6 +61,7 @@ def get_mqt_vals(spark: SparkSession, time: datetime, beam: int) -> list[NXCalRe spark (SparkSession): Active Spark session for NXCALS queries. time (datetime): The timestamp for which to retrieve the data (timezone-aware recommended). beam (int): The beam number (1 or 2). + delta_days (int): Number of days to look back for data. Default is 1. Returns: list[NXCalResult]: List of NXCalResult objects containing the MAD-X knob names, K-values, and timestamps. @@ -65,7 +73,7 @@ def get_mqt_vals(spark: SparkSession, time: datetime, beam: int) -> list[NXCalRe madx_mqts = get_mqts(beam) pattern = f"RPMBB.UA%.RQT%.A%B{beam}:I_MEAS" patterns = [pattern] - return get_knob_vals(spark, time, beam, patterns, madx_mqts, "MQT: ") + return get_knob_vals(spark, time, beam, patterns, madx_mqts, "MQT: ", delta_days) def knobs_to_madx(mqt_vals: list[NXCalResult]) -> str: diff --git a/omc3/utils/iotools.py b/omc3/utils/iotools.py index b23e1a83..614bdcdc 100644 --- a/omc3/utils/iotools.py +++ b/omc3/utils/iotools.py @@ -129,17 +129,6 @@ def __new__(cls, value): return value -class DateOrStr(metaclass=get_instance_faker_meta(datetime, str)): - """A class that can be used as datetime and string parser input""" - - def __new__(cls, value): - value = strip_quotes(value) - if isinstance(value, datetime): - return value - # assume string, rely on pandas to parse and throw if not possible - return pd.to_datetime(value, utc=True) - - class UnionPathStr(metaclass=get_instance_faker_meta(Path, str)): """A class that can be used as Path and string parser input, 
but does not convert to path.""" @@ -254,9 +243,7 @@ def maybe_add_command(opt: dict, script: str) -> dict: if the script names were different, just the original opt. """ if script == sys.argv[0]: - opt[";command"] = " ".join( - [sys.executable] + sys.argv - ) # will be sorted to the beginning below + opt[";command"] = " ".join([sys.executable] + sys.argv) return opt diff --git a/tests/utils/generate_mqt_references.py b/tests/inputs/knob_extractor/generate_mqt_references.py similarity index 93% rename from tests/utils/generate_mqt_references.py rename to tests/inputs/knob_extractor/generate_mqt_references.py index bf316b8c..57a4ffd1 100644 --- a/tests/utils/generate_mqt_references.py +++ b/tests/inputs/knob_extractor/generate_mqt_references.py @@ -18,7 +18,7 @@ ) # Output directory for test inputs -OUTPUT_DIR = Path(__file__).parent.parent / "inputs" / "knob_extractor" +OUTPUT_DIR = Path(__file__).parent def main(): diff --git a/tests/unit/test_nxcals_mqt_extraction.py b/tests/unit/test_nxcals_mqt_extraction.py index 4644d4c2..6091a4d8 100644 --- a/tests/unit/test_nxcals_mqt_extraction.py +++ b/tests/unit/test_nxcals_mqt_extraction.py @@ -6,10 +6,10 @@ import pandas as pd import pytest +from omc3 import mqt_extractor from omc3.nxcals import mqt_extraction from omc3.nxcals.constants import EXTRACTED_MQTS_FILENAME from omc3.nxcals.knob_extraction import NXCalResult -from omc3.scripts import mqt_extraction as mqt_script SAMPLE_DIR = Path(__file__).parent.parent / "inputs" / "knob_extractor" TEST_CASES = ( @@ -67,11 +67,10 @@ def test_main_reproduces_reference_output(tmp_path, beam: int, sample_file: Path output_dir = tmp_path / f"beam{beam}" output_dir.mkdir() - - mqt_script.retrieve_mqts(time=query_time, beam=beam, output_path=output_dir) - output_path = output_dir / EXTRACTED_MQTS_FILENAME + mqt_extractor.main(time=query_time.isoformat(), beam=beam, output=output_path) + expected_results = [ NXCalResult( name=r.name, @@ -98,3 +97,116 @@ def 
test_main_reproduces_reference_output(tmp_path, beam: int, sample_file: Path def test_get_mqts_invalid_beam(): with pytest.raises(ValueError): mqt_extraction.get_mqts(beam=3) + + +@pytest.mark.cern_network +@pytest.mark.parametrize("beam, sample_file", TEST_CASES) +def test_main_returns_tfs_dataframe(tmp_path, beam: int, sample_file: Path): + """Test that main returns a TfsDataFrame with correct structure.""" + sample_results = _load_results_from_file(sample_file) + latest_sample_time = max(result.timestamp for result in sample_results) + query_time = latest_sample_time.to_pydatetime() + + output_path = tmp_path / f"test_output_b{beam}.madx" + + # Call main and verify return type + result_df = mqt_extractor.main(time=query_time.isoformat(), beam=beam, output=output_path) + + # Check it's a TfsDataFrame + import tfs + + assert isinstance(result_df, tfs.TfsDataFrame), "main should return a TfsDataFrame" + + # Check headers + assert "EXTRACTION_TIME" in result_df.headers, "Missing EXTRACTION_TIME header" + assert "BEAM" in result_df.headers, "Missing BEAM header" + assert result_df.headers["BEAM"] == beam, f"Expected beam {beam} in headers" + + # Check columns + expected_columns = ["madx", "value", "timestamp", "pc_name"] + for col in expected_columns: + assert col in result_df.columns, f"Missing column {col}" + + # Check we have the right number of rows + assert len(result_df) == 16, f"Expected 16 MQT entries, got {len(result_df)}" + + +@pytest.mark.cern_network +@pytest.mark.parametrize("beam", [1, 2]) +def test_main_with_timedelta(tmp_path, beam: int): + """Test that timedelta parameter works correctly.""" + from datetime import datetime + + output_path = tmp_path / f"test_timedelta_b{beam}.madx" + + # Get current time + now = datetime.now() + + # Call with timedelta going back 1 day + result_df = mqt_extractor.main( + time="now", + timedelta="_1d", # 1 day ago + beam=beam, + output=output_path, + ) + + # Verify the extraction time is approximately 1 day ago + 
extraction_time = result_df.headers["EXTRACTION_TIME"] + time_diff = abs((now - extraction_time).total_seconds()) + + # Should be close to 1 day (86400 seconds), allow 5 minute tolerance + one_day_seconds = 86400 + assert abs(time_diff - one_day_seconds) < 300, ( + f"Time difference should be ~1 day, but got {time_diff} seconds" + ) + + +@pytest.mark.cern_network +@pytest.mark.parametrize("beam", [1, 2]) +def test_main_with_delta_days(tmp_path, beam: int): + """Test that delta_days parameter is properly passed through.""" + from datetime import datetime, timedelta + + output_path = tmp_path / f"test_delta_days_b{beam}.madx" + + # Use a time 3 days ago with delta_days=5 to ensure we get data + past_time = datetime.now() - timedelta(days=3) + + # This should work because we're looking back 5 days + result_df = mqt_extractor.main( + time=past_time.isoformat(), beam=beam, output=output_path, delta_days=5 + ) + + # Should succeed and return valid data + assert len(result_df) == 16, "Expected 16 MQT entries with delta_days=5" + assert result_df.headers["BEAM"] == beam + + +def test_parse_time_now(): + """Test that _parse_time correctly handles 'now'.""" + from datetime import datetime + + result = mqt_extractor._parse_time("now") + now = datetime.now() + + # Should be very close to now (within 1 second) + diff = abs((now - result).total_seconds()) + assert diff < 1, f"_parse_time('now') should return current time, diff was {diff}s" + + +def test_parse_time_with_timedelta(): + """Test that _parse_time correctly applies timedelta.""" + from datetime import datetime + + now_str = datetime.now().isoformat() + + # Test positive timedelta + result_plus = mqt_extractor._parse_time(now_str, "1h") + result_base = mqt_extractor._parse_time(now_str) + diff_plus = (result_plus - result_base).total_seconds() + assert abs(diff_plus - 3600) < 1, f"1h timedelta should add 3600s, got {diff_plus}s" + + # Test negative timedelta + result_minus = mqt_extractor._parse_time(now_str, "_2h") + 
diff_minus = (result_minus - result_base).total_seconds() + assert abs(diff_minus + 7200) < 1, f"_2h timedelta should subtract 7200s, got {diff_minus}s" From 3404daef6b73cc44d21b5e262a9ff7d8d84286bb Mon Sep 17 00:00:00 2001 From: Joshua Gray <61458311+jgray-19@users.noreply.github.com> Date: Fri, 14 Nov 2025 11:57:41 +0100 Subject: [PATCH 16/37] Enhance MQT extractor documentation and update dependencies for Spark support --- omc3/mqt_extractor.py | 203 ++++++++++++++++++++++++++++++++++-------- pyproject.toml | 3 +- 2 files changed, 166 insertions(+), 40 deletions(-) diff --git a/omc3/mqt_extractor.py b/omc3/mqt_extractor.py index 558cc055..db1c78a1 100644 --- a/omc3/mqt_extractor.py +++ b/omc3/mqt_extractor.py @@ -1,79 +1,204 @@ -""" -MQT Extraction ---------------- +r""" +MQT Extractor +------------- -Extract MQT (Quadrupole Trim) knob values from NXCALS for a specific time and beam. +Entrypoint to extract MQT (Quadrupole Trim) knob values from ``NXCALS`` at a given time, +and eventually write them out to a file. +The data is fetched from ``NXCALS`` through PySpark queries and converted to K-values using ``LSA``. -**Arguments:** +MQT magnets are trim quadrupoles located in the LHC arcs, used for fine-tuning the optics. +There are two types per arc (focusing 'f' and defocusing 'd'), resulting in 16 MQT magnets +per beam (8 arcs × 2 types). -*--Required--* +.. note:: + Please note that access to the GPN is required to use this functionality. -- **time** *(datetime|str)*: +**Arguments:** - The timestamp for which to retrieve the data (timezone-aware recommended). If a string is provided, it will be parsed to a datetime object, assuming UTC timezone. +*--Required--* - **beam** *(int)*: The beam number (1 or 2). + +- **time** *(str)*: + + At what time to extract the MQT knobs. Accepts ISO-format (YYYY-MM- + DDThh:mm:ss), timestamp or 'now'. The default timezone for the ISO- + format is local time, but you can force e.g. UTC by adding +00:00. 
+ + default: ``now`` + + *--Optional--* -- **output_path** *(str|Path)*: - Path to the output file or directory. If a directory is given, the file will be named 'extracted_mqts.str'. If not provided, no output will be written. +- **output** *(PathOrStr)*: + + Specify user-defined output path. This should probably be + `model_dir/mqts.madx` + + +- **timedelta** *(str)*: + + Add this timedelta to the given time. The format of timedelta is + '((\d+)(\w))+' with the second token being one of s(seconds), + m(minutes), h(hours), d(days), w(weeks), M(months) e.g 7m = 7 minutes, + 1d = 1day, 7m30s = 7 min 30 secs. A prefix '_' specifies a negative + timedelta. This allows for easily getting the setting e.g. 2h ago: + '_2h' while setting the `time` argument to 'now' (default). + + +- **delta_days** *(int)*: + + Number of days to look back for data in NXCALS. + + default: ``1`` + + """ +from __future__ import annotations + +import argparse from pathlib import Path +from typing import TYPE_CHECKING -from generic_parser import DotDict, EntryPointParameters, entrypoint +import tfs +from generic_parser import EntryPointParameters, entrypoint -from omc3.nxcals.constants import EXTRACTED_MQTS_FILENAME -from omc3.nxcals.knob_extraction import NXCalResult from omc3.nxcals.mqt_extraction import get_mqt_vals, knobs_to_madx -from omc3.utils.iotools import DateOrStr, PathOrStr +from omc3.utils.iotools import PathOrStr +from omc3.utils.logging_tools import get_logger from omc3.utils.mock import cern_network_import +if TYPE_CHECKING: + from datetime import datetime + spark_session_builder = cern_network_import("nxcals.spark_session_builder") +LOGGER = get_logger(__name__) -def _get_params() -> EntryPointParameters: - """ - Define the parameters for the MQT retrieval entry point. 
- """ +USAGE_EXAMPLES = """Usage Examples: + +python -m omc3.mqt_extractor --beam 1 --time 2022-05-04T14:00 + extracts the MQT knobs for beam 1 at 14h on May 4th 2022 + +python -m omc3.mqt_extractor --beam 2 --time now --timedelta _2h + extracts the MQT knobs for beam 2 as of 2 hours ago + +python -m omc3.mqt_extractor --beam 1 --time now --output model_dir/mqts.madx + extracts the current MQT settings for beam 1 and writes to file +""" + + +def get_params(): return EntryPointParameters( + beam={ + "type": int, + "help": "The beam number (1 or 2).", + }, time={ - "type": DateOrStr, - "help": "The timestamp for which to retrieve the data (timezone-aware recommended).", + "type": str, + "help": ( + "At what time to extract the MQT knobs. " + "Accepts ISO-format (YYYY-MM-DDThh:mm:ss), timestamp or 'now'. " + "The default timezone for the ISO-format is local time, " + "but you can force e.g. UTC by adding +00:00." + ), + "default": "now", }, - beam={"type": int, "help": "The beam number (1 or 2)."}, - output_path={ + timedelta={ + "type": str, + "help": ( + "Add this timedelta to the given time. " + "The format of timedelta is '((\\d+)(\\w))+' " + "with the second token being one of " + "s(seconds), m(minutes), h(hours), d(days), w(weeks), M(months) " + "e.g 7m = 7 minutes, 1d = 1day, 7m30s = 7 min 30 secs. " + "A prefix '_' specifies a negative timedelta. " + "This allows for easily getting the setting " + "e.g. 2h ago: '_2h' while setting the `time` argument to 'now' (default)." + ), + }, + output={ "type": PathOrStr, - "default": EXTRACTED_MQTS_FILENAME, - "help": f"Path to the output file or directory. Defaults to '{EXTRACTED_MQTS_FILENAME}'. If a directory is given, the file will be named '{EXTRACTED_MQTS_FILENAME}'.", + "help": ( + "Specify user-defined output path. 
This should probably be `model_dir/mqts.madx`" + ), + }, + delta_days={ + "type": int, + "help": "Number of days to look back for data in NXCALS.", + "default": 1, }, ) -@entrypoint(_get_params(), strict=True) -def retrieve_mqts(opt: DotDict) -> list[NXCalResult]: +@entrypoint( + get_params(), + strict=True, + argument_parser_args={ + "epilog": USAGE_EXAMPLES, + "formatter_class": argparse.RawDescriptionHelpFormatter, + "prog": "MQT Extraction Tool.", + }, +) +def main(opt) -> tfs.TfsDataFrame: """ - Retrieve MQT (Quadrupole Trim) knob values from NXCALS for a specific time and beam, - and optionally save them to a file. + Main MQT extracting function. + + Retrieves MQT (Quadrupole Trim) knob values from NXCALS for a specific time and beam, + and optionally saves them to a file. Returns: - The list of retrieved MQT knob values. + tfs.TfsDataFrame: DataFrame containing the extracted MQT knob values with columns + for madx name, value, timestamp, and power converter name. """ - spark = spark_session_builder.get_or_create() - mqt_vals = get_mqt_vals(spark, opt.time, opt.beam) + spark = spark_session_builder.get_or_create(conf={"spark.ui.showConsoleProgress": "false"}) + time = _parse_time(opt.time, opt.timedelta) + + LOGGER.info(f"---- EXTRACTING MQT KNOBS @ {time} for Beam {opt.beam} ----") + mqt_vals = get_mqt_vals(spark, time, opt.beam, delta_days=opt.delta_days) + + # Convert to TfsDataFrame for consistency with knob_extractor + mqt_df = tfs.TfsDataFrame( + index=[result.name for result in mqt_vals], + columns=["madx", "value", "timestamp", "pc_name"], + headers={"EXTRACTION_TIME": time, "BEAM": opt.beam}, + ) + for result in mqt_vals: + mqt_df.loc[result.name, "madx"] = result.name + mqt_df.loc[result.name, "value"] = result.value + mqt_df.loc[result.name, "timestamp"] = result.timestamp + mqt_df.loc[result.name, "pc_name"] = result.pc_name + + if opt.output: + _write_mqt_file(opt.output, mqt_vals, time, opt.beam) + + return mqt_df + + +def 
_write_mqt_file(output: Path | str, mqt_vals, time: datetime, beam: int): + """Write MQT knobs to a MAD-X file.""" + with Path(output).open("w") as outfile: + outfile.write("!! --- MQT knobs extracted by mqt_extractor\n") + outfile.write(f"!! --- extracted MQT knobs for time {time}, beam {beam}\n\n") + outfile.write(knobs_to_madx(mqt_vals)) + + +# Time Tools (copied from knob_extractor for consistency) --------------------- + - if opt.output_path is not None: - output_path = Path(opt.output_path) - if output_path.is_dir(): - output_path = output_path / EXTRACTED_MQTS_FILENAME - with output_path.open("w") as f: - f.write(knobs_to_madx(mqt_vals)) +def _parse_time(time: str, timedelta: str = None) -> datetime: + """Parse time from given time-input.""" + from omc3.knob_extractor import _add_time_delta, _parse_time_from_str - return mqt_vals + t = _parse_time_from_str(time) + if timedelta: + t = _add_time_delta(t, timedelta) + return t if __name__ == "__main__": - retrieve_mqts() + main() diff --git a/pyproject.toml b/pyproject.toml index 1fc8a16d..e2132575 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -64,6 +64,7 @@ dependencies = [ cern = [ "jpype1 >= 1.3", "nxcals >= 1.6.6", # Direct NXCALS access + "pyspark >= 3.5", # Needed when extracting from NXCALS using Spark "pytimber >= 3.0", # NXCALS support "pylogbook >= 3.4", "kerberos >= 1.3.1", # requires having krb-5config installed on the system @@ -75,7 +76,7 @@ optional = [ ] test = [ "jpype1 >= 1.3", - "pyspark >= 3.5", # needed only for cern tests + "pyspark >= 3.5", # needed for test collection when cern extras are not installed "pytest >= 7.0", "pytest-cov >= 2.9", "pytest-timeout >= 1.4", From 2bd00c8a21f1463af90b606b7f3f8fa5380dfc9a Mon Sep 17 00:00:00 2001 From: Joshua Gray <61458311+jgray-19@users.noreply.github.com> Date: Fri, 14 Nov 2025 13:27:34 +0100 Subject: [PATCH 17/37] Fix tests --- omc3/model/model_creators/lhc_model_creator.py | 4 ++-- tests/unit/test_model_creator.py | 9 +++------ 2 files 
changed, 5 insertions(+), 8 deletions(-) diff --git a/omc3/model/model_creators/lhc_model_creator.py b/omc3/model/model_creators/lhc_model_creator.py index 1b17c399..69e36668 100644 --- a/omc3/model/model_creators/lhc_model_creator.py +++ b/omc3/model/model_creators/lhc_model_creator.py @@ -352,10 +352,10 @@ def _get_madx_main_sequence_loading(self, acc_model_path: Path | None) -> str: accel: Lhc = self.accel if acc_model_path is not None: - main_seq_rel = self.resolve_madx_path(acc_model_path / "lhc.seq") + main_seq_rel = acc_model_path / "lhc.seq" main_call = f"call, file = '{main_seq_rel}';" if accel.year.startswith("hl"): - hllhc_rel = self.resolve_madx_path(acc_model_path / "hllhc_sequence.madx") + hllhc_rel = acc_model_path / "hllhc_sequence.madx" main_call += f"\ncall, file = '{hllhc_rel}';" return main_call try: diff --git a/tests/unit/test_model_creator.py b/tests/unit/test_model_creator.py index 345a30fd..2200ad65 100644 --- a/tests/unit/test_model_creator.py +++ b/tests/unit/test_model_creator.py @@ -14,7 +14,6 @@ AccExcitationMode, ) from omc3.model.constants import ( - ACC_MODELS_PREFIX, JOB_MODEL_MADX_NOMINAL, OPTICS_SUBDIR, TWISS_AC_DAT, @@ -318,7 +317,7 @@ def test_lhc_creation_best_knowledge(tmp_path, acc_models_lhc_2025): @pytest.mark.basic -def test_lhc_creation_absolute_modifier_path(tmp_path: Path, acc_models_lhc_2022: Path): +def test_lhc_creation_relative_modifier_path(tmp_path: Path, acc_models_lhc_2022: Path): rel_path = OPTICS_SUBDIR / "R2022a_A30cmC30cmA10mL200cm.madx" accel_opt = { "accel": "lhc", @@ -335,10 +334,8 @@ def test_lhc_creation_absolute_modifier_path(tmp_path: Path, acc_models_lhc_2022 accel = create_instance_and_model( outputdir=tmp_path, type="nominal", logfile=tmp_path / "madx_log.txt", **accel_opt ) - absolute_path = ( - tmp_path / f"{ACC_MODELS_PREFIX}-{accel.NAME}" / rel_path - ) # replaced in model creation - madx_string = f"call, file = '{absolute_path!s}" + + madx_string = f"call, file = '{rel_path!s}" assert 
madx_string in (tmp_path / JOB_MODEL_MADX_NOMINAL).read_text() assert madx_string in (tmp_path / "madx_log.txt").read_text() check_accel_from_dir_vs_options(tmp_path, accel_opt, accel) From 9f66b221457afb6b21b1e11bceb6b729fef89e9b Mon Sep 17 00:00:00 2001 From: Joshua Gray <61458311+jgray-19@users.noreply.github.com> Date: Fri, 14 Nov 2025 14:17:32 +0100 Subject: [PATCH 18/37] Fix test more --- tests/unit/test_model_creator.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/unit/test_model_creator.py b/tests/unit/test_model_creator.py index 2200ad65..84407402 100644 --- a/tests/unit/test_model_creator.py +++ b/tests/unit/test_model_creator.py @@ -14,6 +14,7 @@ AccExcitationMode, ) from omc3.model.constants import ( + ACC_MODELS_PREFIX, JOB_MODEL_MADX_NOMINAL, OPTICS_SUBDIR, TWISS_AC_DAT, @@ -335,7 +336,7 @@ def test_lhc_creation_relative_modifier_path(tmp_path: Path, acc_models_lhc_2022 outputdir=tmp_path, type="nominal", logfile=tmp_path / "madx_log.txt", **accel_opt ) - madx_string = f"call, file = '{rel_path!s}" + madx_string = f"call, file = '{ACC_MODELS_PREFIX}-lhc/{rel_path!s}" assert madx_string in (tmp_path / JOB_MODEL_MADX_NOMINAL).read_text() assert madx_string in (tmp_path / "madx_log.txt").read_text() check_accel_from_dir_vs_options(tmp_path, accel_opt, accel) From 1955e5bf83f4a6c776106996a90f963b29f2fad0 Mon Sep 17 00:00:00 2001 From: Joshua Gray <61458311+jgray-19@users.noreply.github.com> Date: Fri, 14 Nov 2025 15:33:14 +0100 Subject: [PATCH 19/37] Fix test for windows --- tests/unit/test_model_creator.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/unit/test_model_creator.py b/tests/unit/test_model_creator.py index 84407402..42fae72e 100644 --- a/tests/unit/test_model_creator.py +++ b/tests/unit/test_model_creator.py @@ -336,7 +336,7 @@ def test_lhc_creation_relative_modifier_path(tmp_path: Path, acc_models_lhc_2022 outputdir=tmp_path, type="nominal", logfile=tmp_path / "madx_log.txt", **accel_opt 
) - madx_string = f"call, file = '{ACC_MODELS_PREFIX}-lhc/{rel_path!s}" + madx_string = f"call, file = '{f'{ACC_MODELS_PREFIX}-lhc' / rel_path!s}'" assert madx_string in (tmp_path / JOB_MODEL_MADX_NOMINAL).read_text() assert madx_string in (tmp_path / "madx_log.txt").read_text() check_accel_from_dir_vs_options(tmp_path, accel_opt, accel) From 37d03f336839cf3ad760419b13065d62bda361c8 Mon Sep 17 00:00:00 2001 From: Joshua Gray <61458311+jgray-19@users.noreply.github.com> Date: Sat, 29 Nov 2025 10:01:35 +0100 Subject: [PATCH 20/37] Fix module paths in documentation and add NXCALS module --- doc/entrypoints/scripts.rst | 6 +++--- doc/modules/model.rst | 8 ++++++++ doc/modules/nxcals.rst | 16 ++++++++++++++++ 3 files changed, 27 insertions(+), 3 deletions(-) create mode 100644 doc/modules/nxcals.rst diff --git a/doc/entrypoints/scripts.rst b/doc/entrypoints/scripts.rst index 84097467..dfbe557e 100644 --- a/doc/entrypoints/scripts.rst +++ b/doc/entrypoints/scripts.rst @@ -6,7 +6,7 @@ Scripts :noindex: -.. automodule:: omc3.scripts.kmod_averages +.. automodule:: omc3.scripts.kmod_average :members: :noindex: @@ -46,11 +46,11 @@ Scripts :noindex: -.. automodule:: omc3.scripts.lhc_corrector_list_check +.. automodule:: omc3.scripts.create_logbook_entry :members: :noindex: -.. automodule:: omc3.scripts.mqt_extraction +.. automodule:: omc3.scripts.lhc_corrector_list_check :members: :noindex: diff --git a/doc/modules/model.rst b/doc/modules/model.rst index 23c944a4..d3742580 100644 --- a/doc/modules/model.rst +++ b/doc/modules/model.rst @@ -85,3 +85,11 @@ Model :members: :noindex: + +.. automodule:: omc3.model.model_creators.abstract_model_creator + :members: + :noindex: + +.. automodule:: omc3.model.model_creators.segment_creator + :members: + :noindex: diff --git a/doc/modules/nxcals.rst b/doc/modules/nxcals.rst new file mode 100644 index 00000000..01c237ab --- /dev/null +++ b/doc/modules/nxcals.rst @@ -0,0 +1,16 @@ +NXCALS +************************** + +.. 
automodule:: omc3.nxcals.constants + :members: + :noindex: + + +.. automodule:: omc3.nxcals.knob_extraction + :members: + :noindex: + + +.. automodule:: omc3.nxcals.mqt_extraction + :members: + :noindex: From b0b13a0760a7978b97f207fe3a5220b94ea3b808 Mon Sep 17 00:00:00 2001 From: Joshua Gray <61458311+jgray-19@users.noreply.github.com> Date: Sat, 29 Nov 2025 10:24:58 +0100 Subject: [PATCH 21/37] Refactor coupling addition loop to improve readability --- omc3/correction/response_madx.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/omc3/correction/response_madx.py b/omc3/correction/response_madx.py index 24bb1b36..0601dabb 100644 --- a/omc3/correction/response_madx.py +++ b/omc3/correction/response_madx.py @@ -349,7 +349,7 @@ def _add_coupling( """ result_dict_of_tfs = copy.deepcopy(dict_of_tfs) with timeit(lambda elapsed: LOG.debug(f" Time adding coupling: {elapsed} s")): - for _, tfs_dframe in result_dict_of_tfs.items(): + for tfs_dframe in result_dict_of_tfs.values(): coupling_rdts_df = coupling_via_cmatrix(tfs_dframe) tfs_dframe[f"{F1001}R"] = np.real(coupling_rdts_df[f"{F1001}"]).astype(np.float64) tfs_dframe[f"{F1001}I"] = np.imag(coupling_rdts_df[f"{F1001}"]).astype(np.float64) From 688bba606b1fc738874528c8182276318d8f4ff0 Mon Sep 17 00:00:00 2001 From: Joshua Gray <61458311+jgray-19@users.noreply.github.com> Date: Sat, 29 Nov 2025 10:25:37 +0100 Subject: [PATCH 22/37] Refactor MADX script generation for improved readability --- .../model/model_creators/lhc_model_creator.py | 93 +++++++++---------- 1 file changed, 43 insertions(+), 50 deletions(-) diff --git a/omc3/model/model_creators/lhc_model_creator.py b/omc3/model/model_creators/lhc_model_creator.py index 69e36668..f702ddca 100644 --- a/omc3/model/model_creators/lhc_model_creator.py +++ b/omc3/model/model_creators/lhc_model_creator.py @@ -606,46 +606,43 @@ def get_madx_script(self) -> str: twiss_forward_path = self.resolve_madx_path(self.output_dir / self.twiss_forward) 
twiss_backward_path = self.resolve_madx_path(self.output_dir / self.twiss_backward) - madx_script += "\n".join( - [ - "", - f"! ----- Segment-by-Segment propagation for {self.segment.name} -----", - "", - "! Cycle the sequence to avoid negative length.", - f"seqedit, sequence=LHCB{accel.beam};", - "flatten;", - f"cycle, start={self.segment.start};", - "endedit;", - "", - f"use, period = LHCB{accel.beam};", - "option, echo;", - "", - "twiss;", - "exec, save_initial_and_final_values(", - f" LHCB{accel.beam},", - f" {self.segment.start},", - f" {self.segment.end}, ", - f' "{measurement_path}",', - f" biniLHCB{accel.beam},", - f" bendLHCB{accel.beam}", - ");", - "", - "exec, extract_segment_sequence(", - f" LHCB{accel.beam},", - f" forward_LHCB{accel.beam},", - f" backward_LHCB{accel.beam},", - f" {self.segment.start},", - f" {self.segment.end},", - ");", - "", - f"beam, particle = proton, sequence=forward_LHCB{accel.beam}, energy = {MADX_ENERGY_VAR}, bv={accel.beam_direction:d};", - f"beam, particle = proton, sequence=backward_LHCB{accel.beam}, energy = {MADX_ENERGY_VAR}, bv={accel.beam_direction:d};", - "", - f'exec, twiss_segment(forward_LHCB{accel.beam}, "{twiss_forward_path}", biniLHCB{accel.beam});', - f'exec, twiss_segment(backward_LHCB{accel.beam}, "{twiss_backward_path}", bendLHCB{accel.beam});', - "", - ] - ) + madx_script += f""" +! ----- Segment-by-Segment propagation for {self.segment.name} ----- + +! Cycle the sequence to avoid negative length. 
+seqedit, sequence=LHCB{accel.beam}; +flatten; +cycle, start={self.segment.start}; +endedit; + +use, period = LHCB{accel.beam}; +option, echo; + +twiss; +exec, save_initial_and_final_values( + LHCB{accel.beam}, + {self.segment.start}, + {self.segment.end}, + "{measurement_path}", + biniLHCB{accel.beam}, + bendLHCB{accel.beam} +); + +exec, extract_segment_sequence( + LHCB{accel.beam}, + forward_LHCB{accel.beam}, + backward_LHCB{accel.beam}, + {self.segment.start}, + {self.segment.end}, +); + +beam, particle = proton, sequence=forward_LHCB{accel.beam}, energy = {MADX_ENERGY_VAR}, bv={accel.beam_direction:d}; +beam, particle = proton, sequence=backward_LHCB{accel.beam}, energy = {MADX_ENERGY_VAR}, bv={accel.beam_direction:d}; + +exec, twiss_segment(forward_LHCB{accel.beam}, "{twiss_forward_path}", biniLHCB{accel.beam}); +exec, twiss_segment(backward_LHCB{accel.beam}, "{twiss_backward_path}", bendLHCB{accel.beam}); + +""" if self.corrections is not None: corrections_path = self.resolve_madx_path(self.output_dir / self.corrections_madx) @@ -655,16 +652,12 @@ def get_madx_script(self) -> str: twiss_backward_corr_path = self.resolve_madx_path( self.output_dir / self.twiss_backward_corrected ) - madx_script += "\n".join( - [ - f'call, file = "{corrections_path}";', - f"exec, twiss_segment(forward_LHCB{accel.beam}, " - f'"{twiss_forward_corr_path}", biniLHCB{accel.beam});', - f"exec, twiss_segment(backward_LHCB{accel.beam}, " - f'"{twiss_backward_corr_path}", bendLHCB{accel.beam});', - "", - ] - ) + madx_script += f""" +call, file = "{corrections_path}"; +exec, twiss_segment(forward_LHCB{accel.beam}, "{twiss_forward_corr_path}", biniLHCB{accel.beam}); +exec, twiss_segment(backward_LHCB{accel.beam}, "{twiss_backward_corr_path}", bendLHCB{accel.beam}); + +""" return madx_script From c9d497dfbe538cd286e928b3fd748f86d1ff0648 Mon Sep 17 00:00:00 2001 From: Joshua Gray <61458311+jgray-19@users.noreply.github.com> Date: Sat, 29 Nov 2025 10:28:54 +0100 Subject: [PATCH 23/37] Rename 
NXCalResult to NXCALSResult and update references across modules --- omc3/nxcals/knob_extraction.py | 20 ++++++++++---------- omc3/nxcals/mqt_extraction.py | 6 +++--- tests/unit/test_nxcals_mqt_extraction.py | 10 +++++----- 3 files changed, 18 insertions(+), 18 deletions(-) diff --git a/omc3/nxcals/knob_extraction.py b/omc3/nxcals/knob_extraction.py index ad927bfc..19133068 100644 --- a/omc3/nxcals/knob_extraction.py +++ b/omc3/nxcals/knob_extraction.py @@ -19,7 +19,7 @@ import jpype import pandas as pd -from pyspark.sql import functions as f +from pyspark.sql import functions from pyspark.sql.window import Window from omc3.utils.mock import cern_network_import @@ -34,7 +34,7 @@ @dataclass -class NXCalResult: +class NXCALSResult: name: str value: float timestamp: pd.Timestamp @@ -52,7 +52,7 @@ def get_knob_vals( expected_knobs: set[str] | None = None, log_prefix: str = "", delta_days: int = 1, -) -> list[NXCalResult]: +) -> list[NXCALSResult]: """ Retrieve knob values for a given beam and time using specified patterns. 
@@ -91,7 +91,7 @@ def get_knob_vals( LOGGER.info(f"{log_prefix}Starting data retrieval for beam {beam} at time {time}") # Retrieve raw current measurements from NXCALS - combined_vars: list[NXCalResult] = [] + combined_vars: list[NXCALSResult] = [] for pattern in patterns: LOGGER.info(f"{log_prefix}Getting currents for pattern {pattern} at {time}") raw_vars = get_raw_vars(spark, time, pattern, delta_days) @@ -135,7 +135,7 @@ def get_knob_vals( pc_name = pc_names.get(madx_name) if value is not None and timestamp is not None and pc_name is not None: - results.append(NXCalResult(madx_name, value, timestamp, pc_name)) + results.append(NXCALSResult(madx_name, value, timestamp, pc_name)) else: LOGGER.warning(f"{log_prefix}Missing data for {madx_name}") @@ -147,7 +147,7 @@ def get_knob_vals( def get_raw_vars( spark: SparkSession, time: datetime, var_name: str, delta_days: int = 1 -) -> list[NXCalResult]: +) -> list[NXCALSResult]: """ Retrieve raw variable values from NXCALS. @@ -197,11 +197,11 @@ def get_raw_vars( # Get the latest sample for each variable window_spec = Window.partitionBy("nxcals_variable_name").orderBy( - f.col("nxcals_timestamp").desc() + functions.col("nxcals_timestamp").desc() ) latest_df = ( - df.withColumn("row_num", f.row_number().over(window_spec)) - .filter(f.col("row_num") == 1) + df.withColumn("row_num", functions.row_number().over(window_spec)) + .filter(functions.col("row_num") == 1) .select("nxcals_variable_name", "nxcals_value", "nxcals_timestamp") ) @@ -210,7 +210,7 @@ def get_raw_vars( full_varname = row["nxcals_variable_name"] raw_val = float(row["nxcals_value"]) ts = pd.to_datetime(row["nxcals_timestamp"], unit="ns", utc=True) - results.append(NXCalResult(full_varname, raw_val, ts, strip_i_meas(full_varname))) + results.append(NXCALSResult(full_varname, raw_val, ts, strip_i_meas(full_varname))) LOGGER.info(f"LHC value retrieved: {full_varname} = {raw_val:.2f} at {ts}") return results diff --git a/omc3/nxcals/mqt_extraction.py 
b/omc3/nxcals/mqt_extraction.py index 21beec64..dde48b40 100644 --- a/omc3/nxcals/mqt_extraction.py +++ b/omc3/nxcals/mqt_extraction.py @@ -17,7 +17,7 @@ # import jpype from pyspark.sql import SparkSession -from omc3.nxcals.knob_extraction import NXCalResult, get_knob_vals +from omc3.nxcals.knob_extraction import NXCALSResult, get_knob_vals logger = logging.getLogger(__name__) @@ -49,7 +49,7 @@ def get_mqts(beam: int) -> set[str]: def get_mqt_vals( spark: SparkSession, time: datetime, beam: int, delta_days: int = 1 -) -> list[NXCalResult]: +) -> list[NXCALSResult]: """ Retrieve MQT (Quadrupole Trim) knob values from NXCALS for a specific time and beam. @@ -76,7 +76,7 @@ def get_mqt_vals( return get_knob_vals(spark, time, beam, patterns, madx_mqts, "MQT: ", delta_days) -def knobs_to_madx(mqt_vals: list[NXCalResult]) -> str: +def knobs_to_madx(mqt_vals: list[NXCALSResult]) -> str: """ Convert a list of NXCalResult objects to a MAD-X script string. diff --git a/tests/unit/test_nxcals_mqt_extraction.py b/tests/unit/test_nxcals_mqt_extraction.py index 6091a4d8..5c1e438a 100644 --- a/tests/unit/test_nxcals_mqt_extraction.py +++ b/tests/unit/test_nxcals_mqt_extraction.py @@ -9,7 +9,7 @@ from omc3 import mqt_extractor from omc3.nxcals import mqt_extraction from omc3.nxcals.constants import EXTRACTED_MQTS_FILENAME -from omc3.nxcals.knob_extraction import NXCalResult +from omc3.nxcals.knob_extraction import NXCALSResult SAMPLE_DIR = Path(__file__).parent.parent / "inputs" / "knob_extractor" TEST_CASES = ( @@ -31,15 +31,15 @@ def _parse_mqt_line(line: str, tz: str = "Europe/Zurich") -> tuple[str, float, s return name, value, pc_name, timestamp -def _load_results_from_file(file_path: Path, tz: str = "Europe/Zurich") -> list[NXCalResult]: - results: list[NXCalResult] = [] +def _load_results_from_file(file_path: Path, tz: str = "Europe/Zurich") -> list[NXCALSResult]: + results: list[NXCALSResult] = [] for raw_line in file_path.read_text().splitlines(): line = raw_line.strip() if 
not line or line.startswith("!"): continue name, value, pc_name, timestamp = _parse_mqt_line(line, tz=tz) results.append( - NXCalResult( + NXCALSResult( name=name, value=value, timestamp=timestamp, @@ -72,7 +72,7 @@ def test_main_reproduces_reference_output(tmp_path, beam: int, sample_file: Path mqt_extractor.main(time=query_time.isoformat(), beam=beam, output=output_path) expected_results = [ - NXCalResult( + NXCALSResult( name=r.name, value=r.value, pc_name=r.pc_name, From 8dfce03bfb4a786a324180934b97d07808d8248b Mon Sep 17 00:00:00 2001 From: Joshua Gray <61458311+jgray-19@users.noreply.github.com> Date: Sat, 29 Nov 2025 11:38:20 +0100 Subject: [PATCH 24/37] Update time handling and parameter types for MQT extraction - Change time parsing to return UTC timezone-aware datetime. - Update delta_days parameter type from int to float across multiple functions. - Rename get_mqts to generate_mqt_names for clarity. - Adjust default values and documentation for delta_days. - Modify tests to reflect changes in function names and time handling. --- omc3/knob_extractor.py | 2 +- omc3/mqt_extractor.py | 17 ++++++--- omc3/nxcals/knob_extraction.py | 20 +++++----- omc3/nxcals/mqt_extraction.py | 15 +++++--- omc3/utils/iotools.py | 2 - .../knob_extractor/generate_mqt_references.py | 1 - tests/unit/test_nxcals_mqt_extraction.py | 38 +++++++++---------- 7 files changed, 49 insertions(+), 46 deletions(-) diff --git a/omc3/knob_extractor.py b/omc3/knob_extractor.py index 104c6e83..4fde9744 100644 --- a/omc3/knob_extractor.py +++ b/omc3/knob_extractor.py @@ -591,7 +591,7 @@ def _parse_time_from_str(time_str: str) -> datetime: """ Parse time from given string. """ # Now? --- if time_str.lower() == "now": - return datetime.now() + return datetime.now(timezone.utc) # ISOFormat? 
--- try: diff --git a/omc3/mqt_extractor.py b/omc3/mqt_extractor.py index db1c78a1..6934864b 100644 --- a/omc3/mqt_extractor.py +++ b/omc3/mqt_extractor.py @@ -49,12 +49,11 @@ '_2h' while setting the `time` argument to 'now' (default). -- **delta_days** *(int)*: +- **delta_days** *(float)*: Number of days to look back for data in NXCALS. - default: ``1`` - + default: ``0.25`` (e.g. 6 hours) """ @@ -128,9 +127,9 @@ def get_params(): ), }, delta_days={ - "type": int, + "type": float, "help": "Number of days to look back for data in NXCALS.", - "default": 1, + "default": 0.25, }, ) @@ -191,12 +190,18 @@ def _write_mqt_file(output: Path | str, mqt_vals, time: datetime, beam: int): def _parse_time(time: str, timedelta: str = None) -> datetime: - """Parse time from given time-input.""" + """Parse time from given time-input from command line.""" + from datetime import timezone + from omc3.knob_extractor import _add_time_delta, _parse_time_from_str t = _parse_time_from_str(time) if timedelta: t = _add_time_delta(t, timedelta) + if t.tzinfo is None: + # Since we were given a string, there is no way to know the intended timezone. + # We default to UTC to avoid confusion. + t = t.replace(tzinfo=timezone.utc) return t diff --git a/omc3/nxcals/knob_extraction.py b/omc3/nxcals/knob_extraction.py index 19133068..76744c7b 100644 --- a/omc3/nxcals/knob_extraction.py +++ b/omc3/nxcals/knob_extraction.py @@ -51,7 +51,7 @@ def get_knob_vals( patterns: list[str], expected_knobs: set[str] | None = None, log_prefix: str = "", - delta_days: int = 1, + delta_days: float = 0.25, ) -> list[NXCALSResult]: """ Retrieve knob values for a given beam and time using specified patterns. @@ -82,7 +82,7 @@ def get_knob_vals( expected_knobs (set[str] | None): Set of expected MAD-X knob names to validate and filter results. If None, returns all found knobs without validation. log_prefix (str): Prefix for logging messages to distinguish different extraction runs. 
- delta_days (int): Number of days to look back for data. Default is 1. + delta_days (float): Number of days to look back for data. Default is 0.25. Returns: list[NXCalResult]: List of NXCalResult objects containing MAD-X knob names, K-values, @@ -146,7 +146,7 @@ def get_knob_vals( def get_raw_vars( - spark: SparkSession, time: datetime, var_name: str, delta_days: int = 1 + spark: SparkSession, time: datetime, var_name: str, delta_days: float = 0.25 ) -> list[NXCALSResult]: """ Retrieve raw variable values from NXCALS. @@ -155,7 +155,7 @@ def get_raw_vars( spark (SparkSession): Active Spark session. time (datetime): Python datetime (timezone-aware recommended). var_name (str): Name or pattern of the variable(s) to retrieve. - delta_days (int): Number of days to look back for data. Default is 1. + delta_days (float): Number of days to look back for data. Default is 0.25. Returns: list[NXCalResult]: List of NXCalResult containing variable name, value, timestamp, @@ -172,7 +172,7 @@ def get_raw_vars( else: time = time.astimezone(timezone.utc) - # Look back delta_days (1 for mqts) to be sure we get at least one sample + # Look back delta_days, may need up to 1 day to find data start_time = time - timedelta(days=delta_days) end_time = time LOGGER.info(f"Retrieving raw variables {var_name} from {start_time} to {end_time}") @@ -260,11 +260,11 @@ def calc_k_from_iref(lsa_client, currents: dict[str, float], energy: float) -> d j_double = jpype.JClass("java.lang.Double") jmap = j_hash_map() - for k, v in currents.items(): - if v is None: - raise ValueError(f"Current for {k} is None") + for power_converter, current in currents.items(): + if current is None: + raise ValueError(f"Current for {power_converter} is None") # ensure primitive-compatible double values - jmap.put(k, j_double.valueOf(v)) # boxed; Java unboxes internally + jmap.put(power_converter, j_double.valueOf(current)) # boxed; Java unboxes internally # 3) Call: Map calculateKfromIREF(Map, double) out = 
lhc_service.calculateKfromIREF(jmap, float(energy)) @@ -326,7 +326,7 @@ def map_pc_name_to_madx(pc_name: str) -> str: circuit_name = pc_name # Apply MAD-X specific replacements - replacements = {"RQT": "KQT", "RCB": "ACB"} + replacements = {"RQ": "KQ", "RCB": "ACB"} for old, new in replacements.items(): circuit_name = circuit_name.replace(old, new) diff --git a/omc3/nxcals/mqt_extraction.py b/omc3/nxcals/mqt_extraction.py index dde48b40..f2e5026c 100644 --- a/omc3/nxcals/mqt_extraction.py +++ b/omc3/nxcals/mqt_extraction.py @@ -22,7 +22,7 @@ logger = logging.getLogger(__name__) -def get_mqts(beam: int) -> set[str]: +def generate_mqt_names(beam: int) -> set[str]: """ Generate the set of MAD-X MQT (Quadrupole Trim) variable names for a given beam. @@ -48,7 +48,7 @@ def get_mqts(beam: int) -> set[str]: def get_mqt_vals( - spark: SparkSession, time: datetime, beam: int, delta_days: int = 1 + spark: SparkSession, time: datetime, beam: int, delta_days: float = 0.25 ) -> list[NXCALSResult]: """ Retrieve MQT (Quadrupole Trim) knob values from NXCALS for a specific time and beam. @@ -59,18 +59,21 @@ def get_mqt_vals( Args: spark (SparkSession): Active Spark session for NXCALS queries. - time (datetime): The timestamp for which to retrieve the data (timezone-aware recommended). + time (datetime): The timestamp for which to retrieve the data (timezone-aware required). beam (int): The beam number (1 or 2). - delta_days (int): Number of days to look back for data. Default is 1. + delta_days (float): Number of days to look back for data. Default is 0.25. Returns: list[NXCalResult]: List of NXCalResult objects containing the MAD-X knob names, K-values, and timestamps. Raises: - ValueError: If beam is not 1 or 2 (propagated from get_mqts). + ValueError: If beam is not 1 or 2 (propagated from get_mqts), or if time is not timezone-aware. RuntimeError: If no data is found in NXCALS or LSA calculations fail. 
""" - madx_mqts = get_mqts(beam) + if time.tzinfo is None: + raise ValueError("Time must be timezone-aware.") + + madx_mqts = generate_mqt_names(beam) pattern = f"RPMBB.UA%.RQT%.A%B{beam}:I_MEAS" patterns = [pattern] return get_knob_vals(spark, time, beam, patterns, madx_mqts, "MQT: ", delta_days) diff --git a/omc3/utils/iotools.py b/omc3/utils/iotools.py index 614bdcdc..059ebfd1 100644 --- a/omc3/utils/iotools.py +++ b/omc3/utils/iotools.py @@ -11,11 +11,9 @@ import re import shutil import sys -from datetime import datetime from pathlib import Path from typing import TYPE_CHECKING, Any -import pandas as pd from generic_parser.entry_datatypes import DictAsString, get_instance_faker_meta, get_multi_class from generic_parser.entrypoint_parser import save_options_to_config from tfs import TfsDataFrame diff --git a/tests/inputs/knob_extractor/generate_mqt_references.py b/tests/inputs/knob_extractor/generate_mqt_references.py index 57a4ffd1..68f645bb 100644 --- a/tests/inputs/knob_extractor/generate_mqt_references.py +++ b/tests/inputs/knob_extractor/generate_mqt_references.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python3 """ Copy reference MQT extraction files for testing. 
diff --git a/tests/unit/test_nxcals_mqt_extraction.py b/tests/unit/test_nxcals_mqt_extraction.py index 5c1e438a..3df8dbce 100644 --- a/tests/unit/test_nxcals_mqt_extraction.py +++ b/tests/unit/test_nxcals_mqt_extraction.py @@ -1,6 +1,7 @@ from __future__ import annotations import re +from datetime import timezone from pathlib import Path import pandas as pd @@ -54,7 +55,7 @@ def _load_results_from_file(file_path: Path, tz: str = "Europe/Zurich") -> list[ def test_get_mqts_matches_sample(beam: int, sample_file: Path): sample_results = _load_results_from_file(sample_file) expected_names = {result.name for result in sample_results} - assert expected_names == mqt_extraction.get_mqts(beam=beam) + assert expected_names == mqt_extraction.generate_mqt_names(beam=beam) @pytest.mark.cern_network @@ -96,7 +97,7 @@ def test_main_reproduces_reference_output(tmp_path, beam: int, sample_file: Path @pytest.mark.cern_network def test_get_mqts_invalid_beam(): with pytest.raises(ValueError): - mqt_extraction.get_mqts(beam=3) + mqt_extraction.generate_mqt_names(beam=3) @pytest.mark.cern_network @@ -132,16 +133,13 @@ def test_main_returns_tfs_dataframe(tmp_path, beam: int, sample_file: Path): @pytest.mark.cern_network -@pytest.mark.parametrize("beam", [1, 2]) +@pytest.mark.parametrize("beam", [1, 2], ids=["beam1", "beam2"]) def test_main_with_timedelta(tmp_path, beam: int): """Test that timedelta parameter works correctly.""" - from datetime import datetime + from datetime import datetime, timedelta output_path = tmp_path / f"test_timedelta_b{beam}.madx" - # Get current time - now = datetime.now() - # Call with timedelta going back 1 day result_df = mqt_extractor.main( time="now", @@ -152,33 +150,33 @@ def test_main_with_timedelta(tmp_path, beam: int): # Verify the extraction time is approximately 1 day ago extraction_time = result_df.headers["EXTRACTION_TIME"] - time_diff = abs((now - extraction_time).total_seconds()) + expected_time = datetime.now(timezone.utc) - timedelta(days=1) + 
time_diff = abs((extraction_time - expected_time).total_seconds()) - # Should be close to 1 day (86400 seconds), allow 5 minute tolerance - one_day_seconds = 86400 - assert abs(time_diff - one_day_seconds) < 300, ( - f"Time difference should be ~1 day, but got {time_diff} seconds" + # Should be close to 1 day ago, allow 5 minute tolerance + assert time_diff < 300, ( + f"Extraction time should be ~1 day ago, but diff is {time_diff} seconds" ) @pytest.mark.cern_network -@pytest.mark.parametrize("beam", [1, 2]) +@pytest.mark.parametrize("beam", [1, 2], ids=["beam1", "beam2"]) def test_main_with_delta_days(tmp_path, beam: int): """Test that delta_days parameter is properly passed through.""" from datetime import datetime, timedelta output_path = tmp_path / f"test_delta_days_b{beam}.madx" - # Use a time 3 days ago with delta_days=5 to ensure we get data - past_time = datetime.now() - timedelta(days=3) + # Use a time 1 day ago with delta_days=1 to ensure we get data + past_time = datetime.now(timezone.utc) - timedelta(hours=2) - # This should work because we're looking back 5 days + # This should work because we're looking back 1 day result_df = mqt_extractor.main( - time=past_time.isoformat(), beam=beam, output=output_path, delta_days=5 + time=past_time.isoformat(), beam=beam, output=output_path, delta_days=1 / 12 ) # Should succeed and return valid data - assert len(result_df) == 16, "Expected 16 MQT entries with delta_days=5" + assert len(result_df) == 16, "Expected 16 MQT entries with delta_days=1" assert result_df.headers["BEAM"] == beam @@ -187,7 +185,7 @@ def test_parse_time_now(): from datetime import datetime result = mqt_extractor._parse_time("now") - now = datetime.now() + now = datetime.now(timezone.utc) # Should be very close to now (within 1 second) diff = abs((now - result).total_seconds()) @@ -198,7 +196,7 @@ def test_parse_time_with_timedelta(): """Test that _parse_time correctly applies timedelta.""" from datetime import datetime - now_str = 
datetime.now().isoformat() + now_str = datetime.now(timezone.utc).isoformat() # Test positive timedelta result_plus = mqt_extractor._parse_time(now_str, "1h") From 5c8216c30c4e3252bfdaafa26198b6ef4b3681dd Mon Sep 17 00:00:00 2001 From: Joshua Gray <61458311+jgray-19@users.noreply.github.com> Date: Sat, 29 Nov 2025 11:39:47 +0100 Subject: [PATCH 25/37] Simplify assertion message formatting in test_main_with_timedelta --- tests/unit/test_nxcals_mqt_extraction.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/tests/unit/test_nxcals_mqt_extraction.py b/tests/unit/test_nxcals_mqt_extraction.py index 3df8dbce..06fcfcc6 100644 --- a/tests/unit/test_nxcals_mqt_extraction.py +++ b/tests/unit/test_nxcals_mqt_extraction.py @@ -154,9 +154,7 @@ def test_main_with_timedelta(tmp_path, beam: int): time_diff = abs((extraction_time - expected_time).total_seconds()) # Should be close to 1 day ago, allow 5 minute tolerance - assert time_diff < 300, ( - f"Extraction time should be ~1 day ago, but diff is {time_diff} seconds" - ) + assert time_diff < 300, f"Extraction time should be ~1 day ago, but diff is {time_diff} seconds" @pytest.mark.cern_network From 81738e03e98e5e21c3b410afa838449ffb46a9cb Mon Sep 17 00:00:00 2001 From: Joshua Gray <61458311+jgray-19@users.noreply.github.com> Date: Sat, 29 Nov 2025 11:41:42 +0100 Subject: [PATCH 26/37] Add timezone support by importing timezone from datetime --- omc3/knob_extractor.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/omc3/knob_extractor.py b/omc3/knob_extractor.py index 4fde9744..b784e85d 100644 --- a/omc3/knob_extractor.py +++ b/omc3/knob_extractor.py @@ -83,7 +83,7 @@ import logging import math import re -from datetime import datetime +from datetime import datetime, timezone from pathlib import Path from typing import TYPE_CHECKING From ec2198f5a98083bf894e5660ea3a1c4349945273 Mon Sep 17 00:00:00 2001 From: Joshua Gray <61458311+jgray-19@users.noreply.github.com> Date: Sat, 29 Nov 2025 
11:44:13 +0100 Subject: [PATCH 27/37] Ruff the knob extractor and add to git blame --- .git-blame-ignore-revs | 1 + omc3/knob_extractor.py | 146 ++++++++++++++++++++++++----------------- 2 files changed, 86 insertions(+), 61 deletions(-) diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs index 51ca63dd..3b25d8fb 100644 --- a/.git-blame-ignore-revs +++ b/.git-blame-ignore-revs @@ -5,3 +5,4 @@ 820a51e3c67c8675880b9c949ac09f4db8bd8e9f 4e59cd0f7eb6a4279dc3f5cfbaed2b03b1918df3 0c19f7f7aa2e69a46c0e72fb99d4c3a29a1726c1 +81738e03 diff --git a/omc3/knob_extractor.py b/omc3/knob_extractor.py index b784e85d..0ebba0e8 100644 --- a/omc3/knob_extractor.py +++ b/omc3/knob_extractor.py @@ -61,6 +61,7 @@ """ + from __future__ import annotations ####### WORKAROUND FOR JAVA ISSUES WITH LHCOP ################################## @@ -97,29 +98,32 @@ from omc3.utils.mock import cern_network_import if TYPE_CHECKING: - from collections.abc import Sequence + from collections.abc import Sequence pytimber = cern_network_import("pytimber") LOGGER = get_logger(__name__) -AFS_ACC_MODELS_LHC = Path("/afs/cern.ch/eng/acc-models/lhc/current") # make sure 'current' linked correctly! +AFS_ACC_MODELS_LHC = Path( + "/afs/cern.ch/eng/acc-models/lhc/current" +) # make sure 'current' linked correctly! ACC_MODELS_LHC = Path("acc-models-lhc") KNOBS_FILE_ACC_MODELS = ACC_MODELS_LHC / "operation" / "knobs.txt" KNOBS_FILE_AFS = AFS_ACC_MODELS_LHC / "operation" / "knobs.txt" MINUS_CHARS: tuple[str, ...] = ("_", "-") STATE_VARIABLES: dict[str, str] = { - 'opticName': 'Optics', - 'beamProcess': 'Beam Process', - 'opticId': 'Optics ID', - 'hyperCycle': 'HyperCycle', + "opticName": "Optics", + "beamProcess": "Beam Process", + "opticId": "Optics ID", + "hyperCycle": "HyperCycle", # 'secondsInBeamProcess ': 'Beam Process running (s)', } class Col: - """ DataFrame Columns used in this script. 
""" + """DataFrame Columns used in this script.""" + madx: str = "madx" lsa: str = "lsa" scaling: str = "scaling" @@ -127,7 +131,8 @@ class Col: class Head: - """ TFS Headers used in this script.""" + """TFS Headers used in this script.""" + time: str = "EXTRACTION_TIME" @@ -179,26 +184,20 @@ class Head: "LHCBEAM1:IP-SDISP-QPBUMP", "LHCBEAM2:IP-SDISP-QPBUMP", ], - "mo": [ - "LHCBEAM1:LANDAU_DAMPING", - "LHCBEAM2:LANDAU_DAMPING" - ], + "mo": ["LHCBEAM1:LANDAU_DAMPING", "LHCBEAM2:LANDAU_DAMPING"], "lumi_scan": [ "LHCBEAM1:IP1_SEPSCAN_X_MM", "LHCBEAM1:IP1_SEPSCAN_Y_MM", "LHCBEAM2:IP1_SEPSCAN_X_MM", "LHCBEAM2:IP1_SEPSCAN_Y_MM", - "LHCBEAM1:IP2_SEPSCAN_X_MM", "LHCBEAM1:IP2_SEPSCAN_Y_MM", "LHCBEAM2:IP2_SEPSCAN_X_MM", "LHCBEAM2:IP2_SEPSCAN_Y_MM", - "LHCBEAM1:IP5_SEPSCAN_X_MM", "LHCBEAM1:IP5_SEPSCAN_Y_MM", "LHCBEAM2:IP5_SEPSCAN_X_MM", "LHCBEAM2:IP5_SEPSCAN_Y_MM", - "LHCBEAM1:IP8_SEPSCAN_X_MM", "LHCBEAM1:IP8_SEPSCAN_Y_MM", "LHCBEAM2:IP8_SEPSCAN_X_MM", @@ -230,7 +229,7 @@ def get_params(): return EntryPointParameters( knobs={ "type": str, - "nargs": '*', + "nargs": "*", "help": ( "A list of knob names or categories to extract. " f"Available categories are: {', '.join(KNOB_CATEGORIES.keys())}." @@ -261,17 +260,13 @@ def get_params(): ), }, state={ - "action": 'store_true', - "help": ( - "Prints the state of the StateTracker. " - "Does not extract anything else." - ), + "action": "store_true", + "help": ("Prints the state of the StateTracker. Does not extract anything else."), }, output={ "type": PathOrStr, "help": ( - "Specify user-defined output path. " - "This should probably be `model_dir/knobs.madx`" + "Specify user-defined output path. This should probably be `model_dir/knobs.madx`" ), }, knob_definitions={ @@ -286,17 +281,20 @@ def get_params(): @entrypoint( - get_params(), strict=True, + get_params(), + strict=True, argument_parser_args={ "epilog": USAGE_EXAMPLES, "formatter_class": argparse.RawDescriptionHelpFormatter, - "prog": "Knob Extraction Tool." 
- } + "prog": "Knob Extraction Tool.", + }, ) def main(opt) -> tfs.TfsDataFrame: - """ Main knob extracting function. """ - ldb = pytimber.LoggingDB(source="nxcals", loglevel=logging.ERROR, - sparkprops={"spark.ui.showConsoleProgress": "false"} + """Main knob extracting function.""" + ldb = pytimber.LoggingDB( + source="nxcals", + loglevel=logging.ERROR, + sparkprops={"spark.ui.showConsoleProgress": "false"}, ) time = _parse_time(opt.time, opt.timedelta) @@ -315,6 +313,7 @@ def main(opt) -> tfs.TfsDataFrame: # State Extraction ------------------------------------------------------------- + def get_state(ldb, time: datetime) -> dict[str, str]: """ Standalone function to gather and log state data from the StateTracker. @@ -348,9 +347,11 @@ def _get_state_as_df(state_dict: dict[str, str], time: datetime) -> tfs.TfsDataF Returns: tfs.DataFrame: States packed into dataframe with readable index. """ - state_df = tfs.TfsDataFrame(index=list(STATE_VARIABLES.values()), - columns=[Col.value, Col.lsa], - headers={Head.time: time}) + state_df = tfs.TfsDataFrame( + index=list(STATE_VARIABLES.values()), + columns=[Col.value, Col.lsa], + headers={Head.time: time}, + ) for name, value in state_dict.items(): state_df.loc[STATE_VARIABLES[name], Col.lsa] = name state_df.loc[STATE_VARIABLES[name], Col.value] = value @@ -359,6 +360,7 @@ def _get_state_as_df(state_dict: dict[str, str], time: datetime) -> tfs.TfsDataF # Knobs Extraction ------------------------------------------------------------- + def extract(ldb, knobs: Sequence[str], time: datetime) -> dict[str, float]: """ Standalone function to gather data from the StateTracker. @@ -383,7 +385,9 @@ def extract(ldb, knobs: Sequence[str], time: datetime) -> dict[str, float]: knobkey = f"LhcStateTracker:{knob}:target" knobs_extracted[knob] = None # to log that this was tried to be extracted. 
- knobvalue = ldb.get(knobkey, time.timestamp()) # use timestamp to preserve timezone info + knobvalue = ldb.get( + knobkey, time.timestamp() + ) # use timestamp to preserve timezone info if knobkey not in knobvalue: LOGGER.debug(f"{knob} not found in StateTracker") continue @@ -405,7 +409,7 @@ def extract(ldb, knobs: Sequence[str], time: datetime) -> dict[str, float]: def check_for_undefined_knobs(knobs_definitions: pd.DataFrame, knob_categories: Sequence[str]): - """ Check that all knobs are actually defined in the knobs-definitions. + """Check that all knobs are actually defined in the knobs-definitions. Args: @@ -416,7 +420,9 @@ def check_for_undefined_knobs(knobs_definitions: pd.DataFrame, knob_categories: KeyError: If one or more of the knobs don't have a definition. """ - knob_names = [knob for category in knob_categories for knob in KNOB_CATEGORIES.get(category, [category])] + knob_names = [ + knob for category in knob_categories for knob in KNOB_CATEGORIES.get(category, [category]) + ] undefined_knobs = [knob for knob in knob_names if knob not in knobs_definitions.index] if undefined_knobs: raise KeyError( @@ -425,9 +431,9 @@ def check_for_undefined_knobs(knobs_definitions: pd.DataFrame, knob_categories: ) -def _extract_and_gather(ldb, knobs_definitions: pd.DataFrame, - knob_categories: Sequence[str], - time: datetime) -> tfs.TfsDataFrame: +def _extract_and_gather( + ldb, knobs_definitions: pd.DataFrame, knob_categories: Sequence[str], time: datetime +) -> tfs.TfsDataFrame: """ Main function to gather data from the StateTracker and the knob-definitions. 
All given knobs (either in categories or as knob names) to be extracted @@ -451,16 +457,18 @@ def _extract_and_gather(ldb, knobs_definitions: pd.DataFrame, extracted_knobs = extract(ldb, knobs=knob_categories, time=time) knob_names = list(extracted_knobs.keys()) - knobs = tfs.TfsDataFrame(index=knob_names, - columns=[Col.lsa, Col.madx, Col.scaling, Col.value], - headers={Head.time: time}) + knobs = tfs.TfsDataFrame( + index=knob_names, + columns=[Col.lsa, Col.madx, Col.scaling, Col.value], + headers={Head.time: time}, + ) knobs[[Col.lsa, Col.madx, Col.scaling]] = knobs_definitions.loc[knob_names, :] knobs[Col.value] = pd.Series(extracted_knobs) return knobs def _write_knobsfile(output: Path | str, collected_knobs: tfs.TfsDataFrame): - """ Takes the collected knobs and writes them out into a text-file. """ + """Takes the collected knobs and writes them out into a text-file.""" collected_knobs = collected_knobs.copy() # to not modify the df # Sort the knobs by category @@ -490,26 +498,31 @@ def _write_knobsfile(output: Path | str, collected_knobs: tfs.TfsDataFrame): # Knobs Definitions ------------------------------------------------------------ + def _get_knobs_def_file(user_defined: Path | str | None = None) -> Path: - """ Check which knobs-definition file is appropriate to take. 
""" + """Check which knobs-definition file is appropriate to take.""" if user_defined is not None: LOGGER.info(f"Using user knobs-definition file: '{user_defined}") return Path(user_defined) if KNOBS_FILE_ACC_MODELS.is_file(): - LOGGER.info(f"Using given acc-models folder's knobs.txt as knobsdefinition file: '{KNOBS_FILE_ACC_MODELS}") + LOGGER.info( + f"Using given acc-models folder's knobs.txt as knobsdefinition file: '{KNOBS_FILE_ACC_MODELS}" + ) return KNOBS_FILE_ACC_MODELS if KNOBS_FILE_AFS.is_file(): # if all fails, fall back to lhc acc-models - LOGGER.info(f"Using afs-fallback acc-models folder's knobs.txt as knobs-definition file: '{KNOBS_FILE_AFS}'") + LOGGER.info( + f"Using afs-fallback acc-models folder's knobs.txt as knobs-definition file: '{KNOBS_FILE_AFS}'" + ) return KNOBS_FILE_AFS raise FileNotFoundError("None of the knobs-definition files are available.") def load_knobs_definitions(file_path: Path | str) -> pd.DataFrame: - """ Load the knobs-definition file and convert into a DataFrame. + """Load the knobs-definition file and convert into a DataFrame. Each line in this file should consist of at least three comma separated entries in the following order: madx-name, lsa-name, scaling factor. Other columns are ignored. 
@@ -530,17 +543,19 @@ def load_knobs_definitions(file_path: Path | str) -> pd.DataFrame: converters = {Col.madx: str.strip, Col.lsa: str.strip} # strip whitespaces dtypes = {Col.scaling: float} names = (Col.madx, Col.lsa, Col.scaling) - df = pd.read_csv(file_path, - comment="#", - usecols=list(range(len(names))), # only read the first columns - names=names, - dtype=dtypes, - converters=converters) + df = pd.read_csv( + file_path, + comment="#", + usecols=list(range(len(names))), # only read the first columns + names=names, + dtype=dtypes, + converters=converters, + ) return _to_knobs_dataframe(df) def _to_knobs_dataframe(df: pd.DataFrame) -> pd.DataFrame: - """ Adapts a DataFrame to the conventions used here: + """Adapts a DataFrame to the conventions used here: StateTracker variable name as index, all columns lower-case. Args: @@ -559,7 +574,7 @@ def _to_knobs_dataframe(df: pd.DataFrame) -> pd.DataFrame: def _parse_knobs_defintions(knobs_def_input: Path | str | pd.DataFrame | None) -> pd.DataFrame: - """ Parse the given knob-definitions either from a csv-file or from a DataFrame. """ + """Parse the given knob-definitions either from a csv-file or from a DataFrame.""" if isinstance(knobs_def_input, pd.DataFrame): return _to_knobs_dataframe(knobs_def_input) @@ -570,8 +585,10 @@ def _parse_knobs_defintions(knobs_def_input: Path | str | pd.DataFrame | None) - def get_madx_command(knob_data: pd.Series) -> str: if Col.value not in knob_data.index: - raise KeyError("Value entry not found in extracted knob_data. " - "Something went wrong as it should at least be NaN.") + raise KeyError( + "Value entry not found in extracted knob_data. " + "Something went wrong as it should at least be NaN." + ) if knob_data[Col.value] is None or pd.isna(knob_data[Col.value]): return f"! 
{knob_data[Col.madx]} : No Value extracted" return f"{knob_data[Col.madx]} := {knob_data[Col.value] * knob_data[Col.scaling]};" @@ -579,8 +596,9 @@ def get_madx_command(knob_data: pd.Series) -> str: # Time Tools ------------------------------------------------------------------- + def _parse_time(time: str, timedelta: str = None) -> datetime: - """ Parse time from given time-input. """ + """Parse time from given time-input.""" t = _parse_time_from_str(time) if timedelta: t = _add_time_delta(t, timedelta) @@ -588,7 +606,7 @@ def _parse_time(time: str, timedelta: str = None) -> datetime: def _parse_time_from_str(time_str: str) -> datetime: - """ Parse time from given string. """ + """Parse time from given string.""" # Now? --- if time_str.lower() == "now": return datetime.now(timezone.utc) @@ -611,14 +629,19 @@ def _parse_time_from_str(time_str: str) -> datetime: def _add_time_delta(time: datetime, delta_str: str) -> datetime: - """ Parse delta-string and add time-delta to time. """ + """Parse delta-string and add time-delta to time.""" sign = -1 if delta_str[0] in MINUS_CHARS else 1 all_deltas = re.findall(r"(\d+)(\w)", delta_str) # tuples (value, timeunit-char) # mapping char to the time-unit as accepted by relativedelta, # following ISO-8601 for time durations char2unit = { - "s": 'seconds', "m": 'minutes', "h": 'hours', - "d": 'days', "w": 'weeks', "M": 'months', "Y": "years", + "s": "seconds", + "m": "minutes", + "h": "hours", + "d": "days", + "w": "weeks", + "M": "months", + "Y": "years", } # add all deltas, which are tuples of (value, timeunit-char) time_parts = {char2unit[delta[1]]: sign * int(delta[0]) for delta in all_deltas} @@ -627,6 +650,7 @@ def _add_time_delta(time: datetime, delta_str: str) -> datetime: # Other tools ------------------------------------------------------------------ + def lsa2name(lsa_name: str) -> str: """LSA name -> Variable in Timber/StateTracker conversion.""" return lsa_name.replace("/", ":") From 
6643ed31d3d47f7712e0deb2e4061abc4a3a0086 Mon Sep 17 00:00:00 2001 From: Joshua Gray <61458311+jgray-19@users.noreply.github.com> Date: Sat, 29 Nov 2025 11:46:08 +0100 Subject: [PATCH 28/37] Update .git-blame-ignore-revs to include additional formatting commit SHA --- .git-blame-ignore-revs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs index 3b25d8fb..36effa28 100644 --- a/.git-blame-ignore-revs +++ b/.git-blame-ignore-revs @@ -5,4 +5,4 @@ 820a51e3c67c8675880b9c949ac09f4db8bd8e9f 4e59cd0f7eb6a4279dc3f5cfbaed2b03b1918df3 0c19f7f7aa2e69a46c0e72fb99d4c3a29a1726c1 -81738e03 +ec2198f5a98083bf894e5660ea3a1c4349945273 From ad1a536fb5398991f01e886769326ccdee02fae2 Mon Sep 17 00:00:00 2001 From: Joshua Gray <61458311+jgray-19@users.noreply.github.com> Date: Sat, 29 Nov 2025 11:55:47 +0100 Subject: [PATCH 29/37] Clarify LHC context in knob extraction module and related functions --- omc3/nxcals/knob_extraction.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/omc3/nxcals/knob_extraction.py b/omc3/nxcals/knob_extraction.py index 76744c7b..2a0b9f43 100644 --- a/omc3/nxcals/knob_extraction.py +++ b/omc3/nxcals/knob_extraction.py @@ -2,8 +2,8 @@ Knob Extraction --------------- -This module provides functionality to extract knob values from NXCALS and convert them to -MAD-X compatible format using LSA services. +This module provides functionality to extract knob values from NXCALS for the LHC and +convert them to MAD-X compatible format using LSA services. It handles retrieval of raw variable data from NXCALS, conversion of power converter currents to K-values, and mapping of power converter names to MAD-X naming @@ -54,7 +54,7 @@ def get_knob_vals( delta_days: float = 0.25, ) -> list[NXCALSResult]: """ - Retrieve knob values for a given beam and time using specified patterns. + Retrieve knob values for a given beam and time using specified patterns for the LHC. 
This is the main entry point for extracting magnet knob values from NXCALS. The function performs a complete workflow: @@ -218,7 +218,7 @@ def get_raw_vars( def get_energy(spark: SparkSession, time: datetime) -> tuple[float, pd.Timestamp]: """ - Retrieve the beam energy from NXCALS. + Retrieve the beam energy of the LHC from NXCALS. Args: spark (SparkSession): Active Spark session. @@ -242,7 +242,7 @@ def get_energy(spark: SparkSession, time: datetime) -> tuple[float, pd.Timestamp def calc_k_from_iref(lsa_client, currents: dict[str, float], energy: float) -> dict[str, float]: """ - Calculate K values from IREF using the LSA service. + Calculate K values in the LHC from IREF using the LSA service. Args: lsa_client: The LSA client instance. @@ -294,9 +294,11 @@ def strip_i_meas(text: str) -> str: return text.removesuffix(":I_MEAS") +# Note: this will have to be updated if we ever want to support other magnet types +# such as dipoles, sextupoles, octupoles, etc. def map_pc_name_to_madx(pc_name: str) -> str: """ - Convert a power converter name or circuit name to its corresponding MAD-X name. + Convert an LHC power converter name or circuit name to its corresponding MAD-X name. 
This function processes the input name by removing the ':I_MEAS' suffix if present, extracting the circuit name from full power converter names (starting with 'RPMBB' or 'RPL'), @@ -329,6 +331,4 @@ def map_pc_name_to_madx(pc_name: str) -> str: replacements = {"RQ": "KQ", "RCB": "ACB"} for old, new in replacements.items(): circuit_name = circuit_name.replace(old, new) - - # Return in lowercase as required by MAD-X return circuit_name.lower() From ee8e3ffeee5ac7358c06fd9e477c30d9595b0280 Mon Sep 17 00:00:00 2001 From: Joshua Gray <61458311+jgray-19@users.noreply.github.com> Date: Wed, 3 Dec 2025 16:30:30 +0100 Subject: [PATCH 30/37] Formatting --- omc3/correction/response_madx.py | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/omc3/correction/response_madx.py b/omc3/correction/response_madx.py index 0601dabb..ad5e063d 100644 --- a/omc3/correction/response_madx.py +++ b/omc3/correction/response_madx.py @@ -253,10 +253,10 @@ def _create_fullresponse_from_dict( # Create normalized dispersion and dividing BET by nominal model normalised_dispersion_x = np.divide( resp[columns.index(f"{DISPERSION}X")], np.sqrt(resp[columns.index(f"{BETA}X")]) - ) # noqa: N806 + ) normalised_dispersion_y = np.divide( resp[columns.index(f"{DISPERSION}Y")], np.sqrt(resp[columns.index(f"{BETA}Y")]) - ) # noqa: N806 + ) resp[columns.index(f"{BETA}X")] = np.divide( resp[columns.index(f"{BETA}X")], resp[columns.index(f"{BETA}X"), :, model_index][:, np.newaxis], @@ -270,24 +270,27 @@ def _create_fullresponse_from_dict( resp = np.subtract(resp, resp[:, :, model_index][:, :, np.newaxis]) normalised_dispersion_x = np.subtract( normalised_dispersion_x, normalised_dispersion_x[:, model_index][:, np.newaxis] - ) # noqa: N806 + ) normalised_dispersion_y = np.subtract( normalised_dispersion_y, normalised_dispersion_y[:, model_index][:, np.newaxis] - ) # noqa: N806 + ) # Remove difference of nominal model with itself (bunch of zeros) and divide by increment resp 
= np.delete(resp, model_index, axis=2) - normalised_dispersion_x = np.delete(normalised_dispersion_x, model_index, axis=1) # noqa: N806 - normalised_dispersion_y = np.delete(normalised_dispersion_y, model_index, axis=1) # noqa: N806 + normalised_dispersion_x = np.delete(normalised_dispersion_x, model_index, axis=1) + normalised_dispersion_y = np.delete(normalised_dispersion_y, model_index, axis=1) keys.remove("0") # Divide by increment - normalised_dispersion_x = np.divide(normalised_dispersion_x, resp[columns.index(f"{INCR}")]) # noqa: N806 - normalised_dispersion_y = np.divide(normalised_dispersion_y, resp[columns.index(f"{INCR}")]) # noqa: N806 + normalised_dispersion_x = np.divide(normalised_dispersion_x, resp[columns.index(f"{INCR}")]) + normalised_dispersion_y = np.divide(normalised_dispersion_y, resp[columns.index(f"{INCR}")]) resp = np.divide(resp, resp[columns.index(f"{INCR}")]) tune_arr = np.column_stack( - (resp[columns.index(f"{TUNE}1"), 0, :], resp[columns.index(f"{TUNE}2"), 0, :]) - ).T # noqa: N806 + ( + resp[columns.index(f"{TUNE}1"), 0, :], + resp[columns.index(f"{TUNE}2"), 0, :], + ) + ).T # fmt: off with suppress_warnings(ComplexWarning): # raised as everything is complex-type now From 1b2032dbcc178179c294d3ca775a1fdfc7740fe5 Mon Sep 17 00:00:00 2001 From: Joshua Gray <61458311+jgray-19@users.noreply.github.com> Date: Wed, 3 Dec 2025 16:50:58 +0100 Subject: [PATCH 31/37] Refactor time parsing functions and consolidate time tools for consistency across modules --- omc3/knob_extractor.py | 67 +++++----------------------------------- omc3/mqt_extractor.py | 24 ++------------ omc3/utils/time_tools.py | 65 +++++++++++++++++++++++++++++++++++++- 3 files changed, 74 insertions(+), 82 deletions(-) diff --git a/omc3/knob_extractor.py b/omc3/knob_extractor.py index 0ebba0e8..707c67f0 100644 --- a/omc3/knob_extractor.py +++ b/omc3/knob_extractor.py @@ -83,22 +83,21 @@ import argparse import logging import math -import re -from datetime import datetime, 
timezone from pathlib import Path from typing import TYPE_CHECKING import pandas as pd import tfs -from dateutil.relativedelta import relativedelta from generic_parser import EntryPointParameters, entrypoint from omc3.utils.iotools import PathOrStr, PathOrStrOrDataFrame from omc3.utils.logging_tools import get_logger from omc3.utils.mock import cern_network_import +from omc3.utils.time_tools import parse_time if TYPE_CHECKING: from collections.abc import Sequence + from datetime import datetime pytimber = cern_network_import("pytimber") @@ -111,7 +110,6 @@ KNOBS_FILE_ACC_MODELS = ACC_MODELS_LHC / "operation" / "knobs.txt" KNOBS_FILE_AFS = AFS_ACC_MODELS_LHC / "operation" / "knobs.txt" -MINUS_CHARS: tuple[str, ...] = ("_", "-") STATE_VARIABLES: dict[str, str] = { "opticName": "Optics", "beamProcess": "Beam Process", @@ -184,7 +182,10 @@ class Head: "LHCBEAM1:IP-SDISP-QPBUMP", "LHCBEAM2:IP-SDISP-QPBUMP", ], - "mo": ["LHCBEAM1:LANDAU_DAMPING", "LHCBEAM2:LANDAU_DAMPING"], + "mo": [ + "LHCBEAM1:LANDAU_DAMPING", + "LHCBEAM2:LANDAU_DAMPING", + ], "lumi_scan": [ "LHCBEAM1:IP1_SEPSCAN_X_MM", "LHCBEAM1:IP1_SEPSCAN_Y_MM", @@ -296,7 +297,7 @@ def main(opt) -> tfs.TfsDataFrame: loglevel=logging.ERROR, sparkprops={"spark.ui.showConsoleProgress": "false"}, ) - time = _parse_time(opt.time, opt.timedelta) + time = parse_time(opt.time, opt.timedelta) if opt.state: # Only print the state of the machine. @@ -594,60 +595,6 @@ def get_madx_command(knob_data: pd.Series) -> str: return f"{knob_data[Col.madx]} := {knob_data[Col.value] * knob_data[Col.scaling]};" -# Time Tools ------------------------------------------------------------------- - - -def _parse_time(time: str, timedelta: str = None) -> datetime: - """Parse time from given time-input.""" - t = _parse_time_from_str(time) - if timedelta: - t = _add_time_delta(t, timedelta) - return t - - -def _parse_time_from_str(time_str: str) -> datetime: - """Parse time from given string.""" - # Now? 
--- - if time_str.lower() == "now": - return datetime.now(timezone.utc) - - # ISOFormat? --- - try: - return datetime.fromisoformat(time_str) - except (TypeError, ValueError): - LOGGER.debug("Could not parse time string as ISO format") - pass - - # Timestamp? --- - try: - return datetime.fromtimestamp(int(time_str)) - except (TypeError, ValueError): - LOGGER.debug("Could not parse time string as a timestamp") - pass - - raise ValueError(f"Couldn't read datetime '{time_str}'") - - -def _add_time_delta(time: datetime, delta_str: str) -> datetime: - """Parse delta-string and add time-delta to time.""" - sign = -1 if delta_str[0] in MINUS_CHARS else 1 - all_deltas = re.findall(r"(\d+)(\w)", delta_str) # tuples (value, timeunit-char) - # mapping char to the time-unit as accepted by relativedelta, - # following ISO-8601 for time durations - char2unit = { - "s": "seconds", - "m": "minutes", - "h": "hours", - "d": "days", - "w": "weeks", - "M": "months", - "Y": "years", - } - # add all deltas, which are tuples of (value, timeunit-char) - time_parts = {char2unit[delta[1]]: sign * int(delta[0]) for delta in all_deltas} - return time + relativedelta(**time_parts) - - # Other tools ------------------------------------------------------------------ diff --git a/omc3/mqt_extractor.py b/omc3/mqt_extractor.py index 6934864b..d1df2380 100644 --- a/omc3/mqt_extractor.py +++ b/omc3/mqt_extractor.py @@ -8,7 +8,7 @@ MQT magnets are trim quadrupoles located in the LHC arcs, used for fine-tuning the optics. There are two types per arc (focusing 'f' and defocusing 'd'), resulting in 16 MQT magnets -per beam (8 arcs × 2 types). +per beam (8 arcs x 2 types). .. note:: Please note that access to the GPN is required to use this functionality. 
@@ -70,6 +70,7 @@ from omc3.utils.iotools import PathOrStr from omc3.utils.logging_tools import get_logger from omc3.utils.mock import cern_network_import +from omc3.utils.time_tools import parse_time if TYPE_CHECKING: from datetime import datetime @@ -155,7 +156,7 @@ def main(opt) -> tfs.TfsDataFrame: for madx name, value, timestamp, and power converter name. """ spark = spark_session_builder.get_or_create(conf={"spark.ui.showConsoleProgress": "false"}) - time = _parse_time(opt.time, opt.timedelta) + time = parse_time(opt.time, opt.timedelta) LOGGER.info(f"---- EXTRACTING MQT KNOBS @ {time} for Beam {opt.beam} ----") mqt_vals = get_mqt_vals(spark, time, opt.beam, delta_days=opt.delta_days) @@ -186,24 +187,5 @@ def _write_mqt_file(output: Path | str, mqt_vals, time: datetime, beam: int): outfile.write(knobs_to_madx(mqt_vals)) -# Time Tools (copied from knob_extractor for consistency) --------------------- - - -def _parse_time(time: str, timedelta: str = None) -> datetime: - """Parse time from given time-input from command line.""" - from datetime import timezone - - from omc3.knob_extractor import _add_time_delta, _parse_time_from_str - - t = _parse_time_from_str(time) - if timedelta: - t = _add_time_delta(t, timedelta) - if t.tzinfo is None: - # Since we were given a string, there is no way to know the intended timezone. - # We default to UTC to avoid confusion. - t = t.replace(tzinfo=timezone.utc) - return t - - if __name__ == "__main__": main() diff --git a/omc3/utils/time_tools.py b/omc3/utils/time_tools.py index 14a8669c..82b8c672 100644 --- a/omc3/utils/time_tools.py +++ b/omc3/utils/time_tools.py @@ -7,12 +7,18 @@ """ from __future__ import annotations -from datetime import datetime, timedelta +import logging +import re +from datetime import datetime, timedelta, timezone import dateutil.tz as tz +from dateutil.relativedelta import relativedelta from omc3.definitions.formats import TIME +LOGGER = logging.getLogger(__name__) +MINUS_CHARS: tuple[str, ...] 
= ("_", "-") + # Datetime Conversions ######################################################### @@ -78,6 +84,63 @@ def check_tz(localized_dt, timezone): f"but was '{localized_dt.tzinfo}'" ) +# Knob extractor specific time tools ############################################# + + +def _parse_time_from_str(time_str: str) -> datetime: + """Parse time from given string.""" + # Now? --- + if time_str.lower() == "now": + return datetime.now(timezone.utc) + + # ISOFormat? --- + try: + return datetime.fromisoformat(time_str) + except (TypeError, ValueError): + LOGGER.debug("Could not parse time string as ISO format") + pass + + # Timestamp? --- + try: + return datetime.fromtimestamp(int(time_str)) + except (TypeError, ValueError): + LOGGER.debug("Could not parse time string as a timestamp") + pass + + raise ValueError(f"Couldn't read datetime '{time_str}'") + + +def _add_time_delta(time: datetime, delta_str: str) -> datetime: + """Parse delta-string and add time-delta to time.""" + sign = -1 if delta_str[0] in MINUS_CHARS else 1 + all_deltas = re.findall(r"(\d+)(\w)", delta_str) # tuples (value, timeunit-char) + # mapping char to the time-unit as accepted by relativedelta, + # following ISO-8601 for time durations + char2unit = { + "s": "seconds", + "m": "minutes", + "h": "hours", + "d": "days", + "w": "weeks", + "M": "months", + "Y": "years", + } + # add all deltas, which are tuples of (value, timeunit-char) + time_parts = {char2unit[delta[1]]: sign * int(delta[0]) for delta in all_deltas} + return time + relativedelta(**time_parts) + + +def parse_time(time: str, timedelta: str = None) -> datetime: + """Parse time from given time-input from command line.""" + t = _parse_time_from_str(time) + if timedelta: + t = _add_time_delta(t, timedelta) + if t.tzinfo is None: + # Since we were given a string, there is no way to know the intended timezone. + # We default to UTC to avoid confusion. 
+ t = t.replace(tzinfo=timezone.utc) + return t + # AccDatetime Classes ########################################################## From ca36dcc40bd7b1fae8295d6d0cb738774b9f1299 Mon Sep 17 00:00:00 2001 From: Joshua Gray <61458311+jgray-19@users.noreply.github.com> Date: Wed, 3 Dec 2025 16:51:05 +0100 Subject: [PATCH 32/37] Refactor MADX path resolution in CorrectionModelCreator for improved readability --- omc3/model/accelerators/lhc.py | 8 ++++++-- omc3/model/model_creators/abstract_model_creator.py | 9 +++------ 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/omc3/model/accelerators/lhc.py b/omc3/model/accelerators/lhc.py index 8d258cb5..c01d0129 100644 --- a/omc3/model/accelerators/lhc.py +++ b/omc3/model/accelerators/lhc.py @@ -128,7 +128,11 @@ class Lhc(Accelerator): def get_parameters(): params = super(Lhc, Lhc).get_parameters() params.add_parameter( - name="beam", type=int, choices=(1, 2), required=True, help="Beam to use." + name="beam", + type=int, + choices=(1, 2), + required=True, + help="Beam to use.", ) params.add_parameter( name="year", @@ -329,7 +333,7 @@ def important_phase_advances(self) -> list[list[str]] | None: return None def get_accel_file(self, filename: Path | str) -> Path: - return Path(LHC_DIR / self.year / filename).absolute() + return (LHC_DIR / self.year / filename).absolute() # Private Methods ############################################################## def _get_corrector_elems(self) -> Path: diff --git a/omc3/model/model_creators/abstract_model_creator.py b/omc3/model/model_creators/abstract_model_creator.py index 72f4539c..78b220a1 100644 --- a/omc3/model/model_creators/abstract_model_creator.py +++ b/omc3/model/model_creators/abstract_model_creator.py @@ -510,12 +510,9 @@ def __init__( self.twiss_out = self.resolve_madx_path(twiss_out) # Take the directory of the twiss output as output dir - self.jobfile = self.resolve_madx_path( - self.twiss_out.parent / f"job.create_{self.twiss_out.stem}.madx" - ) - self.logfile = 
self.resolve_madx_path( - self.twiss_out.parent / f"job.create_{self.twiss_out.stem}.log" - ) + self.jobfile = self.twiss_out.parent / f"job.create_{self.twiss_out.stem}.madx" + + self.logfile = self.twiss_out.parent / f"job.create_{self.twiss_out.stem}.log" self.corr_files = [self.resolve_madx_path(f) for f in corr_files] self.update_dpp = update_dpp From e97c9d8ea38109ad5c079aaf64e8dbf9352bf5cb Mon Sep 17 00:00:00 2001 From: Joshua Gray <61458311+jgray-19@users.noreply.github.com> Date: Wed, 3 Dec 2025 17:02:10 +0100 Subject: [PATCH 33/37] Remove old comment from previous version --- omc3/nxcals/knob_extraction.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/omc3/nxcals/knob_extraction.py b/omc3/nxcals/knob_extraction.py index 2a0b9f43..85a65732 100644 --- a/omc3/nxcals/knob_extraction.py +++ b/omc3/nxcals/knob_extraction.py @@ -253,7 +253,7 @@ def calc_k_from_iref(lsa_client, currents: dict[str, float], energy: float) -> d dict[str, float]: Dictionary of K values keyed by variable name. 
""" # 1) Use the **instance** PJLSA already created - lhc_service = lsa_client._lhcService # <-- INSTANCE (do NOT use self._LhcService) + lhc_service = lsa_client._lhcService # 2) Build a java.util.HashMap (not a Python dict) j_hash_map = jpype.JClass("java.util.HashMap") From 3b5e5cc983e5167be4874923f2002070a0dd17bd Mon Sep 17 00:00:00 2001 From: Joshua Gray <61458311+jgray-19@users.noreply.github.com> Date: Wed, 3 Dec 2025 17:03:50 +0100 Subject: [PATCH 34/37] Enforce tz aware --- omc3/nxcals/knob_extraction.py | 5 ++--- omc3/utils/time_tools.py | 4 +--- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/omc3/nxcals/knob_extraction.py b/omc3/nxcals/knob_extraction.py index 85a65732..c6005566 100644 --- a/omc3/nxcals/knob_extraction.py +++ b/omc3/nxcals/knob_extraction.py @@ -168,9 +168,8 @@ def get_raw_vars( # Ensure time is in UTC if time.tzinfo is None: - time = time.replace(tzinfo=timezone.utc) - else: - time = time.astimezone(timezone.utc) + raise ValueError("Datetime object must be timezone-aware") + time = time.astimezone(timezone.utc) # Look back delta_days, may need up to 1 day to find data start_time = time - timedelta(days=delta_days) diff --git a/omc3/utils/time_tools.py b/omc3/utils/time_tools.py index 82b8c672..d10323cb 100644 --- a/omc3/utils/time_tools.py +++ b/omc3/utils/time_tools.py @@ -136,9 +136,7 @@ def parse_time(time: str, timedelta: str = None) -> datetime: if timedelta: t = _add_time_delta(t, timedelta) if t.tzinfo is None: - # Since we were given a string, there is no way to know the intended timezone. - # We default to UTC to avoid confusion. 
- t = t.replace(tzinfo=timezone.utc) + raise ValueError("Datetime object must be timezone-aware") return t From c171cacbfff3d0d3ab045a7ef27be98489a73897 Mon Sep 17 00:00:00 2001 From: Joshua Gray <61458311+jgray-19@users.noreply.github.com> Date: Wed, 3 Dec 2025 17:24:18 +0100 Subject: [PATCH 35/37] Enforce timezones --- omc3/knob_extractor.py | 4 +--- tests/unit/test_knob_extractor.py | 29 +++++++++++++---------------- 2 files changed, 14 insertions(+), 19 deletions(-) diff --git a/omc3/knob_extractor.py b/omc3/knob_extractor.py index 707c67f0..09baef62 100644 --- a/omc3/knob_extractor.py +++ b/omc3/knob_extractor.py @@ -266,9 +266,7 @@ def get_params(): }, output={ "type": PathOrStr, - "help": ( - "Specify user-defined output path. This should probably be `model_dir/knobs.madx`" - ), + "help": "Specify user-defined output path. This should probably be `model_dir/knobs.madx`", }, knob_definitions={ "type": PathOrStrOrDataFrame, diff --git a/tests/unit/test_knob_extractor.py b/tests/unit/test_knob_extractor.py index b94dfcad..2ce30e55 100644 --- a/tests/unit/test_knob_extractor.py +++ b/tests/unit/test_knob_extractor.py @@ -18,10 +18,8 @@ STATE_VARIABLES, Col, Head, - _add_time_delta, _extract_and_gather, _parse_knobs_defintions, - _parse_time, _write_knobsfile, check_for_undefined_knobs, get_madx_command, @@ -30,6 +28,7 @@ lsa2name, main, ) +from omc3.utils.time_tools import _add_time_delta, parse_time from tests.conftest import cli_args INPUTS = Path(__file__).parent.parent / "inputs" / "knob_extractor" @@ -115,7 +114,7 @@ class MockTimber: monkeypatch.setattr(knob_extractor, "pytimber", MockTimber()) - time = datetime.now() + time = datetime.now(timezone.utc) # Main --- with caplog.at_level(logging.INFO): if commandline: @@ -341,40 +340,38 @@ def test_load_knobdefinitions_fail_wrong_scaling(self, tmp_path): class TestTime: @pytest.mark.basic def test_time_and_delta(self): - time_str = "2022-06-26T03:00" - t1 = _parse_time(time_str) + time_str = 
"2022-06-26T03:00+00:00" + t1 = parse_time(time_str) - assert t1 == datetime(2022, 6, 26, 3, 0, 0) + assert t1 == datetime(2022, 6, 26, 3, 0, 0, tzinfo=timezone.utc) # 2 hours earlier t2 = _add_time_delta(t1, "_2h") - assert t2 == datetime(2022, 6, 26, 1, 0, 0) + assert t2 == datetime(2022, 6, 26, 1, 0, 0, tzinfo=timezone.utc) # 1 week earlier t2 = _add_time_delta(t1, "_1w") - assert t2 == datetime(2022, 6, 19, 3, 0, 0) + assert t2 == datetime(2022, 6, 19, 3, 0, 0, tzinfo=timezone.utc) # 1 week and 1 hour earlier t2 = _add_time_delta(t1, "_1w1h") - assert t2 == datetime(2022, 6, 19, 2, 0, 0) + assert t2 == datetime(2022, 6, 19, 2, 0, 0, tzinfo=timezone.utc) # 1 month later t2 = _add_time_delta(t1, "1M") - assert t2 == datetime(2022, 7, 26, 3, 0, 0) + assert t2 == datetime(2022, 7, 26, 3, 0, 0, tzinfo=timezone.utc) # 20 years later t2 = _add_time_delta(t1, "20Y") - assert t2 == datetime(2042, 6, 26, 3, 0, 0) + assert t2 == datetime(2042, 6, 26, 3, 0, 0, tzinfo=timezone.utc) - t3 = _parse_time(time_str, "20Y") + t3 = parse_time(time_str, "20Y") assert t2 == t3 @pytest.mark.basic def test_timezones(self): - assert ( - _parse_time("2022-06-26T03:00+02:00") - == - datetime(2022, 6, 26, 3, 0, 0, tzinfo=timezone(timedelta(seconds=7200))) + assert parse_time("2022-06-26T03:00+02:00") == datetime( + 2022, 6, 26, 3, 0, 0, tzinfo=timezone(timedelta(seconds=7200)) ) From 927f394ae10e5b67febd551e0f030a206e49a7c3 Mon Sep 17 00:00:00 2001 From: Joshua Gray <61458311+jgray-19@users.noreply.github.com> Date: Wed, 3 Dec 2025 17:31:26 +0100 Subject: [PATCH 36/37] Improve _get_madx_main_sequence_loading --- omc3/model/model_creators/lhc_model_creator.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/omc3/model/model_creators/lhc_model_creator.py b/omc3/model/model_creators/lhc_model_creator.py index f702ddca..b0705d28 100644 --- a/omc3/model/model_creators/lhc_model_creator.py +++ b/omc3/model/model_creators/lhc_model_creator.py @@ -236,10 +236,6 @@ def 
_get_sequence_initialize_script(self) -> str: see below. """ accel: Lhc = self.accel - acc_model_path = None - if accel.acc_model_path is not None: - acc_model_path = Path(self.resolve_madx_path(accel.acc_model_path)) - madx_script = ( f"{self._get_madx_script_info_comments()}\n\n" f"call, file = '{self.resolve_madx_path(accel.model_dir / MACROS_DIR / GENERAL_MACROS)}';\n" @@ -255,7 +251,7 @@ def _get_sequence_initialize_script(self) -> str: madx_script += ( "option, -echo; ! suppress output from base sequence loading to keep the log small\n" ) - madx_script += self._get_madx_main_sequence_loading(acc_model_path) + madx_script += self._get_madx_main_sequence_loading() madx_script += "\noption, echo; ! re-enable output to see the optics settings\n" madx_script += "\n! ---- Call optics and other modifiers ----\n" @@ -348,10 +344,12 @@ def _get_madx_script_info_comments(self) -> str: ) return info_comments - def _get_madx_main_sequence_loading(self, acc_model_path: Path | None) -> str: + def _get_madx_main_sequence_loading(self) -> str: accel: Lhc = self.accel - if acc_model_path is not None: + if accel.acc_model_path is not None: + acc_model_path = Path(self.resolve_madx_path(accel.acc_model_path)) + main_seq_rel = acc_model_path / "lhc.seq" main_call = f"call, file = '{main_seq_rel}';" if accel.year.startswith("hl"): From 11d022ca59075651fd41f2ace2c4e3e98178b6b5 Mon Sep 17 00:00:00 2001 From: Joshua Gray <61458311+jgray-19@users.noreply.github.com> Date: Wed, 3 Dec 2025 17:38:32 +0100 Subject: [PATCH 37/37] Update time handling in knob and MQT extractors for improved clarity and consistency --- omc3/knob_extractor.py | 11 +++++------ omc3/mqt_extractor.py | 11 +++++------ tests/unit/test_nxcals_mqt_extraction.py | 14 +++++++++----- 3 files changed, 19 insertions(+), 17 deletions(-) diff --git a/omc3/knob_extractor.py b/omc3/knob_extractor.py index 09baef62..a2a619a4 100644 --- a/omc3/knob_extractor.py +++ b/omc3/knob_extractor.py @@ -44,8 +44,8 @@ - **time** 
*(str)*: At what time to extract the knobs. Accepts ISO-format (YYYY-MM- - DDThh:mm:ss), timestamp or 'now'. The default timezone for the ISO- - format is local time, but you can force e.g. UTC by adding +00:00. + DDThh:mm:ss) with timezone, timestamp or 'now'. Timezone must be + specified for ISO-format (e.g. +00:00 for UTC). default: ``now`` @@ -212,7 +212,7 @@ class Head: USAGE_EXAMPLES = """Usage Examples: -python -m omc3.knob_extractor --knobs disp chroma --time 2022-05-04T14:00 +python -m omc3.knob_extractor --knobs disp chroma --time 2022-05-04T14:00+00:00 extracts the chromaticity and dispersion knobs at 14h on May 4th 2022 python -m omc3.knob_extractor --knobs disp chroma --time now _2h @@ -241,9 +241,8 @@ def get_params(): "type": str, "help": ( "At what time to extract the knobs. " - "Accepts ISO-format (YYYY-MM-DDThh:mm:ss), timestamp or 'now'. " - "The default timezone for the ISO-format is local time, " - "but you can force e.g. UTC by adding +00:00." + "Accepts ISO-format (YYYY-MM-DDThh:mm:ss) with timezone, timestamp or 'now'. " + "Timezone must be specified for ISO-format (e.g. +00:00 for UTC)." ), "default": "now", }, diff --git a/omc3/mqt_extractor.py b/omc3/mqt_extractor.py index d1df2380..39c2e4c0 100644 --- a/omc3/mqt_extractor.py +++ b/omc3/mqt_extractor.py @@ -25,8 +25,8 @@ - **time** *(str)*: At what time to extract the MQT knobs. Accepts ISO-format (YYYY-MM- - DDThh:mm:ss), timestamp or 'now'. The default timezone for the ISO- - format is local time, but you can force e.g. UTC by adding +00:00. + DDThh:mm:ss) with timezone, timestamp or 'now'. Timezone must be + specified for ISO-format (e.g. +00:00 for UTC). 
default: ``now`` @@ -81,7 +81,7 @@ USAGE_EXAMPLES = """Usage Examples: -python -m omc3.mqt_extractor --beam 1 --time 2022-05-04T14:00 +python -m omc3.mqt_extractor --beam 1 --time 2022-05-04T14:00+00:00 extracts the MQT knobs for beam 1 at 14h on May 4th 2022 python -m omc3.mqt_extractor --beam 2 --time now --timedelta _2h @@ -102,9 +102,8 @@ def get_params(): "type": str, "help": ( "At what time to extract the MQT knobs. " - "Accepts ISO-format (YYYY-MM-DDThh:mm:ss), timestamp or 'now'. " - "The default timezone for the ISO-format is local time, " - "but you can force e.g. UTC by adding +00:00." + "Accepts ISO-format (YYYY-MM-DDThh:mm:ss) with timezone, timestamp or 'now'. " + "Timezone must be specified for ISO-format (e.g. +00:00 for UTC)." ), "default": "now", }, diff --git a/tests/unit/test_nxcals_mqt_extraction.py b/tests/unit/test_nxcals_mqt_extraction.py index 06fcfcc6..75e380ba 100644 --- a/tests/unit/test_nxcals_mqt_extraction.py +++ b/tests/unit/test_nxcals_mqt_extraction.py @@ -182,27 +182,31 @@ def test_parse_time_now(): """Test that _parse_time correctly handles 'now'.""" from datetime import datetime - result = mqt_extractor._parse_time("now") + from omc3.utils.time_tools import parse_time + + result = parse_time("now") now = datetime.now(timezone.utc) # Should be very close to now (within 1 second) diff = abs((now - result).total_seconds()) - assert diff < 1, f"_parse_time('now') should return current time, diff was {diff}s" + assert diff < 1, f"parse_time('now') should return current time, diff was {diff}s" def test_parse_time_with_timedelta(): """Test that _parse_time correctly applies timedelta.""" from datetime import datetime + from omc3.utils.time_tools import parse_time + now_str = datetime.now(timezone.utc).isoformat() # Test positive timedelta - result_plus = mqt_extractor._parse_time(now_str, "1h") - result_base = mqt_extractor._parse_time(now_str) + result_plus = parse_time(now_str, "1h") + result_base = parse_time(now_str) diff_plus = 
(result_plus - result_base).total_seconds() assert abs(diff_plus - 3600) < 1, f"1h timedelta should add 3600s, got {diff_plus}s" # Test negative timedelta - result_minus = mqt_extractor._parse_time(now_str, "_2h") + result_minus = parse_time(now_str, "_2h") diff_minus = (result_minus - result_base).total_seconds() assert abs(diff_minus + 7200) < 1, f"_2h timedelta should subtract 7200s, got {diff_minus}s"