diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs index 51ca63dd..36effa28 100644 --- a/.git-blame-ignore-revs +++ b/.git-blame-ignore-revs @@ -5,3 +5,4 @@ 820a51e3c67c8675880b9c949ac09f4db8bd8e9f 4e59cd0f7eb6a4279dc3f5cfbaed2b03b1918df3 0c19f7f7aa2e69a46c0e72fb99d4c3a29a1726c1 +ec2198f5a98083bf894e5660ea3a1c4349945273 diff --git a/doc/entrypoints/scripts.rst b/doc/entrypoints/scripts.rst index 5d2575ae..dfbe557e 100644 --- a/doc/entrypoints/scripts.rst +++ b/doc/entrypoints/scripts.rst @@ -6,7 +6,7 @@ Scripts :noindex: -.. automodule:: omc3.scripts.kmod_averages +.. automodule:: omc3.scripts.kmod_average :members: :noindex: @@ -46,6 +46,11 @@ Scripts :noindex: +.. automodule:: omc3.scripts.create_logbook_entry + :members: + :noindex: + + .. automodule:: omc3.scripts.lhc_corrector_list_check :members: :noindex: diff --git a/doc/modules/model.rst b/doc/modules/model.rst index 23c944a4..d3742580 100644 --- a/doc/modules/model.rst +++ b/doc/modules/model.rst @@ -85,3 +85,11 @@ Model :members: :noindex: + +.. automodule:: omc3.model.model_creators.abstract_model_creator + :members: + :noindex: + +.. automodule:: omc3.model.model_creators.segment_creator + :members: + :noindex: diff --git a/doc/modules/nxcals.rst b/doc/modules/nxcals.rst new file mode 100644 index 00000000..01c237ab --- /dev/null +++ b/doc/modules/nxcals.rst @@ -0,0 +1,16 @@ +NXCALS +************************** + +.. automodule:: omc3.nxcals.constants + :members: + :noindex: + + +.. automodule:: omc3.nxcals.knob_extraction + :members: + :noindex: + + +.. automodule:: omc3.nxcals.mqt_extraction + :members: + :noindex: diff --git a/omc3/correction/response_madx.py b/omc3/correction/response_madx.py index b51f2c6f..ad5e063d 100644 --- a/omc3/correction/response_madx.py +++ b/omc3/correction/response_madx.py @@ -12,6 +12,7 @@ :author: Lukas Malina, Joschua Dilly, Jaime (...) Coello de Portugal """ + from __future__ import annotations import copy @@ -59,17 +60,17 @@ def create_fullresponse( variable_categories: Sequence[str], delta_k: float = 2e-5, num_proc: int = multiprocessing.cpu_count(), - temp_dir: Path | None = None + temp_dir: Path | None = None, ) -> dict[str, pd.DataFrame]: - """ Generate a dictionary containing response matrices for - beta, phase, dispersion, tune and coupling and saves it to a file. - - Args: - accel_inst : Accelerator Instance. - variable_categories (list): Categories of the variables/knobs to use. (from .json) - delta_k (float): delta K1L to be applied to quads for sensitivity matrix - num_proc (int): Number of processes to use in parallel. - temp_dir (str): temporary directory. If ``None``, uses folder of original_jobfile. + """Generate a dictionary containing response matrices for + beta, phase, dispersion, tune and coupling and save it to a file. + + Args: + accel_inst (Accelerator): Accelerator Instance. + variable_categories (list): Categories of the variables/knobs to use. (from .json) + delta_k (float): delta K1L to be applied to quads for the sensitivity matrix. + num_proc (int): Number of processes to use in parallel. + temp_dir (Path): temporary directory. If ``None``, uses folder of original_jobfile. 
""" LOG.debug("Generating Fullresponse via Mad-X.") with timeit(lambda t: LOG.debug(f" Total time generating fullresponse: {t} s")): @@ -96,11 +97,11 @@ def _generate_madx_jobs( variables: Sequence[str], delta_k: float, num_proc: int, - temp_dir: Path + temp_dir: Path, ) -> dict[str, float]: - """ Generates madx job-files """ + """Generates madx job-files""" LOG.debug("Generating MAD-X jobfiles.") - incr_dict = {'0': 0.0} + incr_dict = {"0": 0.0} compute_deltap: bool = ORBIT_DPP in variables no_dpp_vars = [var for var in variables if var != ORBIT_DPP] vars_per_proc = int(np.ceil(len(no_dpp_vars) / num_proc)) @@ -112,7 +113,7 @@ def _generate_madx_jobs( # This is here only for multiple iteration of the global correction # By including dpp here, it means that if deltap is in variables and dpp is not 0, the orbit and tune magnets change # We have to be very careful that DELTAP_NAME is not used ANYWHERE else in MAD-X - madx_job += f"{ORBIT_DPP} = {accel_inst.dpp};\n" # Set deltap to 0 + madx_job += f"{ORBIT_DPP} = {accel_inst.dpp};\n" # Set deltap to 0 # get update deltap setup from model creator madx_job += creator.get_update_deltap_script(deltap=ORBIT_DPP) @@ -127,44 +128,44 @@ def _generate_madx_jobs( if var_idx >= len(no_dpp_vars): break var = no_dpp_vars[var_idx] + incr_dict[var] = delta_k current_job += f"{var} = {var}{delta_k:+.15e};\n" - current_job += f"twiss, file='{str(temp_dir / f'twiss.{var}')}'{deltap_twiss};\n" + current_job += f"twiss, file='{creator.resolve_madx_path(temp_dir / f'twiss.{var}')}'{deltap_twiss};\n" current_job += f"{var} = {var}{-delta_k:+.15e};\n\n" if proc_idx == num_proc - 1: - current_job += f"twiss, file='{str(temp_dir / 'twiss.0')}'{deltap_twiss};\n" + current_job += ( + f"twiss, file='{creator.resolve_madx_path(temp_dir / 'twiss.0')}'{deltap_twiss};\n" + ) - if compute_deltap: # If ORBIT_DPP is in variables, we run this in the last iteration - # Due to the match and correction of the orbit, this needs to be run at the end of the process + if compute_deltap: + # If ORBIT_DPP is in variables, we run this in the last iteration + # Due to the matching and correction of the orbit, this needs to be run at the end of the process incr_dict[ORBIT_DPP] = delta_k current_job += f"{ORBIT_DPP} = {ORBIT_DPP}{delta_k:+.15e};\n" - current_job += creator.get_update_deltap_script(deltap=ORBIT_DPP) # Do twiss, correct, match - current_job += f"twiss, deltap={ORBIT_DPP}, file='{str(temp_dir/f'twiss.{ORBIT_DPP}')}';\n" + # Do twiss, correct, match + current_job += creator.get_update_deltap_script(deltap=ORBIT_DPP) + current_job += f"twiss, deltap={ORBIT_DPP}, file='{creator.resolve_madx_path(temp_dir / f'twiss.{ORBIT_DPP}')}';\n" jobfile_path.write_text(current_job) return incr_dict def _get_madx_job(accel_inst: Accelerator) -> str: - # use relative paths as we use model_dir as cwd - model_dir_backup = accel_inst.model_dir - accel_inst.model_dir = Path() - # get nominal setup from creator creator = _get_nominal_model_creator(accel_inst) job_content = creator.get_base_madx_script() job_content += ( "select, flag=twiss, clear;\n" f"select, flag=twiss, pattern='{accel_inst.RE_DICT[AccElementTypes.BPMS]}', " - "column=NAME,S,BETX,ALFX,BETY,ALFY,DX,DY,DPX,DPY,X,Y,K1L,MUX,MUY,R11,R12,R21,R22;\n\n") + "column=NAME,S,BETX,ALFX,BETY,ALFY,DX,DY,DPX,DPY,X,Y,K1L,MUX,MUY,R11,R12,R21,R22;\n\n" + ) - # restore model_dir - accel_inst.model_dir = model_dir_backup return job_content def _get_nominal_model_creator(accel_inst: Accelerator) -> ModelCreator: - """ Get the nominal model creator, to 
which we can add the change of parameters. + """Get the nominal model creator, to which we can add the change of parameters. This is always done on the nominal model, not the best knowledge model, to ensure that the response matrix is in the most linear regime and therefore most accurate @@ -174,8 +175,8 @@ def _get_nominal_model_creator(accel_inst: Accelerator) -> ModelCreator: return creator_class(accel_inst) -def _call_madx(process_pool: multiprocessing.Pool, temp_dir: str, num_proc: int) -> None: # type: ignore - """ Call madx in parallel """ +def _call_madx(process_pool: multiprocessing.Pool, temp_dir: str, num_proc: int) -> None: # type: ignore + """Call madx in parallel""" LOG.debug(f"Starting {num_proc:d} MAD-X jobs...") madx_jobs = [_get_jobfiles(temp_dir, index) for index in range(num_proc)] process_pool.map(_launch_single_job, madx_jobs) @@ -183,7 +184,7 @@ def _call_madx(process_pool: multiprocessing.Pool, temp_dir: str, num_proc: int) def _clean_up(temp_dir: Path, num_proc: int) -> None: - """ Merge Logfiles and clean temporary outputfiles """ + """Merge Logfiles and clean temporary outputfiles""" LOG.debug("Cleaning output and building log...") full_log = "" for index in range(num_proc): @@ -197,23 +198,19 @@ def _clean_up(temp_dir: Path, num_proc: int) -> None: # write compressed full log file full_log_name = "response_madx_full.log" zipfile.ZipFile( - temp_dir / f"{full_log_name}.zip", - mode="w", - compression=zipfile.ZIP_DEFLATED + temp_dir / f"{full_log_name}.zip", mode="w", compression=zipfile.ZIP_DEFLATED ).writestr(full_log_name, full_log) def _load_madx_results( - variables: list[str], - process_pool, - incr_dict: dict, - temp_dir: Path + variables: list[str], process_pool, incr_dict: dict, temp_dir: Path ) -> dict[str, tfs.TfsDataFrame]: - """ Load the madx results in parallel and return var-tfs dictionary """ + """Load the madx results in parallel and return var-tfs dictionary""" LOG.debug("Loading Madx Results.") vars_and_paths = [] - for value in variables + ['0']: + for value in variables + ["0"]: vars_and_paths.append((value, temp_dir)) + LOG.debug(f"Retrieving Twiss files for variables: {vars_and_paths}") var_to_twiss = {} for var, tfs_data in process_pool.map(_load_and_remove_twiss, vars_and_paths): tfs_data[INCR] = incr_dict[var] @@ -221,13 +218,28 @@ def _load_madx_results( return var_to_twiss -def _create_fullresponse_from_dict(var_to_twiss: dict[str, tfs.TfsDataFrame]) -> dict[str, pd.DataFrame]: - """ Convert var-tfs dictionary to fullresponse dictionary. 
""" +def _create_fullresponse_from_dict( + var_to_twiss: dict[str, tfs.TfsDataFrame], +) -> dict[str, pd.DataFrame]: + """Convert var-tfs dictionary to fullresponse dictionary.""" var_to_twiss = _add_coupling(var_to_twiss) keys = list(var_to_twiss.keys()) - columns = [f"{PHASE_ADV}X", f"{PHASE_ADV}Y", f"{BETA}X", f"{BETA}Y", f"{DISPERSION}X", f"{DISPERSION}Y", - f"{F1001}R", f"{F1001}I", f"{F1010}R", f"{F1010}I", f"{TUNE}1", f"{TUNE}2", INCR] + columns = [ + f"{PHASE_ADV}X", + f"{PHASE_ADV}Y", + f"{BETA}X", + f"{BETA}Y", + f"{DISPERSION}X", + f"{DISPERSION}Y", + f"{F1001}R", + f"{F1001}I", + f"{F1010}R", + f"{F1010}I", + f"{TUNE}1", + f"{TUNE}2", + INCR, + ] bpms = var_to_twiss["0"].index resp = np.empty((len(keys), bpms.size, len(columns))) @@ -239,34 +251,48 @@ def _create_fullresponse_from_dict(var_to_twiss: dict[str, tfs.TfsDataFrame]) -> model_index = list(keys).index("0") # Create normalized dispersion and dividing BET by nominal model - NDX_arr = np.divide(resp[columns.index(f"{DISPERSION}X")], np.sqrt(resp[columns.index(f"{BETA}X")])) # noqa: N806 - NDY_arr = np.divide(resp[columns.index(f"{DISPERSION}Y")], np.sqrt(resp[columns.index(f"{BETA}Y")])) # noqa: N806 + normalised_dispersion_x = np.divide( + resp[columns.index(f"{DISPERSION}X")], np.sqrt(resp[columns.index(f"{BETA}X")]) + ) + normalised_dispersion_y = np.divide( + resp[columns.index(f"{DISPERSION}Y")], np.sqrt(resp[columns.index(f"{BETA}Y")]) + ) resp[columns.index(f"{BETA}X")] = np.divide( resp[columns.index(f"{BETA}X")], - resp[columns.index(f"{BETA}X"), :, model_index][:, np.newaxis] + resp[columns.index(f"{BETA}X"), :, model_index][:, np.newaxis], ) resp[columns.index(f"{BETA}Y")] = np.divide( resp[columns.index(f"{BETA}Y")], - resp[columns.index(f"{BETA}Y"), :, model_index][:, np.newaxis] + resp[columns.index(f"{BETA}Y"), :, model_index][:, np.newaxis], ) # Subtracting nominal model from data resp = np.subtract(resp, resp[:, :, model_index][:, :, np.newaxis]) - NDX_arr = np.subtract(NDX_arr, NDX_arr[:, model_index][:, np.newaxis]) # noqa: N806 - NDY_arr = np.subtract(NDY_arr, NDY_arr[:, model_index][:, np.newaxis]) # noqa: N806 + normalised_dispersion_x = np.subtract( + normalised_dispersion_x, normalised_dispersion_x[:, model_index][:, np.newaxis] + ) + normalised_dispersion_y = np.subtract( + normalised_dispersion_y, normalised_dispersion_y[:, model_index][:, np.newaxis] + ) # Remove difference of nominal model with itself (bunch of zeros) and divide by increment resp = np.delete(resp, model_index, axis=2) - NDX_arr = np.delete(NDX_arr, model_index, axis=1) # noqa: N806 - NDY_arr = np.delete(NDY_arr, model_index, axis=1) # noqa: N806 + normalised_dispersion_x = np.delete(normalised_dispersion_x, model_index, axis=1) + normalised_dispersion_y = np.delete(normalised_dispersion_y, model_index, axis=1) keys.remove("0") # Divide by increment - NDX_arr = np.divide(NDX_arr, resp[columns.index(f"{INCR}")]) # noqa: N806 - NDY_arr = np.divide(NDY_arr, resp[columns.index(f"{INCR}")]) # noqa: N806 + normalised_dispersion_x = np.divide(normalised_dispersion_x, resp[columns.index(f"{INCR}")]) + normalised_dispersion_y = np.divide(normalised_dispersion_y, resp[columns.index(f"{INCR}")]) resp = np.divide(resp, resp[columns.index(f"{INCR}")]) - Q_arr = np.column_stack((resp[columns.index(f"{TUNE}1"), 0, :], resp[columns.index(f"{TUNE}2"), 0, :])).T # noqa: N806 - + tune_arr = np.column_stack( + ( + resp[columns.index(f"{TUNE}1"), 0, :], + resp[columns.index(f"{TUNE}2"), 0, :], + ) + ).T + + # fmt: off with 
suppress_warnings(ComplexWarning): # raised as everything is complex-type now return { f"{PHASE_ADV}X": pd.DataFrame(data=resp[columns.index(f"{PHASE_ADV}X")], index=bpms, columns=keys).astype(np.float64), @@ -275,29 +301,32 @@ def _create_fullresponse_from_dict(var_to_twiss: dict[str, tfs.TfsDataFrame]) -> f"{BETA}Y": pd.DataFrame(data=resp[columns.index(f"{BETA}Y")], index=bpms, columns=keys).astype(np.float64), f"{DISPERSION}X": pd.DataFrame(data=resp[columns.index(f"{DISPERSION}X")], index=bpms, columns=keys).astype(np.float64), f"{DISPERSION}Y": pd.DataFrame(data=resp[columns.index(f"{DISPERSION}Y")], index=bpms, columns=keys).astype(np.float64), - f"{NORM_DISPERSION}X": pd.DataFrame(data=NDX_arr, index=bpms, columns=keys).astype(np.float64), - f"{NORM_DISPERSION}Y": pd.DataFrame(data=NDY_arr, index=bpms, columns=keys).astype(np.float64), + f"{NORM_DISPERSION}X": pd.DataFrame(data=normalised_dispersion_x, index=bpms, columns=keys).astype(np.float64), + f"{NORM_DISPERSION}Y": pd.DataFrame(data=normalised_dispersion_y, index=bpms, columns=keys).astype(np.float64), f"{F1001}R": pd.DataFrame(data=resp[columns.index(f"{F1001}R")], index=bpms, columns=keys).astype(np.float64), f"{F1001}I": pd.DataFrame(data=resp[columns.index(f"{F1001}I")], index=bpms, columns=keys).astype(np.float64), f"{F1010}R": pd.DataFrame(data=resp[columns.index(f"{F1010}R")], index=bpms, columns=keys).astype(np.float64), f"{F1010}I": pd.DataFrame(data=resp[columns.index(f"{F1010}I")], index=bpms, columns=keys).astype(np.float64), - f"{TUNE}": pd.DataFrame(data=Q_arr, index=[f"{TUNE}1", f"{TUNE}2"], columns=keys).astype(np.float64), + f"{TUNE}": pd.DataFrame(data=tune_arr, index=[f"{TUNE}1", f"{TUNE}2"], columns=keys).astype(np.float64), } + # fmt: on def _get_jobfiles(temp_dir: Path, index: int) -> Path: - """ Return names for jobfile and iterfile according to index """ + """Return names for jobfile and iterfile according to index""" return temp_dir / f"job.iterate.{index:d}.madx" def _launch_single_job(inputfile_path: Path) -> None: - """ Function for pool to start a single madx job """ + """Function for pool to start a single madx job""" log_file = inputfile_path.with_name(inputfile_path.name + ".log") madx_wrapper.run_file(inputfile_path, log_file=log_file, cwd=inputfile_path.parent) -def _load_and_remove_twiss(var_and_path: tuple[str, Path]) -> tuple[str, tfs.TfsDataFrame]: - """ Function for pool to retrieve results """ +def _load_and_remove_twiss( + var_and_path: tuple[str, Path], +) -> tuple[str, tfs.TfsDataFrame]: + """Function for pool to retrieve results""" (var, path) = var_and_path twissfile = path / f"twiss.{var}" tfs_data = tfs.read(twissfile, index=NAME) @@ -307,7 +336,9 @@ def _load_and_remove_twiss(var_and_path: tuple[str, Path]) -> tuple[str, tfs.Tfs return var, tfs_data -def _add_coupling(dict_of_tfs: dict[str, tfs.TfsDataFrame]) -> dict[str, tfs.TfsDataFrame]: +def _add_coupling( + dict_of_tfs: dict[str, tfs.TfsDataFrame], +) -> dict[str, tfs.TfsDataFrame]: """ For each TfsDataFrame in the input dictionary, computes the coupling RDTs and adds a column for the real and imaginary parts of the computed coupling RDTs. 
Returns a copy of the input dictionary with @@ -321,7 +352,7 @@ def _add_coupling(dict_of_tfs: dict[str, tfs.TfsDataFrame]) -> dict[str, tfs.Tfs """ result_dict_of_tfs = copy.deepcopy(dict_of_tfs) with timeit(lambda elapsed: LOG.debug(f" Time adding coupling: {elapsed} s")): - for var, tfs_dframe in result_dict_of_tfs.items(): # already copies, so it's safe to act on them + for tfs_dframe in result_dict_of_tfs.values(): coupling_rdts_df = coupling_via_cmatrix(tfs_dframe) tfs_dframe[f"{F1001}R"] = np.real(coupling_rdts_df[f"{F1001}"]).astype(np.float64) tfs_dframe[f"{F1001}I"] = np.imag(coupling_rdts_df[f"{F1001}"]).astype(np.float64) diff --git a/omc3/correction/sequence_evaluation.py b/omc3/correction/sequence_evaluation.py index a407206e..b032f01b 100644 --- a/omc3/correction/sequence_evaluation.py +++ b/omc3/correction/sequence_evaluation.py @@ -9,6 +9,7 @@ Compare results with case all==0. """ + from __future__ import annotations import multiprocessing @@ -44,17 +45,17 @@ def evaluate_for_variables( variable_categories: Sequence[str], order: int = 4, num_proc: int = multiprocessing.cpu_count(), - temp_dir: Path | None = None + temp_dir: Path | None = None, ) -> dict: - """ Generate a dictionary containing response matrices for - beta, phase, dispersion, tune and coupling and saves it to a file. - - Args: - accel_inst (Accelerator): Accelerator Instance. - variable_categories (list): Categories of the variables/knobs to use. (from .json) - order (int or tuple): Max or [min, max] of K-value order to use. - num_proc (int): Number of processes to use in parallel. - temp_dir (Path): temporary directory. If ``None``, uses model_dir. + """Generate a dictionary mapping each given variable to the K-values + (magnet strengths) it affects, compared against the case where all variables are zero. + + Args: + accel_inst (Accelerator): Accelerator Instance. + variable_categories (list): Categories of the variables/knobs to use. (from .json) + order (int or tuple): Max or [min, max] of K-value order to use. + num_proc (int): Number of processes to use in parallel. + temp_dir (Path): temporary directory. If ``None``, uses model_dir. 
""" LOG.debug("Generating Fullresponse via Mad-X.") with timeit(lambda elapsed: LOG.debug(f" Total time generating fullresponse: {elapsed}s")): @@ -87,7 +88,8 @@ def _generate_madx_jobs( num_proc: int, temp_dir: Path, ) -> None: - """ Generates madx job-files """ + """Generates madx job-files""" + def _assign(var, value): return f"{var:s} = {value:d};\n" @@ -120,14 +122,14 @@ def _do_macro(var): job_content += "\n" # last thing to do: get baseline - if proc_index+1 == num_proc: + if proc_index == num_proc - 1: job_content += _do_macro("0") _get_jobfile(temp_dir, proc_index).write_text(job_content) -def _call_madx(process_pool: multiprocessing.Pool, temp_dir: str, num_proc: int) -> None: # type: ignore - """ Call madx in parallel """ +def _call_madx(process_pool: multiprocessing.Pool, temp_dir: str, num_proc: int) -> None: # type: ignore + """Call madx in parallel""" LOG.debug(f"Starting {num_proc:d} MAD-X jobs...") madx_jobs = [_get_jobfile(temp_dir, index) for index in range(num_proc)] failed = [LOG.error(fail) for fail in process_pool.map(_launch_single_job, madx_jobs) if fail] @@ -137,9 +139,9 @@ def _call_madx(process_pool: multiprocessing.Pool, temp_dir: str, num_proc: int) def _clean_up(variables: list[str], temp_dir: Path, num_proc: int) -> None: - """ Merge Logfiles and clean temporary outputfiles """ + """Merge Logfiles and clean temporary outputfiles""" LOG.debug("Cleaning output and printing log...") - for var in (variables + ["0"]): + for var in variables + ["0"]: table_file = _get_tablefile(temp_dir, var) with suppress(FileNotFoundError): # py3.8 missing_ok=True table_file.unlink() @@ -166,10 +168,10 @@ def _clean_up(variables: list[str], temp_dir: Path, num_proc: int) -> None: def _load_madx_results( variables: Sequence[str], k_values: list[float], - process_pool: multiprocessing.Pool, # type: ignore + process_pool: multiprocessing.Pool, # type: ignore temp_dir: str, ) -> dict: - """ Load the madx results in parallel and return var-tfs dictionary """ + """Load the madx results in parallel and return var-tfs dictionary""" LOG.debug("Loading Madx Results.") path_and_vars = [] for value in variables: @@ -179,7 +181,7 @@ def _load_madx_results( mapping = dict([(order, {}) for order in k_values] + [(f"{order}L", {}) for order in k_values]) for var, tfs_data in process_pool.map(_load_and_remove_twiss, path_and_vars): for order in k_values: - diff = (tfs_data[order] - base_tfs[order]) + diff = tfs_data[order] - base_tfs[order] mask = diff != 0 # drop zeros, maybe abs(diff) < eps ? 
k_list = diff.loc[mask] mapping[order][var] = k_list @@ -191,7 +193,7 @@ def _load_madx_results( def _get_orders(order: int) -> Sequence[str]: - """ Returns a list of strings with K-values to be used """ + """Returns a list of strings with K-values to be used""" try: return [f"K{i:d}{s:s}" for i in range(3) for s in ["", "S"]] except TypeError: @@ -199,22 +201,22 @@ def _get_orders(order: int) -> Sequence[str]: def _get_jobfile(folder: Path, index: int) -> Path: - """ Return names for jobfile and iterfile according to index """ + """Return names for jobfile and iterfile according to index""" return folder / f"job.varmap.{index:d}.madx" def _get_tablefile(folder: Path, var: str) -> Path: - """ Return name of the variable-specific table file """ + """Return name of the variable-specific table file""" return folder / f"table.{var}" def _get_surveyfile(folder: Path, index: int) -> Path: - """ Returns the name of the temporary survey file """ + """Returns the name of the temporary survey file""" return folder / f"survey.{index:d}.tmp" def _launch_single_job(inputfile_path: Path): - """ Function for pool to start a single madx job """ + """Function for pool to start a single madx job""" log_file = inputfile_path.with_name(f"{inputfile_path.name}.log") try: madx_wrapper.run_file(inputfile_path, log_file=log_file, cwd=inputfile_path.parent) @@ -225,16 +227,18 @@ def _launch_single_job(inputfile_path: Path): def _load_and_remove_twiss(path_and_var: tuple[Path, str]) -> tuple[str, tfs.TfsDataFrame]: - """ Function for pool to retrieve results """ + """Function for pool to retrieve results""" path, var = path_and_var twissfile = _get_tablefile(path, var) tfs_data = tfs.read(twissfile, index="NAME") return var, tfs_data -def _create_basic_job(accel_inst: Accelerator, k_values: list[str], variables: Sequence[str]) -> str: - """ Create the madx-job basics needed - TEMPFILE needs to be replaced in the returned string. +def _create_basic_job( + accel_inst: Accelerator, k_values: list[str], variables: Sequence[str] +) -> str: + """Create the madx-job basics needed + TEMPFILE needs to be replaced in the returned string. """ # get nominal setup from creator creator_class = get_model_creator_class(accel_inst, CreatorType.NOMINAL) @@ -297,16 +301,18 @@ def _create_basic_job(accel_inst: Accelerator, k_values: list[str], variables: S def check_varmap_file(accel_inst: Accelerator, vars_categories): - """ Checks on varmap file and creates it if not in model folder. - """ + """Checks on varmap file and creates it if not in model folder.""" if accel_inst.modifiers is None: - raise ValueError("Optics not defined. Please provide modifiers.madx. " - "Otherwise MADX evaluation might be unstable.") + raise ValueError( + "Optics not defined. Please provide modifiers.madx. " + "Otherwise MADX evaluation might be unstable." + ) - varmap_path = Path(accel_inst.model_dir) / f"varmap_{'_'.join(sorted(set(vars_categories)))}.{EXT}" + varmap_path = ( + Path(accel_inst.model_dir) / f"varmap_{'_'.join(sorted(set(vars_categories)))}.{EXT}" + ) if not varmap_path.exists(): - LOG.info(f"Variable mapping '{str(varmap_path):s}' not found. " - "Evaluating it via madx.") + LOG.info(f"Variable mapping '{str(varmap_path):s}' not found. 
Evaluating it via madx.") mapping = evaluate_for_variables(accel_inst, vars_categories) write_varmap(varmap_path, mapping) diff --git a/omc3/knob_extractor.py b/omc3/knob_extractor.py index 104c6e83..a2a619a4 100644 --- a/omc3/knob_extractor.py +++ b/omc3/knob_extractor.py @@ -44,8 +44,8 @@ - **time** *(str)*: At what time to extract the knobs. Accepts ISO-format (YYYY-MM- - DDThh:mm:ss), timestamp or 'now'. The default timezone for the ISO- - format is local time, but you can force e.g. UTC by adding +00:00. + DDThh:mm:ss) with timezone, timestamp or 'now'. Timezone must be + specified for ISO-format (e.g. +00:00 for UTC). default: ``now`` @@ -61,6 +61,7 @@ """ + from __future__ import annotations ####### WORKAROUND FOR JAVA ISSUES WITH LHCOP ################################## @@ -82,44 +83,45 @@ import argparse import logging import math -import re -from datetime import datetime from pathlib import Path from typing import TYPE_CHECKING import pandas as pd import tfs -from dateutil.relativedelta import relativedelta from generic_parser import EntryPointParameters, entrypoint from omc3.utils.iotools import PathOrStr, PathOrStrOrDataFrame from omc3.utils.logging_tools import get_logger from omc3.utils.mock import cern_network_import +from omc3.utils.time_tools import parse_time if TYPE_CHECKING: - from collections.abc import Sequence + from collections.abc import Sequence + from datetime import datetime pytimber = cern_network_import("pytimber") LOGGER = get_logger(__name__) -AFS_ACC_MODELS_LHC = Path("/afs/cern.ch/eng/acc-models/lhc/current") # make sure 'current' linked correctly! +AFS_ACC_MODELS_LHC = Path( + "/afs/cern.ch/eng/acc-models/lhc/current" +) # make sure 'current' linked correctly! ACC_MODELS_LHC = Path("acc-models-lhc") KNOBS_FILE_ACC_MODELS = ACC_MODELS_LHC / "operation" / "knobs.txt" KNOBS_FILE_AFS = AFS_ACC_MODELS_LHC / "operation" / "knobs.txt" -MINUS_CHARS: tuple[str, ...] = ("_", "-") STATE_VARIABLES: dict[str, str] = { - 'opticName': 'Optics', - 'beamProcess': 'Beam Process', - 'opticId': 'Optics ID', - 'hyperCycle': 'HyperCycle', + "opticName": "Optics", + "beamProcess": "Beam Process", + "opticId": "Optics ID", + "hyperCycle": "HyperCycle", # 'secondsInBeamProcess ': 'Beam Process running (s)', } class Col: - """ DataFrame Columns used in this script. 
""" + """DataFrame Columns used in this script.""" + madx: str = "madx" lsa: str = "lsa" scaling: str = "scaling" @@ -127,7 +129,8 @@ class Col: class Head: - """ TFS Headers used in this script.""" + """TFS Headers used in this script.""" + time: str = "EXTRACTION_TIME" @@ -181,24 +184,21 @@ class Head: ], "mo": [ "LHCBEAM1:LANDAU_DAMPING", - "LHCBEAM2:LANDAU_DAMPING" + "LHCBEAM2:LANDAU_DAMPING", ], "lumi_scan": [ "LHCBEAM1:IP1_SEPSCAN_X_MM", "LHCBEAM1:IP1_SEPSCAN_Y_MM", "LHCBEAM2:IP1_SEPSCAN_X_MM", "LHCBEAM2:IP1_SEPSCAN_Y_MM", - "LHCBEAM1:IP2_SEPSCAN_X_MM", "LHCBEAM1:IP2_SEPSCAN_Y_MM", "LHCBEAM2:IP2_SEPSCAN_X_MM", "LHCBEAM2:IP2_SEPSCAN_Y_MM", - "LHCBEAM1:IP5_SEPSCAN_X_MM", "LHCBEAM1:IP5_SEPSCAN_Y_MM", "LHCBEAM2:IP5_SEPSCAN_X_MM", "LHCBEAM2:IP5_SEPSCAN_Y_MM", - "LHCBEAM1:IP8_SEPSCAN_X_MM", "LHCBEAM1:IP8_SEPSCAN_Y_MM", "LHCBEAM2:IP8_SEPSCAN_X_MM", @@ -212,7 +212,7 @@ class Head: USAGE_EXAMPLES = """Usage Examples: -python -m omc3.knob_extractor --knobs disp chroma --time 2022-05-04T14:00 +python -m omc3.knob_extractor --knobs disp chroma --time 2022-05-04T14:00+00:00 extracts the chromaticity and dispersion knobs at 14h on May 4th 2022 python -m omc3.knob_extractor --knobs disp chroma --time now _2h @@ -230,7 +230,7 @@ def get_params(): return EntryPointParameters( knobs={ "type": str, - "nargs": '*', + "nargs": "*", "help": ( "A list of knob names or categories to extract. " f"Available categories are: {', '.join(KNOB_CATEGORIES.keys())}." @@ -241,9 +241,8 @@ def get_params(): "type": str, "help": ( "At what time to extract the knobs. " - "Accepts ISO-format (YYYY-MM-DDThh:mm:ss), timestamp or 'now'. " - "The default timezone for the ISO-format is local time, " - "but you can force e.g. UTC by adding +00:00." + "Accepts ISO-format (YYYY-MM-DDThh:mm:ss) with timezone, timestamp or 'now'. " + "Timezone must be specified for ISO-format (e.g. +00:00 for UTC)." ), "default": "now", }, @@ -261,18 +260,12 @@ def get_params(): ), }, state={ - "action": 'store_true', - "help": ( - "Prints the state of the StateTracker. " - "Does not extract anything else." - ), + "action": "store_true", + "help": ("Prints the state of the StateTracker. Does not extract anything else."), }, output={ "type": PathOrStr, - "help": ( - "Specify user-defined output path. " - "This should probably be `model_dir/knobs.madx`" - ), + "help": "Specify user-defined output path. This should probably be `model_dir/knobs.madx`", }, knob_definitions={ "type": PathOrStrOrDataFrame, @@ -286,19 +279,22 @@ def get_params(): @entrypoint( - get_params(), strict=True, + get_params(), + strict=True, argument_parser_args={ "epilog": USAGE_EXAMPLES, "formatter_class": argparse.RawDescriptionHelpFormatter, - "prog": "Knob Extraction Tool." - } + "prog": "Knob Extraction Tool.", + }, ) def main(opt) -> tfs.TfsDataFrame: - """ Main knob extracting function. """ - ldb = pytimber.LoggingDB(source="nxcals", loglevel=logging.ERROR, - sparkprops={"spark.ui.showConsoleProgress": "false"} + """Main knob extracting function.""" + ldb = pytimber.LoggingDB( + source="nxcals", + loglevel=logging.ERROR, + sparkprops={"spark.ui.showConsoleProgress": "false"}, ) - time = _parse_time(opt.time, opt.timedelta) + time = parse_time(opt.time, opt.timedelta) if opt.state: # Only print the state of the machine. 
@@ -315,6 +311,7 @@ def main(opt) -> tfs.TfsDataFrame: # State Extraction ------------------------------------------------------------- + def get_state(ldb, time: datetime) -> dict[str, str]: """ Standalone function to gather and log state data from the StateTracker. @@ -348,9 +345,11 @@ def _get_state_as_df(state_dict: dict[str, str], time: datetime) -> tfs.TfsDataF Returns: tfs.DataFrame: States packed into dataframe with readable index. """ - state_df = tfs.TfsDataFrame(index=list(STATE_VARIABLES.values()), - columns=[Col.value, Col.lsa], - headers={Head.time: time}) + state_df = tfs.TfsDataFrame( + index=list(STATE_VARIABLES.values()), + columns=[Col.value, Col.lsa], + headers={Head.time: time}, + ) for name, value in state_dict.items(): state_df.loc[STATE_VARIABLES[name], Col.lsa] = name state_df.loc[STATE_VARIABLES[name], Col.value] = value @@ -359,6 +358,7 @@ def _get_state_as_df(state_dict: dict[str, str], time: datetime) -> tfs.TfsDataF # Knobs Extraction ------------------------------------------------------------- + def extract(ldb, knobs: Sequence[str], time: datetime) -> dict[str, float]: """ Standalone function to gather data from the StateTracker. @@ -383,7 +383,9 @@ def extract(ldb, knobs: Sequence[str], time: datetime) -> dict[str, float]: knobkey = f"LhcStateTracker:{knob}:target" knobs_extracted[knob] = None # to log that this was tried to be extracted. - knobvalue = ldb.get(knobkey, time.timestamp()) # use timestamp to preserve timezone info + knobvalue = ldb.get( + knobkey, time.timestamp() + ) # use timestamp to preserve timezone info if knobkey not in knobvalue: LOGGER.debug(f"{knob} not found in StateTracker") continue @@ -405,7 +407,7 @@ def extract(ldb, knobs: Sequence[str], time: datetime) -> dict[str, float]: def check_for_undefined_knobs(knobs_definitions: pd.DataFrame, knob_categories: Sequence[str]): - """ Check that all knobs are actually defined in the knobs-definitions. + """Check that all knobs are actually defined in the knobs-definitions. Args: @@ -416,7 +418,9 @@ def check_for_undefined_knobs(knobs_definitions: pd.DataFrame, knob_categories: KeyError: If one or more of the knobs don't have a definition. """ - knob_names = [knob for category in knob_categories for knob in KNOB_CATEGORIES.get(category, [category])] + knob_names = [ + knob for category in knob_categories for knob in KNOB_CATEGORIES.get(category, [category]) + ] undefined_knobs = [knob for knob in knob_names if knob not in knobs_definitions.index] if undefined_knobs: raise KeyError( @@ -425,9 +429,9 @@ def check_for_undefined_knobs(knobs_definitions: pd.DataFrame, knob_categories: ) -def _extract_and_gather(ldb, knobs_definitions: pd.DataFrame, - knob_categories: Sequence[str], - time: datetime) -> tfs.TfsDataFrame: +def _extract_and_gather( + ldb, knobs_definitions: pd.DataFrame, knob_categories: Sequence[str], time: datetime +) -> tfs.TfsDataFrame: """ Main function to gather data from the StateTracker and the knob-definitions. 
All given knobs (either in categories or as knob names) to be extracted @@ -451,16 +455,18 @@ def _extract_and_gather(ldb, knobs_definitions: pd.DataFrame, extracted_knobs = extract(ldb, knobs=knob_categories, time=time) knob_names = list(extracted_knobs.keys()) - knobs = tfs.TfsDataFrame(index=knob_names, - columns=[Col.lsa, Col.madx, Col.scaling, Col.value], - headers={Head.time: time}) + knobs = tfs.TfsDataFrame( + index=knob_names, + columns=[Col.lsa, Col.madx, Col.scaling, Col.value], + headers={Head.time: time}, + ) knobs[[Col.lsa, Col.madx, Col.scaling]] = knobs_definitions.loc[knob_names, :] knobs[Col.value] = pd.Series(extracted_knobs) return knobs def _write_knobsfile(output: Path | str, collected_knobs: tfs.TfsDataFrame): - """ Takes the collected knobs and writes them out into a text-file. """ + """Takes the collected knobs and writes them out into a text-file.""" collected_knobs = collected_knobs.copy() # to not modify the df # Sort the knobs by category @@ -490,26 +496,31 @@ def _write_knobsfile(output: Path | str, collected_knobs: tfs.TfsDataFrame): # Knobs Definitions ------------------------------------------------------------ + def _get_knobs_def_file(user_defined: Path | str | None = None) -> Path: - """ Check which knobs-definition file is appropriate to take. """ + """Check which knobs-definition file is appropriate to take.""" if user_defined is not None: LOGGER.info(f"Using user knobs-definition file: '{user_defined}'") return Path(user_defined) if KNOBS_FILE_ACC_MODELS.is_file(): - LOGGER.info(f"Using given acc-models folder's knobs.txt as knobsdefinition file: '{KNOBS_FILE_ACC_MODELS}") + LOGGER.info( + f"Using given acc-models folder's knobs.txt as knobs-definition file: '{KNOBS_FILE_ACC_MODELS}'" + ) return KNOBS_FILE_ACC_MODELS if KNOBS_FILE_AFS.is_file(): # if all fails, fall back to lhc acc-models - LOGGER.info(f"Using afs-fallback acc-models folder's knobs.txt as knobs-definition file: '{KNOBS_FILE_AFS}'") + LOGGER.info( + f"Using afs-fallback acc-models folder's knobs.txt as knobs-definition file: '{KNOBS_FILE_AFS}'" + ) return KNOBS_FILE_AFS raise FileNotFoundError("None of the knobs-definition files are available.") def load_knobs_definitions(file_path: Path | str) -> pd.DataFrame: - """ Load the knobs-definition file and convert into a DataFrame. + """Load the knobs-definition file and convert it into a DataFrame. Each line in this file should consist of at least three comma separated entries in the following order: madx-name, lsa-name, scaling factor. Other columns are ignored. @@ -530,17 +541,19 @@ def load_knobs_definitions(file_path: Path | str) -> pd.DataFrame: converters = {Col.madx: str.strip, Col.lsa: str.strip} # strip whitespaces dtypes = {Col.scaling: float} names = (Col.madx, Col.lsa, Col.scaling) - df = pd.read_csv(file_path, - comment="#", - usecols=list(range(len(names))), # only read the first columns - names=names, - dtype=dtypes, - converters=converters) + df = pd.read_csv( + file_path, + comment="#", + usecols=list(range(len(names))), # only read the first columns + names=names, + dtype=dtypes, + converters=converters, + ) return _to_knobs_dataframe(df) def _to_knobs_dataframe(df: pd.DataFrame) -> pd.DataFrame: - """ Adapts a DataFrame to the conventions used here: + """Adapts a DataFrame to the conventions used here: StateTracker variable name as index, all columns lower-case. 
Args: @@ -559,7 +572,7 @@ def _to_knobs_dataframe(df: pd.DataFrame) -> pd.DataFrame: def _parse_knobs_defintions(knobs_def_input: Path | str | pd.DataFrame | None) -> pd.DataFrame: - """ Parse the given knob-definitions either from a csv-file or from a DataFrame. """ + """Parse the given knob-definitions either from a csv-file or from a DataFrame.""" if isinstance(knobs_def_input, pd.DataFrame): return _to_knobs_dataframe(knobs_def_input) @@ -570,63 +583,18 @@ def _parse_knobs_defintions(knobs_def_input: Path | str | pd.DataFrame | None) - def get_madx_command(knob_data: pd.Series) -> str: if Col.value not in knob_data.index: - raise KeyError("Value entry not found in extracted knob_data. " - "Something went wrong as it should at least be NaN.") + raise KeyError( + "Value entry not found in extracted knob_data. " + "Something went wrong as it should at least be NaN." + ) if knob_data[Col.value] is None or pd.isna(knob_data[Col.value]): return f"! {knob_data[Col.madx]} : No Value extracted" return f"{knob_data[Col.madx]} := {knob_data[Col.value] * knob_data[Col.scaling]};" -# Time Tools ------------------------------------------------------------------- - -def _parse_time(time: str, timedelta: str = None) -> datetime: - """ Parse time from given time-input. """ - t = _parse_time_from_str(time) - if timedelta: - t = _add_time_delta(t, timedelta) - return t - - -def _parse_time_from_str(time_str: str) -> datetime: - """ Parse time from given string. """ - # Now? --- - if time_str.lower() == "now": - return datetime.now() - - # ISOFormat? --- - try: - return datetime.fromisoformat(time_str) - except (TypeError, ValueError): - LOGGER.debug("Could not parse time string as ISO format") - pass - - # Timestamp? --- - try: - return datetime.fromtimestamp(int(time_str)) - except (TypeError, ValueError): - LOGGER.debug("Could not parse time string as a timestamp") - pass - - raise ValueError(f"Couldn't read datetime '{time_str}'") - - -def _add_time_delta(time: datetime, delta_str: str) -> datetime: - """ Parse delta-string and add time-delta to time. """ - sign = -1 if delta_str[0] in MINUS_CHARS else 1 - all_deltas = re.findall(r"(\d+)(\w)", delta_str) # tuples (value, timeunit-char) - # mapping char to the time-unit as accepted by relativedelta, - # following ISO-8601 for time durations - char2unit = { - "s": 'seconds', "m": 'minutes', "h": 'hours', - "d": 'days', "w": 'weeks', "M": 'months', "Y": "years", - } - # add all deltas, which are tuples of (value, timeunit-char) - time_parts = {char2unit[delta[1]]: sign * int(delta[0]) for delta in all_deltas} - return time + relativedelta(**time_parts) - - # Other tools ------------------------------------------------------------------ + def lsa2name(lsa_name: str) -> str: """LSA name -> Variable in Timber/StateTracker conversion.""" return lsa_name.replace("/", ":") diff --git a/omc3/model/accelerators/lhc.py b/omc3/model/accelerators/lhc.py index 1cd5097e..c01d0129 100644 --- a/omc3/model/accelerators/lhc.py +++ b/omc3/model/accelerators/lhc.py @@ -77,6 +77,7 @@ """ + from __future__ import annotations from pathlib import Path @@ -109,6 +110,7 @@ class Lhc(Accelerator): """ Accelerator class for the Large Hadron Collider. """ + NAME: str = "lhc" LOCAL_REPO_NAME: str = "acc-models-lhc" RE_DICT: dict[str, str] = { @@ -130,7 +132,7 @@ def get_parameters(): type=int, choices=(1, 2), required=True, - help="Beam to use." 
+ help="Beam to use.", ) params.add_parameter( name="year", @@ -174,16 +176,12 @@ def verify_object(self) -> None: # TODO: Maybe more checks? raise AcceleratorDefinitionError("Crossing on or off not set.") # TODO: write more output prints - LOGGER.debug( - "... verification passed. \nSome information about the accelerator:" - ) + LOGGER.debug("... verification passed. \nSome information about the accelerator:") LOGGER.debug(f"Class name {self.__class__.__name__}") LOGGER.debug(f"Beam {self.beam}") LOGGER.debug(f"Beam direction {self.beam_direction}") if self.modifiers: - LOGGER.debug( - f"Modifiers {', '.join([str(m) for m in self.modifiers])}" - ) + LOGGER.debug(f"Modifiers {', '.join([str(m) for m in self.modifiers])}") @property def beam(self) -> int: @@ -204,7 +202,12 @@ def beam(self, value) -> None: def get_lhc_error_dir() -> Path: return LHC_DIR / "systematic_errors" - def get_variables(self, frm: float | None = None, to: float | None = None, classes: Iterable[str] | None = None): + def get_variables( + self, + frm: float | None = None, + to: float | None = None, + classes: Iterable[str] | None = None, + ): corrector_beam_dir = Path(f"correctors_b{self.beam}") all_vars_by_class = load_multiple_jsons( *self._get_corrector_files(corrector_beam_dir / "beta_correctors.json"), @@ -217,8 +220,10 @@ def get_variables(self, frm: float | None = None, to: float | None = None, class # Sort variables by S (nice for comparing different files) return self.sort_variables_by_location(variables, frm, to) - def sort_variables_by_location(self, variables: Iterable[str], frm: float | None = None, to: str | None = None) -> list[str]: - """ Sorts the variables by location and filters them between `frm` and `to`. + def sort_variables_by_location( + self, variables: Iterable[str], frm: float | None = None, to: str | None = None + ) -> list[str]: + """Sorts the variables by location and filters them between `frm` and `to`. If `frm` is larger than `to` it loops back around to the start the accelerator. This is a useful function for the LHC that's why it is "public" but it is not part of the Accelerator-Class Interface. @@ -277,8 +282,7 @@ def log_status(self) -> None: LOGGER.info(f"Natural Tune X [{self.nat_tunes[0]:10.3f}]") LOGGER.info(f"Natural Tune Y [{self.nat_tunes[1]:10.3f}]") LOGGER.info( - f"Best Knowledge Model " - f"[{'NO' if self.model_best_knowledge is None else 'OK':>10s}]" + f"Best Knowledge Model: [{'NO' if self.model_best_knowledge is None else 'OK':>10s}]" ) if self.excitation == AccExcitationMode.FREE: @@ -291,10 +295,9 @@ def log_status(self) -> None: LOGGER.info(f"> Driven Tune X [{self.drv_tunes[0]:10.3f}]") LOGGER.info(f"> Driven Tune Y [{self.drv_tunes[1]:10.3f}]") - def get_exciter_bpm(self, plane: str, commonbpms: list[str]) -> tuple[str, str]: - """ Returns the name of the BPM closest to the exciter (i.e. ADT or AC-Dipole) - as well as the name of the exciter element. """ + """Returns the name of the BPM closest to the exciter (i.e. 
ADT or AC-Dipole) + as well as the name of the exciter element.""" beam = self.beam adt = "H.C" if plane == "X" else "V.B" l_r = "L" if ((beam == 1) != (plane == "Y")) else "R" @@ -330,17 +333,16 @@ def important_phase_advances(self) -> list[list[str]] | None: return None def get_accel_file(self, filename: Path | str) -> Path: - return LHC_DIR / self.year / filename - + return (LHC_DIR / self.year / filename).absolute() # Private Methods ############################################################## def _get_corrector_elems(self) -> Path: - """ Return the corrector elements file, either from the instance's specific directory, - if it exists, or the default directory. """ + """Return the corrector elements file, either from the instance's specific directory, + if it exists, or the default directory.""" return self._get_corrector_files(f"corrector_elems_b{self.beam}.tfs")[-1] def _get_corrector_files(self, file_name: Path | str) -> list[Path]: - """ Get the corrector files from the default directory AND + """Get the corrector files from the default directory AND the instance's specific directory if it exists AND the model directroy if it exists, in that order. See also discussion in https://github.com/pylhc/omc3/pull/458#discussion_r1764829247 . @@ -348,13 +350,13 @@ def _get_corrector_files(self, file_name: Path | str) -> list[Path]: # add file from the default directory (i.e. "model/accelerators/lhc/correctors") default_file = Lhc.DEFAULT_CORRECTORS_DIR / file_name if not default_file.exists(): - msg = (f"Could not find {file_name} in {Lhc.DEFAULT_CORRECTORS_DIR}." - "Something went wrong with the variables getting logic.") + msg = ( + f"Could not find {file_name} in {Lhc.DEFAULT_CORRECTORS_DIR}." + "Something went wrong with the variables getting logic." + ) raise FileNotFoundError(msg) - LOGGER.debug( - f"Default corrector file {file_name} found in {default_file.parent}." - ) + LOGGER.debug(f"Default corrector file {file_name} found in {default_file.parent}.") corrector_files = [default_file] # add file from the accelerator directory (e.g. "model/accelerators/lhc/2024/correctors") @@ -379,7 +381,7 @@ def _get_corrector_files(self, file_name: Path | str) -> list[Path]: return corrector_files def find_modifier(self, modifier: Path | str): - """ Try to find a modifier file, which might be given only by its name. + """Try to find a modifier file, which might be given only by its name. This is looking for full-path, model-dir and in the acc-models-path's optics-dir., """ dirs = [] @@ -391,8 +393,10 @@ def find_modifier(self, modifier: Path | str): return find_file(modifier, dirs=dirs) + # General functions ########################################################## + def _flatten_list(my_list: Iterable) -> list: return [item for sublist in my_list for item in sublist] diff --git a/omc3/model/model_creators/abstract_model_creator.py b/omc3/model/model_creators/abstract_model_creator.py index 2fa81fed..78b220a1 100644 --- a/omc3/model/model_creators/abstract_model_creator.py +++ b/omc3/model/model_creators/abstract_model_creator.py @@ -4,6 +4,7 @@ This module provides the template for all model creators. """ + from __future__ import annotations import contextlib @@ -49,6 +50,7 @@ from omc3.segment_by_segment.propagables import Propagable from omc3.segment_by_segment.segments import Segment + MADXInputType = Path | str | dict[str, str] | None LOGGER = logging_tools.get_logger(__file__) @@ -59,6 +61,7 @@ class ModelCreator(ABC): Abstract class for the implementation of a model creator. 
All mandatory methods and convenience functions are defined here. """ + jobfile: str = JOB_MODEL_MADX_NOMINAL # lowercase as it might be changed in subclasses __init__ def __init__(self, accel: Accelerator, logfile: Path = None, acc_models_path: Path = None): @@ -93,22 +96,19 @@ def prepare_options(self, opt): pass def full_run(self): - """ Does the full run: preparation, running madx, post_run. """ + """Does the full run: preparation, running madx, post_run.""" # Prepare model-dir output directory self.prepare_run() - # get madx-script with relative output-paths - # (model_dir is base-path in get_madx_script) - self.accel.model_dir = Path() + # get madx-script with paths relative to model-dir if possible, otherwise absolute madx_script = self.get_madx_script() - self.accel.model_dir = self.output_dir # Run madx to create model run_string( madx_script, output_file=self.accel.model_dir / self.jobfile, log_file=self.logfile, - cwd=self.accel.model_dir + cwd=self.accel.model_dir, ) # Check output and return accelerator instance @@ -118,9 +118,6 @@ def full_run(self): def get_madx_script(self) -> str: """ Returns the ``MAD-X`` script used to create the model (directory). - - Returns: - The string of the ``MAD-X`` script used to used to create the model (directory). """ pass @@ -129,18 +126,33 @@ def get_base_madx_script(self) -> str: """ Returns the ``MAD-X`` script used to set-up the basic accelerator in MAD-X, without actually creating the twiss-output, as some modifications to the accelerator may come afterwards (depending on which model-creator is calling this). + """ + pass + + def resolve_madx_path(self, path: Path | str) -> Path: + """Converts a given path to a path relative to the model dir if possible, otherwise returns the absolute path. + Args: + path (Path | str): The path to convert. Returns: - The string of the ``MAD-X`` script used to used to set-up the machine. + madx_path (Path): The converted path for MAD-X usage. """ - pass + path = Path(path) + try: + # Try converting to a relative path if it is inside the cwd (model_dir) + return path.relative_to(self.accel.model_dir) + except ValueError: + LOGGER.debug( + f"Path {path} is not relative to the model dir {self.accel.model_dir}, using absolute path." + ) + return path.absolute() def get_update_deltap_script(self, deltap: float | str) -> str: - """ Get the madx snippet that updates the dpp in the machine. + """Get the madx snippet that updates the dpp in the machine. Args: deltap (float | str): The dpp to update the machine to. - """ + """ raise NotImplementedError("Update dpp not implemented for this model creator.") def prepare_run(self) -> None: @@ -184,8 +196,11 @@ def post_run(self) -> None: for filename in self.files_to_check: with contextlib.suppress(KeyError): # KeyError if just a file to check, not a file with attribute - setattr(self.accel, attribute_map[filename], tfs.read(self.accel.model_dir / filename, index=NAME)) - + setattr( + self.accel, + attribute_map[filename], + tfs.read(self.accel.model_dir / filename, index=NAME), + ) @property def files_to_check(self) -> list[str]: @@ -202,7 +217,6 @@ def files_to_check(self) -> list[str]: } return check_files + excitation_map[self.accel.excitation] - @staticmethod def _check_files_exist(dir_: Path | str, files: Sequence[str]) -> None: """ @@ -268,16 +282,16 @@ def prepare_symlink(self): ] def prepare_modifiers(self): - """ Loop over the modifiers and make them full paths if found. 
""" + """Loop over the modifiers and make them full paths if found.""" accel: Accelerator = self.accel if accel.modifiers is not None: accel.modifiers = [accel.find_modifier(m) for m in accel.modifiers] @staticmethod def _get_select_command(pattern: str | None = None, indent: int = 0): - """ Returns a basic select command with the given pattern, the default columns and correct indentation. """ + """Returns a basic select command with the given pattern, the default columns and correct indentation.""" space = " " * indent - pattern_str = f" pattern=\"{pattern}\"," if pattern is not None else "" + pattern_str = f' pattern="{pattern}",' if pattern is not None else "" return ( f"{space}select, flag=twiss, clear;\n" f"{space}select, flag=twiss,{pattern_str} column=" @@ -288,7 +302,7 @@ def _get_select_command(pattern: str | None = None, indent: int = 0): class SegmentCreator(ModelCreator, ABC): - """ Model creator for Segments, to be used in the Segment-by-Segment algorithm. + """Model creator for Segments, to be used in the Segment-by-Segment algorithm. These segments propagate the measured values from the beginning of the segment to the end. This only handles the MAD-X part of things. @@ -298,12 +312,20 @@ class SegmentCreator(ModelCreator, ABC): The output is stored in the `twiss_forward` and `twiss_backward` files, which in turn can be used for further processing by the implemented Propagables. """ + jobfile = None # set in init _sequence_name: str = None # to be set by any accelerator using the default `get_madx_script` - def __init__(self, accel: Accelerator, segment: Segment, measurables: Iterable[Propagable], - corrections: MADXInputType = None, *args, **kwargs): - """ Creates Segment of a model. """ + def __init__( + self, + accel: Accelerator, + segment: Segment, + measurables: Iterable[Propagable], + corrections: MADXInputType = None, + *args, + **kwargs, + ): + """Creates Segment of a model.""" LOGGER.debug("Initializing Segment Creator") super().__init__(accel, *args, **kwargs) self.segment = segment @@ -335,8 +357,13 @@ def _create_general_macros(self): shutil.copy(OMC3_MADX_MACROS_DIR / GENERAL_MACROS, general_macros_path) def _clean_models(self): - """ Remove models from previous runs. """ - for twiss_file in (self.twiss_forward, self.twiss_forward_corrected, self.twiss_backward, self.twiss_backward_corrected): + """Remove models from previous runs.""" + for twiss_file in ( + self.twiss_forward, + self.twiss_forward_corrected, + self.twiss_backward, + self.twiss_backward_corrected, + ): output_twiss: Path = self.output_dir / twiss_file output_twiss.unlink(missing_ok=True) @@ -372,67 +399,83 @@ def _create_corrections_file(self): raise NotImplementedError("Could not determine type of corrections. Aborting.") - def get_madx_script(self): - accel: Accelerator = self.accel + def get_madx_script(self) -> str: madx_script = self.get_base_madx_script() + macros_path = self.resolve_madx_path(self.output_dir / MACROS_DIR / GENERAL_MACROS) + measurement_path = self.resolve_madx_path(self.output_dir / self.measurement_madx) + twiss_forward_path = self.resolve_madx_path(self.output_dir / self.twiss_forward) + twiss_backward_path = self.resolve_madx_path(self.output_dir / self.twiss_backward) + if self._sequence_name is None: raise ValueError( "To get the default Segment-by-Segment MAD-X script, " f"the derived class '{self.__class__.__name__}'" " must set the '_sequence_name' attribute.\n" "This error should only be encountered during development. 
" - "If you encounter it later, please open an issue!") - - madx_script += "\n".join([ - "", - f"! ----- Segment-by-Segment propagation for {self.segment.name} -----", - "", - f"call, file = '{accel.model_dir / MACROS_DIR / GENERAL_MACROS}';", - "", - "! Cycle the sequence to avoid negative length.", - f"seqedit, sequence={self._sequence_name};", - " flatten;", - f" cycle, start={self.segment.start};", - "endedit;", - "", - f"use, sequence = {self._sequence_name};", - "", - "twiss;", - "exec, save_initial_and_final_values(", - f" {self._sequence_name},", - f" {self.segment.start},", - f" {self.segment.end}, ", - f" \"{accel.model_dir / self.measurement_madx!s}\",", - " biniSbSParams,", - " bendSbSParams", - ");", - "", - "exec, extract_segment_sequence(", - f" {self._sequence_name},", - " forward_SbSSEQ,", - " backward_SbSSEQ,", - f" {self.segment.start},", - f" {self.segment.end},", - ");", - "", - "beam, particle = proton, sequence=forward_SbSSEQ;", - "beam, particle = proton, sequence=backward_SbSSEQ;", - "", - f"exec, twiss_segment(forward_SbSSEQ, \"{self.twiss_forward!s}\", biniSbSParams);", - f"exec, twiss_segment(backward_SbSSEQ, \"{self.twiss_backward!s}\", bendSbSParams);", - "", - ]) + "If you encounter it later, please open an issue!" + ) - if self.corrections is not None: - madx_script += "\n".join([ - f"call, file=\"{self.corrections_madx!s}\";", - f"exec, twiss_segment(forward_SbSSEQ, " - f"\"{self.twiss_forward_corrected}\", biniSbSParams);", - f"exec, twiss_segment(backward_SbSSEQ, " - f"\"{self.twiss_backward_corrected}\", bendSbSParams);", + madx_script += "\n".join( + [ "", - ]) + f"! ----- Segment-by-Segment propagation for {self.segment.name} -----", + "", + f"call, file = '{macros_path}';", + "", + "! Cycle the sequence to avoid negative length.", + f"seqedit, sequence={self._sequence_name};", + " flatten;", + f" cycle, start={self.segment.start};", + "endedit;", + "", + f"use, sequence = {self._sequence_name};", + "", + "twiss;", + "exec, save_initial_and_final_values(", + f" {self._sequence_name},", + f" {self.segment.start},", + f" {self.segment.end}, ", + f' "{measurement_path}",', + " biniSbSParams,", + " bendSbSParams", + ");", + "", + "exec, extract_segment_sequence(", + f" {self._sequence_name},", + " forward_SbSSEQ,", + " backward_SbSSEQ,", + f" {self.segment.start},", + f" {self.segment.end},", + ");", + "", + "beam, particle = proton, sequence=forward_SbSSEQ;", + "beam, particle = proton, sequence=backward_SbSSEQ;", + "", + f'exec, twiss_segment(forward_SbSSEQ, "{twiss_forward_path}", biniSbSParams);', + f'exec, twiss_segment(backward_SbSSEQ, "{twiss_backward_path}", bendSbSParams);', + "", + ] + ) + + if self.corrections is not None: + corrections_path = self.resolve_madx_path(self.output_dir / self.corrections_madx) + twiss_forward_corr_path = self.resolve_madx_path( + self.output_dir / self.twiss_forward_corrected + ) + twiss_backward_corr_path = self.resolve_madx_path( + self.output_dir / self.twiss_backward_corrected + ) + madx_script += "\n".join( + [ + f'call, file="{corrections_path}";', + f"exec, twiss_segment(forward_SbSSEQ, " + f'"{twiss_forward_corr_path}", biniSbSParams);', + f"exec, twiss_segment(backward_SbSSEQ, " + f'"{twiss_backward_corr_path}", bendSbSParams);', + "", + ] + ) return madx_script @@ -447,7 +490,13 @@ def files_to_check(self) -> list[str]: class CorrectionModelCreator(ModelCreator): jobfile = None # set in __init__ - def __init__(self, accel: Accelerator, twiss_out: Path | str, corr_files: Sequence[Path | str], update_dpp: 
bool = False): + def __init__( + self, + accel: Accelerator, + twiss_out: Path | str, + corr_files: Sequence[Path | str], + update_dpp: bool = False, + ): """Model creator for the corrected/matched model of the LHC. Args: @@ -458,16 +507,17 @@ def __init__(self, accel: Accelerator, twiss_out: Path | str, corr_files: Sequen """ LOGGER.debug("Initializing Correction Model Creator Base Attributes") super().__init__(accel) - self.twiss_out = Path(twiss_out) + self.twiss_out = self.resolve_madx_path(twiss_out) - # use absolute paths to force files into twiss_out directory instead of model-dir - self.jobfile = self.twiss_out.parent.absolute() / f"job.create_{self.twiss_out.stem}.madx" - self.logfile= self.twiss_out.parent.absolute() / f"job.create_{self.twiss_out.stem}.log" - self.corr_files = corr_files + # Take the directory of the twiss output as output dir + self.jobfile = self.twiss_out.parent / f"job.create_{self.twiss_out.stem}.madx" + + self.logfile = self.twiss_out.parent / f"job.create_{self.twiss_out.stem}.log" + self.corr_files = [self.resolve_madx_path(f) for f in corr_files] self.update_dpp = update_dpp def get_madx_script(self) -> str: - """ Get the madx script for the correction model creator, which updates the model after correcion. + """Get the madx script for the correction model creator, which updates the model after correcion. This is a basic implementation which does not update the dpp, but should work for generic accelerators. """ @@ -476,18 +526,16 @@ def get_madx_script(self) -> str: f"Updating the dpp is not implemented for correction model creator of {self.accel.NAME}." ) - madx_script = self.get_base_madx_script() # do not get_madx_script as we don't need the uncorrected output. + # use only base-part and not the full madx-script as we don't need the uncorrected output. + madx_script = self.get_base_madx_script() + # We assume for the following that the correction files have already been resolved to madx paths. for corr_file in self.corr_files: # Load the corrections - madx_script += f"call, file = '{corr_file!s}';\n" + madx_script += f"call, file = '{corr_file}';\n" - madx_script += ( - f"{self._get_select_command()}\n" - f"twiss, file = {self.twiss_out!s};\n" - ) + madx_script += f"{self._get_select_command()}\ntwiss, file = {self.twiss_out};\n" return madx_script - @property def files_to_check(self) -> list[str]: return [self.twiss_out, self.jobfile, self.logfile] @@ -495,6 +543,7 @@ def files_to_check(self) -> list[str]: # Helper functions ------------------------------------------------------------- + def check_folder_choices( parent: Path, msg: str, @@ -502,7 +551,7 @@ def check_folder_choices( list_choices: bool = False, predicate=iotools.always_true, stem_only: bool = False, - ) -> Path: +) -> Path: """ A helper function that scans a selected folder for children, which will then be displayed as possible choices. 
This function allows the model-creator to get only the file/folder names, check @@ -542,4 +591,6 @@ def check_folder_choices( if list_choices: for choice in choices: print(choice) - raise AcceleratorDefinitionError(f"{msg}.\nSelected: '{selection}'.\nChoices: [{', '.join(choices)}]") + raise AcceleratorDefinitionError( + f"{msg}.\nSelected: '{selection}'.\nChoices: [{', '.join(choices)}]" + ) diff --git a/omc3/model/model_creators/lhc_model_creator.py b/omc3/model/model_creators/lhc_model_creator.py index 7ccbe3fc..b0705d28 100644 --- a/omc3/model/model_creators/lhc_model_creator.py +++ b/omc3/model/model_creators/lhc_model_creator.py @@ -4,6 +4,7 @@ This module provides convenience functions for model creation of the ``LHC``. """ + from __future__ import annotations import logging @@ -47,6 +48,7 @@ SegmentCreator, check_folder_choices, ) +from omc3.nxcals.constants import EXTRACTED_MQTS_FILENAME from omc3.optics_measurements.constants import NAME from omc3.utils.iotools import create_dirs, get_check_by_regex_func, get_check_suffix_func @@ -59,29 +61,42 @@ def _b2_columns() -> list[str]: - cols_outer = [f"{KP}{num}{S}L" for KP in ( - "K", "P") for num in range(21) for S in ("", "S")] - cols_middle = ["DX", "DY", "DS", "DPHI", "DTHETA", "DPSI", "MREX", "MREY", "MREDX", "MREDY", - "AREX", "AREY", "MSCALX", "MSCALY", "RFM_FREQ", "RFM_HARMON", "RFM_LAG"] + cols_outer = [f"{KP}{num}{S}L" for KP in ("K", "P") for num in range(21) for S in ("", "S")] + cols_middle = [ + "DX", + "DY", + "DS", + "DPHI", + "DTHETA", + "DPSI", + "MREX", + "MREY", + "MREDX", + "MREDY", + "AREX", + "AREY", + "MSCALX", + "MSCALY", + "RFM_FREQ", + "RFM_HARMON", + "RFM_LAG", + ] return cols_outer[:42] + cols_middle + cols_outer[42:] class LhcModelCreator(ModelCreator): - def __init__(self, accel: Lhc, *args, **kwargs): LOGGER.debug("Initializing LHC Model Creator") super().__init__(accel, *args, **kwargs) def prepare_options(self, opt): - """ Use the fetcher to list choices if requested. """ + """Use the fetcher to list choices if requested.""" accel: Lhc = self.accel # Set the fetcher paths --- if opt.fetch == Fetcher.PATH: if opt.path is None: - raise AcceleratorDefinitionError( - "Path fetcher chosen, but no path provided." - ) + raise AcceleratorDefinitionError("Path fetcher chosen, but no path provided.") acc_model_path = Path(opt.path) elif opt.fetch == Fetcher.AFS: @@ -91,7 +106,7 @@ def prepare_options(self, opt): msg="No optics tag (flag --year) given", selection=accel.year, list_choices=opt.list_choices, - predicate=Path.is_dir + predicate=Path.is_dir, ) # raises AcceleratorDefinitionError if not valid choice else: LOGGER.warning( @@ -103,13 +118,14 @@ def prepare_options(self, opt): return # list optics choices --- - if opt.list_choices: # assumes we only want to list optics. Invoked even if modifiers are given! + if opt.list_choices: + # assumes we only want to list optics. Invoked even if modifiers are given! 
check_folder_choices( acc_model_path / OPTICS_SUBDIR, msg="No modifier given", selection=None, # TODO: could check if user made valid choice list_choices=opt.list_choices, - predicate=get_check_suffix_func(".madx") + predicate=get_check_suffix_func(".madx"), ) # raises AcceleratorDefinitionError # Set acc model path to be used in model creator --- @@ -131,14 +147,12 @@ def prepare_run(self) -> None: shutil.copy(OMC3_MADX_MACROS_DIR / LHC_MACROS, macros_path / LHC_MACROS) shutil.copy(OMC3_MADX_MACROS_DIR / LHC_MACROS_RUN3, macros_path / LHC_MACROS_RUN3) - if accel.energy is not None: core = f"{int(accel.energy):04d}" LOGGER.debug("Copying error defs for analytical N-BPM method") error_dir_path = accel.get_lhc_error_dir() - shutil.copy(error_dir_path / - f"{core}GeV.tfs", accel.model_dir / ERROR_DEFFS_TXT) + shutil.copy(error_dir_path / f"{core}GeV.tfs", accel.model_dir / ERROR_DEFFS_TXT) def check_accelerator_instance(self) -> None: accel = self.accel @@ -159,8 +173,7 @@ def check_accelerator_instance(self) -> None: # hint: if modifiers are given as absolute paths: `path / abs_path` returns `abs_path` (jdilly) # hint: modifiers were at this point probably already checked (and adapted) in `prepare_run()`. - inexistent_modifiers = [ - m for m in accel.modifiers if not (accel.model_dir / m).exists()] + inexistent_modifiers = [m for m in accel.modifiers if not (accel.model_dir / m).exists()] if len(inexistent_modifiers): raise AcceleratorDefinitionError( "The following modifier files do not exist: " @@ -168,31 +181,35 @@ def check_accelerator_instance(self) -> None: ) def get_madx_script(self) -> str: # nominal - """ Get madx script to create a LHC model.""" + """Get madx script to create a LHC model.""" accel: Lhc = self.accel madx_script = self.get_base_madx_script() + twiss_dat_path = self.resolve_madx_path(accel.model_dir / TWISS_DAT) + twiss_elements_path = self.resolve_madx_path(accel.model_dir / TWISS_ELEMENTS_DAT) madx_script += ( - f"exec, do_twiss_monitors(LHCB{accel.beam}, '{accel.model_dir / TWISS_DAT}', {accel.dpp});\n" - f"exec, do_twiss_elements(LHCB{accel.beam}, '{accel.model_dir / TWISS_ELEMENTS_DAT}', {accel.dpp});\n" + f"exec, do_twiss_monitors(LHCB{accel.beam}, '{twiss_dat_path}', {accel.dpp});\n" + f"exec, do_twiss_elements(LHCB{accel.beam}, '{twiss_elements_path}', {accel.dpp});\n" ) if accel.excitation != AccExcitationMode.FREE or accel.drv_tunes is not None: # allow user to modify script and enable excitation, if driven tunes are given use_acd = accel.excitation == AccExcitationMode.ACD use_adt = accel.excitation == AccExcitationMode.ADT + twiss_ac_path = self.resolve_madx_path(accel.model_dir / TWISS_AC_DAT) + twiss_adt_path = self.resolve_madx_path(accel.model_dir / TWISS_ADT_DAT) madx_script += ( f"use_acd={use_acd:d};\n" f"use_adt={use_adt:d};\n" f"if(use_acd == 1){{\n" - f"exec, twiss_ac_dipole({accel.nat_tunes[0]}, {accel.nat_tunes[1]}, {accel.drv_tunes[0]}, {accel.drv_tunes[1]}, {accel.beam}, '{accel.model_dir / TWISS_AC_DAT}', {accel.dpp});\n" + f"exec, twiss_ac_dipole({accel.nat_tunes[0]}, {accel.nat_tunes[1]}, {accel.drv_tunes[0]}, {accel.drv_tunes[1]}, {accel.beam}, '{twiss_ac_path}', {accel.dpp});\n" f"}}else if(use_adt == 1){{\n" - f"exec, twiss_adt({accel.nat_tunes[0]}, {accel.nat_tunes[1]}, {accel.drv_tunes[0]}, {accel.drv_tunes[1]}, {accel.beam}, '{accel.model_dir / TWISS_ADT_DAT}', {accel.dpp});\n" + f"exec, twiss_adt({accel.nat_tunes[0]}, {accel.nat_tunes[1]}, {accel.drv_tunes[0]}, {accel.drv_tunes[1]}, {accel.beam}, '{twiss_adt_path}', {accel.dpp});\n" 
f"}}\n" ) return madx_script def get_base_madx_script(self) -> str: - """ Returns the base LHC madx script.""" + """Returns the base LHC madx script.""" accel: Lhc = self.accel madx_script = self._get_sequence_initialize_script() @@ -204,50 +221,49 @@ def get_base_madx_script(self) -> str: madx_script += f"exec, match_tunes_ats({accel.nat_tunes[0]}, {accel.nat_tunes[1]}, {accel.beam});\n" madx_script += f"exec, coupling_knob_ats({accel.beam});\n" else: - madx_script += f"exec, match_tunes({accel.nat_tunes[0]}, {accel.nat_tunes[1]}, {accel.beam});\n" + madx_script += ( + f"exec, match_tunes({accel.nat_tunes[0]}, {accel.nat_tunes[1]}, {accel.beam});\n" + ) madx_script += f"exec, coupling_knob({accel.beam});\n" return madx_script def _get_sequence_initialize_script(self) -> str: - """ Returns the LHC sequence initialization script. + """Returns the LHC sequence initialization script. This is split up here from the matching (in the base-script), to accompany the needs of the Best Knowledge Model Creator, see below. """ accel: Lhc = self.accel - madx_script = ( f"{self._get_madx_script_info_comments()}\n\n" - f"call, file = '{accel.model_dir / MACROS_DIR / GENERAL_MACROS}';\n" - f"call, file = '{accel.model_dir / MACROS_DIR / LHC_MACROS}';\n" + f"call, file = '{self.resolve_madx_path(accel.model_dir / MACROS_DIR / GENERAL_MACROS)}';\n" + f"call, file = '{self.resolve_madx_path(accel.model_dir / MACROS_DIR / LHC_MACROS)}';\n" ) madx_script += f"{MADX_ENERGY_VAR} = {accel.energy};\n" madx_script += "exec, define_nominal_beams();\n\n" if self._uses_run3_macros(): - LOGGER.debug( - "According to the optics year, Run 3 versions of the macros will be used" - ) - madx_script += ( - f"call, file = '{accel.model_dir / MACROS_DIR / LHC_MACROS_RUN3}';\n" - ) + LOGGER.debug("According to the optics year, Run 3 versions of the macros will be used") + madx_script += f"call, file = '{self.resolve_madx_path(accel.model_dir / MACROS_DIR / LHC_MACROS_RUN3)}';\n" madx_script += "\n! ----- Calling Sequence -----\n" - madx_script += "option, -echo; ! suppress output from base sequence loading to keep the log small\n" + madx_script += ( + "option, -echo; ! suppress output from base sequence loading to keep the log small\n" + ) madx_script += self._get_madx_main_sequence_loading() madx_script += "\noption, echo; ! re-enable output to see the optics settings\n" madx_script += "\n! ---- Call optics and other modifiers ----\n" if accel.modifiers is not None: - # if modifier is an absolute path, go there, otherwise use the path refers from model_dir + # if the modifier can be found in the model dir, use relative path madx_script += "".join( - f"call, file = '{accel.model_dir / modifier}'; {MODIFIER_TAG}\n" + f"call, file = '{self.resolve_madx_path(modifier)}'; {MODIFIER_TAG}\n" for modifier in accel.modifiers ) - if accel.year in ['2012', '2015', '2016', '2017', '2018', '2021', 'hllhc1.3']: + if accel.year in ["2012", "2015", "2016", "2017", "2018", "2021", "hllhc1.3"]: # backwards compatibility with pre acc-models optics # WARNING: This might override values extracted via the knob-extractor. 
madx_script += ( @@ -261,12 +277,15 @@ def _get_sequence_initialize_script(self) -> str: ) if accel.acc_model_path is not None: - remove_symmetry_knob_madx = accel.acc_model_path / LHC_REMOVE_TRIPLET_SYMMETRY_RELPATH - if remove_symmetry_knob_madx.exists(): # alternatively check if year != 2018/2021 + remove_symmetry_knob_abs = ( + Path(accel.acc_model_path) / LHC_REMOVE_TRIPLET_SYMMETRY_RELPATH + ) + if remove_symmetry_knob_abs.exists(): # alternatively check if year != 2018/2021 + remove_symmetry_knob_path = self.resolve_madx_path(remove_symmetry_knob_abs) madx_script += ( - "\n! ----- Remove IR symmetry definitions -----\n" - f"\ncall, file=\"{remove_symmetry_knob_madx!s}\"; " - "! removes 'ktqx.r1 := -ktqx.l1'-type issues\n" + "\n! ----- Remove IR symmetry definitions -----\n" + f'\ncall, file="{remove_symmetry_knob_path}"; ' + "! removes 'ktqx.r1 := -ktqx.l1'-type issues\n" ) madx_script += ( @@ -277,7 +296,7 @@ def _get_sequence_initialize_script(self) -> str: return madx_script def get_update_deltap_script(self, deltap: float | str) -> str: - """ Update the dpp in the LHC. + """Update the dpp in the LHC. Args: deltap (float | str): The dpp to update the LHC to. @@ -289,11 +308,10 @@ def get_update_deltap_script(self, deltap: float | str) -> str: madx_script = ( f"twiss, deltap={deltap};\n" "correct, mode=svd;\n\n" - "! The same as match_tunes, but include deltap in the matching\n" f"exec, find_complete_tunes({accel.nat_tunes[0]}, {accel.nat_tunes[1]}, {accel.beam});\n" f"match, deltap={deltap};\n" - ) # Works better when split up + ) # Works better when split up madx_script += "\n".join([f"vary, name={knob};" for knob in self.get_tune_knobs()]) + "\n" madx_script += ( "constraint, range=#E, mux=total_qx, muy=total_qy;\n" @@ -305,23 +323,19 @@ def get_update_deltap_script(self, deltap: float | str) -> str: def _get_madx_script_info_comments(self) -> str: accel: Lhc = self.accel info_comments = ( - f'title, "LHC Model created by omc3";\n' - f"! Model directory {Path(accel.model_dir).absolute()}\n" + f'title, "LHC Model created by omc3";\n' + f"! Model directory {Path(accel.model_dir).absolute()}\n" ) if accel.acc_model_path is not None: - info_comments += ( - f"! Acc-Models {Path(accel.acc_model_path).absolute()}\n" - ) + info_comments += f"! Acc-Models {Path(accel.acc_model_path).absolute()}\n" info_comments += ( - f"! LHC year {accel.year}\n" - f"! Natural Tune X {accel.nat_tunes[0]:10.3f}\n" - f"! Natural Tune Y {accel.nat_tunes[1]:10.3f}\n" - f"! Best Knowledge {'NO' if accel.model_best_knowledge is None else 'YES':>10s}\n" + f"! LHC year {accel.year}\n" + f"! Natural Tune X {accel.nat_tunes[0]:10.3f}\n" + f"! Natural Tune Y {accel.nat_tunes[1]:10.3f}\n" + f"! Best Knowledge {'NO' if accel.model_best_knowledge is None else 'YES':>10s}\n" ) if accel.excitation == AccExcitationMode.FREE: - info_comments += ( - f"! Excitation {'NO':>10s}\n" - ) + info_comments += f"! Excitation {'NO':>10s}\n" else: info_comments += ( f"! 
Excitation {'ACD' if accel.excitation == AccExcitationMode.ACD else 'ADT':>10s}\n" @@ -334,9 +348,13 @@ def _get_madx_main_sequence_loading(self) -> str: accel: Lhc = self.accel if accel.acc_model_path is not None: - main_call = f'call, file = \'{accel.acc_model_path / "lhc.seq"}\';' - if accel.year.startswith('hl'): - main_call += f'\ncall, file = \'{accel.acc_model_path / "hllhc_sequence.madx"}\';' + acc_model_path = Path(self.resolve_madx_path(accel.acc_model_path)) + + main_seq_rel = acc_model_path / "lhc.seq" + main_call = f"call, file = '{main_seq_rel}';" + if accel.year.startswith("hl"): + hllhc_rel = acc_model_path / "hllhc_sequence.madx" + main_call += f"\ncall, file = '{hllhc_rel}';" return main_call try: return self._get_call_main() @@ -388,9 +406,8 @@ def _get_call_main(self) -> str: return call_main - -class LhcBestKnowledgeCreator(LhcModelCreator): # --------------------------------------------------------------------- - EXTRACTED_MQTS_FILENAME: str = "extracted_mqts.str" +# --------------------------------------------------------------------- +class LhcBestKnowledgeCreator(LhcModelCreator): CORRECTIONS_FILENAME: str = "corrections.madx" jobfile: str = JOB_MODEL_MADX_BEST_KNOWLEDGE files_to_check: tuple[str] = (TWISS_BEST_KNOWLEDGE_DAT, TWISS_ELEMENTS_BEST_KNOWLEDGE_DAT) @@ -432,9 +449,7 @@ def check_accelerator_instance(self): ) if accel.excitation is not AccExcitationMode.FREE: - raise AcceleratorDefinitionError( - "Don't set ACD or ADT for best knowledge model." - ) + raise AcceleratorDefinitionError("Don't set ACD or ADT for best knowledge model.") def prepare_run(self): accel: Lhc = self.accel @@ -444,7 +459,9 @@ def prepare_run(self): b2_stem_path = Path(f"{accel.b2_errors!s}.fakesuffix") # makes `with_suffix`` work # hint: if b2_stem_path is absolute, the following AFS parts are ignored - b2_error_path = AFS_B2_ERRORS_ROOT / f"Beam{accel.beam}" / b2_stem_path.with_suffix(".errors") + b2_error_path = ( + AFS_B2_ERRORS_ROOT / f"Beam{accel.beam}" / b2_stem_path.with_suffix(".errors") + ) b2_madx_path = AFS_B2_ERRORS_ROOT / f"Beam{accel.beam}" / b2_stem_path.with_suffix(".madx") # copy madx --- @@ -455,7 +472,7 @@ def prepare_run(self): # copy errors table --- b2_table = tfs.read(b2_error_path, index=NAME) gen_df = pd.DataFrame( - data=0., + data=0.0, index=b2_table.index, columns=_b2_columns(), ) @@ -472,14 +489,23 @@ def get_madx_script(self) -> str: madx_script = self.get_base_madx_script() madx_script += "\n! ----- Load MQTs -----\n" - mqts_file = accel.model_dir / self.EXTRACTED_MQTS_FILENAME + mqts_file = accel.model_dir / EXTRACTED_MQTS_FILENAME if mqts_file.exists(): - madx_script += f"call, file = '{mqts_file}';\n" + mqts_path = self.resolve_madx_path(mqts_file) + madx_script += f"call, file = '{mqts_path}';\n" + else: + LOGGER.warning( + f"MQT file for best knowledge model not found at {mqts_file!s}, skipping." + ) madx_script += "\n! 
----- Output Files -----\n" + twiss_bk_path = self.resolve_madx_path(accel.model_dir / TWISS_BEST_KNOWLEDGE_DAT) + twiss_bk_elements_path = self.resolve_madx_path( + accel.model_dir / TWISS_ELEMENTS_BEST_KNOWLEDGE_DAT + ) madx_script += ( - f"exec, do_twiss_monitors(LHCB{accel.beam}, '{accel.model_dir / TWISS_BEST_KNOWLEDGE_DAT}', {accel.dpp});\n" - f"exec, do_twiss_elements(LHCB{accel.beam}, '{accel.model_dir / TWISS_ELEMENTS_BEST_KNOWLEDGE_DAT}', {accel.dpp});\n" + f"exec, do_twiss_monitors(LHCB{accel.beam}, '{twiss_bk_path}', {accel.dpp});\n" + f"exec, do_twiss_elements(LHCB{accel.beam}, '{twiss_bk_elements_path}', {accel.dpp});\n" ) return madx_script @@ -492,20 +518,26 @@ def get_base_madx_script(self) -> str: madx_script += ( f"\n! ----- For Best Knowledge Model -----\n" - f"readmytable, file = '{accel.model_dir / B2_ERRORS_TFS}', table=errtab;\n" + f"readmytable, file = '{self.resolve_madx_path(accel.model_dir / B2_ERRORS_TFS)}', table=errtab;\n" f"seterr, table=errtab;\n" - f"call, file = '{accel.model_dir / B2_SETTINGS_MADX}';\n" + f"call, file = '{self.resolve_madx_path(accel.model_dir / B2_SETTINGS_MADX)}';\n" ) return madx_script -class LhcCorrectionModelCreator(CorrectionModelCreator, LhcModelCreator): # ------------------------------------------- +class LhcCorrectionModelCreator(CorrectionModelCreator, LhcModelCreator): # ----------------------- """ Creates an updated model from multiple changeparameters inputs (used in iterative correction). """ - def __init__(self, accel: Lhc, twiss_out: Path | str, corr_files: Sequence[Path | str], update_dpp: bool = False): + def __init__( + self, + accel: Lhc, + twiss_out: Path | str, + corr_files: Sequence[Path | str], + update_dpp: bool = False, + ): """Model creator for the corrected/matched model of the LHC. Inheritance (i.e. __init__ calls) from here should be as follows: @@ -522,20 +554,27 @@ def __init__(self, accel: Lhc, twiss_out: Path | str, corr_files: Sequence[Path super().__init__(accel, twiss_out, corr_files, update_dpp) def get_madx_script(self) -> str: - """ Get the madx script for the correction model creator, which updates the model after correcion. """ + """Get the madx script for the correction model creator, which updates the model after correction.""" accel: Lhc = self.accel - madx_script = self.get_base_madx_script() # do not super().get_madx_script as we don't need the uncorrected output. + + # do not super().get_madx_script as we don't need the uncorrected output. 
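+        # Rough shape of the script assembled below, as a reading aid (the
+        # angle-bracket parts are illustrative placeholders resolved at runtime,
+        # not literal MAD-X):
+        #   <base madx script>
+        #   <ORBIT_DPP> = <accel.dpp>;
+        #   call, file = '<corr_file>';    ! once per correction file
+        #   <deltap update block>          ! only if update_dpp is set
+        #   exec, do_twiss_elements(LHCB<beam>, "<twiss_out>", <ORBIT_DPP>);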
+ madx_script = self.get_base_madx_script() # First set the dpp to the value in the accelerator model madx_script += f"{ORBIT_DPP} = {accel.dpp};\n" for corr_file in self.corr_files: # Load the corrections, can also update ORBIT_DPP - madx_script += f"call, file = '{corr_file!s}';\n" + # Should be the correct path already from the init + madx_script += f"call, file = '{corr_file}';\n" - if self.update_dpp: # If we are doing orbit correction, we need to ensure that a correct and a match is done + # If we are doing orbit correction, we need to ensure that a correct and a match is done + if self.update_dpp: madx_script += self.get_update_deltap_script(deltap=ORBIT_DPP) - madx_script += f'exec, do_twiss_elements(LHCB{accel.beam}, "{self.twiss_out!s}", {ORBIT_DPP});\n' + twiss_out_path = self.resolve_madx_path(self.output_dir / self.twiss_out) + madx_script += ( + f'exec, do_twiss_elements(LHCB{accel.beam}, "{twiss_out_path}", {ORBIT_DPP});\n' + ) return madx_script def prepare_run(self) -> None: @@ -545,7 +584,9 @@ def prepare_run(self) -> None: LOGGER.debug("Preparing model creation structure") macros_path = self.accel.model_dir / MACROS_DIR if not macros_path.exists(): - raise AcceleratorDefinitionError(f"Folder for the macros does not exist at {macros_path!s}.") + raise AcceleratorDefinitionError( + f"Folder for the macros does not exist at {macros_path!s}." + ) @property def files_to_check(self) -> list[str]: @@ -554,60 +595,67 @@ def files_to_check(self) -> list[str]: # LHC Segment Creator ---------------------------------------------------------- -class LhcSegmentCreator(SegmentCreator, LhcModelCreator): - def get_madx_script(self): +class LhcSegmentCreator(SegmentCreator, LhcModelCreator): + def get_madx_script(self) -> str: accel: Lhc = self.accel madx_script = self.get_base_madx_script() + measurement_path = self.resolve_madx_path(self.output_dir / self.measurement_madx) + twiss_forward_path = self.resolve_madx_path(self.output_dir / self.twiss_forward) + twiss_backward_path = self.resolve_madx_path(self.output_dir / self.twiss_backward) + + madx_script += f""" +! ----- Segment-by-Segment propagation for {self.segment.name} ----- + +! Cycle the sequence to avoid negative length. +seqedit, sequence=LHCB{accel.beam}; +flatten; +cycle, start={self.segment.start}; +endedit; + +use, period = LHCB{accel.beam}; +option, echo; + +twiss; +exec, save_initial_and_final_values( + LHCB{accel.beam}, + {self.segment.start}, + {self.segment.end}, + "{measurement_path}", + biniLHCB{accel.beam}, + bendLHCB{accel.beam} +); + +exec, extract_segment_sequence( + LHCB{accel.beam}, + forward_LHCB{accel.beam}, + backward_LHCB{accel.beam}, + {self.segment.start}, + {self.segment.end}, +); + +beam, particle = proton, sequence=forward_LHCB{accel.beam}, energy = {MADX_ENERGY_VAR}, bv={accel.beam_direction:d}; +beam, particle = proton, sequence=backward_LHCB{accel.beam}, energy = {MADX_ENERGY_VAR}, bv={accel.beam_direction:d}; + +exec, twiss_segment(forward_LHCB{accel.beam}, "{twiss_forward_path}", biniLHCB{accel.beam}); +exec, twiss_segment(backward_LHCB{accel.beam}, "{twiss_backward_path}", bendLHCB{accel.beam}); - madx_script += "\n".join([ - "", - f"! ----- Segment-by-Segment propagation for {self.segment.name} -----", - "", - "! 
Cycle the sequence to avoid negative length.", - f"seqedit, sequence=LHCB{accel.beam};", - "flatten;", - f"cycle, start={self.segment.start};", - "endedit;", - "", - f"use, period = LHCB{accel.beam};", - "option, echo;", - "", - "twiss;", - "exec, save_initial_and_final_values(", - f" LHCB{accel.beam},", - f" {self.segment.start},", - f" {self.segment.end}, ", - f" \"{accel.model_dir / self.measurement_madx!s}\",", - f" biniLHCB{accel.beam},", - f" bendLHCB{accel.beam}", - ");", - "", - "exec, extract_segment_sequence(", - f" LHCB{accel.beam},", - f" forward_LHCB{accel.beam},", - f" backward_LHCB{accel.beam},", - f" {self.segment.start},", - f" {self.segment.end},", - ");", - "", - f"beam, particle = proton, sequence=forward_LHCB{accel.beam}, energy = {MADX_ENERGY_VAR}, bv={accel.beam_direction:d};", - f"beam, particle = proton, sequence=backward_LHCB{accel.beam}, energy = {MADX_ENERGY_VAR}, bv={accel.beam_direction:d};", - "", - f"exec, twiss_segment(forward_LHCB{accel.beam}, \"{self.twiss_forward!s}\", biniLHCB{accel.beam});", - f"exec, twiss_segment(backward_LHCB{accel.beam}, \"{self.twiss_backward!s}\", bendLHCB{accel.beam});", - "", - ]) +""" if self.corrections is not None: - madx_script += "\n".join([ - f"call, file=\"{self.corrections_madx!s}\";", - f"exec, twiss_segment(forward_LHCB{accel.beam}, " - f"\"{self.twiss_forward_corrected}\", biniLHCB{accel.beam});", - f"exec, twiss_segment(backward_LHCB{accel.beam}, " - f"\"{self.twiss_backward_corrected}\", bendLHCB{accel.beam});", - "", - ]) + corrections_path = self.resolve_madx_path(self.output_dir / self.corrections_madx) + twiss_forward_corr_path = self.resolve_madx_path( + self.output_dir / self.twiss_forward_corrected + ) + twiss_backward_corr_path = self.resolve_madx_path( + self.output_dir / self.twiss_backward_corrected + ) + madx_script += f""" +call, file = "{corrections_path}"; +exec, twiss_segment(forward_LHCB{accel.beam}, "{twiss_forward_corr_path}", biniLHCB{accel.beam}); +exec, twiss_segment(backward_LHCB{accel.beam}, "{twiss_backward_corr_path}", bendLHCB{accel.beam}); + +""" return madx_script diff --git a/omc3/model/model_creators/ps_model_creator.py b/omc3/model/model_creators/ps_model_creator.py index 15d4cfbf..8d398589 100644 --- a/omc3/model/model_creators/ps_model_creator.py +++ b/omc3/model/model_creators/ps_model_creator.py @@ -4,6 +4,7 @@ This module provides convenience functions for model creation of the ``PS``. """ + from __future__ import annotations import logging @@ -22,43 +23,52 @@ class PsModelCreator(PsBaseModelCreator): def get_madx_script(self) -> str: accel: Ps = self.accel - if (accel.energy is None): - raise RuntimeError("PS model creation currently relies on manual Energy management. Please provide the --energy ENERGY flag") + if accel.energy is None: + raise RuntimeError( + "PS model creation currently relies on manual Energy management. 
Please provide the --energy ENERGY flag" + ) madx_script = self.get_base_madx_script() replace_dict = { "USE_ACD": str(int(accel.excitation == AccExcitationMode.ACD)), "DPP": accel.dpp, - "OUTPUT": str(accel.model_dir), + "OUTPUT": self.resolve_madx_path(self.output_dir), } madx_template = accel.get_file("twiss.mask").read_text() madx_script += madx_template % replace_dict return madx_script - def get_base_madx_script(self): + def get_base_madx_script(self) -> str: accel: Ps = self.accel use_acd = accel.excitation == AccExcitationMode.ACD replace_dict = { - "FILES_DIR": str(accel.get_dir()), + "FILES_DIR": self.resolve_madx_path(accel.get_dir()), "USE_ACD": str(int(use_acd)), "NAT_TUNE_X": accel.nat_tunes[0], "NAT_TUNE_Y": accel.nat_tunes[1], "KINETICENERGY": 0 if accel.energy is None else accel.energy, "USE_CUSTOM_PC": "0" if accel.energy is None else "1", - "ACC_MODELS_DIR": accel.acc_model_path, - "BEAM_FILE": accel.beam_file, - "STR_FILE": accel.str_file, + "ACC_MODELS_DIR": self.resolve_madx_path(accel.acc_model_path), + "BEAM_FILE": self.resolve_madx_path(accel.beam_file), + "STR_FILE": self.resolve_madx_path(accel.str_file), "DRV_TUNE_X": "0", "DRV_TUNE_Y": "0", "MODIFIERS": "", - "USE_MACROS": "0" if accel.year == "2018" else "1", # 2018 doesn't provide a macros file + "USE_MACROS": "0" + if accel.year == "2018" + else "1", # 2018 doesn't provide a macros file "PS_TUNE_METHOD": accel.tune_method, } if accel.modifiers: - replace_dict["MODIFIERS"] = '\n'.join([f" call, file = '{m}'; {MODIFIER_TAG}" for m in accel.modifiers]) + replace_dict["MODIFIERS"] = "\n".join( + [ + f" call, file = '{self.resolve_madx_path(modifier)}'; {MODIFIER_TAG}" + for modifier in accel.modifiers + ] + ) if use_acd: replace_dict["DRV_TUNE_X"] = accel.drv_tunes[0] replace_dict["DRV_TUNE_Y"] = accel.drv_tunes[1] - mask = accel.get_file('base.madx').read_text() + mask = accel.get_file("base.madx").read_text() return mask % replace_dict def prepare_run(self) -> None: diff --git a/omc3/model/model_creators/psbooster_model_creator.py b/omc3/model/model_creators/psbooster_model_creator.py index 67433660..3c2ee973 100644 --- a/omc3/model/model_creators/psbooster_model_creator.py +++ b/omc3/model/model_creators/psbooster_model_creator.py @@ -4,6 +4,7 @@ This module provides convenience functions for model creation of the ``PSB``. 
""" + from __future__ import annotations import shutil @@ -27,37 +28,38 @@ def get_madx_script(self) -> str: "USE_ACD": str(int(accel.excitation == AccExcitationMode.ACD)), "RING": accel.ring, "DPP": accel.dpp, - "OUTPUT": str(accel.model_dir), + "OUTPUT": self.resolve_madx_path(self.output_dir), } madx_template = accel.get_file("twiss.mask").read_text() madx_script += madx_template % replace_dict return madx_script - def get_base_madx_script(self): + def get_base_madx_script(self) -> str: accel: Psbooster = self.accel use_acd = accel.excitation == AccExcitationMode.ACD replace_dict = { - "FILES_DIR": str(accel.get_dir()), + "FILES_DIR": self.resolve_madx_path(accel.get_dir()), "USE_ACD": str(int(use_acd)), "RING": str(accel.ring), "NAT_TUNE_X": accel.nat_tunes[0], "NAT_TUNE_Y": accel.nat_tunes[1], "KINETICENERGY": 0 if accel.energy is None else accel.energy, "USE_CUSTOM_PC": "0" if accel.energy is None else "1", - "ACC_MODELS_DIR": accel.acc_model_path, - "BEAM_FILE": accel.beam_file, - "STR_FILE": accel.str_file, + "ACC_MODELS_DIR": self.resolve_madx_path(accel.acc_model_path), + "BEAM_FILE": self.resolve_madx_path(accel.beam_file), + "STR_FILE": self.resolve_madx_path(accel.str_file), "DRV_TUNE_X": "", "DRV_TUNE_Y": "", } if use_acd: replace_dict["DRV_TUNE_X"] = accel.drv_tunes[0] replace_dict["DRV_TUNE_Y"] = accel.drv_tunes[1] - mask = accel.get_file('base.mask').read_text() + mask = accel.get_file("base.mask").read_text() return mask % replace_dict def prepare_run(self): super().prepare_run() shutil.copy( - self.accel.get_file(f"error_deff_ring{self.accel.ring}.txt"), self.accel.model_dir / ERROR_DEFFS_TXT + self.accel.get_file(f"error_deff_ring{self.accel.ring}.txt"), + self.accel.model_dir / ERROR_DEFFS_TXT, ) diff --git a/omc3/model/model_creators/sps_model_creator.py b/omc3/model/model_creators/sps_model_creator.py index b11cede1..8f592ea8 100644 --- a/omc3/model/model_creators/sps_model_creator.py +++ b/omc3/model/model_creators/sps_model_creator.py @@ -2,6 +2,7 @@ SPS Model Creator ----------------- """ + from __future__ import annotations from abc import ABC @@ -66,8 +67,7 @@ def check_accelerator_instance(self) -> None: # hint: if modifiers are given as absolute paths: `path / abs_path` returns `abs_path` (jdilly) # hint: modifiers were at this point probably already checked (and adapted) in `prepare_run()`. - inexistent_modifiers = [ - m for m in accel.modifiers if not (accel.model_dir / m).exists()] + inexistent_modifiers = [m for m in accel.modifiers if not (accel.model_dir / m).exists()] if len(inexistent_modifiers): raise AcceleratorDefinitionError( "The following modifier files do not exist: " @@ -83,14 +83,12 @@ def check_accelerator_instance(self) -> None: ) def prepare_options(self, opt) -> bool: - """ Use the fetcher to list choices if requested. """ + """Use the fetcher to list choices if requested.""" accel: Sps = self.accel if opt.fetch == Fetcher.PATH: if opt.path is None: - raise AcceleratorDefinitionError( - "Path fetcher chosen, but no path proivided." 
- ) + raise AcceleratorDefinitionError("Path fetcher chosen, but no path provided.") acc_model_path = Path(opt.path) elif opt.fetch == Fetcher.AFS: @@ -100,7 +98,7 @@ def prepare_options(self, opt) -> bool: msg="No optics tag (flag --year) given", selection=accel.year, list_choices=opt.list_choices, - predicate=Path.is_dir + predicate=Path.is_dir, ) # raises AcceleratorDefinitionError if not valid choice else: LOGGER.warning( @@ -119,29 +117,32 @@ def prepare_options(self, opt) -> bool: msg="No/Unknown strength file (flag --str_file) selected", selection=None, list_choices=opt.list_choices, - predicate=get_check_suffix_func(".str") + predicate=get_check_suffix_func(".str"), ) # Set the found paths --- accel.acc_model_path = acc_model_path - def get_base_madx_script(self): + def get_base_madx_script(self) -> str: accel: Sps = self.accel use_excitation = accel.excitation != AccExcitationMode.FREE + seq_path = self.resolve_madx_path(accel.acc_model_path / "sps.seq") # The very basics --- madx_script = ( "! Load Base Sequence and Strengths/Modifiers ---\n" "option, -echo; ! suppress output from base sequence loading to keep the log small\n\n" - f"call, file = '{accel.acc_model_path / 'sps.seq'!s}';\n" + f"call, file = '{seq_path}';\n" ) if accel.modifiers is not None: # includes the strengths file for modifier in accel.modifiers: - madx_script += f"call, file = '{accel.acc_model_path / modifier!s}'; {MODIFIER_TAG}\n" + modifier_path = self.resolve_madx_path(modifier) + madx_script += f"call, file = '{modifier_path}'; {MODIFIER_TAG}\n" + toolkit_path = self.resolve_madx_path(accel.acc_model_path / "toolkit" / "macro.madx") madx_script += ( - f"call, file ='{accel.acc_model_path / 'toolkit' / 'macro.madx'!s}';\n" + f"call, file ='{toolkit_path}';\n" "option, echo;\n\n" "! Create Beam ---\n" "beam;\n\n" @@ -154,8 +155,8 @@ def get_base_madx_script(self): if use_excitation or accel.drv_tunes is not None: # allow user to modify script and enable excitation, if driven tunes are given madx_script += ( - f"qxd={accel.drv_tunes[0]%1:.3f};\n" - f"qyd={accel.drv_tunes[1]%1:.3f};\n\n" + f"qxd={accel.drv_tunes[0] % 1:.3f};\n" + f"qyd={accel.drv_tunes[1] % 1:.3f};\n\n" "! Prepare ACDipole Elements ---\n" f"use_acd={use_excitation:d}; ! Switch the use of AC dipole\n\n" "hacmap21 = 0;\n" @@ -166,11 +167,7 @@ def get_base_madx_script(self): "ZKV_MARKER: marker;\n\n" ) - madx_script += ( - "! Cycle Sequence ---\n" - "seqedit, sequence=sps;\n" - " flatten;\n" - ) + madx_script += "! Cycle Sequence ---\nseqedit, sequence=sps;\n flatten;\n" if use_excitation or accel.drv_tunes is not None: # allow user to modify script and enable excitation, if driven tunes are given @@ -194,48 +191,45 @@ def get_base_madx_script(self): # f" install, element={marker_name}, at=-{self._start_bpm}->L, from={self._start_bpm};\n" # f" cycle, start = {marker_name};\n" # ) - madx_script += ( - f" cycle, start = {self._start_bpm};\n" - ) + madx_script += f" cycle, start = {self._start_bpm};\n" - madx_script += ( - "endedit;\n" - "use, sequence=sps;\n\n" - ) + madx_script += "endedit;\nuse, sequence=sps;\n\n" # Match tunes --- madx_script += ( - "! Match Tunes ---\n" - "exec, sps_match_tunes(qx0,qy0);\n\n" + "! 
Match Tunes ---\nexec, sps_match_tunes(qx0,qy0);\n\n" # "twiss, file = 'sps.tfs';\n" # also not sure if needed ) return madx_script - def get_madx_script(self): + def get_madx_script(self) -> str: accel: Sps = self.accel use_excitation = accel.excitation != AccExcitationMode.FREE madx_script = self.get_base_madx_script() + twiss_path = self.resolve_madx_path(self.output_dir / TWISS_DAT) + twiss_elements_path = self.resolve_madx_path(self.output_dir / TWISS_ELEMENTS_DAT) madx_script += ( - "! Create twiss data files ---\n" + "! Create twiss data files ---\n" f"{self._get_select_command(pattern=accel.RE_DICT[AccElementTypes.BPMS])}" - f"twiss, file = {accel.model_dir / TWISS_DAT};\n" + f"twiss, file = '{twiss_path}';\n" "\n" f"{self._get_select_command()}" - f"twiss, file = {accel.model_dir / TWISS_ELEMENTS_DAT};\n" + f"twiss, file = '{twiss_elements_path}';\n" ) if use_excitation or accel.drv_tunes is not None: # allow user to modify script and enable excitation, if driven tunes are given + twiss_ac_path = self.resolve_madx_path(self.output_dir / TWISS_AC_DAT) madx_script += ( f"if(use_acd == 1){{\n" - " betxac = table(twiss, hacmap, betx);\n" - " betyac = table(twiss, vacmap, bety);\n" + " betxac = table(twiss, hacmap, betx);\n" + " betyac = table(twiss, vacmap, bety);\n" f" hacmap21 := 2*(cos(2*pi*qxd)-cos(2*pi*qx0))/(betxac*sin(2*pi*qx0));\n" f" vacmap43 := 2*(cos(2*pi*qyd)-cos(2*pi*qy0))/(betyac*sin(2*pi*qy0));\n" "\n" f"{self._get_select_command(pattern=accel.RE_DICT[AccElementTypes.BPMS], indent=4)}" - f" twiss, file = {accel.model_dir / TWISS_AC_DAT};\n" + f" twiss, file = '{twiss_ac_path}';\n" f"}}\n" ) return madx_script @@ -246,11 +240,13 @@ class SpsCorrectionModelCreator(CorrectionModelCreator, SpsModelCreator): Creates an updated model from multiple changeparameters inputs (used in iterative correction). """ + def prepare_run(self) -> None: # As the matched/corrected model is created in the same directory as the original model, # we do not need to prepare as much. self.check_accelerator_instance() + class SpsSegmentCreator(SegmentCreator, SpsModelCreator): _sequence_name = "sps" _start_bpm = None # prohibits first cycling (which inserts an additional element leading to negative drifts) diff --git a/omc3/mqt_extractor.py b/omc3/mqt_extractor.py new file mode 100644 index 00000000..39c2e4c0 --- /dev/null +++ b/omc3/mqt_extractor.py @@ -0,0 +1,190 @@ +r""" +MQT Extractor +------------- + +Entrypoint to extract MQT (Quadrupole Trim) knob values from ``NXCALS`` at a given time, +and optionally write them out to a file. +The data is fetched from ``NXCALS`` through PySpark queries and converted to K-values using ``LSA``. + +MQT magnets are trim quadrupoles located in the LHC arcs, used for fine-tuning the optics. +There are two types per arc (focusing 'f' and defocusing 'd'), resulting in 16 MQT magnets +per beam (8 arcs x 2 types). + +.. note:: + Please note that access to the GPN is required to use this functionality. + +**Arguments:** + +*--Required--* + +- **beam** *(int)*: + + The beam number (1 or 2). + + +- **time** *(str)*: + + At what time to extract the MQT knobs. Accepts ISO-format (YYYY-MM- + DDThh:mm:ss) with timezone, timestamp or 'now'. Timezone must be + specified for ISO-format (e.g. +00:00 for UTC). + + default: ``now`` + + +*--Optional--* + +- **output** *(PathOrStr)*: + + Specify user-defined output path. This should probably be + `model_dir/mqts.madx` + + +- **timedelta** *(str)*: + + Add this timedelta to the given time. 
The format of timedelta is + '((\d+)(\w))+' with the second token being one of s(seconds), + m(minutes), h(hours), d(days), w(weeks), M(months) e.g. 7m = 7 minutes, + 1d = 1 day, 7m30s = 7 min 30 secs. A prefix '_' specifies a negative + timedelta. This allows for easily getting the setting e.g. 2h ago: + '_2h' while setting the `time` argument to 'now' (default). + + +- **delta_days** *(float)*: + + Number of days to look back for data in NXCALS. + + default: ``0.25`` (i.e. 6 hours) + +""" + +from __future__ import annotations + +import argparse +from pathlib import Path +from typing import TYPE_CHECKING + +import tfs +from generic_parser import EntryPointParameters, entrypoint + +from omc3.nxcals.mqt_extraction import get_mqt_vals, knobs_to_madx +from omc3.utils.iotools import PathOrStr +from omc3.utils.logging_tools import get_logger +from omc3.utils.mock import cern_network_import +from omc3.utils.time_tools import parse_time + +if TYPE_CHECKING: + from datetime import datetime + +spark_session_builder = cern_network_import("nxcals.spark_session_builder") + +LOGGER = get_logger(__name__) + +USAGE_EXAMPLES = """Usage Examples: + +python -m omc3.mqt_extractor --beam 1 --time 2022-05-04T14:00+00:00 + extracts the MQT knobs for beam 1 at 14h on May 4th 2022 + +python -m omc3.mqt_extractor --beam 2 --time now --timedelta _2h + extracts the MQT knobs for beam 2 as of 2 hours ago + +python -m omc3.mqt_extractor --beam 1 --time now --output model_dir/mqts.madx + extracts the current MQT settings for beam 1 and writes to file +""" + + +def get_params(): + return EntryPointParameters( + beam={ + "type": int, + "help": "The beam number (1 or 2).", + }, + time={ + "type": str, + "help": ( + "At what time to extract the MQT knobs. " + "Accepts ISO-format (YYYY-MM-DDThh:mm:ss) with timezone, timestamp or 'now'. " + "Timezone must be specified for ISO-format (e.g. +00:00 for UTC)." + ), + "default": "now", + }, + timedelta={ + "type": str, + "help": ( + "Add this timedelta to the given time. " + "The format of timedelta is '((\\d+)(\\w))+' " + "with the second token being one of " + "s(seconds), m(minutes), h(hours), d(days), w(weeks), M(months) " + "e.g. 7m = 7 minutes, 1d = 1 day, 7m30s = 7 min 30 secs. " + "A prefix '_' specifies a negative timedelta. " + "This allows for easily getting the setting " + "e.g. 2h ago: '_2h' while setting the `time` argument to 'now' (default)." + ), + }, + output={ + "type": PathOrStr, + "help": ( + "Specify user-defined output path. This should probably be `model_dir/mqts.madx`" + ), + }, + delta_days={ + "type": float, + "help": "Number of days to look back for data in NXCALS.", + "default": 0.25, + }, + ) + + +@entrypoint( + get_params(), + strict=True, + argument_parser_args={ + "epilog": USAGE_EXAMPLES, + "formatter_class": argparse.RawDescriptionHelpFormatter, + "prog": "MQT Extraction Tool.", + }, +) +def main(opt) -> tfs.TfsDataFrame: + """ + Main MQT extracting function. + + Retrieves MQT (Quadrupole Trim) knob values from NXCALS for a specific time and beam, + and optionally saves them to a file. + + Returns: + tfs.TfsDataFrame: DataFrame containing the extracted MQT knob values with columns + for madx name, value, timestamp, and power converter name. 
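+
+    Example (illustrative; like other omc3 entrypoints, this function can also
+    be called from python with keyword arguments):
+
+    >>> df = main(beam=1, time="2022-05-04T14:00+00:00")  # doctest: +SKIP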
+ """ + spark = spark_session_builder.get_or_create(conf={"spark.ui.showConsoleProgress": "false"}) + time = parse_time(opt.time, opt.timedelta) + + LOGGER.info(f"---- EXTRACTING MQT KNOBS @ {time} for Beam {opt.beam} ----") + mqt_vals = get_mqt_vals(spark, time, opt.beam, delta_days=opt.delta_days) + + # Convert to TfsDataFrame for consistency with knob_extractor + mqt_df = tfs.TfsDataFrame( + index=[result.name for result in mqt_vals], + columns=["madx", "value", "timestamp", "pc_name"], + headers={"EXTRACTION_TIME": time, "BEAM": opt.beam}, + ) + for result in mqt_vals: + mqt_df.loc[result.name, "madx"] = result.name + mqt_df.loc[result.name, "value"] = result.value + mqt_df.loc[result.name, "timestamp"] = result.timestamp + mqt_df.loc[result.name, "pc_name"] = result.pc_name + + if opt.output: + _write_mqt_file(opt.output, mqt_vals, time, opt.beam) + + return mqt_df + + +def _write_mqt_file(output: Path | str, mqt_vals, time: datetime, beam: int): + """Write MQT knobs to a MAD-X file.""" + with Path(output).open("w") as outfile: + outfile.write("!! --- MQT knobs extracted by mqt_extractor\n") + outfile.write(f"!! --- extracted MQT knobs for time {time}, beam {beam}\n\n") + outfile.write(knobs_to_madx(mqt_vals)) + + +if __name__ == "__main__": + main() diff --git a/omc3/nxcals/__init__.py b/omc3/nxcals/__init__.py new file mode 100644 index 00000000..6e60abe9 --- /dev/null +++ b/omc3/nxcals/__init__.py @@ -0,0 +1,2 @@ +# Importing * is a bad practice and you should be punished for using it +__all__ = [] diff --git a/omc3/nxcals/constants.py b/omc3/nxcals/constants.py new file mode 100644 index 00000000..43616f7a --- /dev/null +++ b/omc3/nxcals/constants.py @@ -0,0 +1 @@ +EXTRACTED_MQTS_FILENAME: str = "extracted_mqts.str" diff --git a/omc3/nxcals/knob_extraction.py b/omc3/nxcals/knob_extraction.py new file mode 100644 index 00000000..c6005566 --- /dev/null +++ b/omc3/nxcals/knob_extraction.py @@ -0,0 +1,333 @@ +""" +Knob Extraction +--------------- + +This module provides functionality to extract knob values from NXCALS for the LHC and +convert them to MAD-X compatible format using LSA services. + +It handles retrieval of raw variable data from NXCALS, conversion of power converter +currents to K-values, and mapping of power converter names to MAD-X naming +conventions. +""" + +from __future__ import annotations + +import logging +from dataclasses import dataclass +from datetime import datetime, timedelta, timezone +from typing import TYPE_CHECKING + +import jpype +import pandas as pd +from pyspark.sql import functions +from pyspark.sql.window import Window + +from omc3.utils.mock import cern_network_import + +if TYPE_CHECKING: + from pyspark.sql import SparkSession + +pjlsa = cern_network_import("pjlsa") +builders = cern_network_import("nxcals.api.extraction.data.builders") + +LOGGER = logging.getLogger(__name__) + + +@dataclass +class NXCALSResult: + name: str + value: float + timestamp: pd.Timestamp + pc_name: str + + +# High-level Knob Extraction --------------------------------------------------- + + +def get_knob_vals( + spark: SparkSession, + time: datetime, + beam: int, + patterns: list[str], + expected_knobs: set[str] | None = None, + log_prefix: str = "", + delta_days: float = 0.25, +) -> list[NXCALSResult]: + """ + Retrieve knob values for a given beam and time using specified patterns for the LHC. + + This is the main entry point for extracting magnet knob values from NXCALS. The function + performs a complete workflow: + + 1. 
Queries NXCALS for power converter current measurements (I_MEAS) using variable patterns + 2. Retrieves the beam energy at the specified time + 3. Converts currents to K-values (integrated quadrupole strengths) using LSA + 4. Maps power converter names to MAD-X naming conventions + 5. Returns knob values with their timestamps + + The difference between patterns and knob names: + - **patterns**: NXCALS variable patterns (e.g., "RPMBB.UA%.RQT%.A%B1:I_MEAS") used to query + raw power converter current measurements. These follow CERN naming conventions and may + include wildcards (%). + - **expected_knobs**: MAD-X element names (e.g., "kqt12.a12b1") representing the final + knob names as used in MAD-X scripts. These are lowercase, simplified names. + + Args: + spark (SparkSession): Active Spark session for NXCALS queries. + time (datetime): Time to retrieve data for. + beam (int): Beam number (1 or 2). + patterns (list[str]): List of NXCALS variable patterns to query for power converter + currents. Patterns can include SQL-like wildcards (%). Example: + "RPMBB.UA%.RQT%.A%B1:I_MEAS" matches all MQT quadrupole trim magnets for beam 1. + expected_knobs (set[str] | None): Set of expected MAD-X knob names to validate and filter + results. If None, returns all found knobs without validation. + log_prefix (str): Prefix for logging messages to distinguish different extraction runs. + delta_days (float): Number of days to look back for data. Default is 0.25. + + Returns: + list[NXCALSResult]: List of NXCALSResult objects containing MAD-X knob names, K-values, + timestamps, and power converter names. + """ + LOGGER.info(f"{log_prefix}Starting data retrieval for beam {beam} at time {time}") + + # Retrieve raw current measurements from NXCALS + combined_vars: list[NXCALSResult] = [] + for pattern in patterns: + LOGGER.info(f"{log_prefix}Getting currents for pattern {pattern} at {time}") + raw_vars = get_raw_vars(spark, time, pattern, delta_days) + combined_vars.extend(raw_vars) + + # Get beam energy for K-value calculations + energy, _ = get_energy(spark, time) + + # Prepare currents dict for LSA + currents = {strip_i_meas(var.name): var.value for var in combined_vars} + + LOGGER.info( + f"{log_prefix}Calculating K values for {len(currents)} power converters with energy {energy} GeV" + ) + + # Calculate K values using LSA + lsa_client = pjlsa.LSAClient() + k_values = calc_k_from_iref(lsa_client, currents, energy) + + # Transform K values keys to MAD-X format + k_values_madx = {map_pc_name_to_madx(key): value for key, value in k_values.items()} + + found_keys = set(k_values_madx.keys()) + + if expected_knobs is None: + expected_knobs = found_keys + + if missing_keys := expected_knobs - found_keys: + LOGGER.warning(f"{log_prefix}Missing K-values for knobs: {missing_keys}") + + if unknown_keys := found_keys - expected_knobs: + LOGGER.warning(f"{log_prefix}Unknown K-values found: {unknown_keys}") + + # Build result list with timestamps + timestamps = {map_pc_name_to_madx(var.name): var.timestamp for var in combined_vars} + pc_names = {map_pc_name_to_madx(var.name): var.pc_name for var in combined_vars} + results = [] + for madx_name in expected_knobs: + value = k_values_madx.get(madx_name) + timestamp = timestamps.get(madx_name) + pc_name = pc_names.get(madx_name) + + if value is not None and timestamp is not None and pc_name is not None: + results.append(NXCALSResult(madx_name, value, timestamp, pc_name)) + else: + LOGGER.warning(f"{log_prefix}Missing data for {madx_name}") + + return results + + +# NXCALS 
Data Retrieval -------------------------------------------------------- + + +def get_raw_vars( + spark: SparkSession, time: datetime, var_name: str, delta_days: float = 0.25 +) -> list[NXCALSResult]: + """ + Retrieve raw variable values from NXCALS. + + Args: + spark (SparkSession): Active Spark session. + time (datetime): Python datetime (timezone-aware recommended). + var_name (str): Name or pattern of the variable(s) to retrieve. + delta_days (float): Number of days to look back for data. Default is 0.25. + + Returns: + list[NXCALSResult]: List of NXCALSResult objects containing variable name, value, timestamp, + and power converter name for the latest sample of each matching variable at the given time. + + Raises: + RuntimeError: If no data is found for the variable in the given interval. + You may need to increase the delta_days. + """ + + # Ensure time is in UTC + if time.tzinfo is None: + raise ValueError("Datetime object must be timezone-aware") + time = time.astimezone(timezone.utc) + + # Look back delta_days, may need up to 1 day to find data + start_time = time - timedelta(days=delta_days) + end_time = time + LOGGER.info(f"Retrieving raw variables {var_name} from {start_time} to {end_time}") + + df = ( + builders.DataQuery.builder(spark) + .variables() + .system("CMW") + .nameLike(var_name) + .timeWindow(start_time, end_time) + .build() + ) + + # Avoid full count() – just check if we have at least one row + if df is None or not df.take(1): + raise RuntimeError( + f"No data found for {var_name} in {start_time} to {end_time}. " + f"You may need to increase the delta_days from {delta_days} days." + ) + + LOGGER.info(f"Raw variables {var_name} retrieved successfully.") + + # Get the latest sample for each variable + window_spec = Window.partitionBy("nxcals_variable_name").orderBy( + functions.col("nxcals_timestamp").desc() + ) + latest_df = ( + df.withColumn("row_num", functions.row_number().over(window_spec)) + .filter(functions.col("row_num") == 1) + .select("nxcals_variable_name", "nxcals_value", "nxcals_timestamp") + ) + + results = [] + for row in latest_df.collect(): + full_varname = row["nxcals_variable_name"] + raw_val = float(row["nxcals_value"]) + ts = pd.to_datetime(row["nxcals_timestamp"], unit="ns", utc=True) + results.append(NXCALSResult(full_varname, raw_val, ts, strip_i_meas(full_varname))) + LOGGER.info(f"LHC value retrieved: {full_varname} = {raw_val:.2f} at {ts}") + + return results + + +def get_energy(spark: SparkSession, time: datetime) -> tuple[float, pd.Timestamp]: + """ + Retrieve the beam energy of the LHC from NXCALS. + + Args: + spark (SparkSession): Active Spark session. + time (datetime): Python datetime (timezone-aware recommended). + + Returns: + tuple[float, pd.Timestamp]: Beam energy in GeV and its timestamp. + + Raises: + RuntimeError: If no energy data is found. + """ + scale = 0.120 + raw_vars = get_raw_vars(spark, time, "HX:ENG") + if not raw_vars: + raise RuntimeError("No energy data found.") + return raw_vars[0].value * scale, raw_vars[0].timestamp + + +# LSA K-value Calculation ------------------------------------------------------ + + +def calc_k_from_iref(lsa_client, currents: dict[str, float], energy: float) -> dict[str, float]: + """ + Calculate K values in the LHC from IREF using the LSA service. + + Args: + lsa_client: The LSA client instance. + currents (dict[str, float]): Dictionary of current values keyed by variable name. + energy (float): The beam energy in GeV. 
+ + Returns: + dict[str, float]: Dictionary of K values keyed by variable name. + """ + # 1) Use the **instance** PJLSA already created + lhc_service = lsa_client._lhcService + + # 2) Build a java.util.HashMap (not a Python dict) + j_hash_map = jpype.JClass("java.util.HashMap") + j_double = jpype.JClass("java.lang.Double") + + jmap = j_hash_map() + for power_converter, current in currents.items(): + if current is None: + raise ValueError(f"Current for {power_converter} is None") + # ensure primitive-compatible double values + jmap.put(power_converter, j_double.valueOf(current)) # boxed; Java unboxes internally + + # 3) Call: Map calculateKfromIREF(Map, double) + out = lhc_service.calculateKfromIREF(jmap, float(energy)) + + # 4) Convert java.util.Map -> Python dict + res = {} + it = out.entrySet().iterator() + while it.hasNext(): + e = it.next() + res[str(e.getKey())] = float(e.getValue()) + return res + + +# Utility Functions ------------------------------------------------------------ + + +def strip_i_meas(text: str) -> str: + """ + Remove the I_MEAS suffix from a variable name. + + Args: + text (str): The variable name possibly ending with ':I_MEAS'. + + Returns: + str: The variable name without the ':I_MEAS' suffix. + """ + return text.removesuffix(":I_MEAS") + + +# Note: this will have to be updated if we ever want to support other magnet types +# such as dipoles, sextupoles, octupoles, etc. +def map_pc_name_to_madx(pc_name: str) -> str: + """ + Convert an LHC power converter name or circuit name to its corresponding MAD-X name. + + This function processes the input name by removing the ':I_MEAS' suffix if present, + extracting the circuit name from full power converter names (starting with 'RPMBB' or 'RPL'), + applying specific string replacements for MAD-X compatibility, and converting the result to lowercase. + + Args: + pc_name (str): The power converter or circuit name to convert. + + Returns: + str: The converted MAD-X name. + + Examples: + >>> map_pc_name_to_madx("RPMBB.UJ33.RCBH10.L1B1:I_MEAS") + 'acbh10.l1b1' + >>> map_pc_name_to_madx("RQT12.L5B1") + 'kqt12.l5b1' + """ + # Remove the ':I_MEAS' suffix if it exists + pc_name = strip_i_meas(pc_name) + + # Extract circuit name from full power converter names + if "." in pc_name: + parts = pc_name.split(".") + # For full names like 'RPMBB.UJ33.RCBH10.L1B1', take from the third part onward, else use pc_name + circuit_name = ".".join(parts[2:]) if len(parts) > 2 else pc_name + else: + circuit_name = pc_name + + # Apply MAD-X specific replacements + replacements = {"RQ": "KQ", "RCB": "ACB"} + for old, new in replacements.items(): + circuit_name = circuit_name.replace(old, new) + return circuit_name.lower() diff --git a/omc3/nxcals/mqt_extraction.py b/omc3/nxcals/mqt_extraction.py new file mode 100644 index 00000000..f2e5026c --- /dev/null +++ b/omc3/nxcals/mqt_extraction.py @@ -0,0 +1,99 @@ +""" +Extraction of MQT knobs from NXCALS. +------------------------------------ + +This module provides functions to retrieve MQT (Quadrupole Trim) knob values for the LHC +for a specified beam and time using NXCALS and LSA. + +There are two types per arc (focusing 'f' and defocusing 'd'), resulting in 16 MQT circuits +per beam (8 arcs * 2 types). + +The extraction uses the underlying `knob_extraction` module with MQT-specific patterns. 
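+
+A minimal usage sketch (assumes CERN network access and an active Spark session;
+the session-builder call mirrors the one in ``omc3.mqt_extractor``)::
+
+    from datetime import datetime, timezone
+    from nxcals import spark_session_builder
+    from omc3.nxcals.mqt_extraction import get_mqt_vals, knobs_to_madx
+
+    spark = spark_session_builder.get_or_create()
+    time = datetime(2025, 4, 24, 16, 0, tzinfo=timezone.utc)
+    print(knobs_to_madx(get_mqt_vals(spark, time, beam=1)))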
+""" + +import logging +from datetime import datetime + +# import jpype +from pyspark.sql import SparkSession + +from omc3.nxcals.knob_extraction import NXCALSResult, get_knob_vals + +logger = logging.getLogger(__name__) + + +def generate_mqt_names(beam: int) -> set[str]: + """ + Generate the set of MAD-X MQT (Quadrupole Trim) variable names for a given beam. + + Args: + beam (int): The beam number (1 or 2). + + Returns: + set[str]: A set of MAD-X variable names for MQT magnets, e.g., 'kqt12.a12b1'. + + Raises: + ValueError: If beam is not 1 or 2. + + Examples: + >>> get_mqts(1) + {'kqt12.a12b1', 'kqt12.a23b1', ..., 'kqtd.a81b1'} + """ + if beam not in (1, 2): + raise ValueError("Beam must be 1 or 2") + + types = ["f", "d"] + arcs = [12, 23, 34, 45, 56, 67, 78, 81] + return {f"kqt{t}.a{a}b{beam}" for t in types for a in arcs} + + +def get_mqt_vals( + spark: SparkSession, time: datetime, beam: int, delta_days: float = 0.25 +) -> list[NXCALSResult]: + """ + Retrieve MQT (Quadrupole Trim) knob values from NXCALS for a specific time and beam. + + This function queries NXCALS for current measurements of MQT power converters, + calculates the corresponding K-values (integrated quadrupole strengths) using LSA, + and returns them in MAD-X format with timestamps. + + Args: + spark (SparkSession): Active Spark session for NXCALS queries. + time (datetime): The timestamp for which to retrieve the data (timezone-aware required). + beam (int): The beam number (1 or 2). + delta_days (float): Number of days to look back for data. Default is 0.25. + + Returns: + list[NXCalResult]: List of NXCalResult objects containing the MAD-X knob names, K-values, and timestamps. + + Raises: + ValueError: If beam is not 1 or 2 (propagated from get_mqts), or if time is not timezone-aware. + RuntimeError: If no data is found in NXCALS or LSA calculations fail. + """ + if time.tzinfo is None: + raise ValueError("Time must be timezone-aware.") + + madx_mqts = generate_mqt_names(beam) + pattern = f"RPMBB.UA%.RQT%.A%B{beam}:I_MEAS" + patterns = [pattern] + return get_knob_vals(spark, time, beam, patterns, madx_mqts, "MQT: ", delta_days) + + +def knobs_to_madx(mqt_vals: list[NXCALSResult]) -> str: + """ + Convert a list of NXCalResult objects to a MAD-X script string. + + Args: + mqt_vals: List of NXCalResult objects containing knob values. + + Returns: + A string containing the MAD-X script with knob assignments. + """ + lines = [] + for result in mqt_vals: + timestamp_str = f"{result.timestamp:%Y-%m-%d %H:%M:%S%z}" + value_str = f"{result.value:.10E}".replace("E+", "E") + lines.append( + f"{result.name:<15}= {value_str}; ! powerconverter: {result.pc_name} at {timestamp_str}\n" + ) + return "".join(lines) diff --git a/omc3/utils/iotools.py b/omc3/utils/iotools.py index a5a325ae..059ebfd1 100644 --- a/omc3/utils/iotools.py +++ b/omc3/utils/iotools.py @@ -4,6 +4,7 @@ Helper functions for input/output issues. """ + from __future__ import annotations import json @@ -65,7 +66,7 @@ def copy_item(src_item: Path, dst_item: Path): def glob_regex(path: Path, pattern: str) -> Iterator[str]: - """ Do a glob on the given `path` based on the regular expression `pattern`. + """Do a glob on the given `path` based on the regular expression `pattern`. Returns only the matching filenames (as strings). 
Args: @@ -80,6 +81,7 @@ def glob_regex(path: Path, pattern: str) -> Iterator[str]: class PathOrStr(metaclass=get_instance_faker_meta(Path, str)): """A class that behaves like a Path when possible, otherwise like a string.""" + def __new__(cls, value): value = strip_quotes(value) try: @@ -90,6 +92,7 @@ def __new__(cls, value): class PathOrStrOrDataFrame(metaclass=get_instance_faker_meta(TfsDataFrame, Path, str)): """A class that behaves like a Path when possible, otherwise maybe a TfsDataFrame, otherwise like a string.""" + def __new__(cls, value): value = strip_quotes(value) try: @@ -108,6 +111,7 @@ def __new__(cls, value): class PathOrStrOrDict(metaclass=get_instance_faker_meta(dict, Path, str)): """A class that tries to parse/behaves like a dict when possible, otherwise either like a Path or like a string.""" + def __new__(cls, value): value = strip_quotes(value) try: @@ -125,6 +129,7 @@ def __new__(cls, value): class UnionPathStr(metaclass=get_instance_faker_meta(Path, str)): """A class that can be used as Path and string parser input, but does not convert to path.""" + def __new__(cls, value): return strip_quotes(value) @@ -132,6 +137,7 @@ def __new__(cls, value): class UnionPathStrInt(metaclass=get_instance_faker_meta(Path, str, int)): """A class that can be used as Path, string, int parser input, but does not convert. Very special and used e.g. in the BBQ Input.""" + def __new__(cls, value): return strip_quotes(value) @@ -139,6 +145,7 @@ def __new__(cls, value): class OptionalStr(metaclass=get_instance_faker_meta(str, type(None))): """A class that allows `str` or `None`. Can be used in string-lists when individual entries can be `None`.""" + def __new__(cls, value): value = strip_quotes(value) if isinstance(value, str) and value.lower() == "none": @@ -169,7 +176,7 @@ def strip_quotes(value: Any) -> Any: """ if isinstance(value, str): - value = value.strip("\'\"") # behavior like dict-parser, IMPORTANT FOR EVERY STRING-FAKER + value = value.strip("'\"") # behavior like dict-parser, IMPORTANT FOR EVERY STRING-FAKER return value @@ -196,7 +203,7 @@ def convert_paths_in_dict_to_strings(dict_: dict) -> dict: def replace_in_path(path: Path, old: Path | str, new: Path | str) -> Path: - """ Replace a part of a path with a new path. + """Replace a part of a path with a new path. Useful for example to replace the original path with a path to a symlink or vice versa. Args: @@ -219,7 +226,7 @@ def remove_none_dict_entries(dict_: dict) -> dict: def maybe_add_command(opt: dict, script: str) -> dict: - """ Add a comment ';command' to the opt-dict, + """Add a comment ';command' to the opt-dict, which is the command used to run the script gotten from sys.argv, but only if the executed file (the file that run with the `python` command) equals ``script``, i.e. the script for which you are saving the parameters @@ -234,12 +241,11 @@ def maybe_add_command(opt: dict, script: str) -> dict: if the script names were different, just the original opt. """ if script == sys.argv[0]: - opt[";command"] = " ".join([sys.executable] + sys.argv) # will be sorted to the beginning below + opt[";command"] = " ".join([sys.executable] + sys.argv) return opt -def save_config(output_dir: Path, opt: dict, script: str, - unknown_opt: dict | list = None): +def save_config(output_dir: Path, opt: dict, script: str, unknown_opt: dict | list = None): """ Quick wrapper for ``save_options_to_config``. 
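A short usage sketch for the `maybe_add_command` and `save_config` helpers above (paths, options and the script name are illustrative):

    from pathlib import Path
    from omc3.utils.iotools import save_config

    opt = {"beam": 1, "outputdir": "results"}  # options to persist
    save_config(Path("results"), opt, script="my_analysis.py")

This writes the sorted options to a config file in `results`; the originating command line is recorded only when `my_analysis.py` is the script actually being executed.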
@@ -257,32 +263,36 @@
     save_options_to_config(
         output_dir / formats.get_config_filename(script),
         dict(sorted(opt.items())),
-        unknown=unknown_opt
+        unknown=unknown_opt,
     )
 
 
 def always_true(*args, **kwargs) -> bool:
-    """ A function that is always True. """
+    """A function that is always True."""
     return True
 
 
-def get_check_suffix_func(suffix: str) -> Callable[[Path],bool]:
-    """ Returns a function that checks the suffix of a given path against the suffix. """
+def get_check_suffix_func(suffix: str) -> Callable[[Path], bool]:
+    """Returns a function that checks the suffix of a given path against the suffix."""
+
     def check_suffix(path: Path) -> bool:
         return path.suffix == suffix
+
     return check_suffix
 
 
-def get_check_by_regex_func(pattern: str) -> Callable[[Path],bool]:
-    """ Returns a function that checks the name of a given path against the pattern. """
+def get_check_by_regex_func(pattern: str) -> Callable[[Path], bool]:
+    """Returns a function that checks the name of a given path against the pattern."""
+
     def check(path: Path) -> bool:
         return re.match(pattern, path.name) is not None
+
    return check
 
 
 def load_multiple_jsons(*files) -> dict:
-    """ Load multiple json files into a single dict.
-    In case of duplicate keys, later files overwrite the earlier ones. """
+    """Load multiple json files into a single dict.
+    In case of duplicate keys, later files overwrite the earlier ones."""
     full_dict = {}
     for json_file in files:
         with Path(json_file).open() as json_data:
@@ -291,7 +301,7 @@
 
 def find_file(file_name: Path | str, dirs: Iterable[Path | str]) -> Path:
-    """ Tries to find out if the given file exists, either on its own, or in the given directories.
+    """Tries to find out if the given file exists, either on its own, or in the given directories.
     Returns then the full path of the found file. If not found, raises a ``FileNotFoundError``.
 
     Args:
diff --git a/omc3/utils/time_tools.py b/omc3/utils/time_tools.py
index 14a8669c..d10323cb 100644
--- a/omc3/utils/time_tools.py
+++ b/omc3/utils/time_tools.py
@@ -7,12 +7,18 @@
 """
 from __future__ import annotations
 
-from datetime import datetime, timedelta
+import logging
+import re
+from datetime import datetime, timedelta, timezone
 
 import dateutil.tz as tz
+from dateutil.relativedelta import relativedelta
 
 from omc3.definitions.formats import TIME
 
+LOGGER = logging.getLogger(__name__)
+MINUS_CHARS: tuple[str, ...] = ("_", "-")
+
 # Datetime Conversions #########################################################
 
@@ -78,6 +84,61 @@ def check_tz(localized_dt, timezone):
         f"but was '{localized_dt.tzinfo}'"
     )
 
+
+# Knob extractor specific time tools #############################################
+
+
+def _parse_time_from_str(time_str: str) -> datetime:
+    """Parse time from given string."""
+    # Now? ---
+    if time_str.lower() == "now":
+        return datetime.now(timezone.utc)
+
+    # ISOFormat? ---
+    try:
+        return datetime.fromisoformat(time_str)
+    except (TypeError, ValueError):
+        LOGGER.debug("Could not parse time string as ISO format")
+
+    # Timestamp? ---
+    try:
+        # read an integer input as a unix timestamp; attach UTC so the result is timezone-aware
+        return datetime.fromtimestamp(int(time_str), tz=timezone.utc)
+    except (TypeError, ValueError):
+        LOGGER.debug("Could not parse time string as a timestamp")
+
+    raise ValueError(f"Couldn't read datetime '{time_str}'")
+
+
+def _add_time_delta(time: datetime, delta_str: str) -> datetime:
+    """Parse delta-string and add the time-delta to time."""
+    sign = -1 if delta_str[0] in MINUS_CHARS else 1
+    all_deltas = re.findall(r"(\d+)(\w)", delta_str)  # tuples (value, timeunit-char)
+    # mapping char to the time-unit as accepted by relativedelta,
+    # following ISO-8601 for time durations
+    char2unit = {
+        "s": "seconds",
+        "m": "minutes",
+        "h": "hours",
+        "d": "days",
+        "w": "weeks",
+        "M": "months",
+        "Y": "years",
+    }
+    # add all deltas, which are tuples of (value, timeunit-char)
+    time_parts = {char2unit[delta[1]]: sign * int(delta[0]) for delta in all_deltas}
+    return time + relativedelta(**time_parts)
+
+
+def parse_time(time: str, timedelta: str | None = None) -> datetime:
+    """Parse a time-input, e.g. from the command line, into a timezone-aware datetime."""
+    t = _parse_time_from_str(time)
+    if timedelta:
+        t = _add_time_delta(t, timedelta)
+    if t.tzinfo is None:
+        raise ValueError("Datetime object must be timezone-aware")
+    return t
+
 
 # AccDatetime Classes ##########################################################
 
diff --git a/pyproject.toml b/pyproject.toml
index 49cfeeeb..e2132575 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -63,6 +63,8 @@ dependencies = [
 
 [project.optional-dependencies]
 cern = [
     "jpype1 >= 1.3",
+    "nxcals >= 1.6.6",  # Direct NXCALS access
+    "pyspark >= 3.5",  # Needed when extracting from NXCALS using Spark
     "pytimber >= 3.0",  # NXCALS support
     "pylogbook >= 3.4",
     "kerberos >= 1.3.1",  # requires having krb-5config installed on the system
@@ -74,6 +76,7 @@ optional = [
 ]
 test = [
     "jpype1 >= 1.3",
+    "pyspark >= 3.5",  # needed for test collection when cern extras are not installed
     "pytest >= 7.0",
     "pytest-cov >= 2.9",
     "pytest-timeout >= 1.4",
diff --git a/tests/inputs/knob_extractor/extracted_mqts_b1.str b/tests/inputs/knob_extractor/extracted_mqts_b1.str
new file mode 100644
index 00000000..38fa5bba
--- /dev/null
+++ b/tests/inputs/knob_extractor/extracted_mqts_b1.str
@@ -0,0 +1,16 @@
+kqtf.a78b1 = 5.2952467864E-04; ! powerconverter: RPMBB.UA83.RQTF.A78B1 at 2025-04-24 16:08:00.58
+kqtf.a67b1 = 6.5856960835E-04; ! powerconverter: RPMBB.UA67.RQTF.A67B1 at 2025-04-24 16:08:00.6
+kqtf.a34b1 = -4.7499507672E-04; ! powerconverter: RPMBB.UA43.RQTF.A34B1 at 2025-04-24 16:08:00.6
+kqtf.a45b1 = 1.9335982122E-04; ! powerconverter: RPMBB.UA47.RQTF.A45B1 at 2025-04-24 16:08:00.6
+kqtf.a56b1 = -2.9329827751E-04; ! powerconverter: RPMBB.UA63.RQTF.A56B1 at 2025-04-24 16:08:00.6
+kqtf.a23b1 = -5.4437426272E-04; ! powerconverter: RPMBB.UA27.RQTF.A23B1 at 2025-04-24 16:08:00.58
+kqtf.a12b1 = -1.0715019208E-04; ! powerconverter: RPMBB.UA23.RQTF.A12B1 at 2025-04-24 16:08:00.58
+kqtf.a81b1 = 1.7714130878E-04; ! powerconverter: RPMBB.UA87.RQTF.A81B1 at 2025-04-24 16:08:00.58
+kqtd.a12b1 = -1.1114670233E-04; ! powerconverter: RPMBB.UA23.RQTD.A12B1 at 2025-04-24 16:08:00.58
+kqtd.a67b1 = -1.0210970110E-03; ! powerconverter: RPMBB.UA67.RQTD.A67B1 at 2025-04-24 16:08:00.6
+kqtd.a56b1 = -3.0490031092E-04; ! powerconverter: RPMBB.UA63.RQTD.A56B1 at 2025-04-24 16:08:00.58
+kqtd.a45b1 = 1.9346274762E-04; ! powerconverter: RPMBB.UA47.RQTD.A45B1 at 2025-04-24 16:08:00.6
+kqtd.a34b1 = 1.2378239916E-03; ! powerconverter: RPMBB.UA43.RQTD.A34B1 at 2025-04-24 16:08:00.6
+kqtd.a23b1 = 1.1549226785E-03; !
powerconverter: RPMBB.UA27.RQTD.A23B1 at 2025-04-24 16:08:00.58 +kqtd.a78b1 = -1.1778641812E-03; ! powerconverter: RPMBB.UA83.RQTD.A78B1 at 2025-04-24 16:08:00.58 +kqtd.a81b1 = 1.7725039489E-04; ! powerconverter: RPMBB.UA87.RQTD.A81B1 at 2025-04-24 16:08:00.58 diff --git a/tests/inputs/knob_extractor/extracted_mqts_b2.str b/tests/inputs/knob_extractor/extracted_mqts_b2.str new file mode 100644 index 00000000..e0f43733 --- /dev/null +++ b/tests/inputs/knob_extractor/extracted_mqts_b2.str @@ -0,0 +1,16 @@ +kqtf.a12b2 = 8.6361503057E-05; ! powerconverter: RPMBB.UA23.RQTF.A12B2 at 2025-04-21 16:58:33.58 +kqtf.a23b2 = 6.8126690391E-04; ! powerconverter: RPMBB.UA27.RQTF.A23B2 at 2025-04-21 16:58:33.58 +kqtf.a34b2 = 6.3296929603E-04; ! powerconverter: RPMBB.UA43.RQTF.A34B2 at 2025-04-21 16:58:33.6 +kqtf.a45b2 = -4.2242762402E-04; ! powerconverter: RPMBB.UA47.RQTF.A45B2 at 2025-04-21 16:58:33.6 +kqtf.a56b2 = 2.2150181418E-04; ! powerconverter: RPMBB.UA63.RQTF.A56B2 at 2025-04-21 16:58:33.6 +kqtf.a78b2 = -2.2045892703E-04; ! powerconverter: RPMBB.UA83.RQTF.A78B2 at 2025-04-21 16:58:33.58 +kqtf.a67b2 = -5.1946843266E-04; ! powerconverter: RPMBB.UA67.RQTF.A67B2 at 2025-04-21 16:58:33.6 +kqtf.a81b2 = -2.9612713717E-04; ! powerconverter: RPMBB.UA87.RQTF.A81B2 at 2025-04-21 16:58:33.58 +kqtd.a12b2 = 6.8279938819E-05; ! powerconverter: RPMBB.UA23.RQTD.A12B2 at 2025-04-21 16:58:33.58 +kqtd.a78b2 = 1.1512298098E-03; ! powerconverter: RPMBB.UA83.RQTD.A78B2 at 2025-04-21 16:58:33.58 +kqtd.a23b2 = -1.3344184688E-03; ! powerconverter: RPMBB.UA27.RQTD.A23B2 at 2025-04-21 16:58:33.58 +kqtd.a34b2 = -1.3840291975E-03; ! powerconverter: RPMBB.UA43.RQTD.A34B2 at 2025-04-21 16:58:33.6 +kqtd.a67b2 = 8.6325133483E-04; ! powerconverter: RPMBB.UA67.RQTD.A67B2 at 2025-04-21 16:58:33.6 +kqtd.a45b2 = -4.3636175393E-04; ! powerconverter: RPMBB.UA47.RQTD.A45B2 at 2025-04-21 16:58:33.6 +kqtd.a56b2 = 2.1367291615E-04; ! powerconverter: RPMBB.UA63.RQTD.A56B2 at 2025-04-21 16:58:33.58 +kqtd.a81b2 = -3.0417310517E-04; ! powerconverter: RPMBB.UA87.RQTD.A81B2 at 2025-04-21 16:58:33.58 diff --git a/tests/inputs/knob_extractor/generate_mqt_references.py b/tests/inputs/knob_extractor/generate_mqt_references.py new file mode 100644 index 00000000..68f645bb --- /dev/null +++ b/tests/inputs/knob_extractor/generate_mqt_references.py @@ -0,0 +1,37 @@ +""" +Copy reference MQT extraction files for testing. + +This script copies the extracted_mqts_b1.str and extracted_mqts_b2.str files +from CERN data paths to the test inputs directory. 
+""" + +import shutil +from pathlib import Path + +# Source paths for MQT reference files +B1_SOURCE = Path( + "/user/slops/data/LHC_DATA/OP_DATA/Betabeat/2025-04-24/LHCB1/Models/B1_60cm_checks/extracted_mqts.str" +) +B2_SOURCE = Path( + "/user/slops/data/LHC_DATA/OP_DATA/Betabeat/2025-04-21/LHCB2/Models/b2_120cm_correct_knobs/extracted_mqts.str" +) + +# Output directory for test inputs +OUTPUT_DIR = Path(__file__).parent + + +def main(): + """Copy MQT reference files for both beams.""" + OUTPUT_DIR.mkdir(parents=True, exist_ok=True) + + print("Copying MQT reference for Beam 1...") + shutil.copy(B1_SOURCE, OUTPUT_DIR / "extracted_mqts_b1.str") + + print("Copying MQT reference for Beam 2...") + shutil.copy(B2_SOURCE, OUTPUT_DIR / "extracted_mqts_b2.str") + + print(f"Reference files copied to {OUTPUT_DIR}") + + +if __name__ == "__main__": + main() diff --git a/tests/unit/test_knob_extractor.py b/tests/unit/test_knob_extractor.py index b94dfcad..2ce30e55 100644 --- a/tests/unit/test_knob_extractor.py +++ b/tests/unit/test_knob_extractor.py @@ -18,10 +18,8 @@ STATE_VARIABLES, Col, Head, - _add_time_delta, _extract_and_gather, _parse_knobs_defintions, - _parse_time, _write_knobsfile, check_for_undefined_knobs, get_madx_command, @@ -30,6 +28,7 @@ lsa2name, main, ) +from omc3.utils.time_tools import _add_time_delta, parse_time from tests.conftest import cli_args INPUTS = Path(__file__).parent.parent / "inputs" / "knob_extractor" @@ -115,7 +114,7 @@ class MockTimber: monkeypatch.setattr(knob_extractor, "pytimber", MockTimber()) - time = datetime.now() + time = datetime.now(timezone.utc) # Main --- with caplog.at_level(logging.INFO): if commandline: @@ -341,40 +340,38 @@ def test_load_knobdefinitions_fail_wrong_scaling(self, tmp_path): class TestTime: @pytest.mark.basic def test_time_and_delta(self): - time_str = "2022-06-26T03:00" - t1 = _parse_time(time_str) + time_str = "2022-06-26T03:00+00:00" + t1 = parse_time(time_str) - assert t1 == datetime(2022, 6, 26, 3, 0, 0) + assert t1 == datetime(2022, 6, 26, 3, 0, 0, tzinfo=timezone.utc) # 2 hours earlier t2 = _add_time_delta(t1, "_2h") - assert t2 == datetime(2022, 6, 26, 1, 0, 0) + assert t2 == datetime(2022, 6, 26, 1, 0, 0, tzinfo=timezone.utc) # 1 week earlier t2 = _add_time_delta(t1, "_1w") - assert t2 == datetime(2022, 6, 19, 3, 0, 0) + assert t2 == datetime(2022, 6, 19, 3, 0, 0, tzinfo=timezone.utc) # 1 week and 1 hour earlier t2 = _add_time_delta(t1, "_1w1h") - assert t2 == datetime(2022, 6, 19, 2, 0, 0) + assert t2 == datetime(2022, 6, 19, 2, 0, 0, tzinfo=timezone.utc) # 1 month later t2 = _add_time_delta(t1, "1M") - assert t2 == datetime(2022, 7, 26, 3, 0, 0) + assert t2 == datetime(2022, 7, 26, 3, 0, 0, tzinfo=timezone.utc) # 20 years later t2 = _add_time_delta(t1, "20Y") - assert t2 == datetime(2042, 6, 26, 3, 0, 0) + assert t2 == datetime(2042, 6, 26, 3, 0, 0, tzinfo=timezone.utc) - t3 = _parse_time(time_str, "20Y") + t3 = parse_time(time_str, "20Y") assert t2 == t3 @pytest.mark.basic def test_timezones(self): - assert ( - _parse_time("2022-06-26T03:00+02:00") - == - datetime(2022, 6, 26, 3, 0, 0, tzinfo=timezone(timedelta(seconds=7200))) + assert parse_time("2022-06-26T03:00+02:00") == datetime( + 2022, 6, 26, 3, 0, 0, tzinfo=timezone(timedelta(seconds=7200)) ) diff --git a/tests/unit/test_model_creator.py b/tests/unit/test_model_creator.py index 6cc058df..42fae72e 100644 --- a/tests/unit/test_model_creator.py +++ b/tests/unit/test_model_creator.py @@ -25,10 +25,11 @@ ) from omc3.model.manager import get_accelerator from 
omc3.model.model_creators.lhc_model_creator import ( - LhcBestKnowledgeCreator, + # LhcBestKnowledgeCreator, LhcModelCreator, ) from omc3.model_creator import create_instance_and_model +from omc3.nxcals.constants import EXTRACTED_MQTS_FILENAME from omc3.optics_measurements.constants import NAME from tests.conftest import assert_frame_equal @@ -42,6 +43,7 @@ # ---- creation tests ------------------------------------------------------------------------------ + @pytest.mark.basic def test_booster_creation_nominal_driven(tmp_path, acc_models_psb_2021): accel_opt = { @@ -70,7 +72,10 @@ def test_booster_creation_nominal_driven(tmp_path, acc_models_psb_2021): accel_opt_duplicate["scenario"] = None with pytest.raises(AcceleratorDefinitionError) as e: create_instance_and_model( - type="nominal", outputdir=tmp_path, logfile=tmp_path / "madx_log.txt", **accel_opt_duplicate + type="nominal", + outputdir=tmp_path, + logfile=tmp_path / "madx_log.txt", + **accel_opt_duplicate, ) assert "flag --scenario" in str(e.value) assert "Selected: 'None'" in str(e.value) @@ -79,7 +84,10 @@ def test_booster_creation_nominal_driven(tmp_path, acc_models_psb_2021): accel_opt_duplicate["str_file"] = None with pytest.raises(AcceleratorDefinitionError) as e: create_instance_and_model( - type="nominal", outputdir=tmp_path, logfile=tmp_path / "madx_log.txt", **accel_opt_duplicate + type="nominal", + outputdir=tmp_path, + logfile=tmp_path / "madx_log.txt", + **accel_opt_duplicate, ) assert "flag --str_file" in str(e.value) assert "Selected: 'None'" in str(e.value) @@ -105,6 +113,7 @@ def test_booster_creation_nominal_free(tmp_path, acc_models_psb_2021): ) check_accel_from_dir_vs_options(tmp_path, accel_opt, accel) + # ps tune matching fails for 2018 optics # The magnets used for the different tune matching methods in > 2018 were installed in LS2. 
# TODO: check with PS expert a) if model creation <= 2018 is desired and b) how it worked @@ -135,7 +144,7 @@ def test_booster_creation_nominal_free(tmp_path, acc_models_psb_2021): def test_ps_creation_nominal_free_2018(tmp_path, acc_models_ps_2021): accel_opt = { "accel": "ps", - "nat_tunes": [0.21, 0.323], # from madx_job file in acc_models + "nat_tunes": [0.21, 0.323], # from madx_job file in acc_models "dpp": 0.0, "energy": 1.4, "year": "2018", @@ -158,7 +167,10 @@ def test_ps_creation_nominal_free_2018(tmp_path, acc_models_ps_2021): with pytest.raises(RuntimeError) as excinfo: create_instance_and_model( - type="nominal", outputdir=tmp_path, logfile=tmp_path / "madx_log.txt", **accel_opt_duplicate + type="nominal", + outputdir=tmp_path, + logfile=tmp_path / "madx_log.txt", + **accel_opt_duplicate, ) assert "Please provide the --energy ENERGY flag" in str(excinfo.value) @@ -220,7 +232,7 @@ def test_lhc_creation_nominal_free_high_beta(tmp_path, acc_models_lhc_2018): "energy": 6500.0, "fetch": Fetcher.PATH, "path": acc_models_lhc_2018, - "modifiers": LHC_2018_HIGH_BETA_MODIFIERS + "modifiers": LHC_2018_HIGH_BETA_MODIFIERS, } accel = create_instance_and_model( outputdir=tmp_path, type="nominal", logfile=tmp_path / "madx_log.txt", **accel_opt @@ -239,7 +251,7 @@ def test_lhc_creation_nominal_free(tmp_path, acc_models_lhc_2025): "energy": 6800.0, "fetch": Fetcher.PATH, "path": acc_models_lhc_2025, - "modifiers": LHC_2025_30CM_MODIFIERS + "modifiers": LHC_2025_30CM_MODIFIERS, } accel = create_instance_and_model( outputdir=tmp_path, type="nominal", logfile=tmp_path / "madx_log.txt", **accel_opt @@ -256,7 +268,7 @@ def test_lhc_creation_nominal_2016(tmp_path): "nat_tunes": [0.31, 0.32], "dpp": 0.0, "energy": 6500.0, - "modifiers": [INPUTS / "models" / "modifiers_2016" / "opt_400_10000_400_3000.madx"] + "modifiers": [INPUTS / "models" / "modifiers_2016" / "opt_400_10000_400_3000.madx"], } accel = create_instance_and_model( outputdir=tmp_path, type="nominal", logfile=tmp_path / "madx_log.txt", **accel_opt @@ -266,7 +278,7 @@ def test_lhc_creation_nominal_2016(tmp_path): @pytest.mark.basic def test_lhc_creation_best_knowledge(tmp_path, acc_models_lhc_2025): - (tmp_path / LhcBestKnowledgeCreator.EXTRACTED_MQTS_FILENAME).write_text("\n") + (tmp_path / EXTRACTED_MQTS_FILENAME).write_text("\n") corrections = tmp_path / "other_corrections.madx" corrections_str = "! just a comment to test the corrections file is actually loaded in madx. 
whfifhkdskjfshkdhfswojeorijr" @@ -284,7 +296,7 @@ def test_lhc_creation_best_knowledge(tmp_path, acc_models_lhc_2025): "energy": 6800.0, "fetch": Fetcher.PATH, "path": acc_models_lhc_2025, - "modifiers": LHC_2025_30CM_MODIFIERS + [corrections] + "modifiers": LHC_2025_30CM_MODIFIERS + [corrections], } # like from the GUI, dump best knowledge on top of nominal @@ -306,7 +318,7 @@ def test_lhc_creation_best_knowledge(tmp_path, acc_models_lhc_2025): @pytest.mark.basic -def test_lhc_creation_absolute_modifier_path(tmp_path: Path, acc_models_lhc_2022: Path): +def test_lhc_creation_relative_modifier_path(tmp_path: Path, acc_models_lhc_2022: Path): rel_path = OPTICS_SUBDIR / "R2022a_A30cmC30cmA10mL200cm.madx" accel_opt = { "accel": "lhc", @@ -318,13 +330,13 @@ def test_lhc_creation_absolute_modifier_path(tmp_path: Path, acc_models_lhc_2022 "energy": 6800.0, "fetch": Fetcher.PATH, "path": acc_models_lhc_2022, - "modifiers": [(acc_models_lhc_2022 / rel_path).absolute()] + "modifiers": [(acc_models_lhc_2022 / rel_path).absolute()], } accel = create_instance_and_model( outputdir=tmp_path, type="nominal", logfile=tmp_path / "madx_log.txt", **accel_opt ) - absolute_path = tmp_path / f"{ACC_MODELS_PREFIX}-{accel.NAME}" / rel_path # replaced in model creation - madx_string = f"call, file = '{absolute_path!s}" + + madx_string = f"call, file = '{f'{ACC_MODELS_PREFIX}-lhc' / rel_path!s}'" assert madx_string in (tmp_path / JOB_MODEL_MADX_NOMINAL).read_text() assert madx_string in (tmp_path / "madx_log.txt").read_text() check_accel_from_dir_vs_options(tmp_path, accel_opt, accel) @@ -343,7 +355,7 @@ def test_lhc_creation_modifier_nonexistent(tmp_path, acc_models_lhc_2018): "energy": 6800.0, "fetch": Fetcher.PATH, "path": acc_models_lhc_2018, - "modifiers": [non_existent] + "modifiers": [non_existent], } with pytest.raises(FileNotFoundError) as creation_error: create_instance_and_model( @@ -363,7 +375,10 @@ def test_lhc_creation_relative_modeldir_path(request, tmp_path, acc_models_lhc_2 model_dir_relpath.mkdir() optics_file_relpath = Path("R2022a_A30cmC30cmA10mL200cm.madx") - shutil.copy(acc_models_lhc_2022 / "operation/optics" / optics_file_relpath, model_dir_relpath / optics_file_relpath.name) + shutil.copy( + acc_models_lhc_2022 / "operation/optics" / optics_file_relpath, + model_dir_relpath / optics_file_relpath.name, + ) accel_opt = { "accel": "lhc", @@ -390,8 +405,8 @@ def test_lhc_creation_relative_modeldir_path(request, tmp_path, acc_models_lhc_2 @pytest.mark.basic def test_lhc_creation_nominal_driven_check_output(model_25cm_beam1): - """ Checks if the post_run() method succeeds on an already existing given model (dir), - and then checks that it failes when removing individual files from that model. 
""" + """Checks if the post_run() method succeeds on an already existing given model (dir), + and then checks that it failes when removing individual files from that model.""" accel = get_accelerator(**model_25cm_beam1) LhcModelCreator(accel).post_run() @@ -411,11 +426,12 @@ def test_lhc_creation_nominal_driven_check_output(model_25cm_beam1): if file_path_moved.exists(): shutil.move(file_path_moved, file_path) + # ---- cli tests ----------------------------------------------------------------------------------- + @pytest.mark.basic def test_lhc_creator_cli(tmp_path, acc_models_lhc_2025, capsys): - accel_opt = { "accel": "lhc", "year": "2025", @@ -444,6 +460,7 @@ def test_lhc_creator_cli(tmp_path, acc_models_lhc_2025, capsys): for m in modifiers: assert m.endswith(".madx") + @pytest.mark.basic def test_booster_creator_cli(tmp_path, acc_models_psb_2021, capsys): accel_opt = { @@ -469,14 +486,7 @@ def test_booster_creator_cli(tmp_path, acc_models_psb_2021, capsys): scenarios = [c for c in scenarios if len(c) > 0] # remove empty lines scenarios.sort() - assert scenarios == [ - "ad", - "east", - "isolde", - "lhc", - "sftpro", - "tof" - ] + assert scenarios == ["ad", "east", "isolde", "lhc", "sftpro", "tof"] accel_opt["scenario"] = "lhc" @@ -496,11 +506,12 @@ def test_booster_creator_cli(tmp_path, acc_models_psb_2021, capsys): "3_extraction", ] + @pytest.mark.basic def test_ps_creation_cli(tmp_path, acc_models_ps_2021, capsys): accel_opt = { "accel": "ps", - "nat_tunes": [0.21, 0.323], # from madx_job file in acc_models + "nat_tunes": [0.21, 0.323], # from madx_job file in acc_models "dpp": 0.0, "energy": 1.4, "year": "2018", @@ -527,14 +538,13 @@ def test_ps_creation_cli(tmp_path, acc_models_ps_2021, capsys): "4_flat_bottom_wo_QDN90", ] + # ---- helper -------------------------------------------------------------------------------------- + def check_accel_from_dir_vs_options( - model_dir: Path, - accel_options: DotDict, - accel_from_opt: Accelerator, - best_knowledge=False - ): + model_dir: Path, accel_options: DotDict, accel_from_opt: Accelerator, best_knowledge=False +): # creation via model_from_dir tests that all files are in place: accel_from_dir: Accelerator = get_accelerator( accel=accel_options["accel"], @@ -544,7 +554,17 @@ def check_accel_from_dir_vs_options( _check_arrays(accel_from_opt.nat_tunes, accel_from_dir.nat_tunes, eps=1e-4, tunes=True) _check_arrays(accel_from_opt.drv_tunes, accel_from_dir.drv_tunes, eps=1e-4, tunes=True) - _check_arrays(accel_from_opt.modifiers, accel_from_dir.modifiers) + # Check that each modifier from dir is relative to model_dir if possible + if accel_from_opt.modifiers is not None and accel_from_dir.modifiers is not None: + for mod_opt, mod_dir in zip(accel_from_opt.modifiers, accel_from_dir.modifiers): + try: + mod_opt_rel = mod_opt.relative_to(model_dir) + except ValueError: + mod_opt_rel = mod_opt + assert mod_dir == mod_opt_rel + else: + assert accel_from_opt.modifiers == accel_from_dir.modifiers + assert accel_from_opt.excitation == accel_from_dir.excitation assert accel_from_opt.model_dir == accel_from_dir.model_dir diff --git a/tests/unit/test_nxcals_mqt_extraction.py b/tests/unit/test_nxcals_mqt_extraction.py new file mode 100644 index 00000000..75e380ba --- /dev/null +++ b/tests/unit/test_nxcals_mqt_extraction.py @@ -0,0 +1,212 @@ +from __future__ import annotations + +import re +from datetime import timezone +from pathlib import Path + +import pandas as pd +import pytest + +from omc3 import mqt_extractor +from omc3.nxcals import 
mqt_extraction
+from omc3.nxcals.constants import EXTRACTED_MQTS_FILENAME
+from omc3.nxcals.knob_extraction import NXCALSResult
+
+SAMPLE_DIR = Path(__file__).parent.parent / "inputs" / "knob_extractor"
+TEST_CASES = (
+    (1, SAMPLE_DIR / "extracted_mqts_b1.str"),
+    (2, SAMPLE_DIR / "extracted_mqts_b2.str"),
+)
+
+
+def _parse_mqt_line(line: str, tz: str = "Europe/Zurich") -> tuple[str, float, str, pd.Timestamp]:
+    pattern = re.compile(
+        r"([a-zA-Z0-9_.]+)\s*=\s*([0-9.eE+-]+)\s*;\s*! powerconverter:\s*([a-zA-Z0-9_.]+)\s*at\s*(.+)"
+    )
+    match = pattern.match(line.strip())
+    if not match:
+        raise ValueError(f"Unexpected line format: {line}")
+    name, value_str, pc_name, timestamp_raw = match.groups()
+    value = float(value_str)
+    timestamp = pd.Timestamp(timestamp_raw.strip(), tz=tz).replace(microsecond=0)
+    return name, value, pc_name, timestamp
+
+
+def _load_results_from_file(file_path: Path, tz: str = "Europe/Zurich") -> list[NXCALSResult]:
+    results: list[NXCALSResult] = []
+    for raw_line in file_path.read_text().splitlines():
+        line = raw_line.strip()
+        if not line or line.startswith("!"):
+            continue
+        name, value, pc_name, timestamp = _parse_mqt_line(line, tz=tz)
+        results.append(
+            NXCALSResult(
+                name=name,
+                value=value,
+                timestamp=timestamp,
+                pc_name=pc_name,
+            )
+        )
+    return results
+
+
+@pytest.mark.cern_network
+@pytest.mark.parametrize("beam, sample_file", TEST_CASES)
+def test_generate_mqt_names_matches_sample(beam: int, sample_file: Path):
+    sample_results = _load_results_from_file(sample_file)
+    expected_names = {result.name for result in sample_results}
+    assert expected_names == mqt_extraction.generate_mqt_names(beam=beam)
+
+
+@pytest.mark.cern_network
+@pytest.mark.parametrize("beam, sample_file", TEST_CASES)
+def test_main_reproduces_reference_output(tmp_path, beam: int, sample_file: Path):
+    sample_results = _load_results_from_file(sample_file)
+
+    latest_sample_time = max(result.timestamp for result in sample_results)
+    query_time = latest_sample_time.to_pydatetime()
+
+    output_dir = tmp_path / f"beam{beam}"
+    output_dir.mkdir()
+    output_path = output_dir / EXTRACTED_MQTS_FILENAME
+
+    mqt_extractor.main(time=query_time.isoformat(), beam=beam, output=output_path)
+
+    expected_results = [
+        NXCALSResult(
+            name=r.name,
+            value=r.value,
+            pc_name=r.pc_name,
+            timestamp=r.timestamp.tz_convert("UTC"),
+        )
+        for r in sample_results
+    ]
+
+    actual_results = _load_results_from_file(output_path, tz="UTC")
+
+    expected_sorted = sorted(expected_results, key=lambda x: x.name)
+    actual_sorted = sorted(actual_results, key=lambda x: x.name)
+
+    for exp, act in zip(expected_sorted, actual_sorted):
+        assert exp.name == act.name
+        assert exp.pc_name == act.pc_name
+        assert exp.value == pytest.approx(act.value, rel=1e-4)
+        assert abs((exp.timestamp - act.timestamp).total_seconds()) <= 1
+
+
+@pytest.mark.cern_network
+def test_generate_mqt_names_invalid_beam():
+    with pytest.raises(ValueError):
+        mqt_extraction.generate_mqt_names(beam=3)
+
+
+@pytest.mark.cern_network
+@pytest.mark.parametrize("beam, sample_file", TEST_CASES)
+def test_main_returns_tfs_dataframe(tmp_path, beam: int, sample_file: Path):
+    """Test that main returns a TfsDataFrame with correct structure."""
+    sample_results = _load_results_from_file(sample_file)
+    latest_sample_time = max(result.timestamp for result in sample_results)
+    query_time = latest_sample_time.to_pydatetime()
+
+    output_path = tmp_path / f"test_output_b{beam}.madx"
+
+    # Call main and verify return type
+    result_df = mqt_extractor.main(time=query_time.isoformat(),
beam=beam, output=output_path)
+
+    # Check it's a TfsDataFrame
+    import tfs
+
+    assert isinstance(result_df, tfs.TfsDataFrame), "main should return a TfsDataFrame"
+
+    # Check headers
+    assert "EXTRACTION_TIME" in result_df.headers, "Missing EXTRACTION_TIME header"
+    assert "BEAM" in result_df.headers, "Missing BEAM header"
+    assert result_df.headers["BEAM"] == beam, f"Expected beam {beam} in headers"
+
+    # Check columns
+    expected_columns = ["madx", "value", "timestamp", "pc_name"]
+    for col in expected_columns:
+        assert col in result_df.columns, f"Missing column {col}"
+
+    # Check we have the right number of rows
+    assert len(result_df) == 16, f"Expected 16 MQT entries, got {len(result_df)}"
+
+
+@pytest.mark.cern_network
+@pytest.mark.parametrize("beam", [1, 2], ids=["beam1", "beam2"])
+def test_main_with_timedelta(tmp_path, beam: int):
+    """Test that the timedelta parameter works correctly."""
+    from datetime import datetime, timedelta
+
+    output_path = tmp_path / f"test_timedelta_b{beam}.madx"
+
+    # Call with a timedelta going back 1 day
+    result_df = mqt_extractor.main(
+        time="now",
+        timedelta="_1d",  # 1 day ago
+        beam=beam,
+        output=output_path,
+    )
+
+    # Verify the extraction time is approximately 1 day ago
+    extraction_time = result_df.headers["EXTRACTION_TIME"]
+    expected_time = datetime.now(timezone.utc) - timedelta(days=1)
+    time_diff = abs((extraction_time - expected_time).total_seconds())
+
+    # Should be close to 1 day ago, allow 5 minutes tolerance
+    assert time_diff < 300, f"Extraction time should be ~1 day ago, but diff is {time_diff} seconds"
+
+
+@pytest.mark.cern_network
+@pytest.mark.parametrize("beam", [1, 2], ids=["beam1", "beam2"])
+def test_main_with_delta_days(tmp_path, beam: int):
+    """Test that the delta_days parameter is properly passed through."""
+    from datetime import datetime, timedelta
+
+    output_path = tmp_path / f"test_delta_days_b{beam}.madx"
+
+    # Use a time 2 hours ago with a 2-hour lookback window (delta_days=1/12) to ensure we get data
+    past_time = datetime.now(timezone.utc) - timedelta(hours=2)
+
+    # This should work because the lookback window covers the preceding 2 hours
+    result_df = mqt_extractor.main(
+        time=past_time.isoformat(), beam=beam, output=output_path, delta_days=1 / 12
+    )
+
+    # Should succeed and return valid data
+    assert len(result_df) == 16, "Expected 16 MQT entries with delta_days=1/12"
+    assert result_df.headers["BEAM"] == beam
+
+
+def test_parse_time_now():
+    """Test that parse_time correctly handles 'now'."""
+    from datetime import datetime
+
+    from omc3.utils.time_tools import parse_time
+
+    result = parse_time("now")
+    now = datetime.now(timezone.utc)
+
+    # Should be very close to now (within 1 second)
+    diff = abs((now - result).total_seconds())
+    assert diff < 1, f"parse_time('now') should return current time, diff was {diff}s"
+
+
+def test_parse_time_with_timedelta():
+    """Test that parse_time correctly applies a timedelta."""
+    from datetime import datetime
+
+    from omc3.utils.time_tools import parse_time
+
+    now_str = datetime.now(timezone.utc).isoformat()
+
+    # Test positive timedelta
+    result_plus = parse_time(now_str, "1h")
+    result_base = parse_time(now_str)
+    diff_plus = (result_plus - result_base).total_seconds()
+    assert abs(diff_plus - 3600) < 1, f"1h timedelta should add 3600s, got {diff_plus}s"
+
+    # Test negative timedelta
+    result_minus = parse_time(now_str, "_2h")
+    diff_minus = (result_minus - result_base).total_seconds()
+    assert abs(diff_minus + 7200) < 1, f"_2h timedelta should subtract 7200s, got {diff_minus}s"
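
A quick usage sketch of the time helpers exercised by the tests above (illustrative only, not part of this patch): delta strings concatenate (value, unit) pairs with the unit characters s/m/h/d/w/M/Y, and a leading '_' or '-' negates the whole delta; the epoch example assumes the UTC reading of integer timestamps in _parse_time_from_str.

    from omc3.utils.time_tools import parse_time

    # ISO string, shifted back one week and one hour via the compact delta syntax
    t = parse_time("2022-06-26T03:00+00:00", "_1w1h")
    assert f"{t:%Y-%m-%d %H:%M}" == "2022-06-19 02:00"

    # 'now' resolves to the current UTC time; "_1d" steps one day back
    yesterday = parse_time("now", "_1d")

    # plain unix timestamps are read as UTC
    day_two = parse_time("86400")  # 1970-01-02T00:00:00+00:00
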