dct/capacitor_selection.py (7 additions & 5 deletions)

@@ -155,7 +155,7 @@ def _start_optimization(circuit_filtered_point_file: str, act_cst_config: pecst.
 
     loss_total_array = np.full_like(circuit_dto.calc_modulation.phi, np.nan)
 
-    new_circuit_dto_directory = os.path.join(act_cst_config.results_directory, "01_circuit_dtos_incl_capacitor_1_loss")
+    new_circuit_dto_directory = os.path.join(act_cst_config.results_directory, "01_circuit_dtos_incl_capacitor_loss")
     if not os.path.exists(new_circuit_dto_directory):
         os.makedirs(new_circuit_dto_directory)
 
@@ -182,11 +182,13 @@ def _start_optimization(circuit_filtered_point_file: str, act_cst_config: pecst.
         loss_total_array[vec_vvp] = loss_per_capacitor * n_series * n_parallel
 
     capacitor_losses = CapacitorResults(
-        p_combined_losses=loss_total_array,
-        volume=volume_total,
-        pcb_area=area_total,
+        loss_total_array=loss_total_array,
+        volume_total=volume_total,
+        area_total=area_total,
         circuit_trial_file=circuit_filtered_point_file,
-        capacitor_order_number=df_geometry_re_simulation_number,
+        capacitor_order_number=df_geometry_re_simulation_number['ordering code'].values[0],
+        n_series=n_series,
+        n_parallel=n_parallel
     )
 
     pickle_file = os.path.join(new_circuit_dto_directory, f"{ordering_code}.pkl")
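The `capacitor_order_number` change above stores a scalar ordering code instead of the whole lookup DataFrame, matching the `int` to `str` type change in `CapacitorResults` further down. A minimal sketch of what `['ordering code'].values[0]` returns, using a made-up one-row DataFrame in place of the real `df_geometry_re_simulation_number` (only the column name comes from the diff; the ordering-code value is invented):

    import pandas as pd

    # Hypothetical one-row lookup result standing in for df_geometry_re_simulation_number.
    df_geometry_re_simulation_number = pd.DataFrame({"ordering code": ["B32774D8505K000"]})

    order_number = df_geometry_re_simulation_number["ordering code"].values[0]
    print(order_number)        # B32774D8505K000
    print(type(order_number))  # <class 'str'>: a plain scalar, safe to pickle and format
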

dct/circuit_optimization.py (2 additions & 0 deletions)

@@ -77,6 +77,8 @@ def load_filepaths(project_directory: str) -> circuit_dtos.ParetoFilePaths:
 
     file_path_dto = circuit_dtos.ParetoFilePaths(
         circuit=loaded_file["circuit"],
+        capacitor_1=loaded_file["capacitor_1"],
+        capacitor_2=loaded_file["capacitor_2"],
         transformer=loaded_file["transformer"],
         inductor=loaded_file["inductor"],
         heat_sink=loaded_file["heat_sink"]

dct/circuit_optimization_dtos.py (2 additions & 0 deletions)

@@ -12,6 +12,8 @@ class ParetoFilePaths:
     """File paths for the sub simulation optimization parts."""
 
     circuit: str
+    capacitor_1: str
+    capacitor_2: str
     inductor: str
     transformer: str
     heat_sink: str
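With the two new fields, `load_filepaths` now indexes `loaded_file["capacitor_1"]` and `loaded_file["capacitor_2"]` directly, so a project configuration file written before this change will fail with a `KeyError` on load. A minimal sketch of the mapping the updated constructor expects (the key names are from the diff; the concrete path values are invented for illustration):

    loaded_file = {
        "circuit": "results/01_circuit",
        "capacitor_1": "results/02_capacitor_1",
        "capacitor_2": "results/03_capacitor_2",
        "inductor": "results/04_inductor",
        "transformer": "results/05_transformer",
        "heat_sink": "results/06_heat_sink",
    }
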

dct/datasets_dtos.py (3 additions & 1 deletion)

@@ -180,7 +180,9 @@ class CapacitorResults:
     volume_total: float
     area_total: float
     circuit_trial_file: str
-    capacitor_order_number: int
+    capacitor_order_number: str
+    n_parallel: int
+    n_series: int
 
     def __init__(self, **kwargs):
         names = set([f.name for f in dataclasses.fields(self)])
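`CapacitorResults` defines a custom `__init__` that collects the declared field names, which is what lets the call site in capacitor_selection.py rename and add keyword arguments as long as the field list stays in sync. The diff truncates the method body after the `names` line, so the loop below is an assumption about how that filtering plausibly continues; a self-contained sketch with a reduced field set:

    import dataclasses

    @dataclasses.dataclass(init=False)
    class CapacitorResultsSketch:
        capacitor_order_number: str
        n_parallel: int
        n_series: int

        def __init__(self, **kwargs):
            names = set([f.name for f in dataclasses.fields(self)])  # line shown in the diff
            for key, value in kwargs.items():  # continuation assumed, not shown in the diff
                if key in names:
                    setattr(self, key, value)

    result = CapacitorResultsSketch(capacitor_order_number="B32774", n_parallel=2, n_series=3)
    print(result.capacitor_order_number, result.n_parallel, result.n_series)  # B32774 2 3
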

dct/dctmainctl.py (6 additions & 1 deletion)

@@ -1564,10 +1564,15 @@ def run_optimization_from_toml_configurations(self, workspace_path: str) -> None
         # Create list of inductor and transformer study (ASA: Currently not implemented in configuration files)
         inductor_study_names = [self._inductor_study_data.study_name]
         stacked_transformer_study_names = [self._transformer_study_data.study_name]
+        capacitor_1_study_names = [self._capacitor_1_selection_data.study_name]
+        capacitor_2_study_names = [self._capacitor_2_selection_data.study_name]
         # Start summary processing by generating the DataFrame from calculated simulation results
         s_df = self._summary_pre_processing.generate_result_database(
             self._inductor_study_data, self._transformer_study_data, pre_summary_data,
-            inductor_study_names, stacked_transformer_study_names, filter_data)
+            inductor_study_names, stacked_transformer_study_names, filter_data,
+            self._capacitor_1_selection_data, self._capacitor_2_selection_data,
+            capacitor_1_study_names, capacitor_2_study_names
+        )
         # Select the needed heat sink configuration
         self._summary_pre_processing.select_heat_sink_configuration(self._heat_sink_study_data, pre_summary_data, s_df)

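The extended call passes four additional positional arguments, so `generate_result_database` on the summary pre-processing class must accept them in exactly this order. A hedged stub of a matching signature, with parameter names inferred from this call site rather than copied from the actual summary-processing module:

    def generate_result_database(self, inductor_study_data, transformer_study_data, pre_summary_data,
                                 inductor_study_names, stacked_transformer_study_names, filter_data,
                                 capacitor_1_selection_data, capacitor_2_selection_data,
                                 capacitor_1_study_names, capacitor_2_study_names):
        """Build the summary DataFrame (s_df at the call site), now including capacitor results."""
        ...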