diff --git a/Manuals/FDS_User_Guide/FDS_User_Guide.tex b/Manuals/FDS_User_Guide/FDS_User_Guide.tex
index e875ed3ea64..2f5e51ab274 100644
--- a/Manuals/FDS_User_Guide/FDS_User_Guide.tex
+++ b/Manuals/FDS_User_Guide/FDS_User_Guide.tex
@@ -14195,6 +14195,7 @@ \chapter{Error Codes}
 266 \> \ct{REAC ... FUEL ... is not a predefined or tracked species.} \> Section~\ref{info:simple_chemistry} \\
 267 \> \ct{REAC ... has no consumed species.} \> Section~\ref{info:REAC_Diagnostics} \\
 268 \> \ct{BACK tracked species ... not found.} \> Section~\ref{info:BACK} \\
+269 \> \ct{MATL ... The specified pyrolysis parameters result in A >...} \> Section~\ref{info:kinetic_parameters} \\
 \> \> \\
 299 \> \ct{SURF ... has no solid or particle for TGA.} \> Section~\ref{info:TGA_DSC_MCC} \\
 300 \> \ct{N_LAYER_CELLS_MAX should be at least ... for ...} \> Section~\ref{info:solid_phase_stability} \\
diff --git a/Source/dump.f90 b/Source/dump.f90
index 3de18681e6b..fcc7fa3b0a7 100644
--- a/Source/dump.f90
+++ b/Source/dump.f90
@@ -1409,7 +1409,7 @@ SUBROUTINE INITIALIZE_MESH_DUMPS(NM)
 IF (ABS(OB%Z2-M%ZS)< ...
-   IF (M%N_PATCH>SIZE(M%PATCH)) CALL REALLOCATE_PATCH(NM,SIZE(M%PATCH),SIZE(M%PATCH)+10)
+   IF (M%N_PATCH>SIZE(M%PATCH)) CALL REALLOCATE_PATCH(NM,SIZE(M%PATCH),SIZE(M%PATCH)+10,'PATCH')
 PA => M%PATCH(M%N_PATCH)
 PA%I1 = MIN(M%IBAR,MAX(0,NINT( GINV(OB%X1-M%XS,1,NM)*RDXI ))) ; PA%IG1 = PA%I1+1
 PA%I2 = MIN(M%IBAR,MAX(0,NINT( GINV(OB%X2-M%XS,1,NM)*RDXI ))) ; PA%IG2 = PA%I2
@@ -1733,20 +1733,30 @@ END SUBROUTINE WRITE_STL_FILE
 !> \param NM Mesh number
 !> \param N1 Current size of array
 !> \param N2 New size of array
+!> \param PATCH_NAME Character string indicating which PATCH_TYPE variable to reallocate
-SUBROUTINE REALLOCATE_PATCH(NM,N1,N2)
+SUBROUTINE REALLOCATE_PATCH(NM,N1,N2,PATCH_NAME)
 INTEGER, INTENT(IN) :: N1,N2,NM
 TYPE (PATCH_TYPE), DIMENSION(:), ALLOCATABLE :: PATCH_DUMMY
 TYPE (MESH_TYPE), POINTER :: M
+CHARACTER(*), INTENT(IN) :: PATCH_NAME
 M => MESHES(NM)
 ALLOCATE(PATCH_DUMMY(1:N2))
-PATCH_DUMMY(1:N1) = M%PATCH(1:N1)
-DEALLOCATE(M%PATCH)
-ALLOCATE(M%PATCH(1:N2))
-M%PATCH(1:N2) = PATCH_DUMMY(1:N2)
+SELECT CASE(PATCH_NAME)
+   CASE('PATCH')
+      PATCH_DUMMY(1:N1) = M%PATCH(1:N1)
+      DEALLOCATE(M%PATCH)
+      ALLOCATE(M%PATCH(1:N2))
+      M%PATCH(1:N2) = PATCH_DUMMY(1:N2)
+   CASE('EXTERIOR_PATCH')
+      PATCH_DUMMY(1:N1) = M%EXTERIOR_PATCH(1:N1)
+      DEALLOCATE(M%EXTERIOR_PATCH)
+      ALLOCATE(M%EXTERIOR_PATCH(1:N2))
+      M%EXTERIOR_PATCH(1:N2) = PATCH_DUMMY(1:N2)
+END SELECT
 DEALLOCATE(PATCH_DUMMY)
 END SUBROUTINE REALLOCATE_PATCH
@@ -2784,6 +2794,8 @@ SUBROUTINE DUMMY_VENTS(FI,N1,N2)
 VENT_INDICES(I:ISTP,J:JSTP,FI) = -1
 M%N_EXTERIOR_PATCH = M%N_EXTERIOR_PATCH + 1
+IF (M%N_EXTERIOR_PATCH>SIZE(M%EXTERIOR_PATCH)) &
+   CALL REALLOCATE_PATCH(NM,SIZE(M%EXTERIOR_PATCH),SIZE(M%EXTERIOR_PATCH)+10,'EXTERIOR_PATCH')
 EP => M%EXTERIOR_PATCH(M%N_EXTERIOR_PATCH)
 SELECT CASE(FI)
 CASE (1) ; EP%I1=0 ; EP%I2=0 ; EP%J1=I-1 ; EP%J2=ISTP ; EP%K1=J-1 ; EP%K2=JSTP ; EP%IOR= 1
@@ -2816,7 +2828,7 @@ SUBROUTINE ADD_EXTERIOR_VENTS
 IF (PROCESS(NM)/=MY_RANK) CYCLE
 M => MESHES(NM)
-ALLOCATE(M%EXTERIOR_PATCH(10*(6+N_VENT_TOTAL))) ; M%N_EXTERIOR_PATCH = 0
+ALLOCATE(M%EXTERIOR_PATCH(10)) ; M%N_EXTERIOR_PATCH = 0 ! The size of EXTERIOR_PATCH will expand if need be.
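The dump.f90 changes above replace the old worst-case pre-allocation, ALLOCATE(M%EXTERIOR_PATCH(10*(6+N_VENT_TOTAL))), with a small initial array that REALLOCATE_PATCH grows in chunks of 10 as patches are discovered. A minimal Python sketch of the same copy-grow pattern follows; the chunk size of 10 and the size check mirror the hunks above, while the record layout and counts are purely illustrative:

    import numpy as np

    def reallocate_patch(arr, n1, n2):
        # Grow a 1-D record array from n1 to n2 entries, preserving the first n1
        # (the analogue of REALLOCATE_PATCH's copy-grow sequence).
        grown = np.zeros(n2, dtype=arr.dtype)
        grown[:n1] = arr[:n1]
        return grown

    patch = np.zeros(10, dtype=[('I1', 'i4'), ('I2', 'i4')])  # initial ALLOCATE of 10 entries
    n_patch = 0
    for _ in range(37):                  # suppose 37 patches are discovered
        n_patch += 1
        if n_patch > patch.size:         # IF (M%N_PATCH>SIZE(M%PATCH)) ...
            patch = reallocate_patch(patch, patch.size, patch.size + 10)
    print(patch.size)                    # 40 -- grown on demand, in chunks of 10

Growing in fixed chunks trades a few extra copies for a much smaller footprint when meshes have few exterior patches, which is the common case.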
 ALLOCATE(VENT_INDICES(MAX(M%IBAR,M%JBAR),MAX(M%JBAR,M%KBAR),6)) ; VENT_INDICES = 0
 VENT_LOOP: DO N=1,M%N_VENT
diff --git a/Source/read.f90 b/Source/read.f90
index 382476f5c76..da3cf203713 100644
--- a/Source/read.f90
+++ b/Source/read.f90
@@ -7461,6 +7461,12 @@ SUBROUTINE READ_MATL
    PEAK_REACTION_RATE = 2._EB*ML%HEATING_RATE(NR)/ML%PYROLYSIS_RANGE(NR)
 ENDIF
 ML%E(NR) = EXP(1._EB)*PEAK_REACTION_RATE*R0*ML%TMP_REF(NR)**2/ML%HEATING_RATE(NR)
+! The limit 0.0001*HUGE_EB is used so that the reaction rate does not overflow when multiplied by RHO.
+IF (LOG(0.0001_EB*HUGE_EB)*R0*ML%TMP_REF(NR) < ML%E(NR)) THEN
+   WRITE(MESSAGE,'(3A,E10.3)') 'ERROR(269): MATL ',TRIM(ML%ID),' The specified pyrolysis parameters result in A >',&
+      0.0001_EB*HUGE_EB
+   CALL SHUTDOWN(MESSAGE) ; RETURN
+ENDIF
 ML%A(NR) = EXP(1._EB)*PEAK_REACTION_RATE*EXP(ML%E(NR)/(R0*ML%TMP_REF(NR)))
 ENDIF
diff --git a/Utilities/Matlab/FDS_validation_dataplot_inputs.csv b/Utilities/Matlab/FDS_validation_dataplot_inputs.csv
index 24072298f75..208a53790d6 100644
--- a/Utilities/Matlab/FDS_validation_dataplot_inputs.csv
+++ b/Utilities/Matlab/FDS_validation_dataplot_inputs.csv
@@ -2532,7 +2532,7 @@ s,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
 d,SWJTU Tunnels,SWJTU_Tunnels/SWJTU_Tunnels_Extinction.csv,1,2,HRR_1,Time_1,Exp,k-,0,10000,,5.2,6,-10000,10000,0,SWJTU_Tunnels/Test_I-1_hrr.csv,2,3,HRR,Time,FDS,k--,0,10000,,5.2,6,-10000,10000,0,Extinction Time; Test I-1,HRR (kW),Time (s),0,20,1,0,800,1,yes,0.05 0.90,East,,1,SWJTU_Tunnels/Test_I-1_git.txt,linear,FDS_Validation_Guide/SCRIPT_FIGURES/SWJTU_Tunnels/Test_I-1_Ext_Time,0,max,0,SWJTU Tunnels,g^,g,TeX
 d,SWJTU Tunnels,SWJTU_Tunnels/SWJTU_Tunnels_Extinction.csv,1,2,HRR_2,Time_2,Exp,k-,0,10000,,10,12,-10000,10000,0,SWJTU_Tunnels/Test_I-2_hrr.csv,2,3,HRR,Time,FDS,k--,0,10000,,10,12,-10000,10000,0,Extinction Time; Test I-2,HRR (kW),Time (s),0,20,1,0,800,1,yes,0.05 0.90,East,,1,SWJTU_Tunnels/Test_I-2_git.txt,linear,FDS_Validation_Guide/SCRIPT_FIGURES/SWJTU_Tunnels/Test_I-2_Ext_Time,0,max,0,SWJTU Tunnels,g^,g,TeX
 d,SWJTU Tunnels,SWJTU_Tunnels/SWJTU_Tunnels_Extinction.csv,1,2,HRR_3,Time_3,Exp,k-,0,10000,,16,17,-10000,10000,0,SWJTU_Tunnels/Test_I-3_hrr.csv,2,3,HRR,Time,FDS,k--,0,10000,,16,17,-10000,10000,0,Extinction Time; Test I-3,HRR (kW),Time (s),0,20,1,0,800,1,yes,0.05 0.90,East,,1,SWJTU_Tunnels/Test_I-3_git.txt,linear,FDS_Validation_Guide/SCRIPT_FIGURES/SWJTU_Tunnels/Test_I-3_Ext_Time,0,max,0,SWJTU Tunnels,g^,g,TeX
-d,SWJTU Tunnels,SWJTU_Tunnels/SWJTU_Tunnels_Extinction.csv,1,2,HRR_4,Time_4,Exp,k-,0,10000,,5.2,6,-10000,10000,0,SWJTU_Tunnels/Test_I-4_hrr.csv,2,3,HRR,Time,FDS,k--,0,10000,,5.2,6,-10000,10000,0,Extinction Time; Test I-4,HRR (kW),Time (s),0,20,1,0,800,1,yes,0.05 0.90,East,,1,SWJTU_Tunnels/Test_I-4_git.txt,linear,FDS_Validation_Guide/SCRIPT_FIGURES/SWJTU_Tunnels/Test_I-4_Ext_Time,0,max,0,SWJTU Tunnels,g^,g,TeX
+d,SWJTU Tunnels,SWJTU_Tunnels/SWJTU_Tunnels_Extinction.csv,1,2,HRR_4,Time_4,Exp,k-,0,10000,,4.5,6,-10000,10000,0,SWJTU_Tunnels/Test_I-4_hrr.csv,2,3,HRR,Time,FDS,k--,0,10000,,4.5,6,-10000,10000,0,Extinction Time; Test I-4,HRR (kW),Time (s),0,20,1,0,800,1,yes,0.05 0.90,East,,1,SWJTU_Tunnels/Test_I-4_git.txt,linear,FDS_Validation_Guide/SCRIPT_FIGURES/SWJTU_Tunnels/Test_I-4_Ext_Time,0,max,0,SWJTU Tunnels,g^,g,TeX
 d,SWJTU Tunnels,SWJTU_Tunnels/SWJTU_Tunnels_Extinction.csv,1,2,HRR_5,Time_5,Exp,k-,0,10000,,10,12,-10000,10000,0,SWJTU_Tunnels/Test_I-5_hrr.csv,2,3,HRR,Time,FDS,k--,0,10000,,10,12,-10000,10000,0,Extinction Time; Test I-5,HRR (kW),Time (s),0,20,1,0,800,1,yes,0.05
0.90,East,,1,SWJTU_Tunnels/Test_I-5_git.txt,linear,FDS_Validation_Guide/SCRIPT_FIGURES/SWJTU_Tunnels/Test_I-5_Ext_Time,0,max,0,SWJTU Tunnels,g^,g,TeX d,SWJTU Tunnels,SWJTU_Tunnels/SWJTU_Tunnels_Extinction.csv,1,2,HRR_6,Time_6,Exp,k-,0,10000,,15,17,-10000,10000,0,SWJTU_Tunnels/Test_I-6_hrr.csv,2,3,HRR,Time,FDS,k--,0,10000,,12,17,-10000,10000,0,Extinction Time; Test I-6,HRR (kW),Time (s),0,20,1,0,800,1,yes,0.05 0.90,East,,1,SWJTU_Tunnels/Test_I-6_git.txt,linear,FDS_Validation_Guide/SCRIPT_FIGURES/SWJTU_Tunnels/Test_I-6_Ext_Time,0,max,0,SWJTU Tunnels,g^,g,TeX s,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, diff --git a/Utilities/Python/FDS_validation_script.py b/Utilities/Python/FDS_validation_script.py index 0bc106c7687..f3a90b9d5ae 100644 --- a/Utilities/Python/FDS_validation_script.py +++ b/Utilities/Python/FDS_validation_script.py @@ -70,6 +70,9 @@ # verbose=True, # ) +# Create table of git statistics for FDS Validation Guide + +print("validation_git_stats..."); runpy.run_path("./scripts/validation_git_stats.py", run_name="__main__") # Special cases diff --git a/Utilities/Python/fdsplotlib.py b/Utilities/Python/fdsplotlib.py index 3894e965fea..7303a6dd24f 100644 --- a/Utilities/Python/fdsplotlib.py +++ b/Utilities/Python/fdsplotlib.py @@ -89,8 +89,8 @@ def _compute_metrics_block( Returns: vals_flat : 1D np.array (metrics for each curve, or concatenated for 'all') - titles : list of curve labels - per_curve_series : list of 1D arrays (for Metric='all') + titles : list of metric labels + per_curve_series : list of per-curve metric arrays (for Metric='all') """ import numpy as np @@ -163,18 +163,19 @@ def _parse_stat_xy(m): else: out = np.nan - # MATLAB passes tiny value instead of exact zero (survives nonzeros()) if out == 0.0: out = 1e-12 return np.array([out]), [f"curve{idx_first}"], [] - # --- metric='all': return concatenated series for each curve (minus initial) --- + # --- metric='all': return all finite Y values (one per data point) --- if metric_str == "all": for j in range(ncols): - yj = Y_sel[:, j].reshape(-1) - initial_value + yj = Y_sel[:, j].reshape(-1) + mask = np.isfinite(yj) + yj = yj[mask] - initial_value per_curve_series.append(yj) - titles.extend([f"curve{j+1}"] * yj.size) + titles.extend([f"point{k+1}_curve{j+1}" for k in range(len(yj))]) vals_flat = np.concatenate(per_curve_series) if per_curve_series else np.array([]) return vals_flat, titles, per_curve_series @@ -203,10 +204,8 @@ def _parse_stat_xy(m): elif metric_str == "start": out = yj[0] elif metric_str == "ipct": - # Not implemented in this port; keep parity-friendly placeholder - out = 1e-12 + out = 1e-12 # placeholder for parity else: - # Default fallback consistent with MATLAB's behavior path to non-zeros out = 1e-12 if out == 0.0: @@ -230,6 +229,16 @@ def dataplot(config_filename,**kwargs): # Suppress just the 'findfont' warnings from matplotlib's font manager logging.getLogger('matplotlib.font_manager').setLevel(logging.ERROR) + # --- Simple in-memory CSV cache to avoid redundant I/O --- + _csv_cache = {} + + def read_csv_cached(path, **kwargs): + """Read CSV once per path; return cached DataFrame copy.""" + if path not in _csv_cache: + _csv_cache[path] = pd.read_csv(path, **kwargs) + # return a shallow copy so downstream dropna/trim doesn’t mutate cache + return _csv_cache[path].copy() + # defaults configdir = kwargs.get('configdir','') revision = kwargs.get('revision','') @@ -309,47 +318,51 @@ def dataplot(config_filename,**kwargs): # Only filter by plot_list if no plot_range was passed C = 
C[C['Dataname'].str.lower().isin([p.lower() for p in plot_list])] - Plot_Filename_Last = None - d1_Key_Last = None - f_Last = plt.figure() + # --- Determine if any 'o' lines exist (only once) --- + otest_active = any( str(C.iloc[j]['switch_id']).strip().lower() == 'o' for j in range(len(C)) ) + f_Last = plt.figure() # loop over the rows of the config file - for pos, (irow, row) in enumerate(C.iterrows()): - pp = define_plot_parameters(C, pos) # use position, not label + # Cache the position of __orig_index__ for speed and clarity + col_orig_idx = C.columns.get_loc("__orig_index__") - # ---------------------------------------------------------------------- - # Handle MATLAB dataplot switch_id behavior (d, f, o, g, s) - # ---------------------------------------------------------------------- - switch_id = str(pp.switch_id).strip().lower() + # Cache column positions once for speed + col_idx = {col: i for i, col in enumerate(C.columns)} - # Skip 's' outright - if switch_id == 's': - continue + # Detect fast_mode: skip expensive DataFrame access when processing all rows + plot_list_lower = [p.lower() for p in plot_list] if plot_list else [] + + # Toggle stays, but defaults to fast everywhere + fast_mode = bool(kwargs.get('fast_mode', True)) + if verbose: + print(f"[dataplot] {'Running in fast_mode (lightweight define_plot_parameters)' if fast_mode else 'Running in full mode'}") + + for pos, row in enumerate(C.itertuples(index=False, name=None)): - # If ANY 'o' lines exist in the filtered config C, process only those - otest_active = any( str(C.iloc[j]['switch_id']).strip().lower() == 'o' for j in range(len(C)) ) + csv_rownum = int(row[col_orig_idx]) + header_rows + 1 + pp = define_plot_parameters(C, pos, lightweight=fast_mode) - if otest_active and switch_id != 'o': + # --- Handle MATLAB dataplot switch_id behavior (d, f, o, g, s) --- + if pp.switch_id == 's': continue - # 'g' lines: generate plots but EXCLUDE from scatplot stats & drange - gtest = (switch_id == 'g') + if otest_active and pp.switch_id != 'o': + continue - # 'd' default, 'f' follow-on (your filename reuse already mimics “hold on”) - dtest = (switch_id == 'd') - ftest = (switch_id == 'f') + gtest = (pp.switch_id == 'g') + dtest = (pp.switch_id == 'd') + ftest = (pp.switch_id == 'f') - # If it’s none of the recognized ones, skip safely - if not (dtest or ftest or gtest or switch_id == 'o'): + if not (dtest or ftest or gtest or pp.switch_id == 'o'): if verbose: - print(f"[dataplot] Skipping unrecognized switch_id '{pp.switch_id}' on line {irow+2}") + print(f"[dataplot] Skipping unrecognized switch_id '{pp.switch_id}' on line {csv_rownum}") continue - # Track drange like MATLAB (1-based CSV lines starting at row 2) + # Track drange (MATLAB-style 1-based CSV row index) if not gtest: - drange.append(int(row["__orig_index__"]) + 2) + drange.append(int(row[col_orig_idx]) + 2) # Append metadata only for rows that should appear in scatplot if not gtest: @@ -363,18 +376,21 @@ def dataplot(config_filename,**kwargs): Save_Group_Style.append(pp.Group_Style) Save_Fill_Color.append(pp.Fill_Color) - # Placeholders (we will overwrite for this row below) Save_Measured_Metric.append(np.nan) Save_Predicted_Metric.append(np.nan) Save_Measured_Quantity.append(None) Save_Predicted_Quantity.append(None) # -------------- PLOTTING + PRINT ----------------- - # Read and prepare the experimental (d1) data, same for both cases - E = pd.read_csv(expdir + pp.d1_Filename, - header=int(pp.d1_Col_Name_Row - 1), - sep=',', engine='python', quotechar='"') + E = 
read_csv_cached(expdir + pp.d1_Filename, + header=int(pp.d1_Col_Name_Row - 1), + sep=',', engine='python', quotechar='"', + skip_blank_lines=True).dropna(how='all') + + # Drop trailing NaN rows across all columns (MATLAB csvread behavior) + E = E.loc[:E.dropna(how='all').last_valid_index()] E.columns = E.columns.str.strip() + start_idx = int(pp.d1_Data_Row - pp.d1_Col_Name_Row - 1) x, _ = get_data(E, pp.d1_Ind_Col_Name, start_idx) y, _ = get_data(E, pp.d1_Dep_Col_Name, start_idx) @@ -417,16 +433,19 @@ def dataplot(config_filename,**kwargs): key_labels = (raw_keys + [None] * len(y_plot_list))[:len(y_plot_list)] # --- Create new figure or reuse last one --- - if pp.Plot_Filename != Plot_Filename_Last: + if dtest: if verbose: - csv_rownum = int(row["__orig_index__"]) + header_rows + 1 # true CSV 1-based line print(f"Generating plot {csv_rownum} {pltdir}{pp.Plot_Filename}...") if close_figs: plt.close('all') first_plot = True - else: + elif ftest: f = f_Last first_plot = False + else: + if verbose: + print(f"[dataplot] Skipping unrecognized switch_id '{pp.switch_id}' on line {csv_rownum}") + continue # --- Plot Exp curves (handles both cases) --- for i, (x_i, y_i) in enumerate(zip(x_plot_list, y_plot_list)): @@ -449,29 +468,63 @@ def dataplot(config_filename,**kwargs): # --- Save measured (experimental) metric using MATLAB-equivalent logic --- if not gtest: try: - vals_meas, qty_meas, _ = _compute_metrics_block( - x=x, - Y=y, - metric=pp.Metric, - initial_value=float(pp.d1_Initial_Value or 0.0), - comp_start=float(pp.d1_Comp_Start or np.nan), - comp_end=float(pp.d1_Comp_End or np.nan), - dep_comp_start=float(pp.d1_Dep_Comp_Start or np.nan), - dep_comp_end=float(pp.d1_Dep_Comp_End or np.nan), - variant_side="d1", - ) - Save_Measured_Metric[-1] = vals_meas - Save_Measured_Quantity[-1] = qty_meas + vals_meas_list = [] + qty_meas_list = [] + + if y.ndim == 2 and x.ndim == 2 and y.shape[1] == x.shape[1]: + for j in range(y.shape[1]): + xj = np.ravel(x[:, j]) + yj = np.ravel(y[:, j]) + mask = np.isfinite(xj) & np.isfinite(yj) + xj = xj[mask] + yj = yj[mask] + if len(xj) > 0 and len(yj) > 0: + vals_meas, qty_meas, _ = _compute_metrics_block( + x=xj, + Y=yj, + metric=pp.Metric, + initial_value=float(pp.d1_Initial_Value or 0.0), + comp_start=float(pp.d1_Comp_Start or np.nan), + comp_end=float(pp.d1_Comp_End or np.nan), + dep_comp_start=float(pp.d1_Dep_Comp_Start or np.nan), + dep_comp_end=float(pp.d1_Dep_Comp_End or np.nan), + variant_side="d1", + ) + vals_meas_list.append(vals_meas) + qty_meas_list.append(qty_meas) + else: + vals_meas, qty_meas, _ = _compute_metrics_block( + x=x, + Y=y, + metric=pp.Metric, + initial_value=float(pp.d1_Initial_Value or 0.0), + comp_start=float(pp.d1_Comp_Start or np.nan), + comp_end=float(pp.d1_Comp_End or np.nan), + dep_comp_start=float(pp.d1_Dep_Comp_Start or np.nan), + dep_comp_end=float(pp.d1_Dep_Comp_End or np.nan), + variant_side="d1", + ) + vals_meas_list = [vals_meas] + qty_meas_list = [qty_meas] + + Save_Measured_Metric[-1] = np.array(vals_meas_list, dtype=object) + Save_Measured_Quantity[-1] = np.array(qty_meas_list, dtype=object) + except Exception as e: print(f"[dataplot] Error computing measured metric for {pp.Dataname}: {e}") Save_Measured_Metric[-1] = np.array([]) Save_Measured_Quantity[-1] = [] # ------------------- MODEL (d2) ------------------- - M = pd.read_csv(cmpdir + pp.d2_Filename, - header=int(pp.d2_Col_Name_Row - 1), - sep=',', engine='python', quotechar='"') + M = read_csv_cached(cmpdir + pp.d2_Filename, + header=int(pp.d2_Col_Name_Row - 1), + 
sep=',', engine='python', quotechar='"', + skip_blank_lines=True).dropna(how='all') + + # Drop trailing NaN rows across all columns (MATLAB csvread behavior) + M = M.loc[:M.dropna(how='all').last_valid_index()] M.columns = M.columns.str.strip() + start_idx = int(pp.d2_Data_Row - pp.d2_Col_Name_Row - 1) # --- Define version string --- @@ -546,42 +599,78 @@ def dataplot(config_filename,**kwargs): # --- Save predicted (model) metric using MATLAB-equivalent logic --- if not gtest: try: - vals_pred, qty_pred, _ = _compute_metrics_block( - x=x, - Y=y, - metric=pp.Metric, - initial_value=float(pp.d2_Initial_Value or 0.0), - comp_start=float(pp.d2_Comp_Start or np.nan), - comp_end=float(pp.d2_Comp_End or np.nan), - dep_comp_start=float(pp.d2_Dep_Comp_Start or np.nan), - dep_comp_end=float(pp.d2_Dep_Comp_End or np.nan), - variant_side="d2", - ) - - # --- Early consistency & padding for Metric='all' --- - if isinstance(pp.Metric, str) and pp.Metric.strip().lower() == 'all': - mvec = np.atleast_1d(Save_Measured_Metric[-1]) - pvec = np.atleast_1d(vals_pred) - len_m, len_p = mvec.size, pvec.size + vals_pred_list = [] + qty_pred_list = [] + + if y.ndim == 2 and x.ndim == 2 and y.shape[1] == x.shape[1]: + # Multiple paired curves (e.g., z/L jet, 62 kW, 31 kW) + for j in range(y.shape[1]): + xj = np.ravel(x[:, j]) + yj = np.ravel(y[:, j]) + mask = np.isfinite(xj) & np.isfinite(yj) + xj = xj[mask] + yj = yj[mask] + if len(xj) > 0 and len(yj) > 0: + vals_pred, qty_pred, _ = _compute_metrics_block( + x=xj, + Y=yj, + metric=pp.Metric, + initial_value=float(pp.d2_Initial_Value or 0.0), + comp_start=float(pp.d2_Comp_Start or np.nan), + comp_end=float(pp.d2_Comp_End or np.nan), + dep_comp_start=float(pp.d2_Dep_Comp_Start or np.nan), + dep_comp_end=float(pp.d2_Dep_Comp_End or np.nan), + variant_side="d2", + ) + vals_pred_list.append(vals_pred) + qty_pred_list.append(qty_pred) + else: + # Single curve (1D) + vals_pred, qty_pred, _ = _compute_metrics_block( + x=x, + Y=y, + metric=pp.Metric, + initial_value=float(pp.d2_Initial_Value or 0.0), + comp_start=float(pp.d2_Comp_Start or np.nan), + comp_end=float(pp.d2_Comp_End or np.nan), + dep_comp_start=float(pp.d2_Dep_Comp_Start or np.nan), + dep_comp_end=float(pp.d2_Dep_Comp_End or np.nan), + variant_side="d2", + ) + vals_pred_list = [vals_pred] + qty_pred_list = [qty_pred] + + # --- MATLAB-compatible consistency checks (Metric='all' padding, etc.) 
--- + vals_meas_entry = Save_Measured_Metric[-1] + metric_str = str(pp.Metric or '').strip().lower() + + # Always flatten any nested list-of-arrays first + flat_pred = np.concatenate([ + np.atleast_1d(v) for v in vals_pred_list if v is not None and np.size(v) > 0 + ]) if any(np.size(v) > 0 for v in vals_pred_list) else np.array([]) + + flat_meas = np.atleast_1d(vals_meas_entry).ravel() + + if metric_str == 'all': + len_m, len_p = flat_meas.size, flat_pred.size if len_m != len_p: maxlen = max(len_m, len_p) if len_m < maxlen: - mvec = np.pad(mvec, (0, maxlen - len_m), constant_values=np.nan) + flat_meas = np.pad(flat_meas, (0, maxlen - len_m), constant_values=np.nan) if len_p < maxlen: - pvec = np.pad(pvec, (0, maxlen - len_p), constant_values=np.nan) - Save_Measured_Metric[-1] = mvec - vals_pred = pvec + flat_pred = np.pad(flat_pred, (0, maxlen - len_p), constant_values=np.nan) + Save_Measured_Metric[-1] = flat_meas print(f"[dataplot] Padded {pp.Dataname} ({pp.Quantity}) from ({len_m},{len_p}) to {maxlen}") else: - len_m = np.size(Save_Measured_Metric[-1]) - len_p = np.size(vals_pred) + len_m = flat_meas.size + len_p = flat_pred.size if len_m != len_p: - print(f"[dataplot] Length mismatch at index {irow+2}: " - f"{pp.Dataname} | {pp.Quantity} | " - f"Measured={len_m}, Predicted={len_p}") + print(f"[dataplot] Length mismatch at CSV row {csv_rownum}: " + f"{pp.Dataname} | {pp.Quantity} | Measured={len_m}, Predicted={len_p}") + + Save_Predicted_Metric[-1] = flat_pred + Save_Predicted_Quantity[-1] = np.array(qty_pred_list, dtype=object) - Save_Predicted_Metric[-1] = vals_pred - Save_Predicted_Quantity[-1] = qty_pred except Exception as e: print(f"[dataplot] Error computing predicted metric for {pp.Dataname}: {e}") Save_Predicted_Metric[-1] = np.array([]) @@ -595,8 +684,6 @@ def dataplot(config_filename,**kwargs): plt.savefig(pltdir + pp.Plot_Filename + '.pdf', backend='pdf') - Plot_Filename_Last = pp.Plot_Filename - d1_Key_Last = pp.d1_Key f_Last = f # --- MATLAB-compatible output scaffolding for scatplot interface --- @@ -620,16 +707,24 @@ def dataplot(config_filename,**kwargs): print(f"[dataplot] Error assembling saved_data: {e}") saved_data = [] - # --- quick parity audit before returning --- - for i, (m, p, name, qty) in enumerate(zip(Save_Measured_Metric, - Save_Predicted_Metric, - Save_Dataname, - Save_Quantity)): + # --- MATLAB-compatible parity audit before returning --- + for i, (m, p, name, qty) in enumerate(zip( + Save_Measured_Metric, + Save_Predicted_Metric, + Save_Dataname, + Save_Quantity + )): len_m = np.size(m) if isinstance(m, np.ndarray) else 0 len_p = np.size(p) if isinstance(p, np.ndarray) else 0 + + # Get original CSV row number (1-based) + csv_rownum = drange[i] if i < len(drange) else "?" 
+ if len_m != len_p: - print(f"[dataplot] Length mismatch at index {i}: " - f"{name} | {qty} | Measured={len_m}, Predicted={len_p}") + print( + f"[dataplot] Length mismatch at CSV row {csv_rownum}: " + f"{name} | {qty} | Measured={len_m}, Predicted={len_p}" + ) print("[dataplot] returning saved_data and drange") return saved_data, drange @@ -1361,10 +1456,100 @@ def matlab_legend_to_matplotlib(position): return mapping.get(position.strip().lower(), 'best') -def define_plot_parameters(D, irow): +def define_plot_parameters(D, irow, lightweight=False): import numpy as np class plot_parameters: + def __init__(self): + pass + + def __repr__(self): + return str(self.__dict__) + + # --- FAST PATH ---------------------------------------------------------- + if lightweight: + col_idx = {col: i for i, col in enumerate(D.columns)} + row = D.iloc[irow].values + + def get(col, default=None): + idx = col_idx.get(col) + return row[idx] if idx is not None else default + + d = plot_parameters() + + # Core identifiers + d.switch_id = get('switch_id') + d.Dataname = get('Dataname') + d.VerStr_Filename = get('VerStr_Filename') + d.Plot_Filename = get('Plot_Filename') + d.Plot_Title = get('Plot_Title') + d.Quantity = get('Quantity') + d.Metric = get('Metric') + d.Error_Tolerance = get('Error_Tolerance') + + # File and column info + d.d1_Filename = get('d1_Filename') + d.d1_Col_Name_Row = get('d1_Col_Name_Row', 1) + d.d1_Data_Row = get('d1_Data_Row', 2) + d.d1_Ind_Col_Name = get('d1_Ind_Col_Name') + d.d1_Dep_Col_Name = get('d1_Dep_Col_Name') + d.d1_Key = get('d1_Key', '') + d.d1_Style = get('d1_Style', '') + d.d1_Comp_Start = get('d1_Comp_Start', np.nan) + d.d1_Comp_End = get('d1_Comp_End', np.nan) + d.d1_Dep_Comp_Start = get('d1_Dep_Comp_Start', np.nan) + d.d1_Dep_Comp_End = get('d1_Dep_Comp_End', np.nan) + d.d1_Initial_Value = get('d1_Initial_Value', 0.0) + + d.d2_Filename = get('d2_Filename') + d.d2_Col_Name_Row = get('d2_Col_Name_Row', 1) + d.d2_Data_Row = get('d2_Data_Row', 2) + d.d2_Ind_Col_Name = get('d2_Ind_Col_Name') + d.d2_Dep_Col_Name = get('d2_Dep_Col_Name') + d.d2_Key = get('d2_Key', '') + d.d2_Style = get('d2_Style', '') + d.d2_Comp_Start = get('d2_Comp_Start', np.nan) + d.d2_Comp_End = get('d2_Comp_End', np.nan) + d.d2_Dep_Comp_Start = get('d2_Dep_Comp_Start', np.nan) + d.d2_Dep_Comp_End = get('d2_Dep_Comp_End', np.nan) + d.d2_Initial_Value = get('d2_Initial_Value', 0.0) + + # Plot formatting + d.Ind_Title = get('Ind_Title', '') + d.Dep_Title = get('Dep_Title', '') + d.Min_Ind = get('Min_Ind') + d.Max_Ind = get('Max_Ind') + d.Min_Dep = get('Min_Dep') + d.Max_Dep = get('Max_Dep') + d.Scale_Ind = get('Scale_Ind', 1.0) + d.Scale_Dep = get('Scale_Dep', 1.0) + d.Flip_Axis = get('Flip_Axis', '') + d.Plot_Type = get('Plot_Type', 'linear') + d.Key_Position = get('Key_Position', 'best') + d.Title_Position = get('Title_Position', '') + d.Legend_XYWidthHeight = get('Legend_XYWidthHeight', '') + d.Paper_Width_Factor = get('Paper_Width_Factor', 1.0) + + # Grouping / style info + d.Group_Key_Label = get('Group_Key_Label') + d.Group_Style = get('Group_Style') + d.Fill_Color = get('Fill_Color') + d.Font_Interpreter = get('Font_Interpreter') + + # --- sanitization for human-facing strings --- + d.Plot_Title = sanitize(safe_strip(d.Plot_Title)) + d.Ind_Title = sanitize(safe_strip(d.Ind_Title)) + d.Dep_Title = sanitize(safe_strip(d.Dep_Title)) + d.Quantity = sanitize(safe_strip(d.Quantity)) + d.Metric = sanitize(safe_strip(d.Metric)) + d.Group_Key_Label = sanitize(safe_strip(d.Group_Key_Label)) + d.d1_Key = 
sanitize(safe_strip(d.d1_Key)) + d.d2_Key = sanitize(safe_strip(d.d2_Key)) + + return d + + # --- FULL PATH ---------------------------------------------------------- + class plot_parameters_full(plot_parameters): def __init__(self): self.switch_id = D.values[irow,D.columns.get_loc('switch_id')] self.Dataname = D.values[irow,D.columns.get_loc('Dataname')] @@ -1423,12 +1608,9 @@ def __init__(self): self.Fill_Color = D.values[irow,D.columns.get_loc('Fill_Color')] self.Font_Interpreter = D.values[irow,D.columns.get_loc('Font_Interpreter')] - def __repr__(self): - return str(self.__dict__) - - d = plot_parameters() + d = plot_parameters_full() - # Explicit sanitization of only the human-facing fields + # --- sanitization block (unchanged) --- d.Plot_Title = sanitize(safe_strip(d.Plot_Title)) d.Ind_Title = sanitize(safe_strip(d.Ind_Title)) d.Dep_Title = sanitize(safe_strip(d.Dep_Title)) diff --git a/Utilities/Python/scripts/LNG_Dispersion.py b/Utilities/Python/scripts/LNG_Dispersion.py index af9ead23941..c462069d1da 100644 --- a/Utilities/Python/scripts/LNG_Dispersion.py +++ b/Utilities/Python/scripts/LNG_Dispersion.py @@ -43,8 +43,8 @@ for j in range(13): # Read devc and exp data (skip header lines to match MATLAB importdata) - M = pd.read_csv(os.path.join(outdir, f"{labels[j]}_devc.csv"), skiprows=2) - E = pd.read_csv(os.path.join(expdir, f"{labels[j]}_exp.csv"), skiprows=2) + M = pd.read_csv(os.path.join(outdir, f"{labels[j]}_devc.csv"), skiprows=1) + E = pd.read_csv(os.path.join(expdir, f"{labels[j]}_exp.csv"), skiprows=1) # Prepare output file with open(os.path.join(outdir, f"{labels[j]}.csv"), 'w') as fid: diff --git a/Utilities/Python/scripts/validation_git_stats.py b/Utilities/Python/scripts/validation_git_stats.py new file mode 100644 index 00000000000..bc450b35709 --- /dev/null +++ b/Utilities/Python/scripts/validation_git_stats.py @@ -0,0 +1,96 @@ + +# Generate the LaTeX table with validation git statistics. + +import subprocess +from pathlib import Path +import glob + +outdir = '../../../out/' +valdir = '../../Validation/' +resdir = '../../Manuals/FDS_Validation_Guide/SCRIPT_FIGURES/ScatterPlots/' + +def MAKEGITENTRY(case_name): + + # Output a single LaTeX table entry with git information for a validation set. 
+
+    # Collect all git.txt files and sort uniquely
+    git_file_pattern = outdir + case_name + '/*git.txt'
+    matching_files = set(Path().glob(git_file_pattern))
+
+    # Read the first line of the first file found (the old script's 'sort -u | head -1')
+    gitrev = ''
+    for file_path in sorted(matching_files):
+        with open(file_path.as_posix(), 'r') as fff:
+            gitrev = fff.readline().strip()
+        if gitrev:
+            break
+
+    output = ''
+    if gitrev != '':
+        # Extract git revision short hash
+        parts = gitrev.split('-')
+        if len(parts) >= 2:
+            gitrevshort = parts[-2]
+            gitrevshort = gitrevshort[1:] if gitrevshort.startswith('g') else gitrevshort
+        else:
+            gitrevshort = gitrev
+
+        # Get git date (the argument vector must be one word per element)
+        gitdate = ''
+        try:
+            result = subprocess.run(['git', 'show', '-s', '--format=%aD', gitrevshort], capture_output=True, text=True)
+            if result.returncode == 0 and result.stdout.strip():
+                date_parts = result.stdout.strip().split()
+                if len(date_parts) >= 5:
+                    gitdate = f"{date_parts[2]} {date_parts[1]}, {date_parts[3]}"
+        except Exception:
+            gitdate = ''
+
+        # Escape underscores for LaTeX
+        dir_escaped = case_name.replace('_', '\\_')
+        output = f"{dir_escaped} & {gitdate} & {gitrev} \\\\ \\hline\n"
+
+    return output
+
+
+# Create a LaTeX table
+
+OUTPUT_TEX_FILE = resdir + 'validation_git_stats.tex'
+
+with open(OUTPUT_TEX_FILE, 'w') as outf:
+    outf.write("\\begin{longtable}[c]{|l|c|c|}\n")
+    outf.write("\\caption[Validation Git Statistics]{Validation Git statistics for all data sets}\n")
+    outf.write("\\label{validation_git_stats}\n")
+    outf.write("\\\\ \\hline\n")
+    outf.write("Dataset & FDS Revision Date & FDS Revision String\\\\ \\hline \\hline\n")
+    outf.write("\\endfirsthead\n")
+    outf.write("\\hline\n")
+    outf.write("Dataset & FDS Revision Date & FDS Revision String\\\\ \\hline \\hline\n")
+    outf.write("\\endhead\n")
+
+
+# Extract case list from Validation/Process_All_Output.sh
+
+with open(valdir + 'Process_All_Output.sh', 'r') as inf:
+    lines = inf.readlines()
+
+cases = []
+line_num = 0
+for line in lines:
+    if 'PROCESS' in line:
+        parts = line.strip().split()
+        if len(parts) >= 2:
+            line_num += 1
+            if line_num > 1:  # Skip first match
+                cases.append(parts[1])
+
+# Process each case and generate table entries
+
+for case in cases:
+    entry = MAKEGITENTRY(case)
+    with open(OUTPUT_TEX_FILE, 'a') as outf:
+        outf.write(entry)
+
+# Table footer
+
+with open(OUTPUT_TEX_FILE, 'a') as f:
+    f.write("\\end{longtable}\n")
+
diff --git a/Utilities/Scripts/validation_git_stats.bat b/Utilities/Scripts/validation_git_stats.bat
deleted file mode 100644
index 66d96137758..00000000000
--- a/Utilities/Scripts/validation_git_stats.bat
+++ /dev/null
@@ -1,7 +0,0 @@
-@echo off
-
-:: Name and location of output .tex file with validation SVN statistics
-set OUTPUT_TEX_FILE=..\..\Manuals\FDS_Validation_Guide\SCRIPT_FIGURES\ScatterPlots\validation_svn_stats.tex
-
-copy stats_table_template.tex %OUTPUT_TEX_FILE%
-
diff --git a/Utilities/Scripts/validation_git_stats.py b/Utilities/Scripts/validation_git_stats.py
deleted file mode 100644
index 550cf74d67f..00000000000
--- a/Utilities/Scripts/validation_git_stats.py
+++ /dev/null
@@ -1,275 +0,0 @@
-#!/usr/bin/env python3
-
-import os
-import sys
-import subprocess
-import re
-import tempfile
-from pathlib import Path
-import argparse
-
-def MAKEGITENTRY(DIR, FIREMODELS_ROOT, TEMPDIR):
-    """
-    This function outputs a LaTeX table entry with git information for a validation set.
- """ - gitrevisions = os.path.join(TEMPDIR, f'gitrevisions.{os.getpid()}') - - # Collect all git.txt files and sort uniquely - git_txt_pattern = os.path.join(FIREMODELS_ROOT, 'out', DIR, '*git.txt') - - # Use shell to expand glob and collect content - try: - cmd = f'cat {git_txt_pattern} 2> /dev/null | sort -u' - result = subprocess.run(cmd, shell=True, capture_output=True, text=True) - with open(gitrevisions, 'w') as f: - f.write(result.stdout) - except Exception: - # If no files found, create empty file - with open(gitrevisions, 'w') as f: - f.write('') - - # Read first line - gitrev = '' - try: - with open(gitrevisions, 'r') as f: - gitrev = f.readline().strip() - except Exception: - gitrev = '' - - output = '' - if gitrev != '': - # Extract git revision short hash - # awk -F - '{print $(NF-1)}' | sed 's/^g//' - parts = gitrev.split('-') - if len(parts) >= 2: - gitrevshort = parts[-2] - gitrevshort = gitrevshort[1:] if gitrevshort.startswith('g') else gitrevshort - else: - gitrevshort = gitrev - - # Get git date - gitdate = '' - gitdate2 = '' - try: - result = subprocess.run( - ['git', 'show', '-s', '--format=%aD', gitrevshort], - capture_output=True, - text=True, - stderr=subprocess.DEVNULL - ) - if result.returncode == 0 and result.stdout.strip(): - # Parse date: awk '{print $3,$2",",$4}' - date_parts = result.stdout.strip().split() - if len(date_parts) >= 5: - gitdate = f"{date_parts[2]} {date_parts[1]}, {date_parts[3]}" - except Exception: - gitdate = '' - - if gitdate == '': - gitdate = 'undefined' - gitdate2 = '2000000000' - - # Check if ~/FDS-SMV exists - fds_smv_path = os.path.expanduser('~/FDS-SMV') - if os.path.exists(fds_smv_path): - CUR_DIR = os.getcwd() - os.chdir(fds_smv_path) - - # Extract different part of gitrev: awk -F - '{print $4}' | sed 's/^.\{1\}//' - parts = gitrev.split('-') - if len(parts) >= 4: - gitrevshort = parts[3] - gitrevshort = gitrevshort[1:] if len(gitrevshort) > 0 else gitrevshort - - gitdateold = '' - try: - result = subprocess.run( - ['git', 'show', '-s', '--format=%aD', gitrevshort], - capture_output=True, - text=True, - stderr=subprocess.DEVNULL - ) - if result.returncode == 0 and result.stdout.strip(): - date_parts = result.stdout.strip().split() - if len(date_parts) >= 5: - gitdateold = f"{date_parts[2]} {date_parts[1]}, {date_parts[3]}" - except Exception: - gitdateold = '' - - if gitdateold != '': - gitdate = gitdateold - try: - result = subprocess.run( - ['git', 'show', '-s', '--format=%at', gitrevshort], - capture_output=True, - text=True - ) - if result.returncode == 0 and result.stdout.strip(): - gitdate2 = result.stdout.strip().split()[0] - except Exception: - pass - - os.chdir(CUR_DIR) - else: - # Get Unix timestamp - try: - result = subprocess.run( - ['git', 'show', '-s', '--format=%at', gitrevshort], - capture_output=True, - text=True - ) - if result.returncode == 0 and result.stdout.strip(): - gitdate2 = result.stdout.strip().split()[0] - except Exception: - gitdate2 = '' - - # Escape underscores for LaTeX - dir_escaped = DIR.replace('_', '\\_') - output = f"{dir_escaped} & {gitdate} & {gitrev} & {gitdate2} \\\\ \\hline\n" - - # Remove temporary file - try: - os.remove(gitrevisions) - except Exception: - pass - - return output - - -def main(): - """ - Main function that generates the LaTeX table with validation git statistics. 
- """ - CURRENT_DIR = os.getcwd() - - # Determine repo root - SCRIPTDIR = os.path.dirname(os.path.realpath(__file__)) - os.chdir(os.path.join(SCRIPTDIR, '../../..')) - FIREMODELS_ROOT = os.getcwd() - - # Set up temp directory - TEMPDIR = os.path.join(os.path.expanduser('~'), 'temp') - if not os.path.exists(TEMPDIR): - os.makedirs(TEMPDIR) - - # Set environment variable - os.environ['FIREMODELS_ROOT'] = FIREMODELS_ROOT - - # Parse command line options (kept for backwards compatibility) - parser = argparse.ArgumentParser() - parser.add_argument('-r', dest='IGNORE', help='Ignored parameter for backwards compatibility') - args = parser.parse_args() - - # Change to scripts directory - os.chdir(os.path.join(FIREMODELS_ROOT, 'fds/Utilities/Scripts')) - - # Name and location of output .tex file with validation GIT statistics - OUTPUT_TEX_FILE = os.path.join( - FIREMODELS_ROOT, - 'fds/Manuals/FDS_Validation_Guide/SCRIPT_FIGURES/ScatterPlots/validation_git_stats.tex' - ) - - # Table header - with open(OUTPUT_TEX_FILE, 'w') as f: - f.write("\\begin{longtable}[c]{|l|c|c|}\n") - f.write("\\caption[Validation Git Statistics]{Validation Git statistics for all data sets}\n") - f.write("\\label{validation_git_stats}\n") - f.write("\\\\ \\hline\n") - f.write("Dataset & FDS Revision Date & FDS Revision String\\\\ \\hline \\hline\n") - f.write("\\endfirsthead\n") - f.write("\\hline\n") - f.write("Dataset & FDS Revision Date & FDS Revision String\\\\ \\hline \\hline\n") - f.write("\\endhead\n") - - # Table body - maketable = os.path.join(FIREMODELS_ROOT, 'fds/Validation/Process_All_Output.sh') - CASELIST = os.path.join(TEMPDIR, f'temp.out.{os.getpid()}') - TABLE_ENTRIES = os.path.join(TEMPDIR, f'temp2.out.{os.getpid()}') - - # Extract case list from Process_All_Output.sh - # grep PROCESS $maketable | awk 'BEGIN { FS = " " } ; { print $2 }' | awk '{if(NR>1)print}' - try: - with open(maketable, 'r') as f: - lines = f.readlines() - - cases = [] - line_num = 0 - for line in lines: - if 'PROCESS' in line: - parts = line.strip().split() - if len(parts) >= 2: - line_num += 1 - if line_num > 1: # Skip first match - cases.append(parts[1]) - - with open(CASELIST, 'w') as f: - for case in cases: - f.write(f"{case}\n") - except Exception as e: - # If file doesn't exist or error, create empty caselist - with open(CASELIST, 'w') as f: - f.write('') - - # Process each case and generate table entries - with open(TABLE_ENTRIES, 'w') as outf: - try: - with open(CASELIST, 'r') as inf: - for line in inf: - p = line.strip() - if p: - entry = MAKEGITENTRY(p, FIREMODELS_ROOT, TEMPDIR) - if entry: - outf.write(entry) - except Exception: - pass - - # Sort table entries and append to output file - # cat $TABLE_ENTRIES | sort -n -t '&' -k 4 | awk -F "&" '{ print $1 "&" $2 "&" $3 "\\\\ \\hline"}' - try: - with open(TABLE_ENTRIES, 'r') as f: - entries = f.readlines() - - # Sort by 4th field (numeric, using & as delimiter) - def sort_key(line): - parts = line.split('&') - if len(parts) >= 4: - # Extract numeric value from 4th field - try: - return int(parts[3].strip().split()[0]) - except: - return 0 - return 0 - - sorted_entries = sorted(entries, key=sort_key) - - with open(OUTPUT_TEX_FILE, 'a') as f: - for entry in sorted_entries: - parts = entry.split('&') - if len(parts) >= 4: - # Reconstruct line with first 3 fields - output_line = f"{parts[0]}&{parts[1]}&{parts[2]}\\\\ \\hline\n" - f.write(output_line) - except Exception: - pass - - # Clean up temporary files - try: - os.remove(CASELIST) - except Exception: - pass - try: - 
os.remove(TABLE_ENTRIES) - except Exception: - pass - - # Table footer - with open(OUTPUT_TEX_FILE, 'a') as f: - f.write("\\end{longtable}\n") - - # Return to original directory - os.chdir(CURRENT_DIR) - - -if __name__ == '__main__': - main() - diff --git a/Utilities/Scripts/validation_git_stats.sh b/Utilities/Scripts/validation_git_stats.sh deleted file mode 100755 index a1f37679a87..00000000000 --- a/Utilities/Scripts/validation_git_stats.sh +++ /dev/null @@ -1,95 +0,0 @@ -#!/bin/bash - -# This script outputs a LaTeX file with a table of the FDS validation -# sets and their corresponding GIT information (i.e., when the FDS -# output files were last commited to the repository). This table -# is then included in the FDS Validation Guide. -MAKEGITENTRY(){ -DIR=$1 -gitrevisions=$TEMPDIR/gitrevisions.$$ -cat $FIREMODELS_ROOT/out/$DIR/*git.txt 2> /dev/null | sort -u > $gitrevisions -gitrev=`head -1 $gitrevisions` -if [ "$gitrev" != "" ] ; then - gitrevshort=`echo $gitrev | awk -F - '{print $(NF-1)}' | sed 's/^g//'` - gitdate=`git show -s --format=%aD $gitrevshort 2> /dev/null | head -1 | awk '{print $3,$2",",$4}'` - if [ "$gitdate" == "" ]; then - gitdate="undefined" - gitdate2=2000000000 - if [ -e ~/FDS-SMV ]; then - CUR_DIR=`pwd` - cd ~/FDS-SMV - gitrevshort=`echo $gitrev | awk -F - '{print $4}' | sed 's/^.\{1\}//'` - gitdateold=`git show -s --format=%aD $gitrevshort 2> /dev/null | head -1 | awk '{print $3,$2",",$4}'` - if [ "$gitdateold" != "" ]; then - gitdate=$gitdateold - gitdate2=`git show -s --format=%at $gitrevshort | head -1 | awk '{print $1}'` - fi - cd $CUR_DIR - fi - else - gitdate2=`git show -s --format=%at $gitrevshort | head -1 | awk '{print $1}'` - fi - echo "${DIR//_/\\_} & $gitdate & $gitrev & $gitdate2 \\\\ \hline" -fi -rm $gitrevisions -} - -CURRENT_DIR=`pwd` - -#determine repo root -SCRIPTDIR=`dirname "$(readlink -f "$0")"` -cd $SCRIPTDIR/../../.. -FIREMODELS_ROOT=`pwd` -cd $CURDIR - -TEMPDIR=$HOME/temp -if [ ! 
-d $TEMPDIR ]; then - mkdir $TEMPDIR -fi - -export FIREMODELS_ROOT - -# the following is kept so older versions of firebot won't crash (ie firebot's that call this script using the -r parameter) -while getopts 'r:' OPTION -do -case $OPTION in - r) - IGNORE="$OPTARG" - ;; -esac -done -shift $(($OPTIND-1)) - -cd $FIREMODELS_ROOT/fds/Utilities/Scripts - -# Name and location of output .tex file with validation GIT statistics -OUTPUT_TEX_FILE=$FIREMODELS_ROOT/fds/Manuals/FDS_Validation_Guide/SCRIPT_FIGURES/ScatterPlots/validation_git_stats.tex - -# Table header -echo "\begin{longtable}[c]{|l|c|c|}" > $OUTPUT_TEX_FILE -echo "\caption[Validation Git Statistics]{Validation Git statistics for all data sets}" >> $OUTPUT_TEX_FILE -echo "\label{validation_git_stats}" >> $OUTPUT_TEX_FILE -echo "\\\\ \hline" >> $OUTPUT_TEX_FILE -echo "Dataset & FDS Revision Date & FDS Revision String\\\\ \hline \hline" >> $OUTPUT_TEX_FILE -echo "\endfirsthead" >> $OUTPUT_TEX_FILE -echo "\hline" >> $OUTPUT_TEX_FILE -echo "Dataset & FDS Revision Date & FDS Revision String\\\\ \hline \hline" >> $OUTPUT_TEX_FILE -echo "\endhead" >> $OUTPUT_TEX_FILE - -# Table body -maketable=$FIREMODELS_ROOT/fds/Validation/Process_All_Output.sh -CASELIST=$TEMPDIR/temp.out.$$ -TABLE_ENTRIES=$TEMPDIR/temp2.out.$$ -grep PROCESS $maketable | awk 'BEGIN { FS = " " } ; { print $2 }' | awk '{if(NR>1)print}'> $CASELIST -while read p; do - MAKEGITENTRY $p >> $TABLE_ENTRIES -done <$CASELIST - cat $TABLE_ENTRIES | sort -n -t '&' -k 4 | awk -F "&" '{ print $1 "&" $2 "&" $3 "\\\\ \\hline"}' >> $OUTPUT_TEX_FILE -rm $CASELIST $TABLE_ENTRIES - - -# Table footer -echo "\end{longtable}" >> $OUTPUT_TEX_FILE - -cd $CURRENT_DIR -
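The core of the new validation_git_stats.py, extracting a short hash from a revision string, querying git for the author date, and emitting a LaTeX longtable row, can be sketched in isolation. The sketch below is illustrative only: the dataset name and revision string are hypothetical, git must be on the PATH, and an unknown hash simply yields an empty date column, mirroring the script's fallback behavior.

    import subprocess

    def git_entry(case_name, gitrev):
        # Assumes revision strings of the form 'FDS-6.9.1-123-gabc1234d-master'.
        parts = gitrev.split('-')
        short = parts[-2] if len(parts) >= 2 else gitrev
        if short.startswith('g'):
            short = short[1:]                  # 'gabc1234d' -> 'abc1234d'
        # The argument vector must be one word per element for subprocess.run.
        result = subprocess.run(['git', 'show', '-s', '--format=%aD', short],
                                capture_output=True, text=True)
        gitdate = ''
        if result.returncode == 0 and result.stdout.strip():
            d = result.stdout.split()          # e.g. ['Fri,', '3', 'May', '2024', ...]
            if len(d) >= 5:
                gitdate = f"{d[2]} {d[1]}, {d[3]}"   # 'May 3, 2024'
        escaped = case_name.replace('_', '\\_')      # LaTeX-safe dataset name
        return f"{escaped} & {gitdate} & {gitrev} \\\\ \\hline"

    print(git_entry('SWJTU_Tunnels', 'FDS-6.9.1-123-gabc1234d-master'))

One behavioral difference worth noting: the deleted shell and Python versions sorted the finished rows chronologically using a hidden fourth field (the Unix commit timestamp gitdate2) before writing them out, whereas the new script emits rows in the order the cases appear in Process_All_Output.sh.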