diff --git a/.gitignore b/.gitignore index d9005f2..70f1e91 100644 --- a/.gitignore +++ b/.gitignore @@ -144,6 +144,14 @@ dmypy.json # Cython debug symbols cython_debug/ +# debug folder +debug/ + +# debug outputs +*.png +*.pdf +*.csv + # PyCharm # JetBrains specific template is maintainted in a separate JetBrains.gitignore that can # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore diff --git a/example/Bi2SeO2-Expt-Comparison/compare_calc_expt.py b/example/Bi2SeO2-Expt-Comparison/compare_calc_expt.py index 0822dfc..44a5d85 100644 --- a/example/Bi2SeO2-Expt-Comparison/compare_calc_expt.py +++ b/example/Bi2SeO2-Expt-Comparison/compare_calc_expt.py @@ -27,7 +27,9 @@ import numpy as np -import sys ; sys.path.append(r"/mnt/d/Repositories/ZT-Calc-Workflow") +import sys + +sys.path.append(r"/mnt/d/Repositories/ZT-Calc-Workflow") from zt_calc_workflow.amset import read_amset_csv from zt_calc_workflow.analysis import match_data @@ -35,116 +37,138 @@ def read_expt_data_csv(file_path): - """ Read experimental data from a two-column CSV file. """ - - with open(file_path, 'r') as f: + """Read experimental data from a two-column CSV file.""" + + with open(file_path, "r") as f: f_csv = csv.reader(f) - - return np.array( - [[float(val) for val in r] for r in f_csv], dtype=np.float64) + + return np.array([[float(val) for val in r] for r in f_csv], dtype=np.float64) if __name__ == "__main__": # Load AMSET calculation and create 2D datasets. - + data = read_amset_csv(r"Bi2SeO2-AMSET-n.csv") calc_n, calc_t, calc_data_2d = dataset_to_2d(data) - + # Experimental carrier concentrations (nominal). - - expt_n = {'S1': 1.0e18, 'S2': 6.8e18, 'S3': 1.3e19, 'S4': 2.2e19, - 'S5': 8.7e19} - + + expt_n = {"S1": 1.0e18, "S2": 6.8e18, "S3": 1.3e19, "S4": 2.2e19, "S5": 8.7e19} + # Load experimental data. - + expt_data = {} - + for s in "S1", "S2", "S3", "S4", "S5": expt_sigma = read_expt_data_csv(r"5.0063091-2a_{0}.csv".format(s)) - + # Resistivity data -> sigma = 1 / rho. - - expt_sigma = np.array([[t, 1. / rho] for t, rho in expt_sigma], - dtype=np.float64) - + + expt_sigma = np.array( + [[t, 1.0 / rho] for t, rho in expt_sigma], dtype=np.float64 + ) + expt_s = read_expt_data_csv(r"5.0063091-2b_{0}.csv".format(s)) - - expt_data[s] = {'sigma': expt_sigma, 's': expt_s} - + + expt_data[s] = {"sigma": expt_sigma, "s": expt_s} + # Match sigma and S for each of the five samples and print results. - - for expt_k, calc_k in('sigma', 'sigma_ave'), ('s', 's_ave'): + + for expt_k, calc_k in ("sigma", "sigma_ave"), ("s", "s_ave"): for s in "S1", "S2", "S3", "S4", "S5": # List of (n, t, val) to match to. - + match_to = [(expt_n[s], t, v) for t, v in expt_data[s][expt_k]] - + # Match to calculated data. Using mode='same_t' will return the # calculated n that best match the experimental data at the # measurement T. - - res = match_data(calc_n, calc_t, calc_data_2d[calc_k], - match_to, mode='same_t', num_seeds=5) - + + res = match_data( + calc_n, + calc_t, + calc_data_2d[calc_k], + match_to, + mode="same_t", + num_seeds=5, + ) + # Print results. 
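A minimal sketch of the matching call pattern used in this script, with hypothetical values; calc_n, calc_t and calc_data_2d are the outputs of dataset_to_2d() exactly as above:

    # One experimental point: nominal n = 1.0e18 cm^-3, sigma measured at 300 K.
    match_to = [(1.0e18, 300.0, 250.0)]

    # With mode="same_t", T is held at the measurement temperature and the
    # carrier concentration is fitted, so each result is the calculated n
    # that best reproduces the experimental value.
    res = match_data(calc_n, calc_t, calc_data_2d["sigma_ave"], match_to,
                     mode="same_t", num_seeds=5)

    ((fit_n, fit_t, fit_val),) = res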
- + header = "Sample: '{0}', Data: '{1}'".format(s, expt_k) print(header) - print('-' * len(header)) + print("-" * len(header)) print("") - - print("{0: <10} | {1: <10} | {2: <10} | {3: <10} | {4: <10} | " - "{5: <10} | {6: <10}".format("n", "T", "Expt.", "n", "T", - "Calc.", "Diff.")) - - print('-' * (7 * 10 + 6 * 3)) - + + print( + "{0: <10} | {1: <10} | {2: <10} | {3: <10} | {4: <10} | " + "{5: <10} | {6: <10}".format( + "n", "T", "Expt.", "n", "T", "Calc.", "Diff." + ) + ) + + print("-" * (7 * 10 + 6 * 3)) + for (e_n, e_t, e_v), (c_n, c_t, c_v) in zip(match_to, res): print( "{0: >10.2e} | {1: >10.0f} | {2: >10.2f} | {3: >10.2e} | " "{4: >10.0f} | {5: >10.2f} | {6: >10.2e}".format( - e_n, e_t, e_v, c_n, c_t, c_v, c_v - e_v)) - + e_n, e_t, e_v, c_n, c_t, c_v, c_v - e_v + ) + ) + print("") - + print("") - + # Try one sample with all four modes. - + s = "S3" - - for expt_k, calc_k in ('sigma', 'sigma_ave'), ('s', 's_ave'): - for mode in 'same', 'same_t', 'same_n', 'best_match': + + for expt_k, calc_k in ("sigma", "sigma_ave"), ("s", "s_ave"): + for mode in "same", "same_t", "same_n", "best_match": match_to = [(expt_n[s], t, v) for t, v in expt_data[s][expt_k]] - + # num_seeds gives dubious results with mode = 'best_match' (and # will issue a RuntimeWarning to this effect). - - num_seeds = 1 if mode == 'best_match' else 5 - - res = match_data(calc_n, calc_t, calc_data_2d[calc_k], - match_to, mode=mode, num_seeds=num_seeds) - - header = ("Sample: '{0}', Data: '{1}', Mode: " - "'{2}'".format(s, expt_k, mode)) + + num_seeds = 1 if mode == "best_match" else 5 + + res = match_data( + calc_n, + calc_t, + calc_data_2d[calc_k], + match_to, + mode=mode, + num_seeds=num_seeds, + ) + + header = "Sample: '{0}', Data: '{1}', Mode: " "'{2}'".format( + s, expt_k, mode + ) print(header) - print('-' * len(header)) + print("-" * len(header)) print("") - - print("{0: <10} | {1: <10} | {2: <10} | {3: <10} | {4: <10} | " - "{5: <10} | {6: <10}".format("n", "T", "Expt.", "n", "T", - "Calc.", "Diff.")) - - print('-' * (7 * 10 + 6 * 3)) - + + print( + "{0: <10} | {1: <10} | {2: <10} | {3: <10} | {4: <10} | " + "{5: <10} | {6: <10}".format( + "n", "T", "Expt.", "n", "T", "Calc.", "Diff." + ) + ) + + print("-" * (7 * 10 + 6 * 3)) + for (e_n, e_t, e_v), (c_n, c_t, c_v) in zip(match_to, res): print( "{0: >10.2e} | {1: >10.0f} | {2: >10.2f} | {3: >10.2e} | " "{4: >10.0f} | {5: >10.2f} | {6: >10.2e}".format( - e_n, e_t, e_v, c_n, c_t, c_v, c_v - e_v)) - + e_n, e_t, e_v, c_n, c_t, c_v, c_v - e_v + ) + ) + print("") - + print("") diff --git a/example/SnS-SnSe-ZT/elec_n_plot.py b/example/SnS-SnSe-ZT/elec_n_plot.py index 196d154..68cbe33 100644 --- a/example/SnS-SnSe-ZT/elec_n_plot.py +++ b/example/SnS-SnSe-ZT/elec_n_plot.py @@ -30,10 +30,15 @@ # Read input data. amset_data = { - 'SnS' : (read_amset_csv("SnS-Pnma-AMSET-p.csv"), - read_amset_csv("SnS-Pnma-AMSET-n.csv")), - 'SnSe' : (read_amset_csv("SnSe-Pnma-AMSET-p.csv"), - read_amset_csv("SnSe-Pnma-AMSET-n.csv"))} + "SnS": ( + read_amset_csv("SnS-Pnma-AMSET-p.csv"), + read_amset_csv("SnS-Pnma-AMSET-n.csv"), + ), + "SnSe": ( + read_amset_csv("SnSe-Pnma-AMSET-p.csv"), + read_amset_csv("SnSe-Pnma-AMSET-n.csv"), + ), + } # Setup Matplotlib. @@ -41,53 +46,56 @@ # Plot parameters. - t_plot = 700. - - colours = ['b', 'r'] + t_plot = 700.0 + + colours = ["b", "r"] - xlim = (1.e16, 1.e20) - ylim_sets = [(0., 750.), (0., 750.), (0., 6.), (0., 1.)] + xlim = (1.0e16, 1.0e20) + ylim_sets = [(0.0, 750.0), (0.0, 750.0), (0.0, 6.0), (0.0, 1.0)] # Plot. 
- + plt.figure(figsize=(16.2 / 2.54, 13.5 / 2.54)) subplot_axes = [plt.subplot(2, 2, i + 1) for i in range(4)] for data_k, axes in zip( - ['sigma_ave', 's_ave', 'pf_ave', 'kappa_el_ave'], subplot_axes): + ["sigma_ave", "s_ave", "pf_ave", "kappa_el_ave"], subplot_axes + ): - for sys_k, c in zip(['SnS', 'SnSe'], colours): + for sys_k, c in zip(["SnS", "SnSe"], colours): data_p, data_n = amset_data[sys_k] - + # Extract data at plot temperature. - - data_p = data_p[data_p['t'] == t_plot] - data_n = data_n[data_n['t'] == t_plot] - - for data, l, d in [(data_p, sys_k, (None, None)), - (data_n, None, (3., 1.))]: + + data_p = data_p[data_p["t"] == t_plot] + data_n = data_n[data_n["t"] == t_plot] + + for data, l, d in [ + (data_p, sys_k, (None, None)), + (data_n, None, (3.0, 1.0)), + ]: # Plot |S| rather than S. - - y = (np.abs(data[data_k]) if data_k == 's_ave' - else data[data_k]) - - axes.plot(data['n'], y, label=l, color=c, dashes=d) + + y = np.abs(data[data_k]) if data_k == "s_ave" else data[data_k] + + axes.plot(data["n"], y, label=l, color=c, dashes=d) # "Fake" line to add a single entry for n-type doping. - axes.plot([-0.75, -0.25], [-0.75, -0.25], label="n-type", color='k', - dashes=(3., 1.)) + axes.plot( + [-0.75, -0.25], [-0.75, -0.25], label="n-type", color="k", dashes=(3.0, 1.0) + ) # Adjust axis scales/ranges and labels. for axes in subplot_axes: - axes.set_xscale('log') + axes.set_xscale("log") axes.set_xlim(xlim) for axes, ylim in zip(subplot_axes, ylim_sets): axes.set_ylim(ylim) - + y_min, y_max = ylim axes.set_yticks(np.linspace(y_min, y_max, 6)) @@ -101,27 +109,28 @@ # Add legend. - legend = subplot_axes[0].legend(loc='upper left') - legend.get_frame().set_edgecolor('k') - legend.get_frame().set_facecolor((1., 1., 1., 0.5)) + legend = subplot_axes[0].legend(loc="upper left") + legend.get_frame().set_edgecolor("k") + legend.get_frame().set_facecolor((1.0, 1.0, 1.0, 0.5)) # Add subplot labels. - locs = ['lower left', 'upper right', 'upper left', 'upper left'] + locs = ["lower left", "upper right", "upper left", "upper left"] for i, (axes, loc) in enumerate(zip(subplot_axes, locs)): subplot_label = AnchoredText( - r"({0})".format(chr(97 + i)), loc=loc, frameon=True) + r"({0})".format(chr(97 + i)), loc=loc, frameon=True + ) - subplot_label.patch.set_edgecolor('k') - subplot_label.patch.set_facecolor((1., 1., 1., 0.5)) + subplot_label.patch.set_edgecolor("k") + subplot_label.patch.set_facecolor((1.0, 1.0, 1.0, 0.5)) axes.add_artist(subplot_label) # Add gridlines. for axes in subplot_axes: - axes.grid(color=(0.9, 0.9, 0.9), dashes=(3., 1.), linewidth=0.5) + axes.grid(color=(0.9, 0.9, 0.9), dashes=(3.0, 1.0), linewidth=0.5) axes.set_axisbelow(True) plt.tight_layout() diff --git a/example/SnS-SnSe-ZT/elec_t_plot.py b/example/SnS-SnSe-ZT/elec_t_plot.py index e8fb399..2219863 100644 --- a/example/SnS-SnSe-ZT/elec_t_plot.py +++ b/example/SnS-SnSe-ZT/elec_t_plot.py @@ -29,55 +29,68 @@ if __name__ == "__main__": # Read input data. - amset_data = {'SnS' : (read_amset_csv("SnS-Pnma-AMSET-p.csv"), - read_amset_csv("SnS-Pnma-AMSET-n.csv")), - 'SnSe' : (read_amset_csv("SnSe-Pnma-AMSET-p.csv"), - read_amset_csv("SnSe-Pnma-AMSET-n.csv"))} - + amset_data = { + "SnS": ( + read_amset_csv("SnS-Pnma-AMSET-p.csv"), + read_amset_csv("SnS-Pnma-AMSET-n.csv"), + ), + "SnSe": ( + read_amset_csv("SnSe-Pnma-AMSET-p.csv"), + read_amset_csv("SnSe-Pnma-AMSET-n.csv"), + ), + } + # Setup Matplotlib. setup_matplotlib() - + # Plot parameters. - - n_plot = 4.e19 - - colours = ['b', 'r'] - xlim = (200., 1000.) 
- ylim_sets = [(0., 4000.), (0., 350.), (0., 6.), (0., 2.)] + n_plot = 4.0e19 + + colours = ["b", "r"] + + xlim = (200.0, 1000.0) + ylim_sets = [(0.0, 4000.0), (0.0, 350.0), (0.0, 6.0), (0.0, 2.0)] # Plot. - + plt.figure(figsize=(16.2 / 2.54, 13.5 / 2.54)) subplot_axes = [plt.subplot(2, 2, i + 1) for i in range(4)] for data_k, axes in zip( - ['sigma_ave', 's_ave', 'pf_ave', 'kappa_el_ave'], subplot_axes): + ["sigma_ave", "s_ave", "pf_ave", "kappa_el_ave"], subplot_axes + ): - for sys_k, c in zip(['SnS', 'SnSe'], colours): + for sys_k, c in zip(["SnS", "SnSe"], colours): data_p, data_n = amset_data[sys_k] - + # Extract data at plot carrier concentration. - - data_p = data_p[data_p['n'] == n_plot] - data_n = data_n[data_n['n'] == n_plot] - - for data, l, d in [(data_p, sys_k, (None, None)), - (data_n, None, (3., 1.))]: + + data_p = data_p[data_p["n"] == n_plot] + data_n = data_n[data_n["n"] == n_plot] + + for data, l, d in [ + (data_p, sys_k, (None, None)), + (data_n, None, (3.0, 1.0)), + ]: # Plot |S| rather than S. - - y = (np.abs(data[data_k].to_numpy()) if data_k == 's_ave' - else data[data_k].to_numpy()) - - axes.plot(data['t'].to_numpy(), y, label=l, color=c, dashes=d) + + y = ( + np.abs(data[data_k].to_numpy()) + if data_k == "s_ave" + else data[data_k].to_numpy() + ) + + axes.plot(data["t"].to_numpy(), y, label=l, color=c, dashes=d) # "Fake" line to add a single entry for n-type doping. - axes.plot([-0.75, -0.25], [-0.75, -0.25], label="n-type", color='k', - dashes=(3., 1.)) - + axes.plot( + [-0.75, -0.25], [-0.75, -0.25], label="n-type", color="k", dashes=(3.0, 1.0) + ) + # Adjust axis scales/ranges and labels. for axes in subplot_axes: @@ -99,25 +112,26 @@ # Add legend. - legend = subplot_axes[0].legend(loc='upper right') - legend.get_frame().set_edgecolor('k') - legend.get_frame().set_facecolor((1., 1., 1., 0.5)) + legend = subplot_axes[0].legend(loc="upper right") + legend.get_frame().set_edgecolor("k") + legend.get_frame().set_facecolor((1.0, 1.0, 1.0, 0.5)) # Add subplot labels. for i, axes in enumerate(subplot_axes): subplot_label = AnchoredText( - r"({0})".format(chr(97 + i)), loc='lower left', frameon=True) + r"({0})".format(chr(97 + i)), loc="lower left", frameon=True + ) - subplot_label.patch.set_edgecolor('k') - subplot_label.patch.set_facecolor((1., 1., 1., 0.5)) + subplot_label.patch.set_edgecolor("k") + subplot_label.patch.set_facecolor((1.0, 1.0, 1.0, 0.5)) axes.add_artist(subplot_label) # Add gridlines. for axes in subplot_axes: - axes.grid(color=(0.9, 0.9, 0.9), dashes=(3., 1.), linewidth=0.5) + axes.grid(color=(0.9, 0.9, 0.9), dashes=(3.0, 1.0), linewidth=0.5) axes.set_axisbelow(True) plt.tight_layout() diff --git a/example/SnS-SnSe-ZT/zt_max_yaml.py b/example/SnS-SnSe-ZT/zt_max_yaml.py index bda3837..450cbf4 100644 --- a/example/SnS-SnSe-ZT/zt_max_yaml.py +++ b/example/SnS-SnSe-ZT/zt_max_yaml.py @@ -28,61 +28,71 @@ from zt_calc_workflow.phono3py import read_phono3py_kappa_csv - if __name__ == "__main__": # Load AMSET and Phono3py calculations and create ZT datasets. 
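Where no axis relabelling is needed, the two reads plus the ZT build can also be done in one call with the helper defined in dataset.py (sketch; file names as used elsewhere in this example). The script below instead loads the two datasets separately, because the Phono3py axes have to be relabelled first:

    from zt_calc_workflow.dataset import zt_dataset_from_amset_phono3py_csvs

    # Reads both CSVs, checks the Phono3py temperatures cover the AMSET ones,
    # and returns a DataFrame with the extra 'kappa_latt_*', 'kappa_tot_*'
    # and 'zt_*' columns.
    zt_data_p = zt_dataset_from_amset_phono3py_csvs(
        "SnS-Pnma-AMSET-p.csv", "SnS-kappa-m16816.Prim.csv")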
- - input_files = {'SnS': (r"SnS-Pnma-AMSET-p.csv", r"SnS-Pnma-AMSET-n.csv", - r"SnS-kappa-m16816.Prim.csv"), - 'SnSe': (r"SnSe-Pnma-AMSET-p.csv", r"SnSe-Pnma-AMSET-n.csv", - r"SnSe-kappa-m16816.Prim.csv")} - + + input_files = { + "SnS": ( + r"SnS-Pnma-AMSET-p.csv", + r"SnS-Pnma-AMSET-n.csv", + r"SnS-kappa-m16816.Prim.csv", + ), + "SnSe": ( + r"SnSe-Pnma-AMSET-p.csv", + r"SnSe-Pnma-AMSET-n.csv", + r"SnSe-kappa-m16816.Prim.csv", + ), + } + zt_data = {} - + for k, (amset_p, amset_n, kappa) in input_files.items(): kappa_data = read_phono3py_kappa_csv(kappa) - + # The Phono3py calculations were performed on structures with the axes # oriented differently to those in the AMSET calculations. We deal with # this by dropping the off-diagonal elements and relabelling the # diagonal elements in kappa_data. - - kappa_data.drop(columns=['kappa_yz', 'kappa_xz', 'kappa_xy'], - inplace=True) - - kappa_data.columns = ['t', 'kappa_yy', 'kappa_zz', 'kappa_xx', - 'kappa_ave'] - + + kappa_data.drop(columns=["kappa_yz", "kappa_xz", "kappa_xy"], inplace=True) + + kappa_data.columns = ["t", "kappa_yy", "kappa_zz", "kappa_xx", "kappa_ave"] + amset_data_p = read_amset_csv(amset_p) amset_data_n = read_amset_csv(amset_n) - - zt_data[k] = (zt_dataset_from_data(amset_data_p, kappa_data), - zt_dataset_from_data(amset_data_n, kappa_data)) - + + zt_data[k] = ( + zt_dataset_from_data(amset_data_p, kappa_data), + zt_dataset_from_data(amset_data_n, kappa_data), + ) + # Get ZT_max for each system and n/p-type doping and write to a YAML file. - - with open(r"zt_max.yaml", 'w') as f: - for system, t_max in ('SnS', 880.), ('SnSe', 800.): + + with open(r"zt_max.yaml", "w") as f: + for system, t_max in ("SnS", 880.0), ("SnSe", 800.0): p_data, n_data = zt_data[system] - - for doping_type, data in ('p', p_data), ('n', n_data): + + for doping_type, data in ("p", p_data), ("n", n_data): rec = get_zt_max(data, t_max=t_max) - + f.write("- system: {0}\n".format("{0}-Pnma".format(system))) f.write(" carrier_type: {0}\n".format(doping_type)) - - f.write(" carrier_conc: {0}\n".format(rec['n'])) - f.write(" temp: {0}\n".format(rec['t'])) - + + f.write(" carrier_conc: {0}\n".format(rec["n"])) + f.write(" temp: {0}\n".format(rec["t"])) + data_k = [] - + for k in rec.index: - if k.endswith('ave'): - data_k.append(k.replace('_ave', '')) + if k.endswith("ave"): + data_k.append(k.replace("_ave", "")) for k in data_k: f.write(" {0}:\n".format(k)) - for suffix in 'xx', 'yy', 'zz', 'ave': - f.write(" {0}: {1}\n".format( - suffix, rec['{0}_{1}'.format(k, suffix)])) + for suffix in "xx", "yy", "zz", "ave": + f.write( + " {0}: {1}\n".format( + suffix, rec["{0}_{1}".format(k, suffix)] + ) + ) diff --git a/example/SnS-SnSe-ZT/zt_plot.py b/example/SnS-SnSe-ZT/zt_plot.py index c38f5d9..9830d35 100644 --- a/example/SnS-SnSe-ZT/zt_plot.py +++ b/example/SnS-SnSe-ZT/zt_plot.py @@ -39,83 +39,89 @@ if __name__ == "__main__": # Read input data. 
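get_zt_max(), used in the script above, returns the full table row (a Pandas Series) at the ZT maximum, so any column can be read off the record. A minimal sketch with a hypothetical temperature cap:

    from zt_calc_workflow.analysis import get_zt_max

    # Restrict the search to T <= 880 K and read off the location and value
    # of the maximum average ZT.
    rec = get_zt_max(data, t_max=880.0)
    print(rec["n"], rec["t"], rec["zt_ave"])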
- input_files = {'SnS': (r"SnS-Pnma-AMSET-p.csv", r"SnS-Pnma-AMSET-n.csv", - r"SnS-kappa-m16816.Prim.csv"), - 'SnSe': (r"SnSe-Pnma-AMSET-p.csv", r"SnSe-Pnma-AMSET-n.csv", - r"SnSe-kappa-m16816.Prim.csv")} - + input_files = { + "SnS": ( + r"SnS-Pnma-AMSET-p.csv", + r"SnS-Pnma-AMSET-n.csv", + r"SnS-kappa-m16816.Prim.csv", + ), + "SnSe": ( + r"SnSe-Pnma-AMSET-p.csv", + r"SnSe-Pnma-AMSET-n.csv", + r"SnSe-kappa-m16816.Prim.csv", + ), + } + zt_data = {} - + for k, (amset_p, amset_n, kappa) in input_files.items(): kappa_data = read_phono3py_kappa_csv(kappa) - + # The Phono3py calculations were performed on structures with the axes # oriented differently to those in the AMSET calculations. We deal with # this by dropping the off-diagonal elements and relabelling the # diagonal elements in kappa_data. - - kappa_data.drop(columns=['kappa_yz', 'kappa_xz', 'kappa_xy'], - inplace=True) - - kappa_data.columns = ['t', 'kappa_yy', 'kappa_zz', 'kappa_xx', - 'kappa_ave'] - + + kappa_data.drop(columns=["kappa_yz", "kappa_xz", "kappa_xy"], inplace=True) + + kappa_data.columns = ["t", "kappa_yy", "kappa_zz", "kappa_xx", "kappa_ave"] + amset_data_p = read_amset_csv(amset_p) amset_data_n = read_amset_csv(amset_n) - + # Combine AMSET and Phono3py data into ZT datasets, then convert to # 2D data. - - zt_data[k] = (dataset_to_2d( - zt_dataset_from_data(amset_data_p, kappa_data)), - dataset_to_2d( - zt_dataset_from_data(amset_data_n, kappa_data))) + + zt_data[k] = ( + dataset_to_2d(zt_dataset_from_data(amset_data_p, kappa_data)), + dataset_to_2d(zt_dataset_from_data(amset_data_n, kappa_data)), + ) # Setup Matplotlib. setup_matplotlib() - + # Plot parameters. - t_min, t_max = 300., 900. + t_min, t_max = 300.0, 900.0 + + zt_contour_levels = [0.25, 0.5, 1.0, 1.5, 2.0, 2.5] - zt_contour_levels = [0.25, 0.5, 1., 1.5, 2., 2.5] - subplot_labels = ["p-type SnS", "n-type SnS", "p-type SnSe", "n-type SnSe"] # Custom formatters for labels. def contour_fmt(v): - if v % 1. == 0.: + if v % 1.0 == 0.0: return "{0:.0f}".format(v) - if v % 0.5 == 0.: + if v % 0.5 == 0.0: return "{0:.1f}".format(v) return "{0:.2f}".format(v) def log_fmt(v, pos): return "$10^{{{0:.0f}}}$".format(v) - + log_formatter = FuncFormatter(log_fmt) - + # To use a common colour bar, we need to determine the "global" ZT_max # across all datasets in the range (t_min, t_max). Once we've done this, # we can create a matplotlib.colors.Normalize to colour the 2D plots. - - global_zt_max = 0. - + + global_zt_max = 0.0 + for data_p, data_n in zt_data.values(): for n, t, data in data_p, data_n: t_mask = np.logical_and(t >= t_min, t <= t_max) - global_zt_max = max(global_zt_max, data['zt_ave'][:, t_mask].max()) - - norm = Normalize(vmin=0., vmax=global_zt_max) + global_zt_max = max(global_zt_max, data["zt_ave"][:, t_mask].max()) + + norm = Normalize(vmin=0.0, vmax=global_zt_max) # Plot. - plt.figure(figsize=(14. / 2.54, 14. / 2.54)) - + plt.figure(figsize=(14.0 / 2.54, 14.0 / 2.54)) + # Use a GridSpec to divide the plot into a four subplot axes and a colour # bar axis. @@ -125,17 +131,16 @@ def log_fmt(v, pos): for r in range(2): for c in range(2): - subplot_axes.append( - plt.subplot(grid_spec[4 * r + 1:4 * (r + 1) + 1, c])) + subplot_axes.append(plt.subplot(grid_spec[4 * r + 1 : 4 * (r + 1) + 1, c])) cbar_axes = plt.subplot(grid_spec[0, :]) - + # Loop over SnS/SnSe and p- and n-type doping and draw a 2D colour plot # with contour lines. 
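The shared colour bar works because every pcolormesh() call is normalised with the same Normalize, and the bar itself is drawn from a bare ScalarMappable carrying that norm. A self-contained sketch of the pattern with made-up data:

    import numpy as np
    import matplotlib.pyplot as plt

    from matplotlib.cm import ScalarMappable
    from matplotlib.colors import Normalize

    x = np.linspace(16.0, 20.0, 41)    # log10(n)
    y = np.linspace(300.0, 900.0, 31)  # T [K]
    z1 = np.outer(np.ones_like(y), x - 16.0)
    z2 = 2.0 * z1

    # One Normalize spanning the global maximum puts both panels on the same
    # colour scale.
    norm = Normalize(vmin=0.0, vmax=z2.max())

    fig, axes = plt.subplots(1, 2)

    for ax, z in zip(axes, (z1, z2)):
        ax.pcolormesh(x, y, z, norm=norm, shading="gouraud")

    fig.colorbar(ScalarMappable(norm=norm), ax=list(axes),
                 orientation="horizontal")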
- - for i, k in enumerate(['SnS', 'SnSe']): + + for i, k in enumerate(["SnS", "SnSe"]): data_p, data_n = zt_data[k] - + for j, (n, t, data) in enumerate([data_p, data_n]): axes = subplot_axes[2 * i + j] @@ -143,24 +148,23 @@ def log_fmt(v, pos): x = np.log10(n) y = t[t_mask] - z = data['zt_ave'].T[t_mask, :] + z = data["zt_ave"].T[t_mask, :] - axes.pcolormesh(x, y, z, norm=norm, shading='gouraud') + axes.pcolormesh(x, y, z, norm=norm, shading="gouraud") - cs = axes.contour(x, y, z, levels=zt_contour_levels, colors='r') + cs = axes.contour(x, y, z, levels=zt_contour_levels, colors="r") axes.clabel(cs, cs.levels, inline=True, fmt=contour_fmt) # Add colour bar. - plt.colorbar(ScalarMappable(norm=norm), orientation='horizontal', - cax=cbar_axes) - + plt.colorbar(ScalarMappable(norm=norm), orientation="horizontal", cax=cbar_axes) + cbar_axes.set_ylabel(r"$ZT$") # Adjust axis ranges and labels. for axes in subplot_axes: - axes.set_ylim(300., 900.) + axes.set_ylim(300.0, 900.0) for axes in subplot_axes: axes.xaxis.set_major_formatter(log_formatter) @@ -175,11 +179,11 @@ def log_fmt(v, pos): for i, (axes, label) in enumerate(zip(subplot_axes, subplot_labels)): subplot_label = AnchoredText( - r"({0}) {1}".format(chr(97 + i), label), loc='lower left', - frameon=True) + r"({0}) {1}".format(chr(97 + i), label), loc="lower left", frameon=True + ) - subplot_label.patch.set_edgecolor('k') - subplot_label.patch.set_facecolor((1., 1., 1., 0.5)) + subplot_label.patch.set_edgecolor("k") + subplot_label.patch.set_facecolor((1.0, 1.0, 1.0, 0.5)) axes.add_artist(subplot_label) diff --git a/example/SnS-SnSe-k_latt/k_latt_crta_plot.py b/example/SnS-SnSe-k_latt/k_latt_crta_plot.py index a5e8455..791f994 100644 --- a/example/SnS-SnSe-k_latt/k_latt_crta_plot.py +++ b/example/SnS-SnSe-k_latt/k_latt_crta_plot.py @@ -28,81 +28,87 @@ if __name__ == "__main__": # Read input data. - + crta_sns = read_phono3py_crta_csv(r"SnS-kappa-m16816.Prim-CRTA.csv") crta_snse = read_phono3py_crta_csv(r"SnSe-kappa-m16816.Prim-CRTA.csv") - + # Setup Matplotlib. - + setup_matplotlib() - + # Plot parameters. - + plot_labels = ["$xx$", "$yy$", "$zz$", "ave"] - plot_colours = ['b', 'r', 'g', 'k'] - + plot_colours = ["b", "r", "g", "k"] + # Plot. - - plt.figure(figsize=(12. / 2.54, 15. / 2.54)) - + + plt.figure(figsize=(12.0 / 2.54, 15.0 / 2.54)) + subplot_axes = [plt.subplot(3, 2, i + 1) for i in range(6)] - + # Loop over SnS/SnSe (columns). - + for i, df in enumerate([crta_sns, crta_snse]): # Loop over \kappa, \kappa / \tau^CRTA and \tau^CRTA (rows). - - for axes, k in zip( - subplot_axes[i::2], ['kappa', 'kappa_tau_crta', 'tau_crta']): - + + for axes, k in zip(subplot_axes[i::2], ["kappa", "kappa_tau_crta", "tau_crta"]): + # Loop over diagonal components of tensors and average. - + for suffix, l, c in zip( - ['xx', 'yy', 'zz', 'ave'], plot_labels, plot_colours): - - data_k = '{0}_{1}'.format(k, suffix) - axes.plot(df['t'], df[data_k], label=l, color=c) - + ["xx", "yy", "zz", "ave"], plot_labels, plot_colours + ): + + data_k = "{0}_{1}".format(k, suffix) + axes.plot(df["t"], df[data_k], label=l, color=c) + # Adjust axis ranges and labels. - + for axes in subplot_axes: - axes.set_xlim(0., 1000.) 
-
-    for i, y_max in enumerate([10., 1., 25.]):
-        for axes in subplot_axes[2 * i:2 * (i + 1)]:
-            axes.set_ylim(0., y_max)
-
+        axes.set_xlim(0.0, 1000.0)
+
+    for i, y_max in enumerate([10.0, 1.0, 25.0]):
+        for axes in subplot_axes[2 * i : 2 * (i + 1)]:
+            axes.set_ylim(0.0, y_max)
+
     for axes in subplot_axes[-2:]:
         axes.set_xlabel(r"$T$ [K]")
-
+
     y_labels = [
         r"$\kappa$ [W m$^{-1}$ K$^{-1}$]",
         r"$\kappa / \tau^\mathrm{CRTA}$ [W m$^{-1}$ K$^{-1}$ ps$^{-1}$]",
-        r"$\tau^\mathrm{CRTA}$ [ps]"]
-
+        r"$\tau^\mathrm{CRTA}$ [ps]",
+    ]
+
     for i, y_label in enumerate(y_labels):
         subplot_axes[2 * i].set_ylabel(y_label)
-
+
     # Add legend.
-
-    legend = subplot_axes[1].legend(loc='upper right')
-    legend.get_frame().set_edgecolor('k')
-
+
+    legend = subplot_axes[1].legend(loc="upper right")
+    legend.get_frame().set_edgecolor("k")
+
     # Add subplot labels.
-
-    subplot_label_locs = ['upper right', 'lower left', 'lower right',
-                          'upper right', 'upper right', 'upper right']
-
+
+    subplot_label_locs = [
+        "upper right",
+        "lower left",
+        "lower right",
+        "upper right",
+        "upper right",
+        "upper right",
+    ]
+
     for i, (axes, loc) in enumerate(zip(subplot_axes, subplot_label_locs)):
-        axes.add_artist(
-            AnchoredText(r"({0})".format(chr(97 + i)), loc=loc))
-
+        axes.add_artist(AnchoredText(r"({0})".format(chr(97 + i)), loc=loc))
+
     for axes in subplot_axes:
-        axes.grid(color=(0.9, 0.9, 0.9), dashes=(3.0, 1.0), linewidth=.5)
-
+        axes.grid(color=(0.9, 0.9, 0.9), dashes=(3.0, 1.0), linewidth=0.5)
+
     # Finalise and save.
-
+
     plt.tight_layout()
-
+
     plt.savefig(r"k_latt_crta_plot.png", dpi=300)
     plt.close()
diff --git a/zt_calc_workflow/amset.py b/zt_calc_workflow/amset.py
index a9b24a8..3ee7fc3 100644
--- a/zt_calc_workflow/amset.py
+++ b/zt_calc_workflow/amset.py
@@ -15,6 +15,8 @@
 import warnings
 
 import numpy as np
+import itertools
+import pandas as pd
 
 from .io import read_validate_csv
 
@@ -23,69 +25,107 @@
 # Internal functions
 # ------------------
 
+
 def _check_update_amset_dataset(
-        df, check_uniform=True, convert_sigma_s_cm=True,
-        calculate_pf_mw_m_k2=True):
-    """ Check and an AMSET dataset in the form of a Pandas DataFrame:
-
+    df, check_uniform=True, convert_sigma_s_cm=True, calculate_pf_mw_m_k2=True
+):
+    """Check and update an AMSET dataset in the form of a Pandas DataFrame:
+
     * sort the dataset by carrier concentration and then by temperature;
     * check the dataset has a uniform set of n and T;
     * convert the electrical conductivity from S m^-1 to S cm^-1; and
     * (re)calculate the power factors in mW m^-1 K^-2.
     """
-
+
     # Sort by n, then T.
 
-    df.sort_values(by=['n', 't'], inplace=True)
+    df.sort_values(by=["n", "t"], inplace=True)
 
-    # Check for uniform n and temperatures.
+    t_vals = sorted(df["t"].unique())
+    n_vals = sorted(df["n"].unique())
 
+    # Drop duplicate rows.
+    df = df.drop_duplicates()
+
+    # Check for uniform n and temperatures.
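"Uniform" here means that every carrier concentration carries the same temperature grid, i.e. the (n, T) points form a complete rectangle. A compact way to test the same condition in pandas (sketch, equivalent in intent to the loop below):

    # Count the distinct temperatures seen for each n; the data are uniform
    # when every group matches the global temperature count.
    t_counts = df.groupby("n")["t"].nunique()
    is_uniform = bool((t_counts == df["t"].nunique()).all())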
     if check_uniform:
-        t_vals = sorted(df['t'].unique())
-        n_vals = sorted(df['n'].unique())
-
+
         for n in n_vals:
-            t_check = df[df['n'] == n]['t'].unique()
-
+            t_check = df[df["n"] == n]["t"].unique()
+
             if len(t_check) != len(t_vals):
-                raise Exception("Incomplete set of temperatures for n = "
-                                "{0:.3e}".format(n))
-
+                raise Exception(
+                    "Incomplete set of temperatures for n = " "{0:.3e}".format(n)
+                )
+
             if not np.allclose(t_vals, sorted(t_check)):
-                raise Exception("Inconsistent set of temperatures for n = "
-                                "{0:.3e}".format(n))
+                raise Exception(
+                    "Inconsistent set of temperatures for n = " "{0:.3e}".format(n)
+                )
     else:
-        warnings.warn("check_uniform is set to False - other functions may "
-                      "not work as expected on non-uniform data.", UserWarning)
+        warnings.warn(
+            "check_uniform is set to False - other functions may "
+            "not work as expected on non-uniform data.",
+            UserWarning,
+        )
+        # Code added to enforce uniformity - allows non-uniform data to be plotted without crashing
+        # Clean up values with missing data
+        # List all combinations of n and t
+        combined = [n_vals, t_vals]
+        df1 = pd.DataFrame(columns=["n", "t"], data=list(itertools.product(*combined)))
+        # Combine into a dataframe to show missing data
+        df1 = df1.merge(df, how="left")
+        # Find rows with any missing data (NaN in any column)
+        df1 = df1[df1.isnull().any(axis=1)]
+        # Find any n values with missing data
+        n_drop = sorted(df1["n"].unique())
+        # Drop n values without complete data
+        df = df[~df["n"].isin(n_drop)]
+        # Continue with the usual read
+        t_vals = sorted(df["t"].unique())
+        n_vals = sorted(df["n"].unique())
+        df = df.reset_index(drop=True)
 
     # If requested, recalculate PFs in mW/m.K^2.
 
-    has_pf = ('pf_xx' in df.columns and 'pf_yy' in df.columns
-              and 'pf_zz' in df.columns and 'pf_ave' in df.columns)
+    has_pf = (
+        "pf_xx" in df.columns
+        and "pf_yy" in df.columns
+        and "pf_zz" in df.columns
+        and "pf_ave" in df.columns
+    )
 
     if calculate_pf_mw_m_k2 or not has_pf:
         key_sets = [
-            ('s_xx', 'sigma_xx', 'pf_xx'), ('s_yy', 'sigma_yy', 'pf_yy'),
-            ('s_zz', 'sigma_zz', 'pf_zz'), ('s_ave', 'sigma_ave', 'pf_ave')]
+            ("s_xx", "sigma_xx", "pf_xx"),
+            ("s_yy", "sigma_yy", "pf_yy"),
+            ("s_zz", "sigma_zz", "pf_zz"),
+            ("s_ave", "sigma_ave", "pf_ave"),
+        ]
 
         for k_s, k_sigma, k_pf in key_sets:
-            df.loc[:, k_pf] = 1.0e3 * ((1.0e-6 * df[k_s].to_numpy()) ** 2
-                                       * df[k_sigma].to_numpy())
+            df.loc[:, k_pf] = 1.0e3 * (
+                (1.0e-6 * df[k_s].to_numpy()) ** 2 * df[k_sigma].to_numpy()
+            )
     else:
         warnings.warn(
             "calculate_pf_mw_m_k2 is set to False - other functions may not "
-            "work as expected if data is in different units.", UserWarning)
+            "work as expected if data is in different units.",
+            UserWarning,
+        )
 
     # If requested, convert \sigma from S/m -> S/cm.
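The power-factor expression above is just PF = S^2 * sigma with unit bookkeeping: S arrives in µV/K (hence the 1.0e-6), sigma is still in S/m at this point (the S/cm conversion happens below), and the leading 1.0e3 converts W m^-1 K^-2 to mW m^-1 K^-2. A quick numeric check:

    s_uv_k = 200.0     # Seebeck coefficient [µV/K]
    sigma_s_m = 1.0e5  # electrical conductivity [S/m]

    # (200e-6 V/K)^2 * 1e5 S/m = 4.0e-3 W/m.K^2 -> 4.0 mW/m.K^2.
    pf_mw_m_k2 = 1.0e3 * (1.0e-6 * s_uv_k) ** 2 * sigma_s_m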
if convert_sigma_s_cm: - for k in 'sigma_xx', 'sigma_yy', 'sigma_zz', 'sigma_ave': + for k in "sigma_xx", "sigma_yy", "sigma_zz", "sigma_ave": df.loc[:, k] = df[k].to_numpy() / 100.0 else: warnings.warn( "convert_sigma_s_cm is set to False - other functions may not " - "work as expected if data is in different units.", UserWarning) - + "work as expected if data is in different units.", + UserWarning, + ) + return df @@ -94,30 +134,59 @@ def _check_update_amset_dataset( # --------- _READ_AMSET_HEADER_MAP = { - 'Carrier Concentration': 'n', 'Temperature': 't', - 'Conducitivty x': 'sigma_xx', 'Conducitivty y': 'sigma_yy', - 'Conducitivty z': 'sigma_zz', 'Conducitivty ave': 'sigma_ave', - 'Seebeck x': 's_xx', 'Seebeck y': 's_yy', 'Seebeck z': 's_zz', - 'Seebeck ave': 's_ave', 'Kele x': 'kappa_el_xx', 'Kele y': 'kappa_el_yy', - 'Kele z': 'kappa_el_zz', 'Kele ave': 'kappa_el_ave', 'Mobility x': 'mu_xx', - 'Mobility y': 'mu_yy', 'Mobility z': 'mu_zz', 'Mobility ave': 'mu_ave', - 'PF x': 'pf_xx', 'PF y': 'pf_yy', 'PF z': 'pf_zz', 'PF ave': 'pf_ave', - 'ADP': 'mu_adp_xx', 'ADP.1': 'mu_adp_yy', 'ADP.2': 'mu_adp_zz', - 'ADP.3': 'mu_adp_ave', 'IMP': 'mu_imp_xx', 'IMP.1': 'mu_imp_yy', - 'IMP.2': 'mu_imp_zz', 'IMP.3': 'mu_imp_ave', 'PIE': 'mu_pie_xx', - 'PIE.1': 'mu_pie_yy', 'PIE.2': 'mu_pie_zz', 'PIE.3': 'mu_pie_ave', - 'POP': 'mu_pop_xx', 'POP.1': 'mu_pop_yy', 'POP.2': 'mu_pop_zz', - 'POP.3': 'mu_pop_ave'} + "Carrier Concentration": "n", + "Temperature": "t", + "Conducitivty x": "sigma_xx", + "Conducitivty y": "sigma_yy", + "Conducitivty z": "sigma_zz", + "Conducitivty ave": "sigma_ave", + "Seebeck x": "s_xx", + "Seebeck y": "s_yy", + "Seebeck z": "s_zz", + "Seebeck ave": "s_ave", + "Kele x": "kappa_el_xx", + "Kele y": "kappa_el_yy", + "Kele z": "kappa_el_zz", + "Kele ave": "kappa_el_ave", + "Mobility x": "mu_xx", + "Mobility y": "mu_yy", + "Mobility z": "mu_zz", + "Mobility ave": "mu_ave", + "PF x": "pf_xx", + "PF y": "pf_yy", + "PF z": "pf_zz", + "PF ave": "pf_ave", + "ADP": "mu_adp_xx", + "ADP.1": "mu_adp_yy", + "ADP.2": "mu_adp_zz", + "ADP.3": "mu_adp_ave", + "IMP": "mu_imp_xx", + "IMP.1": "mu_imp_yy", + "IMP.2": "mu_imp_zz", + "IMP.3": "mu_imp_ave", + "PIE": "mu_pie_xx", + "PIE.1": "mu_pie_yy", + "PIE.2": "mu_pie_zz", + "PIE.3": "mu_pie_ave", + "POP": "mu_pop_xx", + "POP.1": "mu_pop_yy", + "POP.2": "mu_pop_zz", + "POP.3": "mu_pop_ave", +} _READ_AMSET_KNOWN_HEADERS = list(_READ_AMSET_HEADER_MAP.values()) + def read_amset_csv(file_path, **kwargs): - """ Read a CSV file generated with Joe's AMSET code and, by default, + """Read a CSV file generated with Joe's AMSET code and, by default, perform some checks and unit conversions. kwargs are passed to - _check_update_amset_dataset(). """ + _check_update_amset_dataset().""" + + df = read_validate_csv( + file_path, + header_map=_READ_AMSET_HEADER_MAP, + known_headers=_READ_AMSET_KNOWN_HEADERS, + known_headers_required=False, + ) - df = read_validate_csv(file_path, header_map=_READ_AMSET_HEADER_MAP, - known_headers=_READ_AMSET_KNOWN_HEADERS, - known_headers_required=False) - return _check_update_amset_dataset(df, **kwargs) diff --git a/zt_calc_workflow/analysis.py b/zt_calc_workflow/analysis.py index 6c05ea0..60f09c0 100644 --- a/zt_calc_workflow/analysis.py +++ b/zt_calc_workflow/analysis.py @@ -26,153 +26,169 @@ # Functions # --------- + def get_zt_max(data, n_min=None, n_max=None, t_max=None, t_min=None): - """ Locate the maximum ZT in a Pandas DataFrame, with optional bounds on - n and T, and return the corresponding table entry. 
""" - + """Locate the maximum ZT in a Pandas DataFrame, with optional bounds on + n and T, and return the corresponding table entry.""" + # Mask data if required. data_mask = np.ones(len(data), dtype=np.bool) if n_min is not None: - data_mask = np.logical_and(data_mask, data['n'] >= n_min) + data_mask = np.logical_and(data_mask, data["n"] >= n_min) if n_max is not None: - data_mask = np.logical_and(data_mask, data['n'] <= n_max) + data_mask = np.logical_and(data_mask, data["n"] <= n_max) if t_min is not None: - data_mask = np.logical_and(data_mask, data['t'] >= t_min) + data_mask = np.logical_and(data_mask, data["t"] >= t_min) if t_max is not None: - data_mask = np.logical_and(data_mask, data['t'] <= t_max) + data_mask = np.logical_and(data_mask, data["t"] <= t_max) + + idx = data[data_mask].idxmax()["zt_ave"] - idx = data[data_mask].idxmax()['zt_ave'] - return data.loc[idx] -def match_data(calc_n, calc_t, calc_data_2d, to_match, mode='same_t', - num_seeds=1): - - """ Construct a 2D interpolation of calc_data_2d, attempt to match data +def match_data(calc_n, calc_t, calc_data_2d, to_match, mode="same_t", num_seeds=1): + """Construct a 2D interpolation of calc_data_2d, attempt to match data specified in to_match according to mode, and return a list of best-fit n, T and values. - + to_match should be a list of (n, T, val) data points; n and/or T can be set to None, but this will throw an error for some modes. - + Mode can be one of 'same', 'same_t' (default), 'same_n', 'best_match', where: * 'same' returns the calculated value at the experimental n/T. * 'same_t'/'same_n' return closest match at the same T/n. * 'best_match' returns the closest match. """ - + # Generate an interpolation of the 2D data with x = log_n. - + calc_log_n = np.log10(calc_n) - + interpolator = RegularGridInterpolator( - (calc_log_n, calc_t), calc_data_2d, method='linear', bounds_error=False, - fill_value=None) - + (calc_log_n, calc_t), + calc_data_2d, + method="linear", + bounds_error=False, + fill_value=None, + ) + # Loop over the data in to_match. - + match_res = [] - + for n, t, val in to_match: # An initial guess for n and/or T can be taken from the coordinates of # the 2D data where the difference to val is a minimum. - + idx_n, idx_t = np.unravel_index( - np.argmin(np.abs(calc_data_2d - val)), calc_data_2d.shape) - - if mode == 'same': + np.argmin(np.abs(calc_data_2d - val)), calc_data_2d.shape + ) + + if mode == "same": if n is None or t is None: - raise Exception("mode = 'same' can only be used when both n " - "and T have been specified.") - + raise Exception( + "mode = 'same' can only be used when both n " + "and T have been specified." + ) + match_res.append((n, t, interpolator((np.log10(n), t)))) - - elif mode == 'same_t': + + elif mode == "same_t": if t is None: - raise Exception("mode = 'same_t' can only be used when T has " - "been specified.") - + raise Exception( + "mode = 'same_t' can only be used when T has " "been specified." + ) + # Objective function to be minimised: absolute difference between # the calculation and match value. - + def _fit_func(log_n): return np.abs(interpolator((log_n, t)) - val) - + # Generate a set of initial guesses to input to the minimisation # function. If num_seeds = 1, use the log(n) from the estimate # above. Otherwise, generate a uniform sampling of log(n) over the # calculation range. 
- + guesses = None - + if num_seeds > 1: - guesses = np.linspace(calc_log_n.min(), calc_log_n.max(), - num_seeds) + guesses = np.linspace(calc_log_n.min(), calc_log_n.max(), num_seeds) else: guesses = [calc_n[idx_n]] # Minimise the function for each initital guess in guesses. res_set = [] - + for log_n_0 in guesses: - res = minimize(_fit_func, [log_n_0], - bounds=[(calc_log_n[0], calc_log_n[-1])]) - + res = minimize( + _fit_func, [log_n_0], bounds=[(calc_log_n[0], calc_log_n[-1])] + ) + res_set.append(res) - + # Select the guess with the lowest error (given by res.fun). - + idx = np.argmin([res.fun for res in res_set]) - - match_res.append((np.power(10., res_set[idx].x[0]), t, - interpolator((res_set[idx].x[0], t)))) - - elif mode == 'same_n': + + match_res.append( + ( + np.power(10.0, res_set[idx].x[0]), + t, + interpolator((res_set[idx].x[0], t)), + ) + ) + + elif mode == "same_n": if n is None: - raise Exception("mode = 'same_n' can only be used when n has " - "been specified.") - + raise Exception( + "mode = 'same_n' can only be used when n has " "been specified." + ) + # As for mode == 'same_t'. - + def _fit_func(t): return np.abs(interpolator((np.log10(n), t)) - val) guesses = None - + if num_seeds > 1: guesses = np.linspace(calc_t.min(), calc_t.max(), num_seeds) else: guesses = [calc_t[idx_t]] res_set = [] - + for t in guesses: - res = minimize(_fit_func, [t], - bounds=[(calc_t.min(), calc_t.max())]) - + res = minimize(_fit_func, [t], bounds=[(calc_t.min(), calc_t.max())]) + res_set.append(res) - + # Select the guess with the lowest error (given by res.fun). - + idx = np.argmin([res.fun for res in res_set]) - - match_res.append((n, res_set[idx].x[0], - interpolator((np.log10(n), res_set[idx].x[0])))) - - elif mode == 'best_match': + + match_res.append( + (n, res_set[idx].x[0], interpolator((np.log10(n), res_set[idx].x[0]))) + ) + + elif mode == "best_match": if num_seeds > 1: - warnings.warn("Setting num_seeds > 1 with mode = 'best_match' " - "may take a long time and/or yield dubious " - "results.", RuntimeWarning) - + warnings.warn( + "Setting num_seeds > 1 with mode = 'best_match' " + "may take a long time and/or yield dubious " + "results.", + RuntimeWarning, + ) + def _fit_func(args): return np.abs(interpolator(args) - val) @@ -181,29 +197,30 @@ def _fit_func(args): # product() of a uniform sampling of log(n) and T. 
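With two free variables the seeding grows quadratically, which is why the RuntimeWarning above discourages num_seeds > 1 in this mode. The seed grid itself is just (hypothetical ranges):

    from itertools import product

    import numpy as np

    num_seeds = 5
    guess_n = np.linspace(16.0, 20.0, num_seeds)    # log10(n)
    guess_t = np.linspace(300.0, 900.0, num_seeds)  # T [K]

    # num_seeds ** 2 = 25 starting points for the 2D minimisation.
    guesses = list(product(guess_n, guess_t))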
guesses = None - + if num_seeds > 1: - guess_n = np.linspace(calc_log_n.min(), calc_log_n.max(), - num_seeds) - + guess_n = np.linspace(calc_log_n.min(), calc_log_n.max(), num_seeds) + guess_t = np.linspace(calc_t.min(), calc_t.max(), num_seeds) - - guesses = [ - (log_n, t) for log_n, t in product(guess_n, guess_t)] + + guesses = [(log_n, t) for log_n, t in product(guess_n, guess_t)] else: guesses = [(calc_log_n[idx_n], calc_t[idx_t])] - + res_set = [] - + for log_n, t in guesses: - res = minimize(_fit_func, [log_n, t], - bounds=[(calc_log_n[0], calc_log_n[-1]), - (calc_t[0], calc_t[-1])]) - - match_res.append((np.power(10., res.x[0]), res.x[1], - interpolator((res.x[0], res.x[1])))) + res = minimize( + _fit_func, + [log_n, t], + bounds=[(calc_log_n[0], calc_log_n[-1]), (calc_t[0], calc_t[-1])], + ) + + match_res.append( + (np.power(10.0, res.x[0]), res.x[1], interpolator((res.x[0], res.x[1]))) + ) else: raise Exception("Unknown mode = '{0}'.".format(mode)) - + return match_res diff --git a/zt_calc_workflow/dataset.py b/zt_calc_workflow/dataset.py index 924c8d2..590d559 100644 --- a/zt_calc_workflow/dataset.py +++ b/zt_calc_workflow/dataset.py @@ -23,22 +23,25 @@ # Dataset creation # ---------------- + def zt_dataset_from_data(elec_prop_data, kappa_latt_data): - """ Combine electrical properties and lattice thermal conductivity data + """Combine electrical properties and lattice thermal conductivity data to create a new Pandas DataFrame with a ZT dataset. The new DataFrame has the same fields as elec_prop_data plus 'kappa_latt_*', 'kappa_tot_*' - and 'zt_*' fields. """ + and 'zt_*' fields.""" - elec_t = elec_prop_data['t'].unique() - kappa_latt_t = kappa_latt_data['t'].to_numpy() + elec_t = elec_prop_data["t"].unique() + kappa_latt_t = kappa_latt_data["t"].to_numpy() # Check temperatures in AMSET calculation are covered by the Phono3py # calculation. for t in elec_t: if t not in kappa_latt_t: - raise Exception("Phono3py calculation must cover the temperature " - "range of the AMSET calculation.") + raise Exception( + "Phono3py calculation must cover the temperature " + "range of the AMSET calculation." + ) zt_data = elec_prop_data.copy() @@ -46,47 +49,51 @@ def zt_dataset_from_data(elec_prop_data, kappa_latt_data): kl_cols = [[] for _ in range(4)] - for t in zt_data['t']: - (loc, ), = np.where(kappa_latt_t == t) + for t in zt_data["t"]: + ((loc,),) = np.where(kappa_latt_t == t) - for i, k in enumerate( - ['kappa_xx', 'kappa_yy', 'kappa_zz', 'kappa_ave']): + for i, k in enumerate(["kappa_xx", "kappa_yy", "kappa_zz", "kappa_ave"]): kl_cols[i].append(kappa_latt_data.loc[loc, k]) - kl_keys = ['kappa_latt_xx', 'kappa_latt_yy', 'kappa_latt_zz', 'kappa_latt_ave'] + kl_keys = ["kappa_latt_xx", "kappa_latt_yy", "kappa_latt_zz", "kappa_latt_ave"] for k, col in zip(kl_keys, kl_cols): zt_data[k] = col # Calculate and append \kappa_tot columns to AMSET data. - key_sets = [('kappa_el_xx', 'kappa_latt_xx', 'kappa_tot_xx'), - ('kappa_el_yy', 'kappa_latt_yy', 'kappa_tot_yy'), - ('kappa_el_zz', 'kappa_latt_zz', 'kappa_tot_zz'), - ('kappa_el_ave', 'kappa_latt_ave', 'kappa_tot_ave')] + key_sets = [ + ("kappa_el_xx", "kappa_latt_xx", "kappa_tot_xx"), + ("kappa_el_yy", "kappa_latt_yy", "kappa_tot_yy"), + ("kappa_el_zz", "kappa_latt_zz", "kappa_tot_zz"), + ("kappa_el_ave", "kappa_latt_ave", "kappa_tot_ave"), + ] for k_el, k_latt, k_tot in key_sets: zt_data[k_tot] = zt_data[k_el] + zt_data[k_latt] # Calculate and append ZT columns to AMSET data. 
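The ZT expression that follows is ZT = (PF / kappa_tot) * T, with the 1.0e-3 converting the power factor from mW m^-1 K^-2 back to SI units. A quick numeric check:

    pf_mw = 2.0      # power factor [mW m^-1 K^-2]
    kappa_tot = 1.0  # total thermal conductivity [W m^-1 K^-1]
    t = 700.0        # temperature [K]

    # (2.0e-3 W/m.K^2 / 1.0 W/m.K) * 700 K = 1.4 (dimensionless).
    zt = ((1.0e-3 * pf_mw) / kappa_tot) * t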
-    key_sets = [('pf_xx', 'kappa_tot_xx', 'zt_xx'),
-                ('pf_yy', 'kappa_tot_yy', 'zt_yy'),
-                ('pf_zz', 'kappa_tot_zz', 'zt_zz'),
-                ('pf_ave', 'kappa_tot_ave', 'zt_ave')]
+    key_sets = [
+        ("pf_xx", "kappa_tot_xx", "zt_xx"),
+        ("pf_yy", "kappa_tot_yy", "zt_yy"),
+        ("pf_zz", "kappa_tot_zz", "zt_zz"),
+        ("pf_ave", "kappa_tot_ave", "zt_ave"),
+    ]
 
     for k_pf, k_kappa, k_zt in key_sets:
-        zt_data[k_zt] = (
-            ((1.0e-3 * zt_data[k_pf]) / zt_data[k_kappa]) * zt_data['t'])
+        zt_data[k_zt] = ((1.0e-3 * zt_data[k_pf]) / zt_data[k_kappa]) * zt_data["t"]
 
     return zt_data
 
+
 def zt_dataset_from_amset_phono3py_csvs(amset_file, phono3py_kappa_file):
-    """ Reads an AMSET CSV and Phono3py kappa CSV file and return a ZT dataset
-    from zt_dataset_from_data(). """
+    """Read an AMSET CSV and a Phono3py kappa CSV file and return a ZT dataset
+    from zt_dataset_from_data()."""
 
-    elec_prop_data = read_amset_csv(amset_file, convert_sigma_s_cm=True,
-                                    calculate_pf_mw_m_k2=True)
+    elec_prop_data = read_amset_csv(
+        amset_file, convert_sigma_s_cm=True, calculate_pf_mw_m_k2=True
+    )
 
     kappa_latt_data = read_phono3py_kappa_csv(phono3py_kappa_file)
 
@@ -97,36 +104,35 @@ def zt_dataset_from_amset_phono3py_csvs(amset_file, phono3py_kappa_file):
 # 1D -> 2D conversion
 # -------------------
 
+
 def dataset_to_2d(data):
-    """ Convert a dataset as a Pandas DataFrame to a set of n and T values and
+    """Convert a dataset as a Pandas DataFrame to a set of n and T values and
     a dictionary of 2D NumPy arrays for each data column.
     """
 
     # Make sure data has 'n' (carrier concentration) and 't' (temperature) keys.
 
-    assert 'n' in data.columns and 't' in data.columns
+    assert "n" in data.columns and "t" in data.columns
 
     # Make sure data is sorted.
 
-    data = data.sort_values(by=['n', 't'])
+    data = data.sort_values(by=["n", "t"])
 
     # Get n/T values.
 
-    n_vals = data['n'].unique()
-    t_vals = data['t'].unique()
-
-    # Cast remaining columns into 2D arrays indexed by n (x) and T (y).
+    n_vals = data["n"].unique()
+    t_vals = data["t"].unique()
 
     shape = (len(n_vals), len(t_vals))
 
     data_2d = {}
 
     for k in data.columns:
-        if k != 'n' and k != 't':
+        if k != "n" and k != "t":
             arr = np.zeros(shape, dtype=np.float64)
 
             for i, n in enumerate(n_vals):
-                arr[i, :] = data[data['n'] == n][k]
+                arr[i, :] = data[data["n"] == n][k]
 
             data_2d[k] = arr
diff --git a/zt_calc_workflow/general_zt_plot.py b/zt_calc_workflow/general_zt_plot.py
new file mode 100644
index 0000000..71cc74a
--- /dev/null
+++ b/zt_calc_workflow/general_zt_plot.py
@@ -0,0 +1,563 @@
+# general_zt_plot.py
+
+
+"""
+In this example, we prepare 2D plots of ZT for one or more systems as a
+function of doping level and temperature.
+
+The calculation combines data from multiple papers:
+
+    Approximate models for the lattice thermal conductivity of alloy
+    thermoelectrics
+    J. M. Skelton
+    J. Mater. Chem. C 9 (35), 11772 (2021), DOI: 10.1039/D1TC02026A
+
+    Thermoelectric Properties of Pnma and Rocksalt SnS and SnSe
+    J. M. Flitcroft, I. Pallikara and J. M. Skelton
+    Solids 3 (1), 155-176 (2022), DOI: 10.3390/solids3010011
+
+    Thermoelectric properties of Pnma and R3m GeS and GeSe
+    M. Zhang, J. M. Flitcroft, S. Guillemot and J. M. Skelton
+    J. Mater. Chem. C 11, 14833 (2023), DOI: 10.1039/D3TC02938G
+"""
+
+
+import numpy as np
+import re
+import os
+import matplotlib.pyplot as plt
+
+from matplotlib.cm import ScalarMappable
+from matplotlib.colors import Normalize
+from matplotlib.gridspec import GridSpec
+from matplotlib.offsetbox import AnchoredText
+from matplotlib.ticker import FuncFormatter
+
+import argparse
+
+import sys
+
+sys.path.append(r"C:/PATH_TO_PACKAGE/ZT-Calc-Workflow")
+
+from zt_calc_workflow.amset import read_amset_csv
+from zt_calc_workflow.dataset import zt_dataset_from_data, dataset_to_2d
+from zt_calc_workflow.phono3py import read_phono3py_kappa_csv
+from zt_calc_workflow.plotting import setup_matplotlib
+
+##### Space to specify script inputs ######
+
+
+#### Define all input files here
+def define_input_files():
+    """Function to edit the filenames used in the 2D ZT plots and other input parameters
+
+    Returns:
+        Dictionary: input files with system name as key, and input files as values
+    """
+    input_files = {
+        "Pm3m": [
+            r"amset-STO-Cubic-P.csv",
+            r"amset-STO-Cubic-N.csv",
+            r"STO_cubic_klatt_NAC_Wigner.csv",
+        ],
+        "Pnma": [
+            r"amset-STO-Ortho-P.csv",
+            r"amset-STO-Ortho-N.csv",
+            r"STO_ortho_klatt_NAC_Wigner.csv",
+        ],
+        "I4/mcm": [
+            r"amset-STO-Tetra-P.csv",
+            r"amset-STO-Tetra-N.csv",
+            r"STO_tetra_klatt_NAC_Wigner.csv",
+        ],
+    }
+    return input_files
+
+
+#### Define other inputs if required
+def define_other_inputs():
+    """Function to pass input data to the script when it is not defined at runtime
+
+    Returns:
+        list: plot labels to assign to plots in order
+        int: minimum temperature to model
+        int: maximum temperature to model
+        list: contour levels to plot on ZT plot
+    """
+    # Change your plot labels here if not passed at runtime
+    plot_labels = [
+        "Pm3m-p-type",
+        "Pm3m-n-type",
+        "Pnma-p-type",
+        "Pnma-n-type",
+        "test",
+        "test2",
+        "test3",
+    ]
+
+    # Set temperatures here if not passed at runtime
+    t_min, t_max = 300.0, 1000.0
+
+    # Set contour levels here if not passed at runtime
+    zt_contour_levels = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7]
+
+    # Pass the information back to the caller.
+    return plot_labels, t_min, t_max, zt_contour_levels
+
+
+def populate_input_dict(input_files):
+    """Creates a dictionary from the input strings to fit in with the existing script
+
+    Args:
+        input_files (list): list of strings containing paths to files and system names
+
+    Returns:
+        dict: dictionary with key corresponding to system name and values corresponding to input csv files
+    """
+    input_dict = {}
+    print(len(input_files))
+    for i in range(int((len(input_files) / 4))):
+        input_dict = {
+            **input_dict,
+            input_files[i * 4]: input_files[i * 4 + 1 : i * 4 + 4],
+        }
+
+    print(input_dict)
+
+    return input_dict
+
+
+##### End of space to specify script inputs ######
+
+
+def plot_2D_func(
+    input_files,
+    specify_temp,
+    output_name,
+    quality,
+    gridrow,
+    gridcolumn,
+    input_labels,
+    contour_range,
+    figure_size,
+    skip,
+    padding,
+    uniformity,
+):
+    """Function to plot 2D ZT data from n, p and klatt data
+
+    Args:
+        input_files (dict): input CSV files, keyed by system name
+        specify_temp (list): (t_min, t_max) plot range, or "" for automatic
+        output_name (str): stem for the output image filenames
+        quality (int): DPI for the output images
+
+    Returns:
+        None
+    """
+
+    zt_data = {}
+    t_min, t_max = 10000, -10000
+
+    for k, (amset_p, amset_n, kappa) in input_files.items():
+        kappa_data = read_phono3py_kappa_csv(kappa)
+
+        # The Phono3py calculations were performed on structures with the axes
+        # oriented differently to those in the AMSET calculations. We deal with
+        # this by dropping the off-diagonal elements and relabelling the
+        # diagonal elements in kappa_data.
+
+        kappa_data.drop(columns=["kappa_yz", "kappa_xz", "kappa_xy"], inplace=True)
+        kappa_data.columns = ["t", "kappa_yy", "kappa_zz", "kappa_xx", "kappa_ave"]
+
+        amset_data_p = read_amset_csv(amset_p, check_uniform=uniformity)
+        amset_data_n = read_amset_csv(amset_n, check_uniform=uniformity)
+
+        # Check to find the highest and lowest temperatures across the inputs.
+        if amset_data_p["t"].max() > t_max:
+            t_max = amset_data_p["t"].max()
+        if amset_data_n["t"].max() > t_max:
+            t_max = amset_data_n["t"].max()
+        if amset_data_p["t"].min() < t_min:
+            t_min = amset_data_p["t"].min()
+        if amset_data_n["t"].min() < t_min:
+            t_min = amset_data_n["t"].min()
+
+        # Combine AMSET and Phono3py data into ZT datasets, write out CSVs
+        # Can be generalised to not require p and n... grid, count etc.
+        zt_p = zt_dataset_from_data(amset_data_p, kappa_data)
+        zt_n = zt_dataset_from_data(amset_data_n, kappa_data)
+        zt_p_name = str(k) + "_p_ZT"
+        zt_n_name = str(k) + "_n_ZT"
+        zt_p_name = re.sub(r"[^a-zA-Z0-9 \n\.]", "_", zt_p_name)
+        zt_n_name = re.sub(r"[^a-zA-Z0-9 \n\.]", "_", zt_n_name)
+        zt_p_name += ".csv"
+        zt_n_name += ".csv"
+        os.makedirs("CSVs", exist_ok=True)
+
+        zt_p.to_csv(str("CSVs/" + zt_p_name))
+        zt_n.to_csv(str("CSVs/" + zt_n_name))
+
+        # then convert to 2D data
+        zt_data[k] = (
+            dataset_to_2d(zt_p),
+            dataset_to_2d(zt_n),
+        )
+
+    # Setup Matplotlib.
+    setup_matplotlib()
+
+    # Collect inputs from the top of the script.
+    input_params = define_other_inputs()
+
+    # Custom formatters for labels.
+    # Either read labels from the script or from the command line.
+    if len(input_labels) == 0:
+        subplot_labels = input_params[0]
+
+    else:
+        subplot_labels = input_labels
+
+    def contour_fmt(v):
+        if v % 1.0 == 0:
+            return "{0:.0f}".format(v)
+
+        if v % 0.5 == 0:
+            return "{0:.1f}".format(v)
+
+        return "{0:.2f}".format(v)
+
+    def log_fmt(v, pos):
+        return "$10^{{{0:.0f}}}$".format(v)
+
+    log_formatter = FuncFormatter(log_fmt)
+
+    # To use a common colour bar, we need to determine the "global" ZT_max
+    # across all datasets in the range (t_min, t_max). Once we've done this,
+    # we can create a matplotlib.colors.Normalize to colour the 2D plots.
+
+    # Override the automatic temperature range if one was specified.
+    if specify_temp != "":
+        t_min, t_max = int(specify_temp[0]), int(specify_temp[1])
+
+    print("Plotting ZT between ", t_min, " and ", t_max)
+
+    global_zt_max = 0.0
+
+    for data_p, data_n in zt_data.values():
+        for n, t, data in data_p, data_n:
+            t_mask = np.logical_and(t >= t_min, t <= t_max)
+            global_zt_max = max(global_zt_max, data["zt_ave"][:, t_mask].max())
+
+    print("Global ZT_max: ", global_zt_max)
+    norm = Normalize(vmin=0.0, vmax=global_zt_max)
+
+    # Plot.
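When a -c range is given, the contour levels below are built with np.arange() and rounded to the increment's precision; note that np.arange() excludes the end point. A quick check with hypothetical -c values:

    import numpy as np

    c_min, c_max, c_step = 0.1, 0.8, 0.1  # as from "-c 0.1 0.8 0.1"

    # Round to the number of decimal places in the increment so float noise
    # from np.arange() does not leak into the contour labels.
    dec_places = len(str(c_step).split(".")[1])
    levels = np.round(np.arange(c_min, c_max, c_step), dec_places).tolist()
    # -> [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7]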
+
+    # Contour levels - if not provided, use the pre-defined set.
+    if len(contour_range) == 0:
+        zt_contour_levels = input_params[3]
+    # otherwise make a new list based on the increments
+    else:
+        # find the number of decimal places of the increment, for rounding
+        decimal_str = str(contour_range[2])
+        dec_places = len(str(decimal_str).split(".")[1])
+        print(dec_places)
+        # Create a NumPy array over the contour range
+        contour_array = np.arange(contour_range[0], contour_range[1], contour_range[2])
+        # round to the number of decimal places required for the increment
+        contour_array = np.round(contour_array, dec_places)
+        # convert to list
+        zt_contour_levels = contour_array.tolist()
+
+    print("Plotting contours: ", zt_contour_levels)
+
+    plt.figure(figsize=(figure_size[0] / 2.54, figure_size[1] / 2.54))
+
+    # Use a GridSpec to divide the plot into four subplot axes and a colour
+    # bar axis.
+    # Four GridSpec rows per panel row and gridcolumn columns;
+    # use gridrow and gridcolumn from the inputs.
+
+    # check whether skip has been specified
+    # plot differently if skipping files is requested
+    offset = 2
+    if skip:
+        offset = 1
+
+    grid_spec = GridSpec((gridrow * 4), gridcolumn)
+    grid_spec.update(top=0.90, bottom=0.08, wspace=padding[0], hspace=padding[1])
+
+    col_grid = GridSpec(1, gridcolumn)
+    col_grid.update(top=0.99, bottom=0.94)
+
+    subplot_axes = []
+
+    counter = 0
+    for r in range(gridrow):
+        for c in range(gridcolumn):
+            if not skip or (skip and counter < len(input_files.keys())):
+                subplot_axes.append(plt.subplot(grid_spec[4 * r : 4 * (r + 1), c]))
+                counter += 1
+
+    # Loop over the systems and p- and n-type doping and draw a 2D colour plot
+    # with contour lines.
+
+    for i, k in enumerate(list(input_files.keys())):
+        data_p, data_n = zt_data[k]
+
+        for j, (n, t, data) in enumerate([data_p, data_n]):
+            # if skip is enabled, skip the second file (assumes n and p data are always present)
+            if skip and j == 1:
+                continue
+            # check if there are enough axes to plot data on - avoids a crash
+            if (offset * i + j) < len(subplot_axes):
+                axes = subplot_axes[offset * i + j]
+
+                t_mask = np.logical_and(t >= t_min, t <= t_max)
+
+                x = np.log10(n)
+                y = t[t_mask]
+                z = data["zt_ave"].T[t_mask, :]
+
+                axes.pcolormesh(x, y, z, norm=norm, shading="gouraud")
+
+                cs = axes.contour(x, y, z, levels=zt_contour_levels, colors="r")
+                axes.clabel(cs, cs.levels, inline=True, fmt=contour_fmt)
+
+    # Adjust axis ranges and labels.
+
+    for axes in subplot_axes:
+        axes.set_ylim(t_min, t_max)
+        axes.set_yticks(np.linspace(t_min, t_max, 6))
+
+    for axes in subplot_axes:
+        axes.xaxis.set_major_formatter(log_formatter)
+
+    if skip:
+        for axes in subplot_axes:
+            axes.set_xlabel(r"Doping Level $n$ [cm$^{-3}$]")
+        for axes in subplot_axes:
+            axes.set_ylabel(r"$T$ [K]")
+    else:
+        for axes in subplot_axes[-gridcolumn:]:
+            axes.set_xlabel(r"Doping Level $n$ [cm$^{-3}$]")
+        for axes in subplot_axes[::gridcolumn]:
+            axes.set_ylabel(r"$T$ [K]")
+
+    # Add subplot labels.
+
+    for i, (axes, label) in enumerate(zip(subplot_axes, subplot_labels)):
+        subplot_label = AnchoredText(
+            r"({0}) {1}".format(chr(97 + i), label), loc="lower left", frameon=True
+        )
+
+        subplot_label.patch.set_edgecolor("k")
+        subplot_label.patch.set_facecolor((1.0, 1.0, 1.0, 0.5))
+
+        axes.add_artist(subplot_label)
+
+    # Add colour bar.
+    cbar_axes = plt.subplot(col_grid[0, 0:gridcolumn])
+    plt.colorbar(ScalarMappable(norm=norm), orientation="horizontal", cax=cbar_axes)
+    cbar_axes.set_ylabel(r"$ZT$")
+    # cbar_axes.set_in_layout(False)
+
+    # Finalise and save.
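The layout above gives each panel row a band of four GridSpec rows, with a separate one-row GridSpec reserved for the colour bar near the top of the figure. A stripped-down sketch of the same construction (2 x 2 panels, hypothetical sizes):

    import matplotlib.pyplot as plt

    from matplotlib.gridspec import GridSpec

    fig = plt.figure(figsize=(14.0 / 2.54, 14.0 / 2.54))

    # Four GridSpec rows per panel row, below a band kept free for the bar.
    panel_grid = GridSpec(2 * 4, 2)
    panel_grid.update(top=0.90, bottom=0.08)

    panels = [fig.add_subplot(panel_grid[4 * r : 4 * (r + 1), c])
              for r in range(2) for c in range(2)]

    # A second, single-row GridSpec holds the horizontal colour-bar axis.
    cbar_grid = GridSpec(1, 1)
    cbar_grid.update(top=0.99, bottom=0.94)
    cbar_axes = fig.add_subplot(cbar_grid[0, 0])

A typical invocation, using the flags defined in the argument parser below, might be: python general_zt_plot.py -i Pm3m amset-STO-Cubic-P.csv amset-STO-Cubic-N.csv STO_cubic_klatt_NAC_Wigner.csv -t 300 1000 -gr 1 -gc 2 -o sto_zt.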
+
+    # plt.tight_layout(pad=float(padding))
+    img_name = output_name + ".png"
+    pdf_name = output_name + ".pdf"
+    plt.savefig(img_name, dpi=int(quality))
+    plt.savefig(pdf_name, dpi=int(quality))
+    plt.close()
+    print("Plot saved as ", img_name)
+    print("Vector image saved as ", pdf_name)
+
+
+### Running the ZT plot code - arguments are parsed here
+if __name__ == "__main__":
+
+    #### Defining script arguments for plotting ####
+
+    # start setting up arguments
+    parser = argparse.ArgumentParser(
+        prog="general_zt_plot.py",
+        description="Process arguments for ZT plotting script",
+    )
+
+    parser.add_argument(
+        "-i",
+        "--input_files",
+        nargs="*",
+        default="",
+        help="Specify input files, separated by spaces. First entry is system name, followed by p data, n data and klatt data.",
+    )
+
+    # specify temperature range
+    parser.add_argument(
+        "-t",
+        "--temp_specify",
+        nargs="*",
+        default="",
+        help="Specify minimum and maximum temperature to plot",
+    )
+
+    # specify output path
+    parser.add_argument(
+        "-o",
+        "--output",
+        default="zt_plot",
+        help="Specify filename for graph image",
+    )
+
+    # specify image output quality
+    parser.add_argument(
+        "-q",
+        "--quality",
+        default=300,
+        help="Specify DPI for graph image quality",
+    )
+
+    # specify grid row layout
+    parser.add_argument(
+        "-gr",
+        "--gridrow",
+        default=3,
+        help="Specify number of rows of graphs for plotting grid",
+    )
+
+    # specify grid column layout
+    parser.add_argument(
+        "-gc",
+        "--gridcolumn",
+        default=3,
+        help="Specify number of columns of graphs for plotting grid",
+    )
+
+    # specify labels for plots
+    parser.add_argument(
+        "-l",
+        "--labels",
+        nargs="*",
+        default="",
+        help="Specify labels for plots, separated by spaces.",
+    )
+
+    # specify contour list for plots
+    parser.add_argument(
+        "-c",
+        "--contour",
+        nargs="*",
+        default="",
+        help="Define range for contour plot, first two values are minimum and maximum for the range, third value is the increment amount.",
+    )
+
+    # specify figure size in cm
+    parser.add_argument(
+        "-f",
+        "--fig_size",
+        nargs=2,
+        default=[14, 14],
+        help="Specify size in cm for figure in x and y",
+    )
+
+    # plot only one item
+    parser.add_argument(
+        "-s",
+        "--skip",
+        action="store_true",
+        help="Plot only one file from set, default is first file given per item.",
+    )
+
+    # specify padding for layout
+    parser.add_argument(
+        "-p",
+        "--padding",
+        nargs=2,
+        default=[0.24, 0.94],
+        help="Specify padding for layout of plots, height then width",
+    )
+
+    # force data to be uniform
+    parser.add_argument(
+        "-u",
+        "--uniform",
+        action="store_false",
+        help="Forces data to be uniform; otherwise non-uniform data is flagged and the calculation stops.",
+    )
+
+    # load arguments from the command line
+    args = parser.parse_args()
+    contour_float = list(map(float, args.contour))
+    fig_size_float = list(map(float, args.fig_size))
+    padding_float = list(map(float, args.padding))
+
+    #### Checking routines for arguments ######
+
+    # read input files from function
+    if len(args.input_files) > 0:
+        input_files = populate_input_dict(args.input_files)
+    else:
+        input_files = define_input_files()
+
+    # check two temperatures are provided if custom range requested
+    if len(args.temp_specify) == 1 or len(args.temp_specify) > 2:
+        print("Two temperatures are required to specify plot temperature range")
+        print(
+            "Please re-run with two temperatures specified or leave blank for automatic temperature range"
+        )
+        exit()
+
+    # check that max temp is higher than min temp, otherwise rearrange
+    if len(args.temp_specify) > 0:
+
+        if int(args.temp_specify[0]) > int(args.temp_specify[1]):
+            print(
+                "Minimum temperature is higher than maximum temperature, values have been swapped."
+            )
+            # swap the minimum and maximum temperatures in place
+            args.temp_specify[0], args.temp_specify[1] = (
+                args.temp_specify[1],
+                args.temp_specify[0],
+            )
+
+    # check enough information is provided to create contours
+    if len(args.contour) > 0 and len(args.contour) < 3:
+        print("A start, end and increment value are required to specify contour range")
+        print(
+            "Please re-run with a start, end and increment value or leave blank to use default contour range"
+        )
+        exit()
+
+    # check if the contour range needs rearranging, as for the temperatures
+    if len(contour_float) > 0:
+        if contour_float[0] > contour_float[1]:
+            print(
+                "Contour minimum is higher than contour maximum, values have been swapped."
+            )
+            # swap the minimum and maximum contour values in place
+            contour_float[0], contour_float[1] = contour_float[1], contour_float[0]
+
+    # strip any file extension from the output name
+    if "." in args.output:
+        args.output = ".".join(args.output.split(".")[:-1])
+
+    # Generate and save the plots.
+    plot_2D_func(
+        input_files,
+        args.temp_specify,
+        args.output,
+        args.quality,
+        int(args.gridrow),
+        int(args.gridcolumn),
+        args.labels,
+        contour_float,
+        fig_size_float,
+        args.skip,
+        padding_float,
+        args.uniform,
+    )
diff --git a/zt_calc_workflow/io.py b/zt_calc_workflow/io.py
index 0d5216d..78e7886 100644
--- a/zt_calc_workflow/io.py
+++ b/zt_calc_workflow/io.py
@@ -18,10 +18,12 @@
 # CSV files
 # ---------
 
-def read_validate_csv(file_path, header_map=None, known_headers=None,
-                      known_headers_required=False):
-    """ Read a CSV file into a Pandas DataFrame and optionally update headers
-    with header_map and check headers against known_headers. """
+
+def read_validate_csv(
+    file_path, header_map=None, known_headers=None, known_headers_required=False
+):
+    """Read a CSV file into a Pandas DataFrame and optionally update headers
+    with header_map and check headers against known_headers."""
 
     df = pd.read_csv(file_path)
 
diff --git a/zt_calc_workflow/phono3py.py b/zt_calc_workflow/phono3py.py
index ce214ec..1ade930 100644
--- a/zt_calc_workflow/phono3py.py
+++ b/zt_calc_workflow/phono3py.py
@@ -20,42 +20,59 @@
 # ---------
 
 _READ_PHONO3PY_KAPPA_HEADER_MAP = {
-    'T [K]': 't', 'k_xx [W/m.K]': 'kappa_xx', 'k_yy [W/m.K]': 'kappa_yy',
-    'k_zz [W/m.K]': 'kappa_zz', 'k_yz [W/m.K]': 'kappa_yz',
-    'k_xz [W/m.K]': 'kappa_xz', 'k_xy [W/m.K]': 'kappa_xy',
-    'k_iso [W/m.K]': 'kappa_ave'}
+    "T [K]": "t",
+    "k_xx [W/m.K]": "kappa_xx",
+    "k_yy [W/m.K]": "kappa_yy",
+    "k_zz [W/m.K]": "kappa_zz",
+    "k_yz [W/m.K]": "kappa_yz",
+    "k_xz [W/m.K]": "kappa_xz",
+    "k_xy [W/m.K]": "kappa_xy",
+    "k_iso [W/m.K]": "kappa_ave",
+}
+
+_READ_PHONO3PY_KAPPA_KNOWN_HEADERS = list(_READ_PHONO3PY_KAPPA_HEADER_MAP.values())
 
-_READ_PHONO3PY_KAPPA_KNOWN_HEADERS = list(
-    _READ_PHONO3PY_KAPPA_HEADER_MAP.values())
+
 
 def read_phono3py_kappa_csv(file_path):
-    """ Read a CSV file generated with the phono3py-get-kappa script. 
""" + """Read a CSV file generated with the phono3py-get-kappa script.""" return read_validate_csv( - file_path, header_map=_READ_PHONO3PY_KAPPA_HEADER_MAP, + file_path, + header_map=_READ_PHONO3PY_KAPPA_HEADER_MAP, known_headers=_READ_PHONO3PY_KAPPA_KNOWN_HEADERS, - known_headers_required=True) - -_READ_PHONO3PY_CRTA_HEADER_MAP = dict(_READ_PHONO3PY_KAPPA_HEADER_MAP, **{ - "(k/t)_xx [W/m.K.ps]": 'kappa_tau_crta_xx', - "(k/t)_yy [W/m.K.ps]": 'kappa_tau_crta_yy', - "(k/t)_zz [W/m.K.ps]": 'kappa_tau_crta_zz', - "(k/t)_yz [W/m.K.ps]": 'kappa_tau_crta_yz', - "(k/t)_xz [W/m.K.ps]": 'kappa_tau_crta_xz', - "(k/t)_xy [W/m.K.ps]": 'kappa_tau_crta_xy', - "(k/t)_iso [W/m.K.ps]": 'kappa_tau_crta_ave', - "(t^CRTA)_xx [ps]": 'tau_crta_xx', "(t^CRTA)_yy [ps]": 'tau_crta_yy', - "(t^CRTA)_zz [ps]": 'tau_crta_zz', "(t^CRTA)_yz [ps]": 'tau_crta_yz', - "(t^CRTA)_xz [ps]": 'tau_crta_xz', "(t^CRTA)_xy [ps]": 'tau_crta_xy', - "(t^CRTA)_iso [ps]": 'tau_crta_ave'}) - -_READ_PHONO3PY_CRTA_KNOWN_HEADERS = list( - _READ_PHONO3PY_CRTA_HEADER_MAP.values()) + known_headers_required=True, + ) + + +_READ_PHONO3PY_CRTA_HEADER_MAP = dict( + _READ_PHONO3PY_KAPPA_HEADER_MAP, + **{ + "(k/t)_xx [W/m.K.ps]": "kappa_tau_crta_xx", + "(k/t)_yy [W/m.K.ps]": "kappa_tau_crta_yy", + "(k/t)_zz [W/m.K.ps]": "kappa_tau_crta_zz", + "(k/t)_yz [W/m.K.ps]": "kappa_tau_crta_yz", + "(k/t)_xz [W/m.K.ps]": "kappa_tau_crta_xz", + "(k/t)_xy [W/m.K.ps]": "kappa_tau_crta_xy", + "(k/t)_iso [W/m.K.ps]": "kappa_tau_crta_ave", + "(t^CRTA)_xx [ps]": "tau_crta_xx", + "(t^CRTA)_yy [ps]": "tau_crta_yy", + "(t^CRTA)_zz [ps]": "tau_crta_zz", + "(t^CRTA)_yz [ps]": "tau_crta_yz", + "(t^CRTA)_xz [ps]": "tau_crta_xz", + "(t^CRTA)_xy [ps]": "tau_crta_xy", + "(t^CRTA)_iso [ps]": "tau_crta_ave", + } +) + +_READ_PHONO3PY_CRTA_KNOWN_HEADERS = list(_READ_PHONO3PY_CRTA_HEADER_MAP.values()) + def read_phono3py_crta_csv(file_path): - """ Read a CSV file generated with the CRTA.py script. """ + """Read a CSV file generated with the CRTA.py script.""" return read_validate_csv( - file_path, header_map=_READ_PHONO3PY_CRTA_HEADER_MAP, + file_path, + header_map=_READ_PHONO3PY_CRTA_HEADER_MAP, known_headers=_READ_PHONO3PY_CRTA_KNOWN_HEADERS, - known_headers_required=True) + known_headers_required=True, + ) diff --git a/zt_calc_workflow/plotting.py b/zt_calc_workflow/plotting.py index cca406b..6ed3e4e 100644 --- a/zt_calc_workflow/plotting.py +++ b/zt_calc_workflow/plotting.py @@ -22,57 +22,78 @@ # Functions # --------- + def setup_matplotlib(sans_serif=True, font_size=8, linewidth=0.5): - """ Set Matplotlib defaults. """ + """Set Matplotlib defaults.""" # Fonts. if sans_serif: - mpl.rc('font', **{'family': 'sans-serif', 'size': font_size, - 'sans-serif': 'Calibri'}) - - mpl.rc('mathtext', **{'fontset': 'custom', 'rm': 'Calibri', - 'it': 'Calibri:italic', 'bf' : 'Calibri:bold' }) + mpl.rc( + "font", + **{"family": "sans-serif", "size": font_size, "sans-serif": "Calibri"} + ) + + mpl.rc( + "mathtext", + **{ + "fontset": "custom", + "rm": "Calibri", + "it": "Calibri:italic", + "bf": "Calibri:bold", + } + ) else: - mpl.rc('font', **{'family': 'serif', 'size': font_size, - 'serif': 'Times'}) - - mpl.rc('mathtext', **{'fontset': 'custom', 'rm': 'Times', - 'it': 'Times:italic', 'bf' : 'Times:bold' }) + mpl.rc("font", **{"family": "serif", "size": font_size, "serif": "Times"}) + + mpl.rc( + "mathtext", + **{ + "fontset": "custom", + "rm": "Times", + "it": "Times:italic", + "bf": "Times:bold", + } + ) # Axes, lines and patches. 
-    mpl.rc('axes', **{'linewidth': linewidth, 'labelsize': font_size})
-    mpl.rc('lines', **{'linewidth': linewidth, 'markeredgewidth': linewidth})
-    mpl.rc('patch', **{'linewidth': linewidth})
+    mpl.rc("axes", **{"linewidth": linewidth, "labelsize": font_size})
+    mpl.rc("lines", **{"linewidth": linewidth, "markeredgewidth": linewidth})
+    mpl.rc("patch", **{"linewidth": linewidth})
 
     # Tick params.
 
-    tick_params = {'major.width': linewidth, 'minor.width': linewidth,
-                   'direction': 'in'}
+    tick_params = {
+        "major.width": linewidth,
+        "minor.width": linewidth,
+        "direction": "in",
+    }
+
+    mpl.rc("xtick", **tick_params)
+    mpl.rc("ytick", **tick_params)
 
-    mpl.rc('xtick', **tick_params)
-    mpl.rc('ytick', **tick_params)
+    mpl.rc("xtick", **{"top": True})
+    mpl.rc("ytick", **{"right": True})
 
-    mpl.rc('xtick', **{'top': True})
-    mpl.rc('ytick', **{'right': True})
 
 def cscale(n, h1, h2):
-    """ Generate an HSB colour scale with n colours between hues h1 and h2. """
+    """Generate an HSB colour scale with n colours between hues h1 and h2."""
 
-    if n <0:
+    if n < 1:
         raise Exception("n must be at least 1.")
 
     if n == 1:
         return hsv_to_rgb((h1 / 360.0, 1.0, 1.0))
 
-    return [hsv_to_rgb(((h / 360.0) % 1.0, 1.0, 1.0))
-            for h in np.linspace(h1, h2, n)]
+    return [hsv_to_rgb(((h / 360.0) % 1.0, 1.0, 1.0)) for h in np.linspace(h1, h2, n)]
+
 
 def cscale_fire(n):
-    """ Generate an n-point "fire" colour scale. """
+    """Generate an n-point "fire" colour scale."""
 
     return cscale(n, 240.0, 390.0)
 
+
 def cscale_ice(n):
-    """ Generate an n-point "ice" colour scale. """
+    """Generate an n-point "ice" colour scale."""
 
     return cscale(n, 240.0, 180.0)
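+
+
+# Example usage (a sketch, not part of the module's tested API): one colour per
+# curve when plotting a family of datasets. `datasets` is a hypothetical list
+# of (x, y) arrays; any iterable of curves would do.
+#
+#     colours = cscale_fire(len(datasets))
+#     for colour, (x, y) in zip(colours, datasets):
+#         plt.plot(x, y, color=colour)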