diff --git a/examples/cfd/flow_reconstruction_diffusion/README.md b/examples/cfd/flow_reconstruction_diffusion/README.md index 328c40ba03..10b8f9d786 100644 --- a/examples/cfd/flow_reconstruction_diffusion/README.md +++ b/examples/cfd/flow_reconstruction_diffusion/README.md @@ -51,7 +51,7 @@ In directory ``physicsnemo/examples/cfd/flow_reconstruction_diffusion/``, run: python train.py --config-name=config_dfsr_train `` -or +or (with physics-informed conditioning) @@ -59,6 +59,23 @@ or python train.py --config-name=config_dfsr_cond_train `` +You can also use the helper script: + +`` +bash setup_and_train.sh +`` + +Optional environment variables: + +- `CONFIG_NAME` (default: `config_dfsr_train`) +- `TRAIN_EXTRA_ARGS` (default: empty; appended to `train.py`) + +Example for conditional training: + +`` +CONFIG_NAME=config_dfsr_cond_train bash setup_and_train.sh +`` + Step 2 - Super-resolution In directory ``physicsnemo/examples/cfd/flow_reconstruction_diffusion/``, run: @@ -81,4 +98,3 @@ This implementation is based on / inspired by: - [https://github.com/ermongroup/SDEdit](https://github.com/ermongroup/SDEdit) (SDEdit: Guided Image Synthesis and Editing with Stochastic Differential Equations) - [https://github.com/ermongroup/ddim](https://github.com/ermongroup/ddim) (Denoising Diffusion Implicit Models) - diff --git a/examples/cfd/flow_reconstruction_diffusion/setup_and_train.sh b/examples/cfd/flow_reconstruction_diffusion/setup_and_train.sh new file mode 100755 index 0000000000..50220a612e --- /dev/null +++ b/examples/cfd/flow_reconstruction_diffusion/setup_and_train.sh @@ -0,0 +1,30 @@ +#!/usr/bin/env bash +set -euo pipefail + +# Reproducible setup + train helper for flow reconstruction diffusion. +# Run this inside a PhysicsNeMo environment. 
+# +# Optional environment variables: +# CONFIG_NAME (default: config_dfsr_train) +# TRAIN_EXTRA_ARGS (default: empty; appended to train.py) + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +CONFIG_NAME="${CONFIG_NAME:-config_dfsr_train}" +TRAIN_EXTRA_ARGS="${TRAIN_EXTRA_ARGS:-}" + +echo ">>> [0/3] Entering ${SCRIPT_DIR}" +cd "${SCRIPT_DIR}" + +echo ">>> [1/3] Installing dependencies" +python -m pip install --upgrade pip +python -m pip install -r requirements.txt + +echo ">>> [2/3] Starting training with --config-name ${CONFIG_NAME}" +if [[ -n "${TRAIN_EXTRA_ARGS}" ]]; then + # shellcheck disable=SC2086 + python train.py --config-name "${CONFIG_NAME}" ${TRAIN_EXTRA_ARGS} +else + python train.py --config-name "${CONFIG_NAME}" +fi + +echo ">>> [3/3] Done. Check configured output directory for logs and snapshots." diff --git a/examples/cfd/flow_reconstruction_diffusion/train.py b/examples/cfd/flow_reconstruction_diffusion/train.py index 278e72d131..66cec4556c 100644 --- a/examples/cfd/flow_reconstruction_diffusion/train.py +++ b/examples/cfd/flow_reconstruction_diffusion/train.py @@ -27,7 +27,7 @@ import hydra import torch -from omegaconf import DictConfig +from omegaconf import DictConfig, ListConfig, OmegaConf from training_loop import training_loop from misc import EasyDict @@ -46,6 +46,21 @@ import argparse +def _to_json_serializable(obj): + """Recursively convert OmegaConf/EasyDict-style objects to JSON-safe types.""" + if isinstance(obj, (DictConfig, ListConfig)): + obj = OmegaConf.to_container(obj, resolve=True) + + if isinstance(obj, dict): + return {str(k): _to_json_serializable(v) for k, v in obj.items()} + if isinstance(obj, (list, tuple)): + return [_to_json_serializable(v) for v in obj] + if isinstance(obj, (str, int, float, bool)) or obj is None: + return obj + + return str(obj) + + @hydra.main(version_base="1.2", config_path="conf", config_name="config") def main(cfg: DictConfig) -> None: """Train diffusion-based generative model using the techniques 
described in the @@ -285,8 +300,9 @@ def main(cfg: DictConfig) -> None: # c.task = cfg.task # Print options. # TODO replace prints with PhysicsNeMo logger + serialized_config = _to_json_serializable(c) logger0.info("Training options:") - logger0.info(json.dumps(c, indent=2)) + logger0.info(json.dumps(serialized_config, indent=2)) logger0.info(f"Output directory: {c.run_dir}") logger0.info(f"Dataset path: {c.dataset_kwargs.path}") logger0.info(f"Class-conditional: {c.dataset_kwargs.use_labels}") @@ -306,7 +322,7 @@ def main(cfg: DictConfig) -> None: if dist.rank == 0: os.makedirs(c.run_dir, exist_ok=True) with open(os.path.join(c.run_dir, "training_options.json"), "wt") as f: - json.dump(c, f, indent=2) + json.dump(serialized_config, f, indent=2) # utils.Logger(file_name=os.path.join(c.run_dir, 'log.txt'), file_mode='a', should_flush=True) # Train.