Skip to content

Commit 70f6bef

Browse files
authored
Merge branch 'main' into normalization
2 parents 36e1d34 + 4e18bf4 commit 70f6bef

23 files changed

Lines changed: 240 additions & 475 deletions
Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,12 +1,16 @@
11
defaults:
22
- data: reaction_diffusion
3+
# Or set to TheWell dataset:
4+
# - data: the_well
35
- model: ae
46
- trainer: default
57
- logging: wandb
68
- _self_
79

810
seed: 42
911
experiment_name: autoencoder
12+
data:
13+
autoencoder_mode: true
1014
output:
1115
checkpoint_name: autoencoder.ckpt
1216
save_config: true

configs/data/reaction_diffusion.yaml

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
1-
data_path: null
2-
use_simulator: true
1+
data_path: ${oc.env:AUTOCAST_DATASETS,./datasets}/reaction_diffusion
2+
use_simulator: false
33
split:
44
n_train: 4
55
n_valid: 2

configs/data/the_well.yaml

Lines changed: 36 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,36 @@
1+
# TheWell dataset configuration
2+
# Configure TheWellDataModule for loading datasets from The Well benchmark
3+
_target_: autocast.data.datamodule.TheWellDataModule
4+
5+
# Dataset name from The Well.
6+
# Defaults to `turbulent_radiative_layer_2D`; override from the CLI if needed:
7+
# Example override: `python script.py data=the_well data.well_dataset_name=turbulent_radiative_layer_2D`
8+
well_dataset_name: turbulent_radiative_layer_2D
9+
10+
# Path to The Well datasets. Read from the `AUTOCAST_DATASETS` environment variable (falls back to ./datasets).
11+
# Example: export AUTOCAST_DATASETS=/path/to/autocast/datasets
12+
well_base_path: ${oc.env:AUTOCAST_DATASETS,./datasets}
13+
14+
# Temporal configuration
15+
n_steps_input: 1
16+
n_steps_output: 4
17+
min_dt_stride: 1
18+
max_dt_stride: 1
19+
20+
# DataLoader configuration
21+
batch_size: 16
22+
23+
# Normalization
24+
use_normalization: true
25+
normalization_path: ../stats.yaml # resolved relative to the dataset directory
26+
27+
# Mode configuration - can be overridden in autoencoder.yaml
28+
autoencoder_mode: false
29+
30+
# Additional TheWell parameters
31+
max_rollout_steps: 100
32+
flatten_tensors: true
33+
cache_small: true
34+
max_cache_size: 1.0e9
35+
return_grid: true
36+
boundary_return_type: padding

configs/decoder/identity.yaml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
_target_: autocast.decoders.identity.IdentityDecoder

configs/encoder/identity.yaml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
_target_: autocast.encoders.identity.IdentityEncoder
Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,12 @@
1+
defaults:
2+
- /encoder: identity
3+
- /decoder: identity
4+
- /processor: flow_matching
5+
- _self_
6+
7+
learning_rate: 0.001
8+
train_processor_only: false
9+
teacher_forcing_ratio: 0.5
10+
max_rollout_steps: 10
11+
loss_func:
12+
_target_: torch.nn.MSELoss

configs/processor.yaml

Lines changed: 3 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,6 @@
11
defaults:
22
- data: reaction_diffusion
3-
- encoder: permute_concat
4-
- decoder: channels_last
5-
- processor: fno
3+
- model: encoder_processor_decoder
64
- trainer: default
75
- logging: wandb
86
- _self_
@@ -14,18 +12,12 @@ output:
1412
save_config: true
1513

1614
training:
17-
n_steps_input: 4
15+
n_steps_input: 1
1816
n_steps_output: 4
19-
stride: 4
17+
stride: null
2018
autoencoder_checkpoint: null
2119
freeze_autoencoder: false
2220

23-
encoder_processor_decoder:
24-
learning_rate: 0.001
25-
train_processor_only: false
26-
loss_func:
27-
_target_: torch.nn.MSELoss
28-
2921
hydra:
3022
run:
3123
dir: outputs/${experiment_name}/${now:%Y-%m-%d_%H-%M-%S}

configs/processor/diffusion.yaml

Lines changed: 22 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,22 @@
1+
_target_: autocast.processors.diffusion.DiffusionProcessor
2+
backbone:
3+
_target_: autocast.nn.unet.TemporalUNetBackbone
4+
in_channels: null
5+
out_channels: null
6+
cond_channels: null
7+
mod_features: 256
8+
hid_channels: [32, 64, 128]
9+
hid_blocks: [2, 2, 2]
10+
spatial: 2
11+
periodic: false
12+
schedule:
13+
_target_: azula.noise.VPSchedule
14+
denoiser_type: karras
15+
teacher_forcing_ratio: 0.0
16+
stride: ${training.stride}
17+
max_rollout_steps: ${training.n_steps_output}
18+
learning_rate: 0.0001
19+
n_steps_output: null
20+
n_channels_out: null
21+
sampler_steps: 50
22+
sampler: euler
Lines changed: 18 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,18 @@
1+
_target_: autocast.processors.flow_matching.FlowMatchingProcessor
2+
stride: ${training.stride}
3+
teacher_forcing_ratio: 0.0
4+
max_rollout_steps: ${training.n_steps_output}
5+
learning_rate: 0.0001
6+
flow_ode_steps: 4
7+
n_steps_output: null
8+
n_channels_out: null
9+
backbone:
10+
_target_: autocast.nn.unet.TemporalUNetBackbone
11+
in_channels: null
12+
out_channels: null
13+
cond_channels: null
14+
mod_features: 256
15+
hid_channels: [32, 64, 128]
16+
hid_blocks: [2, 2, 2]
17+
spatial: 2
18+
periodic: false

0 commit comments

Comments
 (0)