|
28 | 28 | "outputs": [], |
29 | 29 | "source": [ |
30 | 30 | "\n", |
31 | | - "from autoemulate.simulations.advection_diffusion import AdvectionDiffusion\n", |
| 31 | + "from autoemulate.simulations.reaction_diffusion import ReactionDiffusion as Sim\n", |
32 | 32 | "\n", |
33 | | - "sim = AdvectionDiffusion(return_timeseries=True, log_level=\"error\")\n", |
| 33 | + "sim = Sim(return_timeseries=True, log_level=\"error\")\n", |
34 | 34 | "\n", |
35 | | - "def generate_split(\n", |
36 | | - " simulator: AdvectionDiffusion, n_train: int = 4, n_valid: int = 2, n_test: int = 2\n", |
37 | | - "):\n", |
| 35 | + "def generate_split(simulator: Sim, n_train: int = 1, n_valid: int = 1, n_test: int = 1):\n", |
38 | 36 | " \"\"\"Generate training, validation, and test splits from the simulator.\"\"\"\n", |
39 | 37 | " train = simulator.forward_samples_spatiotemporal(n_train)\n", |
40 | 38 | " valid = simulator.forward_samples_spatiotemporal(n_valid)\n", |
|
62 | 60 | "source": [ |
63 | 61 | "from auto_cast.data.datamodule import SpatioTemporalDataModule\n", |
64 | 62 | "\n", |
| 63 | + "n_steps_input = 4\n", |
| 64 | + "n_steps_output = 1\n", |
65 | 65 | "datamodule = SpatioTemporalDataModule(\n", |
66 | | - " data=combined_data, data_path=None, n_steps_input=4, n_steps_output=1, batch_size=16\n", |
| 66 | + " data=combined_data, data_path=None, n_steps_input=n_steps_input, n_steps_output=n_steps_output, batch_size=16\n", |
67 | 67 | ")" |
68 | 68 | ] |
69 | 69 | }, |
|
100 | 100 | "from auto_cast.models.encoder_processor_decoder import EncoderProcessorDecoder\n", |
101 | 101 | "from auto_cast.nn.fno import FNOProcessor\n", |
102 | 102 | "\n", |
| 103 | + "batch = next(iter(datamodule.train_dataloader()))\n", |
| 104 | + "n_channels = batch.input_fields.shape[-1]\n", |
103 | 105 | "processor = FNOProcessor(\n", |
104 | | - " in_channels=1, out_channels=1, n_modes=(16, 16, 1), hidden_channels=64\n", |
| 106 | + " in_channels=n_channels * n_steps_input,\n", |
| 107 | + " out_channels=n_channels * n_steps_output,\n", |
| 108 | + " n_modes=(16, 16),\n", |
| 109 | + " hidden_channels=64,\n", |
105 | 110 | ")\n", |
106 | 111 | "encoder = PermuteConcat(with_constants=False)\n", |
107 | 112 | "decoder = ChannelsLast()\n", |
|
113 | 118 | ] |
114 | 119 | }, |
115 | 120 | { |
116 | | - "cell_type": "markdown", |
| 121 | + "cell_type": "code", |
| 122 | + "execution_count": null, |
117 | 123 | "id": "8", |
118 | 124 | "metadata": {}, |
| 125 | + "outputs": [], |
| 126 | + "source": [ |
| 127 | + "model(batch).shape" |
| 128 | + ] |
| 129 | + }, |
| 130 | + { |
| 131 | + "cell_type": "markdown", |
| 132 | + "id": "9", |
| 133 | + "metadata": {}, |
119 | 134 | "source": [ |
120 | 135 | "### Run trainer\n" |
121 | 136 | ] |
122 | 137 | }, |
123 | 138 | { |
124 | 139 | "cell_type": "code", |
125 | 140 | "execution_count": null, |
126 | | - "id": "9", |
| 141 | + "id": "10", |
127 | 142 | "metadata": {}, |
128 | 143 | "outputs": [], |
129 | 144 | "source": [ |
130 | 145 | "import lightning as L\n", |
131 | 146 | "\n", |
132 | | - "device = \"mps\" # \"cpu\"\n", |
133 | | - "trainer = L.Trainer(max_epochs=5, accelerator=device, log_every_n_steps=10)\n", |
| 147 | + "# device = \"mps\" # \"cpu\"\n", |
| 148 | + "device = \"cpu\"\n", |
| 149 | + "trainer = L.Trainer(max_epochs=1, accelerator=device, log_every_n_steps=10)\n", |
134 | 150 | "trainer.fit(model, datamodule.train_dataloader(), datamodule.val_dataloader())" |
135 | 151 | ] |
136 | 152 | }, |
137 | 153 | { |
138 | 154 | "cell_type": "markdown", |
139 | | - "id": "10", |
| 155 | + "id": "11", |
140 | 156 | "metadata": {}, |
141 | 157 | "source": [ |
142 | 158 | "### Run the evaluation" |
|
145 | 161 | { |
146 | 162 | "cell_type": "code", |
147 | 163 | "execution_count": null, |
148 | | - "id": "11", |
| 164 | + "id": "12", |
149 | 165 | "metadata": {}, |
150 | 166 | "outputs": [], |
151 | 167 | "source": [ |
152 | 168 | "trainer.test(model, datamodule.test_dataloader())" |
153 | 169 | ] |
| 170 | + }, |
| 171 | + { |
| 172 | + "cell_type": "markdown", |
| 173 | + "id": "13", |
| 174 | + "metadata": {}, |
| 175 | + "source": [ |
| 176 | + "### Example rollout" |
| 177 | + ] |
| 178 | + }, |
| 179 | + { |
| 180 | + "cell_type": "code", |
| 181 | + "execution_count": null, |
| 182 | + "id": "14", |
| 183 | + "metadata": {}, |
| 184 | + "outputs": [], |
| 185 | + "source": [ |
| 186 | + "# A single element is the full trajectory\n", |
| 187 | + "batch = next(iter(datamodule.rollout_test_dataloader()))" |
| 188 | + ] |
| 189 | + }, |
| 190 | + { |
| 191 | + "cell_type": "code", |
| 192 | + "execution_count": null, |
| 193 | + "id": "15", |
| 194 | + "metadata": {}, |
| 195 | + "outputs": [], |
| 196 | + "source": [ |
| 197 | + "# First n_steps_input are inputs\n", |
| 198 | + "print(batch.input_fields.shape)\n", |
| 199 | + "# Remaining n_steps_output are outputs\n", |
| 200 | + "print(batch.output_fields.shape)" |
| 201 | + ] |
| 202 | + }, |
| 203 | + { |
| 204 | + "cell_type": "code", |
| 205 | + "execution_count": null, |
| 206 | + "id": "16", |
| 207 | + "metadata": {}, |
| 208 | + "outputs": [], |
| 209 | + "source": [ |
| 210 | + "# Run rollout on one trajectory\n", |
| 211 | + "preds, trues = model.rollout(batch)" |
| 212 | + ] |
| 213 | + }, |
| 214 | + { |
| 215 | + "cell_type": "code", |
| 216 | + "execution_count": null, |
| 217 | + "id": "17", |
| 218 | + "metadata": {}, |
| 219 | + "outputs": [], |
| 220 | + "source": [ |
| 221 | + "print(preds.shape)" |
| 222 | + ] |
| 223 | + }, |
| 224 | + { |
| 225 | + "cell_type": "code", |
| 226 | + "execution_count": null, |
| 227 | + "id": "18", |
| 228 | + "metadata": {}, |
| 229 | + "outputs": [], |
| 230 | + "source": [ |
| 231 | + "print(trues.shape)" |
| 232 | + ] |
154 | 233 | } |
155 | 234 | ], |
156 | 235 | "metadata": { |
|
0 commit comments