Commit 67438f0

fix more comments; update readme-page
1 parent 23bbb2b

File tree

4 files changed (+103, -93 lines)

README.md

Lines changed: 3 additions & 3 deletions
@@ -43,7 +43,7 @@ solver = Solver(equation=pde, ndims=2, boundary_condition=1,
 
```
 
-Note that we defined the architecture of the neural network by supplying the `layout`, `activation` and `units` parameters. Here `layout` configures the sequence of layers: `fa fa fa f` stands for a `f`ully connected architecture with four layers and three `a`ctivations. In turn, `units` and `activation` control the number of units in the dense layers and the activation function. When defining a neural network this way, use [`ConvBlock`](https://analysiscenter.github.io/batchflow/api/batchflow.models.torch.layers.html?highlight=baseconvblock#batchflow.models.torch.layers.BaseConvBlock) from [`BatchFlow`](https://github.com/analysiscenter/batchflow). Check out its capabilities to learn more: say, building a network with attention and skip connections.
+Note that we defined the architecture of the neural network by supplying the `layout`, `activation` and `units` parameters. Here `layout` configures the sequence of layers: `fa fa fa f` stands for a `f`ully connected architecture with four layers and three `a`ctivations. In turn, `units` and `activation` control the number of units in the dense layers and the activation function. When defining a neural network this way, use [`ConvBlock`](https://analysiscenter.github.io/batchflow/api/batchflow.models.torch.layers.html?highlight=baseconvblock#batchflow.models.torch.layers.BaseConvBlock) from [`BatchFlow`](https://github.com/analysiscenter/batchflow).
 
It's time to run the optimization procedure
 
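For context, here is a minimal sketch of the setup this hunk documents, assembled from the fragments visible in the diff. The equation body, the `units` values and the token import are illustrative assumptions, not part of the commit:

```python
import numpy as np
import torch
from pydens import Solver, D  # assumes pydens exposes a derivative token `D`

# Hypothetical 2D Poisson-type residual; any callable of (u, x, y) would do.
def pde(u, x, y):
    return D(D(u, x), x) + D(D(u, y), y) - 5 * torch.sin(np.pi * (x + y))

# 'fa fa fa f': four fully connected layers ('f') with three activations ('a');
# `units` sets the width of each dense layer, `activation` the nonlinearity.
solver = Solver(equation=pde, ndims=2, boundary_condition=1,
                layout='fa fa fa f', activation='Tanh', units=[15, 25, 15, 1])

# The optimization procedure mentioned just below in the README.
solver.fit(batch_size=100, niters=1500)
```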
@@ -82,14 +82,14 @@ def odeparam(f, x, e):
s = NumpySampler('uniform') & NumpySampler('uniform', low=1, high=5)
 
solver = Solver(equation=odeparam, ndims=1, nparams=1, initial_condition=1)
-solver.fit(batch_size=1000, sampler=s, niters=5000)
+solver.fit(batch_size=1000, sampler=s, niters=5000, lr=0.01)
# solving the whole family takes no more than a couple of seconds!
```
 
Check out the result:
 
<p align="center">
-<img src="https://raw.githubusercontent.com/analysiscenter/pydens/master/imgs/sinus_sol.gif?invert_in_darkmode" align=middle height=250.973825pt/>
+<img src="https://raw.githubusercontent.com/analysiscenter/pydens/master/imgs/sinus_parametric.gif?invert_in_darkmode" align=middle height=250.973825pt/>
</p>
 
### Solving PDEs with trainable coefficients
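As a side note, a short sketch of how the joined sampler in this hunk behaves, assuming `NumpySampler` follows the BatchFlow interface (`&` pairs samplers coordinate-wise, `sample(n)` draws n points):

```python
from pydens import NumpySampler  # assumed re-export from BatchFlow, as in the README

# x ~ U(0, 1) is the equation variable, e ~ U(1, 5) is the family parameter.
s = NumpySampler('uniform') & NumpySampler('uniform', low=1, high=5)
points = s.sample(3)  # expected shape (3, 2): columns are (x, e)
```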

imgs/sinus_parametric.gif

6.32 MB

pydens/model_torch.py

Lines changed: 8 additions & 8 deletions
@@ -42,7 +42,6 @@ def __init__(self, initial_condition=None, boundary_condition=None, ndims=1, npa
    @abstractmethod
    def forward(self, xs):
        """ Forward of the model-network. """
-        pass
 
    def freeze_trainable(self, layers=None, variables=None):
        """ Freeze layers and trainable variables.
@@ -139,7 +138,8 @@ def reshape_and_concat(cls, tensors):
        """ Cast, reshape and concatenate sequence of incoming tensors. """
        # Determine batch size as max-len of a tensor.
        xs = list(tensors)
-        batch_size = np.max([tensor.shape[0] for tensor in xs if isinstance(tensor, (np.ndarray, torch.Tensor))])
+        sizes = [tensor.shape[0] for tensor in xs if isinstance(tensor, (np.ndarray, torch.Tensor))]
+        batch_size = np.max(sizes) if len(sizes) > 0 else 1
 
        # Perform cast and reshape of all tensors in the list.
        for i, x in enumerate(xs):
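The point of this change: when every incoming value is a plain Python scalar, the list comprehension is empty and the old `np.max([...])` raised a ValueError; the patch falls back to a batch size of 1. A standalone sketch (the helper name is hypothetical):

```python
import numpy as np
import torch

def infer_batch_size(tensors):
    # Mirrors the patched logic above.
    sizes = [t.shape[0] for t in tensors if isinstance(t, (np.ndarray, torch.Tensor))]
    return np.max(sizes) if len(sizes) > 0 else 1

print(infer_batch_size([np.zeros((16, 1)), 0.5]))  # 16
print(infer_batch_size([0.5, 2.0]))                # 1 (the old code raised here)
```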
@@ -180,7 +180,7 @@ def __init__(self, equation, model=ConvBlockModel, constraints=None, **kwargs):
        _ = self.ctx.run(self.equation, u_hat, *xs)
 
 
-    def fit(self, niters, batch_size, sampler=None, losses='equation', optimizer='Adam', criterion=nn.MSELoss(),
+    def fit(self, niters, batch_size, sampler=None, loss_terms='equation', optimizer='Adam', criterion=nn.MSELoss(),
            lr=0.001, **kwargs):
        """ Fit the model inside the solver-instance. """
        # Initialize the optimizer if supplied.
@@ -206,13 +206,13 @@ def fit(self, niters, batch_size, sampler=None, losses='equation', optimizer='Ad
            u_hat = self.ctx.run(self.model, xs_concat)
 
            # Compute loss: form it summing equation-loss and constraints-loss.
-            losses = losses if isinstance(losses, (tuple, list)) else (losses, )
-            nums_constraints = [int(loss_name.replace('constraint', '').replace('_', ''))
-                                for loss_name in losses if 'constraint' in loss_name]
+            loss_terms = loss_terms if isinstance(loss_terms, (tuple, list)) else (loss_terms, )
+            nums_constraints = [int(term_name.replace('constraint', '').replace('_', ''))
+                                for term_name in loss_terms if 'constraint' in term_name]
            loss = 0
 
            # Include equation loss.
-            if 'equation' in losses:
+            if 'equation' in loss_terms:
                loss += criterion(self.ctx.run(self.equation, u_hat, *xs), torch.zeros_like(xs[0]))
 
            # Include additional constraints' loss.
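The rename avoids shadowing the `self.losses` history list that the training loop appends to below. A hedged usage sketch of the new keyword (solver setup assumed from the README; constraint names follow the `'constraint_<i>'` pattern the parsing above expects):

```python
# Default: train on the equation residual only.
solver.fit(niters=2000, batch_size=512)

# Equation residual plus the first registered constraint ('constraint_0'
# is parsed to constraint index 0 by the code above), with an explicit lr.
solver.fit(niters=2000, batch_size=512,
           loss_terms=('equation', 'constraint_0'), lr=0.005)
```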
@@ -231,7 +231,7 @@ def _forward(*xs):
            # Gather and store training stats.
            self.losses.append(loss.detach().cpu().numpy())
 
-    def solve(self, *xs):
+    def predict(self, *xs):
        """ Get approximation to a solution in a set of points.
        Points are given by a list of tensors.
        """
