
Commit 38edaa5

fix sampling bug
1 parent 4417e0f commit 38edaa5


2 files changed: 21 additions & 16 deletions


tensordiffeq/boundaries.py

Lines changed: 4 additions & 2 deletions
@@ -153,8 +153,10 @@ def create_target(self):
                 arg_list.append(get_linspace(var_dict))
             inp = flatten_and_stack(multimesh(arg_list))
             fun_vals.append(self.fun[i](*inp.T))
-
-        self.val = convertTensor(np.reshape(fun_vals, (-1, 1))[self.nums])
+        if self.n_values is not None:
+            self.val = convertTensor(np.reshape(fun_vals, (-1, 1))[self.nums])
+        else:
+            self.val = convertTensor(np.reshape(fun_vals, (-1, 1)))
 
     def loss(self):
         return MSE(self.preds, self.val)
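
In short, the sampling bug was that the flattened target values were always indexed with self.nums, even when no sample count had been requested; the new branch only subsamples when n_values is set. A minimal NumPy sketch of the corrected behaviour (build_target, fun_vals, nums, and n_values are stand-ins for the class attributes, and the convertTensor wrapping is omitted):

import numpy as np

def build_target(fun_vals, nums, n_values):
    # Flatten the evaluated boundary/initial-condition values into one column vector.
    vals = np.reshape(fun_vals, (-1, 1))
    if n_values is not None:
        # A sample count was requested: keep only the rows matching the sampled inputs.
        return vals[nums]
    # No sampling requested: keep every target value.
    return vals

fun_vals = np.sin(np.linspace(0.0, 1.0, 10))  # example target values on the mesh
nums = np.array([0, 3, 7])                    # indices a sampler might have drawn
print(build_target(fun_vals, nums, n_values=3).shape)     # (3, 1)
print(build_target(fun_vals, nums, n_values=None).shape)  # (10, 1)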

tensordiffeq/models.py

Lines changed: 17 additions & 14 deletions
@@ -28,19 +28,18 @@ def compile(self, layer_sizes, f_model, domain, bcs, isAdaptive=False,
         self.X_f_dims = tf.shape(self.domain.X_f)
         self.X_f_len = tf.slice(self.X_f_dims, [0], [1]).numpy()
         # must explicitly cast data into tf.float32 for stability
-        tmp = [tf.cast(np.reshape(vec, (-1,1)), tf.float32) for i, vec in enumerate(self.domain.X_f.T)]
+        tmp = [tf.cast(np.reshape(vec, (-1, 1)), tf.float32) for i, vec in enumerate(self.domain.X_f.T)]
         self.X_f_in = np.asarray(tmp)
         self.u_model = neural_net(self.layer_sizes)
 
-
         if isAdaptive:
             self.isAdaptive = True
             if self.col_weights is None and self.u_weights is None:
                 raise Exception("Adaptive weights selected but no inputs were specified!")
         if (
-            not isAdaptive
-            and self.col_weights is not None
-            and self.u_weights is not None
+                not isAdaptive
+                and self.col_weights is not None
+                and self.u_weights is not None
         ):
             raise Exception(
                 "Adaptive weights are turned off but weight vectors were provided. Set the weight vectors to "
@@ -83,21 +82,24 @@ def update_loss(self):
         f_u_pred = self.f_model(self.u_model, *self.X_f_in)
 
         if self.isAdaptive:
-            mse_f_u = MSE(f_u_pred, constant(0.0), self.col_weights)
+            if self.g is not None:
+                mse_f_u = g_MSE(f_u_pred, constant(0.0), self.g(self.col_weights))
+            else:
+                mse_f_u = MSE(f_u_pred, constant(0.0), self.col_weights)
         else:
             mse_f_u = MSE(f_u_pred, constant(0.0))
 
         loss_tmp = tf.math.add(loss_tmp, mse_f_u)
         return loss_tmp
 
-    #@tf.function
+    # @tf.function
     def grad(self):
         with tf.GradientTape() as tape:
             loss_value = self.update_loss()
             grads = tape.gradient(loss_value, self.variables)
         return loss_value, grads
 
-    def fit(self, tf_iter = 0, newton_iter = 0, batch_sz=None, newton_eager=True):
+    def fit(self, tf_iter=0, newton_iter=0, batch_sz=None, newton_eager=True):
         if self.isAdaptive and (batch_sz is not None):
             raise Exception("Currently we dont support minibatching for adaptive PINNs")
         if self.dist:
@@ -141,10 +143,10 @@ def fit(self, tf_iter = 0, newton_iter = 0, batch_sz=None, newton_eager=True):
             self.u_model = neural_net(self.layer_sizes)
             self.tf_optimizer = tf.keras.optimizers.Adam(lr=0.005, beta_1=.99)
             self.tf_optimizer_weights = tf.keras.optimizers.Adam(lr=0.005, beta_1=.99)
-        # self.dist_col_weights = tf.Variable(tf.zeros(batch_sz), validate_shape=True)
+            # self.dist_col_weights = tf.Variable(tf.zeros(batch_sz), validate_shape=True)
 
             if self.isAdaptive:
-            # self.col_weights = tf.Variable(tf.random.uniform([self.batch_sz, 1]))
+                # self.col_weights = tf.Variable(tf.random.uniform([self.batch_sz, 1]))
                 self.u_weights = tf.Variable(self.u_weights)
 
             fit_dist(self, tf_iter=tf_iter, newton_iter=newton_iter, batch_sz=batch_sz, newton_eager=newton_eager)
@@ -172,7 +174,7 @@ def predict(self, X_star):
         u_star = self.u_model(X_star)
         # split data into tuples for ND support
         # must explicitly cast data into tf.float32 for stability
-        tmp = [tf.cast(np.reshape(vec, (-1,1)), tf.float32) for i, vec in enumerate(X_star.T)]
+        tmp = [tf.cast(np.reshape(vec, (-1, 1)), tf.float32) for i, vec in enumerate(X_star.T)]
         X_star = np.asarray(tmp)
         X_star = tuple(X_star)
         f_u_star = self.f_model(self.u_model, *X_star)
@@ -194,10 +196,11 @@ def compile(self, layer_sizes, f_model, X, u, var, col_weights=None):
         self.tf_optimizer_vars = tf.keras.optimizers.Adam(lr=0.005, beta_1=.99)
         self.tf_optimizer_weights = tf.keras.optimizers.Adam(lr=0.005, beta_1=.99)
         self.col_weights = col_weights
-        #tmp = [np.reshape(vec, (-1,1)) for i, vec in enumerate(self.X)]
+        # tmp = [np.reshape(vec, (-1,1)) for i, vec in enumerate(self.X)]
         self.X_in = tuple(X)
-        #self.X_in = np.asarray(tmp).T
-        # print(np.shape(self.X_in))
+        # self.X_in = np.asarray(tmp).T
+
+        # print(np.shape(self.X_in))
 
     @tf.function
     def loss(self):
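
Beyond the formatting cleanup, the substantive change in models.py is in update_loss: when self-adaptive weighting is enabled and a weight transform g has been supplied, the collocation residual loss is routed through g_MSE with g(self.col_weights) instead of the plain weighted MSE. A rough, self-contained sketch of that dispatch (MSE and g_MSE below are simplified stand-ins for tensordiffeq's helpers, and residual_loss is a hypothetical free function, not the library API):

import tensorflow as tf

def MSE(pred, target, weights=None):
    # Mean squared error, optionally weighted per collocation point.
    sq = tf.square(pred - target)
    return tf.reduce_mean(weights * sq) if weights is not None else tf.reduce_mean(sq)

def g_MSE(pred, target, g_weights):
    # Same error, but the weights have already been passed through the user transform g.
    return tf.reduce_mean(g_weights * tf.square(pred - target))

def residual_loss(f_u_pred, col_weights, is_adaptive, g=None):
    zero = tf.constant(0.0)
    if is_adaptive:
        if g is not None:
            return g_MSE(f_u_pred, zero, g(col_weights))  # transformed self-adaptive weights
        return MSE(f_u_pred, zero, col_weights)           # raw self-adaptive weights
    return MSE(f_u_pred, zero)                            # non-adaptive, unweighted residual

f_u_pred = tf.random.normal([100, 1])                     # fake PDE residuals
col_weights = tf.Variable(tf.random.uniform([100, 1]))    # per-point adaptive weights
print(residual_loss(f_u_pred, col_weights, is_adaptive=True, g=tf.square))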
