Replies: 1 comment 4 replies
Reply: What is the PDE?
Dear Dr. Lu and the community,

I dove into PINNs with great enthusiasm when I first discovered them, believing they could solve a number of problems that could not be addressed before. That enthusiasm has largely been worn down by unexpected results.

I want to use a PINN to identify the wave speed c of a one-dimensional wave equation. I have tried many cases with small domains, large training sets, and different loss weights, but I cannot recover even the simplest case, c = 1.0: the fit to the observed data is good, but the identified parameter is not.
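For clarity, the governing PDE (written to match the residual `C1 * du_xx - du_tt` in the code below, where `C1` plays the role of the squared wave speed) is the one-dimensional wave equation

$$\frac{\partial^2 u}{\partial t^2} = C_1 \frac{\partial^2 u}{\partial x^2}, \qquad C_1 = c^2, \qquad x \in [0, 1],\ t \in [0, 1],$$

so with the true speed c = 1.0, the identified `C1` should converge to 1. My full script follows.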
```python
###############################################################################
# Import modules
import deepxde as dde
import matplotlib.pyplot as plt
import numpy as np
from deepxde.backend import tf
import re

###############################################################################
# True parameter value
alpha_true = 1

# Load measured displacements
wvals = np.loadtxt("Euler_beam_inv.txt")
# Time grid
time_s = np.linspace(0, 1, 1001)
# Wavefields
# uvals = -1 + (wvals - np.min(wvals)) * 2 / (np.max(wvals) - np.min(wvals))
uvals = wvals.T
# Space grid
loc_s = np.linspace(0, 1, 1001)

"""
Plot the data
fig, ax = plt.subplots()
ax.plot(loc_s, uvals[600, ...], linewidth=2.0)
plt.gca().set_xlabel('$x$ (m)', fontdict={'family': 'Times New Roman', 'size': 16})
fig, ax = plt.subplots()
ax.plot(time_s, uvals[..., 600], linewidth=2.0)
plt.gca().set_xlabel('$t$ (s)', fontdict={'family': 'Times New Roman', 'size': 16})
X, Y = np.meshgrid(loc_s, time_s)
fig = plt.figure(figsize=(8, 6))
plt.pcolor(X, Y, uvals, cmap='jet')
plt.colorbar()
plt.gca().set_ylabel('$t$ (s)', fontdict={'family': 'Times New Roman', 'size': 16})
plt.gca().set_xlabel('$x$ (m)', fontdict={'family': 'Times New Roman', 'size': 16})
plt.show()
"""

###############################################################################
# Computational domain
x1 = 0.0
x2 = 1.0
t1 = 0.0
t2 = 1.0

# Parameter to be identified (C1 = c^2)
C1 = dde.Variable(np.random.rand())


def gen_traindata(num):
    xvals = loc_s.reshape(-1, 1)
    tvals = time_s.reshape(-1, 1)
    X, T = np.meshgrid(xvals, tvals)
    X = X.flatten()[:, None]
    T = T.flatten()[:, None]
    # Training domain: x in [x1, x2] and t in [t1, t2]
    data1 = np.concatenate([X, T, uvals.flatten()[:, None]], 1)
    data2 = data1[data1[:, 1] <= t2]
    data3 = data2[data2[:, 1] >= t1]
    data4 = data3[data3[:, 0] <= x2]
    data_domain = data4[data4[:, 0] >= x1]
    # Randomly choose `num` training points
    idx = np.random.choice(data_domain.shape[0], num, replace=False)
    x_train = data_domain[idx, 0:1]
    t_train = data_domain[idx, 1:2]
    u_train = data_domain[idx, 2:3]
    # Scale x and t to [0, 1] and u to [-1, 1]
    return [
        x_train / x2,
        t_train / t2,
        -1 + (u_train - np.min(u_train)) * 2 / (np.max(u_train) - np.min(u_train)),
    ]


# PDE residual: C1 * u_xx - u_tt = 0
def pde(x, y):
    du_tt = dde.grad.hessian(y, x, i=1, j=1)
    du_xx = dde.grad.hessian(y, x, i=0, j=0)
    du_xxxx = dde.grad.hessian(du_xx, x, i=0, j=0)  # only used by the beam variant
    # return [C1 * du_xxxx + du_tt]  # Euler beam variant
    return [C1 * du_xx - du_tt]


# Training data
ob_x, ob_t, ob_u = gen_traindata(5000)
ob_xt = np.hstack((ob_x, ob_t))
observe_u = dde.icbc.PointSetBC(ob_xt, ob_u, component=0)

# Define the geometry
geom = dde.geometry.Interval(x1 / x2, 1)
timedomain = dde.geometry.TimeDomain(t1 / t2, 1)
geomtime = dde.geometry.GeometryXTime(geom, timedomain)

# Define the PDE problem
data = dde.data.TimePDE(
    geomtime,
    pde,
    [observe_u],
    num_domain=5000,
    anchors=ob_xt,
    num_test=1000,
)

# Define the network
layer_size = [2] + [100] * 3 + [1]
activation = "tanh"
initializer = "Glorot uniform"
net = dde.nn.FNN(layer_size, activation, initializer)

# Build the model
model = dde.Model(data, net)

###############################################################################
# Callback for recording C1 during training
fnamevar = "variables.dat"
variable = dde.callbacks.VariableValue(C1, period=10, filename=fnamevar)

# Compile and train: Adam first, then L-BFGS-B
model.compile("adam", lr=0.0001, loss_weights=[1, 1000], external_trainable_variables=C1)
losshistory, train_state = model.train(
    epochs=10000, callbacks=[variable], display_every=1000, disregard_previous_best=True
)
model.compile("L-BFGS-B", lr=0.0001, loss_weights=[1, 1000], external_trainable_variables=C1)
losshistory, train_state = model.train(
    epochs=20000, callbacks=[variable], display_every=1000, disregard_previous_best=True
)

# Save results
dde.saveplot(losshistory, train_state, issave=True, isplot=True)

# View results
X_test, T_test, y_true = gen_traindata(5000)
ob_xt = np.hstack((X_test, T_test))
y_pred = model.predict(ob_xt)
np.savetxt("test2.dat", np.hstack((X_test, y_true, y_pred)))

fig = plt.figure()
ax = plt.axes(projection='3d')
ax.scatter3D(X_test, T_test, y_true, label="u_true")
plt.gca().set_ylabel('$t$ (s)', fontdict={'family': 'Times New Roman', 'size': 16})
plt.gca().set_xlabel('$x$ (m)', fontdict={'family': 'Times New Roman', 'size': 16})
plt.show()

ax = plt.axes(projection='3d')
ax.scatter3D(X_test, T_test, y_pred, label="u_pred")
plt.gca().set_ylabel('$t$ (s)', fontdict={'family': 'Times New Roman', 'size': 16})
plt.gca().set_xlabel('$x$ (m)', fontdict={'family': 'Times New Roman', 'size': 16})
plt.show()

# Plot the identified variable: reopen the data saved by the VariableValue callback
lines = open(fnamevar, "r").readlines()
Chat = np.array(
    [
        np.fromstring(
            min(re.findall(re.escape("[") + "(.*?)" + re.escape("]"), line), key=len),
            sep=",",
        )
        for line in lines
    ]
)
l, c = Chat.shape
steps = 10 * np.arange(l)  # the callback records C1 every `period` = 10 iterations
fig = plt.figure()
plt.plot(steps, Chat[:, 0], "r.")
plt.plot(steps, np.ones(Chat[:, 0].shape) * alpha_true, "k--")
plt.legend(["C1hat", "True C1"], loc="right")
plt.xlabel("Iterations")
plt.title("Variables")
plt.show()
```
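For a quick numerical check of the identified parameter, I also print the last value recorded by the callback. This is a minimal sketch, assuming `VariableValue.get_value()` returns the most recently recorded values; `variable` and `alpha_true` come from the script above:

```python
# Quick check: the latest C1 value recorded by the VariableValue callback.
# Assumes `variable` and `alpha_true` are defined as in the script above.
c1_hat = variable.get_value()
print("Identified C1:", c1_hat, "| true value:", alpha_true)
```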
The training log is as follows.
```
Warning: epochs is deprecated and will be removed in a future version. Use iterations instead.
Initializing variables...
Training model...

Step      Train loss              Test loss               Test metric
0         [1.47e-05, 5.09e+02]    [1.42e-05, 5.09e+02]    []
1000      [4.03e+01, 1.44e+01]    [4.09e+01, 1.44e+01]    []
2000      [1.74e+01, 9.79e+00]    [1.80e+01, 9.79e+00]    []
3000      [1.12e+01, 6.75e+00]    [1.17e+01, 6.75e+00]    []
4000      [7.11e+00, 4.39e+00]    [7.39e+00, 4.39e+00]    []
5000      [4.16e+00, 2.62e+00]    [4.32e+00, 2.62e+00]    []
6000      [2.13e+00, 1.35e+00]    [2.21e+00, 1.35e+00]    []
7000      [9.04e-01, 6.04e-01]    [9.35e-01, 6.04e-01]    []
8000      [2.92e-01, 2.24e-01]    [3.01e-01, 2.24e-01]    []
9000      [6.95e-02, 6.85e-02]    [7.02e-02, 6.85e-02]    []
10000     [1.83e-02, 2.60e-02]    [1.76e-02, 2.60e-02]    []

Best model at step 10000:
  train loss: 4.43e-02
  test loss: 4.36e-02
  test metric: []

'train' took 104.251099 s

Compiling model...
Warning: For the backend tensorflow.compat.v1, external_trainable_variables is ignored, and all trainable tf.Variable objects are automatically collected.
Warning: learning rate is ignored for L-BFGS-B
'compile' took 1.542729 s

Warning: epochs is deprecated and will be removed in a future version. Use iterations instead.
Training model...

Step      Train loss              Test loss               Test metric
10000     [1.83e-02, 2.60e-02]    [1.76e-02, 2.60e-02]    []
11000     [4.99e-05, 1.93e-04]
INFO:tensorflow:Optimization terminated with:
  Message: CONVERGENCE: REL_REDUCTION_OF_F_<=_FACTR*EPSMCH
  Objective function value: 0.000243
  Number of iterations: 830
  Number of functions evaluations: 1001
11001     [4.99e-05, 1.93e-04]    [4.44e-05, 1.93e-04]    []

Best model at step 11001:
  train loss: 2.43e-04
  test loss: 2.37e-04
  test metric: []

'train' took 88.279585 s
```
Thank you!