Dear Dr. Lu and the DeepXDE community:

I am using DeepXDE to solve the axisymmetric Lamé equation. This PDE is singular at r = 0, and the PDE loss tends to infinity when the original form of the equation is used. I therefore transformed the PDE by multiplying each equation through by r², which clears the singular 1/r and 1/r² terms. Based on the transformed PDEs, I normalize the domain and insert extra nodes on the boundary and in the stress-concentration region. I impose the left (r = 0) and bottom boundary conditions as hard constraints, while the top boundary condition is imposed only as a soft constraint. The code runs, but it does not produce the expected answer. Is the transformation of the PDE reasonable, and if so, why is the result still incorrect?
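Concretely, with u the radial displacement, w the axial displacement, and a1 = λ/G + 1 as defined in the code below, the axisymmetric Lamé (Navier) equations divided by the shear modulus G read

$$(a_1+1)\left(u_{rr}+\frac{u_r}{r}-\frac{u}{r^2}\right)+u_{zz}+a_1\,w_{rz}=0,$$

$$(a_1+1)\,w_{zz}+w_{rr}+\frac{w_r}{r}+a_1\left(u_{rz}+\frac{u_z}{r}\right)=0.$$

Multiplying both equations by r² gives the residuals implemented as loss1 and loss2 in pde():

$$(a_1+1)\left(r^2u_{rr}+r\,u_r-u\right)+r^2u_{zz}+a_1r^2w_{rz}=0,$$

$$\left((a_1+1)\,w_{zz}+w_{rr}+a_1u_{rz}\right)r^2+\left(a_1u_z+w_r\right)r=0.$$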
The code is listed as follows:
"""Backend supported: tensorflow.compat.v1, tensorflow, pytorch, paddle"""
import time
import deepxde as dde
import numpy as np
# Backend tensorflow.compat.v1 or tensorflow
from deepxde.backend import tf
# Backend pytorch
import os
import torch
import pandas as pd
# Backend paddle
import paddle
import matplotlib.pyplot as plt
SEED = 2
np.random.seed(SEED)
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
torch.cuda.manual_seed_all(SEED)
torch.backends.cudnn.deterministic = True
dde.config.set_random_seed(SEED)
st = time.time()
print(dde.backend.backend.is_gpu_available())
dde.config.set_random_seed(2000)
# Coordinate normalization
rmin = 0
rmax = 5000
rlength = rmax - rmin
zmin = 0
zmax = 5000
zLength = zmax - zmin
loadLength = 5000.0/(rlength)
# Material properties
E = 100.0
mu = 0.2
lambda1 = E * mu / (1.0 + mu) / (1.0 - 2 * mu)
G1 = E / 2.0 / (1.0 + mu)
a1 = lambda1 / G1 + 1.0
# Generation of the model
geom = dde.geometry.Rectangle([0, 0], [1, 1])
# Net structure
num_dense_layers = 4
num_dense_nodes = 64
def pde(x, y):
    # x[:, 0:1] is r, x[:, 1:2] is z
    # y[:, 0:1] is u (radial displacement), y[:, 1:2] is w (axial displacement)
    u = y[:, 0:1]
    w = y[:, 1:2]
    du_r = dde.grad.jacobian(u, x, i=0, j=0)
    du_z = dde.grad.jacobian(u, x, i=0, j=1)
    du_r_z = dde.grad.jacobian(du_r, x, i=0, j=1)
    dw_z = dde.grad.jacobian(w, x, i=0, j=1)
    dw_r = dde.grad.jacobian(w, x, i=0, j=0)
    dw_z_r = dde.grad.jacobian(dw_z, x, i=0, j=0)
    du_rr = dde.grad.hessian(u, x, i=0, j=0)
    dw_rr = dde.grad.hessian(w, x, i=0, j=0)
    du_zz = dde.grad.hessian(u, x, i=1, j=1)
    dw_zz = dde.grad.hessian(w, x, i=1, j=1)
    # Residuals of the r- and z-equations after multiplying through by r**2
    loss1 = ((a1 + 1.0) * du_rr + du_zz + a1 * dw_z_r) * x[:, 0:1] ** 2.0 + (a1 + 1.0) * du_r * x[:, 0:1] - (a1 + 1.0) * u
    loss2 = ((a1 + 1.0) * dw_zz + dw_rr + a1 * du_r_z) * x[:, 0:1] ** 2.0 + (a1 * du_z + dw_r) * x[:, 0:1]
    return [loss1, loss2]
def boundary_top(x, on_boundary):
    return dde.utils.isclose(x[1], 1)

def boundary_down(x, on_boundary):
    return dde.utils.isclose(x[1], 0)

def boundary_left(x, on_boundary):
    return dde.utils.isclose(x[0], 0)

def boundary_left_value(x, y, X):
    # u = 0 on the axis r = 0
    return y[:, 0:1]

def boundary_bottom1_value(x, y, X):
    # u = 0 on the bottom z = 0
    return y[:, 0:1]

def boundary_bottom2_value(x, y, X):
    # w = 0 on the bottom z = 0
    return y[:, 1:2]

def boundary_top_value(x, y, X):
    # w = wext on the top z = 1
    wext = 3
    return y[:, 1:2] - wext
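# Hard constraints: scaling the raw network outputs by r and z forces u = 0 on
# r = 0 and on z = 0, and w = 0 on z = 0, without any soft boundary loss.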
def output_transform(x, y):
    r, z = x[:, 0:1], x[:, 1:2]
    u, w = y[:, 0:1], y[:, 1:2]
    u_new = u * r * z  # vanishes on the axis and the bottom
    w_new = w * z      # vanishes on the bottom
    return torch.cat((u_new, w_new), dim=1)
# Insert nodes on boundary
nodeNum = 100
topnodes = np.vstack((np.linspace(0, 1, num=nodeNum),np.full((nodeNum), 1)))
leftnodes = np.vstack((np.full((nodeNum), 0),np.linspace(0, 1, num=nodeNum)))
bottomnodes = np.vstack((np.linspace(0, 1, num=nodeNum),np.full((nodeNum), 0)))
# Insert nodes in the domain
nodeNum2 = 40
rarray = np.linspace(0,loadLength,nodeNum2)
zarray = np.linspace(0.8,1.0,nodeNum2)
rv,zv = np.meshgrid(rarray,zarray,indexing = 'xy')
rv = np.array(rv.flat)
zv = np.array(zv.flat)
domainnodes = np.vstack((rv,zv))
# Concatenate the extra node arrays
extranodes = np.hstack((topnodes,leftnodes))
extranodes = np.hstack((extranodes,bottomnodes))
extranodes = np.hstack((extranodes,domainnodes))
extranodes = extranodes.T
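# DeepXDE expects anchor points with shape (n_points, 2), hence the transpose.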
tbc = dde.icbc.OperatorBC(geom, boundary_top_value, boundary_top)
bbc1 = dde.icbc.OperatorBC(geom, boundary_bottom1_value, boundary_down)
bbc2 = dde.icbc.OperatorBC(geom, boundary_bottom2_value, boundary_down)
lbc = dde.icbc.OperatorBC(geom, boundary_left_value, boundary_left)
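# Only the top BC is imposed softly; lbc, bbc1, and bbc2 are satisfied exactly
# by output_transform, so they are not added to the training BC list.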
BC = [tbc]
data = dde.data.PDE(
    geom,
    pde,
    BC,
    num_domain=4000,
    num_boundary=500,
    num_test=1000,
    anchors=extranodes,  # without this, the extra nodes built above are never used
)
pde_resampler = dde.callbacks.PDEPointResampler(period=500)
checker = dde.callbacks.ModelCheckpoint("model/model.ckpt", save_better_only=True, period=500)
iteration = 10000
iterationBFGS = 10000
net = dde.nn.FNN([2] + [num_dense_nodes] * num_dense_layers + [2], "tanh", "Glorot normal")
loss_weights = [1.0, 1.0, 1.0]  # [r-equation residual, z-equation residual, top BC]
net.apply_output_transform(output_transform)
model = dde.Model(data, net)
model.compile("adam", lr=0.001, loss_weights=loss_weights)
modelPath = r'C:\Users\qyy\PycharmProjects\pythonProject1\DEEPXPDE\halfspace static\model\model-halfspace'
losshistory, train_state = model.train(iterations=iteration, callbacks=[checker,pde_resampler], display_every=500)
dde.config.set_default_float("float64")
dde.optimizers.config.set_LBFGS_options(maxcor=100, ftol=0, gtol=1e-08, maxiter=iterationBFGS, maxfun=None, maxls=50)
model.compile("L-BFGS-B", lr=0.001, loss_weights=loss_weights)
losshistory, train_state = model.train(iterations=iterationBFGS, callbacks=[checker, pde_resampler])
dde.saveplot(losshistory, train_state, issave=True, isplot=True)
print(time.time() - st)
model.save(save_path=modelPath)
rInput = np.linspace(0, 1, 50)
zcord = 0
zcordInput = (zcord - zmin) / (zmax - zmin)
xpre = np.full((np.shape(rInput)[0], 2), zcordInput)
xpre[:, 0] = rInput.T
ypre = model.predict(xpre)
ypre = pd.DataFrame(ypre)
rReal = rInput * (rmax - rmin) + rmin
plt.plot(rReal, ypre)
plt.show()
ypre.to_csv('twolayer.csv')