Changes from all commits · 23 commits
4b11912
compatibility bug fixed.
parsa-rahbari-82 Oct 31, 2024
e628ef1
tensorflow_intel removed from dependencies.
parsa-rahbari-82 Oct 31, 2024
38260b9
dynamic module import for src
parsa-rahbari-82 Oct 31, 2024
b4474e4
fix bug for dynamic import.
parsa-rahbari-82 Oct 31, 2024
5ccd287
Revert "dynamic module import for src"
parsa-rahbari-82 Oct 31, 2024
968cd16
tensorflow compatibility fixed.
parsa-rahbari-82 Oct 31, 2024
0cb134b
setting up river pollutants example.
parsa-rahbari-82 Oct 31, 2024
003c642
smaller model for faster training
parsa-rahbari-82 Oct 31, 2024
87ee07e
basic commands added.
parsa-rahbari-82 Nov 26, 2024
de6c846
necessary commands added.
parsa-rahbari-82 Nov 26, 2024
ebe3542
redundant prints removed.
parsa-rahbari-82 Nov 26, 2024
d1b599d
Minor tensorflow 2 compatibility solved.
parsa-rahbari-82 Nov 27, 2024
a4a2616
major bugs for checkpoints solved
AmirAliFakhari Nov 27, 2024
68d5769
Legendre transform and deeponet compatibility bugs fixed.
parsa-rahbari-82 Dec 1, 2024
a1ee9c8
Stop tracking .gitignore
parsa-rahbari-82 Dec 15, 2024
f4a89ee
import order resolved. redundant python2 imports removed.
parsa-rahbari-82 Dec 15, 2024
358c03e
Import ordering fixed.
parsa-rahbari-82 Jan 18, 2025
b223d3a
fixing default advd example.
parsa-rahbari-82 Feb 3, 2025
9c951fe
including test folder
parsa-rahbari-82 Feb 4, 2025
ca6143c
including test folder for demo.
parsa-rahbari-82 Feb 4, 2025
6981a5d
Redundant files removed.
parsa-rahbari-82 Mar 24, 2025
524ec0e
Remove test file.
parsa-rahbari-82 Mar 24, 2025
41e0289
Rolling back previous requirements.txt
AmirAliFakhari May 31, 2025
Binary file modified requirements.txt
Binary file not shown.
151 changes: 89 additions & 62 deletions src/deeponet_pde.py
@@ -1,16 +1,13 @@
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import argparse
import itertools

import deepxde as dde
import numpy as np
import tensorflow as tf
import argparse
import deepxde as dde
import itertools
import numpy as np
import tensorflow as tf


import deepxde as dde
from spaces import FinitePowerSeries, FiniteChebyshev, GRF
from system import LTSystem, ODESystem, DRSystem, CVCSystem, ADVDSystem
from utils import merge_values, trim_to_65535, mean_squared_error_outlier, safe_test
from spaces import GRF, FiniteChebyshev, FinitePowerSeries
from spaces import FiniteChebyshev, FinitePowerSeries, GRF

from system import ADVDSystem, CVCSystem, DRSystem, LTSystem, ODESystem
from utils import (mean_squared_error_outlier, merge_values, safe_test,
trim_to_65535)


def test_u_lt(nn, system, T, m, model, data, u, fname):
@@ -21,9 +18,10 @@ def test_u_lt(nn, system, T, m, model, data, u, fname):
ns = np.arange(system.npoints_output)[:, None]
X_test = [np.tile(sensor_value, (system.npoints_output, 1)), ns]
y_test = s
if nn != "opnn":
if nn != "deeponet":
X_test = merge_values(X_test)
y_pred = model.predict(data.transform_inputs(X_test))
X_test = tuple([arr.astype(dtype=np.float32) for arr in X_test])
y_pred = model.predict(X_test)
Owner review comment (@lululxvi, Jun 25, 2025):
Why no data.transform_inputs? This is for data normalization.

np.savetxt("test/u_" + fname, sensor_value)
np.savetxt("test/s_" + fname, np.hstack((ns, y_test, y_pred)))

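On the reviewer's point above about data.transform_inputs: a minimal sketch (assuming the data object still exposes transform_inputs as in the original code) that keeps the input normalization and only then casts to float32 for the TensorFlow 2 backend could look like this:

import numpy as np

def predict_normalized(model, data, X_test):
    # Keep the dataset's input normalization, then cast to float32 for TF2.
    X_norm = data.transform_inputs(X_test)
    if isinstance(X_norm, (list, tuple)):
        X_norm = tuple(np.asarray(x, dtype=np.float32) for x in X_norm)
    else:
        X_norm = np.asarray(X_norm, dtype=np.float32)
    return model.predict(X_norm)
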
@@ -35,9 +33,11 @@ def test_u_ode(nn, system, T, m, model, data, u, fname, num=100):
x = np.linspace(0, T, num=num)[:, None]
X_test = [np.tile(sensor_values.T, (num, 1)), x]
y_test = system.eval_s_func(u, x)
if nn != "opnn":
if nn != "deeponet":
X_test = merge_values(X_test)
y_pred = model.predict(data.transform_inputs(X_test))

X_test = tuple([arr.astype(dtype=np.float32) for arr in X_test])
y_pred = model.predict(X_test)
np.savetxt(fname, np.hstack((x, y_test, y_pred)))
print("L2relative error:", dde.metrics.l2_relative_error(y_test, y_pred))

@@ -51,9 +51,11 @@ def test_u_dr(nn, system, T, m, model, data, u, fname):
xt = xt * [1 / (m - 1), T / (system.Nt - 1)]
X_test = [np.tile(sensor_value, (m * system.Nt, 1)), xt]
y_test = s.reshape([m * system.Nt, 1])
if nn != "opnn":
if nn != "deeponet":
X_test = merge_values(X_test)
y_pred = model.predict(data.transform_inputs(X_test))

X_test = tuple([arr.astype(dtype=np.float32) for arr in X_test])
y_pred = model.predict(X_test)
np.savetxt(fname, np.hstack((xt, y_test, y_pred)))


@@ -66,9 +68,11 @@ def test_u_cvc(nn, system, T, m, model, data, u, fname):
xt = xt * [1 / (m - 1), T / (system.Nt - 1)]
X_test = [np.tile(sensor_value, (m * system.Nt, 1)), xt]
y_test = s.reshape([m * system.Nt, 1])
if nn != "opnn":
if nn != "deeponet":
X_test = merge_values(X_test)
y_pred = model.predict(data.transform_inputs(X_test))

X_test = tuple([arr.astype(dtype=np.float32) for arr in X_test])
y_pred = model.predict(X_test)
np.savetxt("test/u_" + fname, sensor_value)
np.savetxt("test/s_" + fname, np.hstack((xt, y_test, y_pred)))

@@ -82,16 +86,18 @@ def test_u_advd(nn, system, T, m, model, data, u, fname):
xt = xt * [1 / (m - 1), T / (system.Nt - 1)]
X_test = [np.tile(sensor_value, (m * system.Nt, 1)), xt]
y_test = s.reshape([m * system.Nt, 1])
if nn != "opnn":
if nn != "deeponet":
X_test = merge_values(X_test)
y_pred = model.predict(data.transform_inputs(X_test))

X_test = tuple([arr.astype(dtype=np.float32) for arr in X_test])
y_pred = model.predict(X_test)
np.savetxt("test/u_" + fname, sensor_value)
np.savetxt("test/s_" + fname, np.hstack((xt, y_test, y_pred)))


def lt_system(npoints_output):
def lt_system(npoints_output, T):
"""Legendre transform"""
return LTSystem(npoints_output)
return LTSystem(npoints_output, T)


def ode_system(T):
@@ -103,11 +109,11 @@ def g(s, u, x):
# Nonlinear ODE
# return -s**2 + u
# Gravity pendulum
# k = 1
# return [s[1], - k * np.sin(s[0]) + u]
k = 1
Owner review comment:
There is return in line 108. The code here is not used.

return [s[1], - k * np.sin(s[0]) + u]

s0 = [0]
# s0 = [0, 0] # Gravity pendulum
#s0 = [0]
s0 = [0, 0] # Gravity pendulum
return ODESystem(g, s0, T)


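Regarding the dead-code remark above: a sketch of g with exactly one active return path. The kind switch is a hypothetical illustration, not part of the diff; the original file toggles equations by commenting lines in and out.

import numpy as np
from system import ODESystem  # repo-local module, as imported at the top of this file

def ode_system_sketch(T, kind="pendulum", k=1):
    def g(s, u, x):
        if kind == "antiderivative":
            return u                              # s' = u
        if kind == "nonlinear":
            return -s ** 2 + u                    # s' = -s^2 + u
        return [s[1], -k * np.sin(s[0]) + u]      # gravity pendulum
    s0 = [0, 0] if kind == "pendulum" else [0]
    return ODESystem(g, s0, T)
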
@@ -129,8 +135,10 @@ def cvc_system(T, npoints_output):

def advd_system(T, npoints_output):
"""Advection-diffusion"""
f = None
g = None
# source term
f = 1
# boundary condition
g = [3, 0]
Nt = 100
return ADVDSystem(f, g, T, Nt, npoints_output)

@@ -140,23 +148,14 @@ def run(problem, system, space, T, m, nn, net, lr, epochs, num_train, num_test):

X_train, y_train = system.gen_operator_data(space, m, num_train)
X_test, y_test = system.gen_operator_data(space, m, num_test)
if nn != "opnn":
if nn != "deeponet":
X_train = merge_values(X_train)
X_test = merge_values(X_test)

# np.savez_compressed("train.npz", X_train0=X_train[0], X_train1=X_train[1], y_train=y_train)
# np.savez_compressed("test.npz", X_test0=X_test[0], X_test1=X_test[1], y_test=y_test)
# return

# d = np.load("train.npz")
# X_train, y_train = (d["X_train0"], d["X_train1"]), d["y_train"]
# d = np.load("test.npz")
# X_test, y_test = (d["X_test0"], d["X_test1"]), d["y_test"]

X_test_trim = trim_to_65535(X_test)[0]
y_test_trim = trim_to_65535(y_test)[0]
if nn == "opnn":
data = dde.data.OpDataSet(
if nn == "deeponet":
data = dde.data.Triple(
X_train=X_train, y_train=y_train, X_test=X_test_trim, y_test=y_test_trim
)
else:
@@ -167,13 +166,12 @@ def run(problem, system, space, T, m, nn, net, lr, epochs, num_train, num_test):
model = dde.Model(data, net)
model.compile("adam", lr=lr, metrics=[mean_squared_error_outlier])
checker = dde.callbacks.ModelCheckpoint(
"model/model.ckpt", save_better_only=True, period=1000
"model/model", save_better_only=True, period=1000
)
losshistory, train_state = model.train(epochs=epochs, callbacks=[checker])
print("# Parameters:", np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()]))
print("# Parameters:", np.sum([np.prod(v.get_shape().as_list()) for v in tf.compat.v1.trainable_variables()]))
dde.saveplot(losshistory, train_state, issave=True, isplot=True)

model.restore("model/model.ckpt-" + str(train_state.best_step), verbose=1)
model.restore("model/model-" + str(train_state.best_step) + ".ckpt", verbose=1)
safe_test(model, data, X_test, y_test)

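Since the checkpoint path is exactly what this change adjusts, a small hypothetical helper (not part of the diff) that builds the name model.restore expects and checks that the saver actually wrote files could look like:

import glob

def best_checkpoint_path(prefix, best_step):
    # prefix is the string passed to dde.callbacks.ModelCheckpoint, e.g. "model/model";
    # the restore call above expects "<prefix>-<best_step>.ckpt".
    path = "{}-{}.ckpt".format(prefix, best_step)
    # TF1-style savers write several files per checkpoint (.index, .data-*),
    # so glob on the prefix rather than testing a single filename.
    if not glob.glob(path + "*"):
        raise FileNotFoundError("no checkpoint files matching " + path)
    return path
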
tests = [
@@ -196,7 +194,7 @@ def run(problem, system, space, T, m, nn, net, lr, epochs, num_train, num_test):

if problem == "lt":
features = space.random(10)
sensors = np.linspace(0, 2, num=m)[:, None]
sensors = np.linspace(0, T, num=m)[:, None]
u = space.eval_u(features, sensors)
for i in range(u.shape[0]):
test_u_lt(nn, system, T, m, model, data, lambda x: u[i], str(i) + ".dat")
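With T threaded through, the sensor grid used for LT training data (np.linspace(0, self.T, num=m) in system.LTSystem.gen_operator_data) and the grid used here for testing are now built on the same [0, T] domain instead of the previously hard-coded [0, 2]. A hypothetical one-liner, not in the diff, that both call sites could share:

import numpy as np

def sensor_grid(T, m):
    # m sensor locations on [0, T], shaped (m, 1) as expected by space.eval_u.
    return np.linspace(0, T, num=m)[:, None]
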
@@ -221,18 +219,18 @@ def run(problem, system, space, T, m, nn, net, lr, epochs, num_train, num_test):
test_u_advd(nn, system, T, m, model, data, lambda x: u[i], str(i) + ".dat")


def main():
def main(args):
# Problems:
# - "lt": Legendre transform
# - "ode": Antiderivative, Nonlinear ODE, Gravity pendulum
# - "dr": Diffusion-reaction
# - "cvc": Advection
# - "advd": Advection-diffusion
problem = "ode"
T = 1
problem = args.problem
T = args.t
if problem == "lt":
npoints_output = 20
system = lt_system(npoints_output)
system = lt_system(npoints_output, T)
elif problem == "ode":
system = ode_system(T)
elif problem == "dr":
@@ -249,29 +247,29 @@ def main():
# space = FinitePowerSeries(N=100, M=1)
# space = FiniteChebyshev(N=20, M=1)
# space = GRF(2, length_scale=0.2, N=2000, interp="cubic") # "lt"
space = GRF(1, length_scale=0.2, N=1000, interp="cubic")
# space = GRF(T, length_scale=0.2, N=1000 * T, interp="cubic")
# space = GRF(1, length_scale=0.2, N=1000, interp="cubic")
space = GRF(T, length_scale=0.2, N=1000 * T, interp="cubic")

# Hyperparameters
m = 100
num_train = 10000
num_test = 100000
lr = 0.001
epochs = 50000
m = args.m
num_train = args.num_train
num_test = args.num_test
lr = args.lr
epochs = args.epochs

# Network
nn = "opnn"
activation = "relu"
initializer = "Glorot normal" # "He normal" or "Glorot normal"
nn = args.nn
activation = args.activation
initializer = args.init # "He normal" or "Glorot normal"
dim_x = 1 if problem in ["ode", "lt"] else 2
if nn == "opnn":
net = dde.maps.OpNN(
if nn == "deeponet":
net = dde.maps.DeepONet(
[m, 40, 40],
[dim_x, 40, 40],
activation,
initializer,
use_bias=True,
stacked=False,
stacked=args.stacked,
)
elif nn == "fnn":
net = dde.maps.FNN([m + dim_x] + [100] * 2 + [1], activation, initializer)
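Putting the pieces of main() together, a minimal end-to-end sketch under the same API assumptions as this diff (repo-local spaces/system modules, dde.data.Triple, dde.maps.DeepONet; values mirror the former hard-coded defaults, with a smaller test set and fewer epochs for a quick run) might be:

import deepxde as dde
from spaces import GRF
from system import ODESystem

def demo(T=1, m=100, num_train=10000, num_test=1000, lr=1e-3, epochs=10000):
    # Antiderivative operator: s'(x) = u(x), s(0) = 0.
    system = ODESystem(lambda s, u, x: u, [0], T)
    space = GRF(T, length_scale=0.2, N=1000 * T, interp="cubic")
    X_train, y_train = system.gen_operator_data(space, m, num_train)
    X_test, y_test = system.gen_operator_data(space, m, num_test)
    data = dde.data.Triple(X_train=X_train, y_train=y_train, X_test=X_test, y_test=y_test)
    net = dde.maps.DeepONet([m, 40, 40], [1, 40, 40], "relu", "Glorot normal",
                            use_bias=True, stacked=False)
    model = dde.Model(data, net)
    model.compile("adam", lr=lr)
    model.train(epochs=epochs)
    return model
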
@@ -282,4 +280,33 @@ def main():


if __name__ == "__main__":
main()

def process_initializer(value):
if value == 'Glorot':
return 'Glorot normal'
else:
return 'He normal'

parser = argparse.ArgumentParser()
parser.add_argument('-p', '--problem', type=str, choices=['lt', 'ode', 'dr', 'cvc', 'advd'], help="Type of differential equation", required=True)
parser.add_argument('-t', type=int, default=1, help="Final time in domain (default=1)")
parser.add_argument('-m', type=int, help="number of sensors", required=True)
parser.add_argument('--num-train', type=int, help="number of train data", required=True)
parser.add_argument('--num-test', type=int, help="number of test data", required=True)
parser.add_argument('--lr', type=float, default=1e-3, help="learning rate (default=1e-3)")
parser.add_argument('--epochs', type=int, help="number of epochs", required=True)
parser.add_argument('--nn', type=str, choices=['fnn', 'deeponet', 'resnet'], help="Type of neural network", default='deeponet')
parser.add_argument('--activation', type=str, choices=['elu', 'gelu', 'relu', 'selu', 'sigmoid', 'silu', 'sin', 'swish', 'tanh'], help="Activation function", default='relu')
parser.add_argument(
'--init',
type=str,
choices=['He', 'Glorot'],
default='Glorot', # Default initializer
help="Specify the initializer (choose from 'He [normal]', 'Glorot [normal]')"
)
parser.add_argument('--stacked', type=str, default='False', help="Whether to use the stacked DeepONet architecture (only used when --nn is 'deeponet')")
args = parser.parse_args()
args.init = process_initializer(args.init)
if args.nn == 'deeponet':
args.stacked = args.stacked.lower() == 'true'
main(args)
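With these flags, the configuration that was previously hard-coded in main() corresponds to an invocation along these lines (an illustrative example, not taken from the diff):

python src/deeponet_pde.py -p ode -t 1 -m 100 --num-train 10000 --num-test 100000 --lr 0.001 --epochs 50000 --nn deeponet --activation relu --init Glorot --stacked False
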
10 changes: 3 additions & 7 deletions src/spaces.py
@@ -1,14 +1,10 @@
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import matplotlib.pyplot as plt
import numpy as np
import config

from pathos.pools import ProcessPool
from scipy import linalg, interpolate
from scipy import interpolate, linalg
from sklearn import gaussian_process as gp

import config
from utils import eig


12 changes: 5 additions & 7 deletions src/system.py
@@ -1,36 +1,34 @@
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import config
Owner review comment:
config is a local file. Move it below as the original code.


from pathos.pools import ProcessPool
from scipy import interpolate
from scipy.integrate import solve_ivp
from scipy.special import legendre

import config
from ADR_solver import solve_ADR
from ADVD_solver import solve_ADVD
from CVC_solver import solve_CVC
from utils import timing


class LTSystem(object):
def __init__(self, npoints_output):
def __init__(self, npoints_output, T):
"""Legendre transform J_n{f(x)}.
Args:
npoints_output: For an input function, choose n=0,1,2,...,`npoints_output`-1 as data.
T: Upper end of the sensor domain [0, T].
"""
self.npoints_output = npoints_output
self.T = T

@timing
def gen_operator_data(self, space, m, num):
"""For each input function, generate `npoints_output` data, so the total number N = num x npoints_output.
"""
print("Generating operator data...", flush=True)
features = space.random(num)
sensors = np.linspace(0, 2, num=m)[:, None]
sensors = np.linspace(0, self.T, num=m)[:, None]
sensor_values = space.eval_u(features, sensors)

sensor_values_tile = np.tile(sensor_values, (1, self.npoints_output)).reshape(
9 changes: 3 additions & 6 deletions src/utils.py
@@ -1,12 +1,9 @@
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import sys
import time
import numpy as np
Owner review comment:
Use the original import format for numpy


from functools import wraps

import numpy as np


def timing(f):
@@ -53,7 +50,7 @@ def is_nonempty(X):
X = X_test
while is_nonempty(X):
X_add, X = trim_to_65535(X)
y_pred.append(model.predict(data.transform_inputs(X_add)))
y_pred.append(model.predict(X_add))
y_pred = np.vstack(y_pred)
error = np.mean((y_test - y_pred) ** 2)
print("Test MSE: {}".format(error))