# Imports (the tensordiffeq import path below is the assumed package layout
# for DiscoveryModel and the tdq plotting utilities)
import numpy as np
import scipy.io
import tensorflow as tf
import tensordiffeq as tdq
from tensordiffeq.models import DiscoveryModel

# Put the tunable PDE parameters into a list
params = [tf.Variable(0.0, dtype=tf.float32), tf.Variable(0.0, dtype=tf.float32)]


# Define f_model, note the `var` argument. Inputs must follow this order!
def f_model(u_model, var, x, t):
    u = u_model(tf.concat([x, t], 1))
    u_x = tf.gradients(u, x)
    u_xx = tf.gradients(u_x, x)
    u_t = tf.gradients(u, t)
    c1 = var[0]  # tunable param 1
    c2 = var[1]  # tunable param 2
    f_u = u_t - c1 * u_xx + c2 * u * u * u - c2 * u
    return f_u
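
# Note: with the AC.mat data below (from Raissi et al.), this residual is the
# Allen-Cahn equation u_t - 0.0001 * u_xx + 5 * u^3 - 5 * u = 0, so a
# successful discovery run should drive c1 toward 0.0001 and c2 toward 5.0.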

# Import data, same data as Raissi et al.
data = scipy.io.loadmat('AC.mat')

t = data['tt'].flatten()[:, None]
x = data['x'].flatten()[:, None]
Exact = data['uu']
Exact_u = np.real(Exact)

# define MLP depth and layer width
layer_sizes = [2, 128, 128, 128, 128, 1]

# generate all combinations of x and t
X, T = np.meshgrid(x, t)

X_star = np.hstack((X.flatten()[:, None], T.flatten()[:, None]))
u_star = Exact_u.T.flatten()[:, None]

x = X_star[:, 0:1]
t = X_star[:, 1:2]

# append to a list for input to model.fit
X = [x, t]

# define col_weights for the SA discovery model
col_weights = tf.Variable(tf.random.uniform([np.shape(x)[0], 1]))
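
# These self-adaptive (SA) weights give each collocation point its own
# trainable loss weight; TensorDiffEq trains them to maximize the weighted
# residual loss, so hard-to-fit points are automatically emphasized.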

# initialize, compile, train model
model = DiscoveryModel()
model.compile(layer_sizes, f_model, X, u_star, params,
              col_weights=col_weights)  # the baseline approach can be run by simply removing the col_weights arg

# an example of how one could modify an optimizer, in this case the col_weights optimizer
model.tf_optimizer_weights = tf.keras.optimizers.Adam(learning_rate=0.005,
                                                      beta_1=0.95)

# train loop
model.fit(tf_iter=10000)
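
# After training, the recovered coefficients can be read off the tf.Variables
# directly (for AC.mat, expect roughly c1 ~ 0.0001 and c2 ~ 5.0):
print("c1 =", params[0].numpy(), "c2 =", params[1].numpy())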

# doesn't work quite yet
tdq.plotting.plot_weights(model, scale=10.0)