Commits (31)
f0f7b7a  return PC type from nonlinear solvers in place of usePC flag. (JustinRayAngus, Nov 12, 2025)
e31057a  add initialization of curl curl BC masks (JustinRayAngus, Nov 12, 2025)
e1eb28f  only add 1 to MM for PC when using pc_curl_curl_mlmg. (JustinRayAngus, Nov 12, 2025)
04c7dc4  BC masks are applied. (JustinRayAngus, Nov 12, 2025)
a5b32ff  bug fix. (JustinRayAngus, Nov 12, 2025)
2e0231b  move updatePreCondMat to just before linear solve (JustinRayAngus, Nov 12, 2025)
a8804fb  add detailed description of masks. (JustinRayAngus, Nov 12, 2025)
794ce20  add CI test. (JustinRayAngus, Nov 12, 2025)
ab78aed  ignore unused. (JustinRayAngus, Nov 12, 2025)
6f227fc  make InitializeCurlCurlBCMasks() public. (JustinRayAngus, Nov 12, 2025)
4f346a9  fix bug for 1D cyl and sph builds. (JustinRayAngus, Nov 13, 2025)
3e79d3f  fix iteration check for CI test. (JustinRayAngus, Nov 13, 2025)
6c0263c  fix comments. (JustinRayAngus, Nov 13, 2025)
2d7cb71  0 --> lev (JustinRayAngus, Nov 13, 2025)
0cf1efd  use field registry for the mask. (JustinRayAngus, Nov 13, 2025)
75cc940  remove commented out code. (JustinRayAngus, Nov 14, 2025)
36c01dd  update CI test scripts. (JustinRayAngus, Nov 25, 2025)
990e255  fix bug. (JustinRayAngus, Nov 25, 2025)
15897e7  use AMReX_PETSC (JustinRayAngus, Nov 25, 2025)
aea4b63  add AMReX_PETSC to .azure (JustinRayAngus, Nov 25, 2025)
75a95de  using PECInsulator->IsESet() for matrix BCs. (JustinRayAngus, Dec 2, 2025)
2cffb10  attempt to build petsc in azure. (JustinRayAngus, Dec 2, 2025)
6d17b45  add initialization of curl curl BC masks (JustinRayAngus, Nov 12, 2025)
00127b1  add parsing ability for MM PC width (JustinRayAngus, Nov 14, 2025)
bcc0db8  copying full MM to PC MM for arbitrary width. (JustinRayAngus, Nov 14, 2025)
13b24c1  limit pc width to 0 in 3D for now. (JustinRayAngus, Nov 14, 2025)
edfb448  add off-diagonal terms to the matrix. (JustinRayAngus, Nov 14, 2025)
09467fe  add blanking for components of E. (JustinRayAngus, Nov 16, 2025)
cea99f0  fix rebase bugs. (JustinRayAngus, Nov 24, 2025)
8092792  don't compute Ji from MM if blanking. (JustinRayAngus, Dec 1, 2025)
87801f9  use updateDt based on CFL. (JustinRayAngus, Dec 2, 2025)
15 changes: 15 additions & 0 deletions .azure-pipelines.yml
@@ -44,6 +44,9 @@ jobs:
# Cartesian 2D
cartesian_2d:
WARPX_CMAKE_FLAGS: -DWarpX_DIMS=2 -DWarpX_FFT=ON -DWarpX_PYTHON=ON
# Cartesian 2D with PETSc
cartesian_petsc_2d:
WARPX_CMAKE_FLAGS: -DWarpX_DIMS=2 -DWarpX_FFT=OFF -DWarpX_PYTHON=ON -DAMReX_PETSC=YES
AMReX_PETSC: TRUE
# Cartesian 3D
cartesian_3d:
WARPX_CMAKE_FLAGS: -DWarpX_DIMS=3 -DWarpX_FFT=ON -DWarpX_PYTHON=ON
@@ -108,6 +111,18 @@ jobs:
# -DopenPMD_USE_PYTHON=OFF -DBUILD_TESTING=OFF -DBUILD_EXAMPLES=OFF -DBUILD_CLI_TOOLS=OFF
# #python3 -m pip install --upgrade openpmd-api
#fi
if [ "${AMReX_PETSC:-YES}" == "TRUE" ]; then
export warpx_dir=${PWD}
export PETSC_DIR=${warpx_dir}/petsc
export PETSC_ARCH=arch-opt
# clone and configure PETSc
cd ${warpx_dir}
git clone -b release https://gitlab.com/petsc/petsc.git petsc
cd ${PETSC_DIR}
./configure --with-fortran-bindings=no --with-x=no --with-cc=mpicc --with-fc=mpif90 --with-cxx=mpicxx --with-debugging=1 --download-make --download-cmake --download-superlu --download-superlu_dist
make -j 4
export WARPX_CMAKE_FLAGS="${WARPX_CMAKE_FLAGS} -DPETSC_DIR=${PETSC_DIR} -DPETSC_ARCH=${PETSC_ARCH}"
fi
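# Note: superlu/superlu_dist are downloaded above on the assumption that they
# provide the parallel direct-LU backend behind pc_petsc.type = "lu";
# PETSc's built-in LU factorization is serial-only.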
if [ "${WARPX_RZ_FFT:-FALSE}" == "TRUE" ]; then
# BLAS++
cmake-easyinstall --prefix=/usr/local \
12 changes: 12 additions & 0 deletions Examples/Tests/implicit/CMakeLists.txt
@@ -31,6 +31,18 @@ add_warpx_test(
OFF # dependency
)

if(AMReX_PETSC)
add_warpx_test(
test_2d_curl_curl_petsc_pc # name
2 # dims
2 # nprocs
inputs_test_2d_curl_curl_petsc_pc # inputs
"analysis_petsc_matrix.py diags/diag1000200" # analysis
"analysis_default_regression.py --path diags/diag1000200" # checksum
OFF # dependency
)
endif()
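# A sketch of how to run the new test locally, assuming a PETSc-enabled build
# in ./build (the test name follows the add_warpx_test() convention above):
#   ctest --test-dir build -R test_2d_curl_curl_petsc_pc --output-on-failure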

add_warpx_test(
test_2d_theta_implicit_planar_pinch # name
2 # dims
26 changes: 26 additions & 0 deletions Examples/Tests/implicit/analysis_petsc_matrix.py
@@ -0,0 +1,26 @@
#!/usr/bin/env python3

# This file is part of WarpX.
#
# License: BSD-3-Clause-LBNL
#
# This script analyses the simulation results from the input file
# `inputs_test_2d_curl_curl_petsc_pc`, which simulates a time-varying EM wave
# injected from an insulator boundary using the theta-implicit solver with
# PETSc's LU preconditioner.
# Since LU is an exact solve, if the preconditioner matrix is constructed
# correctly, then there should be 1 Newton and 1 GMRES iteration per time step.

import numpy as np

newton_solver = np.loadtxt("diags/reduced_files/newton_solver.txt", skiprows=1)
num_steps = int(newton_solver[-1, 0])
total_newton_iters = int(newton_solver[-1, 3])
total_gmres_iters = int(newton_solver[-1, 7])
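# (Assumed layout of newton_solver.txt, implied by the indices above: column 0
# holds the step number, column 3 the cumulative Newton iteration count, and
# column 7 the cumulative GMRES iteration count.)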

# check that there is 1 Newton and 1 GMRES iteration per step
print(f"total steps: {num_steps}")
print(f"total gmres iters: {total_gmres_iters}")
print(f"total newton iters: {total_newton_iters}")
assert total_gmres_iters == num_steps
assert total_newton_iters == num_steps
162 changes: 162 additions & 0 deletions Examples/Tests/implicit/inputs_test_2d_curl_curl_petsc_pc
@@ -0,0 +1,162 @@
#################################
########## CONSTANTS ############
#################################

### characteristic plasma values
my_constants.n0 = 1.e23 # plasma density [1/m^3]
my_constants.T0 = 1. # plasma temperature [eV]
my_constants.R0 = 1.5e-2 # plasma radius [m]
my_constants.I0 = 200.e3 # pinch current [Amps]

my_constants.AD = 2.01410 # atomic mass of deuterium [amu]
my_constants.mD = AD*m_u - m_e # deuterium ion mass [kg]

### derived characteristic pinch values
my_constants.B0 = mu0*I0/(2*pi*R0) # B at r=R0 [Tesla]
my_constants.PB0 = B0^2/(2.0*mu0) # B pressure at r=R0
my_constants.rho0 = AD*m_u*n0 # mass density [kg/m^3]
my_constants.u0 = sqrt(PB0/rho0) # implosion speed [m/s]
my_constants.t0 = R0/u0 # implosion time [s]

### simulation parameters
my_constants.tsim = 220.e-9 # sim time = 1.35*t0 [s]
my_constants.rise_time = t0/20.0 # current rise time [s]
my_constants.dt = 1.0e-3*1.0e-9 # time step [s]
my_constants.Nppc_z = 10 # particles per cell per dim

#################################
####### GENERAL PARAMETERS ######
#################################
stop_time = tsim
max_step = 200
amr.n_cell = 224 8
amr.max_grid_size = 8
amr.max_level = 0
geometry.dims = 2
geometry.prob_lo = 0.0 0.0
geometry.prob_hi = 1.54e-2 0.11e-2

#################################
####### Boundary condition ######
#################################
boundary.field_lo = pmc periodic
boundary.field_hi = pec_insulator periodic
boundary.particle_lo = reflecting periodic
boundary.particle_hi = absorbing periodic

insulator.area_x_hi(y,z) = (-1.0 <= z and z <= 1.0)
insulator.By_x_hi(y,z,t) = min(t/rise_time,1)*B0

#################################
############ NUMERICS ###########
#################################
warpx.serialize_initial_conditions = 1
warpx.verbose = 1
warpx.limit_verbose_step = false
warpx.const_dt = dt
warpx.use_filter = 0

amrex.fpe_trap_invalid = 1
amrex.fpe_trap_overflow = 1
amrex.fpe_trap_zero = 1

algo.maxwell_solver = Yee
algo.evolve_scheme = "theta_implicit_em"

implicit_evolve.theta = 0.5
implicit_evolve.max_particle_iterations = 21
implicit_evolve.particle_tolerance = 1.0e-12

implicit_evolve.nonlinear_solver = "newton"
newton.verbose = true
newton.linear_solver = petsc_ksp
newton.max_iterations = 100
newton.relative_tolerance = 1.0e-6
newton.absolute_tolerance = 0.0
newton.require_convergence = false
newton.diagnostic_file = "diags/reduced_files/newton_solver.txt"
newton.diagnostic_interval = 10

gmres.verbose_int = 2
gmres.max_iterations = 1000
gmres.relative_tolerance = 1.0e-3
gmres.absolute_tolerance = 0.0

implicit_evolve.use_mass_matrices_jacobian = true
implicit_evolve.skip_particle_picard_init = true
implicit_evolve.use_mass_matrices_pc = true
jacobian.pc_type = "pc_petsc"
pc_petsc.verbose = true
pc_petsc.type = "lu"
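# Since LU is a direct solve, the preconditioner applies the (mass-matrix
# approximated) Jacobian inverse exactly, so GMRES should converge in one
# iteration per Newton step; analysis_petsc_matrix.py asserts this.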

particles.max_grid_crossings = 2
algo.particle_pusher = "higuera"
algo.particle_shape = 2
algo.current_deposition = "villasenor"

#################################
############ PLASMA #############
#################################
particles.species_names = electrons deuterium

electrons.charge = -q_e
electrons.mass = m_e
electrons.injection_style = "NUniformPerCell"
electrons.num_particles_per_cell_each_dim = Nppc_z Nppc_z
electrons.profile = constant
electrons.density = n0
electrons.xmax = 0.0
electrons.momentum_distribution_type = "gaussian"
electrons.ux_th = sqrt(T0*q_e/m_e)/clight
electrons.uy_th = sqrt(T0*q_e/m_e)/clight
electrons.uz_th = sqrt(T0*q_e/m_e)/clight

deuterium.charge = q_e
deuterium.mass = mD
deuterium.injection_style = "NUniformPerCell"
deuterium.num_particles_per_cell_each_dim = Nppc_z Nppc_z
deuterium.profile = constant
deuterium.density = n0
deuterium.xmax = 0.0
deuterium.momentum_distribution_type = "gaussian"
deuterium.ux_th = sqrt(T0*q_e/mD)/clight
deuterium.uy_th = sqrt(T0*q_e/mD)/clight
deuterium.uz_th = sqrt(T0*q_e/mD)/clight

### Diagnostics
diagnostics.diags_names = diag1
diag1.intervals = 20
diag1.diag_type = Full
diag1.fields_to_plot = Ex Ey Ez Bx By Bz jx jy jz rho divE
diag1.electrons.variables = x z w ux uy uz
diag1.deuterium.variables = x z w ux uy uz

### reduced diagnostics
warpx.reduced_diags_names = particle_energy field_energy poynting_flux

particle_energy.type = ParticleEnergy
particle_energy.intervals = 10
particle_energy.precision = 18
particle_energy.path = diags/reduced_files/

field_energy.type = FieldEnergy
field_energy.intervals = 10
field_energy.precision = 18
field_energy.path = diags/reduced_files/

poynting_flux.type = FieldPoyntingFlux
poynting_flux.intervals = 10
poynting_flux.precision = 18
poynting_flux.path = diags/reduced_files/

#################################
############ COLLISION ##########
#################################
collisions.collision_names = collision1 collision2 collision3

collision1.species = electrons electrons
collision2.species = deuterium deuterium
collision3.species = electrons deuterium
collision1.CoulombLog = 10.0
collision2.CoulombLog = 10.0
collision3.CoulombLog = 10.0
@@ -0,0 +1,31 @@
{
"lev=0": {
"Bx": 9.069162586111297,
"By": 19.361810144285307,
"Bz": 12.107034717007863,
"Ex": 4193780781.9712296,
"Ey": 3007724460.40948,
"Ez": 4187550905.2162943,
"divE": 46810632867690.65,
"jx": 0.0,
"jy": 0.0,
"jz": 0.0,
"rho": 0.0
},
"electrons": {
"particle_momentum_x": 0.0,
"particle_momentum_y": 0.0,
"particle_momentum_z": 0.0,
"particle_position_x": 0.0,
"particle_position_y": 0.0,
"particle_weight": 0.0
},
"deuterium": {
"particle_momentum_x": 0.0,
"particle_momentum_y": 0.0,
"particle_momentum_z": 0.0,
"particle_position_x": 0.0,
"particle_position_y": 0.0,
"particle_weight": 0.0
}
}
11 changes: 7 additions & 4 deletions Source/Evolve/WarpXEvolve.cpp
@@ -192,8 +192,9 @@ WarpX::Evolve (int numsteps)

         // Update timestep for electrostatic solver if a constant dt is not provided
         // This first synchronizes the position and velocity before setting the new timestep
-        if (electromagnetic_solver_id == ElectromagneticSolverAlgo::None &&
-            !m_const_dt.has_value() && m_dt_update_interval.contains(step+1)) {
+        if ((electromagnetic_solver_id == ElectromagneticSolverAlgo::None ||
+             evolve_scheme == EvolveScheme::ThetaImplicitEM) &&
+            !m_const_dt.has_value() && m_dt_update_interval.contains(step+1)) {
             if (verbose_step) {
                 amrex::Print() << Utils::TextMsg::Info("updating timestep");
             }
@@ -384,13 +385,15 @@ void WarpX::OneStep (

     // implicit solver
     if (m_implicit_solver) {
+
+        // advance fields and particles by one time step
+        m_implicit_solver->OneStep(a_cur_time, a_dt, a_step);
+
         // perform particle collisions
         ExecutePythonCallback("beforecollisions");
         mypc->doCollisions(a_step, a_cur_time, a_dt);
         ExecutePythonCallback("aftercollisions");

-        // advance fields and particles by one time step
-        m_implicit_solver->OneStep(a_cur_time, a_dt, a_step);
     }
     // explicit solver
     else {
51 changes: 50 additions & 1 deletion Source/FieldSolver/ImplicitSolvers/ImplicitSolver.H
@@ -101,13 +101,38 @@ public:
else { return nullptr; }
}

/**
* \brief Return a pointer to the MultiFab holding the curl-curl BC mask
* for level lev and field direction dir (nullptr if masks are not in use)
*/
[[nodiscard]] virtual const amrex::MultiFab* GetCurl2BCmask (const int lev, const int dir) const
{
amrex::ignore_unused(lev,dir);
return nullptr;
}
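// (Sketch of the intended semantics, inferred from the commit history rather
//  than from this excerpt: the mask is assumed to be 1 on faces where the
//  curl-curl operator is applied and 0 on faces constrained by the boundary
//  condition, whose matrix rows reduce to identity.)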

/**
* \brief Return the number of components (as an IntVect) of the mass matrices
* used in the preconditioner for field direction field_dir
*/
amrex::IntVect GetMassMatricesPCNcomp (const int field_dir) const
{
if (field_dir == 0) { return m_ncomp_pc_xx; }
else if (field_dir == 1) { return m_ncomp_pc_yy; }
else if (field_dir == 2) { return m_ncomp_pc_zz; }
else { return amrex::IntVect{-1}; }
}
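// Usage sketch (illustrative caller, not taken from this diff):
//   const amrex::IntVect nc_xx = solver.GetMassMatricesPCNcomp(0);
// where nc_xx is assumed to hold the per-dimension count of mass-matrix
// stencil components retained for the Ex block of the preconditioner.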

// This parameter is used for the time-step fraction in the PC for implicit
// treatment of light waves in the curl-curl MLMG solver.
// This function should return zero if light waves are not treated implicitly
[[nodiscard]] virtual amrex::Real GetThetaForPC () const = 0;
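// For example, in the theta-implicit scheme the curl-curl PC operator is
// expected to act as  A(dE) = dE + (theta*c*dt)^2 * curl(curl(dE)) plus
// mass-matrix terms, so a return value of 0 drops the implicit light-wave term.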

void ComputeJfromMassMatrices (const bool a_J_from_MM_only);

const amrex::Array<bool,3>& GetBlankElectricField () const {
return m_blank_electric_field;
}

protected:

/**
@@ -127,6 +152,11 @@
*/
mutable amrex::Real m_dt = 0.0;

/**
* \brief Flags to turn off evolving select components of the electric field
*/
amrex::Array<bool,3> m_blank_electric_field{false,false,false};

/**
* \brief Time-biasing parameter for fields used on RHS to advance system.
*/
@@ -182,6 +212,11 @@
*/
bool m_use_mass_matrices_pc = false;

/**
* \brief Width for mass matrices in the preconditioner
*/
int m_mass_matrices_pc_width = -1;
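// (Interpretation based on the commit history: width 0 keeps only the
//  cell-local mass-matrix element in the PC, while larger widths copy more of
//  the full mass matrix; currently limited to width 0 in 3D.)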

/**
* \brief Direction-dependent number of components of the mass-matrix elements
*/
@@ -195,6 +230,20 @@
amrex::IntVect m_ncomp_zy = amrex::IntVect{0};
amrex::IntVect m_ncomp_zz = amrex::IntVect{0};

/**
* \brief Direction-dependent number of components of the mass-matrix elements
* used for the preconditioner
*/
amrex::IntVect m_ncomp_pc_xx = amrex::IntVect{0};
amrex::IntVect m_ncomp_pc_xy = amrex::IntVect{0};
amrex::IntVect m_ncomp_pc_xz = amrex::IntVect{0};
amrex::IntVect m_ncomp_pc_yx = amrex::IntVect{0};
amrex::IntVect m_ncomp_pc_yy = amrex::IntVect{0};
amrex::IntVect m_ncomp_pc_yz = amrex::IntVect{0};
amrex::IntVect m_ncomp_pc_zx = amrex::IntVect{0};
amrex::IntVect m_ncomp_pc_zy = amrex::IntVect{0};
amrex::IntVect m_ncomp_pc_zz = amrex::IntVect{0};

/**
* \brief Array of multifab pointers to mass matrix preconditioner
*/
@@ -242,7 +291,7 @@
/**
* \brief Convert from WarpX FieldBoundaryType to amrex::LinOpBCType
*/
[[nodiscard]] amrex::Array<amrex::LinOpBCType,AMREX_SPACEDIM> convertFieldBCToLinOpBC ( const amrex::Array<FieldBoundaryType,AMREX_SPACEDIM>& ) const;
[[nodiscard]] amrex::Array<amrex::LinOpBCType,AMREX_SPACEDIM> convertFieldBCToLinOpBC ( const amrex::Array<FieldBoundaryType,AMREX_SPACEDIM>&, const int bdry_side ) const;
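// (bdry_side presumably selects the low or high domain face, so the two sides
//  can map to different LinOpBCTypes, e.g. pmc on the low side and
//  pec_insulator on the high side as in the new CI test.)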

};
