Save additional node variables in SaveSolutionCallback #2298

Open · wants to merge 16 commits into base `main`
44 changes: 43 additions & 1 deletion examples/p4est_2d_dgsem/elixir_navierstokes_vortex_street.jl
@@ -102,10 +102,52 @@ analysis_callback = AnalysisCallback(semi, interval = analysis_interval)

alive_callback = AliveCallback(analysis_interval = analysis_interval)

# Add `:vorticity` to `extra_node_variables` tuple ...
extra_node_variables = (:vorticity,)

# ... and specify the function `get_node_variables` for this symbol,
# with first argument matching the symbol (turned into a type via `Val`) for dispatching.
# Note that for parabolic(-extended) equations, `equations_parabolic` and `cache_parabolic`
# must be declared as the last two arguments of the function to match the expected signature.
function Trixi.get_node_variables(::Val{:vorticity}, u, mesh, equations, dg, cache,
equations_parabolic, cache_parabolic)
n_nodes = nnodes(dg)
n_elements = nelements(dg, cache)
# By definition, the variable must be provided at every node of every element!
# Otherwise, the `SaveSolutionCallback` will crash.
vorticity_array = zeros(eltype(cache.elements),
n_nodes, n_nodes, # equivalent: `ntuple(_ -> n_nodes, ndims(mesh))...,`
n_elements)

@unpack viscous_container = cache_parabolic
@unpack gradients = viscous_container
gradients_x, gradients_y = gradients

# We can accelerate the computation by thread-parallelizing the loop over elements
# by using the `@threaded` macro.
Trixi.@threaded for element in eachelement(dg, cache)
for j in eachnode(dg), i in eachnode(dg)
u_node = get_node_vars(u, equations, dg, i, j, element)

gradients_1 = get_node_vars(gradients_x, equations_parabolic, dg,
i, j, element)
gradients_2 = get_node_vars(gradients_y, equations_parabolic, dg,
i, j, element)

vorticity_nodal = vorticity(u_node, (gradients_1, gradients_2),
equations_parabolic)
vorticity_array[i, j, element] = vorticity_nodal
end
end

return vorticity_array
end

save_solution = SaveSolutionCallback(dt = 1.0,
save_initial_solution = true,
save_final_solution = true,
solution_variables = cons2prim)
solution_variables = cons2prim,
extra_node_variables = extra_node_variables) # Supply the additional `extra_node_variables` here

callbacks = CallbackSet(summary_callback,
analysis_callback,
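The elixir above registers a single gradient-based quantity. As a hedged sketch (not part of this diff), a second, purely hyperbolic quantity such as the Mach number could be added to the same parabolic setup; note that the method still has to accept `equations_parabolic` and `cache_parabolic` as trailing arguments to match the expected signature, even though they are unused here:

```julia
# Sketch only: assumes the 2D compressible Euler/Navier-Stokes setup of the elixir above.
function Trixi.get_node_variables(::Val{:mach}, u, mesh, equations, dg, cache,
                                  equations_parabolic, cache_parabolic)
    n_nodes = nnodes(dg)
    n_elements = nelements(dg, cache)
    mach_array = zeros(eltype(cache.elements), n_nodes, n_nodes, n_elements)

    Trixi.@threaded for element in eachelement(dg, cache)
        for j in eachnode(dg), i in eachnode(dg)
            u_node = get_node_vars(u, equations, dg, i, j, element)
            rho, v1, v2, p = cons2prim(u_node, equations)
            c = sqrt(equations.gamma * p / rho) # speed of sound
            mach_array[i, j, element] = sqrt(v1^2 + v2^2) / c
        end
    end

    return mach_array
end

# Both symbols would then be registered together:
# extra_node_variables = (:vorticity, :mach)
```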
@@ -132,9 +132,8 @@ function Trixi.analyze(::Val{:energy_potential}, du, u_euler, t,
u_gravity) do u, i, j, element,
equations_euler, dg,
equations_gravity, u_gravity
u_euler_local = Trixi.get_node_vars(u_euler, equations_euler, dg, i, j, element)
u_gravity_local = Trixi.get_node_vars(u_gravity, equations_gravity, dg, i, j,
element)
u_euler_local = get_node_vars(u_euler, equations_euler, dg, i, j, element)
u_gravity_local = get_node_vars(u_gravity, equations_gravity, dg, i, j, element)
# OBS! subtraction is specific to Jeans instability test where rho0 = 1.5e7
# For formula of potential energy see
# "Galactic Dynamics" by Binney and Tremaine, 2nd ed., equation (2.18)
30 changes: 29 additions & 1 deletion examples/t8code_3d_dgsem/elixir_euler_ec.jl
@@ -68,9 +68,37 @@ analysis_callback = AnalysisCallback(semi, interval = analysis_interval)

alive_callback = AliveCallback(analysis_interval = analysis_interval)

# Add `:thermodynamic_entropy` to `extra_node_variables` tuple ...
extra_node_variables = (:thermodynamic_entropy,)

# ... and specify the function `get_node_variables` for this symbol,
# with first argument matching the symbol (turned into a type via `Val`) for dispatching.
function Trixi.get_node_variables(::Val{:thermodynamic_entropy}, u, mesh, equations,
dg, cache)
n_nodes = nnodes(dg)
n_elements = nelements(dg, cache)
# By definition, the variable must be provided at every node of every element!
# Otherwise, the `SaveSolutionCallback` will crash.
entropy_array = zeros(eltype(cache.elements),
ntuple(_ -> n_nodes, ndims(mesh))..., # equivalent: `n_nodes, n_nodes, n_nodes`
n_elements)

# We can accelerate the computation by thread-parallelizing the loop over elements
# by using the `@threaded` macro.
Trixi.@threaded for element in eachelement(dg, cache)
for k in eachnode(dg), j in eachnode(dg), i in eachnode(dg)
u_node = get_node_vars(u, equations, dg, i, j, k, element)

entropy_array[i, j, k, element] = Trixi.entropy_thermodynamic(u_node, equations)
end
end

return entropy_array
end
save_solution = SaveSolutionCallback(interval = 100,
save_initial_solution = true,
save_final_solution = true)
save_final_solution = true,
extra_node_variables = extra_node_variables) # Supply the additional `extra_node_variables` here

stepsize_callback = StepsizeCallback(cfl = 1.0)

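The same node-variable function can also be written without hard-coding the number of space dimensions. A minimal sketch, using only the exported Trixi helpers seen above; the `CartesianIndices` loop replaces the explicit `i, j, k` loop:

```julia
function Trixi.get_node_variables(::Val{:thermodynamic_entropy}, u, mesh, equations,
                                  dg, cache)
    n_nodes = nnodes(dg)
    n_elements = nelements(dg, cache)
    entropy_array = zeros(eltype(cache.elements),
                          ntuple(_ -> n_nodes, ndims(mesh))..., n_elements)

    Trixi.@threaded for element in eachelement(dg, cache)
        # Iterate over all node index combinations, independent of `ndims(mesh)`
        for ci in CartesianIndices(ntuple(_ -> n_nodes, ndims(mesh)))
            u_node = get_node_vars(u, equations, dg, Tuple(ci)..., element)
            entropy_array[ci, element] = Trixi.entropy_thermodynamic(u_node, equations)
        end
    end

    return entropy_array
end
```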
32 changes: 31 additions & 1 deletion examples/tree_2d_dgsem/elixir_euler_blast_wave_amr.jl
@@ -69,10 +69,40 @@ analysis_callback = AnalysisCallback(semi, interval = analysis_interval)

alive_callback = AliveCallback(analysis_interval = analysis_interval)

# Add `:mach` to `extra_node_variables` tuple ...
extra_node_variables = (:mach,)

# ... and specify the function `get_node_variables` for this symbol,
# with first argument matching the symbol (turned into a type via `Val`) for dispatching.
function Trixi.get_node_variables(::Val{:mach}, u, mesh, equations, dg, cache)
n_nodes = nnodes(dg)
n_elements = nelements(dg, cache)
# By definition, the variable must be provided at every node of every element!
# Otherwise, the `SaveSolutionCallback` will crash.
mach_array = zeros(eltype(cache.elements),
n_nodes, n_nodes, # equivalent: `ntuple(_ -> n_nodes, ndims(mesh))...,`
n_elements)

# We can accelerate the computation by thread-parallelizing the loop over elements
# by using the `@threaded` macro.
Trixi.@threaded for element in eachelement(dg, cache)
for j in eachnode(dg), i in eachnode(dg)
u_node = get_node_vars(u, equations, dg, i, j, element)
rho, v1, v2, p = cons2prim(u_node, equations)
c = sqrt(equations.gamma * p / rho) # speed of sound
v_magnitude = sqrt(v1^2 + v2^2)

mach_array[i, j, element] = v_magnitude / c
end
end

return mach_array
end
save_solution = SaveSolutionCallback(interval = 100,
save_initial_solution = true,
save_final_solution = true,
solution_variables = cons2prim)
solution_variables = cons2prim,
extra_node_variables = extra_node_variables) # Supply the additional `extra_node_variables` here

amr_indicator = IndicatorHennemannGassner(semi,
alpha_max = 0.5,
31 changes: 30 additions & 1 deletion examples/tree_2d_dgsem/elixir_euler_vortex_amr.jl
@@ -143,10 +143,39 @@ analysis_callback = AnalysisCallback(semi, interval = analysis_interval,

alive_callback = AliveCallback(analysis_interval = analysis_interval)

# Add `:temperature` to `extra_node_variables` tuple ...
extra_node_variables = (:temperature,)

# ... and specify the function `get_node_variables` for this symbol,
# with first argument matching the symbol (turned into a type via `Val`) for dispatching.
function Trixi.get_node_variables(::Val{:temperature}, u, mesh, equations, dg, cache)
n_nodes = nnodes(dg)
n_elements = nelements(dg, cache)
# By definition, the variable must be provided at every node of every element!
# Otherwise, the `SaveSolutionCallback` will crash.
temp_array = zeros(eltype(cache.elements),
n_nodes, n_nodes, # equivalent: `ntuple(_ -> n_nodes, ndims(mesh))...,`
n_elements)

# We can accelerate the computation by thread-parallelizing the loop over elements
# by using the `@threaded` macro.
Trixi.@threaded for element in eachelement(dg, cache)
for j in eachnode(dg), i in eachnode(dg)
u_node = get_node_vars(u, equations, dg, i, j, element)
rho, _, _, p = cons2prim(u_node, equations)
temp = p / rho # ideal gas equation with R = 1

temp_array[i, j, element] = temp
end
end

return temp_array
end
save_solution = SaveSolutionCallback(interval = 50,
save_initial_solution = true,
save_final_solution = true,
solution_variables = cons2prim)
solution_variables = cons2prim,
extra_node_variables = extra_node_variables) # Supply the additional `extra_node_variables` here

amr_controller = ControllerThreeLevel(semi, TrixiExtension.IndicatorVortex(semi),
base_level = 3,
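The two elixirs above each register a single symbol, but several extra node variables can be requested in one run. A minimal sketch, assuming the `:mach` and `:temperature` methods are defined as shown above:

```julia
extra_node_variables = (:mach, :temperature)

save_solution = SaveSolutionCallback(interval = 100,
                                     save_initial_solution = true,
                                     save_final_solution = true,
                                     solution_variables = cons2prim,
                                     extra_node_variables = extra_node_variables)
```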
3 changes: 2 additions & 1 deletion src/Trixi.jl
@@ -258,7 +258,8 @@ export VolumeIntegralSubcellLimiting, BoundsCheckCallback,
SubcellLimiterIDP, SubcellLimiterIDPCorrection

export nelements, nnodes, nvariables,
eachelement, eachnode, eachvariable
eachelement, eachnode, eachvariable,
get_node_vars

export SemidiscretizationHyperbolic, semidiscretize, compute_coefficients, integrate

38 changes: 31 additions & 7 deletions src/callbacks_step/save_solution.jl
@@ -11,7 +11,8 @@
save_initial_solution=true,
save_final_solution=true,
output_directory="out",
solution_variables=cons2prim)
solution_variables=cons2prim,
extra_node_variables=())

Save the current numerical solution in regular intervals. Either pass `interval` to save
every `interval` time steps or pass `dt` to save in intervals of `dt` in terms
@@ -20,13 +21,34 @@ of integration time by adding additional (shortened) time steps where necessary
at a single point to a set of solution variables. The first parameter passed
to `solution_variables` will be the set of conservative variables
and the second parameter is the equation struct.

Additional nodal variables such as vorticity or the Mach number can be saved by passing a tuple of symbols
to `extra_node_variables`, e.g., `extra_node_variables = (:vorticity, :mach)`.
In that case the function `get_node_variables` must be defined for each symbol in the tuple.
The expected signature of the function for (purely) hyperbolic equations is:
```julia
function get_node_variables(::Val{symbol}, u, mesh, equations, dg, cache)
# Implementation goes here
end
```
while for parabolic-hyperbolic equations `equations_parabolic` and `cache_parabolic` must be added:
```julia
function get_node_variables(::Val{symbol}, u, mesh, equations, dg, cache,
equations_parabolic, cache_parabolic)
# Implementation goes here
end
```

If the [`SubcellLimiterIDP`](@ref) is used, the `extra_node_variables` tuple is automatically extended by the
`:limiting_coefficient` key, which contains the limiting coefficient for each node.
Review comment on lines +42 to +43 (Contributor Author):
TODO: Change the if-clauses in the function get_node_variables! to really check for IDP and not only subcell vol integral (see also failing downstream tests)
"""
mutable struct SaveSolutionCallback{IntervalType, SolutionVariablesType}
interval_or_dt::IntervalType
save_initial_solution::Bool
save_final_solution::Bool
output_directory::String
solution_variables::SolutionVariablesType
node_variables::Dict{Symbol, Any}
end

function Base.show(io::IO, cb::DiscreteCallback{<:Any, <:SaveSolutionCallback})
Expand Down Expand Up @@ -96,7 +118,8 @@ function SaveSolutionCallback(; interval::Integer = 0,
save_initial_solution = true,
save_final_solution = true,
output_directory = "out",
solution_variables = cons2prim)
solution_variables = cons2prim,
extra_node_variables = ())
if !isnothing(dt) && interval > 0
throw(ArgumentError("You can either set the number of steps between output (using `interval`) or the time between outputs (using `dt`) but not both simultaneously"))
end
@@ -108,9 +131,11 @@ function SaveSolutionCallback(; interval::Integer = 0,
interval_or_dt = dt
end

node_variables = Dict{Symbol, Any}(var => nothing for var in extra_node_variables)
solution_callback = SaveSolutionCallback(interval_or_dt,
save_initial_solution, save_final_solution,
output_directory, solution_variables)
output_directory, solution_variables,
node_variables)

# Expected most frequent behavior comes first
if isnothing(dt)
@@ -219,14 +244,13 @@
end
end

node_variables = Dict{Symbol, Any}()
@trixi_timeit timer() "get node variables" get_node_variables!(node_variables,
semi)
@trixi_timeit timer() "get node variables" get_node_variables!(solution_callback.node_variables,
u_ode, semi)

@trixi_timeit timer() "save solution" save_solution_file(u_ode, t, dt, iter, semi,
solution_callback,
element_variables,
node_variables,
solution_callback.node_variables,
system = system)
end

39 changes: 35 additions & 4 deletions src/callbacks_step/save_solution_dg.jl
@@ -129,11 +129,11 @@ function save_solution_file(u, time, dt, timestep,
if HDF5.has_parallel()
save_solution_file_parallel(data, time, dt, timestep, n_vars, mesh, equations,
dg, cache, solution_variables, filename,
element_variables)
element_variables, node_variables)
else
save_solution_file_on_root(data, time, dt, timestep, n_vars, mesh, equations,
dg, cache, solution_variables, filename,
element_variables)
element_variables, node_variables)
end
end

@@ -142,7 +142,8 @@ function save_solution_file_parallel(data, time, dt, timestep, n_vars,
ParallelT8codeMesh},
equations, dg::DG, cache,
solution_variables, filename,
element_variables = Dict{Symbol, Any}())
element_variables = Dict{Symbol, Any}(),
node_variables = Dict{Symbol, Any}())

# Calculate element and node counts by MPI rank
element_size = nnodes(dg)^ndims(mesh)
@@ -195,6 +196,22 @@ function save_solution_file_parallel(data, time, dt, timestep, n_vars,
# Add variable name as attribute
attributes(var)["name"] = string(key)
end

# Store node variables
for (v, (key, node_variable)) in enumerate(node_variables)
# Need to create dataset explicitly in parallel case
var = create_dataset(file, "/node_variables_$v",
datatype(eltype(node_variable)),
dataspace((nelementsglobal(mesh, dg, cache) *
element_size,)))

# Write data of each process in slices (ranks start with 0)
slice = (cum_node_counts[mpi_rank() + 1] + 1):cum_node_counts[mpi_rank() + 2]
# Add to file
var[slice] = node_variable
# Add variable name as attribute
attributes(var)["name"] = string(key)
end
end

return filename
@@ -205,7 +222,8 @@ function save_solution_file_on_root(data, time, dt, timestep, n_vars,
ParallelT8codeMesh},
equations, dg::DG, cache,
solution_variables, filename,
element_variables = Dict{Symbol, Any}())
element_variables = Dict{Symbol, Any}(),
node_variables = Dict{Symbol, Any}())

# Calculate element and node counts by MPI rank
element_size = nnodes(dg)^ndims(mesh)
@@ -266,6 +284,19 @@ function save_solution_file_on_root(data, time, dt, timestep, n_vars,
var = file["element_variables_$v"]
attributes(var)["name"] = string(key)
end

# Store node variables
for (v, (key, node_variable)) in enumerate(node_variables)
# Add to file
recv = Vector{eltype(data)}(undef, sum(node_counts))
MPI.Gatherv!(node_variable, MPI.VBuffer(recv, node_counts),
mpi_root(), mpi_comm())
file["node_variables_$v"] = node_variable

# Add variable name as attribute
var = file["node_variables_$v"]
attributes(var)["name"] = string(key)
end
end

return filename
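Based on the dataset layout written above (one `node_variables_$v` dataset per requested symbol, each carrying a `name` attribute), the saved quantities could be read back for post-processing roughly as follows; the file name is a hypothetical example:

```julia
using HDF5

# Hedged sketch: iterate over the extra node-variable datasets in a solution file
# written by the routines above.
h5open("out/solution_000000100.h5", "r") do file
    v = 1
    while haskey(file, "node_variables_$v")
        dataset = file["node_variables_$v"]
        name = read_attribute(dataset, "name") # e.g. "mach"
        values = read(dataset)                 # nodal values for all elements
        println(name, ": ", length(values), " nodal values")
        v += 1
    end
end
```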
2 changes: 1 addition & 1 deletion src/equations/compressible_navier_stokes_3d.jl
@@ -339,7 +339,7 @@ end

Computes the (node-wise) vorticity, defined in 3D as
```math
\omega = \nabla \times \boldsymbol{v} =
\boldsymbol{\omega} = \nabla \times \boldsymbol{v} =
\begin{pmatrix}
\frac{\partial v_3}{\partial y} - \frac{\partial v_2}{\partial z} \\
\frac{\partial v_1}{\partial z} - \frac{\partial v_3}{\partial x} \\
5 changes: 3 additions & 2 deletions src/semidiscretization/semidiscretization.jl
@@ -360,8 +360,9 @@ function get_element_variables!(element_variables, u_ode,
get_element_variables!(element_variables, u, mesh_equations_solver_cache(semi)...)
end

function get_node_variables!(node_variables, semi::AbstractSemidiscretization)
get_node_variables!(node_variables, mesh_equations_solver_cache(semi)...)
# Required for storing `extra_node_variables` in the `SaveSolutionCallback`
function get_node_variables!(node_variables, u_ode, semi::AbstractSemidiscretization)
get_node_variables!(node_variables, u_ode, mesh_equations_solver_cache(semi)...)
end

# To implement AMR and use OrdinaryDiffEq.jl etc., we have to be a bit creative.
@@ -266,6 +266,13 @@ function compute_coefficients!(u_ode, t, semi::SemidiscretizationHyperbolicParabolic)
compute_coefficients!(u_ode, semi.initial_condition, t, semi)
end

# Required for storing `extra_node_variables` in the `SaveSolutionCallback`
function get_node_variables!(node_variables, u_ode,
semi::SemidiscretizationHyperbolicParabolic)
get_node_variables!(node_variables, u_ode, mesh_equations_solver_cache(semi)...,
semi.equations_parabolic, semi.cache_parabolic)
end

"""
semidiscretize(semi::SemidiscretizationHyperbolicParabolic, tspan)

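For orientation: the callback constructor stores one `nothing`-initialized dict entry per requested symbol, and the semidiscretization methods above forward `u_ode` together with mesh, equations, solver, and cache so that each entry can be filled by dispatching on its key. The solver-level method is not shown in this diff; a rough sketch of what it could look like for the purely hyperbolic case (an assumption, not the PR's actual implementation):

```julia
# Sketch only: fill each requested entry by dispatching on the symbol via `Val`.
function get_node_variables!(node_variables, u_ode, mesh, equations, dg, cache)
    u = wrap_array(u_ode, mesh, equations, dg, cache)
    for key in keys(node_variables)
        node_variables[key] = get_node_variables(Val(key), u, mesh, equations, dg, cache)
    end
    return nothing
end
```

The hyperbolic-parabolic method above additionally forwards `semi.equations_parabolic` and `semi.cache_parabolic`, matching the extended signature documented in the `SaveSolutionCallback` docstring.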