but it is used to trigger parts of the code for the test.
+boundary_conditions = (; x_neg = BoundaryConditionCoupledP4est(coupling_function), + y_neg = BoundaryConditionCoupledP4est(coupling_function), + y_pos = BoundaryConditionCoupledP4est(coupling_function), + x_pos = BoundaryConditionCoupledP4est(coupling_function)) + +semi1 = SemidiscretizationHyperbolic(mesh1, equations, initial_condition_convergence_test, + solver, + boundary_conditions = boundary_conditions) +semi2 = SemidiscretizationHyperbolic(mesh2, equations, initial_condition_convergence_test, + solver, + boundary_conditions = boundary_conditions) + +# Create a semidiscretization that bundles semi1 and semi2 +semi = SemidiscretizationCoupledP4est(semi1, semi2) + +############################################################################### +# ODE solvers, callbacks etc. + +# Create ODE problem with time span from 0.0 to 2.0 +ode = semidiscretize(semi, (0.0, 2.0)) + +# At the beginning of the main loop, the SummaryCallback prints a summary of the simulation setup +# and resets the timers +summary_callback = SummaryCallback() + +# The AnalysisCallback allows to analyse the solution in regular intervals and prints the results +# We require this definition for the test, even though we don't use it in the CallbackSet. 
+analysis_callback1 = AnalysisCallback(semi1, interval = 100) +analysis_callback2 = AnalysisCallback(semi2, interval = 100) +analysis_callback = AnalysisCallbackCoupledP4est(semi, analysis_callback1, + analysis_callback2) + +# The SaveSolutionCallback allows to save the solution to a file in regular intervals +save_solution = SaveSolutionCallback(interval = 100, + solution_variables = cons2prim) + +# The StepsizeCallback handles the re-calculation of the maximum Δt after each time step +stepsize_callback = StepsizeCallback(cfl = 1.6) + +# Create a CallbackSet to collect all callbacks such that they can be passed to the ODE solver +callbacks = CallbackSet(summary_callback, save_solution, stepsize_callback) + +############################################################################### +# run the simulation + +# OrdinaryDiffEq's `solve` method evolves the solution in time and executes the passed callbacks +sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false); + dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback + ode_default_options()..., callback = callbacks); diff --git a/examples/p4est_2d_dgsem/elixir_advection_meshview.jl b/examples/p4est_2d_dgsem/elixir_advection_meshview.jl deleted file mode 100644 index 0ad6e6e18e8..00000000000 --- a/examples/p4est_2d_dgsem/elixir_advection_meshview.jl +++ /dev/null @@ -1,65 +0,0 @@ -using OrdinaryDiffEqLowStorageRK -using Trixi - -############################################################################### -# Most basic p4est mesh view setup where the entire domain -# is part of the single mesh view. 
- -advection_velocity = (0.2, -0.7) -equations = LinearScalarAdvectionEquation2D(advection_velocity) - -# Create DG solver with polynomial degree = 3 and (local) Lax-Friedrichs/Rusanov flux as surface flux -solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) - -coordinates_min = (-1.0, -1.0) # minimum coordinates (min(x), min(y)) -coordinates_max = (1.0, 1.0) # maximum coordinates (max(x), max(y)) - -trees_per_dimension = (8, 8) - -# Create parent P4estMesh with 8 x 8 trees and 8 x 8 elements -parent_mesh = P4estMesh(trees_per_dimension, polydeg = 3, - coordinates_min = coordinates_min, - coordinates_max = coordinates_max, - periodicity = true) - -# Define the mesh view covering the whole parent mesh. -cell_ids = collect(1:Trixi.ncells(parent_mesh)) -mesh = P4estMeshView(parent_mesh, cell_ids) - -# A semidiscretization collects data structures and functions for the spatial discretization -semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition_convergence_test, - solver; - boundary_conditions = boundary_condition_periodic) - -############################################################################### -# ODE solvers, callbacks etc. - -# Create ODE problem with time span from 0.0 to 1.0 -ode = semidiscretize(semi, (0.0, 1.0)) - -# At the beginning of the main loop, the SummaryCallback prints a summary of the simulation setup -# and resets the timers -summary_callback = SummaryCallback() - -# The AnalysisCallback allows to analyse the solution in regular intervals and prints the results -# We require this definition for the test, even though we don't use it in the CallbackSet. 
-analysis_callback = AnalysisCallback(semi) - -# The SaveSolutionCallback allows to save the solution to a file in regular intervals -save_solution = SaveSolutionCallback(interval = 100, - solution_variables = cons2prim) - -# The StepsizeCallback handles the re-calculation of the maximum Δt after each time step -stepsize_callback = StepsizeCallback(cfl = 1.6) - -# Create a CallbackSet to collect all callbacks such that they can be passed to the ODE solver -callbacks = CallbackSet(summary_callback, save_solution, - stepsize_callback) - -############################################################################### -# run the simulation - -# OrdinaryDiffEq's `solve` method evolves the solution in time and executes the passed callbacks -sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false); - dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback - ode_default_options()..., callback = callbacks); diff --git a/src/Trixi.jl b/src/Trixi.jl index 7707b8434bc..167e01189c8 100644 --- a/src/Trixi.jl +++ b/src/Trixi.jl @@ -147,6 +147,7 @@ include("semidiscretization/semidiscretization_hyperbolic.jl") include("semidiscretization/semidiscretization_hyperbolic_parabolic.jl") include("semidiscretization/semidiscretization_euler_acoustics.jl") include("semidiscretization/semidiscretization_coupled.jl") +include("semidiscretization/semidiscretization_coupled_p4est.jl") include("time_integration/time_integration.jl") include("callbacks_step/callbacks_step.jl") include("callbacks_stage/callbacks_stage.jl") @@ -233,7 +234,7 @@ export boundary_condition_do_nothing, BoundaryConditionNavierStokesWall, NoSlip, Slip, Adiabatic, Isothermal, - BoundaryConditionCoupled + BoundaryConditionCoupled, BoundaryConditionCoupledP4est export initial_condition_convergence_test, source_terms_convergence_test, source_terms_lorentz, source_terms_collision_ion_electron, @@ -304,14 +305,14 @@ export SemidiscretizationEulerGravity, ParametersEulerGravity, 
timestep_gravity_erk53_3Sstar!, timestep_gravity_carpenter_kennedy_erk54_2N! -export SemidiscretizationCoupled +export SemidiscretizationCoupled, SemidiscretizationCoupledP4est export SummaryCallback, SteadyStateCallback, AnalysisCallback, AliveCallback, SaveRestartCallback, SaveSolutionCallback, TimeSeriesCallback, VisualizationCallback, AveragingCallback, AMRCallback, StepsizeCallback, GlmSpeedCallback, LBMCollisionCallback, EulerAcousticsCouplingCallback, - TrivialCallback, AnalysisCallbackCoupled, + TrivialCallback, AnalysisCallbackCoupled, AnalysisCallbackCoupledP4est, AnalysisSurfaceIntegral, DragCoefficientPressure2D, LiftCoefficientPressure2D, DragCoefficientShearStress2D, LiftCoefficientShearStress2D, DragCoefficientPressure3D, LiftCoefficientPressure3D diff --git a/src/callbacks_step/analysis.jl b/src/callbacks_step/analysis.jl index f405508f75c..8659119fa36 100644 --- a/src/callbacks_step/analysis.jl +++ b/src/callbacks_step/analysis.jl @@ -705,8 +705,7 @@ end # have `VariableViscous` available. 
function analyze(quantity::AnalysisSurfaceIntegral{Variable}, du, u, t, - semi::SemidiscretizationHyperbolicParabolic) where { - Variable <: + semi::SemidiscretizationHyperbolicParabolic) where {Variable <: VariableViscous} mesh, equations, solver, cache = mesh_equations_solver_cache(semi) equations_parabolic = semi.equations_parabolic diff --git a/src/callbacks_step/analysis_dg2d.jl b/src/callbacks_step/analysis_dg2d.jl index 5035e831eed..077d01240b1 100644 --- a/src/callbacks_step/analysis_dg2d.jl +++ b/src/callbacks_step/analysis_dg2d.jl @@ -329,7 +329,8 @@ end function integrate_via_indices(func::Func, u, mesh::Union{StructuredMesh{2}, StructuredMeshView{2}, - UnstructuredMesh2D, P4estMesh{2}, + UnstructuredMesh2D, + P4estMesh{2}, P4estMeshView{2}, T8codeMesh{2}}, equations, dg::DGSEM, cache, args...; normalize = true) where {Func} diff --git a/src/meshes/p4est_mesh_view.jl b/src/meshes/p4est_mesh_view.jl index 208ce7250f9..4f0bce4c69b 100644 --- a/src/meshes/p4est_mesh_view.jl +++ b/src/meshes/p4est_mesh_view.jl @@ -35,8 +35,11 @@ function P4estMeshView(parent::P4estMesh{NDIMS, NDIMS_AMBIENT, RealT}, end @inline Base.ndims(::P4estMeshView{NDIMS}) where {NDIMS} = NDIMS -@inline Base.real(::P4estMeshView{NDIMS, NDIMS_AMBIENT, RealT}) where {NDIMS, NDIMS_AMBIENT, RealT} = RealT +@inline Base.real(::P4estMeshView{NDIMS, NDIMS_AMBIENT, RealT}) where {NDIMS, +NDIMS_AMBIENT, +RealT} = RealT +# Extract interfaces, boundaries and parent element ids from the neighbors. function extract_p4est_mesh_view(elements_parent, interfaces_parent, boundaries_parent, @@ -60,20 +63,32 @@ function extract_p4est_mesh_view(elements_parent, mesh.cell_ids] @views elements.surface_flux_values .= elements_parent.surface_flux_values[.., mesh.cell_ids] - # Extract interfaces that belong to mesh view + # Extract interfaces that belong to mesh view. 
interfaces = extract_interfaces(mesh, interfaces_parent) - return elements, interfaces, boundaries_parent, mortars_parent + # Extract boundaries of this mesh view. + boundaries = extract_boundaries(mesh, boundaries_parent, interfaces_parent, + interfaces) + + # Get the parent element ids of the neighbors. + neighbor_ids_parent = extract_neighbor_ids_parent(mesh, boundaries_parent, + interfaces_parent, + boundaries) + + return elements, interfaces, boundaries, mortars_parent, neighbor_ids_parent end # Remove all interfaces that have a tuple of neighbor_ids where at least one is -# not part of this meshview, i.e. mesh.cell_ids, and return the new interface container +# not part of this mesh view, i.e. mesh.cell_ids, and return the new interface container. function extract_interfaces(mesh::P4estMeshView, interfaces_parent) # Identify interfaces that need to be retained mask = BitArray(undef, ninterfaces(interfaces_parent)) + # Loop over all interfaces (index 2). for interface in 1:size(interfaces_parent.neighbor_ids)[2] - mask[interface] = (interfaces_parent.neighbor_ids[1, interface] in mesh.cell_ids) && - (interfaces_parent.neighbor_ids[2, interface] in mesh.cell_ids) + mask[interface] = (interfaces_parent.neighbor_ids[1, + interface] in mesh.cell_ids) && + (interfaces_parent.neighbor_ids[2, + interface] in mesh.cell_ids) end # Create deepcopy to get completely independent interfaces container @@ -85,39 +100,208 @@ function extract_interfaces(mesh::P4estMeshView, interfaces_parent) @views interfaces.node_indices .= interfaces_parent.node_indices[.., mask] @views neighbor_ids = interfaces_parent.neighbor_ids[.., mask] - # Transform the global (parent) indices into local (view) indices. + # Transform the parent indices into view indices. 
interfaces.neighbor_ids = zeros(Int, size(neighbor_ids)) for interface in 1:size(neighbor_ids)[2] interfaces.neighbor_ids[1, interface] = findall(id -> id == - neighbor_ids[1, interface], + neighbor_ids[1, + interface], mesh.cell_ids)[1] interfaces.neighbor_ids[2, interface] = findall(id -> id == - neighbor_ids[2, interface], + neighbor_ids[2, + interface], mesh.cell_ids)[1] end return interfaces end +# Remove all boundaries that are not part of this p4est mesh view and add new boundaries +# that were interfaces of the parent mesh. +function extract_boundaries(mesh::P4estMeshView{2}, + boundaries_parent, interfaces_parent, + interfaces) + # Remove all boundaries that are not part of this p4est mesh view. + boundaries = deepcopy(boundaries_parent) + mask = BitArray(undef, nboundaries(boundaries_parent)) + for boundary in 1:nboundaries(boundaries_parent) + mask[boundary] = boundaries_parent.neighbor_ids[boundary] in mesh.cell_ids + end + boundaries.neighbor_ids = parent_cell_id_to_view(boundaries_parent.neighbor_ids[mask], + mesh) + boundaries.name = boundaries_parent.name[mask] + boundaries.node_indices = boundaries_parent.node_indices[mask] + + # Add new boundaries that were interfaces of the parent mesh. + # Loop over all interfaces (index 2). + for interface in 1:ninterfaces(interfaces_parent) + # Create new boundary if exactly one of the neighbor cells is in the mesh view ("exclusive or" with ⊻) + if ((interfaces_parent.neighbor_ids[1, interface] in mesh.cell_ids) ⊻ + (interfaces_parent.neighbor_ids[2, interface] in mesh.cell_ids)) + # Determine which of the ids is part of the mesh view. + if interfaces_parent.neighbor_ids[1, interface] in mesh.cell_ids + neighbor_id = interfaces_parent.neighbor_ids[1, interface] + view_idx = 1 + else + neighbor_id = interfaces_parent.neighbor_ids[2, interface] + view_idx = 2 + end + + # Update the neighbor ids. 
+ push!(boundaries.neighbor_ids, + parent_cell_id_to_view(neighbor_id, mesh)) + # Update the boundary names to reflect where the neighboring cell is + # relative to this one, i.e. left, right, up, down. + # In 3d one would need to add the third dimension. + if (interfaces_parent.node_indices[view_idx, interface] == + (:end, :i_forward)) + push!(boundaries.name, :x_pos) + elseif (interfaces_parent.node_indices[view_idx, interface] == + (:begin, :i_forward)) + push!(boundaries.name, :x_neg) + elseif (interfaces_parent.node_indices[view_idx, interface] == + (:i_forward, :end)) + push!(boundaries.name, :y_pos) + else + push!(boundaries.name, :y_neg) + end + + # Update the node indices. + push!(boundaries.node_indices, + interfaces_parent.node_indices[view_idx, interface]) + end + end + + # Create the boundary vector for u, which will be populated later. + n_dims = ndims(boundaries) + n_nodes = size(boundaries.u, 2) + n_variables = size(boundaries.u, 1) + capacity = length(boundaries.neighbor_ids) + + resize!(boundaries._u, n_variables * n_nodes^(n_dims - 1) * capacity) + boundaries.u = unsafe_wrap(Array, pointer(boundaries._u), + (n_variables, ntuple(_ -> n_nodes, n_dims - 1)..., + capacity)) + + return boundaries +end + +# Extract the ids of the neighboring elements using the parent mesh indexing. +# For every boundary of the mesh view find the neighboring cell id in global (parent) indexing. +# Such neighboring cells are either inside the domain and have an interface +# in the parent mesh, or they are physical boundaries for which we then +# construct a periodic coupling by assigning as neighbor id the cell id +# on the other end of the domain. +function extract_neighbor_ids_parent(mesh::P4estMeshView, + boundaries_parent, interfaces_parent, + boundaries) + # Determine the parent indices of the neighboring elements. 
+ neighbor_ids_parent = similar(boundaries.neighbor_ids) + for (idx, id) in enumerate(boundaries.neighbor_ids) + parent_id = mesh.cell_ids[id] + # Find this id in the parent's interfaces. + for interface in eachindex(interfaces_parent.neighbor_ids[1, :]) + if (parent_id == interfaces_parent.neighbor_ids[1, interface] || + parent_id == interfaces_parent.neighbor_ids[2, interface]) + if parent_id == interfaces_parent.neighbor_ids[1, interface] + matching_boundary = 1 + else + matching_boundary = 2 + end + # Check if interfaces with this id have the right name/node_indices. + if (boundaries.name[idx] == + node_indices_to_name(interfaces_parent.node_indices[matching_boundary, + interface])) + if parent_id == interfaces_parent.neighbor_ids[1, interface] + neighbor_ids_parent[idx] = interfaces_parent.neighbor_ids[2, + interface] + else + neighbor_ids_parent[idx] = interfaces_parent.neighbor_ids[1, + interface] + end + end + end + end + + # Find this id in the parent's boundaries. + parent_xneg_cell_ids = boundaries_parent.neighbor_ids[boundaries_parent.name .== :x_neg] + parent_xpos_cell_ids = boundaries_parent.neighbor_ids[boundaries_parent.name .== :x_pos] + parent_yneg_cell_ids = boundaries_parent.neighbor_ids[boundaries_parent.name .== :y_neg] + parent_ypos_cell_ids = boundaries_parent.neighbor_ids[boundaries_parent.name .== :y_pos] + for (parent_idx, boundary) in enumerate(boundaries_parent.neighbor_ids) + if parent_id == boundary + # Check if boundaries with this id have the right name/node_indices. + if boundaries.name[idx] == boundaries_parent.name[parent_idx] + # Make the coupling periodic. 
+ if boundaries_parent.name[parent_idx] == :x_neg + neighbor_ids_parent[idx] = parent_xpos_cell_ids[findfirst(parent_xneg_cell_ids .== + boundary)] + elseif boundaries_parent.name[parent_idx] == :x_pos + neighbor_ids_parent[idx] = parent_xneg_cell_ids[findfirst(parent_xpos_cell_ids .== + boundary)] + elseif boundaries_parent.name[parent_idx] == :y_neg + neighbor_ids_parent[idx] = parent_ypos_cell_ids[findfirst(parent_yneg_cell_ids .== + boundary)] + elseif boundaries_parent.name[parent_idx] == :y_pos + neighbor_ids_parent[idx] = parent_yneg_cell_ids[findfirst(parent_ypos_cell_ids .== + boundary)] + else + error("Unknown boundary name: $(boundaries_parent.name[parent_idx])") + end + end + end + end + end + + return neighbor_ids_parent +end + +# Translate the interface indices into boundary names. +# This works only in 2d currently. +function node_indices_to_name(node_index) + if node_index == (:end, :i_forward) + return :x_pos + elseif node_index == (:begin, :i_forward) + return :x_neg + elseif node_index == (:i_forward, :end) + return :y_pos + elseif node_index == (:i_forward, :begin) + return :y_neg + else + error("Unknown node index: $node_index") + end +end + +# Convert a parent cell id to a view cell id in the mesh view. +function parent_cell_id_to_view(id::Integer, mesh::P4estMeshView) + # Find the index of the cell id in the mesh view + view_id = searchsortedfirst(mesh.cell_ids, id) + + return view_id +end + +# Convert an array of parent cell ids to view cell ids in the mesh view. +function parent_cell_id_to_view(ids::AbstractArray, mesh::P4estMeshView) + # Find the index of the cell id in the mesh view + view_id = zeros(Int, length(ids)) + for i in eachindex(ids) + view_id[i] = parent_cell_id_to_view(ids[i], mesh) + end + return view_id +end + # Does not save the mesh itself to an HDF5 file. Instead saves important attributes # of the mesh, like its size and the type of boundary mapping function. 
# Then, within Trixi2Vtk, the P4estMeshView and its node coordinates are reconstructured from # these attributes for plotting purposes # | Warning: This overwrites any existing mesh file, either for a mesh view or parent mesh. -function save_mesh_file(mesh::P4estMeshView, output_directory, timestep, - mpi_parallel::False) +function save_mesh_file(mesh::P4estMeshView, output_directory; system = "", + timestep = 0) # Create output directory (if it does not exist) mkpath(output_directory) - # Determine file name based on existence of meaningful time step - if timestep > 0 - filename = joinpath(output_directory, @sprintf("mesh_%09d.h5", timestep)) - p4est_filename = @sprintf("p4est_data_%09d", timestep) - else - filename = joinpath(output_directory, "mesh.h5") - p4est_filename = "p4est_data" - end - + filename = joinpath(output_directory, "mesh.h5") + p4est_filename = "p4est_data" p4est_file = joinpath(output_directory, p4est_filename) # Save the complete connectivity and `p4est` data to disk. diff --git a/src/semidiscretization/semidiscretization_coupled_p4est.jl b/src/semidiscretization/semidiscretization_coupled_p4est.jl new file mode 100644 index 00000000000..18fc21c8f50 --- /dev/null +++ b/src/semidiscretization/semidiscretization_coupled_p4est.jl @@ -0,0 +1,500 @@ +# By default, Julia/LLVM does not use fused multiply-add operations (FMAs). +# Since these FMAs can increase the performance of many numerical algorithms, +# we need to opt-in explicitly. +# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details. +@muladd begin +#! format: noindent + +""" + SemidiscretizationCoupledP4est + +Specialized semidiscretization routines for coupled problems using P4est mesh views. +This is analogous to the implementation for structured meshes. +[`semidiscretize`](@ref) will return an `ODEProblem` that synchronizes time steps between the semidiscretizations. +Each call of `rhs!` will call `rhs!` for each semidiscretization individually. 
The semidiscretizations can be coupled by gluing meshes together using [`BoundaryConditionCoupledP4est`](@ref).
+ parent_cell_ids = 1:size(semis[1].mesh.parent.tree_node_coordinates)[end] + view_cell_ids = zeros(Int, length(parent_cell_ids)) + mesh_ids = zeros(Int, length(parent_cell_ids)) + for i in eachindex(semis) + view_cell_ids[semis[i].mesh.cell_ids] = parent_cell_id_to_view(parent_cell_ids[semis[i].mesh.cell_ids], + semis[i].mesh) + mesh_ids[semis[i].mesh.cell_ids] .= i + end + + performance_counter = PerformanceCounter() + + SemidiscretizationCoupledP4est{typeof(semis), typeof(u_indices), + typeof(performance_counter)}(semis, u_indices, + performance_counter, + parent_cell_ids, + view_cell_ids, + mesh_ids) +end + +function Base.show(io::IO, ::MIME"text/plain", semi::SemidiscretizationCoupledP4est) + @nospecialize semi # reduce precompilation time + + if get(io, :compact, false) + show(io, semi) + else + summary_header(io, "SemidiscretizationCoupledP4est") + summary_line(io, "#spatial dimensions", ndims(semi.semis[1])) + summary_line(io, "#systems", nsystems(semi)) + for i in eachsystem(semi) + summary_line(io, "system", i) + mesh, equations, solver, _ = mesh_equations_solver_cache(semi.semis[i]) + summary_line(increment_indent(io), "mesh", mesh |> typeof |> nameof) + summary_line(increment_indent(io), "equations", + equations |> typeof |> nameof) + summary_line(increment_indent(io), "initial condition", + semi.semis[i].initial_condition) + # no boundary conditions since that could be too much + summary_line(increment_indent(io), "source terms", + semi.semis[i].source_terms) + summary_line(increment_indent(io), "solver", solver |> typeof |> nameof) + end + summary_line(io, "total #DOFs per field", ndofsglobal(semi)) + summary_footer(io) + end +end + +function print_summary_semidiscretization(io::IO, semi::SemidiscretizationCoupledP4est) + show(io, MIME"text/plain"(), semi) + println(io, "\n") + for i in eachsystem(semi) + mesh, equations, solver, _ = mesh_equations_solver_cache(semi.semis[i]) + summary_header(io, "System #$i") + + summary_line(io, "mesh", mesh |> 
typeof |> nameof) + show(increment_indent(io), MIME"text/plain"(), mesh) + + summary_line(io, "equations", equations |> typeof |> nameof) + show(increment_indent(io), MIME"text/plain"(), equations) + + summary_line(io, "solver", solver |> typeof |> nameof) + show(increment_indent(io), MIME"text/plain"(), solver) + + summary_footer(io) + println(io, "\n") + end +end + +@inline nsystems(semi::SemidiscretizationCoupledP4est) = length(semi.semis) + +@inline eachsystem(semi::SemidiscretizationCoupledP4est) = Base.OneTo(nsystems(semi)) + +@inline Base.real(semi::SemidiscretizationCoupledP4est) = promote_type(real.(semi.semis)...) + +@inline function ndofs(semi::SemidiscretizationCoupledP4est) + return sum(ndofs, semi.semis) +end + +""" + ndofsglobal(semi::SemidiscretizationCoupledP4est) + +Return the global number of degrees of freedom associated with each scalar variable across all MPI ranks, and summed up over all coupled systems. +This is the same as [`ndofs`](@ref) for simulations running in serial or +parallelized via threads. It will in general be different for simulations +running in parallel with MPI. +""" +@inline function ndofsglobal(semi::SemidiscretizationCoupledP4est) + return sum(ndofsglobal, semi.semis) +end + +function compute_coefficients(t, semi::SemidiscretizationCoupledP4est) + @unpack u_indices = semi + + u_ode = Vector{real(semi)}(undef, u_indices[end][end]) + + # Distribute the partial solution vectors onto the global one. + @threaded for i in eachsystem(semi) + # Call `compute_coefficients` in `src/semidiscretization/semidiscretization.jl` + u_ode[u_indices[i]] .= compute_coefficients(t, semi.semis[i]) + end + + return u_ode +end + +@inline function get_system_u_ode(u_ode, index, semi::SemidiscretizationCoupledP4est) + return @view u_ode[semi.u_indices[index]] +end + +# RHS call for the coupled system. 
+function rhs!(du_ode, u_ode, semi::SemidiscretizationCoupledP4est, t) + time_start = time_ns() + + n_nodes = length(semi.semis[1].mesh.parent.nodes) + # Reformat the parent solutions vector. + u_ode_reformatted = Vector{real(semi)}(undef, ndofs(semi)) + u_ode_reformatted_reshape = reshape(u_ode_reformatted, + (n_nodes, + n_nodes, + length(semi.mesh_ids))) + # Extract the parent solution vector from the local solutions. + foreach_enumerate(semi.semis) do (i, semi_) + system_ode = get_system_u_ode(u_ode, i, semi) + system_ode_reshape = reshape(system_ode, + (n_nodes, n_nodes, + Int(length(system_ode) / + n_nodes^ndims(semi_.mesh)))) + u_ode_reformatted_reshape[:, :, semi.mesh_ids .== i] .= system_ode_reshape + end + + # Call rhs! for each semidiscretization + foreach_enumerate(semi.semis) do (i, semi_) + u_loc = get_system_u_ode(u_ode, i, semi) + du_loc = get_system_u_ode(du_ode, i, semi) + rhs!(du_loc, u_loc, u_ode_reformatted, semi, semi_, t) + end + + runtime = time_ns() - time_start + put!(semi.performance_counter, runtime) + + return nothing +end + +# RHS call for the local system. +# Here we require the data from u_parent for each semidiscretization in order +# to exchange the correct boundary values. +function rhs!(du_ode, u_ode, u_parent, semis, + semi::SemidiscretizationHyperbolic, t) + @unpack mesh, equations, boundary_conditions, source_terms, solver, cache = semi + + u = wrap_array(u_ode, mesh, equations, solver, cache) + du = wrap_array(du_ode, mesh, equations, solver, cache) + + time_start = time_ns() + @trixi_timeit timer() "rhs!" 
rhs!(du, u, t, u_parent, semis, mesh, equations, + boundary_conditions, source_terms, solver, cache) + runtime = time_ns() - time_start + put!(semi.performance_counter, runtime) + + return nothing +end + +################################################################################ +### AnalysisCallback +################################################################################ + +""" + AnalysisCallbackCoupledP4est(semi, callbacks...) + +Combine multiple analysis callbacks for coupled simulations with a +[`SemidiscretizationCoupled`](@ref). For each coupled system, an indididual +[`AnalysisCallback`](@ref) **must** be created and passed to the `AnalysisCallbackCoupledP4est` **in +order**, i.e., in the same sequence as the indidvidual semidiscretizations are stored in the +`SemidiscretizationCoupled`. + +!!! warning "Experimental code" + This is an experimental feature and can change any time. +""" +struct AnalysisCallbackCoupledP4est{CB} + callbacks::CB +end + +# Convenience constructor for the coupled callback that gets called directly from the elixirs +function AnalysisCallbackCoupledP4est(semi_coupled, callbacks...) + if length(callbacks) != nsystems(semi_coupled) + error("an AnalysisCallbackCoupledP4est requires one AnalysisCallback for each semidiscretization") + end + + analysis_callback_coupled = AnalysisCallbackCoupledP4est{typeof(callbacks)}(callbacks) + + # This callback is triggered if any of its subsidiary callbacks' condition is triggered + condition = (u, t, integrator) -> any(callbacks) do callback + callback.condition(u, t, integrator) + end + + DiscreteCallback(condition, analysis_callback_coupled, + save_positions = (false, false), + initialize = initialize!) +end + +# used for error checks and EOC analysis +function (cb::DiscreteCallback{Condition, Affect!})(sol) where {Condition, + Affect! <: + AnalysisCallbackCoupledP4est + } + semi_coupled = sol.prob.p + u_ode_coupled = sol.u[end] + @unpack callbacks = cb.affect! 
+ + uEltype = real(semi_coupled) + n_vars_upto_semi = cumsum(nvariables(semi_coupled.semis[i].equations) + for i in eachindex(semi_coupled.semis))[begin:end] + error_indices = Array([1, 1 .+ n_vars_upto_semi...]) + length_error_array = sum(nvariables(semi_coupled.semis[i].equations) + for i in eachindex(semi_coupled.semis)) + l2_error_collection = uEltype[] + linf_error_collection = uEltype[] + for i in eachsystem(semi_coupled) + analysis_callback = callbacks[i].affect! + @unpack analyzer = analysis_callback + cache_analysis = analysis_callback.cache + + semi = semi_coupled.semis[i] + u_ode = get_system_u_ode(u_ode_coupled, i, semi_coupled) + + l2_error, + linf_error = calc_error_norms(u_ode, sol.t[end], analyzer, semi, + cache_analysis) + append!(l2_error_collection, l2_error) + append!(linf_error_collection, linf_error) + end + + return (; l2 = l2_error_collection, linf = linf_error_collection) +end + +################################################################################ +### SaveSolutionCallback +################################################################################ + +# Save mesh for a coupled semidiscretization, which contains multiple meshes internally +function save_mesh(semi::SemidiscretizationCoupledP4est, output_directory, timestep = 0) + for i in eachsystem(semi) + mesh, _, _, _ = mesh_equations_solver_cache(semi.semis[i]) + + if mesh.unsaved_changes + mesh.current_filename = save_mesh_file(mesh, output_directory; + system = string(i), + timestep = timestep) + mesh.unsaved_changes = false + end + end + return nothing +end + +@inline function save_solution_file(semi::SemidiscretizationCoupledP4est, u_ode, + solution_callback, + integrator) + @unpack semis = semi + + for i in eachsystem(semi) + u_ode_slice = get_system_u_ode(u_ode, i, semi) + save_solution_file(semis[i], u_ode_slice, solution_callback, integrator, + system = i) + end + return nothing +end + 
+################################################################################ +### StepsizeCallback +################################################################################ + +# In case of coupled system, use minimum timestep over all systems +# Case for constant `cfl_number`. +function calculate_dt(u_ode, t, cfl_advective, cfl_diffusive, + semi::SemidiscretizationCoupledP4est) + dt = minimum(eachsystem(semi)) do i + u_ode_slice = get_system_u_ode(u_ode, i, semi) + calculate_dt(u_ode_slice, t, cfl_advective, cfl_diffusive, semi.semis[i]) + end + + return dt +end + +################################################################################ +### Boundary conditions +################################################################################ + +""" + BoundaryConditionCoupledP4est(coupling_converter) + +Boundary condition struct where the user can specify the coupling converter function. + +# Arguments +- `coupling_converter::CouplingConverter`: function to call for converting the solution + state of one system to the other system +""" +mutable struct BoundaryConditionCoupledP4est{CouplingConverter} + coupling_converter::CouplingConverter + + function BoundaryConditionCoupledP4est(coupling_converter) + new{typeof(coupling_converter)}(coupling_converter) + end +end + +""" +Extract the boundary values from the neighboring element. +This requires values from other mesh views. +This currently only works for Cartesian meshes. +""" +function (boundary_condition::BoundaryConditionCoupledP4est)(u_inner, mesh, equations, + cache, + i_index, j_index, + element_index, + normal_direction, + surface_flux_function, + direction, + u_ode_coupled) + n_nodes = length(mesh.parent.nodes) + # Using a projection onto e_x, -e_x, e_y, -e_y to determine which way our boundary interfaces points to. + # Knowing this, we then find the cell index in the global (parent) space of the neighboring cell. 
+ if abs(sum(normal_direction .* (1.0, 0.0))) > + abs(sum(normal_direction .* (0.0, 1.0))) + if sum(normal_direction .* (1.0, 0.0)) > + sum(normal_direction .* (-1.0, 0.0)) + cell_index_parent = cache.neighbor_ids_parent[findfirst((cache.boundaries.name .== + :x_pos) .* + (cache.boundaries.neighbor_ids .== + element_index))] + else + cell_index_parent = cache.neighbor_ids_parent[findfirst((cache.boundaries.name .== + :x_neg) .* + (cache.boundaries.neighbor_ids .== + element_index))] + end + i_index_g = i_index + # Make sure we do not leave the domain. + if i_index == n_nodes + i_index_g = 1 + elseif i_index == 1 + i_index_g = n_nodes + end + j_index_g = j_index + else + if sum(normal_direction .* (0.0, 1.0)) > sum(normal_direction .* (0.0, -1.0)) + cell_index_parent = cache.neighbor_ids_parent[findfirst((cache.boundaries.name .== + :y_pos) .* + (cache.boundaries.neighbor_ids .== + element_index))] + else + cell_index_parent = cache.neighbor_ids_parent[findfirst((cache.boundaries.name .== + :y_neg) .* + (cache.boundaries.neighbor_ids .== + element_index))] + end + j_index_g = j_index + # Make sure we do not leave the domain. + if j_index == n_nodes + j_index_g = 1 + elseif j_index == 1 + j_index_g = n_nodes + end + i_index_g = i_index + end + # Perform integer division to get the right shape of the array. 
+ u_parent_reshape = reshape(u_ode_coupled, + (n_nodes, n_nodes, + length(u_ode_coupled) ÷ n_nodes^ndims(mesh.parent))) + u_boundary = SVector(u_parent_reshape[i_index_g, j_index_g, cell_index_parent]) + + # u_boundary = u_inner + orientation = normal_direction + + # Calculate boundary flux + flux = surface_flux_function(u_inner, u_boundary, orientation, equations) + + return flux +end + +function calc_boundary_flux!(cache, t, boundary_condition::BC, boundary_indexing, + mesh::P4estMeshView{2}, + equations, surface_integral, dg::DG, u_parent) where {BC} + @unpack boundaries = cache + @unpack surface_flux_values = cache.elements + index_range = eachnode(dg) + + @threaded for local_index in eachindex(boundary_indexing) + # Use the local index to get the global boundary index from the pre-sorted list + boundary = boundary_indexing[local_index] + + # Get information on the adjacent element, compute the surface fluxes, + # and store them + element = boundaries.neighbor_ids[boundary] + node_indices = boundaries.node_indices[boundary] + direction = indices2direction(node_indices) + + i_node_start, i_node_step = index_to_start_step_2d(node_indices[1], index_range) + j_node_start, j_node_step = index_to_start_step_2d(node_indices[2], index_range) + + i_node = i_node_start + j_node = j_node_start + for node in eachnode(dg) + calc_boundary_flux!(surface_flux_values, t, boundary_condition, + mesh, have_nonconservative_terms(equations), + equations, surface_integral, dg, cache, + i_node, j_node, + node, direction, element, boundary, + u_parent) + + i_node += i_node_step + j_node += j_node_step + end + end + return nothing +end + +# Iterate over tuples of boundary condition types and associated indices +# in a type-stable way using "lispy tuple programming". 
+function calc_boundary_flux_by_type!(cache, t, BCs::NTuple{N, Any}, + BC_indices::NTuple{N, Vector{Int}}, + mesh::P4estMeshView, + equations, surface_integral, dg::DG, + u_parent) where {N} + # Extract the boundary condition type and index vector + boundary_condition = first(BCs) + boundary_condition_indices = first(BC_indices) + # Extract the remaining types and indices to be processed later + remaining_boundary_conditions = Base.tail(BCs) + remaining_boundary_condition_indices = Base.tail(BC_indices) + + # process the first boundary condition type + calc_boundary_flux!(cache, t, boundary_condition, boundary_condition_indices, + mesh, equations, surface_integral, dg, u_parent) + + # recursively call this method with the unprocessed boundary types + calc_boundary_flux_by_type!(cache, t, remaining_boundary_conditions, + remaining_boundary_condition_indices, + mesh, equations, surface_integral, dg, u_parent) + + return nothing +end + +# terminate the type-stable iteration over tuples +function calc_boundary_flux_by_type!(cache, t, BCs::Tuple{}, BC_indices::Tuple{}, + mesh::P4estMeshView, + equations, surface_integral, dg::DG, u_parent) + return nothing +end +end # @muladd diff --git a/src/semidiscretization/semidiscretization_hyperbolic.jl b/src/semidiscretization/semidiscretization_hyperbolic.jl index cd20fbf8e3f..afdaf93d520 100644 --- a/src/semidiscretization/semidiscretization_hyperbolic.jl +++ b/src/semidiscretization/semidiscretization_hyperbolic.jl @@ -215,7 +215,8 @@ end # Allow NamedTuple for P4estMesh, UnstructuredMesh2D, and T8codeMesh # define in two functions to resolve ambiguities function digest_boundary_conditions(boundary_conditions::NamedTuple, - mesh::Union{P4estMesh{2}, UnstructuredMesh2D, + mesh::Union{P4estMesh{2}, P4estMeshView{2}, + UnstructuredMesh2D, T8codeMesh{2}}, solver, cache) return UnstructuredSortedBoundaryTypes(boundary_conditions, cache) @@ -228,7 +229,8 @@ function digest_boundary_conditions(boundary_conditions::NamedTuple, end 
function digest_boundary_conditions(boundary_conditions::UnstructuredSortedBoundaryTypes, - mesh::Union{P4estMesh{2}, UnstructuredMesh2D, + mesh::Union{P4estMesh{2}, P4estMeshView{2}, + UnstructuredMesh2D, T8codeMesh{2}}, solver, cache) return boundary_conditions @@ -243,7 +245,8 @@ end # allow passing a single BC that get converted into a named tuple of BCs # on (mapped) hypercube domains function digest_boundary_conditions(boundary_conditions, - mesh::Union{P4estMesh{2}, UnstructuredMesh2D, + mesh::Union{P4estMesh{2}, P4estMeshView{2}, + UnstructuredMesh2D, T8codeMesh{2}}, solver, cache) bcs = (; x_neg = boundary_conditions, x_pos = boundary_conditions, diff --git a/src/solvers/dgsem_p4est/dg.jl b/src/solvers/dgsem_p4est/dg.jl index 7893dbed5f7..dddef6999c4 100644 --- a/src/solvers/dgsem_p4est/dg.jl +++ b/src/solvers/dgsem_p4est/dg.jl @@ -48,17 +48,16 @@ function create_cache(mesh::P4estMeshView, equations::AbstractEquations, dg::DG, mortars_parent = init_mortars(mesh.parent, equations, dg.basis, elements_parent) # Extract data for views. 
- elements, interfaces, boundaries, mortars = extract_p4est_mesh_view(elements_parent, - interfaces_parent, - boundaries_parent, - mortars_parent, - mesh, - equations, - dg, - uEltype) - - # Container cache - cache = (; elements, interfaces, boundaries, mortars) + elements, interfaces, boundaries, mortars, neighbor_ids_parent = extract_p4est_mesh_view(elements_parent, + interfaces_parent, + boundaries_parent, + mortars_parent, + mesh, + equations, + dg, + uEltype) + + cache = (; elements, interfaces, boundaries, mortars, neighbor_ids_parent) # Add Volume-Integral cache cache = (; cache..., diff --git a/src/solvers/dgsem_p4est/dg_2d.jl b/src/solvers/dgsem_p4est/dg_2d.jl index d4cd609c6bd..846d5093c6d 100644 --- a/src/solvers/dgsem_p4est/dg_2d.jl +++ b/src/solvers/dgsem_p4est/dg_2d.jl @@ -332,6 +332,15 @@ function prolong2boundaries!(cache, u, return nothing end +# We require this function definition, as the function calls for the +# coupled simulations pass the u_parent variable +# Note: Since the implementation is identical, we forward to the original function +function prolong2boundaries!(cache, u, u_parent, semis, + mesh::P4estMeshView{2}, + equations, surface_integral, dg::DG) + return prolong2boundaries!(cache, u, mesh, equations, dg) +end + function calc_boundary_flux!(cache, t, boundary_condition::BC, boundary_indexing, mesh::Union{P4estMesh{2}, T8codeMesh{2}}, equations, surface_integral, dg::DG) where {BC} @@ -402,6 +411,36 @@ end return nothing end +# inlined version of the boundary flux calculation along a physical interface +@inline function calc_boundary_flux!(surface_flux_values, t, boundary_condition, + mesh::P4estMeshView{2}, + nonconservative_terms::False, equations, + surface_integral, dg::DG, cache, + i_index, j_index, + node_index, direction_index, element_index, + boundary_index, u_parent) + @unpack boundaries = cache + @unpack contravariant_vectors = cache.elements + @unpack surface_flux = surface_integral + + # Extract solution data from 
boundary container + u_inner = get_node_vars(boundaries.u, equations, dg, node_index, boundary_index) + + # Outward-pointing normal direction (not normalized) + normal_direction = get_normal_direction(direction_index, contravariant_vectors, + i_index, j_index, element_index) + + flux_ = boundary_condition(u_inner, mesh, equations, cache, i_index, j_index, + element_index, normal_direction, surface_flux, + normal_direction, u_parent) + + # Copy flux to element storage in the correct orientation + for v in eachvariable(equations) + surface_flux_values[v, node_index, direction_index, element_index] = flux_[v] + end + return nothing +end + # inlined version of the boundary flux with nonconservative terms calculation along a physical interface @inline function calc_boundary_flux!(surface_flux_values, t, boundary_condition, mesh::Union{P4estMesh{2}, T8codeMesh{2}}, @@ -498,6 +537,17 @@ end return nothing end +# Function barrier for type stability +function calc_boundary_flux!(cache, t, boundary_conditions, + mesh::P4estMeshView, + equations, surface_integral, dg::DG, u_parent) + @unpack boundary_condition_types, boundary_indices = boundary_conditions + + calc_boundary_flux_by_type!(cache, t, boundary_condition_types, boundary_indices, + mesh, equations, surface_integral, dg, u_parent) + return nothing +end + function prolong2mortars!(cache, u, mesh::Union{P4estMesh{2}, P4estMeshView{2}, T8codeMesh{2}}, equations, @@ -800,4 +850,76 @@ function calc_surface_integral!(du, u, return nothing end + +# Call this for coupled P4estMeshView simulations. +# The coupling calculations (especially boundary conditions) require data from the parent mesh, which is why +# the additional variable u_parent is needed, compared to non-coupled systems. 
+function rhs!(du, u, t, u_parent, semis, + mesh::P4estMeshView{2}, + equations, + boundary_conditions, source_terms::Source, + dg::DG, cache) where {Source} + # Reset du + @trixi_timeit timer() "reset ∂u/∂t" set_zero!(du, dg, cache) + + # Calculate volume integral + @trixi_timeit timer() "volume integral" begin + calc_volume_integral!(du, u, mesh, + have_nonconservative_terms(equations), equations, + dg.volume_integral, dg, cache) + end + + # Prolong solution to interfaces + @trixi_timeit timer() "prolong2interfaces" begin + prolong2interfaces!(cache, u, mesh, equations, dg) + end + + # Calculate interface fluxes + @trixi_timeit timer() "interface flux" begin + calc_interface_flux!(cache.elements.surface_flux_values, mesh, + have_nonconservative_terms(equations), equations, + dg.surface_integral, dg, cache) + end + + # Prolong solution to boundaries + @trixi_timeit timer() "prolong2boundaries" begin + prolong2boundaries!(cache, u, u_parent, semis, mesh, equations, + dg.surface_integral, dg) + end + + # Calculate boundary fluxes + @trixi_timeit timer() "boundary flux" begin + calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, + dg.surface_integral, dg, u_parent) + end + + # Prolong solution to mortars + @trixi_timeit timer() "prolong2mortars" begin + prolong2mortars!(cache, u, mesh, equations, + dg.mortar, dg) + end + + # Calculate mortar fluxes + @trixi_timeit timer() "mortar flux" begin + calc_mortar_flux!(cache.elements.surface_flux_values, mesh, + have_nonconservative_terms(equations), equations, + dg.mortar, dg.surface_integral, dg, cache) + end + + # Calculate surface integrals + @trixi_timeit timer() "surface integral" begin + calc_surface_integral!(du, u, mesh, equations, + dg.surface_integral, dg, cache) + end + + # Apply Jacobian from mapping to reference element + @trixi_timeit timer() "Jacobian" apply_jacobian!(du, mesh, equations, dg, cache) + + # Calculate source terms + @trixi_timeit timer() "source terms" begin + calc_sources!(du, 
u, t, source_terms, equations, dg, cache) + end + + return nothing +end end # @muladd diff --git a/src/solvers/dgsem_unstructured/dg_2d.jl b/src/solvers/dgsem_unstructured/dg_2d.jl index 76a186b9fa6..34c907faf62 100644 --- a/src/solvers/dgsem_unstructured/dg_2d.jl +++ b/src/solvers/dgsem_unstructured/dg_2d.jl @@ -309,8 +309,7 @@ function prolong2boundaries!(cache, u, end function calc_boundary_flux!(cache, t, boundary_condition::BoundaryConditionPeriodic, - mesh::Union{UnstructuredMesh2D, P4estMesh, P4estMeshView, - T8codeMesh}, + mesh::Union{UnstructuredMesh2D, P4estMesh, T8codeMesh}, equations, surface_integral, dg::DG) @assert isempty(eachboundary(dg, cache)) @@ -319,8 +318,7 @@ end # Function barrier for type stability function calc_boundary_flux!(cache, t, boundary_conditions, - mesh::Union{UnstructuredMesh2D, P4estMesh, P4estMeshView, - T8codeMesh}, + mesh::Union{UnstructuredMesh2D, P4estMesh, T8codeMesh}, equations, surface_integral, dg::DG) @unpack boundary_condition_types, boundary_indices = boundary_conditions diff --git a/test/test_p4est_2d.jl b/test/test_p4est_2d.jl index c516e3bc4b7..4ebd2599314 100644 --- a/test/test_p4est_2d.jl +++ b/test/test_p4est_2d.jl @@ -110,22 +110,19 @@ end @test_allocations(Trixi.rhs!, semi, sol, 1000) end -@trixi_testset "elixir_advection_meshview.jl" begin - @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_meshview.jl"), - l2=[0.00013773915040249946], - linf=[0.0010140184322192658]) +@trixi_testset "elixir_advection_coupled.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_coupled.jl"), + l2=[0.00013318279010717573, 0.00013318279010712838], + linf=[0.0009605782290112996, 0.0009605782290100784]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) - @test_allocations(Trixi.rhs!, semi, sol, 1000) + @test_broken (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 # Ensure we cover the calculation of the node coordinates node_coordinates = 
typeof(parent_mesh.tree_node_coordinates)(undef, 2, ntuple(_ -> length(parent_mesh.nodes), 2)..., - length(mesh.cell_ids)) - result = Trixi.calc_node_coordinates!(node_coordinates, mesh, parent_mesh.nodes) - @test parent_mesh.tree_node_coordinates == result - + length(mesh1.cell_ids)) # Load the mesh file for code coverage. loaded_mesh = Trixi.load_mesh_serial(joinpath("out", "mesh.h5"); n_cells_max = 0, RealT = typeof(parent_mesh).parameters[3])