
Commit

Merge branch 'kharma-next' of github:afd-illinois/kharma into kharma-next
Ben Prather committed Aug 22, 2023
2 parents 60f933b + a479a50 commit bcb304b
Showing 44 changed files with 1,191 additions and 799 deletions.
27 changes: 1 addition & 26 deletions external/patches/parthenon-use-gr-coordinates.patch
@@ -32,29 +32,4 @@ index d1290dee..50bfc840 100644

namespace parthenon {

diff --git a/src/interface/data_collection.cpp b/src/interface/data_collection.cpp
index 6a1d72c9..b5ba609b 100644
--- a/src/interface/data_collection.cpp
+++ b/src/interface/data_collection.cpp
@@ -48,7 +48,7 @@ std::shared_ptr<T> DataCollection<T>::Add(const std::string &name,
if (it != containers_.end()) {
// check to make sure they are the same
if (!(*src == *(it->second))) {
- PARTHENON_THROW("Error attempting to add a Container to a Collection");
+ //PARTHENON_THROW("Error attempting to add a Container to a Collection");
}
return it->second;
}
diff --git a/src/interface/meshblock_data.cpp b/src/interface/meshblock_data.cpp
index 8d5dca57..0ab7dad8 100644
--- a/src/interface/meshblock_data.cpp
+++ b/src/interface/meshblock_data.cpp
@@ -440,7 +440,7 @@ MeshBlockData<T>::GetVariablesByUid(const std::vector<Uid_t> &uids) {

template <typename T>
void MeshBlockData<T>::Remove(const std::string &label) {
- throw std::runtime_error("MeshBlockData<T>::Remove not yet implemented");
+ varMap_.erase(label);
}

template <typename T>

30 changes: 9 additions & 21 deletions kharma/b_cd/b_cd.cpp
@@ -223,20 +223,7 @@ Real MaxDivB(MeshData<Real> *md)

TaskStatus PostStepDiagnostics(const SimTime& tm, MeshData<Real> *md)
{
- auto pmesh = md->GetMeshPointer();
-
- // Print this unless we quash everything
- int verbose = pmesh->packages.Get("Globals")->Param<int>("verbose");
- if (verbose >= 0) {
- static Reduce<Real> max_divb;
- max_divb.val = B_CD::MaxDivB(md);
- max_divb.StartReduce(0, MPI_MAX);
- while (max_divb.CheckReduce() == TaskStatus::incomplete);
-
- if(MPIRank0()) {
- std::cout << "Max DivB: " << max_divb.val << std::endl;
- }
- }
+ // TODO. Unify w/other B?

return TaskStatus::complete;
}
@@ -280,16 +267,17 @@ void FillOutput(MeshBlock *pmb, ParameterInput *pin)

void UpdateCtopMax(Mesh *pmesh, ParameterInput *pin, const SimTime &tm)
{
+ // TODO use new Reductions stuff for this
// Reduce and record the maximum sound speed on the grid, to propagate
// phi at that speed next step.
// Just needs to run after every step, so we use the KHARMA callback at that point.
- auto& params = pmesh->packages.Get("B_CD")->AllParams();
- static AllReduce<Real> ctop_max_last_r;
- ctop_max_last_r.val = params.Get<Real>("ctop_max");
- ctop_max_last_r.StartReduce(MPI_MAX);
- while (ctop_max_last_r.CheckReduce() == TaskStatus::incomplete);
- params.Update<Real>("ctop_max_last", ctop_max_last_r.val);
- params.Update<Real>("ctop_max", 0.0); // Reset for next max calculation
+ // auto& params = pmesh->packages.Get("B_CD")->AllParams();
+ // static AllReduce<Real> ctop_max_last_r;
+ // ctop_max_last_r.val = params.Get<Real>("ctop_max");
+ // ctop_max_last_r.StartReduce(MPI_MAX);
+ // while (ctop_max_last_r.CheckReduce() == TaskStatus::incomplete);
+ // params.Update<Real>("ctop_max_last", ctop_max_last_r.val);
+ // params.Update<Real>("ctop_max", 0.0); // Reset for next max calculation
}

} // namespace B_CD
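
The commented-out body above is the old blocking AllReduce pattern. A minimal sketch of the same logic ported to the Reductions::StartToAll / CheckOnAll interface used below in b_ct.cpp and b_flux_ct.cpp — the channel index (3) and the availability of a MeshData pointer md here are assumptions, not values defined by KHARMA:

    // Hypothetical port of UpdateCtopMax to the new Reductions interface
    auto& params = pmesh->packages.Get("B_CD")->AllParams();
    Reductions::StartToAll<Real>(md, 3, params.Get<Real>("ctop_max"), MPI_MAX); // channel 3 is a placeholder
    params.Update<Real>("ctop_max_last", Reductions::CheckOnAll<Real>(md, 3));
    params.Update<Real>("ctop_max", 0.0); // Reset for the next step's max
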
39 changes: 4 additions & 35 deletions kharma/b_cleanup/b_cleanup.cpp
@@ -38,6 +38,7 @@

#include "boundaries.hpp"
#include "decs.hpp"
#include "kharma.hpp"
#include "kharma_driver.hpp"
#include "grmhd.hpp"
#include "kharma.hpp"
@@ -217,7 +218,7 @@ TaskStatus B_Cleanup::CleanupDivergence(std::shared_ptr<MeshData<Real>>& md)
}

// Calculate/print inital max divB exactly as we would during run
- const double divb_start = B_FluxCT::GlobalMaxDivB(md.get());
+ const double divb_start = B_FluxCT::GlobalMaxDivB(md.get(), true);
if (divb_start < rel_tolerance && !always_solve) {
// If divB is "pretty good" and we allow not solving...
if (MPIRank0())
@@ -236,22 +237,8 @@ TaskStatus B_Cleanup::CleanupDivergence(std::shared_ptr<MeshData<Real>>& md)
KHARMADriver::SyncAllBounds(md);

// Add a solver container and associated MeshData
- for (auto& pmb : pmesh->block_list) {
- auto &base = pmb->meshblock_data.Get();
- pmb->meshblock_data.Add("solve", base);
- }
- // The "solve" container really only needs the RHS, the solution, and the scratch array dB
- // This does not affect the main container, but saves a *lot* of time not syncing
- // static variables.
- // There's no MeshData-wide 'Remove' so we go block-by-block
- for (auto& pmb : pmesh->block_list) {
- auto rc_s = pmb->meshblock_data.Get("solve");
- auto vars = rc_s->GetVariablesByFlag({Metadata::GetUserFlag("MHD")}).vars();
- for (auto var : vars) {
- rc_s->Remove(var->label());
- }
- }
- auto &msolve = pmesh->mesh_data.GetOrAdd("solve", 0);
+ std::vector<std::string> names = KHARMA::GetVariableNames(&pmesh->packages, Metadata::GetUserFlag("B_Cleanup"));
+ auto &msolve = pmesh->mesh_data.Add("solve", names);

// Create a TaskCollection of just the solve,
// execute it to perform BiCGStab iteration
@@ -285,24 +272,6 @@ TaskStatus B_Cleanup::CleanupDivergence(std::shared_ptr<MeshData<Real>>& md)
return TaskStatus::complete;
}

- TaskStatus B_Cleanup::RemoveExtraFields(BlockList_t &blocks)
- {
- // If we aren't needed to clean anything...
- if (! (blocks[0]->packages.Get("B_Cleanup")->Param<int>("cleanup_interval") > 0)) {
- // remove the internal BiCGStab variables by name,
- // to prevent them weighing down MPI exchanges
- // TODO anything FillGhost & not Conserved or Primitive
- for (auto& pmb : blocks) {
- auto rc_s = pmb->meshblock_data.Get();
- for (auto varlabel : {"pk0", "res0", "temp0", "divB_RHS", "p"}) {
- if (rc_s->HasVariable(varlabel))
- rc_s->Remove(varlabel);
- }
- }
- }
- return TaskStatus::complete;
- }

TaskStatus B_Cleanup::ApplyP(MeshData<Real> *msolve, MeshData<Real> *md)
{
// Apply on physical zones only, we'll be syncing/updating ghosts
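The replacement above builds the solver's reduced MeshData from a flag-selected name list instead of copying full per-block containers and pruning them, which is the sync-time saving the removed comment describes. The same two calls generalize to any tagged subset; a sketch, where "MyFlag" and "my_subset" are hypothetical names, not ones defined by this commit:

    // Collect only the variables carrying a given user flag into a named MeshData
    std::vector<std::string> names = KHARMA::GetVariableNames(&pmesh->packages, Metadata::GetUserFlag("MyFlag"));
    auto &msub = pmesh->mesh_data.Add("my_subset", names);
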
10 changes: 2 additions & 8 deletions kharma/b_cleanup/b_cleanup.hpp
@@ -67,14 +67,8 @@ TaskStatus CleanupDivergence(std::shared_ptr<MeshData<Real>>& md);
bool CleanupThisStep(Mesh* pmesh, int nstep);

- /**
- * Remove the extra solver fields which B_Cleanup added during initialization.
- * Must be run before every step as the meshblocks are reconstructed per-step from
- * package variable lists.
- */
- TaskStatus RemoveExtraFields(BlockList_t &blocks);

/**
- * Calculate the laplacian using divergence at corners
+ * Calculate the laplacian using divergence at corners.
* Extra MeshData arg is just to satisfy Parthenon solver calling convention
*/
TaskStatus CornerLaplacian(MeshData<Real>* md, const std::string& p_var, MeshData<Real>* md_again, const std::string& lap_var);

14 changes: 8 additions & 6 deletions kharma/b_ct/b_ct.cpp
@@ -383,13 +383,15 @@ double B_CT::BlockMaxDivB(MeshBlockData<Real> *rc)
return max_divb;
}

- double B_CT::GlobalMaxDivB(MeshData<Real> *md)
+ double B_CT::GlobalMaxDivB(MeshData<Real> *md, bool all_reduce)
{
- static AllReduce<Real> max_divb;
- max_divb.val = MaxDivB(md);
- max_divb.StartReduce(MPI_MAX);
- while (max_divb.CheckReduce() == TaskStatus::incomplete);
- return max_divb.val;
+ if (all_reduce) {
+ Reductions::StartToAll<Real>(md, 2, MaxDivB(md), MPI_MAX);
+ return Reductions::CheckOnAll<Real>(md, 2);
+ } else {
+ Reductions::Start<Real>(md, 2, MaxDivB(md), MPI_MAX);
+ return Reductions::Check<Real>(md, 2);
+ }
}

TaskStatus B_CT::PrintGlobalMaxDivB(MeshData<Real> *md, bool kill_on_large_divb)
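With the header change below, GlobalMaxDivB now covers two call patterns; an illustrative pair of call sites, not code from this commit:

    // Default: the reduction lands on rank 0 only, which is enough for printing diagnostics
    const double divb_print = B_CT::GlobalMaxDivB(md);
    // All-ranks reduction, for when every rank must branch on the value,
    // as in the tolerance check in B_Cleanup::CleanupDivergence above
    const double divb_everywhere = B_CT::GlobalMaxDivB(md, true);
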
18 changes: 1 addition & 17 deletions kharma/b_ct/b_ct.hpp
@@ -98,7 +98,7 @@ double BlockMaxDivB(MeshBlockData<Real> *rc);
/**
* Returns the global maximum value, rather than the maximum over this rank's MeshData
*/
- double GlobalMaxDivB(MeshData<Real> *md);
+ double GlobalMaxDivB(MeshData<Real> *md, bool all_reduce=false);

/**
* Diagnostics printed/computed after each step
@@ -124,22 +124,6 @@ void FillOutput(MeshBlock *pmb, ParameterInput *pin);
*/
void CalcDivB(MeshData<Real> *md, std::string divb_field_name="divB");

- // Reductions: FOR LATER
- // KOKKOS_INLINE_FUNCTION Real phi(REDUCE_FUNCTION_ARGS_EH)
- // {
- // // \Phi == \int |*F^1^0| * gdet * dx2 * dx3 == \int |B1| * gdet * dx2 * dx3
- // return 0.5 * m::abs(U(m_u.B1, k, j, i)); // factor of gdet already in cons.B
- // }
-
- // inline Real ReducePhi0(MeshData<Real> *md)
- // {
- // return Reductions::EHReduction(md, UserHistoryOperation::sum, phi, 0);
- // }
- // inline Real ReducePhi5(MeshData<Real> *md)
- // {
- // return Reductions::EHReduction(md, UserHistoryOperation::sum, phi, 5);
- // }

// Device functions
template<typename Global>
KOKKOS_INLINE_FUNCTION Real face_div(const GRCoordinates &G, Global &v, const int &ndim, const int &k, const int &j, const int &i)
17 changes: 10 additions & 7 deletions kharma/b_flux_ct/b_flux_ct.cpp
@@ -478,7 +478,8 @@ double MaxDivB(MeshData<Real> *md)
const IndexRange kb = IndexRange{kbl.s, kbl.e + (ndim > 2)};
const IndexRange block = IndexRange{0, B_U.GetDim(5)-1};

- // TODO Keep zone of max! Also applies to ctop.
+ // TODO Keep zone of max! See timestep calc
+ // Will need to translate them back to KS to make them useful though

// This is one kernel call per block, because each block will have different bounds.
// Could consolidate at the cost of lots of bounds checking.
@@ -504,13 +505,15 @@ double MaxDivB(MeshData<Real> *md)
return max_divb;
}

- double GlobalMaxDivB(MeshData<Real> *md)
+ double GlobalMaxDivB(MeshData<Real> *md, bool all_reduce)
{
- static AllReduce<Real> max_divb;
- max_divb.val = MaxDivB(md);
- max_divb.StartReduce(MPI_MAX);
- while (max_divb.CheckReduce() == TaskStatus::incomplete);
- return max_divb.val;
+ if (all_reduce) {
+ Reductions::StartToAll<Real>(md, 2, MaxDivB(md), MPI_MAX);
+ return Reductions::CheckOnAll<Real>(md, 2);
+ } else {
+ Reductions::Start<Real>(md, 2, MaxDivB(md), MPI_MAX);
+ return Reductions::Check<Real>(md, 2);
+ }
}

TaskStatus PrintGlobalMaxDivB(MeshData<Real> *md, bool kill_on_large_divb)
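The TODO above about keeping the zone of the maximum maps onto a max-plus-location reduction. A self-contained Kokkos sketch of that pattern on a plain View — a toy illustration, not KHARMA's MaxDivB kernel, and the flat-index packing is only for brevity:

    #include <Kokkos_Core.hpp>

    // Return the maximum value in a 3D view together with a flattened (k, j, i) index.
    // Call between Kokkos::initialize() and Kokkos::finalize().
    inline Kokkos::MaxLoc<double, int>::value_type max_with_location(Kokkos::View<double***> v)
    {
        using Reducer = Kokkos::MaxLoc<double, int>;
        Reducer::value_type result;
        const int n3 = v.extent(0), n2 = v.extent(1), n1 = v.extent(2);
        Kokkos::parallel_reduce("max_loc",
            Kokkos::MDRangePolicy<Kokkos::Rank<3>>({0, 0, 0}, {n3, n2, n1}),
            KOKKOS_LAMBDA(const int k, const int j, const int i, Reducer::value_type &lmax) {
                const double val = v(k, j, i);
                if (val > lmax.val) {
                    lmax.val = val;
                    lmax.loc = (k * n2 + j) * n1 + i; // unpack later to recover (k, j, i)
                }
            }, Reducer(result));
        return result; // result.val is the max, result.loc its flattened location
    }
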
20 changes: 7 additions & 13 deletions kharma/b_flux_ct/b_flux_ct.hpp
@@ -110,8 +110,10 @@ double MaxDivB(MeshData<Real> *md);

/**
* Returns the global maximum value, rather than the maximum over this rank's MeshData
+ *
+ * By default, only returns the correct value on rank 0 for printing
*/
- double GlobalMaxDivB(MeshData<Real> *md);
+ double GlobalMaxDivB(MeshData<Real> *md, bool all_reduce=false);

/**
* Diagnostics printed/computed after each step
@@ -124,7 +126,7 @@ TaskStatus PrintGlobalMaxDivB(MeshData<Real> *md, bool kill_on_large_divb=false)
*/
inline TaskStatus PostStepDiagnostics(const SimTime& tm, MeshData<Real> *md)
{
- auto& params = md->GetMeshPointer()->block_list[0]->packages.Get("B_FluxCT")->AllParams();
+ auto& params = md->GetMeshPointer()->packages.Get("B_FluxCT")->AllParams();
return PrintGlobalMaxDivB(md, params.Get<bool>("kill_on_large_divb"));
}

@@ -133,25 +135,17 @@ inline TaskStatus PostStepDiagnostics(const SimTime& tm, MeshData<Real> *md)
*/
void FillOutput(MeshBlock *pmb, ParameterInput *pin);
/**
- * Fill field "name" with divB
+ * Fill the field 'divb_field_name' with divB
*/
void CalcDivB(MeshData<Real> *md, std::string divb_field_name="divB");

- // Reductions: phi uses global machinery, but divB is too
- // Can also sum the hemispheres independently to be fancy (TODO?)
- KOKKOS_INLINE_FUNCTION Real phi(REDUCE_FUNCTION_ARGS_EH)
- {
- // \Phi == \int |*F^1^0| * gdet * dx2 * dx3 == \int |B1| * gdet * dx2 * dx3
- return 0.5 * m::abs(U(m_u.B1, k, j, i)); // factor of gdet already in cons.B
- }

inline Real ReducePhi0(MeshData<Real> *md)
{
- return Reductions::EHReduction(md, UserHistoryOperation::sum, phi, 0);
+ return Reductions::EHReduction<Reductions::Var::phi, Real>(md, UserHistoryOperation::sum, 0);
}
inline Real ReducePhi5(MeshData<Real> *md)
{
- return Reductions::EHReduction(md, UserHistoryOperation::sum, phi, 5);
+ return Reductions::EHReduction<Reductions::Var::phi, Real>(md, UserHistoryOperation::sum, 5);
}

/**
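For context, the reducers above compute the magnetic flux Φ described in the removed phi comment, at the horizon (zone 0) and at the i=5 shell. A usage sketch in the style of the file's other diagnostics — the print helper is hypothetical, and it assumes the reduced value is valid on rank 0, as the GlobalMaxDivB docstring describes for default reductions:

    // Hypothetical diagnostic: report horizon and i=5 magnetic flux after a step
    inline TaskStatus PrintPhi(MeshData<Real> *md)
    {
        const Real phi0 = ReducePhi0(md);
        const Real phi5 = ReducePhi5(md);
        if (MPIRank0())
            std::cout << "Phi_EH: " << phi0 << " Phi_5: " << phi5 << std::endl;
        return TaskStatus::complete;
    }
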
4 changes: 2 additions & 2 deletions kharma/boundaries/boundaries.cpp
@@ -76,8 +76,8 @@ std::shared_ptr<KHARMAPackage> KBoundaries::Initialize(ParameterInput *pin, std:

Metadata m_x1, m_x2, m_x3;
{
- // We can't use GetVariablesByFlag yet, so walk through and count manually
- int nvar = KHARMA::CountVars(packages.get(), Metadata::FillGhost);
+ // We can't use GetVariablesByFlag yet, so ask the packages
+ int nvar = KHARMA::PackDimension(packages.get(), Metadata::FillGhost);

// We also don't know the mesh size, since it's not constructed. We infer.
const int ng = pin->GetInteger("parthenon/mesh", "nghost");
1 change: 1 addition & 0 deletions kharma/coordinates/coordinate_utils.hpp
@@ -34,6 +34,7 @@
#pragma once

#include "decs.hpp"
#include "matrix.hpp"

/**
* Rotate a set of coordinates 'Xin' by 'angle' about the *y-axis*
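The docstring above describes a rotation about the y-axis. For reference, the standard form of that rotation applied to a 3-vector — a textbook identity written as a sketch, not the repository's implementation:

    // Rotate the spatial vector (x, y, z) by `angle` about the y-axis; requires <cmath>
    inline void rotate_about_y(const double angle, const double in[3], double out[3])
    {
        out[0] =  std::cos(angle) * in[0] + std::sin(angle) * in[2];
        out[1] =  in[1];
        out[2] = -std::sin(angle) * in[0] + std::cos(angle) * in[2];
    }
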