Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Reduce memory consumption for buffer pool #1150

Merged
merged 7 commits into from
Aug 22, 2024
Merged
Show file tree
Hide file tree
Changes from 5 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion src/bvals/comms/bnd_info.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -332,7 +332,7 @@ BndInfo BndInfo::GetSetBndInfo(MeshBlock *pmb, const NeighborBlock &nb,
out.buf_allocated = false;
} else {
printf("%i [rank: %i] -> %i [rank: %i] (Set %s) is in state %i.\n", nb.gid, nb.rank,
pmb->gid, Globals::my_rank, v->label().c_str(), buf_state);
pmb->gid, Globals::my_rank, v->label().c_str(), static_cast<int>(buf_state));
PARTHENON_FAIL("Buffer should be in a received state.");
}
return out;
Expand Down
52 changes: 45 additions & 7 deletions src/bvals/comms/build_boundary_buffers.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -16,10 +16,12 @@
//========================================================================================

#include <algorithm>
#include <cstddef>
#include <iostream> // debug
#include <memory>
#include <random>
#include <string>
#include <unordered_map>
#include <vector>

#include "bvals_in_one.hpp"
Expand All @@ -44,25 +46,61 @@ template <BoundaryType BTYPE>
void BuildBoundaryBufferSubset(std::shared_ptr<MeshData<Real>> &md,
Mesh::comm_buf_map_t &buf_map) {
Mesh *pmesh = md->GetMeshPointer();
std::unordered_map<int, int>
nbufs; // total (existing and new) number of buffers for given size

ForEachBoundary<BTYPE>(md, [&](auto pmb, sp_mbd_t /*rc*/, nb_t &nb, const sp_cv_t v) {
// Calculate the required size of the buffer for this boundary
int buf_size = GetBufferSize(pmb, nb, v);
if (pmb->gid == nb.gid && nb.offsets.IsCell()) buf_size = 0;

nbufs[buf_size] += 1; // relying on value init of int to 0 for initial entry
});

ForEachBoundary<BTYPE>(md, [&](auto pmb, sp_mbd_t /*rc*/, nb_t &nb, const sp_cv_t v) {
// Calculate the required size of the buffer for this boundary
int buf_size = GetBufferSize(pmb, nb, v);
if (pmb->gid == nb.gid && nb.offsets.IsCell()) buf_size = 0;

// Add a buffer pool if one does not exist for this size
using buf_t = buf_pool_t<Real>::base_t;
if (pmesh->pool_map.count(buf_size) == 0) {
pmesh->pool_map.emplace(std::make_pair(
buf_size, buf_pool_t<Real>([buf_size](buf_pool_t<Real> *pool) {
using buf_t = buf_pool_t<Real>::base_t;
// TODO(LFR): Make nbuf a user settable parameter
const int nbuf = 200;
buf_t chunk("pool buffer", buf_size * nbuf);
std::cerr << "Setting up a new pool for buffers of size " << buf_size << "\n";
// Might be worth discussing what a good default is.
// Using the number of packs, assumes that all blocks in a pack have fairly similar
// buffer configurations, which may or may not be a good approximation.
// An alternative would be "1", which would reduce the memory footprint, but
// increase the number of individual memory allocations.
const int64_t nbuf = pmesh->DefaultNumPartitions();
pmesh->pool_map.emplace(
buf_size, buf_pool_t<Real>([buf_size, nbuf](buf_pool_t<Real> *pool) {
std::cerr << "Dynamically adding " << nbuf << " buffers of size " << buf_size
<< " to a pool with current size " << pool->NumBuffersInPool()
<< " and future size " << pool->NumBuffersInPool() + nbuf << "\n";

const auto pool_size = nbuf * buf_size;
buf_t chunk("pool buffer", pool_size);
for (int i = 1; i < nbuf; ++i) {
pool->AddFreeObjectToPool(
buf_t(chunk, std::make_pair(i * buf_size, (i + 1) * buf_size)));
}
return buf_t(chunk, std::make_pair(0, buf_size));
})));
}));
}
// Now that the pool is guaranteed to exist we can add free objects of the required
// amount.
auto &pool = pmesh->pool_map.at(buf_size);
const std::int64_t new_buffers_req = nbufs.at(buf_size) - pool.NumBuffersInPool();
if (new_buffers_req > 0) {
std::cerr << "Reserving " << new_buffers_req << " new buffers of size " << buf_size
<< " to pool with " << pool.NumBuffersInPool() << " buffers because "
<< nbufs.at(buf_size) << " are required in total.\n";
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Maybe add a flag to optionally suppress the std::cerr outputs?

const auto pool_size = new_buffers_req * buf_size;
buf_t chunk("pool buffer", pool_size);
for (int i = 0; i < new_buffers_req; ++i) {
pool.AddFreeObjectToPool(
buf_t(chunk, std::make_pair(i * buf_size, (i + 1) * buf_size)));
}
}

const int receiver_rank = nb.rank;
Expand Down
2 changes: 2 additions & 0 deletions src/utils/object_pool.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -62,6 +62,8 @@ class ObjectPool {
std::cout << inuse_.size() << " used objects." << std::endl;
}

// Total number of objects this pool owns, counting both those currently
// checked out (in use) and those sitting on the free list.
auto NumBuffersInPool() const {
  const auto n_in_use = inuse_.size();
  const auto n_free = available_.size();
  return n_in_use + n_free;
}

std::uint64_t SizeInBytes() const {
constexpr std::uint64_t datum_size = sizeof(typename base_t::value_type);
std::uint64_t object_size = 0;
Expand Down
Loading