Skip to content

Commit

Permalink
Restructured ObjectPool to satisfy AutoPacket (or any other object) l…
Browse files Browse the repository at this point in the history
…ifecycle contract. Presently, AutoFilterTest hangs.

- The first call on any issued AutoPacket is Initialize() and last call is Finalize().
- The internal store of objects is now a vector of unique_ptr objects. This avoids calling m_final incorrectly and allows the release() method to be used when issuing objects.
- Move assignment between object pools now swaps pools instead of copying.
  • Loading branch information
GabrielHare committed Jul 30, 2014
1 parent 441a2f9 commit 6192549
Showing 1 changed file with 25 additions and 16 deletions.
41 changes: 25 additions & 16 deletions autowiring/ObjectPool.h
Original file line number Diff line number Diff line change
Expand Up @@ -95,7 +95,7 @@ class ObjectPool
// time the ClearCachedEntities method is called, and causes entities which might be trying
// to return to the pool to instead free themselves.
size_t m_poolVersion;
std::vector<std::shared_ptr<T>> m_objs;
std::vector<std::unique_ptr<T>> m_objs;

size_t m_maxPooled;
size_t m_limit;
Expand All @@ -109,18 +109,29 @@ class ObjectPool
std::function<T*()> m_alloc;

/// <summary>
/// Creates a shared pointer to wrap the specified object
/// Creates a shared pointer to wrap the specified object while it is issued
/// </summary>
/// <remarks>
/// The Initialize function is applied immediately when Wrap is called.
/// The Finalize function will be applied in the shared_ptr destructor.
/// </remarks>
std::shared_ptr<T> Wrap(T* pObj) {
// Initialize the issued object
m_initial(*pObj);

// Fill the shared pointer with the object we created, and ensure that we override
// the destructor so that the object is returned to the pool when it falls out of
// scope.
size_t poolVersion = m_poolVersion;
auto monitor = m_monitor;
std::function<void(T&)> final = m_final;

return std::shared_ptr<T>(
pObj,
[poolVersion, monitor](T* ptr) {
[poolVersion, monitor, final](T* ptr) {
// Finalize object before destruction or return to pool
final(*ptr);

// Default behavior will be to destroy the pointer
std::unique_ptr<T> unique(ptr);

Expand All @@ -139,6 +150,7 @@ class ObjectPool
}

void Return(size_t poolVersion, std::unique_ptr<T>& unique) {
// ASSERT: Object has already been finalized
// Always decrement the count when an object is no longer outstanding
assert(m_outstanding);
m_outstanding--;
Expand All @@ -150,9 +162,8 @@ class ObjectPool
// Object pool needs to be capable of accepting another object as an input
m_objs.size() < m_maxPooled
) {
// Reset the object and put it back in the pool:
m_final(*unique);
m_objs.push_back(Wrap(unique.release()));
// Return the object to the pool:
m_objs.emplace_back(std::move(unique));
}

// If the new outstanding count is less than or equal to the limit, wake up any waiters:
Expand All @@ -178,15 +189,13 @@ class ObjectPool

// We failed to recover an object, create a new one:
auto obj = Wrap(m_alloc());
m_initial(*obj);
return obj;
}

// Remove, return:
auto obj = m_objs.back();
m_initial(*obj);
m_objs.pop_back();
return obj;
// Transition from pooled to issued:
std::shared_ptr<T> iObj = Wrap(m_objs.back().release()); // Takes ownership
m_objs.pop_back(); // Removes non-referencing object
return iObj;
}

public:
Expand Down Expand Up @@ -217,7 +226,7 @@ class ObjectPool
/// </remarks>
void ClearCachedEntities(void) {
// Declare this first, so it's freed last:
std::vector<std::shared_ptr<T>> objs;
std::vector<std::unique_ptr<T>> objs;

// Move all of our objects into a local variable which we can then free at our leisure. This allows us to
// perform destruction outside of the scope of a lock, preventing any deadlocks that might occur inside
Expand All @@ -243,7 +252,7 @@ class ObjectPool
void SetMaximumPooledEntities(size_t maxPooled) {
m_maxPooled = maxPooled;
for(;;) {
std::shared_ptr<T> prior;
std::unique_ptr<T> prior;
std::lock_guard<std::mutex> lk(*m_monitor);

// Space check:
Expand All @@ -256,7 +265,7 @@ class ObjectPool

// Funny syntax needed to ensure destructors run while we aren't holding any locks. The prior
// shared_ptr will be reset after the lock is released, guaranteeing the desired ordering.
prior = m_objs.back();
prior = std::move(m_objs.back());
m_objs.pop_back();
}
}
Expand Down Expand Up @@ -412,10 +421,10 @@ class ObjectPool
std::swap(m_monitor, rhs.m_monitor);

m_poolVersion = rhs.m_poolVersion;
m_objs = rhs.m_objs;
m_maxPooled = rhs.m_maxPooled;
m_limit = rhs.m_limit;
m_outstanding = rhs.m_outstanding;
std::swap(m_objs, rhs.m_objs);
std::swap(m_alloc, rhs.m_alloc);
std::swap(m_initial, rhs.m_initial);
std::swap(m_final, rhs.m_final);
Expand Down

0 comments on commit 6192549

Please sign in to comment.