/*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*
* Copyright 2011-2019 Danny Robson <danny@nerdcruft.net>
*/
#pragma once

#include "cast.hpp"
#include "debug/assert.hpp"
#include "parallel/stack.hpp"
#include "view.hpp"

#include <algorithm>
#include <atomic>
#include <new>
#include <utility>

#include <cstddef>
#include <cstdint>
#include <cstring>
namespace cruft {
    /// A simple, thread safe, pre-allocated pool allocator.
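    ///
    /// Example usage (an illustrative sketch, not from the original
    /// source):
    ///
    ///     cruft::pool<int> p (16);        // storage for 16 ints
    ///     int *obj = p.construct (42);    // allocate and construct
    ///     p.destroy (obj);                // destruct and release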
    template <typename T>
    class pool {
    private:
        /// A collection of all unallocated slots.
        parallel::stack<void*> m_available;

        /// A pointer to the start of the allocated region.
        void *m_store;

        /// The total number of items for which storage has been allocated.
        std::size_t m_capacity;

        // We used to use std::aligned_storage_t and arrays/vectors proper
        // for data storage. But this requires that the user has already
        // defined ValueT ahead of time (given we need to call sizeof
        // outside a deduced context).
        //
        // We tried a strategy where nodes were a union of ValueT and a
        // linked-list. However this proved heinously expensive to traverse
        // to find allocated objects that need to be destroyed when our
        // destructor is called.
    public:
        ///////////////////////////////////////////////////////////////////////
        pool (const pool&) = delete;
        pool& operator= (const pool&) = delete;
        //---------------------------------------------------------------------
        pool (pool &&rhs) noexcept
            : m_available (0)
            , m_store (nullptr)
            , m_capacity (0)
        {
            std::swap (m_available, rhs.m_available);
            std::swap (m_store, rhs.m_store);
            std::swap (m_capacity, rhs.m_capacity);
        }
        //---------------------------------------------------------------------
        pool& operator= (pool &&rhs) noexcept
        {
            std::swap (m_available, rhs.m_available);
            std::swap (m_store, rhs.m_store);
            std::swap (m_capacity, rhs.m_capacity);
            return *this;
        }

        //---------------------------------------------------------------------
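        /// Returns a copy of this pool, including the allocation state of
        /// each slot.
        ///
        /// NB: The backing store is duplicated with memcpy, so this is
        /// only well defined when T is trivially copyable.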
        pool clone (void) const
        {
            pool res (m_capacity);
            std::memcpy (res.m_store, m_store, sizeof (T) * m_capacity);
            res.m_available = m_available.clone ();

            // Rebase the pointers to available data so they point into the
            // cloned object.
            auto offset = reinterpret_cast<uintptr_t> (res.m_store) - reinterpret_cast<uintptr_t> (m_store);
            auto data = res.m_available.store (
                decltype(res.m_available)::contract::I_HAVE_LOCKED_THIS_STRUCTURE
            );

            for (auto &ptr: data)
                ptr = reinterpret_cast<void*> (reinterpret_cast<uintptr_t> (ptr) + offset);

            return res;
        }
        //---------------------------------------------------------------------
        explicit
        pool (std::size_t _capacity)
            : m_available (_capacity)
            , m_store (::operator new[] (_capacity * sizeof (T), std::align_val_t {alignof (T)}))
            , m_capacity (_capacity)
        {
            if (!m_store)
                throw std::bad_alloc ();
            reindex ();
        }
        //---------------------------------------------------------------------
        ~pool ()
        {
            clear ();

            ::operator delete[] (
                m_store,
                m_capacity * sizeof (T),
                std::align_val_t {alignof (T)}
            );
        }

        ///////////////////////////////////////////////////////////////////////
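        /// Returns a pointer to an uninitialised slot, or throws
        /// std::bad_alloc if the pool has been exhausted.
        ///
        /// The caller is responsible for constructing an object in the
        /// slot, and for destroying it before deallocation.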
        [[nodiscard]] T*
        allocate [[gnu::malloc]] [[gnu::returns_nonnull]] (void)
        {
            void *raw;
            if (!m_available.pop (&raw))
                throw std::bad_alloc ();

            return reinterpret_cast<T*> (raw);
        }

        //---------------------------------------------------------------------
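        /// Returns a slot to the free list. The object it contained, if
        /// any, must already have been destroyed (see `destroy`).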
        void
        deallocate (T *cooked)
        {
            void *raw = reinterpret_cast<void*> (cooked);
            if (unlikely (!m_available.push (raw)))
                panic (__FUNCTION__);
        }

        ///////////////////////////////////////////////////////////////////////
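        /// Allocates a slot and constructs a T in place from the supplied
        /// arguments. If the constructor throws then the slot is returned
        /// to the free list before the exception is rethrown.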
        template <typename ...Args>
        T*
        construct (Args &&...args)
        {
            auto ptr = allocate ();
            try {
                return new (ptr) T (std::forward<Args> (args)...);
            } catch (...) {
                deallocate (ptr);
                throw;
            }
        }

        //---------------------------------------------------------------------
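        /// Destroys the object at the supplied pointer and returns its
        /// slot to the free list.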
        void
        destroy (T *ptr)
        {
            ptr->~T ();
            deallocate (ptr);
        }

        //---------------------------------------------------------------------
        void destroy (size_t idx)
        {
            return destroy (&(*this)[idx]);
        }

        ///////////////////////////////////////////////////////////////////////
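        /// Occupancy observers: `capacity` is fixed at construction, while
        /// `size` counts currently live allocations.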
        auto    capacity (void) const { return m_capacity; }
        size_t  size     (void) const { return capacity () - m_available.size (); }
        ssize_t ssize    (void) const { return capacity () - m_available.size (); }
        bool    empty    (void) const { return size () == 0; }
        bool    full     (void) const { return size () == capacity (); }
        /// Destroys all objects that have been allocated, frees the
        /// associated memory, and then rebuilds the free node list ready
        /// for allocations again.
        ///
        /// NOTE: All bets are off if any object throws an exception out of
        /// its destructor. We provide no exception guarantees.
        ///
        /// This call is NOT thread safe. No users should be accessing this
        /// object for the duration of this call.
        void clear (void)
        {
            auto const valid_queue = m_available.store (
                decltype(m_available)::contract::I_HAVE_LOCKED_THIS_STRUCTURE
            );
            std::sort (valid_queue.begin (), valid_queue.end ());

            // Now that we've ordered the nodes we can walk the store from
            // start to finish and find slots that aren't in the free list.
            // Call the destructors on the objects contained in these.
            auto node_cursor = valid_queue.begin ();
            auto data_cursor = reinterpret_cast<T*> (m_store);
            auto const data_end = data_cursor + m_capacity;

            while (node_cursor != valid_queue.end ()) {
                // Destroy every live object before the next free slot.
                while (static_cast<void*> (data_cursor) < *node_cursor) {
                    data_cursor->~T ();
                    ++data_cursor;
                }

                // Step over the free slot itself.
                ++node_cursor;
                ++data_cursor;
            }

            // Destroy any live objects that follow the last free slot.
            while (data_cursor != data_end) {
                data_cursor->~T ();
                ++data_cursor;
            }

            reindex ();
        }
        ///////////////////////////////////////////////////////////////////////
        /// Returns the index of the supplied object within the store.
        size_t index (T const *ptr) const
        {
            CHECK_GE (ptr, reinterpret_cast<T const*> (m_store));
            CHECK_LT (ptr, reinterpret_cast<T const*> (m_store) + capacity ());

            return ptr - reinterpret_cast<T const*> (m_store);
        }
        /// Returns the base address of the allocation.
        ///
        /// Guaranteed to point to the first _possible_ allocated value;
        /// however it may not be _live_ at any given moment.
        ///
        /// DO NOT use this pointer for indexing as you _may_ be unable to
        /// account for internal node sizes, alignment, or padding. This is
        /// why the return type is void.
        ///
        /// We may be using one particular representation at the moment but
        /// stability is not guaranteed at this point.
        void       * base (void)       & { return m_store; }
        void const * base (void) const & { return m_store; }
        ///////////////////////////////////////////////////////////////////////
        T& operator[] (size_t idx) &
        {
            return reinterpret_cast<T*> (m_store) [idx];
        }

        //---------------------------------------------------------------------
        T const& operator[] (size_t idx) const&
        {
            return reinterpret_cast<T const*> (m_store) [idx];
        }
    private:
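        /// Rebuilds the free list so that every slot is once again
        /// available. Slots are pushed in reverse so that the first
        /// allocation is served from the start of the store.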
        void reindex (void)
        {
            m_available.clear ();

            T* elements = reinterpret_cast<T*> (m_store);
            for (size_t i = m_capacity; i--; )
                m_available.push (elements + i);
        }
    };
}