libcruft-util/cruft/util/pool.hpp

/*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*
* Copyright 2011-2019 Danny Robson <danny@nerdcruft.net>
*/
#pragma once
#include "cast.hpp"
#include "debug/assert.hpp"
#include "debug/panic.hpp"
#include "parallel/stack.hpp"
#include "view.hpp"
#include <algorithm>
#include <atomic>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <new>
#include <utility>
namespace cruft {
/// A simple, thread-safe, pre-allocated pool allocator.
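///
/// An illustrative usage sketch (`widget` stands in for any user-provided
/// type; the pool only needs its size and alignment up front):
///
///     cruft::pool<widget> store (128);          // space for 128 widgets
///     widget *obj = store.construct (/*args*/); // allocate + placement-new
///     store.destroy (obj);                      // destruct + release the slot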
template <typename T>
class pool {
private:
/// A collection of all unallocated slots.
parallel::stack<void*> m_available;
/// A pointer to the start of the allocated region.
void *m_store;
/// The total number of items for which storage has been allocated.
std::size_t m_capacity;
// We used to use std::aligned_storage_t and proper arrays/vectors for
// data storage. But this requires that the user has already defined T
// ahead of time (given we need to call sizeof outside a deduced
// context).
//
// We tried a strategy where nodes were a union of T and a linked-list
// node. However, this proved heinously expensive to traverse when
// finding the allocated objects that need to be destroyed once our
// destructor is called.
public:
///////////////////////////////////////////////////////////////////////
pool (const pool&) = delete;
pool& operator= (const pool&) = delete;
//---------------------------------------------------------------------
pool (pool &&rhs) noexcept
: m_available (0)
, m_store (nullptr)
, m_capacity (0)
{
std::swap (m_available, rhs.m_available);
std::swap (m_store, rhs.m_store);
std::swap (m_capacity, rhs.m_capacity);
}
//---------------------------------------------------------------------
pool& operator= (pool &&rhs) noexcept
{
std::swap (m_available, rhs.m_available);
std::swap (m_store, rhs.m_store);
std::swap (m_capacity, rhs.m_capacity);
return *this;
}
//---------------------------------------------------------------------
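/// Returns a copy of this pool and its contents.
///
/// The backing store is duplicated with memcpy and the free list is
/// rebased so that it points into the new allocation; as a consequence
/// this is only appropriate when T is trivially copyable.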
pool clone (void) const
{
pool res (m_capacity);
std::memcpy (res.m_store, m_store, sizeof (T) * m_capacity);
res.m_available = m_available.clone ();
// Rebase the pointers to available data so they point into the
// cloned object.
auto offset = reinterpret_cast<uintptr_t> (res.m_store) - reinterpret_cast<uintptr_t> (m_store);
auto data = res.m_available.store (
decltype(res.m_available)::contract::I_HAVE_LOCKED_THIS_STRUCTURE
);
for (auto &ptr: data)
ptr = reinterpret_cast<void*> (reinterpret_cast<uintptr_t> (ptr) + offset);
return res;
}
//---------------------------------------------------------------------
explicit
pool (std::size_t _capacity)
: m_available (_capacity)
, m_store (::operator new[] (_capacity * sizeof (T), std::align_val_t {alignof (T)}))
, m_capacity (_capacity)
{
if (!m_store)
throw std::bad_alloc ();
reindex ();
}
//---------------------------------------------------------------------
~pool ()
{
clear ();
::operator delete[] (
m_store,
m_capacity * sizeof (T),
std::align_val_t {alignof (T)}
);
}
///////////////////////////////////////////////////////////////////////
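/// Returns a pointer to an uninitialised slot large enough for one T.
///
/// Throws std::bad_alloc if no slots remain. The caller is responsible
/// for constructing (and later destroying) the object; see `construct`
/// and `destroy` for wrappers that do this.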
[[nodiscard]] T*
allocate [[gnu::malloc]] [[gnu::returns_nonnull]] (void)
{
void *raw;
if (!m_available.pop (&raw))
throw std::bad_alloc ();
return reinterpret_cast<T*> (raw);
}
//---------------------------------------------------------------------
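/// Returns a slot obtained from `allocate` to the free list.
///
/// The slot must not contain a live object; destroy the object first,
/// or use `destroy` which does both.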
void
deallocate (T *cooked)
{
void *raw = reinterpret_cast<void*> (cooked);
if (unlikely (!m_available.push (raw)))
panic (__FUNCTION__);
}
///////////////////////////////////////////////////////////////////////
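/// Allocates a slot and constructs a T in place, forwarding the
/// supplied arguments. The slot is returned to the free list if the
/// constructor throws.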
template <typename ...Args>
T*
construct (Args &&...args)
{
auto ptr = allocate ();
try {
return new (ptr) T (std::forward<Args> (args)...);
} catch (...) {
deallocate (ptr);
throw;
}
}
//---------------------------------------------------------------------
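/// Destroys the pointed-to object and returns its slot to the free
/// list.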
void
destroy (T *ptr)
{
ptr->~T ();
deallocate (ptr);
}
//---------------------------------------------------------------------
void destroy (size_t idx)
{
return destroy (&(*this)[idx]);
}
///////////////////////////////////////////////////////////////////////
auto capacity (void) const { return m_capacity; }
size_t size (void) const { return capacity () - m_available.size (); }
ssize_t ssize (void) const { return capacity () - m_available.size (); }
bool empty (void) const { return size () == 0; }
bool full (void) const { return size () == capacity (); }
/// Destroys all objects that have been allocated, returns their slots
/// to the free list, and rebuilds that list ready for allocations
/// again. The backing store itself is not released.
///
/// NOTE: All bets are off if any object throws an exception out of
/// its destructor. We provide no exception guarantees.
///
/// This call is NOT thread safe. No users should be accessing this
/// object for the duration of this call.
void clear (void)
{
auto const valid_queue = m_available.store (
decltype(m_available)::contract::I_HAVE_LOCKED_THIS_STRUCTURE
);
std::sort (valid_queue.begin (), valid_queue.end ());
// Now that we've ordered the free nodes we can walk the store from
// start to finish and find the slots that aren't on the free list,
// then call the destructors on the objects they contain.
auto node_cursor = valid_queue.begin ();
auto data_cursor = reinterpret_cast<T*> (m_store);
auto const data_end = data_cursor + m_capacity;
while (node_cursor != valid_queue.end ()) {
while (&*data_cursor < *node_cursor) {
reinterpret_cast<T*> (&*data_cursor)->~T ();
++data_cursor;
}
++node_cursor;
++data_cursor;
}
while (data_cursor != data_end) {
reinterpret_cast<T*> (&*data_cursor)->~T ();
++data_cursor;
}
reindex ();
}
///////////////////////////////////////////////////////////////////////
size_t index (T const *ptr) const
{
CHECK_GE (ptr, reinterpret_cast<T*> (m_store));
CHECK_LT (ptr, reinterpret_cast<T*> (m_store) + capacity ());
return ptr - reinterpret_cast<T*> (m_store);
}
/// Returns the base address of the allocation.
///
/// It is guaranteed to point to the first _possible_ allocated value;
/// however that value may not be _live_ at any given moment.
///
/// DO NOT use this pointer for indexing, as you _may_ be unable to
/// account for internal node sizes, alignment, or padding. This is
/// why the return type is void.
///
/// We may be using one particular representation at the moment, but
/// its stability is not guaranteed.
void * base (void) & { return m_store; }
void const* base (void) const& { return m_store; }
///////////////////////////////////////////////////////////////////////
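/// Returns a reference to the idx'th slot of the backing store.
///
/// The slot is not guaranteed to hold a live object; indexing is only
/// meaningful for slots the caller knows to be allocated.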
T& operator[] (size_t idx) &
{
return reinterpret_cast<T*> (m_store) [idx];
}
//---------------------------------------------------------------------
T const& operator[] (size_t idx) const&
{
return reinterpret_cast<T*> (m_store) [idx];
}
private:
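/// Rebuilds the free list so that every slot in the store is marked
/// available. Live objects are not destroyed; callers must do that
/// beforehand (see `clear`).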
void reindex (void)
{
m_available.clear ();
T* elements = reinterpret_cast<T*> (m_store);
for (size_t i = m_capacity; i--; )
// The available indices _should_ have enough capacity at all
// times as we've presumably allocated it at construction time,
// but it's worthwhile checking anyway.
if (!m_available.push (elements + i)) [[unlikely]]
throw std::bad_alloc ();
}
};
}