// libcruft-util/pool.hpp
/*
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/.
 *
 * Copyright 2011-2019 Danny Robson <danny@nerdcruft.net>
 */
#pragma once
#include "debug/assert.hpp"
#include "cast.hpp"

#include <algorithm>
#include <atomic>
#include <cstddef>
#include <cstdint>
#include <new>
#include <utility>
#include <vector>
namespace cruft {
/// a simple pre-allocated pool for storage of PODs.
///
/// non-POD types can be stored, but there are no guarantees for calling
/// item destructors at pool destruction time.
template <typename T>
class pool {
2017-01-05 19:50:00 +11:00
protected:
union node;
union node {
alignas(node*) std::atomic<node*> next;
2019-05-23 12:36:52 +10:00
alignas(node*) node* raw;
alignas(T) char data[sizeof(T)];
2017-01-05 19:50:00 +11:00
};
static_assert (std::atomic<node*>::is_always_lock_free);
// root address of allocation. used in deletion at destruction time.
node* m_head;
2016-01-19 18:29:13 +11:00
// the next available entry in the linked list
std::atomic<node *> m_next;
// the total number of items that could be stored
std::size_t m_capacity;
// the number of items currently stored.
std::atomic<size_t> m_size;
2017-01-05 19:50:00 +11:00
public:
pool (const pool&) = delete;
pool& operator= (const pool&) = delete;
2019-05-22 15:05:00 +10:00
pool (pool &&rhs) noexcept
: m_head (nullptr)
, m_next (nullptr)
, m_capacity (0)
, m_size (0)
{
std::swap (m_head, rhs.m_head);
m_next = rhs.m_next.load ();
rhs.m_next = nullptr;
std::swap (m_capacity, rhs.m_capacity);
m_size = rhs.m_size.load ();
rhs.m_size = 0;
}
pool& operator= (pool&&);
2017-01-05 19:50:00 +11:00
explicit
pool (std::size_t _capacity):
m_capacity (_capacity),
m_size (0u)
{
// allocate the memory and note the base address for deletion in destructor
m_next = m_head = new node[m_capacity];
2019-05-23 12:36:52 +10:00
relink ();
}
~pool ()
{
2019-05-23 12:36:52 +10:00
clear ();
// don't check if everything's been returned as pools are often used
// for PODs which don't need to be destructed via calling release.
delete [] m_head;
}
2019-05-23 12:36:52 +10:00
2017-01-05 19:50:00 +11:00
// Data management
[[nodiscard]] T*
2019-01-21 17:14:02 +11:00
allocate [[gnu::malloc]] [[gnu::returns_nonnull]] (void)
{
// double check we have enough capacity left
if (!m_next)
throw std::bad_alloc ();
CHECK_LT (m_size, m_capacity);
// unlink the current cursor
do {
node* curr = m_next;
node* soon = curr->next;
if (m_next.compare_exchange_weak (curr, soon)) {
++m_size;
return std::launder (cruft::cast::alignment<T*> (curr));
}
} while (1);
}
void
deallocate (T *base)
{
auto soon = cruft::cast::alignment<node*> (base);
do {
node *curr = m_next;
soon->next = curr;
if (m_next.compare_exchange_weak (curr, soon)) {
--m_size;
return;
}
} while (1);
}
template <typename ...Args>
T*
construct (Args &&...args)
{
auto ptr = allocate ();
try {
return new (ptr) T (std::forward<Args> (args)...);
} catch (...) {
deallocate (ptr);
throw;
}
}
void
destroy (T *ptr)
{
ptr->~T ();
deallocate (ptr);
}
2014-12-31 19:07:25 +11:00
2018-07-04 14:04:45 +10:00
void destroy (size_t idx)
{
return destroy (&(*this)[idx]);
2018-07-04 14:04:45 +10:00
}
auto capacity (void) const { return m_capacity; }
auto size (void) const { return m_size.load (); }
bool empty (void) const { return size () == 0; }
2019-01-21 17:13:47 +11:00
bool full (void) const { return size () == capacity (); }
2014-12-31 19:07:25 +11:00
2019-05-23 12:36:52 +10:00
/// Destroys all objects that have been allocated, frees the
/// associated memory, and then rebuilds the free node list ready for
/// allocations again.
///
/// NOTE: All bets are off if any object throws an exception out of
/// their destructor. We provide no exception guarantees.
///
/// This call is NOT thread safe. No users should be accessing this
/// object for the duration of this call.
2019-05-22 15:05:10 +10:00
void clear (void)
{
2019-05-23 12:36:52 +10:00
// Create a fake root so that we can always point to the parent
// of every node in the system. Hopefully this isn't too large for
// the stack.
node container;
container.next.store (m_next.load ());
// Sort the node list. We walk the list, and at each step reparent
// the child at the lowest memory address to the cursor.
for (node* start = container.raw; start; start = start->raw) {
node* parent = start;
// Find the node whose child is the lowest pointer
int count = 0;
for (auto cursor = parent; cursor->raw; cursor = cursor->raw) {
++count;
CHECK_NEQ (cursor->raw, start);
if (cursor->raw < parent)
parent = cursor;
}
// Parent the lowest child to the start of the sorted list
auto tmp = start->raw;
start->raw = parent->raw;
// Remove the lowest child from their old parent
auto parent_next = parent->raw;
parent->raw = parent_next ? parent_next->raw : nullptr;
// Parent the old successor of the start to the lowest child
start->raw = tmp;
}
// Now that we've ordered the nodes we can walk the list from
// start to finish and find nodes that aren't in the free list.
// Call the destructors on the data contained in these.
auto node_cursor = m_next.load (std::memory_order_relaxed);
auto data_cursor = m_head;
while (node_cursor) {
while (data_cursor < node_cursor) {
cruft::cast::alignment<T*> (data_cursor->data)->~T ();
++data_cursor;
}
node_cursor = node_cursor->raw;
++data_cursor;
}
while (data_cursor < m_head + m_capacity) {
cruft::cast::alignment<T*> (data_cursor->data)->~T ();
++data_cursor;
}
relink ();
}
private:
void relink (void)
{
// Reset the allocation cursor to point to the start of the region
2019-05-22 15:05:10 +10:00
m_next = m_head;
2019-05-23 12:36:52 +10:00
// build out the linked list from all the nodes.
2019-05-22 15:05:10 +10:00
for (size_t i = 0; i < m_capacity - 1; ++i)
m_next[i].next = m_next + i + 1;
m_next[m_capacity - 1].next = nullptr;
}
2019-05-23 12:36:52 +10:00
public:
2017-01-05 19:50:00 +11:00
// Indexing
2018-07-04 14:04:29 +10:00
size_t index (T const *ptr) const
{
CHECK_LIMIT (cruft::cast::alignment<node const*> (ptr), m_head, m_head + m_capacity);
return cruft::cast::alignment<node const*> (ptr) - m_head;
2018-07-04 14:04:29 +10:00
}
2018-07-04 14:04:16 +10:00
/// returns the base address of the allocation.
///
/// guaranteed to point to the first _possible_ allocated value;
/// however it may not be _live_ at any given moment.
///
/// DO NOT use this pointer for indexing as you will be unable to
/// account for internal node sizes, alignment, or padding.
void * base (void) & { return m_head; }
void const* base (void) const& { return m_head; }
T& operator[] (size_t idx) &
{
CHECK_LIMIT (idx, 0u, capacity ());
return *cruft::cast::alignment<T*> (&m_head[idx].data[0]);
}
2014-12-31 19:07:25 +11:00
T const& operator[] (size_t idx) const&
{
CHECK_LIMIT (idx, 0u, capacity ());
return *cruft::cast::alignment<T const*> (&m_head[idx].data[0]);
}
};
}