/*
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/.
 *
 * Copyright 2011-2019 Danny Robson
 */

#pragma once

#include "debug/assert.hpp"
#include "cast.hpp"

#include <atomic>
#include <cstddef>
#include <new>
#include <utility>

namespace cruft {
    /// a simple pre-allocated pool for storage of PODs.
    ///
    /// non-POD types can be stored, but there are no guarantees for calling
    /// item destructors at pool destruction time.
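    ///
    /// a brief usage sketch (illustrative only; `widget`, `arg0`, and `arg1`
    /// are hypothetical stand-ins, not names from this library):
    ///
    ///     cruft::pool<widget> store (128);
    ///
    ///     widget *w = store.construct (arg0, arg1);
    ///     // ... use *w ...
    ///     store.destroy (w);
    ///
    /// `allocate`/`deallocate` hand out raw, unconstructed storage, while
    /// `construct`/`destroy` pair them with placement-new and an explicit
    /// destructor call.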
    template <typename T>
    class pool {
    protected:
        union node {
            alignas (node*) std::atomic<node*> next;
            alignas (node*) node*              raw;
            alignas (T)     char               data[sizeof (T)];
        };

        static_assert (std::atomic<node*>::is_always_lock_free);

        // root address of the allocation. used in deletion at destruction time.
        node* m_head;

        // the next available entry in the linked list
        std::atomic<node*> m_next;

        // the total number of items that could be stored
        std::size_t m_capacity;

        // the number of items currently stored
        std::atomic<std::size_t> m_size;

    public:
        pool (const pool&) = delete;
        pool& operator= (const pool&) = delete;

        pool (pool &&rhs) noexcept
            : m_head (nullptr)
            , m_next (nullptr)
            , m_capacity (0)
            , m_size (0)
        {
            std::swap (m_head, rhs.m_head);

            m_next = rhs.m_next.load ();
            rhs.m_next = nullptr;

            std::swap (m_capacity, rhs.m_capacity);

            m_size = rhs.m_size.load ();
            rhs.m_size = 0;
        }

        pool& operator= (pool&&);

        explicit pool (std::size_t _capacity)
            : m_capacity (_capacity)
            , m_size (0u)
        {
            // allocate the memory and note the base address for deletion in
            // the destructor
            m_next = m_head = new node[m_capacity];
            relink ();
        }

        ~pool ()
        {
            clear ();

            // don't check if everything's been returned as pools are often
            // used for PODs which don't need to be destructed via calling
            // release.
            delete [] m_head;
        }

        // Data management
        [[nodiscard]] [[gnu::malloc]] [[gnu::returns_nonnull]]
        T* allocate (void)
        {
            // double check we have enough capacity left
            if (!m_next)
                throw std::bad_alloc ();
            CHECK_LT (m_size, m_capacity);

            // unlink the current cursor
            do {
                node* curr = m_next;

                // another thread may have taken the last node since the
                // check above.
                if (!curr)
                    throw std::bad_alloc ();

                node* soon = curr->next;

                if (m_next.compare_exchange_weak (curr, soon)) {
                    ++m_size;
                    return std::launder (cruft::cast::alignment<T*> (curr));
                }
            } while (1);
        }

        void deallocate (T *base)
        {
            auto soon = cruft::cast::alignment<node*> (base);

            do {
                node *curr = m_next;
                soon->next = curr;

                if (m_next.compare_exchange_weak (curr, soon)) {
                    --m_size;
                    return;
                }
            } while (1);
        }

        template <typename ...Args>
        T* construct (Args &&...args)
        {
            auto ptr = allocate ();
            try {
                return new (ptr) T (std::forward<Args> (args)...);
            } catch (...) {
                deallocate (ptr);
                throw;
            }
        }

        void destroy (T *ptr)
        {
            ptr->~T ();
            deallocate (ptr);
        }

        void destroy (size_t idx)
        {
            return destroy (&(*this)[idx]);
        }

        auto capacity (void) const { return m_capacity; }
        auto size     (void) const { return m_size.load (); }
        bool empty    (void) const { return size () == 0; }
        bool full     (void) const { return size () == capacity (); }

        /// Destroys all objects that have been allocated, returns their
        /// storage to the free list, and then rebuilds the free node list
        /// ready for allocations again.
        ///
        /// NOTE: All bets are off if any object throws an exception out of
        /// their destructor. We provide no exception guarantees.
        ///
        /// This call is NOT thread safe. No users should be accessing this
        /// object for the duration of this call.
        void clear (void)
        {
            // Create a fake root so that we can always point to the parent
            // of every node in the system. Hopefully this isn't too large
            // for the stack.
            node container;
            container.next.store (m_next.load ());

            // Sort the node list. We walk the list, and at each step
            // reparent the child at the lowest memory address to the cursor.
            for (node* start = &container; start->raw; start = start->raw) {
                node* parent = start;

                // find the node whose child is the lowest pointer
                for (auto cursor = parent; cursor->raw; cursor = cursor->raw) {
                    CHECK_NEQ (cursor->raw, start);
                    if (cursor->raw < parent->raw)
                        parent = cursor;
                }

                // if the lowest child is already in place then move on
                node* lowest = parent->raw;
                if (lowest == start->raw)
                    continue;

                // remove the lowest child from their old parent
                parent->raw = lowest->raw;

                // parent the old successor of the start to the lowest child,
                // and the lowest child to the start of the sorted list
                lowest->raw = start->raw;
                start->raw  = lowest;
            }

            // Now that we've ordered the nodes we can walk the list from
            // start to finish and find nodes that aren't in the free list.
            // Call the destructors on the data contained in these.
            //
            // The fake root holds the head of the sorted list; m_next may
            // point into the middle of it after sorting.
            auto node_cursor = container.raw;
            auto data_cursor = m_head;

            while (node_cursor) {
                while (data_cursor < node_cursor) {
                    cruft::cast::alignment<T*> (data_cursor->data)->~T ();
                    ++data_cursor;
                }

                node_cursor = node_cursor->raw;
                ++data_cursor;
            }

            while (data_cursor < m_head + m_capacity) {
                cruft::cast::alignment<T*> (data_cursor->data)->~T ();
                ++data_cursor;
            }

            // every object has been destroyed and the free list is rebuilt
            // below, so the pool is empty again.
            m_size = 0;

            relink ();
        }

    private:
        void relink (void)
        {
            // reset the allocation cursor to point to the start of the region
            m_next = m_head;

            if (!m_capacity)
                return;

            // build out the linked list from all the nodes
            for (std::size_t i = 0; i < m_capacity - 1; ++i)
                m_head[i].next = m_head + i + 1;
            m_head[m_capacity - 1].next = nullptr;
        }

    public:
        // Indexing
        size_t index (T const *ptr) const
        {
            CHECK_LIMIT (
                cruft::cast::alignment<node const*> (ptr),
                m_head,
                m_head + m_capacity
            );
            return cruft::cast::alignment<node const*> (ptr) - m_head;
        }

        /// returns the base address of the allocation.
        ///
        /// guaranteed to point to the first _possible_ allocated value;
        /// however it may not be _live_ at any given moment.
        ///
        /// DO NOT use this pointer for indexing as you will be unable to
        /// account for internal node sizes, alignment, or padding.
        void       * base (void) &      { return m_head; }
        void const * base (void) const& { return m_head; }

        T& operator[] (size_t idx) &
        {
            CHECK_LIMIT (idx, 0u, capacity ());
            return *cruft::cast::alignment<T*> (&m_head[idx].data[0]);
        }

        T const& operator[] (size_t idx) const&
        {
            CHECK_LIMIT (idx, 0u, capacity ());
            return *cruft::cast::alignment<T const*> (&m_head[idx].data[0]);
        }
    };
}
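// ---------------------------------------------------------------------------
// a minimal end-to-end sketch of the raw allocation path (illustrative only
// and not part of the library; it assumes this header and its cruft
// dependencies are on the include path):
//
//     #include "pool.hpp"
//
//     #include <cassert>
//     #include <new>
//
//     int main (void)
//     {
//         cruft::pool<int> store (4);
//
//         // take raw storage and construct in place
//         int *p = store.allocate ();
//         new (p) int (42);
//
//         // indices remain stable while the object is live
//         std::size_t slot = store.index (p);
//         assert (&store[slot] == p);
//
//         // destroy runs the destructor and returns the node to the free list
//         store.destroy (p);
//         assert (store.empty ());
//     }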