/*
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/.
 *
 * Copyright 2011-2019 Danny Robson
 */

#pragma once

#include "debug/assert.hpp"
#include "cast.hpp"

#include <atomic>   // std::atomic
#include <cstddef>  // std::byte, std::size_t
#include <new>      // std::bad_alloc, std::launder, placement new
#include <utility>  // std::swap, std::forward

namespace cruft {
    /// A simple pre-allocated pool for storage of PODs.
    ///
    /// Non-POD types can be stored, but there is no guarantee that item
    /// destructors will be called at pool destruction time.
    template <typename T>
    class pool {
    protected:
        union node;

        union alignas (node*) node {
            std::atomic<node*> next;
            std::byte data[sizeof (T)];
        };

        static_assert (std::atomic<node*>::is_always_lock_free);

        // Root address of the allocation. Used for deletion at destruction time.
        node *m_head;

        // The next available entry in the free list.
        std::atomic<node*> m_next;

        // The total number of items that could be stored.
        std::size_t m_capacity;

        // The number of items currently stored.
        std::atomic<std::size_t> m_size;

    public:
        pool (const pool&) = delete;
        pool& operator= (const pool&) = delete;

        pool (pool &&rhs) noexcept
            : m_head (nullptr)
            , m_next (nullptr)
            , m_capacity (0)
            , m_size (0)
        {
            std::swap (m_head, rhs.m_head);
            m_next = rhs.m_next.load ();
            rhs.m_next = nullptr;
            std::swap (m_capacity, rhs.m_capacity);
            m_size = rhs.m_size.load ();
            rhs.m_size = 0;
        }

        pool& operator= (pool&&);

        explicit pool (std::size_t _capacity)
            : m_capacity (_capacity)
            , m_size (0u)
        {
            // Allocate the memory and note the base address for deletion in
            // the destructor.
            m_next = m_head = new node[m_capacity];
            clear ();
        }

        ~pool ()
        {
            // Don't check whether everything has been returned, as pools are
            // often used for PODs which don't need to be destructed via
            // calling release.
            delete [] m_head;
        }

        // Data management
        [[nodiscard]] T*
        allocate [[gnu::malloc]] [[gnu::returns_nonnull]] (void)
        {
            // Double check we have enough capacity left.
            if (!m_next)
                throw std::bad_alloc ();
            CHECK_LT (m_size, m_capacity);

            // Unlink the current cursor.
            do {
                node *curr = m_next;
                node *soon = curr->next;

                if (m_next.compare_exchange_weak (curr, soon)) {
                    ++m_size;
                    return std::launder (cruft::cast::alignment<T*> (curr));
                }
            } while (1);
        }

        void
        deallocate (T *base)
        {
            auto soon = cruft::cast::alignment<node*> (base);

            do {
                node *curr = m_next;
                soon->next = curr;

                if (m_next.compare_exchange_weak (curr, soon)) {
                    --m_size;
                    return;
                }
            } while (1);
        }

        template <typename ...Args>
        T*
        construct (Args &&...args)
        {
            auto ptr = allocate ();
            try {
                return new (ptr) T (std::forward<Args> (args)...);
            } catch (...) {
                deallocate (ptr);
                throw;
            }
        }

        void
        destroy (T *ptr)
        {
            ptr->~T ();
            deallocate (ptr);
        }

        void
        destroy (size_t idx)
        {
            return destroy (base () + idx);
        }

        auto capacity (void) const { return m_capacity; }
        auto size (void) const { return m_size.load (); }
        bool empty (void) const { return size () == 0; }
        bool full (void) const { return size () == capacity (); }

        void clear (void)
        {
            m_next = m_head;

            // Build out a complete singly linked list from all the nodes.
            // Note: we walk m_head directly because m_next, being an atomic
            // pointer, cannot be indexed.
            for (size_t i = 0; i < m_capacity - 1; ++i)
                m_head[i].next = m_head + i + 1;
            m_head[m_capacity - 1].next = nullptr;
        }

        // Indexing
        size_t index (T const *ptr) const
        {
            CHECK_LIMIT (ptr, base (), base () + m_capacity);
            return ptr - base ();
        }

        /// Returns the base address of the allocation.
        ///
        /// Guaranteed to point to the first _possible_ allocated value;
        /// however it may not be _live_ at any given moment. Provided to
        /// facilitate indexing.
        T* base (void) { return reinterpret_cast<T*> (m_head); }
        T const* base (void) const { return reinterpret_cast<T const*> (m_head); }

        T& operator[] (size_t idx) &;
        const T& operator[] (size_t idx) const&;
    };
}
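
// Usage sketch (illustrative only; the names `p`, `a` and `b` and the element
// type `int` are arbitrary examples, not part of this header). It walks the
// construct/destroy lifecycle documented above:
//
//     cruft::pool<int> p (4);      // pre-allocates storage for four ints
//
//     int *a = p.construct (7);    // pops a free slot, placement-news int(7)
//     int *b = p.construct (9);
//
//     // p.size () == 2 here, and p.index (a), p.index (b) give stable slot
//     // indices within [0, p.capacity ()).
//
//     p.destroy (a);               // runs ~int (a no-op for PODs), then
//     p.destroy (b);               // returns the slot to the free list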