From 8758114040c36a3fad59ad5c165c7555204b596e Mon Sep 17 00:00:00 2001 From: Zhicheng Wang <1627343141@qq.com> Date: Sat, 1 Nov 2025 15:36:32 +0800 Subject: [PATCH] add hive back for blobtree usage --- .../container/detail/hive_detail.hpp | 4597 +++++++++++++++++ shared_module/container/hive.hpp | 8 + 2 files changed, 4605 insertions(+) create mode 100644 shared_module/container/detail/hive_detail.hpp create mode 100644 shared_module/container/hive.hpp diff --git a/shared_module/container/detail/hive_detail.hpp b/shared_module/container/detail/hive_detail.hpp new file mode 100644 index 0000000..7ff7c00 --- /dev/null +++ b/shared_module/container/detail/hive_detail.hpp @@ -0,0 +1,4597 @@ +// Copyright (c) 2025, Matthew Bentley (mattreecebentley@gmail.com) www.plflib.org + +// zLib license (https://www.zlib.net/zlib_license.html): +// This software is provided 'as-is', without any express or implied +// warranty. In no event will the authors be held liable for any damages +// arising from the use of this software. +// +// Permission is granted to anyone to use this software for any purpose, +// including commercial applications, and to alter it and redistribute it +// freely, subject to the following restrictions: +// +// 1. The origin of this software must not be misrepresented; you must not +// claim that you wrote the original software. If you use this software +// in a product, an acknowledgement in the product documentation would be +// appreciated but is not required. +// 2. Altered source versions must be plainly marked as such, and must not be +// misrepresented as being the original software. +// 3. This notice may not be removed or altered from any source distribution. +#pragma once + +#include // std::fill_n, std::sort, std::swap +#include // assert +#include // memset, memcpy, size_t +#include // std::numeric_limits +#include // std::allocator, std::to_address +#include // std::bidirectional_iterator_tag, iterator_traits, std::move_iterator, std::distance for range insert +#include // std::length_error +#include // offsetof, used in blank() +#include // std::is_trivially_destructible, type_identity_t, etc +#include // std::move +#include + +namespace detail +{ +// std:: tool replacements for C++03/98/11 support: +template +struct enable_if { + typedef T type; +}; + +template +struct enable_if { +}; + +template +struct conditional; + +template +struct conditional { + typedef is_true type; +}; + +template +struct conditional { + typedef is_false type; +}; + +template +struct less { + bool operator()(const element_type &a, const element_type &b) const noexcept { return a < b; } +}; + +template +struct equal_to { + const element_type &value; + + explicit equal_to(const element_type &store_value) noexcept : value(store_value) {} + + bool operator()(const element_type &compare_value) const noexcept { return value == compare_value; } +}; + +// To enable conversion to void * when allocator supplies non-raw pointers: +template +static constexpr void *void_cast(const source_pointer_type source_pointer) noexcept +{ + return static_cast(&*source_pointer); +} + +template +static constexpr std::move_iterator make_move_iterator(iterator_type it) +{ + return std::move_iterator(std::move(it)); +} + +enum priority { performance = 1, memory_use = 4 }; + +struct limits { + size_t min, max; + + constexpr limits(const size_t minimum, const size_t maximum) noexcept : min(minimum), max(maximum) {} +}; + +template , detail::priority priority = performance> +class hive : private allocator_type // Empty 
base class optimisation - inheriting allocator functions +{ + typedef typename detail::conditional<(priority == performance && (sizeof(element_type) > 10 || alignof(element_type) > 10)), + unsigned short, + unsigned char>::type + skipfield_type; // Note: unsigned short is equivalent to uint_least16_t ie. Using 16-bit unsigned integer in best-case + // scenario, greater-than-16-bit unsigned integer where platform doesn't support 16-bit types. unsigned + // char is always == 1 byte, as opposed to uint_8, which may not be. + +public: + // Standard container typedefs: + typedef typename std::allocator_traits::size_type size_type; + typedef typename std::allocator_traits::difference_type difference_type; + typedef typename std::allocator_traits::pointer pointer; + typedef typename std::allocator_traits::const_pointer const_pointer; + + typedef element_type value_type; + typedef element_type &reference; + typedef const element_type &const_reference; + + // Iterator forward declarations: + template + class hive_iterator; + typedef hive_iterator iterator; + typedef hive_iterator const_iterator; + friend class hive_iterator; + friend class hive_iterator; + + template + class hive_reverse_iterator; + typedef hive_reverse_iterator reverse_iterator; + typedef hive_reverse_iterator const_reverse_iterator; + friend class hive_reverse_iterator; + friend class hive_reverse_iterator; + + // The element as allocated in memory needs to be at-least 2*skipfield_type width in order to support free list indexes in + // erased element memory space, so: make the size of this struct the larger of alignof(T), sizeof(T) or 2*skipfield_type + // (the latter is only relevant for type char/uchar), and make the alignment alignof(T). This type is used mainly for + // correct pointer arithmetic while iterating over elements in memory. + struct alignas(alignof(element_type)) aligned_element_struct { + // Using char as sizeof is always guaranteed to be 1 byte regardless of the number of bits in a byte on given computer, + // whereas for example, uint8_t would fail on machines where there are more than 8 bits in a byte eg. Texas Instruments + // C54x DSPs. + char data[(sizeof(element_type) < (sizeof(skipfield_type) * 2)) + ? ((sizeof(skipfield_type) * 2) < alignof(element_type) ? alignof(element_type) + : (sizeof(skipfield_type) * 2)) + : ((sizeof(element_type) < alignof(element_type)) ? alignof(element_type) : sizeof(element_type))]; + }; + + // We combine the allocation of elements and skipfield into one allocation to save performance. This memory must be + // allocated as an aligned type with the same alignment as T in order for the elements to align with memory boundaries + // correctly (which won't happen if we allocate as char or uint_8). But the larger the sizeof in the type we use for + // allocation, the greater the chance of creating a lot of unused memory in the skipfield portion of the allocated block. So + // we create a type that is sizeof(alignof(T)), as in most cases alignof(T) < sizeof(T). If alignof(t) >= sizeof(t) this + // makes no difference. + struct alignas(alignof(element_type)) aligned_allocation_struct { + char data[alignof(element_type)]; + }; + +private: + // Calculate the capacity of a group's elements+skipfield memory block when expressed in multiples of the value_type's + // alignment (rounding up). 
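+    // For example (illustrative figures only, assuming the default performance priority and a hypothetical 16-byte,
+    // 8-byte-aligned element_type, so that skipfield_type is the 2-byte unsigned short): a group of 32 elements needs
+    // 32 * (16 + 2) + 2 = 578 bytes - 32 element slots plus 33 skipfield nodes. Rounded up to whole aligned_allocation_structs
+    // of 8 bytes each, that is (578 + 7) / 8 = 73 allocation units, ie. 584 bytes for the combined elements + skipfield block.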
+ static size_type get_aligned_block_capacity(const skipfield_type elements_per_group) + { + return ((elements_per_group * (sizeof(aligned_element_struct) + sizeof(skipfield_type))) + sizeof(skipfield_type) + + sizeof(aligned_allocation_struct) - 1) + / sizeof(aligned_allocation_struct); + } + + // To enable conversion when allocator supplies non-raw pointers: + template + static constexpr destination_pointer_type pointer_cast(const source_pointer_type source_pointer) noexcept + { + return destination_pointer_type(&*source_pointer); + } + + // forward declarations for typedefs below + struct group; + struct item_index_tuple; // for use in sort() + + typedef typename std::allocator_traits::template rebind_alloc + aligned_allocator_type; + typedef typename std::allocator_traits::template rebind_alloc group_allocator_type; + typedef typename std::allocator_traits::template rebind_alloc skipfield_allocator_type; + typedef typename std::allocator_traits::template rebind_alloc + aligned_struct_allocator_type; + typedef typename std::allocator_traits::template rebind_alloc tuple_allocator_type; + typedef typename std::allocator_traits::template rebind_alloc uchar_allocator_type; + + typedef typename std::allocator_traits::pointer + aligned_pointer_type; // pointer to the (potentially overaligned) element type, not the original element type + typedef typename std::allocator_traits::pointer group_pointer_type; + typedef typename std::allocator_traits::pointer skipfield_pointer_type; + typedef typename std::allocator_traits::pointer aligned_struct_pointer_type; + typedef typename std::allocator_traits::pointer tuple_pointer_type; + + // group == element memory block + skipfield + block metadata + struct group { + skipfield_pointer_type + skipfield; // Skipfield storage. The element and skipfield arrays are allocated contiguously, in a single + // allocation, in this implementation, hence the skipfield pointer also functions as a 'one-past-end' + // pointer for the elements array. There will always be one additional skipfield node allocated compared + // to the number of elements. This is to ensure a faster ++ iterator operation (fewer checks are required + // when this is present). The extra node is unused and always zero, but checked, and not having it will + // result in out-of-bounds memory errors. This is present before elements in the group struct as it is + // referenced constantly by the ++ operator, hence having it first results in a minor performance + // increase. + group_pointer_type next_group; // Next group in the linked list of all groups. NULL if no following group. 2nd in struct + // because it is so frequently used during iteration. + const aligned_pointer_type elements; // Element storage. + group_pointer_type previous_group; // Previous group in the linked list of all groups. NULL if no preceding group. + skipfield_type + free_list_head; // The index of the last erased element in the group. The last erased element will, in turn, contain + // the number of the index of the next erased element, and so on. If this is == maximum + // skipfield_type value then free_list is empty ie. no erasures have occurred in the group (or if + // they have, the erased locations have subsequently been reused via insert/emplace/assign). 
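+        // Note: the memory of erased elements is reused to hold two skipfield_type indexes - the previous and next skipblocks
+        // in this group's free list (written via edit_free_list_prev/next/head below) - which is why aligned_element_struct
+        // above is padded to at least 2 * sizeof(skipfield_type).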
+ const skipfield_type capacity; // The element capacity of this particular group - can also be calculated from + // reinterpret_cast(group->skipfield) - group->elements, however + // this space is effectively free due to struct padding and the sizeof(skipfield_type), + // and calculating it once is faster in benchmarking. + skipfield_type + size; // The total number of active elements in group - changes with insert and erase commands - used to check for + // empty group in erase function, as an indication to remove the group. Also used in combination with capacity + // to check if group is full, which is used in the next/previous/advance/distance overloads, and range-erase. + group_pointer_type erasures_list_next_group, + erasures_list_previous_group; // The next and previous groups in the list of groups with erasures ie. with active + // erased-element free lists. NULL if no next or previous group. + size_type group_number; // Used for comparison (> < >= <= <=>) iterator operators (used by distance function and user). + + group(aligned_struct_allocator_type &aligned_struct_allocator, + const skipfield_type elements_per_group, + const group_pointer_type previous) + : next_group(NULL), + elements(pointer_cast( + std::allocator_traits::allocate(aligned_struct_allocator, + get_aligned_block_capacity(elements_per_group), + (previous == 0) ? 0 : previous->elements))), + previous_group(previous), + free_list_head(std::numeric_limits::max()), + capacity(elements_per_group), + size(1), + erasures_list_next_group(NULL), + erasures_list_previous_group(NULL), + group_number((previous == NULL) ? 0 : previous->group_number + 1u) + { + skipfield = pointer_cast(elements + elements_per_group); + std::memset(detail::void_cast(skipfield), + 0, + sizeof(skipfield_type) * (static_cast(elements_per_group) + 1u)); + } + + void reset(const skipfield_type increment, + const group_pointer_type next, + const group_pointer_type previous, + const size_type group_num) noexcept + { + next_group = next; + free_list_head = std::numeric_limits::max(); + previous_group = previous; + size = increment; + erasures_list_next_group = NULL; + erasures_list_previous_group = NULL; + group_number = group_num; + + std::memset(detail::void_cast(skipfield), + 0, + sizeof(skipfield_type) + * static_cast(capacity)); // capacity + 1 is not necessary here as the final skipfield + // node is never written to after initialization + } + }; + + // hive member variables: + + iterator end_iterator, begin_iterator; + group_pointer_type erasure_groups_head, // Head of doubly-linked list of groups which have erased-element memory locations + // available for re-use + unused_groups_head; // Head of singly-linked list of reserved groups retained by erase()/clear() or created by reserve() + size_type total_size, total_capacity; + skipfield_type min_block_capacity, max_block_capacity; + + group_allocator_type group_allocator; + aligned_struct_allocator_type aligned_struct_allocator; + skipfield_allocator_type skipfield_allocator; + tuple_allocator_type tuple_allocator; + + void check_capacities_conformance(const detail::limits capacities) const + { + constexpr detail::limits hard_capacities = block_capacity_hard_limits(); + + if (capacities.min < hard_capacities.min || capacities.min > capacities.max || capacities.max > hard_capacities.max) { + throw std::length_error( + "Supplied memory block capacity limits are either invalid or outside of block_capacity_hard_limits()"); + } + } + + void blank() noexcept + { + if constexpr 
(std::is_standard_layout::value && std::allocator_traits::is_always_equal::value + && std::is_trivial::value && std::is_trivial::value + && std::is_trivial::value) { // If all pointer types are trivial, we can just + // nuke the member variables from orbit with + // memset (NULL is always 0): + std::memset(static_cast(this), 0, offsetof(hive, min_block_capacity)); + } else { + end_iterator.group_pointer = NULL; + end_iterator.element_pointer = NULL; + end_iterator.skipfield_pointer = NULL; + begin_iterator.group_pointer = NULL; + begin_iterator.element_pointer = NULL; + begin_iterator.skipfield_pointer = NULL; + erasure_groups_head = NULL; + unused_groups_head = NULL; + total_size = 0; + total_capacity = 0; + } + } + + static constexpr size_t max_size_static() noexcept + { + using allocator_size_type = typename allocator_type::size_type; + using allocator_value_type = typename allocator_type::value_type; + return static_cast(std::numeric_limits::max() / sizeof(allocator_value_type)); + // return static_cast(std::allocator_traits::max_size(allocator_type())); + } + + void reserve_and_fill(const size_type size, const element_type &element) + { + if (size != 0) { + reserve(size); + end_iterator.group_pointer->next_group = unused_groups_head; + fill_unused_groups(size, element, 0, NULL, begin_iterator.group_pointer); + } + } + + template + void reserve_and_range_fill(const size_type size, const iterator_type &it) + { + if (size != 0) { + reserve(size); + end_iterator.group_pointer->next_group = unused_groups_head; + range_fill_unused_groups(size, it, 0, NULL, begin_iterator.group_pointer); + } + } + +public: + // Adaptive minimum based around aligned size, sizeof(group) and sizeof(hive): + static constexpr skipfield_type block_capacity_default_min() noexcept + { + constexpr const skipfield_type adaptive_size = + static_cast(((sizeof(hive) + sizeof(group)) * 2) / sizeof(aligned_element_struct)); + constexpr const skipfield_type max_block_capacity = + block_capacity_default_max(); // Necessary to check against in situations with > 64bit pointer sizes and small + // sizeof(T) + return std::max(static_cast(8), std::min(adaptive_size, max_block_capacity)); + } + + // Adaptive maximum based on numeric_limits and best outcome from multiple benchmark's (on balance) in terms of memory usage + // and performance: + static constexpr skipfield_type block_capacity_default_max() noexcept + { + constexpr size_t max_skipfield_size = static_cast(std::numeric_limits::max()); + constexpr size_t clamped_size = max_skipfield_size < 8192u ? max_skipfield_size : 8192u; + constexpr size_t max_block_capacity = max_size_static() < clamped_size ? 
max_size_static() : clamped_size; + return static_cast(max_block_capacity); + } + + static constexpr detail::limits block_capacity_default_limits() noexcept + { + return detail::limits(static_cast(block_capacity_default_min()), + static_cast(block_capacity_default_max())); + } + + // Default constructors: + + constexpr explicit hive(const allocator_type &alloc) noexcept + : allocator_type(alloc), + erasure_groups_head(NULL), + unused_groups_head(NULL), + total_size(0), + total_capacity(0), + min_block_capacity(block_capacity_default_min()), + max_block_capacity(block_capacity_default_max()), + group_allocator(*this), + aligned_struct_allocator(*this), + skipfield_allocator(*this), + tuple_allocator(*this) + { + } + + constexpr hive() noexcept(noexcept(allocator_type())) + : erasure_groups_head(NULL), + unused_groups_head(NULL), + total_size(0), + total_capacity(0), + min_block_capacity(block_capacity_default_min()), + max_block_capacity(block_capacity_default_max()), + group_allocator(*this), + aligned_struct_allocator(*this), + skipfield_allocator(*this), + tuple_allocator(*this) + { + } + + constexpr hive(const detail::limits block_limits, const allocator_type &alloc) noexcept + : allocator_type(alloc), + erasure_groups_head(NULL), + unused_groups_head(NULL), + total_size(0), + total_capacity(0), + min_block_capacity(static_cast(block_limits.min)), + max_block_capacity(static_cast(block_limits.max)), + group_allocator(*this), + aligned_struct_allocator(*this), + skipfield_allocator(*this), + tuple_allocator(*this) + { + check_capacities_conformance(block_limits); + } + + constexpr explicit hive(const detail::limits block_limits) + : erasure_groups_head(NULL), + unused_groups_head(NULL), + total_size(0), + total_capacity(0), + min_block_capacity(static_cast(block_limits.min)), + max_block_capacity(static_cast(block_limits.max)), + group_allocator(*this), + aligned_struct_allocator(*this), + skipfield_allocator(*this), + tuple_allocator(*this) + { + check_capacities_conformance(block_limits); + } + + // Copy constructors: + hive(const hive &source, const allocator_type &alloc) + : allocator_type(alloc), + erasure_groups_head(NULL), + unused_groups_head(NULL), + total_size(0), + total_capacity(0), + min_block_capacity(std::max( + source.min_block_capacity, + static_cast(std::min( + source.total_size, + static_cast( + source.max_block_capacity))))), // min group size is set to value closest to total number of elements in + // source hive, in order to not create unnecessary small groups in the + // range-insert below, then reverts to the original min group size + // afterwards. This effectively saves a call to reserve. + max_block_capacity(source.max_block_capacity), + group_allocator(*this), + aligned_struct_allocator(*this), + skipfield_allocator(*this), + tuple_allocator(*this) + { // can skip checking for skipfield conformance here as source will have already checked theirs. 
Same applies for other + // copy and move constructors below + reserve_and_range_fill(source.total_size, source.begin_iterator); + min_block_capacity = source.min_block_capacity; // reset to correct value for future operations + } + + hive(const hive &source) + : allocator_type(std::allocator_traits::select_on_container_copy_construction(source)), + erasure_groups_head(NULL), + unused_groups_head(NULL), + total_size(0), + total_capacity(0), + min_block_capacity(std::max( + source.min_block_capacity, + static_cast(std::min(source.total_size, static_cast(source.max_block_capacity))))), + max_block_capacity(source.max_block_capacity), + group_allocator(*this), + aligned_struct_allocator(*this), + skipfield_allocator(*this), + tuple_allocator(*this) + { + reserve_and_range_fill(source.total_size, source.begin_iterator); + min_block_capacity = source.min_block_capacity; // reset to correct value for future operations + } + + // Move constructors: + hive(hive &&source, const allocator_type &alloc) + : allocator_type(alloc), + end_iterator(source.end_iterator), + begin_iterator(source.begin_iterator), + erasure_groups_head(source.erasure_groups_head), + unused_groups_head(source.unused_groups_head), + total_size(source.total_size), + total_capacity(source.total_capacity), + min_block_capacity(source.min_block_capacity), + max_block_capacity(source.max_block_capacity), + group_allocator(alloc), + aligned_struct_allocator(alloc), + skipfield_allocator(alloc), + tuple_allocator(alloc) + { + assert(&source != this); + + if constexpr (!std::allocator_traits::is_always_equal::value) { + if (alloc != static_cast(source)) { + blank(); + static_cast(*this) = static_cast(source); + reserve_and_range_fill(source.total_size, detail::make_move_iterator(source.begin_iterator)); + source.destroy_all_data(); + } + } + + source.blank(); + } + + hive(hive &&source) noexcept + : allocator_type(static_cast(source)), + end_iterator(std::move(source.end_iterator)), + begin_iterator(std::move(source.begin_iterator)), + erasure_groups_head(std::move(source.erasure_groups_head)), + unused_groups_head(std::move(source.unused_groups_head)), + total_size(source.total_size), + total_capacity(source.total_capacity), + min_block_capacity(source.min_block_capacity), + max_block_capacity(source.max_block_capacity), + group_allocator(*this), + aligned_struct_allocator(*this), + skipfield_allocator(*this), + tuple_allocator(*this) + { + assert(&source != this); + source.blank(); + } + + // Fill constructors: + hive(const size_type fill_number, + const element_type &element, + const detail::limits block_limits, + const allocator_type &alloc = allocator_type()) + : allocator_type(alloc), + erasure_groups_head(NULL), + unused_groups_head(NULL), + total_size(0), + total_capacity(0), + min_block_capacity(static_cast(block_limits.min)), + max_block_capacity(static_cast(block_limits.max)), + group_allocator(*this), + aligned_struct_allocator(*this), + skipfield_allocator(*this), + tuple_allocator(*this) + { + check_capacities_conformance(block_limits); + reserve_and_fill(fill_number, element); + } + + hive(const size_type fill_number, const element_type &element, const allocator_type &alloc = allocator_type()) + : allocator_type(alloc), + erasure_groups_head(NULL), + unused_groups_head(NULL), + total_size(0), + total_capacity(0), + min_block_capacity(block_capacity_default_min()), + max_block_capacity(block_capacity_default_max()), + group_allocator(*this), + aligned_struct_allocator(*this), + skipfield_allocator(*this), + 
tuple_allocator(*this) + { + reserve_and_fill(fill_number, element); + } + + // Default-value fill constructors: + hive(const size_type fill_number, const detail::limits block_limits, const allocator_type &alloc = allocator_type()) + : allocator_type(alloc), + erasure_groups_head(NULL), + unused_groups_head(NULL), + total_size(0), + total_capacity(0), + min_block_capacity(static_cast(block_limits.min)), + max_block_capacity(static_cast(block_limits.max)), + group_allocator(*this), + aligned_struct_allocator(*this), + skipfield_allocator(*this), + tuple_allocator(*this) + { + check_capacities_conformance(block_limits); + reserve_and_fill(fill_number, element_type()); + } + + hive(const size_type fill_number, const allocator_type &alloc = allocator_type()) + : allocator_type(alloc), + erasure_groups_head(NULL), + unused_groups_head(NULL), + total_size(0), + total_capacity(0), + min_block_capacity(block_capacity_default_min()), + max_block_capacity(block_capacity_default_max()), + group_allocator(*this), + aligned_struct_allocator(*this), + skipfield_allocator(*this), + tuple_allocator(*this) + { + reserve_and_fill(fill_number, element_type()); + } + + // Range constructors: + template + hive(const typename detail::enable_if::is_integer, iterator_type>::type &first, + const iterator_type &last, + const detail::limits block_limits, + const allocator_type &alloc = allocator_type()) + : allocator_type(alloc), + erasure_groups_head(NULL), + unused_groups_head(NULL), + total_size(0), + total_capacity(0), + min_block_capacity(static_cast(block_limits.min)), + max_block_capacity(static_cast(block_limits.max)), + group_allocator(*this), + aligned_struct_allocator(*this), + skipfield_allocator(*this), + tuple_allocator(*this) + { + check_capacities_conformance(block_limits); + assign(first, last); + } + + template + hive(const typename detail::enable_if::is_integer, iterator_type>::type &first, + const iterator_type &last, + const allocator_type &alloc = allocator_type()) + : allocator_type(alloc), + erasure_groups_head(NULL), + unused_groups_head(NULL), + total_size(0), + total_capacity(0), + min_block_capacity(block_capacity_default_min()), + max_block_capacity(block_capacity_default_max()), + group_allocator(*this), + aligned_struct_allocator(*this), + skipfield_allocator(*this), + tuple_allocator(*this) + { + assign(first, last); + } + + // Initializer-list constructors: + hive(const std::initializer_list &element_list, + const detail::limits block_limits, + const allocator_type &alloc = allocator_type()) + : allocator_type(alloc), + erasure_groups_head(NULL), + unused_groups_head(NULL), + total_size(0), + total_capacity(0), + min_block_capacity(static_cast(block_limits.min)), + max_block_capacity(static_cast(block_limits.max)), + group_allocator(*this), + aligned_struct_allocator(*this), + skipfield_allocator(*this), + tuple_allocator(*this) + { + check_capacities_conformance(block_limits); + reserve_and_range_fill(static_cast(element_list.size()), element_list.begin()); + } + + hive(const std::initializer_list &element_list, const allocator_type &alloc = allocator_type()) + : hive(element_list, block_capacity_default_limits(), alloc) + { + } + + // Everything else: + iterator begin() noexcept { return begin_iterator; } + + const_iterator begin() const noexcept { return begin_iterator; } + + iterator end() noexcept { return end_iterator; } + + const_iterator end() const noexcept { return end_iterator; } + + const_iterator cbegin() const noexcept { return begin_iterator; } + + const_iterator 
cend() const noexcept { return end_iterator; } + + reverse_iterator rbegin() noexcept + { + return (end_iterator.group_pointer != NULL) ? ++reverse_iterator(end_iterator.group_pointer, + end_iterator.element_pointer, + end_iterator.skipfield_pointer) + : reverse_iterator(begin_iterator.group_pointer, + begin_iterator.element_pointer - 1, + begin_iterator.skipfield_pointer - 1); + } + + const_reverse_iterator rbegin() const noexcept { return crbegin(); } + + reverse_iterator rend() noexcept + { + return reverse_iterator(begin_iterator.group_pointer, + begin_iterator.element_pointer - 1, + begin_iterator.skipfield_pointer - 1); + } + + const_reverse_iterator rend() const noexcept { return crend(); } + + const_reverse_iterator crbegin() const noexcept + { + return (end_iterator.group_pointer != NULL) ? ++const_reverse_iterator(end_iterator.group_pointer, + end_iterator.element_pointer, + end_iterator.skipfield_pointer) + : const_reverse_iterator(begin_iterator.group_pointer, + begin_iterator.element_pointer - 1, + begin_iterator.skipfield_pointer - 1); + } + + const_reverse_iterator crend() const noexcept + { + return const_reverse_iterator(begin_iterator.group_pointer, + begin_iterator.element_pointer - 1, + begin_iterator.skipfield_pointer - 1); + } + + ~hive() noexcept { destroy_all_data(); } + +private: + group_pointer_type allocate_new_group(const skipfield_type elements_per_group, const group_pointer_type previous = NULL) + { + const group_pointer_type new_group = std::allocator_traits::allocate(group_allocator, 1, 0); + + try { + std::allocator_traits::construct(group_allocator, + new_group, + aligned_struct_allocator, + elements_per_group, + previous); + } catch (...) { + std::allocator_traits::deallocate(group_allocator, new_group, 1); + throw; + } + + return new_group; + } + + void deallocate_group(const group_pointer_type the_group) noexcept + { + std::allocator_traits::deallocate( + aligned_struct_allocator, + pointer_cast(the_group->elements), + get_aligned_block_capacity(the_group->capacity)); + std::allocator_traits::deallocate(group_allocator, the_group, 1); + } + + constexpr void destroy_element(const aligned_pointer_type element) noexcept + { + std::allocator_traits::destroy(*this, pointer_cast(element)); + } + + void destroy_group(const aligned_pointer_type end_pointer) noexcept + { + if constexpr (!std::is_trivially_destructible::value) { + do { + destroy_element(begin_iterator.element_pointer); + begin_iterator.element_pointer += static_cast(*++begin_iterator.skipfield_pointer) + 1u; + begin_iterator.skipfield_pointer += *begin_iterator.skipfield_pointer; + } while (begin_iterator.element_pointer != end_pointer); + } + + deallocate_group(begin_iterator.group_pointer); + } + + void destroy_all_data() noexcept + { + if (begin_iterator.group_pointer != NULL) { + end_iterator.group_pointer->next_group = unused_groups_head; // Link used and unused_group lists together + + if constexpr (!std::is_trivially_destructible::value) { + if (total_size != 0) { + while (begin_iterator.group_pointer + != end_iterator.group_pointer) // Erase elements without bothering to update skipfield - much faster: + { + const group_pointer_type next_group = begin_iterator.group_pointer->next_group; + destroy_group(pointer_cast(begin_iterator.group_pointer->skipfield)); + begin_iterator.group_pointer = next_group; + begin_iterator.element_pointer = next_group->elements + *(next_group->skipfield); + begin_iterator.skipfield_pointer = next_group->skipfield + *(next_group->skipfield); + } + + 
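+                    // All groups before the final one have been destroyed and deallocated above; begin_iterator now refers to
+                    // end_iterator's group, so destroy its remaining elements up to the end position. The unused groups which
+                    // follow contain no constructed elements and only need deallocating, handled by the loop below.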
destroy_group(end_iterator.element_pointer); + begin_iterator.group_pointer = unused_groups_head; + } + } + + while (begin_iterator.group_pointer != NULL) { + const group_pointer_type next_group = begin_iterator.group_pointer->next_group; + deallocate_group(begin_iterator.group_pointer); + begin_iterator.group_pointer = next_group; + } + } + } + + void initialize(const skipfield_type first_group_size) + { + end_iterator.group_pointer = begin_iterator.group_pointer = allocate_new_group(first_group_size); + end_iterator.element_pointer = begin_iterator.element_pointer = begin_iterator.group_pointer->elements; + end_iterator.skipfield_pointer = begin_iterator.skipfield_pointer = begin_iterator.group_pointer->skipfield; + total_capacity = first_group_size; + } + + void edit_free_list(const skipfield_pointer_type location, const skipfield_type value) noexcept + { + std::allocator_traits::destroy(skipfield_allocator, location); + std::allocator_traits::construct(skipfield_allocator, location, value); + } + + void edit_free_list_prev(const aligned_pointer_type location, + const skipfield_type value) noexcept // Write to the 'previous erased element' index in the erased + // element memory location + { + edit_free_list(pointer_cast(location), value); + } + + void edit_free_list_next(const aligned_pointer_type location, const skipfield_type value) noexcept // Ditto 'next' + { + edit_free_list(pointer_cast(location) + 1, value); + } + + void edit_free_list_head(const aligned_pointer_type location, const skipfield_type value) noexcept + { + const skipfield_pointer_type converted_location = pointer_cast(location); + edit_free_list(converted_location, value); + edit_free_list(converted_location + 1, std::numeric_limits::max()); + } + + void update_skipblock(const iterator &new_location, const skipfield_type prev_free_list_index) noexcept + { + const skipfield_type new_value = static_cast(*(new_location.skipfield_pointer) - 1); + + if (new_value != 0) // ie. skipfield was not 1, ie. a single-node skipblock, with no additional nodes to update + { + // set (new) start and (original) end of skipblock to new value: + *(new_location.skipfield_pointer + new_value) = *(new_location.skipfield_pointer + 1) = new_value; + + // transfer free list node to new start node: + ++(erasure_groups_head->free_list_head); + + if (prev_free_list_index != std::numeric_limits::max()) // ie. not the tail free list node + { + edit_free_list_next(new_location.group_pointer->elements + prev_free_list_index, + erasure_groups_head->free_list_head); + } + + edit_free_list_head(new_location.element_pointer + 1, prev_free_list_index); + } else // single-node skipblock, remove skipblock + { + erasure_groups_head->free_list_head = prev_free_list_index; + + if (prev_free_list_index != std::numeric_limits::max()) // ie. not the last free list node + { + edit_free_list_next(new_location.group_pointer->elements + prev_free_list_index, + std::numeric_limits::max()); + } else // remove this group from the list of groups with erasures + { + erasure_groups_head = + erasure_groups_head->erasures_list_next_group; // No need to update previous group for new head, as this is + // never accessed if group == head + } + } + + *(new_location.skipfield_pointer) = 0; + ++(new_location.group_pointer->size); + + if (new_location.group_pointer == begin_iterator.group_pointer + && new_location.element_pointer + < begin_iterator.element_pointer) { /* ie. 
begin_iterator was moved forwards as the result of an erasure at + some point, this erased element is before the current begin, hence, + set current begin iterator to this element */ + begin_iterator = new_location; + } + + ++total_size; + } + + void update_subsequent_group_numbers(size_type current_group_number, group_pointer_type update_group) noexcept + { + do { + update_group->group_number = current_group_number++; + update_group = update_group->next_group; + } while (update_group != NULL); + } + + void reset_group_numbers() noexcept { update_subsequent_group_numbers(0, begin_iterator.group_pointer); } + + void reset_group_numbers_if_necessary() noexcept + { + if (end_iterator.group_pointer->group_number == std::numeric_limits::max()) { reset_group_numbers(); } + } + + group_pointer_type reuse_unused_group() noexcept + { + const group_pointer_type reused_group = unused_groups_head; + unused_groups_head = reused_group->next_group; + reset_group_numbers_if_necessary(); + reused_group->reset(1, NULL, end_iterator.group_pointer, end_iterator.group_pointer->group_number + 1u); + return reused_group; + } + +public: + void reset() noexcept + { + destroy_all_data(); + blank(); + } + + iterator insert(const element_type &element) + { + if (end_iterator.element_pointer != NULL) { + if (erasure_groups_head == NULL) // ie. there are no erased elements + { + if (end_iterator.element_pointer + != pointer_cast( + end_iterator.group_pointer->skipfield)) // ie. end_iterator is not at end of block + { + std::allocator_traits::construct(*this, + pointer_cast(end_iterator.element_pointer), + element); + + const iterator return_iterator = end_iterator; + ++end_iterator.element_pointer; + ++end_iterator.skipfield_pointer; + ++(end_iterator.group_pointer->size); + ++total_size; + return return_iterator; + } else { + group_pointer_type next_group; + + if (unused_groups_head == NULL) { + const skipfield_type new_group_size = + static_cast(std::min(total_size, static_cast(max_block_capacity))); + reset_group_numbers_if_necessary(); + next_group = allocate_new_group(new_group_size, end_iterator.group_pointer); + + if constexpr (std::is_nothrow_copy_constructible::value) { + std::allocator_traits::construct(*this, + pointer_cast(next_group->elements), + element); + } else { + try { + std::allocator_traits::construct(*this, + pointer_cast(next_group->elements), + element); + } catch (...) { + deallocate_group(next_group); + throw; + } + } + + total_capacity += new_group_size; + } else { + std::allocator_traits::construct(*this, + pointer_cast(unused_groups_head->elements), + element); + next_group = reuse_unused_group(); + } + + end_iterator.group_pointer->next_group = next_group; + end_iterator.group_pointer = next_group; + end_iterator.element_pointer = next_group->elements + 1; + end_iterator.skipfield_pointer = next_group->skipfield + 1; + ++total_size; + + return iterator(next_group, next_group->elements, next_group->skipfield); + } + } else // there are erased elements, reuse those memory locations + { + iterator new_location(erasure_groups_head, + erasure_groups_head->elements + erasure_groups_head->free_list_head, + erasure_groups_head->skipfield + erasure_groups_head->free_list_head); + + // We always reuse the element at the start of the skipblock, this is also where the free-list information for + // that skipblock is stored. Get the previous free-list node's index from this memory space, before we write to + // our element to it. 
'Next' index is always the free_list_head (as represented by the maximum value of the + // skipfield type) here so we don't need to get it: + const skipfield_type prev_free_list_index = *pointer_cast(new_location.element_pointer); + std::allocator_traits::construct(*this, + pointer_cast(new_location.element_pointer), + element); + update_skipblock(new_location, prev_free_list_index); + + return new_location; + } + } else // ie. newly-constructed hive, no insertions yet and no groups + { + initialize(min_block_capacity); + + if constexpr (std::is_nothrow_copy_constructible::value) { + std::allocator_traits::construct(*this, + pointer_cast(end_iterator.element_pointer++), + element); + } else { + try { + std::allocator_traits::construct(*this, + pointer_cast(end_iterator.element_pointer++), + element); + } catch (...) { + reset(); + throw; + } + } + + ++end_iterator.skipfield_pointer; + total_size = 1; + return begin_iterator; + } + } + + iterator insert(element_type &&element) // The move-insert function is near-identical to the regular insert function, with + // the exception of the element construction method and is_nothrow tests. + { + if (end_iterator.element_pointer != NULL) { + if (erasure_groups_head == NULL) { + if (end_iterator.element_pointer != pointer_cast(end_iterator.group_pointer->skipfield)) { + std ::allocator_traits::construct(*this, + pointer_cast(end_iterator.element_pointer), + std ::move(element)); + + const iterator return_iterator = end_iterator; + ++end_iterator.element_pointer; + ++end_iterator.skipfield_pointer; + ++(end_iterator.group_pointer->size); + ++total_size; + + return return_iterator; + } else { + group_pointer_type next_group; + + if (unused_groups_head == NULL) { + const skipfield_type new_group_size = + static_cast(std::min(total_size, static_cast(max_block_capacity))); + reset_group_numbers_if_necessary(); + next_group = allocate_new_group(new_group_size, end_iterator.group_pointer); + + if constexpr (std::is_nothrow_move_constructible::value) { + std::allocator_traits::construct(*this, + pointer_cast(next_group->elements), + std::move(element)); + } else { + try { + std::allocator_traits::construct(*this, + pointer_cast(next_group->elements), + std::move(element)); + } catch (...) 
{ + deallocate_group(next_group); + throw; + } + } + + total_capacity += new_group_size; + } else { + std::allocator_traits::construct(*this, + pointer_cast(unused_groups_head->elements), + std::move(element)); + next_group = reuse_unused_group(); + } + + end_iterator.group_pointer->next_group = next_group; + end_iterator.group_pointer = next_group; + end_iterator.element_pointer = next_group->elements + 1; + end_iterator.skipfield_pointer = next_group->skipfield + 1; + ++total_size; + + return iterator(next_group, next_group->elements, next_group->skipfield); + } + } else { + iterator new_location(erasure_groups_head, + erasure_groups_head->elements + erasure_groups_head->free_list_head, + erasure_groups_head->skipfield + erasure_groups_head->free_list_head); + + const skipfield_type prev_free_list_index = *pointer_cast(new_location.element_pointer); + std::allocator_traits::construct(*this, + pointer_cast(new_location.element_pointer), + std::move(element)); + update_skipblock(new_location, prev_free_list_index); + + return new_location; + } + } else { + initialize(min_block_capacity); + + if constexpr (std::is_nothrow_move_constructible::value) { + std::allocator_traits::construct(*this, + pointer_cast(end_iterator.element_pointer++), + std::move(element)); + } else { + try { + std::allocator_traits::construct(*this, + pointer_cast(end_iterator.element_pointer++), + std::move(element)); + } catch (...) { + reset(); + throw; + } + } + + ++end_iterator.skipfield_pointer; + total_size = 1; + return begin_iterator; + } + } + + template + iterator emplace( + arguments &&...parameters) // The emplace function is near-identical to the regular insert function, with the exception + // of the element construction method, and change to is_nothrow tests. + { + if (end_iterator.element_pointer != NULL) { + if (erasure_groups_head == NULL) { + if (end_iterator.element_pointer != pointer_cast(end_iterator.group_pointer->skipfield)) { + std::allocator_traits::construct(*this, + pointer_cast(end_iterator.element_pointer), + std::forward(parameters)...); + + const iterator return_iterator = end_iterator; + ++end_iterator.element_pointer; + ++end_iterator.skipfield_pointer; + ++(end_iterator.group_pointer->size); + ++total_size; + return return_iterator; + } + + group_pointer_type next_group; + + if (unused_groups_head == NULL) { + const skipfield_type new_group_size = + static_cast(std::min(total_size, static_cast(max_block_capacity))); + reset_group_numbers_if_necessary(); + next_group = allocate_new_group(new_group_size, end_iterator.group_pointer); + + if constexpr (std::is_nothrow_constructible::value) { + std::allocator_traits::construct(*this, + pointer_cast(next_group->elements), + std::forward(parameters)...); + } else { + try { + std::allocator_traits::construct(*this, + pointer_cast(next_group->elements), + std::forward(parameters)...); + } catch (...) 
{ + deallocate_group(next_group); + throw; + } + } + + total_capacity += new_group_size; + } else { + std::allocator_traits::construct(*this, + pointer_cast(unused_groups_head->elements), + std::forward(parameters)...); + next_group = reuse_unused_group(); + } + + end_iterator.group_pointer->next_group = next_group; + end_iterator.group_pointer = next_group; + end_iterator.element_pointer = next_group->elements + 1; + end_iterator.skipfield_pointer = next_group->skipfield + 1; + ++total_size; + + return iterator(next_group, next_group->elements, next_group->skipfield); + } else { + iterator new_location(erasure_groups_head, + erasure_groups_head->elements + erasure_groups_head->free_list_head, + erasure_groups_head->skipfield + erasure_groups_head->free_list_head); + + const skipfield_type prev_free_list_index = *pointer_cast(new_location.element_pointer); + std::allocator_traits::construct(*this, + pointer_cast(new_location.element_pointer), + std::forward(parameters)...); + update_skipblock(new_location, prev_free_list_index); + + return new_location; + } + } else { + initialize(min_block_capacity); + + if constexpr (std::is_nothrow_constructible::value) { + std::allocator_traits::construct(*this, + pointer_cast(end_iterator.element_pointer++), + std::forward(parameters)...); + } else { + try { + std::allocator_traits::construct(*this, + pointer_cast(end_iterator.element_pointer++), + std::forward(parameters)...); + } catch (...) { + reset(); + throw; + } + } + + ++end_iterator.skipfield_pointer; + total_size = 1; + return begin_iterator; + } + } + +private: + // For catch blocks in fill() and range_fill() + void recover_from_partial_fill() + { + if constexpr ((!std::is_copy_constructible::value + && !std::is_nothrow_move_constructible::value) + || !std::is_nothrow_copy_constructible::value) // to avoid unnecessary codegen, since + // this function will never be called if + // this line isn't true + { + const skipfield_type elements_constructed_before_exception = + static_cast(end_iterator.element_pointer - end_iterator.group_pointer->elements); + end_iterator.group_pointer->size = elements_constructed_before_exception; + end_iterator.skipfield_pointer = end_iterator.group_pointer->skipfield + elements_constructed_before_exception; + total_size += elements_constructed_before_exception; + unused_groups_head = end_iterator.group_pointer->next_group; + end_iterator.group_pointer->next_group = NULL; + } + } + + void fill(const element_type &element, const skipfield_type size) + { + if constexpr (std::is_nothrow_copy_constructible::value) { + if constexpr (sizeof(aligned_element_struct) != sizeof(element_type)) { + alignas(alignof(aligned_element_struct)) element_type aligned_copy = + element; // to avoid potentially violating memory boundaries in line below, create an initial object copy of + // same (but aligned) type + std::uninitialized_fill_n(end_iterator.element_pointer, + size, + *pointer_cast(&aligned_copy)); + } else { + std::uninitialized_fill_n(pointer_cast(end_iterator.element_pointer), size, element); + } + + end_iterator.element_pointer += size; + } else { + const aligned_pointer_type fill_end = end_iterator.element_pointer + size; + + do { + try { + std::allocator_traits::construct(*this, + pointer_cast(end_iterator.element_pointer), + element); + } catch (...) 
{ + recover_from_partial_fill(); + throw; + } + } while (++end_iterator.element_pointer != fill_end); + } + + total_size += size; + } + + // For catch blocks in range_fill_skipblock and fill_skipblock + void recover_from_partial_skipblock_fill(const aligned_pointer_type location, + const aligned_pointer_type current_location, + const skipfield_pointer_type skipfield_pointer, + const skipfield_type prev_free_list_node) + { + if constexpr ((!std::is_copy_constructible::value + && !std::is_nothrow_move_constructible::value) + || !std::is_nothrow_copy_constructible::value) // to avoid unnecessary codegen + { + // Reconstruct existing skipblock and free-list indexes to reflect partially-reused skipblock: + const skipfield_type elements_constructed_before_exception = + static_cast(current_location - location); + erasure_groups_head->size = + static_cast(erasure_groups_head->size + elements_constructed_before_exception); + total_size += elements_constructed_before_exception; + + std::memset(skipfield_pointer, 0, elements_constructed_before_exception * sizeof(skipfield_type)); + + edit_free_list_head(location + elements_constructed_before_exception, prev_free_list_node); + + const skipfield_type new_skipblock_head_index = + static_cast((location - erasure_groups_head->elements) + elements_constructed_before_exception); + erasure_groups_head->free_list_head = new_skipblock_head_index; + + if (prev_free_list_node != std::numeric_limits::max()) { + edit_free_list_next(erasure_groups_head->elements + prev_free_list_node, new_skipblock_head_index); + } + } + } + + void fill_skipblock(const element_type &element, + const aligned_pointer_type location, + const skipfield_pointer_type skipfield_pointer, + const skipfield_type size) + { + if constexpr (std::is_nothrow_copy_constructible::value) { + if constexpr (sizeof(aligned_element_struct) != sizeof(element_type)) { + alignas(alignof(aligned_element_struct)) element_type aligned_copy = element; + std::uninitialized_fill_n(location, size, *pointer_cast(&aligned_copy)); + } else { + std::uninitialized_fill_n(pointer_cast(location), size, element); + } + } else { + const aligned_pointer_type fill_end = location + size; + const skipfield_type prev_free_list_node = *pointer_cast( + location); // in case of exception, grabbing indexes before free_list node is reused + + for (aligned_pointer_type current_location = location; current_location != fill_end; ++current_location) { + try { + std::allocator_traits::construct(*this, pointer_cast(current_location), element); + } catch (...) 
{ + recover_from_partial_skipblock_fill(location, current_location, skipfield_pointer, prev_free_list_node); + throw; + } + } + } + + std::memset(skipfield_pointer, 0, size * sizeof(skipfield_type)); // reset skipfield nodes within skipblock to 0 + erasure_groups_head->size = static_cast(erasure_groups_head->size + size); + total_size += size; + } + + void fill_unused_groups(size_type size, + const element_type &element, + size_type group_number, + group_pointer_type previous_group, + const group_pointer_type current_group) + { + for (end_iterator.group_pointer = current_group; end_iterator.group_pointer->capacity < size; + end_iterator.group_pointer = end_iterator.group_pointer->next_group) { + const skipfield_type capacity = end_iterator.group_pointer->capacity; + end_iterator.group_pointer->reset(capacity, end_iterator.group_pointer->next_group, previous_group, group_number++); + previous_group = end_iterator.group_pointer; + size -= static_cast(capacity); + end_iterator.element_pointer = end_iterator.group_pointer->elements; + fill(element, capacity); + } + + // Deal with final group (partial fill) + unused_groups_head = end_iterator.group_pointer->next_group; + end_iterator.group_pointer->reset(static_cast(size), NULL, previous_group, group_number); + end_iterator.element_pointer = end_iterator.group_pointer->elements; + end_iterator.skipfield_pointer = end_iterator.group_pointer->skipfield + size; + fill(element, static_cast(size)); + } + +public: + // Fill insert + + void insert(size_type size, const element_type &element) + { + if (size == 0) { + return; + } else if (size == 1) { + insert(element); + return; + } + + if (total_size == 0) { + prepare_groups_for_assign(size); + fill_unused_groups(size, element, 0, NULL, begin_iterator.group_pointer); + return; + } + + reserve(total_size + size); + + // Use up erased locations if available: + while (erasure_groups_head + != NULL) // skipblock loop: breaks when hive is exhausted of reusable skipblocks, or returns if size == 0 + { + const aligned_pointer_type element_pointer = erasure_groups_head->elements + erasure_groups_head->free_list_head; + const skipfield_pointer_type skipfield_pointer = + erasure_groups_head->skipfield + erasure_groups_head->free_list_head; + const skipfield_type skipblock_size = *skipfield_pointer; + + if (erasure_groups_head == begin_iterator.group_pointer && element_pointer < begin_iterator.element_pointer) { + begin_iterator.element_pointer = element_pointer; + begin_iterator.skipfield_pointer = skipfield_pointer; + } + + if (skipblock_size <= size) { + erasure_groups_head->free_list_head = + *pointer_cast(element_pointer); // set free list head to previous free list node + fill_skipblock(element, element_pointer, skipfield_pointer, skipblock_size); + size -= skipblock_size; + + if (erasure_groups_head->free_list_head + != std::numeric_limits::max()) // ie. 
there are more skipblocks to be filled in this group + { + edit_free_list_next(erasure_groups_head->elements + erasure_groups_head->free_list_head, + std::numeric_limits::max()); // set 'next' index of new free list head + // to 'end' (numeric max) + } else { + erasure_groups_head = erasure_groups_head->erasures_list_next_group; // change groups + } + + if (size == 0) return; + } else // skipblock is larger than remaining number of elements + { + const skipfield_type prev_index = + *pointer_cast(element_pointer); // save before element location is overwritten + fill_skipblock(element, element_pointer, skipfield_pointer, static_cast(size)); + const skipfield_type new_skipblock_size = static_cast(skipblock_size - size); + + // Update skipfield (earlier nodes already memset'd in fill_skipblock function): + *(skipfield_pointer + size) = new_skipblock_size; + *(skipfield_pointer + skipblock_size - 1) = new_skipblock_size; + erasure_groups_head->free_list_head = static_cast( + erasure_groups_head->free_list_head + size); // set free list head to new start node + + // Update free list with new head: + edit_free_list_head(element_pointer + size, prev_index); + + if (prev_index != std::numeric_limits::max()) { + edit_free_list_next( + erasure_groups_head->elements + prev_index, + erasure_groups_head + ->free_list_head); // set 'next' index of previous skipblock to new start of skipblock + } + + return; + } + } + + // Use up remaining available element locations in end group: + // This variable is either the remaining capacity of the group or the number of elements yet to be inserted, whichever + // is smaller: + const skipfield_type group_remainder = static_cast( + std::min(static_cast(pointer_cast(end_iterator.group_pointer->skipfield) + - end_iterator.element_pointer), + size)); + + if (group_remainder != 0) { + fill(element, group_remainder); + end_iterator.group_pointer->size = static_cast(end_iterator.group_pointer->size + group_remainder); + + if (size == group_remainder) // ie. 
remaining capacity was >= remaining elements to be filled + { + end_iterator.skipfield_pointer = end_iterator.group_pointer->skipfield + end_iterator.group_pointer->size; + return; + } + + size -= group_remainder; + } + + // Use unused groups: + end_iterator.group_pointer->next_group = unused_groups_head; + + if ((std::numeric_limits::max() - end_iterator.group_pointer->group_number) < size) { + reset_group_numbers(); + } + + fill_unused_groups(size, + element, + end_iterator.group_pointer->group_number + 1u, + end_iterator.group_pointer, + unused_groups_head); + } + +private: + template + void range_fill(iterator_type &it, const skipfield_type size) + { + const aligned_pointer_type fill_end = end_iterator.element_pointer + size; + + if constexpr (std::is_nothrow_copy_constructible::value) { + do { + std::allocator_traits::construct(*this, + pointer_cast(end_iterator.element_pointer), + *it++); + } while (++end_iterator.element_pointer != fill_end); + } else if constexpr (std::is_nothrow_move_constructible::value + && !std::is_copy_constructible::value) { + do { + std::allocator_traits::construct(*this, + pointer_cast(end_iterator.element_pointer), + std::move(*it++)); + } while (++end_iterator.element_pointer != fill_end); + } else { + do { + try { + if constexpr (!std::is_copy_constructible::value) { + std::allocator_traits::construct(*this, + pointer_cast(end_iterator.element_pointer), + std::move(*it++)); + } else + std::allocator_traits::construct(*this, + pointer_cast(end_iterator.element_pointer), + *it++); + } catch (...) { + recover_from_partial_fill(); + throw; + } + } while (++end_iterator.element_pointer != fill_end); + } + + total_size += size; + } + + template + void range_fill_skipblock(iterator_type &it, + const aligned_pointer_type location, + const skipfield_pointer_type skipfield_pointer, + const skipfield_type size) + { + const aligned_pointer_type fill_end = location + size; + + if constexpr (std::is_nothrow_copy_constructible::value) { + for (aligned_pointer_type current_location = location; current_location != fill_end; ++current_location) { + std::allocator_traits::construct(*this, pointer_cast(current_location), *it++); + } + } else if constexpr (std::is_nothrow_move_constructible::value + && !std::is_copy_constructible::value) { + for (aligned_pointer_type current_location = location; current_location != fill_end; ++current_location) { + std::allocator_traits::construct(*this, + pointer_cast(current_location), + std::move(*it++)); + } + } else { + const skipfield_type prev_free_list_node = *pointer_cast( + location); // in case of exception, grabbing indexes before free_list node is reused + + for (aligned_pointer_type current_location = location; current_location != fill_end; ++current_location) { + try { + if constexpr (!std::is_copy_constructible::value) { + std::allocator_traits::construct(*this, + pointer_cast(current_location), + std::move(*it++)); + } else + std::allocator_traits::construct(*this, pointer_cast(current_location), *it++); + } catch (...) 
{ + recover_from_partial_skipblock_fill(location, current_location, skipfield_pointer, prev_free_list_node); + throw; + } + } + } + + std::memset(skipfield_pointer, 0, size * sizeof(skipfield_type)); // reset skipfield nodes within skipblock to 0 + erasure_groups_head->size = static_cast(erasure_groups_head->size + size); + total_size += size; + } + + template + void range_fill_unused_groups(size_type size, + iterator_type it, + size_type group_number, + group_pointer_type previous_group, + const group_pointer_type current_group) + { + for (end_iterator.group_pointer = current_group; end_iterator.group_pointer->capacity < size; + end_iterator.group_pointer = end_iterator.group_pointer->next_group) { + const skipfield_type capacity = end_iterator.group_pointer->capacity; + end_iterator.group_pointer->reset(capacity, end_iterator.group_pointer->next_group, previous_group, group_number++); + previous_group = end_iterator.group_pointer; + size -= static_cast(capacity); + end_iterator.element_pointer = end_iterator.group_pointer->elements; + range_fill(it, capacity); + } + + // Deal with final group (partial fill) + unused_groups_head = end_iterator.group_pointer->next_group; + end_iterator.group_pointer->reset(static_cast(size), NULL, previous_group, group_number); + end_iterator.element_pointer = end_iterator.group_pointer->elements; + end_iterator.skipfield_pointer = end_iterator.group_pointer->skipfield + size; + range_fill(it, static_cast(size)); + } + + template + void range_insert( + iterator_type it, + size_type size) // this is near-identical to the fill insert, with the only alteration being incrementing an iterator + // for construction, rather than using a const element. And the fill etc function calls are changed to + // range_fill to match this pattern. 
See fill insert for code explanations + { + if (size == 0) { + return; + } else if (size == 1) { + insert(*it); + return; + } + + if (total_size == 0) { + prepare_groups_for_assign(size); + range_fill_unused_groups(size, it, 0, NULL, begin_iterator.group_pointer); + return; + } + + reserve(total_size + size); + + while (erasure_groups_head != NULL) { + const aligned_pointer_type element_pointer = erasure_groups_head->elements + erasure_groups_head->free_list_head; + const skipfield_pointer_type skipfield_pointer = + erasure_groups_head->skipfield + erasure_groups_head->free_list_head; + const skipfield_type skipblock_size = *skipfield_pointer; + + if (erasure_groups_head == begin_iterator.group_pointer && element_pointer < begin_iterator.element_pointer) { + begin_iterator.element_pointer = element_pointer; + begin_iterator.skipfield_pointer = skipfield_pointer; + } + + if (skipblock_size <= size) { + erasure_groups_head->free_list_head = *pointer_cast(element_pointer); + range_fill_skipblock(it, element_pointer, skipfield_pointer, skipblock_size); + size -= skipblock_size; + + if (erasure_groups_head->free_list_head != std::numeric_limits::max()) { + edit_free_list_next(erasure_groups_head->elements + erasure_groups_head->free_list_head, + std::numeric_limits::max()); + } else { + erasure_groups_head = erasure_groups_head->erasures_list_next_group; + } + + if (size == 0) return; + } else { + const skipfield_type prev_index = *pointer_cast(element_pointer); + range_fill_skipblock(it, element_pointer, skipfield_pointer, static_cast(size)); + const skipfield_type new_skipblock_size = static_cast(skipblock_size - size); + + *(skipfield_pointer + size) = new_skipblock_size; + *(skipfield_pointer + skipblock_size - 1) = new_skipblock_size; + erasure_groups_head->free_list_head = static_cast(erasure_groups_head->free_list_head + size); + edit_free_list_head(element_pointer + size, prev_index); + + if (prev_index != std::numeric_limits::max()) { + edit_free_list_next(erasure_groups_head->elements + prev_index, erasure_groups_head->free_list_head); + } + + return; + } + } + + const skipfield_type group_remainder = static_cast( + std::min(static_cast(pointer_cast(end_iterator.group_pointer->skipfield) + - end_iterator.element_pointer), + size)); + + if (group_remainder != 0) { + range_fill(it, group_remainder); + end_iterator.group_pointer->size = static_cast(end_iterator.group_pointer->size + group_remainder); + + if (size == group_remainder) { + end_iterator.skipfield_pointer = end_iterator.group_pointer->skipfield + end_iterator.group_pointer->size; + return; + } + + size -= group_remainder; + } + + end_iterator.group_pointer->next_group = unused_groups_head; + + if ((std::numeric_limits::max() - end_iterator.group_pointer->group_number) < size) { + reset_group_numbers(); + } + + range_fill_unused_groups(size, + it, + end_iterator.group_pointer->group_number + 1u, + end_iterator.group_pointer, + unused_groups_head); + } + +public: + // Range insert: + + template + void insert(const typename detail::enable_if::is_integer, iterator_type>::type first, + const iterator_type last) + { + range_insert(first, static_cast(std::distance(first, last))); + } + + template + void insert(const iterator first, const iterator last) + { + range_insert(first, static_cast(first.distance(last))); + } + + template + void insert(const const_iterator first, const const_iterator last) + { + range_insert(first, static_cast(first.distance(last))); + } + + template + void insert(const reverse_iterator first, const 
reverse_iterator last) + { + range_insert(first, static_cast(first.distance(last))); + } + + template + void insert(const const_reverse_iterator first, const const_reverse_iterator last) + { + range_insert(first, static_cast(first.distance(last))); + } + + // Range insert, move_iterator overload: + template + void insert(const std::move_iterator first, const std::move_iterator last) + { + range_insert(first, static_cast(std::distance(first.base(), last.base()))); + } + + // Initializer-list insert: + void insert(const std::initializer_list &element_list) + { + range_insert(element_list.begin(), static_cast(element_list.size())); + } + +private: + void remove_from_groups_with_erasures_list(const group_pointer_type group_to_remove) noexcept + { + if (group_to_remove != erasure_groups_head) { + group_to_remove->erasures_list_previous_group->erasures_list_next_group = group_to_remove->erasures_list_next_group; + + if (group_to_remove->erasures_list_next_group != NULL) { + group_to_remove->erasures_list_next_group->erasures_list_previous_group = + group_to_remove->erasures_list_previous_group; + } + } else { + erasure_groups_head = erasure_groups_head->erasures_list_next_group; + } + } + + void reset_only_group_left(const group_pointer_type group_pointer) noexcept + { + erasure_groups_head = NULL; + group_pointer->reset(0, NULL, NULL, 0); + + // Reset begin and end iterators: + end_iterator.element_pointer = begin_iterator.element_pointer = group_pointer->elements; + end_iterator.skipfield_pointer = begin_iterator.skipfield_pointer = group_pointer->skipfield; + } + + void add_group_to_unused_groups_list(const group_pointer_type group_pointer) noexcept + { + group_pointer->next_group = unused_groups_head; + unused_groups_head = group_pointer; + } + +public: + iterator erase(const const_iterator it) // if uninitialized/invalid iterator supplied, function could generate an exception + { + assert(total_size != 0); + assert(it.group_pointer != NULL); // ie. not uninitialized iterator + assert(it.element_pointer != end_iterator.element_pointer); // ie. != end() + assert(*(it.skipfield_pointer) == 0); // ie. element pointed to by iterator has not been erased previously + + if constexpr (!std::is_trivially_destructible::value) // Avoid the function call if possible + destroy_element(it.element_pointer); + --total_size; + + if (--(it.group_pointer->size) != 0) // ie. non-empty group at this point in time, don't consolidate + { + // Code logic for following section: + // --------------------------------- + // If current skipfield node has no skipblock on either side, create new skipblock of size 1 + // If node only has skipblock on left, set current node and start node of the skipblock to left node value + 1. 
+ // If node only has skipblock on right, make this node the start node of the skipblock and update end node + // If node has skipblocks on left and right, set start node of left skipblock and end node of right skipblock to the + // values of the left + right nodes + 1 + + // Optimization explanation: + // The contextual logic below is the same as that in the insert() functions but in this case the value of the + // current skipfield node will always be zero (since it is not yet erased), meaning no additional manipulations are + // necessary for the previous skipfield node comparison - we only have to check against zero + const char prev_skipfield = *(it.skipfield_pointer - (it.skipfield_pointer != it.group_pointer->skipfield)) + != 0; // true if previous node is erased or this node is at beginning of skipfield + const char after_skipfield = + *(it.skipfield_pointer + 1) + != 0; // NOTE: boundary test (checking against end-of-elements) is able to be skipped due to the extra skipfield + // node (compared to element field) - which is present to enable faster iterator operator ++ operations + skipfield_type update_value = 1; + + if (!(prev_skipfield | after_skipfield)) // no consecutive erased elements + { + *it.skipfield_pointer = 1; // solo skipped node + const skipfield_type index = static_cast(it.element_pointer - it.group_pointer->elements); + + if (it.group_pointer->free_list_head + != std::numeric_limits::max()) // ie. if this group already has some erased elements + { + edit_free_list_next( + it.group_pointer->elements + it.group_pointer->free_list_head, + index); // set prev free list head's 'next index' number to the index of the current element + } else { + it.group_pointer->erasures_list_next_group = + erasure_groups_head; // add it to the groups-with-erasures free list + + if (erasure_groups_head != NULL) { erasure_groups_head->erasures_list_previous_group = it.group_pointer; } + + erasure_groups_head = it.group_pointer; + } + + edit_free_list_head(it.element_pointer, it.group_pointer->free_list_head); + it.group_pointer->free_list_head = index; + } else if (prev_skipfield & (!after_skipfield)) // previous erased consecutive elements, none following + { + *(it.skipfield_pointer - *(it.skipfield_pointer - 1)) = *it.skipfield_pointer = + static_cast(*(it.skipfield_pointer - 1) + 1); + } else if ((!prev_skipfield) & after_skipfield) // following erased consecutive elements, none preceding + { + const skipfield_type following_value = static_cast(*(it.skipfield_pointer + 1) + 1); + *(it.skipfield_pointer + following_value - 1) = *(it.skipfield_pointer) = following_value; + + const skipfield_type following_previous = *(pointer_cast(it.element_pointer + 1)); + const skipfield_type following_next = *(pointer_cast(it.element_pointer + 1) + 1); + edit_free_list_prev(it.element_pointer, following_previous); + edit_free_list_next(it.element_pointer, following_next); + + const skipfield_type index = static_cast(it.element_pointer - it.group_pointer->elements); + + if (following_previous != std::numeric_limits::max()) { + edit_free_list_next(it.group_pointer->elements + following_previous, + index); // Set next index of previous free list node to this node's 'next' index + } + + if (following_next != std::numeric_limits::max()) { + edit_free_list_prev(it.group_pointer->elements + following_next, + index); // Set previous index of next free list node to this node's 'previous' index + } else { + it.group_pointer->free_list_head = index; + } + + update_value = following_value; + } else // both 
preceding and following consecutive erased elements - erased element is between two skipblocks + { + *(it.skipfield_pointer) = 1; // This line necessary in order for get_iterator() to work - ensures that erased + // element skipfield nodes are always non-zero + const skipfield_type preceding_value = *(it.skipfield_pointer - 1); + const skipfield_type following_value = static_cast(*(it.skipfield_pointer + 1) + 1); + + // Join the skipblocks + *(it.skipfield_pointer - preceding_value) = *(it.skipfield_pointer + following_value - 1) = + static_cast(preceding_value + following_value); + + // Remove the following skipblock's entry from the free list + const skipfield_type following_previous = *(pointer_cast(it.element_pointer + 1)); + const skipfield_type following_next = *(pointer_cast(it.element_pointer + 1) + 1); + + if (following_previous != std::numeric_limits::max()) { + edit_free_list_next( + it.group_pointer->elements + following_previous, + following_next); // Set next index of previous free list node to this node's 'next' index + } + + if (following_next != std::numeric_limits::max()) { + edit_free_list_prev( + it.group_pointer->elements + following_next, + following_previous); // Set previous index of next free list node to this node's 'previous' index + } else { + it.group_pointer->free_list_head = following_previous; + } + + update_value = following_value; + } + + iterator return_iterator(it.group_pointer, it.element_pointer + update_value, it.skipfield_pointer + update_value); + + if (return_iterator.element_pointer == pointer_cast(it.group_pointer->skipfield) + && it.group_pointer != end_iterator.group_pointer) { + return_iterator.group_pointer = it.group_pointer->next_group; + const aligned_pointer_type elements = return_iterator.group_pointer->elements; + const skipfield_pointer_type skipfield = return_iterator.group_pointer->skipfield; + const skipfield_type skip = *skipfield; + return_iterator.element_pointer = elements + skip; + return_iterator.skipfield_pointer = skipfield + skip; + } + + if (it.element_pointer == begin_iterator.element_pointer) + begin_iterator = return_iterator; // If original iterator was first element in hive, update it's value with + // the next non-erased element: + + return return_iterator; + } + + // else: group is empty, consolidate groups + const bool in_back_block = (it.group_pointer->next_group == NULL), + in_front_block = (it.group_pointer == begin_iterator.group_pointer); + + if (in_back_block & in_front_block) // ie. only group in hive + { + // Reset skipfield and free list rather than clearing - leads to fewer allocations/deallocations: + reset_only_group_left(it.group_pointer); + return end_iterator; + } else if ((!in_back_block) & in_front_block) // ie. Remove first group, change first group to next group + { + it.group_pointer->next_group->previous_group = NULL; // Cut off this group from the chain + begin_iterator.group_pointer = it.group_pointer->next_group; // Make the next group the first group + + if (it.group_pointer->free_list_head + != std::numeric_limits::max()) // Erasures present within the group, ie. was part of the linked + // list of groups with erasures. + { + remove_from_groups_with_erasures_list(it.group_pointer); + } + + total_capacity -= it.group_pointer->capacity; + deallocate_group(it.group_pointer); + + // note: end iterator only needs to be changed if the deleted group was the final group in the chain ie. 
not in this + // case + begin_iterator.element_pointer = + begin_iterator.group_pointer->elements + + *(begin_iterator.group_pointer->skipfield); // If the beginning index has been erased (ie. skipfield != 0), + // skip to next non-erased element + begin_iterator.skipfield_pointer = + begin_iterator.group_pointer->skipfield + *(begin_iterator.group_pointer->skipfield); + + return begin_iterator; + } else if (!(in_back_block | in_front_block)) // this is a non-first group but not final group in chain: delete the + // group, then link previous group to the next group in the chain: + { + it.group_pointer->next_group->previous_group = it.group_pointer->previous_group; + const group_pointer_type return_group = it.group_pointer->previous_group->next_group = + it.group_pointer->next_group; // close the chain, removing this group from it + + if (it.group_pointer->free_list_head != std::numeric_limits::max()) { + remove_from_groups_with_erasures_list(it.group_pointer); + } + + if constexpr (priority == performance) { + if (it.group_pointer->next_group != end_iterator.group_pointer) { + total_capacity -= it.group_pointer->capacity; + deallocate_group(it.group_pointer); + } else { // ie. second to last block in iterative sequence + add_group_to_unused_groups_list(it.group_pointer); + } + } else { + total_capacity -= it.group_pointer->capacity; + deallocate_group(it.group_pointer); + } + + // Return next group's first non-erased element: + return iterator(return_group, + return_group->elements + *(return_group->skipfield), + return_group->skipfield + *(return_group->skipfield)); + } else // this is a non-first group and the final group in the chain + { + if (it.group_pointer->free_list_head != std::numeric_limits::max()) { + remove_from_groups_with_erasures_list(it.group_pointer); + } + + it.group_pointer->previous_group->next_group = NULL; + end_iterator.group_pointer = + it.group_pointer + ->previous_group; // end iterator needs to be changed as element supplied was the back element of the hive + end_iterator.element_pointer = pointer_cast(end_iterator.group_pointer->skipfield); + end_iterator.skipfield_pointer = end_iterator.group_pointer->skipfield + end_iterator.group_pointer->capacity; + + if constexpr (priority == performance) { + add_group_to_unused_groups_list(it.group_pointer); + } else { + if (unused_groups_head + != NULL) // priority == memory_use, if there are other reserved blocks already, get rid of this one + { + total_capacity -= it.group_pointer->capacity; + deallocate_group(it.group_pointer); + } else // otherwise, retain - prevents unnecessary allocations/deallocations with stack-like usage + { + add_group_to_unused_groups_list(it.group_pointer); + } + } + + return end_iterator; + } + } + + // Range erase: + + iterator erase(const const_iterator iterator1, + const const_iterator iterator2) // if uninitialized/invalid iterators supplied, function could generate an + // exception. If iterator1 > iterator2, behaviour is undefined. + { + assert(iterator1 <= iterator2); + + const_iterator current = iterator1; + + if (current.group_pointer != iterator2.group_pointer) // ie. 
if start and end iterators are in separate groups + { + if (current.element_pointer + != current.group_pointer->elements + + *(current.group_pointer->skipfield)) // if iterator1 is not the first non-erased element in it's group + // - most common case + { + size_type number_of_group_erasures = 0; + + // Now update skipfield: + const aligned_pointer_type end = pointer_cast(iterator1.group_pointer->skipfield); + + // Schema: first erase all non-erased elements until end of group & remove all skipblocks post-iterator1 from + // the free_list. Then, either update preceding skipblock or create new one: + + if (current.group_pointer->free_list_head + == std::numeric_limits::max()) // ie. no other erasures in block + { + number_of_group_erasures += static_cast(end - current.element_pointer); + + if constexpr (!std::is_trivially_destructible::value) { + do // Avoid checking skipfield for rest of elements in group, as there are no skipped elements + { + destroy_element(current.element_pointer); + } while (++current.element_pointer != end); + } + } else { + while (current.element_pointer != end) { + if (*current.skipfield_pointer == 0) { + if constexpr (!std::is_trivially_destructible::value) { + destroy_element(current.element_pointer); + } + ++number_of_group_erasures; + ++current.element_pointer; + ++current.skipfield_pointer; + } else // remove skipblock from group: + { + const skipfield_type prev_free_list_index = + *(pointer_cast(current.element_pointer)); + const skipfield_type next_free_list_index = + *(pointer_cast(current.element_pointer) + 1); + + current.element_pointer += *(current.skipfield_pointer); + current.skipfield_pointer += *(current.skipfield_pointer); + + if (next_free_list_index == std::numeric_limits::max() + && prev_free_list_index + == std::numeric_limits::max()) // if this is the last skipblock in the + // free list + { + remove_from_groups_with_erasures_list( + iterator1.group_pointer); // remove group from list of free-list groups - will be added back + // in down below, but not worth optimizing for + iterator1.group_pointer->free_list_head = std::numeric_limits::max(); + number_of_group_erasures += static_cast(end - current.element_pointer); + + if constexpr (!std::is_trivially_destructible::value) { + while (current.element_pointer + != end) // miniloop - avoid checking skipfield for rest of elements in group, as + // there are no more skipped elements now + { + destroy_element(current.element_pointer++); + } + } + + break; // end overall while loop + } else if (next_free_list_index + == std::numeric_limits::max()) // if this is the head of the free list + { + current.group_pointer->free_list_head = + prev_free_list_index; // make free list head equal to next free list node + edit_free_list_next(current.group_pointer->elements + prev_free_list_index, + std::numeric_limits::max()); + } else // either a tail or middle free list node + { + edit_free_list_prev(current.group_pointer->elements + next_free_list_index, + prev_free_list_index); + + if (prev_free_list_index + != std::numeric_limits::max()) // ie. 
not the tail free list node + { + edit_free_list_next(current.group_pointer->elements + prev_free_list_index, + next_free_list_index); + } + } + } + } + } + + const skipfield_type previous_node_value = + *(iterator1.skipfield_pointer + - 1); // safe to do this here as we've already established that we're not at start of skipfield + const skipfield_type distance_to_end = static_cast(end - iterator1.element_pointer); + + if (previous_node_value == 0) // no previous skipblock + { + *iterator1.skipfield_pointer = distance_to_end; // set start node value + *(iterator1.skipfield_pointer + distance_to_end - 1) = distance_to_end; // set end node value + + const skipfield_type index = + static_cast(iterator1.element_pointer - iterator1.group_pointer->elements); + + if (iterator1.group_pointer->free_list_head + != std::numeric_limits::max()) // ie. if this group already has some erased elements + { + edit_free_list_next( + iterator1.group_pointer->elements + iterator1.group_pointer->free_list_head, + index); // set prev free list head's 'next index' number to the index of the iterator1 element + } else { + iterator1.group_pointer->erasures_list_next_group = + erasure_groups_head; // add it to the groups-with-erasures free list + + if (erasure_groups_head != NULL) { + erasure_groups_head->erasures_list_previous_group = iterator1.group_pointer; + } + + erasure_groups_head = iterator1.group_pointer; + } + + edit_free_list_head(iterator1.element_pointer, iterator1.group_pointer->free_list_head); + iterator1.group_pointer->free_list_head = index; + } else { // update previous skipblock, no need to update free list: + *(iterator1.skipfield_pointer - previous_node_value) = + *(iterator1.skipfield_pointer + distance_to_end - 1) = + static_cast(previous_node_value + distance_to_end); + } + + if (distance_to_end > 2) // if the skipblock is longer than 2 nodes, fill in the middle nodes with non-zero + // values so that get_iterator() and is_active will work + { + std::memset(detail::void_cast(iterator1.skipfield_pointer + 1), + 1, + sizeof(skipfield_type) * (distance_to_end - 2)); + } + + iterator1.group_pointer->size = + static_cast(iterator1.group_pointer->size - number_of_group_erasures); + total_size -= number_of_group_erasures; + + current.group_pointer = current.group_pointer->next_group; + } + + // Intermediate groups: + const group_pointer_type previous_group = current.group_pointer->previous_group; + + while (current.group_pointer != iterator2.group_pointer) { + if constexpr (!std::is_trivially_destructible::value) { + current.element_pointer = current.group_pointer->elements + *(current.group_pointer->skipfield); + current.skipfield_pointer = current.group_pointer->skipfield + *(current.group_pointer->skipfield); + const aligned_pointer_type end = pointer_cast(current.group_pointer->skipfield); + + do { + destroy_element(current.element_pointer); + const skipfield_type skip = *(++current.skipfield_pointer); + current.element_pointer += static_cast(skip) + 1u; + current.skipfield_pointer += skip; + } while (current.element_pointer != end); + } + + if (current.group_pointer->free_list_head != std::numeric_limits::max()) { + remove_from_groups_with_erasures_list(current.group_pointer); + } + + total_size -= current.group_pointer->size; + const group_pointer_type current_group = current.group_pointer; + current.group_pointer = current.group_pointer->next_group; + + if (current_group != end_iterator.group_pointer && current_group->next_group != end_iterator.group_pointer) { + total_capacity -= 
current_group->capacity; + deallocate_group(current_group); + } else { + add_group_to_unused_groups_list(current_group); + } + } + + current.element_pointer = current.group_pointer->elements + *(current.group_pointer->skipfield); + current.skipfield_pointer = current.group_pointer->skipfield + *(current.group_pointer->skipfield); + current.group_pointer->previous_group = previous_group; // Join this group to the last non-removed group + + if (previous_group != NULL) { + previous_group->next_group = current.group_pointer; + } else { + begin_iterator = iterator(iterator2.group_pointer, iterator2.element_pointer, iterator2.skipfield_pointer); + ; // This line is included here primarily to avoid a secondary if statement within the if block below - it will + // not be needed in any other situation + } + } + + // Final group: + // Code explanation: + // If not erasing entire final group, 1. Destruct elements (if non-trivial destructor) and add locations to group free + // list. 2. process skipfield. If erasing entire group, 1. Destruct elements (if non-trivial destructor), 2. if no + // elements left in hive, reset the group 3. otherwise reset end_iterator and remove group from groups-with-erasures + // list (if free list of erasures present) + + if (current.element_pointer != iterator2.element_pointer) // in case iterator2 was at beginning of it's group - also + // covers empty range case (first == last) + { + if (iterator2.element_pointer != end_iterator.element_pointer + || current.element_pointer + != current.group_pointer->elements + + *(current.group_pointer + ->skipfield)) // ie. not erasing entire group. Note: logistically the only way the entire + // group can be erased here is if iterator2 == end - otherwise would be + // caught by the if block above. Second condition in this if statement only + // possibly applies if iterator1.group_pointer == iterator2.group_pointer + { + size_type number_of_group_erasures = 0; + // Schema: first erased all non-erased elements until end of group & remove all skipblocks post-iterator2 from + // the free_list. Then, either update preceding skipblock or create new one: + + const const_iterator current_saved = current; + + if (current.group_pointer->free_list_head + == std::numeric_limits::max()) // ie. 
no other erasures in block + { + number_of_group_erasures += static_cast(iterator2.element_pointer - current.element_pointer); + + if constexpr (!std::is_trivially_destructible::value) { + do // avoid checking skipfield + { + destroy_element(current.element_pointer); + } while (++current.element_pointer != iterator2.element_pointer); + } + } else { + while (current.element_pointer != iterator2.element_pointer) { + if (*current.skipfield_pointer == 0) { + if constexpr (!std::is_trivially_destructible::value) + destroy_element(current.element_pointer); + ++number_of_group_erasures; + ++current.element_pointer; + ++current.skipfield_pointer; + } else // remove skipblock from group: + { + const skipfield_type prev_free_list_index = + *(pointer_cast(current.element_pointer)); + const skipfield_type next_free_list_index = + *(pointer_cast(current.element_pointer) + 1); + + current.element_pointer += *(current.skipfield_pointer); + current.skipfield_pointer += *(current.skipfield_pointer); + + if (next_free_list_index == std::numeric_limits::max() + && prev_free_list_index + == std::numeric_limits::max()) // if this is the last skipblock in the + // free list + { + remove_from_groups_with_erasures_list( + iterator2.group_pointer); // remove group from list of free-list groups - will be added back + // in down below, but not worth optimizing for + iterator2.group_pointer->free_list_head = std::numeric_limits::max(); + number_of_group_erasures += + static_cast(iterator2.element_pointer - current.element_pointer); + + if constexpr (!std::is_trivially_destructible::value) { + while (current.element_pointer != iterator2.element_pointer) { + destroy_element(current.element_pointer++); + } + } + + break; // end overall while loop + } else if (next_free_list_index + == std::numeric_limits::max()) // if this is the head of the free list + { + current.group_pointer->free_list_head = prev_free_list_index; + edit_free_list_next(current.group_pointer->elements + prev_free_list_index, + std::numeric_limits::max()); + } else { + edit_free_list_prev(current.group_pointer->elements + next_free_list_index, + prev_free_list_index); + + if (prev_free_list_index + != std::numeric_limits::max()) // ie. not the tail free list node + { + edit_free_list_next(current.group_pointer->elements + prev_free_list_index, + next_free_list_index); + } + } + } + } + } + + const skipfield_type distance_to_iterator2 = + static_cast(iterator2.element_pointer - current_saved.element_pointer); + const skipfield_type index = + static_cast(current_saved.element_pointer - iterator2.group_pointer->elements); + + if (index == 0 + || *(current_saved.skipfield_pointer - 1) + == 0) // element is either at start of group or previous skipfield node is 0 + { + *(current_saved.skipfield_pointer) = distance_to_iterator2; + *(iterator2.skipfield_pointer - 1) = distance_to_iterator2; + + if (iterator2.group_pointer->free_list_head + != std::numeric_limits::max()) // ie. 
if this group already has some erased elements + { + edit_free_list_next(iterator2.group_pointer->elements + iterator2.group_pointer->free_list_head, index); + } else { + iterator2.group_pointer->erasures_list_next_group = + erasure_groups_head; // add it to the groups-with-erasures free list + + if (erasure_groups_head != NULL) + erasure_groups_head->erasures_list_previous_group = iterator2.group_pointer; + + erasure_groups_head = iterator2.group_pointer; + } + + edit_free_list_head(current_saved.element_pointer, iterator2.group_pointer->free_list_head); + iterator2.group_pointer->free_list_head = index; + } else // If iterator 1 & 2 are in same group, but iterator 1 was not at start of group, and previous skipfield + // node is an end node in a skipblock: + { + // Just update existing skipblock, no need to create new free list node: + const skipfield_type prev_node_value = *(current_saved.skipfield_pointer - 1); + *(current_saved.skipfield_pointer - prev_node_value) = + static_cast(prev_node_value + distance_to_iterator2); + *(iterator2.skipfield_pointer - 1) = static_cast(prev_node_value + distance_to_iterator2); + } + + if (distance_to_iterator2 > 2) // if the skipblock is longer than 2 nodes, fill in the middle nodes with + // non-zero values so that get_iterator() and is_active() will work + { + std::memset(detail::void_cast(current_saved.skipfield_pointer + 1), + 1, + sizeof(skipfield_type) * (distance_to_iterator2 - 2)); + } + + if (iterator1.element_pointer == begin_iterator.element_pointer) { + begin_iterator = iterator(iterator2.group_pointer, iterator2.element_pointer, iterator2.skipfield_pointer); + } + + iterator2.group_pointer->size = + static_cast(iterator2.group_pointer->size - number_of_group_erasures); + total_size -= number_of_group_erasures; + } else // ie. full group erasure + { + if constexpr (!std::is_trivially_destructible::value) { + while (current.element_pointer != iterator2.element_pointer) { + destroy_element(current.element_pointer); + const skipfield_type skip = *(++current.skipfield_pointer); + current.element_pointer += static_cast(skip) + 1u; + current.skipfield_pointer += skip; + } + } + + if ((total_size -= current.group_pointer->size) != 0) // ie. previous_group != NULL - hive is not empty + { + if (current.group_pointer->free_list_head != std::numeric_limits::max()) { + remove_from_groups_with_erasures_list(current.group_pointer); + } + + current.group_pointer->previous_group->next_group = current.group_pointer->next_group; + end_iterator.group_pointer = current.group_pointer->previous_group; + end_iterator.element_pointer = pointer_cast(end_iterator.group_pointer->skipfield); + end_iterator.skipfield_pointer = + end_iterator.group_pointer->skipfield + end_iterator.group_pointer->capacity; + + if constexpr (priority == performance) { + add_group_to_unused_groups_list(current.group_pointer); + } else { + if (unused_groups_head + != NULL) // priority == memory_use, if there are other reserved blocks already, get rid of this one + { + total_capacity -= current.group_pointer->capacity; + deallocate_group(current.group_pointer); + } else // otherwise, retain - prevents unnecessary allocations/deallocations with stack-like usage + { + add_group_to_unused_groups_list(current.group_pointer); + } + } + } else // ie. 
hive is now empty + { + if constexpr (priority == memory_use) { trim_capacity(); } + + // Reset skipfield and free list rather than clearing - leads to fewer allocations/deallocations: + reset_only_group_left(current.group_pointer); + } + + return end_iterator; + } + } + + return iterator(iterator2.group_pointer, iterator2.element_pointer, iterator2.skipfield_pointer); + } + +private: + void prepare_groups_for_assign(const size_type size) + { + // Destroy all elements if non-trivial: + if constexpr (!std::is_trivially_destructible::value) { + for (iterator current = begin_iterator; current != end_iterator; ++current) { + destroy_element(current.element_pointer); + } + } + + if (size < total_capacity && (total_capacity - size) >= min_block_capacity) { + size_type difference = total_capacity - size; + end_iterator.group_pointer->next_group = unused_groups_head; + + // Remove surplus groups which're under the difference limit: + group_pointer_type current_group = begin_iterator.group_pointer, previous_group = NULL; + + do { + const group_pointer_type next_group = current_group->next_group; + + if (current_group->capacity <= difference) { // Remove group: + difference -= current_group->capacity; + total_capacity -= current_group->capacity; + deallocate_group(current_group); + + if (current_group == begin_iterator.group_pointer) begin_iterator.group_pointer = next_group; + } else { + if (previous_group != NULL) { previous_group->next_group = current_group; } + + previous_group = current_group; + } + + current_group = next_group; + } while (current_group != NULL); + + previous_group->next_group = NULL; + } else { + if (size > total_capacity) reserve(size); + + // Join all unused_groups to main chain: + end_iterator.group_pointer->next_group = unused_groups_head; + } + + begin_iterator.element_pointer = begin_iterator.group_pointer->elements; + begin_iterator.skipfield_pointer = begin_iterator.group_pointer->skipfield; + erasure_groups_head = NULL; + total_size = 0; + } + +public: + // Fill assign: + void assign(size_type size, const element_type &element) + { + if (size == 0) { + reset(); + return; + } + + if constexpr ((std::is_trivially_destructible::value + && std::is_trivially_constructible::value + && std::is_trivially_copy_assignable::value) + || !std::is_copy_assignable::value) // ie. 
If there is no benefit nor difference to + // assigning vs constructing, or if we can't assign, + // use faster method: + { + prepare_groups_for_assign(size); + fill_unused_groups(size, element, 0, NULL, begin_iterator.group_pointer); + } else { + if (total_size == 0) { + prepare_groups_for_assign(size); + fill_unused_groups(size, element, 0, NULL, begin_iterator.group_pointer); + } else if (size < total_size) { + iterator current = begin_iterator; + + do { + *current++ = element; + } while (--size != 0); + + erase(current, end_iterator); + } else { + iterator current = begin_iterator; + + do { + *current = element; + } while (++current != end_iterator); + + insert(size - total_size, element); + } + } + } + +private: + // Range assign core: + template + void range_assign(iterator_type it, size_type size) + { + if (size == 0) { + reset(); + return; + } + + if constexpr ((std::is_trivially_destructible::value + && std::is_trivially_constructible::value + && std::is_trivially_copy_assignable::value) + || !std::is_copy_assignable::value) { + prepare_groups_for_assign(size); + range_fill_unused_groups(size, it, 0, NULL, begin_iterator.group_pointer); + } else { + if (total_size == 0) { + prepare_groups_for_assign(size); + range_fill_unused_groups(size, it, 0, NULL, begin_iterator.group_pointer); + } else if (size < total_size) { + iterator current = begin_iterator; + + do { + *current++ = *it++; + } while (--size != 0); + + erase(current, end_iterator); + } else { + iterator current = begin_iterator; + + do { + *current = *it++; + } while (++current != end_iterator); + + range_insert(it, size - total_size); + } + } + } + +public: + // Range assign: + template + void assign(const typename detail::enable_if::is_integer, iterator_type>::type &first, + const iterator_type &last) + { + range_assign(first, static_cast(std::distance(first, last))); + } + + // Overloads for hive iterators, since std::distance overloads for these are not possible without C++20 concepts and we + // must stick with ADL: + template + void assign(const iterator &first, const iterator &last) + { + range_assign(first, static_cast(first.distance(last))); + } + + template + void assign(const const_iterator &first, const const_iterator &last) + { + range_assign(first, static_cast(first.distance(last))); + } + + template + void assign(const reverse_iterator &first, const reverse_iterator &last) + { + range_assign(first, static_cast(first.distance(last))); + } + + template + void assign(const const_reverse_iterator &first, const const_reverse_iterator &last) + { + range_assign(first, static_cast(first.distance(last))); + } + + // Range assign, move_iterator overload: + template + void assign(const std::move_iterator first, const std::move_iterator last) + { + range_assign(first, static_cast(std::distance(first.base(), last.base()))); + } + + // Initializer-list assign: + void assign(const std::initializer_list &element_list) + { + range_assign(element_list.begin(), static_cast(element_list.size())); + } + + [[nodiscard]] bool empty() const noexcept { return total_size == 0; } + + size_type size() const noexcept { return total_size; } + + size_type max_size() const noexcept { return std::allocator_traits::max_size(*this); } + + size_type capacity() const noexcept { return total_capacity; } + + size_type memory() const noexcept + { + size_type memory_use = sizeof(*this); // sizeof hive basic structure + end_iterator.group_pointer->next_group = + unused_groups_head; // temporarily link the main groups and unused groups (reserved 
groups) in order to only have + // one loop below instead of several + + for (group_pointer_type current = begin_iterator.group_pointer; current != NULL; current = current->next_group) { + memory_use += + sizeof(group) + + (get_aligned_block_capacity(current->capacity) + * sizeof(aligned_allocation_struct)); // add memory block sizes and the size of the group structs themselves. + // The original calculation, including divisor, is necessary in order + // to correctly round up the number of allocations + } + + end_iterator.group_pointer->next_group = NULL; // unlink main groups and unused groups + return memory_use; + } + + static constexpr size_type block_metadata_memory(const size_type block_capacity) noexcept + { + return sizeof(group) + + ((get_aligned_block_capacity(static_cast(block_capacity)) - block_capacity) + * sizeof(aligned_allocation_struct)); + } + + static constexpr size_type block_allocation_amount(size_type block_capacity) noexcept + { + if (block_capacity > std::numeric_limits::max()) + block_capacity = std::numeric_limits::max(); + + return sizeof(aligned_allocation_struct) * get_aligned_block_capacity(static_cast(block_capacity)); + } + + static constexpr size_type max_elements_per_allocation(const size_type allocation_amount) noexcept + { + // Get a rough approximation of the number of elements + skipfield units we can fit in the amount expressed: + constexpr size_type num_units = allocation_amount / (sizeof(aligned_element_struct) + sizeof(skipfield_type)); + constexpr detail::limits hard_capacities = block_capacity_hard_limits(); + + // Truncate the amount to the implementation's hard block capacity max limit: + if (num_units > hard_capacities.max) num_units = hard_capacities.max; + + // Adjust num_units downward based on (a) the additional skipfield node necessary per-block in this implementation and + // (b) any additional memory waste required in order to allocate the skipfield in multiples of the element type's + // alignof: + if ((/* Explanation: elements and skipfield are allocated in a single allocation to save performance. + In order for the elements to be correctly aligned in memory, this single allocation is aligned to the + alignof the element type, so the first line below is the allocation amount in bytes required for the skipfield + when allocated in multiples of the element type's alignof. The + sizeof(skipfield_type) adds the additional + skipfield node as mentioned, and the (num_units + 1) minus 1 byte rounds up the integer division: */ + (((((num_units + 1) * sizeof(aligned_allocation_struct)) - 1) + sizeof(skipfield_type)) + / sizeof(aligned_allocation_struct)) + /* the second line is the amount of memory in bytes necessary for the elements themselves: */ + + (num_units * sizeof(aligned_element_struct))) + /* then we compare against the desired allocation amount: */ + > allocation_amount) { + --num_units; // In this implementation it is not possible for the necessary adjustment to be greater than 1 + // element+skipfield sizeof + } + + if (num_units < hard_capacities.min) num_units = 0; + + return num_units; + } + +private: + // get all elements contiguous in memory and shrink to fit, remove erasures and free lists. Invalidates all iterators and + // pointers to elements. 
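+    // A related usage note: the static capacity helpers above (block_allocation_amount, max_elements_per_allocation)
+    // and consolidate() below work together - reshape() and shrink_to_fit() delegate to consolidate() whenever the
+    // existing memory blocks cannot satisfy the requested limits. A minimal, hedged sketch of the intended call
+    // pattern, assuming a hive<int> named 'h' (the names 'h', 'bytes', 'cap' and the figure of 1000 elements are
+    // hypothetical, for illustration only):
+    //
+    //     hive<int> h;
+    //     const auto bytes = hive<int>::block_allocation_amount(1000);      // bytes needed for a ~1000-element block
+    //     const auto cap   = hive<int>::max_elements_per_allocation(bytes); // elements that fit in that allocation
+    //     h.reshape(detail::limits(cap, cap)); // fixed-size blocks; consolidates if existing blocks don't conform
+    //     h.shrink_to_fit();                   // also funnels into consolidate() when capacity exceeds size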
+ void consolidate(const skipfield_type new_min, const skipfield_type new_max) + { + hive temp(detail::limits(new_min, new_max)); + temp.reserve(total_size); + temp.end_iterator.group_pointer->next_group = temp.unused_groups_head; + + if constexpr (!std::is_trivial::value && std::is_nothrow_move_constructible::value) { + temp.range_fill_unused_groups(total_size, + detail::make_move_iterator(begin_iterator), + 0, + NULL, + temp.begin_iterator.group_pointer); + } else { + temp.range_fill_unused_groups(total_size, begin_iterator, 0, NULL, temp.begin_iterator.group_pointer); + } + + *this = std::move(temp); // Avoid generating 2nd temporary + } + +public: + void reshape(const detail::limits block_limits) + { + check_capacities_conformance(block_limits); + const skipfield_type new_min = static_cast(block_limits.min), + new_max = static_cast(block_limits.max); + + if (total_capacity != 0) { + if (total_size != 0) { + if (min_block_capacity > new_max + || max_block_capacity < new_min) // If none of the original blocks could potentially fit within the new + // limits, skip checking of blocks and just consolidate: + { + consolidate(new_min, new_max); + return; + } + + if (min_block_capacity < new_min + || max_block_capacity > new_max) // ie. If existing blocks could be outside of the new limits + { + // Otherwise need to check all group sizes here (not just back one, which is most likely largest), because + // splice might append smaller blocks after a larger block: + for (group_pointer_type current_group = begin_iterator.group_pointer; current_group != NULL; + current_group = current_group->next_group) { + if (current_group->capacity < new_min || current_group->capacity > new_max) { + consolidate(new_min, new_max); + return; + } + } + } + } else // include first group to be checked in the loop below + { + begin_iterator.group_pointer->next_group = unused_groups_head; + unused_groups_head = begin_iterator.group_pointer; + } + + // If a consolidation or throw has not occured, process reserved/unused groups and deallocate where they don't fit + // the new limits: + + for (group_pointer_type current_group = unused_groups_head, previous_group = NULL; current_group != NULL;) { + const group_pointer_type next_group = current_group->next_group; + + if (current_group->capacity < new_min || current_group->capacity > new_max) { + total_capacity -= current_group->capacity; + deallocate_group(current_group); + + if (previous_group == NULL) { + unused_groups_head = next_group; + } else { + previous_group->next_group = next_group; + } + } else { + previous_group = current_group; + } + + current_group = next_group; + } + + if (total_size == 0) { + if (unused_groups_head == NULL) { + blank(); + } else { + begin_iterator.group_pointer = unused_groups_head; + unused_groups_head = begin_iterator.group_pointer->next_group; + begin_iterator.group_pointer->next_group = NULL; + } + } + } + + min_block_capacity = new_min; + max_block_capacity = new_max; + } + + constexpr detail::limits block_capacity_limits() const noexcept + { + return detail::limits(static_cast(min_block_capacity), static_cast(max_block_capacity)); + } + + static constexpr detail::limits block_capacity_hard_limits() noexcept + { + return detail::limits(3, std::min(static_cast(std::numeric_limits::max()), max_size_static())); + } + + void clear() noexcept + { + if (total_size == 0) return; + + // Destroy all elements if element type is non-trivial: + if constexpr (!std::is_trivially_destructible::value) { + for (iterator current = begin_iterator; current 
!= end_iterator; ++current) { + destroy_element(current.element_pointer); + } + } + + if (begin_iterator.group_pointer != end_iterator.group_pointer) { // Move all other groups onto the unused_groups list + end_iterator.group_pointer->next_group = unused_groups_head; + unused_groups_head = begin_iterator.group_pointer->next_group; + end_iterator.group_pointer = begin_iterator.group_pointer; // other parts of iterator reset in the function below + } + + reset_only_group_left(begin_iterator.group_pointer); + erasure_groups_head = NULL; + total_size = 0; + } + + hive &operator=(const hive &source) + { + assert(&source != this); + + if constexpr (std::allocator_traits::propagate_on_container_copy_assignment::value) { + if constexpr (!std::allocator_traits::is_always_equal::value) { + if (static_cast(*this) + != static_cast( + source)) { // Deallocate existing blocks as source allocator is not necessarily able to do so + reset(); + } + } + + static_cast(*this) = static_cast(source); + // Reconstruct rebinds: + group_allocator = group_allocator_type(*this); + aligned_struct_allocator = aligned_struct_allocator_type(*this); + skipfield_allocator = skipfield_allocator_type(*this); + tuple_allocator = tuple_allocator_type(*this); + } + + range_assign(source.begin_iterator, source.total_size); + return *this; + } + +private: + void move_assign(hive &&source) noexcept + { + if constexpr ((std::is_trivially_copyable::value + || std::allocator_traits::is_always_equal::value) + && std::is_trivial::value && std::is_trivial::value + && std::is_trivial::value) { + std::memcpy(static_cast(this), &source, sizeof(hive)); + } else { + end_iterator = std::move(source.end_iterator); + begin_iterator = std::move(source.begin_iterator); + erasure_groups_head = std::move(source.erasure_groups_head); + unused_groups_head = std::move(source.unused_groups_head); + total_size = source.total_size; + total_capacity = source.total_capacity; + min_block_capacity = source.min_block_capacity; + max_block_capacity = source.max_block_capacity; + + if constexpr (std::allocator_traits::propagate_on_container_move_assignment::value) { + static_cast(*this) = static_cast(source); + // Reconstruct rebinds: + group_allocator = group_allocator_type(*this); + aligned_struct_allocator = aligned_struct_allocator_type(*this); + skipfield_allocator = skipfield_allocator_type(*this); + tuple_allocator = tuple_allocator_type(*this); + } + } + } + +public: + // Move assignment + hive &operator=(hive &&source) noexcept(std::allocator_traits::propagate_on_container_move_assignment::value + || std::allocator_traits::is_always_equal::value) + { + assert(&source != this); + destroy_all_data(); + + // We need this to be constexpr to avoid warning errors on the 'throw' below + if constexpr (std::allocator_traits::propagate_on_container_move_assignment::value + || std::allocator_traits::is_always_equal::value) { + move_assign(std::move(source)); + } else if (static_cast(*this) == static_cast(source)) { + move_assign(std::move(source)); + } else // Allocator isn't movable so move elements from source and deallocate the source's blocks. 
Could throw here: + { + if constexpr (!(std::is_move_constructible::value && std::is_move_assignable::value)) { + range_assign(source.begin_iterator, source.total_size); + } else { + range_assign(detail::make_move_iterator(source.begin_iterator), source.total_size); + } + + source.destroy_all_data(); + } + + source.blank(); + return *this; + } + + hive &operator=(const std::initializer_list &element_list) + { + range_assign(element_list.begin(), static_cast(element_list.size())); + return *this; + } + + friend bool operator==(const hive &lh, const hive &rh) noexcept + { + if (lh.total_size != rh.total_size) return false; + + for (const_iterator lh_iterator = lh.begin_iterator, rh_iterator = rh.begin_iterator; lh_iterator != lh.end_iterator; + ++lh_iterator, ++rh_iterator) { + if (*lh_iterator != *rh_iterator) return false; + } + + return true; + } + + friend bool operator!=(const hive &lh, const hive &rh) noexcept { return !(lh == rh); } + + void shrink_to_fit() + { + if (total_size == total_capacity) { + return; + } else if (total_size == 0) { + reset(); + return; + } + + consolidate(min_block_capacity, max_block_capacity); + } + + void trim_capacity() noexcept + { + if (end_iterator.element_pointer == NULL) return; // empty hive + + while (unused_groups_head != NULL) { + total_capacity -= unused_groups_head->capacity; + const group_pointer_type next_group = unused_groups_head->next_group; + deallocate_group(unused_groups_head); + unused_groups_head = next_group; + } + + if (begin_iterator.element_pointer == end_iterator.element_pointer) // ie. clear() has been called prior + { + deallocate_group(begin_iterator.group_pointer); + blank(); + } + } + + void trim_capacity(const size_type capacity_retain) noexcept + { + const size_type capacity_difference = total_capacity - capacity_retain; + + if (end_iterator.element_pointer == NULL || total_capacity <= capacity_retain || total_size >= capacity_retain + || capacity_difference < min_block_capacity) + return; + + size_type number_of_elements_to_remove = capacity_difference; + + for (group_pointer_type current_group = unused_groups_head, previous_group = NULL; current_group != NULL;) { + const group_pointer_type next_group = current_group->next_group; + + if (number_of_elements_to_remove >= current_group->capacity) { + number_of_elements_to_remove -= current_group->capacity; + deallocate_group(current_group); + + if (previous_group == NULL) { + unused_groups_head = next_group; + } else { + previous_group->next_group = next_group; + } + + if (number_of_elements_to_remove < min_block_capacity) break; + } else { + previous_group = current_group; + } + + current_group = next_group; + } + + if (begin_iterator.element_pointer == end_iterator.element_pointer) // ie. 
clear() has been called prior + { + if (number_of_elements_to_remove >= begin_iterator.group_pointer->capacity) { + number_of_elements_to_remove -= begin_iterator.group_pointer->capacity; + deallocate_group(begin_iterator.group_pointer); + + if (unused_groups_head != NULL) // some of the reserved blocks were not removed as they were too large, so use + // one of these to make the new begin group + { + begin_iterator.group_pointer = unused_groups_head; + begin_iterator.element_pointer = unused_groups_head->elements; + begin_iterator.skipfield_pointer = unused_groups_head->skipfield; + end_iterator = begin_iterator; + + unused_groups_head = unused_groups_head->next_group; + begin_iterator.group_pointer->next_group = NULL; + } else { + blank(); + return; + } + } + } + + total_capacity -= capacity_difference - number_of_elements_to_remove; + } + + void reserve(size_type new_capacity) + { + if (new_capacity == 0 || new_capacity <= total_capacity) return; // ie. We already have enough space allocated + + if (new_capacity > max_size()) { throw std::length_error("Capacity requested via reserve() greater than max_size()"); } + + new_capacity -= total_capacity; + + size_type number_of_max_groups = new_capacity / max_block_capacity; + skipfield_type remainder = static_cast(new_capacity - (number_of_max_groups * max_block_capacity)); + + if (remainder == 0) { + remainder = max_block_capacity; + --number_of_max_groups; + } else if (remainder < min_block_capacity) { + remainder = min_block_capacity; + } + + group_pointer_type current_group, first_unused_group; + + if (begin_iterator.group_pointer == NULL) // Most common scenario - empty hive + { + initialize(remainder); + begin_iterator.group_pointer->size = 0; // 1 by default in initialize function (optimised for insert()) + + if (number_of_max_groups == 0) { + return; + } else { + first_unused_group = current_group = allocate_new_group(max_block_capacity, begin_iterator.group_pointer); + total_capacity += max_block_capacity; + --number_of_max_groups; + } + } else // Non-empty hive, add first group: + { + if ((std::numeric_limits::max() - end_iterator.group_pointer->group_number) + < (number_of_max_groups + 1)) { + reset_group_numbers(); + } + + first_unused_group = current_group = allocate_new_group(remainder, end_iterator.group_pointer); + total_capacity += remainder; + } + + while (number_of_max_groups != 0) { + try { + current_group->next_group = allocate_new_group(max_block_capacity, current_group); + } catch (...) { + deallocate_group(current_group->next_group); + current_group->next_group = unused_groups_head; + unused_groups_head = first_unused_group; + throw; + } + + current_group = current_group->next_group; + total_capacity += max_block_capacity; + --number_of_max_groups; + } + + current_group->next_group = unused_groups_head; + unused_groups_head = first_unused_group; + } + +private: + template + hive_iterator get_it(const pointer element_pointer) const noexcept + { + if (end_iterator.group_pointer != NULL) { + const aligned_pointer_type aligned_element_pointer = pointer_cast(element_pointer); + // Note: we start with checking the back group first, as it will be the largest group in most cases, so there's a + // statistically-higher chance of the element being within it. + + // Special case for back group in case the element was in a group which became empty and got moved to the + // unused_groups list or was deallocated, and then that memory was re-used (ie. it became the current back group). 
+ // The following prevents the function from mistakenly returning an iterator which is beyond the back element of the + // hive: + if (aligned_element_pointer >= end_iterator.group_pointer->elements + && aligned_element_pointer < end_iterator.element_pointer) { + const skipfield_pointer_type skipfield_pointer = + end_iterator.group_pointer->skipfield + (aligned_element_pointer - end_iterator.group_pointer->elements); + return (*skipfield_pointer == 0) + ? hive_iterator(end_iterator.group_pointer, aligned_element_pointer, skipfield_pointer) + : hive_iterator(end_iterator); + } + + // All other groups, if any exist: + for (group_pointer_type current_group = end_iterator.group_pointer->previous_group; current_group != NULL; + current_group = current_group->previous_group) { + if (aligned_element_pointer >= current_group->elements + && aligned_element_pointer < pointer_cast(current_group->skipfield)) { + const skipfield_pointer_type skipfield_pointer = + current_group->skipfield + (aligned_element_pointer - current_group->elements); + return (*skipfield_pointer == 0) + ? hive_iterator(current_group, aligned_element_pointer, skipfield_pointer) + : hive_iterator(end_iterator); + } + } + } + + return end_iterator; + } + +public: + iterator get_iterator(const pointer element_pointer) noexcept { return get_it(element_pointer); } + + const_iterator get_iterator(const const_pointer element_pointer) const noexcept + { + return get_it(const_cast(element_pointer)); + } + + bool is_active(const const_iterator &it) const noexcept + { + if (end_iterator.group_pointer != NULL) { + // Schema: check (a) that the group the iterator belongs to is still active and not deallocated or in the + // unused_groups list, then (b) that the element is not erased. (a) prevents an out-of-bounds memory access if the + // group is deallocated. 
Same reasoning as get_iterator for loop conditions + + // Special case for back group, same reasoning as in get_it(): + if (it.group_pointer == end_iterator.group_pointer && it.element_pointer >= end_iterator.group_pointer->elements + && it.element_pointer < end_iterator.element_pointer) { + return (*it.skipfield_pointer == 0); + } + + for (group_pointer_type current_group = end_iterator.group_pointer->previous_group; current_group != NULL; + current_group = current_group->previous_group) { + if (it.group_pointer == current_group && it.element_pointer >= current_group->elements + && it.element_pointer < pointer_cast( + current_group->skipfield)) // 2nd 2 conditions necessary in case the group contained the element + // which the iterator points to, has been deallocated from the hive + // previously, but then the same pointer address is re-supplied via an + // allocator for a subsequent group allocation (in which case the group's + // element block memory location may be different) + { + return (*it.skipfield_pointer == 0); + } + } + } + + return false; + } + + allocator_type get_allocator() const noexcept { return static_cast(*this); } + +private: + void source_blocks_incompatible() + { + throw std::length_error("A source memory block capacity is outside of the destination's minimum or maximum memory " + "block capacity limits - please change either the source or the destination's min/max block " + "capacity limits using reshape() before calling splice() in this case"); + } + +public: + void splice(hive &source) + { + // Process: if there are unused memory spaces at the end of the current back group of the chain, convert them + // to skipped elements and add the locations to the group's free list. + // Then link the destination's groups to the source's groups and nullify the source. + // If the source has more unused memory spaces in the back group than the destination, swap them before processing to + // reduce the number of locations added to a free list and also subsequent jumps during iteration. + + assert(&source != this); + + if (source.total_size == 0) return; + + // Throw if incompatible block capacities found in source: + if (source.min_block_capacity > max_block_capacity + || source.max_block_capacity + < min_block_capacity) // ie. source blocks cannot possibly fit within *this's block capacity limits + { + source_blocks_incompatible(); + } else if (source.min_block_capacity < min_block_capacity + || source.max_block_capacity > max_block_capacity) // ie. 
source blocks may or may not fit + { + for (group_pointer_type current_group = source.begin_iterator.group_pointer; current_group != NULL; + current_group = current_group->next_group) { + if (current_group->capacity < min_block_capacity || current_group->capacity > max_block_capacity) { + source_blocks_incompatible(); + } + } + } + + if (total_size != 0) { + // If there's more unused element locations in back memory block of destination than in back memory block of source, + // swap with source to reduce number of skipped elements during iteration: + if ((pointer_cast(end_iterator.group_pointer->skipfield) - end_iterator.element_pointer) + > (pointer_cast(source.end_iterator.group_pointer->skipfield) + - source.end_iterator.element_pointer)) { + swap(source); + // Swap back unused groups list and block capacity limits so that source and *this retain their original ones: + std::swap(source.unused_groups_head, unused_groups_head); + std::swap(source.min_block_capacity, min_block_capacity); + std::swap(source.max_block_capacity, max_block_capacity); + } + + // Add source list of groups-with-erasures to destination list of groups-with-erasures: + if (source.erasure_groups_head != NULL) { + if (erasure_groups_head != NULL) { + group_pointer_type tail_group = erasure_groups_head; + + while (tail_group->erasures_list_next_group != NULL) { tail_group = tail_group->erasures_list_next_group; } + + tail_group->erasures_list_next_group = source.erasure_groups_head; + source.erasure_groups_head->erasures_list_previous_group = tail_group; + } else { + erasure_groups_head = source.erasure_groups_head; + } + } + + const skipfield_type distance_to_end = static_cast( + pointer_cast(end_iterator.group_pointer->skipfield) - end_iterator.element_pointer); + + if (distance_to_end != 0) // 0 == edge case + { // Mark unused element memory locations from back group as skipped/erased: + // Update skipfield: + const skipfield_type previous_node_value = *(end_iterator.skipfield_pointer - 1); + + if (previous_node_value == 0) // no previous skipblock + { + *end_iterator.skipfield_pointer = distance_to_end; + *(end_iterator.skipfield_pointer + distance_to_end - 1) = distance_to_end; + + if (distance_to_end > 2) // make erased middle nodes non-zero for get_iterator and is_active + { + std::memset(static_cast(end_iterator.skipfield_pointer + 1), + 1, + sizeof(skipfield_type) * (distance_to_end - 2)); + } + + const skipfield_type index = + static_cast(end_iterator.element_pointer - end_iterator.group_pointer->elements); + + if (end_iterator.group_pointer->free_list_head + != std::numeric_limits::max()) // ie. 
if this group already has some erased elements + { + edit_free_list_next( + end_iterator.group_pointer->elements + end_iterator.group_pointer->free_list_head, + index); // set prev free list head's 'next index' number to the index of the current element + } else { + end_iterator.group_pointer->erasures_list_next_group = + erasure_groups_head; // add it to the groups-with-erasures free list + + if (erasure_groups_head != NULL) { + erasure_groups_head->erasures_list_previous_group = end_iterator.group_pointer; + } + + erasure_groups_head = end_iterator.group_pointer; + } + + edit_free_list_head(end_iterator.element_pointer, end_iterator.group_pointer->free_list_head); + end_iterator.group_pointer->free_list_head = index; + } else { // update previous skipblock, no need to update free list: + *(end_iterator.skipfield_pointer - previous_node_value) = + *(end_iterator.skipfield_pointer + distance_to_end - 1) = + static_cast(previous_node_value + distance_to_end); + + if (distance_to_end > 1) // make erased middle nodes non-zero for get_iterator and is_active + { + std::memset(static_cast(end_iterator.skipfield_pointer), + 1, + sizeof(skipfield_type) * (distance_to_end - 1)); + } + } + } + + // Join the destination and source group chains: + end_iterator.group_pointer->next_group = source.begin_iterator.group_pointer; + source.begin_iterator.group_pointer->previous_group = end_iterator.group_pointer; + + // Update group numbers if necessary: + if (source.begin_iterator.group_pointer->group_number <= end_iterator.group_pointer->group_number) { + size_type source_group_count = 0; + + for (group_pointer_type current_group = source.begin_iterator.group_pointer; current_group != NULL; + current_group = current_group->next_group, ++source_group_count) {} + + if ((std::numeric_limits::max() - end_iterator.group_pointer->group_number) >= source_group_count) { + update_subsequent_group_numbers(end_iterator.group_pointer->group_number + 1u, + source.begin_iterator.group_pointer); + } else { + reset_group_numbers(); + } + } + + end_iterator = source.end_iterator; + total_size += source.total_size; + total_capacity += source.total_capacity; + } else // If *this is empty(): + { + // Preserve unused_groups_head and de-link so that destroy_all_data doesn't remove them: + const group_pointer_type original_unused_groups = unused_groups_head; + unused_groups_head = NULL; + destroy_all_data(); + unused_groups_head = original_unused_groups; + + // Move source data to *this: + end_iterator = source.end_iterator; + begin_iterator = source.begin_iterator; + erasure_groups_head = source.erasure_groups_head; + total_size = source.total_size; + total_capacity = source.total_capacity; + + // Add capacity for unused groups back into *this: + for (group_pointer_type current = original_unused_groups; current != NULL; current = current->next_group) { + total_capacity += current->capacity; + } + } + + // Reset source values: + const group_pointer_type original_unused_groups_head = source.unused_groups_head; // grab value before it gets wiped + source.blank(); // blank source before adding capacity from unused groups back in + + if (original_unused_groups_head != NULL) // If there were unused groups in source, re-link them and remove their + // capacity count from *this while adding it to source: + { + size_type source_unused_groups_capacity = 0; + + // Count capacity in source unused_groups: + for (group_pointer_type current = original_unused_groups_head; current != NULL; current = current->next_group) { + 
source_unused_groups_capacity += current->capacity;
+ }
+
+ total_capacity -= source_unused_groups_capacity;
+ source.total_capacity = source_unused_groups_capacity;
+
+ // Establish first group from source unused_groups as first active group in source, link rest as reserved groups:
+ source.unused_groups_head = original_unused_groups_head->next_group;
+ source.begin_iterator.group_pointer = original_unused_groups_head;
+ source.begin_iterator.element_pointer = original_unused_groups_head->elements;
+ source.begin_iterator.skipfield_pointer = original_unused_groups_head->skipfield;
+ source.end_iterator = source.begin_iterator;
+ original_unused_groups_head->reset(0, NULL, NULL, 0);
+ }
+ }
+
+ void splice(hive &&source) { splice(source); }
+
+private:
+ struct item_index_tuple {
+ pointer original_location;
+ size_type original_index;
+
+ item_index_tuple(const pointer _item, const size_type _index) noexcept
+ : original_location(_item), original_index(_index)
+ {
+ }
+ };
+
+ template <class comparison_function>
+ struct sort_dereferencer {
+ comparison_function stored_instance;
+
+ explicit sort_dereferencer(const comparison_function &function_instance) : stored_instance(function_instance) {}
+
+ bool operator()(const item_index_tuple first, const item_index_tuple second)
+ {
+ return stored_instance(*(first.original_location), *(second.original_location));
+ }
+ };
+
+ // Try to find space in the unused blocks or the back block instead of allocating for sort:
+ template <typename the_type>
+ aligned_pointer_type get_free_space() const noexcept
+ {
+ const size_type number_of_elements_needed = ((total_size * sizeof(the_type)) + sizeof(aligned_element_struct) - 1)
+ / sizeof(aligned_element_struct); // rounding up
+
+ if (number_of_elements_needed < max_block_capacity) {
+ if (static_cast<size_type>(pointer_cast<aligned_pointer_type>(end_iterator.group_pointer->skipfield)
+ - end_iterator.element_pointer)
+ >= number_of_elements_needed) { // there is enough space at the back of the back block
+ return end_iterator.element_pointer;
+ }
+
+ for (group_pointer_type current = unused_groups_head; current != NULL; current = current->next_group) {
+ if (current->capacity >= number_of_elements_needed)
+ return current->elements; // ie.
there is enough space in one of the unused blocks + } + } + + return NULL; + } + +public: + template + void sort(comparison_function compare) + { + if (total_size < 2) return; + + if constexpr ((std::is_trivially_copyable::value || std::is_move_assignable::value) + && sizeof(element_type) + <= sizeof(element_type *) * 2) // If element is <= 2 pointers, just copy to an array and sort + // that then copy back - consumes less memory + { + pointer sort_array = pointer_cast(get_free_space()); + const bool need_to_allocate = (sort_array == NULL); + + if (need_to_allocate) { + sort_array = std::allocator_traits::allocate(*this, total_size, end_iterator.skipfield_pointer); + } + + const pointer end = sort_array + total_size; + + if constexpr (!std::is_trivially_copy_constructible::value + && std::is_nothrow_move_assignable::value) { + std::uninitialized_copy(detail::make_move_iterator(begin_iterator), + detail::make_move_iterator(end_iterator), + sort_array); + } else { + std::uninitialized_copy(begin_iterator, end_iterator, sort_array); + } + + std::sort(sort_array, end, compare); + + if constexpr ((!std::is_trivially_copy_constructible::value + || !std::is_trivially_destructible::value) + && std::is_move_assignable::value) { + std::copy(detail::make_move_iterator(sort_array), detail::make_move_iterator(end), begin_iterator); + } else { + std::copy(sort_array, end, begin_iterator); + + if (!std::is_trivially_destructible::value) { + for (element_type *current = sort_array; current != end; ++current) { + std::allocator_traits::destroy(*this, current); + } + } + } + + if (need_to_allocate) { std::allocator_traits::deallocate(*this, sort_array, total_size); } + } else { + item_index_tuple *sort_array = pointer_cast(get_free_space()); + const bool need_to_allocate = (sort_array == NULL); + + if (need_to_allocate) { + sort_array = std::allocator_traits::allocate(tuple_allocator, + total_size, + end_iterator.skipfield_pointer); + } + + tuple_pointer_type tuple_pointer = sort_array; + + // Construct pointers to all elements in the sequence: + size_type index = 0; + + for (iterator current_element = begin_iterator; current_element != end_iterator; + ++current_element, ++tuple_pointer, ++index) { + std::allocator_traits::construct(tuple_allocator, + tuple_pointer, + &*current_element, + index); + } + + // Now, sort the pointers by the values they point to: + std::sort(sort_array, tuple_pointer, sort_dereferencer(compare)); + + // Sort the actual elements via the tuple array: + index = 0; + + for (tuple_pointer_type current_tuple = sort_array; current_tuple != tuple_pointer; ++current_tuple, ++index) { + if (current_tuple->original_index != index) { + element_type end_value = std::move(*(current_tuple->original_location)); + size_type destination_index = index; + size_type source_index = current_tuple->original_index; + + do { + *(sort_array[destination_index].original_location) = + std::move(*(sort_array[source_index].original_location)); + destination_index = source_index; + source_index = sort_array[destination_index].original_index; + sort_array[destination_index].original_index = destination_index; + } while (source_index != index); + + *(sort_array[destination_index].original_location) = std::move(end_value); + } + } + + if (need_to_allocate) { + std::allocator_traits::deallocate(tuple_allocator, sort_array, total_size); + } + } + } + + void sort() { sort(detail::less()); } + + template + size_type unique(comparison_function compare) + { + if (total_size < 2) return 0; + + size_type count = 0; + 
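+ // The loop below mirrors std::unique semantics for this container: each element is compared with its immediate
+ // predecessor, and any run of consecutive matches is removed with a single range-erase rather than one erase call
+ // per element. Illustrative example: with contents {1, 1, 2, 2, 2, 3} and the default equality predicate, three
+ // elements are erased, {1, 2, 3} remain, and 3 is returned.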
const const_iterator end = end_iterator; + + for (const_iterator current = begin_iterator, previous = begin_iterator; ++current != end; previous = current) { + if (compare(*current, *previous)) { + const size_type original_count = ++count; + const_iterator last = current; + + while (++last != end && compare(*last, *previous)) { ++count; } + + if (count != original_count) { + current = erase(current, last); // optimised range-erase + } else { + current = erase(current); + } + + if (last == end) break; + } + } + + return count; + } + + size_type unique() { return unique(std::equal_to()); } + + void swap(hive &source) noexcept(std::allocator_traits::propagate_on_container_swap::value + || std::allocator_traits::is_always_equal::value) + { + assert(&source != this); + + if constexpr (std::allocator_traits::is_always_equal::value + && std::is_trivial::value && std::is_trivial::value + && std::is_trivial::value) // if all pointer types are trivial we can just + // copy using memcpy - avoids + // constructors/destructors etc and is faster + { + char temp[sizeof(hive)]; + std::memcpy(&temp, static_cast(this), sizeof(hive)); + std::memcpy(static_cast(this), static_cast(&source), sizeof(hive)); + std::memcpy(static_cast(&source), &temp, sizeof(hive)); + } else if constexpr (std::is_move_assignable::value + && std::is_move_assignable::value + && std::is_move_assignable::value + && std::is_move_constructible::value + && std::is_move_constructible::value + && std::is_move_constructible::value) { + hive temp(std::move(source)); + source = std::move(*this); + *this = std::move(temp); + } else { + // Otherwise, make the reads/writes as contiguous in memory as-possible (yes, it is faster than using std::swap with + // the individual variables): + const iterator swap_end_iterator = end_iterator, swap_begin_iterator = begin_iterator; + const group_pointer_type swap_erasure_groups_head = erasure_groups_head, + swap_unused_groups_head = unused_groups_head; + const size_type swap_total_size = total_size, swap_total_capacity = total_capacity; + const skipfield_type swap_min_block_capacity = min_block_capacity, swap_max_block_capacity = max_block_capacity; + + end_iterator = source.end_iterator; + begin_iterator = source.begin_iterator; + erasure_groups_head = source.erasure_groups_head; + unused_groups_head = source.unused_groups_head; + total_size = source.total_size; + total_capacity = source.total_capacity; + min_block_capacity = source.min_block_capacity; + max_block_capacity = source.max_block_capacity; + + source.end_iterator = swap_end_iterator; + source.begin_iterator = swap_begin_iterator; + source.erasure_groups_head = swap_erasure_groups_head; + source.unused_groups_head = swap_unused_groups_head; + source.total_size = swap_total_size; + source.total_capacity = swap_total_capacity; + source.min_block_capacity = swap_min_block_capacity; + source.max_block_capacity = swap_max_block_capacity; + + if constexpr (std::allocator_traits::propagate_on_container_swap::value + && !std::allocator_traits::is_always_equal::value) { + std::swap(static_cast(source), static_cast(*this)); + + // Reconstruct rebinds for swapped allocators: + group_allocator = group_allocator_type(*this); + aligned_struct_allocator = aligned_struct_allocator_type(*this); + skipfield_allocator = skipfield_allocator_type(*this); + tuple_allocator = tuple_allocator_type(*this); + source.group_allocator = group_allocator_type(source); + source.aligned_struct_allocator = aligned_struct_allocator_type(source); + source.skipfield_allocator = 
skipfield_allocator_type(source); + source.tuple_allocator = tuple_allocator_type(source); + } // else: undefined behaviour, as per standard + } + } + + struct hive_data : public uchar_allocator_type { + aligned_pointer_type *const block_pointers; // array of pointers to element memory blocks + unsigned char **const bitfield_pointers; // array of pointers to bitfields in the form of unsigned char arrays + // representing whether an element is erased or not (0 for erased). + size_t *const block_capacities; // array of the number of elements in each memory block + const size_t number_of_blocks; // size of each of the arrays above + + hive_data(const hive::size_type size) + : block_pointers(pointer_cast( + std::allocator_traits::allocate(*this, size * sizeof(aligned_pointer_type), nullptr))), + bitfield_pointers(pointer_cast( + std::allocator_traits::allocate(*this, size * sizeof(unsigned char *), nullptr))), + block_capacities(pointer_cast( + std::allocator_traits::allocate(*this, size * sizeof(size_t), nullptr))), + number_of_blocks(size) + { + } + + ~hive_data() + { + for (size_t index = 0; index != number_of_blocks; ++index) { + std::allocator_traits::deallocate(*this, + bitfield_pointers[index], + (block_capacities[index] + 7) / 8); + } + + std::allocator_traits::deallocate(*this, + pointer_cast(block_pointers), + number_of_blocks * sizeof(aligned_pointer_type)); + std::allocator_traits::deallocate(*this, + pointer_cast(bitfield_pointers), + number_of_blocks * sizeof(unsigned char *)); + std::allocator_traits::deallocate(*this, + pointer_cast(block_capacities), + number_of_blocks * sizeof(size_t)); + } + }; + +private: + void setup_data_cell(hive_data *data, + const group_pointer_type current_group, + const size_t capacity, + const size_t group_number) + { + const size_t bitfield_capacity = (capacity + 7) / 8; // round up + + data->block_pointers[group_number] = current_group->elements; + unsigned char *bitfield_location = data->bitfield_pointers[group_number] = + std::allocator_traits::allocate((*data), bitfield_capacity, nullptr); + data->block_capacities[group_number] = capacity; + std::memset(bitfield_location, 0, bitfield_capacity); + + skipfield_pointer_type skipfield_pointer = current_group->skipfield; + const unsigned char *const end = bitfield_location + bitfield_capacity; + + for (size_t index = 0; bitfield_location != end; ++bitfield_location) { + for (unsigned char offset = 0; offset != 8 && index != capacity; ++index, ++offset, ++skipfield_pointer) { + *bitfield_location |= static_cast(static_cast(!*skipfield_pointer) << offset); + } + } + } + +public: + hive_data *data() + { + hive_data *data = new hive_data(end_iterator.group_pointer->group_number + 1); + size_t group_number = 0; + + for (group_pointer_type current_group = begin_iterator.group_pointer; current_group != end_iterator.group_pointer; + current_group = current_group->next_group, ++group_number) { + setup_data_cell(data, current_group, current_group->capacity, group_number); + } + + // Special case for end group: + setup_data_cell(data, + end_iterator.group_pointer, + static_cast(end_iterator.element_pointer - end_iterator.group_pointer->elements), + group_number); + + return data; + } + + // Iterators: + template + class hive_iterator + { + private: + typedef typename hive::group_pointer_type group_pointer_type; + typedef typename hive::aligned_pointer_type aligned_pointer_type; + typedef typename hive::skipfield_pointer_type skipfield_pointer_type; + + group_pointer_type group_pointer; + aligned_pointer_type 
element_pointer; + skipfield_pointer_type skipfield_pointer; + + public: + struct hive_iterator_tag { + }; + + typedef std::bidirectional_iterator_tag iterator_category; + typedef std::bidirectional_iterator_tag iterator_concept; + typedef typename hive::value_type value_type; + typedef typename hive::difference_type difference_type; + typedef hive_reverse_iterator reverse_type; + typedef typename detail::conditional::type pointer; + typedef + typename detail::conditional::type reference; + + friend class hive; + friend class hive_reverse_iterator; + friend class hive_reverse_iterator; + + // Friend functions: + template + friend void advance(hive_iterator &it, distance_type distance) + { + it.advance(static_cast(distance)); + } + + friend hive_iterator next(const hive_iterator &it, const difference_type distance) + { + hive_iterator return_iterator(it); + return_iterator.advance(static_cast(distance)); + return return_iterator; + } + + friend hive_iterator prev(const hive_iterator &it, const difference_type distance) + { + hive_iterator return_iterator(it); + return_iterator.advance(-(static_cast(distance))); + return return_iterator; + } + + friend typename hive_iterator::difference_type distance(const hive_iterator &first, const hive_iterator &last) + { + return first.distance(last); + } + + hive_iterator() noexcept : group_pointer(NULL), element_pointer(NULL), skipfield_pointer(NULL) {} + + hive_iterator(const hive_iterator &source) noexcept + : // Note: Surprisingly, use of = default here and in other simple constructors results in slowdowns of up to 10% in + // many benchmarks under GCC + group_pointer(source.group_pointer), + element_pointer(source.element_pointer), + skipfield_pointer(source.skipfield_pointer) + { + } + + template ::type> + hive_iterator(const hive_iterator &source) noexcept + : group_pointer(source.group_pointer), + element_pointer(source.element_pointer), + skipfield_pointer(source.skipfield_pointer) + { + } + + hive_iterator(hive_iterator &&source) noexcept + : group_pointer(std::move(source.group_pointer)), + element_pointer(std::move(source.element_pointer)), + skipfield_pointer(std::move(source.skipfield_pointer)) + { + } + + template ::type> + hive_iterator(hive_iterator &&source) noexcept + : group_pointer(std::move(source.group_pointer)), + element_pointer(std::move(source.element_pointer)), + skipfield_pointer(std::move(source.skipfield_pointer)) + { + } + + hive_iterator &operator=(const hive_iterator &source) noexcept + { + group_pointer = source.group_pointer; + element_pointer = source.element_pointer; + skipfield_pointer = source.skipfield_pointer; + return *this; + } + + template ::type> + hive_iterator &operator=(const hive_iterator &source) noexcept + { + group_pointer = source.group_pointer; + element_pointer = source.element_pointer; + skipfield_pointer = source.skipfield_pointer; + return *this; + } + + hive_iterator &operator=(hive_iterator &&source) noexcept + { + assert(&source != this); + group_pointer = std::move(source.group_pointer); + element_pointer = std::move(source.element_pointer); + skipfield_pointer = std::move(source.skipfield_pointer); + return *this; + } + + template ::type> + hive_iterator &operator=(hive_iterator &&source) noexcept + { + group_pointer = std::move(source.group_pointer); + element_pointer = std::move(source.element_pointer); + skipfield_pointer = std::move(source.skipfield_pointer); + return *this; + } + + bool operator==(const hive_iterator &rh) const noexcept { return (element_pointer == rh.element_pointer); 
} + + bool operator==(const hive_iterator &rh) const noexcept { return (element_pointer == rh.element_pointer); } + + bool operator!=(const hive_iterator &rh) const noexcept { return (element_pointer != rh.element_pointer); } + + bool operator!=(const hive_iterator &rh) const noexcept { return (element_pointer != rh.element_pointer); } + + reference operator*() const // may cause exception with uninitialized iterator + { + return *pointer_cast(element_pointer); + } + + pointer operator->() const { return pointer_cast(element_pointer); } + + hive_iterator &operator++() + { + assert(group_pointer != NULL); // covers uninitialised hive_iterator + skipfield_type skip = *(++skipfield_pointer); + + if ((element_pointer += static_cast(skip) + 1u) + == pointer_cast(group_pointer->skipfield) + && group_pointer->next_group != NULL) // ie. beyond end of current memory block. Second condition allows + // iterator to reach end(), which may be 1 past end of block, if block has + // been fully used and another block is not allocated + { + group_pointer = group_pointer->next_group; + const aligned_pointer_type elements = group_pointer->elements; + const skipfield_pointer_type skipfield = group_pointer->skipfield; + skip = *skipfield; + element_pointer = elements + skip; + skipfield_pointer = skipfield; + } + + skipfield_pointer += skip; + return *this; + } + + hive_iterator operator++(int) + { + const hive_iterator copy(*this); + ++*this; + return copy; + } + + hive_iterator &operator--() + { + assert(group_pointer != NULL); + + if (--skipfield_pointer + >= group_pointer->skipfield) // ie. not already at beginning of group prior to decrementation + { + element_pointer -= static_cast(*skipfield_pointer) + 1u; + if ((skipfield_pointer -= *skipfield_pointer) >= group_pointer->skipfield) + return *this; // ie. 
skipfield jump value does not takes us beyond beginning of group + } + + group_pointer = group_pointer->previous_group; + const skipfield_pointer_type skipfield = group_pointer->skipfield + group_pointer->capacity - 1; + const skipfield_type skip = *skipfield; + element_pointer = (pointer_cast(group_pointer->skipfield) - 1) - skip; + skipfield_pointer = skipfield - skip; + return *this; + } + + hive_iterator operator--(int) + { + const hive_iterator copy(*this); + --*this; + return copy; + } + + // Less-than etc operators retained as GCC codegen synthesis from <=> is slower and bulkier for same operations: + template + bool operator>(const hive_iterator &rh) const noexcept + { + return ((group_pointer == rh.group_pointer) & (element_pointer > rh.element_pointer)) + || (group_pointer != rh.group_pointer && group_pointer->group_number > rh.group_pointer->group_number); + } + + template + bool operator<(const hive_iterator &rh) const noexcept + { + return rh > *this; + } + + template + bool operator>=(const hive_iterator &rh) const noexcept + { + return !(rh > *this); + } + + template + bool operator<=(const hive_iterator &rh) const noexcept + { + return !(*this > rh); + } + + private: + // Used by cend(), erase() etc: + hive_iterator(const group_pointer_type group_p, + const aligned_pointer_type element_p, + const skipfield_pointer_type skipfield_p) noexcept + : group_pointer(group_p), element_pointer(element_p), skipfield_pointer(skipfield_p) + { + } + + // Advance implementation: + void advance(difference_type distance) // Cannot be noexcept due to the possibility of an uninitialized iterator + { + assert(group_pointer != NULL); // covers uninitialized hive_iterator && empty group + + // Now, run code based on the nature of the distance type - negative, positive or zero: + if (distance > 0) // ie. += + { + // Code explanation: + // For the initial state of the iterator, we don't know which elements have been erased before that element in + // that group. So for the first group, we follow the following logic: + // 1. If no elements have been erased in the group, we do simple pointer addition to progress, either to within + // the group (if the distance is small enough) or the end of the group and subtract from distance accordingly. + // 2. If any of the first group's elements have been erased, we manually iterate, as we don't know whether the + // erased elements occur before or after the initial iterator position, and we subtract 1 from the distance + // amount each time we iterate. Iteration continues until either distance becomes zero, or we reach the end of + // the group. + + // For all subsequent groups, we follow this logic: + // 1. If distance is larger than the total number of non-erased elements in a group, we skip that group and + // subtract the number of elements in that group from distance. + // 2. If distance is smaller than the total number of non-erased elements in a group, then: + // a. If there are no erased elements in the group we simply add distance to group->elements to find the new + // location for the iterator. b. If there are erased elements in the group, we manually iterate and subtract 1 + // from distance on each iteration, until the new iterator location is found ie. distance = 0. + + // Note: incrementing element_pointer is avoided until necessary to avoid needless calculations. 
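+ // Illustrative walk-through (hypothetical layout): with two blocks of capacity 8 and no erasures, advancing an
+ // iterator that sits on the 3rd element of the first block by 10 subtracts the 6 remaining positions of that block
+ // from the distance, moves group_pointer to the second block, and then lands on that block's 5th element
+ // (elements + 4) via straight pointer addition, since neither block has a free list.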
+ + if (group_pointer->next_group == NULL + && element_pointer == pointer_cast(group_pointer->skipfield)) + return; // Check if we're already beyond back of final block + + // Special case for initial element pointer and initial group (we don't know how far into the group the element + // pointer is) + if (element_pointer + != group_pointer->elements + + *(group_pointer + ->skipfield)) // ie. != first non-erased element in group - otherwise we skip this section + // and just treat the first block as we would an intermediary block + { + const difference_type distance_from_end = + pointer_cast(group_pointer->skipfield) - element_pointer; + + if (group_pointer->free_list_head + == std::numeric_limits::max()) // ie. if there are no erasures in the group + { + if (distance < distance_from_end) { + element_pointer += distance; + skipfield_pointer += distance; + return; + } else if (group_pointer->next_group + == NULL) // either we've reached end() or gone beyond it, so bound to back of block + { + element_pointer += distance_from_end; + skipfield_pointer += distance_from_end; + return; + } else { + distance -= distance_from_end; + } + } else { + const skipfield_pointer_type endpoint = skipfield_pointer + distance_from_end; + + while (true) { + ++skipfield_pointer; + skipfield_pointer += *skipfield_pointer; + --distance; + + if (skipfield_pointer == endpoint) { + break; + } else if (distance == 0) { + element_pointer = group_pointer->elements + (skipfield_pointer - group_pointer->skipfield); + return; + } + } + + if (group_pointer->next_group + == NULL) // either we've reached end() or gone beyond it, so bound to end of block + { + element_pointer = pointer_cast(group_pointer->skipfield); + return; + } + } + + group_pointer = group_pointer->next_group; + + if (distance == 0) { + element_pointer = group_pointer->elements + *(group_pointer->skipfield); + skipfield_pointer = group_pointer->skipfield + *(group_pointer->skipfield); + return; + } + } + + // Intermediary groups - at the start of this code block and the subsequent block, the position of the iterator + // is assumed to be the first non-erased element in the current group: + while (static_cast(group_pointer->size) <= distance) { + if (group_pointer->next_group + == NULL) // either we've reached end() or gone beyond it, so bound to end of block + { + element_pointer = pointer_cast(group_pointer->skipfield); + skipfield_pointer = group_pointer->skipfield + group_pointer->capacity; + return; + } else if ((distance -= group_pointer->size) == 0) { + group_pointer = group_pointer->next_group; + element_pointer = group_pointer->elements + *(group_pointer->skipfield); + skipfield_pointer = group_pointer->skipfield + *(group_pointer->skipfield); + return; + } else { + group_pointer = group_pointer->next_group; + } + } + + // Final group (if not already reached): + if (group_pointer->free_list_head + == std::numeric_limits::max()) // No erasures in this group, use straight pointer addition + { + element_pointer = group_pointer->elements + distance; + skipfield_pointer = group_pointer->skipfield + distance; + } else // We already know size > distance due to the intermediary group checks above - safe to ignore endpoint + // check condition while incrementing here: + { + skipfield_pointer = group_pointer->skipfield + *(group_pointer->skipfield); + + do { + ++skipfield_pointer; + skipfield_pointer += *skipfield_pointer; + } while (--distance != 0); + + element_pointer = group_pointer->elements + (skipfield_pointer - group_pointer->skipfield); + } + } 
else if (distance < 0) { + // Code logic is very similar to += above + if (group_pointer->previous_group == NULL + && element_pointer == group_pointer->elements + *(group_pointer->skipfield)) + return; // check if we're already at begin() + + distance = -distance; + + // Special case for initial element pointer and initial group (we don't know how far into the group the element + // pointer is) + if (element_pointer + != pointer_cast(group_pointer->skipfield)) // not currently at the back of a block + { + if (group_pointer->free_list_head + == std::numeric_limits::max()) // ie. no prior erasures have occurred in this group + { + const difference_type distance_from_beginning = + static_cast(element_pointer - group_pointer->elements); + + if (distance <= distance_from_beginning) { + element_pointer -= distance; + skipfield_pointer -= distance; + return; + } else if (group_pointer->previous_group == NULL) // ie. we've gone before begin(), so bound to begin() + { + element_pointer = group_pointer->elements; + skipfield_pointer = group_pointer->skipfield; + return; + } else { + distance -= distance_from_beginning; + } + } else { + for (const skipfield_pointer_type begin = group_pointer->skipfield + *(group_pointer->skipfield); + skipfield_pointer != begin;) { + --skipfield_pointer; + skipfield_pointer -= *skipfield_pointer; + + if (--distance == 0) { + element_pointer = group_pointer->elements + (skipfield_pointer - group_pointer->skipfield); + return; + } + } + + if (group_pointer->previous_group == NULL) { + element_pointer = group_pointer->elements + + *(group_pointer->skipfield); // This is first group, so bound to begin() (just + // in case final decrement took us before begin()) + skipfield_pointer = group_pointer->skipfield + *(group_pointer->skipfield); + return; + } + } + + group_pointer = group_pointer->previous_group; + } + + // Intermediary groups - at the start of this code block and the subsequent block, the position of the iterator + // is assumed to be either the first non-erased element in the next group over, or end(): + while (static_cast(group_pointer->size) < distance) { + if (group_pointer->previous_group == NULL) // we've gone beyond begin(), so bound to it + { + element_pointer = group_pointer->elements + *(group_pointer->skipfield); + skipfield_pointer = group_pointer->skipfield + *(group_pointer->skipfield); + return; + } + + distance -= group_pointer->size; + group_pointer = group_pointer->previous_group; + } + + // Final group (if not already reached): + if (static_cast(group_pointer->size) == distance) // go to front of group + { + element_pointer = group_pointer->elements + *(group_pointer->skipfield); + skipfield_pointer = group_pointer->skipfield + *(group_pointer->skipfield); + } else if (group_pointer->free_list_head + == std::numeric_limits::max()) // ie. no erased elements in this group + { + element_pointer = pointer_cast(group_pointer->skipfield) - distance; + skipfield_pointer = (group_pointer->skipfield + group_pointer->size) - distance; + } else // ie. 
no more groups to traverse but there are erased elements in this group + { + skipfield_pointer = + group_pointer->skipfield + + (pointer_cast(group_pointer->skipfield) - group_pointer->elements); + + do { + --skipfield_pointer; + skipfield_pointer -= *skipfield_pointer; + } while (--distance != 0); + + element_pointer = group_pointer->elements + (skipfield_pointer - group_pointer->skipfield); + } + } + } + + // distance implementation: + difference_type distance(const hive_iterator &last) const + { + // Code logic: + // If iterators are the same, return 0 + // Otherwise, find which iterator is later in hive, copy that to iterator2. Copy the lower to iterator1. + // If they are not pointing to elements in the same group, process the intermediate groups and add distances, + // skipping manual incrementation in all but the initial and final groups. + // In the initial and final groups, manual incrementation must be used to calculate distance, if there have been no + // prior erasures in those groups. If there are no prior erasures in either of those groups, we can use pointer + // arithmetic to calculate the distances for those groups. + + assert(!(group_pointer == NULL) && !(last.group_pointer == NULL)); // Check that they are both initialized + + if (last.element_pointer == element_pointer) return 0; + + difference_type distance = 0; + hive_iterator iterator1 = *this, iterator2 = last; + const bool swap_iterators = iterator1 > iterator2; + + if (swap_iterators) { + iterator1 = last; + iterator2 = *this; + } + + if (iterator1.group_pointer != iterator2.group_pointer) // if not in same group, process intermediate groups + { + // Process initial group: + if (iterator1.group_pointer->free_list_head + == std::numeric_limits::max()) // If no prior erasures have occured in this group we can do + // simple addition + { + distance += static_cast( + pointer_cast(iterator1.group_pointer->skipfield) - iterator1.element_pointer); + } else if (iterator1.element_pointer + == iterator1.group_pointer->elements + + *(iterator1.group_pointer->skipfield)) // ie. element is at start of group - rare case + { + distance += static_cast(iterator1.group_pointer->size); + } else // Manually iterate to find distance to end of group: + { + for (const skipfield_pointer_type end = + iterator1.skipfield_pointer + + (pointer_cast(iterator1.group_pointer->skipfield) + - iterator1.element_pointer); + iterator1.skipfield_pointer != end;) { + ++iterator1.skipfield_pointer; + iterator1.skipfield_pointer += *iterator1.skipfield_pointer; + ++distance; + } + } + + // Process all other intermediate groups: + iterator1.group_pointer = iterator1.group_pointer->next_group; + + while (iterator1.group_pointer != iterator2.group_pointer) { + distance += static_cast(iterator1.group_pointer->size); + iterator1.group_pointer = iterator1.group_pointer->next_group; + } + + iterator1.skipfield_pointer = iterator1.group_pointer->skipfield + *(iterator1.group_pointer->skipfield); + } + + if (iterator2.group_pointer->free_list_head + == std::numeric_limits::max()) // ie. no erasures in this group, direct subtraction is possible + { + distance += iterator2.skipfield_pointer - iterator1.skipfield_pointer; + } else if (iterator1.element_pointer == iterator2.group_pointer->elements + *(iterator2.group_pointer->skipfield) + && iterator2.element_pointer + 1 + *(iterator2.skipfield_pointer + 1) + == pointer_cast( + iterator2.group_pointer + ->skipfield)) // ie. 
if iterator1 is at beginning of block (have to check this in case + // first and last are in the same block to begin with) and iterator2 is last + // element in the block + { + distance += static_cast(iterator2.group_pointer->size) - 1; + } else { + while (iterator1.skipfield_pointer != iterator2.skipfield_pointer) { + ++iterator1.skipfield_pointer; + iterator1.skipfield_pointer += *iterator1.skipfield_pointer; + ++distance; + } + } + + if (swap_iterators) distance = -distance; + + return distance; + } + }; // hive_iterator + + // Reverse iterators: + template + class hive_reverse_iterator + { + private: + typedef typename hive::group_pointer_type group_pointer_type; + typedef typename hive::aligned_pointer_type aligned_pointer_type; + typedef typename hive::skipfield_pointer_type skipfield_pointer_type; + + protected: + iterator current; + + public: + struct hive_iterator_tag { + }; + + typedef std::bidirectional_iterator_tag iterator_category; + typedef std::bidirectional_iterator_tag iterator_concept; + typedef iterator iterator_type; + typedef typename hive::value_type value_type; + typedef typename hive::difference_type difference_type; + typedef typename detail::conditional::type pointer; + typedef + typename detail::conditional::type reference; + + friend class hive; + + template + friend void advance(hive_reverse_iterator &it, distance_type distance) + { + it.advance(static_cast(distance)); + } + + friend hive_reverse_iterator next(const hive_reverse_iterator &it, const difference_type distance) + { + hive_reverse_iterator return_iterator(it); + return_iterator.advance(static_cast(distance)); + return return_iterator; + } + + template + friend hive_reverse_iterator prev(const hive_reverse_iterator &it, const difference_type distance) + { + hive_reverse_iterator return_iterator(it); + return_iterator.advance(-(static_cast(distance))); + return return_iterator; + } + + friend typename hive_reverse_iterator::difference_type distance(const hive_reverse_iterator &first, + const hive_reverse_iterator &last) + { + return first.distance(last); + } + + hive_reverse_iterator() noexcept {} + + hive_reverse_iterator(const hive_reverse_iterator &source) noexcept : current(source.current) {} + + template ::type> + hive_reverse_iterator(const hive_reverse_iterator &source) noexcept : current(source.current) + { + } + + hive_reverse_iterator(const hive_iterator &source) noexcept : current(source) { ++(*this); } + + template ::type> + hive_reverse_iterator(const hive_iterator &source) noexcept : current(source) + { + ++(*this); + } + + hive_reverse_iterator(hive_reverse_iterator &&source) noexcept : current(std::move(source.current)) {} + + template ::type> + hive_reverse_iterator(hive_reverse_iterator &&source) noexcept : current(std::move(source.current)) + { + } + + hive_reverse_iterator &operator=(const hive_iterator &source) noexcept + { + current = source; + ++current; + return *this; + } + + template ::type> + hive_reverse_iterator &operator=(const hive_iterator &source) noexcept + { + current = source; + ++current; + return *this; + } + + hive_reverse_iterator &operator=(const hive_reverse_iterator &source) noexcept + { + current = source.current; + return *this; + } + + template ::type> + hive_reverse_iterator &operator=(const hive_reverse_iterator &source) noexcept + { + current = source.current; + return *this; + } + + hive_reverse_iterator &operator=(hive_reverse_iterator &&source) noexcept + { + assert(&source != this); + current = std::move(source.current); + return *this; + } + + 
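+ // Illustrative usage sketch (insert(), rbegin() and rend() are assumed to be provided by the enclosing hive class,
+ // as in plf::hive):
+ //   detail::hive<int> h;
+ //   h.insert(1); h.insert(2); h.insert(3);
+ //   for (auto rit = h.rbegin(); rit != h.rend(); ++rit) {} // visits 3, 2, 1 when no erasures have occurred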
template ::type> + hive_reverse_iterator &operator=(hive_reverse_iterator &&source) noexcept + { + assert(&source != this); + current = std::move(source.current); + return *this; + } + + bool operator==(const hive_reverse_iterator &rh) const noexcept { return (current == rh.current); } + + bool operator==(const hive_reverse_iterator &rh) const noexcept { return (current == rh.current); } + + bool operator!=(const hive_reverse_iterator &rh) const noexcept { return (current != rh.current); } + + bool operator!=(const hive_reverse_iterator &rh) const noexcept { return (current != rh.current); } + + reference operator*() const noexcept { return *pointer_cast(current.element_pointer); } + + pointer operator->() const noexcept { return pointer_cast(current.element_pointer); } + + // In this case we have to redefine the algorithm, rather than using the internal iterator's -- operator, in order for + // the reverse_iterator to be allowed to reach rend() ie. begin_iterator - 1 + hive_reverse_iterator &operator++() + { + group_pointer_type &group_pointer = current.group_pointer; + aligned_pointer_type &element_pointer = current.element_pointer; + skipfield_pointer_type &skipfield_pointer = current.skipfield_pointer; + + assert(group_pointer != NULL); + + if (--skipfield_pointer >= group_pointer->skipfield) { + element_pointer -= static_cast(*skipfield_pointer) + 1u; + if ((skipfield_pointer -= *skipfield_pointer) >= group_pointer->skipfield) return *this; + } + + if (group_pointer->previous_group != NULL) { + group_pointer = group_pointer->previous_group; + const skipfield_pointer_type skipfield = group_pointer->skipfield + group_pointer->capacity - 1; + const skipfield_type skip = *skipfield; + element_pointer = (pointer_cast(group_pointer->skipfield) - 1) - skip; + skipfield_pointer = skipfield - skip; + } else // bound to rend() + { + --element_pointer; + } + + return *this; + } + + hive_reverse_iterator operator++(int) + { + const hive_reverse_iterator copy(*this); + ++*this; + return copy; + } + + hive_reverse_iterator &operator--() + { + ++current; + return *this; + } + + hive_reverse_iterator operator--(int) + { + const hive_reverse_iterator copy(*this); + ++current; + return copy; + } + + hive_iterator base() const noexcept + { + return (current.group_pointer != NULL) ? ++(hive_iterator(current)) + : hive_iterator(NULL, NULL, NULL); + } + + template + bool operator>(const hive_reverse_iterator &rh) const noexcept + { + return (rh.current > current); + } + + template + bool operator<(const hive_reverse_iterator &rh) const noexcept + { + return (current > rh.current); + } + + template + bool operator>=(const hive_reverse_iterator &rh) const noexcept + { + return !(current > rh.current); + } + + template + bool operator<=(const hive_reverse_iterator &rh) const noexcept + { + return !(rh.current > current); + } + + private: + // Used by rend(), etc: + hive_reverse_iterator(const group_pointer_type group_p, + const aligned_pointer_type element_p, + const skipfield_pointer_type skipfield_p) noexcept + : current(group_p, element_p, skipfield_p) + { + } + + // distance implementation: + + difference_type distance(const hive_reverse_iterator &last) const { return last.current.distance(current); } + + // Advance for reverse_iterator and const_reverse_iterator - this needs to be implemented slightly differently to + // forward-iterator's advance, as current needs to be able to reach rend() (ie. 
begin() - 1) and to be bounded by + // rbegin(): + void advance(difference_type distance) + { + group_pointer_type &group_pointer = current.group_pointer; + aligned_pointer_type &element_pointer = current.element_pointer; + skipfield_pointer_type &skipfield_pointer = current.skipfield_pointer; + + assert(element_pointer != NULL); + + if (distance > 0) { + if (group_pointer->previous_group == NULL && element_pointer == group_pointer->elements - 1) + return; // Check if we're already at rend() + + if (group_pointer->free_list_head == std::numeric_limits::max()) { + const difference_type distance_from_beginning = element_pointer - group_pointer->elements; + + if (distance <= distance_from_beginning) { + element_pointer -= distance; + skipfield_pointer -= distance; + return; + } else if (group_pointer->previous_group + == NULL) // Either we've reached rend() or gone beyond it, so bound to rend() + { + element_pointer = group_pointer->elements - 1; + skipfield_pointer = group_pointer->skipfield - 1; + return; + } else { + distance -= distance_from_beginning; + } + } else { + for (const skipfield_pointer_type begin = group_pointer->skipfield + *(group_pointer->skipfield); + skipfield_pointer != begin;) { + --skipfield_pointer; + skipfield_pointer -= *skipfield_pointer; + + if (--distance == 0) { + element_pointer = group_pointer->elements + (skipfield_pointer - group_pointer->skipfield); + return; + } + } + + if (group_pointer->previous_group == NULL) { + element_pointer = group_pointer->elements - 1; // If we've reached rend(), bound to that + skipfield_pointer = group_pointer->skipfield - 1; + return; + } + } + + group_pointer = group_pointer->previous_group; + + // Intermediary groups - at the start of this code block and the subsequent block, the position of the iterator + // is assumed to be the first non-erased element in the next group: + while (static_cast(group_pointer->size) < distance) { + if (group_pointer->previous_group == NULL) // bound to rend() + { + element_pointer = group_pointer->elements - 1; + skipfield_pointer = group_pointer->skipfield - 1; + return; + } + + distance -= static_cast(group_pointer->size); + group_pointer = group_pointer->previous_group; + } + + // Final group (if not already reached) + if (static_cast(group_pointer->size) == distance) { + element_pointer = group_pointer->elements + *(group_pointer->skipfield); + skipfield_pointer = group_pointer->skipfield + *(group_pointer->skipfield); + return; + } else if (group_pointer->free_list_head == std::numeric_limits::max()) { + element_pointer = (group_pointer->elements + group_pointer->size) - distance; + skipfield_pointer = (group_pointer->skipfield + group_pointer->size) - distance; + return; + } else { + skipfield_pointer = group_pointer->skipfield + group_pointer->capacity; + + do { + --skipfield_pointer; + skipfield_pointer -= *skipfield_pointer; + } while (--distance != 0); + + element_pointer = group_pointer->elements + (skipfield_pointer - group_pointer->skipfield); + return; + } + } else if (distance < 0) { + if (group_pointer->next_group == NULL + && (element_pointer + == (pointer_cast(group_pointer->skipfield) - 1) + - *(group_pointer->skipfield + + (pointer_cast(group_pointer->skipfield) - group_pointer->elements) + - 1))) + return; // Check if we're already at rbegin() + + if (element_pointer + != group_pointer->elements + *(group_pointer->skipfield)) // ie. != first non-erased element in group + { + if (group_pointer->free_list_head + == std::numeric_limits::max()) // ie. 
if there are no erasures in the group + { + const difference_type distance_from_end = + pointer_cast(group_pointer->skipfield) - element_pointer; + + if (distance < distance_from_end) { + element_pointer += distance; + skipfield_pointer += distance; + return; + } else if (group_pointer->next_group + == NULL) // either we've reached end() or gone beyond it, so bound to back of block + { + element_pointer += distance_from_end - 1; + skipfield_pointer += distance_from_end - 1; + return; + } else { + distance -= distance_from_end; + } + } else { + for (const skipfield_pointer_type end = + skipfield_pointer + + (pointer_cast(group_pointer->skipfield) - element_pointer); + ;) { + ++skipfield_pointer; + skipfield_pointer += *skipfield_pointer; + --distance; + + if (skipfield_pointer == end) { + break; + } else if (distance == 0) { + element_pointer = group_pointer->elements + (skipfield_pointer - group_pointer->skipfield); + return; + } + } + + if (group_pointer->next_group == NULL) return; + } + + group_pointer = group_pointer->next_group; + + if (distance == 0) { + element_pointer = group_pointer->elements + *(group_pointer->skipfield); + skipfield_pointer = group_pointer->skipfield + *(group_pointer->skipfield); + return; + } + } + + // Intermediary groups: + while (static_cast(group_pointer->size) <= distance) { + if (group_pointer->next_group == NULL) // bound to last element slot in block + { + skipfield_pointer = group_pointer->skipfield + group_pointer->capacity - 1; + element_pointer = + (pointer_cast(group_pointer->skipfield) - 1) - *skipfield_pointer; + skipfield_pointer -= *skipfield_pointer; + return; + } else if ((distance -= group_pointer->size) == 0) { + group_pointer = group_pointer->next_group; + element_pointer = group_pointer->elements + *(group_pointer->skipfield); + skipfield_pointer = group_pointer->skipfield + *(group_pointer->skipfield); + return; + } else { + group_pointer = group_pointer->next_group; + } + } + + // Final group (if not already reached): + if (group_pointer->free_list_head == std::numeric_limits::max()) { + element_pointer = group_pointer->elements + distance; + skipfield_pointer = group_pointer->skipfield + distance; + return; + } else // we already know size > distance from previous loop - so it's safe to ignore endpoint check condition + // while incrementing: + { + skipfield_pointer = group_pointer->skipfield + *(group_pointer->skipfield); + + do { + ++skipfield_pointer; + skipfield_pointer += *skipfield_pointer; + } while (--distance != 0); + + element_pointer = group_pointer->elements + (skipfield_pointer - group_pointer->skipfield); + return; + } + + return; + } + } + + }; // hive_reverse_iterator +}; // hive +} // namespace detail + +namespace std +{ +template +void swap(detail::hive &a, detail::hive &b) noexcept( + std::allocator_traits::propagate_on_container_swap::value + || std::allocator_traits::is_always_equal::value) +{ + a.swap(b); +} + +template +typename detail::hive::size_type erase_if(detail::hive &container, + predicate_function predicate) +{ + typedef typename detail::hive hive; + typedef typename hive::const_iterator const_iterator; + typedef typename hive::size_type size_type; + size_type count = 0; + const const_iterator end = container.cend(); + + for (const_iterator current = container.cbegin(); current != end; ++current) { + if (predicate(*current)) { + const size_type original_count = ++count; + const_iterator last = current; + + while (++last != end && predicate(*last)) { ++count; } + + if (count != original_count) { + 
current = container.erase(current, last); // optimised range-erase
+ } else {
+ current = container.erase(current);
+ }
+
+ if (last == end) break;
+ }
+ }
+
+ return count;
+}
+
+template <class element_type, class allocator_type, detail::priority priority>
+typename detail::hive<element_type, allocator_type, priority>::size_type
+erase(detail::hive<element_type, allocator_type, priority> &container, const element_type &value)
+{
+ return erase_if(container, detail::equal_to<element_type>(value));
+}
+} // namespace std
+
+#if defined(_MSC_VER) && !defined(__clang__) && !defined(__GNUC__)
+#pragma warning(pop)
+#endif
\ No newline at end of file
diff --git a/shared_module/container/hive.hpp b/shared_module/container/hive.hpp
new file mode 100644
index 0000000..2af961e
--- /dev/null
+++ b/shared_module/container/hive.hpp
@@ -0,0 +1,8 @@
+#pragma once
+
+#include
+
+#include "detail/hive_detail.hpp"
+
+template
+using hive_mp = detail::hive>;
\ No newline at end of file
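
Illustrative usage of the new files (a sketch only: insert() and size() are assumed to be provided by the hive
implementation above, as in plf::hive, and hive_mp's second template argument is assumed to be an allocator for T):

    #include "shared_module/container/hive.hpp"

    int main()
    {
        hive_mp<int> values; // assumes the alias's stripped template argument is an allocator for int
        for (int i = 0; i != 10; ++i) { values.insert(i); }

        std::erase_if(values, [](const int v) { return (v & 1) == 1; }); // uses the overload from hive_detail.hpp

        return values.size() == 5 ? 0 : 1; // the five even values remain
    }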