// Home | History | Annotate | Line # | Download | only in gdbsupport
// unordered_dense.h revision 1.1.1.1
      1 ///////////////////////// ankerl::unordered_dense::{map, set} /////////////////////////
      2 
      3 // A fast & densely stored hashmap and hashset based on robin-hood backward shift deletion.
      4 // Version 4.4.0
      5 // https://github.com/martinus/unordered_dense
      6 //
      7 // Licensed under the MIT License <http://opensource.org/licenses/MIT>.
      8 // SPDX-License-Identifier: MIT
      9 // Copyright (c) 2022-2023 Martin Leitner-Ankerl <martin.ankerl (at) gmail.com>
     10 //
     11 // Permission is hereby granted, free of charge, to any person obtaining a copy
     12 // of this software and associated documentation files (the "Software"), to deal
     13 // in the Software without restriction, including without limitation the rights
     14 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
     15 // copies of the Software, and to permit persons to whom the Software is
     16 // furnished to do so, subject to the following conditions:
     17 //
     18 // The above copyright notice and this permission notice shall be included in all
     19 // copies or substantial portions of the Software.
     20 //
     21 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     22 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     23 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
     24 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
     25 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
     26 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
     27 // SOFTWARE.
     28 
     29 #ifndef ANKERL_UNORDERED_DENSE_H
     30 #define ANKERL_UNORDERED_DENSE_H
     31 
     32 // see https://semver.org/spec/v2.0.0.html
     33 #define ANKERL_UNORDERED_DENSE_VERSION_MAJOR 4 // NOLINT(cppcoreguidelines-macro-usage) incompatible API changes
     34 #define ANKERL_UNORDERED_DENSE_VERSION_MINOR 4 // NOLINT(cppcoreguidelines-macro-usage) backwards compatible functionality
     35 #define ANKERL_UNORDERED_DENSE_VERSION_PATCH 0 // NOLINT(cppcoreguidelines-macro-usage) backwards compatible bug fixes
     36 
     37 // API versioning with inline namespace, see https://www.foonathan.net/2018/11/inline-namespaces/
     38 
     39 // NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
     40 #define ANKERL_UNORDERED_DENSE_VERSION_CONCAT1(major, minor, patch) v##major##_##minor##_##patch
     41 // NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
     42 #define ANKERL_UNORDERED_DENSE_VERSION_CONCAT(major, minor, patch) ANKERL_UNORDERED_DENSE_VERSION_CONCAT1(major, minor, patch)
     43 #define ANKERL_UNORDERED_DENSE_NAMESPACE   \
     44     ANKERL_UNORDERED_DENSE_VERSION_CONCAT( \
     45         ANKERL_UNORDERED_DENSE_VERSION_MAJOR, ANKERL_UNORDERED_DENSE_VERSION_MINOR, ANKERL_UNORDERED_DENSE_VERSION_PATCH)
     46 
     47 #if defined(_MSVC_LANG)
     48 #    define ANKERL_UNORDERED_DENSE_CPP_VERSION _MSVC_LANG
     49 #else
     50 #    define ANKERL_UNORDERED_DENSE_CPP_VERSION __cplusplus
     51 #endif
     52 
     53 #if defined(__GNUC__)
     54 // NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
     55 #    define ANKERL_UNORDERED_DENSE_PACK(decl) decl __attribute__((__packed__))
     56 #elif defined(_MSC_VER)
     57 // NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
     58 #    define ANKERL_UNORDERED_DENSE_PACK(decl) __pragma(pack(push, 1)) decl __pragma(pack(pop))
     59 #endif
     60 
     61 // exceptions
     62 #if defined(__cpp_exceptions) || defined(__EXCEPTIONS) || defined(_CPPUNWIND)
     63 #    define ANKERL_UNORDERED_DENSE_HAS_EXCEPTIONS() 1 // NOLINT(cppcoreguidelines-macro-usage)
     64 #else
     65 #    define ANKERL_UNORDERED_DENSE_HAS_EXCEPTIONS() 0 // NOLINT(cppcoreguidelines-macro-usage)
     66 #endif
     67 #ifdef _MSC_VER
     68 #    define ANKERL_UNORDERED_DENSE_NOINLINE __declspec(noinline)
     69 #else
     70 #    define ANKERL_UNORDERED_DENSE_NOINLINE __attribute__((noinline))
     71 #endif
     72 
     73 // defined in unordered_dense.cpp
     74 #if !defined(ANKERL_UNORDERED_DENSE_EXPORT)
     75 #    define ANKERL_UNORDERED_DENSE_EXPORT
     76 #endif
     77 
     78 #if ANKERL_UNORDERED_DENSE_CPP_VERSION < 201703L
     79 #    error ankerl::unordered_dense requires C++17 or higher
     80 #else
     81 #    include <array>            // for array
     82 #    include <cstdint>          // for uint64_t, uint32_t, uint8_t, UINT64_C
     83 #    include <cstring>          // for size_t, memcpy, memset
     84 #    include <functional>       // for equal_to, hash
     85 #    include <initializer_list> // for initializer_list
     86 #    include <iterator>         // for pair, distance
     87 #    include <limits>           // for numeric_limits
     88 #    include <memory>           // for allocator, allocator_traits, shared_ptr
     89 #    include <optional>         // for optional
     90 #    include <stdexcept>        // for out_of_range
     91 #    include <string>           // for basic_string
     92 #    include <string_view>      // for basic_string_view, hash
     93 #    include <tuple>            // for forward_as_tuple
     94 #    include <type_traits>      // for enable_if_t, declval, conditional_t, ena...
     95 #    include <utility>          // for forward, exchange, pair, as_const, piece...
     96 #    include <vector>           // for vector
     97 #    if ANKERL_UNORDERED_DENSE_HAS_EXCEPTIONS() == 0
     98 #        include <cstdlib> // for abort
     99 #    endif
    100 
    101 #    if defined(__has_include)
    102 #        if __has_include(<memory_resource>)
    103 #            define ANKERL_UNORDERED_DENSE_PMR std::pmr // NOLINT(cppcoreguidelines-macro-usage)
    104 #            include <memory_resource>                  // for polymorphic_allocator
    105 #        elif __has_include(<experimental/memory_resource>)
    106 #            define ANKERL_UNORDERED_DENSE_PMR std::experimental::pmr // NOLINT(cppcoreguidelines-macro-usage)
    107 #            include <experimental/memory_resource>                   // for polymorphic_allocator
    108 #        endif
    109 #    endif
    110 
    111 #    if defined(_MSC_VER) && defined(_M_X64)
    112 #        include <intrin.h>
    113 #        pragma intrinsic(_umul128)
    114 #    endif
    115 
    116 #    if defined(__GNUC__) || defined(__INTEL_COMPILER) || defined(__clang__)
    117 #        define ANKERL_UNORDERED_DENSE_LIKELY(x) __builtin_expect(x, 1)   // NOLINT(cppcoreguidelines-macro-usage)
    118 #        define ANKERL_UNORDERED_DENSE_UNLIKELY(x) __builtin_expect(x, 0) // NOLINT(cppcoreguidelines-macro-usage)
    119 #    else
    120 #        define ANKERL_UNORDERED_DENSE_LIKELY(x) (x)   // NOLINT(cppcoreguidelines-macro-usage)
    121 #        define ANKERL_UNORDERED_DENSE_UNLIKELY(x) (x) // NOLINT(cppcoreguidelines-macro-usage)
    122 #    endif
    123 
    124 namespace ankerl::unordered_dense {
    125 inline namespace ANKERL_UNORDERED_DENSE_NAMESPACE {
    126 
    127 namespace detail {
    128 
#    if ANKERL_UNORDERED_DENSE_HAS_EXCEPTIONS()

// make sure this is not inlined as it is slow and dramatically enlarges code, thus making other
// inlinings more difficult. Throws are also generally the slow path.
// Raised by map::at() when the key is absent.
[[noreturn]] inline ANKERL_UNORDERED_DENSE_NOINLINE void on_error_key_not_found() {
    throw std::out_of_range("ankerl::unordered_dense::map::at(): key not found");
}
// Raised when the probe distance no longer fits in the bucket's distance field.
[[noreturn]] inline ANKERL_UNORDERED_DENSE_NOINLINE void on_error_bucket_overflow() {
    throw std::overflow_error("ankerl::unordered_dense: reached max bucket size, cannot increase size");
}
// Raised by replace() when the replacement container holds more elements than fit.
[[noreturn]] inline ANKERL_UNORDERED_DENSE_NOINLINE void on_error_too_many_elements() {
    throw std::out_of_range("ankerl::unordered_dense::map::replace(): too many elements");
}

#    else

// Exception-free builds: the same error entry points terminate instead of throwing.
[[noreturn]] inline void on_error_key_not_found() {
    abort();
}
[[noreturn]] inline void on_error_bucket_overflow() {
    abort();
}
[[noreturn]] inline void on_error_too_many_elements() {
    abort();
}

#    endif
    156 
    157 } // namespace detail
    158 
    159 // hash ///////////////////////////////////////////////////////////////////////
    160 
    161 // This is a stripped-down implementation of wyhash: https://github.com/wangyi-fudan/wyhash
    162 // No big-endian support (because different values on different machines don't matter),
    163 // hardcodes seed and the secret, reformats the code, and clang-tidy fixes.
    164 namespace detail::wyhash {
    165 
// Full 64x64 -> 128 bit multiply: on return, *a holds the low 64 bits and
// *b the high 64 bits of the product (*a) * (*b).
inline void mum(uint64_t* a, uint64_t* b) {
#    if defined(__SIZEOF_INT128__)
    // Native 128-bit integer support: let the compiler emit the wide multiply.
    auto product = static_cast<__uint128_t>(*a) * static_cast<__uint128_t>(*b);
    *a = static_cast<uint64_t>(product);
    *b = static_cast<uint64_t>(product >> 64U);
#    elif defined(_MSC_VER) && defined(_M_X64)
    // MSVC x64 intrinsic writes the high half straight into *b.
    *a = _umul128(*a, *b, b);
#    else
    // Portable fallback: schoolbook multiplication on 32-bit halves with
    // explicit carry propagation.
    uint64_t const a_hi = *a >> 32U;
    uint64_t const b_hi = *b >> 32U;
    uint64_t const a_lo = static_cast<uint32_t>(*a);
    uint64_t const b_lo = static_cast<uint32_t>(*b);
    uint64_t const high = a_hi * b_hi;
    uint64_t const cross_a = a_hi * b_lo;
    uint64_t const cross_b = b_hi * a_lo;
    uint64_t const low = a_lo * b_lo;
    uint64_t const partial = low + (cross_a << 32U);
    auto carry = static_cast<uint64_t>(partial < low);
    uint64_t const result_lo = partial + (cross_b << 32U);
    carry += static_cast<uint64_t>(result_lo < partial);
    uint64_t const result_hi = high + (cross_a >> 32U) + (cross_b >> 32U) + carry;
    *a = result_lo;
    *b = result_hi;
#    endif
}
    194 
// Multiply-and-xor mix function, aka MUM: folds the 128-bit product of a and b
// back into 64 bits by xor-ing its low and high halves.
[[nodiscard]] inline auto mix(uint64_t a, uint64_t b) -> uint64_t {
    mum(&a, &b); // a := low half of product, b := high half
    return a ^ b;
}
    200 
    201 // read functions. WARNING: we don't care about endianness, so results are different on big endian!
// Loads 8 bytes from p into a uint64_t. memcpy is the alignment-safe way to
// type-pun; compilers reduce it to a single (possibly unaligned) load.
[[nodiscard]] inline auto r8(const uint8_t* p) -> uint64_t {
    uint64_t result = 0;
    std::memcpy(&result, p, sizeof(result));
    return result;
}
    207 
// Loads 4 bytes from p and widens the value to 64 bits; memcpy keeps the read
// alignment-safe.
[[nodiscard]] inline auto r4(const uint8_t* p) -> uint64_t {
    uint32_t result = 0;
    std::memcpy(&result, p, sizeof(result));
    return result;
}
    213 
// Reads a 1, 2, or 3 byte tail (k is the byte count, 1 <= k <= 3). The index
// choices make the same expression valid for every k: the first byte, the
// middle byte (p[k >> 1]), and the last byte (p[k - 1]).
[[nodiscard]] inline auto r3(const uint8_t* p, size_t k) -> uint64_t {
    auto const first = static_cast<uint64_t>(p[0]) << 16U;
    auto const middle = static_cast<uint64_t>(p[k >> 1U]) << 8U;
    auto const last = static_cast<uint64_t>(p[k - 1]);
    return first | middle | last;
}
    218 
// wyhash over an arbitrary byte buffer. Short inputs (<= 16 bytes) are covered
// by overlapping word reads; long inputs are consumed 16 bytes at a time (48 at
// a time in three independent lanes), then the final mix folds in the length.
// WARNING: reads raw bytes, so results differ between endiannesses (see above).
[[maybe_unused]] [[nodiscard]] inline auto hash(void const* key, size_t len) -> uint64_t {
    // Hardcoded wyhash "secret" constants; secret[0] doubles as the seed.
    static constexpr auto secret = std::array{UINT64_C(0xa0761d6478bd642f),
                                              UINT64_C(0xe7037ed1a0b428db),
                                              UINT64_C(0x8ebc6af09c88c6e3),
                                              UINT64_C(0x589965cc75374cc3)};

    auto const* p = static_cast<uint8_t const*>(key);
    uint64_t seed = secret[0];
    uint64_t a{};
    uint64_t b{};
    if (ANKERL_UNORDERED_DENSE_LIKELY(len <= 16)) {
        if (ANKERL_UNORDERED_DENSE_LIKELY(len >= 4)) {
            // 4..16 bytes: two pairs of 4-byte reads whose offsets overlap as
            // needed so every byte of the input is covered.
            a = (r4(p) << 32U) | r4(p + ((len >> 3U) << 2U));
            b = (r4(p + len - 4) << 32U) | r4(p + len - 4 - ((len >> 3U) << 2U));
        } else if (ANKERL_UNORDERED_DENSE_LIKELY(len > 0)) {
            // 1..3 bytes: special-cased read.
            a = r3(p, len);
            b = 0;
        } else {
            // Empty input still produces a well-defined hash via the final mix.
            a = 0;
            b = 0;
        }
    } else {
        size_t i = len;
        if (ANKERL_UNORDERED_DENSE_UNLIKELY(i > 48)) {
            // Bulk loop: three independent 16-byte lanes per iteration give the
            // CPU instruction-level parallelism; lanes are merged afterwards.
            uint64_t see1 = seed;
            uint64_t see2 = seed;
            do {
                seed = mix(r8(p) ^ secret[1], r8(p + 8) ^ seed);
                see1 = mix(r8(p + 16) ^ secret[2], r8(p + 24) ^ see1);
                see2 = mix(r8(p + 32) ^ secret[3], r8(p + 40) ^ see2);
                p += 48;
                i -= 48;
            } while (ANKERL_UNORDERED_DENSE_LIKELY(i > 48));
            seed ^= see1 ^ see2;
        }
        // Consume remaining full 16-byte chunks in a single lane.
        while (ANKERL_UNORDERED_DENSE_UNLIKELY(i > 16)) {
            seed = mix(r8(p) ^ secret[1], r8(p + 8) ^ seed);
            i -= 16;
            p += 16;
        }
        // Final 16 bytes, read from the end (may overlap bytes already mixed).
        a = r8(p + i - 16);
        b = r8(p + i - 8);
    }

    return mix(secret[1] ^ len, mix(a ^ secret[1], b ^ seed));
}
    265 
// Hashes a single 64-bit value with one mix step against a fixed odd constant
// (the 64-bit golden-ratio constant). Much cheaper than the buffer overload.
[[nodiscard]] inline auto hash(uint64_t x) -> uint64_t {
    return detail::wyhash::mix(x, UINT64_C(0x9E3779B97F4A7C15));
}
    269 
    270 } // namespace detail::wyhash
    271 
// Primary hash template: falls back to std::hash. Deliberately does NOT define
// the is_avalanching marker that the specializations below carry, since
// std::hash implementations give no quality guarantee. noexcept mirrors the
// noexcept-ness of the underlying std::hash call.
ANKERL_UNORDERED_DENSE_EXPORT template <typename T, typename Enable = void>
struct hash {
    auto operator()(T const& obj) const noexcept(noexcept(std::declval<std::hash<T>>().operator()(std::declval<T const&>())))
        -> uint64_t {
        return std::hash<T>{}(obj);
    }
};
    279 
    280 template <typename CharT>
    281 struct hash<std::basic_string<CharT>> {
    282     using is_avalanching = void;
    283     auto operator()(std::basic_string<CharT> const& str) const noexcept -> uint64_t {
    284         return detail::wyhash::hash(str.data(), sizeof(CharT) * str.size());
    285     }
    286 };
    287 
    288 template <typename CharT>
    289 struct hash<std::basic_string_view<CharT>> {
    290     using is_avalanching = void;
    291     auto operator()(std::basic_string_view<CharT> const& sv) const noexcept -> uint64_t {
    292         return detail::wyhash::hash(sv.data(), sizeof(CharT) * sv.size());
    293     }
    294 };
    295 
    296 template <class T>
    297 struct hash<T*> {
    298     using is_avalanching = void;
    299     auto operator()(T* ptr) const noexcept -> uint64_t {
    300         // NOLINTNEXTLINE(cppcoreguidelines-pro-type-reinterpret-cast)
    301         return detail::wyhash::hash(reinterpret_cast<uintptr_t>(ptr));
    302     }
    303 };
    304 
    305 template <class T>
    306 struct hash<std::unique_ptr<T>> {
    307     using is_avalanching = void;
    308     auto operator()(std::unique_ptr<T> const& ptr) const noexcept -> uint64_t {
    309         // NOLINTNEXTLINE(cppcoreguidelines-pro-type-reinterpret-cast)
    310         return detail::wyhash::hash(reinterpret_cast<uintptr_t>(ptr.get()));
    311     }
    312 };
    313 
    314 template <class T>
    315 struct hash<std::shared_ptr<T>> {
    316     using is_avalanching = void;
    317     auto operator()(std::shared_ptr<T> const& ptr) const noexcept -> uint64_t {
    318         // NOLINTNEXTLINE(cppcoreguidelines-pro-type-reinterpret-cast)
    319         return detail::wyhash::hash(reinterpret_cast<uintptr_t>(ptr.get()));
    320     }
    321 };
    322 
    323 template <typename Enum>
    324 struct hash<Enum, typename std::enable_if<std::is_enum<Enum>::value>::type> {
    325     using is_avalanching = void;
    326     auto operator()(Enum e) const noexcept -> uint64_t {
    327         using underlying = typename std::underlying_type_t<Enum>;
    328         return detail::wyhash::hash(static_cast<underlying>(e));
    329     }
    330 };
    331 
// Helper for hashing std::tuple / std::pair by folding each element into a
// single 64-bit accumulator.
template <typename... Args>
struct tuple_hash_helper {
    // Converts the value into 64bit. If it is an integral type, just cast it. Mixing is doing the rest.
    // If it isn't an integral we need to hash it.
    template <typename Arg>
    [[nodiscard]] constexpr static auto to64(Arg const& arg) -> uint64_t {
        if constexpr (std::is_integral_v<Arg> || std::is_enum_v<Arg>) {
            return static_cast<uint64_t>(arg);
        } else {
            return hash<Arg>{}(arg);
        }
    }

    // One combining step: mixes the accumulated state plus the next element's
    // 64-bit representation against a fixed odd constant.
    [[nodiscard]] static auto mix64(uint64_t state, uint64_t v) -> uint64_t {
        return detail::wyhash::mix(state + v, uint64_t{0x9ddfea08eb382d69});
    }

    // Left-folds the tuple: for each index in order, converts the element with
    // to64() and mixes it into the accumulator via mix64(). (The upstream
    // comment describing a memcpy'd buffer referred to an older implementation;
    // the current code mixes element by element with a fold expression.)
    template <typename T, std::size_t... Idx>
    [[nodiscard]] static auto calc_hash(T const& t, std::index_sequence<Idx...>) noexcept -> uint64_t {
        auto h = uint64_t{};
        ((h = mix64(h, to64(std::get<Idx>(t)))), ...);
        return h;
    }
};
    359 
// Hash for std::tuple: folds every element via tuple_hash_helper.
template <typename... Args>
struct hash<std::tuple<Args...>> : tuple_hash_helper<Args...> {
    using is_avalanching = void;
    auto operator()(std::tuple<Args...> const& t) const noexcept -> uint64_t {
        return tuple_hash_helper<Args...>::calc_hash(t, std::index_sequence_for<Args...>{});
    }
};
    367 
// Hash for std::pair: same element-folding scheme as the tuple specialization.
template <typename A, typename B>
struct hash<std::pair<A, B>> : tuple_hash_helper<A, B> {
    using is_avalanching = void;
    auto operator()(std::pair<A, B> const& t) const noexcept -> uint64_t {
        return tuple_hash_helper<A, B>::calc_hash(t, std::index_sequence_for<A, B>{});
    }
};
    375 
// Stamps out a hash specialization for a builtin arithmetic/character type T:
// the value is simply static_cast to uint64_t and run through wyhash.
// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#    define ANKERL_UNORDERED_DENSE_HASH_STATICCAST(T)                    \
        template <>                                                      \
        struct hash<T> {                                                 \
            using is_avalanching = void;                                 \
            auto operator()(T const& obj) const noexcept -> uint64_t {   \
                return detail::wyhash::hash(static_cast<uint64_t>(obj)); \
            }                                                            \
        }

#    if defined(__GNUC__) && !defined(__clang__)
// GCC flags the uint64_t cast as redundant for some of these types; silence it.
#        pragma GCC diagnostic push
#        pragma GCC diagnostic ignored "-Wuseless-cast"
#    endif
// Cover every type that std::hash specializes for integrals/characters,
// see https://en.cppreference.com/w/cpp/utility/hash
ANKERL_UNORDERED_DENSE_HASH_STATICCAST(bool);
ANKERL_UNORDERED_DENSE_HASH_STATICCAST(char);
ANKERL_UNORDERED_DENSE_HASH_STATICCAST(signed char);
ANKERL_UNORDERED_DENSE_HASH_STATICCAST(unsigned char);
#    if ANKERL_UNORDERED_DENSE_CPP_VERSION >= 202002L && defined(__cpp_char8_t)
ANKERL_UNORDERED_DENSE_HASH_STATICCAST(char8_t);
#    endif
ANKERL_UNORDERED_DENSE_HASH_STATICCAST(char16_t);
ANKERL_UNORDERED_DENSE_HASH_STATICCAST(char32_t);
ANKERL_UNORDERED_DENSE_HASH_STATICCAST(wchar_t);
ANKERL_UNORDERED_DENSE_HASH_STATICCAST(short);
ANKERL_UNORDERED_DENSE_HASH_STATICCAST(unsigned short);
ANKERL_UNORDERED_DENSE_HASH_STATICCAST(int);
ANKERL_UNORDERED_DENSE_HASH_STATICCAST(unsigned int);
ANKERL_UNORDERED_DENSE_HASH_STATICCAST(long);
ANKERL_UNORDERED_DENSE_HASH_STATICCAST(long long);
ANKERL_UNORDERED_DENSE_HASH_STATICCAST(unsigned long);
ANKERL_UNORDERED_DENSE_HASH_STATICCAST(unsigned long long);

#    if defined(__GNUC__) && !defined(__clang__)
#        pragma GCC diagnostic pop
#    endif
    413 
    414 // bucket_type //////////////////////////////////////////////////////////
    415 
// Bucket layouts for the hash table. "standard" keeps 32-bit value indices
// (8 bytes per bucket); "big" widens the value index to size_t and is packed
// so the wider index adds no padding.
namespace bucket_type {

struct standard {
    static constexpr uint32_t dist_inc = 1U << 8U;             // skip 1 byte fingerprint
    static constexpr uint32_t fingerprint_mask = dist_inc - 1; // mask for 1 byte of fingerprint

    uint32_t m_dist_and_fingerprint; // upper 3 byte: distance to original bucket. lower byte: fingerprint from hash
    uint32_t m_value_idx;            // index into the m_values vector.
};

// Same semantics as "standard", but the value index is size_t so the table can
// address more elements than fit in uint32_t.
ANKERL_UNORDERED_DENSE_PACK(struct big {
    static constexpr uint32_t dist_inc = 1U << 8U;             // skip 1 byte fingerprint
    static constexpr uint32_t fingerprint_mask = dist_inc - 1; // mask for 1 byte of fingerprint

    uint32_t m_dist_and_fingerprint; // upper 3 byte: distance to original bucket. lower byte: fingerprint from hash
    size_t m_value_idx;              // index into the m_values vector.
});

} // namespace bucket_type
    435 
namespace detail {

// Placeholder "no such type" result for the detection idiom below.
struct nonesuch {};

// Detection idiom (same shape as std::experimental::is_detected): the primary
// template is chosen when Op<Args...> is ill-formed.
template <class Default, class AlwaysVoid, template <class...> class Op, class... Args>
struct detector {
    using value_t = std::false_type;
    using type = Default;
};

// Chosen when Op<Args...> is well-formed.
template <class Default, template <class...> class Op, class... Args>
struct detector<Default, std::void_t<Op<Args...>>, Op, Args...> {
    using value_t = std::true_type;
    using type = Op<Args...>;
};

template <template <class...> class Op, class... Args>
using is_detected = typename detail::detector<detail::nonesuch, void, Op, Args...>::value_t;

template <template <class...> class Op, class... Args>
constexpr bool is_detected_v = is_detected<Op, Args...>::value;

// Probe expressions used with the detector above.
template <typename T>
using detect_avalanching = typename T::is_avalanching;

template <typename T>
using detect_is_transparent = typename T::is_transparent;

template <typename T>
using detect_iterator = typename T::iterator;

template <typename T>
using detect_reserve = decltype(std::declval<T&>().reserve(size_t{}));

// enable_if helpers

// A void mapped type marks the container as a set; anything else is a map.
template <typename Mapped>
constexpr bool is_map_v = !std::is_void_v<Mapped>;

// Heterogeneous lookup requires BOTH hash and key_equal to opt in.
// clang-format off
template <typename Hash, typename KeyEqual>
constexpr bool is_transparent_v = is_detected_v<detect_is_transparent, Hash> && is_detected_v<detect_is_transparent, KeyEqual>;
// clang-format on

template <typename From, typename To1, typename To2>
constexpr bool is_neither_convertible_v = !std::is_convertible_v<From, To1> && !std::is_convertible_v<From, To2>;

template <typename T>
constexpr bool has_reserve = is_detected_v<detect_reserve, T>;

// base type for map has mapped_type
template <class T>
struct base_table_type_map {
    using mapped_type = T;
};

// base type for set doesn't have mapped_type
struct base_table_type_set {};

} // namespace detail
    496 
    497 // Very much like std::deque, but faster for indexing (in most cases). As of now this doesn't implement the full std::vector
    498 // API, but merely what's necessary to work as an underlying container for ankerl::unordered_dense::{map, set}.
    499 // It allocates blocks of equal size and puts them into the m_blocks vector. That means it can grow simply by adding a new
    500 // block to the back of m_blocks, and doesn't double its size like an std::vector. The disadvantage is that memory is not
    501 // linear and thus there is one more indirection necessary for indexing.
    502 template <typename T, typename Allocator = std::allocator<T>, size_t MaxSegmentSizeBytes = 4096>
    503 class segmented_vector {
    504     template <bool IsConst>
    505     class iter_t;
    506 
    507 public:
    508     using allocator_type = Allocator;
    509     using pointer = typename std::allocator_traits<allocator_type>::pointer;
    510     using const_pointer = typename std::allocator_traits<allocator_type>::const_pointer;
    511     using difference_type = typename std::allocator_traits<allocator_type>::difference_type;
    512     using value_type = T;
    513     using size_type = std::size_t;
    514     using reference = T&;
    515     using const_reference = T const&;
    516     using iterator = iter_t<false>;
    517     using const_iterator = iter_t<true>;
    518 
    519 private:
    520     using vec_alloc = typename std::allocator_traits<Allocator>::template rebind_alloc<pointer>;
    521     std::vector<pointer, vec_alloc> m_blocks{};
    522     size_t m_size{};
    523 
    524     // Calculates the maximum number for x in  (s << x) <= max_val
    525     static constexpr auto num_bits_closest(size_t max_val, size_t s) -> size_t {
    526         auto f = size_t{0};
    527         while (s << (f + 1) <= max_val) {
    528             ++f;
    529         }
    530         return f;
    531     }
    532 
    533     using self_t = segmented_vector<T, Allocator, MaxSegmentSizeBytes>;
    534     static constexpr auto num_bits = num_bits_closest(MaxSegmentSizeBytes, sizeof(T));
    535     static constexpr auto num_elements_in_block = 1U << num_bits;
    536     static constexpr auto mask = num_elements_in_block - 1U;
    537 
    /**
     * Iterator class doubles as const_iterator and iterator (selected by the
     * IsConst template parameter). It stores a pointer to the block table plus
     * a flat element index; the block and in-block offset are recomputed on
     * each dereference from num_bits/mask.
     */
    template <bool IsConst>
    class iter_t {
        using ptr_t = typename std::conditional_t<IsConst, segmented_vector::const_pointer const*, segmented_vector::pointer*>;
        ptr_t m_data{};   // pointer into the m_blocks array of block pointers
        size_t m_idx{};   // flat element index across all blocks

        // Each instantiation can access the other's privates (for conversion
        // and mixed-constness comparison).
        template <bool B>
        friend class iter_t;

    public:
        using difference_type = segmented_vector::difference_type;
        using value_type = T;
        using reference = typename std::conditional_t<IsConst, value_type const&, value_type&>;
        using pointer = typename std::conditional_t<IsConst, segmented_vector::const_pointer, segmented_vector::pointer>;
        using iterator_category = std::forward_iterator_tag;

        iter_t() noexcept = default;

        // Implicit iterator -> const_iterator conversion; SFINAE-restricted so
        // the reverse direction does not exist.
        template <bool OtherIsConst, typename = typename std::enable_if<IsConst && !OtherIsConst>::type>
        // NOLINTNEXTLINE(google-explicit-constructor,hicpp-explicit-conversions)
        constexpr iter_t(iter_t<OtherIsConst> const& other) noexcept
            : m_data(other.m_data)
            , m_idx(other.m_idx) {}

        constexpr iter_t(ptr_t data, size_t idx) noexcept
            : m_data(data)
            , m_idx(idx) {}

        // Assignment from the non-const iterator (same restriction as above).
        template <bool OtherIsConst, typename = typename std::enable_if<IsConst && !OtherIsConst>::type>
        constexpr auto operator=(iter_t<OtherIsConst> const& other) noexcept -> iter_t& {
            m_data = other.m_data;
            m_idx = other.m_idx;
            return *this;
        }

        constexpr auto operator++() noexcept -> iter_t& {
            ++m_idx;
            return *this;
        }

        constexpr auto operator+(difference_type diff) noexcept -> iter_t {
            return {m_data, static_cast<size_t>(static_cast<difference_type>(m_idx) + diff)};
        }

        // Distance in elements; valid for iterators into the same container.
        template <bool OtherIsConst>
        constexpr auto operator-(iter_t<OtherIsConst> const& other) noexcept -> difference_type {
            return static_cast<difference_type>(m_idx) - static_cast<difference_type>(other.m_idx);
        }

        // Dereference: upper bits of the index select the block, lower bits
        // the slot within the block.
        constexpr auto operator*() const noexcept -> reference {
            return m_data[m_idx >> num_bits][m_idx & mask];
        }

        constexpr auto operator->() const noexcept -> pointer {
            return &m_data[m_idx >> num_bits][m_idx & mask];
        }

        // NOTE(review): comparison looks only at the index, not at m_data —
        // only meaningful for iterators into the same container.
        template <bool O>
        constexpr auto operator==(iter_t<O> const& o) const noexcept -> bool {
            return m_idx == o.m_idx;
        }

        template <bool O>
        constexpr auto operator!=(iter_t<O> const& o) const noexcept -> bool {
            return !(*this == o);
        }
    };
    608 
    // slow path: need to allocate a new segment every once in a while.
    // Allocates one block of raw (uninitialized) storage for
    // num_elements_in_block elements and records it; elements are constructed
    // later, as they are emplaced.
    void increase_capacity() {
        auto ba = Allocator(m_blocks.get_allocator());
        pointer block = std::allocator_traits<Allocator>::allocate(ba, num_elements_in_block);
        m_blocks.push_back(block);
    }
    615 
    // Moves everything from other into *this (appending). Note: other's
    // elements are left in a moved-from state but remain in other — the caller
    // is expected to clear/destroy other afterwards.
    void append_everything_from(segmented_vector&& other) {
        reserve(size() + other.size());
        for (auto&& o : other) {
            emplace_back(std::move(o));
        }
    }

    // Copies everything from other into *this (appending).
    void append_everything_from(segmented_vector const& other) {
        reserve(size() + other.size());
        for (auto const& o : other) {
            emplace_back(o);
        }
    }
    631 
    // Returns all block storage to the allocator. Assumes the contained
    // elements have already been destroyed (callers pair this with clear()).
    // NOTE(review): m_blocks is left holding the now-dangling pointers; callers
    // must not index through them afterwards.
    void dealloc() {
        auto ba = Allocator(m_blocks.get_allocator());
        for (auto ptr : m_blocks) {
            std::allocator_traits<Allocator>::deallocate(ba, ptr, num_elements_in_block);
        }
    }

    // Ceiling division: number of fixed-size blocks needed to hold `capacity`
    // elements.
    [[nodiscard]] static constexpr auto calc_num_blocks_for_capacity(size_t capacity) {
        return (capacity + num_elements_in_block - 1U) / num_elements_in_block;
    }
    642 
    643 public:
    segmented_vector() = default;

    // Allocator-only constructor; implicit by design (mirrors std containers).
    // NOLINTNEXTLINE(google-explicit-constructor,hicpp-explicit-conversions)
    segmented_vector(Allocator alloc)
        : m_blocks(vec_alloc(alloc)) {}

    // Allocator-extended move: delegates to the allocator ctor, then reuses
    // move-assignment (which handles the equal/unequal allocator cases).
    segmented_vector(segmented_vector&& other, Allocator alloc)
        : segmented_vector(alloc) {
        *this = std::move(other);
    }

    // Allocator-extended copy.
    segmented_vector(segmented_vector const& other, Allocator alloc)
        : m_blocks(vec_alloc(alloc)) {
        append_everything_from(other);
    }

    // NOTE(review): this delegating ctor calls get_allocator() on *this before
    // any member has been initialized — verify get_allocator() does not read
    // member state at this point.
    segmented_vector(segmented_vector&& other) noexcept
        : segmented_vector(std::move(other), get_allocator()) {}

    segmented_vector(segmented_vector const& other) {
        append_everything_from(other);
    }
    666 
    667     auto operator=(segmented_vector const& other) -> segmented_vector& {
    668         if (this == &other) {
    669             return *this;
    670         }
    671         clear();
    672         append_everything_from(other);
    673         return *this;
    674     }
    675 
    // Move assignment. When allocators compare equal we steal other's block
    // pointers wholesale; otherwise every element must be moved individually
    // into blocks allocated with other's allocator.
    auto operator=(segmented_vector&& other) noexcept -> segmented_vector& {
        clear();   // destroy our elements ...
        dealloc(); // ... and free our blocks before taking over other's state
        if (other.get_allocator() == get_allocator()) {
            m_blocks = std::move(other.m_blocks);
            m_size = std::exchange(other.m_size, {});
        } else {
            // make sure to construct with other's allocator!
            m_blocks = std::vector<pointer, vec_alloc>(vec_alloc(other.get_allocator()));
            append_everything_from(std::move(other));
        }
        return *this;
    }
    689 
    // Destroys all elements, then frees all blocks.
    ~segmented_vector() {
        clear();
        dealloc();
    }

    // Number of elements currently stored.
    [[nodiscard]] constexpr auto size() const -> size_t {
        return m_size;
    }

    // Total element slots available without further allocation.
    [[nodiscard]] constexpr auto capacity() const -> size_t {
        return m_blocks.size() * num_elements_in_block;
    }
    702 
    // Indexing is highly performance critical
    // High bits of i select the block, low bits the offset within it; the
    // bit arithmetic implies the block size is a power of two
    // (num_elements_in_block == 1 << num_bits, mask == num_elements_in_block - 1).
    [[nodiscard]] constexpr auto operator[](size_t i) const noexcept -> T const& {
        return m_blocks[i >> num_bits][i & mask];
    }

    [[nodiscard]] constexpr auto operator[](size_t i) noexcept -> T& {
        return m_blocks[i >> num_bits][i & mask];
    }
    711 
    // Iterators carry (pointer to the block-pointer array, element index);
    // end() is simply index m_size.
    [[nodiscard]] constexpr auto begin() -> iterator {
        return {m_blocks.data(), 0U};
    }
    [[nodiscard]] constexpr auto begin() const -> const_iterator {
        return {m_blocks.data(), 0U};
    }
    [[nodiscard]] constexpr auto cbegin() const -> const_iterator {
        return {m_blocks.data(), 0U};
    }

    [[nodiscard]] constexpr auto end() -> iterator {
        return {m_blocks.data(), m_size};
    }
    [[nodiscard]] constexpr auto end() const -> const_iterator {
        return {m_blocks.data(), m_size};
    }
    [[nodiscard]] constexpr auto cend() const -> const_iterator {
        return {m_blocks.data(), m_size};
    }
    731 
    // Access the last element. Precondition: !empty().
    [[nodiscard]] constexpr auto back() -> reference {
        return operator[](m_size - 1);
    }
    [[nodiscard]] constexpr auto back() const -> const_reference {
        return operator[](m_size - 1);
    }

    // Destroy the last element. Precondition: !empty(). Capacity is unchanged.
    void pop_back() {
        back().~T();
        --m_size;
    }

    [[nodiscard]] auto empty() const {
        return 0 == m_size;
    }
    747 
    748     void reserve(size_t new_capacity) {
    749         m_blocks.reserve(calc_num_blocks_for_capacity(new_capacity));
    750         while (new_capacity > capacity()) {
    751             increase_capacity();
    752         }
    753     }
    754 
    // The element allocator, rebuilt from the block-pointer vector's allocator.
    [[nodiscard]] auto get_allocator() const -> allocator_type {
        return allocator_type{m_blocks.get_allocator()};
    }
    758 
    759     template <class... Args>
    760     auto emplace_back(Args&&... args) -> reference {
    761         if (m_size == capacity()) {
    762             increase_capacity();
    763         }
    764         auto* ptr = static_cast<void*>(&operator[](m_size));
    765         auto& ref = *new (ptr) T(std::forward<Args>(args)...);
    766         ++m_size;
    767         return ref;
    768     }
    769 
    770     void clear() {
    771         if constexpr (!std::is_trivially_destructible_v<T>) {
    772             for (size_t i = 0, s = size(); i < s; ++i) {
    773                 operator[](i).~T();
    774             }
    775         }
    776         m_size = 0;
    777     }
    778 
    // Free trailing blocks that are no longer needed for the current size,
    // then shrink the block-pointer vector itself.
    void shrink_to_fit() {
        auto ba = Allocator(m_blocks.get_allocator());
        auto num_blocks_required = calc_num_blocks_for_capacity(m_size);
        while (m_blocks.size() > num_blocks_required) {
            std::allocator_traits<Allocator>::deallocate(ba, m_blocks.back(), num_elements_in_block);
            m_blocks.pop_back();
        }
        m_blocks.shrink_to_fit();
    }
    788 };
    789 
namespace detail {

// This is it, the table. Doubles as map and set, and uses `void` for T when its used as a set.
template <class Key,
          class T, // when void, treat it as a set.
          class Hash,
          class KeyEqual,
          class AllocatorOrContainer,
          class Bucket,
          bool IsSegmented>
class table : public std::conditional_t<is_map_v<T>, base_table_type_map<T>, base_table_type_set> {
    using underlying_value_type = typename std::conditional_t<is_map_v<T>, std::pair<Key, T>, Key>;
    using underlying_container_type = std::conditional_t<IsSegmented,
                                                         segmented_vector<underlying_value_type, AllocatorOrContainer>,
                                                         std::vector<underlying_value_type, AllocatorOrContainer>>;

public:
    // AllocatorOrContainer may itself be a container (detected by it having an
    // iterator type); otherwise it is treated as the allocator for the default
    // dense container chosen above.
    using value_container_type = std::
        conditional_t<is_detected_v<detect_iterator, AllocatorOrContainer>, AllocatorOrContainer, underlying_container_type>;

private:
    using bucket_alloc =
        typename std::allocator_traits<typename value_container_type::allocator_type>::template rebind_alloc<Bucket>;
    using bucket_alloc_traits = std::allocator_traits<bucket_alloc>;

    static constexpr uint8_t initial_shifts = 64 - 2; // 2^(64-m_shift) number of buckets
    static constexpr float default_max_load_factor = 0.8F;

public:
    using key_type = Key;
    using value_type = typename value_container_type::value_type;
    using size_type = typename value_container_type::size_type;
    using difference_type = typename value_container_type::difference_type;
    using hasher = Hash;
    using key_equal = KeyEqual;
    using allocator_type = typename value_container_type::allocator_type;
    using reference = typename value_container_type::reference;
    using const_reference = typename value_container_type::const_reference;
    using pointer = typename value_container_type::pointer;
    using const_pointer = typename value_container_type::const_pointer;
    using const_iterator = typename value_container_type::const_iterator;
    using iterator = std::conditional_t<is_map_v<T>, typename value_container_type::iterator, const_iterator>;
    using bucket_type = Bucket;

private:
    using value_idx_type = decltype(Bucket::m_value_idx);
    using dist_and_fingerprint_type = decltype(Bucket::m_dist_and_fingerprint);

    static_assert(std::is_trivially_destructible_v<Bucket>, "assert there's no need to call destructor / std::destroy");
    static_assert(std::is_trivially_copyable_v<Bucket>, "assert we can just memset / memcpy");

    value_container_type m_values{}; // Contains all the key-value pairs in one densely stored container. No holes.
    using bucket_pointer = typename std::allocator_traits<bucket_alloc>::pointer;
    bucket_pointer m_buckets{};      // flat array of m_num_buckets bucket metadata entries
    size_t m_num_buckets = 0;
    size_t m_max_bucket_capacity = 0; // element count at which the table must grow
    float m_max_load_factor = default_max_load_factor;
    Hash m_hash{};
    KeyEqual m_equal{};
    uint8_t m_shifts = initial_shifts;
    850 
    851     [[nodiscard]] auto next(value_idx_type bucket_idx) const -> value_idx_type {
    852         return ANKERL_UNORDERED_DENSE_UNLIKELY(bucket_idx + 1U == m_num_buckets)
    853                    ? 0
    854                    : static_cast<value_idx_type>(bucket_idx + 1U);
    855     }
    856 
    // Helper to access bucket through pointer types
    [[nodiscard]] static constexpr auto at(bucket_pointer bucket_ptr, size_t offset) -> Bucket& {
        return *(bucket_ptr + static_cast<typename std::allocator_traits<bucket_alloc>::difference_type>(offset));
    }

    // use the dist_inc and dist_dec functions so that uint16_t types work without warning
    // Adds one probe-distance step (Bucket::dist_inc is the distance increment).
    [[nodiscard]] static constexpr auto dist_inc(dist_and_fingerprint_type x) -> dist_and_fingerprint_type {
        return static_cast<dist_and_fingerprint_type>(x + Bucket::dist_inc);
    }

    // Removes one probe-distance step.
    [[nodiscard]] static constexpr auto dist_dec(dist_and_fingerprint_type x) -> dist_and_fingerprint_type {
        return static_cast<dist_and_fingerprint_type>(x - Bucket::dist_inc);
    }
    870 
    // The goal of mixed_hash is to always produce a high quality 64bit hash.
    template <typename K>
    [[nodiscard]] constexpr auto mixed_hash(K const& key) const -> uint64_t {
        if constexpr (is_detected_v<detect_avalanching, Hash>) {
            // we know that the hash is good because is_avalanching.
            if constexpr (sizeof(decltype(m_hash(key))) < sizeof(uint64_t)) {
                // 32bit hash and is_avalanching => multiply with a constant to avalanche bits upwards
                return m_hash(key) * UINT64_C(0x9ddfea08eb382d69);
            } else {
                // 64bit and is_avalanching => only use the hash itself.
                return m_hash(key);
            }
        } else {
            // not is_avalanching => apply wyhash
            return wyhash::hash(m_hash(key));
        }
    }

    // Low bits of the hash become the fingerprint; the distance field starts
    // at one increment (0 marks an empty bucket).
    [[nodiscard]] constexpr auto dist_and_fingerprint_from_hash(uint64_t hash) const -> dist_and_fingerprint_type {
        return Bucket::dist_inc | (static_cast<dist_and_fingerprint_type>(hash) & Bucket::fingerprint_mask);
    }

    // High bits of the hash select the home bucket (see initial_shifts).
    [[nodiscard]] constexpr auto bucket_idx_from_hash(uint64_t hash) const -> value_idx_type {
        return static_cast<value_idx_type>(hash >> m_shifts);
    }
    896 
    // For a map the key is the pair's first member; for a set the value is the key.
    [[nodiscard]] static constexpr auto get_key(value_type const& vt) -> key_type const& {
        if constexpr (is_map_v<T>) {
            return vt.first;
        } else {
            return vt;
        }
    }
    904 
    // Probe from the key's home bucket, skipping buckets whose stored
    // dist_and_fingerprint is larger (i.e. entries that are "richer" and may
    // not be displaced). Returns the first candidate bucket for this key.
    template <typename K>
    [[nodiscard]] auto next_while_less(K const& key) const -> Bucket {
        auto hash = mixed_hash(key);
        auto dist_and_fingerprint = dist_and_fingerprint_from_hash(hash);
        auto bucket_idx = bucket_idx_from_hash(hash);

        while (dist_and_fingerprint < at(m_buckets, bucket_idx).m_dist_and_fingerprint) {
            dist_and_fingerprint = dist_inc(dist_and_fingerprint);
            bucket_idx = next(bucket_idx);
        }
        return {dist_and_fingerprint, bucket_idx};
    }

    // Robin-hood insertion: place `bucket` at `place`, displacing occupants
    // forward (each displaced entry's distance grows by one) until an empty
    // bucket (dist_and_fingerprint == 0) is found.
    void place_and_shift_up(Bucket bucket, value_idx_type place) {
        while (0 != at(m_buckets, place).m_dist_and_fingerprint) {
            bucket = std::exchange(at(m_buckets, place), bucket);
            bucket.m_dist_and_fingerprint = dist_inc(bucket.m_dist_and_fingerprint);
            place = next(place);
        }
        at(m_buckets, place) = bucket;
    }
    926 
    // 2^(64 - shifts) buckets, capped at the maximum representable count.
    [[nodiscard]] static constexpr auto calc_num_buckets(uint8_t shifts) -> size_t {
        return (std::min)(max_bucket_count(), size_t{1} << (64U - shifts));
    }

    // Smallest shift (i.e. largest bucket count) whose bucket count times the
    // load factor still fits `s` elements.
    [[nodiscard]] constexpr auto calc_shifts_for_size(size_t s) const -> uint8_t {
        auto shifts = initial_shifts;
        while (shifts > 0 && static_cast<size_t>(static_cast<float>(calc_num_buckets(shifts)) * max_load_factor()) < s) {
            --shifts;
        }
        return shifts;
    }
    938 
    // assumes m_values has data, m_buckets=m_buckets_end=nullptr, m_shifts is INITIAL_SHIFTS
    void copy_buckets(table const& other) {
        // assumes m_values has already the correct data copied over.
        if (empty()) {
            // when empty, at least allocate an initial buckets and clear them.
            allocate_buckets_from_shift();
            clear_buckets();
        } else {
            // adopt other's shift so both tables have identical bucket layout,
            // which makes a straight memcpy of the bucket metadata valid
            // (Bucket is statically asserted trivially copyable).
            m_shifts = other.m_shifts;
            allocate_buckets_from_shift();
            std::memcpy(m_buckets, other.m_buckets, sizeof(Bucket) * bucket_count());
        }
    }

    /**
     * True when no element can be added any more without increasing the size
     */
    [[nodiscard]] auto is_full() const -> bool {
        return size() > m_max_bucket_capacity;
    }
    959 
    // Free the bucket array (if any) and reset the capacity bookkeeping.
    void deallocate_buckets() {
        auto ba = bucket_alloc(m_values.get_allocator());
        if (nullptr != m_buckets) {
            bucket_alloc_traits::deallocate(ba, m_buckets, bucket_count());
            m_buckets = nullptr;
        }
        m_num_buckets = 0;
        m_max_bucket_capacity = 0;
    }

    // Allocate the bucket array sized from m_shifts; buckets are NOT zeroed
    // here -- callers follow up with clear_buckets() or a memcpy.
    void allocate_buckets_from_shift() {
        auto ba = bucket_alloc(m_values.get_allocator());
        m_num_buckets = calc_num_buckets(m_shifts);
        m_buckets = bucket_alloc_traits::allocate(ba, m_num_buckets);
        if (m_num_buckets == max_bucket_count()) {
            // reached the maximum, make sure we can use each bucket
            m_max_bucket_capacity = max_bucket_count();
        } else {
            m_max_bucket_capacity = static_cast<value_idx_type>(static_cast<float>(m_num_buckets) * max_load_factor());
        }
    }

    // Zero all buckets (0 dist_and_fingerprint == empty); valid because Bucket
    // is statically asserted trivially copyable.
    void clear_buckets() {
        if (m_buckets != nullptr) {
            std::memset(&*m_buckets, 0, sizeof(Bucket) * bucket_count());
        }
    }
    987 
    // Rebuild the bucket metadata from scratch out of the dense value storage
    // (used after rehash/grow; the values themselves never move here).
    void clear_and_fill_buckets_from_values() {
        clear_buckets();
        for (value_idx_type value_idx = 0, end_idx = static_cast<value_idx_type>(m_values.size()); value_idx < end_idx;
             ++value_idx) {
            auto const& key = get_key(m_values[value_idx]);
            auto [dist_and_fingerprint, bucket] = next_while_less(key);

            // we know for certain that key has not yet been inserted, so no need to check it.
            place_and_shift_up({dist_and_fingerprint, value_idx}, bucket);
        }
    }

    // Double the bucket count (one less shift) and rebuild. Called after the
    // new value was already appended to m_values; on overflow the value is
    // removed again before reporting the error.
    void increase_size() {
        if (m_max_bucket_capacity == max_bucket_count()) {
            // remove the value again, we can't add it!
            m_values.pop_back();
            on_error_bucket_overflow();
        }
        --m_shifts;
        deallocate_buckets();
        allocate_buckets_from_shift();
        clear_and_fill_buckets_from_values();
    }
   1011 
    // Erase the entry referenced by bucket_idx using backward shift deletion,
    // then keep m_values dense by moving the last value into the freed slot.
    // handle_erased_value receives the removed value (moved out).
    template <typename Op>
    void do_erase(value_idx_type bucket_idx, Op handle_erased_value) {
        auto const value_idx_to_remove = at(m_buckets, bucket_idx).m_value_idx;

        // shift down until either empty or an element with correct spot is found
        auto next_bucket_idx = next(bucket_idx);
        while (at(m_buckets, next_bucket_idx).m_dist_and_fingerprint >= Bucket::dist_inc * 2) {
            at(m_buckets, bucket_idx) = {dist_dec(at(m_buckets, next_bucket_idx).m_dist_and_fingerprint),
                                         at(m_buckets, next_bucket_idx).m_value_idx};
            bucket_idx = std::exchange(next_bucket_idx, next(next_bucket_idx));
        }
        at(m_buckets, bucket_idx) = {};
        handle_erased_value(std::move(m_values[value_idx_to_remove]));

        // update m_values
        if (value_idx_to_remove != m_values.size() - 1) {
            // no luck, we'll have to replace the value with the last one and update the index accordingly
            auto& val = m_values[value_idx_to_remove];
            val = std::move(m_values.back());

            // update the values_idx of the moved entry. No need to play the info game, just look until we find the values_idx
            auto mh = mixed_hash(get_key(val));
            bucket_idx = bucket_idx_from_hash(mh);

            auto const values_idx_back = static_cast<value_idx_type>(m_values.size() - 1);
            while (values_idx_back != at(m_buckets, bucket_idx).m_value_idx) {
                bucket_idx = next(bucket_idx);
            }
            at(m_buckets, bucket_idx).m_value_idx = value_idx_to_remove;
        }
        m_values.pop_back();
    }
   1044 
    // Find `key` and erase it. Returns the number of elements removed (0 or 1).
    template <typename K, typename Op>
    auto do_erase_key(K&& key, Op handle_erased_value) -> size_t {
        if (empty()) {
            return 0;
        }

        auto [dist_and_fingerprint, bucket_idx] = next_while_less(key);

        // scan the run of buckets with the same dist_and_fingerprint until the
        // key matches or the fingerprint no longer does
        while (dist_and_fingerprint == at(m_buckets, bucket_idx).m_dist_and_fingerprint &&
               !m_equal(key, get_key(m_values[at(m_buckets, bucket_idx).m_value_idx]))) {
            dist_and_fingerprint = dist_inc(dist_and_fingerprint);
            bucket_idx = next(bucket_idx);
        }

        if (dist_and_fingerprint != at(m_buckets, bucket_idx).m_dist_and_fingerprint) {
            return 0;
        }
        do_erase(bucket_idx, handle_erased_value);
        return 1;
    }
   1065 
   1066     template <class K, class M>
   1067     auto do_insert_or_assign(K&& key, M&& mapped) -> std::pair<iterator, bool> {
   1068         auto it_isinserted = try_emplace(std::forward<K>(key), std::forward<M>(mapped));
   1069         if (!it_isinserted.second) {
   1070             it_isinserted.first->second = std::forward<M>(mapped);
   1071         }
   1072         return it_isinserted;
   1073     }
   1074 
    // Append a newly constructed value and wire up its bucket. Precondition:
    // the key is known to be absent.
    template <typename... Args>
    auto do_place_element(dist_and_fingerprint_type dist_and_fingerprint, value_idx_type bucket_idx, Args&&... args)
        -> std::pair<iterator, bool> {

        // emplace the new value. If that throws an exception, no harm done; index is still in a valid state
        m_values.emplace_back(std::forward<Args>(args)...);

        auto value_idx = static_cast<value_idx_type>(m_values.size() - 1);
        if (ANKERL_UNORDERED_DENSE_UNLIKELY(is_full())) {
            // grow rehashes everything, which also places the new element
            increase_size();
        } else {
            place_and_shift_up({dist_and_fingerprint, value_idx}, bucket_idx);
        }

        // place element and shift up until we find an empty spot
        return {begin() + static_cast<difference_type>(value_idx), true};
    }
   1092 
    // Core try_emplace: probe for the key; on a fingerprint+equality match
    // return the existing element, otherwise insert at the first bucket whose
    // occupant is "richer" (robin-hood invariant).
    template <typename K, typename... Args>
    auto do_try_emplace(K&& key, Args&&... args) -> std::pair<iterator, bool> {
        auto hash = mixed_hash(key);
        auto dist_and_fingerprint = dist_and_fingerprint_from_hash(hash);
        auto bucket_idx = bucket_idx_from_hash(hash);

        while (true) {
            auto* bucket = &at(m_buckets, bucket_idx);
            if (dist_and_fingerprint == bucket->m_dist_and_fingerprint) {
                if (m_equal(key, get_key(m_values[bucket->m_value_idx]))) {
                    return {begin() + static_cast<difference_type>(bucket->m_value_idx), false};
                }
            } else if (dist_and_fingerprint > bucket->m_dist_and_fingerprint) {
                return do_place_element(dist_and_fingerprint,
                                        bucket_idx,
                                        std::piecewise_construct,
                                        std::forward_as_tuple(std::forward<K>(key)),
                                        std::forward_as_tuple(std::forward<Args>(args)...));
            }
            dist_and_fingerprint = dist_inc(dist_and_fingerprint);
            bucket_idx = next(bucket_idx);
        }
    }
   1116 
    // Lookup. The first two probes are deliberately unrolled before entering
    // the general loop -- keep this shape, it is a measured optimization.
    template <typename K>
    auto do_find(K const& key) -> iterator {
        if (ANKERL_UNORDERED_DENSE_UNLIKELY(empty())) {
            return end();
        }

        auto mh = mixed_hash(key);
        auto dist_and_fingerprint = dist_and_fingerprint_from_hash(mh);
        auto bucket_idx = bucket_idx_from_hash(mh);
        auto* bucket = &at(m_buckets, bucket_idx);

        // unrolled loop. *Always* check a few directly, then enter the loop. This is faster.
        if (dist_and_fingerprint == bucket->m_dist_and_fingerprint && m_equal(key, get_key(m_values[bucket->m_value_idx]))) {
            return begin() + static_cast<difference_type>(bucket->m_value_idx);
        }
        dist_and_fingerprint = dist_inc(dist_and_fingerprint);
        bucket_idx = next(bucket_idx);
        bucket = &at(m_buckets, bucket_idx);

        if (dist_and_fingerprint == bucket->m_dist_and_fingerprint && m_equal(key, get_key(m_values[bucket->m_value_idx]))) {
            return begin() + static_cast<difference_type>(bucket->m_value_idx);
        }
        dist_and_fingerprint = dist_inc(dist_and_fingerprint);
        bucket_idx = next(bucket_idx);
        bucket = &at(m_buckets, bucket_idx);

        while (true) {
            if (dist_and_fingerprint == bucket->m_dist_and_fingerprint) {
                if (m_equal(key, get_key(m_values[bucket->m_value_idx]))) {
                    return begin() + static_cast<difference_type>(bucket->m_value_idx);
                }
            } else if (dist_and_fingerprint > bucket->m_dist_and_fingerprint) {
                // passed a richer entry: the key cannot be further along
                return end();
            }
            dist_and_fingerprint = dist_inc(dist_and_fingerprint);
            bucket_idx = next(bucket_idx);
            bucket = &at(m_buckets, bucket_idx);
        }
    }

    // Const lookup forwards to the non-const version; the cast is safe because
    // do_find does not modify the table.
    template <typename K>
    auto do_find(K const& key) const -> const_iterator {
        return const_cast<table*>(this)->do_find(key); // NOLINT(cppcoreguidelines-pro-type-const-cast)
    }
   1161 
    // Map-only checked access; reports an error when the key is missing.
    // NOTE(review): there is no return after on_error_key_not_found(), so it
    // is presumably [[noreturn]] (throws or aborts) -- confirm at its definition.
    template <typename K, typename Q = T, std::enable_if_t<is_map_v<Q>, bool> = true>
    auto do_at(K const& key) -> Q& {
        if (auto it = find(key); ANKERL_UNORDERED_DENSE_LIKELY(end() != it)) {
            return it->second;
        }
        on_error_key_not_found();
    }

    template <typename K, typename Q = T, std::enable_if_t<is_map_v<Q>, bool> = true>
    auto do_at(K const& key) const -> Q const& {
        return const_cast<table*>(this)->at(key); // NOLINT(cppcoreguidelines-pro-type-const-cast)
    }
   1174 
public:
    // Primary constructor; every other constructor funnels into this one.
    // bucket_count == 0 means "allocate the minimum initial bucket array".
    explicit table(size_t bucket_count,
                   Hash const& hash = Hash(),
                   KeyEqual const& equal = KeyEqual(),
                   allocator_type const& alloc_or_container = allocator_type())
        : m_values(alloc_or_container)
        , m_hash(hash)
        , m_equal(equal) {
        if (0 != bucket_count) {
            reserve(bucket_count);
        } else {
            allocate_buckets_from_shift();
            clear_buckets();
        }
    }

    table()
        : table(0) {}

    table(size_t bucket_count, allocator_type const& alloc)
        : table(bucket_count, Hash(), KeyEqual(), alloc) {}

    table(size_t bucket_count, Hash const& hash, allocator_type const& alloc)
        : table(bucket_count, hash, KeyEqual(), alloc) {}

    explicit table(allocator_type const& alloc)
        : table(0, Hash(), KeyEqual(), alloc) {}

    // Range constructors: build the empty table, then insert [first, last).
    template <class InputIt>
    table(InputIt first,
          InputIt last,
          size_type bucket_count = 0,
          Hash const& hash = Hash(),
          KeyEqual const& equal = KeyEqual(),
          allocator_type const& alloc = allocator_type())
        : table(bucket_count, hash, equal, alloc) {
        insert(first, last);
    }

    template <class InputIt>
    table(InputIt first, InputIt last, size_type bucket_count, allocator_type const& alloc)
        : table(first, last, bucket_count, Hash(), KeyEqual(), alloc) {}

    template <class InputIt>
    table(InputIt first, InputIt last, size_type bucket_count, Hash const& hash, allocator_type const& alloc)
        : table(first, last, bucket_count, hash, KeyEqual(), alloc) {}
   1221 
    table(table const& other)
        : table(other, other.m_values.get_allocator()) {}

    // Allocator-extended copy: copy the values, then clone the bucket metadata.
    table(table const& other, allocator_type const& alloc)
        : m_values(other.m_values, alloc)
        , m_max_load_factor(other.m_max_load_factor)
        , m_hash(other.m_hash)
        , m_equal(other.m_equal) {
        copy_buckets(other);
    }

    // Move construction: note this deliberately reads *other's* allocator for
    // the delegation (reading our own members here would be premature).
    table(table&& other) noexcept
        : table(std::move(other), other.m_values.get_allocator()) {}

    table(table&& other, allocator_type const& alloc) noexcept
        : m_values(alloc) {
        *this = std::move(other);
    }

    table(std::initializer_list<value_type> ilist,
          size_t bucket_count = 0,
          Hash const& hash = Hash(),
          KeyEqual const& equal = KeyEqual(),
          allocator_type const& alloc = allocator_type())
        : table(bucket_count, hash, equal, alloc) {
        insert(ilist);
    }

    table(std::initializer_list<value_type> ilist, size_type bucket_count, allocator_type const& alloc)
        : table(ilist, bucket_count, Hash(), KeyEqual(), alloc) {}

    table(std::initializer_list<value_type> init, size_type bucket_count, Hash const& hash, allocator_type const& alloc)
        : table(init, bucket_count, hash, KeyEqual(), alloc) {}
   1255 
    // m_values destroys the elements; only the bucket array needs freeing here.
    ~table() {
        if (nullptr != m_buckets) {
            auto ba = bucket_alloc(m_values.get_allocator());
            bucket_alloc_traits::deallocate(ba, m_buckets, bucket_count());
        }
    }
   1262 
    // Copy assignment: copy values and settings, then rebuild buckets from
    // other's layout.
    auto operator=(table const& other) -> table& {
        if (&other != this) {
            deallocate_buckets(); // deallocate before m_values is set (might have another allocator)
            m_values = other.m_values;
            m_max_load_factor = other.m_max_load_factor;
            m_hash = other.m_hash;
            m_equal = other.m_equal;
            m_shifts = initial_shifts; // copy_buckets() expects the initial shift
            copy_buckets(other);
        }
        return *this;
    }
   1275 
    // Move assignment. Bucket storage can only be stolen when both tables use
    // the same allocator; otherwise the bucket metadata is copied. Either way
    // `other` is left empty but fully usable.
    // NOTE(review): noexcept(noexcept(<bool expr>)) applies the noexcept
    // operator to a non-throwing bool expression, so this is effectively
    // noexcept(true); presumably noexcept(A && B && C) was intended -- confirm
    // before changing, since that would alter the declared exception spec.
    auto operator=(table&& other) noexcept(noexcept(std::is_nothrow_move_assignable_v<value_container_type> &&
                                                    std::is_nothrow_move_assignable_v<Hash> &&
                                                    std::is_nothrow_move_assignable_v<KeyEqual>)) -> table& {
        if (&other != this) {
            deallocate_buckets(); // deallocate before m_values is set (might have another allocator)
            m_values = std::move(other.m_values);
            other.m_values.clear();

            // we can only reuse m_buckets when both maps have the same allocator!
            if (get_allocator() == other.get_allocator()) {
                m_buckets = std::exchange(other.m_buckets, nullptr);
                m_num_buckets = std::exchange(other.m_num_buckets, 0);
                m_max_bucket_capacity = std::exchange(other.m_max_bucket_capacity, 0);
                m_shifts = std::exchange(other.m_shifts, initial_shifts);
                m_max_load_factor = std::exchange(other.m_max_load_factor, default_max_load_factor);
                m_hash = std::exchange(other.m_hash, {});
                m_equal = std::exchange(other.m_equal, {});
                other.allocate_buckets_from_shift();
                other.clear_buckets();
            } else {
                // set max_load_factor *before* copying the other's buckets, so we have the same
                // behavior
                m_max_load_factor = other.m_max_load_factor;

                // copy_buckets sets m_buckets, m_num_buckets, m_max_bucket_capacity, m_shifts
                copy_buckets(other);
                // clear's the other's buckets so other is now already usable.
                other.clear_buckets();
                m_hash = other.m_hash;
                m_equal = other.m_equal;
            }
            // map "other" is now already usable, it's empty.
        }
        return *this;
    }
   1311 
    // Replace all contents with the initializer list's elements.
    auto operator=(std::initializer_list<value_type> ilist) -> table& {
        clear();
        insert(ilist);
        return *this;
    }

    auto get_allocator() const noexcept -> allocator_type {
        return m_values.get_allocator();
    }
   1321 
    // iterators //////////////////////////////////////////////////////////////
    // Iteration order is the dense insertion-order storage of m_values.

    auto begin() noexcept -> iterator {
        return m_values.begin();
    }

    auto begin() const noexcept -> const_iterator {
        return m_values.begin();
    }

    auto cbegin() const noexcept -> const_iterator {
        return m_values.cbegin();
    }

    auto end() noexcept -> iterator {
        return m_values.end();
    }

    auto cend() const noexcept -> const_iterator {
        return m_values.cend();
    }

    auto end() const noexcept -> const_iterator {
        return m_values.end();
    }
   1347 
    // capacity ///////////////////////////////////////////////////////////////

    [[nodiscard]] auto empty() const noexcept -> bool {
        return m_values.empty();
    }

    [[nodiscard]] auto size() const noexcept -> size_t {
        return m_values.size();
    }

    // Largest representable element count, bounded by the width of the bucket's
    // value-index field (one bit is sacrificed when it is as wide as size_t).
    [[nodiscard]] static constexpr auto max_size() noexcept -> size_t {
        if constexpr ((std::numeric_limits<value_idx_type>::max)() == (std::numeric_limits<size_t>::max)()) {
            return size_t{1} << (sizeof(value_idx_type) * 8 - 1);
        } else {
            return size_t{1} << (sizeof(value_idx_type) * 8);
        }
    }
   1365 
    // modifiers //////////////////////////////////////////////////////////////

    // Removes all elements: empties the value container and resets every
    // bucket to its empty state. Bucket storage itself is kept allocated.
    void clear() {
        m_values.clear();
        clear_buckets();
    }
   1372 
    // All insert() overloads forward to emplace(), which performs the actual
    // hash lookup and bucket placement. Hint iterators are accepted for
    // std::unordered_map API compatibility but are ignored.

    auto insert(value_type const& value) -> std::pair<iterator, bool> {
        return emplace(value);
    }

    auto insert(value_type&& value) -> std::pair<iterator, bool> {
        return emplace(std::move(value));
    }

    // Accepts anything from which a value_type can be constructed.
    template <class P, std::enable_if_t<std::is_constructible_v<value_type, P&&>, bool> = true>
    auto insert(P&& value) -> std::pair<iterator, bool> {
        return emplace(std::forward<P>(value));
    }

    auto insert(const_iterator /*hint*/, value_type const& value) -> iterator {
        return insert(value).first;
    }

    auto insert(const_iterator /*hint*/, value_type&& value) -> iterator {
        return insert(std::move(value)).first;
    }

    template <class P, std::enable_if_t<std::is_constructible_v<value_type, P&&>, bool> = true>
    auto insert(const_iterator /*hint*/, P&& value) -> iterator {
        return insert(std::forward<P>(value)).first;
    }

    // Inserts each element of [first, last); duplicates are ignored by the
    // single-element insert.
    template <class InputIt>
    void insert(InputIt first, InputIt last) {
        while (first != last) {
            insert(*first);
            ++first;
        }
    }

    void insert(std::initializer_list<value_type> ilist) {
        insert(ilist.begin(), ilist.end());
    }
   1410 
    // nonstandard API: *this is emptied.
    // Moves out the underlying value container; the table is left without its
    // values (buckets still reference the old indices) and should only be
    // reused after clear()/assignment.
    // Also see "A Standard flat_map" https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2022/p0429r9.pdf
    auto extract() && -> value_container_type {
        return std::move(m_values);
    }
   1416 
    // nonstandard API:
    // Discards the internally held container and replaces it with the one passed. Erases non-unique elements.
    auto replace(value_container_type&& container) {
        if (ANKERL_UNORDERED_DENSE_UNLIKELY(container.size() > max_size())) {
            on_error_too_many_elements();
        }
        // Reallocate the bucket array when it is missing, too small for the new
        // element count, or when the incoming allocator differs.
        auto shifts = calc_shifts_for_size(container.size());
        if (0 == m_num_buckets || shifts < m_shifts || container.get_allocator() != m_values.get_allocator()) {
            m_shifts = shifts;
            deallocate_buckets();
            allocate_buckets_from_shift();
        }
        clear_buckets();

        m_values = std::move(container);

        // can't use clear_and_fill_buckets_from_values() because container elements might not be unique
        auto value_idx = value_idx_type{};

        // loop until we reach the end of the container. duplicated entries will be replaced with back().
        while (value_idx != static_cast<value_idx_type>(m_values.size())) {
            auto const& key = get_key(m_values[value_idx]);

            auto hash = mixed_hash(key);
            auto dist_and_fingerprint = dist_and_fingerprint_from_hash(hash);
            auto bucket_idx = bucket_idx_from_hash(hash);

            // Robin-hood probe: walk the chain while our distance/fingerprint
            // is not larger than the resident bucket's.
            bool key_found = false;
            while (true) {
                auto const& bucket = at(m_buckets, bucket_idx);
                if (dist_and_fingerprint > bucket.m_dist_and_fingerprint) {
                    break;
                }
                if (dist_and_fingerprint == bucket.m_dist_and_fingerprint &&
                    m_equal(key, get_key(m_values[bucket.m_value_idx]))) {
                    key_found = true;
                    break;
                }
                dist_and_fingerprint = dist_inc(dist_and_fingerprint);
                bucket_idx = next(bucket_idx);
            }

            if (key_found) {
                // Duplicate key: overwrite this slot with the last element and
                // shrink; value_idx is NOT advanced so the moved-in element is
                // processed on the next iteration.
                if (value_idx != static_cast<value_idx_type>(m_values.size() - 1)) {
                    m_values[value_idx] = std::move(m_values.back());
                }
                m_values.pop_back();
            } else {
                place_and_shift_up({dist_and_fingerprint, value_idx}, bucket_idx);
                ++value_idx;
            }
        }
    }
   1470 
    // insert_or_assign: insert key->mapped, or assign mapped to the existing
    // entry. All overloads are map-only (is_map_v) and forward to
    // do_insert_or_assign; the K&& variants additionally require a transparent
    // hasher and comparator. Hint iterators are accepted but ignored.

    template <class M, typename Q = T, std::enable_if_t<is_map_v<Q>, bool> = true>
    auto insert_or_assign(Key const& key, M&& mapped) -> std::pair<iterator, bool> {
        return do_insert_or_assign(key, std::forward<M>(mapped));
    }

    template <class M, typename Q = T, std::enable_if_t<is_map_v<Q>, bool> = true>
    auto insert_or_assign(Key&& key, M&& mapped) -> std::pair<iterator, bool> {
        return do_insert_or_assign(std::move(key), std::forward<M>(mapped));
    }

    template <typename K,
              typename M,
              typename Q = T,
              typename H = Hash,
              typename KE = KeyEqual,
              std::enable_if_t<is_map_v<Q> && is_transparent_v<H, KE>, bool> = true>
    auto insert_or_assign(K&& key, M&& mapped) -> std::pair<iterator, bool> {
        return do_insert_or_assign(std::forward<K>(key), std::forward<M>(mapped));
    }

    template <class M, typename Q = T, std::enable_if_t<is_map_v<Q>, bool> = true>
    auto insert_or_assign(const_iterator /*hint*/, Key const& key, M&& mapped) -> iterator {
        return do_insert_or_assign(key, std::forward<M>(mapped)).first;
    }

    template <class M, typename Q = T, std::enable_if_t<is_map_v<Q>, bool> = true>
    auto insert_or_assign(const_iterator /*hint*/, Key&& key, M&& mapped) -> iterator {
        return do_insert_or_assign(std::move(key), std::forward<M>(mapped)).first;
    }

    template <typename K,
              typename M,
              typename Q = T,
              typename H = Hash,
              typename KE = KeyEqual,
              std::enable_if_t<is_map_v<Q> && is_transparent_v<H, KE>, bool> = true>
    auto insert_or_assign(const_iterator /*hint*/, K&& key, M&& mapped) -> iterator {
        return do_insert_or_assign(std::forward<K>(key), std::forward<M>(mapped)).first;
    }
   1510 
    // Single arguments for unordered_set can be used without having to construct the value_type
    // (requires a transparent hasher/comparator). Unlike the generic emplace()
    // below, this probes the buckets first and only materializes the element
    // if the key is absent.
    template <class K,
              typename Q = T,
              typename H = Hash,
              typename KE = KeyEqual,
              std::enable_if_t<!is_map_v<Q> && is_transparent_v<H, KE>, bool> = true>
    auto emplace(K&& key) -> std::pair<iterator, bool> {
        auto hash = mixed_hash(key);
        auto dist_and_fingerprint = dist_and_fingerprint_from_hash(hash);
        auto bucket_idx = bucket_idx_from_hash(hash);

        // Robin-hood probe: keep walking while our distance/fingerprint does
        // not exceed the resident bucket's.
        while (dist_and_fingerprint <= at(m_buckets, bucket_idx).m_dist_and_fingerprint) {
            if (dist_and_fingerprint == at(m_buckets, bucket_idx).m_dist_and_fingerprint &&
                m_equal(key, m_values[at(m_buckets, bucket_idx).m_value_idx])) {
                // found it, return without ever actually creating anything
                return {begin() + static_cast<difference_type>(at(m_buckets, bucket_idx).m_value_idx), false};
            }
            dist_and_fingerprint = dist_inc(dist_and_fingerprint);
            bucket_idx = next(bucket_idx);
        }

        // value is new, insert element first, so when exception happens we are in a valid state
        return do_place_element(dist_and_fingerprint, bucket_idx, std::forward<K>(key));
    }
   1535 
    // Generic emplace. Constructs the element eagerly (the key is only
    // reachable from a constructed value_type), then removes it again if the
    // key turns out to already exist.
    template <class... Args>
    auto emplace(Args&&... args) -> std::pair<iterator, bool> {
        // we have to instantiate the value_type to be able to access the key.
        // 1. emplace_back the object so it is constructed. 2. If the key is already there, pop it later in the loop.
        auto& key = get_key(m_values.emplace_back(std::forward<Args>(args)...));
        auto hash = mixed_hash(key);
        auto dist_and_fingerprint = dist_and_fingerprint_from_hash(hash);
        auto bucket_idx = bucket_idx_from_hash(hash);

        while (dist_and_fingerprint <= at(m_buckets, bucket_idx).m_dist_and_fingerprint) {
            if (dist_and_fingerprint == at(m_buckets, bucket_idx).m_dist_and_fingerprint &&
                m_equal(key, get_key(m_values[at(m_buckets, bucket_idx).m_value_idx]))) {
                m_values.pop_back(); // value was already there, so get rid of it
                return {begin() + static_cast<difference_type>(at(m_buckets, bucket_idx).m_value_idx), false};
            }
            dist_and_fingerprint = dist_inc(dist_and_fingerprint);
            bucket_idx = next(bucket_idx);
        }

        // value is new, place the bucket and shift up until we find an empty spot
        auto value_idx = static_cast<value_idx_type>(m_values.size() - 1);
        if (ANKERL_UNORDERED_DENSE_UNLIKELY(is_full())) {
            // increase_size just rehashes all the data we have in m_values
            increase_size();
        } else {
            // place element and shift up until we find an empty spot
            place_and_shift_up({dist_and_fingerprint, value_idx}, bucket_idx);
        }
        return {begin() + static_cast<difference_type>(value_idx), true};
    }
   1566 
    // emplace_hint / try_emplace. try_emplace constructs the mapped value only
    // if the key is not already present (all overloads forward to
    // do_try_emplace); hint iterators are accepted but ignored. The K&&
    // variants require a transparent hasher/comparator, and are disabled for
    // iterator arguments so they don't shadow the hint overloads.

    template <class... Args>
    auto emplace_hint(const_iterator /*hint*/, Args&&... args) -> iterator {
        return emplace(std::forward<Args>(args)...).first;
    }

    template <class... Args, typename Q = T, std::enable_if_t<is_map_v<Q>, bool> = true>
    auto try_emplace(Key const& key, Args&&... args) -> std::pair<iterator, bool> {
        return do_try_emplace(key, std::forward<Args>(args)...);
    }

    template <class... Args, typename Q = T, std::enable_if_t<is_map_v<Q>, bool> = true>
    auto try_emplace(Key&& key, Args&&... args) -> std::pair<iterator, bool> {
        return do_try_emplace(std::move(key), std::forward<Args>(args)...);
    }

    template <class... Args, typename Q = T, std::enable_if_t<is_map_v<Q>, bool> = true>
    auto try_emplace(const_iterator /*hint*/, Key const& key, Args&&... args) -> iterator {
        return do_try_emplace(key, std::forward<Args>(args)...).first;
    }

    template <class... Args, typename Q = T, std::enable_if_t<is_map_v<Q>, bool> = true>
    auto try_emplace(const_iterator /*hint*/, Key&& key, Args&&... args) -> iterator {
        return do_try_emplace(std::move(key), std::forward<Args>(args)...).first;
    }

    template <
        typename K,
        typename... Args,
        typename Q = T,
        typename H = Hash,
        typename KE = KeyEqual,
        std::enable_if_t<is_map_v<Q> && is_transparent_v<H, KE> && is_neither_convertible_v<K&&, iterator, const_iterator>,
                         bool> = true>
    auto try_emplace(K&& key, Args&&... args) -> std::pair<iterator, bool> {
        return do_try_emplace(std::forward<K>(key), std::forward<Args>(args)...);
    }

    template <
        typename K,
        typename... Args,
        typename Q = T,
        typename H = Hash,
        typename KE = KeyEqual,
        std::enable_if_t<is_map_v<Q> && is_transparent_v<H, KE> && is_neither_convertible_v<K&&, iterator, const_iterator>,
                         bool> = true>
    auto try_emplace(const_iterator /*hint*/, K&& key, Args&&... args) -> iterator {
        return do_try_emplace(std::forward<K>(key), std::forward<Args>(args)...).first;
    }
   1615 
    // Erases the element at `it`. Finds the bucket that references the
    // element's index (the probe is guaranteed to terminate because the
    // element is present), then lets do_erase do the backward-shift deletion.
    // Returns an iterator at the same position, which now refers to the
    // element moved in from the back (or end()).
    auto erase(iterator it) -> iterator {
        auto hash = mixed_hash(get_key(*it));
        auto bucket_idx = bucket_idx_from_hash(hash);

        auto const value_idx_to_remove = static_cast<value_idx_type>(it - cbegin());
        // walk the probe chain until we hit the bucket pointing at our element
        while (at(m_buckets, bucket_idx).m_value_idx != value_idx_to_remove) {
            bucket_idx = next(bucket_idx);
        }

        // discard the removed value
        do_erase(bucket_idx, [](value_type&& /*unused*/) {
        });
        return begin() + static_cast<difference_type>(value_idx_to_remove);
    }
   1629 
    // Removes the element at `it` and returns it by value. Same bucket search
    // as erase(iterator), but the erase callback moves the value out before
    // the slot is reused.
    auto extract(iterator it) -> value_type {
        auto hash = mixed_hash(get_key(*it));
        auto bucket_idx = bucket_idx_from_hash(hash);

        auto const value_idx_to_remove = static_cast<value_idx_type>(it - cbegin());
        while (at(m_buckets, bucket_idx).m_value_idx != value_idx_to_remove) {
            bucket_idx = next(bucket_idx);
        }

        // capture the removed element before do_erase recycles its slot
        auto tmp = std::optional<value_type>{};
        do_erase(bucket_idx, [&tmp](value_type&& val) {
            tmp = std::move(val);
        });
        return std::move(tmp).value();
    }
   1645 
   1646     template <typename Q = T, std::enable_if_t<is_map_v<Q>, bool> = true>
   1647     auto erase(const_iterator it) -> iterator {
   1648         return erase(begin() + (it - cbegin()));
   1649     }
   1650 
   1651     template <typename Q = T, std::enable_if_t<is_map_v<Q>, bool> = true>
   1652     auto extract(const_iterator it) -> value_type {
   1653         return extract(begin() + (it - cbegin()));
   1654     }
   1655 
    // Erases [first, last). Because single-element erase() fills the hole with
    // the back() element, a naive left-to-right loop could pull not-yet-erased
    // elements of the range into already-processed positions. So:
    auto erase(const_iterator first, const_iterator last) -> iterator {
        auto const idx_first = first - cbegin();
        auto const idx_last = last - cbegin();
        auto const first_to_last = std::distance(first, last);
        auto const last_to_end = std::distance(last, cend());

        // remove elements from left to right which moves elements from the end back
        // (safe up to `mid`, where the elements moved in come from beyond `last`)
        auto const mid = idx_first + (std::min)(first_to_last, last_to_end);
        auto idx = idx_first;
        while (idx != mid) {
            erase(begin() + idx);
            ++idx;
        }

        // all elements from the right are moved, now remove the last element until all done
        idx = idx_last;
        while (idx != mid) {
            --idx;
            erase(begin() + idx);
        }

        return begin() + idx_first;
    }
   1679 
    // Key-based erase/extract. erase() returns the number of elements removed
    // (0 or 1, as keys are unique); extract() returns the removed element, or
    // an empty optional when the key was absent. The K&& overloads require a
    // transparent hasher/comparator. All forward to do_erase_key, passing a
    // callback that either discards or captures the removed value.

    auto erase(Key const& key) -> size_t {
        return do_erase_key(key, [](value_type&& /*unused*/) {
        });
    }

    auto extract(Key const& key) -> std::optional<value_type> {
        auto tmp = std::optional<value_type>{};
        do_erase_key(key, [&tmp](value_type&& val) {
            tmp = std::move(val);
        });
        return tmp;
    }

    template <class K, class H = Hash, class KE = KeyEqual, std::enable_if_t<is_transparent_v<H, KE>, bool> = true>
    auto erase(K&& key) -> size_t {
        return do_erase_key(std::forward<K>(key), [](value_type&& /*unused*/) {
        });
    }

    template <class K, class H = Hash, class KE = KeyEqual, std::enable_if_t<is_transparent_v<H, KE>, bool> = true>
    auto extract(K&& key) -> std::optional<value_type> {
        auto tmp = std::optional<value_type>{};
        do_erase_key(std::forward<K>(key), [&tmp](value_type&& val) {
            tmp = std::move(val);
        });
        return tmp;
    }
   1707 
   1708     void swap(table& other) noexcept(noexcept(std::is_nothrow_swappable_v<value_container_type> &&
   1709                                               std::is_nothrow_swappable_v<Hash> && std::is_nothrow_swappable_v<KeyEqual>)) {
   1710         using std::swap;
   1711         swap(other, *this);
   1712     }
   1713 
    // lookup /////////////////////////////////////////////////////////////////

    // at(): map-only checked access. All overloads forward to do_at; the K
    // overloads require a transparent hasher/comparator so lookup works
    // without constructing a key_type.

    template <typename Q = T, std::enable_if_t<is_map_v<Q>, bool> = true>
    auto at(key_type const& key) -> Q& {
        return do_at(key);
    }

    template <typename K,
              typename Q = T,
              typename H = Hash,
              typename KE = KeyEqual,
              std::enable_if_t<is_map_v<Q> && is_transparent_v<H, KE>, bool> = true>
    auto at(K const& key) -> Q& {
        return do_at(key);
    }

    template <typename Q = T, std::enable_if_t<is_map_v<Q>, bool> = true>
    auto at(key_type const& key) const -> Q const& {
        return do_at(key);
    }

    template <typename K,
              typename Q = T,
              typename H = Hash,
              typename KE = KeyEqual,
              std::enable_if_t<is_map_v<Q> && is_transparent_v<H, KE>, bool> = true>
    auto at(K const& key) const -> Q const& {
        return do_at(key);
    }
   1743 
    // operator[]: map-only. Inserts a default-constructed mapped value when
    // the key is absent (via try_emplace), then returns a reference to the
    // mapped value.

    template <typename Q = T, std::enable_if_t<is_map_v<Q>, bool> = true>
    auto operator[](Key const& key) -> Q& {
        return try_emplace(key).first->second;
    }

    template <typename Q = T, std::enable_if_t<is_map_v<Q>, bool> = true>
    auto operator[](Key&& key) -> Q& {
        return try_emplace(std::move(key)).first->second;
    }

    // Transparent-key variant: requires a transparent hasher/comparator.
    template <typename K,
              typename Q = T,
              typename H = Hash,
              typename KE = KeyEqual,
              std::enable_if_t<is_map_v<Q> && is_transparent_v<H, KE>, bool> = true>
    auto operator[](K&& key) -> Q& {
        return try_emplace(std::forward<K>(key)).first->second;
    }
   1762 
   1763     auto count(Key const& key) const -> size_t {
   1764         return find(key) == end() ? 0 : 1;
   1765     }
   1766 
   1767     template <class K, class H = Hash, class KE = KeyEqual, std::enable_if_t<is_transparent_v<H, KE>, bool> = true>
   1768     auto count(K const& key) const -> size_t {
   1769         return find(key) == end() ? 0 : 1;
   1770     }
   1771 
    // find()/contains(): forward to do_find; the K overloads require a
    // transparent hasher/comparator for heterogeneous lookup.

    auto find(Key const& key) -> iterator {
        return do_find(key);
    }

    auto find(Key const& key) const -> const_iterator {
        return do_find(key);
    }

    template <class K, class H = Hash, class KE = KeyEqual, std::enable_if_t<is_transparent_v<H, KE>, bool> = true>
    auto find(K const& key) -> iterator {
        return do_find(key);
    }

    template <class K, class H = Hash, class KE = KeyEqual, std::enable_if_t<is_transparent_v<H, KE>, bool> = true>
    auto find(K const& key) const -> const_iterator {
        return do_find(key);
    }

    auto contains(Key const& key) const -> bool {
        return find(key) != end();
    }

    template <class K, class H = Hash, class KE = KeyEqual, std::enable_if_t<is_transparent_v<H, KE>, bool> = true>
    auto contains(K const& key) const -> bool {
        return find(key) != end();
    }
   1798 
   1799     auto equal_range(Key const& key) -> std::pair<iterator, iterator> {
   1800         auto it = do_find(key);
   1801         return {it, it == end() ? end() : it + 1};
   1802     }
   1803 
   1804     auto equal_range(const Key& key) const -> std::pair<const_iterator, const_iterator> {
   1805         auto it = do_find(key);
   1806         return {it, it == end() ? end() : it + 1};
   1807     }
   1808 
   1809     template <class K, class H = Hash, class KE = KeyEqual, std::enable_if_t<is_transparent_v<H, KE>, bool> = true>
   1810     auto equal_range(K const& key) -> std::pair<iterator, iterator> {
   1811         auto it = do_find(key);
   1812         return {it, it == end() ? end() : it + 1};
   1813     }
   1814 
   1815     template <class K, class H = Hash, class KE = KeyEqual, std::enable_if_t<is_transparent_v<H, KE>, bool> = true>
   1816     auto equal_range(K const& key) const -> std::pair<const_iterator, const_iterator> {
   1817         auto it = do_find(key);
   1818         return {it, it == end() ? end() : it + 1};
   1819     }
   1820 
    // bucket interface ///////////////////////////////////////////////////////

    // Current number of buckets (0 before the first allocation).
    auto bucket_count() const noexcept -> size_t { // NOLINT(modernize-use-nodiscard)
        return m_num_buckets;
    }

    // Upper bound on the bucket count; same limit as max_size().
    static constexpr auto max_bucket_count() noexcept -> size_t { // NOLINT(modernize-use-nodiscard)
        return max_size();
    }
   1830 
    // hash policy ////////////////////////////////////////////////////////////

    // Current load factor (size / bucket_count), or 0 when no buckets exist.
    [[nodiscard]] auto load_factor() const -> float {
        return bucket_count() ? static_cast<float>(size()) / static_cast<float>(bucket_count()) : 0.0F;
    }

    [[nodiscard]] auto max_load_factor() const -> float {
        return m_max_load_factor;
    }

    // Sets the maximum load factor and recomputes the element count at which
    // the table grows. When the bucket array is already at its maximum size,
    // the capacity limit is left untouched so it stays at the hard cap.
    void max_load_factor(float ml) {
        m_max_load_factor = ml;
        if (m_num_buckets != max_bucket_count()) {
            m_max_bucket_capacity = static_cast<value_idx_type>(static_cast<float>(bucket_count()) * max_load_factor());
        }
    }
   1847 
    // Resizes the bucket array for at least `count` elements (never below the
    // current size, never above max_size()) and reinserts all elements. Also
    // shrinks the value container's capacity to fit. No-op when the computed
    // bucket-count shift is unchanged.
    void rehash(size_t count) {
        count = (std::min)(count, max_size());
        auto shifts = calc_shifts_for_size((std::max)(count, size()));
        if (shifts != m_shifts) {
            m_shifts = shifts;
            deallocate_buckets();
            m_values.shrink_to_fit();
            allocate_buckets_from_shift();
            clear_and_fill_buckets_from_values();
        }
    }
   1859 
    // Reserves room for at least `capa` elements: reserves the value container
    // (when it supports reserve()) and grows the bucket array if needed.
    // Unlike rehash(), this only ever enlarges the bucket array
    // (shifts < m_shifts means more buckets).
    void reserve(size_t capa) {
        capa = (std::min)(capa, max_size());
        if constexpr (has_reserve<value_container_type>) {
            // std::deque doesn't have reserve(). Make sure we only call when available
            m_values.reserve(capa);
        }
        auto shifts = calc_shifts_for_size((std::max)(capa, size()));
        if (0 == m_num_buckets || shifts < m_shifts) {
            m_shifts = shifts;
            deallocate_buckets();
            allocate_buckets_from_shift();
            clear_and_fill_buckets_from_values();
        }
    }
   1874 
    // observers //////////////////////////////////////////////////////////////

    // Returns a copy of the hash functor.
    auto hash_function() const -> hasher {
        return m_hash;
    }

    // Returns a copy of the key-equality functor.
    auto key_eq() const -> key_equal {
        return m_equal;
    }

    // nonstandard API: expose the underlying values container
    [[nodiscard]] auto values() const noexcept -> value_container_type const& {
        return m_values;
    }
   1889 
    // non-member functions ///////////////////////////////////////////////////

    // Order-independent equality: tables are equal when they contain the same
    // keys (and, for maps, equal mapped values). Compares element-wise via
    // lookup, so iteration order does not matter.
    friend auto operator==(table const& a, table const& b) -> bool {
        if (&a == &b) {
            return true;
        }
        if (a.size() != b.size()) {
            return false;
        }
        for (auto const& b_entry : b) {
            auto it = a.find(get_key(b_entry));
            if constexpr (is_map_v<T>) {
                // map: check that key is here, then also check that value is the same
                // (uses b_entry.second == it->second so only operator== is required of T)
                if (a.end() == it || !(b_entry.second == it->second)) {
                    return false;
                }
            } else {
                // set: only check that the key is here
                if (a.end() == it) {
                    return false;
                }
            }
        }
        return true;
    }

    friend auto operator!=(table const& a, table const& b) -> bool {
        return !(a == b);
    }
   1919 };
   1920 
   1921 } // namespace detail
   1922 
// Public alias templates. The last detail::table parameter selects the value
// container layout: false = contiguous (std::vector-like), true = segmented
// storage (stable addresses across growth).

ANKERL_UNORDERED_DENSE_EXPORT template <class Key,
                                        class T,
                                        class Hash = hash<Key>,
                                        class KeyEqual = std::equal_to<Key>,
                                        class AllocatorOrContainer = std::allocator<std::pair<Key, T>>,
                                        class Bucket = bucket_type::standard>
using map = detail::table<Key, T, Hash, KeyEqual, AllocatorOrContainer, Bucket, false>;

ANKERL_UNORDERED_DENSE_EXPORT template <class Key,
                                        class T,
                                        class Hash = hash<Key>,
                                        class KeyEqual = std::equal_to<Key>,
                                        class AllocatorOrContainer = std::allocator<std::pair<Key, T>>,
                                        class Bucket = bucket_type::standard>
using segmented_map = detail::table<Key, T, Hash, KeyEqual, AllocatorOrContainer, Bucket, true>;

// Sets use T = void, which detail::table uses to disable the map-only API.
ANKERL_UNORDERED_DENSE_EXPORT template <class Key,
                                        class Hash = hash<Key>,
                                        class KeyEqual = std::equal_to<Key>,
                                        class AllocatorOrContainer = std::allocator<Key>,
                                        class Bucket = bucket_type::standard>
using set = detail::table<Key, void, Hash, KeyEqual, AllocatorOrContainer, Bucket, false>;

ANKERL_UNORDERED_DENSE_EXPORT template <class Key,
                                        class Hash = hash<Key>,
                                        class KeyEqual = std::equal_to<Key>,
                                        class AllocatorOrContainer = std::allocator<Key>,
                                        class Bucket = bucket_type::standard>
using segmented_set = detail::table<Key, void, Hash, KeyEqual, AllocatorOrContainer, Bucket, true>;
   1952 
#    if defined(ANKERL_UNORDERED_DENSE_PMR)

// Polymorphic-allocator variants, only available when the library detected a
// usable pmr implementation (ANKERL_UNORDERED_DENSE_PMR names the namespace
// that provides polymorphic_allocator).
namespace pmr {

ANKERL_UNORDERED_DENSE_EXPORT template <class Key,
                                        class T,
                                        class Hash = hash<Key>,
                                        class KeyEqual = std::equal_to<Key>,
                                        class Bucket = bucket_type::standard>
using map =
    detail::table<Key, T, Hash, KeyEqual, ANKERL_UNORDERED_DENSE_PMR::polymorphic_allocator<std::pair<Key, T>>, Bucket, false>;

ANKERL_UNORDERED_DENSE_EXPORT template <class Key,
                                        class T,
                                        class Hash = hash<Key>,
                                        class KeyEqual = std::equal_to<Key>,
                                        class Bucket = bucket_type::standard>
using segmented_map =
    detail::table<Key, T, Hash, KeyEqual, ANKERL_UNORDERED_DENSE_PMR::polymorphic_allocator<std::pair<Key, T>>, Bucket, true>;

ANKERL_UNORDERED_DENSE_EXPORT template <class Key,
                                        class Hash = hash<Key>,
                                        class KeyEqual = std::equal_to<Key>,
                                        class Bucket = bucket_type::standard>
using set = detail::table<Key, void, Hash, KeyEqual, ANKERL_UNORDERED_DENSE_PMR::polymorphic_allocator<Key>, Bucket, false>;

ANKERL_UNORDERED_DENSE_EXPORT template <class Key,
                                        class Hash = hash<Key>,
                                        class KeyEqual = std::equal_to<Key>,
                                        class Bucket = bucket_type::standard>
using segmented_set =
    detail::table<Key, void, Hash, KeyEqual, ANKERL_UNORDERED_DENSE_PMR::polymorphic_allocator<Key>, Bucket, true>;

} // namespace pmr

#    endif
   1989 
   1990 // deduction guides ///////////////////////////////////////////////////////////
   1991 
   1992 // deduction guides for alias templates are only possible since C++20
   1993 // see https://en.cppreference.com/w/cpp/language/class_template_argument_deduction
   1994 
   1995 } // namespace ANKERL_UNORDERED_DENSE_NAMESPACE
   1996 } // namespace ankerl::unordered_dense
   1997 
   1998 // std extensions /////////////////////////////////////////////////////////////
   1999 
   2000 namespace std { // NOLINT(cert-dcl58-cpp)
   2001 
   2002 ANKERL_UNORDERED_DENSE_EXPORT template <class Key,
   2003                                         class T,
   2004                                         class Hash,
   2005                                         class KeyEqual,
   2006                                         class AllocatorOrContainer,
   2007                                         class Bucket,
   2008                                         class Pred,
   2009                                         bool IsSegmented>
   2010 // NOLINTNEXTLINE(cert-dcl58-cpp)
   2011 auto erase_if(ankerl::unordered_dense::detail::table<Key, T, Hash, KeyEqual, AllocatorOrContainer, Bucket, IsSegmented>& map,
   2012               Pred pred) -> size_t {
   2013     using map_t = ankerl::unordered_dense::detail::table<Key, T, Hash, KeyEqual, AllocatorOrContainer, Bucket, IsSegmented>;
   2014 
   2015     // going back to front because erase() invalidates the end iterator
   2016     auto const old_size = map.size();
   2017     auto idx = old_size;
   2018     while (idx) {
   2019         --idx;
   2020         auto it = map.begin() + static_cast<typename map_t::difference_type>(idx);
   2021         if (pred(*it)) {
   2022             map.erase(it);
   2023         }
   2024     }
   2025 
   2026     return old_size - map.size();
   2027 }
   2028 
   2029 } // namespace std
   2030 
   2031 #endif
   2032 #endif
   2033