//===-- llvm/ADT/Hashing.h - Utilities for hashing --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the newly proposed standard C++ interfaces for hashing
// arbitrary data and building hash functions for user-defined types. This
// interface was originally proposed in N3333[1] and is currently under review
// for inclusion in a future TR and/or standard.
//
// The primary interfaces provided comprise one type and three functions:
//
//  -- 'hash_code' class is an opaque type representing the hash code for some
//     data. It is the intended product of hashing, and can be used to implement
//     hash tables, checksumming, and other common uses of hashes. It is not an
//     integer type (although it can be converted to one) because it is risky
//     to assume much about the internals of a hash_code. In particular, each
//     execution of the program has a high probability of producing a different
//     hash_code for a given input. Thus their values are not stable enough to
//     be saved or persisted, and they should only be used during the execution
//     in which they were computed, for the construction of hashing data
//     structures.
//
//  -- 'hash_value' is a function designed to be overloaded for each
//     user-defined type which wishes to be used within a hashing context. It
//     should be overloaded within the user-defined type's namespace and found
//     via ADL. Overloads for primitive types are provided by this library.
//
//  -- 'hash_combine' and 'hash_combine_range' are functions designed to aid
//     programmers in easily and intuitively combining a set of data into
//     a single hash_code for their object. They should only logically be used
//     within the implementation of a 'hash_value' routine or similar context.
//
// Note that 'hash_combine_range' contains very special logic for hashing
// a contiguous array of integers or pointers. This logic is *extremely* fast:
// on a modern Intel "Gainestown" Xeon (Nehalem uarch) @2.2 GHz, it was
// benchmarked at over 6.5 GiB/s for large keys, and at under 20 cycles/hash
// for keys under 32 bytes.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_HASHING_H
#define LLVM_ADT_HASHING_H

#include "llvm/Support/DataTypes.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/SwapByteOrder.h"
#include "llvm/Support/type_traits.h"
#include <algorithm>
#include <cassert>
#include <cstring>
#include <optional>
#include <string>
#include <tuple>
#include <utility>

namespace llvm {
template <typename T, typename Enable> struct DenseMapInfo;

/// An opaque object representing a hash code.
///
/// This object represents the result of hashing some entity. It is intended to
/// be used to implement hashtables or other hashing-based data structures.
/// While it wraps and exposes a numeric value, this value should not be
/// trusted to be stable or predictable across processes or executions.
///
/// In order to obtain the hash_code for an object 'x':
/// \code
///   using llvm::hash_value;
///   llvm::hash_code code = hash_value(x);
/// \endcode
class hash_code {
  size_t value;

public:
  /// Default construct a hash_code.
  /// Note that this leaves the value uninitialized.
  hash_code() = default;

  /// Form a hash code directly from a numerical value.
  hash_code(size_t value) : value(value) {}

  /// Convert the hash code to its numerical value for use.
  /*explicit*/ operator size_t() const { return value; }

  friend bool operator==(const hash_code &lhs, const hash_code &rhs) {
    return lhs.value == rhs.value;
  }
  friend bool operator!=(const hash_code &lhs, const hash_code &rhs) {
    return lhs.value != rhs.value;
  }

  /// Allow a hash_code to be directly run through hash_value.
  friend size_t hash_value(const hash_code &code) { return code.value; }
};

/// Compute a hash_code for any integer value.
///
/// Note that this function is intended to compute the same hash_code for
/// a particular value without regard to the pre-promotion type. This is in
/// contrast to hash_combine which may produce different hash_codes for
/// differing argument types even if they would implicitly promote to a common
/// type without changing the value.
template <typename T>
std::enable_if_t<is_integral_or_enum<T>::value, hash_code> hash_value(T value);

/// Compute a hash_code for a pointer's address.
///
/// N.B.: This hashes the *address*. Not the value and not the type.
template <typename T> hash_code hash_value(const T *ptr);

/// Compute a hash_code for a pair of objects.
template <typename T, typename U>
hash_code hash_value(const std::pair<T, U> &arg);

/// Compute a hash_code for a tuple.
template <typename... Ts>
hash_code hash_value(const std::tuple<Ts...> &arg);

/// Compute a hash_code for a standard string.
template <typename T>
hash_code hash_value(const std::basic_string<T> &arg);

/// Compute a hash_code for a std::optional.
template <typename T> hash_code hash_value(const std::optional<T> &arg);

/// Override the execution seed with a fixed value.
///
/// This hashing library uses a per-execution seed designed to change on each
/// run with high probability in order to ensure that the hash codes are not
/// attackable and to ensure that output which is intended to be stable does
/// not rely on the particulars of the hash codes produced.
///
/// That said, there are use cases where it is important to be able to
/// reproduce *exactly* a specific behavior. To that end, we provide a function
/// which will forcibly set the seed to a fixed value. This must be done at the
/// start of the program, before any hashes are computed. Also, it cannot be
/// undone. This makes it thread-hostile and very hard to use outside of the
/// immediate start of a simple program designed for reproducible behavior.
void set_fixed_execution_hash_seed(uint64_t fixed_value);
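//
// Illustrative usage sketch (the seed value 42 is arbitrary): the override has
// to be installed at the very start of the program, before any hash_code has
// been computed, e.g. as the first statement of main().
// \code
//   int main(int argc, char **argv) {
//     llvm::set_fixed_execution_hash_seed(42); // reproducible hash_codes
//     // ... rest of the program ...
//   }
// \endcode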


// All of the implementation details of actually computing the various hash
// code values are held within this namespace. These routines are included in
// the header file mainly to allow inlining and constant propagation.
namespace hashing {
namespace detail {

/// Read a 64-bit value from the byte buffer 'p', byte-swapping on big-endian
/// hosts so the result matches the little-endian interpretation of the bytes.
/// The memcpy avoids unaligned-access issues.
inline uint64_t fetch64(const char *p) {
  uint64_t result;
  memcpy(&result, p, sizeof(result));
  if (sys::IsBigEndianHost)
    sys::swapByteOrder(result);
  return result;
}

/// Read a 32-bit value from the byte buffer 'p', byte-swapping on big-endian
/// hosts so the result matches the little-endian interpretation of the bytes.
inline uint32_t fetch32(const char *p) {
  uint32_t result;
  memcpy(&result, p, sizeof(result));
  if (sys::IsBigEndianHost)
    sys::swapByteOrder(result);
  return result;
}

/// Some primes between 2^63 and 2^64 for various uses.
static constexpr uint64_t k0 = 0xc3a5c85c97cb3127ULL;
static constexpr uint64_t k1 = 0xb492b66fbe98f273ULL;
static constexpr uint64_t k2 = 0x9ae16a3b2f90404fULL;
static constexpr uint64_t k3 = 0xc949d7c7509e6557ULL;

/// Bitwise right rotate.
/// Normally this will compile to a single instruction, especially if the
/// shift is a manifest constant.
inline uint64_t rotate(uint64_t val, size_t shift) {
  // Avoid shifting by 64: doing so yields an undefined result.
  return shift == 0 ? val : ((val >> shift) | (val << (64 - shift)));
}

inline uint64_t shift_mix(uint64_t val) {
  return val ^ (val >> 47);
}

inline uint64_t hash_16_bytes(uint64_t low, uint64_t high) {
  // Murmur-inspired hashing.
  const uint64_t kMul = 0x9ddfea08eb382d69ULL;
  uint64_t a = (low ^ high) * kMul;
  a ^= (a >> 47);
  uint64_t b = (high ^ a) * kMul;
  b ^= (b >> 47);
  b *= kMul;
  return b;
}

inline uint64_t hash_1to3_bytes(const char *s, size_t len, uint64_t seed) {
  uint8_t a = s[0];
  uint8_t b = s[len >> 1];
  uint8_t c = s[len - 1];
  uint32_t y = static_cast<uint32_t>(a) + (static_cast<uint32_t>(b) << 8);
  uint32_t z = static_cast<uint32_t>(len) + (static_cast<uint32_t>(c) << 2);
  return shift_mix(y * k2 ^ z * k3 ^ seed) * k2;
}

inline uint64_t hash_4to8_bytes(const char *s, size_t len, uint64_t seed) {
  uint64_t a = fetch32(s);
  return hash_16_bytes(len + (a << 3), seed ^ fetch32(s + len - 4));
}

inline uint64_t hash_9to16_bytes(const char *s, size_t len, uint64_t seed) {
  uint64_t a = fetch64(s);
  uint64_t b = fetch64(s + len - 8);
  return hash_16_bytes(seed ^ a, rotate(b + len, len)) ^ b;
}

inline uint64_t hash_17to32_bytes(const char *s, size_t len, uint64_t seed) {
  uint64_t a = fetch64(s) * k1;
  uint64_t b = fetch64(s + 8);
  uint64_t c = fetch64(s + len - 8) * k2;
  uint64_t d = fetch64(s + len - 16) * k0;
  return hash_16_bytes(rotate(a - b, 43) + rotate(c ^ seed, 30) + d,
                       a + rotate(b ^ k3, 20) - c + len + seed);
}

inline uint64_t hash_33to64_bytes(const char *s, size_t len, uint64_t seed) {
  uint64_t z = fetch64(s + 24);
  uint64_t a = fetch64(s) + (len + fetch64(s + len - 16)) * k0;
  uint64_t b = rotate(a + z, 52);
  uint64_t c = rotate(a, 37);
  a += fetch64(s + 8);
  c += rotate(a, 7);
  a += fetch64(s + 16);
  uint64_t vf = a + z;
  uint64_t vs = b + rotate(a, 31) + c;
  a = fetch64(s + 16) + fetch64(s + len - 32);
  z = fetch64(s + len - 8);
  b = rotate(a + z, 52);
  c = rotate(a, 37);
  a += fetch64(s + len - 24);
  c += rotate(a, 7);
  a += fetch64(s + len - 16);
  uint64_t wf = a + z;
  uint64_t ws = b + rotate(a, 31) + c;
  uint64_t r = shift_mix((vf + ws) * k2 + (wf + vs) * k0);
  return shift_mix((seed ^ (r * k0)) + vs) * k2;
}

/// Hash a short key (at most 64 bytes) by dispatching on its length.
inline uint64_t hash_short(const char *s, size_t length, uint64_t seed) {
  if (length >= 4 && length <= 8)
    return hash_4to8_bytes(s, length, seed);
  if (length > 8 && length <= 16)
    return hash_9to16_bytes(s, length, seed);
  if (length > 16 && length <= 32)
    return hash_17to32_bytes(s, length, seed);
  if (length > 32)
    return hash_33to64_bytes(s, length, seed);
  if (length != 0)
    return hash_1to3_bytes(s, length, seed);

  return k2 ^ seed;
}

/// The intermediate state used during hashing.
/// Currently, the algorithm for computing hash codes is based on CityHash and
/// keeps 56 bytes of arbitrary state.
struct hash_state {
  uint64_t h0 = 0, h1 = 0, h2 = 0, h3 = 0, h4 = 0, h5 = 0, h6 = 0;

  /// Create a new hash_state structure and initialize it based on the
  /// seed and the first 64-byte chunk.
  /// This effectively performs the initial mix.
  static hash_state create(const char *s, uint64_t seed) {
    hash_state state = {
      0, seed, hash_16_bytes(seed, k1), rotate(seed ^ k1, 49),
      seed * k1, shift_mix(seed), 0 };
    state.h6 = hash_16_bytes(state.h4, state.h5);
    state.mix(s);
    return state;
  }

  /// Mix 32 bytes from the input sequence into the 16 bytes of 'a'
  /// and 'b', including whatever is already in 'a' and 'b'.
  static void mix_32_bytes(const char *s, uint64_t &a, uint64_t &b) {
    a += fetch64(s);
    uint64_t c = fetch64(s + 24);
    b = rotate(b + a + c, 21);
    uint64_t d = a;
    a += fetch64(s + 8) + fetch64(s + 16);
    b += rotate(a, 44) + d;
    a += c;
  }

  /// Mix in a 64-byte buffer of data.
  /// We mix all 64 bytes even when the chunk length is smaller, but we
  /// record the actual length.
  void mix(const char *s) {
    h0 = rotate(h0 + h1 + h3 + fetch64(s + 8), 37) * k1;
    h1 = rotate(h1 + h4 + fetch64(s + 48), 42) * k1;
    h0 ^= h6;
    h1 += h3 + fetch64(s + 40);
    h2 = rotate(h2 + h5, 33) * k1;
    h3 = h4 * k1;
    h4 = h0 + h5;
    mix_32_bytes(s, h3, h4);
    h5 = h2 + h6;
    h6 = h1 + fetch64(s + 16);
    mix_32_bytes(s + 32, h5, h6);
    std::swap(h2, h0);
  }

  /// Compute the final 64-bit hash code value based on the current
  /// state and the length of bytes hashed.
  uint64_t finalize(size_t length) {
    return hash_16_bytes(hash_16_bytes(h3, h5) + shift_mix(h1) * k1 + h2,
                         hash_16_bytes(h4, h6) + shift_mix(length) * k1 + h0);
  }
};


/// A global, fixed seed-override variable.
///
/// This variable can be set using the \see llvm::set_fixed_execution_hash_seed
/// function. See that function for details. Do not, under any circumstances,
/// set or read this variable.
extern uint64_t fixed_seed_override;

inline uint64_t get_execution_seed() {
  // FIXME: This needs to be a per-execution seed. This is just a placeholder
  // implementation. Switching to a per-execution seed is likely to flush out
  // instability bugs and so will happen as its own commit.
  //
  // However, if there is a fixed seed override set the first time this is
  // called, return that instead of the per-execution seed.
  const uint64_t seed_prime = 0xff51afd7ed558ccdULL;
  static uint64_t seed = fixed_seed_override ? fixed_seed_override : seed_prime;
  return seed;
}


/// Trait to indicate whether a type's bits can be hashed directly.
///
/// A type trait which is true if we want to combine values for hashing by
/// reading the underlying data. It is false if values of this type must
/// first be passed to hash_value, and the resulting hash_codes combined.
//
// FIXME: We want to replace is_integral_or_enum and is_pointer here with
// a predicate which asserts that comparing the underlying storage of two
// values of the type for equality is equivalent to comparing the two values
// for equality. For all the platforms we care about, this holds for integers
// and pointers, but there are platforms where it doesn't and we would like to
// support user-defined types which happen to satisfy this property.
template <typename T> struct is_hashable_data
  : std::integral_constant<bool, ((is_integral_or_enum<T>::value ||
                                   std::is_pointer<T>::value) &&
                                  64 % sizeof(T) == 0)> {};

// Special case std::pair to detect when both types are viable and when there
// is no alignment-derived padding in the pair. This is a bit of a lie because
// std::pair isn't truly POD, but it's close enough in all reasonable
// implementations for our use case of hashing the underlying data.
template <typename T, typename U> struct is_hashable_data<std::pair<T, U> >
  : std::integral_constant<bool, (is_hashable_data<T>::value &&
                                  is_hashable_data<U>::value &&
                                  (sizeof(T) + sizeof(U)) ==
                                   sizeof(std::pair<T, U>))> {};
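//
// As a rough illustration of what the trait accepts (a sketch assuming a
// typical 64-bit target where these sizes hold):
// \code
//   static_assert(is_hashable_data<int>::value, "hashed from raw bytes");
//   static_assert(is_hashable_data<std::pair<int, int>>::value,
//                 "tightly packed pair of hashable types");
//   static_assert(!is_hashable_data<std::string>::value,
//                 "must go through hash_value instead");
// \endcode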

/// Helper to get the hashable data representation for a type.
/// This variant is enabled when the type itself can be used.
template <typename T>
std::enable_if_t<is_hashable_data<T>::value, T>
get_hashable_data(const T &value) {
  return value;
}
/// Helper to get the hashable data representation for a type.
/// This variant is enabled when we must first call hash_value and use the
/// result as our data.
template <typename T>
std::enable_if_t<!is_hashable_data<T>::value, size_t>
get_hashable_data(const T &value) {
  using ::llvm::hash_value;
  return hash_value(value);
}

/// Helper to store data from a value into a buffer and advance the
/// pointer into that buffer.
///
/// This routine first checks whether there is enough space in the provided
/// buffer, and if not immediately returns false. If there is space, it
/// copies the underlying bytes of value into the buffer, advances the
/// buffer_ptr past the copied bytes, and returns true.
template <typename T>
bool store_and_advance(char *&buffer_ptr, char *buffer_end, const T& value,
                       size_t offset = 0) {
  size_t store_size = sizeof(value) - offset;
  if (buffer_ptr + store_size > buffer_end)
    return false;
  const char *value_data = reinterpret_cast<const char *>(&value);
  memcpy(buffer_ptr, value_data + offset, store_size);
  buffer_ptr += store_size;
  return true;
}

/// Implement the combining of integral values into a hash_code.
///
/// This overload is selected when the value type of the iterator is
/// integral. Rather than computing a hash_code for each object and then
/// combining them, this (as an optimization) directly combines the integers.
template <typename InputIteratorT>
hash_code hash_combine_range_impl(InputIteratorT first, InputIteratorT last) {
  const uint64_t seed = get_execution_seed();
  char buffer[64], *buffer_ptr = buffer;
  char *const buffer_end = std::end(buffer);
  while (first != last && store_and_advance(buffer_ptr, buffer_end,
                                            get_hashable_data(*first)))
    ++first;
  if (first == last)
    return hash_short(buffer, buffer_ptr - buffer, seed);
  assert(buffer_ptr == buffer_end);

  hash_state state = hash_state::create(buffer, seed);
  size_t length = 64;
  while (first != last) {
    // Fill up the buffer. We don't clear it, which re-mixes the last round
    // when only a partial 64-byte chunk is left.
    buffer_ptr = buffer;
    while (first != last && store_and_advance(buffer_ptr, buffer_end,
                                              get_hashable_data(*first)))
      ++first;

    // Rotate the buffer if we did a partial fill in order to simulate doing
    // a mix of the last 64-bytes. That is how the algorithm works when we
    // have a contiguous byte sequence, and we want to emulate that here.
    std::rotate(buffer, buffer_ptr, buffer_end);

    // Mix this chunk into the current state.
    state.mix(buffer);
    length += buffer_ptr - buffer;
  }

  return state.finalize(length);
}

/// Implement the combining of integral values into a hash_code.
///
/// This overload is selected when the value type of the iterator is integral
/// and when the input iterator is actually a pointer. Rather than computing
/// a hash_code for each object and then combining them, this (as an
/// optimization) directly combines the integers. Also, because the integers
/// are stored in contiguous memory, this routine avoids copying each value
/// and directly reads from the underlying memory.
template <typename ValueT>
std::enable_if_t<is_hashable_data<ValueT>::value, hash_code>
hash_combine_range_impl(ValueT *first, ValueT *last) {
  const uint64_t seed = get_execution_seed();
  const char *s_begin = reinterpret_cast<const char *>(first);
  const char *s_end = reinterpret_cast<const char *>(last);
  const size_t length = std::distance(s_begin, s_end);
  if (length <= 64)
    return hash_short(s_begin, length, seed);

  const char *s_aligned_end = s_begin + (length & ~63);
  hash_state state = hash_state::create(s_begin, seed);
  s_begin += 64;
  while (s_begin != s_aligned_end) {
    state.mix(s_begin);
    s_begin += 64;
  }
  if (length & 63)
    state.mix(s_end - 64);

  return state.finalize(length);
}

} // namespace detail
} // namespace hashing


/// Compute a hash_code for a sequence of values.
///
/// This hashes a sequence of values. It produces the same hash_code as
/// 'hash_combine(a, b, c, ...)', but can run over arbitrarily sized sequences
/// and is significantly faster given pointers and types which can be hashed as
/// a sequence of bytes.
template <typename InputIteratorT>
hash_code hash_combine_range(InputIteratorT first, InputIteratorT last) {
  return ::llvm::hashing::detail::hash_combine_range_impl(first, last);
}
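//
// Illustrative example (a sketch, not normative documentation): hashing the
// contents of a std::vector<int>. Passing raw pointers lets the call take the
// optimized contiguous-memory path implemented above for hashable data.
// \code
//   std::vector<int> values = {1, 2, 3};
//   llvm::hash_code h =
//       llvm::hash_combine_range(values.data(), values.data() + values.size());
// \endcode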


// Implementation details for hash_combine.
namespace hashing {
namespace detail {

/// Helper class to manage the recursive combining of hash_combine
/// arguments.
///
/// This class exists to manage the state and various calls involved in the
/// recursive combining of arguments used in hash_combine. It is particularly
/// useful for minimizing the code in the recursive calls to ease the pain
/// caused by a lack of variadic functions.
struct hash_combine_recursive_helper {
  char buffer[64] = {};
  hash_state state;
  const uint64_t seed;

public:
  /// Construct a recursive hash combining helper.
  ///
  /// This sets up the state for a recursive hash combine, including getting
  /// the seed and buffer setup.
  hash_combine_recursive_helper()
    : seed(get_execution_seed()) {}

  /// Combine one chunk of data into the current in-flight hash.
  ///
  /// This merges one chunk of data into the hash. First it tries to buffer
  /// the data. If the buffer is full, it hashes the buffer into its
  /// hash_state, empties it, and then merges the new chunk in. This also
  /// handles cases where the data straddles the end of the buffer.
  template <typename T>
  char *combine_data(size_t &length, char *buffer_ptr, char *buffer_end, T data) {
    if (!store_and_advance(buffer_ptr, buffer_end, data)) {
      // Check for skew which prevents the buffer from being packed, and do
      // a partial store into the buffer to fill it. This is only a concern
      // with the variadic combine because that formation can have varying
      // argument types.
      size_t partial_store_size = buffer_end - buffer_ptr;
      memcpy(buffer_ptr, &data, partial_store_size);

      // If the store fails, our buffer is full and ready to hash. We have to
      // either initialize the hash state (on the first full buffer) or mix
      // this buffer into the existing hash state. Length tracks the *hashed*
      // length, not the buffered length.
      if (length == 0) {
        state = hash_state::create(buffer, seed);
        length = 64;
      } else {
        // Mix this chunk into the current state and bump length up by 64.
        state.mix(buffer);
        length += 64;
      }
      // Reset the buffer_ptr to the head of the buffer for the next chunk of
      // data.
      buffer_ptr = buffer;

      // Try again to store into the buffer -- this cannot fail as we only
      // store types smaller than the buffer.
      if (!store_and_advance(buffer_ptr, buffer_end, data,
                             partial_store_size))
        llvm_unreachable("buffer smaller than stored type");
    }
    return buffer_ptr;
  }

  /// Recursive, variadic combining method.
  ///
  /// This function recurses through each argument, combining that argument
  /// into a single hash.
  template <typename T, typename ...Ts>
  hash_code combine(size_t length, char *buffer_ptr, char *buffer_end,
                    const T &arg, const Ts &...args) {
    buffer_ptr = combine_data(length, buffer_ptr, buffer_end, get_hashable_data(arg));

    // Recurse to the next argument.
    return combine(length, buffer_ptr, buffer_end, args...);
  }

  /// Base case for recursive, variadic combining.
  ///
  /// The base case when combining arguments recursively is reached when all
  /// arguments have been handled. It flushes the remaining buffer and
  /// constructs a hash_code.
  hash_code combine(size_t length, char *buffer_ptr, char *buffer_end) {
    // Check whether the entire set of values fits in the buffer. If so, we'll
    // use the optimized short hashing routine and skip state entirely.
    if (length == 0)
      return hash_short(buffer, buffer_ptr - buffer, seed);

    // Mix the final buffer, rotating it if we did a partial fill in order to
    // simulate doing a mix of the last 64-bytes. That is how the algorithm
    // works when we have a contiguous byte sequence, and we want to emulate
    // that here.
    std::rotate(buffer, buffer_ptr, buffer_end);

    // Mix this chunk into the current state.
    state.mix(buffer);
    length += buffer_ptr - buffer;

    return state.finalize(length);
  }
};

} // namespace detail
} // namespace hashing

/// Combine values into a single hash_code.
///
/// This routine accepts a varying number of arguments of any type. It will
/// attempt to combine them into a single hash_code. For user-defined types it
/// attempts to call a \see hash_value overload (via ADL) for the type. For
/// integer and pointer types it directly combines their data into the
/// resulting hash_code.
///
/// The result is suitable for returning from a user's hash_value
/// *implementation* for their user-defined type. Consumers of a type should
/// *not* call this routine; they should instead call 'hash_value'.
template <typename ...Ts> hash_code hash_combine(const Ts &...args) {
  // Recursively hash each argument using a helper class.
  ::llvm::hashing::detail::hash_combine_recursive_helper helper;
  return helper.combine(0, helper.buffer, helper.buffer + 64, args...);
}
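//
// Illustrative sketch of the intended usage pattern described above (the type
// 'Point' and namespace 'myns' are hypothetical): a user-defined type provides
// a 'hash_value' overload in its own namespace, implemented with hash_combine,
// and consumers then find that overload via ADL.
// \code
//   namespace myns {
//   struct Point { int x, y; };
//   inline llvm::hash_code hash_value(const Point &p) {
//     return llvm::hash_combine(p.x, p.y);
//   }
//   } // namespace myns
//
//   using llvm::hash_value;
//   llvm::hash_code h = hash_value(myns::Point{1, 2});
// \endcode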

// Implementation details for the hash_value overloads provided here.
namespace hashing {
namespace detail {

/// Helper to hash the value of a single integer.
///
/// Overloads for smaller integer types are not provided to ensure consistent
/// behavior in the presence of integral promotions. Essentially,
/// "hash_value('4')" and "hash_value('0' + 4)" should be the same.
inline hash_code hash_integer_value(uint64_t value) {
  // Similar to hash_4to8_bytes but using a seed instead of length.
  const uint64_t seed = get_execution_seed();
  const char *s = reinterpret_cast<const char *>(&value);
  const uint64_t a = fetch32(s);
  return hash_16_bytes(seed + (a << 3), fetch32(s + 4));
}

} // namespace detail
} // namespace hashing

// Declared and documented above, but defined here so that any of the hashing
// infrastructure is available.
template <typename T>
std::enable_if_t<is_integral_or_enum<T>::value, hash_code> hash_value(T value) {
  return ::llvm::hashing::detail::hash_integer_value(
      static_cast<uint64_t>(value));
}

// Declared and documented above, but defined here so that any of the hashing
// infrastructure is available.
template <typename T> hash_code hash_value(const T *ptr) {
  return ::llvm::hashing::detail::hash_integer_value(
    reinterpret_cast<uintptr_t>(ptr));
}

// Declared and documented above, but defined here so that any of the hashing
// infrastructure is available.
template <typename T, typename U>
hash_code hash_value(const std::pair<T, U> &arg) {
  return hash_combine(arg.first, arg.second);
}

template <typename... Ts> hash_code hash_value(const std::tuple<Ts...> &arg) {
  return std::apply([](const auto &...xs) { return hash_combine(xs...); }, arg);
}

// Declared and documented above, but defined here so that any of the hashing
// infrastructure is available.
template <typename T>
hash_code hash_value(const std::basic_string<T> &arg) {
  return hash_combine_range(arg.begin(), arg.end());
}

template <typename T> hash_code hash_value(const std::optional<T> &arg) {
  return arg ? hash_combine(true, *arg) : hash_value(false);
}

template <> struct DenseMapInfo<hash_code, void> {
  static inline hash_code getEmptyKey() { return hash_code(-1); }
  static inline hash_code getTombstoneKey() { return hash_code(-2); }
  static unsigned getHashValue(hash_code val) { return val; }
  static bool isEqual(hash_code LHS, hash_code RHS) { return LHS == RHS; }
};

} // namespace llvm

#endif