partition_bucket_lookup.h

// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_BUCKET_LOOKUP_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_BUCKET_LOOKUP_H_

#include <cstdint>

#include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"

namespace partition_alloc::internal {

// Don't use an anonymous namespace for the constants because it can inhibit
// collapsing them together, even when they are tagged as inline.

// Precalculate some shift and mask constants used in the hot path.
// Example: malloc(41) == 101001 binary.
// Order is 6: (1 << (6 - 1)) == 32 is the highest bit set.
// order_index is the next three MSB == 010 == 2.
// sub_order_index_mask is a mask for the remaining bits == 11 (masking to 01
// for the sub_order_index).
constexpr uint8_t OrderIndexShift(uint8_t order) {
  if (order < kNumBucketsPerOrderBits + 1)
    return 0;

  return order - (kNumBucketsPerOrderBits + 1);
}

constexpr size_t OrderSubIndexMask(uint8_t order) {
  if (order == kBitsPerSizeT)
    return static_cast<size_t>(-1) >> (kNumBucketsPerOrderBits + 1);

  return ((static_cast<size_t>(1) << order) - 1) >>
         (kNumBucketsPerOrderBits + 1);
}
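
// Worked example of the decomposition above, assuming kNumBucketsPerOrderBits
// == 3 as in the malloc(41) comment (the actual value lives in
// partition_alloc_constants.h):
//   size == 41 == 0b101001, highest set bit is 32, so order == 6.
//   OrderIndexShift(6) == 6 - (3 + 1) == 2, so
//     order_index == (41 >> 2) & 0b111 == 0b010 == 2.
//   OrderSubIndexMask(6) == ((1 << 6) - 1) >> 4 == 0b11, so
//     sub_order_index == 41 & 0b11 == 0b01, which is non-zero, so the request
//     is bumped up to the next bucket.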
#if defined(PA_HAS_64_BITS_POINTERS)
#define PA_BITS_PER_SIZE_T 64
static_assert(kBitsPerSizeT == 64, "");
#else
#define PA_BITS_PER_SIZE_T 32
static_assert(kBitsPerSizeT == 32, "");
#endif  // defined(PA_HAS_64_BITS_POINTERS)

inline constexpr uint8_t kOrderIndexShift[PA_BITS_PER_SIZE_T + 1] = {
    OrderIndexShift(0),  OrderIndexShift(1),  OrderIndexShift(2),
    OrderIndexShift(3),  OrderIndexShift(4),  OrderIndexShift(5),
    OrderIndexShift(6),  OrderIndexShift(7),  OrderIndexShift(8),
    OrderIndexShift(9),  OrderIndexShift(10), OrderIndexShift(11),
    OrderIndexShift(12), OrderIndexShift(13), OrderIndexShift(14),
    OrderIndexShift(15), OrderIndexShift(16), OrderIndexShift(17),
    OrderIndexShift(18), OrderIndexShift(19), OrderIndexShift(20),
    OrderIndexShift(21), OrderIndexShift(22), OrderIndexShift(23),
    OrderIndexShift(24), OrderIndexShift(25), OrderIndexShift(26),
    OrderIndexShift(27), OrderIndexShift(28), OrderIndexShift(29),
    OrderIndexShift(30), OrderIndexShift(31), OrderIndexShift(32),
#if PA_BITS_PER_SIZE_T == 64
    OrderIndexShift(33), OrderIndexShift(34), OrderIndexShift(35),
    OrderIndexShift(36), OrderIndexShift(37), OrderIndexShift(38),
    OrderIndexShift(39), OrderIndexShift(40), OrderIndexShift(41),
    OrderIndexShift(42), OrderIndexShift(43), OrderIndexShift(44),
    OrderIndexShift(45), OrderIndexShift(46), OrderIndexShift(47),
    OrderIndexShift(48), OrderIndexShift(49), OrderIndexShift(50),
    OrderIndexShift(51), OrderIndexShift(52), OrderIndexShift(53),
    OrderIndexShift(54), OrderIndexShift(55), OrderIndexShift(56),
    OrderIndexShift(57), OrderIndexShift(58), OrderIndexShift(59),
    OrderIndexShift(60), OrderIndexShift(61), OrderIndexShift(62),
    OrderIndexShift(63), OrderIndexShift(64)
#endif
};
inline constexpr size_t kOrderSubIndexMask[PA_BITS_PER_SIZE_T + 1] = {
    OrderSubIndexMask(0),  OrderSubIndexMask(1),  OrderSubIndexMask(2),
    OrderSubIndexMask(3),  OrderSubIndexMask(4),  OrderSubIndexMask(5),
    OrderSubIndexMask(6),  OrderSubIndexMask(7),  OrderSubIndexMask(8),
    OrderSubIndexMask(9),  OrderSubIndexMask(10), OrderSubIndexMask(11),
    OrderSubIndexMask(12), OrderSubIndexMask(13), OrderSubIndexMask(14),
    OrderSubIndexMask(15), OrderSubIndexMask(16), OrderSubIndexMask(17),
    OrderSubIndexMask(18), OrderSubIndexMask(19), OrderSubIndexMask(20),
    OrderSubIndexMask(21), OrderSubIndexMask(22), OrderSubIndexMask(23),
    OrderSubIndexMask(24), OrderSubIndexMask(25), OrderSubIndexMask(26),
    OrderSubIndexMask(27), OrderSubIndexMask(28), OrderSubIndexMask(29),
    OrderSubIndexMask(30), OrderSubIndexMask(31), OrderSubIndexMask(32),
#if PA_BITS_PER_SIZE_T == 64
    OrderSubIndexMask(33), OrderSubIndexMask(34), OrderSubIndexMask(35),
    OrderSubIndexMask(36), OrderSubIndexMask(37), OrderSubIndexMask(38),
    OrderSubIndexMask(39), OrderSubIndexMask(40), OrderSubIndexMask(41),
    OrderSubIndexMask(42), OrderSubIndexMask(43), OrderSubIndexMask(44),
    OrderSubIndexMask(45), OrderSubIndexMask(46), OrderSubIndexMask(47),
    OrderSubIndexMask(48), OrderSubIndexMask(49), OrderSubIndexMask(50),
    OrderSubIndexMask(51), OrderSubIndexMask(52), OrderSubIndexMask(53),
    OrderSubIndexMask(54), OrderSubIndexMask(55), OrderSubIndexMask(56),
    OrderSubIndexMask(57), OrderSubIndexMask(58), OrderSubIndexMask(59),
    OrderSubIndexMask(60), OrderSubIndexMask(61), OrderSubIndexMask(62),
    OrderSubIndexMask(63), OrderSubIndexMask(64)
#endif
};
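
// Illustrative sanity checks (not part of the upstream header): the flattened
// tables above simply memoize the constexpr generators they were built from,
// so spot-checking the last entry of each catches an accidental mismatch.
static_assert(kOrderIndexShift[kBitsPerSizeT] == OrderIndexShift(kBitsPerSizeT),
              "");
static_assert(
    kOrderSubIndexMask[kBitsPerSizeT] == OrderSubIndexMask(kBitsPerSizeT), "");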
// The class used to generate the bucket lookup table at compile-time.
class BucketIndexLookup final {
 public:
  PA_ALWAYS_INLINE constexpr static uint16_t GetIndexForDenserBuckets(
      size_t size);
  PA_ALWAYS_INLINE constexpr static uint16_t GetIndex(size_t size);

  constexpr BucketIndexLookup() {
    constexpr uint16_t sentinel_bucket_index = kNumBuckets;

    InitBucketSizes();

    uint16_t* bucket_index_ptr = &bucket_index_lookup_[0];
    uint16_t bucket_index = 0;

    // Very small allocations, smaller than the first bucketed order ->
    // everything goes to the first bucket.
    for (uint8_t order = 0; order < kMinBucketedOrder; ++order) {
      for (uint16_t j = 0; j < kNumBucketsPerOrder; ++j) {
        *bucket_index_ptr++ = 0;
      }
    }

    // Normal buckets.
    for (uint8_t order = kMinBucketedOrder; order <= kMaxBucketedOrder;
         ++order) {
      size_t size = static_cast<size_t>(1) << (order - 1);
      size_t current_increment = size >> kNumBucketsPerOrderBits;
      for (uint16_t j = 0; j < kNumBucketsPerOrder; ++j) {
        *bucket_index_ptr++ = bucket_index;

        // For small sizes, buckets are close together (current_increment is
        // small). For instance, for:
        // - kAlignment == 16 (which is the case on most 64 bit systems)
        // - kNumBucketsPerOrder == 4
        //
        // The 3 next buckets after 16 are {20, 24, 28}. None of these are a
        // multiple of kAlignment, so they use the next bucket, that is 32 here.
        if (size % kAlignment != 0) {
          PA_DCHECK(bucket_sizes_[bucket_index] > size);
          // Do not increment bucket_index, since in the example above
          // current_size may be 20, and bucket_sizes_[bucket_index] == 32.
        } else {
          PA_DCHECK(bucket_sizes_[bucket_index] == size);
          bucket_index++;
        }

        size += current_increment;
      }
    }

    // Direct-mapped, and overflow.
    for (uint8_t order = kMaxBucketedOrder + 1; order <= kBitsPerSizeT;
         ++order) {
      for (uint16_t j = 0; j < kNumBucketsPerOrder; ++j) {
        *bucket_index_ptr++ = sentinel_bucket_index;
      }
    }

    // Smaller because some buckets are not valid due to alignment constraints.
    PA_DCHECK(bucket_index < kNumBuckets);
    PA_DCHECK(bucket_index_ptr == bucket_index_lookup_ + ((kBitsPerSizeT + 1) *
                                                          kNumBucketsPerOrder));
    // And there's one last bucket lookup that will be hit for e.g. malloc(-1),
    // which tries to overflow to a non-existent order.
    *bucket_index_ptr = sentinel_bucket_index;
  }

  constexpr const size_t* bucket_sizes() const { return &bucket_sizes_[0]; }

 private:
  constexpr void InitBucketSizes() {
    size_t current_size = kSmallestBucket;
    size_t current_increment = kSmallestBucket >> kNumBucketsPerOrderBits;
    size_t* bucket_size = &bucket_sizes_[0];
    for (size_t i = 0; i < kNumBucketedOrders; ++i) {
      for (size_t j = 0; j < kNumBucketsPerOrder; ++j) {
        // All bucket sizes have to be multiples of kAlignment, skip otherwise.
        if (current_size % kAlignment == 0) {
          *bucket_size = current_size;
          ++bucket_size;
        }
        current_size += current_increment;
      }
      current_increment <<= 1;
    }

    // The remaining buckets are invalid.
    while (bucket_size < bucket_sizes_ + kNumBuckets) {
      *(bucket_size++) = kInvalidBucketSize;
    }
  }
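
  // Worked example of InitBucketSizes() above, under the illustrative
  // constants from the constructor comment (kAlignment == 16,
  // kNumBucketsPerOrder == 4; the real values come from
  // partition_alloc_constants.h): starting from a candidate size of 16, the
  // generated candidates are 16, 20, 24, 28, 32, 40, 48, 56, 64, 80, 96, 112.
  // Only the multiples of 16 survive, so the recorded bucket sizes in that
  // range end up being 16, 32, 48, 64, 80, 96, 112.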
  size_t bucket_sizes_[kNumBuckets]{};
  // The bucket lookup table lets us map a size_t to a bucket quickly.
  // The trailing +1 caters for the overflow case for very large allocation
  // sizes. It is one flat array instead of a 2D array because in the 2D
  // world, we'd need to index array[blah][max+1] which risks undefined
  // behavior.
  uint16_t
      bucket_index_lookup_[((kBitsPerSizeT + 1) * kNumBucketsPerOrder) + 1]{};
};

PA_ALWAYS_INLINE constexpr size_t RoundUpToPowerOfTwo(size_t size) {
  const size_t n = 1 << base::bits::Log2Ceiling(static_cast<uint32_t>(size));
  PA_DCHECK(size <= n);
  return n;
}

PA_ALWAYS_INLINE constexpr size_t RoundUpSize(size_t size) {
  const size_t next_power = RoundUpToPowerOfTwo(size);
  const size_t prev_power = next_power >> 1;
  PA_DCHECK(size <= next_power);
  PA_DCHECK(prev_power < size);
  if (size <= prev_power * 5 / 4) {
    return prev_power * 5 / 4;
  } else {
    return next_power;
  }
}
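
// Worked example for RoundUpSize() (illustrative only): for size == 600,
// next_power == 1024 and prev_power == 512, so the 1.25 * prev_power boundary
// is 640; 600 <= 640, so the size is rounded up to 640. For size == 1000,
// 1000 > 640, so it is rounded all the way up to next_power == 1024.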

// static
PA_ALWAYS_INLINE constexpr uint16_t BucketIndexLookup::GetIndex(size_t size) {
  // For any order 2^N, under the denser bucket distribution ("Distribution
  // A"), we have 4 evenly distributed buckets: 2^N, 1.25*2^N, 1.5*2^N, and
  // 1.75*2^N. These numbers represent the maximum size of an allocation that
  // can go into a given bucket.
  //
  // Under the less dense bucket distribution ("Distribution B"), we only have
  // 2 buckets for the same order 2^N: 2^N and 1.25*2^N.
  //
  // Everything that would be mapped to the last two buckets of an order under
  // Distribution A is instead mapped to the first bucket of the next order
  // under Distribution B. The following diagram shows roughly what this looks
  // like for the order starting from 2^10, as an example.
  //
  // A: ... | 2^10 | 1.25*2^10 | 1.5*2^10 | 1.75*2^10 | 2^11 | ...
  // B: ... | 2^10 | 1.25*2^10 | -------- | --------- | 2^11 | ...
  //
  // So, an allocation of size 1.4*2^10 would go into the 1.5*2^10 bucket under
  // Distribution A, but to the 2^11 bucket under Distribution B.
  if (1 << 8 < size && size < kHighThresholdForAlternateDistribution)
    return BucketIndexLookup::GetIndexForDenserBuckets(RoundUpSize(size));
  return BucketIndexLookup::GetIndexForDenserBuckets(size);
}
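
// Worked example for the diagram above (illustrative; assumes that 1.4*2^10 ==
// 1434 is below kHighThresholdForAlternateDistribution): GetIndex(1434) first
// calls RoundUpSize(1434), which returns 2^11 == 2048 because 1434 is above
// the 1.25*2^10 == 1280 boundary. The denser-bucket lookup is then done with
// 2048, so the allocation lands in the 2^11 bucket, as sketched in row B.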

// static
PA_ALWAYS_INLINE constexpr uint16_t BucketIndexLookup::GetIndexForDenserBuckets(
    size_t size) {
  // This forces the bucket table to be constant-initialized and immediately
  // materialized in the binary.
  constexpr BucketIndexLookup lookup{};
  const size_t order =
      kBitsPerSizeT -
      static_cast<size_t>(base::bits::CountLeadingZeroBits(size));
  // The order index is simply the next few bits after the most significant
  // bit.
  const size_t order_index =
      (size >> kOrderIndexShift[order]) & (kNumBucketsPerOrder - 1);
  // And if the remaining bits are non-zero we must bump the bucket up.
  const size_t sub_order_index = size & kOrderSubIndexMask[order];
  const uint16_t index =
      lookup.bucket_index_lookup_[(order << kNumBucketsPerOrderBits) +
                                  order_index + !!sub_order_index];
  PA_DCHECK(index <= kNumBuckets);  // Last one is the sentinel bucket.
  return index;
}
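
// Illustrative usage (not part of the upstream header): mapping a requested
// size to the slot size that backs it. GetIndex() and bucket_sizes() are the
// public entry points; the returned index may be kNumBuckets (the sentinel)
// for sizes too large to be bucketed, so real callers must check for that
// before indexing the sizes array.
//
//   constexpr BucketIndexLookup lookup{};
//   const uint16_t index = BucketIndexLookup::GetIndex(41);
//   const size_t slot_size = lookup.bucket_sizes()[index];  // >= 41.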

}  // namespace partition_alloc::internal

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_BUCKET_LOOKUP_H_