shuffle.c

// SPDX-License-Identifier: GPL-2.0
// Copyright(c) 2018 Intel Corporation. All rights reserved.

#include <linux/mm.h>
#include <linux/init.h>
#include <linux/mmzone.h>
#include <linux/random.h>
#include <linux/moduleparam.h>
#include "internal.h"
#include "shuffle.h"

DEFINE_STATIC_KEY_FALSE(page_alloc_shuffle_key);

static bool shuffle_param;

static int shuffle_show(char *buffer, const struct kernel_param *kp)
{
	return sprintf(buffer, "%c\n", shuffle_param ? 'Y' : 'N');
}

static __meminit int shuffle_store(const char *val,
		const struct kernel_param *kp)
{
	int rc = param_set_bool(val, kp);

	if (rc < 0)
		return rc;
	if (shuffle_param)
		static_branch_enable(&page_alloc_shuffle_key);
	return 0;
}
module_param_call(shuffle, shuffle_store, shuffle_show, &shuffle_param, 0400);
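
/*
 * Usage note: param_set_bool() runs while the kernel command line is
 * parsed, and the 0400 permissions leave the sysfs node read-only, so
 * shuffling can only be switched on at boot time, e.g.:
 *
 *	page_alloc.shuffle=1
 *
 * ("page_alloc.shuffle" is the spelling documented in
 * Documentation/admin-guide/kernel-parameters.txt; the prefix for a
 * built-in object is derived from its KBUILD_MODNAME.)
 */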

/*
 * For two pages to be swapped in the shuffle, they must be free (on a
 * 'free_area' lru), have the same order, and have the same migratetype.
 */
static struct page * __meminit shuffle_valid_page(struct zone *zone,
						  unsigned long pfn, int order)
{
	struct page *page = pfn_to_online_page(pfn);

	/*
	 * Given we're dealing with randomly selected pfns in a zone we
	 * need to ask questions like...
	 */

	/* ... is the page managed by the buddy? */
	if (!page)
		return NULL;

	/* ... is the page assigned to the same zone? */
	if (page_zone(page) != zone)
		return NULL;

	/* ... is the page free and currently on a free_area list? */
	if (!PageBuddy(page))
		return NULL;

	/*
	 * ... is the page on the same list as the page we will
	 * shuffle it with?
	 */
	if (buddy_order(page) != order)
		return NULL;

	return page;
}
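
/*
 * Note: pfn_to_online_page() is what makes the checks above safe for
 * random pfns: it returns NULL for pfns that fall in holes or in memory
 * sections that are not online, so every dereference in
 * shuffle_valid_page() is on a valid struct page.
 */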

/*
 * Fisher-Yates shuffle the freelist: iterate through an array (pfns in
 * this case) and randomly swap each entry with another in the span
 * end_pfn - start_pfn (see the illustrative array-based sketch after
 * __shuffle_zone() below).
 *
 * To keep the implementation simple it does not attempt to correct for
 * sources of bias in the distribution, like modulo bias or pseudo-random
 * number generator bias. I.e. the expectation is that this shuffling
 * raises the bar for attacks that exploit the predictability of page
 * allocations, but need not be a perfect shuffle.
 */
#define SHUFFLE_RETRY 10
void __meminit __shuffle_zone(struct zone *z)
{
	unsigned long i, flags;
	unsigned long start_pfn = z->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(z);
	const int order = SHUFFLE_ORDER;
	const int order_pages = 1 << order;

	spin_lock_irqsave(&z->lock, flags);
	start_pfn = ALIGN(start_pfn, order_pages);
	for (i = start_pfn; i < end_pfn; i += order_pages) {
		unsigned long j;
		int migratetype, retry;
		struct page *page_i, *page_j;

		/*
		 * We expect page_i, in the sub-range of a zone being added
		 * (@start_pfn to @end_pfn), to more likely be valid compared to
		 * page_j randomly selected in the span @zone_start_pfn to
		 * @spanned_pages.
		 */
		page_i = shuffle_valid_page(z, i, order);
		if (!page_i)
			continue;

		for (retry = 0; retry < SHUFFLE_RETRY; retry++) {
			/*
			 * Pick a random order-aligned page in the zone span as
			 * a swap target. If the selected pfn is a hole, retry
			 * up to SHUFFLE_RETRY attempts to find a random valid
			 * pfn in the zone.
			 */
			j = z->zone_start_pfn +
				ALIGN_DOWN(get_random_long() % z->spanned_pages,
						order_pages);
			page_j = shuffle_valid_page(z, j, order);
			if (page_j && page_j != page_i)
				break;
		}
		if (retry >= SHUFFLE_RETRY) {
			pr_debug("%s: failed to swap %#lx\n", __func__, i);
			continue;
		}

		/*
		 * Each migratetype corresponds to its own list; make sure the
		 * types match, otherwise we're moving pages to lists where
		 * they do not belong.
		 */
		migratetype = get_pageblock_migratetype(page_i);
		if (get_pageblock_migratetype(page_j) != migratetype) {
			pr_debug("%s: migratetype mismatch %#lx\n", __func__, i);
			continue;
		}

		list_swap(&page_i->lru, &page_j->lru);

		pr_debug("%s: swap: %#lx -> %#lx\n", __func__, i, j);

		/* take it easy on the zone lock */
		if ((i % (100 * order_pages)) == 0) {
			spin_unlock_irqrestore(&z->lock, flags);
			cond_resched();
			spin_lock_irqsave(&z->lock, flags);
		}
	}
	spin_unlock_irqrestore(&z->lock, flags);
}
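
/*
 * For contrast with the in-place freelist walk above, the textbook
 * Fisher-Yates pass that the comment before __shuffle_zone() alludes to
 * would look like the sketch below. shuffle_pfn_array() is a hypothetical
 * helper shown for illustration only and is never called; it reuses
 * get_random_long() and tolerates the same modulo bias.
 */
static void __maybe_unused shuffle_pfn_array(unsigned long *pfns,
		unsigned long nr)
{
	unsigned long i;

	for (i = 0; i + 1 < nr; i++) {
		/* pick a swap target (modulo bias aside) from [i, nr) */
		unsigned long j = i + get_random_long() % (nr - i);

		swap(pfns[i], pfns[j]);
	}
}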

/**
 * shuffle_free_memory - reduce the predictability of the page allocator
 * @pgdat: node page data
 */
void __meminit __shuffle_free_memory(pg_data_t *pgdat)
{
	struct zone *z;

	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
		shuffle_zone(z);
}
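
/*
 * Note: callers reach the two entry points above through the
 * shuffle_zone()/shuffle_free_memory() wrappers declared in shuffle.h,
 * which gate on the static key set by the module parameter and compile
 * to a no-op when shuffling is disabled, roughly:
 *
 *	static inline void shuffle_free_memory(pg_data_t *pgdat)
 *	{
 *		if (static_branch_unlikely(&page_alloc_shuffle_key))
 *			__shuffle_free_memory(pgdat);
 *	}
 */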

bool shuffle_pick_tail(void)
{
	static u64 rand;
	static u8 rand_bits;
	bool ret;

	/*
	 * The lack of locking is deliberate. If two threads race to
	 * update the rand state, it just adds to the entropy.
	 */
	if (rand_bits == 0) {
		rand_bits = 64;
		rand = get_random_u64();
	}

	ret = rand & 1;

	rand_bits--;
	rand >>= 1;

	return ret;
}
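
/*
 * Consumer sketch: shuffle_pick_tail() amortizes one get_random_u64()
 * call into 64 single-bit answers. The buddy allocator's free path uses
 * each answer to randomize whether a freed page is queued at the head or
 * the tail of its free_area list, roughly (simplified from
 * mm/page_alloc.c):
 *
 *	if (is_shuffle_order(order) && shuffle_pick_tail())
 *		add_to_free_list_tail(page, zone, order, migratetype);
 *	else
 *		add_to_free_list(page, zone, order, migratetype);
 */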