mmap.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * flexible mmap layout support
 *
 * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
 * All Rights Reserved.
 *
 * Started by Ingo Molnar <mingo@elte.hu>
 */

#include <linux/elf-randomize.h>
#include <linux/personality.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/random.h>
#include <linux/compat.h>
#include <linux/security.h>
#include <asm/elf.h>

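/*
 * Maximum number of bytes the stack can be shifted by randomization:
 * zero when randomization is disabled for this task, otherwise
 * STACK_RND_MASK pages. Used below as padding when sizing the
 * stack/mmap gap.
 */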
static unsigned long stack_maxrandom_size(void)
{
	if (!(current->flags & PF_RANDOMIZE))
		return 0;
	return STACK_RND_MASK << PAGE_SHIFT;
}

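/*
 * The legacy (bottom-up) layout is used when the ADDR_COMPAT_LAYOUT
 * personality bit is set, when the stack may grow without bound
 * (RLIM_INFINITY), or when the legacy layout was requested via the
 * legacy_va_layout sysctl.
 */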
static inline int mmap_is_legacy(struct rlimit *rlim_stack)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;
	if (rlim_stack->rlim_cur == RLIM_INFINITY)
		return 1;
	return sysctl_legacy_va_layout;
}

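/*
 * Random offset applied to the mmap base: a random value masked with
 * MMAP_RND_MASK, shifted into a page-aligned byte offset.
 */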
unsigned long arch_mmap_rnd(void)
{
	return (get_random_int() & MMAP_RND_MASK) << PAGE_SHIFT;
}

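/*
 * Legacy layout: the mmap area starts at TASK_UNMAPPED_BASE plus the
 * random offset and grows upward.
 */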
static unsigned long mmap_base_legacy(unsigned long rnd)
{
	return TASK_UNMAPPED_BASE + rnd;
}

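/*
 * Top-down layout: place the mmap base below STACK_TOP, leaving a gap
 * sized after the soft stack rlimit, padded for stack randomization
 * and the guard gap, and clamped to [32 MB, 5/6 of STACK_TOP]. As an
 * illustration (STACK_TOP is configuration dependent): with a small
 * stack limit such as 8 MB, the padded gap stays below the 32 MB
 * floor and is clamped up, so the base becomes
 * PAGE_ALIGN(STACK_TOP - 32 MB - rnd).
 */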
static inline unsigned long mmap_base(unsigned long rnd,
				      struct rlimit *rlim_stack)
{
	unsigned long gap = rlim_stack->rlim_cur;
	unsigned long pad = stack_maxrandom_size() + stack_guard_gap;
	unsigned long gap_min, gap_max;

	/* Values close to RLIM_INFINITY can overflow. */
	if (gap + pad > gap)
		gap += pad;

	/*
	 * Top of mmap area (just below the process stack).
	 * Leave at least a ~32 MB hole.
	 */
	gap_min = 32 * 1024 * 1024UL;
	gap_max = (STACK_TOP / 6) * 5;

	if (gap < gap_min)
		gap = gap_min;
	else if (gap > gap_max)
		gap = gap_max;

	return PAGE_ALIGN(STACK_TOP - gap - rnd);
}

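/*
 * Bottom-up allocator: search for a free range from mm->mmap_base up
 * to TASK_SIZE. File-backed and shared mappings get an extra,
 * architecture-defined alignment (MMAP_ALIGN_MASK); private anonymous
 * mappings are only page aligned. Every result is checked against the
 * ASCE (address space control element) limit before being returned.
 */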
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
				     unsigned long len, unsigned long pgoff,
				     unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct vm_unmapped_area_info info;

	if (len > TASK_SIZE - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		goto check_asce_limit;

	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			goto check_asce_limit;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	if (filp || (flags & MAP_SHARED))
		info.align_mask = MMAP_ALIGN_MASK << PAGE_SHIFT;
	else
		info.align_mask = 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);
	if (offset_in_page(addr))
		return addr;

check_asce_limit:
	return check_asce_limit(mm, addr, len);
}

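/*
 * Top-down allocator: mirror image of arch_get_unmapped_area(),
 * searching downward from mm->mmap_base. If that fails, it retries
 * bottom-up from TASK_UNMAPPED_BASE (see the comment inside) rather
 * than failing the mmap() outright.
 */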
unsigned long arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
					     unsigned long len, unsigned long pgoff,
					     unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	struct vm_unmapped_area_info info;

	/* requested length too big for entire address space */
	if (len > TASK_SIZE - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		goto check_asce_limit;

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			goto check_asce_limit;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
	info.high_limit = mm->mmap_base;
	if (filp || (flags & MAP_SHARED))
		info.align_mask = MMAP_ALIGN_MASK << PAGE_SHIFT;
	else
		info.align_mask = 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (offset_in_page(addr)) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
		if (offset_in_page(addr))
			return addr;
	}

check_asce_limit:
	return check_asce_limit(mm, addr, len);
}

/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	/*
	 * Fall back to the standard layout if the personality
	 * bit is set, or if the expected stack growth is unlimited:
	 */
	if (mmap_is_legacy(rlim_stack)) {
		mm->mmap_base = mmap_base_legacy(random_factor);
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base(random_factor, rlim_stack);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}