fault.c
// SPDX-License-Identifier: GPL-2.0
/*
 * MMU fault handling support.
 *
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/sched/signal.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/extable.h>
#include <linux/interrupt.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/prefetch.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>

#include <asm/processor.h>
#include <asm/exception.h>

extern int die(char *, struct pt_regs *, long);

/*
 * Return TRUE if ADDRESS points at a page in the kernel's mapped segment
 * (inside region 5, on ia64) and that page is present.
 */
static int
mapped_kernel_page_is_present (unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;

	pgd = pgd_offset_k(address);
	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return 0;

	p4d = p4d_offset(pgd, address);
	if (p4d_none(*p4d) || p4d_bad(*p4d))
		return 0;

	pud = pud_offset(p4d, address);
	if (pud_none(*pud) || pud_bad(*pud))
		return 0;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return 0;

	ptep = pte_offset_kernel(pmd, address);
	if (!ptep)
		return 0;

	pte = *ptep;
	return pte_present(pte);
}
#	define VM_READ_BIT	0
#	define VM_WRITE_BIT	1
#	define VM_EXEC_BIT	2

void __kprobes
ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
{
	int signal = SIGSEGV, code = SEGV_MAPERR;
	struct vm_area_struct *vma, *prev_vma;
	struct mm_struct *mm = current->mm;
	unsigned long mask;
	vm_fault_t fault;
	unsigned int flags = FAULT_FLAG_DEFAULT;
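
	/* Build the required access mask (exec/write) from the ISR access bits. */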
	mask = ((((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
		| (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));

	/* mmap_lock is performance critical.... */
	prefetchw(&mm->mmap_lock);

	/*
	 * If we're in an interrupt or have no user context, we must not take the fault..
	 */
	if (faulthandler_disabled() || !mm)
		goto no_context;

#ifdef CONFIG_VIRTUAL_MEM_MAP
	/*
	 * If fault is in region 5 and we are in the kernel, we may already
	 * have the mmap_lock (pfn_valid macro is called during mmap). There
	 * is no vma for region 5 addr's anyway, so skip getting the semaphore
	 * and go directly to the exception handling code.
	 */
	if ((REGION_NUMBER(address) == 5) && !user_mode(regs))
		goto bad_area_no_up;
#endif

	/*
	 * This is to handle the kprobes on user space access instructions
	 */
	if (kprobe_page_fault(regs, TRAP_BRKPT))
		return;

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (mask & VM_WRITE)
		flags |= FAULT_FLAG_WRITE;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
retry:
	mmap_read_lock(mm);

	vma = find_vma_prev(mm, address, &prev_vma);
	if (!vma && !prev_vma)
		goto bad_area;
	/*
	 * find_vma_prev() returns vma such that address < vma->vm_end or NULL
	 *
	 * May find no vma, but could be that the last vm area is the
	 * register backing store that needs to expand upwards, in
	 * this case vma will be null, but prev_vma will be non-null
	 */
	if ((!vma && prev_vma) || (address < vma->vm_start))
		goto check_expansion;
  good_area:
	code = SEGV_ACCERR;

	/* OK, we've got a good vm_area for this memory area.  Check the access permissions: */

#	if (((1 << VM_READ_BIT) != VM_READ || (1 << VM_WRITE_BIT) != VM_WRITE) \
	    || (1 << VM_EXEC_BIT) != VM_EXEC)
#		error File is out of sync with <linux/mm.h>.  Please update.
#	endif
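
	/*
	 * A read access is rejected only if the vma grants neither read nor
	 * write permission; write and execute access are checked against
	 * "mask" below.
	 */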
	if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
		goto bad_area;

	if ((vma->vm_flags & mask) != mask)
		goto bad_area;

	/*
	 * If for any reason at all we couldn't handle the fault, make
	 * sure we exit gracefully rather than endlessly redo the
	 * fault.
	 */
	fault = handle_mm_fault(vma, address, flags, regs);

	if (fault_signal_pending(fault, regs))
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		/*
		 * We ran out of memory, or some other thing happened
		 * to us that made us unable to handle the page fault
		 * gracefully.
		 */
		if (fault & VM_FAULT_OOM) {
			goto out_of_memory;
		} else if (fault & VM_FAULT_SIGSEGV) {
			goto bad_area;
		} else if (fault & VM_FAULT_SIGBUS) {
			signal = SIGBUS;
			goto bad_area;
		}
		BUG();
	}

	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_RETRY) {
			flags |= FAULT_FLAG_TRIED;

			/* No need to mmap_read_unlock(mm) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */

			goto retry;
		}
	}

	mmap_read_unlock(mm);
	return;
  check_expansion:
	if (!(prev_vma && (prev_vma->vm_flags & VM_GROWSUP) && (address == prev_vma->vm_end))) {
		if (!vma)
			goto bad_area;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto bad_area;
		if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
		    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
			goto bad_area;
		if (expand_stack(vma, address))
			goto bad_area;
	} else {
		vma = prev_vma;
		if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
		    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
			goto bad_area;
		/*
		 * Since the register backing store is accessed sequentially,
		 * we disallow growing it by more than a page at a time.
		 */
		if (address > vma->vm_end + PAGE_SIZE - sizeof(long))
			goto bad_area;
		if (expand_upwards(vma, address))
			goto bad_area;
	}
	goto good_area;
  bad_area:
	mmap_read_unlock(mm);
#ifdef CONFIG_VIRTUAL_MEM_MAP
  bad_area_no_up:
#endif
	if ((isr & IA64_ISR_SP)
	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
	{
		/*
		 * This fault was due to a speculative load or lfetch.fault, set the "ed"
		 * bit in the psr to ensure forward progress.  (Target register will get a
		 * NaT for ld.s, lfetch will be canceled.)
		 */
		ia64_psr(regs)->ed = 1;
		return;
	}
	if (user_mode(regs)) {
		force_sig_fault(signal, code, (void __user *) address,
				0, __ISR_VALID, isr);
		return;
	}

  no_context:
	if ((isr & IA64_ISR_SP)
	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
	{
		/*
		 * This fault was due to a speculative load or lfetch.fault, set the "ed"
		 * bit in the psr to ensure forward progress.  (Target register will get a
		 * NaT for ld.s, lfetch will be canceled.)
		 */
		ia64_psr(regs)->ed = 1;
		return;
	}

	/*
	 * Since we have no vma's for region 5, we might get here even if the address is
	 * valid, due to the VHPT walker inserting a non present translation that becomes
	 * stale. If that happens, the non present fault handler already purged the stale
	 * translation, which fixed the problem. So, we check to see if the translation is
	 * valid, and return if it is.
	 */
	if (REGION_NUMBER(address) == 5 && mapped_kernel_page_is_present(address))
		return;

	if (ia64_done_with_exception(regs))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to terminate things
	 * with extreme prejudice.
	 */
	bust_spinlocks(1);

	if (address < PAGE_SIZE)
		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference (address %016lx)\n", address);
	else
		printk(KERN_ALERT "Unable to handle kernel paging request at "
		       "virtual address %016lx\n", address);
	if (die("Oops", regs, isr))
		regs = NULL;
	bust_spinlocks(0);
	if (regs)
		do_exit(SIGKILL);
	return;

  out_of_memory:
	mmap_read_unlock(mm);
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
}