trap.c

/*
 * Copyright (C) 2000, 2001 Jeff Dike (jdike@karaya.com)
 * Licensed under the GPL
 */

#include "linux/kernel.h"
#include "asm/errno.h"
#include "linux/sched.h"
#include "linux/mm.h"
#include "linux/spinlock.h"
#include "linux/init.h"
#include "linux/ptrace.h"
#include "asm/semaphore.h"
#include "asm/pgtable.h"
#include "asm/pgalloc.h"
#include "asm/tlbflush.h"
#include "asm/a.out.h"
#include "asm/current.h"
#include "asm/irq.h"
#include "sysdep/sigcontext.h"
#include "sysdep/ptrace.h"
#include "user_util.h"
#include "kern_util.h"
#include "kern.h"
#include "chan_kern.h"
#include "mconsole_kern.h"
#include "mem.h"
#include "mem_kern.h"
#include "os.h"
#ifdef CONFIG_MODE_SKAS
#include "skas.h"
#endif

/* Note this is constrained to return 0, -EFAULT, -EACCES, -ENOMEM by segv(). */
int handle_page_fault(unsigned long address, unsigned long ip,
                      int is_write, int is_user, int *code_out)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        int err = -EFAULT;

        *code_out = SEGV_MAPERR;

        /* If the fault was during an atomic operation, don't take the fault,
         * just fail. */
        if (in_atomic())
                goto out_nosemaphore;
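
        /* Find the VMA covering the fault; an address just below a
         * VM_GROWSDOWN vma may be allowed to expand the stack. */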
        down_read(&mm->mmap_sem);
        vma = find_vma(mm, address);
        if (!vma)
                goto out;
        else if (vma->vm_start <= address)
                goto good_area;
        else if (!(vma->vm_flags & VM_GROWSDOWN))
                goto out;
        else if (is_user && !ARCH_IS_STACKGROW(address))
                goto out;
        else if (expand_stack(vma, address))
                goto out;

good_area:
        *code_out = SEGV_ACCERR;
        if (is_write && !(vma->vm_flags & VM_WRITE))
                goto out;

        /* Don't require VM_READ|VM_EXEC for write faults! */
        if (!is_write && !(vma->vm_flags & (VM_READ | VM_EXEC)))
                goto out;
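
        /* Fault the page in, then re-walk the page tables; repeat until the
         * PTE is actually present. */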
        do {
survive:
                switch (handle_mm_fault(mm, vma, address, is_write)) {
                case VM_FAULT_MINOR:
                        current->min_flt++;
                        break;
                case VM_FAULT_MAJOR:
                        current->maj_flt++;
                        break;
                case VM_FAULT_SIGBUS:
                        err = -EACCES;
                        goto out;
                case VM_FAULT_OOM:
                        err = -ENOMEM;
                        goto out_of_memory;
                default:
                        BUG();
                }

                pgd = pgd_offset(mm, address);
                pud = pud_offset(pgd, address);
                pmd = pmd_offset(pud, address);
                pte = pte_offset_kernel(pmd, address);
        } while (!pte_present(*pte));
        err = 0;

        /* The below warning was added in place of
         *      pte_mkyoung(); if (is_write) pte_mkdirty();
         * If it's triggered, we'd normally see a hang here (a clean pte is
         * marked read-only to emulate the dirty bit).
         * However, the generic code can mark a PTE writable but clean on a
         * concurrent read fault, triggering this harmlessly. So comment it out.
         */
#if 0
        WARN_ON(!pte_young(*pte) || (is_write && !pte_dirty(*pte)));
#endif
        flush_tlb_page(vma, address);
out:
        up_read(&mm->mmap_sem);
out_nosemaphore:
        return(err);

        /*
         * We ran out of memory, or some other thing happened to us that made
         * us unable to handle the page fault gracefully.
         */
out_of_memory:
        if (is_init(current)) {
                up_read(&mm->mmap_sem);
                yield();
                down_read(&mm->mmap_sem);
                goto survive;
        }
        goto out;
}
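
/* Send SIGSEGV with SEGV_ACCERR and the fault address to the current process
 * for a fault that can't be handled as a page fault. */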
static void bad_segv(struct faultinfo fi, unsigned long ip)
{
        struct siginfo si;

        si.si_signo = SIGSEGV;
        si.si_code = SEGV_ACCERR;
        si.si_addr = (void __user *) FAULT_ADDRESS(fi);
        current->thread.arch.faultinfo = fi;
        force_sig_info(SIGSEGV, &si, current);
}
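
/* Entry point for SIGSEGV: unfixable user faults go to bad_segv(), everything
 * else is handled by segv(). */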
static void segv_handler(int sig, union uml_pt_regs *regs)
{
        struct faultinfo *fi = UPT_FAULTINFO(regs);

        if (UPT_IS_USER(regs) && !SEGV_IS_FIXABLE(fi)) {
                bad_segv(*fi, UPT_IP(regs));
                return;
        }
        segv(*fi, UPT_IP(regs), UPT_IS_USER(regs), regs);
}

/*
 * We give a *copy* of the faultinfo in the regs to segv.
 * This must be done, since nesting SEGVs could overwrite
 * the info in the regs. A pointer to the info would then
 * give us bad data!
 */
unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user, void *sc)
{
        struct siginfo si;
        void *catcher;
        int err;
        int is_write = FAULT_WRITE(fi);
        unsigned long address = FAULT_ADDRESS(fi);

        if (!is_user && (address >= start_vm) && (address < end_vm)) {
                flush_tlb_kernel_vm();
                return(0);
        }
        else if (current->mm == NULL)
                panic("Segfault with no mm");

        if (SEGV_IS_FIXABLE(&fi) || SEGV_MAYBE_FIXABLE(&fi))
                err = handle_page_fault(address, ip, is_write, is_user,
                                        &si.si_code);
        else {
                err = -EFAULT;
                /* A thread accessed NULL, we get a fault, but CR2 is invalid.
                 * This code is used in __do_copy_from_user() of TT mode. */
                address = 0;
        }

        catcher = current->thread.fault_catcher;
        if (!err)
                return(0);
        else if (catcher != NULL) {
                current->thread.fault_addr = (void *) address;
                do_longjmp(catcher, 1);
        }
        else if (current->thread.fault_addr != NULL)
                panic("fault_addr set but no fault catcher");
        else if (!is_user && arch_fixup(ip, sc))
                return(0);

        if (!is_user)
                panic("Kernel mode fault at addr 0x%lx, ip 0x%lx",
                      address, ip);

        if (err == -EACCES) {
                si.si_signo = SIGBUS;
                si.si_errno = 0;
                si.si_code = BUS_ADRERR;
                si.si_addr = (void __user *) address;
                current->thread.arch.faultinfo = fi;
                force_sig_info(SIGBUS, &si, current);
        } else if (err == -ENOMEM) {
                printk("VM: killing process %s\n", current->comm);
                do_exit(SIGKILL);
        } else {
                BUG_ON(err != -EFAULT);
                si.si_signo = SIGSEGV;
                si.si_addr = (void __user *) address;
                current->thread.arch.faultinfo = fi;
                force_sig_info(SIGSEGV, &si, current);
        }
        return(0);
}
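
/* Pass a signal through to the current process; a signal received while in
 * kernel mode is fatal. */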
void relay_signal(int sig, union uml_pt_regs *regs)
{
        if (arch_handle_signal(sig, regs))
                return;

        if (!UPT_IS_USER(regs)) {
                if (sig == SIGBUS)
                        printk("Bus error - the /dev/shm or /tmp mount likely "
                               "just ran out of space\n");
                panic("Kernel mode signal %d", sig);
        }

        current->thread.arch.faultinfo = *UPT_FAULTINFO(regs);
        force_sig(sig, current);
}
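
/* SIGBUS: longjmp to a registered fault catcher if one is set, otherwise
 * relay the signal. */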
static void bus_handler(int sig, union uml_pt_regs *regs)
{
        if (current->thread.fault_catcher != NULL)
                do_longjmp(current->thread.fault_catcher, 1);
        else
                relay_signal(sig, regs);
}
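
/* Window size change - dispatch the WINCH interrupt to the console code. */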
static void winch(int sig, union uml_pt_regs *regs)
{
        do_IRQ(WINCH_IRQ, regs);
}
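
/* Kernel-side handlers dispatched from the low-level signal code. */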
const struct kern_handlers handlinfo_kern = {
        .relay_signal = relay_signal,
        .winch = winch,
        .bus_handler = bus_handler,
        .page_fault = segv_handler,
        .sigio_handler = sigio_handler,
        .timer_handler = timer_handler
};
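
/* No arch-specific trap setup is needed here. */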
void trap_init(void)
{
}