syscall_64.c

// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/err.h>
#include <asm/asm-prototypes.h>
#include <asm/kup.h>
#include <asm/cputime.h>
#include <asm/hw_irq.h>
#include <asm/kprobes.h>
#include <asm/paca.h>
#include <asm/ptrace.h>
#include <asm/reg.h>
#include <asm/signal.h>
#include <asm/switch_to.h>
#include <asm/syscall.h>
#include <asm/time.h>
#include <asm/unistd.h>
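
/*
 * C handlers for the 64-bit powerpc syscall entry/exit and interrupt exit
 * paths; the functions below are called from the low-level assembly
 * entry/return code.
 */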

typedef long (*syscall_fn)(long, long, long, long, long, long);

/* Has to run notrace because it is entered not completely "reconciled" */
notrace long system_call_exception(long r3, long r4, long r5,
                                   long r6, long r7, long r8,
                                   unsigned long r0, struct pt_regs *regs)
{
        syscall_fn f;

        if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
                BUG_ON(irq_soft_mask_return() != IRQS_ALL_DISABLED);

        trace_hardirqs_off(); /* finish reconciling */

        if (IS_ENABLED(CONFIG_PPC_BOOK3S))
                BUG_ON(!(regs->msr & MSR_RI));
        BUG_ON(!(regs->msr & MSR_PR));
        BUG_ON(!FULL_REGS(regs));
        BUG_ON(regs->softe != IRQS_ENABLED);

        kuap_check_amr();

        account_cpu_user_entry();

#ifdef CONFIG_PPC_SPLPAR
        if (IS_ENABLED(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) &&
            firmware_has_feature(FW_FEATURE_SPLPAR)) {
                struct lppaca *lp = local_paca->lppaca_ptr;

                if (unlikely(local_paca->dtl_ridx != be64_to_cpu(lp->dtl_idx)))
                        accumulate_stolen_time();
        }
#endif

        /*
         * This is not required for the syscall exit path, but makes the
         * stack frame look nicer. If this was initialised in the first stack
         * frame, or if the unwinder was taught the first stack frame always
         * returns to user with IRQS_ENABLED, this store could be avoided!
         */
        regs->softe = IRQS_ENABLED;

        local_irq_enable();

        if (unlikely(current_thread_info()->flags & _TIF_SYSCALL_DOTRACE)) {
                if (unlikely(regs->trap == 0x7ff0)) {
                        /* Unsupported scv vector */
                        _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
                        return regs->gpr[3];
                }
                /*
                 * We use the return value of do_syscall_trace_enter() as the
                 * syscall number. If the syscall was rejected for any reason
                 * do_syscall_trace_enter() returns an invalid syscall number
                 * and the test against NR_syscalls will fail and the return
                 * value to be used is in regs->gpr[3].
                 */
                r0 = do_syscall_trace_enter(regs);
                if (unlikely(r0 >= NR_syscalls))
                        return regs->gpr[3];
                r3 = regs->gpr[3];
                r4 = regs->gpr[4];
                r5 = regs->gpr[5];
                r6 = regs->gpr[6];
                r7 = regs->gpr[7];
                r8 = regs->gpr[8];

        } else if (unlikely(r0 >= NR_syscalls)) {
                if (unlikely(regs->trap == 0x7ff0)) {
                        /* Unsupported scv vector */
                        _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
                        return regs->gpr[3];
                }
                return -ENOSYS;
        }
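
        /*
         * r0 has been bounds-checked against NR_syscalls above, but a
         * mispredicted branch could still index the syscall table
         * speculatively with an out-of-range value; the speculation
         * barrier below prevents that.
         */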
        /* May be faster to do array_index_nospec? */
        barrier_nospec();
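
        /*
         * Compat (32-bit) tasks dispatch through the compat table, and
         * their syscall arguments are only 32 bits wide, so the upper
         * halves of the argument registers are cleared here.
         */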
        if (unlikely(is_32bit_task())) {
                f = (void *)compat_sys_call_table[r0];

                r3 &= 0x00000000ffffffffULL;
                r4 &= 0x00000000ffffffffULL;
                r5 &= 0x00000000ffffffffULL;
                r6 &= 0x00000000ffffffffULL;
                r7 &= 0x00000000ffffffffULL;
                r8 &= 0x00000000ffffffffULL;

        } else {
                f = (void *)sys_call_table[r0];
        }

        return f(r3, r4, r5, r6, r7, r8);
}

/*
 * local irqs must be disabled. Returns false if the caller must re-enable
 * them, check for new work, and try again.
 */
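/*
 * clear_ri selects whether MSR[RI] is cleared together with MSR[EE]: the
 * sc exit path needs RI=0 because it restores state through SRR0/SRR1,
 * while the scv path does not (see the comment at the caller below).
 */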
static notrace inline bool prep_irq_for_enabled_exit(bool clear_ri)
{
        /* This must be done with RI=1 because tracing may touch vmaps */
        trace_hardirqs_on();

        /* This pattern matches prep_irq_for_idle */
        if (clear_ri)
                __hard_EE_RI_disable();
        else
                __hard_irq_disable();
        if (unlikely(lazy_irq_pending_nocheck())) {
                /* Took an interrupt, may have more exit work to do. */
                if (clear_ri)
                        __hard_RI_enable();
                trace_hardirqs_off();
                local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

                return false;
        }
        local_paca->irq_happened = 0;
        irq_soft_mask_set(IRQS_ENABLED);

        return true;
}

/*
 * This should be called after a syscall returns, with r3 the return value
 * from the syscall. If this function returns non-zero, the system call
 * exit assembly should additionally load all GPR registers and CTR and XER
 * from the interrupt frame.
 *
 * The function graph tracer can not trace the return side of this function,
 * because RI=0 and soft mask state is "unreconciled", so it is marked notrace.
 */
notrace unsigned long syscall_exit_prepare(unsigned long r3,
                                           struct pt_regs *regs,
                                           long scv)
{
        unsigned long *ti_flagsp = &current_thread_info()->flags;
        unsigned long ti_flags;
        unsigned long ret = 0;

        kuap_check_amr();

        regs->result = r3;

        /* Check whether the syscall is issued inside a restartable sequence */
        rseq_syscall(regs);

        ti_flags = *ti_flagsp;
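
        /*
         * The sc ABI reports errors by setting CR0[SO] and returning the
         * errno as a positive value; scv follows the more common convention
         * of returning a negative errno directly, so it skips this fixup.
         */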
        if (unlikely(r3 >= (unsigned long)-MAX_ERRNO) && !scv) {
                if (likely(!(ti_flags & (_TIF_NOERROR | _TIF_RESTOREALL)))) {
                        r3 = -r3;
                        regs->ccr |= 0x10000000; /* Set SO bit in CR */
                }
        }

        if (unlikely(ti_flags & _TIF_PERSYSCALL_MASK)) {
                if (ti_flags & _TIF_RESTOREALL)
                        ret = _TIF_RESTOREALL;
                else
                        regs->gpr[3] = r3;
                clear_bits(_TIF_PERSYSCALL_MASK, ti_flagsp);
        } else {
                regs->gpr[3] = r3;
        }

        if (unlikely(ti_flags & _TIF_SYSCALL_DOTRACE)) {
                do_syscall_trace_leave(regs);
                ret |= _TIF_RESTOREALL;
        }
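
        /*
         * The return to userspace must happen with interrupts disabled and
         * no user work pending; if work appears after disabling, interrupts
         * are re-enabled, the work is handled, and the check is retried.
         */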
again:
        local_irq_disable();
        ti_flags = READ_ONCE(*ti_flagsp);
        while (unlikely(ti_flags & (_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM))) {
                local_irq_enable();
                if (ti_flags & _TIF_NEED_RESCHED) {
                        schedule();
                } else {
                        /*
                         * SIGPENDING must restore signal handler function
                         * argument GPRs, and some non-volatiles (e.g., r1).
                         * Restore all for now. This could be made lighter.
                         */
                        if (ti_flags & _TIF_SIGPENDING)
                                ret |= _TIF_RESTOREALL;
                        do_notify_resume(regs, ti_flags);
                }
                local_irq_disable();
                ti_flags = READ_ONCE(*ti_flagsp);
        }

        if (IS_ENABLED(CONFIG_PPC_BOOK3S) && IS_ENABLED(CONFIG_PPC_FPU)) {
                if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
                                unlikely((ti_flags & _TIF_RESTORE_TM))) {
                        restore_tm_state(regs);
                } else {
                        unsigned long mathflags = MSR_FP;

                        if (cpu_has_feature(CPU_FTR_VSX))
                                mathflags |= MSR_VEC | MSR_VSX;
                        else if (cpu_has_feature(CPU_FTR_ALTIVEC))
                                mathflags |= MSR_VEC;

                        /*
                         * If userspace MSR has all available FP bits set,
                         * then they are live and no need to restore. If not,
                         * it means the regs were given up and restore_math
                         * may decide to restore them (to avoid taking an FP
                         * fault).
                         */
                        if ((regs->msr & mathflags) != mathflags)
                                restore_math(regs);
                }
        }

        /* scv need not set RI=0 because SRRs are not used */
        if (unlikely(!prep_irq_for_enabled_exit(!scv))) {
                local_irq_enable();
                goto again;
        }

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        local_paca->tm_scratch = regs->msr;
#endif

        account_cpu_user_exit();

        return ret;
}

#ifdef CONFIG_PPC_BOOK3S /* BOOK3E not yet using this */
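/*
 * Prepare to return to userspace from an interrupt: handle pending
 * reschedule and signal work, restore FP/vector state if needed, and
 * return flags for the interrupt return assembly (_TIF_RESTOREALL asks
 * it to reload all GPRs from the interrupt frame, as above).
 */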
notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned long msr)
{
#ifdef CONFIG_PPC_BOOK3E
        struct thread_struct *ts = &current->thread;
#endif
        unsigned long *ti_flagsp = &current_thread_info()->flags;
        unsigned long ti_flags;
        unsigned long flags;
        unsigned long ret = 0;

        if (IS_ENABLED(CONFIG_PPC_BOOK3S))
                BUG_ON(!(regs->msr & MSR_RI));
        BUG_ON(!(regs->msr & MSR_PR));
        BUG_ON(!FULL_REGS(regs));
        BUG_ON(regs->softe != IRQS_ENABLED);

        /*
         * We don't need to restore AMR on the way back to userspace for KUAP.
         * AMR can only have been unlocked if we interrupted the kernel.
         */
        kuap_check_amr();

        local_irq_save(flags);

again:
        ti_flags = READ_ONCE(*ti_flagsp);
        while (unlikely(ti_flags & (_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM))) {
                local_irq_enable(); /* returning to user: may enable */
                if (ti_flags & _TIF_NEED_RESCHED) {
                        schedule();
                } else {
                        if (ti_flags & _TIF_SIGPENDING)
                                ret |= _TIF_RESTOREALL;
                        do_notify_resume(regs, ti_flags);
                }
                local_irq_disable();
                ti_flags = READ_ONCE(*ti_flagsp);
        }

        if (IS_ENABLED(CONFIG_PPC_BOOK3S) && IS_ENABLED(CONFIG_PPC_FPU)) {
                if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
                                unlikely((ti_flags & _TIF_RESTORE_TM))) {
                        restore_tm_state(regs);
                } else {
                        unsigned long mathflags = MSR_FP;

                        if (cpu_has_feature(CPU_FTR_VSX))
                                mathflags |= MSR_VEC | MSR_VSX;
                        else if (cpu_has_feature(CPU_FTR_ALTIVEC))
                                mathflags |= MSR_VEC;

                        /* See above restore_math comment */
                        if ((regs->msr & mathflags) != mathflags)
                                restore_math(regs);
                }
        }

        if (unlikely(!prep_irq_for_enabled_exit(true))) {
                local_irq_enable();
                local_irq_disable();
                goto again;
        }

#ifdef CONFIG_PPC_BOOK3E
        if (unlikely(ts->debug.dbcr0 & DBCR0_IDM)) {
                /*
                 * Check to see if the dbcr0 register is set up to debug.
                 * Use the internal debug mode bit to do this.
                 */
                mtmsr(mfmsr() & ~MSR_DE);
                mtspr(SPRN_DBCR0, ts->debug.dbcr0);
                mtspr(SPRN_DBSR, -1);
        }
#endif

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        local_paca->tm_scratch = regs->msr;
#endif

        account_cpu_user_exit();

        return ret;
}

void unrecoverable_exception(struct pt_regs *regs);
void preempt_schedule_irq(void);
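
/*
 * Prepare to return from an interrupt taken in kernel mode. Returns 1 when
 * _TIF_EMULATE_STACK_STORE was pending, so the return assembly can complete
 * the emulated store to the interrupted context's stack.
 */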
notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs, unsigned long msr)
{
        unsigned long *ti_flagsp = &current_thread_info()->flags;
        unsigned long flags;
        unsigned long ret = 0;
        unsigned long amr;

        if (IS_ENABLED(CONFIG_PPC_BOOK3S) && unlikely(!(regs->msr & MSR_RI)))
                unrecoverable_exception(regs);
        BUG_ON(regs->msr & MSR_PR);
        BUG_ON(!FULL_REGS(regs));

        amr = kuap_get_and_check_amr();

        if (unlikely(*ti_flagsp & _TIF_EMULATE_STACK_STORE)) {
                clear_bits(_TIF_EMULATE_STACK_STORE, ti_flagsp);
                ret = 1;
        }

        local_irq_save(flags);

        if (regs->softe == IRQS_ENABLED) {
                /* Returning to a kernel context with local irqs enabled. */
                WARN_ON_ONCE(!(regs->msr & MSR_EE));
again:
                if (IS_ENABLED(CONFIG_PREEMPT)) {
                        /* Return to preemptible kernel context */
                        if (unlikely(*ti_flagsp & _TIF_NEED_RESCHED)) {
                                if (preempt_count() == 0)
                                        preempt_schedule_irq();
                        }
                }

                if (unlikely(!prep_irq_for_enabled_exit(true))) {
                        /*
                         * Can't local_irq_restore to replay if we were in
                         * interrupt context. Must replay directly.
                         */
                        if (irqs_disabled_flags(flags)) {
                                replay_soft_interrupts();
                        } else {
                                local_irq_restore(flags);
                                local_irq_save(flags);
                        }
                        /* Took an interrupt, may have more exit work to do. */
                        goto again;
                }
        } else {
                /* Returning to a kernel context with local irqs disabled. */
                __hard_EE_RI_disable();
                if (regs->msr & MSR_EE)
                        local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
        }

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        local_paca->tm_scratch = regs->msr;
#endif

        /*
         * Don't want to mfspr(SPRN_AMR) here, because this comes after mtmsr,
         * which would cause Read-After-Write stalls. Hence, we take the AMR
         * value from the check above.
         */
        kuap_restore_amr(regs, amr);

        return ret;
}
#endif