/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright IBM Corp. 2007
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>
/* The host stack layout: */
#define HOST_R1		0 /* Implied by stwu. */
#define HOST_CALLEE_LR	4
#define HOST_RUN	8
/* r2 is special: it holds 'current', and it is made nonvolatile in the
 * kernel with the -ffixed-r2 gcc option. */
#define HOST_R2		12
#define HOST_CR		16
#define HOST_NV_GPRS	20
#define __HOST_NV_GPR(n)	(HOST_NV_GPRS + ((n - 14) * 4))
#define HOST_NV_GPR(n)	__HOST_NV_GPR(__REG_##n)
#define HOST_MIN_STACK_SIZE	(HOST_NV_GPR(R31) + 4)
#define HOST_STACK_SIZE	(((HOST_MIN_STACK_SIZE + 15) / 16) * 16) /* Align. */
#define HOST_STACK_LR	(HOST_STACK_SIZE + 4) /* In caller stack frame. */
#define NEED_INST_MASK ((1<<BOOKE_INTERRUPT_PROGRAM) | \
			(1<<BOOKE_INTERRUPT_DTLB_MISS) | \
			(1<<BOOKE_INTERRUPT_DEBUG))

#define NEED_DEAR_MASK ((1<<BOOKE_INTERRUPT_DATA_STORAGE) | \
			(1<<BOOKE_INTERRUPT_DTLB_MISS) | \
			(1<<BOOKE_INTERRUPT_ALIGNMENT))

#define NEED_ESR_MASK ((1<<BOOKE_INTERRUPT_DATA_STORAGE) | \
		       (1<<BOOKE_INTERRUPT_INST_STORAGE) | \
		       (1<<BOOKE_INTERRUPT_PROGRAM) | \
		       (1<<BOOKE_INTERRUPT_DTLB_MISS) | \
		       (1<<BOOKE_INTERRUPT_ALIGNMENT))
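
/*
 * Common exit trampoline shared by all the handler stubs below: stash
 * guest r3-r6, CTR and the interrupted PC in the vcpu struct, put the
 * exit number in r5, and branch to kvmppc_resume_host.
 *   ivor_nr: BOOKE_INTERRUPT_* number identifying this exit
 *   scratch: SPRG scratch register appropriate to the interrupt class
 *   srr0:    saved-PC register for the class (SRR0, CSRR0 or MCSRR0)
 */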
.macro __KVM_HANDLER ivor_nr scratch srr0
	/* Get pointer to vcpu and record exit number. */
	mtspr	\scratch, r4
	mfspr	r4, SPRN_SPRG_THREAD
	lwz	r4, THREAD_KVM_VCPU(r4)
	stw	r3, VCPU_GPR(R3)(r4)
	stw	r5, VCPU_GPR(R5)(r4)
	stw	r6, VCPU_GPR(R6)(r4)
	mfspr	r3, \scratch
	mfctr	r5
	stw	r3, VCPU_GPR(R4)(r4)
	stw	r5, VCPU_CTR(r4)
	mfspr	r3, \srr0
	lis	r6, kvmppc_resume_host@h
	stw	r3, VCPU_PC(r4)
	li	r5, \ivor_nr
	ori	r6, r6, kvmppc_resume_host@l
	mtctr	r6
	bctr
.endm
.macro KVM_HANDLER ivor_nr scratch srr0
_GLOBAL(kvmppc_handler_\ivor_nr)
	__KVM_HANDLER \ivor_nr \scratch \srr0
.endm
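
/*
 * Debug interrupts need special care: one can fire while we are still in
 * the KVM entry/exit path itself. Check CSRR1[PR]: if the event came from
 * kernel context, squash it (clear CSRR1[DE] and the pending DBSR events)
 * and rfci back; only a debug event taken in guest context falls through
 * to the common handler.
 */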
.macro KVM_DBG_HANDLER ivor_nr scratch srr0
_GLOBAL(kvmppc_handler_\ivor_nr)
	mtspr	\scratch, r4
	mfspr	r4, SPRN_SPRG_THREAD
	lwz	r4, THREAD_KVM_VCPU(r4)
	stw	r3, VCPU_CRIT_SAVE(r4)
	mfcr	r3
	mfspr	r4, SPRN_CSRR1
	andi.	r4, r4, MSR_PR
	bne	1f
	/* debug interrupt happened in enter/exit path */
	mfspr	r4, SPRN_CSRR1
	rlwinm	r4, r4, 0, ~MSR_DE
	mtspr	SPRN_CSRR1, r4
	lis	r4, 0xffff
	ori	r4, r4, 0xffff
	mtspr	SPRN_DBSR, r4
	mfspr	r4, SPRN_SPRG_THREAD
	lwz	r4, THREAD_KVM_VCPU(r4)
	mtcr	r3
	lwz	r3, VCPU_CRIT_SAVE(r4)
	mfspr	r4, \scratch
	rfci

1:	/* debug interrupt happened in guest */
	mtcr	r3
	mfspr	r4, SPRN_SPRG_THREAD
	lwz	r4, THREAD_KVM_VCPU(r4)
	lwz	r3, VCPU_CRIT_SAVE(r4)
	mfspr	r4, \scratch
	__KVM_HANDLER \ivor_nr \scratch \srr0
.endm
.macro KVM_HANDLER_ADDR ivor_nr
	.long	kvmppc_handler_\ivor_nr
.endm

.macro KVM_HANDLER_END
	.long	kvmppc_handlers_end
.endm
_GLOBAL(kvmppc_handlers_start)
KVM_HANDLER BOOKE_INTERRUPT_CRITICAL SPRN_SPRG_RSCRATCH_CRIT SPRN_CSRR0
KVM_HANDLER BOOKE_INTERRUPT_MACHINE_CHECK SPRN_SPRG_RSCRATCH_MC SPRN_MCSRR0
KVM_HANDLER BOOKE_INTERRUPT_DATA_STORAGE SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_INST_STORAGE SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_EXTERNAL SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_ALIGNMENT SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_PROGRAM SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_FP_UNAVAIL SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_SYSCALL SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_AP_UNAVAIL SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_DECREMENTER SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_FIT SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_WATCHDOG SPRN_SPRG_RSCRATCH_CRIT SPRN_CSRR0
KVM_HANDLER BOOKE_INTERRUPT_DTLB_MISS SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_ITLB_MISS SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_DBG_HANDLER BOOKE_INTERRUPT_DEBUG SPRN_SPRG_RSCRATCH_CRIT SPRN_CSRR0
KVM_HANDLER BOOKE_INTERRUPT_SPE_UNAVAIL SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_SPE_FP_DATA SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_SPE_FP_ROUND SPRN_SPRG_RSCRATCH0 SPRN_SRR0
_GLOBAL(kvmppc_handlers_end)

/* Registers:
 *  SPRG_SCRATCH0: guest r4
 *  r4: vcpu pointer
 *  r5: KVM exit number
 */
_GLOBAL(kvmppc_resume_host)
	mfcr	r3
	stw	r3, VCPU_CR(r4)
	stw	r7, VCPU_GPR(R7)(r4)
	stw	r8, VCPU_GPR(R8)(r4)
	stw	r9, VCPU_GPR(R9)(r4)
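
	/* Build a one-hot mask for this exit number, for testing against
	 * the NEED_*_MASK sets defined above. */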
	li	r6, 1
	slw	r6, r6, r5

#ifdef CONFIG_KVM_EXIT_TIMING
	/* save exit time */
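	/* Read TBU, TBL, then TBU again and retry if the upper half
	 * changed, so the two halves form one consistent 64-bit stamp. */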
1:
	mfspr	r7, SPRN_TBRU
	mfspr	r8, SPRN_TBRL
	mfspr	r9, SPRN_TBRU
	cmpw	r9, r7
	bne	1b
	stw	r8, VCPU_TIMING_EXIT_TBL(r4)
	stw	r9, VCPU_TIMING_EXIT_TBU(r4)
#endif

	/* Save the faulting instruction and all GPRs for emulation. */
	andi.	r7, r6, NEED_INST_MASK
	beq	..skip_inst_copy
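	/* Fetch the guest instruction at SRR0 as a data access: temporarily
	 * set MSR[DS] so the load goes through the guest's address space,
	 * where that address is actually mapped. */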
	mfspr	r9, SPRN_SRR0
	mfmsr	r8
	ori	r7, r8, MSR_DS
	mtmsr	r7
	isync
	lwz	r9, 0(r9)
	mtmsr	r8
	isync
	stw	r9, VCPU_LAST_INST(r4)

	stw	r15, VCPU_GPR(R15)(r4)
	stw	r16, VCPU_GPR(R16)(r4)
	stw	r17, VCPU_GPR(R17)(r4)
	stw	r18, VCPU_GPR(R18)(r4)
	stw	r19, VCPU_GPR(R19)(r4)
	stw	r20, VCPU_GPR(R20)(r4)
	stw	r21, VCPU_GPR(R21)(r4)
	stw	r22, VCPU_GPR(R22)(r4)
	stw	r23, VCPU_GPR(R23)(r4)
	stw	r24, VCPU_GPR(R24)(r4)
	stw	r25, VCPU_GPR(R25)(r4)
	stw	r26, VCPU_GPR(R26)(r4)
	stw	r27, VCPU_GPR(R27)(r4)
	stw	r28, VCPU_GPR(R28)(r4)
	stw	r29, VCPU_GPR(R29)(r4)
	stw	r30, VCPU_GPR(R30)(r4)
	stw	r31, VCPU_GPR(R31)(r4)
..skip_inst_copy:

	/* Also grab DEAR and ESR before the host can clobber them. */
	andi.	r7, r6, NEED_DEAR_MASK
	beq	..skip_dear
	mfspr	r9, SPRN_DEAR
	stw	r9, VCPU_FAULT_DEAR(r4)
..skip_dear:

	andi.	r7, r6, NEED_ESR_MASK
	beq	..skip_esr
	mfspr	r9, SPRN_ESR
	stw	r9, VCPU_FAULT_ESR(r4)
..skip_esr:

	/* Save remaining volatile guest register state to vcpu. */
	stw	r0, VCPU_GPR(R0)(r4)
	stw	r1, VCPU_GPR(R1)(r4)
	stw	r2, VCPU_GPR(R2)(r4)
	stw	r10, VCPU_GPR(R10)(r4)
	stw	r11, VCPU_GPR(R11)(r4)
	stw	r12, VCPU_GPR(R12)(r4)
	stw	r13, VCPU_GPR(R13)(r4)
	stw	r14, VCPU_GPR(R14)(r4) /* We need a NV GPR below. */
	mflr	r3
	stw	r3, VCPU_LR(r4)
	mfxer	r3
	stw	r3, VCPU_XER(r4)

	/* Restore host stack pointer and PID before IVPR, since the host
	 * exception handlers use them. */
	lwz	r1, VCPU_HOST_STACK(r4)
	lwz	r3, VCPU_HOST_PID(r4)
	mtspr	SPRN_PID, r3

#ifdef CONFIG_FSL_BOOKE
	/* We cheat and know that Linux doesn't use PID1, which is always 0. */
	lis	r3, 0
	mtspr	SPRN_PID1, r3
#endif

	/* Restore host IVPR before re-enabling interrupts. We cheat and know
	 * that Linux IVPR is always 0xc0000000. */
	lis	r3, 0xc000
	mtspr	SPRN_IVPR, r3

	/* Switch to kernel stack and jump to handler. */
	LOAD_REG_ADDR(r3, kvmppc_handle_exit)
	mtctr	r3
	mr	r3, r4
	lwz	r2, HOST_R2(r1)
	mr	r14, r4 /* Save vcpu pointer. */
	bctrl	/* kvmppc_handle_exit() */

	/* Restore vcpu pointer and the nonvolatiles we used. */
	mr	r4, r14
	lwz	r14, VCPU_GPR(R14)(r4)

	/* Sometimes instruction emulation must restore complete GPR state. */
	andi.	r5, r3, RESUME_FLAG_NV
	beq	..skip_nv_load
	lwz	r15, VCPU_GPR(R15)(r4)
	lwz	r16, VCPU_GPR(R16)(r4)
	lwz	r17, VCPU_GPR(R17)(r4)
	lwz	r18, VCPU_GPR(R18)(r4)
	lwz	r19, VCPU_GPR(R19)(r4)
	lwz	r20, VCPU_GPR(R20)(r4)
	lwz	r21, VCPU_GPR(R21)(r4)
	lwz	r22, VCPU_GPR(R22)(r4)
	lwz	r23, VCPU_GPR(R23)(r4)
	lwz	r24, VCPU_GPR(R24)(r4)
	lwz	r25, VCPU_GPR(R25)(r4)
	lwz	r26, VCPU_GPR(R26)(r4)
	lwz	r27, VCPU_GPR(R27)(r4)
	lwz	r28, VCPU_GPR(R28)(r4)
	lwz	r29, VCPU_GPR(R29)(r4)
	lwz	r30, VCPU_GPR(R30)(r4)
	lwz	r31, VCPU_GPR(R31)(r4)
..skip_nv_load:

	/* Should we return to the guest? */
	andi.	r5, r3, RESUME_FLAG_HOST
	beq	lightweight_exit

	srawi	r3, r3, 2 /* Shift -ERR back down. */
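	/* kvmppc_handle_exit() packs a -errno above the two RESUME flag
	 * bits (RESUME_FLAG_NV, RESUME_FLAG_HOST); the shift above recovers
	 * the errno for the caller. */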

heavyweight_exit:
	/* Not returning to guest. */

#ifdef CONFIG_SPE
	/* save guest SPEFSCR and load host SPEFSCR */
	mfspr	r9, SPRN_SPEFSCR
	stw	r9, VCPU_SPEFSCR(r4)
	lwz	r9, VCPU_HOST_SPEFSCR(r4)
	mtspr	SPRN_SPEFSCR, r9
#endif

	/* We already saved guest volatile register state; now save the
	 * non-volatiles. */
	stw	r15, VCPU_GPR(R15)(r4)
	stw	r16, VCPU_GPR(R16)(r4)
	stw	r17, VCPU_GPR(R17)(r4)
	stw	r18, VCPU_GPR(R18)(r4)
	stw	r19, VCPU_GPR(R19)(r4)
	stw	r20, VCPU_GPR(R20)(r4)
	stw	r21, VCPU_GPR(R21)(r4)
	stw	r22, VCPU_GPR(R22)(r4)
	stw	r23, VCPU_GPR(R23)(r4)
	stw	r24, VCPU_GPR(R24)(r4)
	stw	r25, VCPU_GPR(R25)(r4)
	stw	r26, VCPU_GPR(R26)(r4)
	stw	r27, VCPU_GPR(R27)(r4)
	stw	r28, VCPU_GPR(R28)(r4)
	stw	r29, VCPU_GPR(R29)(r4)
	stw	r30, VCPU_GPR(R30)(r4)
	stw	r31, VCPU_GPR(R31)(r4)

	/* Load host non-volatile register state from host stack. */
	lwz	r14, HOST_NV_GPR(R14)(r1)
	lwz	r15, HOST_NV_GPR(R15)(r1)
	lwz	r16, HOST_NV_GPR(R16)(r1)
	lwz	r17, HOST_NV_GPR(R17)(r1)
	lwz	r18, HOST_NV_GPR(R18)(r1)
	lwz	r19, HOST_NV_GPR(R19)(r1)
	lwz	r20, HOST_NV_GPR(R20)(r1)
	lwz	r21, HOST_NV_GPR(R21)(r1)
	lwz	r22, HOST_NV_GPR(R22)(r1)
	lwz	r23, HOST_NV_GPR(R23)(r1)
	lwz	r24, HOST_NV_GPR(R24)(r1)
	lwz	r25, HOST_NV_GPR(R25)(r1)
	lwz	r26, HOST_NV_GPR(R26)(r1)
	lwz	r27, HOST_NV_GPR(R27)(r1)
	lwz	r28, HOST_NV_GPR(R28)(r1)
	lwz	r29, HOST_NV_GPR(R29)(r1)
	lwz	r30, HOST_NV_GPR(R30)(r1)
	lwz	r31, HOST_NV_GPR(R31)(r1)

	/* Return to kvm_vcpu_run(). */
	lwz	r4, HOST_STACK_LR(r1)
	lwz	r5, HOST_CR(r1)
	addi	r1, r1, HOST_STACK_SIZE
	mtlr	r4
	mtcr	r5
	/* r3 still contains the return code from kvmppc_handle_exit(). */
	blr
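
/*
 * Entry from the host: set up a stack frame, save the host's non-volatile
 * state, load the guest's non-volatiles, then fall through to
 * lightweight_exit to enter the guest.
 */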
/* Registers:
 *  r3: vcpu pointer
 */
_GLOBAL(__kvmppc_vcpu_run)
	stwu	r1, -HOST_STACK_SIZE(r1)
	stw	r1, VCPU_HOST_STACK(r3)	/* Save stack pointer to vcpu. */

	/* Save host state to stack. */
	mr	r4, r3
	mflr	r3
	stw	r3, HOST_STACK_LR(r1)
	mfcr	r5
	stw	r5, HOST_CR(r1)

	/* Save host non-volatile register state to stack. */
	stw	r14, HOST_NV_GPR(R14)(r1)
	stw	r15, HOST_NV_GPR(R15)(r1)
	stw	r16, HOST_NV_GPR(R16)(r1)
	stw	r17, HOST_NV_GPR(R17)(r1)
	stw	r18, HOST_NV_GPR(R18)(r1)
	stw	r19, HOST_NV_GPR(R19)(r1)
	stw	r20, HOST_NV_GPR(R20)(r1)
	stw	r21, HOST_NV_GPR(R21)(r1)
	stw	r22, HOST_NV_GPR(R22)(r1)
	stw	r23, HOST_NV_GPR(R23)(r1)
	stw	r24, HOST_NV_GPR(R24)(r1)
	stw	r25, HOST_NV_GPR(R25)(r1)
	stw	r26, HOST_NV_GPR(R26)(r1)
	stw	r27, HOST_NV_GPR(R27)(r1)
	stw	r28, HOST_NV_GPR(R28)(r1)
	stw	r29, HOST_NV_GPR(R29)(r1)
	stw	r30, HOST_NV_GPR(R30)(r1)
	stw	r31, HOST_NV_GPR(R31)(r1)

	/* Load guest non-volatiles. */
	lwz	r14, VCPU_GPR(R14)(r4)
	lwz	r15, VCPU_GPR(R15)(r4)
	lwz	r16, VCPU_GPR(R16)(r4)
	lwz	r17, VCPU_GPR(R17)(r4)
	lwz	r18, VCPU_GPR(R18)(r4)
	lwz	r19, VCPU_GPR(R19)(r4)
	lwz	r20, VCPU_GPR(R20)(r4)
	lwz	r21, VCPU_GPR(R21)(r4)
	lwz	r22, VCPU_GPR(R22)(r4)
	lwz	r23, VCPU_GPR(R23)(r4)
	lwz	r24, VCPU_GPR(R24)(r4)
	lwz	r25, VCPU_GPR(R25)(r4)
	lwz	r26, VCPU_GPR(R26)(r4)
	lwz	r27, VCPU_GPR(R27)(r4)
	lwz	r28, VCPU_GPR(R28)(r4)
	lwz	r29, VCPU_GPR(R29)(r4)
	lwz	r30, VCPU_GPR(R30)(r4)
	lwz	r31, VCPU_GPR(R31)(r4)

#ifdef CONFIG_SPE
	/* save host SPEFSCR and load guest SPEFSCR */
	mfspr	r3, SPRN_SPEFSCR
	stw	r3, VCPU_HOST_SPEFSCR(r4)
	lwz	r3, VCPU_SPEFSCR(r4)
	mtspr	SPRN_SPEFSCR, r3
#endif
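
/* Reached from kvmppc_resume_host on the fast path (exit handled without
 * returning to the host) and by fall-through from __kvmppc_vcpu_run: load
 * the remaining guest context and rfi into the guest. */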
lightweight_exit:
	stw	r2, HOST_R2(r1)

	mfspr	r3, SPRN_PID
	stw	r3, VCPU_HOST_PID(r4)
	lwz	r3, VCPU_SHADOW_PID(r4)
	mtspr	SPRN_PID, r3

#ifdef CONFIG_FSL_BOOKE
	lwz	r3, VCPU_SHADOW_PID1(r4)
	mtspr	SPRN_PID1, r3
#endif

	/* Load some guest volatiles. */
	lwz	r0, VCPU_GPR(R0)(r4)
	lwz	r2, VCPU_GPR(R2)(r4)
	lwz	r9, VCPU_GPR(R9)(r4)
	lwz	r10, VCPU_GPR(R10)(r4)
	lwz	r11, VCPU_GPR(R11)(r4)
	lwz	r12, VCPU_GPR(R12)(r4)
	lwz	r13, VCPU_GPR(R13)(r4)
	lwz	r3, VCPU_LR(r4)
	mtlr	r3
	lwz	r3, VCPU_XER(r4)
	mtxer	r3

	/* Switch the IVPR. XXX If we take a TLB miss after this we're screwed,
	 * so how do we make sure vcpu won't fault? */
	lis	r8, kvmppc_booke_handlers@ha
	lwz	r8, kvmppc_booke_handlers@l(r8)
	mtspr	SPRN_IVPR, r8

	lwz	r5, VCPU_SHARED(r4)

	/* Can't switch the stack pointer until after IVPR is switched,
	 * because host interrupt handlers would get confused. */
	lwz	r1, VCPU_GPR(R1)(r4)

	/*
	 * Host interrupt handlers may have clobbered these
	 * guest-readable SPRGs, or the guest kernel may have
	 * written directly to the shared area, so we
	 * need to reload them here with the guest's values.
	 */
	PPC_LD(r3, VCPU_SHARED_SPRG4, r5)
	mtspr	SPRN_SPRG4W, r3
	PPC_LD(r3, VCPU_SHARED_SPRG5, r5)
	mtspr	SPRN_SPRG5W, r3
	PPC_LD(r3, VCPU_SHARED_SPRG6, r5)
	mtspr	SPRN_SPRG6W, r3
	PPC_LD(r3, VCPU_SHARED_SPRG7, r5)
	mtspr	SPRN_SPRG7W, r3

#ifdef CONFIG_KVM_EXIT_TIMING
	/* save enter time */
1:
	mfspr	r6, SPRN_TBRU
	mfspr	r7, SPRN_TBRL
	mfspr	r8, SPRN_TBRU
	cmpw	r8, r6
	bne	1b
	stw	r7, VCPU_TIMING_LAST_ENTER_TBL(r4)
	stw	r8, VCPU_TIMING_LAST_ENTER_TBU(r4)
#endif

	/* Finish loading guest volatiles and jump to guest. */
	lwz	r3, VCPU_CTR(r4)
	lwz	r5, VCPU_CR(r4)
	lwz	r6, VCPU_PC(r4)
	lwz	r7, VCPU_SHADOW_MSR(r4)
	mtctr	r3
	mtcr	r5
	mtsrr0	r6
	mtsrr1	r7
	lwz	r5, VCPU_GPR(R5)(r4)
	lwz	r6, VCPU_GPR(R6)(r4)
	lwz	r7, VCPU_GPR(R7)(r4)
	lwz	r8, VCPU_GPR(R8)(r4)

	/* Clear any debug events which occurred since we disabled MSR[DE].
	 * XXX This gives us a 3-instruction window in which a breakpoint
	 * intended for guest context could fire in the host instead. */
	lis	r3, 0xffff
	ori	r3, r3, 0xffff
	mtspr	SPRN_DBSR, r3
	lwz	r3, VCPU_GPR(R3)(r4)
	lwz	r4, VCPU_GPR(R4)(r4)
	rfi
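
/*
 * Address table for the handler stubs above, one entry per IVOR in the
 * same order, terminated by the end-of-stubs address so the C setup code
 * can locate and size each stub within the copied handler block.
 */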
.data
.align	4
.globl	kvmppc_booke_handler_addr
kvmppc_booke_handler_addr:
KVM_HANDLER_ADDR BOOKE_INTERRUPT_CRITICAL
KVM_HANDLER_ADDR BOOKE_INTERRUPT_MACHINE_CHECK
KVM_HANDLER_ADDR BOOKE_INTERRUPT_DATA_STORAGE
KVM_HANDLER_ADDR BOOKE_INTERRUPT_INST_STORAGE
KVM_HANDLER_ADDR BOOKE_INTERRUPT_EXTERNAL
KVM_HANDLER_ADDR BOOKE_INTERRUPT_ALIGNMENT
KVM_HANDLER_ADDR BOOKE_INTERRUPT_PROGRAM
KVM_HANDLER_ADDR BOOKE_INTERRUPT_FP_UNAVAIL
KVM_HANDLER_ADDR BOOKE_INTERRUPT_SYSCALL
KVM_HANDLER_ADDR BOOKE_INTERRUPT_AP_UNAVAIL
KVM_HANDLER_ADDR BOOKE_INTERRUPT_DECREMENTER
KVM_HANDLER_ADDR BOOKE_INTERRUPT_FIT
KVM_HANDLER_ADDR BOOKE_INTERRUPT_WATCHDOG
KVM_HANDLER_ADDR BOOKE_INTERRUPT_DTLB_MISS
KVM_HANDLER_ADDR BOOKE_INTERRUPT_ITLB_MISS
KVM_HANDLER_ADDR BOOKE_INTERRUPT_DEBUG
KVM_HANDLER_ADDR BOOKE_INTERRUPT_SPE_UNAVAIL
KVM_HANDLER_ADDR BOOKE_INTERRUPT_SPE_FP_DATA
KVM_HANDLER_ADDR BOOKE_INTERRUPT_SPE_FP_ROUND
KVM_HANDLER_END /* Always keep this at the end. */

#ifdef CONFIG_SPE
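/*
 * Save/restore guest SPE state: the 32 64-bit EVRs plus the 64-bit
 * accumulator. r3 holds the vcpu pointer; both helpers return
 * immediately if it is NULL.
 */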
_GLOBAL(kvmppc_save_guest_spe)
	cmpi	0, r3, 0
	beqlr-
	SAVE_32EVRS(0, r4, r3, VCPU_EVR)
	evxor	evr6, evr6, evr6
	evmwumiaa evr6, evr6, evr6
	li	r4, VCPU_ACC
	evstddx	evr6, r4, r3 /* save acc */
	blr

_GLOBAL(kvmppc_load_guest_spe)
	cmpi	0, r3, 0
	beqlr-
	li	r4, VCPU_ACC
	evlddx	evr6, r4, r3
	evmra	evr6, evr6 /* load acc */
	REST_32EVRS(0, r4, r3, VCPU_EVR)
	blr
#endif