book3s_interrupts.S

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/asm-compat.h>

#if defined(CONFIG_PPC_BOOK3S_64)
#ifdef PPC64_ELF_ABI_v2
#define FUNC(name)		name
#else
#define FUNC(name)		GLUE(.,name)
#endif
#define GET_SHADOW_VCPU(reg)	addi	reg, r13, PACA_SVCPU

#elif defined(CONFIG_PPC_BOOK3S_32)
#define FUNC(name)		name
#define GET_SHADOW_VCPU(reg)	lwz	reg, (THREAD + THREAD_KVM_SVCPU)(r2)

#endif /* CONFIG_PPC_BOOK3S_64 */
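
/*
 * With the ELFv1 ABI a function symbol names a descriptor, so calls must
 * target the dot-symbol (the actual entry point), which GLUE(.,name)
 * constructs; ELFv2 needs no such indirection. GET_SHADOW_VCPU reflects
 * where the shadow vcpu lives: in the PACA on 64-bit (r13 is the PACA
 * pointer), and behind the current task's thread struct (r2) on 32-bit.
 */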

#define VCPU_LOAD_NVGPRS(vcpu) \
	PPC_LL	r14, VCPU_GPR(R14)(vcpu); \
	PPC_LL	r15, VCPU_GPR(R15)(vcpu); \
	PPC_LL	r16, VCPU_GPR(R16)(vcpu); \
	PPC_LL	r17, VCPU_GPR(R17)(vcpu); \
	PPC_LL	r18, VCPU_GPR(R18)(vcpu); \
	PPC_LL	r19, VCPU_GPR(R19)(vcpu); \
	PPC_LL	r20, VCPU_GPR(R20)(vcpu); \
	PPC_LL	r21, VCPU_GPR(R21)(vcpu); \
	PPC_LL	r22, VCPU_GPR(R22)(vcpu); \
	PPC_LL	r23, VCPU_GPR(R23)(vcpu); \
	PPC_LL	r24, VCPU_GPR(R24)(vcpu); \
	PPC_LL	r25, VCPU_GPR(R25)(vcpu); \
	PPC_LL	r26, VCPU_GPR(R26)(vcpu); \
	PPC_LL	r27, VCPU_GPR(R27)(vcpu); \
	PPC_LL	r28, VCPU_GPR(R28)(vcpu); \
	PPC_LL	r29, VCPU_GPR(R29)(vcpu); \
	PPC_LL	r30, VCPU_GPR(R30)(vcpu); \
	PPC_LL	r31, VCPU_GPR(R31)(vcpu)
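
/*
 * VCPU_LOAD_NVGPRS pulls the guest's non-volatile GPRs (r14 - r31) out of
 * the vcpu struct. PPC_LL expands to ld on 64-bit and lwz on 32-bit, so a
 * single macro serves both configurations.
 */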

/*****************************************************************************
 *                                                                           *
 *     Guest entry / exit code that is in kernel module memory (highmem)     *
 *                                                                           *
 ****************************************************************************/

/* Registers:
 *  r3: vcpu pointer
 */
_GLOBAL(__kvmppc_vcpu_run)

kvm_start_entry:
	/* Write correct stack frame */
	mflr	r0
	PPC_STL	r0, PPC_LR_STKOFF(r1)

	/* Save host state to the stack */
	PPC_STLU r1, -SWITCH_FRAME_SIZE(r1)

	/* Save r3 (vcpu) */
	SAVE_GPR(3, r1)

	/* Save non-volatile registers (r14 - r31) */
	SAVE_NVGPRS(r1)

	/* Save CR */
	mfcr	r14
	stw	r14, _CCR(r1)

	/* Save LR */
	PPC_STL	r0, _LINK(r1)

	/* Load non-volatile guest state from the vcpu */
	VCPU_LOAD_NVGPRS(r3)
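
	/*
	 * Everything above runs once per __kvmppc_vcpu_run call: it builds
	 * the stack frame and saves the host's non-volatile state. The
	 * resume loops below re-enter at kvm_start_lightweight instead,
	 * since that state is still live on the stack.
	 */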

kvm_start_lightweight:
	/* Copy registers into shadow vcpu so we can access them in real mode */
	bl	FUNC(kvmppc_copy_to_svcpu)
	nop
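	/*
	 * The nop above is the usual PPC64 call slot: the linker can patch
	 * it into a TOC-pointer restore if the branch ends up going through
	 * a trampoline.
	 */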
	REST_GPR(3, r1)		/* r3 (vcpu) is volatile, reload it from the stack */

#ifdef CONFIG_PPC_BOOK3S_64
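	/*
	 * Bit 0 of hflags is the dcbz32 flag: it tells the real-mode entry
	 * code to restore HID5 so that dcbz clears 32 bytes instead of a
	 * full cache line, for guests that assume 32-byte cache lines
	 * (e.g. a 32-bit guest on a PPC970 host).
	 */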
	/* Get the dcbz32 flag */
	PPC_LL	r0, VCPU_HFLAGS(r3)
	rldicl	r0, r0, 0, 63		/* r0 &= 1 */
	stb	r0, HSTATE_RESTORE_HID5(r13)

	/* Load up guest SPRG3 value, since it's user readable */
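	/*
	 * The shared page is kept in the guest's byte order; when that
	 * differs from the endianness the host kernel was built for, the
	 * value must be fetched with a byte-reversed load (ldbrx) below.
	 */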
	lbz	r4, VCPU_SHAREDBE(r3)
	cmpwi	r4, 0
	ld	r5, VCPU_SHARED(r3)
	beq	sprg3_little_endian
sprg3_big_endian:
#ifdef __BIG_ENDIAN__
	ld	r4, VCPU_SHARED_SPRG3(r5)
#else
	addi	r5, r5, VCPU_SHARED_SPRG3
	ldbrx	r4, 0, r5
#endif
	b	after_sprg3_load
sprg3_little_endian:
#ifdef __LITTLE_ENDIAN__
	ld	r4, VCPU_SHARED_SPRG3(r5)
#else
	addi	r5, r5, VCPU_SHARED_SPRG3
	ldbrx	r4, 0, r5
#endif

after_sprg3_load:
	mtspr	SPRN_SPRG3, r4
#endif /* CONFIG_PPC_BOOK3S_64 */

	PPC_LL	r4, VCPU_SHADOW_MSR(r3)	/* get shadow_msr */

	/* Jump to segment patching handler and into our guest */
	bl	FUNC(kvmppc_entry_trampoline)
	nop

/*
 * This is the handler in module memory. It gets jumped at from the
 * lowmem trampoline code, so it's basically the guest exit code.
 */

	/*
	 * Register usage at this point:
	 *
	 * R1       = host R1
	 * R2       = host R2
	 * R12      = exit handler id
	 * R13      = PACA
	 * SVCPU.*  = guest *
	 * MSR.EE   = 1
	 */

	PPC_LL	r3, GPR3(r1)		/* vcpu pointer */

	/*
	 * kvmppc_copy_from_svcpu can clobber volatile registers, so save
	 * the exit handler id to the vcpu and restore it from there later.
	 */
	stw	r12, VCPU_TRAP(r3)

	/* Transfer reg values from shadow vcpu back to vcpu struct */
	bl	FUNC(kvmppc_copy_from_svcpu)
	nop

#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * Reload kernel SPRG3 value.
	 * No need to save guest value as usermode can't modify SPRG3.
	 */
	ld	r3, PACA_SPRG_VDSO(r13)
	mtspr	SPRN_SPRG_VDSO_WRITE, r3
#endif /* CONFIG_PPC_BOOK3S_64 */

	/* R7 = vcpu */
	PPC_LL	r7, GPR3(r1)

	PPC_STL	r14, VCPU_GPR(R14)(r7)
	PPC_STL	r15, VCPU_GPR(R15)(r7)
	PPC_STL	r16, VCPU_GPR(R16)(r7)
	PPC_STL	r17, VCPU_GPR(R17)(r7)
	PPC_STL	r18, VCPU_GPR(R18)(r7)
	PPC_STL	r19, VCPU_GPR(R19)(r7)
	PPC_STL	r20, VCPU_GPR(R20)(r7)
	PPC_STL	r21, VCPU_GPR(R21)(r7)
	PPC_STL	r22, VCPU_GPR(R22)(r7)
	PPC_STL	r23, VCPU_GPR(R23)(r7)
	PPC_STL	r24, VCPU_GPR(R24)(r7)
	PPC_STL	r25, VCPU_GPR(R25)(r7)
	PPC_STL	r26, VCPU_GPR(R26)(r7)
	PPC_STL	r27, VCPU_GPR(R27)(r7)
	PPC_STL	r28, VCPU_GPR(R28)(r7)
	PPC_STL	r29, VCPU_GPR(R29)(r7)
	PPC_STL	r30, VCPU_GPR(R30)(r7)
	PPC_STL	r31, VCPU_GPR(R31)(r7)
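
	/*
	 * The stores above flush the guest's non-volatile GPRs, which still
	 * sit in the hardware registers at this point, into the vcpu struct
	 * so the exit handler (and, on a heavyweight exit, userspace) sees
	 * current guest state.
	 */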

	/* Pass the exit number as 2nd argument to kvmppc_handle_exit_pr */
	lwz	r4, VCPU_TRAP(r7)

	/* Restore r3 (vcpu) */
	REST_GPR(3, r1)
	bl	FUNC(kvmppc_handle_exit_pr)

	/* If RESUME_GUEST, get back in the loop */
	cmpwi	r3, RESUME_GUEST
	beq	kvm_loop_lightweight

	cmpwi	r3, RESUME_GUEST_NV
	beq	kvm_loop_heavyweight
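
	/*
	 * RESUME_GUEST_NV is RESUME_GUEST with the non-volatile flag set:
	 * the handler changed r14 - r31 in the vcpu struct, so the
	 * heavyweight loop reloads them before re-entering the guest. Any
	 * other return value falls through and exits back to the host.
	 */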

kvm_exit_loop:

	PPC_LL	r4, _LINK(r1)
	mtlr	r4

	lwz	r14, _CCR(r1)
	mtcr	r14

	/* Restore non-volatile host registers (r14 - r31) */
	REST_NVGPRS(r1)

	addi	r1, r1, SWITCH_FRAME_SIZE
	blr

kvm_loop_heavyweight:

	PPC_LL	r4, _LINK(r1)
	PPC_STL	r4, (PPC_LR_STKOFF + SWITCH_FRAME_SIZE)(r1)

	/* Load vcpu */
	REST_GPR(3, r1)

	/* Load non-volatile guest state from the vcpu */
	VCPU_LOAD_NVGPRS(r3)

	/* Jump back into the beginning of this function */
	b	kvm_start_lightweight

kvm_loop_lightweight:

	/* We'll need the vcpu pointer */
	REST_GPR(3, r1)

	/* Jump back into the beginning of this function */
	b	kvm_start_lightweight