book3s_hv_tm.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2017 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kvm_host.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/kvm_book3s_64.h>
#include <asm/reg.h>
#include <asm/ppc-opcode.h>

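/*
 * Record a transaction failure in the vcpu's shadow TM state: point
 * TFIAR at the failing instruction and build a TEXASR value carrying
 * the supplied failure cause.
 */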
static void emulate_tx_failure(struct kvm_vcpu *vcpu, u64 failure_cause)
{
	u64 texasr, tfiar;
	u64 msr = vcpu->arch.shregs.msr;

	tfiar = vcpu->arch.regs.nip & ~0x3ull;
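	/* The failure code occupies the top byte of TEXASR (ISA bits 0:7) */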
	texasr = (failure_cause << 56) | TEXASR_ABORT | TEXASR_FS | TEXASR_EXACT;
	if (MSR_TM_SUSPENDED(vcpu->arch.shregs.msr))
		texasr |= TEXASR_SUSP;
	if (msr & MSR_PR) {
		texasr |= TEXASR_PR;
		tfiar |= 1;
	}
	vcpu->arch.tfiar = tfiar;
	/* Preserve ROT and TL fields of existing TEXASR */
	vcpu->arch.texasr = (vcpu->arch.texasr & 0x3ffffff) | texasr;
}

/*
 * This gets called on a softpatch interrupt on POWER9 DD2.2 processors.
 * We expect to find a TM-related instruction to be emulated.  The
 * instruction image is in vcpu->arch.emul_inst.  If the guest was in
 * TM suspended or transactional state, the checkpointed state has been
 * reclaimed and is in the vcpu struct.  The CPU is in virtual mode in
 * host context.
 */
int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
{
	u32 instr = vcpu->arch.emul_inst;
	u64 msr = vcpu->arch.shregs.msr;
	u64 newmsr, bescr;
	int ra, rs;

	/*
	 * rfid, rfebb, and mtmsrd encode bit 31 = 0, since it is a reserved
	 * bit in these instructions, so masking bit 31 out does not change
	 * them.  For treclaim., tsr., and trechkpt., bit 31 = 0 makes the
	 * instruction a per-ISA invalid form; however the P9 UM, in section
	 * 4.6.10 "Book II Invalid Forms", states specifically that ignoring
	 * bit 31 is an acceptable way to handle these invalid forms.
	 * Moreover, for emulation purposes both forms (with and without
	 * bit 31 set) can generate a softpatch interrupt.  Hence both forms
	 * are handled below for these instructions so they behave the
	 * same way.
	 */
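	/*
	 * PO_XOP_OPCODE_MASK keeps the primary and extended opcode fields
	 * and clears bit 31, implementing the masking described above.
	 */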
	switch (instr & PO_XOP_OPCODE_MASK) {
	case PPC_INST_RFID:
		/* XXX do we need to check for PR=0 here? */
		newmsr = vcpu->arch.shregs.srr1;
		/* should only get here for Sx -> T1 transition */
		WARN_ON_ONCE(!(MSR_TM_SUSPENDED(msr) &&
			       MSR_TM_TRANSACTIONAL(newmsr) &&
			       (newmsr & MSR_TM)));
		newmsr = sanitize_msr(newmsr);
		vcpu->arch.shregs.msr = newmsr;
		vcpu->arch.cfar = vcpu->arch.regs.nip - 4;
		vcpu->arch.regs.nip = vcpu->arch.shregs.srr0;
		return RESUME_GUEST;

	case PPC_INST_RFEBB:
		if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206)) {
			/* generate an illegal instruction interrupt */
			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
			return RESUME_GUEST;
		}
		/* check EBB facility is available */
		if (!(vcpu->arch.hfscr & HFSCR_EBB)) {
			/* generate an illegal instruction interrupt */
			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
			return RESUME_GUEST;
		}
		if ((msr & MSR_PR) && !(vcpu->arch.fscr & FSCR_EBB)) {
			/* generate a facility unavailable interrupt */
			vcpu->arch.fscr = (vcpu->arch.fscr & ~(0xffull << 56)) |
				((u64)FSCR_EBB_LG << 56);
			kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FAC_UNAVAIL);
			return RESUME_GUEST;
		}
		bescr = vcpu->arch.bescr;
		/* expect to see a S->T transition requested */
		WARN_ON_ONCE(!(MSR_TM_SUSPENDED(msr) &&
			       ((bescr >> 30) & 3) == 2));
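		/*
		 * Bits 30:31 of the BESCR value hold the ISA's TS field,
		 * i.e. the transaction state rfebb returns to; 0b10 requests
		 * transactional state.  rfebb's S operand (the 1 << 11 bit
		 * of the image) supplies the new value of BESCR[GE].
		 */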
		bescr &= ~BESCR_GE;
		if (instr & (1 << 11))
			bescr |= BESCR_GE;
		vcpu->arch.bescr = bescr;
		msr = (msr & ~MSR_TS_MASK) | MSR_TS_T;
		vcpu->arch.shregs.msr = msr;
		vcpu->arch.cfar = vcpu->arch.regs.nip - 4;
		vcpu->arch.regs.nip = vcpu->arch.ebbrr;
		return RESUME_GUEST;

	case PPC_INST_MTMSRD:
		/* XXX do we need to check for PR=0 here? */
		rs = (instr >> 21) & 0x1f;
		newmsr = kvmppc_get_gpr(vcpu, rs);
		/* check this is a Sx -> T1 transition */
		WARN_ON_ONCE(!(MSR_TM_SUSPENDED(msr) &&
			       MSR_TM_TRANSACTIONAL(newmsr) &&
			       (newmsr & MSR_TM)));
		/* mtmsrd doesn't change LE */
		newmsr = (newmsr & ~MSR_LE) | (msr & MSR_LE);
		newmsr = sanitize_msr(newmsr);
		vcpu->arch.shregs.msr = newmsr;
		return RESUME_GUEST;

	/* ignore bit 31, see comment above */
	case (PPC_INST_TSR & PO_XOP_OPCODE_MASK):
		/* check for PR=1 and arch 2.06 bit set in PCR */
		if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206)) {
			/* generate an illegal instruction interrupt */
			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
			return RESUME_GUEST;
		}
		/* check for TM disabled in the HFSCR or MSR */
		if (!(vcpu->arch.hfscr & HFSCR_TM)) {
			/* generate an illegal instruction interrupt */
			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
			return RESUME_GUEST;
		}
		if (!(msr & MSR_TM)) {
			/* generate a facility unavailable interrupt */
			vcpu->arch.fscr = (vcpu->arch.fscr & ~(0xffull << 56)) |
				((u64)FSCR_TM_LG << 56);
			kvmppc_book3s_queue_irqprio(vcpu,
					BOOK3S_INTERRUPT_FAC_UNAVAIL);
			return RESUME_GUEST;
		}
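		/*
		 * Per the ISA, tsr. sets CR0 to 0 || MSR[TS] || 0, so CR0
		 * reads 0b0010 if the transaction was suspended and 0b0100
		 * if it was transactional.
		 */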
		/* Set CR0 to indicate previous transactional state */
		vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
			(((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 29);
		/* L=1 => tresume, L=0 => tsuspend */
		if (instr & (1 << 21)) {
			if (MSR_TM_SUSPENDED(msr))
				msr = (msr & ~MSR_TS_MASK) | MSR_TS_T;
		} else {
			if (MSR_TM_TRANSACTIONAL(msr))
				msr = (msr & ~MSR_TS_MASK) | MSR_TS_S;
		}
		vcpu->arch.shregs.msr = msr;
		return RESUME_GUEST;

	/* ignore bit 31, see comment above */
	case (PPC_INST_TRECLAIM & PO_XOP_OPCODE_MASK):
		/* check for TM disabled in the HFSCR or MSR */
		if (!(vcpu->arch.hfscr & HFSCR_TM)) {
			/* generate an illegal instruction interrupt */
			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
			return RESUME_GUEST;
		}
		if (!(msr & MSR_TM)) {
			/* generate a facility unavailable interrupt */
			vcpu->arch.fscr = (vcpu->arch.fscr & ~(0xffull << 56)) |
				((u64)FSCR_TM_LG << 56);
			kvmppc_book3s_queue_irqprio(vcpu,
					BOOK3S_INTERRUPT_FAC_UNAVAIL);
			return RESUME_GUEST;
		}
		/* If no transaction active, generate TM bad thing */
		if (!MSR_TM_ACTIVE(msr)) {
			kvmppc_core_queue_program(vcpu, SRR1_PROGTM);
			return RESUME_GUEST;
		}
		/* If failure was not previously recorded, recompute TEXASR */
		if (!(vcpu->arch.orig_texasr & TEXASR_FS)) {
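			/*
			 * The failure cause is the low byte of GPR[RA],
			 * or 0 when the RA field is 0.
			 */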
			ra = (instr >> 16) & 0x1f;
			if (ra)
				ra = kvmppc_get_gpr(vcpu, ra) & 0xff;
			emulate_tx_failure(vcpu, ra);
		}
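		/*
		 * treclaim. makes the checkpointed (pre-transaction)
		 * register state current, so pull it out of the vcpu struct.
		 */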
		copy_from_checkpoint(vcpu);

		/* Set CR0 to indicate previous transactional state */
		vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
			(((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 29);
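		/* treclaim. leaves the thread in non-transactional state */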
		vcpu->arch.shregs.msr &= ~MSR_TS_MASK;
		return RESUME_GUEST;

	/* ignore bit 31, see comment above */
	case (PPC_INST_TRECHKPT & PO_XOP_OPCODE_MASK):
		/* XXX do we need to check for PR=0 here? */
		/* check for TM disabled in the HFSCR or MSR */
		if (!(vcpu->arch.hfscr & HFSCR_TM)) {
			/* generate an illegal instruction interrupt */
			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
			return RESUME_GUEST;
		}
		if (!(msr & MSR_TM)) {
			/* generate a facility unavailable interrupt */
			vcpu->arch.fscr = (vcpu->arch.fscr & ~(0xffull << 56)) |
				((u64)FSCR_TM_LG << 56);
			kvmppc_book3s_queue_irqprio(vcpu,
					BOOK3S_INTERRUPT_FAC_UNAVAIL);
			return RESUME_GUEST;
		}
		/* If transaction active or TEXASR[FS] = 0, bad thing */
		if (MSR_TM_ACTIVE(msr) || !(vcpu->arch.texasr & TEXASR_FS)) {
			kvmppc_core_queue_program(vcpu, SRR1_PROGTM);
			return RESUME_GUEST;
		}
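		/*
		 * trechkpt. establishes a new checkpoint from the current
		 * register values, so copy them into the checkpointed
		 * fields of the vcpu struct.
		 */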
		copy_to_checkpoint(vcpu);

		/* Set CR0 to indicate previous transactional state */
		vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
			(((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 29);
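		/* trechkpt. leaves the thread in suspended state */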
		vcpu->arch.shregs.msr = msr | MSR_TS_S;
		return RESUME_GUEST;
	}

	/* What should we do here? We didn't recognize the instruction */
	kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
	pr_warn_ratelimited("Unrecognized TM-related instruction %#x for emulation\n", instr);

	return RESUME_GUEST;
}