e500_emulate.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Yu Liu, <yu.liu@freescale.com>
 *
 * Description:
 * This file is derived from arch/powerpc/kvm/44x_emulate.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 */

#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/dbell.h>
#include <asm/reg_booke.h>

#include "booke.h"
#include "e500.h"

#define XOP_DCBTLS  166
#define XOP_MSGSND  206
#define XOP_MSGCLR  238
#define XOP_MFTMR   366
#define XOP_TLBIVAX 786
#define XOP_TLBSX   914
#define XOP_TLBRE   946
#define XOP_TLBWE   978
#define XOP_TLBILX  18
#define XOP_EHPRIV  270

#ifdef CONFIG_KVM_E500MC
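/*
 * Translate the doorbell message type encoded in a msgsnd/msgclr
 * parameter into the Book E interrupt priority used to queue it,
 * or -1 for message types we do not emulate.
 */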
static int dbell2prio(ulong param)
{
	int msg = param & PPC_DBELL_TYPE_MASK;
	int prio = -1;

	switch (msg) {
	case PPC_DBELL_TYPE(PPC_DBELL):
		prio = BOOKE_IRQPRIO_DBELL;
		break;
	case PPC_DBELL_TYPE(PPC_DBELL_CRIT):
		prio = BOOKE_IRQPRIO_DBELL_CRIT;
		break;
	default:
		break;
	}

	return prio;
}
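
/* msgclr: clear the pending doorbell of the requested type on this vcpu. */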
static int kvmppc_e500_emul_msgclr(struct kvm_vcpu *vcpu, int rb)
{
	ulong param = vcpu->arch.regs.gpr[rb];
	int prio = dbell2prio(param);

	if (prio < 0)
		return EMULATE_FAIL;

	clear_bit(prio, &vcpu->arch.pending_exceptions);

	return EMULATE_DONE;
}
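
/*
 * msgsnd: queue a doorbell for every vcpu whose PIR matches the
 * parameter in rb (or for all vcpus if the broadcast bit is set)
 * and kick the targets so they notice the new exception.
 */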
static int kvmppc_e500_emul_msgsnd(struct kvm_vcpu *vcpu, int rb)
{
	ulong param = vcpu->arch.regs.gpr[rb];
	int prio = dbell2prio(rb);
	int pir = param & PPC_DBELL_PIR_MASK;
	int i;
	struct kvm_vcpu *cvcpu;

	if (prio < 0)
		return EMULATE_FAIL;

	kvm_for_each_vcpu(i, cvcpu, vcpu->kvm) {
		int cpir = cvcpu->arch.shared->pir;
		if ((param & PPC_DBELL_MSG_BRDCAST) || (cpir == pir)) {
			set_bit(prio, &cvcpu->arch.pending_exceptions);
			kvm_vcpu_kick(cvcpu);
		}
	}

	return EMULATE_DONE;
}
#endif
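
/*
 * ehpriv with the debug operand is treated as a software breakpoint:
 * exit to userspace with KVM_EXIT_DEBUG and leave the guest PC
 * pointing at the trapping instruction. Other operands are not
 * emulated.
 */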
static int kvmppc_e500_emul_ehpriv(struct kvm_vcpu *vcpu,
				   unsigned int inst, int *advance)
{
	int emulated = EMULATE_DONE;

	switch (get_oc(inst)) {
	case EHPRIV_OC_DEBUG:
		vcpu->run->exit_reason = KVM_EXIT_DEBUG;
		vcpu->run->debug.arch.address = vcpu->arch.regs.nip;
		vcpu->run->debug.arch.status = 0;
		kvmppc_account_exit(vcpu, DEBUG_EXITS);
		emulated = EMULATE_EXIT_USER;
		*advance = 0;
		break;
	default:
		emulated = EMULATE_FAIL;
	}
	return emulated;
}

static int kvmppc_e500_emul_dcbtls(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	/* Always fail to lock the cache */
	vcpu_e500->l1csr0 |= L1CSR0_CUL;
	return EMULATE_DONE;
}

static int kvmppc_e500_emul_mftmr(struct kvm_vcpu *vcpu, unsigned int inst,
				  int rt)
{
	/* Expose one thread per vcpu */
	if (get_tmrn(inst) == TMRN_TMCFG0) {
		kvmppc_set_gpr(vcpu, rt,
			       1 | (1 << TMRN_TMCFG0_NATHRD_SHIFT));
		return EMULATE_DONE;
	}

	return EMULATE_FAIL;
}
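
/*
 * Emulate the e500-specific primary opcode 31 instructions; anything
 * not handled here falls back to the common Book E emulation.
 */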
int kvmppc_core_emulate_op_e500(struct kvm_vcpu *vcpu,
				unsigned int inst, int *advance)
{
	int emulated = EMULATE_DONE;
	int ra = get_ra(inst);
	int rb = get_rb(inst);
	int rt = get_rt(inst);
	gva_t ea;

	switch (get_op(inst)) {
	case 31:
		switch (get_xop(inst)) {

		case XOP_DCBTLS:
			emulated = kvmppc_e500_emul_dcbtls(vcpu);
			break;

#ifdef CONFIG_KVM_E500MC
		case XOP_MSGSND:
			emulated = kvmppc_e500_emul_msgsnd(vcpu, rb);
			break;

		case XOP_MSGCLR:
			emulated = kvmppc_e500_emul_msgclr(vcpu, rb);
			break;
#endif

		case XOP_TLBRE:
			emulated = kvmppc_e500_emul_tlbre(vcpu);
			break;

		case XOP_TLBWE:
			emulated = kvmppc_e500_emul_tlbwe(vcpu);
			break;

		case XOP_TLBSX:
			ea = kvmppc_get_ea_indexed(vcpu, ra, rb);
			emulated = kvmppc_e500_emul_tlbsx(vcpu, ea);
			break;

		case XOP_TLBILX: {
			int type = rt & 0x3;

			ea = kvmppc_get_ea_indexed(vcpu, ra, rb);
			emulated = kvmppc_e500_emul_tlbilx(vcpu, type, ea);
			break;
		}

		case XOP_TLBIVAX:
			ea = kvmppc_get_ea_indexed(vcpu, ra, rb);
			emulated = kvmppc_e500_emul_tlbivax(vcpu, ea);
			break;

		case XOP_MFTMR:
			emulated = kvmppc_e500_emul_mftmr(vcpu, inst, rt);
			break;

		case XOP_EHPRIV:
			emulated = kvmppc_e500_emul_ehpriv(vcpu, inst, advance);
			break;

		default:
			emulated = EMULATE_FAIL;
		}

		break;

	default:
		emulated = EMULATE_FAIL;
	}

	if (emulated == EMULATE_FAIL)
		emulated = kvmppc_booke_emulate_op(vcpu, inst, advance);

	return emulated;
}
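
/*
 * mtspr emulation for the SPRs handled at the e500 level; unhandled
 * SPRs are passed on to kvmppc_booke_emulate_mtspr().
 */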
int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int emulated = EMULATE_DONE;

	switch (sprn) {
#ifndef CONFIG_KVM_BOOKE_HV
	case SPRN_PID:
		kvmppc_set_pid(vcpu, spr_val);
		break;
	case SPRN_PID1:
		if (spr_val != 0)
			return EMULATE_FAIL;
		vcpu_e500->pid[1] = spr_val;
		break;
	case SPRN_PID2:
		if (spr_val != 0)
			return EMULATE_FAIL;
		vcpu_e500->pid[2] = spr_val;
		break;
	case SPRN_MAS0:
		vcpu->arch.shared->mas0 = spr_val;
		break;
	case SPRN_MAS1:
		vcpu->arch.shared->mas1 = spr_val;
		break;
	case SPRN_MAS2:
		vcpu->arch.shared->mas2 = spr_val;
		break;
	case SPRN_MAS3:
		vcpu->arch.shared->mas7_3 &= ~(u64)0xffffffff;
		vcpu->arch.shared->mas7_3 |= spr_val;
		break;
	case SPRN_MAS4:
		vcpu->arch.shared->mas4 = spr_val;
		break;
	case SPRN_MAS6:
		vcpu->arch.shared->mas6 = spr_val;
		break;
	case SPRN_MAS7:
		vcpu->arch.shared->mas7_3 &= (u64)0xffffffff;
		vcpu->arch.shared->mas7_3 |= (u64)spr_val << 32;
		break;
#endif
	case SPRN_L1CSR0:
		vcpu_e500->l1csr0 = spr_val;
		vcpu_e500->l1csr0 &= ~(L1CSR0_DCFI | L1CSR0_CLFC);
		break;
	case SPRN_L1CSR1:
		vcpu_e500->l1csr1 = spr_val;
		vcpu_e500->l1csr1 &= ~(L1CSR1_ICFI | L1CSR1_ICLFR);
		break;
	case SPRN_HID0:
		vcpu_e500->hid0 = spr_val;
		break;
	case SPRN_HID1:
		vcpu_e500->hid1 = spr_val;
		break;
	case SPRN_MMUCSR0:
		emulated = kvmppc_e500_emul_mt_mmucsr0(vcpu_e500,
				spr_val);
		break;
	case SPRN_PWRMGTCR0:
		/*
		 * Guest relies on host power management configurations
		 * Treat the request as a general store
		 */
		vcpu->arch.pwrmgtcr0 = spr_val;
		break;
	case SPRN_BUCSR:
		/*
		 * If we are here, it means that we have already flushed the
		 * branch predictor, so just return to guest.
		 */
		break;

	/* extra exceptions */
#ifdef CONFIG_SPE_POSSIBLE
	case SPRN_IVOR32:
		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL] = spr_val;
		break;
	case SPRN_IVOR33:
		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA] = spr_val;
		break;
	case SPRN_IVOR34:
		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND] = spr_val;
		break;
#endif
#ifdef CONFIG_ALTIVEC
	case SPRN_IVOR32:
		vcpu->arch.ivor[BOOKE_IRQPRIO_ALTIVEC_UNAVAIL] = spr_val;
		break;
	case SPRN_IVOR33:
		vcpu->arch.ivor[BOOKE_IRQPRIO_ALTIVEC_ASSIST] = spr_val;
		break;
#endif
	case SPRN_IVOR35:
		vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] = spr_val;
		break;
#ifdef CONFIG_KVM_BOOKE_HV
	case SPRN_IVOR36:
		vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL] = spr_val;
		break;
	case SPRN_IVOR37:
		vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT] = spr_val;
		break;
#endif
	default:
		emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, spr_val);
	}

	return emulated;
}
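
/*
 * mfspr emulation for the SPRs handled at the e500 level; unhandled
 * SPRs are passed on to kvmppc_booke_emulate_mfspr().
 */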
int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int emulated = EMULATE_DONE;

	switch (sprn) {
#ifndef CONFIG_KVM_BOOKE_HV
	case SPRN_PID:
		*spr_val = vcpu_e500->pid[0];
		break;
	case SPRN_PID1:
		*spr_val = vcpu_e500->pid[1];
		break;
	case SPRN_PID2:
		*spr_val = vcpu_e500->pid[2];
		break;
	case SPRN_MAS0:
		*spr_val = vcpu->arch.shared->mas0;
		break;
	case SPRN_MAS1:
		*spr_val = vcpu->arch.shared->mas1;
		break;
	case SPRN_MAS2:
		*spr_val = vcpu->arch.shared->mas2;
		break;
	case SPRN_MAS3:
		*spr_val = (u32)vcpu->arch.shared->mas7_3;
		break;
	case SPRN_MAS4:
		*spr_val = vcpu->arch.shared->mas4;
		break;
	case SPRN_MAS6:
		*spr_val = vcpu->arch.shared->mas6;
		break;
	case SPRN_MAS7:
		*spr_val = vcpu->arch.shared->mas7_3 >> 32;
		break;
#endif
	case SPRN_DECAR:
		*spr_val = vcpu->arch.decar;
		break;
	case SPRN_TLB0CFG:
		*spr_val = vcpu->arch.tlbcfg[0];
		break;
	case SPRN_TLB1CFG:
		*spr_val = vcpu->arch.tlbcfg[1];
		break;
	case SPRN_TLB0PS:
		if (!has_feature(vcpu, VCPU_FTR_MMU_V2))
			return EMULATE_FAIL;
		*spr_val = vcpu->arch.tlbps[0];
		break;
	case SPRN_TLB1PS:
		if (!has_feature(vcpu, VCPU_FTR_MMU_V2))
			return EMULATE_FAIL;
		*spr_val = vcpu->arch.tlbps[1];
		break;
	case SPRN_L1CSR0:
		*spr_val = vcpu_e500->l1csr0;
		break;
	case SPRN_L1CSR1:
		*spr_val = vcpu_e500->l1csr1;
		break;
	case SPRN_HID0:
		*spr_val = vcpu_e500->hid0;
		break;
	case SPRN_HID1:
		*spr_val = vcpu_e500->hid1;
		break;
	case SPRN_SVR:
		*spr_val = vcpu_e500->svr;
		break;
	case SPRN_MMUCSR0:
		*spr_val = 0;
		break;
	case SPRN_MMUCFG:
		*spr_val = vcpu->arch.mmucfg;
		break;
	case SPRN_EPTCFG:
		if (!has_feature(vcpu, VCPU_FTR_MMU_V2))
			return EMULATE_FAIL;
		/*
		 * Legacy Linux guests access EPTCFG register even if the E.PT
		 * category is disabled in the VM. Give them a chance to live.
		 */
		*spr_val = vcpu->arch.eptcfg;
		break;
	case SPRN_PWRMGTCR0:
		*spr_val = vcpu->arch.pwrmgtcr0;
		break;

	/* extra exceptions */
#ifdef CONFIG_SPE_POSSIBLE
	case SPRN_IVOR32:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL];
		break;
	case SPRN_IVOR33:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA];
		break;
	case SPRN_IVOR34:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND];
		break;
#endif
#ifdef CONFIG_ALTIVEC
	case SPRN_IVOR32:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_ALTIVEC_UNAVAIL];
		break;
	case SPRN_IVOR33:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_ALTIVEC_ASSIST];
		break;
#endif
	case SPRN_IVOR35:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR];
		break;
#ifdef CONFIG_KVM_BOOKE_HV
	case SPRN_IVOR36:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL];
		break;
	case SPRN_IVOR37:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT];
		break;
#endif
	default:
		emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, spr_val);
	}

	return emulated;
}