emulate_loadstore.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright IBM Corp. 2007
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <linux/jiffies.h>
#include <linux/hrtimer.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm_host.h>
#include <linux/clockchips.h>

#include <asm/reg.h>
#include <asm/time.h>
#include <asm/byteorder.h>
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/ppc-opcode.h>
#include <asm/sstep.h>
#include "timing.h"
#include "trace.h"
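
/*
 * Facility availability checks: if the guest currently has the relevant
 * MSR facility bit clear, queue the matching "unavailable" interrupt and
 * let the guest's own handler run instead of emulating the access here.
 */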
#ifdef CONFIG_PPC_FPU
static bool kvmppc_check_fp_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_FP)) {
		kvmppc_core_queue_fpunavail(vcpu);
		return true;
	}

	return false;
}
#endif /* CONFIG_PPC_FPU */

#ifdef CONFIG_VSX
static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_VSX)) {
		kvmppc_core_queue_vsx_unavail(vcpu);
		return true;
	}

	return false;
}
#endif /* CONFIG_VSX */

#ifdef CONFIG_ALTIVEC
static bool kvmppc_check_altivec_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_VEC)) {
		kvmppc_core_queue_vec_unavail(vcpu);
		return true;
	}

	return false;
}
#endif /* CONFIG_ALTIVEC */

/*
 * XXX to do:
 * lfiwax, lfiwzx
 * vector loads and stores
 *
 * Instructions that trap when used on cache-inhibited mappings
 * are not emulated here: multiple and string instructions,
 * lq/stq, and the load-reserve/store-conditional instructions.
 */
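
/*
 * Decode the faulting load/store with analyse_instr() and hand the access
 * to the generic KVM MMIO load/store helpers.  If nothing here can handle
 * the instruction, a program interrupt is queued for the guest and the PC
 * is left unchanged.
 */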
int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
{
	u32 inst;
	enum emulation_result emulated = EMULATE_FAIL;
	int advance = 1;
	struct instruction_op op;

	/* this default type might be overwritten by subcategories */
	kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

	emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst);
	if (emulated != EMULATE_DONE)
		return emulated;
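
	/* Reset the per-access MMIO emulation state before decoding anew. */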
	vcpu->arch.mmio_vsx_copy_nums = 0;
	vcpu->arch.mmio_vsx_offset = 0;
	vcpu->arch.mmio_copy_type = KVMPPC_VSX_COPY_NONE;
	vcpu->arch.mmio_sp64_extend = 0;
	vcpu->arch.mmio_sign_extend = 0;
	vcpu->arch.mmio_vmx_copy_nums = 0;
	vcpu->arch.mmio_vmx_offset = 0;
	vcpu->arch.mmio_host_swabbed = 0;

	emulated = EMULATE_FAIL;
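
	/*
	 * analyse_instr() decodes against the register state in
	 * vcpu->arch.regs, so keep its MSR in sync with the guest MSR.
	 */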
	vcpu->arch.regs.msr = vcpu->arch.shared->msr;
	if (analyse_instr(&op, &vcpu->arch.regs, ppc_inst(inst)) == 0) {
		int type = op.type & INSTR_TYPE_MASK;
		int size = GETSIZE(op.type);

		switch (type) {
		case LOAD: {
			int instr_byte_swap = op.type & BYTEREV;

			if (op.type & SIGNEXT)
				emulated = kvmppc_handle_loads(vcpu,
						op.reg, size, !instr_byte_swap);
			else
				emulated = kvmppc_handle_load(vcpu,
						op.reg, size, !instr_byte_swap);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

			break;
		}
#ifdef CONFIG_PPC_FPU
		case LOAD_FP:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;

			if (op.type & FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

			if (op.type & SIGNEXT)
				emulated = kvmppc_handle_loads(vcpu,
					KVM_MMIO_REG_FPR|op.reg, size, 1);
			else
				emulated = kvmppc_handle_load(vcpu,
					KVM_MMIO_REG_FPR|op.reg, size, 1);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

			break;
#endif
#ifdef CONFIG_ALTIVEC
		case LOAD_VMX:
			if (kvmppc_check_altivec_disabled(vcpu))
				return EMULATE_DONE;

			/* Hardware enforces alignment of VMX accesses */
			vcpu->arch.vaddr_accessed &= ~((unsigned long)size - 1);
			vcpu->arch.paddr_accessed &= ~((unsigned long)size - 1);

			if (size == 16) { /* lvx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_DWORD;
			} else if (size == 4) { /* lvewx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_WORD;
			} else if (size == 2) { /* lvehx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_HWORD;
			} else if (size == 1) { /* lvebx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_BYTE;
			} else
				break;

			vcpu->arch.mmio_vmx_offset =
				(vcpu->arch.vaddr_accessed & 0xf)/size;
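
			/*
			 * A full 16-byte lvx is performed as two 8-byte MMIO
			 * loads; the element forms use a single load of the
			 * element size.
			 */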
			if (size == 16) {
				vcpu->arch.mmio_vmx_copy_nums = 2;
				emulated = kvmppc_handle_vmx_load(vcpu,
						KVM_MMIO_REG_VMX|op.reg,
						8, 1);
			} else {
				vcpu->arch.mmio_vmx_copy_nums = 1;
				emulated = kvmppc_handle_vmx_load(vcpu,
						KVM_MMIO_REG_VMX|op.reg,
						size, 1);
			}
			break;
#endif
#ifdef CONFIG_VSX
		case LOAD_VSX: {
			int io_size_each;

			if (op.vsx_flags & VSX_CHECK_VEC) {
				if (kvmppc_check_altivec_disabled(vcpu))
					return EMULATE_DONE;
			} else {
				if (kvmppc_check_vsx_disabled(vcpu))
					return EMULATE_DONE;
			}

			if (op.vsx_flags & VSX_FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;
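
			/*
			 * Pick the VSX copy type from the element size; the
			 * VSX_SPLAT forms replicate the loaded element across
			 * the whole register.
			 */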
			if (op.element_size == 8) {
				if (op.vsx_flags & VSX_SPLAT)
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_DWORD_LOAD_DUMP;
				else
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_DWORD;
			} else if (op.element_size == 4) {
				if (op.vsx_flags & VSX_SPLAT)
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_WORD_LOAD_DUMP;
				else
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_WORD;
			} else
				break;

			if (size < op.element_size) {
				/* precision conversion case: lxsspx etc. */
				vcpu->arch.mmio_vsx_copy_nums = 1;
				io_size_each = size;
			} else { /* lxvw4x, lxvd2x etc. */
				vcpu->arch.mmio_vsx_copy_nums =
					size/op.element_size;
				io_size_each = op.element_size;
			}

			emulated = kvmppc_handle_vsx_load(vcpu,
					KVM_MMIO_REG_VSX|op.reg, io_size_each,
					1, op.type & SIGNEXT);
			break;
		}
#endif
		case STORE:
			/*
			 * If a byte reversal is needed, op.val has already
			 * been byte-reversed by analyse_instr().
			 */
			emulated = kvmppc_handle_store(vcpu, op.val, size, 1);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

			break;
#ifdef CONFIG_PPC_FPU
		case STORE_FP:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;

			/*
			 * The FP registers need to be flushed so that
			 * kvmppc_handle_store() can read the actual FP
			 * values from vcpu->arch.
			 */
			if (vcpu->kvm->arch.kvm_ops->giveup_ext)
				vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
						MSR_FP);

			if (op.type & FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

			emulated = kvmppc_handle_store(vcpu,
					VCPU_FPR(vcpu, op.reg), size, 1);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

			break;
#endif
#ifdef CONFIG_ALTIVEC
		case STORE_VMX:
			if (kvmppc_check_altivec_disabled(vcpu))
				return EMULATE_DONE;

			/* Hardware enforces alignment of VMX accesses. */
			vcpu->arch.vaddr_accessed &= ~((unsigned long)size - 1);
			vcpu->arch.paddr_accessed &= ~((unsigned long)size - 1);

			if (vcpu->kvm->arch.kvm_ops->giveup_ext)
				vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
						MSR_VEC);

			if (size == 16) { /* stvx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_DWORD;
			} else if (size == 4) { /* stvewx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_WORD;
			} else if (size == 2) { /* stvehx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_HWORD;
			} else if (size == 1) { /* stvebx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_BYTE;
			} else
				break;

			vcpu->arch.mmio_vmx_offset =
				(vcpu->arch.vaddr_accessed & 0xf)/size;
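
			/*
			 * As on the load side, a 16-byte stvx is split into
			 * two 8-byte MMIO stores.
			 */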
			if (size == 16) {
				vcpu->arch.mmio_vmx_copy_nums = 2;
				emulated = kvmppc_handle_vmx_store(vcpu,
						op.reg, 8, 1);
			} else {
				vcpu->arch.mmio_vmx_copy_nums = 1;
				emulated = kvmppc_handle_vmx_store(vcpu,
						op.reg, size, 1);
			}

			break;
#endif
#ifdef CONFIG_VSX
		case STORE_VSX: {
			int io_size_each;

			if (op.vsx_flags & VSX_CHECK_VEC) {
				if (kvmppc_check_altivec_disabled(vcpu))
					return EMULATE_DONE;
			} else {
				if (kvmppc_check_vsx_disabled(vcpu))
					return EMULATE_DONE;
			}

			if (vcpu->kvm->arch.kvm_ops->giveup_ext)
				vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
						MSR_VSX);

			if (op.vsx_flags & VSX_FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

			if (op.element_size == 8)
				vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_DWORD;
			else if (op.element_size == 4)
				vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_WORD;
			else
				break;

			if (size < op.element_size) {
				/* precision conversion case: stxsspx etc. */
				vcpu->arch.mmio_vsx_copy_nums = 1;
				io_size_each = size;
			} else { /* stxvw4x, stxvd2x etc. */
				vcpu->arch.mmio_vsx_copy_nums =
						size/op.element_size;
				io_size_each = op.element_size;
			}

			emulated = kvmppc_handle_vsx_store(vcpu,
					op.reg, io_size_each, 1);
			break;
		}
#endif
		case CACHEOP:
			/*
			 * Do nothing. The guest is performing dcbi because
			 * hardware DMA is not snooped by the dcache, but
			 * emulated DMA either goes through the dcache as
			 * normal writes, or the host kernel has handled
			 * dcache coherence.
			 */
			emulated = EMULATE_DONE;
			break;
		default:
			break;
		}
	}
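
	/*
	 * If the instruction was not handled above, don't advance the PC
	 * and queue a program interrupt for the guest.
	 */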
	if (emulated == EMULATE_FAIL) {
		advance = 0;
		kvmppc_core_queue_program(vcpu, 0);
	}

	trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);

	/* Advance past emulated instruction. */
	if (advance)
		kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);

	return emulated;
}