// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *
 * Description:
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 */

#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/xive.h>

#include "book3s.h"
#include "trace.h"

/* #define EXIT_DEBUG */

struct kvm_stats_debugfs_item debugfs_entries[] = {
	VCPU_STAT("exits", sum_exits),
	VCPU_STAT("mmio", mmio_exits),
	VCPU_STAT("sig", signal_exits),
	VCPU_STAT("sysc", syscall_exits),
	VCPU_STAT("inst_emu", emulated_inst_exits),
	VCPU_STAT("dec", dec_exits),
	VCPU_STAT("ext_intr", ext_intr_exits),
	VCPU_STAT("queue_intr", queue_intr),
	VCPU_STAT("halt_poll_success_ns", halt_poll_success_ns),
	VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns),
	VCPU_STAT("halt_wait_ns", halt_wait_ns),
	VCPU_STAT("halt_successful_poll", halt_successful_poll),
	VCPU_STAT("halt_attempted_poll", halt_attempted_poll),
	VCPU_STAT("halt_successful_wait", halt_successful_wait),
	VCPU_STAT("halt_poll_invalid", halt_poll_invalid),
	VCPU_STAT("halt_wakeup", halt_wakeup),
	VCPU_STAT("pf_storage", pf_storage),
	VCPU_STAT("sp_storage", sp_storage),
	VCPU_STAT("pf_instruc", pf_instruc),
	VCPU_STAT("sp_instruc", sp_instruc),
	VCPU_STAT("ld", ld),
	VCPU_STAT("ld_slow", ld_slow),
	VCPU_STAT("st", st),
	VCPU_STAT("st_slow", st_slow),
	VCPU_STAT("pthru_all", pthru_all),
	VCPU_STAT("pthru_host", pthru_host),
	VCPU_STAT("pthru_bad_aff", pthru_bad_aff),
	VM_STAT("largepages_2M", num_2M_pages, .mode = 0444),
	VM_STAT("largepages_1G", num_1G_pages, .mode = 0444),
	{ NULL }
};
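
/*
 * Mirror the pending-interrupt state into the guest's shared (magic)
 * page.  Only PR guests consume this paravirt hint; HV guests take
 * their interrupts directly, so there is nothing to update.
 */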
static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
					     unsigned long pending_now,
					     unsigned long old_pending)
{
	if (is_kvmppc_hv_enabled(vcpu->kvm))
		return;
	if (pending_now)
		kvmppc_set_int_pending(vcpu, 1);
	else if (old_pending)
		kvmppc_set_int_pending(vcpu, 0);
}

static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
{
	ulong crit_raw;
	ulong crit_r1;
	bool crit;

	if (is_kvmppc_hv_enabled(vcpu->kvm))
		return false;

	crit_raw = kvmppc_get_critical(vcpu);
	crit_r1 = kvmppc_get_gpr(vcpu, 1);

	/* Truncate crit indicators in 32 bit mode */
	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
		crit_raw &= 0xffffffff;
		crit_r1 &= 0xffffffff;
	}

	/* Critical section when crit == r1 */
	crit = (crit_raw == crit_r1);
	/* ... and we're in supervisor mode */
	crit = crit && !(kvmppc_get_msr(vcpu) & MSR_PR);

	return crit;
}

void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
{
	vcpu->kvm->arch.kvm_ops->inject_interrupt(vcpu, vec, flags);
}
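
/* Map an exception vector offset to its Book3S delivery priority. */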
static int kvmppc_book3s_vec2irqprio(unsigned int vec)
{
	unsigned int prio;

	switch (vec) {
	case 0x100: prio = BOOK3S_IRQPRIO_SYSTEM_RESET; break;
	case 0x200: prio = BOOK3S_IRQPRIO_MACHINE_CHECK; break;
	case 0x300: prio = BOOK3S_IRQPRIO_DATA_STORAGE; break;
	case 0x380: prio = BOOK3S_IRQPRIO_DATA_SEGMENT; break;
	case 0x400: prio = BOOK3S_IRQPRIO_INST_STORAGE; break;
	case 0x480: prio = BOOK3S_IRQPRIO_INST_SEGMENT; break;
	case 0x500: prio = BOOK3S_IRQPRIO_EXTERNAL; break;
	case 0x600: prio = BOOK3S_IRQPRIO_ALIGNMENT; break;
	case 0x700: prio = BOOK3S_IRQPRIO_PROGRAM; break;
	case 0x800: prio = BOOK3S_IRQPRIO_FP_UNAVAIL; break;
	case 0x900: prio = BOOK3S_IRQPRIO_DECREMENTER; break;
	case 0xc00: prio = BOOK3S_IRQPRIO_SYSCALL; break;
	case 0xd00: prio = BOOK3S_IRQPRIO_DEBUG; break;
	case 0xf20: prio = BOOK3S_IRQPRIO_ALTIVEC; break;
	case 0xf40: prio = BOOK3S_IRQPRIO_VSX; break;
	case 0xf60: prio = BOOK3S_IRQPRIO_FAC_UNAVAIL; break;
	default:    prio = BOOK3S_IRQPRIO_MAX; break;
	}

	return prio;
}

void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
				   unsigned int vec)
{
	unsigned long old_pending = vcpu->arch.pending_exceptions;

	clear_bit(kvmppc_book3s_vec2irqprio(vec),
		  &vcpu->arch.pending_exceptions);

	kvmppc_update_int_pending(vcpu, vcpu->arch.pending_exceptions,
				  old_pending);
}

void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
{
	vcpu->stat.queue_intr++;

	set_bit(kvmppc_book3s_vec2irqprio(vec),
		&vcpu->arch.pending_exceptions);
#ifdef EXIT_DEBUG
	printk(KERN_INFO "Queueing interrupt %x\n", vec);
#endif
}
EXPORT_SYMBOL_GPL(kvmppc_book3s_queue_irqprio);

void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu, ulong flags)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_MACHINE_CHECK, flags);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_machine_check);

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_PROGRAM, flags);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_program);

void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, 0);
}

void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_ALTIVEC, 0);
}

void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_VSX, 0);
}

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_dec);

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
	return test_bit(BOOK3S_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}
EXPORT_SYMBOL_GPL(kvmppc_core_pending_dec);

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}
EXPORT_SYMBOL_GPL(kvmppc_core_dequeue_dec);

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				struct kvm_interrupt *irq)
{
	/*
	 * This case (KVM_INTERRUPT_SET) should never actually arise for
	 * a pseries guest (because pseries guests expect their interrupt
	 * controllers to continue asserting an external interrupt request
	 * until it is acknowledged at the interrupt controller), but is
	 * included to avoid ABI breakage and potentially for other
	 * sorts of guest.
	 *
	 * There is a subtlety here: HV KVM does not test the
	 * external_oneshot flag in the code that synthesizes
	 * external interrupts for the guest just before entering
	 * the guest.  That is OK even if userspace did do a
	 * KVM_INTERRUPT_SET on a pseries guest vcpu, because the
	 * caller (kvm_vcpu_ioctl_interrupt) does a kvm_vcpu_kick()
	 * which ends up doing a smp_send_reschedule(), which will
	 * pull the guest all the way out to the host, meaning that
	 * we will call kvmppc_core_prepare_to_enter() before entering
	 * the guest again, and that will handle the external_oneshot
	 * flag correctly.
	 */
	if (irq->irq == KVM_INTERRUPT_SET)
		vcpu->arch.external_oneshot = 1;

	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
}

void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
}

void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu, ulong dar,
				    ulong flags)
{
	kvmppc_set_dar(vcpu, dar);
	kvmppc_set_dsisr(vcpu, flags);
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE, 0);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_data_storage);

void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong flags)
{
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_INST_STORAGE, flags);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_inst_storage);
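
/*
 * Attempt delivery for one pending priority: translate it back to an
 * exception vector, check that the guest can take the interrupt now
 * (MSR_EE set and not in a critical section, for external/decrementer),
 * and inject it if so.  Returns 1 if the interrupt was delivered.
 */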
static int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu,
					 unsigned int priority)
{
	int deliver = 1;
	int vec = 0;
	bool crit = kvmppc_critical_section(vcpu);

	switch (priority) {
	case BOOK3S_IRQPRIO_DECREMENTER:
		deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
		vec = BOOK3S_INTERRUPT_DECREMENTER;
		break;
	case BOOK3S_IRQPRIO_EXTERNAL:
		deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
		vec = BOOK3S_INTERRUPT_EXTERNAL;
		break;
	case BOOK3S_IRQPRIO_SYSTEM_RESET:
		vec = BOOK3S_INTERRUPT_SYSTEM_RESET;
		break;
	case BOOK3S_IRQPRIO_MACHINE_CHECK:
		vec = BOOK3S_INTERRUPT_MACHINE_CHECK;
		break;
	case BOOK3S_IRQPRIO_DATA_STORAGE:
		vec = BOOK3S_INTERRUPT_DATA_STORAGE;
		break;
	case BOOK3S_IRQPRIO_INST_STORAGE:
		vec = BOOK3S_INTERRUPT_INST_STORAGE;
		break;
	case BOOK3S_IRQPRIO_DATA_SEGMENT:
		vec = BOOK3S_INTERRUPT_DATA_SEGMENT;
		break;
	case BOOK3S_IRQPRIO_INST_SEGMENT:
		vec = BOOK3S_INTERRUPT_INST_SEGMENT;
		break;
	case BOOK3S_IRQPRIO_ALIGNMENT:
		vec = BOOK3S_INTERRUPT_ALIGNMENT;
		break;
	case BOOK3S_IRQPRIO_PROGRAM:
		vec = BOOK3S_INTERRUPT_PROGRAM;
		break;
	case BOOK3S_IRQPRIO_VSX:
		vec = BOOK3S_INTERRUPT_VSX;
		break;
	case BOOK3S_IRQPRIO_ALTIVEC:
		vec = BOOK3S_INTERRUPT_ALTIVEC;
		break;
	case BOOK3S_IRQPRIO_FP_UNAVAIL:
		vec = BOOK3S_INTERRUPT_FP_UNAVAIL;
		break;
	case BOOK3S_IRQPRIO_SYSCALL:
		vec = BOOK3S_INTERRUPT_SYSCALL;
		break;
	case BOOK3S_IRQPRIO_DEBUG:
		vec = BOOK3S_INTERRUPT_TRACE;
		break;
	case BOOK3S_IRQPRIO_PERFORMANCE_MONITOR:
		vec = BOOK3S_INTERRUPT_PERFMON;
		break;
	case BOOK3S_IRQPRIO_FAC_UNAVAIL:
		vec = BOOK3S_INTERRUPT_FAC_UNAVAIL;
		break;
	default:
		deliver = 0;
		printk(KERN_ERR "KVM: Unknown interrupt: 0x%x\n", priority);
		break;
	}

#if 0
	printk(KERN_INFO "Deliver interrupt 0x%x? %x\n", vec, deliver);
#endif

	if (deliver)
		kvmppc_inject_interrupt(vcpu, vec, 0);

	return deliver;
}

/*
 * This function determines if an irqprio should be cleared once issued.
 */
static bool clear_irqprio(struct kvm_vcpu *vcpu, unsigned int priority)
{
	switch (priority) {
	case BOOK3S_IRQPRIO_DECREMENTER:
		/* DEC interrupts get cleared by mtdec */
		return false;
	case BOOK3S_IRQPRIO_EXTERNAL:
		/*
		 * External interrupts get cleared by userspace
		 * except when set by the KVM_INTERRUPT ioctl with
		 * KVM_INTERRUPT_SET (not KVM_INTERRUPT_SET_LEVEL).
		 */
		if (vcpu->arch.external_oneshot) {
			vcpu->arch.external_oneshot = 0;
			return true;
		}
		return false;
	}

	return true;
}
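
/*
 * Deliver the highest-priority pending exception the guest can take
 * right now, then propagate the remaining pending state to the guest.
 */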
int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned long old_pending = vcpu->arch.pending_exceptions;
	unsigned int priority;

#ifdef EXIT_DEBUG
	if (vcpu->arch.pending_exceptions)
		printk(KERN_EMERG "KVM: Check pending: %lx\n", vcpu->arch.pending_exceptions);
#endif
	priority = __ffs(*pending);
	while (priority < BOOK3S_IRQPRIO_MAX) {
		if (kvmppc_book3s_irqprio_deliver(vcpu, priority) &&
		    clear_irqprio(vcpu, priority)) {
			clear_bit(priority, &vcpu->arch.pending_exceptions);
			break;
		}

		priority = find_next_bit(pending,
					 BITS_PER_BYTE * sizeof(*pending),
					 priority + 1);
	}

	/* Tell the guest about our interrupt status */
	kvmppc_update_int_pending(vcpu, *pending, old_pending);

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_core_prepare_to_enter);
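
/*
 * Translate a guest physical address to a host pfn.  Accesses to the
 * magic (shared) page are redirected to the pfn of the in-kernel
 * shared-page backing store; everything else goes through the memslots.
 */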
kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa, bool writing,
			    bool *writable)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM;
	gfn_t gfn = gpa >> PAGE_SHIFT;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF))
		mp_pa = (uint32_t)mp_pa;

	/* Magic page override */
	gpa &= ~0xFFFULL;
	if (unlikely(mp_pa) && unlikely((gpa & KVM_PAM) == mp_pa)) {
		ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
		kvm_pfn_t pfn;

		pfn = (kvm_pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT;
		get_page(pfn_to_page(pfn));
		if (writable)
			*writable = true;
		return pfn;
	}

	return gfn_to_pfn_prot(vcpu->kvm, gfn, writing, writable);
}
EXPORT_SYMBOL_GPL(kvmppc_gpa_to_pfn);
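
/*
 * Translate an effective address for an instruction or data access.
 * With relocation on, defer to the guest MMU implementation; in real
 * mode, addresses map 1:1 (masked by KVM_PAM) with full permissions.
 */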
int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, enum xlate_instdata xlid,
		 enum xlate_readwrite xlrw, struct kvmppc_pte *pte)
{
	bool data = (xlid == XLATE_DATA);
	bool iswrite = (xlrw == XLATE_WRITE);
	int relocated = (kvmppc_get_msr(vcpu) & (data ? MSR_DR : MSR_IR));
	int r;

	if (relocated) {
		r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data, iswrite);
	} else {
		pte->eaddr = eaddr;
		pte->raddr = eaddr & KVM_PAM;
		pte->vpage = VSID_REAL | eaddr >> 12;
		pte->may_read = true;
		pte->may_write = true;
		pte->may_execute = true;
		r = 0;

		if ((kvmppc_get_msr(vcpu) & (MSR_IR | MSR_DR)) == MSR_DR &&
		    !data) {
			if ((vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) &&
			    ((eaddr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS))
				pte->raddr &= ~SPLIT_HACK_MASK;
		}
	}

	return r;
}
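
/*
 * Fetch the last executed instruction: the one at the current PC, or
 * the preceding word for a system call, since SRR0 points past the sc.
 */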
int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
			  enum instruction_fetch_type type, u32 *inst)
{
	ulong pc = kvmppc_get_pc(vcpu);
	int r;

	if (type == INST_SC)
		pc -= 4;

	r = kvmppc_ld(vcpu, &pc, sizeof(u32), inst, false);
	if (r == EMULATE_DONE)
		return r;
	else
		return EMULATE_AGAIN;
}
EXPORT_SYMBOL_GPL(kvmppc_load_last_inst);

int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int ret;

	vcpu_load(vcpu);
	ret = vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
	vcpu_put(vcpu);

	return ret;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int ret;

	vcpu_load(vcpu);
	ret = vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
	vcpu_put(vcpu);

	return ret;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	regs->pc = kvmppc_get_pc(vcpu);
	regs->cr = kvmppc_get_cr(vcpu);
	regs->ctr = kvmppc_get_ctr(vcpu);
	regs->lr = kvmppc_get_lr(vcpu);
	regs->xer = kvmppc_get_xer(vcpu);
	regs->msr = kvmppc_get_msr(vcpu);
	regs->srr0 = kvmppc_get_srr0(vcpu);
	regs->srr1 = kvmppc_get_srr1(vcpu);
	regs->pid = vcpu->arch.pid;
	regs->sprg0 = kvmppc_get_sprg0(vcpu);
	regs->sprg1 = kvmppc_get_sprg1(vcpu);
	regs->sprg2 = kvmppc_get_sprg2(vcpu);
	regs->sprg3 = kvmppc_get_sprg3(vcpu);
	regs->sprg4 = kvmppc_get_sprg4(vcpu);
	regs->sprg5 = kvmppc_get_sprg5(vcpu);
	regs->sprg6 = kvmppc_get_sprg6(vcpu);
	regs->sprg7 = kvmppc_get_sprg7(vcpu);

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	kvmppc_set_pc(vcpu, regs->pc);
	kvmppc_set_cr(vcpu, regs->cr);
	kvmppc_set_ctr(vcpu, regs->ctr);
	kvmppc_set_lr(vcpu, regs->lr);
	kvmppc_set_xer(vcpu, regs->xer);
	kvmppc_set_msr(vcpu, regs->msr);
	kvmppc_set_srr0(vcpu, regs->srr0);
	kvmppc_set_srr1(vcpu, regs->srr1);
	kvmppc_set_sprg0(vcpu, regs->sprg0);
	kvmppc_set_sprg1(vcpu, regs->sprg1);
	kvmppc_set_sprg2(vcpu, regs->sprg2);
	kvmppc_set_sprg3(vcpu, regs->sprg3);
	kvmppc_set_sprg4(vcpu, regs->sprg4);
	kvmppc_set_sprg5(vcpu, regs->sprg5);
	kvmppc_set_sprg6(vcpu, regs->sprg6);
	kvmppc_set_sprg7(vcpu, regs->sprg7);

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EOPNOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EOPNOTSUPP;
}
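
/*
 * ONE_REG accessors: offer the register to the HV/PR backend first and
 * handle only the registers common to both flavours here.
 */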
int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
		       union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;

	r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, id, val);
	if (r == -EINVAL) {
		r = 0;
		switch (id) {
		case KVM_REG_PPC_DAR:
			*val = get_reg_val(id, kvmppc_get_dar(vcpu));
			break;
		case KVM_REG_PPC_DSISR:
			*val = get_reg_val(id, kvmppc_get_dsisr(vcpu));
			break;
		case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
			i = id - KVM_REG_PPC_FPR0;
			*val = get_reg_val(id, VCPU_FPR(vcpu, i));
			break;
		case KVM_REG_PPC_FPSCR:
			*val = get_reg_val(id, vcpu->arch.fp.fpscr);
			break;
#ifdef CONFIG_VSX
		case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
			if (cpu_has_feature(CPU_FTR_VSX)) {
				i = id - KVM_REG_PPC_VSR0;
				val->vsxval[0] = vcpu->arch.fp.fpr[i][0];
				val->vsxval[1] = vcpu->arch.fp.fpr[i][1];
			} else {
				r = -ENXIO;
			}
			break;
#endif /* CONFIG_VSX */
		case KVM_REG_PPC_DEBUG_INST:
			*val = get_reg_val(id, INS_TW);
			break;
#ifdef CONFIG_KVM_XICS
		case KVM_REG_PPC_ICP_STATE:
			if (!vcpu->arch.icp && !vcpu->arch.xive_vcpu) {
				r = -ENXIO;
				break;
			}
			if (xics_on_xive())
				*val = get_reg_val(id, kvmppc_xive_get_icp(vcpu));
			else
				*val = get_reg_val(id, kvmppc_xics_get_icp(vcpu));
			break;
#endif /* CONFIG_KVM_XICS */
#ifdef CONFIG_KVM_XIVE
		case KVM_REG_PPC_VP_STATE:
			if (!vcpu->arch.xive_vcpu) {
				r = -ENXIO;
				break;
			}
			if (xive_enabled())
				r = kvmppc_xive_native_get_vp(vcpu, val);
			else
				r = -ENXIO;
			break;
#endif /* CONFIG_KVM_XIVE */
		case KVM_REG_PPC_FSCR:
			*val = get_reg_val(id, vcpu->arch.fscr);
			break;
		case KVM_REG_PPC_TAR:
			*val = get_reg_val(id, vcpu->arch.tar);
			break;
		case KVM_REG_PPC_EBBHR:
			*val = get_reg_val(id, vcpu->arch.ebbhr);
			break;
		case KVM_REG_PPC_EBBRR:
			*val = get_reg_val(id, vcpu->arch.ebbrr);
			break;
		case KVM_REG_PPC_BESCR:
			*val = get_reg_val(id, vcpu->arch.bescr);
			break;
		case KVM_REG_PPC_IC:
			*val = get_reg_val(id, vcpu->arch.ic);
			break;
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}

int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
		       union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;

	r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, id, val);
	if (r == -EINVAL) {
		r = 0;
		switch (id) {
		case KVM_REG_PPC_DAR:
			kvmppc_set_dar(vcpu, set_reg_val(id, *val));
			break;
		case KVM_REG_PPC_DSISR:
			kvmppc_set_dsisr(vcpu, set_reg_val(id, *val));
			break;
		case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
			i = id - KVM_REG_PPC_FPR0;
			VCPU_FPR(vcpu, i) = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_FPSCR:
			vcpu->arch.fp.fpscr = set_reg_val(id, *val);
			break;
#ifdef CONFIG_VSX
		case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
			if (cpu_has_feature(CPU_FTR_VSX)) {
				i = id - KVM_REG_PPC_VSR0;
				vcpu->arch.fp.fpr[i][0] = val->vsxval[0];
				vcpu->arch.fp.fpr[i][1] = val->vsxval[1];
			} else {
				r = -ENXIO;
			}
			break;
#endif /* CONFIG_VSX */
#ifdef CONFIG_KVM_XICS
		case KVM_REG_PPC_ICP_STATE:
			if (!vcpu->arch.icp && !vcpu->arch.xive_vcpu) {
				r = -ENXIO;
				break;
			}
			if (xics_on_xive())
				r = kvmppc_xive_set_icp(vcpu, set_reg_val(id, *val));
			else
				r = kvmppc_xics_set_icp(vcpu, set_reg_val(id, *val));
			break;
#endif /* CONFIG_KVM_XICS */
#ifdef CONFIG_KVM_XIVE
		case KVM_REG_PPC_VP_STATE:
			if (!vcpu->arch.xive_vcpu) {
				r = -ENXIO;
				break;
			}
			if (xive_enabled())
				r = kvmppc_xive_native_set_vp(vcpu, val);
			else
				r = -ENXIO;
			break;
#endif /* CONFIG_KVM_XIVE */
		case KVM_REG_PPC_FSCR:
			vcpu->arch.fscr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_TAR:
			vcpu->arch.tar = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_EBBHR:
			vcpu->arch.ebbhr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_EBBRR:
			vcpu->arch.ebbrr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_BESCR:
			vcpu->arch.bescr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_IC:
			vcpu->arch.ic = set_reg_val(id, *val);
			break;
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}

void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
}

void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
{
	vcpu->kvm->arch.kvm_ops->set_msr(vcpu, msr);
}
EXPORT_SYMBOL_GPL(kvmppc_set_msr);

int kvmppc_vcpu_run(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.kvm_ops->vcpu_run(vcpu);
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return 0;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	vcpu_load(vcpu);
	vcpu->guest_debug = dbg->control;
	vcpu_put(vcpu);
	return 0;
}

void kvmppc_decrementer_func(struct kvm_vcpu *vcpu)
{
	kvmppc_core_queue_dec(vcpu);
	kvm_vcpu_kick(vcpu);
}

int kvmppc_core_vcpu_create(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.kvm_ops->vcpu_create(vcpu);
}

void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
}

int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.kvm_ops->check_requests(vcpu);
}

void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	return kvm->arch.kvm_ops->get_dirty_log(kvm, log);
}

void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	kvm->arch.kvm_ops->free_memslot(slot);
}

void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
	kvm->arch.kvm_ops->flush_memslot(kvm, memslot);
}

int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				      struct kvm_memory_slot *memslot,
				      const struct kvm_userspace_memory_region *mem,
				      enum kvm_mr_change change)
{
	return kvm->arch.kvm_ops->prepare_memory_region(kvm, memslot, mem,
							change);
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
				      const struct kvm_userspace_memory_region *mem,
				      const struct kvm_memory_slot *old,
				      const struct kvm_memory_slot *new,
				      enum kvm_mr_change change)
{
	kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old, new, change);
}

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
			unsigned flags)
{
	return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end);
}

int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
{
	return kvm->arch.kvm_ops->age_hva(kvm, start, end);
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
	return kvm->arch.kvm_ops->test_age_hva(kvm, hva);
}

int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	kvm->arch.kvm_ops->set_spte_hva(kvm, hva, pte);
	return 0;
}

int kvmppc_core_init_vm(struct kvm *kvm)
{
#ifdef CONFIG_PPC64
	INIT_LIST_HEAD_RCU(&kvm->arch.spapr_tce_tables);
	INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
	mutex_init(&kvm->arch.rtas_token_lock);
#endif

	return kvm->arch.kvm_ops->init_vm(kvm);
}

void kvmppc_core_destroy_vm(struct kvm *kvm)
{
	kvm->arch.kvm_ops->destroy_vm(kvm);

#ifdef CONFIG_PPC64
	kvmppc_rtas_tokens_free(kvm);
	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
#endif

#ifdef CONFIG_KVM_XICS
	/*
	 * Free the XIVE and XICS devices which are not directly freed by the
	 * device 'release' method
	 */
	kfree(kvm->arch.xive_devices.native);
	kvm->arch.xive_devices.native = NULL;
	kfree(kvm->arch.xive_devices.xics_on_xive);
	kvm->arch.xive_devices.xics_on_xive = NULL;
	kfree(kvm->arch.xics_device);
	kvm->arch.xics_device = NULL;
#endif /* CONFIG_KVM_XICS */
}
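
/*
 * H_LOGICAL_CI_LOAD: emulate a cache-inhibited load for the guest by
 * reading through the MMIO bus.  The size must be a power of two up to
 * 8 bytes; on-bus data is treated as big-endian and the value is
 * returned in GPR 4.
 */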
int kvmppc_h_logical_ci_load(struct kvm_vcpu *vcpu)
{
	unsigned long size = kvmppc_get_gpr(vcpu, 4);
	unsigned long addr = kvmppc_get_gpr(vcpu, 5);
	u64 buf;
	int srcu_idx;
	int ret;

	if (!is_power_of_2(size) || (size > sizeof(buf)))
		return H_TOO_HARD;

	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
	ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, size, &buf);
	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
	if (ret != 0)
		return H_TOO_HARD;

	switch (size) {
	case 1:
		kvmppc_set_gpr(vcpu, 4, *(u8 *)&buf);
		break;
	case 2:
		kvmppc_set_gpr(vcpu, 4, be16_to_cpu(*(__be16 *)&buf));
		break;
	case 4:
		kvmppc_set_gpr(vcpu, 4, be32_to_cpu(*(__be32 *)&buf));
		break;
	case 8:
		kvmppc_set_gpr(vcpu, 4, be64_to_cpu(*(__be64 *)&buf));
		break;
	default:
		BUG();
	}

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_logical_ci_load);
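
/*
 * H_LOGICAL_CI_STORE: the companion cache-inhibited store, writing the
 * big-endian value from GPR 6 through the MMIO bus.
 */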
int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu)
{
	unsigned long size = kvmppc_get_gpr(vcpu, 4);
	unsigned long addr = kvmppc_get_gpr(vcpu, 5);
	unsigned long val = kvmppc_get_gpr(vcpu, 6);
	u64 buf;
	int srcu_idx;
	int ret;

	switch (size) {
	case 1:
		*(u8 *)&buf = val;
		break;
	case 2:
		*(__be16 *)&buf = cpu_to_be16(val);
		break;
	case 4:
		*(__be32 *)&buf = cpu_to_be32(val);
		break;
	case 8:
		*(__be64 *)&buf = cpu_to_be64(val);
		break;
	default:
		return H_TOO_HARD;
	}

	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
	ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, size, &buf);
	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
	if (ret != 0)
		return H_TOO_HARD;

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_logical_ci_store);

int kvmppc_core_check_processor_compat(void)
{
	/*
	 * We always return 0 for book3s. We check
	 * for compatibility while loading the HV
	 * or PR module
	 */
	return 0;
}

int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hcall)
{
	return kvm->arch.kvm_ops->hcall_implemented(hcall);
}

#ifdef CONFIG_KVM_XICS
int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
		bool line_status)
{
	if (xics_on_xive())
		return kvmppc_xive_set_irq(kvm, irq_source_id, irq, level,
					   line_status);
	else
		return kvmppc_xics_set_irq(kvm, irq_source_id, irq, level,
					   line_status);
}

int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *irq_entry,
			      struct kvm *kvm, int irq_source_id,
			      int level, bool line_status)
{
	return kvm_set_irq(kvm, irq_source_id, irq_entry->gsi,
			   level, line_status);
}

static int kvmppc_book3s_set_irq(struct kvm_kernel_irq_routing_entry *e,
				 struct kvm *kvm, int irq_source_id, int level,
				 bool line_status)
{
	return kvm_set_irq(kvm, irq_source_id, e->gsi, level, line_status);
}

int kvm_irq_map_gsi(struct kvm *kvm,
		    struct kvm_kernel_irq_routing_entry *entries, int gsi)
{
	entries->gsi = gsi;
	entries->type = KVM_IRQ_ROUTING_IRQCHIP;
	entries->set = kvmppc_book3s_set_irq;
	entries->irqchip.irqchip = 0;
	entries->irqchip.pin = gsi;
	return 1;
}

int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
	return pin;
}
#endif /* CONFIG_KVM_XICS */
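
/*
 * Module init: register with generic KVM, bring up the PR backend when
 * it is built into this module (32-bit), and register device ops for
 * the interrupt controller flavour the host provides.
 */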
static int kvmppc_book3s_init(void)
{
	int r;

	r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (r)
		return r;
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	r = kvmppc_book3s_init_pr();
#endif

#ifdef CONFIG_KVM_XICS
#ifdef CONFIG_KVM_XIVE
	if (xics_on_xive()) {
		kvmppc_xive_init_module();
		kvm_register_device_ops(&kvm_xive_ops, KVM_DEV_TYPE_XICS);
		if (kvmppc_xive_native_supported()) {
			kvmppc_xive_native_init_module();
			kvm_register_device_ops(&kvm_xive_native_ops,
						KVM_DEV_TYPE_XIVE);
		}
	} else
#endif
		kvm_register_device_ops(&kvm_xics_ops, KVM_DEV_TYPE_XICS);
#endif

	return r;
}

static void kvmppc_book3s_exit(void)
{
#ifdef CONFIG_KVM_XICS
	if (xics_on_xive()) {
		kvmppc_xive_exit_module();
		kvmppc_xive_native_exit_module();
	}
#endif
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	kvmppc_book3s_exit_pr();
#endif
	kvm_exit();
}

module_init(kvmppc_book3s_init);
module_exit(kvmppc_book3s_exit);

/* On 32bit this is our one and only kernel module */
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");
#endif