vcpu.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *	Anup Patel <anup.patel@wdc.com>
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/kvm_host.h>
#include <asm/csr.h>
#include <asm/hwcap.h>
#include <asm/sbi.h>
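
/*
 * VCPU statistics exported to userspace via debugfs; the VCPU_STAT()
 * wrapper is assumed to be provided by this port's KVM headers.
 */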
struct kvm_stats_debugfs_item debugfs_entries[] = {
	VCPU_STAT("halt_successful_poll", halt_successful_poll),
	VCPU_STAT("halt_attempted_poll", halt_attempted_poll),
	VCPU_STAT("halt_poll_success_ns", halt_poll_success_ns),
	VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns),
	VCPU_STAT("halt_poll_invalid", halt_poll_invalid),
	VCPU_STAT("halt_wakeup", halt_wakeup),
	VCPU_STAT("ecall_exit_stat", ecall_exit_stat),
	VCPU_STAT("wfi_exit_stat", wfi_exit_stat),
	VCPU_STAT("mmio_exit_user", mmio_exit_user),
	VCPU_STAT("mmio_exit_kernel", mmio_exit_kernel),
	VCPU_STAT("exits", exits),
	{ NULL }
};
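
/*
 * Last vCPU that entered kvm_arch_vcpu_ioctl_run(); consumed by
 * kvm_riscv_vcpu_notify() at the bottom of this file.
 */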
static struct kvm_vcpu *kvm_vcpu;
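
/* ISA extensions a guest is allowed to see: the base ISA (IMAFDC) plus S- and U-mode support. */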
#define KVM_RISCV_ISA_ALLOWED	(riscv_isa_extension_mask(a) | \
				 riscv_isa_extension_mask(c) | \
				 riscv_isa_extension_mask(d) | \
				 riscv_isa_extension_mask(f) | \
				 riscv_isa_extension_mask(i) | \
				 riscv_isa_extension_mask(m) | \
				 riscv_isa_extension_mask(s) | \
				 riscv_isa_extension_mask(u))
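
/*
 * Most of the generic KVM vCPU hooks below are stubs in this port: they
 * satisfy the core KVM interface but carry no architecture state yet.
 */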
int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
	return 0;
}

int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
	return 0;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
{
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return 0;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 0;
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return false;
}

bool kvm_arch_has_vcpu_debugfs(void)
{
	return false;
}

int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
{
	return 0;
}

vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}
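
/* Base of the khv reserved shared-memory region; defined elsewhere in this port. */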
extern u64 khv_reserved_memory;

long kvm_arch_vcpu_async_ioctl(struct file *filp,
			       unsigned int ioctl, unsigned long arg)
{
	return -ENOIOCTLCMD;
}
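
/*
 * In this hypervisor-less port the ONE_REG ioctls are repurposed: reg.id is
 * treated as a hart id and reg.addr as the opaque start argument of an SBI
 * HSM hart-start call. The 0x200000 offset into the reserved region
 * presumably points at the hart's entry point; the exact layout is defined
 * elsewhere in the port.
 *
 * A hypothetical userspace invocation (names are illustrative only):
 *
 *	struct kvm_one_reg reg = {
 *		.id   = target_hartid,
 *		.addr = hart_start_arg,
 *	};
 *	ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
 */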
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	long r = -EINVAL;

	switch (ioctl) {
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_START, reg.id,
			  khv_reserved_memory + 0x200000, reg.addr, 0, 0, 0);
		/* Fail if the target hart is already online or cannot exist. */
		if (cpu_online(reg.id) || !cpu_possible(reg.id))
			break;
		return 0;
	}
	default:
		break;
	}

	return r;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}
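
/* Interrupt delivery is not wired up in this port yet; these are stubs. */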
void kvm_riscv_vcpu_flush_interrupts(struct kvm_vcpu *vcpu)
{
}

void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu)
{
}

int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)
{
	return 0;
}

int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)
{
	return 0;
}

bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, unsigned long mask)
{
	return false;
}

void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu)
{
}

void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu)
{
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return 0;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return 0;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	/* TODO: to be implemented later. */
	return -EINVAL;
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
}
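
/*
 * Main run loop. Instead of entering guest mode through hardware
 * virtualization, this port parks the vCPU thread until the shared
 * io_switch status word signals RVBM_STATUS_START, then dispatches the
 * resulting event through kvm_riscv_vcpu_exit().
 */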
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
	int ret;
	struct kvm_cpu_trap trap;
	struct kvm_run *run = vcpu->run;

	kvm_vcpu = vcpu;

	/* Mark this VCPU ran at least once */
	vcpu->arch.ran_atleast_once = true;

	vcpu->arch.srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	/* Process MMIO value returned from user-space */
	if (run->exit_reason == KVM_EXIT_MMIO) {
		ret = kvm_riscv_vcpu_mmio_return(vcpu, vcpu->run);
		if (ret) {
			srcu_read_unlock(&vcpu->kvm->srcu, vcpu->arch.srcu_idx);
			return ret;
		}
	}

	if (run->immediate_exit) {
		printk("%s, immediate_exit.\n", __func__);
		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->arch.srcu_idx);
		return -EINTR;
	}

	vcpu_load(vcpu);
	kvm_sigset_activate(vcpu);

	ret = 1;
	run->exit_reason = KVM_EXIT_UNKNOWN;
	while (ret > 0) {
		/* Check conditions before entering the guest */
		cond_resched();

		/*
		 * Exit if we have a signal pending so that we can deliver
		 * the signal to user space.
		 */
		if (signal_pending(current)) {
			ret = -EINTR;
			run->exit_reason = KVM_EXIT_INTR;
		}

		/*
		 * Ensure we set mode to IN_GUEST_MODE after we disable
		 * interrupts and before the final VCPU requests check.
		 * See the comment in kvm_vcpu_exiting_guest_mode() and
		 * Documentation/virtual/kvm/vcpu-requests.rst
		 */
		vcpu->mode = IN_GUEST_MODE;

		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->arch.srcu_idx);
		smp_mb__after_srcu_read_unlock();

		/* Block until the backend flips the status word to START. */
		ret = wait_event_interruptible(vcpu->kvm->arch.waitq,
			(atomic64_read((atomic64_t *)&vcpu->kvm->arch.io_switch->status) &
			 RVBM_STATUS_MASK) == RVBM_STATUS_START);
		/* No recovery path if the wait is interrupted; spin forever. */
		if (ret)
			while (1)
				;

		vcpu->mode = OUTSIDE_GUEST_MODE;
		vcpu->stat.exits++;

		vcpu->arch.srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
		ret = kvm_riscv_vcpu_exit(vcpu, run, &trap);
	}

	kvm_sigset_deactivate(vcpu);
	vcpu_put(vcpu);
	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->arch.srcu_idx);

	return ret;
}
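
/*
 * Backend notification hook; returns true when the caller should complete
 * the notification itself. The magic numbers appear to be protocol-defined
 * constants (a status byte of 0x5a marks a guest shutdown command; a low
 * address byte of 0x50 selects a slot handled via kvm_riscv_vcpu_exit());
 * their exact meaning lives in the khv shared-memory protocol elsewhere.
 */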
bool kvm_riscv_vcpu_notify(void)
{
	struct kvm_vcpu *vcpu = kvm_vcpu;
	struct khv_io *io_switch;
	struct kvm_cpu_trap trap;
	bool ret = false;

	if (!vcpu)
		return true;

	io_switch = vcpu->kvm->arch.io_switch;

	/* Skip guest shutdown command */
	if (((io_switch->status >> 16) & 0xff) == 0x5a)
		return true;

	if ((io_switch->addr & 0xff) == 0x50)
		kvm_riscv_vcpu_exit(vcpu, vcpu->run, &trap);
	else
		ret = true;

	return ret;
}