x86.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ARCH_X86_KVM_X86_H
#define ARCH_X86_KVM_X86_H

#include <linux/kvm_host.h>
#include <asm/pvclock.h>
#include "kvm_cache_regs.h"
#include "kvm_emulate.h"

#define KVM_DEFAULT_PLE_GAP             128
#define KVM_VMX_DEFAULT_PLE_WINDOW      4096
#define KVM_DEFAULT_PLE_WINDOW_GROW     2
#define KVM_DEFAULT_PLE_WINDOW_SHRINK   0
#define KVM_VMX_DEFAULT_PLE_WINDOW_MAX  UINT_MAX
#define KVM_SVM_DEFAULT_PLE_WINDOW_MAX  USHRT_MAX
#define KVM_SVM_DEFAULT_PLE_WINDOW      3000
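/*
 * Grow/shrink the PLE (Pause Loop Exiting) window: growing multiplies the
 * current value by @modifier when the modifier is smaller than @base and
 * adds it otherwise, clamped to @max; shrinking does the inverse, clamped
 * to @min.  A modifier of 0 resets the window to @base.
 */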
static inline unsigned int __grow_ple_window(unsigned int val,
                                             unsigned int base, unsigned int modifier, unsigned int max)
{
        u64 ret = val;

        if (modifier < 1)
                return base;

        if (modifier < base)
                ret *= modifier;
        else
                ret += modifier;

        return min(ret, (u64)max);
}

static inline unsigned int __shrink_ple_window(unsigned int val,
                                               unsigned int base, unsigned int modifier, unsigned int min)
{
        if (modifier < 1)
                return base;

        if (modifier < base)
                val /= modifier;
        else
                val -= modifier;

        return max(val, min);
}
#define MSR_IA32_CR_PAT_DEFAULT  0x0007040600070406ULL

static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
{
        vcpu->arch.exception.pending = false;
        vcpu->arch.exception.injected = false;
}

static inline void kvm_queue_interrupt(struct kvm_vcpu *vcpu, u8 vector,
                                       bool soft)
{
        vcpu->arch.interrupt.injected = true;
        vcpu->arch.interrupt.soft = soft;
        vcpu->arch.interrupt.nr = vector;
}

static inline void kvm_clear_interrupt_queue(struct kvm_vcpu *vcpu)
{
        vcpu->arch.interrupt.injected = false;
}

static inline bool kvm_event_needs_reinjection(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.exception.injected || vcpu->arch.interrupt.injected ||
                vcpu->arch.nmi_injected;
}

static inline bool kvm_exception_is_soft(unsigned int nr)
{
        return (nr == BP_VECTOR) || (nr == OF_VECTOR);
}

static inline bool is_protmode(struct kvm_vcpu *vcpu)
{
        return kvm_read_cr0_bits(vcpu, X86_CR0_PE);
}

static inline int is_long_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
        return vcpu->arch.efer & EFER_LMA;
#else
        return 0;
#endif
}

static inline bool is_64_bit_mode(struct kvm_vcpu *vcpu)
{
        int cs_db, cs_l;

        if (!is_long_mode(vcpu))
                return false;
        kvm_x86_ops.get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
        return cs_l;
}

static inline bool is_la57_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
        return (vcpu->arch.efer & EFER_LMA) &&
                kvm_read_cr4_bits(vcpu, X86_CR4_LA57);
#else
        return 0;
#endif
}

static inline bool x86_exception_has_error_code(unsigned int vector)
{
        static u32 exception_has_error_code = BIT(DF_VECTOR) | BIT(TS_VECTOR) |
                        BIT(NP_VECTOR) | BIT(SS_VECTOR) | BIT(GP_VECTOR) |
                        BIT(PF_VECTOR) | BIT(AC_VECTOR);

        return (1U << vector) & exception_has_error_code;
}
static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
}

static inline void kvm_vcpu_flush_tlb_current(struct kvm_vcpu *vcpu)
{
        ++vcpu->stat.tlb_flush;
        kvm_x86_ops.tlb_flush_current(vcpu);
}

static inline int is_pae(struct kvm_vcpu *vcpu)
{
        return kvm_read_cr4_bits(vcpu, X86_CR4_PAE);
}

static inline int is_pse(struct kvm_vcpu *vcpu)
{
        return kvm_read_cr4_bits(vcpu, X86_CR4_PSE);
}

static inline int is_paging(struct kvm_vcpu *vcpu)
{
        return likely(kvm_read_cr0_bits(vcpu, X86_CR0_PG));
}

static inline bool is_pae_paging(struct kvm_vcpu *vcpu)
{
        return !is_long_mode(vcpu) && is_pae(vcpu) && is_paging(vcpu);
}

static inline u8 vcpu_virt_addr_bits(struct kvm_vcpu *vcpu)
{
        return kvm_read_cr4_bits(vcpu, X86_CR4_LA57) ? 57 : 48;
}
static inline u64 get_canonical(u64 la, u8 vaddr_bits)
{
        return ((int64_t)la << (64 - vaddr_bits)) >> (64 - vaddr_bits);
}

static inline bool is_noncanonical_address(u64 la, struct kvm_vcpu *vcpu)
{
        return get_canonical(la, vcpu_virt_addr_bits(vcpu)) != la;
}
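/*
 * Cache the translation of the most recent emulated MMIO access (gva, gfn
 * and access bits), tagged with the memslots generation so the entry is
 * implicitly invalidated when memslots change.  vcpu_match_mmio_gva() and
 * vcpu_match_mmio_gpa() use it to recognize a repeated access to the same
 * MMIO page without another page-table walk.
 */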
static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
                                        gva_t gva, gfn_t gfn, unsigned access)
{
        u64 gen = kvm_memslots(vcpu->kvm)->generation;

        if (unlikely(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS))
                return;

        /*
         * If this is a shadow nested page table, the "GVA" is
         * actually a nGPA.
         */
        vcpu->arch.mmio_gva = mmu_is_nested(vcpu) ? 0 : gva & PAGE_MASK;
        vcpu->arch.mmio_access = access;
        vcpu->arch.mmio_gfn = gfn;
        vcpu->arch.mmio_gen = gen;
}

static inline bool vcpu_match_mmio_gen(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.mmio_gen == kvm_memslots(vcpu->kvm)->generation;
}
/*
 * Clear the mmio cache info for the given gva. If gva is MMIO_GVA_ANY, we
 * clear all mmio cache info.
 */
#define MMIO_GVA_ANY (~(gva_t)0)

static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
{
        if (gva != MMIO_GVA_ANY && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
                return;

        vcpu->arch.mmio_gva = 0;
}

static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
{
        if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gva &&
            vcpu->arch.mmio_gva == (gva & PAGE_MASK))
                return true;

        return false;
}

static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
        if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gfn &&
            vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT)
                return true;

        return false;
}

static inline unsigned long kvm_register_readl(struct kvm_vcpu *vcpu, int reg)
{
        unsigned long val = kvm_register_read(vcpu, reg);

        return is_64_bit_mode(vcpu) ? val : (u32)val;
}

static inline void kvm_register_writel(struct kvm_vcpu *vcpu,
                                       int reg, unsigned long val)
{
        if (!is_64_bit_mode(vcpu))
                val = (u32)val;
        return kvm_register_write(vcpu, reg, val);
}

static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk)
{
        return !(kvm->arch.disabled_quirks & quirk);
}
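/*
 * INIT signals are "latched" (left pending) instead of being acted upon
 * while the vCPU is in SMM or while vendor code reports that INIT delivery
 * is blocked, e.g. on VMX while the guest is post-VMXON.
 */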
static inline bool kvm_vcpu_latch_init(struct kvm_vcpu *vcpu)
{
        return is_smm(vcpu) || kvm_x86_ops.apic_init_signal_blocked(vcpu);
}

void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);

void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr);
u64 get_kvmclock_ns(struct kvm *kvm);

int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
        gva_t addr, void *val, unsigned int bytes,
        struct x86_exception *exception);

int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu,
        gva_t addr, void *val, unsigned int bytes,
        struct x86_exception *exception);

int handle_ud(struct kvm_vcpu *vcpu);

void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu);

void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu);
u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
                                          int page_num);
bool kvm_vector_hashing_enabled(void);
void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code);
int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type,
                                    void *insn, int insn_len);
int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
                            int emulation_type, void *insn, int insn_len);
fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu);

extern u64 host_xcr0;
extern u64 supported_xcr0;
extern u64 supported_xss;

static inline bool kvm_mpx_supported(void)
{
        return (supported_xcr0 & (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR))
                == (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);
}

extern unsigned int min_timer_period_us;

extern bool enable_vmware_backdoor;

extern int pi_inject_timer;

extern struct static_key kvm_no_apic_vcpu;

static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
{
        return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult,
                                   vcpu->arch.virtual_tsc_shift);
}

/* Same "calling convention" as do_div:
 * - divide (n << 32) by base
 * - put result in n
 * - return remainder
 */
#define do_shl32_div32(n, base)                                 \
        ({                                                      \
            u32 __quot, __rem;                                  \
            asm("divl %2" : "=a" (__quot), "=d" (__rem)         \
                          : "rm" (base), "0" (0), "1" ((u32) n)); \
            n = __quot;                                         \
            __rem;                                              \
         })
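/*
 * Illustrative use (hypothetical values, not taken from this header):
 *
 *      u32 val = 1000000, rem;
 *
 *      rem = do_shl32_div32(val, 7);   now val == (1000000ULL << 32) / 7
 */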
static inline bool kvm_mwait_in_guest(struct kvm *kvm)
{
        return kvm->arch.mwait_in_guest;
}

static inline bool kvm_hlt_in_guest(struct kvm *kvm)
{
        return kvm->arch.hlt_in_guest;
}

static inline bool kvm_pause_in_guest(struct kvm *kvm)
{
        return kvm->arch.pause_in_guest;
}

static inline bool kvm_cstate_in_guest(struct kvm *kvm)
{
        return kvm->arch.cstate_in_guest;
}
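/*
 * current_vcpu tracks, per CPU, which vCPU's guest context was interrupted,
 * so that code running in interrupt context (e.g. the perf guest callbacks)
 * can attribute the event to the guest.
 */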
DECLARE_PER_CPU(struct kvm_vcpu *, current_vcpu);

static inline void kvm_before_interrupt(struct kvm_vcpu *vcpu)
{
        __this_cpu_write(current_vcpu, vcpu);
}

static inline void kvm_after_interrupt(struct kvm_vcpu *vcpu)
{
        __this_cpu_write(current_vcpu, NULL);
}
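/*
 * Each byte of the PAT MSR holds one entry in its low three bits; the valid
 * memory types are 0, 1, 4, 5, 6 and 7.  The check below first rejects any
 * byte with reserved high bits set, then rejects values 2 and 3 (bit 1 set,
 * bit 2 clear) by OR-ing bit 2 into such bytes and comparing the result with
 * the original value.
 */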
static inline bool kvm_pat_valid(u64 data)
{
        if (data & 0xF8F8F8F8F8F8F8F8ull)
                return false;
        /* 0, 1, 4, 5, 6, 7 are valid values. */
        return (data | ((data & 0x0202020202020202ull) << 1)) == data;
}

static inline bool kvm_dr7_valid(u64 data)
{
        /* Bits [63:32] are reserved */
        return !(data >> 32);
}

static inline bool kvm_dr6_valid(u64 data)
{
        /* Bits [63:32] are reserved */
        return !(data >> 32);
}

void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu);
void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu);
int kvm_spec_ctrl_test_value(u64 value);
int kvm_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu);
int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
                              struct x86_exception *e);
int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva);
bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type);

/*
 * Internal error codes that are used to indicate that MSR emulation encountered
 * an error that should result in #GP in the guest, unless userspace
 * handles it.
 */
#define  KVM_MSR_RET_INVALID	2	/* in-kernel MSR emulation #GP condition */
#define  KVM_MSR_RET_FILTERED	3	/* #GP due to userspace MSR filter */
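/*
 * Build the mask of CR4 bits that must be zero for a given feature set.
 * @__cpu_has is a feature-query callback and @__c the object it is applied
 * to, e.g. guest CPUID for a vCPU or the boot CPU's capabilities for the host.
 */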
#define __cr4_reserved_bits(__cpu_has, __c)             \
({                                                      \
        u64 __reserved_bits = CR4_RESERVED_BITS;        \
                                                        \
        if (!__cpu_has(__c, X86_FEATURE_XSAVE))         \
                __reserved_bits |= X86_CR4_OSXSAVE;     \
        if (!__cpu_has(__c, X86_FEATURE_SMEP))          \
                __reserved_bits |= X86_CR4_SMEP;        \
        if (!__cpu_has(__c, X86_FEATURE_SMAP))          \
                __reserved_bits |= X86_CR4_SMAP;        \
        if (!__cpu_has(__c, X86_FEATURE_FSGSBASE))      \
                __reserved_bits |= X86_CR4_FSGSBASE;    \
        if (!__cpu_has(__c, X86_FEATURE_PKU))           \
                __reserved_bits |= X86_CR4_PKE;         \
        if (!__cpu_has(__c, X86_FEATURE_LA57))          \
                __reserved_bits |= X86_CR4_LA57;        \
        if (!__cpu_has(__c, X86_FEATURE_UMIP))          \
                __reserved_bits |= X86_CR4_UMIP;        \
        if (!__cpu_has(__c, X86_FEATURE_VMX))           \
                __reserved_bits |= X86_CR4_VMXE;        \
        if (!__cpu_has(__c, X86_FEATURE_PCID))          \
                __reserved_bits |= X86_CR4_PCIDE;       \
        __reserved_bits;                                \
})

#endif