// SPDX-License-Identifier: GPL-2.0-or-later
/*  Paravirtualization interfaces
    Copyright (C) 2006 Rusty Russell IBM Corporation

    2007 - x86_64 support added by Glauber de Oliveira Costa, Red Hat Inc
*/

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/efi.h>
#include <linux/bcd.h>
#include <linux/highmem.h>
#include <linux/kprobes.h>
#include <linux/pgtable.h>

#include <asm/bug.h>
#include <asm/paravirt.h>
#include <asm/debugreg.h>
#include <asm/desc.h>
#include <asm/setup.h>
#include <asm/time.h>
#include <asm/pgalloc.h>
#include <asm/irq.h>
#include <asm/delay.h>
#include <asm/fixmap.h>
#include <asm/apic.h>
#include <asm/tlbflush.h>
#include <asm/timer.h>
#include <asm/special_insns.h>
#include <asm/tlb.h>
#include <asm/io_bitmap.h>

/*
 * nop stub, which must not clobber anything *including the stack* to
 * avoid confusing the entry prologues.
 */
extern void _paravirt_nop(void);
asm (".pushsection .entry.text, \"ax\"\n"
     ".global _paravirt_nop\n"
     "_paravirt_nop:\n\t"
     "ret\n\t"
     ".size _paravirt_nop, . - _paravirt_nop\n\t"
     ".type _paravirt_nop, @function\n\t"
     ".popsection");
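
/*
 * Beyond being safe to call, _paravirt_nop also acts as a sentinel:
 * paravirt_patch_default() below compares op pointers against it and,
 * on a match, emits zero replacement bytes, so the call site is
 * effectively patched away (the leftover bytes are nop-padded by the
 * patching machinery in alternative.c).
 */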

void __init default_banner(void)
{
        printk(KERN_INFO "Booting paravirtualized kernel on %s\n",
               pv_info.name);
}

/* Undefined instruction for dealing with missing ops pointers. */
static const unsigned char ud2a[] = { 0x0f, 0x0b };

struct branch {
        unsigned char opcode;
        u32 delta;
} __attribute__((packed));

static unsigned paravirt_patch_call(void *insn_buff, const void *target,
                                    unsigned long addr, unsigned len)
{
        const int call_len = 5;
        struct branch *b = insn_buff;
        unsigned long delta = (unsigned long)target - (addr + call_len);

        if (len < call_len) {
                pr_warn("paravirt: Failed to patch indirect CALL at %ps\n", (void *)addr);
                /* Kernel might not be viable if patching fails, bail out: */
                BUG_ON(1);
        }

        b->opcode = 0xe8; /* call */
        b->delta = delta;
        BUILD_BUG_ON(sizeof(*b) != call_len);

        return call_len;
}
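
/*
 * 0xe8 is the x86 near CALL opcode with a 32-bit displacement that is
 * relative to the end of the 5-byte instruction, hence the
 * target - (addr + call_len) computation above. As a made-up example:
 * a patch site at 0x1000 calling a target at 0x2000 becomes the byte
 * sequence e8 fb 0f 00 00 (delta = 0x2000 - 0x1005 = 0xffb,
 * little-endian).
 */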

#ifdef CONFIG_PARAVIRT_XXL
/* identity function, which can be inlined */
u64 notrace _paravirt_ident_64(u64 x)
{
        return x;
}

static unsigned paravirt_patch_jmp(void *insn_buff, const void *target,
                                   unsigned long addr, unsigned len)
{
        struct branch *b = insn_buff;
        unsigned long delta = (unsigned long)target - (addr + 5);

        if (len < 5) {
#ifdef CONFIG_RETPOLINE
                WARN_ONCE(1, "Failing to patch indirect JMP in %ps\n", (void *)addr);
#endif
                return len;     /* call too long for patch site */
        }

        b->opcode = 0xe9;       /* jmp */
        b->delta = delta;

        return 5;
}
#endif
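
/*
 * Unlike a CALL that fails to fit (a hard BUG above), a JMP that does
 * not fit is tolerated: the site simply keeps its original indirect
 * jump. The WARN_ONCE fires only under CONFIG_RETPOLINE, where the
 * retained indirect branch goes through a retpoline thunk and is
 * therefore noticeably more expensive than the direct jump that
 * would have been patched in.
 */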

DEFINE_STATIC_KEY_TRUE(virt_spin_lock_key);

void __init native_pv_lock_init(void)
{
        if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
                static_branch_disable(&virt_spin_lock_key);
}
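
/*
 * virt_spin_lock_key defaults to true; it is switched off here when
 * no hypervisor is detected, so the qspinlock code only takes its
 * virtualization-friendly test-and-set path when actually running as
 * a guest.
 */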

unsigned paravirt_patch_default(u8 type, void *insn_buff,
                                unsigned long addr, unsigned len)
{
        /*
         * Neat trick to map patch type back to the call within the
         * corresponding structure.
         */
        void *opfunc = *((void **)&pv_ops + type);
        unsigned ret;

        if (opfunc == NULL)
                /* If there's no function, patch it with a ud2a (BUG) */
                ret = paravirt_patch_insns(insn_buff, len, ud2a, ud2a + sizeof(ud2a));
        else if (opfunc == _paravirt_nop)
                ret = 0;

#ifdef CONFIG_PARAVIRT_XXL
        /* identity functions just return their single argument */
        else if (opfunc == _paravirt_ident_64)
                ret = paravirt_patch_ident_64(insn_buff, len);

        else if (type == PARAVIRT_PATCH(cpu.iret) ||
                 type == PARAVIRT_PATCH(cpu.usergs_sysret64))
                /* If operation requires a jmp, then jmp */
                ret = paravirt_patch_jmp(insn_buff, opfunc, addr, len);
#endif
        else
                /* Otherwise call the function. */
                ret = paravirt_patch_call(insn_buff, opfunc, addr, len);

        return ret;
}
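
/*
 * The "neat trick" above works because PARAVIRT_PATCH() encodes each
 * patch type as the op's pointer-sized offset within struct
 * paravirt_patch_template, so (void **)&pv_ops + type points exactly
 * at the member holding that op's function pointer.
 */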

unsigned paravirt_patch_insns(void *insn_buff, unsigned len,
                              const char *start, const char *end)
{
        unsigned insn_len = end - start;

        /* Alternative instruction is too large for the patch site and we cannot continue: */
        BUG_ON(insn_len > len || start == NULL);

        memcpy(insn_buff, start, insn_len);

        return insn_len;
}

struct static_key paravirt_steal_enabled;
struct static_key paravirt_steal_rq_enabled;

static u64 native_steal_clock(int cpu)
{
        return 0;
}
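
/*
 * On bare hardware no time is ever stolen by a hypervisor, so the
 * native steal clock always reads zero; guest kernels are expected to
 * install their own steal_clock implementation in pv_ops.time.
 */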

/* These are in entry.S */
extern void native_iret(void);
extern void native_usergs_sysret64(void);

static struct resource reserve_ioports = {
        .start = 0,
        .end = IO_SPACE_LIMIT,
        .name = "paravirt-ioport",
        .flags = IORESOURCE_IO | IORESOURCE_BUSY,
};

/*
 * Reserve the whole legacy IO space to prevent any legacy drivers
 * from wasting time probing for their hardware.  This is a fairly
 * brute-force approach to disabling all non-virtual drivers.
 *
 * Note that this must be called very early to have any effect.
 */
int paravirt_disable_iospace(void)
{
        return request_resource(&ioport_resource, &reserve_ioports);
}
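
/*
 * Claiming 0..IO_SPACE_LIMIT as a busy resource makes subsequent
 * request_region() calls by legacy port-probing drivers fail, which
 * is what stops them from touching the (non-existent) hardware.
 */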

static DEFINE_PER_CPU(enum paravirt_lazy_mode, paravirt_lazy_mode) = PARAVIRT_LAZY_NONE;

static inline void enter_lazy(enum paravirt_lazy_mode mode)
{
        BUG_ON(this_cpu_read(paravirt_lazy_mode) != PARAVIRT_LAZY_NONE);

        this_cpu_write(paravirt_lazy_mode, mode);
}

static void leave_lazy(enum paravirt_lazy_mode mode)
{
        BUG_ON(this_cpu_read(paravirt_lazy_mode) != mode);

        this_cpu_write(paravirt_lazy_mode, PARAVIRT_LAZY_NONE);
}

void paravirt_enter_lazy_mmu(void)
{
        enter_lazy(PARAVIRT_LAZY_MMU);
}

void paravirt_leave_lazy_mmu(void)
{
        leave_lazy(PARAVIRT_LAZY_MMU);
}

void paravirt_flush_lazy_mmu(void)
{
        preempt_disable();

        if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
                arch_leave_lazy_mmu_mode();
                arch_enter_lazy_mmu_mode();
        }

        preempt_enable();
}
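
/*
 * Lazy MMU mode lets a hypervisor batch page-table updates instead of
 * trapping on each one; flushing is implemented above by leaving and
 * immediately re-entering the mode, which forces any queued updates
 * out while keeping the CPU in lazy mode afterwards.
 */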

#ifdef CONFIG_PARAVIRT_XXL
void paravirt_start_context_switch(struct task_struct *prev)
{
        BUG_ON(preemptible());

        if (this_cpu_read(paravirt_lazy_mode) == PARAVIRT_LAZY_MMU) {
                arch_leave_lazy_mmu_mode();
                set_ti_thread_flag(task_thread_info(prev), TIF_LAZY_MMU_UPDATES);
        }
        enter_lazy(PARAVIRT_LAZY_CPU);
}

void paravirt_end_context_switch(struct task_struct *next)
{
        BUG_ON(preemptible());

        leave_lazy(PARAVIRT_LAZY_CPU);

        if (test_and_clear_ti_thread_flag(task_thread_info(next), TIF_LAZY_MMU_UPDATES))
                arch_enter_lazy_mmu_mode();
}
#endif

enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
{
        if (in_interrupt())
                return PARAVIRT_LAZY_NONE;

        return this_cpu_read(paravirt_lazy_mode);
}
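
/*
 * Across a context switch, lazy MMU mode is suspended rather than
 * lost: paravirt_start_context_switch() records it in the outgoing
 * task's TIF_LAZY_MMU_UPDATES flag, and paravirt_end_context_switch()
 * re-enters the mode when that task is later switched back in.
 */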

struct pv_info pv_info = {
        .name = "bare hardware",
#ifdef CONFIG_PARAVIRT_XXL
        .extra_user_64bit_cs = __USER_CS,
#endif
};

/* 64-bit pagetable entries */
#define PTE_IDENT       __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
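
/*
 * PTE_IDENT routes the pte/pmd/pud/p4d/pgd value conversions below to
 * the 64-bit identity function, which the patching code above
 * recognizes (opfunc == _paravirt_ident_64) and replaces with inline
 * code instead of a call.
 */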

struct paravirt_patch_template pv_ops = {
        /* Init ops. */
        .init.patch             = native_patch,

        /* Time ops. */
        .time.sched_clock       = native_sched_clock,
        .time.steal_clock       = native_steal_clock,

        /* Cpu ops. */
        .cpu.io_delay           = native_io_delay,

#ifdef CONFIG_PARAVIRT_XXL
        .cpu.cpuid              = native_cpuid,
        .cpu.get_debugreg       = native_get_debugreg,
        .cpu.set_debugreg       = native_set_debugreg,
        .cpu.read_cr0           = native_read_cr0,
        .cpu.write_cr0          = native_write_cr0,
        .cpu.write_cr4          = native_write_cr4,
        .cpu.wbinvd             = native_wbinvd,
        .cpu.read_msr           = native_read_msr,
        .cpu.write_msr          = native_write_msr,
        .cpu.read_msr_safe      = native_read_msr_safe,
        .cpu.write_msr_safe     = native_write_msr_safe,
        .cpu.read_pmc           = native_read_pmc,
        .cpu.load_tr_desc       = native_load_tr_desc,
        .cpu.set_ldt            = native_set_ldt,
        .cpu.load_gdt           = native_load_gdt,
        .cpu.load_idt           = native_load_idt,
        .cpu.store_tr           = native_store_tr,
        .cpu.load_tls           = native_load_tls,
        .cpu.load_gs_index      = native_load_gs_index,
        .cpu.write_ldt_entry    = native_write_ldt_entry,
        .cpu.write_gdt_entry    = native_write_gdt_entry,
        .cpu.write_idt_entry    = native_write_idt_entry,

        .cpu.alloc_ldt          = paravirt_nop,
        .cpu.free_ldt           = paravirt_nop,

        .cpu.load_sp0           = native_load_sp0,

        .cpu.usergs_sysret64    = native_usergs_sysret64,
        .cpu.iret               = native_iret,

#ifdef CONFIG_X86_IOPL_IOPERM
        .cpu.invalidate_io_bitmap       = native_tss_invalidate_io_bitmap,
        .cpu.update_io_bitmap           = native_tss_update_io_bitmap,
#endif

        .cpu.start_context_switch       = paravirt_nop,
        .cpu.end_context_switch         = paravirt_nop,

        /* Irq ops. */
        .irq.save_fl            = __PV_IS_CALLEE_SAVE(native_save_fl),
        .irq.restore_fl         = __PV_IS_CALLEE_SAVE(native_restore_fl),
        .irq.irq_disable        = __PV_IS_CALLEE_SAVE(native_irq_disable),
        .irq.irq_enable         = __PV_IS_CALLEE_SAVE(native_irq_enable),
        .irq.safe_halt          = native_safe_halt,
        .irq.halt               = native_halt,
#endif /* CONFIG_PARAVIRT_XXL */

        /* Mmu ops. */
        .mmu.flush_tlb_user     = native_flush_tlb_local,
        .mmu.flush_tlb_kernel   = native_flush_tlb_global,
        .mmu.flush_tlb_one_user = native_flush_tlb_one_user,
        .mmu.flush_tlb_others   = native_flush_tlb_others,
        .mmu.tlb_remove_table   =
                        (void (*)(struct mmu_gather *, void *))tlb_remove_page,

        .mmu.exit_mmap          = paravirt_nop,

#ifdef CONFIG_PARAVIRT_XXL
        .mmu.read_cr2           = __PV_IS_CALLEE_SAVE(native_read_cr2),
        .mmu.write_cr2          = native_write_cr2,
        .mmu.read_cr3           = __native_read_cr3,
        .mmu.write_cr3          = native_write_cr3,

        .mmu.pgd_alloc          = __paravirt_pgd_alloc,
        .mmu.pgd_free           = paravirt_nop,

        .mmu.alloc_pte          = paravirt_nop,
        .mmu.alloc_pmd          = paravirt_nop,
        .mmu.alloc_pud          = paravirt_nop,
        .mmu.alloc_p4d          = paravirt_nop,
        .mmu.release_pte        = paravirt_nop,
        .mmu.release_pmd        = paravirt_nop,
        .mmu.release_pud        = paravirt_nop,
        .mmu.release_p4d        = paravirt_nop,

        .mmu.set_pte            = native_set_pte,
        .mmu.set_pmd            = native_set_pmd,

        .mmu.ptep_modify_prot_start     = __ptep_modify_prot_start,
        .mmu.ptep_modify_prot_commit    = __ptep_modify_prot_commit,

        .mmu.set_pud            = native_set_pud,

        .mmu.pmd_val            = PTE_IDENT,
        .mmu.make_pmd           = PTE_IDENT,

        .mmu.pud_val            = PTE_IDENT,
        .mmu.make_pud           = PTE_IDENT,

        .mmu.set_p4d            = native_set_p4d,

#if CONFIG_PGTABLE_LEVELS >= 5
        .mmu.p4d_val            = PTE_IDENT,
        .mmu.make_p4d           = PTE_IDENT,

        .mmu.set_pgd            = native_set_pgd,
#endif /* CONFIG_PGTABLE_LEVELS >= 5 */

        .mmu.pte_val            = PTE_IDENT,
        .mmu.pgd_val            = PTE_IDENT,

        .mmu.make_pte           = PTE_IDENT,
        .mmu.make_pgd           = PTE_IDENT,

        .mmu.dup_mmap           = paravirt_nop,
        .mmu.activate_mm        = paravirt_nop,

        .mmu.lazy_mode = {
                .enter          = paravirt_nop,
                .leave          = paravirt_nop,
                .flush          = paravirt_nop,
        },

        .mmu.set_fixmap         = native_set_fixmap,
#endif /* CONFIG_PARAVIRT_XXL */

#if defined(CONFIG_PARAVIRT_SPINLOCKS)
        /* Lock ops. */
#ifdef CONFIG_SMP
        .lock.queued_spin_lock_slowpath = native_queued_spin_lock_slowpath,
        .lock.queued_spin_unlock        =
                                PV_CALLEE_SAVE(__native_queued_spin_unlock),
        .lock.wait                      = paravirt_nop,
        .lock.kick                      = paravirt_nop,
        .lock.vcpu_is_preempted         =
                                PV_CALLEE_SAVE(__native_vcpu_is_preempted),
#endif /* SMP */
#endif
};
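
/*
 * Everything in pv_ops starts out pointing at the native
 * implementations; a hypervisor guest (Xen PV being the classic
 * example) overwrites the relevant entries during early boot, and the
 * patching machinery above then turns the remaining indirect calls
 * into direct ones.
 */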

#ifdef CONFIG_PARAVIRT_XXL
/* At this point, native_get/set_debugreg has real function entries */
NOKPROBE_SYMBOL(native_get_debugreg);
NOKPROBE_SYMBOL(native_set_debugreg);
NOKPROBE_SYMBOL(native_load_idt);
#endif

EXPORT_SYMBOL(pv_ops);
EXPORT_SYMBOL_GPL(pv_info);