paravirt_patch.c

// SPDX-License-Identifier: GPL-2.0
#include <linux/stringify.h>

#include <asm/paravirt.h>
#include <asm/asm-offsets.h>
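
/*
 * Patch paravirt call sites with the short native instruction
 * sequences defined below. native_patch() handles the ops whose
 * native encoding is known here (and, for the lock ops, actually in
 * use); everything else falls back to paravirt_patch_default().
 */
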
#define PSTART(d, m)						\
	patch_data_##d.m

#define PEND(d, m)						\
	(PSTART(d, m) + sizeof(patch_data_##d.m))

#define PATCH(d, m, insn_buff, len)				\
	paravirt_patch_insns(insn_buff, len, PSTART(d, m), PEND(d, m))

#define PATCH_CASE(ops, m, data, insn_buff, len)		\
	case PARAVIRT_PATCH(ops.m):				\
		return PATCH(data, ops##_##m, insn_buff, len)
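
/*
 * For example, PATCH_CASE(irq, save_fl, xxl, insn_buff, len) expands to:
 *
 *	case PARAVIRT_PATCH(irq.save_fl):
 *		return paravirt_patch_insns(insn_buff, len,
 *				patch_data_xxl.irq_save_fl,
 *				patch_data_xxl.irq_save_fl +
 *				sizeof(patch_data_xxl.irq_save_fl));
 */
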
#ifdef CONFIG_PARAVIRT_XXL
struct patch_xxl {
	const unsigned char	irq_irq_disable[1];
	const unsigned char	irq_irq_enable[1];
	const unsigned char	irq_save_fl[2];
	const unsigned char	mmu_read_cr2[3];
	const unsigned char	mmu_read_cr3[3];
	const unsigned char	mmu_write_cr3[3];
	const unsigned char	irq_restore_fl[2];
	const unsigned char	cpu_wbinvd[2];
	const unsigned char	cpu_usergs_sysret64[6];
	const unsigned char	mov64[3];
};
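
/*
 * Native instruction encodings; each array size above matches the
 * length of the encoded instruction(s), so PEND() - PSTART() is
 * exactly the number of bytes paravirt_patch_insns() copies into the
 * patch site.
 */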
static const struct patch_xxl patch_data_xxl = {
	.irq_irq_disable     = { 0xfa },		// cli
	.irq_irq_enable      = { 0xfb },		// sti
	.irq_save_fl         = { 0x9c, 0x58 },		// pushf; pop %[re]ax
	.mmu_read_cr2        = { 0x0f, 0x20, 0xd0 },	// mov %cr2, %[re]ax
	.mmu_read_cr3        = { 0x0f, 0x20, 0xd8 },	// mov %cr3, %[re]ax
	.mmu_write_cr3       = { 0x0f, 0x22, 0xdf },	// mov %rdi, %cr3
	.irq_restore_fl      = { 0x57, 0x9d },		// push %rdi; popfq
	.cpu_wbinvd          = { 0x0f, 0x09 },		// wbinvd
	.cpu_usergs_sysret64 = { 0x0f, 0x01, 0xf8,
				 0x48, 0x0f, 0x07 },	// swapgs; sysretq
	.mov64               = { 0x48, 0x89, 0xf8 },	// mov %rdi, %rax
};
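
/*
 * Identity "patch" for 64-bit ops that are a no-op on native hardware
 * (the return value is just the first argument): emit mov %rdi, %rax.
 */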
unsigned int paravirt_patch_ident_64(void *insn_buff, unsigned int len)
{
	return PATCH(xxl, mov64, insn_buff, len);
}
# endif /* CONFIG_PARAVIRT_XXL */

#ifdef CONFIG_PARAVIRT_SPINLOCKS
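/*
 * Native encodings for the spinlock ops. The store targets the first
 * function argument: %rdi on 64-bit, %eax under the 32-bit regparm(3)
 * convention, hence the two movb encodings below.
 */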
struct patch_lock {
	unsigned char queued_spin_unlock[3];
	unsigned char vcpu_is_preempted[2];
};

static const struct patch_lock patch_data_lock = {
	.vcpu_is_preempted   = { 0x31, 0xc0 },		// xor %eax, %eax

# ifdef CONFIG_X86_64
	.queued_spin_unlock  = { 0xc6, 0x07, 0x00 },	// movb $0, (%rdi)
# else
	.queued_spin_unlock  = { 0xc6, 0x00, 0x00 },	// movb $0, (%eax)
# endif
};
#endif /* CONFIG_PARAVIRT_SPINLOCKS */
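
/*
 * Default pv_ops.init.patch hook: copy the native sequence for the
 * given op into insn_buff when one is defined above (and, for the lock
 * ops, when the native implementation is actually in use); otherwise
 * defer to paravirt_patch_default().
 */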
unsigned int native_patch(u8 type, void *insn_buff, unsigned long addr,
			  unsigned int len)
{
	switch (type) {

#ifdef CONFIG_PARAVIRT_XXL
	PATCH_CASE(irq, restore_fl, xxl, insn_buff, len);
	PATCH_CASE(irq, save_fl, xxl, insn_buff, len);
	PATCH_CASE(irq, irq_enable, xxl, insn_buff, len);
	PATCH_CASE(irq, irq_disable, xxl, insn_buff, len);

	PATCH_CASE(mmu, read_cr2, xxl, insn_buff, len);
	PATCH_CASE(mmu, read_cr3, xxl, insn_buff, len);
	PATCH_CASE(mmu, write_cr3, xxl, insn_buff, len);

	PATCH_CASE(cpu, usergs_sysret64, xxl, insn_buff, len);
	PATCH_CASE(cpu, wbinvd, xxl, insn_buff, len);
#endif

#ifdef CONFIG_PARAVIRT_SPINLOCKS
	case PARAVIRT_PATCH(lock.queued_spin_unlock):
		if (pv_is_native_spin_unlock())
			return PATCH(lock, queued_spin_unlock,
				     insn_buff, len);
		break;

	case PARAVIRT_PATCH(lock.vcpu_is_preempted):
		if (pv_is_native_vcpu_is_preempted())
			return PATCH(lock, vcpu_is_preempted,
				     insn_buff, len);
		break;
#endif

	default:
		break;
	}

	return paravirt_patch_default(type, insn_buff, addr, len);
}
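
/*
 * Usage sketch (simplified; the real loop lives in apply_paravirt() in
 * arch/x86/kernel/alternative.c): each recorded patch site is copied
 * into a scratch buffer, patched via pv_ops.init.patch (native_patch()
 * by default), padded with NOPs, and written back, roughly:
 *
 *	memcpy(insn_buff, p->instr, p->len);
 *	used = pv_ops.init.patch(p->type, insn_buff,
 *				 (unsigned long)p->instr, p->len);
 *	add_nops(insn_buff + used, p->len - used);
 *	text_poke_early(p->instr, insn_buff, p->len);
 */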