book3s_64_slb.S

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */
#include <asm/asm-compat.h>
#include <asm/feature-fixups.h>
#define SHADOW_SLB_ENTRY_LEN	0x10
#define OFFSET_ESID(x)		(SHADOW_SLB_ENTRY_LEN * x)
#define OFFSET_VSID(x)		((SHADOW_SLB_ENTRY_LEN * x) + 8)
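/*
 * Each shadow SLB entry is 16 bytes: the ESID doubleword at offset 0,
 * the VSID doubleword at offset 8. This matches the save_area layout
 * of the PAPR SLB shadow buffer (struct slb_shadow).
 */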
/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/
.macro LOAD_GUEST_SEGMENTS

	/* Required state:
	 *
	 * MSR = ~IR|DR
	 * R13 = PACA
	 * R1 = host R1
	 * R2 = host R2
	 * R3 = shadow vcpu
	 * all other volatile GPRs = free except R4, R6
	 * SVCPU[CR]  = guest CR
	 * SVCPU[XER] = guest XER
	 * SVCPU[CTR] = guest CTR
	 * SVCPU[LR]  = guest LR
	 */
BEGIN_FW_FTR_SECTION

	/* Declare SLB shadow as 0 entries big */
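	/*
	 * Byte 3 of the shadow buffer is the least significant byte of its
	 * big-endian 32-bit "persistent" field (assuming the usual struct
	 * slb_shadow layout), i.e. the number of SLB entries the hypervisor
	 * re-establishes on a partition switch. Zeroing it keeps the
	 * hypervisor from touching the SLB while the guest entries are
	 * loaded.
	 */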
	ld	r11, PACA_SLBSHADOWPTR(r13)
	li	r8, 0
	stb	r8, 3(r11)

END_FW_FTR_SECTION_IFSET(FW_FEATURE_LPAR)
	/* Flush SLB */

	li	r10, 0
	slbmte	r10, r10
	slbia
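	/*
	 * slbia does not invalidate SLB entry 0, so the slbmte above first
	 * writes an invalid entry (V = 0) to index 0 (RB = 0) before the
	 * rest of the SLB is flushed.
	 */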
	/* Fill SLB with our shadow */

	lbz	r12, SVCPU_SLB_MAX(r3)
	mulli	r12, r12, 16
	addi	r12, r12, SVCPU_SLB
	add	r12, r12, r3
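	/* r12 = r3 + SVCPU_SLB + 16 * SVCPU_SLB_MAX, i.e. one past the
	 * last guest SLB entry to copy */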
	/* for (r11 = kvm_slb; r11 < kvm_slb + kvm_slb_size; r11+=slb_entry) */

	li	r11, SVCPU_SLB
	add	r11, r11, r3
slb_loop_enter:

	ld	r10, 0(r11)			/* guest ESID */

	andis.	r9, r10, SLB_ESID_V@h		/* entry valid? */
	beq	slb_loop_enter_skip

	ld	r9, 8(r11)			/* guest VSID */
	slbmte	r9, r10

slb_loop_enter_skip:
	addi	r11, r11, 16
	cmpd	cr0, r11, r12
	blt	slb_loop_enter
slb_do_enter:

.endm
/******************************************************************************
 *                                                                            *
 *                               Exit code                                    *
 *                                                                            *
 *****************************************************************************/
.macro LOAD_HOST_SEGMENTS

	/* Register usage at this point:
	 *
	 * R1         = host R1
	 * R2         = host R2
	 * R12        = exit handler id
	 * R13        = shadow vcpu - SHADOW_VCPU_OFF [=PACA on PPC64]
	 * SVCPU.*    = guest *
	 * SVCPU[CR]  = guest CR
	 * SVCPU[XER] = guest XER
	 * SVCPU[CTR] = guest CTR
	 * SVCPU[LR]  = guest LR
	 *
	 */
	/* Remove all SLB entries that are in use. */

	li	r0, 0
	slbmte	r0, r0
	slbia
	/* Restore bolted entries from the shadow */

	ld	r11, PACA_SLBSHADOWPTR(r13)

BEGIN_FW_FTR_SECTION

	/* Declare SLB shadow as SLB_NUM_BOLTED entries big */

	li	r8, SLB_NUM_BOLTED
	stb	r8, 3(r11)

END_FW_FTR_SECTION_IFSET(FW_FEATURE_LPAR)
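	/*
	 * Counterpart of the entry path: on an LPAR the hypervisor will
	 * once again re-establish the SLB_NUM_BOLTED bolted host entries
	 * on a partition switch.
	 */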
	/* Manually load all entries from shadow SLB */

	li	r8, SLBSHADOW_SAVEAREA
	li	r7, SLBSHADOW_SAVEAREA + 8
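	/* r8 walks the ESID doublewords of the save area, r7 the matching
	 * VSID doublewords 8 bytes further in */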
	.rept	SLB_NUM_BOLTED
	LDX_BE	r10, r11, r8
	cmpdi	r10, 0			/* skip empty save area slots */
	beq	1f
	LDX_BE	r9, r11, r7
	slbmte	r9, r10
1:	addi	r7, r7, SHADOW_SLB_ENTRY_LEN
	addi	r8, r8, SHADOW_SLB_ENTRY_LEN
	.endr
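	/*
	 * The shadow buffer is big-endian per PAPR; LDX_BE resolves to a
	 * byte-reversed load (ldbrx) on little-endian kernels and a plain
	 * ldx on big-endian ones.
	 */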
	isync
	sync
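	/* make sure the rewritten SLB is in effect before returning to
	 * host code */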
slb_do_exit:

.endm