book3s_32_sr.S

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

/******************************************************************************
 *                                                                            *
 *                                 Entry code                                 *
 *                                                                            *
 *****************************************************************************/

.macro LOAD_GUEST_SEGMENTS

	/* Required state:
	 *
	 * MSR = ~IR|DR
	 * R1 = host R1
	 * R2 = host R2
	 * R3 = shadow vcpu
	 * all other volatile GPRs = free except R4, R6
	 * SVCPU[CR]  = guest CR
	 * SVCPU[XER] = guest XER
	 * SVCPU[CTR] = guest CTR
	 * SVCPU[LR]  = guest LR
	 */
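
	/* XCHG_SR(n) fetches guest segment register n from the shadow vcpu's
	 * SVCPU_SR array (one 32-bit word per SR) and installs it with mtsr,
	 * using r9 as scratch. */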

#define XCHG_SR(n)	lwz	r9, (SVCPU_SR+(n*4))(r3);  \
			mtsr	n, r9

	XCHG_SR(0)
	XCHG_SR(1)
	XCHG_SR(2)
	XCHG_SR(3)
	XCHG_SR(4)
	XCHG_SR(5)
	XCHG_SR(6)
	XCHG_SR(7)
	XCHG_SR(8)
	XCHG_SR(9)
	XCHG_SR(10)
	XCHG_SR(11)
	XCHG_SR(12)
	XCHG_SR(13)
	XCHG_SR(14)
	XCHG_SR(15)

	/* Clear BATs. */
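
	/* Zeroing both halves of every IBAT/DBAT pair clears their valid
	 * bits, so none of the host's block translations remain visible to
	 * the guest; the guest runs purely on the segment registers loaded
	 * above. */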

#define KVM_KILL_BAT(n, reg)		\
	mtspr	SPRN_IBAT##n##U,reg;	\
	mtspr	SPRN_IBAT##n##L,reg;	\
	mtspr	SPRN_DBAT##n##U,reg;	\
	mtspr	SPRN_DBAT##n##L,reg

	li	r9, 0
	KVM_KILL_BAT(0, r9)
	KVM_KILL_BAT(1, r9)
	KVM_KILL_BAT(2, r9)
	KVM_KILL_BAT(3, r9)

.endm

/******************************************************************************
 *                                                                            *
 *                                 Exit code                                  *
 *                                                                            *
 *****************************************************************************/

.macro LOAD_HOST_SEGMENTS

	/* Register usage at this point:
	 *
	 * R1         = host R1
	 * R2         = host R2
	 * R12        = exit handler id
	 * R13        = shadow vcpu - SHADOW_VCPU_OFF
	 * SVCPU.*    = guest *
	 * SVCPU[CR]  = guest CR
	 * SVCPU[XER] = guest XER
	 * SVCPU[CTR] = guest CTR
	 * SVCPU[LR]  = guest LR
	 *
	 */

	/* Restore BATs */

	/* Guest entry cleared both halves of every BAT, so restore both the
	   upper and the lower parts here. */
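
	/* KVM_LOAD_BAT expects 16 bytes of saved state per BAT index n,
	 * laid out as IBATnU, IBATnL, DBATnU, DBATnL. */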
#define KVM_LOAD_BAT(n, reg, RA, RB)	\
	lwz	RA,(n*16)+0(reg);	\
	lwz	RB,(n*16)+4(reg);	\
	mtspr	SPRN_IBAT##n##U,RA;	\
	mtspr	SPRN_IBAT##n##L,RB;	\
	lwz	RA,(n*16)+8(reg);	\
	lwz	RB,(n*16)+12(reg);	\
	mtspr	SPRN_DBAT##n##U,RA;	\
	mtspr	SPRN_DBAT##n##L,RB
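
	/* BATS is the host kernel's save area for its BAT values; translation
	 * is still off here, so convert its address with tophys() before
	 * dereferencing it. */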
	lis	r9, BATS@ha
	addi	r9, r9, BATS@l
	tophys(r9, r9)
	KVM_LOAD_BAT(0, r9, r10, r11)
	KVM_LOAD_BAT(1, r9, r10, r11)
	KVM_LOAD_BAT(2, r9, r10, r11)
	KVM_LOAD_BAT(3, r9, r10, r11)

	/* Restore Segment Registers */

	/* 0xc - 0xf */
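
	/* Rewrite the four kernel segments (0xc0000000 and up) by hand:
	 * VSID = 0x111 * segment index with the Kp key bit (0x20000000) set,
	 * stepping the VSID by 0x111 and the effective address by 256MB per
	 * iteration. This is assumed to match the host kernel's own segment
	 * layout for these segments. */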
	li	r0, 4
	mtctr	r0
	LOAD_REG_IMMEDIATE(r3, 0x20000000 | (0x111 * 0xc))
	lis	r4, 0xc000
3:	mtsrin	r3, r4
	addi	r3, r3, 0x111		/* increment VSID */
	addis	r4, r4, 0x1000		/* address of next segment */
	bdnz	3b

	/* 0x0 - 0xb */

	/* 'current->mm' needs to be in r4 */
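
	/* r2 still holds the host's 'current' task pointer (the "R2 = host
	 * R2" requirement above); with translation still off, the
	 * tophys()/lwz sequence below yields the physical address of
	 * current->mm. */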
	tophys(r4, r2)
	lwz	r4, MM(r4)
	tophys(r4, r4)
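
	/* switch_mmu_context() reloads the user segment registers
	 * (0x0 - 0xb) for current->mm, putting the host's low segments back
	 * in place. */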
	/* This only clobbers r0, r3, r4 and r5 */
	bl	switch_mmu_context

.endm