relocate_kernel.S

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * kexec for arm64
 *
 * Copyright (C) Linaro.
 * Copyright (C) Huawei Futurewei Technologies.
 */

#include <linux/kexec.h>
#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/kexec.h>
#include <asm/page.h>
#include <asm/sysreg.h>
/*
 * arm64_relocate_new_kernel - Put a 2nd stage image in place and boot it.
 *
 * The memory that the old kernel occupies may be overwritten when copying the
 * new image to its final location. To assure that the
 * arm64_relocate_new_kernel routine which does that copy is not overwritten,
 * all code and data needed by arm64_relocate_new_kernel must be between the
 * symbols arm64_relocate_new_kernel and arm64_relocate_new_kernel_end. The
 * machine_kexec() routine will copy arm64_relocate_new_kernel to the kexec
 * control_code_page, a special page which has been set up to be preserved
 * during the copy operation.
 */
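/*
 * The relocation list walked below is an array of 64-bit kimage entries:
 * each holds a page-aligned address plus a low-bit flag. IND_DESTINATION
 * sets the copy destination, IND_INDIRECTION points at the next page of
 * entries, IND_SOURCE names a page to copy, and IND_DONE terminates the
 * list (see linux/kexec.h).
 */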
SYM_CODE_START(arm64_relocate_new_kernel)
	/* Setup the list loop variables. */
	mov	x18, x2				/* x18 = dtb address */
	mov	x17, x1				/* x17 = kimage_start */
	mov	x16, x0				/* x16 = kimage_head */
	raw_dcache_line_size x15, x0		/* x15 = dcache line size */
	mov	x14, xzr			/* x14 = entry ptr */
	mov	x13, xzr			/* x13 = copy dest */

	/* Check if the new image needs relocation. */
	tbnz	x16, IND_DONE_BIT, .Ldone
.Lloop:
	and	x12, x16, PAGE_MASK		/* x12 = addr */

	/* Test the entry flags. */
.Ltest_source:
	tbz	x16, IND_SOURCE_BIT, .Ltest_indirection

	/* Invalidate dest page to PoC. */
	mov	x0, x13
	add	x20, x0, #PAGE_SIZE
	sub	x1, x15, #1
	bic	x0, x0, x1
2:	dc	ivac, x0
	add	x0, x0, x15
	cmp	x0, x20
	b.lo	2b
	dsb	sy
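	/*
	 * Copy the source page (x12) to the destination (x13); copy_page
	 * advances its address arguments, so work on scratch copies.
	 */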
	mov	x20, x13
	mov	x21, x12
	copy_page x20, x21, x0, x1, x2, x3, x4, x5, x6, x7

	/* dest += PAGE_SIZE */
	add	x13, x13, PAGE_SIZE
	b	.Lnext

.Ltest_indirection:
	tbz	x16, IND_INDIRECTION_BIT, .Ltest_destination

	/* ptr = addr */
	mov	x14, x12
	b	.Lnext

.Ltest_destination:
	tbz	x16, IND_DESTINATION_BIT, .Lnext

	/* dest = addr */
	mov	x13, x12

.Lnext:
	/* entry = *ptr++ */
	ldr	x16, [x14], #8

	/* while (!(entry & DONE)) */
	tbz	x16, IND_DONE_BIT, .Lloop

.Ldone:
	/* wait for writes from copy_page to finish */
	dsb	nsh
	ic	iallu
	dsb	nsh
	isb

	/* Start new image. */
	mov	x0, x18
	mov	x1, xzr
	mov	x2, xzr
	mov	x3, xzr
	br	x17

SYM_CODE_END(arm64_relocate_new_kernel)
.align 3	/* To keep the 64-bit values below naturally aligned. */

.Lcopy_end:
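/* The assembler errors out here if the code above exceeds KEXEC_CONTROL_PAGE_SIZE. */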
.org	KEXEC_CONTROL_PAGE_SIZE

/*
 * arm64_relocate_new_kernel_size - Number of bytes to copy to the
 * control_code_page.
 */
.globl arm64_relocate_new_kernel_size
arm64_relocate_new_kernel_size:
	.quad	.Lcopy_end - arm64_relocate_new_kernel