sev_verify_cbit.S

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * sev_verify_cbit.S - Code for verification of the C-bit position reported
 * by the Hypervisor when running with SEV enabled.
 *
 * Copyright (c) 2020 Joerg Roedel (jroedel@suse.de)
 *
 * sev_verify_cbit() is called before switching to a new long-mode page-table
 * at boot.
 *
 * It verifies that the C-bit position is correct by writing a random value to
 * an encrypted memory location while still on the current page-table. It then
 * switches to the new page-table to verify that the memory content is still
 * the same, and switches back to the current page-table afterwards. If the
 * check succeeded, it returns. If the check failed, the code invalidates the
 * stack pointer and goes into a hlt loop. The stack pointer is invalidated to
 * make sure no interrupt or exception can get the CPU out of the hlt loop.
 *
 * The new page-table pointer is expected in %rdi (first parameter).
 */
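
/*
 * Illustrative call sequence - a sketch only, not the kernel's actual call
 * site; the caller-side register choices here are assumed. The new page-table
 * pointer is passed in %rdi, and the value returned in %rax is what the
 * caller loads into %cr3 once the check has passed. Note that %rsi, %rdx and
 * %rcx are clobbered by the function.
 *
 *	movq	%rax, %rdi		# new page-table pointer
 *	call	sev_verify_cbit
 *	movq	%rax, %cr3		# only reached if the C-bit checked out
 */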
SYM_FUNC_START(sev_verify_cbit)
#ifdef CONFIG_AMD_MEM_ENCRYPT
	/* First check if a C-bit was detected */
	movq	sme_me_mask(%rip), %rsi
	testq	%rsi, %rsi
	jz	3f

	/* sme_me_mask != 0 could mean SME or SEV - Check also for SEV */
	movq	sev_status(%rip), %rsi
	testq	%rsi, %rsi
	jz	3f
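
	/*
	 * From here on the code is known to run as a SEV guest, i.e. the
	 * C-bit position was reported by the (untrusted) hypervisor and has
	 * to be verified before the new page-table is used.
	 */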

	/* Save CR4 in %rsi */
	movq	%cr4, %rsi

	/* Disable Global Pages */
	movq	%rsi, %rdx
	andq	$(~X86_CR4_PGE), %rdx
	movq	%rdx, %cr4
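
	/*
	 * Clearing CR4.PGE flushes all TLB entries, including global ones
	 * that a plain CR3 write would leave in place, so the memory access
	 * used for the check below goes through the new page-table rather
	 * than a stale cached translation.
	 */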

	/*
	 * Verified that running under SEV - now get a random value using
	 * RDRAND. This instruction is mandatory when running as an SEV guest.
	 *
	 * Don't bail out of the loop if RDRAND returns errors. It is better to
	 * prevent forward progress than to work with a non-random value here.
	 */
1:	rdrand	%rdx
	jnc	1b

	/* Store value to memory and keep it in %rdx */
	movq	%rdx, sev_check_data(%rip)

	/* Backup current %cr3 value to restore it later */
	movq	%cr3, %rcx

	/* Switch to new %cr3 - This might unmap the stack */
	movq	%rdi, %cr3

	/*
	 * Compare value in %rdx with memory location. If C-bit is incorrect
	 * this would read the encrypted data and make the check fail.
	 */
	cmpq	%rdx, sev_check_data(%rip)

	/* Restore old %cr3 */
	movq	%rcx, %cr3

	/* Restore previous CR4 */
	movq	%rsi, %cr4

	/* Check CMPQ result */
	je	3f

	/*
	 * The check failed: to prevent any forward progress and thwart ROP
	 * attacks, invalidate the stack pointer and go into a hlt loop.
	 */
	xorq	%rsp, %rsp
	subq	$0x1000, %rsp
2:	hlt
	jmp	2b

3:
#endif
	/* Return page-table pointer */
	movq	%rdi, %rax
	ret
SYM_FUNC_END(sev_verify_cbit)