sev-es-shared.c

// SPDX-License-Identifier: GPL-2.0
/*
 * AMD Encrypted Register State Support
 *
 * Author: Joerg Roedel <jroedel@suse.de>
 *
 * This file is not compiled stand-alone. It contains code shared
 * between the pre-decompression boot code and the running Linux kernel
 * and is included directly into both code-bases.
 */

#ifndef __BOOT_COMPRESSED
#define error(v) pr_err(v)
#define has_cpuflag(f) boot_cpu_has(f)
#endif

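/*
 * An SEV-ES guest must not trust the hypervisor for entropy, so RDRAND is
 * required as an in-CPU source of randomness.
 */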
static bool __init sev_es_check_cpu_features(void)
{
        if (!has_cpuflag(X86_FEATURE_RDRAND)) {
                error("RDRAND instruction not supported - no trusted source of randomness available\n");
                return false;
        }

        return true;
}

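/*
 * Request guest termination through the GHCB MSR protocol. This only needs
 * the GHCB MSR and therefore works even before a GHCB page is set up.
 */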
static void sev_es_terminate(unsigned int reason)
{
        u64 val = GHCB_SEV_TERMINATE;

        /*
         * Tell the hypervisor what went wrong - only reason-set 0 is
         * currently supported.
         */
        val |= GHCB_SEV_TERMINATE_REASON(0, reason);

        /* Request Guest Termination from Hypervisor */
        sev_es_wr_ghcb_msr(val);
        VMGEXIT();

        while (true)
                asm volatile("hlt\n" : : : "memory");
}

static bool sev_es_negotiate_protocol(void)
{
        u64 val;

        /* Do the GHCB protocol version negotiation */
        sev_es_wr_ghcb_msr(GHCB_SEV_INFO_REQ);
        VMGEXIT();
        val = sev_es_rd_ghcb_msr();

        if (GHCB_INFO(val) != GHCB_SEV_INFO)
                return false;

        if (GHCB_PROTO_MAX(val) < GHCB_PROTO_OUR ||
            GHCB_PROTO_MIN(val) > GHCB_PROTO_OUR)
                return false;

        return true;
}

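/*
 * Reset the GHCB state. The hypervisor only consumes fields whose bit is set
 * in valid_bitmap, so clearing the bitmap (and the exit code) ensures no
 * stale register state from a previous exit is handed over.
 */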
static __always_inline void vc_ghcb_invalidate(struct ghcb *ghcb)
{
        ghcb->save.sw_exit_code = 0;
        memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
}

static bool vc_decoding_needed(unsigned long exit_code)
{
        /* Exceptions don't require decoding of the instruction */
        return !(exit_code >= SVM_EXIT_EXCP_BASE &&
                 exit_code <= SVM_EXIT_LAST_EXCP);
}

static enum es_result vc_init_em_ctxt(struct es_em_ctxt *ctxt,
                                      struct pt_regs *regs,
                                      unsigned long exit_code)
{
        enum es_result ret = ES_OK;

        memset(ctxt, 0, sizeof(*ctxt));
        ctxt->regs = regs;

        if (vc_decoding_needed(exit_code))
                ret = vc_decode_insn(ctxt);

        return ret;
}

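/* Advance the instruction pointer past the instruction that raised the #VC */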
static void vc_finish_insn(struct es_em_ctxt *ctxt)
{
        ctxt->regs->ip += ctxt->insn.length;
}

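/*
 * Perform a hypervisor call through the GHCB page: fill in the exit code and
 * exit-info fields, hand the physical address of the GHCB to the hypervisor
 * via the GHCB MSR, and issue VMGEXIT. On return, a value of 1 in the lower
 * 32 bits of sw_exit_info_1 signals that the hypervisor wants an exception
 * reflected into the guest, with vector and error code in sw_exit_info_2.
 */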
static enum es_result sev_es_ghcb_hv_call(struct ghcb *ghcb,
                                          struct es_em_ctxt *ctxt,
                                          u64 exit_code, u64 exit_info_1,
                                          u64 exit_info_2)
{
        enum es_result ret;

        /* Fill in protocol and format specifiers */
        ghcb->protocol_version = GHCB_PROTOCOL_MAX;
        ghcb->ghcb_usage       = GHCB_DEFAULT_USAGE;

        ghcb_set_sw_exit_code(ghcb, exit_code);
        ghcb_set_sw_exit_info_1(ghcb, exit_info_1);
        ghcb_set_sw_exit_info_2(ghcb, exit_info_2);

        sev_es_wr_ghcb_msr(__pa(ghcb));
        VMGEXIT();

        if ((ghcb->save.sw_exit_info_1 & 0xffffffff) == 1) {
                u64 info = ghcb->save.sw_exit_info_2;
                unsigned long v = info & SVM_EVTINJ_VEC_MASK;

                /* Check if exception information from hypervisor is sane. */
                if ((info & SVM_EVTINJ_VALID) &&
                    ((v == X86_TRAP_GP) || (v == X86_TRAP_UD)) &&
                    ((info & SVM_EVTINJ_TYPE_MASK) == SVM_EVTINJ_TYPE_EXEPT)) {
                        ctxt->fi.vector = v;
                        if (info & SVM_EVTINJ_VALID_ERR)
                                ctxt->fi.error_code = info >> 32;
                        ret = ES_EXCEPTION;
                } else {
                        ret = ES_VMM_ERROR;
                }
        } else if (ghcb->save.sw_exit_info_1 & 0xffffffff) {
                ret = ES_VMM_ERROR;
        } else {
                ret = ES_OK;
        }

        return ret;
}

/*
 * Boot VC Handler - This is the first VC handler during boot, there is no GHCB
 * page yet, so it only supports the MSR based communication with the
 * hypervisor and only the CPUID exit-code.
 */
void __init do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code)
{
        unsigned int fn = lower_bits(regs->ax, 32);
        unsigned long val;

        /* Only CPUID is supported via MSR protocol */
        if (exit_code != SVM_EXIT_CPUID)
                goto fail;

        sev_es_wr_ghcb_msr(GHCB_CPUID_REQ(fn, GHCB_CPUID_REQ_EAX));
        VMGEXIT();
        val = sev_es_rd_ghcb_msr();
        if (GHCB_SEV_GHCB_RESP_CODE(val) != GHCB_SEV_CPUID_RESP)
                goto fail;
        regs->ax = val >> 32;

        sev_es_wr_ghcb_msr(GHCB_CPUID_REQ(fn, GHCB_CPUID_REQ_EBX));
        VMGEXIT();
        val = sev_es_rd_ghcb_msr();
        if (GHCB_SEV_GHCB_RESP_CODE(val) != GHCB_SEV_CPUID_RESP)
                goto fail;
        regs->bx = val >> 32;

        sev_es_wr_ghcb_msr(GHCB_CPUID_REQ(fn, GHCB_CPUID_REQ_ECX));
        VMGEXIT();
        val = sev_es_rd_ghcb_msr();
        if (GHCB_SEV_GHCB_RESP_CODE(val) != GHCB_SEV_CPUID_RESP)
                goto fail;
        regs->cx = val >> 32;

        sev_es_wr_ghcb_msr(GHCB_CPUID_REQ(fn, GHCB_CPUID_REQ_EDX));
        VMGEXIT();
        val = sev_es_rd_ghcb_msr();
        if (GHCB_SEV_GHCB_RESP_CODE(val) != GHCB_SEV_CPUID_RESP)
                goto fail;
        regs->dx = val >> 32;

        /*
         * This is a VC handler and the #VC is only raised when SEV-ES is
         * active, which means SEV must be active too. Do sanity checks on the
         * CPUID results to make sure the hypervisor does not trick the kernel
         * into the no-sev path. This could map sensitive data unencrypted and
         * make it accessible to the hypervisor.
         *
         * In particular, check for:
         *      - Availability of CPUID leaf 0x8000001f
         *      - SEV CPUID bit.
         *
         * The hypervisor might still report the wrong C-bit position, but this
         * can't be checked here.
         */
        if (fn == 0x80000000 && (regs->ax < 0x8000001f))
                /* SEV leaf check */
                goto fail;
        else if ((fn == 0x8000001f && !(regs->ax & BIT(1))))
                /* SEV bit */
                goto fail;

        /* Skip over the CPUID two-byte opcode */
        regs->ip += 2;

        return;

fail:
        sev_es_wr_ghcb_msr(GHCB_SEV_TERMINATE);
        VMGEXIT();

        /* Shouldn't get here - if we do, halt the machine */
        while (true)
                asm volatile("hlt\n");
}

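/*
 * Copy the memory operands of a string instruction element-wise through
 * vc_read_mem()/vc_write_mem(). 'backwards' mirrors EFLAGS.DF: with the
 * direction flag set, the guest pointer walks downwards while the linear
 * buffer is always filled front to back.
 */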
static enum es_result vc_insn_string_read(struct es_em_ctxt *ctxt,
                                          void *src, char *buf,
                                          unsigned int data_size,
                                          unsigned int count,
                                          bool backwards)
{
        int i, b = backwards ? -1 : 1;
        enum es_result ret = ES_OK;

        for (i = 0; i < count; i++) {
                void *s = src + (i * data_size * b);
                char *d = buf + (i * data_size);

                ret = vc_read_mem(ctxt, s, d, data_size);
                if (ret != ES_OK)
                        break;
        }

        return ret;
}

static enum es_result vc_insn_string_write(struct es_em_ctxt *ctxt,
                                           void *dst, char *buf,
                                           unsigned int data_size,
                                           unsigned int count,
                                           bool backwards)
{
        int i, s = backwards ? -1 : 1;
        enum es_result ret = ES_OK;

        for (i = 0; i < count; i++) {
                void *d = dst + (i * data_size * s);
                char *b = buf + (i * data_size);

                ret = vc_write_mem(ctxt, d, b, data_size);
                if (ret != ES_OK)
                        break;
        }

        return ret;
}

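/*
 * The definitions below follow the EXITINFO1 encoding of the SVM IOIO
 * intercept (see the AMD APM): direction, string and REP flags in the low
 * bits, operand and address size next, the effective segment in bits 10-12,
 * and the port number in bits 16-31.
 */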
#define IOIO_TYPE_STR  BIT(2)
#define IOIO_TYPE_IN   1
#define IOIO_TYPE_INS  (IOIO_TYPE_IN | IOIO_TYPE_STR)
#define IOIO_TYPE_OUT  0
#define IOIO_TYPE_OUTS (IOIO_TYPE_OUT | IOIO_TYPE_STR)

#define IOIO_REP       BIT(3)

#define IOIO_ADDR_64   BIT(9)
#define IOIO_ADDR_32   BIT(8)
#define IOIO_ADDR_16   BIT(7)

#define IOIO_DATA_32   BIT(6)
#define IOIO_DATA_16   BIT(5)
#define IOIO_DATA_8    BIT(4)

#define IOIO_SEG_ES    (0 << 10)
#define IOIO_SEG_DS    (3 << 10)

static enum es_result vc_ioio_exitinfo(struct es_em_ctxt *ctxt, u64 *exitinfo)
{
        struct insn *insn = &ctxt->insn;
        *exitinfo = 0;

        switch (insn->opcode.bytes[0]) {
        /* INS opcodes */
        case 0x6c:
        case 0x6d:
                *exitinfo |= IOIO_TYPE_INS;
                *exitinfo |= IOIO_SEG_ES;
                *exitinfo |= (ctxt->regs->dx & 0xffff) << 16;
                break;

        /* OUTS opcodes */
        case 0x6e:
        case 0x6f:
                *exitinfo |= IOIO_TYPE_OUTS;
                *exitinfo |= IOIO_SEG_DS;
                *exitinfo |= (ctxt->regs->dx & 0xffff) << 16;
                break;

        /* IN immediate opcodes */
        case 0xe4:
        case 0xe5:
                *exitinfo |= IOIO_TYPE_IN;
                *exitinfo |= (u8)insn->immediate.value << 16;
                break;

        /* OUT immediate opcodes */
        case 0xe6:
        case 0xe7:
                *exitinfo |= IOIO_TYPE_OUT;
                *exitinfo |= (u8)insn->immediate.value << 16;
                break;

        /* IN register opcodes */
        case 0xec:
        case 0xed:
                *exitinfo |= IOIO_TYPE_IN;
                *exitinfo |= (ctxt->regs->dx & 0xffff) << 16;
                break;

        /* OUT register opcodes */
        case 0xee:
        case 0xef:
                *exitinfo |= IOIO_TYPE_OUT;
                *exitinfo |= (ctxt->regs->dx & 0xffff) << 16;
                break;

        default:
                return ES_DECODE_FAILED;
        }

        switch (insn->opcode.bytes[0]) {
        case 0x6c:
        case 0x6e:
        case 0xe4:
        case 0xe6:
        case 0xec:
        case 0xee:
                /* Single byte opcodes */
                *exitinfo |= IOIO_DATA_8;
                break;
        default:
                /* Length determined by instruction parsing */
                *exitinfo |= (insn->opnd_bytes == 2) ? IOIO_DATA_16
                                                     : IOIO_DATA_32;
        }

        switch (insn->addr_bytes) {
        case 2:
                *exitinfo |= IOIO_ADDR_16;
                break;
        case 4:
                *exitinfo |= IOIO_ADDR_32;
                break;
        case 8:
                *exitinfo |= IOIO_ADDR_64;
                break;
        }

        if (insn_has_rep_prefix(insn))
                *exitinfo |= IOIO_REP;

        return ES_OK;
}

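/*
 * Handle a #VC exception for an IN/OUT or INS/OUTS instruction. The string
 * forms bounce the data through the GHCB shared buffer, possibly in chunks
 * smaller than the REP count; in that case ES_RETRY causes the instruction
 * to be re-executed for the remaining iterations.
 */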
static enum es_result vc_handle_ioio(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
{
        struct pt_regs *regs = ctxt->regs;
        u64 exit_info_1, exit_info_2;
        enum es_result ret;

        ret = vc_ioio_exitinfo(ctxt, &exit_info_1);
        if (ret != ES_OK)
                return ret;

        if (exit_info_1 & IOIO_TYPE_STR) {
                /* (REP) INS/OUTS */
                bool df = ((regs->flags & X86_EFLAGS_DF) == X86_EFLAGS_DF);
                unsigned int io_bytes, exit_bytes;
                unsigned int ghcb_count, op_count;
                unsigned long es_base;
                u64 sw_scratch;

                /*
                 * For the string variants with rep prefix the amount of in/out
                 * operations per #VC exception is limited so that the kernel
                 * has a chance to take interrupts and re-schedule while the
                 * instruction is emulated.
                 */
                io_bytes   = (exit_info_1 >> 4) & 0x7;
                ghcb_count = sizeof(ghcb->shared_buffer) / io_bytes;

                op_count    = (exit_info_1 & IOIO_REP) ? regs->cx : 1;
                exit_info_2 = min(op_count, ghcb_count);
                exit_bytes  = exit_info_2 * io_bytes;

                es_base = insn_get_seg_base(ctxt->regs, INAT_SEG_REG_ES);

                /* Read bytes of OUTS into the shared buffer */
                if (!(exit_info_1 & IOIO_TYPE_IN)) {
                        ret = vc_insn_string_read(ctxt,
                                                  (void *)(es_base + regs->si),
                                                  ghcb->shared_buffer, io_bytes,
                                                  exit_info_2, df);
                        if (ret)
                                return ret;
                }

                /*
                 * Issue a VMGEXIT to the HV to consume the bytes from the
                 * shared buffer or to have it write them into the shared buffer
                 * depending on the instruction: OUTS or INS.
                 */
                sw_scratch = __pa(ghcb) + offsetof(struct ghcb, shared_buffer);
                ghcb_set_sw_scratch(ghcb, sw_scratch);
                ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_IOIO,
                                          exit_info_1, exit_info_2);
                if (ret != ES_OK)
                        return ret;

                /* Read bytes from shared buffer into the guest's destination. */
                if (exit_info_1 & IOIO_TYPE_IN) {
                        ret = vc_insn_string_write(ctxt,
                                                   (void *)(es_base + regs->di),
                                                   ghcb->shared_buffer, io_bytes,
                                                   exit_info_2, df);
                        if (ret)
                                return ret;

                        if (df)
                                regs->di -= exit_bytes;
                        else
                                regs->di += exit_bytes;
                } else {
                        if (df)
                                regs->si -= exit_bytes;
                        else
                                regs->si += exit_bytes;
                }

                if (exit_info_1 & IOIO_REP)
                        regs->cx -= exit_info_2;

                ret = regs->cx ? ES_RETRY : ES_OK;
        } else {
                /* IN/OUT into/from rAX */
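                /*
                 * IOIO_DATA_8/16/32 are BIT(4)/BIT(5)/BIT(6), i.e. the values
                 * 8, 16 and 32 shifted left by one - so masking with 0x70 and
                 * shifting right by one yields the operand size in bits.
                 */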
                int bits = (exit_info_1 & 0x70) >> 1;
                u64 rax = 0;

                if (!(exit_info_1 & IOIO_TYPE_IN))
                        rax = lower_bits(regs->ax, bits);

                ghcb_set_rax(ghcb, rax);

                ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_IOIO, exit_info_1, 0);
                if (ret != ES_OK)
                        return ret;

                if (exit_info_1 & IOIO_TYPE_IN) {
                        if (!ghcb_rax_is_valid(ghcb))
                                return ES_VMM_ERROR;
                        regs->ax = lower_bits(ghcb->save.rax, bits);
                }
        }

        return ret;
}

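/*
 * Handle a #VC exception for CPUID: forward the requested leaf and, because
 * some leaves (e.g. 0xd) depend on it, the current XCR0 value to the
 * hypervisor, then copy the returned register values back into pt_regs.
 */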
static enum es_result vc_handle_cpuid(struct ghcb *ghcb,
                                      struct es_em_ctxt *ctxt)
{
        struct pt_regs *regs = ctxt->regs;
        u32 cr4 = native_read_cr4();
        enum es_result ret;

        ghcb_set_rax(ghcb, regs->ax);
        ghcb_set_rcx(ghcb, regs->cx);

        if (cr4 & X86_CR4_OSXSAVE)
                /* Safe to read xcr0 */
                ghcb_set_xcr0(ghcb, xgetbv(XCR_XFEATURE_ENABLED_MASK));
        else
                /* xgetbv will cause #GP - use reset value for xcr0 */
                ghcb_set_xcr0(ghcb, 1);

        ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_CPUID, 0, 0);
        if (ret != ES_OK)
                return ret;

        if (!(ghcb_rax_is_valid(ghcb) &&
              ghcb_rbx_is_valid(ghcb) &&
              ghcb_rcx_is_valid(ghcb) &&
              ghcb_rdx_is_valid(ghcb)))
                return ES_VMM_ERROR;

        regs->ax = ghcb->save.rax;
        regs->bx = ghcb->save.rbx;
        regs->cx = ghcb->save.rcx;
        regs->dx = ghcb->save.rdx;

        return ES_OK;
}

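/*
 * Handle a #VC exception for RDTSC/RDTSCP: the hypervisor supplies the TSC
 * value in RDX:RAX, plus the TSC_AUX value in RCX for RDTSCP.
 */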
static enum es_result vc_handle_rdtsc(struct ghcb *ghcb,
                                      struct es_em_ctxt *ctxt,
                                      unsigned long exit_code)
{
        bool rdtscp = (exit_code == SVM_EXIT_RDTSCP);
        enum es_result ret;

        ret = sev_es_ghcb_hv_call(ghcb, ctxt, exit_code, 0, 0);
        if (ret != ES_OK)
                return ret;

        if (!(ghcb_rax_is_valid(ghcb) && ghcb_rdx_is_valid(ghcb) &&
              (!rdtscp || ghcb_rcx_is_valid(ghcb))))
                return ES_VMM_ERROR;

        ctxt->regs->ax = ghcb->save.rax;
        ctxt->regs->dx = ghcb->save.rdx;
        if (rdtscp)
                ctxt->regs->cx = ghcb->save.rcx;

        return ES_OK;
}