// SPDX-License-Identifier: GPL-2.0+
//
// Security related flags and so on.
//
// Copyright 2018, Michael Ellerman, IBM Corporation.

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/nospec.h>
#include <linux/prctl.h>
#include <linux/seq_buf.h>

#include <asm/asm-prototypes.h>
#include <asm/code-patching.h>
#include <asm/debugfs.h>
#include <asm/security_features.h>
#include <asm/setup.h>
#include <asm/inst.h>

u64 powerpc_security_features __read_mostly = SEC_FTR_DEFAULT;

enum branch_cache_flush_type {
        BRANCH_CACHE_FLUSH_NONE = 0x1,
        BRANCH_CACHE_FLUSH_SW   = 0x2,
        BRANCH_CACHE_FLUSH_HW   = 0x4,
};
static enum branch_cache_flush_type count_cache_flush_type = BRANCH_CACHE_FLUSH_NONE;
static enum branch_cache_flush_type link_stack_flush_type = BRANCH_CACHE_FLUSH_NONE;

bool barrier_nospec_enabled;
static bool no_nospec;
static bool btb_flush_enabled;

#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3S_64)
static bool no_spectrev2;
#endif
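
/*
 * Enable or disable the Spectre v1 barrier by patching every
 * barrier_nospec fixup site, and record the state so the sysfs and
 * debugfs interfaces below can report it.
 */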
static void enable_barrier_nospec(bool enable)
{
        barrier_nospec_enabled = enable;
        do_barrier_nospec_fixups(enable);
}

void setup_barrier_nospec(void)
{
        bool enable;

        /*
         * It would make sense to check SEC_FTR_SPEC_BAR_ORI31 below as well.
         * But there's a good reason not to. The two flags we check below are
         * both enabled by default in the kernel, so if the hcall is not
         * functional they will be enabled.
         * On a system where the host firmware has been updated (so the ori
         * functions as a barrier), but on which the hypervisor (KVM/Qemu) has
         * not been updated, we would like to enable the barrier. Dropping the
         * check for SEC_FTR_SPEC_BAR_ORI31 achieves that. The only downside is
         * we potentially enable the barrier on systems where the host firmware
         * is not updated, but that's harmless as it's a no-op.
         */
        enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
                 security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR);

        if (!no_nospec && !cpu_mitigations_off())
                enable_barrier_nospec(enable);
}

static int __init handle_nospectre_v1(char *p)
{
        no_nospec = true;

        return 0;
}
early_param("nospectre_v1", handle_nospectre_v1);

#ifdef CONFIG_DEBUG_FS
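/*
 * Runtime toggle for the barrier: with CONFIG_DEBUG_FS this appears as
 * <debugfs>/powerpc/barrier_nospec, and writing 0 or 1 re-patches the
 * fixup sites via enable_barrier_nospec().
 */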
static int barrier_nospec_set(void *data, u64 val)
{
        switch (val) {
        case 0:
        case 1:
                break;
        default:
                return -EINVAL;
        }

        if (!!val == !!barrier_nospec_enabled)
                return 0;

        enable_barrier_nospec(!!val);

        return 0;
}

static int barrier_nospec_get(void *data, u64 *val)
{
        *val = barrier_nospec_enabled ? 1 : 0;
        return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(fops_barrier_nospec, barrier_nospec_get,
                         barrier_nospec_set, "%llu\n");

static __init int barrier_nospec_debugfs_init(void)
{
        debugfs_create_file_unsafe("barrier_nospec", 0600,
                                   powerpc_debugfs_root, NULL,
                                   &fops_barrier_nospec);
        return 0;
}
device_initcall(barrier_nospec_debugfs_init);

static __init int security_feature_debugfs_init(void)
{
        debugfs_create_x64("security_features", 0400, powerpc_debugfs_root,
                           &powerpc_security_features);
        return 0;
}
device_initcall(security_feature_debugfs_init);
#endif /* CONFIG_DEBUG_FS */

#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3S_64)
static int __init handle_nospectre_v2(char *p)
{
        no_spectrev2 = true;

        return 0;
}
early_param("nospectre_v2", handle_nospectre_v2);
#endif /* CONFIG_PPC_FSL_BOOK3E || CONFIG_PPC_BOOK3S_64 */

#ifdef CONFIG_PPC_FSL_BOOK3E
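/*
 * On Freescale Book3E the Spectre v2 mitigation is a branch target
 * buffer (BTB) flush performed on entry to the kernel. When mitigations
 * are disabled the flush sequences are patched out; otherwise we only
 * record that they are active, for cpu_show_spectre_v2() below.
 */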
void setup_spectre_v2(void)
{
        if (no_spectrev2 || cpu_mitigations_off())
                do_btb_flush_fixups();
        else
                btb_flush_enabled = true;
}
#endif /* CONFIG_PPC_FSL_BOOK3E */

#ifdef CONFIG_PPC_BOOK3S_64
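/*
 * Backs /sys/devices/system/cpu/vulnerabilities/meltdown. The RFI
 * flush (an L1D flush on return from the kernel) is the Meltdown
 * mitigation here, and the same state answers for L1TF below.
 */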
ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
{
        bool thread_priv;

        thread_priv = security_ftr_enabled(SEC_FTR_L1D_THREAD_PRIV);

        if (rfi_flush) {
                struct seq_buf s;

                seq_buf_init(&s, buf, PAGE_SIZE - 1);

                seq_buf_printf(&s, "Mitigation: RFI Flush");
                if (thread_priv)
                        seq_buf_printf(&s, ", L1D private per thread");

                seq_buf_printf(&s, "\n");

                return s.len;
        }

        if (thread_priv)
                return sprintf(buf, "Vulnerable: L1D private per thread\n");

        if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) &&
            !security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR))
                return sprintf(buf, "Not affected\n");

        return sprintf(buf, "Vulnerable\n");
}

ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_meltdown(dev, attr, buf);
}
#endif

ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct seq_buf s;

        seq_buf_init(&s, buf, PAGE_SIZE - 1);

        if (security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR)) {
                if (barrier_nospec_enabled)
                        seq_buf_printf(&s, "Mitigation: __user pointer sanitization");
                else
                        seq_buf_printf(&s, "Vulnerable");

                if (security_ftr_enabled(SEC_FTR_SPEC_BAR_ORI31))
                        seq_buf_printf(&s, ", ori31 speculation barrier enabled");

                seq_buf_printf(&s, "\n");
        } else
                seq_buf_printf(&s, "Not affected\n");

        return s.len;
}
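
/*
 * Spectre v2 status: report whichever indirect branch mitigation is
 * active (serialisation, count cache disable/flush, or BTB flush),
 * then append the link stack flush state where it applies.
 */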
ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct seq_buf s;
        bool bcs, ccd;

        seq_buf_init(&s, buf, PAGE_SIZE - 1);

        bcs = security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED);
        ccd = security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED);

        if (bcs || ccd) {
                seq_buf_printf(&s, "Mitigation: ");

                if (bcs)
                        seq_buf_printf(&s, "Indirect branch serialisation (kernel only)");

                if (bcs && ccd)
                        seq_buf_printf(&s, ", ");

                if (ccd)
                        seq_buf_printf(&s, "Indirect branch cache disabled");

        } else if (count_cache_flush_type != BRANCH_CACHE_FLUSH_NONE) {
                seq_buf_printf(&s, "Mitigation: Software count cache flush");

                if (count_cache_flush_type == BRANCH_CACHE_FLUSH_HW)
                        seq_buf_printf(&s, " (hardware accelerated)");

        } else if (btb_flush_enabled) {
                seq_buf_printf(&s, "Mitigation: Branch predictor state flush");
        } else {
                seq_buf_printf(&s, "Vulnerable");
        }

        if (bcs || ccd || count_cache_flush_type != BRANCH_CACHE_FLUSH_NONE) {
                if (link_stack_flush_type != BRANCH_CACHE_FLUSH_NONE)
                        seq_buf_printf(&s, ", Software link stack flush");
                if (link_stack_flush_type == BRANCH_CACHE_FLUSH_HW)
                        seq_buf_printf(&s, " (hardware accelerated)");
        }

        seq_buf_printf(&s, "\n");

        return s.len;
}

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * Store-forwarding barrier support.
 */
static enum stf_barrier_type stf_enabled_flush_types;
static bool no_stf_barrier;
bool stf_barrier;

static int __init handle_no_stf_barrier(char *p)
{
        pr_info("stf-barrier: disabled on command line.\n");
        no_stf_barrier = true;
        return 0;
}
early_param("no_stf_barrier", handle_no_stf_barrier);

enum stf_barrier_type stf_barrier_type_get(void)
{
        return stf_enabled_flush_types;
}

/* This is the generic flag used by other architectures */
static int __init handle_ssbd(char *p)
{
        if (!p || strncmp(p, "auto", 5) == 0 || strncmp(p, "on", 2) == 0) {
                /* Until firmware tells us, we have the barrier with auto */
                return 0;
        } else if (strncmp(p, "off", 3) == 0) {
                handle_no_stf_barrier(NULL);
                return 0;
        } else
                return 1;

        return 0;
}
early_param("spec_store_bypass_disable", handle_ssbd);

/* This is the generic flag used by other architectures */
static int __init handle_no_ssbd(char *p)
{
        handle_no_stf_barrier(NULL);
        return 0;
}
early_param("nospec_store_bypass_disable", handle_no_ssbd);
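
/*
 * Patch the STF barrier sequences in or out at the kernel entry/exit
 * fixup sites; passing STF_BARRIER_NONE removes them entirely.
 */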
static void stf_barrier_enable(bool enable)
{
        if (enable)
                do_stf_barrier_fixups(stf_enabled_flush_types);
        else
                do_stf_barrier_fixups(STF_BARRIER_NONE);

        stf_barrier = enable;
}
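
/*
 * Select the barrier type by CPU generation (eieio on ISA v3.0,
 * sync+ori "hwsync" on v2.07, a fallback sequence on v2.06), then
 * enable it when firmware requests the L1D flush mitigations and
 * neither the user nor cpu_mitigations_off() has disabled it.
 */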
void setup_stf_barrier(void)
{
        enum stf_barrier_type type;
        bool enable, hv;

        hv = cpu_has_feature(CPU_FTR_HVMODE);

        /* Default to fallback in case fw-features are not available */
        if (cpu_has_feature(CPU_FTR_ARCH_300))
                type = STF_BARRIER_EIEIO;
        else if (cpu_has_feature(CPU_FTR_ARCH_207S))
                type = STF_BARRIER_SYNC_ORI;
        else if (cpu_has_feature(CPU_FTR_ARCH_206))
                type = STF_BARRIER_FALLBACK;
        else
                type = STF_BARRIER_NONE;

        enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
                 (security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR) ||
                  (security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) && hv));

        if (type == STF_BARRIER_FALLBACK) {
                pr_info("stf-barrier: fallback barrier available\n");
        } else if (type == STF_BARRIER_SYNC_ORI) {
                pr_info("stf-barrier: hwsync barrier available\n");
        } else if (type == STF_BARRIER_EIEIO) {
                pr_info("stf-barrier: eieio barrier available\n");
        }

        stf_enabled_flush_types = type;

        if (!no_stf_barrier && !cpu_mitigations_off())
                stf_barrier_enable(enable);
}

ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
{
        if (stf_barrier && stf_enabled_flush_types != STF_BARRIER_NONE) {
                const char *type;

                switch (stf_enabled_flush_types) {
                case STF_BARRIER_EIEIO:
                        type = "eieio";
                        break;
                case STF_BARRIER_SYNC_ORI:
                        type = "hwsync";
                        break;
                case STF_BARRIER_FALLBACK:
                        type = "fallback";
                        break;
                default:
                        type = "unknown";
                }

                return sprintf(buf, "Mitigation: Kernel entry/exit barrier (%s)\n", type);
        }

        if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) &&
            !security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR))
                return sprintf(buf, "Not affected\n");

        return sprintf(buf, "Vulnerable\n");
}
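
/*
 * PR_SPEC_STORE_BYPASS state for prctl(PR_GET_SPECULATION_CTRL). The
 * prctl can't control the STF barrier per task, so only the global
 * state is reported.
 */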
static int ssb_prctl_get(struct task_struct *task)
{
        if (stf_enabled_flush_types == STF_BARRIER_NONE)
                /*
                 * We don't have an explicit signal from firmware that we're
                 * vulnerable or not, we only have certain CPU revisions that
                 * are known to be vulnerable.
                 *
                 * We assume that if we're on another CPU, where the barrier is
                 * NONE, then we are not vulnerable.
                 */
                return PR_SPEC_NOT_AFFECTED;
        else
                /*
                 * If we do have a barrier type then we are vulnerable. The
                 * barrier is not a global or per-process mitigation, so the
                 * only value we can report here is PR_SPEC_ENABLE, which
                 * appears as "vulnerable" in /proc.
                 */
                return PR_SPEC_ENABLE;

        return -EINVAL;
}

int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
{
        switch (which) {
        case PR_SPEC_STORE_BYPASS:
                return ssb_prctl_get(task);
        default:
                return -ENODEV;
        }
}

#ifdef CONFIG_DEBUG_FS
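/* Runtime toggle for the barrier, exposed as <debugfs>/powerpc/stf_barrier. */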
static int stf_barrier_set(void *data, u64 val)
{
        bool enable;

        if (val == 1)
                enable = true;
        else if (val == 0)
                enable = false;
        else
                return -EINVAL;

        /* Only do anything if we're changing state */
        if (enable != stf_barrier)
                stf_barrier_enable(enable);

        return 0;
}

static int stf_barrier_get(void *data, u64 *val)
{
        *val = stf_barrier ? 1 : 0;
        return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(fops_stf_barrier, stf_barrier_get, stf_barrier_set,
                         "%llu\n");

static __init int stf_barrier_debugfs_init(void)
{
        debugfs_create_file_unsafe("stf_barrier", 0600, powerpc_debugfs_root,
                                   NULL, &fops_stf_barrier);
        return 0;
}
device_initcall(stf_barrier_debugfs_init);
#endif /* CONFIG_DEBUG_FS */
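
/*
 * Apply the chosen count cache / link stack flush types by patching
 * their call sites: the KVM guest exit path and the calls to
 * flush_branch_caches in the context switch path.
 */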
static void update_branch_cache_flush(void)
{
        u32 *site;

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
        site = &patch__call_kvm_flush_link_stack;
        // This controls the branch from guest_exit_cont to kvm_flush_link_stack
        if (link_stack_flush_type == BRANCH_CACHE_FLUSH_NONE) {
                patch_instruction_site(site, ppc_inst(PPC_INST_NOP));
        } else {
                // Could use HW flush, but that could also flush count cache
                patch_branch_site(site, (u64)&kvm_flush_link_stack, BRANCH_SET_LINK);
        }
#endif

        // Patch out the bcctr first, then nop the rest
        site = &patch__call_flush_branch_caches3;
        patch_instruction_site(site, ppc_inst(PPC_INST_NOP));
        site = &patch__call_flush_branch_caches2;
        patch_instruction_site(site, ppc_inst(PPC_INST_NOP));
        site = &patch__call_flush_branch_caches1;
        patch_instruction_site(site, ppc_inst(PPC_INST_NOP));

        // This controls the branch from _switch to flush_branch_caches
        if (count_cache_flush_type == BRANCH_CACHE_FLUSH_NONE &&
            link_stack_flush_type == BRANCH_CACHE_FLUSH_NONE) {
                // Nothing to be done

        } else if (count_cache_flush_type == BRANCH_CACHE_FLUSH_HW &&
                   link_stack_flush_type == BRANCH_CACHE_FLUSH_HW) {
                // Patch in the bcctr last
                site = &patch__call_flush_branch_caches1;
                patch_instruction_site(site, ppc_inst(0x39207fff)); // li r9,0x7fff
                site = &patch__call_flush_branch_caches2;
                patch_instruction_site(site, ppc_inst(0x7d2903a6)); // mtctr r9
                site = &patch__call_flush_branch_caches3;
                patch_instruction_site(site, ppc_inst(PPC_INST_BCCTR_FLUSH));

        } else {
                patch_branch_site(site, (u64)&flush_branch_caches, BRANCH_SET_LINK);

                // If we just need to flush the link stack, early return
                if (count_cache_flush_type == BRANCH_CACHE_FLUSH_NONE) {
                        patch_instruction_site(&patch__flush_link_stack_return,
                                               ppc_inst(PPC_INST_BLR));

                // If we have flush instruction, early return
                } else if (count_cache_flush_type == BRANCH_CACHE_FLUSH_HW) {
                        patch_instruction_site(&patch__flush_count_cache_return,
                                               ppc_inst(PPC_INST_BLR));
                }
        }
}
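
/*
 * Select hardware-assisted or software flushing for the count cache
 * and the link stack from the firmware feature bits, log the choices,
 * and re-patch the call sites to match.
 */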
static void toggle_branch_cache_flush(bool enable)
{
        if (!enable || !security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) {
                if (count_cache_flush_type != BRANCH_CACHE_FLUSH_NONE)
                        count_cache_flush_type = BRANCH_CACHE_FLUSH_NONE;

                pr_info("count-cache-flush: flush disabled.\n");
        } else {
                if (security_ftr_enabled(SEC_FTR_BCCTR_FLUSH_ASSIST)) {
                        count_cache_flush_type = BRANCH_CACHE_FLUSH_HW;
                        pr_info("count-cache-flush: hardware flush enabled.\n");
                } else {
                        count_cache_flush_type = BRANCH_CACHE_FLUSH_SW;
                        pr_info("count-cache-flush: software flush enabled.\n");
                }
        }

        if (!enable || !security_ftr_enabled(SEC_FTR_FLUSH_LINK_STACK)) {
                if (link_stack_flush_type != BRANCH_CACHE_FLUSH_NONE)
                        link_stack_flush_type = BRANCH_CACHE_FLUSH_NONE;

                pr_info("link-stack-flush: flush disabled.\n");
        } else {
                if (security_ftr_enabled(SEC_FTR_BCCTR_LINK_FLUSH_ASSIST)) {
                        link_stack_flush_type = BRANCH_CACHE_FLUSH_HW;
                        pr_info("link-stack-flush: hardware flush enabled.\n");
                } else {
                        link_stack_flush_type = BRANCH_CACHE_FLUSH_SW;
                        pr_info("link-stack-flush: software flush enabled.\n");
                }
        }

        update_branch_cache_flush();
}
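
/*
 * Entry point for the Spectre v2 setup, called from platform setup
 * code once the firmware/hypervisor feature bits have been recorded
 * in powerpc_security_features.
 */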
void setup_count_cache_flush(void)
{
        bool enable = true;

        if (no_spectrev2 || cpu_mitigations_off()) {
                if (security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED) ||
                    security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED))
                        pr_warn("Spectre v2 mitigations not fully under software control, can't disable\n");

                enable = false;
        }

        /*
         * There's no firmware feature flag/hypervisor bit to tell us we need to
         * flush the link stack on context switch. So we set it here if we see
         * either of the Spectre v2 mitigations that aim to protect userspace.
         */
        if (security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED) ||
            security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE))
                security_ftr_set(SEC_FTR_FLUSH_LINK_STACK);

        toggle_branch_cache_flush(enable);
}

#ifdef CONFIG_DEBUG_FS
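/* Runtime toggle, exposed as <debugfs>/powerpc/count_cache_flush. */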
static int count_cache_flush_set(void *data, u64 val)
{
        bool enable;

        if (val == 1)
                enable = true;
        else if (val == 0)
                enable = false;
        else
                return -EINVAL;

        toggle_branch_cache_flush(enable);

        return 0;
}

static int count_cache_flush_get(void *data, u64 *val)
{
        if (count_cache_flush_type == BRANCH_CACHE_FLUSH_NONE)
                *val = 0;
        else
                *val = 1;

        return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(fops_count_cache_flush, count_cache_flush_get,
                         count_cache_flush_set, "%llu\n");

static __init int count_cache_flush_debugfs_init(void)
{
        debugfs_create_file_unsafe("count_cache_flush", 0600,
                                   powerpc_debugfs_root, NULL,
                                   &fops_count_cache_flush);
        return 0;
}
device_initcall(count_cache_flush_debugfs_init);
#endif /* CONFIG_DEBUG_FS */
#endif /* CONFIG_PPC_BOOK3S_64 */