proton-pack.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Handle detection, reporting and mitigation of Spectre v1, v2, v3a and v4, as
 * detailed at:
 *
 *   https://developer.arm.com/support/arm-security-updates/speculative-processor-vulnerability
 *
 * This code was originally written hastily under an awful lot of stress and so
 * aspects of it are somewhat hacky. Unfortunately, changing anything in here
 * instantly makes me feel ill. Thanks, Jann. Thann.
 *
 * Copyright (C) 2018 ARM Ltd, All Rights Reserved.
 * Copyright (C) 2020 Google LLC
 *
 * "If there's something strange in your neighbourhood, who you gonna call?"
 *
 * Authors: Will Deacon <will@kernel.org> and Marc Zyngier <maz@kernel.org>
 */

#include <linux/arm-smccc.h>
#include <linux/bpf.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/nospec.h>
#include <linux/prctl.h>
#include <linux/sched/task_stack.h>

#include <asm/debug-monitors.h>
#include <asm/insn.h>
#include <asm/spectre.h>
#include <asm/traps.h>
#include <asm/vectors.h>
#include <asm/virt.h>

/*
 * We try to ensure that the mitigation state can never change as the result of
 * onlining a late CPU.
 */
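/*
 * Note (added for clarity): a new state is only accepted if it is numerically
 * greater than the current one (UNAFFECTED < MITIGATED < VULNERABLE in
 * enum mitigation_state), so a late CPU can only make the reported state
 * worse, never better, and must not change it at all once the system
 * capabilities have been finalized - hence the WARN_ON() below.
 */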
static void update_mitigation_state(enum mitigation_state *oldp,
				    enum mitigation_state new)
{
	enum mitigation_state state;

	do {
		state = READ_ONCE(*oldp);
		if (new <= state)
			break;

		/* Userspace almost certainly can't deal with this. */
		if (WARN_ON(system_capabilities_finalized()))
			break;
	} while (cmpxchg_relaxed(oldp, state, new) != state);
}

/*
 * Spectre v1.
 *
 * The kernel can't protect userspace for this one: it's each person for
 * themselves. Advertise what we're doing and be done with it.
 */
ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "Mitigation: __user pointer sanitization\n");
}

/*
 * Spectre v2.
 *
 * This one sucks. A CPU is either:
 *
 * - Mitigated in hardware and advertised by ID_AA64PFR0_EL1.CSV2.
 * - Mitigated in hardware and listed in our "safe list".
 * - Mitigated in software by firmware.
 * - Mitigated in software by a CPU-specific dance in the kernel and a
 *   firmware call at EL2.
 * - Vulnerable.
 *
 * It's not unlikely for different CPUs in a big.LITTLE system to fall into
 * different camps.
 */
static enum mitigation_state spectre_v2_state;

static bool __read_mostly __nospectre_v2;
static int __init parse_spectre_v2_param(char *str)
{
	__nospectre_v2 = true;
	return 0;
}
early_param("nospectre_v2", parse_spectre_v2_param);

static bool spectre_v2_mitigations_off(void)
{
	bool ret = __nospectre_v2 || cpu_mitigations_off();

	if (ret)
		pr_info_once("spectre-v2 mitigation disabled by command line option\n");

	return ret;
}

static const char *get_bhb_affected_string(enum mitigation_state bhb_state)
{
	switch (bhb_state) {
	case SPECTRE_UNAFFECTED:
		return "";
	default:
	case SPECTRE_VULNERABLE:
		return ", but not BHB";
	case SPECTRE_MITIGATED:
		return ", BHB";
	}
}

static bool _unprivileged_ebpf_enabled(void)
{
#ifdef CONFIG_BPF_SYSCALL
	return !sysctl_unprivileged_bpf_disabled;
#else
	return false;
#endif
}
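/*
 * Note (added for clarity): unprivileged eBPF runs attacker-controlled code
 * inside the kernel, on the wrong side of the entry-time Spectre-BHB
 * mitigation, so when it is enabled the sysfs file below reports the system
 * as vulnerable even though the vectors are patched (see unpriv_ebpf_notify()
 * at the bottom of this file).
 */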
ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	enum mitigation_state bhb_state = arm64_get_spectre_bhb_state();
	const char *bhb_str = get_bhb_affected_string(bhb_state);
	const char *v2_str = "Branch predictor hardening";

	switch (spectre_v2_state) {
	case SPECTRE_UNAFFECTED:
		if (bhb_state == SPECTRE_UNAFFECTED)
			return sprintf(buf, "Not affected\n");

		/*
		 * Platforms affected by Spectre-BHB can't report
		 * "Not affected" for Spectre-v2.
		 */
		v2_str = "CSV2";
		fallthrough;
	case SPECTRE_MITIGATED:
		if (bhb_state == SPECTRE_MITIGATED && _unprivileged_ebpf_enabled())
			return sprintf(buf, "Vulnerable: Unprivileged eBPF enabled\n");

		return sprintf(buf, "Mitigation: %s%s\n", v2_str, bhb_str);
	case SPECTRE_VULNERABLE:
		fallthrough;
	default:
		return sprintf(buf, "Vulnerable\n");
	}
}

static enum mitigation_state spectre_v2_get_cpu_hw_mitigation_state(void)
{
	u64 pfr0;
	static const struct midr_range spectre_v2_safe_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
		MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
		MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_2XX_SILVER),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
		{ /* sentinel */ }
	};

	/* If the CPU has CSV2 set, we're safe */
	pfr0 = read_cpuid(ID_AA64PFR0_EL1);
	if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
		return SPECTRE_UNAFFECTED;

	/* Alternatively, we have a list of unaffected CPUs */
	if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
		return SPECTRE_UNAFFECTED;

	return SPECTRE_VULNERABLE;
}
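/*
 * Note (added for clarity): this asks firmware, via the SMCCC ARCH_FEATURES
 * query, whether it implements the ARCH_WORKAROUND_1 call. SUCCESS means the
 * workaround is available (and needed), RET_UNAFFECTED means this CPU doesn't
 * need it, and anything else (including NOT_SUPPORTED) leaves us vulnerable
 * unless something else mitigates the CPU.
 */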
static enum mitigation_state spectre_v2_get_cpu_fw_mitigation_state(void)
{
	int ret;
	struct arm_smccc_res res;

	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
			     ARM_SMCCC_ARCH_WORKAROUND_1, &res);

	ret = res.a0;
	switch (ret) {
	case SMCCC_RET_SUCCESS:
		return SPECTRE_MITIGATED;
	case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
		return SPECTRE_UNAFFECTED;
	default:
		fallthrough;
	case SMCCC_RET_NOT_SUPPORTED:
		return SPECTRE_VULNERABLE;
	}
}

bool has_spectre_v2(const struct arm64_cpu_capabilities *entry, int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	if (spectre_v2_get_cpu_hw_mitigation_state() == SPECTRE_UNAFFECTED)
		return false;

	if (spectre_v2_get_cpu_fw_mitigation_state() == SPECTRE_UNAFFECTED)
		return false;

	return true;
}

enum mitigation_state arm64_get_spectre_v2_state(void)
{
	return spectre_v2_state;
}

DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

static void install_bp_hardening_cb(bp_hardening_cb_t fn)
{
	__this_cpu_write(bp_hardening_data.fn, fn);

	/*
	 * Vinz Clortho takes the hyp_vecs start/end "keys" at
	 * the door when we're a guest. Skip the hyp-vectors work.
	 */
	if (!is_hyp_mode_available())
		return;

	__this_cpu_write(bp_hardening_data.slot, HYP_VECTOR_SPECTRE_DIRECT);
}

static void call_smc_arch_workaround_1(void)
{
	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

static void call_hvc_arch_workaround_1(void)
{
	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}
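/*
 * Note (added for clarity): used on Qualcomm Falkor parts (see the MIDR check
 * in the next function). The asm saves x30, executes sixteen "bl . + 4"
 * branch-and-links to the very next instruction so the return-address/link
 * predictor is overwritten with harmless entries, then restores x30.
 */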
static void qcom_link_stack_sanitisation(void)
{
	u64 tmp;

	asm volatile("mov %0, x30 \n"
		     ".rept 16 \n"
		     "bl . + 4 \n"
		     ".endr \n"
		     "mov x30, %0 \n"
		     : "=&r" (tmp));
}

static bp_hardening_cb_t spectre_v2_get_sw_mitigation_cb(void)
{
	u32 midr = read_cpuid_id();

	if (((midr & MIDR_CPU_MODEL_MASK) != MIDR_QCOM_FALKOR) &&
	    ((midr & MIDR_CPU_MODEL_MASK) != MIDR_QCOM_FALKOR_V1))
		return NULL;

	return qcom_link_stack_sanitisation;
}

static enum mitigation_state spectre_v2_enable_fw_mitigation(void)
{
	bp_hardening_cb_t cb;
	enum mitigation_state state;

	state = spectre_v2_get_cpu_fw_mitigation_state();
	if (state != SPECTRE_MITIGATED)
		return state;

	if (spectre_v2_mitigations_off())
		return SPECTRE_VULNERABLE;

	switch (arm_smccc_1_1_get_conduit()) {
	case SMCCC_CONDUIT_HVC:
		cb = call_hvc_arch_workaround_1;
		break;
	case SMCCC_CONDUIT_SMC:
		cb = call_smc_arch_workaround_1;
		break;
	default:
		return SPECTRE_VULNERABLE;
	}

	/*
	 * Prefer a CPU-specific workaround if it exists. Note that we
	 * still rely on firmware for the mitigation at EL2.
	 */
	cb = spectre_v2_get_sw_mitigation_cb() ?: cb;
	install_bp_hardening_cb(cb);
	return SPECTRE_MITIGATED;
}

void spectre_v2_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
{
	enum mitigation_state state;

	WARN_ON(preemptible());

	state = spectre_v2_get_cpu_hw_mitigation_state();
	if (state == SPECTRE_VULNERABLE)
		state = spectre_v2_enable_fw_mitigation();

	update_mitigation_state(&spectre_v2_state, state);
}

/*
 * Spectre-v3a.
 *
 * Phew, there's not an awful lot to do here! We just instruct EL2 to use
 * an indirect trampoline for the hyp vectors so that guests can't read
 * VBAR_EL2 to defeat randomisation of the hypervisor VA layout.
 */
bool has_spectre_v3a(const struct arm64_cpu_capabilities *entry, int scope)
{
	static const struct midr_range spectre_v3a_unsafe_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
		{},
	};

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range_list(read_cpuid_id(), spectre_v3a_unsafe_list);
}

void spectre_v3a_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
{
	struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data);

	if (this_cpu_has_cap(ARM64_SPECTRE_V3A))
		data->slot += HYP_VECTOR_INDIRECT;
}

/*
 * Spectre v4.
 *
 * If you thought Spectre v2 was nasty, wait until you see this mess. A CPU is
 * either:
 *
 * - Mitigated in hardware and listed in our "safe list".
 * - Mitigated in hardware via PSTATE.SSBS.
 * - Mitigated in software by firmware (sometimes referred to as SSBD).
 *
 * Wait, that doesn't sound so bad, does it? Keep reading...
 *
 * A major source of headaches is that the software mitigation is enabled both
 * on a per-task basis, but can also be forced on for the kernel, necessitating
 * both context-switch *and* entry/exit hooks. To make it even worse, some CPUs
 * allow EL0 to toggle SSBS directly, which can end up with the prctl() state
 * being stale when re-entering the kernel. The usual big.LITTLE caveats apply,
 * so you can have systems that have both firmware and SSBS mitigations. This
 * means we actually have to reject late onlining of CPUs with mitigations if
 * all of the currently onlined CPUs are safelisted, as the mitigation tends to
 * be opt-in for userspace. Yes, really, the cure is worse than the disease.
 *
 * The only good part is that if the firmware mitigation is present, then it is
 * present for all CPUs, meaning we don't have to worry about late onlining of a
 * vulnerable CPU if one of the boot CPUs is using the firmware mitigation.
 *
 * Give me a VAX-11/780 any day of the week...
 */
static enum mitigation_state spectre_v4_state;

/* This is the per-cpu state tracking whether we need to talk to firmware */
DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);

enum spectre_v4_policy {
	SPECTRE_V4_POLICY_MITIGATION_DYNAMIC,
	SPECTRE_V4_POLICY_MITIGATION_ENABLED,
	SPECTRE_V4_POLICY_MITIGATION_DISABLED,
};

static enum spectre_v4_policy __read_mostly __spectre_v4_policy;

static const struct spectre_v4_param {
	const char		*str;
	enum spectre_v4_policy	policy;
} spectre_v4_params[] = {
	{ "force-on",	SPECTRE_V4_POLICY_MITIGATION_ENABLED, },
	{ "force-off",	SPECTRE_V4_POLICY_MITIGATION_DISABLED, },
	{ "kernel",	SPECTRE_V4_POLICY_MITIGATION_DYNAMIC, },
};

static int __init parse_spectre_v4_param(char *str)
{
	int i;

	if (!str || !str[0])
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(spectre_v4_params); i++) {
		const struct spectre_v4_param *param = &spectre_v4_params[i];

		if (strncmp(str, param->str, strlen(param->str)))
			continue;

		__spectre_v4_policy = param->policy;
		return 0;
	}

	return -EINVAL;
}
early_param("ssbd", parse_spectre_v4_param);

/*
 * Because this was all written in a rush by people working in different silos,
 * we've ended up with multiple command line options to control the same thing.
 * Wrap these up in some helpers, which prefer disabling the mitigation if faced
 * with contradictory parameters. The mitigation is always either "off",
 * "dynamic" or "on".
 */
static bool spectre_v4_mitigations_off(void)
{
	bool ret = cpu_mitigations_off() ||
		   __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_DISABLED;

	if (ret)
		pr_info_once("spectre-v4 mitigation disabled by command-line option\n");

	return ret;
}

/* Do we need to toggle the mitigation state on entry to/exit from the kernel? */
static bool spectre_v4_mitigations_dynamic(void)
{
	return !spectre_v4_mitigations_off() &&
	       __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_DYNAMIC;
}

static bool spectre_v4_mitigations_on(void)
{
	return !spectre_v4_mitigations_off() &&
	       __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_ENABLED;
}

ssize_t cpu_show_spec_store_bypass(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	switch (spectre_v4_state) {
	case SPECTRE_UNAFFECTED:
		return sprintf(buf, "Not affected\n");
	case SPECTRE_MITIGATED:
		return sprintf(buf, "Mitigation: Speculative Store Bypass disabled via prctl\n");
	case SPECTRE_VULNERABLE:
		fallthrough;
	default:
		return sprintf(buf, "Vulnerable\n");
	}
}

enum mitigation_state arm64_get_spectre_v4_state(void)
{
	return spectre_v4_state;
}

static enum mitigation_state spectre_v4_get_cpu_hw_mitigation_state(void)
{
	static const struct midr_range spectre_v4_safe_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
		MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
		{ /* sentinel */ },
	};

	if (is_midr_in_range_list(read_cpuid_id(), spectre_v4_safe_list))
		return SPECTRE_UNAFFECTED;

	/* CPU features are detected first */
	if (this_cpu_has_cap(ARM64_SSBS))
		return SPECTRE_MITIGATED;

	return SPECTRE_VULNERABLE;
}

static enum mitigation_state spectre_v4_get_cpu_fw_mitigation_state(void)
{
	int ret;
	struct arm_smccc_res res;

	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
			     ARM_SMCCC_ARCH_WORKAROUND_2, &res);

	ret = res.a0;
	switch (ret) {
	case SMCCC_RET_SUCCESS:
		return SPECTRE_MITIGATED;
	case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
		fallthrough;
	case SMCCC_RET_NOT_REQUIRED:
		return SPECTRE_UNAFFECTED;
	default:
		fallthrough;
	case SMCCC_RET_NOT_SUPPORTED:
		return SPECTRE_VULNERABLE;
	}
}

bool has_spectre_v4(const struct arm64_cpu_capabilities *cap, int scope)
{
	enum mitigation_state state;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	state = spectre_v4_get_cpu_hw_mitigation_state();
	if (state == SPECTRE_VULNERABLE)
		state = spectre_v4_get_cpu_fw_mitigation_state();

	return state != SPECTRE_UNAFFECTED;
}
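/*
 * Note (added for clarity): "MSR SSBS, #imm" UNDEFs on CPUs that don't
 * implement SSBS. When that happens in kernel context, the handler below
 * mirrors the requested value into the saved PSTATE and skips the
 * instruction; EL0 attempts are deliberately not emulated (returning 1
 * leaves them to the normal undef handling).
 */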
static int ssbs_emulation_handler(struct pt_regs *regs, u32 instr)
{
	if (user_mode(regs))
		return 1;

	if (instr & BIT(PSTATE_Imm_shift))
		regs->pstate |= PSR_SSBS_BIT;
	else
		regs->pstate &= ~PSR_SSBS_BIT;

	arm64_skip_faulting_instruction(regs, 4);
	return 0;
}

static struct undef_hook ssbs_emulation_hook = {
	.instr_mask	= ~(1U << PSTATE_Imm_shift),
	.instr_val	= 0xd500401f | PSTATE_SSBS,
	.fn		= ssbs_emulation_handler,
};

static enum mitigation_state spectre_v4_enable_hw_mitigation(void)
{
	static bool undef_hook_registered = false;
	static DEFINE_RAW_SPINLOCK(hook_lock);
	enum mitigation_state state;

	/*
	 * If the system is mitigated but this CPU doesn't have SSBS, then
	 * we must be on the safelist and there's nothing more to do.
	 */
	state = spectre_v4_get_cpu_hw_mitigation_state();
	if (state != SPECTRE_MITIGATED || !this_cpu_has_cap(ARM64_SSBS))
		return state;

	raw_spin_lock(&hook_lock);
	if (!undef_hook_registered) {
		register_undef_hook(&ssbs_emulation_hook);
		undef_hook_registered = true;
	}
	raw_spin_unlock(&hook_lock);

	if (spectre_v4_mitigations_off()) {
		sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_DSSBS);
		set_pstate_ssbs(1);
		return SPECTRE_VULNERABLE;
	}

	/* SCTLR_EL1.DSSBS was initialised to 0 during boot */
	set_pstate_ssbs(0);
	return SPECTRE_MITIGATED;
}

/*
 * Patch a branch over the Spectre-v4 mitigation code with a NOP so that
 * we fallthrough and check whether firmware needs to be called on this CPU.
 */
void __init spectre_v4_patch_fw_mitigation_enable(struct alt_instr *alt,
						  __le32 *origptr,
						  __le32 *updptr, int nr_inst)
{
	BUG_ON(nr_inst != 1); /* Branch -> NOP */

	if (spectre_v4_mitigations_off())
		return;

	if (cpus_have_final_cap(ARM64_SSBS))
		return;

	if (spectre_v4_mitigations_dynamic())
		*updptr = cpu_to_le32(aarch64_insn_gen_nop());
}

/*
 * Patch a NOP in the Spectre-v4 mitigation code with an SMC/HVC instruction
 * to call into firmware to adjust the mitigation state.
 */
void __init smccc_patch_fw_mitigation_conduit(struct alt_instr *alt,
					      __le32 *origptr,
					      __le32 *updptr, int nr_inst)
{
	u32 insn;

	BUG_ON(nr_inst != 1); /* NOP -> HVC/SMC */

	switch (arm_smccc_1_1_get_conduit()) {
	case SMCCC_CONDUIT_HVC:
		insn = aarch64_insn_get_hvc_value();
		break;
	case SMCCC_CONDUIT_SMC:
		insn = aarch64_insn_get_smc_value();
		break;
	default:
		return;
	}

	*updptr = cpu_to_le32(insn);
}
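/*
 * Note (added for clarity): at run time the patched sequence in the entry
 * assembly consults this CPU's arm64_ssbd_callback_required flag and the
 * current task's TIF_SSBD flag to decide whether the WORKAROUND_2 call
 * actually needs to be issued; both pieces of state are managed by the code
 * below.
 */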
static enum mitigation_state spectre_v4_enable_fw_mitigation(void)
{
	enum mitigation_state state;

	state = spectre_v4_get_cpu_fw_mitigation_state();
	if (state != SPECTRE_MITIGATED)
		return state;

	if (spectre_v4_mitigations_off()) {
		arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, false, NULL);
		return SPECTRE_VULNERABLE;
	}

	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, true, NULL);

	if (spectre_v4_mitigations_dynamic())
		__this_cpu_write(arm64_ssbd_callback_required, 1);

	return SPECTRE_MITIGATED;
}

void spectre_v4_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
{
	enum mitigation_state state;

	WARN_ON(preemptible());

	state = spectre_v4_enable_hw_mitigation();
	if (state == SPECTRE_VULNERABLE)
		state = spectre_v4_enable_fw_mitigation();

	update_mitigation_state(&spectre_v4_state, state);
}

static void __update_pstate_ssbs(struct pt_regs *regs, bool state)
{
	u64 bit = compat_user_mode(regs) ? PSR_AA32_SSBS_BIT : PSR_SSBS_BIT;

	if (state)
		regs->pstate |= bit;
	else
		regs->pstate &= ~bit;
}

void spectre_v4_enable_task_mitigation(struct task_struct *tsk)
{
	struct pt_regs *regs = task_pt_regs(tsk);
	bool ssbs = false, kthread = tsk->flags & PF_KTHREAD;

	if (spectre_v4_mitigations_off())
		ssbs = true;
	else if (spectre_v4_mitigations_dynamic() && !kthread)
		ssbs = !test_tsk_thread_flag(tsk, TIF_SSBD);

	__update_pstate_ssbs(regs, ssbs);
}

/*
 * The Spectre-v4 mitigation can be controlled via a prctl() from userspace.
 * This is interesting because the "speculation disabled" behaviour can be
 * configured so that it is preserved across exec(), which means that the
 * prctl() may be necessary even when PSTATE.SSBS can be toggled directly
 * from userspace.
 */
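/*
 * A minimal userspace sketch (this is the generic prctl(2)
 * speculation-control ABI, not anything arm64-specific):
 *
 *	#include <sys/prctl.h>
 *	#include <linux/prctl.h>
 *
 *	// Request the store-bypass mitigation for this task only:
 *	prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
 *	      PR_SPEC_DISABLE, 0, 0);
 *
 *	// Query the current per-task state:
 *	prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);
 *
 * Those calls reach arch_prctl_spec_ctrl_set()/_get() below via the core
 * prctl code.
 */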
static void ssbd_prctl_enable_mitigation(struct task_struct *task)
{
	task_clear_spec_ssb_noexec(task);
	task_set_spec_ssb_disable(task);
	set_tsk_thread_flag(task, TIF_SSBD);
}

static void ssbd_prctl_disable_mitigation(struct task_struct *task)
{
	task_clear_spec_ssb_noexec(task);
	task_clear_spec_ssb_disable(task);
	clear_tsk_thread_flag(task, TIF_SSBD);
}

static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl)
{
	switch (ctrl) {
	case PR_SPEC_ENABLE:
		/* Enable speculation: disable mitigation */
		/*
		 * Force disabled speculation prevents it from being
		 * re-enabled.
		 */
		if (task_spec_ssb_force_disable(task))
			return -EPERM;

		/*
		 * If the mitigation is forced on, then speculation is forced
		 * off and we again prevent it from being re-enabled.
		 */
		if (spectre_v4_mitigations_on())
			return -EPERM;

		ssbd_prctl_disable_mitigation(task);
		break;
	case PR_SPEC_FORCE_DISABLE:
		/* Force disable speculation: force enable mitigation */
		/*
		 * If the mitigation is forced off, then speculation is forced
		 * on and we prevent it from being disabled.
		 */
		if (spectre_v4_mitigations_off())
			return -EPERM;

		task_set_spec_ssb_force_disable(task);
		fallthrough;
	case PR_SPEC_DISABLE:
		/* Disable speculation: enable mitigation */
		/* Same as PR_SPEC_FORCE_DISABLE */
		if (spectre_v4_mitigations_off())
			return -EPERM;

		ssbd_prctl_enable_mitigation(task);
		break;
	case PR_SPEC_DISABLE_NOEXEC:
		/* Disable speculation until execve(): enable mitigation */
		/*
		 * If the mitigation state is forced one way or the other, then
		 * we must fail now before we try to toggle it on execve().
		 */
		if (task_spec_ssb_force_disable(task) ||
		    spectre_v4_mitigations_off() ||
		    spectre_v4_mitigations_on()) {
			return -EPERM;
		}

		ssbd_prctl_enable_mitigation(task);
		task_set_spec_ssb_noexec(task);
		break;
	default:
		return -ERANGE;
	}

	spectre_v4_enable_task_mitigation(task);
	return 0;
}

int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
			     unsigned long ctrl)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssbd_prctl_set(task, ctrl);
	default:
		return -ENODEV;
	}
}

static int ssbd_prctl_get(struct task_struct *task)
{
	switch (spectre_v4_state) {
	case SPECTRE_UNAFFECTED:
		return PR_SPEC_NOT_AFFECTED;
	case SPECTRE_MITIGATED:
		if (spectre_v4_mitigations_on())
			return PR_SPEC_NOT_AFFECTED;

		if (spectre_v4_mitigations_dynamic())
			break;

		/* Mitigations are disabled, so we're vulnerable. */
		fallthrough;
	case SPECTRE_VULNERABLE:
		fallthrough;
	default:
		return PR_SPEC_ENABLE;
	}

	/* Check the mitigation state for this task */
	if (task_spec_ssb_force_disable(task))
		return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;

	if (task_spec_ssb_noexec(task))
		return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC;

	if (task_spec_ssb_disable(task))
		return PR_SPEC_PRCTL | PR_SPEC_DISABLE;

	return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
}

int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssbd_prctl_get(task);
	default:
		return -ENODEV;
	}
}

/*
 * Spectre BHB.
 *
 * A CPU is either:
 * - Mitigated by a branchy loop a CPU specific number of times, and listed
 *   in our "loop mitigated list".
 * - Mitigated in software by the firmware Spectre v2 call.
 * - Has the ClearBHB instruction to perform the mitigation.
 * - Has the 'Exception Clears Branch History Buffer' (ECBHB) feature, so no
 *   software mitigation in the vectors is needed.
 * - Has CSV2.3, so is unaffected.
 */
static enum mitigation_state spectre_bhb_state;

enum mitigation_state arm64_get_spectre_bhb_state(void)
{
	return spectre_bhb_state;
}

enum bhb_mitigation_bits {
	BHB_LOOP,
	BHB_FW,
	BHB_HW,
	BHB_INSN,
};
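/*
 * Note (added for clarity): one bit per mitigation flavour in use anywhere in
 * the system. These bits are consumed by the alternative-patching callbacks
 * at the bottom of this file, so that the shared vector entry code only keeps
 * the sequences that are actually needed.
 */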
static unsigned long system_bhb_mitigations;

/*
 * This must be called with SCOPE_LOCAL_CPU for each type of CPU, before any
 * SCOPE_SYSTEM call will give the right answer.
 */
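/*
 * Note (added for clarity): k is the number of iterations of the branchy loop
 * needed to overwrite this CPU's branch history. SCOPE_SYSTEM returns the
 * largest value seen on any CPU so far, so a single patched loop count works
 * everywhere.
 */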
u8 spectre_bhb_loop_affected(int scope)
{
	u8 k = 0;
	static u8 max_bhb_k;

	if (scope == SCOPE_LOCAL_CPU) {
		static const struct midr_range spectre_bhb_k32_list[] = {
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A78AE),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
			MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
			MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
			{},
		};
		static const struct midr_range spectre_bhb_k24_list[] = {
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
			MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
			{},
		};
		static const struct midr_range spectre_bhb_k8_list[] = {
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
			{},
		};

		if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k32_list))
			k = 32;
		else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list))
			k = 24;
		else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list))
			k = 8;

		max_bhb_k = max(max_bhb_k, k);
	} else {
		k = max_bhb_k;
	}

	return k;
}

static enum mitigation_state spectre_bhb_get_cpu_fw_mitigation_state(void)
{
	int ret;
	struct arm_smccc_res res;

	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
			     ARM_SMCCC_ARCH_WORKAROUND_3, &res);

	ret = res.a0;
	switch (ret) {
	case SMCCC_RET_SUCCESS:
		return SPECTRE_MITIGATED;
	case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
		return SPECTRE_UNAFFECTED;
	default:
		fallthrough;
	case SMCCC_RET_NOT_SUPPORTED:
		return SPECTRE_VULNERABLE;
	}
}

static bool is_spectre_bhb_fw_affected(int scope)
{
	static bool system_affected;
	enum mitigation_state fw_state;
	bool has_smccc = arm_smccc_1_1_get_conduit() != SMCCC_CONDUIT_NONE;
	static const struct midr_range spectre_bhb_firmware_mitigated_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
		{},
	};
	bool cpu_in_list = is_midr_in_range_list(read_cpuid_id(),
					spectre_bhb_firmware_mitigated_list);

	if (scope != SCOPE_LOCAL_CPU)
		return system_affected;

	fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
	if (cpu_in_list || (has_smccc && fw_state == SPECTRE_MITIGATED)) {
		system_affected = true;
		return true;
	}

	return false;
}

static bool supports_ecbhb(int scope)
{
	u64 mmfr1;

	if (scope == SCOPE_LOCAL_CPU)
		mmfr1 = read_sysreg_s(SYS_ID_AA64MMFR1_EL1);
	else
		mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);

	return cpuid_feature_extract_unsigned_field(mmfr1,
						    ID_AA64MMFR1_ECBHB_SHIFT);
}

bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry,
			     int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	if (supports_csv2p3(scope))
		return false;

	if (supports_clearbhb(scope))
		return true;

	if (spectre_bhb_loop_affected(scope))
		return true;

	if (is_spectre_bhb_fw_affected(scope))
		return true;

	return false;
}

static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot)
{
	const char *v = arm64_get_bp_hardening_vector(slot);

	if (slot < 0)
		return;

	__this_cpu_write(this_cpu_vector, v);

	/*
	 * When KPTI is in use, the vectors are switched when exiting to
	 * user-space.
	 */
	if (arm64_kernel_unmapped_at_el0())
		return;

	write_sysreg(v, vbar_el1);
	isb();
}
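/*
 * Note (added for clarity): pick the strongest available Spectre-BHB
 * mitigation for this CPU, in decreasing order of preference: ECBHB in
 * hardware, the ClearBHB instruction, the branchy loop, and finally the
 * firmware ARCH_WORKAROUND_3 call. Nothing is installed if Spectre-v2 itself
 * is unmitigated, or if mitigations were disabled at build or boot time.
 */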
void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
{
	bp_hardening_cb_t cpu_cb;
	enum mitigation_state fw_state, state = SPECTRE_VULNERABLE;
	struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data);

	if (!is_spectre_bhb_affected(entry, SCOPE_LOCAL_CPU))
		return;

	if (arm64_get_spectre_v2_state() == SPECTRE_VULNERABLE) {
		/* No point mitigating Spectre-BHB alone. */
	} else if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY)) {
		pr_info_once("spectre-bhb mitigation disabled by compile time option\n");
	} else if (cpu_mitigations_off()) {
		pr_info_once("spectre-bhb mitigation disabled by command line option\n");
	} else if (supports_ecbhb(SCOPE_LOCAL_CPU)) {
		state = SPECTRE_MITIGATED;
		set_bit(BHB_HW, &system_bhb_mitigations);
	} else if (supports_clearbhb(SCOPE_LOCAL_CPU)) {
		/*
		 * Ensure KVM uses the indirect vector which will have ClearBHB
		 * added.
		 */
		if (!data->slot)
			data->slot = HYP_VECTOR_INDIRECT;

		this_cpu_set_vectors(EL1_VECTOR_BHB_CLEAR_INSN);
		state = SPECTRE_MITIGATED;
		set_bit(BHB_INSN, &system_bhb_mitigations);
	} else if (spectre_bhb_loop_affected(SCOPE_LOCAL_CPU)) {
		/*
		 * Ensure KVM uses the indirect vector which will have the
		 * branchy-loop added. A57/A72-r0 will already have selected
		 * the spectre-indirect vector, which is sufficient for BHB
		 * too.
		 */
		if (!data->slot)
			data->slot = HYP_VECTOR_INDIRECT;

		this_cpu_set_vectors(EL1_VECTOR_BHB_LOOP);
		state = SPECTRE_MITIGATED;
		set_bit(BHB_LOOP, &system_bhb_mitigations);
	} else if (is_spectre_bhb_fw_affected(SCOPE_LOCAL_CPU)) {
		fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
		if (fw_state == SPECTRE_MITIGATED) {
			/*
			 * Ensure KVM uses one of the spectre bp_hardening
			 * vectors. The indirect vector doesn't include the EL3
			 * call, so needs upgrading to
			 * HYP_VECTOR_SPECTRE_INDIRECT.
			 */
			if (!data->slot || data->slot == HYP_VECTOR_INDIRECT)
				data->slot += 1;

			this_cpu_set_vectors(EL1_VECTOR_BHB_FW);

			/*
			 * The WA3 call in the vectors supersedes the WA1 call
			 * made during context-switch. Uninstall any firmware
			 * bp_hardening callback.
			 */
			cpu_cb = spectre_v2_get_sw_mitigation_cb();
			if (__this_cpu_read(bp_hardening_data.fn) != cpu_cb)
				__this_cpu_write(bp_hardening_data.fn, NULL);

			state = SPECTRE_MITIGATED;
			set_bit(BHB_FW, &system_bhb_mitigations);
		}
	}

	update_mitigation_state(&spectre_bhb_state, state);
}
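/*
 * Note (added for clarity): the vector entry code contains template sequences
 * for the different BHB mitigation flavours. The callbacks below are run by
 * the alternatives framework at boot to NOP out, or specialise the operands
 * of, the parts that this system doesn't need.
 */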
/* Patched to NOP when enabled */
void noinstr spectre_bhb_patch_loop_mitigation_enable(struct alt_instr *alt,
						      __le32 *origptr,
						      __le32 *updptr, int nr_inst)
{
	BUG_ON(nr_inst != 1);

	if (test_bit(BHB_LOOP, &system_bhb_mitigations))
		*updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
}

/* Patched to NOP when enabled */
void noinstr spectre_bhb_patch_fw_mitigation_enabled(struct alt_instr *alt,
						     __le32 *origptr,
						     __le32 *updptr, int nr_inst)
{
	BUG_ON(nr_inst != 1);

	if (test_bit(BHB_FW, &system_bhb_mitigations))
		*updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
}

/* Patched to correct the immediate */
void noinstr spectre_bhb_patch_loop_iter(struct alt_instr *alt,
					 __le32 *origptr, __le32 *updptr, int nr_inst)
{
	u8 rd;
	u32 insn;
	u16 loop_count = spectre_bhb_loop_affected(SCOPE_SYSTEM);

	BUG_ON(nr_inst != 1); /* MOV -> MOV */

	if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY))
		return;

	insn = le32_to_cpu(*origptr);
	rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn);
	insn = aarch64_insn_gen_movewide(rd, loop_count, 0,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_ZERO);
	*updptr++ = cpu_to_le32(insn);
}

/* Patched to mov WA3 when supported */
void noinstr spectre_bhb_patch_wa3(struct alt_instr *alt,
				   __le32 *origptr, __le32 *updptr, int nr_inst)
{
	u8 rd;
	u32 insn;

	BUG_ON(nr_inst != 1); /* MOV -> MOV */

	if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY) ||
	    !test_bit(BHB_FW, &system_bhb_mitigations))
		return;

	insn = le32_to_cpu(*origptr);
	rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn);
	insn = aarch64_insn_gen_logical_immediate(AARCH64_INSN_LOGIC_ORR,
						  AARCH64_INSN_VARIANT_32BIT,
						  AARCH64_INSN_REG_ZR, rd,
						  ARM_SMCCC_ARCH_WORKAROUND_3);
	if (WARN_ON_ONCE(insn == AARCH64_BREAK_FAULT))
		return;

	*updptr++ = cpu_to_le32(insn);
}

/* Patched to NOP when not supported */
void __init spectre_bhb_patch_clearbhb(struct alt_instr *alt,
				       __le32 *origptr, __le32 *updptr, int nr_inst)
{
	BUG_ON(nr_inst != 2);

	if (test_bit(BHB_INSN, &system_bhb_mitigations))
		return;

	*updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
	*updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
}

#ifdef CONFIG_BPF_SYSCALL
#define EBPF_WARN "Unprivileged eBPF is enabled, data leaks possible via Spectre v2 BHB attacks!\n"
void unpriv_ebpf_notify(int new_state)
{
	if (spectre_v2_state == SPECTRE_VULNERABLE ||
	    spectre_bhb_state != SPECTRE_MITIGATED)
		return;

	if (!new_state)
		pr_err("WARNING: %s", EBPF_WARN);
}
#endif
  965. #endif