// SPDX-License-Identifier: GPL-2.0-only
/*
 * Contains CPU specific errata definitions
 *
 * Copyright (C) 2014 ARM Ltd.
 */

#include <linux/arm-smccc.h>
#include <linux/types.h>
#include <linux/cpu.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpufeature.h>
#include <asm/kvm_asm.h>
#include <asm/smp_plat.h>
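
/*
 * A CPU is affected if its MIDR falls inside the entry's midr_range and
 * no "fixed in this revision" entry applies: each fixed_revs element
 * names a variant/revision whose REVIDR bits, when set, indicate that
 * the erratum has been fixed in that part.
 */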
static bool __maybe_unused
is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
{
        const struct arm64_midr_revidr *fix;
        u32 midr = read_cpuid_id(), revidr;

        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
        if (!is_midr_in_range(midr, &entry->midr_range))
                return false;

        midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK;
        revidr = read_cpuid(REVIDR_EL1);
        for (fix = entry->fixed_revs; fix && fix->revidr_mask; fix++)
                if (midr == fix->midr_rv && (revidr & fix->revidr_mask))
                        return false;

        return true;
}

static bool __maybe_unused
is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry,
                            int scope)
{
        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
        return is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list);
}
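
/*
 * Qualcomm Kryo parts encode the CPU type in the upper nibble of the
 * part number, so match on implementer, architecture and that nibble
 * only, ignoring the rest of the part number and the revision fields.
 */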
static bool __maybe_unused
is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope)
{
        u32 model;

        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

        model = read_cpuid_id();
        model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) |
                 MIDR_ARCHITECTURE_MASK;
        return model == entry->midr_range.model;
}

static bool
has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
                          int scope)
{
        u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
        u64 sys = arm64_ftr_reg_ctrel0.sys_val & mask;
        u64 ctr_raw, ctr_real;

        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

        /*
         * We want to make sure that all the CPUs in the system expose
         * a consistent CTR_EL0 so that applications behave correctly
         * with migration.
         *
         * If a CPU has CTR_EL0.IDC but does not advertise it via CTR_EL0:
         *
         * 1) It is safe if the system doesn't support IDC, as the CPU
         *    anyway reports IDC = 0, consistent with the rest.
         *
         * 2) If the system has IDC, it is still safe as we trap CTR_EL0
         *    access on this CPU via the ARM64_HAS_CACHE_IDC capability.
         *
         * So, we need to make sure either the raw CTR_EL0 or the effective
         * CTR_EL0 matches the system's copy to allow a secondary CPU to boot.
         */
        ctr_raw = read_cpuid_cachetype() & mask;
        ctr_real = read_cpuid_effective_cachetype() & mask;

        return (ctr_real != sys) && (ctr_raw != sys);
}
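
/*
 * Clearing SCTLR_EL1.UCT makes EL0 reads of CTR_EL0 trap to EL1, where
 * the kernel emulates the register with the system-wide safe value.
 */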
static void
cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *cap)
{
        u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
        bool enable_uct_trap = false;

        /* Trap CTR_EL0 access on this CPU, only if it has a mismatch */
        if ((read_cpuid_cachetype() & mask) !=
            (arm64_ftr_reg_ctrel0.sys_val & mask))
                enable_uct_trap = true;

        /* ... or if the system is affected by an erratum */
        if (cap->capability == ARM64_WORKAROUND_1542419)
                enable_uct_trap = true;

        if (enable_uct_trap)
                sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
}

#ifdef CONFIG_ARM64_ERRATUM_1463225
DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
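
/*
 * Flag used by the syscall entry path to tell the debug exception
 * handler that the erratum 1463225 workaround is in progress on this
 * CPU. The matcher below additionally requires the kernel to be
 * running at EL2 (VHE).
 */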
static bool
has_cortex_a76_erratum_1463225(const struct arm64_cpu_capabilities *entry,
                               int scope)
{
        return is_affected_midr_range_list(entry, scope) && is_kernel_in_hyp_mode();
}
#endif
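
/*
 * Clearing SCTLR_EL1.UCI makes EL0 cache maintenance instructions trap
 * to EL1, where the kernel can emulate them safely.
 */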
static void __maybe_unused
cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
{
        sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0);
}
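
/*
 * Helper macros for building arm64_cpu_capabilities entries that match
 * on a MIDR range, a list of ranges, or all revisions of a given part.
 */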
#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)       \
        .matches = is_affected_midr_range,                      \
        .midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_ALL_VERSIONS(model)                            \
        .matches = is_affected_midr_range,                      \
        .midr_range = MIDR_ALL_VERSIONS(model)

#define MIDR_FIXED(rev, revidr_mask)                            \
        .fixed_revs = (struct arm64_midr_revidr[]){{ (rev), (revidr_mask) }, {}}

#define ERRATA_MIDR_RANGE(model, v_min, r_min, v_max, r_max)    \
        .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,                 \
        CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_RANGE_LIST(list)                               \
        .matches = is_affected_midr_range_list,                 \
        .midr_range_list = list

/* Errata affecting a range of revisions of a given model variant */
#define ERRATA_MIDR_REV_RANGE(m, var, r_min, r_max)             \
        ERRATA_MIDR_RANGE(m, var, r_min, var, r_max)

/* Errata affecting a single variant/revision of a model */
#define ERRATA_MIDR_REV(model, var, rev)                        \
        ERRATA_MIDR_RANGE(model, var, rev, var, rev)

/* Errata affecting all variants/revisions of a given model */
#define ERRATA_MIDR_ALL_VERSIONS(model)                         \
        .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,                 \
        CAP_MIDR_ALL_VERSIONS(model)

/* Errata affecting a list of midr ranges, with the same workaround */
#define ERRATA_MIDR_RANGE_LIST(midr_list)                       \
        .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,                 \
        CAP_MIDR_RANGE_LIST(midr_list)

static const __maybe_unused struct midr_range tx2_family_cpus[] = {
        MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
        MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
        {},
};
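
/*
 * The ThunderX2 TVM workaround is only applied when SMT is in use: a
 * non-zero Aff0 in any CPU's MPIDR indicates sibling threads. EL2 must
 * also be available, as the workaround traps guest sysreg accesses.
 */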
static bool __maybe_unused
needs_tx2_tvm_workaround(const struct arm64_cpu_capabilities *entry,
                         int scope)
{
        int i;

        if (!is_affected_midr_range_list(entry, scope) ||
            !is_hyp_mode_available())
                return false;

        for_each_possible_cpu(i) {
                if (MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0) != 0)
                        return true;
        }

        return false;
}
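
/*
 * Erratum 1542419 only needs handling on Neoverse-N1 parts that
 * advertise CTR_EL0.DIC. The kernel portion of the workaround traps
 * EL0 reads of CTR_EL0 and reports DIC = 0, forcing userspace to do
 * explicit I-cache maintenance.
 */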
static bool __maybe_unused
has_neoverse_n1_erratum_1542419(const struct arm64_cpu_capabilities *entry,
                                int scope)
{
        u32 midr = read_cpuid_id();
        bool has_dic = read_cpuid_cachetype() & BIT(CTR_DIC_SHIFT);
        const struct midr_range range = MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1);

        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
        return is_midr_in_range(midr, &range) && has_dic;
}
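
/*
 * CPUs on which a single TLBI may fail to invalidate a stale entry;
 * ARM64_WORKAROUND_REPEAT_TLBI makes the TLB invalidation sequence be
 * issued twice.
 */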
#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
static const struct arm64_cpu_capabilities arm64_repeat_tlbi_list[] = {
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
        {
                ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0)
        },
        {
                .midr_range.model = MIDR_QCOM_KRYO,
                .matches = is_kryo_midr,
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_1286807
        {
                ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0),
        },
#endif
        {},
};
#endif

#ifdef CONFIG_CAVIUM_ERRATUM_27456
const struct midr_range cavium_erratum_27456_cpus[] = {
        /* Cavium ThunderX, T88 pass 1.x - 2.1 */
        MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 1),
        /* Cavium ThunderX, T81 pass 1.0 */
        MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
        {},
};
#endif

#ifdef CONFIG_CAVIUM_ERRATUM_30115
static const struct midr_range cavium_erratum_30115_cpus[] = {
        /* Cavium ThunderX, T88 pass 1.x - 2.2 */
        MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 2),
        /* Cavium ThunderX, T81 pass 1.0 - 1.2 */
        MIDR_REV_RANGE(MIDR_THUNDERX_81XX, 0, 0, 2),
        /* Cavium ThunderX, T83 pass 1.0 */
        MIDR_REV(MIDR_THUNDERX_83XX, 0, 0),
        {},
};
#endif

#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
static const struct arm64_cpu_capabilities qcom_erratum_1003_list[] = {
        {
                ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
        },
        {
                .midr_range.model = MIDR_QCOM_KRYO,
                .matches = is_kryo_midr,
        },
        {},
};
#endif

#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
static const struct midr_range workaround_clean_cache[] = {
#if defined(CONFIG_ARM64_ERRATUM_826319) || \
    defined(CONFIG_ARM64_ERRATUM_827319) || \
    defined(CONFIG_ARM64_ERRATUM_824069)
        /* Cortex-A53 r0p[012]: ARM errata 826319, 827319, 824069 */
        MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2),
#endif
#ifdef CONFIG_ARM64_ERRATUM_819472
        /* Cortex-A53 r0p[01]: ARM erratum 819472 */
        MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1),
#endif
        {},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_1418040
/*
 * - 1188873 affects r0p0 to r2p0
 * - 1418040 affects r0p0 to r3p1
 */
static const struct midr_range erratum_1418040_list[] = {
        /* Cortex-A76 r0p0 to r3p1 */
        MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
        /* Neoverse-N1 r0p0 to r3p1 */
        MIDR_RANGE(MIDR_NEOVERSE_N1, 0, 0, 3, 1),
        /* Kryo4xx Gold (rcpe to rfpf) => (r0p0 to r3p1) */
        MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xf),
        {},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_845719
static const struct midr_range erratum_845719_list[] = {
        /* Cortex-A53 r0p[01234] */
        MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
        /* Brahma-B53 r0p[0] */
        MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
        /* Kryo2XX Silver rAp4 */
        MIDR_REV(MIDR_QCOM_KRYO_2XX_SILVER, 0xa, 0x4),
        {},
};
#endif
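
/*
 * For erratum 843419, Cortex-A53 r0p4 parts that set REVIDR bit 8 have
 * the erratum fixed and are excluded via the MIDR_FIXED() entry.
 */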
#ifdef CONFIG_ARM64_ERRATUM_843419
static const struct arm64_cpu_capabilities erratum_843419_list[] = {
        {
                /* Cortex-A53 r0p[01234] */
                .matches = is_affected_midr_range,
                ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
                MIDR_FIXED(0x4, BIT(8)),
        },
        {
                /* Brahma-B53 r0p[0] */
                .matches = is_affected_midr_range,
                ERRATA_MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
        },
        {},
};
#endif

#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT
static const struct midr_range erratum_speculative_at_list[] = {
#ifdef CONFIG_ARM64_ERRATUM_1165522
        /* Cortex-A76 r0p0 to r2p0 */
        MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
#endif
#ifdef CONFIG_ARM64_ERRATUM_1319367
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
#endif
#ifdef CONFIG_ARM64_ERRATUM_1530923
        /* Cortex-A55 r0p0 to r2p0 */
        MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 2, 0),
        /* Kryo4xx Silver (rdpe => r1p0) */
        MIDR_REV(MIDR_QCOM_KRYO_4XX_SILVER, 0xd, 0xe),
#endif
        {},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_1463225
static const struct midr_range erratum_1463225[] = {
        /* Cortex-A76 r0p0 - r3p1 */
        MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
        /* Kryo4xx Gold (rcpe to rfpf) => (r0p0 to r3p1) */
        MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xf),
        {},
};
#endif

#ifdef CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE
static const struct midr_range tsb_flush_fail_cpus[] = {
#ifdef CONFIG_ARM64_ERRATUM_2067961
        MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
#endif
#ifdef CONFIG_ARM64_ERRATUM_2054223
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
#endif
        {},
};
#endif /* CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE */
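
/*
 * The master errata table. Entries are evaluated against each CPU as it
 * comes up; a match enables the named capability, which the rest of the
 * kernel then uses to key the actual workaround.
 */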
const struct arm64_cpu_capabilities arm64_errata[] = {
#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
        {
                .desc = "ARM errata 826319, 827319, 824069, or 819472",
                .capability = ARM64_WORKAROUND_CLEAN_CACHE,
                ERRATA_MIDR_RANGE_LIST(workaround_clean_cache),
                .cpu_enable = cpu_enable_cache_maint_trap,
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_832075
        {
                /* Cortex-A57 r0p0 - r1p2 */
                .desc = "ARM erratum 832075",
                .capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
                ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
                                  0, 0,
                                  1, 2),
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_834220
        {
                /* Cortex-A57 r0p0 - r1p2 */
                .desc = "ARM erratum 834220",
                .capability = ARM64_WORKAROUND_834220,
                ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
                                  0, 0,
                                  1, 2),
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_843419
        {
                .desc = "ARM erratum 843419",
                .capability = ARM64_WORKAROUND_843419,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .matches = cpucap_multi_entry_cap_matches,
                .match_list = erratum_843419_list,
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_845719
        {
                .desc = "ARM erratum 845719",
                .capability = ARM64_WORKAROUND_845719,
                ERRATA_MIDR_RANGE_LIST(erratum_845719_list),
        },
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23154
        {
                /* Cavium ThunderX, pass 1.x */
                .desc = "Cavium erratum 23154",
                .capability = ARM64_WORKAROUND_CAVIUM_23154,
                ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX, 0, 0, 1),
        },
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_27456
        {
                .desc = "Cavium erratum 27456",
                .capability = ARM64_WORKAROUND_CAVIUM_27456,
                ERRATA_MIDR_RANGE_LIST(cavium_erratum_27456_cpus),
        },
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_30115
        {
                .desc = "Cavium erratum 30115",
                .capability = ARM64_WORKAROUND_CAVIUM_30115,
                ERRATA_MIDR_RANGE_LIST(cavium_erratum_30115_cpus),
        },
#endif
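        /*
         * Not guarded by a Kconfig option: a CTR_EL0 mismatch between
         * CPUs must always be detected and handled.
         */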
        {
                .desc = "Mismatched cache type (CTR_EL0)",
                .capability = ARM64_MISMATCHED_CACHE_TYPE,
                .matches = has_mismatched_cache_type,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .cpu_enable = cpu_enable_trap_ctr_access,
        },
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
        {
                .desc = "Qualcomm Technologies Falkor/Kryo erratum 1003",
                .capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .matches = cpucap_multi_entry_cap_matches,
                .match_list = qcom_erratum_1003_list,
        },
#endif
#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
        {
                .desc = "Qualcomm erratum 1009, or ARM erratum 1286807",
                .capability = ARM64_WORKAROUND_REPEAT_TLBI,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .matches = cpucap_multi_entry_cap_matches,
                .match_list = arm64_repeat_tlbi_list,
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
        {
                /* Cortex-A73 all versions */
                .desc = "ARM erratum 858921",
                .capability = ARM64_WORKAROUND_858921,
                ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
        },
#endif
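        /*
         * The Spectre entries use dynamic matchers (has_spectre_*) rather
         * than plain MIDR lists, since the mitigation state also depends
         * on firmware support.
         */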
        {
                .desc = "Spectre-v2",
                .capability = ARM64_SPECTRE_V2,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .matches = has_spectre_v2,
                .cpu_enable = spectre_v2_enable_mitigation,
        },
#ifdef CONFIG_RANDOMIZE_BASE
        {
                /* Must come after the Spectre-v2 entry */
                .desc = "Spectre-v3a",
                .capability = ARM64_SPECTRE_V3A,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .matches = has_spectre_v3a,
                .cpu_enable = spectre_v3a_enable_mitigation,
        },
#endif
        {
                .desc = "Spectre-v4",
                .capability = ARM64_SPECTRE_V4,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .matches = has_spectre_v4,
                .cpu_enable = spectre_v4_enable_mitigation,
        },
        {
                .desc = "Spectre-BHB",
                .capability = ARM64_SPECTRE_BHB,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .matches = is_spectre_bhb_affected,
                .cpu_enable = spectre_bhb_enable_mitigation,
        },
#ifdef CONFIG_ARM64_ERRATUM_1418040
        {
                .desc = "ARM erratum 1418040",
                .capability = ARM64_WORKAROUND_1418040,
                ERRATA_MIDR_RANGE_LIST(erratum_1418040_list),
                /*
                 * We need to allow affected CPUs to come in late, but
                 * also need the non-affected CPUs to be able to come
                 * in at any point in time. Wonderful.
                 */
                .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
        },
#endif
#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT
        {
                .desc = "ARM errata 1165522, 1319367, or 1530923",
                .capability = ARM64_WORKAROUND_SPECULATIVE_AT,
                ERRATA_MIDR_RANGE_LIST(erratum_speculative_at_list),
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_1463225
        {
                .desc = "ARM erratum 1463225",
                .capability = ARM64_WORKAROUND_1463225,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .matches = has_cortex_a76_erratum_1463225,
                .midr_range_list = erratum_1463225,
        },
#endif
#ifdef CONFIG_CAVIUM_TX2_ERRATUM_219
        {
                .desc = "Cavium ThunderX2 erratum 219 (KVM guest sysreg trapping)",
                .capability = ARM64_WORKAROUND_CAVIUM_TX2_219_TVM,
                ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
                .matches = needs_tx2_tvm_workaround,
        },
        {
                .desc = "Cavium ThunderX2 erratum 219 (PRFM removal)",
                .capability = ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM,
                ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_1542419
        {
                /* we depend on the firmware portion for correctness */
                .desc = "ARM erratum 1542419 (kernel portion)",
                .capability = ARM64_WORKAROUND_1542419,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .matches = has_neoverse_n1_erratum_1542419,
                .cpu_enable = cpu_enable_trap_ctr_access,
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_1508412
        {
                /* we depend on the firmware portion for correctness */
                .desc = "ARM erratum 1508412 (kernel portion)",
                .capability = ARM64_WORKAROUND_1508412,
                ERRATA_MIDR_RANGE(MIDR_CORTEX_A77,
                                  0, 0,
                                  1, 0),
        },
#endif
#ifdef CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE
        {
                .desc = "ARM erratum 2067961 or 2054223",
                .capability = ARM64_WORKAROUND_TSB_FLUSH_FAILURE,
                ERRATA_MIDR_RANGE_LIST(tsb_flush_fail_cpus),
        },
#endif
        {
        }
};