// SPDX-License-Identifier: GPL-2.0-only
/*
 * Contains CPU specific errata definitions
 *
 * Copyright (C) 2014 ARM Ltd.
 */

#include <linux/arm-smccc.h>
#include <linux/types.h>
#include <linux/cpu.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpufeature.h>
#include <asm/kvm_asm.h>
#include <asm/smp_plat.h>

static bool __maybe_unused
is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
{
	const struct arm64_midr_revidr *fix;
	u32 midr = read_cpuid_id(), revidr;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	if (!is_midr_in_range(midr, &entry->midr_range))
		return false;

	midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK;
	revidr = read_cpuid(REVIDR_EL1);
	for (fix = entry->fixed_revs; fix && fix->revidr_mask; fix++)
		if (midr == fix->midr_rv && (revidr & fix->revidr_mask))
			return false;

	return true;
}
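
/*
 * Illustrative sketch (not part of the build): how the REVIDR check
 * above interacts with the MIDR_FIXED() helper defined further down.
 * For a hypothetical capability entry covering Cortex-A53 r0p4, where
 * the fix is advertised in REVIDR[8]:
 *
 *	static const struct arm64_cpu_capabilities example = {
 *		ERRATA_MIDR_REV(MIDR_CORTEX_A53, 0, 4),
 *		MIDR_FIXED(0x4, BIT(8)),
 *	};
 *
 * is_affected_midr_range(&example, SCOPE_LOCAL_CPU) returns false on
 * parts that set REVIDR[8] (erratum fixed in this revision) and true
 * on matching parts that don't.
 */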

static bool __maybe_unused
is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry,
			    int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list);
}

static bool __maybe_unused
is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope)
{
	u32 model;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	model = read_cpuid_id();
	model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) |
		 MIDR_ARCHITECTURE_MASK;

	return model == entry->midr_range.model;
}

static bool
has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
			  int scope)
{
	u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
	u64 sys = arm64_ftr_reg_ctrel0.sys_val & mask;
	u64 ctr_raw, ctr_real;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	/*
	 * We want to make sure that all the CPUs in the system expose
	 * a consistent CTR_EL0 to make sure that applications behave
	 * correctly with migration.
	 *
	 * If a CPU has CTR_EL0.IDC but does not advertise it via CTR_EL0:
	 *
	 * 1) It is safe if the system doesn't support IDC, as the CPU
	 *    anyway reports IDC = 0, consistent with the rest.
	 *
	 * 2) If the system has IDC, it is still safe as we trap CTR_EL0
	 *    access on this CPU via the ARM64_HAS_CACHE_IDC capability.
	 *
	 * So, we need to make sure either the raw CTR_EL0 or the effective
	 * CTR_EL0 matches the system's copy to allow a secondary CPU to boot.
	 */
	ctr_raw = read_cpuid_cachetype() & mask;
	ctr_real = read_cpuid_effective_cachetype() & mask;

	return (ctr_real != sys) && (ctr_raw != sys);
}

static void
cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *cap)
{
	u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
	bool enable_uct_trap = false;

	/* Trap CTR_EL0 access on this CPU, only if it has a mismatch */
	if ((read_cpuid_cachetype() & mask) !=
	    (arm64_ftr_reg_ctrel0.sys_val & mask))
		enable_uct_trap = true;

	/* ... or if the system is affected by an erratum */
	if (cap->capability == ARM64_WORKAROUND_1542419)
		enable_uct_trap = true;

	if (enable_uct_trap)
		sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
}
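
/*
 * Sketch of the effect (assuming the kernel's CTR_EL0 emulation path):
 * with SCTLR_EL1.UCT clear, an EL0 read such as
 *
 *	mrs	x0, ctr_el0
 *
 * traps to EL1, where the kernel can return the system-wide sanitised
 * value (arm64_ftr_reg_ctrel0.sys_val) rather than this CPU's raw
 * register, so a task sees the same cache geometry on every CPU it
 * migrates to.
 */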

#ifdef CONFIG_ARM64_ERRATUM_1463225
static bool
has_cortex_a76_erratum_1463225(const struct arm64_cpu_capabilities *entry,
			       int scope)
{
	return is_affected_midr_range_list(entry, scope) && is_kernel_in_hyp_mode();
}
#endif

static void __maybe_unused
cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
{
	sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0);
}
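
/*
 * Likewise, clearing SCTLR_EL1.UCI traps EL0 cache maintenance
 * instructions (e.g. "dc cvau, x0" or "ic ivau, x0") into the kernel,
 * which can then perform an erratum-safe equivalent on the task's
 * behalf.
 */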

#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
	.matches = is_affected_midr_range,			\
	.midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_ALL_VERSIONS(model)				\
	.matches = is_affected_midr_range,			\
	.midr_range = MIDR_ALL_VERSIONS(model)

#define MIDR_FIXED(rev, revidr_mask)				\
	.fixed_revs = (struct arm64_midr_revidr[]){{ (rev), (revidr_mask) }, {}}

#define ERRATA_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_RANGE_LIST(list)				\
	.matches = is_affected_midr_range_list,			\
	.midr_range_list = list

/* Errata affecting a range of revisions of a given model variant */
#define ERRATA_MIDR_REV_RANGE(m, var, r_min, r_max)		\
	ERRATA_MIDR_RANGE(m, var, r_min, var, r_max)

/* Errata affecting a single variant/revision of a model */
#define ERRATA_MIDR_REV(model, var, rev)			\
	ERRATA_MIDR_RANGE(model, var, rev, var, rev)

/* Errata affecting all variants/revisions of a given model */
#define ERRATA_MIDR_ALL_VERSIONS(model)				\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_ALL_VERSIONS(model)

/* Errata affecting a list of midr ranges, with the same workaround */
#define ERRATA_MIDR_RANGE_LIST(midr_list)			\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_RANGE_LIST(midr_list)
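
/*
 * Illustrative expansion (sketch, not literal preprocessor output) of
 * how the helpers above compose. An entry written as
 *
 *	{
 *		.capability = ARM64_WORKAROUND_843419,
 *		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
 *	},
 *
 * expands to roughly
 *
 *	{
 *		.capability = ARM64_WORKAROUND_843419,
 *		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
 *		.matches = is_affected_midr_range,
 *		.midr_range = MIDR_RANGE(MIDR_CORTEX_A53, 0, 0, 0, 4),
 *	},
 *
 * i.e. Cortex-A53, variant 0, revisions p0 through p4.
 */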

static const __maybe_unused struct midr_range tx2_family_cpus[] = {
	MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
	MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
	{},
};

static bool __maybe_unused
needs_tx2_tvm_workaround(const struct arm64_cpu_capabilities *entry,
			 int scope)
{
	int i;

	if (!is_affected_midr_range_list(entry, scope) ||
	    !is_hyp_mode_available())
		return false;

	for_each_possible_cpu(i) {
		if (MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0) != 0)
			return true;
	}

	return false;
}
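
/*
 * Note (relies on the ThunderX2 MPIDR convention): with SMT enabled,
 * the hardware thread index is reported in affinity level 0, so any
 * CPU with MPIDR_EL1.Aff0 != 0 implies sibling threads are present and
 * the TVM trap workaround is needed. With SMT disabled, every CPU
 * reports Aff0 == 0 and the loop above finds nothing.
 */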

static bool __maybe_unused
has_neoverse_n1_erratum_1542419(const struct arm64_cpu_capabilities *entry,
				int scope)
{
	u32 midr = read_cpuid_id();
	bool has_dic = read_cpuid_cachetype() & BIT(CTR_DIC_SHIFT);
	const struct midr_range range = MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1);

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range(midr, &range) && has_dic;
}
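
/*
 * BIT(CTR_DIC_SHIFT) selects CTR_EL0.DIC, which advertises that
 * instruction cache invalidation is not required for data-to-
 * instruction coherence. Erratum 1542419 can break that guarantee on
 * Neoverse-N1, so only parts claiming DIC need the workaround (trap
 * CTR_EL0 so userspace sees DIC == 0 and falls back to IC IVAU).
 */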

#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
static const struct arm64_cpu_capabilities arm64_repeat_tlbi_list[] = {
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
	{
		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0)
	},
	{
		.midr_range.model = MIDR_QCOM_KRYO,
		.matches = is_kryo_midr,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1286807
	{
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0),
	},
	{
		/* Kryo4xx Gold (rcpe to rfpe) => (r0p0 to r3p0) */
		ERRATA_MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xe),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2441009
	{
		/* Cortex-A510 r0p0 -> r1p1. Fixed in r1p2 */
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1),
	},
#endif
	{},
};
#endif

#ifdef CONFIG_CAVIUM_ERRATUM_23154
static const struct midr_range cavium_erratum_23154_cpus[] = {
	MIDR_ALL_VERSIONS(MIDR_THUNDERX),
	MIDR_ALL_VERSIONS(MIDR_THUNDERX_81XX),
	MIDR_ALL_VERSIONS(MIDR_THUNDERX_83XX),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_98XX),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_96XX),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_95XX),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_95XXN),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_95XXMM),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_95XXO),
	{},
};
#endif

#ifdef CONFIG_CAVIUM_ERRATUM_27456
const struct midr_range cavium_erratum_27456_cpus[] = {
	/* Cavium ThunderX, T88 pass 1.x - 2.1 */
	MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 1),
	/* Cavium ThunderX, T81 pass 1.0 */
	MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
	{},
};
#endif

#ifdef CONFIG_CAVIUM_ERRATUM_30115
static const struct midr_range cavium_erratum_30115_cpus[] = {
	/* Cavium ThunderX, T88 pass 1.x - 2.2 */
	MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 2),
	/* Cavium ThunderX, T81 pass 1.0 - 1.2 */
	MIDR_REV_RANGE(MIDR_THUNDERX_81XX, 0, 0, 2),
	/* Cavium ThunderX, T83 pass 1.0 */
	MIDR_REV(MIDR_THUNDERX_83XX, 0, 0),
	{},
};
#endif

#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
static const struct arm64_cpu_capabilities qcom_erratum_1003_list[] = {
	{
		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
	},
	{
		.midr_range.model = MIDR_QCOM_KRYO,
		.matches = is_kryo_midr,
	},
	{},
};
#endif

#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
static const struct midr_range workaround_clean_cache[] = {
#if	defined(CONFIG_ARM64_ERRATUM_826319) || \
	defined(CONFIG_ARM64_ERRATUM_827319) || \
	defined(CONFIG_ARM64_ERRATUM_824069)
	/* Cortex-A53 r0p[012]: ARM errata 826319, 827319, 824069 */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2),
#endif
#ifdef	CONFIG_ARM64_ERRATUM_819472
	/* Cortex-A53 r0p[01]: ARM erratum 819472 */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1),
#endif
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_1418040
/*
 * - 1188873 affects r0p0 to r2p0
 * - 1418040 affects r0p0 to r3p1
 */
static const struct midr_range erratum_1418040_list[] = {
	/* Cortex-A76 r0p0 to r3p1 */
	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
	/* Neoverse-N1 r0p0 to r3p1 */
	MIDR_RANGE(MIDR_NEOVERSE_N1, 0, 0, 3, 1),
	/* Kryo4xx Gold (rcpe to rfpf) => (r0p0 to r3p1) */
	MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xf),
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_845719
static const struct midr_range erratum_845719_list[] = {
	/* Cortex-A53 r0p[01234] */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
	/* Brahma-B53 r0p[0] */
	MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
	/* Kryo2XX Silver rAp4 */
	MIDR_REV(MIDR_QCOM_KRYO_2XX_SILVER, 0xa, 0x4),
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_843419
static const struct arm64_cpu_capabilities erratum_843419_list[] = {
	{
		/* Cortex-A53 r0p[01234] */
		.matches = is_affected_midr_range,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
		MIDR_FIXED(0x4, BIT(8)),
	},
	{
		/* Brahma-B53 r0p[0] */
		.matches = is_affected_midr_range,
		ERRATA_MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
	},
	{},
};
#endif

#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT
static const struct midr_range erratum_speculative_at_list[] = {
#ifdef CONFIG_ARM64_ERRATUM_1165522
	/* Cortex A76 r0p0 to r2p0 */
	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
#endif
#ifdef CONFIG_ARM64_ERRATUM_1319367
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
#endif
#ifdef CONFIG_ARM64_ERRATUM_1530923
	/* Cortex A55 r0p0 to r2p0 */
	MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 2, 0),
	/* Kryo4xx Silver (rdpe => r1p0) */
	MIDR_REV(MIDR_QCOM_KRYO_4XX_SILVER, 0xd, 0xe),
#endif
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_1463225
static const struct midr_range erratum_1463225[] = {
	/* Cortex-A76 r0p0 - r3p1 */
	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
	/* Kryo4xx Gold (rcpe to rfpf) => (r0p0 to r3p1) */
	MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xf),
	{},
};
#endif

#ifdef CONFIG_ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE
static const struct midr_range trbe_overwrite_fill_mode_cpus[] = {
#ifdef CONFIG_ARM64_ERRATUM_2139208
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
#endif
#ifdef CONFIG_ARM64_ERRATUM_2119858
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
	MIDR_RANGE(MIDR_CORTEX_X2, 0, 0, 2, 0),
#endif
	{},
};
#endif	/* CONFIG_ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE */

#ifdef CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE
static const struct midr_range tsb_flush_fail_cpus[] = {
#ifdef CONFIG_ARM64_ERRATUM_2067961
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
#endif
#ifdef CONFIG_ARM64_ERRATUM_2054223
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
#endif
	{},
};
#endif	/* CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE */

#ifdef CONFIG_ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE
static struct midr_range trbe_write_out_of_range_cpus[] = {
#ifdef CONFIG_ARM64_ERRATUM_2253138
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
#endif
#ifdef CONFIG_ARM64_ERRATUM_2224489
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
	MIDR_RANGE(MIDR_CORTEX_X2, 0, 0, 2, 0),
#endif
	{},
};
#endif	/* CONFIG_ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE */

#ifdef CONFIG_ARM64_ERRATUM_1742098
static struct midr_range broken_aarch32_aes[] = {
	MIDR_RANGE(MIDR_CORTEX_A57, 0, 1, 0xf, 0xf),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
	{},
};
#endif	/* CONFIG_ARM64_ERRATUM_1742098 */

const struct arm64_cpu_capabilities arm64_errata[] = {
#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
	{
		.desc = "ARM errata 826319, 827319, 824069, or 819472",
		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
		ERRATA_MIDR_RANGE_LIST(workaround_clean_cache),
		.cpu_enable = cpu_enable_cache_maint_trap,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_832075
	{
		/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 832075",
		.capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_834220
	{
		/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 834220",
		.capability = ARM64_WORKAROUND_834220,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_843419
	{
		.desc = "ARM erratum 843419",
		.capability = ARM64_WORKAROUND_843419,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = cpucap_multi_entry_cap_matches,
		.match_list = erratum_843419_list,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_845719
	{
		.desc = "ARM erratum 845719",
		.capability = ARM64_WORKAROUND_845719,
		ERRATA_MIDR_RANGE_LIST(erratum_845719_list),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23154
	{
		.desc = "Cavium errata 23154 and 38545",
		.capability = ARM64_WORKAROUND_CAVIUM_23154,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		ERRATA_MIDR_RANGE_LIST(cavium_erratum_23154_cpus),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_27456
	{
		.desc = "Cavium erratum 27456",
		.capability = ARM64_WORKAROUND_CAVIUM_27456,
		ERRATA_MIDR_RANGE_LIST(cavium_erratum_27456_cpus),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_30115
	{
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		ERRATA_MIDR_RANGE_LIST(cavium_erratum_30115_cpus),
	},
#endif
	{
		.desc = "Mismatched cache type (CTR_EL0)",
		.capability = ARM64_MISMATCHED_CACHE_TYPE,
		.matches = has_mismatched_cache_type,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.cpu_enable = cpu_enable_trap_ctr_access,
	},
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
	{
		.desc = "Qualcomm Technologies Falkor/Kryo erratum 1003",
		.capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = cpucap_multi_entry_cap_matches,
		.match_list = qcom_erratum_1003_list,
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
	{
		.desc = "Qualcomm erratum 1009, or ARM erratum 1286807, 2441009",
		.capability = ARM64_WORKAROUND_REPEAT_TLBI,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = cpucap_multi_entry_cap_matches,
		.match_list = arm64_repeat_tlbi_list,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
	{
		/* Cortex-A73 all versions */
		.desc = "ARM erratum 858921",
		.capability = ARM64_WORKAROUND_858921,
		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
	},
#endif
	{
		.desc = "Spectre-v2",
		.capability = ARM64_SPECTRE_V2,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_spectre_v2,
		.cpu_enable = spectre_v2_enable_mitigation,
	},
#ifdef CONFIG_RANDOMIZE_BASE
	{
		/* Must come after the Spectre-v2 entry */
		.desc = "Spectre-v3a",
		.capability = ARM64_SPECTRE_V3A,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_spectre_v3a,
		.cpu_enable = spectre_v3a_enable_mitigation,
	},
#endif
	{
		.desc = "Spectre-v4",
		.capability = ARM64_SPECTRE_V4,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_spectre_v4,
		.cpu_enable = spectre_v4_enable_mitigation,
	},
	{
		.desc = "Spectre-BHB",
		.capability = ARM64_SPECTRE_BHB,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = is_spectre_bhb_affected,
		.cpu_enable = spectre_bhb_enable_mitigation,
	},
#ifdef CONFIG_ARM64_ERRATUM_1418040
	{
		.desc = "ARM erratum 1418040",
		.capability = ARM64_WORKAROUND_1418040,
		ERRATA_MIDR_RANGE_LIST(erratum_1418040_list),
		/*
		 * We need to allow affected CPUs to come in late, but
		 * also need the non-affected CPUs to be able to come
		 * in at any point in time. Wonderful.
		 */
		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT
	{
		.desc = "ARM errata 1165522, 1319367, or 1530923",
		.capability = ARM64_WORKAROUND_SPECULATIVE_AT,
		ERRATA_MIDR_RANGE_LIST(erratum_speculative_at_list),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1463225
	{
		.desc = "ARM erratum 1463225",
		.capability = ARM64_WORKAROUND_1463225,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_cortex_a76_erratum_1463225,
		.midr_range_list = erratum_1463225,
	},
#endif
#ifdef CONFIG_CAVIUM_TX2_ERRATUM_219
	{
		.desc = "Cavium ThunderX2 erratum 219 (KVM guest sysreg trapping)",
		.capability = ARM64_WORKAROUND_CAVIUM_TX2_219_TVM,
		ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
		.matches = needs_tx2_tvm_workaround,
	},
	{
		.desc = "Cavium ThunderX2 erratum 219 (PRFM removal)",
		.capability = ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM,
		ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1542419
	{
		/* we depend on the firmware portion for correctness */
		.desc = "ARM erratum 1542419 (kernel portion)",
		.capability = ARM64_WORKAROUND_1542419,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_neoverse_n1_erratum_1542419,
		.cpu_enable = cpu_enable_trap_ctr_access,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1508412
	{
		/* we depend on the firmware portion for correctness */
		.desc = "ARM erratum 1508412 (kernel portion)",
		.capability = ARM64_WORKAROUND_1508412,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A77,
				  0, 0,
				  1, 0),
	},
#endif
#ifdef CONFIG_NVIDIA_CARMEL_CNP_ERRATUM
	{
		/* NVIDIA Carmel */
		.desc = "NVIDIA Carmel CNP erratum",
		.capability = ARM64_WORKAROUND_NVIDIA_CARMEL_CNP,
		ERRATA_MIDR_ALL_VERSIONS(MIDR_NVIDIA_CARMEL),
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE
	{
		/*
		 * The erratum work around is handled within the TRBE
		 * driver and can be applied per-cpu. So, we can allow
		 * a late CPU to come online with this erratum.
		 */
		.desc = "ARM erratum 2119858 or 2139208",
		.capability = ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE,
		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
		CAP_MIDR_RANGE_LIST(trbe_overwrite_fill_mode_cpus),
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE
	{
		.desc = "ARM erratum 2067961 or 2054223",
		.capability = ARM64_WORKAROUND_TSB_FLUSH_FAILURE,
		ERRATA_MIDR_RANGE_LIST(tsb_flush_fail_cpus),
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE
	{
		.desc = "ARM erratum 2253138 or 2224489",
		.capability = ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE,
		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
		CAP_MIDR_RANGE_LIST(trbe_write_out_of_range_cpus),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2077057
	{
		.desc = "ARM erratum 2077057",
		.capability = ARM64_WORKAROUND_2077057,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2064142
	{
		.desc = "ARM erratum 2064142",
		.capability = ARM64_WORKAROUND_2064142,

		/* Cortex-A510 r0p0 - r0p2 */
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2)
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2457168
	{
		.desc = "ARM erratum 2457168",
		.capability = ARM64_WORKAROUND_2457168,
		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,

		/* Cortex-A510 r0p0-r1p1 */
		CAP_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1)
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2038923
	{
		.desc = "ARM erratum 2038923",
		.capability = ARM64_WORKAROUND_2038923,

		/* Cortex-A510 r0p0 - r0p2 */
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2)
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1902691
	{
		.desc = "ARM erratum 1902691",
		.capability = ARM64_WORKAROUND_1902691,

		/* Cortex-A510 r0p0 - r0p1 */
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 1)
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1742098
	{
		.desc = "ARM erratum 1742098",
		.capability = ARM64_WORKAROUND_1742098,
		CAP_MIDR_RANGE_LIST(broken_aarch32_aes),
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
	},
#endif
	{
	}
};
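
/*
 * Usage sketch (assumes the standard arm64 cpucaps API): detection via
 * this table sets a capability bit which the rest of the kernel keys
 * its workarounds off, e.g. from C code:
 *
 *	if (cpus_have_final_cap(ARM64_WORKAROUND_REPEAT_TLBI))
 *		repeat_tlbi();	// hypothetical helper, for illustration
 *
 * or from assembly via alternatives patched against the same bit.
 */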