/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_MCE_H
#define _ASM_X86_MCE_H

#include <uapi/asm/mce.h>

/*
 * Machine Check support for x86
 */

/* MCG_CAP register defines */
#define MCG_BANKCNT_MASK	0xff         /* Number of Banks */
#define MCG_CTL_P		BIT_ULL(8)   /* MCG_CTL register available */
#define MCG_EXT_P		BIT_ULL(9)   /* Extended registers available */
#define MCG_CMCI_P		BIT_ULL(10)  /* CMCI supported */
#define MCG_EXT_CNT_MASK	0xff0000     /* Number of Extended registers */
#define MCG_EXT_CNT_SHIFT	16
#define MCG_EXT_CNT(c)		(((c) & MCG_EXT_CNT_MASK) >> MCG_EXT_CNT_SHIFT)
#define MCG_SER_P		BIT_ULL(24)  /* MCA recovery/new status bits */
#define MCG_ELOG_P		BIT_ULL(26)  /* Extended error log supported */
#define MCG_LMCE_P		BIT_ULL(27)  /* Local machine check supported */
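
/*
 * Illustrative sketch, not part of the upstream header: decoding a raw
 * MCG_CAP value that the caller has already read from the MSR.  The helper
 * names are hypothetical and only show how the masks above compose.
 */
static inline unsigned int mcg_cap_example_bank_count(u64 cap)
{
	/* Bits 7:0 report the number of error-reporting banks on this CPU. */
	return cap & MCG_BANKCNT_MASK;
}

static inline unsigned int mcg_cap_example_ext_count(u64 cap)
{
	/* Extended state registers are only present when MCG_EXT_P is set. */
	if (!(cap & MCG_EXT_P))
		return 0;

	return MCG_EXT_CNT(cap);
}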

/* MCG_STATUS register defines */
#define MCG_STATUS_RIPV		BIT_ULL(0)   /* restart ip valid */
#define MCG_STATUS_EIPV		BIT_ULL(1)   /* ip points to correct instruction */
#define MCG_STATUS_MCIP		BIT_ULL(2)   /* machine check in progress */
#define MCG_STATUS_LMCES	BIT_ULL(3)   /* LMCE signaled */

/* MCG_EXT_CTL register defines */
#define MCG_EXT_CTL_LMCE_EN	BIT_ULL(0)   /* Enable LMCE */

/* MCi_STATUS register defines */
#define MCI_STATUS_VAL		BIT_ULL(63)  /* valid error */
#define MCI_STATUS_OVER		BIT_ULL(62)  /* previous errors lost */
#define MCI_STATUS_UC		BIT_ULL(61)  /* uncorrected error */
#define MCI_STATUS_EN		BIT_ULL(60)  /* error enabled */
#define MCI_STATUS_MISCV	BIT_ULL(59)  /* misc error reg. valid */
#define MCI_STATUS_ADDRV	BIT_ULL(58)  /* addr reg. valid */
#define MCI_STATUS_PCC		BIT_ULL(57)  /* processor context corrupt */
#define MCI_STATUS_S		BIT_ULL(56)  /* Signaled machine check */
#define MCI_STATUS_AR		BIT_ULL(55)  /* Action required */
#define MCI_STATUS_CEC_SHIFT	38           /* Corrected Error Count */
#define MCI_STATUS_CEC_MASK	GENMASK_ULL(52, 38)
#define MCI_STATUS_CEC(c)	(((c) & MCI_STATUS_CEC_MASK) >> MCI_STATUS_CEC_SHIFT)
#define MCI_STATUS_MSCOD(m)	(((m) >> 16) & 0xffff)
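
/*
 * Illustrative sketch, not part of the upstream header: pulling the
 * corrected error count out of a raw MCi_STATUS value.  The helper name is
 * hypothetical; the caller is assumed to have already read the bank's
 * status MSR.
 */
static inline u64 mci_status_example_corrected_count(u64 status)
{
	/* The count is only meaningful when the bank reports a valid error. */
	if (!(status & MCI_STATUS_VAL))
		return 0;

	return MCI_STATUS_CEC(status);
}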

/* AMD-specific bits */
#define MCI_STATUS_TCC		BIT_ULL(55)  /* Task context corrupt */
#define MCI_STATUS_SYNDV	BIT_ULL(53)  /* synd reg. valid */
#define MCI_STATUS_DEFERRED	BIT_ULL(44)  /* uncorrected error, deferred exception */
#define MCI_STATUS_POISON	BIT_ULL(43)  /* access poisonous data */
#define MCI_STATUS_SCRUB	BIT_ULL(40)  /* Error detected during scrub operation */
/*
 * If set, the McaX field indicates that a given bank supports MCA extensions:
 *  - Deferred error interrupt type is specifiable by bank.
 *  - MCx_MISC0[BlkPtr] field indicates presence of extended MISC registers,
 *    but it should not be used to determine MSR numbers.
 *  - TCC bit is present in MCx_STATUS.
 */
#define MCI_CONFIG_MCAX		0x1
#define MCI_IPID_MCATYPE	0xFFFF0000
#define MCI_IPID_HWID		0xFFF

/*
 * Note that the full MCACOD field of IA32_MCi_STATUS MSR is
 * bits 15:0.  But bit 12 is the 'F' bit, defined for corrected
 * errors to indicate that errors are being filtered by hardware.
 * We should mask out bit 12 when looking for specific signatures
 * of uncorrected errors - so the F bit is deliberately skipped
 * in this #define.
 */
#define MCACOD		0xefff	/* MCA Error Code */

/* Architecturally defined codes from SDM Vol. 3B Chapter 15 */
#define MCACOD_SCRUB	0x00C0	/* 0xC0-0xCF Memory Scrubbing */
#define MCACOD_SCRUBMSK	0xeff0	/* Skip bit 12 ('F' bit) */
#define MCACOD_L3WB	0x017A	/* L3 Explicit Writeback */
#define MCACOD_DATA	0x0134	/* Data Load */
#define MCACOD_INSTR	0x0150	/* Instruction Fetch */
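
/*
 * Illustrative sketch, not part of the upstream header: matching an error
 * signature while ignoring the 'F' (filtering) bit, as described above.  The
 * helper names are hypothetical.
 */
static inline bool mcacod_example_is_mem_scrub(u64 status)
{
	/* MCACOD_SCRUBMSK already has bit 12 cleared. */
	return (status & MCACOD_SCRUBMSK) == MCACOD_SCRUB;
}

static inline bool mcacod_example_is_data_load(u64 status)
{
	/* MCACOD skips bit 12, so filtered corrected errors still match. */
	return (status & MCACOD) == MCACOD_DATA;
}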

/* MCi_MISC register defines */
#define MCI_MISC_ADDR_LSB(m)	((m) & 0x3f)
#define MCI_MISC_ADDR_MODE(m)	(((m) >> 6) & 7)
#define MCI_MISC_ADDR_SEGOFF	0	/* segment offset */
#define MCI_MISC_ADDR_LINEAR	1	/* linear address */
#define MCI_MISC_ADDR_PHYS	2	/* physical address */
#define MCI_MISC_ADDR_MEM	3	/* memory address */
#define MCI_MISC_ADDR_GENERIC	7	/* generic */
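
/*
 * Illustrative sketch, not part of the upstream header: interpreting a raw
 * MCi_MISC value.  The address in MCi_ADDR is only meaningful down to bit
 * MCI_MISC_ADDR_LSB(misc), and recovery code usually also checks that the
 * reported mode is a physical address.  The helper name is hypothetical.
 */
static inline bool mci_misc_example_is_phys_addr(u64 misc)
{
	return MCI_MISC_ADDR_MODE(misc) == MCI_MISC_ADDR_PHYS;
}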

/* MCi_ADDR register defines */
#define MCI_ADDR_PHYSADDR	GENMASK_ULL(boot_cpu_data.x86_phys_bits - 1, 0)

/* CTL2 register defines */
#define MCI_CTL2_CMCI_EN		BIT_ULL(30)
#define MCI_CTL2_CMCI_THRESHOLD_MASK	0x7fffULL
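
/*
 * Illustrative sketch, not part of the upstream header: composing an
 * MCi_CTL2 value that enables CMCI with a given corrected-error threshold.
 * The helper name is hypothetical; the caller would write the result to the
 * bank's CTL2 MSR.
 */
static inline u64 mci_ctl2_example_cmci(u64 threshold)
{
	return MCI_CTL2_CMCI_EN | (threshold & MCI_CTL2_CMCI_THRESHOLD_MASK);
}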

#define MCJ_CTX_MASK		3
#define MCJ_CTX(flags)		((flags) & MCJ_CTX_MASK)
#define MCJ_CTX_RANDOM		0    /* inject context: random */
#define MCJ_CTX_PROCESS		0x1  /* inject context: process */
#define MCJ_CTX_IRQ		0x2  /* inject context: IRQ */
#define MCJ_NMI_BROADCAST	0x4  /* do NMI broadcasting */
#define MCJ_EXCEPTION		0x8  /* raise as exception */
#define MCJ_IRQ_BROADCAST	0x10 /* do IRQ broadcasting */

#define MCE_OVERFLOW	0	/* bit 0 in flags means overflow */

#define MCE_LOG_MIN_LEN		32U
#define MCE_LOG_SIGNATURE	"MACHINECHECK"

/* AMD Scalable MCA */
#define MSR_AMD64_SMCA_MC0_CTL		0xc0002000
#define MSR_AMD64_SMCA_MC0_STATUS	0xc0002001
#define MSR_AMD64_SMCA_MC0_ADDR		0xc0002002
#define MSR_AMD64_SMCA_MC0_MISC0	0xc0002003
#define MSR_AMD64_SMCA_MC0_CONFIG	0xc0002004
#define MSR_AMD64_SMCA_MC0_IPID		0xc0002005
#define MSR_AMD64_SMCA_MC0_SYND		0xc0002006
#define MSR_AMD64_SMCA_MC0_DESTAT	0xc0002008
#define MSR_AMD64_SMCA_MC0_DEADDR	0xc0002009
#define MSR_AMD64_SMCA_MC0_MISC1	0xc000200a
#define MSR_AMD64_SMCA_MCx_CTL(x)	(MSR_AMD64_SMCA_MC0_CTL + 0x10*(x))
#define MSR_AMD64_SMCA_MCx_STATUS(x)	(MSR_AMD64_SMCA_MC0_STATUS + 0x10*(x))
#define MSR_AMD64_SMCA_MCx_ADDR(x)	(MSR_AMD64_SMCA_MC0_ADDR + 0x10*(x))
#define MSR_AMD64_SMCA_MCx_MISC(x)	(MSR_AMD64_SMCA_MC0_MISC0 + 0x10*(x))
#define MSR_AMD64_SMCA_MCx_CONFIG(x)	(MSR_AMD64_SMCA_MC0_CONFIG + 0x10*(x))
#define MSR_AMD64_SMCA_MCx_IPID(x)	(MSR_AMD64_SMCA_MC0_IPID + 0x10*(x))
#define MSR_AMD64_SMCA_MCx_SYND(x)	(MSR_AMD64_SMCA_MC0_SYND + 0x10*(x))
#define MSR_AMD64_SMCA_MCx_DESTAT(x)	(MSR_AMD64_SMCA_MC0_DESTAT + 0x10*(x))
#define MSR_AMD64_SMCA_MCx_DEADDR(x)	(MSR_AMD64_SMCA_MC0_DEADDR + 0x10*(x))
#define MSR_AMD64_SMCA_MCx_MISCy(x, y)	((MSR_AMD64_SMCA_MC0_MISC1 + y) + (0x10*(x)))
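
/*
 * The SMCA MSRs are laid out in blocks of 16 per bank, so the per-bank
 * macros above simply add 0x10 * bank to the bank-0 address.  Worked
 * example (for illustration only):
 *
 *	MSR_AMD64_SMCA_MCx_STATUS(5)   = 0xc0002001 + 0x10 * 5       = 0xc0002051
 *	MSR_AMD64_SMCA_MCx_MISCy(5, 2) = (0xc000200a + 2) + 0x10 * 5 = 0xc000205c
 */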

#define XEC(x, mask)			(((x) >> 16) & mask)

/* mce.kflags flag bits for logging etc. */
#define	MCE_HANDLED_CEC		BIT_ULL(0)
#define	MCE_HANDLED_UC		BIT_ULL(1)
#define	MCE_HANDLED_EXTLOG	BIT_ULL(2)
#define	MCE_HANDLED_NFIT	BIT_ULL(3)
#define	MCE_HANDLED_EDAC	BIT_ULL(4)
#define	MCE_HANDLED_MCELOG	BIT_ULL(5)

/*
 * Indicates an MCE which has happened in kernel space but from
 * which the kernel can recover simply by executing fixup_exception()
 * so that an error is returned to the caller of the function that
 * hit the machine check.
 */
#define MCE_IN_KERNEL_RECOV	BIT_ULL(6)
/*
 * Indicates an MCE that happened in kernel space while copying data
 * from user space.  In this case fixup_exception() gets the kernel to
 * the error exit for the copy function.  The machine check handler can
 * then treat it like a fault taken in user mode.
 */
#define MCE_IN_KERNEL_COPYIN	BIT_ULL(7)

/*
 * This structure contains all data related to the MCE log.  It also
 * carries a signature to make it easier to find from external
 * debugging tools.  Each entry is only valid when its finished flag
 * is set.
 */
struct mce_log_buffer {
	char signature[12]; /* "MACHINECHECK" */
	unsigned len;	    /* = number of elements in .entry[] */
	unsigned next;
	unsigned flags;
	unsigned recordlen; /* length of struct mce */
	struct mce entry[];
};
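
/*
 * Illustrative sketch, not part of the upstream header: how an external
 * debugging tool that has located the buffer by its signature might walk
 * the entries.  All names other than the structure members above are
 * hypothetical:
 *
 *	struct mce_log_buffer *mlb = find_by_signature(MCE_LOG_SIGNATURE);
 *	unsigned i;
 *
 *	for (i = 0; i < mlb->next && i < mlb->len; i++)
 *		dump_mce_record(&mlb->entry[i]);
 */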

/* Highest last */
enum mce_notifier_prios {
	MCE_PRIO_LOWEST,
	MCE_PRIO_MCELOG,
	MCE_PRIO_EDAC,
	MCE_PRIO_NFIT,
	MCE_PRIO_EXTLOG,
	MCE_PRIO_UC,
	MCE_PRIO_EARLY,
	MCE_PRIO_CEC,
	MCE_PRIO_HIGHEST = MCE_PRIO_CEC
};

struct notifier_block;
extern void mce_register_decode_chain(struct notifier_block *nb);
extern void mce_unregister_decode_chain(struct notifier_block *nb);
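
/*
 * Illustrative sketch, not part of the upstream header: a decoder would
 * typically hang itself onto the chain with one of the MCE_PRIO_* values
 * above.  Assuming the caller includes <linux/notifier.h>; all "my_*"
 * names are hypothetical:
 *
 *	static int my_decoder_notify(struct notifier_block *nb,
 *				     unsigned long val, void *data)
 *	{
 *		struct mce *m = data;
 *
 *		... decode *m, set a bit in m->kflags if handled ...
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_decoder_nb = {
 *		.notifier_call	= my_decoder_notify,
 *		.priority	= MCE_PRIO_EDAC,
 *	};
 *
 *	mce_register_decode_chain(&my_decoder_nb);
 */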

#include <linux/percpu.h>
#include <linux/atomic.h>

extern int mce_p5_enabled;

#ifdef CONFIG_ARCH_HAS_COPY_MC
extern void enable_copy_mc_fragile(void);
unsigned long __must_check copy_mc_fragile(void *dst, const void *src, unsigned cnt);
#else
static inline void enable_copy_mc_fragile(void)
{
}
#endif

struct cper_ia_proc_ctx;

#ifdef CONFIG_X86_MCE
int mcheck_init(void);
void mcheck_cpu_init(struct cpuinfo_x86 *c);
void mcheck_cpu_clear(struct cpuinfo_x86 *c);
int apei_smca_report_x86_error(struct cper_ia_proc_ctx *ctx_info,
			       u64 lapic_id);
#else
static inline int mcheck_init(void) { return 0; }
static inline void mcheck_cpu_init(struct cpuinfo_x86 *c) {}
static inline void mcheck_cpu_clear(struct cpuinfo_x86 *c) {}
static inline int apei_smca_report_x86_error(struct cper_ia_proc_ctx *ctx_info,
					     u64 lapic_id) { return -EINVAL; }
#endif

void mce_setup(struct mce *m);
void mce_log(struct mce *m);
DECLARE_PER_CPU(struct device *, mce_device);

/* Maximum number of MCA banks per CPU. */
#define MAX_NR_BANKS 64

#ifdef CONFIG_X86_MCE_INTEL
void mce_intel_feature_init(struct cpuinfo_x86 *c);
void mce_intel_feature_clear(struct cpuinfo_x86 *c);
void cmci_clear(void);
void cmci_reenable(void);
void cmci_rediscover(void);
void cmci_recheck(void);
#else
static inline void mce_intel_feature_init(struct cpuinfo_x86 *c) { }
static inline void mce_intel_feature_clear(struct cpuinfo_x86 *c) { }
static inline void cmci_clear(void) {}
static inline void cmci_reenable(void) {}
static inline void cmci_rediscover(void) {}
static inline void cmci_recheck(void) {}
#endif

int mce_available(struct cpuinfo_x86 *c);
bool mce_is_memory_error(struct mce *m);
bool mce_is_correctable(struct mce *m);
int mce_usable_address(struct mce *m);

DECLARE_PER_CPU(unsigned, mce_exception_count);
DECLARE_PER_CPU(unsigned, mce_poll_count);

typedef DECLARE_BITMAP(mce_banks_t, MAX_NR_BANKS);
DECLARE_PER_CPU(mce_banks_t, mce_poll_banks);

enum mcp_flags {
	MCP_TIMESTAMP	= BIT(0),	/* log time stamp */
	MCP_UC		= BIT(1),	/* log uncorrected errors */
	MCP_DONTLOG	= BIT(2),	/* only clear, don't log */
	MCP_QUEUE_LOG	= BIT(3),	/* only queue to genpool */
};
bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b);

int mce_notify_irq(void);

DECLARE_PER_CPU(struct mce, injectm);

/* Disable CMCI/polling for MCA bank claimed by firmware */
extern void mce_disable_bank(int bank);

/*
 * Exception handler
 */
void do_machine_check(struct pt_regs *pt_regs);

/*
 * Threshold handler
 */
extern void (*mce_threshold_vector)(void);

/* Deferred error interrupt handler */
extern void (*deferred_error_int_vector)(void);

/*
 * Used by APEI to report memory error via /dev/mcelog
 */

struct cper_sec_mem_err;
extern void apei_mce_report_mem_error(int corrected,
				      struct cper_sec_mem_err *mem_err);

/*
 * Enumerate new IP types and HWID values in AMD processors which support
 * Scalable MCA.
 */
#ifdef CONFIG_X86_MCE_AMD

/* These may be used by multiple smca_hwid_mcatypes */
enum smca_bank_types {
	SMCA_LS = 0,	/* Load Store */
	SMCA_LS_V2,
	SMCA_IF,	/* Instruction Fetch */
	SMCA_L2_CACHE,	/* L2 Cache */
	SMCA_DE,	/* Decoder Unit */
	SMCA_RESERVED,	/* Reserved */
	SMCA_EX,	/* Execution Unit */
	SMCA_FP,	/* Floating Point */
	SMCA_L3_CACHE,	/* L3 Cache */
	SMCA_CS,	/* Coherent Slave */
	SMCA_CS_V2,
	SMCA_PIE,	/* Power, Interrupts, etc. */
	SMCA_UMC,	/* Unified Memory Controller */
	SMCA_UMC_V2,
	SMCA_PB,	/* Parameter Block */
	SMCA_PSP,	/* Platform Security Processor */
	SMCA_PSP_V2,
	SMCA_SMU,	/* System Management Unit */
	SMCA_SMU_V2,
	SMCA_MP5,	/* Microprocessor 5 Unit */
	SMCA_MPDMA,	/* MPDMA Unit */
	SMCA_NBIO,	/* Northbridge IO Unit */
	SMCA_PCIE,	/* PCI Express Unit */
	SMCA_PCIE_V2,
	SMCA_XGMI_PCS,	/* xGMI PCS Unit */
	SMCA_NBIF,	/* NBIF Unit */
	SMCA_SHUB,	/* System HUB Unit */
	SMCA_SATA,	/* SATA Unit */
	SMCA_USB,	/* USB Unit */
	SMCA_GMI_PCS,	/* GMI PCS Unit */
	SMCA_XGMI_PHY,	/* xGMI PHY Unit */
	SMCA_WAFL_PHY,	/* WAFL PHY Unit */
	SMCA_GMI_PHY,	/* GMI PHY Unit */
	N_SMCA_BANK_TYPES
};

extern const char *smca_get_long_name(enum smca_bank_types t);
extern bool amd_mce_is_memory_error(struct mce *m);

extern int mce_threshold_create_device(unsigned int cpu);
extern int mce_threshold_remove_device(unsigned int cpu);

void mce_amd_feature_init(struct cpuinfo_x86 *c);
enum smca_bank_types smca_get_bank_type(unsigned int cpu, unsigned int bank);
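
/*
 * Illustrative sketch, not part of the upstream header: a decoder might map
 * a reporting bank to its SMCA type and printable name.  Everything other
 * than the functions declared above and the struct mce members is
 * hypothetical:
 *
 *	enum smca_bank_types type = smca_get_bank_type(m->extcpu, m->bank);
 *
 *	if (type == SMCA_UMC || type == SMCA_UMC_V2)
 *		pr_info("memory controller error: %s\n", smca_get_long_name(type));
 */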
#else

static inline int mce_threshold_create_device(unsigned int cpu)	{ return 0; };
static inline int mce_threshold_remove_device(unsigned int cpu)	{ return 0; };
static inline bool amd_mce_is_memory_error(struct mce *m)		{ return false; };
static inline void mce_amd_feature_init(struct cpuinfo_x86 *c)		{ }
#endif

static inline void mce_hygon_feature_init(struct cpuinfo_x86 *c)	{ return mce_amd_feature_init(c); }

unsigned long copy_mc_fragile_handle_tail(char *to, char *from, unsigned len);

#endif /* _ASM_X86_MCE_H */