/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __X86_MCE_INTERNAL_H__
#define __X86_MCE_INTERNAL_H__

#undef pr_fmt
#define pr_fmt(fmt) "mce: " fmt

#include <linux/device.h>
#include <asm/mce.h>

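/*
 * Severity grades, in increasing order of seriousness; the severity
 * grading code compares these values numerically, so the ordering
 * matters. MCE_UCNA_SEVERITY deliberately aliases MCE_DEFERRED_SEVERITY:
 * Intel UCNA and AMD deferred errors are graded the same way.
 */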
enum severity_level {
	MCE_NO_SEVERITY,
	MCE_DEFERRED_SEVERITY,
	MCE_UCNA_SEVERITY = MCE_DEFERRED_SEVERITY,
	MCE_KEEP_SEVERITY,
	MCE_SOME_SEVERITY,
	MCE_AO_SEVERITY,
	MCE_UC_SEVERITY,
	MCE_AR_SEVERITY,
	MCE_PANIC_SEVERITY,
};

extern struct blocking_notifier_head x86_mce_decoder_chain;

#define INITIAL_CHECK_INTERVAL	(5 * 60) /* 5 minutes */

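/* One MCE record queued on the lock-less (llist-based) event pool. */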
struct mce_evt_llist {
	struct llist_node llnode;
	struct mce mce;
};

void mce_gen_pool_process(struct work_struct *__unused);
bool mce_gen_pool_empty(void);
int mce_gen_pool_add(struct mce *mce);
int mce_gen_pool_init(void);
struct llist_node *mce_gen_pool_prepare_records(void);

int mce_severity(struct mce *a, struct pt_regs *regs, char **msg, bool is_excp);
struct dentry *mce_get_debugfs_dir(void);

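/* Banks for which corrected-error (CMCI) reporting has been disabled. */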
extern mce_banks_t mce_banks_ce_disabled;

#ifdef CONFIG_X86_MCE_INTEL
unsigned long cmci_intel_adjust_timer(unsigned long interval);
bool mce_intel_cmci_poll(void);
void mce_intel_hcpu_update(unsigned long cpu);
void cmci_disable_bank(int bank);
void intel_init_cmci(void);
void intel_init_lmce(void);
void intel_clear_lmce(void);
bool intel_filter_mce(struct mce *m);
#else
# define cmci_intel_adjust_timer mce_adjust_timer_default
static inline bool mce_intel_cmci_poll(void) { return false; }
static inline void mce_intel_hcpu_update(unsigned long cpu) { }
static inline void cmci_disable_bank(int bank) { }
static inline void intel_init_cmci(void) { }
static inline void intel_init_lmce(void) { }
static inline void intel_clear_lmce(void) { }
static inline bool intel_filter_mce(struct mce *m) { return false; }
#endif

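/* Re-arm the per-CPU MCE poll timer so it fires within @interval from now. */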
void mce_timer_kick(unsigned long interval);

#ifdef CONFIG_ACPI_APEI
int apei_write_mce(struct mce *m);
ssize_t apei_read_mce(struct mce *m, u64 *record_id);
int apei_check_mce(void);
int apei_clear_mce(u64 record_id);
#else
static inline int apei_write_mce(struct mce *m)
{
	return -EINVAL;
}
static inline ssize_t apei_read_mce(struct mce *m, u64 *record_id)
{
	return 0;
}
static inline int apei_check_mce(void)
{
	return 0;
}
static inline int apei_clear_mce(u64 record_id)
{
	return -EINVAL;
}
#endif

/*
 * We consider records to be equivalent if bank+status+addr+misc all match.
 * This is only used when the system is going down because of a fatal error
 * to avoid cluttering the console log with essentially repeated information.
 * In normal processing all errors seen are logged.
 *
 * Note the inverted sense: mce_cmp() returns true when the records differ.
 */
static inline bool mce_cmp(struct mce *m1, struct mce *m2)
{
	return m1->bank != m2->bank ||
		m1->status != m2->status ||
		m1->addr != m2->addr ||
		m1->misc != m2->misc;
}
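
/*
 * Illustrative use at panic time (a sketch, not lifted from the panic path):
 * skip a record that matches the one already printed:
 *
 *	if (last && !mce_cmp(m, last))
 *		continue;	// duplicate of the previous record
 */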

extern struct device_attribute dev_attr_trigger;

#ifdef CONFIG_X86_MCELOG_LEGACY
void mce_work_trigger(void);
void mce_register_injector_chain(struct notifier_block *nb);
void mce_unregister_injector_chain(struct notifier_block *nb);
#else
static inline void mce_work_trigger(void)	{ }
static inline void mce_register_injector_chain(struct notifier_block *nb)	{ }
static inline void mce_unregister_injector_chain(struct notifier_block *nb)	{ }
#endif


struct mca_config {
	__u64 lmce_disabled		: 1,
	      disabled			: 1,
	      /* Software error recovery supported (IA32_MCG_CAP.SER_P) */
	      ser			: 1,
	      recovery			: 1,
	      bios_cmci_threshold	: 1,
	      /* Proper #MC exception handler is set */
	      initialized		: 1,
	      __reserved		: 58;

	bool dont_log_ce;
	bool cmci_disabled;
	bool ignore_ce;
	bool print_all;

	int monarch_timeout;
	int panic_timeout;
	u32 rip_msr;
	s8 bootlog;
};

extern struct mca_config mca_cfg;
DECLARE_PER_CPU_READ_MOSTLY(unsigned int, mce_num_banks);

struct mce_vendor_flags {
	/*
	 * When set, indicates that overflow conditions are not fatal.
	 */
	__u64 overflow_recov	: 1,

	/*
	 * (AMD) SUCCOR stands for S/W UnCorrectable error COntainment and
	 * Recovery. It indicates support for data poisoning in HW and deferred
	 * error interrupts.
	 */
	succor			: 1,

	/*
	 * (AMD) SMCA: This bit indicates support for Scalable MCA, which
	 * expands the register space for each MCA bank and also increases the
	 * number of banks. To accommodate the new banks and registers, the MCA
	 * register space is moved to a new MSR range (see mca_msr_reg() below).
	 */
	smca			: 1,

	/* AMD-style error thresholding banks present. */
	amd_threshold		: 1,

	/* Pentium, family 5-style MCA */
	p5			: 1,

	/* Centaur Winchip C6-style MCA */
	winchip			: 1,

	/* SandyBridge IFU quirk */
	snb_ifu_quirk		: 1,

	/* Skylake, Cascade Lake, Cooper Lake REP;MOVS* quirk */
	skx_repmov_quirk	: 1,

	__reserved_0		: 56;
};

extern struct mce_vendor_flags mce_flags;

enum mca_msr {
	MCA_CTL,
	MCA_STATUS,
	MCA_ADDR,
	MCA_MISC,
};

/* Decide whether to add an MCE record to the MCE event pool or to filter it out. */
extern bool filter_mce(struct mce *m);

#ifdef CONFIG_X86_MCE_AMD
extern bool amd_filter_mce(struct mce *m);
#else
static inline bool amd_filter_mce(struct mce *m) { return false; }
#endif

#ifdef CONFIG_X86_ANCIENT_MCE
void intel_p5_mcheck_init(struct cpuinfo_x86 *c);
void winchip_mcheck_init(struct cpuinfo_x86 *c);
noinstr void pentium_machine_check(struct pt_regs *regs);
noinstr void winchip_machine_check(struct pt_regs *regs);
static inline void enable_p5_mce(void) { mce_p5_enabled = 1; }
#else
static inline void intel_p5_mcheck_init(struct cpuinfo_x86 *c) {}
static inline void winchip_mcheck_init(struct cpuinfo_x86 *c) {}
static inline void enable_p5_mce(void) {}
static inline void pentium_machine_check(struct pt_regs *regs) {}
static inline void winchip_machine_check(struct pt_regs *regs) {}
#endif

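/* MSR read wrapper that is safe to call from machine check context (noinstr). */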
noinstr u64 mce_rdmsrl(u32 msr);

static __always_inline u32 mca_msr_reg(int bank, enum mca_msr reg)
{
	if (mce_flags.smca) {
		switch (reg) {
		case MCA_CTL:	 return MSR_AMD64_SMCA_MCx_CTL(bank);
		case MCA_ADDR:	 return MSR_AMD64_SMCA_MCx_ADDR(bank);
		case MCA_MISC:	 return MSR_AMD64_SMCA_MCx_MISC(bank);
		case MCA_STATUS: return MSR_AMD64_SMCA_MCx_STATUS(bank);
		}
	}

	switch (reg) {
	case MCA_CTL:	 return MSR_IA32_MCx_CTL(bank);
	case MCA_ADDR:	 return MSR_IA32_MCx_ADDR(bank);
	case MCA_MISC:	 return MSR_IA32_MCx_MISC(bank);
	case MCA_STATUS: return MSR_IA32_MCx_STATUS(bank);
	}

	return 0;
}
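
/*
 * Illustrative use (a sketch, assuming a valid bank index): read a bank's
 * status register without caring whether the CPU uses the legacy or the
 * SMCA MSR layout:
 *
 *	u64 status = mce_rdmsrl(mca_msr_reg(bank, MCA_STATUS));
 */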

#endif /* __X86_MCE_INTERNAL_H__ */