// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2003, 2004, 2007 Maciej W. Rozycki
 */
#include <linux/context_tracking.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/stddef.h>

#include <asm/bugs.h>
#include <asm/compiler.h>
#include <asm/cpu.h>
#include <asm/fpu.h>
#include <asm/mipsregs.h>
#include <asm/setup.h>

static char bug64hit[] __initdata =
        "reliable operation impossible!\n%s";
static char nowar[] __initdata =
        "Please report to <linux-mips@vger.kernel.org>.";
static char r4kwar[] __initdata =
        "Enable CPU_R4000_WORKAROUNDS to rectify.";
static char daddiwar[] __initdata =
        "Enable CPU_DADDI_WORKAROUNDS to rectify.";

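/*
 * Helper note (descriptive comment, not from the errata text): align_mod()
 * aligns the code following the inline asm to an "align"-byte boundary and
 * then emits "mod" NOPs (4 bytes each), so the caller can place the next
 * instructions at a chosen offset within an I-cache line.
 */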
static __always_inline __init
void align_mod(const int align, const int mod)
{
        asm volatile(
                ".set push\n\t"
                ".set noreorder\n\t"
                ".balign %0\n\t"
                ".rept %1\n\t"
                "nop\n\t"
                ".endr\n\t"
                ".set pop"
                :
                : "n"(align), "n"(mod));
}

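/*
 * Descriptive note: one probe at the requested cache-line offset.  As the
 * asm below is written, *v1 gets the result of the dsll32 issued right
 * after the mult (the sequence the erratum hits), *w gets the same shift
 * issued after hi/lo has been read back (a known-good reference), and *v2
 * gets the value computed by compiler-generated code, exercising whatever
 * workaround gcc applies.
 */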
static __always_inline __init
void mult_sh_align_mod(long *v1, long *v2, long *w,
                       const int align, const int mod)
{
        unsigned long flags;
        int m1, m2;
        long p, s, lv1, lv2, lw;

        /*
         * We want the multiply and the shift to be isolated from the
         * rest of the code to disable gcc optimizations.  Hence the
         * asm statements that execute nothing, but keep gcc from
         * knowing what the values of m1, m2 and s are and what lv2
         * and p are used for.
         */

        local_irq_save(flags);
        /*
         * The following code leads to a wrong result of the first
         * dsll32 when executed on R4000 rev. 2.2 or 3.0 (PRId
         * 00000422 or 00000430, respectively).
         *
         * See "MIPS R4000PC/SC Errata, Processor Revision 2.2 and
         * 3.0" by MIPS Technologies, Inc., errata #16 and #28 for
         * details.  I got no permission to duplicate them here,
         * sigh... --macro
         */
        asm volatile(
                ""
                : "=r" (m1), "=r" (m2), "=r" (s)
                : "0" (5), "1" (8), "2" (5));
        align_mod(align, mod);
        /*
         * The trailing nop is needed to fulfill the two-instruction
         * requirement between reading hi/lo and starting a mult/div.
         * Leaving it out may cause gas to insert a nop itself,
         * breaking the desired alignment of the next chunk.
         */
        asm volatile(
                ".set push\n\t"
                ".set noat\n\t"
                ".set noreorder\n\t"
                ".set nomacro\n\t"
                "mult %2, %3\n\t"
                "dsll32 %0, %4, %5\n\t"
                "mflo $0\n\t"
                "dsll32 %1, %4, %5\n\t"
                "nop\n\t"
                ".set pop"
                : "=&r" (lv1), "=r" (lw)
                : "r" (m1), "r" (m2), "r" (s), "I" (0)
                : "hi", "lo", "$0");
        /*
         * We have to use single integers for m1 and m2 and a double
         * one for p to be sure gcc's mulsidi3 RTL multiplication
         * instruction has the workaround applied.  Older versions of
         * gcc have correct umulsi3 and mulsi3, but other
         * multiplication variants lack the workaround.
         */
        asm volatile(
                ""
                : "=r" (m1), "=r" (m2), "=r" (s)
                : "0" (m1), "1" (m2), "2" (s));
        align_mod(align, mod);
        p = m1 * m2;
        lv2 = s << 32;
        asm volatile(
                ""
                : "=r" (lv2)
                : "0" (lv2), "r" (p));
        local_irq_restore(flags);

        *v1 = lv1;
        *v2 = lv2;
        *w = lw;
}

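/*
 * Descriptive note: eight probes with 4-byte NOP padding cover every
 * instruction offset (0..28 bytes) within a 32-byte I-cache line.  A
 * mismatch between v1[] and w[] means the erratum triggered; if v2[]
 * still matches w[], the compiler-generated multiply/shift (i.e. the
 * workaround, when enabled) is producing correct results.
 */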
static __always_inline __init void check_mult_sh(void)
{
        long v1[8], v2[8], w[8];
        int bug, fix, i;

        printk("Checking for the multiply/shift bug... ");

        /*
         * Testing discovered false negatives for certain code offsets
         * into cache lines.  Hence we test all possible offsets for
         * the worst assumption of an R4000 I-cache line width of 32
         * bytes.
         *
         * We can't use a loop as alignment directives need to be
         * immediates.
         */
        mult_sh_align_mod(&v1[0], &v2[0], &w[0], 32, 0);
        mult_sh_align_mod(&v1[1], &v2[1], &w[1], 32, 1);
        mult_sh_align_mod(&v1[2], &v2[2], &w[2], 32, 2);
        mult_sh_align_mod(&v1[3], &v2[3], &w[3], 32, 3);
        mult_sh_align_mod(&v1[4], &v2[4], &w[4], 32, 4);
        mult_sh_align_mod(&v1[5], &v2[5], &w[5], 32, 5);
        mult_sh_align_mod(&v1[6], &v2[6], &w[6], 32, 6);
        mult_sh_align_mod(&v1[7], &v2[7], &w[7], 32, 7);

        bug = 0;
        for (i = 0; i < 8; i++)
                if (v1[i] != w[i])
                        bug = 1;

        if (bug == 0) {
                pr_cont("no.\n");
                return;
        }

        pr_cont("yes, workaround... ");

        fix = 1;
        for (i = 0; i < 8; i++)
                if (v2[i] != w[i])
                        fix = 0;

        if (fix == 1) {
                pr_cont("yes.\n");
                return;
        }

        pr_cont("no.\n");
        panic(bug64hit,
              IS_ENABLED(CONFIG_CPU_R4000_WORKAROUNDS) ? nowar : r4kwar);
}

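/*
 * Descriptive note: do_daddi_ov() is installed as the EXCCODE_OV handler
 * for the duration of the daddi probe; it records that the overflow
 * exception fired and advances EPC past the 4-byte daddi so execution
 * resumes after the trapping instruction.
 */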
static volatile int daddi_ov;

asmlinkage void __init do_daddi_ov(struct pt_regs *regs)
{
        enum ctx_state prev_state;

        prev_state = exception_enter();
        daddi_ov = 1;
        regs->cp0_epc += 4;
        exception_exit(prev_state);
}

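/*
 * Worked example of the operands used below (my arithmetic, not taken
 * from the errata): addiu sign-extends 0xdb9a to 0xffffffffffffdb9a,
 * dsrl by 1 gives 0x7fffffffffffedcd, and adding 0x1234 yields
 * 0x8000000000000001, overflowing a signed 64-bit value.  A correct CPU
 * therefore raises the integer overflow exception that do_daddi_ov()
 * records; an affected one fails to.  If the raw daddi does not trap,
 * the probe is repeated without the .set directives, presumably so gas
 * may expand daddi with its own workaround, which must then trap.
 */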
static __init void check_daddi(void)
{
        extern asmlinkage void handle_daddi_ov(void);
        unsigned long flags;
        void *handler;
        long v, tmp;

        printk("Checking for the daddi bug... ");

        local_irq_save(flags);
        handler = set_except_vector(EXCCODE_OV, handle_daddi_ov);
        /*
         * The following code fails to trigger an overflow exception
         * when executed on R4000 rev. 2.2 or 3.0 (PRId 00000422 or
         * 00000430, respectively).
         *
         * See "MIPS R4000PC/SC Errata, Processor Revision 2.2 and
         * 3.0" by MIPS Technologies, Inc., erratum #23 for details.
         * I got no permission to duplicate it here, sigh... --macro
         */
        asm volatile(
                ".set push\n\t"
                ".set noat\n\t"
                ".set noreorder\n\t"
                ".set nomacro\n\t"
                "addiu %1, $0, %2\n\t"
                "dsrl %1, %1, 1\n\t"
#ifdef HAVE_AS_SET_DADDI
                ".set daddi\n\t"
#endif
                "daddi %0, %1, %3\n\t"
                ".set pop"
                : "=r" (v), "=&r" (tmp)
                : "I" (0xffffffffffffdb9aUL), "I" (0x1234));
        set_except_vector(EXCCODE_OV, handler);
        local_irq_restore(flags);

        if (daddi_ov) {
                pr_cont("no.\n");
                return;
        }

        pr_cont("yes, workaround... ");

        local_irq_save(flags);
        handler = set_except_vector(EXCCODE_OV, handle_daddi_ov);
        asm volatile(
                "addiu %1, $0, %2\n\t"
                "dsrl %1, %1, 1\n\t"
                "daddi %0, %1, %3"
                : "=r" (v), "=&r" (tmp)
                : "I" (0xffffffffffffdb9aUL), "I" (0x1234));
        set_except_vector(EXCCODE_OV, handler);
        local_irq_restore(flags);

        if (daddi_ov) {
                pr_cont("yes.\n");
                return;
        }

        pr_cont("no.\n");
        panic(bug64hit,
              IS_ENABLED(CONFIG_CPU_DADDI_WORKAROUNDS) ? nowar : daddiwar);
}

int daddiu_bug = -1;

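/*
 * Descriptive note: check_daddiu() uses the same operands as
 * check_daddi(), but daddiu never traps, so the daddiu result is
 * compared against a reference computed with addiu plus daddu, which
 * the erratum does not affect.  daddiu_bug above is initialised to -1
 * ("not probed yet") and left non-static, presumably so code elsewhere
 * can check whether the workaround is needed.
 */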
static __init void check_daddiu(void)
{
        long v, w, tmp;

        printk("Checking for the daddiu bug... ");

        /*
         * The following code leads to a wrong result of daddiu when
         * executed on R4400 rev. 1.0 (PRId 00000440).
         *
         * See "MIPS R4400PC/SC Errata, Processor Revision 1.0" by
         * MIPS Technologies, Inc., erratum #7 for details.
         *
         * According to "MIPS R4000PC/SC Errata, Processor Revision
         * 2.2 and 3.0" by MIPS Technologies, Inc., erratum #41 this
         * problem affects R4000 rev. 2.2 and 3.0 (PRId 00000422 and
         * 00000430, respectively), too.  Testing failed to trigger it
         * so far.
         *
         * I got no permission to duplicate the errata here, sigh...
         * --macro
         */
        asm volatile(
                ".set push\n\t"
                ".set noat\n\t"
                ".set noreorder\n\t"
                ".set nomacro\n\t"
                "addiu %2, $0, %3\n\t"
                "dsrl %2, %2, 1\n\t"
#ifdef HAVE_AS_SET_DADDI
                ".set daddi\n\t"
#endif
                "daddiu %0, %2, %4\n\t"
                "addiu %1, $0, %4\n\t"
                "daddu %1, %2\n\t"
                ".set pop"
                : "=&r" (v), "=&r" (w), "=&r" (tmp)
                : "I" (0xffffffffffffdb9aUL), "I" (0x1234));

        daddiu_bug = v != w;

        if (!daddiu_bug) {
                pr_cont("no.\n");
                return;
        }

        pr_cont("yes, workaround... ");

        asm volatile(
                "addiu %2, $0, %3\n\t"
                "dsrl %2, %2, 1\n\t"
                "daddiu %0, %2, %4\n\t"
                "addiu %1, $0, %4\n\t"
                "daddu %1, %2"
                : "=&r" (v), "=&r" (w), "=&r" (tmp)
                : "I" (0xffffffffffffdb9aUL), "I" (0x1234));

        if (v == w) {
                pr_cont("yes.\n");
                return;
        }

        pr_cont("no.\n");
        panic(bug64hit,
              IS_ENABLED(CONFIG_CPU_DADDI_WORKAROUNDS) ? nowar : daddiwar);
}

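/*
 * Note on ordering: the mult/shift and daddiu probes run from
 * check_bugs64_early(), while the daddi probe is deferred to
 * check_bugs64(), presumably because it needs set_except_vector() and
 * therefore a working exception vector, which is only available later
 * in the boot sequence.
 */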
void __init check_bugs64_early(void)
{
        check_mult_sh();
        check_daddiu();
}

void __init check_bugs64(void)
{
        check_daddi();
}