1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Single-step support.
4  *
5  * Copyright (C) 2004 Paul Mackerras <paulus@au.ibm.com>, IBM
6  */
7 #include <linux/kernel.h>
8 #include <linux/kprobes.h>
9 #include <linux/ptrace.h>
10 #include <linux/prefetch.h>
11 #include <asm/sstep.h>
12 #include <asm/processor.h>
13 #include <linux/uaccess.h>
14 #include <asm/cpu_has_feature.h>
15 #include <asm/cputable.h>
16 #include <asm/disassemble.h>
17 
18 #ifdef CONFIG_PPC64
19 /* Bits in SRR1 that are copied from MSR */
20 #define MSR_MASK	0xffffffff87c0ffffUL
21 #else
22 #define MSR_MASK	0x87c0ffff
23 #endif
24 
25 /* Bits in XER */
26 #define XER_SO		0x80000000U
27 #define XER_OV		0x40000000U
28 #define XER_CA		0x20000000U
29 #define XER_OV32	0x00080000U
30 #define XER_CA32	0x00040000U
31 
32 #ifdef CONFIG_VSX
33 #define VSX_REGISTER_XTP(rd)   ((((rd) & 1) << 5) | ((rd) & 0xfe))
34 #endif
35 
36 #ifdef CONFIG_PPC_FPU
37 /*
38  * Functions in ldstfp.S
39  */
40 extern void get_fpr(int rn, double *p);
41 extern void put_fpr(int rn, const double *p);
42 extern void get_vr(int rn, __vector128 *p);
43 extern void put_vr(int rn, __vector128 *p);
44 extern void load_vsrn(int vsr, const void *p);
45 extern void store_vsrn(int vsr, void *p);
46 extern void conv_sp_to_dp(const float *sp, double *dp);
47 extern void conv_dp_to_sp(const double *dp, float *sp);
48 #endif
49 
50 #ifdef __powerpc64__
51 /*
52  * Functions in quad.S
53  */
54 extern int do_lq(unsigned long ea, unsigned long *regs);
55 extern int do_stq(unsigned long ea, unsigned long val0, unsigned long val1);
56 extern int do_lqarx(unsigned long ea, unsigned long *regs);
57 extern int do_stqcx(unsigned long ea, unsigned long val0, unsigned long val1,
58 		    unsigned int *crp);
59 #endif
60 
61 #ifdef __LITTLE_ENDIAN__
62 #define IS_LE	1
63 #define IS_BE	0
64 #else
65 #define IS_LE	0
66 #define IS_BE	1
67 #endif
68 
69 /*
70  * Emulate the truncation of 64 bit values in 32-bit mode.
71  */
truncate_if_32bit(unsigned long msr,unsigned long val)72 static nokprobe_inline unsigned long truncate_if_32bit(unsigned long msr,
73 							unsigned long val)
74 {
75 	if ((msr & MSR_64BIT) == 0)
76 		val &= 0xffffffffUL;
77 	return val;
78 }
79 
80 /*
81  * Determine whether a conditional branch instruction would branch.
82  */
branch_taken(unsigned int instr,const struct pt_regs * regs,struct instruction_op * op)83 static nokprobe_inline int branch_taken(unsigned int instr,
84 					const struct pt_regs *regs,
85 					struct instruction_op *op)
86 {
87 	unsigned int bo = (instr >> 21) & 0x1f;
88 	unsigned int bi;
89 
90 	if ((bo & 4) == 0) {
91 		/* decrement counter */
92 		op->type |= DECCTR;
93 		if (((bo >> 1) & 1) ^ (regs->ctr == 1))
94 			return 0;
95 	}
96 	if ((bo & 0x10) == 0) {
97 		/* check bit from CR */
98 		bi = (instr >> 16) & 0x1f;
99 		if (((regs->ccr >> (31 - bi)) & 1) != ((bo >> 3) & 1))
100 			return 0;
101 	}
102 	return 1;
103 }
104 
address_ok(struct pt_regs * regs,unsigned long ea,int nb)105 static nokprobe_inline long address_ok(struct pt_regs *regs,
106 				       unsigned long ea, int nb)
107 {
108 	if (!user_mode(regs))
109 		return 1;
110 	if (access_ok((void __user *)ea, nb))
111 		return 1;
112 	if (access_ok((void __user *)ea, 1))
113 		/* Access overlaps the end of the user region */
114 		regs->dar = TASK_SIZE_MAX - 1;
115 	else
116 		regs->dar = ea;
117 	return 0;
118 }
119 
120 /*
121  * Calculate effective address for a D-form instruction
122  */
dform_ea(unsigned int instr,const struct pt_regs * regs)123 static nokprobe_inline unsigned long dform_ea(unsigned int instr,
124 					      const struct pt_regs *regs)
125 {
126 	int ra;
127 	unsigned long ea;
128 
129 	ra = (instr >> 16) & 0x1f;
130 	ea = (signed short) instr;		/* sign-extend */
131 	if (ra)
132 		ea += regs->gpr[ra];
133 
134 	return ea;
135 }
136 
137 #ifdef __powerpc64__
138 /*
139  * Calculate effective address for a DS-form instruction
140  */
dsform_ea(unsigned int instr,const struct pt_regs * regs)141 static nokprobe_inline unsigned long dsform_ea(unsigned int instr,
142 					       const struct pt_regs *regs)
143 {
144 	int ra;
145 	unsigned long ea;
146 
147 	ra = (instr >> 16) & 0x1f;
148 	ea = (signed short) (instr & ~3);	/* sign-extend */
149 	if (ra)
150 		ea += regs->gpr[ra];
151 
152 	return ea;
153 }
154 
155 /*
156  * Calculate effective address for a DQ-form instruction
157  */
dqform_ea(unsigned int instr,const struct pt_regs * regs)158 static nokprobe_inline unsigned long dqform_ea(unsigned int instr,
159 					       const struct pt_regs *regs)
160 {
161 	int ra;
162 	unsigned long ea;
163 
164 	ra = (instr >> 16) & 0x1f;
165 	ea = (signed short) (instr & ~0xf);	/* sign-extend */
166 	if (ra)
167 		ea += regs->gpr[ra];
168 
169 	return ea;
170 }
171 #endif /* __powerpc64 */
172 
173 /*
174  * Calculate effective address for an X-form instruction
175  */
xform_ea(unsigned int instr,const struct pt_regs * regs)176 static nokprobe_inline unsigned long xform_ea(unsigned int instr,
177 					      const struct pt_regs *regs)
178 {
179 	int ra, rb;
180 	unsigned long ea;
181 
182 	ra = (instr >> 16) & 0x1f;
183 	rb = (instr >> 11) & 0x1f;
184 	ea = regs->gpr[rb];
185 	if (ra)
186 		ea += regs->gpr[ra];
187 
188 	return ea;
189 }
190 
191 /*
192  * Calculate effective address for a MLS:D-form / 8LS:D-form
193  * prefixed instruction
194  */
mlsd_8lsd_ea(unsigned int instr,unsigned int suffix,const struct pt_regs * regs)195 static nokprobe_inline unsigned long mlsd_8lsd_ea(unsigned int instr,
196 						  unsigned int suffix,
197 						  const struct pt_regs *regs)
198 {
199 	int ra, prefix_r;
200 	unsigned int  dd;
201 	unsigned long ea, d0, d1, d;
202 
203 	prefix_r = GET_PREFIX_R(instr);
204 	ra = GET_PREFIX_RA(suffix);
205 
206 	d0 = instr & 0x3ffff;
207 	d1 = suffix & 0xffff;
208 	d = (d0 << 16) | d1;
209 
210 	/*
211 	 * sign extend a 34 bit number
212 	 */
213 	dd = (unsigned int)(d >> 2);
214 	ea = (signed int)dd;
215 	ea = (ea << 2) | (d & 0x3);
216 
217 	if (!prefix_r && ra)
218 		ea += regs->gpr[ra];
219 	else if (!prefix_r && !ra)
220 		; /* Leave ea as is */
221 	else if (prefix_r)
222 		ea += regs->nip;
223 
224 	/*
225 	 * (prefix_r && ra) is an invalid form. Should already be
226 	 * checked for by caller!
227 	 */
228 
229 	return ea;
230 }
231 
232 /*
233  * Return the largest power of 2, not greater than sizeof(unsigned long),
234  * such that x is a multiple of it.
235  */
max_align(unsigned long x)236 static nokprobe_inline unsigned long max_align(unsigned long x)
237 {
238 	x |= sizeof(unsigned long);
239 	return x & -x;		/* isolates rightmost bit */
240 }
241 
byterev_2(unsigned long x)242 static nokprobe_inline unsigned long byterev_2(unsigned long x)
243 {
244 	return ((x >> 8) & 0xff) | ((x & 0xff) << 8);
245 }
246 
byterev_4(unsigned long x)247 static nokprobe_inline unsigned long byterev_4(unsigned long x)
248 {
249 	return ((x >> 24) & 0xff) | ((x >> 8) & 0xff00) |
250 		((x & 0xff00) << 8) | ((x & 0xff) << 24);
251 }
252 
253 #ifdef __powerpc64__
byterev_8(unsigned long x)254 static nokprobe_inline unsigned long byterev_8(unsigned long x)
255 {
256 	return (byterev_4(x) << 32) | byterev_4(x >> 32);
257 }
258 #endif
259 
do_byte_reverse(void * ptr,int nb)260 static nokprobe_inline void do_byte_reverse(void *ptr, int nb)
261 {
262 	switch (nb) {
263 	case 2:
264 		*(u16 *)ptr = byterev_2(*(u16 *)ptr);
265 		break;
266 	case 4:
267 		*(u32 *)ptr = byterev_4(*(u32 *)ptr);
268 		break;
269 #ifdef __powerpc64__
270 	case 8:
271 		*(unsigned long *)ptr = byterev_8(*(unsigned long *)ptr);
272 		break;
273 	case 16: {
274 		unsigned long *up = (unsigned long *)ptr;
275 		unsigned long tmp;
276 		tmp = byterev_8(up[0]);
277 		up[0] = byterev_8(up[1]);
278 		up[1] = tmp;
279 		break;
280 	}
281 	case 32: {
282 		unsigned long *up = (unsigned long *)ptr;
283 		unsigned long tmp;
284 
285 		tmp = byterev_8(up[0]);
286 		up[0] = byterev_8(up[3]);
287 		up[3] = tmp;
288 		tmp = byterev_8(up[2]);
289 		up[2] = byterev_8(up[1]);
290 		up[1] = tmp;
291 		break;
292 	}
293 
294 #endif
295 	default:
296 		WARN_ON_ONCE(1);
297 	}
298 }
299 
300 static __always_inline int
__read_mem_aligned(unsigned long * dest,unsigned long ea,int nb,struct pt_regs * regs)301 __read_mem_aligned(unsigned long *dest, unsigned long ea, int nb, struct pt_regs *regs)
302 {
303 	unsigned long x = 0;
304 
305 	switch (nb) {
306 	case 1:
307 		unsafe_get_user(x, (unsigned char __user *)ea, Efault);
308 		break;
309 	case 2:
310 		unsafe_get_user(x, (unsigned short __user *)ea, Efault);
311 		break;
312 	case 4:
313 		unsafe_get_user(x, (unsigned int __user *)ea, Efault);
314 		break;
315 #ifdef __powerpc64__
316 	case 8:
317 		unsafe_get_user(x, (unsigned long __user *)ea, Efault);
318 		break;
319 #endif
320 	}
321 	*dest = x;
322 	return 0;
323 
324 Efault:
325 	regs->dar = ea;
326 	return -EFAULT;
327 }
328 
329 static nokprobe_inline int
read_mem_aligned(unsigned long * dest,unsigned long ea,int nb,struct pt_regs * regs)330 read_mem_aligned(unsigned long *dest, unsigned long ea, int nb, struct pt_regs *regs)
331 {
332 	int err;
333 
334 	if (is_kernel_addr(ea))
335 		return __read_mem_aligned(dest, ea, nb, regs);
336 
337 	if (user_read_access_begin((void __user *)ea, nb)) {
338 		err = __read_mem_aligned(dest, ea, nb, regs);
339 		user_read_access_end();
340 	} else {
341 		err = -EFAULT;
342 		regs->dar = ea;
343 	}
344 
345 	return err;
346 }
347 
348 /*
349  * Copy from userspace to a buffer, using the largest possible
350  * aligned accesses, up to sizeof(long).
351  */
__copy_mem_in(u8 * dest,unsigned long ea,int nb,struct pt_regs * regs)352 static __always_inline int __copy_mem_in(u8 *dest, unsigned long ea, int nb, struct pt_regs *regs)
353 {
354 	int c;
355 
356 	for (; nb > 0; nb -= c) {
357 		c = max_align(ea);
358 		if (c > nb)
359 			c = max_align(nb);
360 		switch (c) {
361 		case 1:
362 			unsafe_get_user(*dest, (u8 __user *)ea, Efault);
363 			break;
364 		case 2:
365 			unsafe_get_user(*(u16 *)dest, (u16 __user *)ea, Efault);
366 			break;
367 		case 4:
368 			unsafe_get_user(*(u32 *)dest, (u32 __user *)ea, Efault);
369 			break;
370 #ifdef __powerpc64__
371 		case 8:
372 			unsafe_get_user(*(u64 *)dest, (u64 __user *)ea, Efault);
373 			break;
374 #endif
375 		}
376 		dest += c;
377 		ea += c;
378 	}
379 	return 0;
380 
381 Efault:
382 	regs->dar = ea;
383 	return -EFAULT;
384 }
385 
copy_mem_in(u8 * dest,unsigned long ea,int nb,struct pt_regs * regs)386 static nokprobe_inline int copy_mem_in(u8 *dest, unsigned long ea, int nb, struct pt_regs *regs)
387 {
388 	int err;
389 
390 	if (is_kernel_addr(ea))
391 		return __copy_mem_in(dest, ea, nb, regs);
392 
393 	if (user_read_access_begin((void __user *)ea, nb)) {
394 		err = __copy_mem_in(dest, ea, nb, regs);
395 		user_read_access_end();
396 	} else {
397 		err = -EFAULT;
398 		regs->dar = ea;
399 	}
400 
401 	return err;
402 }
403 
read_mem_unaligned(unsigned long * dest,unsigned long ea,int nb,struct pt_regs * regs)404 static nokprobe_inline int read_mem_unaligned(unsigned long *dest,
405 					      unsigned long ea, int nb,
406 					      struct pt_regs *regs)
407 {
408 	union {
409 		unsigned long ul;
410 		u8 b[sizeof(unsigned long)];
411 	} u;
412 	int i;
413 	int err;
414 
415 	u.ul = 0;
416 	i = IS_BE ? sizeof(unsigned long) - nb : 0;
417 	err = copy_mem_in(&u.b[i], ea, nb, regs);
418 	if (!err)
419 		*dest = u.ul;
420 	return err;
421 }
422 
423 /*
424  * Read memory at address ea for nb bytes, return 0 for success
425  * or -EFAULT if an error occurred.  N.B. nb must be 1, 2, 4 or 8.
426  * If nb < sizeof(long), the result is right-justified on BE systems.
427  */
read_mem(unsigned long * dest,unsigned long ea,int nb,struct pt_regs * regs)428 static int read_mem(unsigned long *dest, unsigned long ea, int nb,
429 			      struct pt_regs *regs)
430 {
431 	if (!address_ok(regs, ea, nb))
432 		return -EFAULT;
433 	if ((ea & (nb - 1)) == 0)
434 		return read_mem_aligned(dest, ea, nb, regs);
435 	return read_mem_unaligned(dest, ea, nb, regs);
436 }
437 NOKPROBE_SYMBOL(read_mem);
438 
439 static __always_inline int
__write_mem_aligned(unsigned long val,unsigned long ea,int nb,struct pt_regs * regs)440 __write_mem_aligned(unsigned long val, unsigned long ea, int nb, struct pt_regs *regs)
441 {
442 	switch (nb) {
443 	case 1:
444 		unsafe_put_user(val, (unsigned char __user *)ea, Efault);
445 		break;
446 	case 2:
447 		unsafe_put_user(val, (unsigned short __user *)ea, Efault);
448 		break;
449 	case 4:
450 		unsafe_put_user(val, (unsigned int __user *)ea, Efault);
451 		break;
452 #ifdef __powerpc64__
453 	case 8:
454 		unsafe_put_user(val, (unsigned long __user *)ea, Efault);
455 		break;
456 #endif
457 	}
458 	return 0;
459 
460 Efault:
461 	regs->dar = ea;
462 	return -EFAULT;
463 }
464 
465 static nokprobe_inline int
write_mem_aligned(unsigned long val,unsigned long ea,int nb,struct pt_regs * regs)466 write_mem_aligned(unsigned long val, unsigned long ea, int nb, struct pt_regs *regs)
467 {
468 	int err;
469 
470 	if (is_kernel_addr(ea))
471 		return __write_mem_aligned(val, ea, nb, regs);
472 
473 	if (user_write_access_begin((void __user *)ea, nb)) {
474 		err = __write_mem_aligned(val, ea, nb, regs);
475 		user_write_access_end();
476 	} else {
477 		err = -EFAULT;
478 		regs->dar = ea;
479 	}
480 
481 	return err;
482 }
483 
484 /*
485  * Copy from a buffer to userspace, using the largest possible
486  * aligned accesses, up to sizeof(long).
487  */
__copy_mem_out(u8 * dest,unsigned long ea,int nb,struct pt_regs * regs)488 static nokprobe_inline int __copy_mem_out(u8 *dest, unsigned long ea, int nb, struct pt_regs *regs)
489 {
490 	int c;
491 
492 	for (; nb > 0; nb -= c) {
493 		c = max_align(ea);
494 		if (c > nb)
495 			c = max_align(nb);
496 		switch (c) {
497 		case 1:
498 			unsafe_put_user(*dest, (u8 __user *)ea, Efault);
499 			break;
500 		case 2:
501 			unsafe_put_user(*(u16 *)dest, (u16 __user *)ea, Efault);
502 			break;
503 		case 4:
504 			unsafe_put_user(*(u32 *)dest, (u32 __user *)ea, Efault);
505 			break;
506 #ifdef __powerpc64__
507 		case 8:
508 			unsafe_put_user(*(u64 *)dest, (u64 __user *)ea, Efault);
509 			break;
510 #endif
511 		}
512 		dest += c;
513 		ea += c;
514 	}
515 	return 0;
516 
517 Efault:
518 	regs->dar = ea;
519 	return -EFAULT;
520 }
521 
copy_mem_out(u8 * dest,unsigned long ea,int nb,struct pt_regs * regs)522 static nokprobe_inline int copy_mem_out(u8 *dest, unsigned long ea, int nb, struct pt_regs *regs)
523 {
524 	int err;
525 
526 	if (is_kernel_addr(ea))
527 		return __copy_mem_out(dest, ea, nb, regs);
528 
529 	if (user_write_access_begin((void __user *)ea, nb)) {
530 		err = __copy_mem_out(dest, ea, nb, regs);
531 		user_write_access_end();
532 	} else {
533 		err = -EFAULT;
534 		regs->dar = ea;
535 	}
536 
537 	return err;
538 }
539 
write_mem_unaligned(unsigned long val,unsigned long ea,int nb,struct pt_regs * regs)540 static nokprobe_inline int write_mem_unaligned(unsigned long val,
541 					       unsigned long ea, int nb,
542 					       struct pt_regs *regs)
543 {
544 	union {
545 		unsigned long ul;
546 		u8 b[sizeof(unsigned long)];
547 	} u;
548 	int i;
549 
550 	u.ul = val;
551 	i = IS_BE ? sizeof(unsigned long) - nb : 0;
552 	return copy_mem_out(&u.b[i], ea, nb, regs);
553 }
554 
555 /*
556  * Write memory at address ea for nb bytes, return 0 for success
557  * or -EFAULT if an error occurred.  N.B. nb must be 1, 2, 4 or 8.
558  */
write_mem(unsigned long val,unsigned long ea,int nb,struct pt_regs * regs)559 static int write_mem(unsigned long val, unsigned long ea, int nb,
560 			       struct pt_regs *regs)
561 {
562 	if (!address_ok(regs, ea, nb))
563 		return -EFAULT;
564 	if ((ea & (nb - 1)) == 0)
565 		return write_mem_aligned(val, ea, nb, regs);
566 	return write_mem_unaligned(val, ea, nb, regs);
567 }
568 NOKPROBE_SYMBOL(write_mem);
569 
570 #ifdef CONFIG_PPC_FPU
571 /*
572  * These access either the real FP register or the image in the
573  * thread_struct, depending on regs->msr & MSR_FP.
574  */
do_fp_load(struct instruction_op * op,unsigned long ea,struct pt_regs * regs,bool cross_endian)575 static int do_fp_load(struct instruction_op *op, unsigned long ea,
576 		      struct pt_regs *regs, bool cross_endian)
577 {
578 	int err, rn, nb;
579 	union {
580 		int i;
581 		unsigned int u;
582 		float f;
583 		double d[2];
584 		unsigned long l[2];
585 		u8 b[2 * sizeof(double)];
586 	} u;
587 
588 	nb = GETSIZE(op->type);
589 	if (!address_ok(regs, ea, nb))
590 		return -EFAULT;
591 	rn = op->reg;
592 	err = copy_mem_in(u.b, ea, nb, regs);
593 	if (err)
594 		return err;
595 	if (unlikely(cross_endian)) {
596 		do_byte_reverse(u.b, min(nb, 8));
597 		if (nb == 16)
598 			do_byte_reverse(&u.b[8], 8);
599 	}
600 	preempt_disable();
601 	if (nb == 4) {
602 		if (op->type & FPCONV)
603 			conv_sp_to_dp(&u.f, &u.d[0]);
604 		else if (op->type & SIGNEXT)
605 			u.l[0] = u.i;
606 		else
607 			u.l[0] = u.u;
608 	}
609 	if (regs->msr & MSR_FP)
610 		put_fpr(rn, &u.d[0]);
611 	else
612 		current->thread.TS_FPR(rn) = u.l[0];
613 	if (nb == 16) {
614 		/* lfdp */
615 		rn |= 1;
616 		if (regs->msr & MSR_FP)
617 			put_fpr(rn, &u.d[1]);
618 		else
619 			current->thread.TS_FPR(rn) = u.l[1];
620 	}
621 	preempt_enable();
622 	return 0;
623 }
624 NOKPROBE_SYMBOL(do_fp_load);
625 
do_fp_store(struct instruction_op * op,unsigned long ea,struct pt_regs * regs,bool cross_endian)626 static int do_fp_store(struct instruction_op *op, unsigned long ea,
627 		       struct pt_regs *regs, bool cross_endian)
628 {
629 	int rn, nb;
630 	union {
631 		unsigned int u;
632 		float f;
633 		double d[2];
634 		unsigned long l[2];
635 		u8 b[2 * sizeof(double)];
636 	} u;
637 
638 	nb = GETSIZE(op->type);
639 	if (!address_ok(regs, ea, nb))
640 		return -EFAULT;
641 	rn = op->reg;
642 	preempt_disable();
643 	if (regs->msr & MSR_FP)
644 		get_fpr(rn, &u.d[0]);
645 	else
646 		u.l[0] = current->thread.TS_FPR(rn);
647 	if (nb == 4) {
648 		if (op->type & FPCONV)
649 			conv_dp_to_sp(&u.d[0], &u.f);
650 		else
651 			u.u = u.l[0];
652 	}
653 	if (nb == 16) {
654 		rn |= 1;
655 		if (regs->msr & MSR_FP)
656 			get_fpr(rn, &u.d[1]);
657 		else
658 			u.l[1] = current->thread.TS_FPR(rn);
659 	}
660 	preempt_enable();
661 	if (unlikely(cross_endian)) {
662 		do_byte_reverse(u.b, min(nb, 8));
663 		if (nb == 16)
664 			do_byte_reverse(&u.b[8], 8);
665 	}
666 	return copy_mem_out(u.b, ea, nb, regs);
667 }
668 NOKPROBE_SYMBOL(do_fp_store);
669 #endif
670 
671 #ifdef CONFIG_ALTIVEC
672 /* For Altivec/VMX, no need to worry about alignment */
do_vec_load(int rn,unsigned long ea,int size,struct pt_regs * regs,bool cross_endian)673 static nokprobe_inline int do_vec_load(int rn, unsigned long ea,
674 				       int size, struct pt_regs *regs,
675 				       bool cross_endian)
676 {
677 	int err;
678 	union {
679 		__vector128 v;
680 		u8 b[sizeof(__vector128)];
681 	} u = {};
682 
683 	if (!address_ok(regs, ea & ~0xfUL, 16))
684 		return -EFAULT;
685 	/* align to multiple of size */
686 	ea &= ~(size - 1);
687 	err = copy_mem_in(&u.b[ea & 0xf], ea, size, regs);
688 	if (err)
689 		return err;
690 	if (unlikely(cross_endian))
691 		do_byte_reverse(&u.b[ea & 0xf], size);
692 	preempt_disable();
693 	if (regs->msr & MSR_VEC)
694 		put_vr(rn, &u.v);
695 	else
696 		current->thread.vr_state.vr[rn] = u.v;
697 	preempt_enable();
698 	return 0;
699 }
700 
do_vec_store(int rn,unsigned long ea,int size,struct pt_regs * regs,bool cross_endian)701 static nokprobe_inline int do_vec_store(int rn, unsigned long ea,
702 					int size, struct pt_regs *regs,
703 					bool cross_endian)
704 {
705 	union {
706 		__vector128 v;
707 		u8 b[sizeof(__vector128)];
708 	} u;
709 
710 	if (!address_ok(regs, ea & ~0xfUL, 16))
711 		return -EFAULT;
712 	/* align to multiple of size */
713 	ea &= ~(size - 1);
714 
715 	preempt_disable();
716 	if (regs->msr & MSR_VEC)
717 		get_vr(rn, &u.v);
718 	else
719 		u.v = current->thread.vr_state.vr[rn];
720 	preempt_enable();
721 	if (unlikely(cross_endian))
722 		do_byte_reverse(&u.b[ea & 0xf], size);
723 	return copy_mem_out(&u.b[ea & 0xf], ea, size, regs);
724 }
725 #endif /* CONFIG_ALTIVEC */
726 
727 #ifdef __powerpc64__
emulate_lq(struct pt_regs * regs,unsigned long ea,int reg,bool cross_endian)728 static nokprobe_inline int emulate_lq(struct pt_regs *regs, unsigned long ea,
729 				      int reg, bool cross_endian)
730 {
731 	int err;
732 
733 	if (!address_ok(regs, ea, 16))
734 		return -EFAULT;
735 	/* if aligned, should be atomic */
736 	if ((ea & 0xf) == 0) {
737 		err = do_lq(ea, &regs->gpr[reg]);
738 	} else {
739 		err = read_mem(&regs->gpr[reg + IS_LE], ea, 8, regs);
740 		if (!err)
741 			err = read_mem(&regs->gpr[reg + IS_BE], ea + 8, 8, regs);
742 	}
743 	if (!err && unlikely(cross_endian))
744 		do_byte_reverse(&regs->gpr[reg], 16);
745 	return err;
746 }
747 
emulate_stq(struct pt_regs * regs,unsigned long ea,int reg,bool cross_endian)748 static nokprobe_inline int emulate_stq(struct pt_regs *regs, unsigned long ea,
749 				       int reg, bool cross_endian)
750 {
751 	int err;
752 	unsigned long vals[2];
753 
754 	if (!address_ok(regs, ea, 16))
755 		return -EFAULT;
756 	vals[0] = regs->gpr[reg];
757 	vals[1] = regs->gpr[reg + 1];
758 	if (unlikely(cross_endian))
759 		do_byte_reverse(vals, 16);
760 
761 	/* if aligned, should be atomic */
762 	if ((ea & 0xf) == 0)
763 		return do_stq(ea, vals[0], vals[1]);
764 
765 	err = write_mem(vals[IS_LE], ea, 8, regs);
766 	if (!err)
767 		err = write_mem(vals[IS_BE], ea + 8, 8, regs);
768 	return err;
769 }
770 #endif /* __powerpc64 */
771 
772 #ifdef CONFIG_VSX
emulate_vsx_load(struct instruction_op * op,union vsx_reg * reg,const void * mem,bool rev)773 void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg,
774 		      const void *mem, bool rev)
775 {
776 	int size, read_size;
777 	int i, j;
778 	const unsigned int *wp;
779 	const unsigned short *hp;
780 	const unsigned char *bp;
781 
782 	size = GETSIZE(op->type);
783 	reg->d[0] = reg->d[1] = 0;
784 
785 	switch (op->element_size) {
786 	case 32:
787 		/* [p]lxvp[x] */
788 	case 16:
789 		/* whole vector; lxv[x] or lxvl[l] */
790 		if (size == 0)
791 			break;
792 		memcpy(reg, mem, size);
793 		if (IS_LE && (op->vsx_flags & VSX_LDLEFT))
794 			rev = !rev;
795 		if (rev)
796 			do_byte_reverse(reg, size);
797 		break;
798 	case 8:
799 		/* scalar loads, lxvd2x, lxvdsx */
800 		read_size = (size >= 8) ? 8 : size;
801 		i = IS_LE ? 8 : 8 - read_size;
802 		memcpy(&reg->b[i], mem, read_size);
803 		if (rev)
804 			do_byte_reverse(&reg->b[i], 8);
805 		if (size < 8) {
806 			if (op->type & SIGNEXT) {
807 				/* size == 4 is the only case here */
808 				reg->d[IS_LE] = (signed int) reg->d[IS_LE];
809 			} else if (op->vsx_flags & VSX_FPCONV) {
810 				preempt_disable();
811 				conv_sp_to_dp(&reg->fp[1 + IS_LE],
812 					      &reg->dp[IS_LE]);
813 				preempt_enable();
814 			}
815 		} else {
816 			if (size == 16) {
817 				unsigned long v = *(unsigned long *)(mem + 8);
818 				reg->d[IS_BE] = !rev ? v : byterev_8(v);
819 			} else if (op->vsx_flags & VSX_SPLAT)
820 				reg->d[IS_BE] = reg->d[IS_LE];
821 		}
822 		break;
823 	case 4:
824 		/* lxvw4x, lxvwsx */
825 		wp = mem;
826 		for (j = 0; j < size / 4; ++j) {
827 			i = IS_LE ? 3 - j : j;
828 			reg->w[i] = !rev ? *wp++ : byterev_4(*wp++);
829 		}
830 		if (op->vsx_flags & VSX_SPLAT) {
831 			u32 val = reg->w[IS_LE ? 3 : 0];
832 			for (; j < 4; ++j) {
833 				i = IS_LE ? 3 - j : j;
834 				reg->w[i] = val;
835 			}
836 		}
837 		break;
838 	case 2:
839 		/* lxvh8x */
840 		hp = mem;
841 		for (j = 0; j < size / 2; ++j) {
842 			i = IS_LE ? 7 - j : j;
843 			reg->h[i] = !rev ? *hp++ : byterev_2(*hp++);
844 		}
845 		break;
846 	case 1:
847 		/* lxvb16x */
848 		bp = mem;
849 		for (j = 0; j < size; ++j) {
850 			i = IS_LE ? 15 - j : j;
851 			reg->b[i] = *bp++;
852 		}
853 		break;
854 	}
855 }
856 EXPORT_SYMBOL_GPL(emulate_vsx_load);
857 NOKPROBE_SYMBOL(emulate_vsx_load);
858 
emulate_vsx_store(struct instruction_op * op,const union vsx_reg * reg,void * mem,bool rev)859 void emulate_vsx_store(struct instruction_op *op, const union vsx_reg *reg,
860 		       void *mem, bool rev)
861 {
862 	int size, write_size;
863 	int i, j;
864 	union vsx_reg buf;
865 	unsigned int *wp;
866 	unsigned short *hp;
867 	unsigned char *bp;
868 
869 	size = GETSIZE(op->type);
870 
871 	switch (op->element_size) {
872 	case 32:
873 		/* [p]stxvp[x] */
874 		if (size == 0)
875 			break;
876 		if (rev) {
877 			/* reverse 32 bytes */
878 			union vsx_reg buf32[2];
879 			buf32[0].d[0] = byterev_8(reg[1].d[1]);
880 			buf32[0].d[1] = byterev_8(reg[1].d[0]);
881 			buf32[1].d[0] = byterev_8(reg[0].d[1]);
882 			buf32[1].d[1] = byterev_8(reg[0].d[0]);
883 			memcpy(mem, buf32, size);
884 		} else {
885 			memcpy(mem, reg, size);
886 		}
887 		break;
888 	case 16:
889 		/* stxv, stxvx, stxvl, stxvll */
890 		if (size == 0)
891 			break;
892 		if (IS_LE && (op->vsx_flags & VSX_LDLEFT))
893 			rev = !rev;
894 		if (rev) {
895 			/* reverse 16 bytes */
896 			buf.d[0] = byterev_8(reg->d[1]);
897 			buf.d[1] = byterev_8(reg->d[0]);
898 			reg = &buf;
899 		}
900 		memcpy(mem, reg, size);
901 		break;
902 	case 8:
903 		/* scalar stores, stxvd2x */
904 		write_size = (size >= 8) ? 8 : size;
905 		i = IS_LE ? 8 : 8 - write_size;
906 		if (size < 8 && op->vsx_flags & VSX_FPCONV) {
907 			buf.d[0] = buf.d[1] = 0;
908 			preempt_disable();
909 			conv_dp_to_sp(&reg->dp[IS_LE], &buf.fp[1 + IS_LE]);
910 			preempt_enable();
911 			reg = &buf;
912 		}
913 		memcpy(mem, &reg->b[i], write_size);
914 		if (size == 16)
915 			memcpy(mem + 8, &reg->d[IS_BE], 8);
916 		if (unlikely(rev)) {
917 			do_byte_reverse(mem, write_size);
918 			if (size == 16)
919 				do_byte_reverse(mem + 8, 8);
920 		}
921 		break;
922 	case 4:
923 		/* stxvw4x */
924 		wp = mem;
925 		for (j = 0; j < size / 4; ++j) {
926 			i = IS_LE ? 3 - j : j;
927 			*wp++ = !rev ? reg->w[i] : byterev_4(reg->w[i]);
928 		}
929 		break;
930 	case 2:
931 		/* stxvh8x */
932 		hp = mem;
933 		for (j = 0; j < size / 2; ++j) {
934 			i = IS_LE ? 7 - j : j;
935 			*hp++ = !rev ? reg->h[i] : byterev_2(reg->h[i]);
936 		}
937 		break;
938 	case 1:
939 		/* stvxb16x */
940 		bp = mem;
941 		for (j = 0; j < size; ++j) {
942 			i = IS_LE ? 15 - j : j;
943 			*bp++ = reg->b[i];
944 		}
945 		break;
946 	}
947 }
948 EXPORT_SYMBOL_GPL(emulate_vsx_store);
949 NOKPROBE_SYMBOL(emulate_vsx_store);
950 
do_vsx_load(struct instruction_op * op,unsigned long ea,struct pt_regs * regs,bool cross_endian)951 static nokprobe_inline int do_vsx_load(struct instruction_op *op,
952 				       unsigned long ea, struct pt_regs *regs,
953 				       bool cross_endian)
954 {
955 	int reg = op->reg;
956 	int i, j, nr_vsx_regs;
957 	u8 mem[32];
958 	union vsx_reg buf[2];
959 	int size = GETSIZE(op->type);
960 
961 	if (!address_ok(regs, ea, size) || copy_mem_in(mem, ea, size, regs))
962 		return -EFAULT;
963 
964 	nr_vsx_regs = max(1ul, size / sizeof(__vector128));
965 	emulate_vsx_load(op, buf, mem, cross_endian);
966 	preempt_disable();
967 	if (reg < 32) {
968 		/* FP regs + extensions */
969 		if (regs->msr & MSR_FP) {
970 			for (i = 0; i < nr_vsx_regs; i++) {
971 				j = IS_LE ? nr_vsx_regs - i - 1 : i;
972 				load_vsrn(reg + i, &buf[j].v);
973 			}
974 		} else {
975 			for (i = 0; i < nr_vsx_regs; i++) {
976 				j = IS_LE ? nr_vsx_regs - i - 1 : i;
977 				current->thread.fp_state.fpr[reg + i][0] = buf[j].d[0];
978 				current->thread.fp_state.fpr[reg + i][1] = buf[j].d[1];
979 			}
980 		}
981 	} else {
982 		if (regs->msr & MSR_VEC) {
983 			for (i = 0; i < nr_vsx_regs; i++) {
984 				j = IS_LE ? nr_vsx_regs - i - 1 : i;
985 				load_vsrn(reg + i, &buf[j].v);
986 			}
987 		} else {
988 			for (i = 0; i < nr_vsx_regs; i++) {
989 				j = IS_LE ? nr_vsx_regs - i - 1 : i;
990 				current->thread.vr_state.vr[reg - 32 + i] = buf[j].v;
991 			}
992 		}
993 	}
994 	preempt_enable();
995 	return 0;
996 }
997 
do_vsx_store(struct instruction_op * op,unsigned long ea,struct pt_regs * regs,bool cross_endian)998 static nokprobe_inline int do_vsx_store(struct instruction_op *op,
999 					unsigned long ea, struct pt_regs *regs,
1000 					bool cross_endian)
1001 {
1002 	int reg = op->reg;
1003 	int i, j, nr_vsx_regs;
1004 	u8 mem[32];
1005 	union vsx_reg buf[2];
1006 	int size = GETSIZE(op->type);
1007 
1008 	if (!address_ok(regs, ea, size))
1009 		return -EFAULT;
1010 
1011 	nr_vsx_regs = max(1ul, size / sizeof(__vector128));
1012 	preempt_disable();
1013 	if (reg < 32) {
1014 		/* FP regs + extensions */
1015 		if (regs->msr & MSR_FP) {
1016 			for (i = 0; i < nr_vsx_regs; i++) {
1017 				j = IS_LE ? nr_vsx_regs - i - 1 : i;
1018 				store_vsrn(reg + i, &buf[j].v);
1019 			}
1020 		} else {
1021 			for (i = 0; i < nr_vsx_regs; i++) {
1022 				j = IS_LE ? nr_vsx_regs - i - 1 : i;
1023 				buf[j].d[0] = current->thread.fp_state.fpr[reg + i][0];
1024 				buf[j].d[1] = current->thread.fp_state.fpr[reg + i][1];
1025 			}
1026 		}
1027 	} else {
1028 		if (regs->msr & MSR_VEC) {
1029 			for (i = 0; i < nr_vsx_regs; i++) {
1030 				j = IS_LE ? nr_vsx_regs - i - 1 : i;
1031 				store_vsrn(reg + i, &buf[j].v);
1032 			}
1033 		} else {
1034 			for (i = 0; i < nr_vsx_regs; i++) {
1035 				j = IS_LE ? nr_vsx_regs - i - 1 : i;
1036 				buf[j].v = current->thread.vr_state.vr[reg - 32 + i];
1037 			}
1038 		}
1039 	}
1040 	preempt_enable();
1041 	emulate_vsx_store(op, buf, mem, cross_endian);
1042 	return  copy_mem_out(mem, ea, size, regs);
1043 }
1044 #endif /* CONFIG_VSX */
1045 
__emulate_dcbz(unsigned long ea)1046 static int __emulate_dcbz(unsigned long ea)
1047 {
1048 	unsigned long i;
1049 	unsigned long size = l1_dcache_bytes();
1050 
1051 	for (i = 0; i < size; i += sizeof(long))
1052 		unsafe_put_user(0, (unsigned long __user *)(ea + i), Efault);
1053 
1054 	return 0;
1055 
1056 Efault:
1057 	return -EFAULT;
1058 }
1059 
emulate_dcbz(unsigned long ea,struct pt_regs * regs)1060 int emulate_dcbz(unsigned long ea, struct pt_regs *regs)
1061 {
1062 	int err;
1063 	unsigned long size = l1_dcache_bytes();
1064 
1065 	ea = truncate_if_32bit(regs->msr, ea);
1066 	ea &= ~(size - 1);
1067 	if (!address_ok(regs, ea, size))
1068 		return -EFAULT;
1069 
1070 	if (is_kernel_addr(ea)) {
1071 		err = __emulate_dcbz(ea);
1072 	} else if (user_write_access_begin((void __user *)ea, size)) {
1073 		err = __emulate_dcbz(ea);
1074 		user_write_access_end();
1075 	} else {
1076 		err = -EFAULT;
1077 	}
1078 
1079 	if (err)
1080 		regs->dar = ea;
1081 
1082 
1083 	return err;
1084 }
1085 NOKPROBE_SYMBOL(emulate_dcbz);
1086 
1087 #define __put_user_asmx(x, addr, err, op, cr)		\
1088 	__asm__ __volatile__(				\
1089 		".machine push\n"			\
1090 		".machine power8\n"			\
1091 		"1:	" op " %2,0,%3\n"		\
1092 		".machine pop\n"			\
1093 		"	mfcr	%1\n"			\
1094 		"2:\n"					\
1095 		".section .fixup,\"ax\"\n"		\
1096 		"3:	li	%0,%4\n"		\
1097 		"	b	2b\n"			\
1098 		".previous\n"				\
1099 		EX_TABLE(1b, 3b)			\
1100 		: "=r" (err), "=r" (cr)			\
1101 		: "r" (x), "r" (addr), "i" (-EFAULT), "0" (err))
1102 
1103 #define __get_user_asmx(x, addr, err, op)		\
1104 	__asm__ __volatile__(				\
1105 		".machine push\n"			\
1106 		".machine power8\n"			\
1107 		"1:	"op" %1,0,%2\n"			\
1108 		".machine pop\n"			\
1109 		"2:\n"					\
1110 		".section .fixup,\"ax\"\n"		\
1111 		"3:	li	%0,%3\n"		\
1112 		"	b	2b\n"			\
1113 		".previous\n"				\
1114 		EX_TABLE(1b, 3b)			\
1115 		: "=r" (err), "=r" (x)			\
1116 		: "r" (addr), "i" (-EFAULT), "0" (err))
1117 
1118 #define __cacheop_user_asmx(addr, err, op)		\
1119 	__asm__ __volatile__(				\
1120 		"1:	"op" 0,%1\n"			\
1121 		"2:\n"					\
1122 		".section .fixup,\"ax\"\n"		\
1123 		"3:	li	%0,%3\n"		\
1124 		"	b	2b\n"			\
1125 		".previous\n"				\
1126 		EX_TABLE(1b, 3b)			\
1127 		: "=r" (err)				\
1128 		: "r" (addr), "i" (-EFAULT), "0" (err))
1129 
set_cr0(const struct pt_regs * regs,struct instruction_op * op)1130 static nokprobe_inline void set_cr0(const struct pt_regs *regs,
1131 				    struct instruction_op *op)
1132 {
1133 	long val = op->val;
1134 
1135 	op->type |= SETCC;
1136 	op->ccval = (regs->ccr & 0x0fffffff) | ((regs->xer >> 3) & 0x10000000);
1137 	if (!(regs->msr & MSR_64BIT))
1138 		val = (int) val;
1139 	if (val < 0)
1140 		op->ccval |= 0x80000000;
1141 	else if (val > 0)
1142 		op->ccval |= 0x40000000;
1143 	else
1144 		op->ccval |= 0x20000000;
1145 }
1146 
set_ca32(struct instruction_op * op,bool val)1147 static nokprobe_inline void set_ca32(struct instruction_op *op, bool val)
1148 {
1149 	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
1150 		if (val)
1151 			op->xerval |= XER_CA32;
1152 		else
1153 			op->xerval &= ~XER_CA32;
1154 	}
1155 }
1156 
add_with_carry(const struct pt_regs * regs,struct instruction_op * op,int rd,unsigned long val1,unsigned long val2,unsigned long carry_in)1157 static nokprobe_inline void add_with_carry(const struct pt_regs *regs,
1158 				     struct instruction_op *op, int rd,
1159 				     unsigned long val1, unsigned long val2,
1160 				     unsigned long carry_in)
1161 {
1162 	unsigned long val = val1 + val2;
1163 
1164 	if (carry_in)
1165 		++val;
1166 	op->type = COMPUTE | SETREG | SETXER;
1167 	op->reg = rd;
1168 	op->val = val;
1169 	val = truncate_if_32bit(regs->msr, val);
1170 	val1 = truncate_if_32bit(regs->msr, val1);
1171 	op->xerval = regs->xer;
1172 	if (val < val1 || (carry_in && val == val1))
1173 		op->xerval |= XER_CA;
1174 	else
1175 		op->xerval &= ~XER_CA;
1176 
1177 	set_ca32(op, (unsigned int)val < (unsigned int)val1 ||
1178 			(carry_in && (unsigned int)val == (unsigned int)val1));
1179 }
1180 
do_cmp_signed(const struct pt_regs * regs,struct instruction_op * op,long v1,long v2,int crfld)1181 static nokprobe_inline void do_cmp_signed(const struct pt_regs *regs,
1182 					  struct instruction_op *op,
1183 					  long v1, long v2, int crfld)
1184 {
1185 	unsigned int crval, shift;
1186 
1187 	op->type = COMPUTE | SETCC;
1188 	crval = (regs->xer >> 31) & 1;		/* get SO bit */
1189 	if (v1 < v2)
1190 		crval |= 8;
1191 	else if (v1 > v2)
1192 		crval |= 4;
1193 	else
1194 		crval |= 2;
1195 	shift = (7 - crfld) * 4;
1196 	op->ccval = (regs->ccr & ~(0xf << shift)) | (crval << shift);
1197 }
1198 
do_cmp_unsigned(const struct pt_regs * regs,struct instruction_op * op,unsigned long v1,unsigned long v2,int crfld)1199 static nokprobe_inline void do_cmp_unsigned(const struct pt_regs *regs,
1200 					    struct instruction_op *op,
1201 					    unsigned long v1,
1202 					    unsigned long v2, int crfld)
1203 {
1204 	unsigned int crval, shift;
1205 
1206 	op->type = COMPUTE | SETCC;
1207 	crval = (regs->xer >> 31) & 1;		/* get SO bit */
1208 	if (v1 < v2)
1209 		crval |= 8;
1210 	else if (v1 > v2)
1211 		crval |= 4;
1212 	else
1213 		crval |= 2;
1214 	shift = (7 - crfld) * 4;
1215 	op->ccval = (regs->ccr & ~(0xf << shift)) | (crval << shift);
1216 }
1217 
do_cmpb(const struct pt_regs * regs,struct instruction_op * op,unsigned long v1,unsigned long v2)1218 static nokprobe_inline void do_cmpb(const struct pt_regs *regs,
1219 				    struct instruction_op *op,
1220 				    unsigned long v1, unsigned long v2)
1221 {
1222 	unsigned long long out_val, mask;
1223 	int i;
1224 
1225 	out_val = 0;
1226 	for (i = 0; i < 8; i++) {
1227 		mask = 0xffUL << (i * 8);
1228 		if ((v1 & mask) == (v2 & mask))
1229 			out_val |= mask;
1230 	}
1231 	op->val = out_val;
1232 }
1233 
1234 /*
1235  * The size parameter is used to adjust the equivalent popcnt instruction.
1236  * popcntb = 8, popcntw = 32, popcntd = 64
1237  */
do_popcnt(const struct pt_regs * regs,struct instruction_op * op,unsigned long v1,int size)1238 static nokprobe_inline void do_popcnt(const struct pt_regs *regs,
1239 				      struct instruction_op *op,
1240 				      unsigned long v1, int size)
1241 {
1242 	unsigned long long out = v1;
1243 
1244 	out -= (out >> 1) & 0x5555555555555555ULL;
1245 	out = (0x3333333333333333ULL & out) +
1246 	      (0x3333333333333333ULL & (out >> 2));
1247 	out = (out + (out >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
1248 
1249 	if (size == 8) {	/* popcntb */
1250 		op->val = out;
1251 		return;
1252 	}
1253 	out += out >> 8;
1254 	out += out >> 16;
1255 	if (size == 32) {	/* popcntw */
1256 		op->val = out & 0x0000003f0000003fULL;
1257 		return;
1258 	}
1259 
1260 	out = (out + (out >> 32)) & 0x7f;
1261 	op->val = out;	/* popcntd */
1262 }
1263 
1264 #ifdef CONFIG_PPC64
do_bpermd(const struct pt_regs * regs,struct instruction_op * op,unsigned long v1,unsigned long v2)1265 static nokprobe_inline void do_bpermd(const struct pt_regs *regs,
1266 				      struct instruction_op *op,
1267 				      unsigned long v1, unsigned long v2)
1268 {
1269 	unsigned char perm, idx;
1270 	unsigned int i;
1271 
1272 	perm = 0;
1273 	for (i = 0; i < 8; i++) {
1274 		idx = (v1 >> (i * 8)) & 0xff;
1275 		if (idx < 64)
1276 			if (v2 & PPC_BIT(idx))
1277 				perm |= 1 << i;
1278 	}
1279 	op->val = perm;
1280 }
1281 #endif /* CONFIG_PPC64 */
1282 /*
1283  * The size parameter adjusts the equivalent prty instruction.
1284  * prtyw = 32, prtyd = 64
1285  */
do_prty(const struct pt_regs * regs,struct instruction_op * op,unsigned long v,int size)1286 static nokprobe_inline void do_prty(const struct pt_regs *regs,
1287 				    struct instruction_op *op,
1288 				    unsigned long v, int size)
1289 {
1290 	unsigned long long res = v ^ (v >> 8);
1291 
1292 	res ^= res >> 16;
1293 	if (size == 32) {		/* prtyw */
1294 		op->val = res & 0x0000000100000001ULL;
1295 		return;
1296 	}
1297 
1298 	res ^= res >> 32;
1299 	op->val = res & 1;	/*prtyd */
1300 }
1301 
trap_compare(long v1,long v2)1302 static nokprobe_inline int trap_compare(long v1, long v2)
1303 {
1304 	int ret = 0;
1305 
1306 	if (v1 < v2)
1307 		ret |= 0x10;
1308 	else if (v1 > v2)
1309 		ret |= 0x08;
1310 	else
1311 		ret |= 0x04;
1312 	if ((unsigned long)v1 < (unsigned long)v2)
1313 		ret |= 0x02;
1314 	else if ((unsigned long)v1 > (unsigned long)v2)
1315 		ret |= 0x01;
1316 	return ret;
1317 }
1318 
1319 /*
1320  * Elements of 32-bit rotate and mask instructions.
1321  */
1322 #define MASK32(mb, me)	((0xffffffffUL >> (mb)) + \
1323 			 ((signed long)-0x80000000L >> (me)) + ((me) >= (mb)))
1324 #ifdef __powerpc64__
1325 #define MASK64_L(mb)	(~0UL >> (mb))
1326 #define MASK64_R(me)	((signed long)-0x8000000000000000L >> (me))
1327 #define MASK64(mb, me)	(MASK64_L(mb) + MASK64_R(me) + ((me) >= (mb)))
1328 #define DATA32(x)	(((x) & 0xffffffffUL) | (((x) & 0xffffffffUL) << 32))
1329 #else
1330 #define DATA32(x)	(x)
1331 #endif
1332 #define ROTATE(x, n)	((n) ? (((x) << (n)) | ((x) >> (8 * sizeof(long) - (n)))) : (x))
1333 
1334 /*
1335  * Decode an instruction, and return information about it in *op
1336  * without changing *regs.
1337  * Integer arithmetic and logical instructions, branches, and barrier
1338  * instructions can be emulated just using the information in *op.
1339  *
1340  * Return value is 1 if the instruction can be emulated just by
1341  * updating *regs with the information in *op, -1 if we need the
1342  * GPRs but *regs doesn't contain the full register set, or 0
1343  * otherwise.
1344  */
analyse_instr(struct instruction_op * op,const struct pt_regs * regs,ppc_inst_t instr)1345 int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
1346 		  ppc_inst_t instr)
1347 {
1348 #ifdef CONFIG_PPC64
1349 	unsigned int suffixopcode, prefixtype, prefix_r;
1350 #endif
1351 	unsigned int opcode, ra, rb, rc, rd, spr, u;
1352 	unsigned long int imm;
1353 	unsigned long int val, val2;
1354 	unsigned int mb, me, sh;
1355 	unsigned int word, suffix;
1356 	long ival;
1357 
1358 	word = ppc_inst_val(instr);
1359 	suffix = ppc_inst_suffix(instr);
1360 
1361 	op->type = COMPUTE;
1362 
1363 	opcode = ppc_inst_primary_opcode(instr);
1364 	switch (opcode) {
1365 	case 16:	/* bc */
1366 		op->type = BRANCH;
1367 		imm = (signed short)(word & 0xfffc);
1368 		if ((word & 2) == 0)
1369 			imm += regs->nip;
1370 		op->val = truncate_if_32bit(regs->msr, imm);
1371 		if (word & 1)
1372 			op->type |= SETLK;
1373 		if (branch_taken(word, regs, op))
1374 			op->type |= BRTAKEN;
1375 		return 1;
1376 	case 17:	/* sc */
1377 		if ((word & 0xfe2) == 2)
1378 			op->type = SYSCALL;
1379 		else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) &&
1380 				(word & 0xfe3) == 1) {	/* scv */
1381 			op->type = SYSCALL_VECTORED_0;
1382 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
1383 				goto unknown_opcode;
1384 		} else
1385 			op->type = UNKNOWN;
1386 		return 0;
1387 	case 18:	/* b */
1388 		op->type = BRANCH | BRTAKEN;
1389 		imm = word & 0x03fffffc;
1390 		if (imm & 0x02000000)
1391 			imm -= 0x04000000;
1392 		if ((word & 2) == 0)
1393 			imm += regs->nip;
1394 		op->val = truncate_if_32bit(regs->msr, imm);
1395 		if (word & 1)
1396 			op->type |= SETLK;
1397 		return 1;
1398 	case 19:
1399 		switch ((word >> 1) & 0x3ff) {
1400 		case 0:		/* mcrf */
1401 			op->type = COMPUTE + SETCC;
1402 			rd = 7 - ((word >> 23) & 0x7);
1403 			ra = 7 - ((word >> 18) & 0x7);
1404 			rd *= 4;
1405 			ra *= 4;
1406 			val = (regs->ccr >> ra) & 0xf;
1407 			op->ccval = (regs->ccr & ~(0xfUL << rd)) | (val << rd);
1408 			return 1;
1409 
1410 		case 16:	/* bclr */
1411 		case 528:	/* bcctr */
1412 			op->type = BRANCH;
1413 			imm = (word & 0x400)? regs->ctr: regs->link;
1414 			op->val = truncate_if_32bit(regs->msr, imm);
1415 			if (word & 1)
1416 				op->type |= SETLK;
1417 			if (branch_taken(word, regs, op))
1418 				op->type |= BRTAKEN;
1419 			return 1;
1420 
1421 		case 18:	/* rfid, scary */
1422 			if (regs->msr & MSR_PR)
1423 				goto priv;
1424 			op->type = RFI;
1425 			return 0;
1426 
1427 		case 150:	/* isync */
1428 			op->type = BARRIER | BARRIER_ISYNC;
1429 			return 1;
1430 
1431 		case 33:	/* crnor */
1432 		case 129:	/* crandc */
1433 		case 193:	/* crxor */
1434 		case 225:	/* crnand */
1435 		case 257:	/* crand */
1436 		case 289:	/* creqv */
1437 		case 417:	/* crorc */
1438 		case 449:	/* cror */
1439 			op->type = COMPUTE + SETCC;
1440 			ra = (word >> 16) & 0x1f;
1441 			rb = (word >> 11) & 0x1f;
1442 			rd = (word >> 21) & 0x1f;
1443 			ra = (regs->ccr >> (31 - ra)) & 1;
1444 			rb = (regs->ccr >> (31 - rb)) & 1;
1445 			val = (word >> (6 + ra * 2 + rb)) & 1;
1446 			op->ccval = (regs->ccr & ~(1UL << (31 - rd))) |
1447 				(val << (31 - rd));
1448 			return 1;
1449 		}
1450 		break;
1451 	case 31:
1452 		switch ((word >> 1) & 0x3ff) {
1453 		case 598:	/* sync */
1454 			op->type = BARRIER + BARRIER_SYNC;
1455 #ifdef __powerpc64__
1456 			switch ((word >> 21) & 3) {
1457 			case 1:		/* lwsync */
1458 				op->type = BARRIER + BARRIER_LWSYNC;
1459 				break;
1460 			case 2:		/* ptesync */
1461 				op->type = BARRIER + BARRIER_PTESYNC;
1462 				break;
1463 			}
1464 #endif
1465 			return 1;
1466 
1467 		case 854:	/* eieio */
1468 			op->type = BARRIER + BARRIER_EIEIO;
1469 			return 1;
1470 		}
1471 		break;
1472 	}
1473 
1474 	rd = (word >> 21) & 0x1f;
1475 	ra = (word >> 16) & 0x1f;
1476 	rb = (word >> 11) & 0x1f;
1477 	rc = (word >> 6) & 0x1f;
1478 
1479 	switch (opcode) {
1480 #ifdef __powerpc64__
1481 	case 1:
1482 		if (!cpu_has_feature(CPU_FTR_ARCH_31))
1483 			goto unknown_opcode;
1484 
1485 		prefix_r = GET_PREFIX_R(word);
1486 		ra = GET_PREFIX_RA(suffix);
1487 		rd = (suffix >> 21) & 0x1f;
1488 		op->reg = rd;
1489 		op->val = regs->gpr[rd];
1490 		suffixopcode = get_op(suffix);
1491 		prefixtype = (word >> 24) & 0x3;
1492 		switch (prefixtype) {
1493 		case 2:
1494 			if (prefix_r && ra)
1495 				return 0;
1496 			switch (suffixopcode) {
1497 			case 14:	/* paddi */
1498 				op->type = COMPUTE | PREFIXED;
1499 				op->val = mlsd_8lsd_ea(word, suffix, regs);
1500 				goto compute_done;
1501 			}
1502 		}
1503 		break;
1504 	case 2:		/* tdi */
1505 		if (rd & trap_compare(regs->gpr[ra], (short) word))
1506 			goto trap;
1507 		return 1;
1508 #endif
1509 	case 3:		/* twi */
1510 		if (rd & trap_compare((int)regs->gpr[ra], (short) word))
1511 			goto trap;
1512 		return 1;
1513 
1514 #ifdef __powerpc64__
1515 	case 4:
1516 		/*
1517 		 * There are very many instructions with this primary opcode
1518 		 * introduced in the ISA as early as v2.03. However, the ones
1519 		 * we currently emulate were all introduced with ISA 3.0
1520 		 */
1521 		if (!cpu_has_feature(CPU_FTR_ARCH_300))
1522 			goto unknown_opcode;
1523 
1524 		switch (word & 0x3f) {
1525 		case 48:	/* maddhd */
1526 			asm volatile(PPC_MADDHD(%0, %1, %2, %3) :
1527 				     "=r" (op->val) : "r" (regs->gpr[ra]),
1528 				     "r" (regs->gpr[rb]), "r" (regs->gpr[rc]));
1529 			goto compute_done;
1530 
1531 		case 49:	/* maddhdu */
1532 			asm volatile(PPC_MADDHDU(%0, %1, %2, %3) :
1533 				     "=r" (op->val) : "r" (regs->gpr[ra]),
1534 				     "r" (regs->gpr[rb]), "r" (regs->gpr[rc]));
1535 			goto compute_done;
1536 
1537 		case 51:	/* maddld */
1538 			asm volatile(PPC_MADDLD(%0, %1, %2, %3) :
1539 				     "=r" (op->val) : "r" (regs->gpr[ra]),
1540 				     "r" (regs->gpr[rb]), "r" (regs->gpr[rc]));
1541 			goto compute_done;
1542 		}
1543 
1544 		/*
1545 		 * There are other instructions from ISA 3.0 with the same
1546 		 * primary opcode which do not have emulation support yet.
1547 		 */
1548 		goto unknown_opcode;
1549 #endif
1550 
1551 	case 7:		/* mulli */
1552 		op->val = regs->gpr[ra] * (short) word;
1553 		goto compute_done;
1554 
1555 	case 8:		/* subfic */
1556 		imm = (short) word;
1557 		add_with_carry(regs, op, rd, ~regs->gpr[ra], imm, 1);
1558 		return 1;
1559 
1560 	case 10:	/* cmpli */
1561 		imm = (unsigned short) word;
1562 		val = regs->gpr[ra];
1563 #ifdef __powerpc64__
1564 		if ((rd & 1) == 0)
1565 			val = (unsigned int) val;
1566 #endif
1567 		do_cmp_unsigned(regs, op, val, imm, rd >> 2);
1568 		return 1;
1569 
1570 	case 11:	/* cmpi */
1571 		imm = (short) word;
1572 		val = regs->gpr[ra];
1573 #ifdef __powerpc64__
1574 		if ((rd & 1) == 0)
1575 			val = (int) val;
1576 #endif
1577 		do_cmp_signed(regs, op, val, imm, rd >> 2);
1578 		return 1;
1579 
1580 	case 12:	/* addic */
1581 		imm = (short) word;
1582 		add_with_carry(regs, op, rd, regs->gpr[ra], imm, 0);
1583 		return 1;
1584 
1585 	case 13:	/* addic. */
1586 		imm = (short) word;
1587 		add_with_carry(regs, op, rd, regs->gpr[ra], imm, 0);
1588 		set_cr0(regs, op);
1589 		return 1;
1590 
1591 	case 14:	/* addi */
1592 		imm = (short) word;
1593 		if (ra)
1594 			imm += regs->gpr[ra];
1595 		op->val = imm;
1596 		goto compute_done;
1597 
1598 	case 15:	/* addis */
1599 		imm = ((short) word) << 16;
1600 		if (ra)
1601 			imm += regs->gpr[ra];
1602 		op->val = imm;
1603 		goto compute_done;
1604 
1605 	case 19:
1606 		if (((word >> 1) & 0x1f) == 2) {
1607 			/* addpcis */
1608 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
1609 				goto unknown_opcode;
1610 			imm = (short) (word & 0xffc1);	/* d0 + d2 fields */
1611 			imm |= (word >> 15) & 0x3e;	/* d1 field */
1612 			op->val = regs->nip + (imm << 16) + 4;
1613 			goto compute_done;
1614 		}
1615 		op->type = UNKNOWN;
1616 		return 0;
1617 
1618 	case 20:	/* rlwimi */
1619 		mb = (word >> 6) & 0x1f;
1620 		me = (word >> 1) & 0x1f;
1621 		val = DATA32(regs->gpr[rd]);
1622 		imm = MASK32(mb, me);
1623 		op->val = (regs->gpr[ra] & ~imm) | (ROTATE(val, rb) & imm);
1624 		goto logical_done;
1625 
1626 	case 21:	/* rlwinm */
1627 		mb = (word >> 6) & 0x1f;
1628 		me = (word >> 1) & 0x1f;
1629 		val = DATA32(regs->gpr[rd]);
1630 		op->val = ROTATE(val, rb) & MASK32(mb, me);
1631 		goto logical_done;
1632 
1633 	case 23:	/* rlwnm */
1634 		mb = (word >> 6) & 0x1f;
1635 		me = (word >> 1) & 0x1f;
1636 		rb = regs->gpr[rb] & 0x1f;
1637 		val = DATA32(regs->gpr[rd]);
1638 		op->val = ROTATE(val, rb) & MASK32(mb, me);
1639 		goto logical_done;
1640 
1641 	case 24:	/* ori */
1642 		op->val = regs->gpr[rd] | (unsigned short) word;
1643 		goto logical_done_nocc;
1644 
1645 	case 25:	/* oris */
1646 		imm = (unsigned short) word;
1647 		op->val = regs->gpr[rd] | (imm << 16);
1648 		goto logical_done_nocc;
1649 
1650 	case 26:	/* xori */
1651 		op->val = regs->gpr[rd] ^ (unsigned short) word;
1652 		goto logical_done_nocc;
1653 
1654 	case 27:	/* xoris */
1655 		imm = (unsigned short) word;
1656 		op->val = regs->gpr[rd] ^ (imm << 16);
1657 		goto logical_done_nocc;
1658 
1659 	case 28:	/* andi. */
1660 		op->val = regs->gpr[rd] & (unsigned short) word;
1661 		set_cr0(regs, op);
1662 		goto logical_done_nocc;
1663 
1664 	case 29:	/* andis. */
1665 		imm = (unsigned short) word;
1666 		op->val = regs->gpr[rd] & (imm << 16);
1667 		set_cr0(regs, op);
1668 		goto logical_done_nocc;
1669 
1670 #ifdef __powerpc64__
1671 	case 30:	/* rld* */
1672 		mb = ((word >> 6) & 0x1f) | (word & 0x20);
1673 		val = regs->gpr[rd];
1674 		if ((word & 0x10) == 0) {
1675 			sh = rb | ((word & 2) << 4);
1676 			val = ROTATE(val, sh);
1677 			switch ((word >> 2) & 3) {
1678 			case 0:		/* rldicl */
1679 				val &= MASK64_L(mb);
1680 				break;
1681 			case 1:		/* rldicr */
1682 				val &= MASK64_R(mb);
1683 				break;
1684 			case 2:		/* rldic */
1685 				val &= MASK64(mb, 63 - sh);
1686 				break;
1687 			case 3:		/* rldimi */
1688 				imm = MASK64(mb, 63 - sh);
1689 				val = (regs->gpr[ra] & ~imm) |
1690 					(val & imm);
1691 			}
1692 			op->val = val;
1693 			goto logical_done;
1694 		} else {
1695 			sh = regs->gpr[rb] & 0x3f;
1696 			val = ROTATE(val, sh);
1697 			switch ((word >> 1) & 7) {
1698 			case 0:		/* rldcl */
1699 				op->val = val & MASK64_L(mb);
1700 				goto logical_done;
1701 			case 1:		/* rldcr */
1702 				op->val = val & MASK64_R(mb);
1703 				goto logical_done;
1704 			}
1705 		}
1706 #endif
1707 		op->type = UNKNOWN;	/* illegal instruction */
1708 		return 0;
1709 
1710 	case 31:
1711 		/* isel occupies 32 minor opcodes */
1712 		if (((word >> 1) & 0x1f) == 15) {
1713 			mb = (word >> 6) & 0x1f; /* bc field */
1714 			val = (regs->ccr >> (31 - mb)) & 1;
1715 			val2 = (ra) ? regs->gpr[ra] : 0;
1716 
1717 			op->val = (val) ? val2 : regs->gpr[rb];
1718 			goto compute_done;
1719 		}
1720 
1721 		switch ((word >> 1) & 0x3ff) {
1722 		case 4:		/* tw */
1723 			if (rd == 0x1f ||
1724 			    (rd & trap_compare((int)regs->gpr[ra],
1725 					       (int)regs->gpr[rb])))
1726 				goto trap;
1727 			return 1;
1728 #ifdef __powerpc64__
1729 		case 68:	/* td */
1730 			if (rd & trap_compare(regs->gpr[ra], regs->gpr[rb]))
1731 				goto trap;
1732 			return 1;
1733 #endif
1734 		case 83:	/* mfmsr */
1735 			if (regs->msr & MSR_PR)
1736 				goto priv;
1737 			op->type = MFMSR;
1738 			op->reg = rd;
1739 			return 0;
1740 		case 146:	/* mtmsr */
1741 			if (regs->msr & MSR_PR)
1742 				goto priv;
1743 			op->type = MTMSR;
1744 			op->reg = rd;
1745 			op->val = 0xffffffff & ~(MSR_ME | MSR_LE);
1746 			return 0;
1747 #ifdef CONFIG_PPC64
1748 		case 178:	/* mtmsrd */
1749 			if (regs->msr & MSR_PR)
1750 				goto priv;
1751 			op->type = MTMSR;
1752 			op->reg = rd;
1753 			/* only MSR_EE and MSR_RI get changed if bit 15 set */
1754 			/* mtmsrd doesn't change MSR_HV, MSR_ME or MSR_LE */
1755 			imm = (word & 0x10000)? 0x8002: 0xefffffffffffeffeUL;
1756 			op->val = imm;
1757 			return 0;
1758 #endif
1759 
1760 		case 19:	/* mfcr */
1761 			imm = 0xffffffffUL;
1762 			if ((word >> 20) & 1) {
1763 				imm = 0xf0000000UL;
1764 				for (sh = 0; sh < 8; ++sh) {
1765 					if (word & (0x80000 >> sh))
1766 						break;
1767 					imm >>= 4;
1768 				}
1769 			}
1770 			op->val = regs->ccr & imm;
1771 			goto compute_done;
1772 
1773 		case 128:	/* setb */
1774 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
1775 				goto unknown_opcode;
1776 			/*
1777 			 * 'ra' encodes the CR field number (bfa) in the top 3 bits.
1778 			 * Since each CR field is 4 bits,
1779 			 * we can simply mask off the bottom two bits (bfa * 4)
1780 			 * to yield the first bit in the CR field.
1781 			 */
1782 			ra = ra & ~0x3;
1783 			/* 'val' stores bits of the CR field (bfa) */
1784 			val = regs->ccr >> (CR0_SHIFT - ra);
1785 			/* checks if the LT bit of CR field (bfa) is set */
1786 			if (val & 8)
1787 				op->val = -1;
1788 			/* checks if the GT bit of CR field (bfa) is set */
1789 			else if (val & 4)
1790 				op->val = 1;
1791 			else
1792 				op->val = 0;
1793 			goto compute_done;
1794 
1795 		case 144:	/* mtcrf */
1796 			op->type = COMPUTE + SETCC;
1797 			imm = 0xf0000000UL;
1798 			val = regs->gpr[rd];
1799 			op->ccval = regs->ccr;
1800 			for (sh = 0; sh < 8; ++sh) {
1801 				if (word & (0x80000 >> sh))
1802 					op->ccval = (op->ccval & ~imm) |
1803 						(val & imm);
1804 				imm >>= 4;
1805 			}
1806 			return 1;
1807 
1808 		case 339:	/* mfspr */
1809 			spr = ((word >> 16) & 0x1f) | ((word >> 6) & 0x3e0);
1810 			op->type = MFSPR;
1811 			op->reg = rd;
1812 			op->spr = spr;
1813 			if (spr == SPRN_XER || spr == SPRN_LR ||
1814 			    spr == SPRN_CTR)
1815 				return 1;
1816 			return 0;
1817 
1818 		case 467:	/* mtspr */
1819 			spr = ((word >> 16) & 0x1f) | ((word >> 6) & 0x3e0);
1820 			op->type = MTSPR;
1821 			op->val = regs->gpr[rd];
1822 			op->spr = spr;
1823 			if (spr == SPRN_XER || spr == SPRN_LR ||
1824 			    spr == SPRN_CTR)
1825 				return 1;
1826 			return 0;
1827 
1828 /*
1829  * Compare instructions
1830  */
1831 		case 0:	/* cmp */
1832 			val = regs->gpr[ra];
1833 			val2 = regs->gpr[rb];
1834 #ifdef __powerpc64__
1835 			if ((rd & 1) == 0) {
1836 				/* word (32-bit) compare */
1837 				val = (int) val;
1838 				val2 = (int) val2;
1839 			}
1840 #endif
1841 			do_cmp_signed(regs, op, val, val2, rd >> 2);
1842 			return 1;
1843 
1844 		case 32:	/* cmpl */
1845 			val = regs->gpr[ra];
1846 			val2 = regs->gpr[rb];
1847 #ifdef __powerpc64__
1848 			if ((rd & 1) == 0) {
1849 				/* word (32-bit) compare */
1850 				val = (unsigned int) val;
1851 				val2 = (unsigned int) val2;
1852 			}
1853 #endif
1854 			do_cmp_unsigned(regs, op, val, val2, rd >> 2);
1855 			return 1;
1856 
1857 		case 508: /* cmpb */
1858 			do_cmpb(regs, op, regs->gpr[rd], regs->gpr[rb]);
1859 			goto logical_done_nocc;
1860 
1861 /*
1862  * Arithmetic instructions
1863  */
1864 		case 8:	/* subfc */
1865 			add_with_carry(regs, op, rd, ~regs->gpr[ra],
1866 				       regs->gpr[rb], 1);
1867 			goto arith_done;
1868 #ifdef __powerpc64__
1869 		case 9:	/* mulhdu */
1870 			asm("mulhdu %0,%1,%2" : "=r" (op->val) :
1871 			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1872 			goto arith_done;
1873 #endif
1874 		case 10:	/* addc */
1875 			add_with_carry(regs, op, rd, regs->gpr[ra],
1876 				       regs->gpr[rb], 0);
1877 			goto arith_done;
1878 
1879 		case 11:	/* mulhwu */
1880 			asm("mulhwu %0,%1,%2" : "=r" (op->val) :
1881 			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1882 			goto arith_done;
1883 
1884 		case 40:	/* subf */
1885 			op->val = regs->gpr[rb] - regs->gpr[ra];
1886 			goto arith_done;
1887 #ifdef __powerpc64__
1888 		case 73:	/* mulhd */
1889 			asm("mulhd %0,%1,%2" : "=r" (op->val) :
1890 			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1891 			goto arith_done;
1892 #endif
1893 		case 75:	/* mulhw */
1894 			asm("mulhw %0,%1,%2" : "=r" (op->val) :
1895 			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1896 			goto arith_done;
1897 
1898 		case 104:	/* neg */
1899 			op->val = -regs->gpr[ra];
1900 			goto arith_done;
1901 
1902 		case 136:	/* subfe */
1903 			add_with_carry(regs, op, rd, ~regs->gpr[ra],
1904 				       regs->gpr[rb], regs->xer & XER_CA);
1905 			goto arith_done;
1906 
1907 		case 138:	/* adde */
1908 			add_with_carry(regs, op, rd, regs->gpr[ra],
1909 				       regs->gpr[rb], regs->xer & XER_CA);
1910 			goto arith_done;
1911 
1912 		case 200:	/* subfze */
1913 			add_with_carry(regs, op, rd, ~regs->gpr[ra], 0L,
1914 				       regs->xer & XER_CA);
1915 			goto arith_done;
1916 
1917 		case 202:	/* addze */
1918 			add_with_carry(regs, op, rd, regs->gpr[ra], 0L,
1919 				       regs->xer & XER_CA);
1920 			goto arith_done;
1921 
1922 		case 232:	/* subfme */
1923 			add_with_carry(regs, op, rd, ~regs->gpr[ra], -1L,
1924 				       regs->xer & XER_CA);
1925 			goto arith_done;
1926 #ifdef __powerpc64__
1927 		case 233:	/* mulld */
1928 			op->val = regs->gpr[ra] * regs->gpr[rb];
1929 			goto arith_done;
1930 #endif
1931 		case 234:	/* addme */
1932 			add_with_carry(regs, op, rd, regs->gpr[ra], -1L,
1933 				       regs->xer & XER_CA);
1934 			goto arith_done;
1935 
1936 		case 235:	/* mullw */
1937 			op->val = (long)(int) regs->gpr[ra] *
1938 				(int) regs->gpr[rb];
1939 
1940 			goto arith_done;
1941 #ifdef __powerpc64__
1942 		case 265:	/* modud */
1943 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
1944 				goto unknown_opcode;
1945 			op->val = regs->gpr[ra] % regs->gpr[rb];
1946 			goto compute_done;
1947 #endif
1948 		case 266:	/* add */
1949 			op->val = regs->gpr[ra] + regs->gpr[rb];
1950 			goto arith_done;
1951 
1952 		case 267:	/* moduw */
1953 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
1954 				goto unknown_opcode;
1955 			op->val = (unsigned int) regs->gpr[ra] %
1956 				(unsigned int) regs->gpr[rb];
1957 			goto compute_done;
1958 #ifdef __powerpc64__
1959 		case 457:	/* divdu */
1960 			op->val = regs->gpr[ra] / regs->gpr[rb];
1961 			goto arith_done;
1962 #endif
1963 		case 459:	/* divwu */
1964 			op->val = (unsigned int) regs->gpr[ra] /
1965 				(unsigned int) regs->gpr[rb];
1966 			goto arith_done;
1967 #ifdef __powerpc64__
1968 		case 489:	/* divd */
1969 			op->val = (long int) regs->gpr[ra] /
1970 				(long int) regs->gpr[rb];
1971 			goto arith_done;
1972 #endif
1973 		case 491:	/* divw */
1974 			op->val = (int) regs->gpr[ra] /
1975 				(int) regs->gpr[rb];
1976 			goto arith_done;
1977 #ifdef __powerpc64__
1978 		case 425:	/* divde[.] */
1979 			asm volatile(PPC_DIVDE(%0, %1, %2) :
1980 				"=r" (op->val) : "r" (regs->gpr[ra]),
1981 				"r" (regs->gpr[rb]));
1982 			goto arith_done;
1983 		case 393:	/* divdeu[.] */
1984 			asm volatile(PPC_DIVDEU(%0, %1, %2) :
1985 				"=r" (op->val) : "r" (regs->gpr[ra]),
1986 				"r" (regs->gpr[rb]));
1987 			goto arith_done;
1988 #endif
1989 		case 755:	/* darn */
1990 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
1991 				goto unknown_opcode;
1992 			switch (ra & 0x3) {
1993 			case 0:
1994 				/* 32-bit conditioned */
1995 				asm volatile(PPC_DARN(%0, 0) : "=r" (op->val));
1996 				goto compute_done;
1997 
1998 			case 1:
1999 				/* 64-bit conditioned */
2000 				asm volatile(PPC_DARN(%0, 1) : "=r" (op->val));
2001 				goto compute_done;
2002 
2003 			case 2:
2004 				/* 64-bit raw */
2005 				asm volatile(PPC_DARN(%0, 2) : "=r" (op->val));
2006 				goto compute_done;
2007 			}
2008 
2009 			goto unknown_opcode;
2010 #ifdef __powerpc64__
2011 		case 777:	/* modsd */
2012 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2013 				goto unknown_opcode;
2014 			op->val = (long int) regs->gpr[ra] %
2015 				(long int) regs->gpr[rb];
2016 			goto compute_done;
2017 #endif
2018 		case 779:	/* modsw */
2019 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2020 				goto unknown_opcode;
2021 			op->val = (int) regs->gpr[ra] %
2022 				(int) regs->gpr[rb];
2023 			goto compute_done;
2024 
2025 
2026 /*
2027  * Logical instructions
2028  */
2029 		case 26:	/* cntlzw */
2030 			val = (unsigned int) regs->gpr[rd];
2031 			op->val = ( val ? __builtin_clz(val) : 32 );
2032 			goto logical_done;
2033 #ifdef __powerpc64__
2034 		case 58:	/* cntlzd */
2035 			val = regs->gpr[rd];
2036 			op->val = ( val ? __builtin_clzl(val) : 64 );
2037 			goto logical_done;
2038 #endif
2039 		case 28:	/* and */
2040 			op->val = regs->gpr[rd] & regs->gpr[rb];
2041 			goto logical_done;
2042 
2043 		case 60:	/* andc */
2044 			op->val = regs->gpr[rd] & ~regs->gpr[rb];
2045 			goto logical_done;
2046 
2047 		case 122:	/* popcntb */
2048 			do_popcnt(regs, op, regs->gpr[rd], 8);
2049 			goto logical_done_nocc;
2050 
2051 		case 124:	/* nor */
2052 			op->val = ~(regs->gpr[rd] | regs->gpr[rb]);
2053 			goto logical_done;
2054 
2055 		case 154:	/* prtyw */
2056 			do_prty(regs, op, regs->gpr[rd], 32);
2057 			goto logical_done_nocc;
2058 
2059 		case 186:	/* prtyd */
2060 			do_prty(regs, op, regs->gpr[rd], 64);
2061 			goto logical_done_nocc;
2062 #ifdef CONFIG_PPC64
2063 		case 252:	/* bpermd */
2064 			do_bpermd(regs, op, regs->gpr[rd], regs->gpr[rb]);
2065 			goto logical_done_nocc;
2066 #endif
2067 		case 284:	/* xor */
2068 			op->val = ~(regs->gpr[rd] ^ regs->gpr[rb]);
2069 			goto logical_done;
2070 
2071 		case 316:	/* xor */
2072 			op->val = regs->gpr[rd] ^ regs->gpr[rb];
2073 			goto logical_done;
2074 
2075 		case 378:	/* popcntw */
2076 			do_popcnt(regs, op, regs->gpr[rd], 32);
2077 			goto logical_done_nocc;
2078 
2079 		case 412:	/* orc */
2080 			op->val = regs->gpr[rd] | ~regs->gpr[rb];
2081 			goto logical_done;
2082 
2083 		case 444:	/* or */
2084 			op->val = regs->gpr[rd] | regs->gpr[rb];
2085 			goto logical_done;
2086 
2087 		case 476:	/* nand */
2088 			op->val = ~(regs->gpr[rd] & regs->gpr[rb]);
2089 			goto logical_done;
2090 #ifdef CONFIG_PPC64
2091 		case 506:	/* popcntd */
2092 			do_popcnt(regs, op, regs->gpr[rd], 64);
2093 			goto logical_done_nocc;
2094 #endif
2095 		case 538:	/* cnttzw */
2096 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2097 				goto unknown_opcode;
2098 			val = (unsigned int) regs->gpr[rd];
2099 			op->val = (val ? __builtin_ctz(val) : 32);
2100 			goto logical_done;
2101 #ifdef __powerpc64__
2102 		case 570:	/* cnttzd */
2103 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2104 				goto unknown_opcode;
2105 			val = regs->gpr[rd];
2106 			op->val = (val ? __builtin_ctzl(val) : 64);
2107 			goto logical_done;
2108 #endif
2109 		case 922:	/* extsh */
2110 			op->val = (signed short) regs->gpr[rd];
2111 			goto logical_done;
2112 
2113 		case 954:	/* extsb */
2114 			op->val = (signed char) regs->gpr[rd];
2115 			goto logical_done;
2116 #ifdef __powerpc64__
2117 		case 986:	/* extsw */
2118 			op->val = (signed int) regs->gpr[rd];
2119 			goto logical_done;
2120 #endif
2121 
2122 /*
2123  * Shift instructions
2124  */
2125 		case 24:	/* slw */
2126 			sh = regs->gpr[rb] & 0x3f;
2127 			if (sh < 32)
2128 				op->val = (regs->gpr[rd] << sh) & 0xffffffffUL;
2129 			else
2130 				op->val = 0;
2131 			goto logical_done;
2132 
2133 		case 536:	/* srw */
2134 			sh = regs->gpr[rb] & 0x3f;
2135 			if (sh < 32)
2136 				op->val = (regs->gpr[rd] & 0xffffffffUL) >> sh;
2137 			else
2138 				op->val = 0;
2139 			goto logical_done;
2140 
2141 		case 792:	/* sraw */
2142 			op->type = COMPUTE + SETREG + SETXER;
2143 			sh = regs->gpr[rb] & 0x3f;
2144 			ival = (signed int) regs->gpr[rd];
2145 			op->val = ival >> (sh < 32 ? sh : 31);
2146 			op->xerval = regs->xer;
2147 			if (ival < 0 && (sh >= 32 || (ival & ((1ul << sh) - 1)) != 0))
2148 				op->xerval |= XER_CA;
2149 			else
2150 				op->xerval &= ~XER_CA;
2151 			set_ca32(op, op->xerval & XER_CA);
2152 			goto logical_done;
2153 
2154 		case 824:	/* srawi */
2155 			op->type = COMPUTE + SETREG + SETXER;
2156 			sh = rb;
2157 			ival = (signed int) regs->gpr[rd];
2158 			op->val = ival >> sh;
2159 			op->xerval = regs->xer;
2160 			if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
2161 				op->xerval |= XER_CA;
2162 			else
2163 				op->xerval &= ~XER_CA;
2164 			set_ca32(op, op->xerval & XER_CA);
2165 			goto logical_done;
2166 
2167 #ifdef __powerpc64__
2168 		case 27:	/* sld */
2169 			sh = regs->gpr[rb] & 0x7f;
2170 			if (sh < 64)
2171 				op->val = regs->gpr[rd] << sh;
2172 			else
2173 				op->val = 0;
2174 			goto logical_done;
2175 
2176 		case 539:	/* srd */
2177 			sh = regs->gpr[rb] & 0x7f;
2178 			if (sh < 64)
2179 				op->val = regs->gpr[rd] >> sh;
2180 			else
2181 				op->val = 0;
2182 			goto logical_done;
2183 
2184 		case 794:	/* srad */
2185 			op->type = COMPUTE + SETREG + SETXER;
2186 			sh = regs->gpr[rb] & 0x7f;
2187 			ival = (signed long int) regs->gpr[rd];
2188 			op->val = ival >> (sh < 64 ? sh : 63);
2189 			op->xerval = regs->xer;
2190 			if (ival < 0 && (sh >= 64 || (ival & ((1ul << sh) - 1)) != 0))
2191 				op->xerval |= XER_CA;
2192 			else
2193 				op->xerval &= ~XER_CA;
2194 			set_ca32(op, op->xerval & XER_CA);
2195 			goto logical_done;
2196 
2197 		case 826:	/* sradi with sh_5 = 0 */
2198 		case 827:	/* sradi with sh_5 = 1 */
2199 			op->type = COMPUTE + SETREG + SETXER;
2200 			sh = rb | ((word & 2) << 4);
2201 			ival = (signed long int) regs->gpr[rd];
2202 			op->val = ival >> sh;
2203 			op->xerval = regs->xer;
2204 			if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
2205 				op->xerval |= XER_CA;
2206 			else
2207 				op->xerval &= ~XER_CA;
2208 			set_ca32(op, op->xerval & XER_CA);
2209 			goto logical_done;
2210 
2211 		case 890:	/* extswsli with sh_5 = 0 */
2212 		case 891:	/* extswsli with sh_5 = 1 */
2213 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2214 				goto unknown_opcode;
2215 			op->type = COMPUTE + SETREG;
2216 			sh = rb | ((word & 2) << 4);
2217 			val = (signed int) regs->gpr[rd];
2218 			if (sh)
2219 				op->val = ROTATE(val, sh) & MASK64(0, 63 - sh);
2220 			else
2221 				op->val = val;
2222 			goto logical_done;
2223 
2224 #endif /* __powerpc64__ */
2225 
2226 /*
2227  * Cache instructions
2228  */
2229 		case 54:	/* dcbst */
2230 			op->type = MKOP(CACHEOP, DCBST, 0);
2231 			op->ea = xform_ea(word, regs);
2232 			return 0;
2233 
2234 		case 86:	/* dcbf */
2235 			op->type = MKOP(CACHEOP, DCBF, 0);
2236 			op->ea = xform_ea(word, regs);
2237 			return 0;
2238 
2239 		case 246:	/* dcbtst */
2240 			op->type = MKOP(CACHEOP, DCBTST, 0);
2241 			op->ea = xform_ea(word, regs);
2242 			op->reg = rd;
2243 			return 0;
2244 
2245 		case 278:	/* dcbt */
2246 			op->type = MKOP(CACHEOP, DCBTST, 0);
2247 			op->ea = xform_ea(word, regs);
2248 			op->reg = rd;
2249 			return 0;
2250 
2251 		case 982:	/* icbi */
2252 			op->type = MKOP(CACHEOP, ICBI, 0);
2253 			op->ea = xform_ea(word, regs);
2254 			return 0;
2255 
2256 		case 1014:	/* dcbz */
2257 			op->type = MKOP(CACHEOP, DCBZ, 0);
2258 			op->ea = xform_ea(word, regs);
2259 			return 0;
2260 		}
2261 		break;
2262 	}
2263 
2264 /*
2265  * Loads and stores.
2266  */
2267 	op->type = UNKNOWN;
2268 	op->update_reg = ra;
2269 	op->reg = rd;
2270 	op->val = regs->gpr[rd];
2271 	u = (word >> 20) & UPDATE;
2272 	op->vsx_flags = 0;
2273 
2274 	switch (opcode) {
2275 	case 31:
2276 		u = word & UPDATE;
2277 		op->ea = xform_ea(word, regs);
2278 		switch ((word >> 1) & 0x3ff) {
2279 		case 20:	/* lwarx */
2280 			op->type = MKOP(LARX, 0, 4);
2281 			break;
2282 
2283 		case 150:	/* stwcx. */
2284 			op->type = MKOP(STCX, 0, 4);
2285 			break;
2286 
2287 #ifdef __powerpc64__
2288 		case 84:	/* ldarx */
2289 			op->type = MKOP(LARX, 0, 8);
2290 			break;
2291 
2292 		case 214:	/* stdcx. */
2293 			op->type = MKOP(STCX, 0, 8);
2294 			break;
2295 
2296 		case 52:	/* lbarx */
2297 			op->type = MKOP(LARX, 0, 1);
2298 			break;
2299 
2300 		case 694:	/* stbcx. */
2301 			op->type = MKOP(STCX, 0, 1);
2302 			break;
2303 
2304 		case 116:	/* lharx */
2305 			op->type = MKOP(LARX, 0, 2);
2306 			break;
2307 
2308 		case 726:	/* sthcx. */
2309 			op->type = MKOP(STCX, 0, 2);
2310 			break;
2311 
2312 		case 276:	/* lqarx */
2313 			if (!((rd & 1) || rd == ra || rd == rb))
2314 				op->type = MKOP(LARX, 0, 16);
2315 			break;
2316 
2317 		case 182:	/* stqcx. */
2318 			if (!(rd & 1))
2319 				op->type = MKOP(STCX, 0, 16);
2320 			break;
2321 #endif
2322 
2323 		case 23:	/* lwzx */
2324 		case 55:	/* lwzux */
2325 			op->type = MKOP(LOAD, u, 4);
2326 			break;
2327 
2328 		case 87:	/* lbzx */
2329 		case 119:	/* lbzux */
2330 			op->type = MKOP(LOAD, u, 1);
2331 			break;
2332 
2333 #ifdef CONFIG_ALTIVEC
2334 		/*
2335 		 * Note: for the load/store vector element instructions,
2336 		 * bits of the EA say which field of the VMX register to use.
2337 		 */
2338 		case 7:		/* lvebx */
2339 			op->type = MKOP(LOAD_VMX, 0, 1);
2340 			op->element_size = 1;
2341 			break;
2342 
2343 		case 39:	/* lvehx */
2344 			op->type = MKOP(LOAD_VMX, 0, 2);
2345 			op->element_size = 2;
2346 			break;
2347 
2348 		case 71:	/* lvewx */
2349 			op->type = MKOP(LOAD_VMX, 0, 4);
2350 			op->element_size = 4;
2351 			break;
2352 
2353 		case 103:	/* lvx */
2354 		case 359:	/* lvxl */
2355 			op->type = MKOP(LOAD_VMX, 0, 16);
2356 			op->element_size = 16;
2357 			break;
2358 
2359 		case 135:	/* stvebx */
2360 			op->type = MKOP(STORE_VMX, 0, 1);
2361 			op->element_size = 1;
2362 			break;
2363 
2364 		case 167:	/* stvehx */
2365 			op->type = MKOP(STORE_VMX, 0, 2);
2366 			op->element_size = 2;
2367 			break;
2368 
2369 		case 199:	/* stvewx */
2370 			op->type = MKOP(STORE_VMX, 0, 4);
2371 			op->element_size = 4;
2372 			break;
2373 
2374 		case 231:	/* stvx */
2375 		case 487:	/* stvxl */
2376 			op->type = MKOP(STORE_VMX, 0, 16);
2377 			break;
2378 #endif /* CONFIG_ALTIVEC */
2379 
2380 #ifdef __powerpc64__
2381 		case 21:	/* ldx */
2382 		case 53:	/* ldux */
2383 			op->type = MKOP(LOAD, u, 8);
2384 			break;
2385 
2386 		case 149:	/* stdx */
2387 		case 181:	/* stdux */
2388 			op->type = MKOP(STORE, u, 8);
2389 			break;
2390 #endif
2391 
2392 		case 151:	/* stwx */
2393 		case 183:	/* stwux */
2394 			op->type = MKOP(STORE, u, 4);
2395 			break;
2396 
2397 		case 215:	/* stbx */
2398 		case 247:	/* stbux */
2399 			op->type = MKOP(STORE, u, 1);
2400 			break;
2401 
2402 		case 279:	/* lhzx */
2403 		case 311:	/* lhzux */
2404 			op->type = MKOP(LOAD, u, 2);
2405 			break;
2406 
2407 #ifdef __powerpc64__
2408 		case 341:	/* lwax */
2409 		case 373:	/* lwaux */
2410 			op->type = MKOP(LOAD, SIGNEXT | u, 4);
2411 			break;
2412 #endif
2413 
2414 		case 343:	/* lhax */
2415 		case 375:	/* lhaux */
2416 			op->type = MKOP(LOAD, SIGNEXT | u, 2);
2417 			break;
2418 
2419 		case 407:	/* sthx */
2420 		case 439:	/* sthux */
2421 			op->type = MKOP(STORE, u, 2);
2422 			break;
2423 
2424 #ifdef __powerpc64__
2425 		case 532:	/* ldbrx */
2426 			op->type = MKOP(LOAD, BYTEREV, 8);
2427 			break;
2428 
2429 #endif
2430 		case 533:	/* lswx */
2431 			op->type = MKOP(LOAD_MULTI, 0, regs->xer & 0x7f);
2432 			break;
2433 
2434 		case 534:	/* lwbrx */
2435 			op->type = MKOP(LOAD, BYTEREV, 4);
2436 			break;
2437 
2438 		case 597:	/* lswi */
2439 			if (rb == 0)
2440 				rb = 32;	/* # bytes to load */
2441 			op->type = MKOP(LOAD_MULTI, 0, rb);
2442 			op->ea = ra ? regs->gpr[ra] : 0;
2443 			break;
2444 
2445 #ifdef CONFIG_PPC_FPU
2446 		case 535:	/* lfsx */
2447 		case 567:	/* lfsux */
2448 			op->type = MKOP(LOAD_FP, u | FPCONV, 4);
2449 			break;
2450 
2451 		case 599:	/* lfdx */
2452 		case 631:	/* lfdux */
2453 			op->type = MKOP(LOAD_FP, u, 8);
2454 			break;
2455 
2456 		case 663:	/* stfsx */
2457 		case 695:	/* stfsux */
2458 			op->type = MKOP(STORE_FP, u | FPCONV, 4);
2459 			break;
2460 
2461 		case 727:	/* stfdx */
2462 		case 759:	/* stfdux */
2463 			op->type = MKOP(STORE_FP, u, 8);
2464 			break;
2465 
2466 #ifdef __powerpc64__
2467 		case 791:	/* lfdpx */
2468 			op->type = MKOP(LOAD_FP, 0, 16);
2469 			break;
2470 
2471 		case 855:	/* lfiwax */
2472 			op->type = MKOP(LOAD_FP, SIGNEXT, 4);
2473 			break;
2474 
2475 		case 887:	/* lfiwzx */
2476 			op->type = MKOP(LOAD_FP, 0, 4);
2477 			break;
2478 
2479 		case 919:	/* stfdpx */
2480 			op->type = MKOP(STORE_FP, 0, 16);
2481 			break;
2482 
2483 		case 983:	/* stfiwx */
2484 			op->type = MKOP(STORE_FP, 0, 4);
2485 			break;
2486 #endif /* __powerpc64 */
2487 #endif /* CONFIG_PPC_FPU */
2488 
2489 #ifdef __powerpc64__
2490 		case 660:	/* stdbrx */
2491 			op->type = MKOP(STORE, BYTEREV, 8);
2492 			op->val = byterev_8(regs->gpr[rd]);
2493 			break;
2494 
2495 #endif
2496 		case 661:	/* stswx */
2497 			op->type = MKOP(STORE_MULTI, 0, regs->xer & 0x7f);
2498 			break;
2499 
2500 		case 662:	/* stwbrx */
2501 			op->type = MKOP(STORE, BYTEREV, 4);
2502 			op->val = byterev_4(regs->gpr[rd]);
2503 			break;
2504 
2505 		case 725:	/* stswi */
2506 			if (rb == 0)
2507 				rb = 32;	/* # bytes to store */
2508 			op->type = MKOP(STORE_MULTI, 0, rb);
2509 			op->ea = ra ? regs->gpr[ra] : 0;
2510 			break;
2511 
2512 		case 790:	/* lhbrx */
2513 			op->type = MKOP(LOAD, BYTEREV, 2);
2514 			break;
2515 
2516 		case 918:	/* sthbrx */
2517 			op->type = MKOP(STORE, BYTEREV, 2);
2518 			op->val = byterev_2(regs->gpr[rd]);
2519 			break;
2520 
2521 #ifdef CONFIG_VSX
2522 		case 12:	/* lxsiwzx */
2523 			op->reg = rd | ((word & 1) << 5);
2524 			op->type = MKOP(LOAD_VSX, 0, 4);
2525 			op->element_size = 8;
2526 			break;
2527 
2528 		case 76:	/* lxsiwax */
2529 			op->reg = rd | ((word & 1) << 5);
2530 			op->type = MKOP(LOAD_VSX, SIGNEXT, 4);
2531 			op->element_size = 8;
2532 			break;
2533 
2534 		case 140:	/* stxsiwx */
2535 			op->reg = rd | ((word & 1) << 5);
2536 			op->type = MKOP(STORE_VSX, 0, 4);
2537 			op->element_size = 8;
2538 			break;
2539 
2540 		case 268:	/* lxvx */
2541 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2542 				goto unknown_opcode;
2543 			op->reg = rd | ((word & 1) << 5);
2544 			op->type = MKOP(LOAD_VSX, 0, 16);
2545 			op->element_size = 16;
2546 			op->vsx_flags = VSX_CHECK_VEC;
2547 			break;
2548 
2549 		case 269:	/* lxvl */
2550 		case 301: {	/* lxvll */
2551 			int nb;
2552 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2553 				goto unknown_opcode;
2554 			op->reg = rd | ((word & 1) << 5);
2555 			op->ea = ra ? regs->gpr[ra] : 0;
2556 			nb = regs->gpr[rb] & 0xff;
2557 			if (nb > 16)
2558 				nb = 16;
2559 			op->type = MKOP(LOAD_VSX, 0, nb);
2560 			op->element_size = 16;
2561 			op->vsx_flags = ((word & 0x20) ? VSX_LDLEFT : 0) |
2562 				VSX_CHECK_VEC;
2563 			break;
2564 		}
2565 		case 332:	/* lxvdsx */
2566 			op->reg = rd | ((word & 1) << 5);
2567 			op->type = MKOP(LOAD_VSX, 0, 8);
2568 			op->element_size = 8;
2569 			op->vsx_flags = VSX_SPLAT;
2570 			break;
2571 
2572 		case 333:       /* lxvpx */
2573 			if (!cpu_has_feature(CPU_FTR_ARCH_31))
2574 				goto unknown_opcode;
2575 			op->reg = VSX_REGISTER_XTP(rd);
2576 			op->type = MKOP(LOAD_VSX, 0, 32);
2577 			op->element_size = 32;
2578 			break;
2579 
2580 		case 364:	/* lxvwsx */
2581 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2582 				goto unknown_opcode;
2583 			op->reg = rd | ((word & 1) << 5);
2584 			op->type = MKOP(LOAD_VSX, 0, 4);
2585 			op->element_size = 4;
2586 			op->vsx_flags = VSX_SPLAT | VSX_CHECK_VEC;
2587 			break;
2588 
2589 		case 396:	/* stxvx */
2590 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2591 				goto unknown_opcode;
2592 			op->reg = rd | ((word & 1) << 5);
2593 			op->type = MKOP(STORE_VSX, 0, 16);
2594 			op->element_size = 16;
2595 			op->vsx_flags = VSX_CHECK_VEC;
2596 			break;
2597 
2598 		case 397:	/* stxvl */
2599 		case 429: {	/* stxvll */
2600 			int nb;
2601 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2602 				goto unknown_opcode;
2603 			op->reg = rd | ((word & 1) << 5);
2604 			op->ea = ra ? regs->gpr[ra] : 0;
2605 			nb = regs->gpr[rb] & 0xff;
2606 			if (nb > 16)
2607 				nb = 16;
2608 			op->type = MKOP(STORE_VSX, 0, nb);
2609 			op->element_size = 16;
2610 			op->vsx_flags = ((word & 0x20) ? VSX_LDLEFT : 0) |
2611 				VSX_CHECK_VEC;
2612 			break;
2613 		}
2614 		case 461:       /* stxvpx */
2615 			if (!cpu_has_feature(CPU_FTR_ARCH_31))
2616 				goto unknown_opcode;
2617 			op->reg = VSX_REGISTER_XTP(rd);
2618 			op->type = MKOP(STORE_VSX, 0, 32);
2619 			op->element_size = 32;
2620 			break;
2621 		case 524:	/* lxsspx */
2622 			op->reg = rd | ((word & 1) << 5);
2623 			op->type = MKOP(LOAD_VSX, 0, 4);
2624 			op->element_size = 8;
2625 			op->vsx_flags = VSX_FPCONV;
2626 			break;
2627 
2628 		case 588:	/* lxsdx */
2629 			op->reg = rd | ((word & 1) << 5);
2630 			op->type = MKOP(LOAD_VSX, 0, 8);
2631 			op->element_size = 8;
2632 			break;
2633 
2634 		case 652:	/* stxsspx */
2635 			op->reg = rd | ((word & 1) << 5);
2636 			op->type = MKOP(STORE_VSX, 0, 4);
2637 			op->element_size = 8;
2638 			op->vsx_flags = VSX_FPCONV;
2639 			break;
2640 
2641 		case 716:	/* stxsdx */
2642 			op->reg = rd | ((word & 1) << 5);
2643 			op->type = MKOP(STORE_VSX, 0, 8);
2644 			op->element_size = 8;
2645 			break;
2646 
2647 		case 780:	/* lxvw4x */
2648 			op->reg = rd | ((word & 1) << 5);
2649 			op->type = MKOP(LOAD_VSX, 0, 16);
2650 			op->element_size = 4;
2651 			break;
2652 
2653 		case 781:	/* lxsibzx */
2654 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2655 				goto unknown_opcode;
2656 			op->reg = rd | ((word & 1) << 5);
2657 			op->type = MKOP(LOAD_VSX, 0, 1);
2658 			op->element_size = 8;
2659 			op->vsx_flags = VSX_CHECK_VEC;
2660 			break;
2661 
2662 		case 812:	/* lxvh8x */
2663 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2664 				goto unknown_opcode;
2665 			op->reg = rd | ((word & 1) << 5);
2666 			op->type = MKOP(LOAD_VSX, 0, 16);
2667 			op->element_size = 2;
2668 			op->vsx_flags = VSX_CHECK_VEC;
2669 			break;
2670 
2671 		case 813:	/* lxsihzx */
2672 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2673 				goto unknown_opcode;
2674 			op->reg = rd | ((word & 1) << 5);
2675 			op->type = MKOP(LOAD_VSX, 0, 2);
2676 			op->element_size = 8;
2677 			op->vsx_flags = VSX_CHECK_VEC;
2678 			break;
2679 
2680 		case 844:	/* lxvd2x */
2681 			op->reg = rd | ((word & 1) << 5);
2682 			op->type = MKOP(LOAD_VSX, 0, 16);
2683 			op->element_size = 8;
2684 			break;
2685 
2686 		case 876:	/* lxvb16x */
2687 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2688 				goto unknown_opcode;
2689 			op->reg = rd | ((word & 1) << 5);
2690 			op->type = MKOP(LOAD_VSX, 0, 16);
2691 			op->element_size = 1;
2692 			op->vsx_flags = VSX_CHECK_VEC;
2693 			break;
2694 
2695 		case 908:	/* stxvw4x */
2696 			op->reg = rd | ((word & 1) << 5);
2697 			op->type = MKOP(STORE_VSX, 0, 16);
2698 			op->element_size = 4;
2699 			break;
2700 
2701 		case 909:	/* stxsibx */
2702 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2703 				goto unknown_opcode;
2704 			op->reg = rd | ((word & 1) << 5);
2705 			op->type = MKOP(STORE_VSX, 0, 1);
2706 			op->element_size = 8;
2707 			op->vsx_flags = VSX_CHECK_VEC;
2708 			break;
2709 
2710 		case 940:	/* stxvh8x */
2711 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2712 				goto unknown_opcode;
2713 			op->reg = rd | ((word & 1) << 5);
2714 			op->type = MKOP(STORE_VSX, 0, 16);
2715 			op->element_size = 2;
2716 			op->vsx_flags = VSX_CHECK_VEC;
2717 			break;
2718 
2719 		case 941:	/* stxsihx */
2720 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2721 				goto unknown_opcode;
2722 			op->reg = rd | ((word & 1) << 5);
2723 			op->type = MKOP(STORE_VSX, 0, 2);
2724 			op->element_size = 8;
2725 			op->vsx_flags = VSX_CHECK_VEC;
2726 			break;
2727 
2728 		case 972:	/* stxvd2x */
2729 			op->reg = rd | ((word & 1) << 5);
2730 			op->type = MKOP(STORE_VSX, 0, 16);
2731 			op->element_size = 8;
2732 			break;
2733 
2734 		case 1004:	/* stxvb16x */
2735 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2736 				goto unknown_opcode;
2737 			op->reg = rd | ((word & 1) << 5);
2738 			op->type = MKOP(STORE_VSX, 0, 16);
2739 			op->element_size = 1;
2740 			op->vsx_flags = VSX_CHECK_VEC;
2741 			break;
2742 
2743 #endif /* CONFIG_VSX */
2744 		}
2745 		break;
2746 
2747 	case 32:	/* lwz */
2748 	case 33:	/* lwzu */
2749 		op->type = MKOP(LOAD, u, 4);
2750 		op->ea = dform_ea(word, regs);
2751 		break;
2752 
2753 	case 34:	/* lbz */
2754 	case 35:	/* lbzu */
2755 		op->type = MKOP(LOAD, u, 1);
2756 		op->ea = dform_ea(word, regs);
2757 		break;
2758 
2759 	case 36:	/* stw */
2760 	case 37:	/* stwu */
2761 		op->type = MKOP(STORE, u, 4);
2762 		op->ea = dform_ea(word, regs);
2763 		break;
2764 
2765 	case 38:	/* stb */
2766 	case 39:	/* stbu */
2767 		op->type = MKOP(STORE, u, 1);
2768 		op->ea = dform_ea(word, regs);
2769 		break;
2770 
2771 	case 40:	/* lhz */
2772 	case 41:	/* lhzu */
2773 		op->type = MKOP(LOAD, u, 2);
2774 		op->ea = dform_ea(word, regs);
2775 		break;
2776 
2777 	case 42:	/* lha */
2778 	case 43:	/* lhau */
2779 		op->type = MKOP(LOAD, SIGNEXT | u, 2);
2780 		op->ea = dform_ea(word, regs);
2781 		break;
2782 
2783 	case 44:	/* sth */
2784 	case 45:	/* sthu */
2785 		op->type = MKOP(STORE, u, 2);
2786 		op->ea = dform_ea(word, regs);
2787 		break;
2788 
2789 	case 46:	/* lmw */
2790 		if (ra >= rd)
2791 			break;		/* invalid form, ra in range to load */
2792 		op->type = MKOP(LOAD_MULTI, 0, 4 * (32 - rd));
2793 		op->ea = dform_ea(word, regs);
2794 		break;
2795 
2796 	case 47:	/* stmw */
2797 		op->type = MKOP(STORE_MULTI, 0, 4 * (32 - rd));
2798 		op->ea = dform_ea(word, regs);
2799 		break;
2800 
2801 #ifdef CONFIG_PPC_FPU
2802 	case 48:	/* lfs */
2803 	case 49:	/* lfsu */
2804 		op->type = MKOP(LOAD_FP, u | FPCONV, 4);
2805 		op->ea = dform_ea(word, regs);
2806 		break;
2807 
2808 	case 50:	/* lfd */
2809 	case 51:	/* lfdu */
2810 		op->type = MKOP(LOAD_FP, u, 8);
2811 		op->ea = dform_ea(word, regs);
2812 		break;
2813 
2814 	case 52:	/* stfs */
2815 	case 53:	/* stfsu */
2816 		op->type = MKOP(STORE_FP, u | FPCONV, 4);
2817 		op->ea = dform_ea(word, regs);
2818 		break;
2819 
2820 	case 54:	/* stfd */
2821 	case 55:	/* stfdu */
2822 		op->type = MKOP(STORE_FP, u, 8);
2823 		op->ea = dform_ea(word, regs);
2824 		break;
2825 #endif
2826 
2827 #ifdef __powerpc64__
2828 	case 56:	/* lq */
2829 		if (!((rd & 1) || (rd == ra)))
2830 			op->type = MKOP(LOAD, 0, 16);
2831 		op->ea = dqform_ea(word, regs);
2832 		break;
2833 #endif
2834 
2835 #ifdef CONFIG_VSX
2836 	case 57:	/* lfdp, lxsd, lxssp */
2837 		op->ea = dsform_ea(word, regs);
2838 		switch (word & 3) {
2839 		case 0:		/* lfdp */
2840 			if (rd & 1)
2841 				break;		/* reg must be even */
2842 			op->type = MKOP(LOAD_FP, 0, 16);
2843 			break;
2844 		case 2:		/* lxsd */
2845 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2846 				goto unknown_opcode;
2847 			op->reg = rd + 32;
2848 			op->type = MKOP(LOAD_VSX, 0, 8);
2849 			op->element_size = 8;
2850 			op->vsx_flags = VSX_CHECK_VEC;
2851 			break;
2852 		case 3:		/* lxssp */
2853 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2854 				goto unknown_opcode;
2855 			op->reg = rd + 32;
2856 			op->type = MKOP(LOAD_VSX, 0, 4);
2857 			op->element_size = 8;
2858 			op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
2859 			break;
2860 		}
2861 		break;
2862 #endif /* CONFIG_VSX */
2863 
2864 #ifdef __powerpc64__
2865 	case 58:	/* ld[u], lwa */
2866 		op->ea = dsform_ea(word, regs);
2867 		switch (word & 3) {
2868 		case 0:		/* ld */
2869 			op->type = MKOP(LOAD, 0, 8);
2870 			break;
2871 		case 1:		/* ldu */
2872 			op->type = MKOP(LOAD, UPDATE, 8);
2873 			break;
2874 		case 2:		/* lwa */
2875 			op->type = MKOP(LOAD, SIGNEXT, 4);
2876 			break;
2877 		}
2878 		break;
2879 #endif
2880 
2881 #ifdef CONFIG_VSX
2882 	case 6:
2883 		if (!cpu_has_feature(CPU_FTR_ARCH_31))
2884 			goto unknown_opcode;
2885 		op->ea = dqform_ea(word, regs);
2886 		op->reg = VSX_REGISTER_XTP(rd);
2887 		op->element_size = 32;
2888 		switch (word & 0xf) {
2889 		case 0:         /* lxvp */
2890 			op->type = MKOP(LOAD_VSX, 0, 32);
2891 			break;
2892 		case 1:         /* stxvp */
2893 			op->type = MKOP(STORE_VSX, 0, 32);
2894 			break;
2895 		}
2896 		break;
2897 
2898 	case 61:	/* stfdp, lxv, stxsd, stxssp, stxv */
2899 		switch (word & 7) {
2900 		case 0:		/* stfdp with LSB of DS field = 0 */
2901 		case 4:		/* stfdp with LSB of DS field = 1 */
2902 			op->ea = dsform_ea(word, regs);
2903 			op->type = MKOP(STORE_FP, 0, 16);
2904 			break;
2905 
2906 		case 1:		/* lxv */
2907 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2908 				goto unknown_opcode;
2909 			op->ea = dqform_ea(word, regs);
2910 			if (word & 8)
2911 				op->reg = rd + 32;
2912 			op->type = MKOP(LOAD_VSX, 0, 16);
2913 			op->element_size = 16;
2914 			op->vsx_flags = VSX_CHECK_VEC;
2915 			break;
2916 
2917 		case 2:		/* stxsd with LSB of DS field = 0 */
2918 		case 6:		/* stxsd with LSB of DS field = 1 */
2919 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2920 				goto unknown_opcode;
2921 			op->ea = dsform_ea(word, regs);
2922 			op->reg = rd + 32;
2923 			op->type = MKOP(STORE_VSX, 0, 8);
2924 			op->element_size = 8;
2925 			op->vsx_flags = VSX_CHECK_VEC;
2926 			break;
2927 
2928 		case 3:		/* stxssp with LSB of DS field = 0 */
2929 		case 7:		/* stxssp with LSB of DS field = 1 */
2930 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2931 				goto unknown_opcode;
2932 			op->ea = dsform_ea(word, regs);
2933 			op->reg = rd + 32;
2934 			op->type = MKOP(STORE_VSX, 0, 4);
2935 			op->element_size = 8;
2936 			op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
2937 			break;
2938 
2939 		case 5:		/* stxv */
2940 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2941 				goto unknown_opcode;
2942 			op->ea = dqform_ea(word, regs);
2943 			if (word & 8)
2944 				op->reg = rd + 32;
2945 			op->type = MKOP(STORE_VSX, 0, 16);
2946 			op->element_size = 16;
2947 			op->vsx_flags = VSX_CHECK_VEC;
2948 			break;
2949 		}
2950 		break;
2951 #endif /* CONFIG_VSX */
2952 
2953 #ifdef __powerpc64__
2954 	case 62:	/* std[u] */
2955 		op->ea = dsform_ea(word, regs);
2956 		switch (word & 3) {
2957 		case 0:		/* std */
2958 			op->type = MKOP(STORE, 0, 8);
2959 			break;
2960 		case 1:		/* stdu */
2961 			op->type = MKOP(STORE, UPDATE, 8);
2962 			break;
2963 		case 2:		/* stq */
2964 			if (!(rd & 1))
2965 				op->type = MKOP(STORE, 0, 16);
2966 			break;
2967 		}
2968 		break;
2969 	case 1: /* Prefixed instructions */
2970 		if (!cpu_has_feature(CPU_FTR_ARCH_31))
2971 			goto unknown_opcode;
2972 
2973 		prefix_r = GET_PREFIX_R(word);
2974 		ra = GET_PREFIX_RA(suffix);
2975 		op->update_reg = ra;
2976 		rd = (suffix >> 21) & 0x1f;
2977 		op->reg = rd;
2978 		op->val = regs->gpr[rd];
2979 
2980 		suffixopcode = get_op(suffix);
2981 		prefixtype = (word >> 24) & 0x3;
2982 		switch (prefixtype) {
2983 		case 0: /* Type 00  Eight-Byte Load/Store */
2984 			if (prefix_r && ra)
2985 				break;
2986 			op->ea = mlsd_8lsd_ea(word, suffix, regs);
2987 			switch (suffixopcode) {
2988 			case 41:	/* plwa */
2989 				op->type = MKOP(LOAD, PREFIXED | SIGNEXT, 4);
2990 				break;
2991 #ifdef CONFIG_VSX
2992 			case 42:        /* plxsd */
2993 				op->reg = rd + 32;
2994 				op->type = MKOP(LOAD_VSX, PREFIXED, 8);
2995 				op->element_size = 8;
2996 				op->vsx_flags = VSX_CHECK_VEC;
2997 				break;
2998 			case 43:	/* plxssp */
2999 				op->reg = rd + 32;
3000 				op->type = MKOP(LOAD_VSX, PREFIXED, 4);
3001 				op->element_size = 8;
3002 				op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
3003 				break;
3004 			case 46:	/* pstxsd */
3005 				op->reg = rd + 32;
3006 				op->type = MKOP(STORE_VSX, PREFIXED, 8);
3007 				op->element_size = 8;
3008 				op->vsx_flags = VSX_CHECK_VEC;
3009 				break;
3010 			case 47:	/* pstxssp */
3011 				op->reg = rd + 32;
3012 				op->type = MKOP(STORE_VSX, PREFIXED, 4);
3013 				op->element_size = 8;
3014 				op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
3015 				break;
3016 			case 51:	/* plxv1 */
3017 				op->reg += 32;
3018 				fallthrough;
3019 			case 50:	/* plxv0 */
3020 				op->type = MKOP(LOAD_VSX, PREFIXED, 16);
3021 				op->element_size = 16;
3022 				op->vsx_flags = VSX_CHECK_VEC;
3023 				break;
3024 			case 55:	/* pstxv1 */
3025 				op->reg = rd + 32;
3026 				fallthrough;
3027 			case 54:	/* pstxv0 */
3028 				op->type = MKOP(STORE_VSX, PREFIXED, 16);
3029 				op->element_size = 16;
3030 				op->vsx_flags = VSX_CHECK_VEC;
3031 				break;
3032 #endif /* CONFIG_VSX */
3033 			case 56:        /* plq */
3034 				op->type = MKOP(LOAD, PREFIXED, 16);
3035 				break;
3036 			case 57:	/* pld */
3037 				op->type = MKOP(LOAD, PREFIXED, 8);
3038 				break;
3039 #ifdef CONFIG_VSX
3040 			case 58:        /* plxvp */
3041 				op->reg = VSX_REGISTER_XTP(rd);
3042 				op->type = MKOP(LOAD_VSX, PREFIXED, 32);
3043 				op->element_size = 32;
3044 				break;
3045 #endif /* CONFIG_VSX */
3046 			case 60:        /* pstq */
3047 				op->type = MKOP(STORE, PREFIXED, 16);
3048 				break;
3049 			case 61:	/* pstd */
3050 				op->type = MKOP(STORE, PREFIXED, 8);
3051 				break;
3052 #ifdef CONFIG_VSX
3053 			case 62:        /* pstxvp */
3054 				op->reg = VSX_REGISTER_XTP(rd);
3055 				op->type = MKOP(STORE_VSX, PREFIXED, 32);
3056 				op->element_size = 32;
3057 				break;
3058 #endif /* CONFIG_VSX */
3059 			}
3060 			break;
3061 		case 1: /* Type 01 Eight-Byte Register-to-Register */
3062 			break;
3063 		case 2: /* Type 10 Modified Load/Store */
3064 			if (prefix_r && ra)
3065 				break;
3066 			op->ea = mlsd_8lsd_ea(word, suffix, regs);
3067 			switch (suffixopcode) {
3068 			case 32:	/* plwz */
3069 				op->type = MKOP(LOAD, PREFIXED, 4);
3070 				break;
3071 			case 34:	/* plbz */
3072 				op->type = MKOP(LOAD, PREFIXED, 1);
3073 				break;
3074 			case 36:	/* pstw */
3075 				op->type = MKOP(STORE, PREFIXED, 4);
3076 				break;
3077 			case 38:	/* pstb */
3078 				op->type = MKOP(STORE, PREFIXED, 1);
3079 				break;
3080 			case 40:	/* plhz */
3081 				op->type = MKOP(LOAD, PREFIXED, 2);
3082 				break;
3083 			case 42:	/* plha */
3084 				op->type = MKOP(LOAD, PREFIXED | SIGNEXT, 2);
3085 				break;
3086 			case 44:	/* psth */
3087 				op->type = MKOP(STORE, PREFIXED, 2);
3088 				break;
3089 			case 48:        /* plfs */
3090 				op->type = MKOP(LOAD_FP, PREFIXED | FPCONV, 4);
3091 				break;
3092 			case 50:        /* plfd */
3093 				op->type = MKOP(LOAD_FP, PREFIXED, 8);
3094 				break;
3095 			case 52:        /* pstfs */
3096 				op->type = MKOP(STORE_FP, PREFIXED | FPCONV, 4);
3097 				break;
3098 			case 54:        /* pstfd */
3099 				op->type = MKOP(STORE_FP, PREFIXED, 8);
3100 				break;
3101 			}
3102 			break;
3103 		case 3: /* Type 11 Modified Register-to-Register */
3104 			break;
3105 		}
3106 #endif /* __powerpc64__ */
3107 
3108 	}
3109 
3110 	if (OP_IS_LOAD_STORE(op->type) && (op->type & UPDATE)) {
3111 		switch (GETTYPE(op->type)) {
3112 		case LOAD:
3113 			if (ra == rd)
3114 				goto unknown_opcode;
3115 			fallthrough;
3116 		case STORE:
3117 		case LOAD_FP:
3118 		case STORE_FP:
3119 			if (ra == 0)
3120 				goto unknown_opcode;
3121 		}
3122 	}
3123 
3124 #ifdef CONFIG_VSX
3125 	if ((GETTYPE(op->type) == LOAD_VSX ||
3126 	     GETTYPE(op->type) == STORE_VSX) &&
3127 	    !cpu_has_feature(CPU_FTR_VSX)) {
3128 		return -1;
3129 	}
3130 #endif /* CONFIG_VSX */
3131 
3132 	return 0;
3133 
3134  unknown_opcode:
3135 	op->type = UNKNOWN;
3136 	return 0;
3137 
3138  logical_done:
3139 	if (word & 1)
3140 		set_cr0(regs, op);
3141  logical_done_nocc:
3142 	op->reg = ra;
3143 	op->type |= SETREG;
3144 	return 1;
3145 
3146  arith_done:
3147 	if (word & 1)
3148 		set_cr0(regs, op);
3149  compute_done:
3150 	op->reg = rd;
3151 	op->type |= SETREG;
3152 	return 1;
3153 
3154  priv:
3155 	op->type = INTERRUPT | 0x700;
3156 	op->val = SRR1_PROGPRIV;
3157 	return 0;
3158 
3159  trap:
3160 	op->type = INTERRUPT | 0x700;
3161 	op->val = SRR1_PROGTRAP;
3162 	return 0;
3163 }
3164 EXPORT_SYMBOL_GPL(analyse_instr);
3165 NOKPROBE_SYMBOL(analyse_instr);
3166 
3167 /*
3168  * For PPC32 we always use stwu with r1 to change the stack pointer.
3169  * So this emulated store may corrupt the exception frame, now we
3170  * have to provide the exception frame trampoline, which is pushed
3171  * below the kprobed function stack. So we only update gpr[1] but
3172  * don't emulate the real store operation. We will do real store
3173  * operation safely in exception return code by checking this flag.
3174  */
handle_stack_update(unsigned long ea,struct pt_regs * regs)3175 static nokprobe_inline int handle_stack_update(unsigned long ea, struct pt_regs *regs)
3176 {
3177 	/*
3178 	 * Check if we already set since that means we'll
3179 	 * lose the previous value.
3180 	 */
3181 	WARN_ON(test_thread_flag(TIF_EMULATE_STACK_STORE));
3182 	set_thread_flag(TIF_EMULATE_STACK_STORE);
3183 	return 0;
3184 }
3185 
do_signext(unsigned long * valp,int size)3186 static nokprobe_inline void do_signext(unsigned long *valp, int size)
3187 {
3188 	switch (size) {
3189 	case 2:
3190 		*valp = (signed short) *valp;
3191 		break;
3192 	case 4:
3193 		*valp = (signed int) *valp;
3194 		break;
3195 	}
3196 }
3197 
do_byterev(unsigned long * valp,int size)3198 static nokprobe_inline void do_byterev(unsigned long *valp, int size)
3199 {
3200 	switch (size) {
3201 	case 2:
3202 		*valp = byterev_2(*valp);
3203 		break;
3204 	case 4:
3205 		*valp = byterev_4(*valp);
3206 		break;
3207 #ifdef __powerpc64__
3208 	case 8:
3209 		*valp = byterev_8(*valp);
3210 		break;
3211 #endif
3212 	}
3213 }
3214 
3215 /*
3216  * Emulate an instruction that can be executed just by updating
3217  * fields in *regs.
3218  */
emulate_update_regs(struct pt_regs * regs,struct instruction_op * op)3219 void emulate_update_regs(struct pt_regs *regs, struct instruction_op *op)
3220 {
3221 	unsigned long next_pc;
3222 
3223 	next_pc = truncate_if_32bit(regs->msr, regs->nip + GETLENGTH(op->type));
3224 	switch (GETTYPE(op->type)) {
3225 	case COMPUTE:
3226 		if (op->type & SETREG)
3227 			regs->gpr[op->reg] = op->val;
3228 		if (op->type & SETCC)
3229 			regs->ccr = op->ccval;
3230 		if (op->type & SETXER)
3231 			regs->xer = op->xerval;
3232 		break;
3233 
3234 	case BRANCH:
3235 		if (op->type & SETLK)
3236 			regs->link = next_pc;
3237 		if (op->type & BRTAKEN)
3238 			next_pc = op->val;
3239 		if (op->type & DECCTR)
3240 			--regs->ctr;
3241 		break;
3242 
3243 	case BARRIER:
3244 		switch (op->type & BARRIER_MASK) {
3245 		case BARRIER_SYNC:
3246 			mb();
3247 			break;
3248 		case BARRIER_ISYNC:
3249 			isync();
3250 			break;
3251 		case BARRIER_EIEIO:
3252 			eieio();
3253 			break;
3254 #ifdef CONFIG_PPC64
3255 		case BARRIER_LWSYNC:
3256 			asm volatile("lwsync" : : : "memory");
3257 			break;
3258 		case BARRIER_PTESYNC:
3259 			asm volatile("ptesync" : : : "memory");
3260 			break;
3261 #endif
3262 		}
3263 		break;
3264 
3265 	case MFSPR:
3266 		switch (op->spr) {
3267 		case SPRN_XER:
3268 			regs->gpr[op->reg] = regs->xer & 0xffffffffUL;
3269 			break;
3270 		case SPRN_LR:
3271 			regs->gpr[op->reg] = regs->link;
3272 			break;
3273 		case SPRN_CTR:
3274 			regs->gpr[op->reg] = regs->ctr;
3275 			break;
3276 		default:
3277 			WARN_ON_ONCE(1);
3278 		}
3279 		break;
3280 
3281 	case MTSPR:
3282 		switch (op->spr) {
3283 		case SPRN_XER:
3284 			regs->xer = op->val & 0xffffffffUL;
3285 			break;
3286 		case SPRN_LR:
3287 			regs->link = op->val;
3288 			break;
3289 		case SPRN_CTR:
3290 			regs->ctr = op->val;
3291 			break;
3292 		default:
3293 			WARN_ON_ONCE(1);
3294 		}
3295 		break;
3296 
3297 	default:
3298 		WARN_ON_ONCE(1);
3299 	}
3300 	regs_set_return_ip(regs, next_pc);
3301 }
3302 NOKPROBE_SYMBOL(emulate_update_regs);
3303 
3304 /*
3305  * Emulate a previously-analysed load or store instruction.
3306  * Return values are:
3307  * 0 = instruction emulated successfully
3308  * -EFAULT = address out of range or access faulted (regs->dar
3309  *	     contains the faulting address)
3310  * -EACCES = misaligned access, instruction requires alignment
3311  * -EINVAL = unknown operation in *op
3312  */
emulate_loadstore(struct pt_regs * regs,struct instruction_op * op)3313 int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op)
3314 {
3315 	int err, size, type;
3316 	int i, rd, nb;
3317 	unsigned int cr;
3318 	unsigned long val;
3319 	unsigned long ea;
3320 	bool cross_endian;
3321 
3322 	err = 0;
3323 	size = GETSIZE(op->type);
3324 	type = GETTYPE(op->type);
3325 	cross_endian = (regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE);
3326 	ea = truncate_if_32bit(regs->msr, op->ea);
3327 
3328 	switch (type) {
3329 	case LARX:
3330 		if (ea & (size - 1))
3331 			return -EACCES;		/* can't handle misaligned */
3332 		if (!address_ok(regs, ea, size))
3333 			return -EFAULT;
3334 		err = 0;
3335 		val = 0;
3336 		switch (size) {
3337 #ifdef __powerpc64__
3338 		case 1:
3339 			__get_user_asmx(val, ea, err, "lbarx");
3340 			break;
3341 		case 2:
3342 			__get_user_asmx(val, ea, err, "lharx");
3343 			break;
3344 #endif
3345 		case 4:
3346 			__get_user_asmx(val, ea, err, "lwarx");
3347 			break;
3348 #ifdef __powerpc64__
3349 		case 8:
3350 			__get_user_asmx(val, ea, err, "ldarx");
3351 			break;
3352 		case 16:
3353 			err = do_lqarx(ea, &regs->gpr[op->reg]);
3354 			break;
3355 #endif
3356 		default:
3357 			return -EINVAL;
3358 		}
3359 		if (err) {
3360 			regs->dar = ea;
3361 			break;
3362 		}
3363 		if (size < 16)
3364 			regs->gpr[op->reg] = val;
3365 		break;
3366 
3367 	case STCX:
3368 		if (ea & (size - 1))
3369 			return -EACCES;		/* can't handle misaligned */
3370 		if (!address_ok(regs, ea, size))
3371 			return -EFAULT;
3372 		err = 0;
3373 		switch (size) {
3374 #ifdef __powerpc64__
3375 		case 1:
3376 			__put_user_asmx(op->val, ea, err, "stbcx.", cr);
3377 			break;
3378 		case 2:
3379 			__put_user_asmx(op->val, ea, err, "sthcx.", cr);
3380 			break;
3381 #endif
3382 		case 4:
3383 			__put_user_asmx(op->val, ea, err, "stwcx.", cr);
3384 			break;
3385 #ifdef __powerpc64__
3386 		case 8:
3387 			__put_user_asmx(op->val, ea, err, "stdcx.", cr);
3388 			break;
3389 		case 16:
3390 			err = do_stqcx(ea, regs->gpr[op->reg],
3391 				       regs->gpr[op->reg + 1], &cr);
3392 			break;
3393 #endif
3394 		default:
3395 			return -EINVAL;
3396 		}
3397 		if (!err)
3398 			regs->ccr = (regs->ccr & 0x0fffffff) |
3399 				(cr & 0xe0000000) |
3400 				((regs->xer >> 3) & 0x10000000);
3401 		else
3402 			regs->dar = ea;
3403 		break;
3404 
3405 	case LOAD:
3406 #ifdef __powerpc64__
3407 		if (size == 16) {
3408 			err = emulate_lq(regs, ea, op->reg, cross_endian);
3409 			break;
3410 		}
3411 #endif
3412 		err = read_mem(&regs->gpr[op->reg], ea, size, regs);
3413 		if (!err) {
3414 			if (op->type & SIGNEXT)
3415 				do_signext(&regs->gpr[op->reg], size);
3416 			if ((op->type & BYTEREV) == (cross_endian ? 0 : BYTEREV))
3417 				do_byterev(&regs->gpr[op->reg], size);
3418 		}
3419 		break;
3420 
3421 #ifdef CONFIG_PPC_FPU
3422 	case LOAD_FP:
3423 		/*
3424 		 * If the instruction is in userspace, we can emulate it even
3425 		 * if the VMX state is not live, because we have the state
3426 		 * stored in the thread_struct.  If the instruction is in
3427 		 * the kernel, we must not touch the state in the thread_struct.
3428 		 */
3429 		if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_FP))
3430 			return 0;
3431 		err = do_fp_load(op, ea, regs, cross_endian);
3432 		break;
3433 #endif
3434 #ifdef CONFIG_ALTIVEC
3435 	case LOAD_VMX:
3436 		if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_VEC))
3437 			return 0;
3438 		err = do_vec_load(op->reg, ea, size, regs, cross_endian);
3439 		break;
3440 #endif
3441 #ifdef CONFIG_VSX
3442 	case LOAD_VSX: {
3443 		unsigned long msrbit = MSR_VSX;
3444 
3445 		/*
3446 		 * Some VSX instructions check the MSR_VEC bit rather than MSR_VSX
3447 		 * when the target of the instruction is a vector register.
3448 		 */
3449 		if (op->reg >= 32 && (op->vsx_flags & VSX_CHECK_VEC))
3450 			msrbit = MSR_VEC;
3451 		if (!(regs->msr & MSR_PR) && !(regs->msr & msrbit))
3452 			return 0;
3453 		err = do_vsx_load(op, ea, regs, cross_endian);
3454 		break;
3455 	}
3456 #endif
3457 	case LOAD_MULTI:
3458 		if (!address_ok(regs, ea, size))
3459 			return -EFAULT;
3460 		rd = op->reg;
3461 		for (i = 0; i < size; i += 4) {
3462 			unsigned int v32 = 0;
3463 
3464 			nb = size - i;
3465 			if (nb > 4)
3466 				nb = 4;
3467 			err = copy_mem_in((u8 *) &v32, ea, nb, regs);
3468 			if (err)
3469 				break;
3470 			if (unlikely(cross_endian))
3471 				v32 = byterev_4(v32);
3472 			regs->gpr[rd] = v32;
3473 			ea += 4;
3474 			/* reg number wraps from 31 to 0 for lsw[ix] */
3475 			rd = (rd + 1) & 0x1f;
3476 		}
3477 		break;
3478 
3479 	case STORE:
3480 #ifdef __powerpc64__
3481 		if (size == 16) {
3482 			err = emulate_stq(regs, ea, op->reg, cross_endian);
3483 			break;
3484 		}
3485 #endif
3486 		if ((op->type & UPDATE) && size == sizeof(long) &&
3487 		    op->reg == 1 && op->update_reg == 1 &&
3488 		    !(regs->msr & MSR_PR) &&
3489 		    ea >= regs->gpr[1] - STACK_INT_FRAME_SIZE) {
3490 			err = handle_stack_update(ea, regs);
3491 			break;
3492 		}
3493 		if (unlikely(cross_endian))
3494 			do_byterev(&op->val, size);
3495 		err = write_mem(op->val, ea, size, regs);
3496 		break;
3497 
3498 #ifdef CONFIG_PPC_FPU
3499 	case STORE_FP:
3500 		if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_FP))
3501 			return 0;
3502 		err = do_fp_store(op, ea, regs, cross_endian);
3503 		break;
3504 #endif
3505 #ifdef CONFIG_ALTIVEC
3506 	case STORE_VMX:
3507 		if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_VEC))
3508 			return 0;
3509 		err = do_vec_store(op->reg, ea, size, regs, cross_endian);
3510 		break;
3511 #endif
3512 #ifdef CONFIG_VSX
3513 	case STORE_VSX: {
3514 		unsigned long msrbit = MSR_VSX;
3515 
3516 		/*
3517 		 * Some VSX instructions check the MSR_VEC bit rather than MSR_VSX
3518 		 * when the target of the instruction is a vector register.
3519 		 */
3520 		if (op->reg >= 32 && (op->vsx_flags & VSX_CHECK_VEC))
3521 			msrbit = MSR_VEC;
3522 		if (!(regs->msr & MSR_PR) && !(regs->msr & msrbit))
3523 			return 0;
3524 		err = do_vsx_store(op, ea, regs, cross_endian);
3525 		break;
3526 	}
3527 #endif
3528 	case STORE_MULTI:
3529 		if (!address_ok(regs, ea, size))
3530 			return -EFAULT;
3531 		rd = op->reg;
3532 		for (i = 0; i < size; i += 4) {
3533 			unsigned int v32 = regs->gpr[rd];
3534 
3535 			nb = size - i;
3536 			if (nb > 4)
3537 				nb = 4;
3538 			if (unlikely(cross_endian))
3539 				v32 = byterev_4(v32);
3540 			err = copy_mem_out((u8 *) &v32, ea, nb, regs);
3541 			if (err)
3542 				break;
3543 			ea += 4;
3544 			/* reg number wraps from 31 to 0 for stsw[ix] */
3545 			rd = (rd + 1) & 0x1f;
3546 		}
3547 		break;
3548 
3549 	default:
3550 		return -EINVAL;
3551 	}
3552 
3553 	if (err)
3554 		return err;
3555 
3556 	if (op->type & UPDATE)
3557 		regs->gpr[op->update_reg] = op->ea;
3558 
3559 	return 0;
3560 }
3561 NOKPROBE_SYMBOL(emulate_loadstore);
3562 
3563 /*
3564  * Emulate instructions that cause a transfer of control,
3565  * loads and stores, and a few other instructions.
3566  * Returns 1 if the step was emulated, 0 if not,
3567  * or -1 if the instruction is one that should not be stepped,
3568  * such as an rfid, or a mtmsrd that would clear MSR_RI.
3569  */
emulate_step(struct pt_regs * regs,ppc_inst_t instr)3570 int emulate_step(struct pt_regs *regs, ppc_inst_t instr)
3571 {
3572 	struct instruction_op op;
3573 	int r, err, type;
3574 	unsigned long val;
3575 	unsigned long ea;
3576 
3577 	r = analyse_instr(&op, regs, instr);
3578 	if (r < 0)
3579 		return r;
3580 	if (r > 0) {
3581 		emulate_update_regs(regs, &op);
3582 		return 1;
3583 	}
3584 
3585 	err = 0;
3586 	type = GETTYPE(op.type);
3587 
3588 	if (OP_IS_LOAD_STORE(type)) {
3589 		err = emulate_loadstore(regs, &op);
3590 		if (err)
3591 			return 0;
3592 		goto instr_done;
3593 	}
3594 
3595 	switch (type) {
3596 	case CACHEOP:
3597 		ea = truncate_if_32bit(regs->msr, op.ea);
3598 		if (!address_ok(regs, ea, 8))
3599 			return 0;
3600 		switch (op.type & CACHEOP_MASK) {
3601 		case DCBST:
3602 			__cacheop_user_asmx(ea, err, "dcbst");
3603 			break;
3604 		case DCBF:
3605 			__cacheop_user_asmx(ea, err, "dcbf");
3606 			break;
3607 		case DCBTST:
3608 			if (op.reg == 0)
3609 				prefetchw((void *) ea);
3610 			break;
3611 		case DCBT:
3612 			if (op.reg == 0)
3613 				prefetch((void *) ea);
3614 			break;
3615 		case ICBI:
3616 			__cacheop_user_asmx(ea, err, "icbi");
3617 			break;
3618 		case DCBZ:
3619 			err = emulate_dcbz(ea, regs);
3620 			break;
3621 		}
3622 		if (err) {
3623 			regs->dar = ea;
3624 			return 0;
3625 		}
3626 		goto instr_done;
3627 
3628 	case MFMSR:
3629 		regs->gpr[op.reg] = regs->msr & MSR_MASK;
3630 		goto instr_done;
3631 
3632 	case MTMSR:
3633 		val = regs->gpr[op.reg];
3634 		if ((val & MSR_RI) == 0)
3635 			/* can't step mtmsr[d] that would clear MSR_RI */
3636 			return -1;
3637 		/* here op.val is the mask of bits to change */
3638 		regs_set_return_msr(regs, (regs->msr & ~op.val) | (val & op.val));
3639 		goto instr_done;
3640 
3641 	case SYSCALL:	/* sc */
3642 		/*
3643 		 * Per ISA v3.1, section 7.5.15 'Trace Interrupt', we can't
3644 		 * single step a system call instruction:
3645 		 *
3646 		 *   Successful completion for an instruction means that the
3647 		 *   instruction caused no other interrupt. Thus a Trace
3648 		 *   interrupt never occurs for a System Call or System Call
3649 		 *   Vectored instruction, or for a Trap instruction that
3650 		 *   traps.
3651 		 */
3652 		return -1;
3653 	case SYSCALL_VECTORED_0:	/* scv 0 */
3654 		return -1;
3655 	case RFI:
3656 		return -1;
3657 	}
3658 	return 0;
3659 
3660  instr_done:
3661 	regs_set_return_ip(regs,
3662 		truncate_if_32bit(regs->msr, regs->nip + GETLENGTH(op.type)));
3663 	return 1;
3664 }
3665 NOKPROBE_SYMBOL(emulate_step);
3666