1 // SPDX-License-Identifier: GPL-2.0-or-later
5 * Copyright (C) 2004 Paul Mackerras <paulus@au.ibm.com>, IBM
7 #include <linux/kernel.h>
8 #include <linux/kprobes.h>
9 #include <linux/ptrace.h>
10 #include <linux/prefetch.h>
11 #include <asm/sstep.h>
12 #include <asm/processor.h>
13 #include <linux/uaccess.h>
14 #include <asm/cpu_has_feature.h>
15 #include <asm/cputable.h>
16 #include <asm/disassemble.h>
18 extern char system_call_common[];
19 extern char system_call_vectored_emulate[];
21 #ifdef CONFIG_PPC64
22 /* Bits in SRR1 that are copied from MSR */
23 #define MSR_MASK 0xffffffff87c0ffffUL
24 #else
25 #define MSR_MASK 0x87c0ffff
26 #endif
29 #define XER_SO 0x80000000U
30 #define XER_OV 0x40000000U
31 #define XER_CA 0x20000000U
32 #define XER_OV32 0x00080000U
33 #define XER_CA32 0x00040000U
36 #define VSX_REGISTER_XTP(rd) ((((rd) & 1) << 5) | ((rd) & 0xfe))
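/*
 * Example: VSX_REGISTER_XTP(0x15) = (1 << 5) | 0x14 = 52; the low
 * bit of the 5-bit field selects the high bank (vs32-vs63) and the
 * remaining bits select the even-numbered register of the pair.
 */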
41 * Functions in ldstfp.S
43 extern void get_fpr(int rn, double *p);
44 extern void put_fpr(int rn, const double *p);
45 extern void get_vr(int rn, __vector128 *p);
46 extern void put_vr(int rn, __vector128 *p);
47 extern void load_vsrn(int vsr, const void *p);
48 extern void store_vsrn(int vsr, void *p);
49 extern void conv_sp_to_dp(const float *sp, double *dp);
50 extern void conv_dp_to_sp(const double *dp, float *sp);
57 extern int do_lq(unsigned long ea, unsigned long *regs);
58 extern int do_stq(unsigned long ea, unsigned long val0, unsigned long val1);
59 extern int do_lqarx(unsigned long ea, unsigned long *regs);
60 extern int do_stqcx(unsigned long ea, unsigned long val0, unsigned long val1,
61 unsigned int *crp);
64 #ifdef __LITTLE_ENDIAN__
65 #define IS_LE 1
66 #define IS_BE 0
67 #else
68 #define IS_LE 0
69 #define IS_BE 1
70 #endif
73 * Emulate the truncation of 64-bit values in 32-bit mode.
75 static nokprobe_inline unsigned long truncate_if_32bit(unsigned long msr,
79 if ((msr & MSR_64BIT) == 0)
80 val &= 0xffffffffUL;
86 * Determine whether a conditional branch instruction would branch.
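 *
 * Example: bo = 0x14 (0b10100) is "branch always" (don't test a CR
 * bit, don't decrement CTR), while bo = 0x0c (0b01100) branches only
 * if the CR bit selected by BI is set.
 */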
88 static nokprobe_inline int branch_taken(unsigned int instr,
89 const struct pt_regs *regs,
90 struct instruction_op *op)
92 unsigned int bo = (instr >> 21) & 0x1f;
96 /* decrement counter */
97 op->type |= DECCTR;
98 if (((bo >> 1) & 1) ^ (regs->ctr == 1))
99 return 0;
101 if ((bo & 0x10) == 0) {
102 /* check bit from CR */
103 bi = (instr >> 16) & 0x1f;
104 if (((regs->ccr >> (31 - bi)) & 1) != ((bo >> 3) & 1))
105 return 0;
110 static nokprobe_inline long address_ok(struct pt_regs *regs,
111 unsigned long ea, int nb)
113 if (!user_mode(regs))
114 return 1;
115 if (__access_ok(ea, nb))
116 return 1;
117 if (__access_ok(ea, 1))
118 /* Access overlaps the end of the user region */
119 regs->dar = TASK_SIZE_MAX - 1;
126 * Calculate effective address for a D-form instruction
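 *
 * EA = (ra ? GPR[ra] : 0) + sign-extended 16-bit displacement;
 * e.g. "lwz r3,-8(r1)" gives EA = GPR[r1] - 8.
 */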
128 static nokprobe_inline unsigned long dform_ea(unsigned int instr,
129 const struct pt_regs *regs)
134 ra = (instr >> 16) & 0x1f;
135 ea = (signed short) instr; /* sign-extend */
144 * Calculate effective address for a DS-form instruction
146 static nokprobe_inline unsigned long dsform_ea(unsigned int instr,
147 const struct pt_regs *regs)
152 ra = (instr >> 16) & 0x1f;
153 ea = (signed short) (instr & ~3); /* sign-extend */
161 * Calculate effective address for a DQ-form instruction
163 static nokprobe_inline unsigned long dqform_ea(unsigned int instr,
164 const struct pt_regs *regs)
169 ra = (instr >> 16) & 0x1f;
170 ea = (signed short) (instr & ~0xf); /* sign-extend */
176 #endif /* __powerpc64__ */
179 * Calculate effective address for an X-form instruction
181 static nokprobe_inline unsigned long xform_ea(unsigned int instr,
182 const struct pt_regs *regs)
187 ra = (instr >> 16) & 0x1f;
188 rb = (instr >> 11) & 0x1f;
197 * Calculate effective address for a MLS:D-form / 8LS:D-form
198 * prefixed instruction
200 static nokprobe_inline unsigned long mlsd_8lsd_ea(unsigned int instr,
202 const struct pt_regs *regs)
206 unsigned long ea, d0, d1, d;
208 prefix_r = GET_PREFIX_R(instr);
209 ra = GET_PREFIX_RA(suffix);
211 d0 = instr & 0x3ffff;
212 d1 = suffix & 0xffff;
213 d = (d0 << 16) | d1;
216 * sign extend a 34 bit number
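 * (d0 and d1 concatenate to a 34-bit displacement: e.g.
 * d = 0x3fffffffc, which is -4 in 34-bit two's complement, must
 * become (unsigned long)-4 before the base register is added)
 */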
218 dd = (unsigned int)(d >> 2);
219 ea = (signed int)dd;
220 ea = (ea << 2) | (d & 0x3);
224 else if (!prefix_r && !ra)
225 ; /* Leave ea as is */
230 * (prefix_r && ra) is an invalid form. Should already be
231 * checked for by caller!
238 * Return the largest power of 2, not greater than sizeof(unsigned long),
239 * such that x is a multiple of it.
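 * e.g. max_align(6) = 2, max_align(0x1c) = 4 and max_align(16) = 8
 * on 64-bit, capped at sizeof(unsigned long).
 */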
241 static nokprobe_inline unsigned long max_align(unsigned long x)
243 x |= sizeof(unsigned long);
244 return x & -x; /* isolates rightmost bit */
247 static nokprobe_inline unsigned long byterev_2(unsigned long x)
249 return ((x >> 8) & 0xff) | ((x & 0xff) << 8);
252 static nokprobe_inline unsigned long byterev_4(unsigned long x)
254 return ((x >> 24) & 0xff) | ((x >> 8) & 0xff00) |
255 ((x & 0xff00) << 8) | ((x & 0xff) << 24);
259 static nokprobe_inline unsigned long byterev_8(unsigned long x)
261 return (byterev_4(x) << 32) | byterev_4(x >> 32);
265 static nokprobe_inline void do_byte_reverse(void *ptr, int nb)
269 *(u16 *)ptr = byterev_2(*(u16 *)ptr);
272 *(u32 *)ptr = byterev_4(*(u32 *)ptr);
276 *(unsigned long *)ptr = byterev_8(*(unsigned long *)ptr);
279 unsigned long *up = (unsigned long *)ptr;
281 tmp = byterev_8(up[0]);
282 up[0] = byterev_8(up[1]);
287 unsigned long *up = (unsigned long *)ptr;
290 tmp = byterev_8(up[0]);
291 up[0] = byterev_8(up[3]);
293 tmp = byterev_8(up[2]);
294 up[2] = byterev_8(up[1]);
305 static nokprobe_inline int read_mem_aligned(unsigned long *dest,
306 unsigned long ea, int nb,
307 struct pt_regs *regs)
314 err = __get_user(x, (unsigned char __user *) ea);
317 err = __get_user(x, (unsigned short __user *) ea);
320 err = __get_user(x, (unsigned int __user *) ea);
324 err = __get_user(x, (unsigned long __user *) ea);
336 * Copy from userspace to a buffer, using the largest possible
337 * aligned accesses, up to sizeof(long).
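 *
 * e.g. copying 7 bytes from ea = 0x1003 is done as a 1-byte access
 * at 0x1003, then a 4-byte access at 0x1004, then a 2-byte access
 * at 0x1008, each naturally aligned.
 */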
339 static nokprobe_inline int copy_mem_in(u8 *dest, unsigned long ea, int nb,
340 struct pt_regs *regs)
345 for (; nb > 0; nb -= c) {
346 c = max_align(ea);
347 if (c > nb)
348 c = max_align(nb);
351 err = __get_user(*dest, (unsigned char __user *) ea);
354 err = __get_user(*(u16 *)dest,
355 (unsigned short __user *) ea);
358 err = __get_user(*(u32 *)dest,
359 (unsigned int __user *) ea);
363 err = __get_user(*(unsigned long *)dest,
364 (unsigned long __user *) ea);
378 static nokprobe_inline int read_mem_unaligned(unsigned long *dest,
379 unsigned long ea, int nb,
380 struct pt_regs *regs)
384 u8 b[sizeof(unsigned long)];
390 i = IS_BE ? sizeof(unsigned long) - nb : 0;
391 err = copy_mem_in(&u.b[i], ea, nb, regs);
398 * Read memory at address ea for nb bytes, return 0 for success
399 * or -EFAULT if an error occurred. N.B. nb must be 1, 2, 4 or 8.
400 * If nb < sizeof(long), the result is right-justified on BE systems.
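 * e.g. on BE, a 2-byte read of the bytes {0xaa, 0xbb} gives
 * 0x000000000000aabb: right-justified, not in the top half of the
 * doubleword.
 */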
402 static int read_mem(unsigned long *dest, unsigned long ea, int nb,
403 struct pt_regs *regs)
405 if (!address_ok(regs, ea, nb))
407 if ((ea & (nb - 1)) == 0)
408 return read_mem_aligned(dest, ea, nb, regs);
409 return read_mem_unaligned(dest, ea, nb, regs);
411 NOKPROBE_SYMBOL(read_mem);
413 static nokprobe_inline int write_mem_aligned(unsigned long val,
414 unsigned long ea, int nb,
415 struct pt_regs *regs)
421 err = __put_user(val, (unsigned char __user *) ea);
424 err = __put_user(val, (unsigned short __user *) ea);
427 err = __put_user(val, (unsigned int __user *) ea);
431 err = __put_user(val, (unsigned long __user *) ea);
441 * Copy from a buffer to userspace, using the largest possible
442 * aligned accesses, up to sizeof(long).
444 static nokprobe_inline int copy_mem_out(u8 *dest, unsigned long ea, int nb,
445 struct pt_regs *regs)
450 for (; nb > 0; nb -= c) {
451 c = max_align(ea);
452 if (c > nb)
453 c = max_align(nb);
456 err = __put_user(*dest, (unsigned char __user *) ea);
459 err = __put_user(*(u16 *)dest,
460 (unsigned short __user *) ea);
463 err = __put_user(*(u32 *)dest,
464 (unsigned int __user *) ea);
468 err = __put_user(*(unsigned long *)dest,
469 (unsigned long __user *) ea);
483 static nokprobe_inline int write_mem_unaligned(unsigned long val,
484 unsigned long ea, int nb,
485 struct pt_regs *regs)
489 u8 b[sizeof(unsigned long)];
494 i = IS_BE ? sizeof(unsigned long) - nb : 0;
495 return copy_mem_out(&u.b[i], ea, nb, regs);
499 * Write memory at address ea for nb bytes, return 0 for success
500 * or -EFAULT if an error occurred. N.B. nb must be 1, 2, 4 or 8.
502 static int write_mem(unsigned long val, unsigned long ea, int nb,
503 struct pt_regs *regs)
505 if (!address_ok(regs, ea, nb))
507 if ((ea & (nb - 1)) == 0)
508 return write_mem_aligned(val, ea, nb, regs);
509 return write_mem_unaligned(val, ea, nb, regs);
511 NOKPROBE_SYMBOL(write_mem);
513 #ifdef CONFIG_PPC_FPU
515 * These access either the real FP register or the image in the
516 * thread_struct, depending on regs->msr & MSR_FP.
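 * If MSR_FP is set in regs->msr the thread's FP state is live in the
 * hardware registers, so get_fpr()/put_fpr() touch the real register;
 * otherwise the saved image in current->thread is used instead.
 */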
518 static int do_fp_load(struct instruction_op *op, unsigned long ea,
519 struct pt_regs *regs, bool cross_endian)
528 u8 b[2 * sizeof(double)];
531 nb = GETSIZE(op->type);
532 if (!address_ok(regs, ea, nb))
535 err = copy_mem_in(u.b, ea, nb, regs);
538 if (unlikely(cross_endian)) {
539 do_byte_reverse(u.b, min(nb, 8));
540 if (nb == 16)
541 do_byte_reverse(&u.b[8], 8);
545 if (op->type & FPCONV)
546 conv_sp_to_dp(&u.f, &u.d[0]);
547 else if (op->type & SIGNEXT)
552 if (regs->msr & MSR_FP)
553 put_fpr(rn, &u.d[0]);
554 else
555 current->thread.TS_FPR(rn) = u.l[0];
559 if (regs->msr & MSR_FP)
560 put_fpr(rn, &u.d[1]);
561 else
562 current->thread.TS_FPR(rn) = u.l[1];
567 NOKPROBE_SYMBOL(do_fp_load);
569 static int do_fp_store(struct instruction_op *op, unsigned long ea,
570 struct pt_regs *regs, bool cross_endian)
578 u8 b[2 * sizeof(double)];
581 nb = GETSIZE(op->type);
582 if (!address_ok(regs, ea, nb))
586 if (regs->msr & MSR_FP)
587 get_fpr(rn, &u.d[0]);
588 else
589 u.l[0] = current->thread.TS_FPR(rn);
591 if (op->type & FPCONV)
592 conv_dp_to_sp(&u.d[0], &u.f);
598 if (regs->msr & MSR_FP)
599 get_fpr(rn, &u.d[1]);
600 else
601 u.l[1] = current->thread.TS_FPR(rn);
604 if (unlikely(cross_endian)) {
605 do_byte_reverse(u.b, min(nb, 8));
606 if (nb == 16)
607 do_byte_reverse(&u.b[8], 8);
609 return copy_mem_out(u.b, ea, nb, regs);
611 NOKPROBE_SYMBOL(do_fp_store);
614 #ifdef CONFIG_ALTIVEC
615 /* For Altivec/VMX, no need to worry about alignment */
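/*
 * The low bits of the EA select the element: e.g. for lvewx (4-byte
 * elements) the code below aligns ea down to 4 bytes and uses
 * ea & 0xf to choose which word of the 16-byte image is filled.
 */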
616 static nokprobe_inline int do_vec_load(int rn, unsigned long ea,
617 int size, struct pt_regs *regs,
623 u8 b[sizeof(__vector128)];
626 if (!address_ok(regs, ea & ~0xfUL, 16))
628 /* align to multiple of size */
629 ea &= ~(size - 1);
630 err = copy_mem_in(&u.b[ea & 0xf], ea, size, regs);
633 if (unlikely(cross_endian))
634 do_byte_reverse(&u.b[ea & 0xf], size);
636 if (regs->msr & MSR_VEC)
637 put_vr(rn, &u.v);
638 else
639 current->thread.vr_state.vr[rn] = u.v;
644 static nokprobe_inline int do_vec_store(int rn, unsigned long ea,
645 int size, struct pt_regs *regs,
650 u8 b[sizeof(__vector128)];
653 if (!address_ok(regs, ea & ~0xfUL, 16))
655 /* align to multiple of size */
656 ea &= ~(size - 1);
659 if (regs->msr & MSR_VEC)
660 get_vr(rn, &u.v);
661 else
662 u.v = current->thread.vr_state.vr[rn];
664 if (unlikely(cross_endian))
665 do_byte_reverse(&u.b[ea & 0xf], size);
666 return copy_mem_out(&u.b[ea & 0xf], ea, size, regs);
668 #endif /* CONFIG_ALTIVEC */
671 static nokprobe_inline int emulate_lq(struct pt_regs *regs, unsigned long ea,
672 int reg, bool cross_endian)
676 if (!address_ok(regs, ea, 16))
678 /* if aligned, should be atomic */
679 if ((ea & 0xf) == 0) {
680 err = do_lq(ea, &regs->gpr[reg]);
682 err = read_mem(&regs->gpr[reg + IS_LE], ea, 8, regs);
684 err = read_mem(&regs->gpr[reg + IS_BE], ea + 8, 8, regs);
686 if (!err && unlikely(cross_endian))
687 do_byte_reverse(&regs->gpr[reg], 16);
691 static nokprobe_inline int emulate_stq(struct pt_regs *regs, unsigned long ea,
692 int reg, bool cross_endian)
695 unsigned long vals[2];
697 if (!address_ok(regs, ea, 16))
699 vals[0] = regs->gpr[reg];
700 vals[1] = regs->gpr[reg + 1];
701 if (unlikely(cross_endian))
702 do_byte_reverse(vals, 16);
704 /* if aligned, should be atomic */
706 return do_stq(ea, vals[0], vals[1]);
708 err = write_mem(vals[IS_LE], ea, 8, regs);
710 err = write_mem(vals[IS_BE], ea + 8, 8, regs);
713 #endif /* __powerpc64__ */
716 void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg,
717 const void *mem, bool rev)
721 const unsigned int *wp;
722 const unsigned short *hp;
723 const unsigned char *bp;
725 size = GETSIZE(op->type);
726 reg->d[0] = reg->d[1] = 0;
728 switch (op->element_size) {
732 /* whole vector; lxv[x] or lxvl[l] */
735 memcpy(reg, mem, size);
736 if (IS_LE && (op->vsx_flags & VSX_LDLEFT))
739 do_byte_reverse(reg, size);
742 /* scalar loads, lxvd2x, lxvdsx */
743 read_size = (size >= 8) ? 8 : size;
744 i = IS_LE ? 8 : 8 - read_size;
745 memcpy(&reg->b[i], mem, read_size);
747 do_byte_reverse(&reg->b[i], 8);
749 if (op->type & SIGNEXT) {
750 /* size == 4 is the only case here */
751 reg->d[IS_LE] = (signed int) reg->d[IS_LE];
752 } else if (op->vsx_flags & VSX_FPCONV) {
754 conv_sp_to_dp(&reg->fp[1 + IS_LE],
755 &reg->dp[IS_LE]);
760 unsigned long v = *(unsigned long *)(mem + 8);
761 reg->d[IS_BE] = !rev ? v : byterev_8(v);
762 } else if (op->vsx_flags & VSX_SPLAT)
763 reg->d[IS_BE] = reg->d[IS_LE];
769 for (j = 0; j < size / 4; ++j) {
770 i = IS_LE ? 3 - j : j;
771 reg->w[i] = !rev ? *wp++ : byterev_4(*wp++);
773 if (op->vsx_flags & VSX_SPLAT) {
774 u32 val = reg->w[IS_LE ? 3 : 0];
776 i = IS_LE ? 3 - j : j;
784 for (j = 0; j < size / 2; ++j) {
785 i = IS_LE ? 7 - j : j;
786 reg->h[i] = !rev ? *hp++ : byterev_2(*hp++);
792 for (j = 0; j < size; ++j) {
793 i = IS_LE ? 15 - j : j;
799 EXPORT_SYMBOL_GPL(emulate_vsx_load);
800 NOKPROBE_SYMBOL(emulate_vsx_load);
802 void emulate_vsx_store(struct instruction_op *op, const union vsx_reg *reg,
805 int size, write_size;
812 size = GETSIZE(op->type);
814 switch (op->element_size) {
820 /* reverse 32 bytes */
821 union vsx_reg buf32[2];
822 buf32[0].d[0] = byterev_8(reg[1].d[1]);
823 buf32[0].d[1] = byterev_8(reg[1].d[0]);
824 buf32[1].d[0] = byterev_8(reg[0].d[1]);
825 buf32[1].d[1] = byterev_8(reg[0].d[0]);
826 memcpy(mem, buf32, size);
828 memcpy(mem, reg, size);
832 /* stxv, stxvx, stxvl, stxvll */
835 if (IS_LE && (op->vsx_flags & VSX_LDLEFT))
838 /* reverse 16 bytes */
839 buf.d[0] = byterev_8(reg->d[1]);
840 buf.d[1] = byterev_8(reg->d[0]);
843 memcpy(mem, reg, size);
846 /* scalar stores, stxvd2x */
847 write_size = (size >= 8) ? 8 : size;
848 i = IS_LE ? 8 : 8 - write_size;
849 if (size < 8 && op->vsx_flags & VSX_FPCONV) {
850 buf.d[0] = buf.d[1] = 0;
852 conv_dp_to_sp(&reg->dp[IS_LE], &buf.fp[1 + IS_LE]);
856 memcpy(mem, &reg->b[i], write_size);
858 memcpy(mem + 8, &reg->d[IS_BE], 8);
860 do_byte_reverse(mem, write_size);
862 do_byte_reverse(mem + 8, 8);
868 for (j = 0; j < size / 4; ++j) {
869 i = IS_LE ? 3 - j : j;
870 *wp++ = !rev ? reg->w[i] : byterev_4(reg->w[i]);
876 for (j = 0; j < size / 2; ++j) {
877 i = IS_LE ? 7 - j : j;
878 *hp++ = !rev ? reg->h[i] : byterev_2(reg->h[i]);
884 for (j = 0; j < size; ++j) {
885 i = IS_LE ? 15 - j : j;
891 EXPORT_SYMBOL_GPL(emulate_vsx_store);
892 NOKPROBE_SYMBOL(emulate_vsx_store);
894 static nokprobe_inline int do_vsx_load(struct instruction_op *op,
895 unsigned long ea, struct pt_regs *regs,
899 int i, j, nr_vsx_regs;
901 union vsx_reg buf[2];
902 int size = GETSIZE(op->type);
904 if (!address_ok(regs, ea, size) || copy_mem_in(mem, ea, size, regs))
907 nr_vsx_regs = max(1ul, size / sizeof(__vector128));
908 emulate_vsx_load(op, buf, mem, cross_endian);
911 /* FP regs + extensions */
912 if (regs->msr & MSR_FP) {
913 for (i = 0; i < nr_vsx_regs; i++) {
914 j = IS_LE ? nr_vsx_regs - i - 1 : i;
915 load_vsrn(reg + i, &buf[j].v);
918 for (i = 0; i < nr_vsx_regs; i++) {
919 j = IS_LE ? nr_vsx_regs - i - 1 : i;
920 current->thread.fp_state.fpr[reg + i][0] = buf[j].d[0];
921 current->thread.fp_state.fpr[reg + i][1] = buf[j].d[1];
925 if (regs->msr & MSR_VEC) {
926 for (i = 0; i < nr_vsx_regs; i++) {
927 j = IS_LE ? nr_vsx_regs - i - 1 : i;
928 load_vsrn(reg + i, &buf[j].v);
931 for (i = 0; i < nr_vsx_regs; i++) {
932 j = IS_LE ? nr_vsx_regs - i - 1 : i;
933 current->thread.vr_state.vr[reg - 32 + i] = buf[j].v;
941 static nokprobe_inline int do_vsx_store(struct instruction_op *op,
942 unsigned long ea, struct pt_regs *regs,
946 int i, j, nr_vsx_regs;
948 union vsx_reg buf[2];
949 int size = GETSIZE(op->type);
951 if (!address_ok(regs, ea, size))
954 nr_vsx_regs = max(1ul, size / sizeof(__vector128));
957 /* FP regs + extensions */
958 if (regs->msr & MSR_FP) {
959 for (i = 0; i < nr_vsx_regs; i++) {
960 j = IS_LE ? nr_vsx_regs - i - 1 : i;
961 store_vsrn(reg + i, &buf[j].v);
964 for (i = 0; i < nr_vsx_regs; i++) {
965 j = IS_LE ? nr_vsx_regs - i - 1 : i;
966 buf[j].d[0] = current->thread.fp_state.fpr[reg + i][0];
967 buf[j].d[1] = current->thread.fp_state.fpr[reg + i][1];
971 if (regs->msr & MSR_VEC) {
972 for (i = 0; i < nr_vsx_regs; i++) {
973 j = IS_LE ? nr_vsx_regs - i - 1 : i;
974 store_vsrn(reg + i, &buf[j].v);
977 for (i = 0; i < nr_vsx_regs; i++) {
978 j = IS_LE ? nr_vsx_regs - i - 1 : i;
979 buf[j].v = current->thread.vr_state.vr[reg - 32 + i];
984 emulate_vsx_store(op, buf, mem, cross_endian);
985 return copy_mem_out(mem, ea, size, regs);
987 #endif /* CONFIG_VSX */
989 int emulate_dcbz(unsigned long ea, struct pt_regs *regs)
992 unsigned long i, size;
994 #ifdef __powerpc64__
995 size = ppc64_caches.l1d.block_size;
996 if (!(regs->msr & MSR_64BIT))
997 ea &= 0xffffffffUL;
998 #else
999 size = L1_CACHE_BYTES;
1000 #endif
1001 ea &= ~(size - 1);
1002 if (!address_ok(regs, ea, size))
1004 for (i = 0; i < size; i += sizeof(long)) {
1005 err = __put_user(0, (unsigned long __user *) (ea + i));
1013 NOKPROBE_SYMBOL(emulate_dcbz);
1015 #define __put_user_asmx(x, addr, err, op, cr) \
1016 __asm__ __volatile__( \
1017 "1: " op " %2,0,%3\n" \
1020 ".section .fixup,\"ax\"\n" \
1025 : "=r" (err), "=r" (cr) \
1026 : "r" (x), "r" (addr), "i" (-EFAULT), "0" (err))
1028 #define __get_user_asmx(x, addr, err, op) \
1029 __asm__ __volatile__( \
1030 "1: "op" %1,0,%2\n" \
1032 ".section .fixup,\"ax\"\n" \
1037 : "=r" (err), "=r" (x) \
1038 : "r" (addr), "i" (-EFAULT), "0" (err))
1040 #define __cacheop_user_asmx(addr, err, op) \
1041 __asm__ __volatile__( \
1044 ".section .fixup,\"ax\"\n" \
1050 : "r" (addr), "i" (-EFAULT), "0" (err))
1052 static nokprobe_inline void set_cr0(const struct pt_regs *regs,
1053 struct instruction_op *op)
1058 op->ccval = (regs->ccr & 0x0fffffff) | ((regs->xer >> 3) & 0x10000000);
1059 #ifdef __powerpc64__
1060 if (!(regs->msr & MSR_64BIT))
1061 val = (int) val;
1062 #endif
1063 if (val < 0)
1064 op->ccval |= 0x80000000;
1065 else if (val > 0)
1066 op->ccval |= 0x40000000;
1067 else
1068 op->ccval |= 0x20000000;
1071 static nokprobe_inline void set_ca32(struct instruction_op *op, bool val)
1073 if (cpu_has_feature(CPU_FTR_ARCH_300)) {
1075 op->xerval |= XER_CA32;
1077 op->xerval &= ~XER_CA32;
1081 static nokprobe_inline void add_with_carry(const struct pt_regs *regs,
1082 struct instruction_op *op, int rd,
1083 unsigned long val1, unsigned long val2,
1084 unsigned long carry_in)
1086 unsigned long val = val1 + val2;
1087
1088 if (carry_in)
1089 ++val;
1090 op->type = COMPUTE + SETREG + SETXER;
1093 #ifdef __powerpc64__
1094 if (!(regs->msr & MSR_64BIT)) {
1095 val = (unsigned int) val;
1096 val1 = (unsigned int) val1;
1099 op->xerval = regs->xer;
1100 if (val < val1 || (carry_in && val == val1))
1101 op->xerval |= XER_CA;
1103 op->xerval &= ~XER_CA;
1105 set_ca32(op, (unsigned int)val < (unsigned int)val1 ||
1106 (carry_in && (unsigned int)val == (unsigned int)val1));
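/*
 * Carry detection above relies on unsigned wrap-around: e.g.
 * val1 = ~0UL, val2 = 0, carry_in = 1 gives val = 0, so val < val1
 * and XER[CA] is set; when carry_in is 1, val == val1 also means the
 * addition wrapped all the way around.
 */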
1109 static nokprobe_inline void do_cmp_signed(const struct pt_regs *regs,
1110 struct instruction_op *op,
1111 long v1, long v2, int crfld)
1113 unsigned int crval, shift;
1115 op->type = COMPUTE + SETCC;
1116 crval = (regs->xer >> 31) & 1; /* get SO bit */
1117 if (v1 < v2)
1118 crval |= 8;
1119 else if (v1 > v2)
1120 crval |= 4;
1121 else
1122 crval |= 2;
1123 shift = (7 - crfld) * 4;
1124 op->ccval = (regs->ccr & ~(0xf << shift)) | (crval << shift);
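/*
 * crfld selects the CR field: crfld 0 is CR0, held in the four most
 * significant bits of regs->ccr (shift 28), and crfld 7 is CR7
 * (shift 0).
 */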
1127 static nokprobe_inline void do_cmp_unsigned(const struct pt_regs *regs,
1128 struct instruction_op *op,
1130 unsigned long v2, int crfld)
1132 unsigned int crval, shift;
1134 op->type = COMPUTE + SETCC;
1135 crval = (regs->xer >> 31) & 1; /* get SO bit */
1136 if (v1 < v2)
1137 crval |= 8;
1138 else if (v1 > v2)
1139 crval |= 4;
1140 else
1141 crval |= 2;
1142 shift = (7 - crfld) * 4;
1143 op->ccval = (regs->ccr & ~(0xf << shift)) | (crval << shift);
1146 static nokprobe_inline void do_cmpb(const struct pt_regs *regs,
1147 struct instruction_op *op,
1148 unsigned long v1, unsigned long v2)
1150 unsigned long long out_val, mask;
1154 for (i = 0; i < 8; i++) {
1155 mask = 0xffUL << (i * 8);
1156 if ((v1 & mask) == (v2 & mask))
1157 out_val |= mask;
1163 * The size parameter selects the equivalent popcnt instruction:
1164 * popcntb = 8, popcntw = 32, popcntd = 64
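 *
 * The body is the usual SWAR reduction: e.g. for v1 = 0xff the
 * per-byte intermediates are 0xaa (two-bit pair counts), 0x44
 * (nibble counts), then 0x08, i.e. eight set bits in that byte.
 */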
1166 static nokprobe_inline void do_popcnt(const struct pt_regs *regs,
1167 struct instruction_op *op,
1168 unsigned long v1, int size)
1170 unsigned long long out = v1;
1172 out -= (out >> 1) & 0x5555555555555555ULL;
1173 out = (0x3333333333333333ULL & out) +
1174 (0x3333333333333333ULL & (out >> 2));
1175 out = (out + (out >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
1177 if (size == 8) { /* popcntb */
1183 if (size == 32) { /* popcntw */
1184 op->val = out & 0x0000003f0000003fULL;
1188 out = (out + (out >> 32)) & 0x7f;
1189 op->val = out; /* popcntd */
1193 static nokprobe_inline void do_bpermd(const struct pt_regs *regs,
1194 struct instruction_op *op,
1195 unsigned long v1, unsigned long v2)
1197 unsigned char perm, idx;
1201 for (i = 0; i < 8; i++) {
1202 idx = (v1 >> (i * 8)) & 0xff;
1203 if (idx < 64)
1204 if (v2 & PPC_BIT(idx))
1205 perm |= 1 << i;
1209 #endif /* CONFIG_PPC64 */
1211 * The size parameter selects the equivalent prty instruction:
1212 * prtyw = 32, prtyd = 64
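 *
 * The result is the parity of the least-significant bit of each
 * byte: e.g. for prtyd, v = 0x0000010000010100 has three byte LSBs
 * set, so op->val = 1.
 */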
1214 static nokprobe_inline void do_prty(const struct pt_regs *regs,
1215 struct instruction_op *op,
1216 unsigned long v, int size)
1218 unsigned long long res = v ^ (v >> 8);
1221 if (size == 32) { /* prtyw */
1222 op->val = res & 0x0000000100000001ULL;
1227 op->val = res & 1; /* prtyd */
1230 static nokprobe_inline int trap_compare(long v1, long v2)
1240 if ((unsigned long)v1 < (unsigned long)v2)
1242 else if ((unsigned long)v1 > (unsigned long)v2)
1248 * Elements of 32-bit rotate and mask instructions.
1250 #define MASK32(mb, me) ((0xffffffffUL >> (mb)) + \
1251 ((signed long)-0x80000000L >> (me)) + ((me) >= (mb)))
1252 #ifdef __powerpc64__
1253 #define MASK64_L(mb) (~0UL >> (mb))
1254 #define MASK64_R(me) ((signed long)-0x8000000000000000L >> (me))
1255 #define MASK64(mb, me) (MASK64_L(mb) + MASK64_R(me) + ((me) >= (mb)))
1256 #define DATA32(x) (((x) & 0xffffffffUL) | (((x) & 0xffffffffUL) << 32))
1258 #define DATA32(x) (x)
1260 #define ROTATE(x, n) ((n) ? (((x) << (n)) | ((x) >> (8 * sizeof(long) - (n)))) : (x))
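/*
 * Worked example: MASK32(mb, me) builds the rotate-and-mask masks in
 * IBM bit order (bit 0 is the MSB), so MASK32(16, 23) = 0x0000ff00,
 * and a "wrapped" mask such as MASK32(30, 1) keeps bits 30, 31, 0
 * and 1 (0xc0000003 in the low word). DATA32() doubles a 32-bit
 * value into both halves of a 64-bit register so that ROTATE(), a
 * 64-bit rotate-left, also implements the 32-bit rotate.
 */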
1263 * Decode an instruction, and return information about it in *op
1264 * without changing *regs.
1265 * Integer arithmetic and logical instructions, branches, and barrier
1266 * instructions can be emulated just using the information in *op.
1268 * Return value is 1 if the instruction can be emulated just by
1269 * updating *regs with the information in *op, -1 if we need the
1270 * GPRs but *regs doesn't contain the full register set, or 0
1271 * otherwise.
1272 */
1273 int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
1274 struct ppc_inst instr)
1277 unsigned int suffixopcode, prefixtype, prefix_r;
1279 unsigned int opcode, ra, rb, rc, rd, spr, u;
1280 unsigned long int imm;
1281 unsigned long int val, val2;
1282 unsigned int mb, me, sh;
1283 unsigned int word, suffix;
1286 word = ppc_inst_val(instr);
1287 suffix = ppc_inst_suffix(instr);
1291 opcode = ppc_inst_primary_opcode(instr);
1295 imm = (signed short)(word & 0xfffc);
1296 if ((word & 2) == 0)
1297 imm += regs->nip;
1298 op->val = truncate_if_32bit(regs->msr, imm);
1301 if (branch_taken(word, regs, op))
1302 op->type |= BRTAKEN;
1306 if ((word & 0xfe2) == 2)
1308 else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) &&
1309 (word & 0xfe3) == 1) { /* scv */
1310 op->type = SYSCALL_VECTORED_0;
1311 if (!cpu_has_feature(CPU_FTR_ARCH_300))
1312 goto unknown_opcode;
1318 op->type = BRANCH | BRTAKEN;
1319 imm = word & 0x03fffffc;
1320 if (imm & 0x02000000)
1321 imm -= 0x04000000;
1322 if ((word & 2) == 0)
1323 imm += regs->nip;
1324 op->val = truncate_if_32bit(regs->msr, imm);
1329 switch ((word >> 1) & 0x3ff) {
1331 op->type = COMPUTE + SETCC;
1332 rd = 7 - ((word >> 23) & 0x7);
1333 ra = 7 - ((word >> 18) & 0x7);
1334 rd *= 4;
1335 ra *= 4;
1336 val = (regs->ccr >> ra) & 0xf;
1337 op->ccval = (regs->ccr & ~(0xfUL << rd)) | (val << rd);
1341 case 528: /* bcctr */
1343 imm = (word & 0x400)? regs->ctr: regs->link;
1344 op->val = truncate_if_32bit(regs->msr, imm);
1347 if (branch_taken(word, regs, op))
1348 op->type |= BRTAKEN;
1351 case 18: /* rfid, scary */
1352 if (regs->msr & MSR_PR)
1357 case 150: /* isync */
1358 op->type = BARRIER | BARRIER_ISYNC;
1361 case 33: /* crnor */
1362 case 129: /* crandc */
1363 case 193: /* crxor */
1364 case 225: /* crnand */
1365 case 257: /* crand */
1366 case 289: /* creqv */
1367 case 417: /* crorc */
1368 case 449: /* cror */
1369 op->type = COMPUTE + SETCC;
1370 ra = (word >> 16) & 0x1f;
1371 rb = (word >> 11) & 0x1f;
1372 rd = (word >> 21) & 0x1f;
1373 ra = (regs->ccr >> (31 - ra)) & 1;
1374 rb = (regs->ccr >> (31 - rb)) & 1;
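/*
 * The four bits at word[9:6] form the truth table of the CR op,
 * indexed by the two source bits: e.g. for crand (minor opcode 257,
 * 0b0100000001) only the index-3 entry is set, so val is 1 only when
 * both CR source bits are 1.
 */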
1375 val = (word >> (6 + ra * 2 + rb)) & 1;
1376 op->ccval = (regs->ccr & ~(1UL << (31 - rd))) |
1377 (val << (31 - rd));
1382 switch ((word >> 1) & 0x3ff) {
1383 case 598: /* sync */
1384 op->type = BARRIER + BARRIER_SYNC;
1385 #ifdef __powerpc64__
1386 switch ((word >> 21) & 3) {
1387 case 1: /* lwsync */
1388 op->type = BARRIER + BARRIER_LWSYNC;
1390 case 2: /* ptesync */
1391 op->type = BARRIER + BARRIER_PTESYNC;
1397 case 854: /* eieio */
1398 op->type = BARRIER + BARRIER_EIEIO;
1404 /* Following cases refer to regs->gpr[], so we need all regs */
1405 if (!FULL_REGS(regs))
1406 return -1;
1408 rd = (word >> 21) & 0x1f;
1409 ra = (word >> 16) & 0x1f;
1410 rb = (word >> 11) & 0x1f;
1411 rc = (word >> 6) & 0x1f;
1414 #ifdef __powerpc64__
1416 if (!cpu_has_feature(CPU_FTR_ARCH_31))
1417 goto unknown_opcode;
1419 prefix_r = GET_PREFIX_R(word);
1420 ra = GET_PREFIX_RA(suffix);
1421 rd = (suffix >> 21) & 0x1f;
1423 op->val = regs->gpr[rd];
1424 suffixopcode = get_op(suffix);
1425 prefixtype = (word >> 24) & 0x3;
1426 switch (prefixtype) {
1430 switch (suffixopcode) {
1431 case 14: /* paddi */
1432 op->type = COMPUTE | PREFIXED;
1433 op->val = mlsd_8lsd_ea(word, suffix, regs);
1439 if (rd & trap_compare(regs->gpr[ra], (short) word))
1444 if (rd & trap_compare((int)regs->gpr[ra], (short) word))
1448 #ifdef __powerpc64__
1451 * There are very many instructions with this primary opcode
1452 * introduced in the ISA as early as v2.03. However, the ones
1453 * we currently emulate were all introduced with ISA 3.0
1455 if (!cpu_has_feature(CPU_FTR_ARCH_300))
1456 goto unknown_opcode;
1458 switch (word & 0x3f) {
1459 case 48: /* maddhd */
1460 asm volatile(PPC_MADDHD(%0, %1, %2, %3) :
1461 "=r" (op->val) : "r" (regs->gpr[ra]),
1462 "r" (regs->gpr[rb]), "r" (regs->gpr[rc]));
1465 case 49: /* maddhdu */
1466 asm volatile(PPC_MADDHDU(%0, %1, %2, %3) :
1467 "=r" (op->val) : "r" (regs->gpr[ra]),
1468 "r" (regs->gpr[rb]), "r" (regs->gpr[rc]));
1471 case 51: /* maddld */
1472 asm volatile(PPC_MADDLD(%0, %1, %2, %3) :
1473 "=r" (op->val) : "r" (regs->gpr[ra]),
1474 "r" (regs->gpr[rb]), "r" (regs->gpr[rc]));
1479 * There are other instructions from ISA 3.0 with the same
1480 * primary opcode which do not have emulation support yet.
1482 goto unknown_opcode;
1486 op->val = regs->gpr[ra] * (short) word;
1489 case 8: /* subfic */
1491 add_with_carry(regs, op, rd, ~regs->gpr[ra], imm, 1);
1494 case 10: /* cmpli */
1495 imm = (unsigned short) word;
1496 val = regs->gpr[ra];
1497 #ifdef __powerpc64__
1498 if ((rd & 1) == 0)
1499 val = (unsigned int) val;
1500 #endif
1501 do_cmp_unsigned(regs, op, val, imm, rd >> 2);
1506 val = regs->gpr[ra];
1507 #ifdef __powerpc64__
1508 if ((rd & 1) == 0)
1509 val = (int) val;
1510 #endif
1511 do_cmp_signed(regs, op, val, imm, rd >> 2);
1514 case 12: /* addic */
1516 add_with_carry(regs, op, rd, regs->gpr[ra], imm, 0);
1519 case 13: /* addic. */
1521 add_with_carry(regs, op, rd, regs->gpr[ra], imm, 0);
1527 if (ra)
1528 imm += regs->gpr[ra];
1532 case 15: /* addis */
1533 imm = ((short) word) << 16;
1534 if (ra)
1535 imm += regs->gpr[ra];
1540 if (((word >> 1) & 0x1f) == 2) {
1542 if (!cpu_has_feature(CPU_FTR_ARCH_300))
1543 goto unknown_opcode;
1544 imm = (short) (word & 0xffc1); /* d0 + d2 fields */
1545 imm |= (word >> 15) & 0x3e; /* d1 field */
1546 op->val = regs->nip + (imm << 16) + 4;
1552 case 20: /* rlwimi */
1553 mb = (word >> 6) & 0x1f;
1554 me = (word >> 1) & 0x1f;
1555 val = DATA32(regs->gpr[rd]);
1556 imm = MASK32(mb, me);
1557 op->val = (regs->gpr[ra] & ~imm) | (ROTATE(val, rb) & imm);
1560 case 21: /* rlwinm */
1561 mb = (word >> 6) & 0x1f;
1562 me = (word >> 1) & 0x1f;
1563 val = DATA32(regs->gpr[rd]);
1564 op->val = ROTATE(val, rb) & MASK32(mb, me);
1567 case 23: /* rlwnm */
1568 mb = (word >> 6) & 0x1f;
1569 me = (word >> 1) & 0x1f;
1570 rb = regs->gpr[rb] & 0x1f;
1571 val = DATA32(regs->gpr[rd]);
1572 op->val = ROTATE(val, rb) & MASK32(mb, me);
1576 op->val = regs->gpr[rd] | (unsigned short) word;
1577 goto logical_done_nocc;
1580 imm = (unsigned short) word;
1581 op->val = regs->gpr[rd] | (imm << 16);
1582 goto logical_done_nocc;
1585 op->val = regs->gpr[rd] ^ (unsigned short) word;
1586 goto logical_done_nocc;
1588 case 27: /* xoris */
1589 imm = (unsigned short) word;
1590 op->val = regs->gpr[rd] ^ (imm << 16);
1591 goto logical_done_nocc;
1593 case 28: /* andi. */
1594 op->val = regs->gpr[rd] & (unsigned short) word;
1596 goto logical_done_nocc;
1598 case 29: /* andis. */
1599 imm = (unsigned short) word;
1600 op->val = regs->gpr[rd] & (imm << 16);
1602 goto logical_done_nocc;
1604 #ifdef __powerpc64__
1606 mb = ((word >> 6) & 0x1f) | (word & 0x20);
1607 val = regs->gpr[rd];
1608 if ((word & 0x10) == 0) {
1609 sh = rb | ((word & 2) << 4);
1610 val = ROTATE(val, sh);
1611 switch ((word >> 2) & 3) {
1612 case 0: /* rldicl */
1613 val &= MASK64_L(mb);
1615 case 1: /* rldicr */
1616 val &= MASK64_R(mb);
1619 val &= MASK64(mb, 63 - sh);
1621 case 3: /* rldimi */
1622 imm = MASK64(mb, 63 - sh);
1623 val = (regs->gpr[ra] & ~imm) |
1629 sh = regs->gpr[rb] & 0x3f;
1630 val = ROTATE(val, sh);
1631 switch ((word >> 1) & 7) {
1633 op->val = val & MASK64_L(mb);
1636 op->val = val & MASK64_R(mb);
1641 op->type = UNKNOWN; /* illegal instruction */
1645 /* isel occupies 32 minor opcodes */
1646 if (((word >> 1) & 0x1f) == 15) {
1647 mb = (word >> 6) & 0x1f; /* bc field */
1648 val = (regs->ccr >> (31 - mb)) & 1;
1649 val2 = (ra) ? regs->gpr[ra] : 0;
1651 op->val = (val) ? val2 : regs->gpr[rb];
1655 switch ((word >> 1) & 0x3ff) {
1658 (rd & trap_compare((int)regs->gpr[ra],
1659 (int)regs->gpr[rb])))
1662 #ifdef __powerpc64__
1664 if (rd & trap_compare(regs->gpr[ra], regs->gpr[rb]))
1668 case 83: /* mfmsr */
1669 if (regs->msr & MSR_PR)
1674 case 146: /* mtmsr */
1675 if (regs->msr & MSR_PR)
1679 op->val = 0xffffffff & ~(MSR_ME | MSR_LE);
1682 case 178: /* mtmsrd */
1683 if (regs->msr & MSR_PR)
1687 /* only MSR_EE and MSR_RI get changed if bit 15 set */
1688 /* mtmsrd doesn't change MSR_HV, MSR_ME or MSR_LE */
1689 imm = (word & 0x10000)? 0x8002: 0xefffffffffffeffeUL;
1696 if ((word >> 20) & 1) {
1697 imm = 0xf0000000UL;
1698 for (sh = 0; sh < 8; ++sh) {
1699 if (word & (0x80000 >> sh))
1700 break;
1701 imm >>= 4;
1704 op->val = regs->ccr & imm;
1707 case 144: /* mtcrf */
1708 op->type = COMPUTE + SETCC;
1709 imm = 0xf0000000UL;
1710 val = regs->gpr[rd];
1711 op->ccval = regs->ccr;
1712 for (sh = 0; sh < 8; ++sh) {
1713 if (word & (0x80000 >> sh))
1714 op->ccval = (op->ccval & ~imm) |
1715 (val & imm);
1716 imm >>= 4;
1720 case 339: /* mfspr */
1721 spr = ((word >> 16) & 0x1f) | ((word >> 6) & 0x3e0);
1725 if (spr == SPRN_XER || spr == SPRN_LR ||
1730 case 467: /* mtspr */
1731 spr = ((word >> 16) & 0x1f) | ((word >> 6) & 0x3e0);
1733 op->val = regs->gpr[rd];
1735 if (spr == SPRN_XER || spr == SPRN_LR ||
1741 * Compare instructions
1744 val = regs->gpr[ra];
1745 val2 = regs->gpr[rb];
1746 #ifdef __powerpc64__
1747 if ((rd & 1) == 0) {
1748 /* word (32-bit) compare */
1753 do_cmp_signed(regs, op, val, val2, rd >> 2);
1757 val = regs->gpr[ra];
1758 val2 = regs->gpr[rb];
1759 #ifdef __powerpc64__
1760 if ((rd & 1) == 0) {
1761 /* word (32-bit) compare */
1762 val = (unsigned int) val;
1763 val2 = (unsigned int) val2;
1766 do_cmp_unsigned(regs, op, val, val2, rd >> 2);
1769 case 508: /* cmpb */
1770 do_cmpb(regs, op, regs->gpr[rd], regs->gpr[rb]);
1771 goto logical_done_nocc;
1774 * Arithmetic instructions
1777 add_with_carry(regs, op, rd, ~regs->gpr[ra],
1778 regs->gpr[rb], 1);
1780 #ifdef __powerpc64__
1781 case 9: /* mulhdu */
1782 asm("mulhdu %0,%1,%2" : "=r" (op->val) :
1783 "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1787 add_with_carry(regs, op, rd, regs->gpr[ra],
1788 regs->gpr[rb], 0);
1791 case 11: /* mulhwu */
1792 asm("mulhwu %0,%1,%2" : "=r" (op->val) :
1793 "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1797 op->val = regs->gpr[rb] - regs->gpr[ra];
1799 #ifdef __powerpc64__
1800 case 73: /* mulhd */
1801 asm("mulhd %0,%1,%2" : "=r" (op->val) :
1802 "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1805 case 75: /* mulhw */
1806 asm("mulhw %0,%1,%2" : "=r" (op->val) :
1807 "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1811 op->val = -regs->gpr[ra];
1814 case 136: /* subfe */
1815 add_with_carry(regs, op, rd, ~regs->gpr[ra],
1816 regs->gpr[rb], regs->xer & XER_CA);
1819 case 138: /* adde */
1820 add_with_carry(regs, op, rd, regs->gpr[ra],
1821 regs->gpr[rb], regs->xer & XER_CA);
1824 case 200: /* subfze */
1825 add_with_carry(regs, op, rd, ~regs->gpr[ra], 0L,
1826 regs->xer & XER_CA);
1829 case 202: /* addze */
1830 add_with_carry(regs, op, rd, regs->gpr[ra], 0L,
1831 regs->xer & XER_CA);
1834 case 232: /* subfme */
1835 add_with_carry(regs, op, rd, ~regs->gpr[ra], -1L,
1836 regs->xer & XER_CA);
1838 #ifdef __powerpc64__
1839 case 233: /* mulld */
1840 op->val = regs->gpr[ra] * regs->gpr[rb];
1843 case 234: /* addme */
1844 add_with_carry(regs, op, rd, regs->gpr[ra], -1L,
1845 regs->xer & XER_CA);
1848 case 235: /* mullw */
1849 op->val = (long)(int) regs->gpr[ra] *
1850 (int) regs->gpr[rb];
1853 #ifdef __powerpc64__
1854 case 265: /* modud */
1855 if (!cpu_has_feature(CPU_FTR_ARCH_300))
1856 goto unknown_opcode;
1857 op->val = regs->gpr[ra] % regs->gpr[rb];
1861 op->val = regs->gpr[ra] + regs->gpr[rb];
1864 case 267: /* moduw */
1865 if (!cpu_has_feature(CPU_FTR_ARCH_300))
1866 goto unknown_opcode;
1867 op->val = (unsigned int) regs->gpr[ra] %
1868 (unsigned int) regs->gpr[rb];
1870 #ifdef __powerpc64__
1871 case 457: /* divdu */
1872 op->val = regs->gpr[ra] / regs->gpr[rb];
1875 case 459: /* divwu */
1876 op->val = (unsigned int) regs->gpr[ra] /
1877 (unsigned int) regs->gpr[rb];
1879 #ifdef __powerpc64__
1880 case 489: /* divd */
1881 op->val = (long int) regs->gpr[ra] /
1882 (long int) regs->gpr[rb];
1885 case 491: /* divw */
1886 op->val = (int) regs->gpr[ra] /
1887 (int) regs->gpr[rb];
1889 #ifdef __powerpc64__
1890 case 425: /* divde[.] */
1891 asm volatile(PPC_DIVDE(%0, %1, %2) :
1892 "=r" (op->val) : "r" (regs->gpr[ra]),
1893 "r" (regs->gpr[rb]));
1895 case 393: /* divdeu[.] */
1896 asm volatile(PPC_DIVDEU(%0, %1, %2) :
1897 "=r" (op->val) : "r" (regs->gpr[ra]),
1898 "r" (regs->gpr[rb]));
1901 case 755: /* darn */
1902 if (!cpu_has_feature(CPU_FTR_ARCH_300))
1903 goto unknown_opcode;
1906 /* 32-bit conditioned */
1907 asm volatile(PPC_DARN(%0, 0) : "=r" (op->val));
1911 /* 64-bit conditioned */
1912 asm volatile(PPC_DARN(%0, 1) : "=r" (op->val));
1917 asm volatile(PPC_DARN(%0, 2) : "=r" (op->val));
1921 goto unknown_opcode;
1922 #ifdef __powerpc64__
1923 case 777: /* modsd */
1924 if (!cpu_has_feature(CPU_FTR_ARCH_300))
1925 goto unknown_opcode;
1926 op->val = (long int) regs->gpr[ra] %
1927 (long int) regs->gpr[rb];
1930 case 779: /* modsw */
1931 if (!cpu_has_feature(CPU_FTR_ARCH_300))
1932 goto unknown_opcode;
1933 op->val = (int) regs->gpr[ra] %
1934 (int) regs->gpr[rb];
1939 * Logical instructions
1941 case 26: /* cntlzw */
1942 val = (unsigned int) regs->gpr[rd];
1943 op->val = ( val ? __builtin_clz(val) : 32 );
1945 #ifdef __powerpc64__
1946 case 58: /* cntlzd */
1947 val = regs->gpr[rd];
1948 op->val = ( val ? __builtin_clzl(val) : 64 );
1952 op->val = regs->gpr[rd] & regs->gpr[rb];
1956 op->val = regs->gpr[rd] & ~regs->gpr[rb];
1959 case 122: /* popcntb */
1960 do_popcnt(regs, op, regs->gpr[rd], 8);
1961 goto logical_done_nocc;
1964 op->val = ~(regs->gpr[rd] | regs->gpr[rb]);
1967 case 154: /* prtyw */
1968 do_prty(regs, op, regs->gpr[rd], 32);
1969 goto logical_done_nocc;
1971 case 186: /* prtyd */
1972 do_prty(regs, op, regs->gpr[rd], 64);
1973 goto logical_done_nocc;
1975 case 252: /* bpermd */
1976 do_bpermd(regs, op, regs->gpr[rd], regs->gpr[rb]);
1977 goto logical_done_nocc;
1980 op->val = ~(regs->gpr[rd] ^ regs->gpr[rb]);
1984 op->val = regs->gpr[rd] ^ regs->gpr[rb];
1987 case 378: /* popcntw */
1988 do_popcnt(regs, op, regs->gpr[rd], 32);
1989 goto logical_done_nocc;
1992 op->val = regs->gpr[rd] | ~regs->gpr[rb];
1996 op->val = regs->gpr[rd] | regs->gpr[rb];
1999 case 476: /* nand */
2000 op->val = ~(regs->gpr[rd] & regs->gpr[rb]);
2003 case 506: /* popcntd */
2004 do_popcnt(regs, op, regs->gpr[rd], 64);
2005 goto logical_done_nocc;
2007 case 538: /* cnttzw */
2008 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2009 goto unknown_opcode;
2010 val = (unsigned int) regs->gpr[rd];
2011 op->val = (val ? __builtin_ctz(val) : 32);
2013 #ifdef __powerpc64__
2014 case 570: /* cnttzd */
2015 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2016 goto unknown_opcode;
2017 val = regs->gpr[rd];
2018 op->val = (val ? __builtin_ctzl(val) : 64);
2021 case 922: /* extsh */
2022 op->val = (signed short) regs->gpr[rd];
2025 case 954: /* extsb */
2026 op->val = (signed char) regs->gpr[rd];
2028 #ifdef __powerpc64__
2029 case 986: /* extsw */
2030 op->val = (signed int) regs->gpr[rd];
2035 * Shift instructions
2038 sh = regs->gpr[rb] & 0x3f;
2039 if (sh < 32)
2040 op->val = (regs->gpr[rd] << sh) & 0xffffffffUL;
2041 else
2042 op->val = 0;
2046 sh = regs->gpr[rb] & 0x3f;
2047 if (sh < 32)
2048 op->val = (regs->gpr[rd] & 0xffffffffUL) >> sh;
2049 else
2050 op->val = 0;
2053 case 792: /* sraw */
2054 op->type = COMPUTE + SETREG + SETXER;
2055 sh = regs->gpr[rb] & 0x3f;
2056 ival = (signed int) regs->gpr[rd];
2057 op->val = ival >> (sh < 32 ? sh : 31);
2058 op->xerval = regs->xer;
2059 if (ival < 0 && (sh >= 32 || (ival & ((1ul << sh) - 1)) != 0))
2060 op->xerval |= XER_CA;
2062 op->xerval &= ~XER_CA;
2063 set_ca32(op, op->xerval & XER_CA);
2066 case 824: /* srawi */
2067 op->type = COMPUTE + SETREG + SETXER;
2068 sh = rb;
2069 ival = (signed int) regs->gpr[rd];
2070 op->val = ival >> sh;
2071 op->xerval = regs->xer;
2072 if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
2073 op->xerval |= XER_CA;
2075 op->xerval &= ~XER_CA;
2076 set_ca32(op, op->xerval & XER_CA);
2079 #ifdef __powerpc64__
2081 sh = regs->gpr[rb] & 0x7f;
2082 if (sh < 64)
2083 op->val = regs->gpr[rd] << sh;
2084 else
2085 op->val = 0;
2089 sh = regs->gpr[rb] & 0x7f;
2090 if (sh < 64)
2091 op->val = regs->gpr[rd] >> sh;
2092 else
2093 op->val = 0;
2096 case 794: /* srad */
2097 op->type = COMPUTE + SETREG + SETXER;
2098 sh = regs->gpr[rb] & 0x7f;
2099 ival = (signed long int) regs->gpr[rd];
2100 op->val = ival >> (sh < 64 ? sh : 63);
2101 op->xerval = regs->xer;
2102 if (ival < 0 && (sh >= 64 || (ival & ((1ul << sh) - 1)) != 0))
2103 op->xerval |= XER_CA;
2105 op->xerval &= ~XER_CA;
2106 set_ca32(op, op->xerval & XER_CA);
2109 case 826: /* sradi with sh_5 = 0 */
2110 case 827: /* sradi with sh_5 = 1 */
2111 op->type = COMPUTE + SETREG + SETXER;
2112 sh = rb | ((word & 2) << 4);
2113 ival = (signed long int) regs->gpr[rd];
2114 op->val = ival >> sh;
2115 op->xerval = regs->xer;
2116 if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
2117 op->xerval |= XER_CA;
2119 op->xerval &= ~XER_CA;
2120 set_ca32(op, op->xerval & XER_CA);
2123 case 890: /* extswsli with sh_5 = 0 */
2124 case 891: /* extswsli with sh_5 = 1 */
2125 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2126 goto unknown_opcode;
2127 op->type = COMPUTE + SETREG;
2128 sh = rb | ((word & 2) << 4);
2129 val = (signed int) regs->gpr[rd];
2130 if (sh)
2131 op->val = ROTATE(val, sh) & MASK64(0, 63 - sh);
2132 else
2133 op->val = val;
2136 #endif /* __powerpc64__ */
2139 * Cache instructions
2141 case 54: /* dcbst */
2142 op->type = MKOP(CACHEOP, DCBST, 0);
2143 op->ea = xform_ea(word, regs);
2147 op->type = MKOP(CACHEOP, DCBF, 0);
2148 op->ea = xform_ea(word, regs);
2151 case 246: /* dcbtst */
2152 op->type = MKOP(CACHEOP, DCBTST, 0);
2153 op->ea = xform_ea(word, regs);
2157 case 278: /* dcbt */
2158 op->type = MKOP(CACHEOP, DCBT, 0);
2159 op->ea = xform_ea(word, regs);
2163 case 982: /* icbi */
2164 op->type = MKOP(CACHEOP, ICBI, 0);
2165 op->ea = xform_ea(word, regs);
2168 case 1014: /* dcbz */
2169 op->type = MKOP(CACHEOP, DCBZ, 0);
2170 op->ea = xform_ea(word, regs);
2180 op->update_reg = ra;
2182 op->val = regs->gpr[rd];
2183 u = (word >> 20) & UPDATE;
2189 op->ea = xform_ea(word, regs);
2190 switch ((word >> 1) & 0x3ff) {
2191 case 20: /* lwarx */
2192 op->type = MKOP(LARX, 0, 4);
2195 case 150: /* stwcx. */
2196 op->type = MKOP(STCX, 0, 4);
2199 #ifdef __powerpc64__
2200 case 84: /* ldarx */
2201 op->type = MKOP(LARX, 0, 8);
2204 case 214: /* stdcx. */
2205 op->type = MKOP(STCX, 0, 8);
2208 case 52: /* lbarx */
2209 op->type = MKOP(LARX, 0, 1);
2212 case 694: /* stbcx. */
2213 op->type = MKOP(STCX, 0, 1);
2216 case 116: /* lharx */
2217 op->type = MKOP(LARX, 0, 2);
2220 case 726: /* sthcx. */
2221 op->type = MKOP(STCX, 0, 2);
2224 case 276: /* lqarx */
2225 if (!((rd & 1) || rd == ra || rd == rb))
2226 op->type = MKOP(LARX, 0, 16);
2229 case 182: /* stqcx. */
2230 if (!(rd & 1))
2231 op->type = MKOP(STCX, 0, 16);
2236 case 55: /* lwzux */
2237 op->type = MKOP(LOAD, u, 4);
2241 case 119: /* lbzux */
2242 op->type = MKOP(LOAD, u, 1);
2245 #ifdef CONFIG_ALTIVEC
2247 * Note: for the load/store vector element instructions,
2248 * bits of the EA say which field of the VMX register to use.
2251 op->type = MKOP(LOAD_VMX, 0, 1);
2252 op->element_size = 1;
2255 case 39: /* lvehx */
2256 op->type = MKOP(LOAD_VMX, 0, 2);
2257 op->element_size = 2;
2260 case 71: /* lvewx */
2261 op->type = MKOP(LOAD_VMX, 0, 4);
2262 op->element_size = 4;
2266 case 359: /* lvxl */
2267 op->type = MKOP(LOAD_VMX, 0, 16);
2268 op->element_size = 16;
2271 case 135: /* stvebx */
2272 op->type = MKOP(STORE_VMX, 0, 1);
2273 op->element_size = 1;
2276 case 167: /* stvehx */
2277 op->type = MKOP(STORE_VMX, 0, 2);
2278 op->element_size = 2;
2281 case 199: /* stvewx */
2282 op->type = MKOP(STORE_VMX, 0, 4);
2283 op->element_size = 4;
2286 case 231: /* stvx */
2287 case 487: /* stvxl */
2288 op->type = MKOP(STORE_VMX, 0, 16);
2290 #endif /* CONFIG_ALTIVEC */
2292 #ifdef __powerpc64__
2295 op->type = MKOP(LOAD, u, 8);
2298 case 149: /* stdx */
2299 case 181: /* stdux */
2300 op->type = MKOP(STORE, u, 8);
2304 case 151: /* stwx */
2305 case 183: /* stwux */
2306 op->type = MKOP(STORE, u, 4);
2309 case 215: /* stbx */
2310 case 247: /* stbux */
2311 op->type = MKOP(STORE, u, 1);
2314 case 279: /* lhzx */
2315 case 311: /* lhzux */
2316 op->type = MKOP(LOAD, u, 2);
2319 #ifdef __powerpc64__
2320 case 341: /* lwax */
2321 case 373: /* lwaux */
2322 op->type = MKOP(LOAD, SIGNEXT | u, 4);
2326 case 343: /* lhax */
2327 case 375: /* lhaux */
2328 op->type = MKOP(LOAD, SIGNEXT | u, 2);
2331 case 407: /* sthx */
2332 case 439: /* sthux */
2333 op->type = MKOP(STORE, u, 2);
2336 #ifdef __powerpc64__
2337 case 532: /* ldbrx */
2338 op->type = MKOP(LOAD, BYTEREV, 8);
2342 case 533: /* lswx */
2343 op->type = MKOP(LOAD_MULTI, 0, regs->xer & 0x7f);
2346 case 534: /* lwbrx */
2347 op->type = MKOP(LOAD, BYTEREV, 4);
2350 case 597: /* lswi */
2351 if (rb == 0)
2352 rb = 32; /* # bytes to load */
2353 op->type = MKOP(LOAD_MULTI, 0, rb);
2354 op->ea = ra ? regs->gpr[ra] : 0;
2357 #ifdef CONFIG_PPC_FPU
2358 case 535: /* lfsx */
2359 case 567: /* lfsux */
2360 op->type = MKOP(LOAD_FP, u | FPCONV, 4);
2363 case 599: /* lfdx */
2364 case 631: /* lfdux */
2365 op->type = MKOP(LOAD_FP, u, 8);
2368 case 663: /* stfsx */
2369 case 695: /* stfsux */
2370 op->type = MKOP(STORE_FP, u | FPCONV, 4);
2373 case 727: /* stfdx */
2374 case 759: /* stfdux */
2375 op->type = MKOP(STORE_FP, u, 8);
2378 #ifdef __powerpc64__
2379 case 791: /* lfdpx */
2380 op->type = MKOP(LOAD_FP, 0, 16);
2383 case 855: /* lfiwax */
2384 op->type = MKOP(LOAD_FP, SIGNEXT, 4);
2387 case 887: /* lfiwzx */
2388 op->type = MKOP(LOAD_FP, 0, 4);
2391 case 919: /* stfdpx */
2392 op->type = MKOP(STORE_FP, 0, 16);
2395 case 983: /* stfiwx */
2396 op->type = MKOP(STORE_FP, 0, 4);
2398 #endif /* __powerpc64__ */
2399 #endif /* CONFIG_PPC_FPU */
2401 #ifdef __powerpc64__
2402 case 660: /* stdbrx */
2403 op->type = MKOP(STORE, BYTEREV, 8);
2404 op->val = byterev_8(regs->gpr[rd]);
2408 case 661: /* stswx */
2409 op->type = MKOP(STORE_MULTI, 0, regs->xer & 0x7f);
2412 case 662: /* stwbrx */
2413 op->type = MKOP(STORE, BYTEREV, 4);
2414 op->val = byterev_4(regs->gpr[rd]);
2417 case 725: /* stswi */
2418 if (rb == 0)
2419 rb = 32; /* # bytes to store */
2420 op->type = MKOP(STORE_MULTI, 0, rb);
2421 op->ea = ra ? regs->gpr[ra] : 0;
2424 case 790: /* lhbrx */
2425 op->type = MKOP(LOAD, BYTEREV, 2);
2428 case 918: /* sthbrx */
2429 op->type = MKOP(STORE, BYTEREV, 2);
2430 op->val = byterev_2(regs->gpr[rd]);
2434 case 12: /* lxsiwzx */
2435 op->reg = rd | ((word & 1) << 5);
2436 op->type = MKOP(LOAD_VSX, 0, 4);
2437 op->element_size = 8;
2440 case 76: /* lxsiwax */
2441 op->reg = rd | ((word & 1) << 5);
2442 op->type = MKOP(LOAD_VSX, SIGNEXT, 4);
2443 op->element_size = 8;
2446 case 140: /* stxsiwx */
2447 op->reg = rd | ((word & 1) << 5);
2448 op->type = MKOP(STORE_VSX, 0, 4);
2449 op->element_size = 8;
2452 case 268: /* lxvx */
2453 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2454 goto unknown_opcode;
2455 op->reg = rd | ((word & 1) << 5);
2456 op->type = MKOP(LOAD_VSX, 0, 16);
2457 op->element_size = 16;
2458 op->vsx_flags = VSX_CHECK_VEC;
2461 case 269: /* lxvl */
2462 case 301: { /* lxvll */
2464 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2465 goto unknown_opcode;
2466 op->reg = rd | ((word & 1) << 5);
2467 op->ea = ra ? regs->gpr[ra] : 0;
2468 nb = regs->gpr[rb] & 0xff;
2469 if (nb > 16)
2470 nb = 16;
2471 op->type = MKOP(LOAD_VSX, 0, nb);
2472 op->element_size = 16;
2473 op->vsx_flags = ((word & 0x20) ? VSX_LDLEFT : 0) |
2477 case 332: /* lxvdsx */
2478 op->reg = rd | ((word & 1) << 5);
2479 op->type = MKOP(LOAD_VSX, 0, 8);
2480 op->element_size = 8;
2481 op->vsx_flags = VSX_SPLAT;
2484 case 333: /* lxvpx */
2485 if (!cpu_has_feature(CPU_FTR_ARCH_31))
2486 goto unknown_opcode;
2487 op->reg = VSX_REGISTER_XTP(rd);
2488 op->type = MKOP(LOAD_VSX, 0, 32);
2489 op->element_size = 32;
2492 case 364: /* lxvwsx */
2493 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2494 goto unknown_opcode;
2495 op->reg = rd | ((word & 1) << 5);
2496 op->type = MKOP(LOAD_VSX, 0, 4);
2497 op->element_size = 4;
2498 op->vsx_flags = VSX_SPLAT | VSX_CHECK_VEC;
2501 case 396: /* stxvx */
2502 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2503 goto unknown_opcode;
2504 op->reg = rd | ((word & 1) << 5);
2505 op->type = MKOP(STORE_VSX, 0, 16);
2506 op->element_size = 16;
2507 op->vsx_flags = VSX_CHECK_VEC;
2510 case 397: /* stxvl */
2511 case 429: { /* stxvll */
2513 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2514 goto unknown_opcode;
2515 op->reg = rd | ((word & 1) << 5);
2516 op->ea = ra ? regs->gpr[ra] : 0;
2517 nb = regs->gpr[rb] & 0xff;
2518 if (nb > 16)
2519 nb = 16;
2520 op->type = MKOP(STORE_VSX, 0, nb);
2521 op->element_size = 16;
2522 op->vsx_flags = ((word & 0x20) ? VSX_LDLEFT : 0) |
2526 case 461: /* stxvpx */
2527 if (!cpu_has_feature(CPU_FTR_ARCH_31))
2528 goto unknown_opcode;
2529 op->reg = VSX_REGISTER_XTP(rd);
2530 op->type = MKOP(STORE_VSX, 0, 32);
2531 op->element_size = 32;
2533 case 524: /* lxsspx */
2534 op->reg = rd | ((word & 1) << 5);
2535 op->type = MKOP(LOAD_VSX, 0, 4);
2536 op->element_size = 8;
2537 op->vsx_flags = VSX_FPCONV;
2540 case 588: /* lxsdx */
2541 op->reg = rd | ((word & 1) << 5);
2542 op->type = MKOP(LOAD_VSX, 0, 8);
2543 op->element_size = 8;
2546 case 652: /* stxsspx */
2547 op->reg = rd | ((word & 1) << 5);
2548 op->type = MKOP(STORE_VSX, 0, 4);
2549 op->element_size = 8;
2550 op->vsx_flags = VSX_FPCONV;
2553 case 716: /* stxsdx */
2554 op->reg = rd | ((word & 1) << 5);
2555 op->type = MKOP(STORE_VSX, 0, 8);
2556 op->element_size = 8;
2559 case 780: /* lxvw4x */
2560 op->reg = rd | ((word & 1) << 5);
2561 op->type = MKOP(LOAD_VSX, 0, 16);
2562 op->element_size = 4;
2565 case 781: /* lxsibzx */
2566 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2567 goto unknown_opcode;
2568 op->reg = rd | ((word & 1) << 5);
2569 op->type = MKOP(LOAD_VSX, 0, 1);
2570 op->element_size = 8;
2571 op->vsx_flags = VSX_CHECK_VEC;
2574 case 812: /* lxvh8x */
2575 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2576 goto unknown_opcode;
2577 op->reg = rd | ((word & 1) << 5);
2578 op->type = MKOP(LOAD_VSX, 0, 16);
2579 op->element_size = 2;
2580 op->vsx_flags = VSX_CHECK_VEC;
2583 case 813: /* lxsihzx */
2584 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2585 goto unknown_opcode;
2586 op->reg = rd | ((word & 1) << 5);
2587 op->type = MKOP(LOAD_VSX, 0, 2);
2588 op->element_size = 8;
2589 op->vsx_flags = VSX_CHECK_VEC;
2592 case 844: /* lxvd2x */
2593 op->reg = rd | ((word & 1) << 5);
2594 op->type = MKOP(LOAD_VSX, 0, 16);
2595 op->element_size = 8;
2598 case 876: /* lxvb16x */
2599 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2600 goto unknown_opcode;
2601 op->reg = rd | ((word & 1) << 5);
2602 op->type = MKOP(LOAD_VSX, 0, 16);
2603 op->element_size = 1;
2604 op->vsx_flags = VSX_CHECK_VEC;
2607 case 908: /* stxvw4x */
2608 op->reg = rd | ((word & 1) << 5);
2609 op->type = MKOP(STORE_VSX, 0, 16);
2610 op->element_size = 4;
2613 case 909: /* stxsibx */
2614 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2615 goto unknown_opcode;
2616 op->reg = rd | ((word & 1) << 5);
2617 op->type = MKOP(STORE_VSX, 0, 1);
2618 op->element_size = 8;
2619 op->vsx_flags = VSX_CHECK_VEC;
2622 case 940: /* stxvh8x */
2623 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2624 goto unknown_opcode;
2625 op->reg = rd | ((word & 1) << 5);
2626 op->type = MKOP(STORE_VSX, 0, 16);
2627 op->element_size = 2;
2628 op->vsx_flags = VSX_CHECK_VEC;
2631 case 941: /* stxsihx */
2632 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2633 goto unknown_opcode;
2634 op->reg = rd | ((word & 1) << 5);
2635 op->type = MKOP(STORE_VSX, 0, 2);
2636 op->element_size = 8;
2637 op->vsx_flags = VSX_CHECK_VEC;
2640 case 972: /* stxvd2x */
2641 op->reg = rd | ((word & 1) << 5);
2642 op->type = MKOP(STORE_VSX, 0, 16);
2643 op->element_size = 8;
2646 case 1004: /* stxvb16x */
2647 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2648 goto unknown_opcode;
2649 op->reg = rd | ((word & 1) << 5);
2650 op->type = MKOP(STORE_VSX, 0, 16);
2651 op->element_size = 1;
2652 op->vsx_flags = VSX_CHECK_VEC;
2655 #endif /* CONFIG_VSX */
2661 op->type = MKOP(LOAD, u, 4);
2662 op->ea = dform_ea(word, regs);
2667 op->type = MKOP(LOAD, u, 1);
2668 op->ea = dform_ea(word, regs);
2673 op->type = MKOP(STORE, u, 4);
2674 op->ea = dform_ea(word, regs);
2679 op->type = MKOP(STORE, u, 1);
2680 op->ea = dform_ea(word, regs);
2685 op->type = MKOP(LOAD, u, 2);
2686 op->ea = dform_ea(word, regs);
2691 op->type = MKOP(LOAD, SIGNEXT | u, 2);
2692 op->ea = dform_ea(word, regs);
2697 op->type = MKOP(STORE, u, 2);
2698 op->ea = dform_ea(word, regs);
2702 if (ra >= rd)
2703 break; /* invalid form, ra in range to load */
2704 op->type = MKOP(LOAD_MULTI, 0, 4 * (32 - rd));
2705 op->ea = dform_ea(word, regs);
2709 op->type = MKOP(STORE_MULTI, 0, 4 * (32 - rd));
2710 op->ea = dform_ea(word, regs);
2713 #ifdef CONFIG_PPC_FPU
2716 op->type = MKOP(LOAD_FP, u | FPCONV, 4);
2717 op->ea = dform_ea(word, regs);
2722 op->type = MKOP(LOAD_FP, u, 8);
2723 op->ea = dform_ea(word, regs);
2727 case 53: /* stfsu */
2728 op->type = MKOP(STORE_FP, u | FPCONV, 4);
2729 op->ea = dform_ea(word, regs);
2733 case 55: /* stfdu */
2734 op->type = MKOP(STORE_FP, u, 8);
2735 op->ea = dform_ea(word, regs);
2739 #ifdef __powerpc64__
2741 if (!((rd & 1) || (rd == ra)))
2742 op->type = MKOP(LOAD, 0, 16);
2743 op->ea = dqform_ea(word, regs);
2748 case 57: /* lfdp, lxsd, lxssp */
2749 op->ea = dsform_ea(word, regs);
2752 if (rd & 1)
2753 break; /* reg must be even */
2754 op->type = MKOP(LOAD_FP, 0, 16);
2757 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2758 goto unknown_opcode;
2760 op->type = MKOP(LOAD_VSX, 0, 8);
2761 op->element_size = 8;
2762 op->vsx_flags = VSX_CHECK_VEC;
2765 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2766 goto unknown_opcode;
2768 op->type = MKOP(LOAD_VSX, 0, 4);
2769 op->element_size = 8;
2770 op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
2774 #endif /* CONFIG_VSX */
2776 #ifdef __powerpc64__
2777 case 58: /* ld[u], lwa */
2778 op->ea = dsform_ea(word, regs);
2781 op->type = MKOP(LOAD, 0, 8);
2784 op->type = MKOP(LOAD, UPDATE, 8);
2787 op->type = MKOP(LOAD, SIGNEXT, 4);
2795 if (!cpu_has_feature(CPU_FTR_ARCH_31))
2796 goto unknown_opcode;
2797 op->ea = dqform_ea(word, regs);
2798 op->reg = VSX_REGISTER_XTP(rd);
2799 op->element_size = 32;
2800 switch (word & 0xf) {
2802 op->type = MKOP(LOAD_VSX, 0, 32);
2805 op->type = MKOP(STORE_VSX, 0, 32);
2810 case 61: /* stfdp, lxv, stxsd, stxssp, stxv */
2812 case 0: /* stfdp with LSB of DS field = 0 */
2813 case 4: /* stfdp with LSB of DS field = 1 */
2814 op->ea = dsform_ea(word, regs);
2815 op->type = MKOP(STORE_FP, 0, 16);
2819 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2820 goto unknown_opcode;
2821 op->ea = dqform_ea(word, regs);
2824 op->type = MKOP(LOAD_VSX, 0, 16);
2825 op->element_size = 16;
2826 op->vsx_flags = VSX_CHECK_VEC;
2829 case 2: /* stxsd with LSB of DS field = 0 */
2830 case 6: /* stxsd with LSB of DS field = 1 */
2831 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2832 goto unknown_opcode;
2833 op->ea = dsform_ea(word, regs);
2835 op->type = MKOP(STORE_VSX, 0, 8);
2836 op->element_size = 8;
2837 op->vsx_flags = VSX_CHECK_VEC;
2840 case 3: /* stxssp with LSB of DS field = 0 */
2841 case 7: /* stxssp with LSB of DS field = 1 */
2842 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2843 goto unknown_opcode;
2844 op->ea = dsform_ea(word, regs);
2846 op->type = MKOP(STORE_VSX, 0, 4);
2847 op->element_size = 8;
2848 op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
2852 if (!cpu_has_feature(CPU_FTR_ARCH_300))
2853 goto unknown_opcode;
2854 op->ea = dqform_ea(word, regs);
2857 op->type = MKOP(STORE_VSX, 0, 16);
2858 op->element_size = 16;
2859 op->vsx_flags = VSX_CHECK_VEC;
2863 #endif /* CONFIG_VSX */
2865 #ifdef __powerpc64__
2866 case 62: /* std[u] */
2867 op->ea = dsform_ea(word, regs);
2870 op->type = MKOP(STORE, 0, 8);
2873 op->type = MKOP(STORE, UPDATE, 8);
2877 op->type = MKOP(STORE, 0, 16);
2881 case 1: /* Prefixed instructions */
2882 if (!cpu_has_feature(CPU_FTR_ARCH_31))
2883 goto unknown_opcode;
2885 prefix_r = GET_PREFIX_R(word);
2886 ra = GET_PREFIX_RA(suffix);
2887 op->update_reg = ra;
2888 rd = (suffix >> 21) & 0x1f;
2890 op->val = regs->gpr[rd];
2892 suffixopcode = get_op(suffix);
2893 prefixtype = (word >> 24) & 0x3;
2894 switch (prefixtype) {
2895 case 0: /* Type 00 Eight-Byte Load/Store */
2898 op->ea = mlsd_8lsd_ea(word, suffix, regs);
2899 switch (suffixopcode) {
2901 op->type = MKOP(LOAD, PREFIXED | SIGNEXT, 4);
2904 case 42: /* plxsd */
2906 op->type = MKOP(LOAD_VSX, PREFIXED, 8);
2907 op->element_size = 8;
2908 op->vsx_flags = VSX_CHECK_VEC;
2910 case 43: /* plxssp */
2912 op->type = MKOP(LOAD_VSX, PREFIXED, 4);
2913 op->element_size = 8;
2914 op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
2916 case 46: /* pstxsd */
2918 op->type = MKOP(STORE_VSX, PREFIXED, 8);
2919 op->element_size = 8;
2920 op->vsx_flags = VSX_CHECK_VEC;
2922 case 47: /* pstxssp */
2924 op->type = MKOP(STORE_VSX, PREFIXED, 4);
2925 op->element_size = 8;
2926 op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
2928 case 51: /* plxv1 */
2931 case 50: /* plxv0 */
2932 op->type = MKOP(LOAD_VSX, PREFIXED, 16);
2933 op->element_size = 16;
2934 op->vsx_flags = VSX_CHECK_VEC;
2936 case 55: /* pstxv1 */
2939 case 54: /* pstxv0 */
2940 op->type = MKOP(STORE_VSX, PREFIXED, 16);
2941 op->element_size = 16;
2942 op->vsx_flags = VSX_CHECK_VEC;
2944 #endif /* CONFIG_VSX */
2946 op->type = MKOP(LOAD, PREFIXED, 16);
2949 op->type = MKOP(LOAD, PREFIXED, 8);
2952 case 58: /* plxvp */
2953 op->reg = VSX_REGISTER_XTP(rd);
2954 op->type = MKOP(LOAD_VSX, PREFIXED, 32);
2955 op->element_size = 32;
2957 #endif /* CONFIG_VSX */
2959 op->type = MKOP(STORE, PREFIXED, 16);
2962 op->type = MKOP(STORE, PREFIXED, 8);
2965 case 62: /* pstxvp */
2966 op->reg = VSX_REGISTER_XTP(rd);
2967 op->type = MKOP(STORE_VSX, PREFIXED, 32);
2968 op->element_size = 32;
2970 #endif /* CONFIG_VSX */
2973 case 1: /* Type 01 Eight-Byte Register-to-Register */
2975 case 2: /* Type 10 Modified Load/Store */
2978 op->ea = mlsd_8lsd_ea(word, suffix, regs);
2979 switch (suffixopcode) {
2981 op->type = MKOP(LOAD, PREFIXED, 4);
2984 op->type = MKOP(LOAD, PREFIXED, 1);
2987 op->type = MKOP(STORE, PREFIXED, 4);
2990 op->type = MKOP(STORE, PREFIXED, 1);
2993 op->type = MKOP(LOAD, PREFIXED, 2);
2996 op->type = MKOP(LOAD, PREFIXED | SIGNEXT, 2);
2999 op->type = MKOP(STORE, PREFIXED, 2);
3002 op->type = MKOP(LOAD_FP, PREFIXED | FPCONV, 4);
3005 op->type = MKOP(LOAD_FP, PREFIXED, 8);
3007 case 52: /* pstfs */
3008 op->type = MKOP(STORE_FP, PREFIXED | FPCONV, 4);
3010 case 54: /* pstfd */
3011 op->type = MKOP(STORE_FP, PREFIXED, 8);
3015 case 3: /* Type 11 Modified Register-to-Register */
3018 #endif /* __powerpc64__ */
3022 if (OP_IS_LOAD_STORE(op->type) && (op->type & UPDATE)) {
3023 switch (GETTYPE(op->type)) {
3026 goto unknown_opcode;
3032 goto unknown_opcode;
3037 if ((GETTYPE(op->type) == LOAD_VSX ||
3038 GETTYPE(op->type) == STORE_VSX) &&
3039 !cpu_has_feature(CPU_FTR_VSX)) {
3042 #endif /* CONFIG_VSX */
3067 op->type = INTERRUPT | 0x700;
3068 op->val = SRR1_PROGPRIV;
3072 op->type = INTERRUPT | 0x700;
3073 op->val = SRR1_PROGTRAP;
3076 EXPORT_SYMBOL_GPL(analyse_instr);
3077 NOKPROBE_SYMBOL(analyse_instr);
3080 * On PPC32 we always use stwu with r1 to change the stack pointer,
3081 * so this emulated store could corrupt the exception frame: we have
3082 * to provide an exception frame trampoline, pushed below the kprobed
3083 * function's stack. Therefore we only update gpr[1] here and do not
3084 * emulate the store itself; the real store is done safely in the
3085 * exception return code, which checks this flag.
3087 static nokprobe_inline int handle_stack_update(unsigned long ea, struct pt_regs *regs)
3091 * Check whether this store would overflow the kernel stack.
3093 if (ea - STACK_INT_FRAME_SIZE <= current->thread.ksp_limit) {
3094 printk(KERN_CRIT "Can't kprobe this since kernel stack would overflow.\n");
3097 #endif /* CONFIG_PPC32 */
3099 * Check whether the flag is already set, since that would mean
3100 * we'd lose the previous value.
3102 WARN_ON(test_thread_flag(TIF_EMULATE_STACK_STORE));
3103 set_thread_flag(TIF_EMULATE_STACK_STORE);
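/*
 * The flag tells the exception return path to perform the real store
 * into the new frame once it is safe to do so; here we only record
 * that the store is pending.
 */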
3107 static nokprobe_inline void do_signext(unsigned long *valp, int size)
3111 *valp = (signed short) *valp;
3114 *valp = (signed int) *valp;
3119 static nokprobe_inline void do_byterev(unsigned long *valp, int size)
3123 *valp = byterev_2(*valp);
3126 *valp = byterev_4(*valp);
3128 #ifdef __powerpc64__
3130 *valp = byterev_8(*valp);
3137 * Emulate an instruction that can be executed just by updating fields in *regs.
3140 void emulate_update_regs(struct pt_regs *regs, struct instruction_op *op)
3142 unsigned long next_pc;
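/*
 * GETLENGTH() gives the instruction length (4 bytes, or 8 for a
 * prefixed instruction), so next_pc points at the following instruction.
 */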
3144 next_pc = truncate_if_32bit(regs->msr, regs->nip + GETLENGTH(op->type));
3145 switch (GETTYPE(op->type)) {
3147 if (op->type & SETREG)
3148 regs->gpr[op->reg] = op->val;
3149 if (op->type & SETCC)
3150 regs->ccr = op->ccval;
3151 if (op->type & SETXER)
3152 regs->xer = op->xerval;
3156 if (op->type & SETLK)
3157 regs->link = next_pc;
3158 if (op->type & BRTAKEN)
3160 if (op->type & DECCTR)
3165 switch (op->type & BARRIER_MASK) {
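/*
 * A barrier can simply be executed on the spot on behalf of the
 * emulated instruction, since its only effect is the ordering or
 * synchronisation it provides.
 */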
3175 case BARRIER_LWSYNC:
3176 asm volatile("lwsync" : : : "memory");
3178 case BARRIER_PTESYNC:
3179 asm volatile("ptesync" : : : "memory");
3187 regs->gpr[op->reg] = regs->xer & 0xffffffffUL;
3190 regs->gpr[op->reg] = regs->link;
3193 regs->gpr[op->reg] = regs->ctr;
3203 regs->xer = op->val & 0xffffffffUL;
3206 regs->link = op->val;
3209 regs->ctr = op->val;
3219 regs->nip = next_pc;
3221 NOKPROBE_SYMBOL(emulate_update_regs);
3224 * Emulate a previously-analysed load or store instruction.
3225 * Return values are:
3226 * 0 = instruction emulated successfully
3227 * -EFAULT = address out of range or access faulted (regs->dar
3228 * contains the faulting address)
3229 * -EACCES = misaligned access, instruction requires alignment
3230 * -EINVAL = unknown operation in *op
3232 int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op)
3234 int err, size, type;
3242 size = GETSIZE(op->type);
3243 type = GETTYPE(op->type);
3244 cross_endian = (regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE);
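/*
 * cross_endian is set when the context being emulated runs with the
 * opposite endianness from the kernel, in which case data moved to or
 * from memory must be byte-swapped.
 */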
3245 ea = truncate_if_32bit(regs->msr, op->ea);
3249 if (ea & (size - 1))
3250 return -EACCES; /* can't handle misaligned */
3251 if (!address_ok(regs, ea, size))
3256 #ifdef __powerpc64__
3258 __get_user_asmx(val, ea, err, "lbarx");
3261 __get_user_asmx(val, ea, err, "lharx");
3265 __get_user_asmx(val, ea, err, "lwarx");
3267 #ifdef __powerpc64__
3269 __get_user_asmx(val, ea, err, "ldarx");
3272 err = do_lqarx(ea, &regs->gpr[op->reg]);
3283 regs->gpr[op->reg] = val;
3287 if (ea & (size - 1))
3288 return -EACCES; /* can't handle misaligned */
3289 if (!address_ok(regs, ea, size))
3293 #ifdef __powerpc64__
3295 __put_user_asmx(op->val, ea, err, "stbcx.", cr);
3298 __put_user_asmx(op->val, ea, err, "sthcx.", cr);
3302 __put_user_asmx(op->val, ea, err, "stwcx.", cr);
3304 #ifdef __powerpc64__
3306 __put_user_asmx(op->val, ea, err, "stdcx.", cr);
3309 err = do_stqcx(ea, regs->gpr[op->reg],
3310 regs->gpr[op->reg + 1], &cr);
3317 regs->ccr = (regs->ccr & 0x0fffffff) |
3319 ((regs->xer >> 3) & 0x10000000);
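/*
 * CR0 is rebuilt from the stcx. result in 'cr' together with the SO
 * bit copied from XER[SO], shifted down into the CR0.SO position.
 */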
3325 #ifdef __powerpc64__
3327 err = emulate_lq(regs, ea, op->reg, cross_endian);
3331 err = read_mem(&regs->gpr[op->reg], ea, size, regs);
3333 if (op->type & SIGNEXT)
3334 do_signext(&regs->gpr[op->reg], size);
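/*
 * A byte-reversed load (e.g. lwbrx) needs swapping when running
 * same-endian, and a normal load needs swapping when running
 * cross-endian; the two effects cancel when both apply.
 */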
3335 if ((op->type & BYTEREV) == (cross_endian ? 0 : BYTEREV))
3336 do_byterev(&regs->gpr[op->reg], size);
3340 #ifdef CONFIG_PPC_FPU
3343 * If the instruction is in userspace, we can emulate it even
3344 * if the FP/vector state is not live, because we have the state
3345 * stored in the thread_struct. If the instruction is in
3346 * the kernel, we must not touch the state in the thread_struct.
3348 if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_FP))
3350 err = do_fp_load(op, ea, regs, cross_endian);
3353 #ifdef CONFIG_ALTIVEC
3355 if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_VEC))
3357 err = do_vec_load(op->reg, ea, size, regs, cross_endian);
3362 unsigned long msrbit = MSR_VSX;
3365 * Some VSX instructions check the MSR_VEC bit rather than MSR_VSX
3366 * when the target of the instruction is a vector register.
3368 if (op->reg >= 32 && (op->vsx_flags & VSX_CHECK_VEC))
3370 if (!(regs->msr & MSR_PR) && !(regs->msr & msrbit))
3372 err = do_vsx_load(op, ea, regs, cross_endian);
3377 if (!address_ok(regs, ea, size))
3380 for (i = 0; i < size; i += 4) {
3381 unsigned int v32 = 0;
3386 err = copy_mem_in((u8 *) &v32, ea, nb, regs);
3389 if (unlikely(cross_endian))
3390 v32 = byterev_4(v32);
3391 regs->gpr[rd] = v32;
3393 /* reg number wraps from 31 to 0 for lsw[ix] */
3394 rd = (rd + 1) & 0x1f;
3399 #ifdef __powerpc64__
3401 err = emulate_stq(regs, ea, op->reg, cross_endian);
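/*
 * A kernel stwu/stdu updating r1 whose new frame would overlap the
 * interrupt frame area below the current stack pointer must not be
 * performed here; it is deferred via handle_stack_update() instead.
 */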
3405 if ((op->type & UPDATE) && size == sizeof(long) &&
3406 op->reg == 1 && op->update_reg == 1 &&
3407 !(regs->msr & MSR_PR) &&
3408 ea >= regs->gpr[1] - STACK_INT_FRAME_SIZE) {
3409 err = handle_stack_update(ea, regs);
3412 if (unlikely(cross_endian))
3413 do_byterev(&op->val, size);
3414 err = write_mem(op->val, ea, size, regs);
3417 #ifdef CONFIG_PPC_FPU
3419 if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_FP))
3421 err = do_fp_store(op, ea, regs, cross_endian);
3424 #ifdef CONFIG_ALTIVEC
3426 if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_VEC))
3428 err = do_vec_store(op->reg, ea, size, regs, cross_endian);
3433 unsigned long msrbit = MSR_VSX;
3436 * Some VSX instructions check the MSR_VEC bit rather than MSR_VSX
3437 * when the target of the instruction is a vector register.
3439 if (op->reg >= 32 && (op->vsx_flags & VSX_CHECK_VEC))
3441 if (!(regs->msr & MSR_PR) && !(regs->msr & msrbit))
3443 err = do_vsx_store(op, ea, regs, cross_endian);
3448 if (!address_ok(regs, ea, size))
3451 for (i = 0; i < size; i += 4) {
3452 unsigned int v32 = regs->gpr[rd];
3457 if (unlikely(cross_endian))
3458 v32 = byterev_4(v32);
3459 err = copy_mem_out((u8 *) &v32, ea, nb, regs);
3463 /* reg number wraps from 31 to 0 for stsw[ix] */
3464 rd = (rd + 1) & 0x1f;
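/*
 * For update forms, the base register is written back with the
 * (untruncated) effective address once the access has succeeded.
 */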
3475 if (op->type & UPDATE)
3476 regs->gpr[op->update_reg] = op->ea;
3480 NOKPROBE_SYMBOL(emulate_loadstore);
3483 * Emulate instructions that cause a transfer of control,
3484 * loads and stores, and a few other instructions.
3485 * Returns 1 if the step was emulated, 0 if not,
3486 * or -1 if the instruction is one that should not be stepped,
3487 * such as an rfid, or a mtmsrd that would clear MSR_RI.
3489 int emulate_step(struct pt_regs *regs, struct ppc_inst instr)
3491 struct instruction_op op;
3496 r = analyse_instr(&op, regs, instr);
3500 emulate_update_regs(regs, &op);
3505 type = GETTYPE(op.type);
3507 if (OP_IS_LOAD_STORE(type)) {
3508 err = emulate_loadstore(regs, &op);
3516 ea = truncate_if_32bit(regs->msr, op.ea);
3517 if (!address_ok(regs, ea, 8))
3519 switch (op.type & CACHEOP_MASK) {
3521 __cacheop_user_asmx(ea, err, "dcbst");
3524 __cacheop_user_asmx(ea, err, "dcbf");
3528 prefetchw((void *) ea);
3532 prefetch((void *) ea);
3535 __cacheop_user_asmx(ea, err, "icbi");
3538 err = emulate_dcbz(ea, regs);
3548 regs->gpr[op.reg] = regs->msr & MSR_MASK;
3552 val = regs->gpr[op.reg];
3553 if ((val & MSR_RI) == 0)
3554 /* can't step mtmsr[d] that would clear MSR_RI */
3556 /* here op.val is the mask of bits to change */
3557 regs->msr = (regs->msr & ~op.val) | (val & op.val);
3561 case SYSCALL: /* sc */
3563 * N.B. this uses knowledge about how the syscall
3564 * entry code works. If that is changed, this will
3565 * need to be changed also.
3567 if (IS_ENABLED(CONFIG_PPC_FAST_ENDIAN_SWITCH) &&
3568 cpu_has_feature(CPU_FTR_REAL_LE) &&
3569 regs->gpr[0] == 0x1ebe) {
3570 regs->msr ^= MSR_LE;
3573 regs->gpr[9] = regs->gpr[13];
3574 regs->gpr[10] = MSR_KERNEL;
3575 regs->gpr[11] = regs->nip + 4;
3576 regs->gpr[12] = regs->msr & MSR_MASK;
3577 regs->gpr[13] = (unsigned long) get_paca();
3578 regs->nip = (unsigned long) &system_call_common;
3579 regs->msr = MSR_KERNEL;
3582 #ifdef CONFIG_PPC_BOOK3S_64
3583 case SYSCALL_VECTORED_0: /* scv 0 */
3584 regs->gpr[9] = regs->gpr[13];
3585 regs->gpr[10] = MSR_KERNEL;
3586 regs->gpr[11] = regs->nip + 4;
3587 regs->gpr[12] = regs->msr & MSR_MASK;
3588 regs->gpr[13] = (unsigned long) get_paca();
3589 regs->nip = (unsigned long) &system_call_vectored_emulate;
3590 regs->msr = MSR_KERNEL;
3601 regs->nip = truncate_if_32bit(regs->msr, regs->nip + GETLENGTH(op.type));
3604 NOKPROBE_SYMBOL(emulate_step);
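/*
 * Illustrative usage sketch (not part of the original file): a caller
 * such as a kprobe or xmon single-step path, which already holds the
 * decoded instruction and the saved pt_regs, would typically do:
 *
 *	int ret = emulate_step(regs, instr);
 *
 *	if (ret > 0) {
 *		// fully emulated; regs->nip has been advanced
 *	} else if (ret == 0) {
 *		// not emulated; fall back to executing the instruction
 *	} else {
 *		// must not be single-stepped (e.g. rfid, mtmsrd clearing RI)
 *	}
 */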