4 * Copyright (C) 2004 Paul Mackerras <paulus@au.ibm.com>, IBM
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
11 #include <linux/kernel.h>
12 #include <linux/kprobes.h>
13 #include <linux/ptrace.h>
14 #include <linux/prefetch.h>
15 #include <asm/sstep.h>
16 #include <asm/processor.h>
17 #include <linux/uaccess.h>
18 #include <asm/cpu_has_feature.h>
19 #include <asm/cputable.h>
21 extern char system_call_common[];
24 /* Bits in SRR1 that are copied from MSR */
25 #define MSR_MASK 0xffffffff87c0ffffUL
27 #define MSR_MASK 0x87c0ffff
31 #define XER_SO 0x80000000U
32 #define XER_OV 0x40000000U
33 #define XER_CA 0x20000000U
37 * Functions in ldstfp.S
39 extern void get_fpr(int rn, double *p);
40 extern void put_fpr(int rn, const double *p);
41 extern void get_vr(int rn, __vector128 *p);
42 extern void put_vr(int rn, __vector128 *p);
43 extern void load_vsrn(int vsr, const void *p);
44 extern void store_vsrn(int vsr, void *p);
45 extern void conv_sp_to_dp(const float *sp, double *dp);
46 extern void conv_dp_to_sp(const double *dp, float *sp);
53 extern int do_lq(unsigned long ea, unsigned long *regs);
54 extern int do_stq(unsigned long ea, unsigned long val0, unsigned long val1);
55 extern int do_lqarx(unsigned long ea, unsigned long *regs);
56 extern int do_stqcx(unsigned long ea, unsigned long val0, unsigned long val1,
60 #ifdef __LITTLE_ENDIAN__
69 * Emulate the truncation of 64 bit values in 32-bit mode.
71 static nokprobe_inline unsigned long truncate_if_32bit(unsigned long msr,
75 if ((msr & MSR_64BIT) == 0)
82 * Determine whether a conditional branch instruction would branch.
84 static nokprobe_inline int branch_taken(unsigned int instr,
85 const struct pt_regs *regs,
86 struct instruction_op *op)
88 unsigned int bo = (instr >> 21) & 0x1f;
92 /* decrement counter */
94 if (((bo >> 1) & 1) ^ (regs->ctr == 1))
97 if ((bo & 0x10) == 0) {
98 /* check bit from CR */
99 bi = (instr >> 16) & 0x1f;
100 if (((regs->ccr >> (31 - bi)) & 1) != ((bo >> 3) & 1))
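/*
 * For reference, the BO field tested above ((instr >> 21) & 0x1f)
 * decodes as: 0x10 - ignore the CR bit; 0x08 - required value of the
 * CR bit selected by BI; 0x04 - do not decrement CTR; 0x02 - branch
 * if the decremented CTR is 0 (clear: branch if it is non-zero).
 * e.g. bdnz (BO = 0x10) decrements CTR and branches while CTR != 0,
 * which is why the CTR test compares the pre-decrement value with 1.
 */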
106 static nokprobe_inline long address_ok(struct pt_regs *regs, unsigned long ea, int nb)
108 if (!user_mode(regs))
110 return __access_ok(ea, nb, USER_DS);
114 * Calculate effective address for a D-form instruction
116 static nokprobe_inline unsigned long dform_ea(unsigned int instr,
117 const struct pt_regs *regs)
122 ra = (instr >> 16) & 0x1f;
123 ea = (signed short) instr; /* sign-extend */
132 * Calculate effective address for a DS-form instruction
134 static nokprobe_inline unsigned long dsform_ea(unsigned int instr,
135 const struct pt_regs *regs)
140 ra = (instr >> 16) & 0x1f;
141 ea = (signed short) (instr & ~3); /* sign-extend */
149 * Calculate effective address for a DQ-form instruction
151 static nokprobe_inline unsigned long dqform_ea(unsigned int instr,
152 const struct pt_regs *regs)
157 ra = (instr >> 16) & 0x1f;
158 ea = (signed short) (instr & ~0xf); /* sign-extend */
164 #endif /* __powerpc64__ */
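/*
 * In the D/DS/DQ forms above the displacement is the sign-extended low
 * 16 bits of the instruction.  DS-form (e.g. ld/std) reuses the bottom
 * 2 bits and DQ-form (e.g. lq) the bottom 4 bits of that field as extra
 * opcode bits, hence the & ~3 and & ~0xf before sign extension; the
 * offset is therefore always a multiple of 4 or 16 respectively.
 */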
167 * Calculate effective address for an X-form instruction
169 static nokprobe_inline unsigned long xform_ea(unsigned int instr,
170 const struct pt_regs *regs)
175 ra = (instr >> 16) & 0x1f;
176 rb = (instr >> 11) & 0x1f;
185 * Return the largest power of 2, not greater than sizeof(unsigned long),
186 * such that x is a multiple of it.
188 static nokprobe_inline unsigned long max_align(unsigned long x)
190 x |= sizeof(unsigned long);
191 return x & -x; /* isolates rightmost bit */
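/*
 * e.g. max_align(0x1001) == 1, max_align(0x1002) == 2,
 * max_align(0x1004) == 4 and max_align(0x1000) == sizeof(unsigned long);
 * OR-ing in sizeof(unsigned long) caps the result at the word size.
 */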
194 static nokprobe_inline unsigned long byterev_2(unsigned long x)
196 return ((x >> 8) & 0xff) | ((x & 0xff) << 8);
199 static nokprobe_inline unsigned long byterev_4(unsigned long x)
201 return ((x >> 24) & 0xff) | ((x >> 8) & 0xff00) |
202 ((x & 0xff00) << 8) | ((x & 0xff) << 24);
206 static nokprobe_inline unsigned long byterev_8(unsigned long x)
208 return (byterev_4(x) << 32) | byterev_4(x >> 32);
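/*
 * e.g. byterev_2(0x1234) == 0x3412, byterev_4(0x12345678) == 0x78563412,
 * byterev_8(0x0102030405060708) == 0x0807060504030201.
 */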
212 static nokprobe_inline int read_mem_aligned(unsigned long *dest,
213 unsigned long ea, int nb)
220 err = __get_user(x, (unsigned char __user *) ea);
223 err = __get_user(x, (unsigned short __user *) ea);
226 err = __get_user(x, (unsigned int __user *) ea);
230 err = __get_user(x, (unsigned long __user *) ea);
240 * Copy from userspace to a buffer, using the largest possible
241 * aligned accesses, up to sizeof(long).
243 static int nokprobe_inline copy_mem_in(u8 *dest, unsigned long ea, int nb)
248 for (; nb > 0; nb -= c) {
254 err = __get_user(*dest, (unsigned char __user *) ea);
257 err = __get_user(*(u16 *)dest,
258 (unsigned short __user *) ea);
261 err = __get_user(*(u32 *)dest,
262 (unsigned int __user *) ea);
266 err = __get_user(*(unsigned long *)dest,
267 (unsigned long __user *) ea);
279 static nokprobe_inline int read_mem_unaligned(unsigned long *dest,
280 unsigned long ea, int nb,
281 struct pt_regs *regs)
285 u8 b[sizeof(unsigned long)];
291 i = IS_BE ? sizeof(unsigned long) - nb : 0;
292 err = copy_mem_in(&u.b[i], ea, nb);
299 * Read memory at address ea for nb bytes, return 0 for success
300 * or -EFAULT if an error occurred. N.B. nb must be 1, 2, 4 or 8.
301 * If nb < sizeof(long), the result is right-justified on BE systems.
303 static int read_mem(unsigned long *dest, unsigned long ea, int nb,
304 struct pt_regs *regs)
306 if (!address_ok(regs, ea, nb))
308 if ((ea & (nb - 1)) == 0)
309 return read_mem_aligned(dest, ea, nb);
310 return read_mem_unaligned(dest, ea, nb, regs);
312 NOKPROBE_SYMBOL(read_mem);
314 static nokprobe_inline int write_mem_aligned(unsigned long val,
315 unsigned long ea, int nb)
321 err = __put_user(val, (unsigned char __user *) ea);
324 err = __put_user(val, (unsigned short __user *) ea);
327 err = __put_user(val, (unsigned int __user *) ea);
331 err = __put_user(val, (unsigned long __user *) ea);
339 * Copy from a buffer to userspace, using the largest possible
340 * aligned accesses, up to sizeof(long).
342 static int nokprobe_inline copy_mem_out(u8 *dest, unsigned long ea, int nb)
347 for (; nb > 0; nb -= c) {
353 err = __put_user(*dest, (unsigned char __user *) ea);
356 err = __put_user(*(u16 *)dest,
357 (unsigned short __user *) ea);
360 err = __put_user(*(u32 *)dest,
361 (unsigned int __user *) ea);
365 err = __put_user(*(unsigned long *)dest,
366 (unsigned long __user *) ea);
378 static nokprobe_inline int write_mem_unaligned(unsigned long val,
379 unsigned long ea, int nb,
380 struct pt_regs *regs)
384 u8 b[sizeof(unsigned long)];
389 i = IS_BE ? sizeof(unsigned long) - nb : 0;
390 return copy_mem_out(&u.b[i], ea, nb);
394 * Write memory at address ea for nb bytes, return 0 for success
395 * or -EFAULT if an error occurred. N.B. nb must be 1, 2, 4 or 8.
397 static int write_mem(unsigned long val, unsigned long ea, int nb,
398 struct pt_regs *regs)
400 if (!address_ok(regs, ea, nb))
402 if ((ea & (nb - 1)) == 0)
403 return write_mem_aligned(val, ea, nb);
404 return write_mem_unaligned(val, ea, nb, regs);
406 NOKPROBE_SYMBOL(write_mem);
408 #ifdef CONFIG_PPC_FPU
410 * These access either the real FP register or the image in the
411 * thread_struct, depending on regs->msr & MSR_FP.
413 static int do_fp_load(int rn, unsigned long ea, int nb, struct pt_regs *regs)
420 u8 b[2 * sizeof(double)];
423 if (!address_ok(regs, ea, nb))
425 err = copy_mem_in(u.b, ea, nb);
430 conv_sp_to_dp(&u.f, &u.d[0]);
431 if (regs->msr & MSR_FP)
432 put_fpr(rn, &u.d[0]);
434 current->thread.TS_FPR(rn) = u.l[0];
438 if (regs->msr & MSR_FP)
439 put_fpr(rn, &u.d[1]);
441 current->thread.TS_FPR(rn) = u.l[1];
446 NOKPROBE_SYMBOL(do_fp_load);
448 static int do_fp_store(int rn, unsigned long ea, int nb, struct pt_regs *regs)
454 u8 b[2 * sizeof(double)];
457 if (!address_ok(regs, ea, nb))
460 if (regs->msr & MSR_FP)
461 get_fpr(rn, &u.d[0]);
463 u.l[0] = current->thread.TS_FPR(rn);
465 conv_dp_to_sp(&u.d[0], &u.f);
468 if (regs->msr & MSR_FP)
469 get_fpr(rn, &u.d[1]);
471 u.l[1] = current->thread.TS_FPR(rn);
474 return copy_mem_out(u.b, ea, nb);
476 NOKPROBE_SYMBOL(do_fp_store);
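/*
 * For a 4-byte access the value is converted between single and double
 * precision (conv_sp_to_dp/conv_dp_to_sp above), matching lfs/stfs
 * semantics; 8-byte and 16-byte (lfdp/stfdp) accesses move the image
 * unchanged.  When MSR_FP is clear, the saved image in thread_struct is
 * used instead of the live FPRs.
 */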
479 #ifdef CONFIG_ALTIVEC
480 /* For Altivec/VMX, no need to worry about alignment */
481 static nokprobe_inline int do_vec_load(int rn, unsigned long ea,
482 int size, struct pt_regs *regs)
487 u8 b[sizeof(__vector128)];
490 if (!address_ok(regs, ea & ~0xfUL, 16))
492 /* align to multiple of size */
494 err = copy_mem_in(&u.b[ea & 0xf], ea, size);
499 if (regs->msr & MSR_VEC)
502 current->thread.vr_state.vr[rn] = u.v;
507 static nokprobe_inline int do_vec_store(int rn, unsigned long ea,
508 int size, struct pt_regs *regs)
512 u8 b[sizeof(__vector128)];
515 if (!address_ok(regs, ea & ~0xfUL, 16))
517 /* align to multiple of size */
521 if (regs->msr & MSR_VEC)
524 u.v = current->thread.vr_state.vr[rn];
526 return copy_mem_out(&u.b[ea & 0xf], ea, size);
528 #endif /* CONFIG_ALTIVEC */
531 static nokprobe_inline int emulate_lq(struct pt_regs *regs, unsigned long ea,
536 if (!address_ok(regs, ea, 16))
538 /* if aligned, should be atomic */
540 return do_lq(ea, &regs->gpr[reg]);
542 err = read_mem(&regs->gpr[reg + IS_LE], ea, 8, regs);
544 err = read_mem(&regs->gpr[reg + IS_BE], ea + 8, 8, regs);
548 static nokprobe_inline int emulate_stq(struct pt_regs *regs, unsigned long ea,
553 if (!address_ok(regs, ea, 16))
555 /* if aligned, should be atomic */
557 return do_stq(ea, regs->gpr[reg], regs->gpr[reg + 1]);
559 err = write_mem(regs->gpr[reg + IS_LE], ea, 8, regs);
561 err = write_mem(regs->gpr[reg + IS_BE], ea + 8, 8, regs);
564 #endif /* __powerpc64__ */
567 void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg,
572 const unsigned int *wp;
573 const unsigned short *hp;
574 const unsigned char *bp;
576 size = GETSIZE(op->type);
577 reg->d[0] = reg->d[1] = 0;
579 switch (op->element_size) {
581 /* whole vector; lxv[x] or lxvl[l] */
584 memcpy(reg, mem, size);
585 if (IS_LE && (op->vsx_flags & VSX_LDLEFT)) {
586 /* reverse 16 bytes */
588 tmp = byterev_8(reg->d[0]);
589 reg->d[0] = byterev_8(reg->d[1]);
594 /* scalar loads, lxvd2x, lxvdsx */
595 read_size = (size >= 8) ? 8 : size;
596 i = IS_LE ? 8 : 8 - read_size;
597 memcpy(&reg->b[i], mem, read_size);
599 if (op->type & SIGNEXT) {
600 /* size == 4 is the only case here */
601 reg->d[IS_LE] = (signed int) reg->d[IS_LE];
602 } else if (op->vsx_flags & VSX_FPCONV) {
604 conv_sp_to_dp(&reg->fp[1 + IS_LE],
610 reg->d[IS_BE] = *(unsigned long *)(mem + 8);
611 else if (op->vsx_flags & VSX_SPLAT)
612 reg->d[IS_BE] = reg->d[IS_LE];
618 for (j = 0; j < size / 4; ++j) {
619 i = IS_LE ? 3 - j : j;
622 if (op->vsx_flags & VSX_SPLAT) {
623 u32 val = reg->w[IS_LE ? 3 : 0];
625 i = IS_LE ? 3 - j : j;
633 for (j = 0; j < size / 2; ++j) {
634 i = IS_LE ? 7 - j : j;
641 for (j = 0; j < size; ++j) {
642 i = IS_LE ? 15 - j : j;
648 EXPORT_SYMBOL_GPL(emulate_vsx_load);
649 NOKPROBE_SYMBOL(emulate_vsx_load);
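/*
 * op->element_size selects the case above: 16 treats the access as one
 * whole quadword (lxv, lxvx, lxvl[l]), while 8/4/2/1 treat it as
 * doubleword, word, halfword or byte elements (e.g. lxvd2x/lxvdsx,
 * lxvw4x/lxvwsx, lxvh8x, lxvb16x).  On little-endian hosts the
 * "IS_LE ? N - j : j" index flips place the elements at the other end
 * of the register image.
 */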
651 void emulate_vsx_store(struct instruction_op *op, const union vsx_reg *reg,
654 int size, write_size;
661 size = GETSIZE(op->type);
663 switch (op->element_size) {
665 /* stxv, stxvx, stxvl, stxvll */
668 if (IS_LE && (op->vsx_flags & VSX_LDLEFT)) {
669 /* reverse 16 bytes */
670 buf.d[0] = byterev_8(reg->d[1]);
671 buf.d[1] = byterev_8(reg->d[0]);
674 memcpy(mem, reg, size);
677 /* scalar stores, stxvd2x */
678 write_size = (size >= 8) ? 8 : size;
679 i = IS_LE ? 8 : 8 - write_size;
680 if (size < 8 && op->vsx_flags & VSX_FPCONV) {
681 buf.d[0] = buf.d[1] = 0;
683 conv_dp_to_sp(&reg->dp[IS_LE], &buf.fp[1 + IS_LE]);
687 memcpy(mem, &reg->b[i], write_size);
689 memcpy(mem + 8, &reg->d[IS_BE], 8);
694 for (j = 0; j < size / 4; ++j) {
695 i = IS_LE ? 3 - j : j;
702 for (j = 0; j < size / 2; ++j) {
703 i = IS_LE ? 7 - j : j;
710 for (j = 0; j < size; ++j) {
711 i = IS_LE ? 15 - j : j;
717 EXPORT_SYMBOL_GPL(emulate_vsx_store);
718 NOKPROBE_SYMBOL(emulate_vsx_store);
720 static nokprobe_inline int do_vsx_load(struct instruction_op *op,
721 unsigned long ea, struct pt_regs *regs)
726 int size = GETSIZE(op->type);
728 if (!address_ok(regs, ea, size) || copy_mem_in(mem, ea, size))
731 emulate_vsx_load(op, &buf, mem);
734 /* FP regs + extensions */
735 if (regs->msr & MSR_FP) {
736 load_vsrn(reg, &buf);
738 current->thread.fp_state.fpr[reg][0] = buf.d[0];
739 current->thread.fp_state.fpr[reg][1] = buf.d[1];
742 if (regs->msr & MSR_VEC)
743 load_vsrn(reg, &buf);
745 current->thread.vr_state.vr[reg - 32] = buf.v;
751 static nokprobe_inline int do_vsx_store(struct instruction_op *op,
752 unsigned long ea, struct pt_regs *regs)
757 int size = GETSIZE(op->type);
759 if (!address_ok(regs, ea, size))
764 /* FP regs + extensions */
765 if (regs->msr & MSR_FP) {
766 store_vsrn(reg, &buf);
768 buf.d[0] = current->thread.fp_state.fpr[reg][0];
769 buf.d[1] = current->thread.fp_state.fpr[reg][1];
772 if (regs->msr & MSR_VEC)
773 store_vsrn(reg, &buf);
775 buf.v = current->thread.vr_state.vr[reg - 32];
778 emulate_vsx_store(op, &buf, mem);
779 return copy_mem_out(mem, ea, size);
781 #endif /* CONFIG_VSX */
783 #define __put_user_asmx(x, addr, err, op, cr) \
784 __asm__ __volatile__( \
785 "1: " op " %2,0,%3\n" \
788 ".section .fixup,\"ax\"\n" \
793 : "=r" (err), "=r" (cr) \
794 : "r" (x), "r" (addr), "i" (-EFAULT), "0" (err))
796 #define __get_user_asmx(x, addr, err, op) \
797 __asm__ __volatile__( \
798 "1: "op" %1,0,%2\n" \
800 ".section .fixup,\"ax\"\n" \
805 : "=r" (err), "=r" (x) \
806 : "r" (addr), "i" (-EFAULT), "0" (err))
808 #define __cacheop_user_asmx(addr, err, op) \
809 __asm__ __volatile__( \
812 ".section .fixup,\"ax\"\n" \
818 : "r" (addr), "i" (-EFAULT), "0" (err))
820 static nokprobe_inline void set_cr0(const struct pt_regs *regs,
821 struct instruction_op *op, int rd)
823 long val = regs->gpr[rd];
826 op->ccval = (regs->ccr & 0x0fffffff) | ((regs->xer >> 3) & 0x10000000);
828 if (!(regs->msr & MSR_64BIT))
832 op->ccval |= 0x80000000;
834 op->ccval |= 0x40000000;
836 op->ccval |= 0x20000000;
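/*
 * CR0 nibble built above: 0x8 = LT, 0x4 = GT, 0x2 = EQ, 0x1 = SO, with
 * SO copied from XER[SO] ((xer >> 3) moves bit 0x80000000 to the
 * 0x10000000 position).  e.g. addic. producing a negative result with
 * XER[SO] clear gives CR0 = 0b1000.
 */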
839 static nokprobe_inline void add_with_carry(const struct pt_regs *regs,
840 struct instruction_op *op, int rd,
841 unsigned long val1, unsigned long val2,
842 unsigned long carry_in)
844 unsigned long val = val1 + val2;
848 op->type = COMPUTE + SETREG + SETXER;
852 if (!(regs->msr & MSR_64BIT)) {
853 val = (unsigned int) val;
854 val1 = (unsigned int) val1;
857 op->xerval = regs->xer;
858 if (val < val1 || (carry_in && val == val1))
859 op->xerval |= XER_CA;
861 op->xerval &= ~XER_CA;
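/*
 * Carry-out detection: since val = val1 + val2 (+ carry_in), unsigned
 * wraparound occurred iff val < val1, or val == val1 with a carry in
 * (val2 was all ones).  e.g. adding 1 to 0xffffffffffffffff wraps to 0,
 * so XER[CA] is set just as the hardware addc/adde would do.
 */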
864 static nokprobe_inline void do_cmp_signed(const struct pt_regs *regs,
865 struct instruction_op *op,
866 long v1, long v2, int crfld)
868 unsigned int crval, shift;
870 op->type = COMPUTE + SETCC;
871 crval = (regs->xer >> 31) & 1; /* get SO bit */
878 shift = (7 - crfld) * 4;
879 op->ccval = (regs->ccr & ~(0xf << shift)) | (crval << shift);
882 static nokprobe_inline void do_cmp_unsigned(const struct pt_regs *regs,
883 struct instruction_op *op,
885 unsigned long v2, int crfld)
887 unsigned int crval, shift;
889 op->type = COMPUTE + SETCC;
890 crval = (regs->xer >> 31) & 1; /* get SO bit */
897 shift = (7 - crfld) * 4;
898 op->ccval = (regs->ccr & ~(0xf << shift)) | (crval << shift);
901 static nokprobe_inline void do_cmpb(const struct pt_regs *regs,
902 struct instruction_op *op,
903 unsigned long v1, unsigned long v2)
905 unsigned long long out_val, mask;
909 for (i = 0; i < 8; i++) {
910 mask = 0xffUL << (i * 8);
911 if ((v1 & mask) == (v2 & mask))
918 * The size parameter is used to adjust the equivalent popcnt instruction.
919 * popcntb = 8, popcntw = 32, popcntd = 64
921 static nokprobe_inline void do_popcnt(const struct pt_regs *regs,
922 struct instruction_op *op,
923 unsigned long v1, int size)
925 unsigned long long out = v1;
927 out -= (out >> 1) & 0x5555555555555555;
928 out = (0x3333333333333333 & out) + (0x3333333333333333 & (out >> 2));
929 out = (out + (out >> 4)) & 0x0f0f0f0f0f0f0f0f;
931 if (size == 8) { /* popcntb */
937 if (size == 32) { /* popcntw */
938 op->val = out & 0x0000003f0000003f;
942 out = (out + (out >> 32)) & 0x7f;
943 op->val = out; /* popcntd */
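/*
 * Classic SWAR population count: the folding steps above accumulate
 * 2-bit, 4-bit and 8-bit partial counts in parallel, giving per-byte
 * counts that popcntb returns directly; popcntw/popcntd then sum them
 * per word/doubleword.  e.g. v1 = 0xff gives 8 in the low byte.
 */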
947 static nokprobe_inline void do_bpermd(const struct pt_regs *regs,
948 struct instruction_op *op,
949 unsigned long v1, unsigned long v2)
951 unsigned char perm, idx;
955 for (i = 0; i < 8; i++) {
956 idx = (v1 >> (i * 8)) & 0xff;
958 if (v2 & PPC_BIT(idx))
963 #endif /* CONFIG_PPC64 */
965 * The size parameter adjusts the equivalent prty instruction.
966 * prtyw = 32, prtyd = 64
968 static nokprobe_inline void do_prty(const struct pt_regs *regs,
969 struct instruction_op *op,
970 unsigned long v, int size)
972 unsigned long long res = v ^ (v >> 8);
975 if (size == 32) { /* prtyw */
976 op->val = res & 0x0000000100000001;
981 op->val = res & 1; /* prtyd */
984 static nokprobe_inline int trap_compare(long v1, long v2)
994 if ((unsigned long)v1 < (unsigned long)v2)
996 else if ((unsigned long)v1 > (unsigned long)v2)
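/*
 * The returned mask matches the TO field encoding of tw/td/twi/tdi:
 * 0x10 = signed <, 0x08 = signed >, 0x04 = equal, 0x02 = unsigned <,
 * 0x01 = unsigned >.  A trap is taken when (TO & trap_compare(a, b))
 * is non-zero, e.g. tdeqi r3,0 (TO = 4) traps only when r3 == 0.
 */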
1002 * Elements of 32-bit rotate and mask instructions.
1004 #define MASK32(mb, me) ((0xffffffffUL >> (mb)) + \
1005 ((signed long)-0x80000000L >> (me)) + ((me) >= (mb)))
1006 #ifdef __powerpc64__
1007 #define MASK64_L(mb) (~0UL >> (mb))
1008 #define MASK64_R(me) ((signed long)-0x8000000000000000L >> (me))
1009 #define MASK64(mb, me) (MASK64_L(mb) + MASK64_R(me) + ((me) >= (mb)))
1010 #define DATA32(x) (((x) & 0xffffffffUL) | (((x) & 0xffffffffUL) << 32))
1012 #define DATA32(x) (x)
1014 #define ROTATE(x, n) ((n) ? (((x) << (n)) | ((x) >> (8 * sizeof(long) - (n)))) : (x))
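/*
 * MASK32(mb, me) builds the rlwinm-style mask of bits mb..me in IBM
 * (MSB = 0) bit numbering, wrapping around when me < mb: e.g.
 * MASK32(16, 31) == 0x0000ffff and MASK32(0, 0) == 0x80000000.
 * MASK64_L/MASK64_R are the left/right-justified rldicl/rldicr masks,
 * and DATA32() replicates a 32-bit value into both halves so that the
 * 64-bit ROTATE() also produces correct 32-bit rotates.
 */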
1017 * Decode an instruction, and return information about it in *op
1018 * without changing *regs.
1019 * Integer arithmetic and logical instructions, branches, and barrier
1020 * instructions can be emulated just using the information in *op.
1022 * Return value is 1 if the instruction can be emulated just by
1023 * updating *regs with the information in *op, -1 if we need the
1024 * GPRs but *regs doesn't contain the full register set, or 0
1027 int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
1030 unsigned int opcode, ra, rb, rd, spr, u;
1031 unsigned long int imm;
1032 unsigned long int val, val2;
1033 unsigned int mb, me, sh;
1038 opcode = instr >> 26;
1042 imm = (signed short)(instr & 0xfffc);
1043 if ((instr & 2) == 0)
1045 op->val = truncate_if_32bit(regs->msr, imm);
1048 if (branch_taken(instr, regs, op))
1049 op->type |= BRTAKEN;
1053 if ((instr & 0xfe2) == 2)
1060 op->type = BRANCH | BRTAKEN;
1061 imm = instr & 0x03fffffc;
1062 if (imm & 0x02000000)
1064 if ((instr & 2) == 0)
1066 op->val = truncate_if_32bit(regs->msr, imm);
1071 switch ((instr >> 1) & 0x3ff) {
1073 op->type = COMPUTE + SETCC;
1074 rd = 7 - ((instr >> 23) & 0x7);
1075 ra = 7 - ((instr >> 18) & 0x7);
1078 val = (regs->ccr >> ra) & 0xf;
1079 op->ccval = (regs->ccr & ~(0xfUL << rd)) | (val << rd);
1083 case 528: /* bcctr */
1085 imm = (instr & 0x400)? regs->ctr: regs->link;
1086 op->val = truncate_if_32bit(regs->msr, imm);
1089 if (branch_taken(instr, regs, op))
1090 op->type |= BRTAKEN;
1093 case 18: /* rfid, scary */
1094 if (regs->msr & MSR_PR)
1099 case 150: /* isync */
1100 op->type = BARRIER | BARRIER_ISYNC;
1103 case 33: /* crnor */
1104 case 129: /* crandc */
1105 case 193: /* crxor */
1106 case 225: /* crnand */
1107 case 257: /* crand */
1108 case 289: /* creqv */
1109 case 417: /* crorc */
1110 case 449: /* cror */
1111 op->type = COMPUTE + SETCC;
1112 ra = (instr >> 16) & 0x1f;
1113 rb = (instr >> 11) & 0x1f;
1114 rd = (instr >> 21) & 0x1f;
1115 ra = (regs->ccr >> (31 - ra)) & 1;
1116 rb = (regs->ccr >> (31 - rb)) & 1;
1117 val = (instr >> (6 + ra * 2 + rb)) & 1;
1118 op->ccval = (regs->ccr & ~(1UL << (31 - rd))) |
1124 switch ((instr >> 1) & 0x3ff) {
1125 case 598: /* sync */
1126 op->type = BARRIER + BARRIER_SYNC;
1127 #ifdef __powerpc64__
1128 switch ((instr >> 21) & 3) {
1129 case 1: /* lwsync */
1130 op->type = BARRIER + BARRIER_LWSYNC;
1132 case 2: /* ptesync */
1133 op->type = BARRIER + BARRIER_PTESYNC;
1139 case 854: /* eieio */
1140 op->type = BARRIER + BARRIER_EIEIO;
1146 /* Following cases refer to regs->gpr[], so we need all regs */
1147 if (!FULL_REGS(regs))
1150 rd = (instr >> 21) & 0x1f;
1151 ra = (instr >> 16) & 0x1f;
1152 rb = (instr >> 11) & 0x1f;
1155 #ifdef __powerpc64__
1157 if (rd & trap_compare(regs->gpr[ra], (short) instr))
1162 if (rd & trap_compare((int)regs->gpr[ra], (short) instr))
1167 op->val = regs->gpr[ra] * (short) instr;
1170 case 8: /* subfic */
1171 imm = (short) instr;
1172 add_with_carry(regs, op, rd, ~regs->gpr[ra], imm, 1);
1175 case 10: /* cmpli */
1176 imm = (unsigned short) instr;
1177 val = regs->gpr[ra];
1178 #ifdef __powerpc64__
1180 val = (unsigned int) val;
1182 do_cmp_unsigned(regs, op, val, imm, rd >> 2);
1186 imm = (short) instr;
1187 val = regs->gpr[ra];
1188 #ifdef __powerpc64__
1192 do_cmp_signed(regs, op, val, imm, rd >> 2);
1195 case 12: /* addic */
1196 imm = (short) instr;
1197 add_with_carry(regs, op, rd, regs->gpr[ra], imm, 0);
1200 case 13: /* addic. */
1201 imm = (short) instr;
1202 add_with_carry(regs, op, rd, regs->gpr[ra], imm, 0);
1203 set_cr0(regs, op, rd);
1207 imm = (short) instr;
1209 imm += regs->gpr[ra];
1213 case 15: /* addis */
1214 imm = ((short) instr) << 16;
1216 imm += regs->gpr[ra];
1221 if (((instr >> 1) & 0x1f) == 2) {
1223 imm = (short) (instr & 0xffc1); /* d0 + d2 fields */
1224 imm |= (instr >> 15) & 0x3e; /* d1 field */
1225 op->val = regs->nip + (imm << 16) + 4;
1231 case 20: /* rlwimi */
1232 mb = (instr >> 6) & 0x1f;
1233 me = (instr >> 1) & 0x1f;
1234 val = DATA32(regs->gpr[rd]);
1235 imm = MASK32(mb, me);
1236 op->val = (regs->gpr[ra] & ~imm) | (ROTATE(val, rb) & imm);
1239 case 21: /* rlwinm */
1240 mb = (instr >> 6) & 0x1f;
1241 me = (instr >> 1) & 0x1f;
1242 val = DATA32(regs->gpr[rd]);
1243 op->val = ROTATE(val, rb) & MASK32(mb, me);
1246 case 23: /* rlwnm */
1247 mb = (instr >> 6) & 0x1f;
1248 me = (instr >> 1) & 0x1f;
1249 rb = regs->gpr[rb] & 0x1f;
1250 val = DATA32(regs->gpr[rd]);
1251 op->val = ROTATE(val, rb) & MASK32(mb, me);
1255 op->val = regs->gpr[rd] | (unsigned short) instr;
1256 goto logical_done_nocc;
1259 imm = (unsigned short) instr;
1260 op->val = regs->gpr[rd] | (imm << 16);
1261 goto logical_done_nocc;
1264 op->val = regs->gpr[rd] ^ (unsigned short) instr;
1265 goto logical_done_nocc;
1267 case 27: /* xoris */
1268 imm = (unsigned short) instr;
1269 op->val = regs->gpr[rd] ^ (imm << 16);
1270 goto logical_done_nocc;
1272 case 28: /* andi. */
1273 op->val = regs->gpr[rd] & (unsigned short) instr;
1274 set_cr0(regs, op, ra);
1275 goto logical_done_nocc;
1277 case 29: /* andis. */
1278 imm = (unsigned short) instr;
1279 op->val = regs->gpr[rd] & (imm << 16);
1280 set_cr0(regs, op, ra);
1281 goto logical_done_nocc;
1283 #ifdef __powerpc64__
1285 mb = ((instr >> 6) & 0x1f) | (instr & 0x20);
1286 val = regs->gpr[rd];
1287 if ((instr & 0x10) == 0) {
1288 sh = rb | ((instr & 2) << 4);
1289 val = ROTATE(val, sh);
1290 switch ((instr >> 2) & 3) {
1291 case 0: /* rldicl */
1292 val &= MASK64_L(mb);
1294 case 1: /* rldicr */
1295 val &= MASK64_R(mb);
1298 val &= MASK64(mb, 63 - sh);
1300 case 3: /* rldimi */
1301 imm = MASK64(mb, 63 - sh);
1302 val = (regs->gpr[ra] & ~imm) |
1308 sh = regs->gpr[rb] & 0x3f;
1309 val = ROTATE(val, sh);
1310 switch ((instr >> 1) & 7) {
1312 op->val = val & MASK64_L(mb);
1315 op->val = val & MASK64_R(mb);
1320 op->type = UNKNOWN; /* illegal instruction */
1324 /* isel occupies 32 minor opcodes */
1325 if (((instr >> 1) & 0x1f) == 15) {
1326 mb = (instr >> 6) & 0x1f; /* bc field */
1327 val = (regs->ccr >> (31 - mb)) & 1;
1328 val2 = (ra) ? regs->gpr[ra] : 0;
1330 op->val = (val) ? val2 : regs->gpr[rb];
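/*
 * isel RT,RA,RB,BC: RT = CR[BC] ? (RA ? GPR[RA] : 0) : GPR[RB].
 * e.g. isel r3,r4,r5,2 copies r4 to r3 if CR0[EQ] is set, else r5.
 */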
1334 switch ((instr >> 1) & 0x3ff) {
1337 (rd & trap_compare((int)regs->gpr[ra],
1338 (int)regs->gpr[rb])))
1341 #ifdef __powerpc64__
1343 if (rd & trap_compare(regs->gpr[ra], regs->gpr[rb]))
1347 case 83: /* mfmsr */
1348 if (regs->msr & MSR_PR)
1353 case 146: /* mtmsr */
1354 if (regs->msr & MSR_PR)
1358 op->val = 0xffffffff & ~(MSR_ME | MSR_LE);
1361 case 178: /* mtmsrd */
1362 if (regs->msr & MSR_PR)
1366 /* only MSR_EE and MSR_RI get changed if bit 15 set */
1367 /* mtmsrd doesn't change MSR_HV, MSR_ME or MSR_LE */
1368 imm = (instr & 0x10000)? 0x8002: 0xefffffffffffeffeUL;
1375 if ((instr >> 20) & 1) {
1377 for (sh = 0; sh < 8; ++sh) {
1378 if (instr & (0x80000 >> sh))
1383 op->val = regs->ccr & imm;
1386 case 144: /* mtcrf */
1387 op->type = COMPUTE + SETCC;
1389 val = regs->gpr[rd];
1390 op->val = regs->ccr;
1391 for (sh = 0; sh < 8; ++sh) {
1392 if (instr & (0x80000 >> sh))
1393 op->val = (op->val & ~imm) |
1399 case 339: /* mfspr */
1400 spr = ((instr >> 16) & 0x1f) | ((instr >> 6) & 0x3e0);
1404 if (spr == SPRN_XER || spr == SPRN_LR ||
1409 case 467: /* mtspr */
1410 spr = ((instr >> 16) & 0x1f) | ((instr >> 6) & 0x3e0);
1412 op->val = regs->gpr[rd];
1414 if (spr == SPRN_XER || spr == SPRN_LR ||
1420 * Compare instructions
1423 val = regs->gpr[ra];
1424 val2 = regs->gpr[rb];
1425 #ifdef __powerpc64__
1426 if ((rd & 1) == 0) {
1427 /* word (32-bit) compare */
1432 do_cmp_signed(regs, op, val, val2, rd >> 2);
1436 val = regs->gpr[ra];
1437 val2 = regs->gpr[rb];
1438 #ifdef __powerpc64__
1439 if ((rd & 1) == 0) {
1440 /* word (32-bit) compare */
1441 val = (unsigned int) val;
1442 val2 = (unsigned int) val2;
1445 do_cmp_unsigned(regs, op, val, val2, rd >> 2);
1448 case 508: /* cmpb */
1449 do_cmpb(regs, op, regs->gpr[rd], regs->gpr[rb]);
1450 goto logical_done_nocc;
1453 * Arithmetic instructions
1456 add_with_carry(regs, op, rd, ~regs->gpr[ra],
1459 #ifdef __powerpc64__
1460 case 9: /* mulhdu */
1461 asm("mulhdu %0,%1,%2" : "=r" (op->val) :
1462 "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1466 add_with_carry(regs, op, rd, regs->gpr[ra],
1470 case 11: /* mulhwu */
1471 asm("mulhwu %0,%1,%2" : "=r" (op->val) :
1472 "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1476 op->val = regs->gpr[rb] - regs->gpr[ra];
1478 #ifdef __powerpc64__
1479 case 73: /* mulhd */
1480 asm("mulhd %0,%1,%2" : "=r" (op->val) :
1481 "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1484 case 75: /* mulhw */
1485 asm("mulhw %0,%1,%2" : "=r" (op->val) :
1486 "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1490 op->val = -regs->gpr[ra];
1493 case 136: /* subfe */
1494 add_with_carry(regs, op, rd, ~regs->gpr[ra],
1495 regs->gpr[rb], regs->xer & XER_CA);
1498 case 138: /* adde */
1499 add_with_carry(regs, op, rd, regs->gpr[ra],
1500 regs->gpr[rb], regs->xer & XER_CA);
1503 case 200: /* subfze */
1504 add_with_carry(regs, op, rd, ~regs->gpr[ra], 0L,
1505 regs->xer & XER_CA);
1508 case 202: /* addze */
1509 add_with_carry(regs, op, rd, regs->gpr[ra], 0L,
1510 regs->xer & XER_CA);
1513 case 232: /* subfme */
1514 add_with_carry(regs, op, rd, ~regs->gpr[ra], -1L,
1515 regs->xer & XER_CA);
1517 #ifdef __powerpc64__
1518 case 233: /* mulld */
1519 op->val = regs->gpr[ra] * regs->gpr[rb];
1522 case 234: /* addme */
1523 add_with_carry(regs, op, rd, regs->gpr[ra], -1L,
1524 regs->xer & XER_CA);
1527 case 235: /* mullw */
1528 op->val = (unsigned int) regs->gpr[ra] *
1529 (unsigned int) regs->gpr[rb];
1533 op->val = regs->gpr[ra] + regs->gpr[rb];
1535 #ifdef __powerpc64__
1536 case 457: /* divdu */
1537 op->val = regs->gpr[ra] / regs->gpr[rb];
1540 case 459: /* divwu */
1541 op->val = (unsigned int) regs->gpr[ra] /
1542 (unsigned int) regs->gpr[rb];
1544 #ifdef __powerpc64__
1545 case 489: /* divd */
1546 op->val = (long int) regs->gpr[ra] /
1547 (long int) regs->gpr[rb];
1550 case 491: /* divw */
1551 op->val = (int) regs->gpr[ra] /
1552 (int) regs->gpr[rb];
1557 * Logical instructions
1559 case 26: /* cntlzw */
1560 op->val = __builtin_clz((unsigned int) regs->gpr[rd]);
1562 #ifdef __powerpc64__
1563 case 58: /* cntlzd */
1564 op->val = __builtin_clzl(regs->gpr[rd]);
1568 op->val = regs->gpr[rd] & regs->gpr[rb];
1572 op->val = regs->gpr[rd] & ~regs->gpr[rb];
1575 case 122: /* popcntb */
1576 do_popcnt(regs, op, regs->gpr[rd], 8);
1577 goto logical_done_nocc;
1580 op->val = ~(regs->gpr[rd] | regs->gpr[rb]);
1583 case 154: /* prtyw */
1584 do_prty(regs, op, regs->gpr[rd], 32);
1585 goto logical_done_nocc;
1587 case 186: /* prtyd */
1588 do_prty(regs, op, regs->gpr[rd], 64);
1589 goto logical_done_nocc;
1591 case 252: /* bpermd */
1592 do_bpermd(regs, op, regs->gpr[rd], regs->gpr[rb]);
1593 goto logical_done_nocc;
1596 op->val = ~(regs->gpr[rd] ^ regs->gpr[rb]);
1600 op->val = regs->gpr[rd] ^ regs->gpr[rb];
1603 case 378: /* popcntw */
1604 do_popcnt(regs, op, regs->gpr[rd], 32);
1605 goto logical_done_nocc;
1608 op->val = regs->gpr[rd] | ~regs->gpr[rb];
1612 op->val = regs->gpr[rd] | regs->gpr[rb];
1615 case 476: /* nand */
1616 op->val = ~(regs->gpr[rd] & regs->gpr[rb]);
1619 case 506: /* popcntd */
1620 do_popcnt(regs, op, regs->gpr[rd], 64);
1621 goto logical_done_nocc;
1623 case 922: /* extsh */
1624 op->val = (signed short) regs->gpr[rd];
1627 case 954: /* extsb */
1628 op->val = (signed char) regs->gpr[rd];
1630 #ifdef __powerpc64__
1631 case 986: /* extsw */
1632 op->val = (signed int) regs->gpr[rd];
1637 * Shift instructions
1640 sh = regs->gpr[rb] & 0x3f;
1642 op->val = (regs->gpr[rd] << sh) & 0xffffffffUL;
1648 sh = regs->gpr[rb] & 0x3f;
1650 op->val = (regs->gpr[rd] & 0xffffffffUL) >> sh;
1655 case 792: /* sraw */
1656 op->type = COMPUTE + SETREG + SETXER;
1657 sh = regs->gpr[rb] & 0x3f;
1658 ival = (signed int) regs->gpr[rd];
1659 op->val = ival >> (sh < 32 ? sh : 31);
1660 op->xerval = regs->xer;
1661 if (ival < 0 && (sh >= 32 || (ival & ((1ul << sh) - 1)) != 0))
1662 op->xerval |= XER_CA;
1664 op->xerval &= ~XER_CA;
1667 case 824: /* srawi */
1668 op->type = COMPUTE + SETREG + SETXER;
1670 ival = (signed int) regs->gpr[rd];
1671 op->val = ival >> sh;
1672 op->xerval = regs->xer;
1673 if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
1674 op->xerval |= XER_CA;
1676 op->xerval &= ~XER_CA;
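/*
 * For sraw/srawi, XER[CA] is set when the sign-extended source is
 * negative and 1 bits were shifted out, i.e. the algebraic shift
 * rounded towards minus infinity.  e.g. srawi of -3 by 1 gives -2 with
 * CA = 1, so a following addze rounds the quotient towards zero.
 */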
1679 #ifdef __powerpc64__
1681 sh = regs->gpr[rb] & 0x7f;
1683 op->val = regs->gpr[rd] << sh;
1689 sh = regs->gpr[rb] & 0x7f;
1691 op->val = regs->gpr[rd] >> sh;
1696 case 794: /* srad */
1697 op->type = COMPUTE + SETREG + SETXER;
1698 sh = regs->gpr[rb] & 0x7f;
1699 ival = (signed long int) regs->gpr[rd];
1700 op->val = ival >> (sh < 64 ? sh : 63);
1701 op->xerval = regs->xer;
1702 if (ival < 0 && (sh >= 64 || (ival & ((1ul << sh) - 1)) != 0))
1703 op->xerval |= XER_CA;
1705 op->xerval &= ~XER_CA;
1708 case 826: /* sradi with sh_5 = 0 */
1709 case 827: /* sradi with sh_5 = 1 */
1710 op->type = COMPUTE + SETREG + SETXER;
1711 sh = rb | ((instr & 2) << 4);
1712 ival = (signed long int) regs->gpr[rd];
1713 op->val = ival >> sh;
1714 op->xerval = regs->xer;
1715 if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
1716 op->xerval |= XER_CA;
1718 op->xerval &= ~XER_CA;
1720 #endif /* __powerpc64__ */
1723 * Cache instructions
1725 case 54: /* dcbst */
1726 op->type = MKOP(CACHEOP, DCBST, 0);
1727 op->ea = xform_ea(instr, regs);
1731 op->type = MKOP(CACHEOP, DCBF, 0);
1732 op->ea = xform_ea(instr, regs);
1735 case 246: /* dcbtst */
1736 op->type = MKOP(CACHEOP, DCBTST, 0);
1737 op->ea = xform_ea(instr, regs);
1741 case 278: /* dcbt */
1742 op->type = MKOP(CACHEOP, DCBT, 0);
1743 op->ea = xform_ea(instr, regs);
1747 case 982: /* icbi */
1748 op->type = MKOP(CACHEOP, ICBI, 0);
1749 op->ea = xform_ea(instr, regs);
1759 op->update_reg = ra;
1761 op->val = regs->gpr[rd];
1762 u = (instr >> 20) & UPDATE;
1768 op->ea = xform_ea(instr, regs);
1769 switch ((instr >> 1) & 0x3ff) {
1770 case 20: /* lwarx */
1771 op->type = MKOP(LARX, 0, 4);
1774 case 150: /* stwcx. */
1775 op->type = MKOP(STCX, 0, 4);
1778 #ifdef __powerpc64__
1779 case 84: /* ldarx */
1780 op->type = MKOP(LARX, 0, 8);
1783 case 214: /* stdcx. */
1784 op->type = MKOP(STCX, 0, 8);
1787 case 52: /* lbarx */
1788 op->type = MKOP(LARX, 0, 1);
1791 case 694: /* stbcx. */
1792 op->type = MKOP(STCX, 0, 1);
1795 case 116: /* lharx */
1796 op->type = MKOP(LARX, 0, 2);
1799 case 726: /* sthcx. */
1800 op->type = MKOP(STCX, 0, 2);
1803 case 276: /* lqarx */
1804 if (!((rd & 1) || rd == ra || rd == rb))
1805 op->type = MKOP(LARX, 0, 16);
1808 case 182: /* stqcx. */
1810 op->type = MKOP(STCX, 0, 16);
1815 case 55: /* lwzux */
1816 op->type = MKOP(LOAD, u, 4);
1820 case 119: /* lbzux */
1821 op->type = MKOP(LOAD, u, 1);
1824 #ifdef CONFIG_ALTIVEC
1826 * Note: for the load/store vector element instructions,
1827 * bits of the EA say which field of the VMX register to use.
1830 op->type = MKOP(LOAD_VMX, 0, 1);
1831 op->element_size = 1;
1834 case 39: /* lvehx */
1835 op->type = MKOP(LOAD_VMX, 0, 2);
1836 op->element_size = 2;
1839 case 71: /* lvewx */
1840 op->type = MKOP(LOAD_VMX, 0, 4);
1841 op->element_size = 4;
1845 case 359: /* lvxl */
1846 op->type = MKOP(LOAD_VMX, 0, 16);
1847 op->element_size = 16;
1850 case 135: /* stvebx */
1851 op->type = MKOP(STORE_VMX, 0, 1);
1852 op->element_size = 1;
1855 case 167: /* stvehx */
1856 op->type = MKOP(STORE_VMX, 0, 2);
1857 op->element_size = 2;
1860 case 199: /* stvewx */
1861 op->type = MKOP(STORE_VMX, 0, 4);
1862 op->element_size = 4;
1865 case 231: /* stvx */
1866 case 487: /* stvxl */
1867 op->type = MKOP(STORE_VMX, 0, 16);
1869 #endif /* CONFIG_ALTIVEC */
1871 #ifdef __powerpc64__
1874 op->type = MKOP(LOAD, u, 8);
1877 case 149: /* stdx */
1878 case 181: /* stdux */
1879 op->type = MKOP(STORE, u, 8);
1883 case 151: /* stwx */
1884 case 183: /* stwux */
1885 op->type = MKOP(STORE, u, 4);
1888 case 215: /* stbx */
1889 case 247: /* stbux */
1890 op->type = MKOP(STORE, u, 1);
1893 case 279: /* lhzx */
1894 case 311: /* lhzux */
1895 op->type = MKOP(LOAD, u, 2);
1898 #ifdef __powerpc64__
1899 case 341: /* lwax */
1900 case 373: /* lwaux */
1901 op->type = MKOP(LOAD, SIGNEXT | u, 4);
1905 case 343: /* lhax */
1906 case 375: /* lhaux */
1907 op->type = MKOP(LOAD, SIGNEXT | u, 2);
1910 case 407: /* sthx */
1911 case 439: /* sthux */
1912 op->type = MKOP(STORE, u, 2);
1915 #ifdef __powerpc64__
1916 case 532: /* ldbrx */
1917 op->type = MKOP(LOAD, BYTEREV, 8);
1921 case 533: /* lswx */
1922 op->type = MKOP(LOAD_MULTI, 0, regs->xer & 0x7f);
1925 case 534: /* lwbrx */
1926 op->type = MKOP(LOAD, BYTEREV, 4);
1929 case 597: /* lswi */
1931 rb = 32; /* # bytes to load */
1932 op->type = MKOP(LOAD_MULTI, 0, rb);
1933 op->ea = ra ? regs->gpr[ra] : 0;
1936 #ifdef CONFIG_PPC_FPU
1937 case 535: /* lfsx */
1938 case 567: /* lfsux */
1939 op->type = MKOP(LOAD_FP, u, 4);
1942 case 599: /* lfdx */
1943 case 631: /* lfdux */
1944 op->type = MKOP(LOAD_FP, u, 8);
1947 case 663: /* stfsx */
1948 case 695: /* stfsux */
1949 op->type = MKOP(STORE_FP, u, 4);
1952 case 727: /* stfdx */
1953 case 759: /* stfdux */
1954 op->type = MKOP(STORE_FP, u, 8);
1957 #ifdef __powerpc64__
1958 case 791: /* lfdpx */
1959 op->type = MKOP(LOAD_FP, 0, 16);
1962 case 919: /* stfdpx */
1963 op->type = MKOP(STORE_FP, 0, 16);
1965 #endif /* __powerpc64__ */
1966 #endif /* CONFIG_PPC_FPU */
1968 #ifdef __powerpc64__
1969 case 660: /* stdbrx */
1970 op->type = MKOP(STORE, BYTEREV, 8);
1971 op->val = byterev_8(regs->gpr[rd]);
1975 case 661: /* stswx */
1976 op->type = MKOP(STORE_MULTI, 0, regs->xer & 0x7f);
1979 case 662: /* stwbrx */
1980 op->type = MKOP(STORE, BYTEREV, 4);
1981 op->val = byterev_4(regs->gpr[rd]);
1984 case 725: /* stswi */
1986 rb = 32; /* # bytes to store */
1987 op->type = MKOP(STORE_MULTI, 0, rb);
1988 op->ea = ra ? regs->gpr[ra] : 0;
1991 case 790: /* lhbrx */
1992 op->type = MKOP(LOAD, BYTEREV, 2);
1995 case 918: /* sthbrx */
1996 op->type = MKOP(STORE, BYTEREV, 2);
1997 op->val = byterev_2(regs->gpr[rd]);
2001 case 12: /* lxsiwzx */
2002 op->reg = rd | ((instr & 1) << 5);
2003 op->type = MKOP(LOAD_VSX, 0, 4);
2004 op->element_size = 8;
2007 case 76: /* lxsiwax */
2008 op->reg = rd | ((instr & 1) << 5);
2009 op->type = MKOP(LOAD_VSX, SIGNEXT, 4);
2010 op->element_size = 8;
2013 case 140: /* stxsiwx */
2014 op->reg = rd | ((instr & 1) << 5);
2015 op->type = MKOP(STORE_VSX, 0, 4);
2016 op->element_size = 8;
2019 case 268: /* lxvx */
2020 op->reg = rd | ((instr & 1) << 5);
2021 op->type = MKOP(LOAD_VSX, 0, 16);
2022 op->element_size = 16;
2023 op->vsx_flags = VSX_CHECK_VEC;
2026 case 269: /* lxvl */
2027 case 301: { /* lxvll */
2029 op->reg = rd | ((instr & 1) << 5);
2030 op->ea = ra ? regs->gpr[ra] : 0;
2031 nb = regs->gpr[rb] & 0xff;
2034 op->type = MKOP(LOAD_VSX, 0, nb);
2035 op->element_size = 16;
2036 op->vsx_flags = ((instr & 0x20) ? VSX_LDLEFT : 0) |
2040 case 332: /* lxvdsx */
2041 op->reg = rd | ((instr & 1) << 5);
2042 op->type = MKOP(LOAD_VSX, 0, 8);
2043 op->element_size = 8;
2044 op->vsx_flags = VSX_SPLAT;
2047 case 364: /* lxvwsx */
2048 op->reg = rd | ((instr & 1) << 5);
2049 op->type = MKOP(LOAD_VSX, 0, 4);
2050 op->element_size = 4;
2051 op->vsx_flags = VSX_SPLAT | VSX_CHECK_VEC;
2054 case 396: /* stxvx */
2055 op->reg = rd | ((instr & 1) << 5);
2056 op->type = MKOP(STORE_VSX, 0, 16);
2057 op->element_size = 16;
2058 op->vsx_flags = VSX_CHECK_VEC;
2061 case 397: /* stxvl */
2062 case 429: { /* stxvll */
2064 op->reg = rd | ((instr & 1) << 5);
2065 op->ea = ra ? regs->gpr[ra] : 0;
2066 nb = regs->gpr[rb] & 0xff;
2069 op->type = MKOP(STORE_VSX, 0, nb);
2070 op->element_size = 16;
2071 op->vsx_flags = ((instr & 0x20) ? VSX_LDLEFT : 0) |
2075 case 524: /* lxsspx */
2076 op->reg = rd | ((instr & 1) << 5);
2077 op->type = MKOP(LOAD_VSX, 0, 4);
2078 op->element_size = 8;
2079 op->vsx_flags = VSX_FPCONV;
2082 case 588: /* lxsdx */
2083 op->reg = rd | ((instr & 1) << 5);
2084 op->type = MKOP(LOAD_VSX, 0, 8);
2085 op->element_size = 8;
2088 case 652: /* stxsspx */
2089 op->reg = rd | ((instr & 1) << 5);
2090 op->type = MKOP(STORE_VSX, 0, 4);
2091 op->element_size = 8;
2092 op->vsx_flags = VSX_FPCONV;
2095 case 716: /* stxsdx */
2096 op->reg = rd | ((instr & 1) << 5);
2097 op->type = MKOP(STORE_VSX, 0, 8);
2098 op->element_size = 8;
2101 case 780: /* lxvw4x */
2102 op->reg = rd | ((instr & 1) << 5);
2103 op->type = MKOP(LOAD_VSX, 0, 16);
2104 op->element_size = 4;
2107 case 781: /* lxsibzx */
2108 op->reg = rd | ((instr & 1) << 5);
2109 op->type = MKOP(LOAD_VSX, 0, 1);
2110 op->element_size = 8;
2111 op->vsx_flags = VSX_CHECK_VEC;
2114 case 812: /* lxvh8x */
2115 op->reg = rd | ((instr & 1) << 5);
2116 op->type = MKOP(LOAD_VSX, 0, 16);
2117 op->element_size = 2;
2118 op->vsx_flags = VSX_CHECK_VEC;
2121 case 813: /* lxsihzx */
2122 op->reg = rd | ((instr & 1) << 5);
2123 op->type = MKOP(LOAD_VSX, 0, 2);
2124 op->element_size = 8;
2125 op->vsx_flags = VSX_CHECK_VEC;
2128 case 844: /* lxvd2x */
2129 op->reg = rd | ((instr & 1) << 5);
2130 op->type = MKOP(LOAD_VSX, 0, 16);
2131 op->element_size = 8;
2134 case 876: /* lxvb16x */
2135 op->reg = rd | ((instr & 1) << 5);
2136 op->type = MKOP(LOAD_VSX, 0, 16);
2137 op->element_size = 1;
2138 op->vsx_flags = VSX_CHECK_VEC;
2141 case 908: /* stxvw4x */
2142 op->reg = rd | ((instr & 1) << 5);
2143 op->type = MKOP(STORE_VSX, 0, 16);
2144 op->element_size = 4;
2147 case 909: /* stxsibx */
2148 op->reg = rd | ((instr & 1) << 5);
2149 op->type = MKOP(STORE_VSX, 0, 1);
2150 op->element_size = 8;
2151 op->vsx_flags = VSX_CHECK_VEC;
2154 case 940: /* stxvh8x */
2155 op->reg = rd | ((instr & 1) << 5);
2156 op->type = MKOP(STORE_VSX, 0, 16);
2157 op->element_size = 2;
2158 op->vsx_flags = VSX_CHECK_VEC;
2161 case 941: /* stxsihx */
2162 op->reg = rd | ((instr & 1) << 5);
2163 op->type = MKOP(STORE_VSX, 0, 2);
2164 op->element_size = 8;
2165 op->vsx_flags = VSX_CHECK_VEC;
2168 case 972: /* stxvd2x */
2169 op->reg = rd | ((instr & 1) << 5);
2170 op->type = MKOP(STORE_VSX, 0, 16);
2171 op->element_size = 8;
2174 case 1004: /* stxvb16x */
2175 op->reg = rd | ((instr & 1) << 5);
2176 op->type = MKOP(STORE_VSX, 0, 16);
2177 op->element_size = 1;
2178 op->vsx_flags = VSX_CHECK_VEC;
2181 #endif /* CONFIG_VSX */
2187 op->type = MKOP(LOAD, u, 4);
2188 op->ea = dform_ea(instr, regs);
2193 op->type = MKOP(LOAD, u, 1);
2194 op->ea = dform_ea(instr, regs);
2199 op->type = MKOP(STORE, u, 4);
2200 op->ea = dform_ea(instr, regs);
2205 op->type = MKOP(STORE, u, 1);
2206 op->ea = dform_ea(instr, regs);
2211 op->type = MKOP(LOAD, u, 2);
2212 op->ea = dform_ea(instr, regs);
2217 op->type = MKOP(LOAD, SIGNEXT | u, 2);
2218 op->ea = dform_ea(instr, regs);
2223 op->type = MKOP(STORE, u, 2);
2224 op->ea = dform_ea(instr, regs);
2229 break; /* invalid form, ra in range to load */
2230 op->type = MKOP(LOAD_MULTI, 0, 4 * (32 - rd));
2231 op->ea = dform_ea(instr, regs);
2235 op->type = MKOP(STORE_MULTI, 0, 4 * (32 - rd));
2236 op->ea = dform_ea(instr, regs);
2239 #ifdef CONFIG_PPC_FPU
2242 op->type = MKOP(LOAD_FP, u, 4);
2243 op->ea = dform_ea(instr, regs);
2248 op->type = MKOP(LOAD_FP, u, 8);
2249 op->ea = dform_ea(instr, regs);
2253 case 53: /* stfsu */
2254 op->type = MKOP(STORE_FP, u, 4);
2255 op->ea = dform_ea(instr, regs);
2259 case 55: /* stfdu */
2260 op->type = MKOP(STORE_FP, u, 8);
2261 op->ea = dform_ea(instr, regs);
2265 #ifdef __powerpc64__
2267 if (!((rd & 1) || (rd == ra)))
2268 op->type = MKOP(LOAD, 0, 16);
2269 op->ea = dqform_ea(instr, regs);
2274 case 57: /* lfdp, lxsd, lxssp */
2275 op->ea = dsform_ea(instr, regs);
2276 switch (instr & 3) {
2279 break; /* reg must be even */
2280 op->type = MKOP(LOAD_FP, 0, 16);
2284 op->type = MKOP(LOAD_VSX, 0, 8);
2285 op->element_size = 8;
2286 op->vsx_flags = VSX_CHECK_VEC;
2290 op->type = MKOP(LOAD_VSX, 0, 4);
2291 op->element_size = 8;
2292 op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
2296 #endif /* CONFIG_VSX */
2298 #ifdef __powerpc64__
2299 case 58: /* ld[u], lwa */
2300 op->ea = dsform_ea(instr, regs);
2301 switch (instr & 3) {
2303 op->type = MKOP(LOAD, 0, 8);
2306 op->type = MKOP(LOAD, UPDATE, 8);
2309 op->type = MKOP(LOAD, SIGNEXT, 4);
2316 case 61: /* stfdp, lxv, stxsd, stxssp, stxv */
2317 switch (instr & 7) {
2318 case 0: /* stfdp with LSB of DS field = 0 */
2319 case 4: /* stfdp with LSB of DS field = 1 */
2320 op->ea = dsform_ea(instr, regs);
2321 op->type = MKOP(STORE_FP, 0, 16);
2325 op->ea = dqform_ea(instr, regs);
2328 op->type = MKOP(LOAD_VSX, 0, 16);
2329 op->element_size = 16;
2330 op->vsx_flags = VSX_CHECK_VEC;
2333 case 2: /* stxsd with LSB of DS field = 0 */
2334 case 6: /* stxsd with LSB of DS field = 1 */
2335 op->ea = dsform_ea(instr, regs);
2337 op->type = MKOP(STORE_VSX, 0, 8);
2338 op->element_size = 8;
2339 op->vsx_flags = VSX_CHECK_VEC;
2342 case 3: /* stxssp with LSB of DS field = 0 */
2343 case 7: /* stxssp with LSB of DS field = 1 */
2344 op->ea = dsform_ea(instr, regs);
2346 op->type = MKOP(STORE_VSX, 0, 4);
2347 op->element_size = 8;
2348 op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
2352 op->ea = dqform_ea(instr, regs);
2355 op->type = MKOP(STORE_VSX, 0, 16);
2356 op->element_size = 16;
2357 op->vsx_flags = VSX_CHECK_VEC;
2361 #endif /* CONFIG_VSX */
2363 #ifdef __powerpc64__
2364 case 62: /* std[u] */
2365 op->ea = dsform_ea(instr, regs);
2366 switch (instr & 3) {
2368 op->type = MKOP(STORE, 0, 8);
2371 op->type = MKOP(STORE, UPDATE, 8);
2375 op->type = MKOP(STORE, 0, 16);
2379 #endif /* __powerpc64__ */
2386 set_cr0(regs, op, ra);
2394 set_cr0(regs, op, rd);
2401 op->type = INTERRUPT | 0x700;
2402 op->val = SRR1_PROGPRIV;
2406 op->type = INTERRUPT | 0x700;
2407 op->val = SRR1_PROGTRAP;
2410 EXPORT_SYMBOL_GPL(analyse_instr);
2411 NOKPROBE_SYMBOL(analyse_instr);
2414 * For PPC32 we always use stwu with r1 to change the stack pointer,
2415 * so this emulated store could corrupt the exception frame; we therefore
2416 * provide an exception frame trampoline, which is pushed below the
2417 * kprobed function's stack. Here we only update gpr[1] and do not
2418 * emulate the actual store; the real store is done safely in the
2419 * exception return code, which checks this flag.
2421 static nokprobe_inline int handle_stack_update(unsigned long ea, struct pt_regs *regs)
2425 * Check if we would overflow the kernel stack
2427 if (ea - STACK_INT_FRAME_SIZE <= current->thread.ksp_limit) {
2428 printk(KERN_CRIT "Can't kprobe this since kernel stack would overflow.\n");
2431 #endif /* CONFIG_PPC32 */
2433 * Warn if the flag is already set, since that would mean losing
2434 * the previous value.
2436 WARN_ON(test_thread_flag(TIF_EMULATE_STACK_STORE));
2437 set_thread_flag(TIF_EMULATE_STACK_STORE);
2441 static nokprobe_inline void do_signext(unsigned long *valp, int size)
2445 *valp = (signed short) *valp;
2448 *valp = (signed int) *valp;
2453 static nokprobe_inline void do_byterev(unsigned long *valp, int size)
2457 *valp = byterev_2(*valp);
2460 *valp = byterev_4(*valp);
2462 #ifdef __powerpc64__
2464 *valp = byterev_8(*valp);
2471 * Emulate an instruction that can be executed just by updating
2474 void emulate_update_regs(struct pt_regs *regs, struct instruction_op *op)
2476 unsigned long next_pc;
2478 next_pc = truncate_if_32bit(regs->msr, regs->nip + 4);
2479 switch (op->type & INSTR_TYPE_MASK) {
2481 if (op->type & SETREG)
2482 regs->gpr[op->reg] = op->val;
2483 if (op->type & SETCC)
2484 regs->ccr = op->ccval;
2485 if (op->type & SETXER)
2486 regs->xer = op->xerval;
2490 if (op->type & SETLK)
2491 regs->link = next_pc;
2492 if (op->type & BRTAKEN)
2494 if (op->type & DECCTR)
2499 switch (op->type & BARRIER_MASK) {
2509 case BARRIER_LWSYNC:
2510 asm volatile("lwsync" : : : "memory");
2512 case BARRIER_PTESYNC:
2513 asm volatile("ptesync" : : : "memory");
2521 regs->gpr[op->reg] = regs->xer & 0xffffffffUL;
2524 regs->gpr[op->reg] = regs->link;
2527 regs->gpr[op->reg] = regs->ctr;
2537 regs->xer = op->val & 0xffffffffUL;
2540 regs->link = op->val;
2543 regs->ctr = op->val;
2553 regs->nip = next_pc;
2557 * Emulate instructions that cause a transfer of control,
2558 * loads and stores, and a few other instructions.
2559 * Returns 1 if the step was emulated, 0 if not,
2560 * or -1 if the instruction is one that should not be stepped,
2561 * such as an rfid, or a mtmsrd that would clear MSR_RI.
2563 int emulate_step(struct pt_regs *regs, unsigned int instr)
2565 struct instruction_op op;
2566 int r, err, size, type;
2572 r = analyse_instr(&op, regs, instr);
2576 emulate_update_regs(regs, &op);
2581 size = GETSIZE(op.type);
2582 type = op.type & INSTR_TYPE_MASK;
2585 if (OP_IS_LOAD_STORE(type) || type == CACHEOP)
2586 ea = truncate_if_32bit(regs->msr, op.ea);
2590 if (!address_ok(regs, ea, 8))
2592 switch (op.type & CACHEOP_MASK) {
2594 __cacheop_user_asmx(ea, err, "dcbst");
2597 __cacheop_user_asmx(ea, err, "dcbf");
2601 prefetchw((void *) ea);
2605 prefetch((void *) ea);
2608 __cacheop_user_asmx(ea, err, "icbi");
2616 if (ea & (size - 1))
2617 break; /* can't handle misaligned */
2618 if (!address_ok(regs, ea, size))
2622 #ifdef __powerpc64__
2624 __get_user_asmx(val, ea, err, "lbarx");
2627 __get_user_asmx(val, ea, err, "lharx");
2631 __get_user_asmx(val, ea, err, "lwarx");
2633 #ifdef __powerpc64__
2635 __get_user_asmx(val, ea, err, "ldarx");
2638 err = do_lqarx(ea, &regs->gpr[op.reg]);
2645 regs->gpr[op.reg] = val;
2649 if (ea & (size - 1))
2650 break; /* can't handle misaligned */
2651 if (!address_ok(regs, ea, size))
2655 #ifdef __powerpc64__
2657 __put_user_asmx(op.val, ea, err, "stbcx.", cr);
2660 __put_user_asmx(op.val, ea, err, "sthcx.", cr);
2664 __put_user_asmx(op.val, ea, err, "stwcx.", cr);
2666 #ifdef __powerpc64__
2668 __put_user_asmx(op.val, ea, err, "stdcx.", cr);
2671 err = do_stqcx(ea, regs->gpr[op.reg],
2672 regs->gpr[op.reg + 1], &cr);
2679 regs->ccr = (regs->ccr & 0x0fffffff) |
2681 ((regs->xer >> 3) & 0x10000000);
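/*
 * The LARX/STCX cases above must execute the real reservation
 * instructions via __get_user_asmx/__put_user_asmx: a reservation
 * cannot be emulated with ordinary loads and stores, and the access
 * has to be naturally aligned, hence the (ea & (size - 1)) checks.
 */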
2685 #ifdef __powerpc64__
2687 err = emulate_lq(regs, ea, op.reg);
2691 err = read_mem(&regs->gpr[op.reg], ea, size, regs);
2693 if (op.type & SIGNEXT)
2694 do_signext(&regs->gpr[op.reg], size);
2695 if (op.type & BYTEREV)
2696 do_byterev(&regs->gpr[op.reg], size);
2700 #ifdef CONFIG_PPC_FPU
2703 * If the instruction is in userspace, we can emulate it even
2704 * if the FP/VMX/VSX state is not live, because we have the state
2705 * stored in the thread_struct. If the instruction is in
2706 * the kernel, we must not touch the state in the thread_struct.
2708 if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_FP))
2710 err = do_fp_load(op.reg, ea, size, regs);
2713 #ifdef CONFIG_ALTIVEC
2715 if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_VEC))
2717 err = do_vec_load(op.reg, ea, size, regs);
2722 unsigned long msrbit = MSR_VSX;
2725 * Some VSX instructions check the MSR_VEC bit rather than MSR_VSX
2726 * when the target of the instruction is a vector register.
2728 if (op.reg >= 32 && (op.vsx_flags & VSX_CHECK_VEC))
2730 if (!(regs->msr & MSR_PR) && !(regs->msr & msrbit))
2732 err = do_vsx_load(&op, ea, regs);
2737 if (regs->msr & MSR_LE)
2740 for (i = 0; i < size; i += 4) {
2744 err = read_mem(&regs->gpr[rd], ea, nb, regs);
2747 if (nb < 4) /* left-justify last bytes */
2748 regs->gpr[rd] <<= 32 - 8 * nb;
2755 #ifdef __powerpc64__
2757 err = emulate_stq(regs, ea, op.reg);
2761 if ((op.type & UPDATE) && size == sizeof(long) &&
2762 op.reg == 1 && op.update_reg == 1 &&
2763 !(regs->msr & MSR_PR) &&
2764 ea >= regs->gpr[1] - STACK_INT_FRAME_SIZE) {
2765 err = handle_stack_update(ea, regs);
2768 err = write_mem(op.val, ea, size, regs);
2771 #ifdef CONFIG_PPC_FPU
2773 if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_FP))
2775 err = do_fp_store(op.reg, ea, size, regs);
2778 #ifdef CONFIG_ALTIVEC
2780 if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_VEC))
2782 err = do_vec_store(op.reg, ea, size, regs);
2787 unsigned long msrbit = MSR_VSX;
2790 * Some VSX instructions check the MSR_VEC bit rather than MSR_VSX
2791 * when the target of the instruction is a vector register.
2793 if (op.reg >= 32 && (op.vsx_flags & VSX_CHECK_VEC))
2795 if (!(regs->msr & MSR_PR) && !(regs->msr & msrbit))
2797 err = do_vsx_store(&op, ea, regs);
2802 if (regs->msr & MSR_LE)
2805 for (i = 0; i < size; i += 4) {
2806 val = regs->gpr[rd];
2811 val >>= 32 - 8 * nb;
2812 err = write_mem(val, ea, nb, regs);
2821 regs->gpr[op.reg] = regs->msr & MSR_MASK;
2825 val = regs->gpr[op.reg];
2826 if ((val & MSR_RI) == 0)
2827 /* can't step mtmsr[d] that would clear MSR_RI */
2829 /* here op.val is the mask of bits to change */
2830 regs->msr = (regs->msr & ~op.val) | (val & op.val);
2834 case SYSCALL: /* sc */
2836 * N.B. this uses knowledge about how the syscall
2837 * entry code works. If that is changed, this will
2838 * need to be changed also.
2840 if (regs->gpr[0] == 0x1ebe &&
2841 cpu_has_feature(CPU_FTR_REAL_LE)) {
2842 regs->msr ^= MSR_LE;
2845 regs->gpr[9] = regs->gpr[13];
2846 regs->gpr[10] = MSR_KERNEL;
2847 regs->gpr[11] = regs->nip + 4;
2848 regs->gpr[12] = regs->msr & MSR_MASK;
2849 regs->gpr[13] = (unsigned long) get_paca();
2850 regs->nip = (unsigned long) &system_call_common;
2851 regs->msr = MSR_KERNEL;
2863 if (op.type & UPDATE)
2864 regs->gpr[op.update_reg] = op.ea;
2867 regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4);
2870 NOKPROBE_SYMBOL(emulate_step);