powerpc: Emulate load/store floating double pair instructions
[linux-block.git] arch/powerpc/lib/sstep.c
1 /*
2  * Single-step support.
3  *
4  * Copyright (C) 2004 Paul Mackerras <paulus@au.ibm.com>, IBM
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License
8  * as published by the Free Software Foundation; either version
9  * 2 of the License, or (at your option) any later version.
10  */
11 #include <linux/kernel.h>
12 #include <linux/kprobes.h>
13 #include <linux/ptrace.h>
14 #include <linux/prefetch.h>
15 #include <asm/sstep.h>
16 #include <asm/processor.h>
17 #include <linux/uaccess.h>
18 #include <asm/cpu_has_feature.h>
19 #include <asm/cputable.h>
20
21 extern char system_call_common[];
22
23 #ifdef CONFIG_PPC64
24 /* Bits in SRR1 that are copied from MSR */
25 #define MSR_MASK        0xffffffff87c0ffffUL
26 #else
27 #define MSR_MASK        0x87c0ffff
28 #endif
29
30 /* Bits in XER */
31 #define XER_SO          0x80000000U
32 #define XER_OV          0x40000000U
33 #define XER_CA          0x20000000U
34
35 #ifdef CONFIG_PPC_FPU
36 /*
37  * Functions in ldstfp.S
38  */
39 extern void get_fpr(int rn, double *p);
40 extern void put_fpr(int rn, const double *p);
41 extern void get_vr(int rn, __vector128 *p);
42 extern void put_vr(int rn, __vector128 *p);
43 extern void load_vsrn(int vsr, const void *p);
44 extern void store_vsrn(int vsr, void *p);
45 extern void conv_sp_to_dp(const float *sp, double *dp);
46 extern void conv_dp_to_sp(const double *dp, float *sp);
47 #endif
48
49 #ifdef __powerpc64__
50 /*
51  * Functions in quad.S
52  */
53 extern int do_lq(unsigned long ea, unsigned long *regs);
54 extern int do_stq(unsigned long ea, unsigned long val0, unsigned long val1);
55 extern int do_lqarx(unsigned long ea, unsigned long *regs);
56 extern int do_stqcx(unsigned long ea, unsigned long val0, unsigned long val1,
57                     unsigned int *crp);
58 #endif
59
60 #ifdef __LITTLE_ENDIAN__
61 #define IS_LE   1
62 #define IS_BE   0
63 #else
64 #define IS_LE   0
65 #define IS_BE   1
66 #endif
67
68 /*
69  * Emulate the truncation of 64-bit values in 32-bit mode.
70  */
71 static nokprobe_inline unsigned long truncate_if_32bit(unsigned long msr,
72                                                         unsigned long val)
73 {
74 #ifdef __powerpc64__
75         if ((msr & MSR_64BIT) == 0)
76                 val &= 0xffffffffUL;
77 #endif
78         return val;
79 }
80
81 /*
82  * Determine whether a conditional branch instruction would branch.
83  */
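/*
 * BO field summary, as decoded below: if bit 0x04 is clear, CTR is
 * decremented and tested (bit 0x02 set means "branch if the decremented
 * CTR is zero", clear means non-zero); if bit 0x10 is clear, the CR bit
 * selected by BI must equal BO bit 0x08.  For example, bdnz encodes
 * BO = 0b10000: no CR test, branch while the decremented CTR is non-zero.
 */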
84 static nokprobe_inline int branch_taken(unsigned int instr,
85                                         const struct pt_regs *regs,
86                                         struct instruction_op *op)
87 {
88         unsigned int bo = (instr >> 21) & 0x1f;
89         unsigned int bi;
90
91         if ((bo & 4) == 0) {
92                 /* decrement counter */
93                 op->type |= DECCTR;
94                 if (((bo >> 1) & 1) ^ (regs->ctr == 1))
95                         return 0;
96         }
97         if ((bo & 0x10) == 0) {
98                 /* check bit from CR */
99                 bi = (instr >> 16) & 0x1f;
100                 if (((regs->ccr >> (31 - bi)) & 1) != ((bo >> 3) & 1))
101                         return 0;
102         }
103         return 1;
104 }
105
106 static nokprobe_inline long address_ok(struct pt_regs *regs, unsigned long ea, int nb)
107 {
108         if (!user_mode(regs))
109                 return 1;
110         return __access_ok(ea, nb, USER_DS);
111 }
112
113 /*
114  * Calculate effective address for a D-form instruction
115  */
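/*
 * EA = (RA ? GPR[RA] : 0) + sign-extended 16-bit displacement.  RA == 0
 * means a literal zero base, not GPR[0]; e.g. "lwz r3,8(r4)" gives
 * EA = GPR[r4] + 8.
 */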
116 static nokprobe_inline unsigned long dform_ea(unsigned int instr,
117                                               const struct pt_regs *regs)
118 {
119         int ra;
120         unsigned long ea;
121
122         ra = (instr >> 16) & 0x1f;
123         ea = (signed short) instr;              /* sign-extend */
124         if (ra)
125                 ea += regs->gpr[ra];
126
127         return ea;
128 }
129
130 #ifdef __powerpc64__
131 /*
132  * Calculate effective address for a DS-form instruction
133  */
134 static nokprobe_inline unsigned long dsform_ea(unsigned int instr,
135                                                const struct pt_regs *regs)
136 {
137         int ra;
138         unsigned long ea;
139
140         ra = (instr >> 16) & 0x1f;
141         ea = (signed short) (instr & ~3);       /* sign-extend */
142         if (ra)
143                 ea += regs->gpr[ra];
144
145         return ea;
146 }
147
148 /*
149  * Calculate effective address for a DQ-form instruction
150  */
151 static nokprobe_inline unsigned long dqform_ea(unsigned int instr,
152                                                const struct pt_regs *regs)
153 {
154         int ra;
155         unsigned long ea;
156
157         ra = (instr >> 16) & 0x1f;
158         ea = (signed short) (instr & ~0xf);     /* sign-extend */
159         if (ra)
160                 ea += regs->gpr[ra];
161
162         return ea;
163 }
164 #endif /* __powerpc64__ */
165
166 /*
167  * Calculate effective address for an X-form instruction
168  */
169 static nokprobe_inline unsigned long xform_ea(unsigned int instr,
170                                               const struct pt_regs *regs)
171 {
172         int ra, rb;
173         unsigned long ea;
174
175         ra = (instr >> 16) & 0x1f;
176         rb = (instr >> 11) & 0x1f;
177         ea = regs->gpr[rb];
178         if (ra)
179                 ea += regs->gpr[ra];
180
181         return ea;
182 }
183
184 /*
185  * Return the largest power of 2, not greater than sizeof(unsigned long),
186  * such that x is a multiple of it.
187  */
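/*
 * For example, max_align(0x1003) == 1, max_align(0x1004) == 4 and
 * max_align(0x1008) == 8 (with an 8-byte unsigned long).
 */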
188 static nokprobe_inline unsigned long max_align(unsigned long x)
189 {
190         x |= sizeof(unsigned long);
191         return x & -x;          /* isolates rightmost bit */
192 }
193
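/*
 * Byte-swap helpers for 2-, 4- and 8-byte values; byterev_8() is used by
 * the VSX emulation below to reverse the two vector halves on little-endian.
 */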
194 static nokprobe_inline unsigned long byterev_2(unsigned long x)
195 {
196         return ((x >> 8) & 0xff) | ((x & 0xff) << 8);
197 }
198
199 static nokprobe_inline unsigned long byterev_4(unsigned long x)
200 {
201         return ((x >> 24) & 0xff) | ((x >> 8) & 0xff00) |
202                 ((x & 0xff00) << 8) | ((x & 0xff) << 24);
203 }
204
205 #ifdef __powerpc64__
206 static nokprobe_inline unsigned long byterev_8(unsigned long x)
207 {
208         return (byterev_4(x) << 32) | byterev_4(x >> 32);
209 }
210 #endif
211
212 static nokprobe_inline int read_mem_aligned(unsigned long *dest,
213                                         unsigned long ea, int nb)
214 {
215         int err = 0;
216         unsigned long x = 0;
217
218         switch (nb) {
219         case 1:
220                 err = __get_user(x, (unsigned char __user *) ea);
221                 break;
222         case 2:
223                 err = __get_user(x, (unsigned short __user *) ea);
224                 break;
225         case 4:
226                 err = __get_user(x, (unsigned int __user *) ea);
227                 break;
228 #ifdef __powerpc64__
229         case 8:
230                 err = __get_user(x, (unsigned long __user *) ea);
231                 break;
232 #endif
233         }
234         if (!err)
235                 *dest = x;
236         return err;
237 }
238
239 /*
240  * Copy from userspace to a buffer, using the largest possible
241  * aligned accesses, up to sizeof(long).
242  */
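/*
 * For example, ea = 0x1002 with nb = 8 is copied as a 2-byte, then a
 * 4-byte, then a 2-byte access.
 */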
243 static int nokprobe_inline copy_mem_in(u8 *dest, unsigned long ea, int nb)
244 {
245         int err = 0;
246         int c;
247
248         for (; nb > 0; nb -= c) {
249                 c = max_align(ea);
250                 if (c > nb)
251                         c = max_align(nb);
252                 switch (c) {
253                 case 1:
254                         err = __get_user(*dest, (unsigned char __user *) ea);
255                         break;
256                 case 2:
257                         err = __get_user(*(u16 *)dest,
258                                          (unsigned short __user *) ea);
259                         break;
260                 case 4:
261                         err = __get_user(*(u32 *)dest,
262                                          (unsigned int __user *) ea);
263                         break;
264 #ifdef __powerpc64__
265                 case 8:
266                         err = __get_user(*(unsigned long *)dest,
267                                          (unsigned long __user *) ea);
268                         break;
269 #endif
270                 }
271                 if (err)
272                         return err;
273                 dest += c;
274                 ea += c;
275         }
276         return 0;
277 }
278
279 static nokprobe_inline int read_mem_unaligned(unsigned long *dest,
280                                               unsigned long ea, int nb,
281                                               struct pt_regs *regs)
282 {
283         union {
284                 unsigned long ul;
285                 u8 b[sizeof(unsigned long)];
286         } u;
287         int i;
288         int err;
289
290         u.ul = 0;
291         i = IS_BE ? sizeof(unsigned long) - nb : 0;
292         err = copy_mem_in(&u.b[i], ea, nb);
293         if (!err)
294                 *dest = u.ul;
295         return err;
296 }
297
298 /*
299  * Read memory at address ea for nb bytes, return 0 for success
300  * or -EFAULT if an error occurred.  N.B. nb must be 1, 2, 4 or 8.
301  * If nb < sizeof(long), the result is right-justified on BE systems.
302  */
303 static int read_mem(unsigned long *dest, unsigned long ea, int nb,
304                               struct pt_regs *regs)
305 {
306         if (!address_ok(regs, ea, nb))
307                 return -EFAULT;
308         if ((ea & (nb - 1)) == 0)
309                 return read_mem_aligned(dest, ea, nb);
310         return read_mem_unaligned(dest, ea, nb, regs);
311 }
312 NOKPROBE_SYMBOL(read_mem);
313
314 static nokprobe_inline int write_mem_aligned(unsigned long val,
315                                         unsigned long ea, int nb)
316 {
317         int err = 0;
318
319         switch (nb) {
320         case 1:
321                 err = __put_user(val, (unsigned char __user *) ea);
322                 break;
323         case 2:
324                 err = __put_user(val, (unsigned short __user *) ea);
325                 break;
326         case 4:
327                 err = __put_user(val, (unsigned int __user *) ea);
328                 break;
329 #ifdef __powerpc64__
330         case 8:
331                 err = __put_user(val, (unsigned long __user *) ea);
332                 break;
333 #endif
334         }
335         return err;
336 }
337
338 /*
339  * Copy from a buffer to userspace, using the largest possible
340  * aligned accesses, up to sizeof(long).
341  */
342 static int nokprobe_inline copy_mem_out(u8 *dest, unsigned long ea, int nb)
343 {
344         int err = 0;
345         int c;
346
347         for (; nb > 0; nb -= c) {
348                 c = max_align(ea);
349                 if (c > nb)
350                         c = max_align(nb);
351                 switch (c) {
352                 case 1:
353                         err = __put_user(*dest, (unsigned char __user *) ea);
354                         break;
355                 case 2:
356                         err = __put_user(*(u16 *)dest,
357                                          (unsigned short __user *) ea);
358                         break;
359                 case 4:
360                         err = __put_user(*(u32 *)dest,
361                                          (unsigned int __user *) ea);
362                         break;
363 #ifdef __powerpc64__
364                 case 8:
365                         err = __put_user(*(unsigned long *)dest,
366                                          (unsigned long __user *) ea);
367                         break;
368 #endif
369                 }
370                 if (err)
371                         return err;
372                 dest += c;
373                 ea += c;
374         }
375         return 0;
376 }
377
378 static nokprobe_inline int write_mem_unaligned(unsigned long val,
379                                                unsigned long ea, int nb,
380                                                struct pt_regs *regs)
381 {
382         union {
383                 unsigned long ul;
384                 u8 b[sizeof(unsigned long)];
385         } u;
386         int i;
387
388         u.ul = val;
389         i = IS_BE ? sizeof(unsigned long) - nb : 0;
390         return copy_mem_out(&u.b[i], ea, nb);
391 }
392
393 /*
394  * Write memory at address ea for nb bytes, return 0 for success
395  * or -EFAULT if an error occurred.  N.B. nb must be 1, 2, 4 or 8.
396  */
397 static int write_mem(unsigned long val, unsigned long ea, int nb,
398                                struct pt_regs *regs)
399 {
400         if (!address_ok(regs, ea, nb))
401                 return -EFAULT;
402         if ((ea & (nb - 1)) == 0)
403                 return write_mem_aligned(val, ea, nb);
404         return write_mem_unaligned(val, ea, nb, regs);
405 }
406 NOKPROBE_SYMBOL(write_mem);
407
408 #ifdef CONFIG_PPC_FPU
409 /*
410  * These access either the real FP register or the image in the
411  * thread_struct, depending on regs->msr & MSR_FP.
412  */
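/*
 * nb == 16 is the floating-point double-pair case (lfdp[x]/stfdp[x]):
 * the even register rn and rn|1 are transferred as a pair.
 */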
413 static int do_fp_load(int rn, unsigned long ea, int nb, struct pt_regs *regs)
414 {
415         int err;
416         union {
417                 float f;
418                 double d[2];
419                 unsigned long l[2];
420                 u8 b[2 * sizeof(double)];
421         } u;
422
423         if (!address_ok(regs, ea, nb))
424                 return -EFAULT;
425         err = copy_mem_in(u.b, ea, nb);
426         if (err)
427                 return err;
428         preempt_disable();
429         if (nb == 4)
430                 conv_sp_to_dp(&u.f, &u.d[0]);
431         if (regs->msr & MSR_FP)
432                 put_fpr(rn, &u.d[0]);
433         else
434                 current->thread.TS_FPR(rn) = u.l[0];
435         if (nb == 16) {
436                 /* lfdp */
437                 rn |= 1;
438                 if (regs->msr & MSR_FP)
439                         put_fpr(rn, &u.d[1]);
440                 else
441                         current->thread.TS_FPR(rn) = u.l[1];
442         }
443         preempt_enable();
444         return 0;
445 }
446 NOKPROBE_SYMBOL(do_fp_load);
447
448 static int do_fp_store(int rn, unsigned long ea, int nb, struct pt_regs *regs)
449 {
450         union {
451                 float f;
452                 double d[2];
453                 unsigned long l[2];
454                 u8 b[2 * sizeof(double)];
455         } u;
456
457         if (!address_ok(regs, ea, nb))
458                 return -EFAULT;
459         preempt_disable();
460         if (regs->msr & MSR_FP)
461                 get_fpr(rn, &u.d[0]);
462         else
463                 u.l[0] = current->thread.TS_FPR(rn);
464         if (nb == 4)
465                 conv_dp_to_sp(&u.d[0], &u.f);
466         if (nb == 16) {
467                 rn |= 1;
468                 if (regs->msr & MSR_FP)
469                         get_fpr(rn, &u.d[1]);
470                 else
471                         u.l[1] = current->thread.TS_FPR(rn);
472         }
473         preempt_enable();
474         return copy_mem_out(u.b, ea, nb);
475 }
476 NOKPROBE_SYMBOL(do_fp_store);
477 #endif
478
479 #ifdef CONFIG_ALTIVEC
480 /* For Altivec/VMX, no need to worry about alignment */
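/*
 * lvx/stvx ignore the low four bits of the EA; for the element forms
 * (size 1, 2 or 4) the element is kept at its natural offset within the
 * quadword, hence the u.b[ea & 0xf] indexing below.
 */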
481 static nokprobe_inline int do_vec_load(int rn, unsigned long ea,
482                                        int size, struct pt_regs *regs)
483 {
484         int err;
485         union {
486                 __vector128 v;
487                 u8 b[sizeof(__vector128)];
488         } u = {};
489
490         if (!address_ok(regs, ea & ~0xfUL, 16))
491                 return -EFAULT;
492         /* align to multiple of size */
493         ea &= ~(size - 1);
494         err = copy_mem_in(&u.b[ea & 0xf], ea, size);
495         if (err)
496                 return err;
497
498         preempt_disable();
499         if (regs->msr & MSR_VEC)
500                 put_vr(rn, &u.v);
501         else
502                 current->thread.vr_state.vr[rn] = u.v;
503         preempt_enable();
504         return 0;
505 }
506
507 static nokprobe_inline int do_vec_store(int rn, unsigned long ea,
508                                         int size, struct pt_regs *regs)
509 {
510         union {
511                 __vector128 v;
512                 u8 b[sizeof(__vector128)];
513         } u;
514
515         if (!address_ok(regs, ea & ~0xfUL, 16))
516                 return -EFAULT;
517         /* align to multiple of size */
518         ea &= ~(size - 1);
519
520         preempt_disable();
521         if (regs->msr & MSR_VEC)
522                 get_vr(rn, &u.v);
523         else
524                 u.v = current->thread.vr_state.vr[rn];
525         preempt_enable();
526         return copy_mem_out(&u.b[ea & 0xf], ea, size);
527 }
528 #endif /* CONFIG_ALTIVEC */
529
530 #ifdef __powerpc64__
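/*
 * lq/stq move an even/odd GPR pair.  Which register of the pair holds the
 * doubleword at the lower address differs between big- and little-endian
 * mode, hence the IS_LE/IS_BE indexing in the unaligned fallback below.
 */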
531 static nokprobe_inline int emulate_lq(struct pt_regs *regs, unsigned long ea,
532                                       int reg)
533 {
534         int err;
535
536         if (!address_ok(regs, ea, 16))
537                 return -EFAULT;
538         /* if aligned, should be atomic */
539         if ((ea & 0xf) == 0)
540                 return do_lq(ea, &regs->gpr[reg]);
541
542         err = read_mem(&regs->gpr[reg + IS_LE], ea, 8, regs);
543         if (!err)
544                 err = read_mem(&regs->gpr[reg + IS_BE], ea + 8, 8, regs);
545         return err;
546 }
547
548 static nokprobe_inline int emulate_stq(struct pt_regs *regs, unsigned long ea,
549                                        int reg)
550 {
551         int err;
552
553         if (!address_ok(regs, ea, 16))
554                 return -EFAULT;
555         /* if aligned, should be atomic */
556         if ((ea & 0xf) == 0)
557                 return do_stq(ea, regs->gpr[reg], regs->gpr[reg + 1]);
558
559         err = write_mem(regs->gpr[reg + IS_LE], ea, 8, regs);
560         if (!err)
561                 err = write_mem(regs->gpr[reg + IS_BE], ea + 8, 8, regs);
562         return err;
563 }
564 #endif /* __powerpc64__ */
565
566 #ifdef CONFIG_VSX
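/*
 * op->element_size gives the granularity of the VSX access: 16 for
 * whole-vector forms, 8 for doubleword/scalar forms, and 4/2/1 for word,
 * halfword and byte element forms.  The IS_LE adjustments below map each
 * element to its position in the union vsx_reg image.
 */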
567 void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg,
568                       const void *mem)
569 {
570         int size, read_size;
571         int i, j;
572         const unsigned int *wp;
573         const unsigned short *hp;
574         const unsigned char *bp;
575
576         size = GETSIZE(op->type);
577         reg->d[0] = reg->d[1] = 0;
578
579         switch (op->element_size) {
580         case 16:
581                 /* whole vector; lxv[x] or lxvl[l] */
582                 if (size == 0)
583                         break;
584                 memcpy(reg, mem, size);
585                 if (IS_LE && (op->vsx_flags & VSX_LDLEFT)) {
586                         /* reverse 16 bytes */
587                         unsigned long tmp;
588                         tmp = byterev_8(reg->d[0]);
589                         reg->d[0] = byterev_8(reg->d[1]);
590                         reg->d[1] = tmp;
591                 }
592                 break;
593         case 8:
594                 /* scalar loads, lxvd2x, lxvdsx */
595                 read_size = (size >= 8) ? 8 : size;
596                 i = IS_LE ? 8 : 8 - read_size;
597                 memcpy(&reg->b[i], mem, read_size);
598                 if (size < 8) {
599                         if (op->type & SIGNEXT) {
600                                 /* size == 4 is the only case here */
601                                 reg->d[IS_LE] = (signed int) reg->d[IS_LE];
602                         } else if (op->vsx_flags & VSX_FPCONV) {
603                                 preempt_disable();
604                                 conv_sp_to_dp(&reg->fp[1 + IS_LE],
605                                               &reg->dp[IS_LE]);
606                                 preempt_enable();
607                         }
608                 } else {
609                         if (size == 16)
610                                 reg->d[IS_BE] = *(unsigned long *)(mem + 8);
611                         else if (op->vsx_flags & VSX_SPLAT)
612                                 reg->d[IS_BE] = reg->d[IS_LE];
613                 }
614                 break;
615         case 4:
616                 /* lxvw4x, lxvwsx */
617                 wp = mem;
618                 for (j = 0; j < size / 4; ++j) {
619                         i = IS_LE ? 3 - j : j;
620                         reg->w[i] = *wp++;
621                 }
622                 if (op->vsx_flags & VSX_SPLAT) {
623                         u32 val = reg->w[IS_LE ? 3 : 0];
624                         for (; j < 4; ++j) {
625                                 i = IS_LE ? 3 - j : j;
626                                 reg->w[i] = val;
627                         }
628                 }
629                 break;
630         case 2:
631                 /* lxvh8x */
632                 hp = mem;
633                 for (j = 0; j < size / 2; ++j) {
634                         i = IS_LE ? 7 - j : j;
635                         reg->h[i] = *hp++;
636                 }
637                 break;
638         case 1:
639                 /* lxvb16x */
640                 bp = mem;
641                 for (j = 0; j < size; ++j) {
642                         i = IS_LE ? 15 - j : j;
643                         reg->b[i] = *bp++;
644                 }
645                 break;
646         }
647 }
648 EXPORT_SYMBOL_GPL(emulate_vsx_load);
649 NOKPROBE_SYMBOL(emulate_vsx_load);
650
651 void emulate_vsx_store(struct instruction_op *op, const union vsx_reg *reg,
652                        void *mem)
653 {
654         int size, write_size;
655         int i, j;
656         union vsx_reg buf;
657         unsigned int *wp;
658         unsigned short *hp;
659         unsigned char *bp;
660
661         size = GETSIZE(op->type);
662
663         switch (op->element_size) {
664         case 16:
665                 /* stxv, stxvx, stxvl, stxvll */
666                 if (size == 0)
667                         break;
668                 if (IS_LE && (op->vsx_flags & VSX_LDLEFT)) {
669                         /* reverse 16 bytes */
670                         buf.d[0] = byterev_8(reg->d[1]);
671                         buf.d[1] = byterev_8(reg->d[0]);
672                         reg = &buf;
673                 }
674                 memcpy(mem, reg, size);
675                 break;
676         case 8:
677                 /* scalar stores, stxvd2x */
678                 write_size = (size >= 8) ? 8 : size;
679                 i = IS_LE ? 8 : 8 - write_size;
680                 if (size < 8 && op->vsx_flags & VSX_FPCONV) {
681                         buf.d[0] = buf.d[1] = 0;
682                         preempt_disable();
683                         conv_dp_to_sp(&reg->dp[IS_LE], &buf.fp[1 + IS_LE]);
684                         preempt_enable();
685                         reg = &buf;
686                 }
687                 memcpy(mem, &reg->b[i], write_size);
688                 if (size == 16)
689                         memcpy(mem + 8, &reg->d[IS_BE], 8);
690                 break;
691         case 4:
692                 /* stxvw4x */
693                 wp = mem;
694                 for (j = 0; j < size / 4; ++j) {
695                         i = IS_LE ? 3 - j : j;
696                         *wp++ = reg->w[i];
697                 }
698                 break;
699         case 2:
700                 /* stxvh8x */
701                 hp = mem;
702                 for (j = 0; j < size / 2; ++j) {
703                         i = IS_LE ? 7 - j : j;
704                         *hp++ = reg->h[i];
705                 }
706                 break;
707         case 1:
708                 /* stxvb16x */
709                 bp = mem;
710                 for (j = 0; j < size; ++j) {
711                         i = IS_LE ? 15 - j : j;
712                         *bp++ = reg->b[i];
713                 }
714                 break;
715         }
716 }
717 EXPORT_SYMBOL_GPL(emulate_vsx_store);
718 NOKPROBE_SYMBOL(emulate_vsx_store);
719
720 static nokprobe_inline int do_vsx_load(struct instruction_op *op,
721                                        unsigned long ea, struct pt_regs *regs)
722 {
723         int reg = op->reg;
724         u8 mem[16];
725         union vsx_reg buf;
726         int size = GETSIZE(op->type);
727
728         if (!address_ok(regs, ea, size) || copy_mem_in(mem, ea, size))
729                 return -EFAULT;
730
731         emulate_vsx_load(op, &buf, mem);
732         preempt_disable();
733         if (reg < 32) {
734                 /* FP regs + extensions */
735                 if (regs->msr & MSR_FP) {
736                         load_vsrn(reg, &buf);
737                 } else {
738                         current->thread.fp_state.fpr[reg][0] = buf.d[0];
739                         current->thread.fp_state.fpr[reg][1] = buf.d[1];
740                 }
741         } else {
742                 if (regs->msr & MSR_VEC)
743                         load_vsrn(reg, &buf);
744                 else
745                         current->thread.vr_state.vr[reg - 32] = buf.v;
746         }
747         preempt_enable();
748         return 0;
749 }
750
751 static nokprobe_inline int do_vsx_store(struct instruction_op *op,
752                                         unsigned long ea, struct pt_regs *regs)
753 {
754         int reg = op->reg;
755         u8 mem[16];
756         union vsx_reg buf;
757         int size = GETSIZE(op->type);
758
759         if (!address_ok(regs, ea, size))
760                 return -EFAULT;
761
762         preempt_disable();
763         if (reg < 32) {
764                 /* FP regs + extensions */
765                 if (regs->msr & MSR_FP) {
766                         store_vsrn(reg, &buf);
767                 } else {
768                         buf.d[0] = current->thread.fp_state.fpr[reg][0];
769                         buf.d[1] = current->thread.fp_state.fpr[reg][1];
770                 }
771         } else {
772                 if (regs->msr & MSR_VEC)
773                         store_vsrn(reg, &buf);
774                 else
775                         buf.v = current->thread.vr_state.vr[reg - 32];
776         }
777         preempt_enable();
778         emulate_vsx_store(op, &buf, mem);
779         return  copy_mem_out(mem, ea, size);
780 }
781 #endif /* CONFIG_VSX */
782
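/*
 * Inline-asm wrappers that issue a single indexed instruction (passed as
 * "op") against a user address, with an exception-table fixup that sets
 * err to -EFAULT on a fault.  __put_user_asmx also captures CR after the
 * store, for conditional-store (stcx.-style) forms.
 */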
783 #define __put_user_asmx(x, addr, err, op, cr)           \
784         __asm__ __volatile__(                           \
785                 "1:     " op " %2,0,%3\n"               \
786                 "       mfcr    %1\n"                   \
787                 "2:\n"                                  \
788                 ".section .fixup,\"ax\"\n"              \
789                 "3:     li      %0,%4\n"                \
790                 "       b       2b\n"                   \
791                 ".previous\n"                           \
792                 EX_TABLE(1b, 3b)                        \
793                 : "=r" (err), "=r" (cr)                 \
794                 : "r" (x), "r" (addr), "i" (-EFAULT), "0" (err))
795
796 #define __get_user_asmx(x, addr, err, op)               \
797         __asm__ __volatile__(                           \
798                 "1:     "op" %1,0,%2\n"                 \
799                 "2:\n"                                  \
800                 ".section .fixup,\"ax\"\n"              \
801                 "3:     li      %0,%3\n"                \
802                 "       b       2b\n"                   \
803                 ".previous\n"                           \
804                 EX_TABLE(1b, 3b)                        \
805                 : "=r" (err), "=r" (x)                  \
806                 : "r" (addr), "i" (-EFAULT), "0" (err))
807
808 #define __cacheop_user_asmx(addr, err, op)              \
809         __asm__ __volatile__(                           \
810                 "1:     "op" 0,%1\n"                    \
811                 "2:\n"                                  \
812                 ".section .fixup,\"ax\"\n"              \
813                 "3:     li      %0,%3\n"                \
814                 "       b       2b\n"                   \
815                 ".previous\n"                           \
816                 EX_TABLE(1b, 3b)                        \
817                 : "=r" (err)                            \
818                 : "r" (addr), "i" (-EFAULT), "0" (err))
819
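/*
 * Compute the CR0 value for a record-form (Rc=1) result: LT/GT/EQ from a
 * signed comparison of the result with zero, SO copied from XER[SO].
 */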
820 static nokprobe_inline void set_cr0(const struct pt_regs *regs,
821                                     struct instruction_op *op, int rd)
822 {
823         long val = regs->gpr[rd];
824
825         op->type |= SETCC;
826         op->ccval = (regs->ccr & 0x0fffffff) | ((regs->xer >> 3) & 0x10000000);
827 #ifdef __powerpc64__
828         if (!(regs->msr & MSR_64BIT))
829                 val = (int) val;
830 #endif
831         if (val < 0)
832                 op->ccval |= 0x80000000;
833         else if (val > 0)
834                 op->ccval |= 0x40000000;
835         else
836                 op->ccval |= 0x20000000;
837 }
838
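/*
 * The carry out is detected by unsigned wrap-around: the (possibly
 * truncated) result being less than val1, or equal to it when carry_in
 * caused an exact wrap, means XER[CA] must be set.
 */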
839 static nokprobe_inline void add_with_carry(const struct pt_regs *regs,
840                                      struct instruction_op *op, int rd,
841                                      unsigned long val1, unsigned long val2,
842                                      unsigned long carry_in)
843 {
844         unsigned long val = val1 + val2;
845
846         if (carry_in)
847                 ++val;
848         op->type = COMPUTE + SETREG + SETXER;
849         op->reg = rd;
850         op->val = val;
851 #ifdef __powerpc64__
852         if (!(regs->msr & MSR_64BIT)) {
853                 val = (unsigned int) val;
854                 val1 = (unsigned int) val1;
855         }
856 #endif
857         op->xerval = regs->xer;
858         if (val < val1 || (carry_in && val == val1))
859                 op->xerval |= XER_CA;
860         else
861                 op->xerval &= ~XER_CA;
862 }
863
864 static nokprobe_inline void do_cmp_signed(const struct pt_regs *regs,
865                                           struct instruction_op *op,
866                                           long v1, long v2, int crfld)
867 {
868         unsigned int crval, shift;
869
870         op->type = COMPUTE + SETCC;
871         crval = (regs->xer >> 31) & 1;          /* get SO bit */
872         if (v1 < v2)
873                 crval |= 8;
874         else if (v1 > v2)
875                 crval |= 4;
876         else
877                 crval |= 2;
878         shift = (7 - crfld) * 4;
879         op->ccval = (regs->ccr & ~(0xf << shift)) | (crval << shift);
880 }
881
882 static nokprobe_inline void do_cmp_unsigned(const struct pt_regs *regs,
883                                             struct instruction_op *op,
884                                             unsigned long v1,
885                                             unsigned long v2, int crfld)
886 {
887         unsigned int crval, shift;
888
889         op->type = COMPUTE + SETCC;
890         crval = (regs->xer >> 31) & 1;          /* get SO bit */
891         if (v1 < v2)
892                 crval |= 8;
893         else if (v1 > v2)
894                 crval |= 4;
895         else
896                 crval |= 2;
897         shift = (7 - crfld) * 4;
898         op->ccval = (regs->ccr & ~(0xf << shift)) | (crval << shift);
899 }
900
901 static nokprobe_inline void do_cmpb(const struct pt_regs *regs,
902                                     struct instruction_op *op,
903                                     unsigned long v1, unsigned long v2)
904 {
905         unsigned long long out_val, mask;
906         int i;
907
908         out_val = 0;
909         for (i = 0; i < 8; i++) {
910                 mask = 0xffUL << (i * 8);
911                 if ((v1 & mask) == (v2 & mask))
912                         out_val |= mask;
913         }
914         op->val = out_val;
915 }
916
917 /*
918  * The size parameter is used to adjust the equivalent popcnt instruction.
919  * popcntb = 8, popcntw = 32, popcntd = 64
920  */
921 static nokprobe_inline void do_popcnt(const struct pt_regs *regs,
922                                       struct instruction_op *op,
923                                       unsigned long v1, int size)
924 {
925         unsigned long long out = v1;
926
927         out -= (out >> 1) & 0x5555555555555555;
928         out = (0x3333333333333333 & out) + (0x3333333333333333 & (out >> 2));
929         out = (out + (out >> 4)) & 0x0f0f0f0f0f0f0f0f;
930
931         if (size == 8) {        /* popcntb */
932                 op->val = out;
933                 return;
934         }
935         out += out >> 8;
936         out += out >> 16;
937         if (size == 32) {       /* popcntw */
938                 op->val = out & 0x0000003f0000003f;
939                 return;
940         }
941
942         out = (out + (out >> 32)) & 0x7f;
943         op->val = out;  /* popcntd */
944 }
945
946 #ifdef CONFIG_PPC64
947 static nokprobe_inline void do_bpermd(const struct pt_regs *regs,
948                                       struct instruction_op *op,
949                                       unsigned long v1, unsigned long v2)
950 {
951         unsigned char perm, idx;
952         unsigned int i;
953
954         perm = 0;
955         for (i = 0; i < 8; i++) {
956                 idx = (v1 >> (i * 8)) & 0xff;
957                 if (idx < 64)
958                         if (v2 & PPC_BIT(idx))
959                                 perm |= 1 << i;
960         }
961         op->val = perm;
962 }
963 #endif /* CONFIG_PPC64 */
964 /*
965  * The size parameter adjusts the equivalent prty instruction.
966  * prtyw = 32, prtyd = 64
967  */
968 static nokprobe_inline void do_prty(const struct pt_regs *regs,
969                                     struct instruction_op *op,
970                                     unsigned long v, int size)
971 {
972         unsigned long long res = v ^ (v >> 8);
973
974         res ^= res >> 16;
975         if (size == 32) {               /* prtyw */
976                 op->val = res & 0x0000000100000001;
977                 return;
978         }
979
980         res ^= res >> 32;
981                 op->val = res & 1;      /* prtyd */
982 }
983
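/*
 * Build the 5-bit TO-style condition mask for tw/td: 0x10 = signed less
 * than, 0x08 = signed greater than, 0x04 = equal, 0x02 = unsigned less
 * than, 0x01 = unsigned greater than.  The callers trap when any bit of
 * this mask is also set in the instruction's TO field.
 */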
984 static nokprobe_inline int trap_compare(long v1, long v2)
985 {
986         int ret = 0;
987
988         if (v1 < v2)
989                 ret |= 0x10;
990         else if (v1 > v2)
991                 ret |= 0x08;
992         else
993                 ret |= 0x04;
994         if ((unsigned long)v1 < (unsigned long)v2)
995                 ret |= 0x02;
996         else if ((unsigned long)v1 > (unsigned long)v2)
997                 ret |= 0x01;
998         return ret;
999 }
1000
1001 /*
1002  * Elements of 32-bit rotate and mask instructions.
1003  */
1004 #define MASK32(mb, me)  ((0xffffffffUL >> (mb)) + \
1005                          ((signed long)-0x80000000L >> (me)) + ((me) >= (mb)))
1006 #ifdef __powerpc64__
1007 #define MASK64_L(mb)    (~0UL >> (mb))
1008 #define MASK64_R(me)    ((signed long)-0x8000000000000000L >> (me))
1009 #define MASK64(mb, me)  (MASK64_L(mb) + MASK64_R(me) + ((me) >= (mb)))
1010 #define DATA32(x)       (((x) & 0xffffffffUL) | (((x) & 0xffffffffUL) << 32))
1011 #else
1012 #define DATA32(x)       (x)
1013 #endif
1014 #define ROTATE(x, n)    ((n) ? (((x) << (n)) | ((x) >> (8 * sizeof(long) - (n)))) : (x))
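/*
 * ROTATE() is a left rotate within an unsigned long; for the 32-bit rlw*
 * forms the source is first doubled into both halves with DATA32() so that
 * a 64-bit rotate behaves like a 32-bit one.  MASK32(mb, me) produces the
 * mask with IBM bits mb..me set (bit 0 is 0x80000000), e.g.
 * MASK32(24, 31) == 0xff.
 */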
1015
1016 /*
1017  * Decode an instruction, and return information about it in *op
1018  * without changing *regs.
1019  * Integer arithmetic and logical instructions, branches, and barrier
1020  * instructions can be emulated just using the information in *op.
1021  *
1022  * Return value is 1 if the instruction can be emulated just by
1023  * updating *regs with the information in *op, -1 if we need the
1024  * GPRs but *regs doesn't contain the full register set, or 0
1025  * otherwise.
1026  */
1027 int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
1028                   unsigned int instr)
1029 {
1030         unsigned int opcode, ra, rb, rd, spr, u;
1031         unsigned long int imm;
1032         unsigned long int val, val2;
1033         unsigned int mb, me, sh;
1034         long ival;
1035
1036         op->type = COMPUTE;
1037
1038         opcode = instr >> 26;
1039         switch (opcode) {
1040         case 16:        /* bc */
1041                 op->type = BRANCH;
1042                 imm = (signed short)(instr & 0xfffc);
1043                 if ((instr & 2) == 0)
1044                         imm += regs->nip;
1045                 op->val = truncate_if_32bit(regs->msr, imm);
1046                 if (instr & 1)
1047                         op->type |= SETLK;
1048                 if (branch_taken(instr, regs, op))
1049                         op->type |= BRTAKEN;
1050                 return 1;
1051 #ifdef CONFIG_PPC64
1052         case 17:        /* sc */
1053                 if ((instr & 0xfe2) == 2)
1054                         op->type = SYSCALL;
1055                 else
1056                         op->type = UNKNOWN;
1057                 return 0;
1058 #endif
1059         case 18:        /* b */
1060                 op->type = BRANCH | BRTAKEN;
1061                 imm = instr & 0x03fffffc;
1062                 if (imm & 0x02000000)
1063                         imm -= 0x04000000;
1064                 if ((instr & 2) == 0)
1065                         imm += regs->nip;
1066                 op->val = truncate_if_32bit(regs->msr, imm);
1067                 if (instr & 1)
1068                         op->type |= SETLK;
1069                 return 1;
1070         case 19:
1071                 switch ((instr >> 1) & 0x3ff) {
1072                 case 0:         /* mcrf */
1073                         op->type = COMPUTE + SETCC;
1074                         rd = 7 - ((instr >> 23) & 0x7);
1075                         ra = 7 - ((instr >> 18) & 0x7);
1076                         rd *= 4;
1077                         ra *= 4;
1078                         val = (regs->ccr >> ra) & 0xf;
1079                         op->ccval = (regs->ccr & ~(0xfUL << rd)) | (val << rd);
1080                         return 1;
1081
1082                 case 16:        /* bclr */
1083                 case 528:       /* bcctr */
1084                         op->type = BRANCH;
1085                         imm = (instr & 0x400)? regs->ctr: regs->link;
1086                         op->val = truncate_if_32bit(regs->msr, imm);
1087                         if (instr & 1)
1088                                 op->type |= SETLK;
1089                         if (branch_taken(instr, regs, op))
1090                                 op->type |= BRTAKEN;
1091                         return 1;
1092
1093                 case 18:        /* rfid, scary */
1094                         if (regs->msr & MSR_PR)
1095                                 goto priv;
1096                         op->type = RFI;
1097                         return 0;
1098
1099                 case 150:       /* isync */
1100                         op->type = BARRIER | BARRIER_ISYNC;
1101                         return 1;
1102
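                /*
                 * For the CR logical ops, bits 5-8 of the minor opcode form
                 * the truth table of the operation; it is indexed below by
                 * the two source CR bits to produce the result bit.
                 */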
1103                 case 33:        /* crnor */
1104                 case 129:       /* crandc */
1105                 case 193:       /* crxor */
1106                 case 225:       /* crnand */
1107                 case 257:       /* crand */
1108                 case 289:       /* creqv */
1109                 case 417:       /* crorc */
1110                 case 449:       /* cror */
1111                         op->type = COMPUTE + SETCC;
1112                         ra = (instr >> 16) & 0x1f;
1113                         rb = (instr >> 11) & 0x1f;
1114                         rd = (instr >> 21) & 0x1f;
1115                         ra = (regs->ccr >> (31 - ra)) & 1;
1116                         rb = (regs->ccr >> (31 - rb)) & 1;
1117                         val = (instr >> (6 + ra * 2 + rb)) & 1;
1118                         op->ccval = (regs->ccr & ~(1UL << (31 - rd))) |
1119                                 (val << (31 - rd));
1120                         return 1;
1121                 }
1122                 break;
1123         case 31:
1124                 switch ((instr >> 1) & 0x3ff) {
1125                 case 598:       /* sync */
1126                         op->type = BARRIER + BARRIER_SYNC;
1127 #ifdef __powerpc64__
1128                         switch ((instr >> 21) & 3) {
1129                         case 1:         /* lwsync */
1130                                 op->type = BARRIER + BARRIER_LWSYNC;
1131                                 break;
1132                         case 2:         /* ptesync */
1133                                 op->type = BARRIER + BARRIER_PTESYNC;
1134                                 break;
1135                         }
1136 #endif
1137                         return 1;
1138
1139                 case 854:       /* eieio */
1140                         op->type = BARRIER + BARRIER_EIEIO;
1141                         return 1;
1142                 }
1143                 break;
1144         }
1145
1146         /* Following cases refer to regs->gpr[], so we need all regs */
1147         if (!FULL_REGS(regs))
1148                 return -1;
1149
1150         rd = (instr >> 21) & 0x1f;
1151         ra = (instr >> 16) & 0x1f;
1152         rb = (instr >> 11) & 0x1f;
1153
1154         switch (opcode) {
1155 #ifdef __powerpc64__
1156         case 2:         /* tdi */
1157                 if (rd & trap_compare(regs->gpr[ra], (short) instr))
1158                         goto trap;
1159                 return 1;
1160 #endif
1161         case 3:         /* twi */
1162                 if (rd & trap_compare((int)regs->gpr[ra], (short) instr))
1163                         goto trap;
1164                 return 1;
1165
1166         case 7:         /* mulli */
1167                 op->val = regs->gpr[ra] * (short) instr;
1168                 goto compute_done;
1169
1170         case 8:         /* subfic */
1171                 imm = (short) instr;
1172                 add_with_carry(regs, op, rd, ~regs->gpr[ra], imm, 1);
1173                 return 1;
1174
1175         case 10:        /* cmpli */
1176                 imm = (unsigned short) instr;
1177                 val = regs->gpr[ra];
1178 #ifdef __powerpc64__
1179                 if ((rd & 1) == 0)
1180                         val = (unsigned int) val;
1181 #endif
1182                 do_cmp_unsigned(regs, op, val, imm, rd >> 2);
1183                 return 1;
1184
1185         case 11:        /* cmpi */
1186                 imm = (short) instr;
1187                 val = regs->gpr[ra];
1188 #ifdef __powerpc64__
1189                 if ((rd & 1) == 0)
1190                         val = (int) val;
1191 #endif
1192                 do_cmp_signed(regs, op, val, imm, rd >> 2);
1193                 return 1;
1194
1195         case 12:        /* addic */
1196                 imm = (short) instr;
1197                 add_with_carry(regs, op, rd, regs->gpr[ra], imm, 0);
1198                 return 1;
1199
1200         case 13:        /* addic. */
1201                 imm = (short) instr;
1202                 add_with_carry(regs, op, rd, regs->gpr[ra], imm, 0);
1203                 set_cr0(regs, op, rd);
1204                 return 1;
1205
1206         case 14:        /* addi */
1207                 imm = (short) instr;
1208                 if (ra)
1209                         imm += regs->gpr[ra];
1210                 op->val = imm;
1211                 goto compute_done;
1212
1213         case 15:        /* addis */
1214                 imm = ((short) instr) << 16;
1215                 if (ra)
1216                         imm += regs->gpr[ra];
1217                 op->val = imm;
1218                 goto compute_done;
1219
1220         case 19:
1221                 if (((instr >> 1) & 0x1f) == 2) {
1222                         /* addpcis */
1223                         imm = (short) (instr & 0xffc1); /* d0 + d2 fields */
1224                         imm |= (instr >> 15) & 0x3e;    /* d1 field */
1225                         op->val = regs->nip + (imm << 16) + 4;
1226                         goto compute_done;
1227                 }
1228                 op->type = UNKNOWN;
1229                 return 0;
1230
1231         case 20:        /* rlwimi */
1232                 mb = (instr >> 6) & 0x1f;
1233                 me = (instr >> 1) & 0x1f;
1234                 val = DATA32(regs->gpr[rd]);
1235                 imm = MASK32(mb, me);
1236                 op->val = (regs->gpr[ra] & ~imm) | (ROTATE(val, rb) & imm);
1237                 goto logical_done;
1238
1239         case 21:        /* rlwinm */
1240                 mb = (instr >> 6) & 0x1f;
1241                 me = (instr >> 1) & 0x1f;
1242                 val = DATA32(regs->gpr[rd]);
1243                 op->val = ROTATE(val, rb) & MASK32(mb, me);
1244                 goto logical_done;
1245
1246         case 23:        /* rlwnm */
1247                 mb = (instr >> 6) & 0x1f;
1248                 me = (instr >> 1) & 0x1f;
1249                 rb = regs->gpr[rb] & 0x1f;
1250                 val = DATA32(regs->gpr[rd]);
1251                 op->val = ROTATE(val, rb) & MASK32(mb, me);
1252                 goto logical_done;
1253
1254         case 24:        /* ori */
1255                 op->val = regs->gpr[rd] | (unsigned short) instr;
1256                 goto logical_done_nocc;
1257
1258         case 25:        /* oris */
1259                 imm = (unsigned short) instr;
1260                 op->val = regs->gpr[rd] | (imm << 16);
1261                 goto logical_done_nocc;
1262
1263         case 26:        /* xori */
1264                 op->val = regs->gpr[rd] ^ (unsigned short) instr;
1265                 goto logical_done_nocc;
1266
1267         case 27:        /* xoris */
1268                 imm = (unsigned short) instr;
1269                 op->val = regs->gpr[rd] ^ (imm << 16);
1270                 goto logical_done_nocc;
1271
1272         case 28:        /* andi. */
1273                 op->val = regs->gpr[rd] & (unsigned short) instr;
1274                 set_cr0(regs, op, ra);
1275                 goto logical_done_nocc;
1276
1277         case 29:        /* andis. */
1278                 imm = (unsigned short) instr;
1279                 op->val = regs->gpr[rd] & (imm << 16);
1280                 set_cr0(regs, op, ra);
1281                 goto logical_done_nocc;
1282
1283 #ifdef __powerpc64__
1284         case 30:        /* rld* */
1285                 mb = ((instr >> 6) & 0x1f) | (instr & 0x20);
1286                 val = regs->gpr[rd];
1287                 if ((instr & 0x10) == 0) {
1288                         sh = rb | ((instr & 2) << 4);
1289                         val = ROTATE(val, sh);
1290                         switch ((instr >> 2) & 3) {
1291                         case 0:         /* rldicl */
1292                                 val &= MASK64_L(mb);
1293                                 break;
1294                         case 1:         /* rldicr */
1295                                 val &= MASK64_R(mb);
1296                                 break;
1297                         case 2:         /* rldic */
1298                                 val &= MASK64(mb, 63 - sh);
1299                                 break;
1300                         case 3:         /* rldimi */
1301                                 imm = MASK64(mb, 63 - sh);
1302                                 val = (regs->gpr[ra] & ~imm) |
1303                                         (val & imm);
1304                         }
1305                         op->val = val;
1306                         goto logical_done;
1307                 } else {
1308                         sh = regs->gpr[rb] & 0x3f;
1309                         val = ROTATE(val, sh);
1310                         switch ((instr >> 1) & 7) {
1311                         case 0:         /* rldcl */
1312                                 op->val = val & MASK64_L(mb);
1313                                 goto logical_done;
1314                         case 1:         /* rldcr */
1315                                 op->val = val & MASK64_R(mb);
1316                                 goto logical_done;
1317                         }
1318                 }
1319 #endif
1320                 op->type = UNKNOWN;     /* illegal instruction */
1321                 return 0;
1322
1323         case 31:
1324                 /* isel occupies 32 minor opcodes */
1325                 if (((instr >> 1) & 0x1f) == 15) {
1326                         mb = (instr >> 6) & 0x1f; /* bc field */
1327                         val = (regs->ccr >> (31 - mb)) & 1;
1328                         val2 = (ra) ? regs->gpr[ra] : 0;
1329
1330                         op->val = (val) ? val2 : regs->gpr[rb];
1331                         goto compute_done;
1332                 }
1333
1334                 switch ((instr >> 1) & 0x3ff) {
1335                 case 4:         /* tw */
1336                         if (rd == 0x1f ||
1337                             (rd & trap_compare((int)regs->gpr[ra],
1338                                                (int)regs->gpr[rb])))
1339                                 goto trap;
1340                         return 1;
1341 #ifdef __powerpc64__
1342                 case 68:        /* td */
1343                         if (rd & trap_compare(regs->gpr[ra], regs->gpr[rb]))
1344                                 goto trap;
1345                         return 1;
1346 #endif
1347                 case 83:        /* mfmsr */
1348                         if (regs->msr & MSR_PR)
1349                                 goto priv;
1350                         op->type = MFMSR;
1351                         op->reg = rd;
1352                         return 0;
1353                 case 146:       /* mtmsr */
1354                         if (regs->msr & MSR_PR)
1355                                 goto priv;
1356                         op->type = MTMSR;
1357                         op->reg = rd;
1358                         op->val = 0xffffffff & ~(MSR_ME | MSR_LE);
1359                         return 0;
1360 #ifdef CONFIG_PPC64
1361                 case 178:       /* mtmsrd */
1362                         if (regs->msr & MSR_PR)
1363                                 goto priv;
1364                         op->type = MTMSR;
1365                         op->reg = rd;
1366                         /* only MSR_EE and MSR_RI get changed if bit 15 set */
1367                         /* mtmsrd doesn't change MSR_HV, MSR_ME or MSR_LE */
1368                         imm = (instr & 0x10000)? 0x8002: 0xefffffffffffeffeUL;
1369                         op->val = imm;
1370                         return 0;
1371 #endif
1372
1373                 case 19:        /* mfcr */
1374                         imm = 0xffffffffUL;
1375                         if ((instr >> 20) & 1) {
1376                                 imm = 0xf0000000UL;
1377                                 for (sh = 0; sh < 8; ++sh) {
1378                                         if (instr & (0x80000 >> sh))
1379                                                 break;
1380                                         imm >>= 4;
1381                                 }
1382                         }
1383                         op->val = regs->ccr & imm;
1384                         goto compute_done;
1385
1386                 case 144:       /* mtcrf */
1387                         op->type = COMPUTE + SETCC;
1388                         imm = 0xf0000000UL;
1389                         val = regs->gpr[rd];
1390                         op->val = regs->ccr;
1391                         for (sh = 0; sh < 8; ++sh) {
1392                                 if (instr & (0x80000 >> sh))
1393                                         op->val = (op->val & ~imm) |
1394                                                 (val & imm);
1395                                 imm >>= 4;
1396                         }
1397                         return 1;
1398
1399                 case 339:       /* mfspr */
1400                         spr = ((instr >> 16) & 0x1f) | ((instr >> 6) & 0x3e0);
1401                         op->type = MFSPR;
1402                         op->reg = rd;
1403                         op->spr = spr;
1404                         if (spr == SPRN_XER || spr == SPRN_LR ||
1405                             spr == SPRN_CTR)
1406                                 return 1;
1407                         return 0;
1408
1409                 case 467:       /* mtspr */
1410                         spr = ((instr >> 16) & 0x1f) | ((instr >> 6) & 0x3e0);
1411                         op->type = MTSPR;
1412                         op->val = regs->gpr[rd];
1413                         op->spr = spr;
1414                         if (spr == SPRN_XER || spr == SPRN_LR ||
1415                             spr == SPRN_CTR)
1416                                 return 1;
1417                         return 0;
1418
1419 /*
1420  * Compare instructions
1421  */
1422                 case 0: /* cmp */
1423                         val = regs->gpr[ra];
1424                         val2 = regs->gpr[rb];
1425 #ifdef __powerpc64__
1426                         if ((rd & 1) == 0) {
1427                                 /* word (32-bit) compare */
1428                                 val = (int) val;
1429                                 val2 = (int) val2;
1430                         }
1431 #endif
1432                         do_cmp_signed(regs, op, val, val2, rd >> 2);
1433                         return 1;
1434
1435                 case 32:        /* cmpl */
1436                         val = regs->gpr[ra];
1437                         val2 = regs->gpr[rb];
1438 #ifdef __powerpc64__
1439                         if ((rd & 1) == 0) {
1440                                 /* word (32-bit) compare */
1441                                 val = (unsigned int) val;
1442                                 val2 = (unsigned int) val2;
1443                         }
1444 #endif
1445                         do_cmp_unsigned(regs, op, val, val2, rd >> 2);
1446                         return 1;
1447
1448                 case 508: /* cmpb */
1449                         do_cmpb(regs, op, regs->gpr[rd], regs->gpr[rb]);
1450                         goto logical_done_nocc;
1451
1452 /*
1453  * Arithmetic instructions
1454  */
1455                 case 8: /* subfc */
1456                         add_with_carry(regs, op, rd, ~regs->gpr[ra],
1457                                        regs->gpr[rb], 1);
1458                         goto arith_done;
1459 #ifdef __powerpc64__
1460                 case 9: /* mulhdu */
1461                         asm("mulhdu %0,%1,%2" : "=r" (op->val) :
1462                             "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1463                         goto arith_done;
1464 #endif
1465                 case 10:        /* addc */
1466                         add_with_carry(regs, op, rd, regs->gpr[ra],
1467                                        regs->gpr[rb], 0);
1468                         goto arith_done;
1469
1470                 case 11:        /* mulhwu */
1471                         asm("mulhwu %0,%1,%2" : "=r" (op->val) :
1472                             "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1473                         goto arith_done;
1474
1475                 case 40:        /* subf */
1476                         op->val = regs->gpr[rb] - regs->gpr[ra];
1477                         goto arith_done;
1478 #ifdef __powerpc64__
1479                 case 73:        /* mulhd */
1480                         asm("mulhd %0,%1,%2" : "=r" (op->val) :
1481                             "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1482                         goto arith_done;
1483 #endif
1484                 case 75:        /* mulhw */
1485                         asm("mulhw %0,%1,%2" : "=r" (op->val) :
1486                             "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1487                         goto arith_done;
1488
1489                 case 104:       /* neg */
1490                         op->val = -regs->gpr[ra];
1491                         goto arith_done;
1492
1493                 case 136:       /* subfe */
1494                         add_with_carry(regs, op, rd, ~regs->gpr[ra],
1495                                        regs->gpr[rb], regs->xer & XER_CA);
1496                         goto arith_done;
1497
1498                 case 138:       /* adde */
1499                         add_with_carry(regs, op, rd, regs->gpr[ra],
1500                                        regs->gpr[rb], regs->xer & XER_CA);
1501                         goto arith_done;
1502
1503                 case 200:       /* subfze */
1504                         add_with_carry(regs, op, rd, ~regs->gpr[ra], 0L,
1505                                        regs->xer & XER_CA);
1506                         goto arith_done;
1507
1508                 case 202:       /* addze */
1509                         add_with_carry(regs, op, rd, regs->gpr[ra], 0L,
1510                                        regs->xer & XER_CA);
1511                         goto arith_done;
1512
1513                 case 232:       /* subfme */
1514                         add_with_carry(regs, op, rd, ~regs->gpr[ra], -1L,
1515                                        regs->xer & XER_CA);
1516                         goto arith_done;
1517 #ifdef __powerpc64__
1518                 case 233:       /* mulld */
1519                         op->val = regs->gpr[ra] * regs->gpr[rb];
1520                         goto arith_done;
1521 #endif
1522                 case 234:       /* addme */
1523                         add_with_carry(regs, op, rd, regs->gpr[ra], -1L,
1524                                        regs->xer & XER_CA);
1525                         goto arith_done;
1526
1527                 case 235:       /* mullw */
1528                         op->val = (unsigned int) regs->gpr[ra] *
1529                                 (unsigned int) regs->gpr[rb];
1530                         goto arith_done;
1531
1532                 case 266:       /* add */
1533                         op->val = regs->gpr[ra] + regs->gpr[rb];
1534                         goto arith_done;
1535 #ifdef __powerpc64__
1536                 case 457:       /* divdu */
1537                         op->val = regs->gpr[ra] / regs->gpr[rb];
1538                         goto arith_done;
1539 #endif
1540                 case 459:       /* divwu */
1541                         op->val = (unsigned int) regs->gpr[ra] /
1542                                 (unsigned int) regs->gpr[rb];
1543                         goto arith_done;
1544 #ifdef __powerpc64__
1545                 case 489:       /* divd */
1546                         op->val = (long int) regs->gpr[ra] /
1547                                 (long int) regs->gpr[rb];
1548                         goto arith_done;
1549 #endif
1550                 case 491:       /* divw */
1551                         op->val = (int) regs->gpr[ra] /
1552                                 (int) regs->gpr[rb];
1553                         goto arith_done;
1554
1555
1556 /*
1557  * Logical instructions
1558  */
1559                 case 26:        /* cntlzw */
1560                         val = (unsigned int) regs->gpr[rd];
                             op->val = (val ? __builtin_clz((unsigned int) val) : 32);
1561                         goto logical_done;
1562 #ifdef __powerpc64__
1563                 case 58:        /* cntlzd */
1564                         val = regs->gpr[rd];
                             op->val = (val ? __builtin_clzl(val) : 64);
1565                         goto logical_done;
1566 #endif
1567                 case 28:        /* and */
1568                         op->val = regs->gpr[rd] & regs->gpr[rb];
1569                         goto logical_done;
1570
1571                 case 60:        /* andc */
1572                         op->val = regs->gpr[rd] & ~regs->gpr[rb];
1573                         goto logical_done;
1574
1575                 case 122:       /* popcntb */
1576                         do_popcnt(regs, op, regs->gpr[rd], 8);
1577                         goto logical_done_nocc;
1578
1579                 case 124:       /* nor */
1580                         op->val = ~(regs->gpr[rd] | regs->gpr[rb]);
1581                         goto logical_done;
1582
1583                 case 154:       /* prtyw */
1584                         do_prty(regs, op, regs->gpr[rd], 32);
1585                         goto logical_done_nocc;
1586
1587                 case 186:       /* prtyd */
1588                         do_prty(regs, op, regs->gpr[rd], 64);
1589                         goto logical_done_nocc;
1590 #ifdef CONFIG_PPC64
1591                 case 252:       /* bpermd */
1592                         do_bpermd(regs, op, regs->gpr[rd], regs->gpr[rb]);
1593                         goto logical_done_nocc;
1594 #endif
1595                 case 284:       /* eqv */
1596                         op->val = ~(regs->gpr[rd] ^ regs->gpr[rb]);
1597                         goto logical_done;
1598
1599                 case 316:       /* xor */
1600                         op->val = regs->gpr[rd] ^ regs->gpr[rb];
1601                         goto logical_done;
1602
1603                 case 378:       /* popcntw */
1604                         do_popcnt(regs, op, regs->gpr[rd], 32);
1605                         goto logical_done_nocc;
1606
1607                 case 412:       /* orc */
1608                         op->val = regs->gpr[rd] | ~regs->gpr[rb];
1609                         goto logical_done;
1610
1611                 case 444:       /* or */
1612                         op->val = regs->gpr[rd] | regs->gpr[rb];
1613                         goto logical_done;
1614
1615                 case 476:       /* nand */
1616                         op->val = ~(regs->gpr[rd] & regs->gpr[rb]);
1617                         goto logical_done;
1618 #ifdef CONFIG_PPC64
1619                 case 506:       /* popcntd */
1620                         do_popcnt(regs, op, regs->gpr[rd], 64);
1621                         goto logical_done_nocc;
1622 #endif
1623                 case 922:       /* extsh */
1624                         op->val = (signed short) regs->gpr[rd];
1625                         goto logical_done;
1626
1627                 case 954:       /* extsb */
1628                         op->val = (signed char) regs->gpr[rd];
1629                         goto logical_done;
1630 #ifdef __powerpc64__
1631                 case 986:       /* extsw */
1632                         op->val = (signed int) regs->gpr[rd];
1633                         goto logical_done;
1634 #endif
1635
1636 /*
1637  * Shift instructions
1638  */
1639                 case 24:        /* slw */
1640                         sh = regs->gpr[rb] & 0x3f;
1641                         if (sh < 32)
1642                                 op->val = (regs->gpr[rd] << sh) & 0xffffffffUL;
1643                         else
1644                                 op->val = 0;
1645                         goto logical_done;
1646
1647                 case 536:       /* srw */
1648                         sh = regs->gpr[rb] & 0x3f;
1649                         if (sh < 32)
1650                                 op->val = (regs->gpr[rd] & 0xffffffffUL) >> sh;
1651                         else
1652                                 op->val = 0;
1653                         goto logical_done;
1654
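                 /*
                  * Worked example of the XER carry (CA) rule used by the
                  * sraw/srawi/srad/sradi cases below: CA is set only when the
                  * source is negative and at least one 1 bit is shifted out.
                  * For instance, srawi rd,rs,1 with rs = -5 (...11111011)
                  * gives rd = -3 and shifts out a 1, so CA = 1; with rs = -4
                  * (...11111100) it gives rd = -2 and shifts out a 0, so
                  * CA = 0.
                  */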
1655                 case 792:       /* sraw */
1656                         op->type = COMPUTE + SETREG + SETXER;
1657                         sh = regs->gpr[rb] & 0x3f;
1658                         ival = (signed int) regs->gpr[rd];
1659                         op->val = ival >> (sh < 32 ? sh : 31);
1660                         op->xerval = regs->xer;
1661                         if (ival < 0 && (sh >= 32 || (ival & ((1ul << sh) - 1)) != 0))
1662                                 op->xerval |= XER_CA;
1663                         else
1664                                 op->xerval &= ~XER_CA;
1665                         goto logical_done;
1666
1667                 case 824:       /* srawi */
1668                         op->type = COMPUTE + SETREG + SETXER;
1669                         sh = rb;
1670                         ival = (signed int) regs->gpr[rd];
1671                         op->val = ival >> sh;
1672                         op->xerval = regs->xer;
1673                         if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
1674                                 op->xerval |= XER_CA;
1675                         else
1676                                 op->xerval &= ~XER_CA;
1677                         goto logical_done;
1678
1679 #ifdef __powerpc64__
1680                 case 27:        /* sld */
1681                         sh = regs->gpr[rb] & 0x7f;
1682                         if (sh < 64)
1683                                 op->val = regs->gpr[rd] << sh;
1684                         else
1685                                 op->val = 0;
1686                         goto logical_done;
1687
1688                 case 539:       /* srd */
1689                         sh = regs->gpr[rb] & 0x7f;
1690                         if (sh < 64)
1691                                 op->val = regs->gpr[rd] >> sh;
1692                         else
1693                                 op->val = 0;
1694                         goto logical_done;
1695
1696                 case 794:       /* srad */
1697                         op->type = COMPUTE + SETREG + SETXER;
1698                         sh = regs->gpr[rb] & 0x7f;
1699                         ival = (signed long int) regs->gpr[rd];
1700                         op->val = ival >> (sh < 64 ? sh : 63);
1701                         op->xerval = regs->xer;
1702                         if (ival < 0 && (sh >= 64 || (ival & ((1ul << sh) - 1)) != 0))
1703                                 op->xerval |= XER_CA;
1704                         else
1705                                 op->xerval &= ~XER_CA;
1706                         goto logical_done;
1707
1708                 case 826:       /* sradi with sh_5 = 0 */
1709                 case 827:       /* sradi with sh_5 = 1 */
1710                         op->type = COMPUTE + SETREG + SETXER;
1711                         sh = rb | ((instr & 2) << 4);
1712                         ival = (signed long int) regs->gpr[rd];
1713                         op->val = ival >> sh;
1714                         op->xerval = regs->xer;
1715                         if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
1716                                 op->xerval |= XER_CA;
1717                         else
1718                                 op->xerval &= ~XER_CA;
1719                         goto logical_done;
1720 #endif /* __powerpc64__ */
1721
1722 /*
1723  * Cache instructions
1724  */
1725                 case 54:        /* dcbst */
1726                         op->type = MKOP(CACHEOP, DCBST, 0);
1727                         op->ea = xform_ea(instr, regs);
1728                         return 0;
1729
1730                 case 86:        /* dcbf */
1731                         op->type = MKOP(CACHEOP, DCBF, 0);
1732                         op->ea = xform_ea(instr, regs);
1733                         return 0;
1734
1735                 case 246:       /* dcbtst */
1736                         op->type = MKOP(CACHEOP, DCBTST, 0);
1737                         op->ea = xform_ea(instr, regs);
1738                         op->reg = rd;
1739                         return 0;
1740
1741                 case 278:       /* dcbt */
1742                         op->type = MKOP(CACHEOP, DCBT, 0);
1743                         op->ea = xform_ea(instr, regs);
1744                         op->reg = rd;
1745                         return 0;
1746
1747                 case 982:       /* icbi */
1748                         op->type = MKOP(CACHEOP, ICBI, 0);
1749                         op->ea = xform_ea(instr, regs);
1750                         return 0;
1751                 }
1752                 break;
1753         }
1754
1755 /*
1756  * Loads and stores.
1757  */
1758         op->type = UNKNOWN;
1759         op->update_reg = ra;
1760         op->reg = rd;
1761         op->val = regs->gpr[rd];
1762         u = (instr >> 20) & UPDATE;
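        /*
         * Reading aid: D/DS-form update variants differ from their base
         * forms in the low bit of the primary opcode (lwz is 32, lwzu is
         * 33); the shift by 20 moves that bit down to the UPDATE flag
         * position.  Opcode-31 (X-form) loads and stores carry the update
         * bit directly in the word, so that case below re-derives
         * u as "instr & UPDATE".
         */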
1763         op->vsx_flags = 0;
1764
1765         switch (opcode) {
1766         case 31:
1767                 u = instr & UPDATE;
1768                 op->ea = xform_ea(instr, regs);
1769                 switch ((instr >> 1) & 0x3ff) {
1770                 case 20:        /* lwarx */
1771                         op->type = MKOP(LARX, 0, 4);
1772                         break;
1773
1774                 case 150:       /* stwcx. */
1775                         op->type = MKOP(STCX, 0, 4);
1776                         break;
1777
1778 #ifdef __powerpc64__
1779                 case 84:        /* ldarx */
1780                         op->type = MKOP(LARX, 0, 8);
1781                         break;
1782
1783                 case 214:       /* stdcx. */
1784                         op->type = MKOP(STCX, 0, 8);
1785                         break;
1786
1787                 case 52:        /* lbarx */
1788                         op->type = MKOP(LARX, 0, 1);
1789                         break;
1790
1791                 case 694:       /* stbcx. */
1792                         op->type = MKOP(STCX, 0, 1);
1793                         break;
1794
1795                 case 116:       /* lharx */
1796                         op->type = MKOP(LARX, 0, 2);
1797                         break;
1798
1799                 case 726:       /* sthcx. */
1800                         op->type = MKOP(STCX, 0, 2);
1801                         break;
1802
1803                 case 276:       /* lqarx */
1804                         if (!((rd & 1) || rd == ra || rd == rb))
1805                                 op->type = MKOP(LARX, 0, 16);
1806                         break;
1807
1808                 case 182:       /* stqcx. */
1809                         if (!(rd & 1))
1810                                 op->type = MKOP(STCX, 0, 16);
1811                         break;
1812 #endif
1813
1814                 case 23:        /* lwzx */
1815                 case 55:        /* lwzux */
1816                         op->type = MKOP(LOAD, u, 4);
1817                         break;
1818
1819                 case 87:        /* lbzx */
1820                 case 119:       /* lbzux */
1821                         op->type = MKOP(LOAD, u, 1);
1822                         break;
1823
1824 #ifdef CONFIG_ALTIVEC
1825                 /*
1826                  * Note: for the load/store vector element instructions,
1827                  * bits of the EA say which field of the VMX register to use.
1828                  */
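                 /*
                  * For example, lvewx loads one word: (ea & 0xf) >> 2 picks
                  * which word element of the VMX register (in big-endian
                  * element order) receives the data, and the architecture
                  * leaves the other elements of the target undefined.
                  */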
1829                 case 7:         /* lvebx */
1830                         op->type = MKOP(LOAD_VMX, 0, 1);
1831                         op->element_size = 1;
1832                         break;
1833
1834                 case 39:        /* lvehx */
1835                         op->type = MKOP(LOAD_VMX, 0, 2);
1836                         op->element_size = 2;
1837                         break;
1838
1839                 case 71:        /* lvewx */
1840                         op->type = MKOP(LOAD_VMX, 0, 4);
1841                         op->element_size = 4;
1842                         break;
1843
1844                 case 103:       /* lvx */
1845                 case 359:       /* lvxl */
1846                         op->type = MKOP(LOAD_VMX, 0, 16);
1847                         op->element_size = 16;
1848                         break;
1849
1850                 case 135:       /* stvebx */
1851                         op->type = MKOP(STORE_VMX, 0, 1);
1852                         op->element_size = 1;
1853                         break;
1854
1855                 case 167:       /* stvehx */
1856                         op->type = MKOP(STORE_VMX, 0, 2);
1857                         op->element_size = 2;
1858                         break;
1859
1860                 case 199:       /* stvewx */
1861                         op->type = MKOP(STORE_VMX, 0, 4);
1862                         op->element_size = 4;
1863                         break;
1864
1865                 case 231:       /* stvx */
1866                 case 487:       /* stvxl */
1867                         op->type = MKOP(STORE_VMX, 0, 16);
                             op->element_size = 16;
1868                         break;
1869 #endif /* CONFIG_ALTIVEC */
1870
1871 #ifdef __powerpc64__
1872                 case 21:        /* ldx */
1873                 case 53:        /* ldux */
1874                         op->type = MKOP(LOAD, u, 8);
1875                         break;
1876
1877                 case 149:       /* stdx */
1878                 case 181:       /* stdux */
1879                         op->type = MKOP(STORE, u, 8);
1880                         break;
1881 #endif
1882
1883                 case 151:       /* stwx */
1884                 case 183:       /* stwux */
1885                         op->type = MKOP(STORE, u, 4);
1886                         break;
1887
1888                 case 215:       /* stbx */
1889                 case 247:       /* stbux */
1890                         op->type = MKOP(STORE, u, 1);
1891                         break;
1892
1893                 case 279:       /* lhzx */
1894                 case 311:       /* lhzux */
1895                         op->type = MKOP(LOAD, u, 2);
1896                         break;
1897
1898 #ifdef __powerpc64__
1899                 case 341:       /* lwax */
1900                 case 373:       /* lwaux */
1901                         op->type = MKOP(LOAD, SIGNEXT | u, 4);
1902                         break;
1903 #endif
1904
1905                 case 343:       /* lhax */
1906                 case 375:       /* lhaux */
1907                         op->type = MKOP(LOAD, SIGNEXT | u, 2);
1908                         break;
1909
1910                 case 407:       /* sthx */
1911                 case 439:       /* sthux */
1912                         op->type = MKOP(STORE, u, 2);
1913                         break;
1914
1915 #ifdef __powerpc64__
1916                 case 532:       /* ldbrx */
1917                         op->type = MKOP(LOAD, BYTEREV, 8);
1918                         break;
1919
1920 #endif
1921                 case 533:       /* lswx */
1922                         op->type = MKOP(LOAD_MULTI, 0, regs->xer & 0x7f);
1923                         break;
1924
1925                 case 534:       /* lwbrx */
1926                         op->type = MKOP(LOAD, BYTEREV, 4);
1927                         break;
1928
1929                 case 597:       /* lswi */
1930                         if (rb == 0)
1931                                 rb = 32;        /* # bytes to load */
1932                         op->type = MKOP(LOAD_MULTI, 0, rb);
1933                         op->ea = ra ? regs->gpr[ra] : 0;
1934                         break;
1935
1936 #ifdef CONFIG_PPC_FPU
1937                 case 535:       /* lfsx */
1938                 case 567:       /* lfsux */
1939                         op->type = MKOP(LOAD_FP, u, 4);
1940                         break;
1941
1942                 case 599:       /* lfdx */
1943                 case 631:       /* lfdux */
1944                         op->type = MKOP(LOAD_FP, u, 8);
1945                         break;
1946
1947                 case 663:       /* stfsx */
1948                 case 695:       /* stfsux */
1949                         op->type = MKOP(STORE_FP, u, 4);
1950                         break;
1951
1952                 case 727:       /* stfdx */
1953                 case 759:       /* stfdux */
1954                         op->type = MKOP(STORE_FP, u, 8);
1955                         break;
1956
1957 #ifdef __powerpc64__
1958                 case 791:       /* lfdpx */
1959                         op->type = MKOP(LOAD_FP, 0, 16);
1960                         break;
1961
1962                 case 919:       /* stfdpx */
1963                         op->type = MKOP(STORE_FP, 0, 16);
1964                         break;
1965 #endif /* __powerpc64__ */
1966 #endif /* CONFIG_PPC_FPU */
1967
1968 #ifdef __powerpc64__
1969                 case 660:       /* stdbrx */
1970                         op->type = MKOP(STORE, BYTEREV, 8);
1971                         op->val = byterev_8(regs->gpr[rd]);
1972                         break;
1973
1974 #endif
1975                 case 661:       /* stswx */
1976                         op->type = MKOP(STORE_MULTI, 0, regs->xer & 0x7f);
1977                         break;
1978
1979                 case 662:       /* stwbrx */
1980                         op->type = MKOP(STORE, BYTEREV, 4);
1981                         op->val = byterev_4(regs->gpr[rd]);
1982                         break;
1983
1984                 case 725:       /* stswi */
1985                         if (rb == 0)
1986                                 rb = 32;        /* # bytes to store */
1987                         op->type = MKOP(STORE_MULTI, 0, rb);
1988                         op->ea = ra ? regs->gpr[ra] : 0;
1989                         break;
1990
1991                 case 790:       /* lhbrx */
1992                         op->type = MKOP(LOAD, BYTEREV, 2);
1993                         break;
1994
1995                 case 918:       /* sthbrx */
1996                         op->type = MKOP(STORE, BYTEREV, 2);
1997                         op->val = byterev_2(regs->gpr[rd]);
1998                         break;
1999
2000 #ifdef CONFIG_VSX
2001                 case 12:        /* lxsiwzx */
2002                         op->reg = rd | ((instr & 1) << 5);
2003                         op->type = MKOP(LOAD_VSX, 0, 4);
2004                         op->element_size = 8;
2005                         break;
2006
2007                 case 76:        /* lxsiwax */
2008                         op->reg = rd | ((instr & 1) << 5);
2009                         op->type = MKOP(LOAD_VSX, SIGNEXT, 4);
2010                         op->element_size = 8;
2011                         break;
2012
2013                 case 140:       /* stxsiwx */
2014                         op->reg = rd | ((instr & 1) << 5);
2015                         op->type = MKOP(STORE_VSX, 0, 4);
2016                         op->element_size = 8;
2017                         break;
2018
2019                 case 268:       /* lxvx */
2020                         op->reg = rd | ((instr & 1) << 5);
2021                         op->type = MKOP(LOAD_VSX, 0, 16);
2022                         op->element_size = 16;
2023                         op->vsx_flags = VSX_CHECK_VEC;
2024                         break;
2025
2026                 case 269:       /* lxvl */
2027                 case 301: {     /* lxvll */
2028                         int nb;
2029                         op->reg = rd | ((instr & 1) << 5);
2030                         op->ea = ra ? regs->gpr[ra] : 0;
2031                         nb = regs->gpr[rb] & 0xff;
2032                         if (nb > 16)
2033                                 nb = 16;
2034                         op->type = MKOP(LOAD_VSX, 0, nb);
2035                         op->element_size = 16;
2036                         op->vsx_flags = ((instr & 0x20) ? VSX_LDLEFT : 0) |
2037                                 VSX_CHECK_VEC;
2038                         break;
2039                 }
2040                 case 332:       /* lxvdsx */
2041                         op->reg = rd | ((instr & 1) << 5);
2042                         op->type = MKOP(LOAD_VSX, 0, 8);
2043                         op->element_size = 8;
2044                         op->vsx_flags = VSX_SPLAT;
2045                         break;
2046
2047                 case 364:       /* lxvwsx */
2048                         op->reg = rd | ((instr & 1) << 5);
2049                         op->type = MKOP(LOAD_VSX, 0, 4);
2050                         op->element_size = 4;
2051                         op->vsx_flags = VSX_SPLAT | VSX_CHECK_VEC;
2052                         break;
2053
2054                 case 396:       /* stxvx */
2055                         op->reg = rd | ((instr & 1) << 5);
2056                         op->type = MKOP(STORE_VSX, 0, 16);
2057                         op->element_size = 16;
2058                         op->vsx_flags = VSX_CHECK_VEC;
2059                         break;
2060
2061                 case 397:       /* stxvl */
2062                 case 429: {     /* stxvll */
2063                         int nb;
2064                         op->reg = rd | ((instr & 1) << 5);
2065                         op->ea = ra ? regs->gpr[ra] : 0;
2066                         nb = regs->gpr[rb] & 0xff;
2067                         if (nb > 16)
2068                                 nb = 16;
2069                         op->type = MKOP(STORE_VSX, 0, nb);
2070                         op->element_size = 16;
2071                         op->vsx_flags = ((instr & 0x20) ? VSX_LDLEFT : 0) |
2072                                 VSX_CHECK_VEC;
2073                         break;
2074                 }
2075                 case 524:       /* lxsspx */
2076                         op->reg = rd | ((instr & 1) << 5);
2077                         op->type = MKOP(LOAD_VSX, 0, 4);
2078                         op->element_size = 8;
2079                         op->vsx_flags = VSX_FPCONV;
2080                         break;
2081
2082                 case 588:       /* lxsdx */
2083                         op->reg = rd | ((instr & 1) << 5);
2084                         op->type = MKOP(LOAD_VSX, 0, 8);
2085                         op->element_size = 8;
2086                         break;
2087
2088                 case 652:       /* stxsspx */
2089                         op->reg = rd | ((instr & 1) << 5);
2090                         op->type = MKOP(STORE_VSX, 0, 4);
2091                         op->element_size = 8;
2092                         op->vsx_flags = VSX_FPCONV;
2093                         break;
2094
2095                 case 716:       /* stxsdx */
2096                         op->reg = rd | ((instr & 1) << 5);
2097                         op->type = MKOP(STORE_VSX, 0, 8);
2098                         op->element_size = 8;
2099                         break;
2100
2101                 case 780:       /* lxvw4x */
2102                         op->reg = rd | ((instr & 1) << 5);
2103                         op->type = MKOP(LOAD_VSX, 0, 16);
2104                         op->element_size = 4;
2105                         break;
2106
2107                 case 781:       /* lxsibzx */
2108                         op->reg = rd | ((instr & 1) << 5);
2109                         op->type = MKOP(LOAD_VSX, 0, 1);
2110                         op->element_size = 8;
2111                         op->vsx_flags = VSX_CHECK_VEC;
2112                         break;
2113
2114                 case 812:       /* lxvh8x */
2115                         op->reg = rd | ((instr & 1) << 5);
2116                         op->type = MKOP(LOAD_VSX, 0, 16);
2117                         op->element_size = 2;
2118                         op->vsx_flags = VSX_CHECK_VEC;
2119                         break;
2120
2121                 case 813:       /* lxsihzx */
2122                         op->reg = rd | ((instr & 1) << 5);
2123                         op->type = MKOP(LOAD_VSX, 0, 2);
2124                         op->element_size = 8;
2125                         op->vsx_flags = VSX_CHECK_VEC;
2126                         break;
2127
2128                 case 844:       /* lxvd2x */
2129                         op->reg = rd | ((instr & 1) << 5);
2130                         op->type = MKOP(LOAD_VSX, 0, 16);
2131                         op->element_size = 8;
2132                         break;
2133
2134                 case 876:       /* lxvb16x */
2135                         op->reg = rd | ((instr & 1) << 5);
2136                         op->type = MKOP(LOAD_VSX, 0, 16);
2137                         op->element_size = 1;
2138                         op->vsx_flags = VSX_CHECK_VEC;
2139                         break;
2140
2141                 case 908:       /* stxvw4x */
2142                         op->reg = rd | ((instr & 1) << 5);
2143                         op->type = MKOP(STORE_VSX, 0, 16);
2144                         op->element_size = 4;
2145                         break;
2146
2147                 case 909:       /* stxsibx */
2148                         op->reg = rd | ((instr & 1) << 5);
2149                         op->type = MKOP(STORE_VSX, 0, 1);
2150                         op->element_size = 8;
2151                         op->vsx_flags = VSX_CHECK_VEC;
2152                         break;
2153
2154                 case 940:       /* stxvh8x */
2155                         op->reg = rd | ((instr & 1) << 5);
2156                         op->type = MKOP(STORE_VSX, 0, 16);
2157                         op->element_size = 2;
2158                         op->vsx_flags = VSX_CHECK_VEC;
2159                         break;
2160
2161                 case 941:       /* stxsihx */
2162                         op->reg = rd | ((instr & 1) << 5);
2163                         op->type = MKOP(STORE_VSX, 0, 2);
2164                         op->element_size = 8;
2165                         op->vsx_flags = VSX_CHECK_VEC;
2166                         break;
2167
2168                 case 972:       /* stxvd2x */
2169                         op->reg = rd | ((instr & 1) << 5);
2170                         op->type = MKOP(STORE_VSX, 0, 16);
2171                         op->element_size = 8;
2172                         break;
2173
2174                 case 1004:      /* stxvb16x */
2175                         op->reg = rd | ((instr & 1) << 5);
2176                         op->type = MKOP(STORE_VSX, 0, 16);
2177                         op->element_size = 1;
2178                         op->vsx_flags = VSX_CHECK_VEC;
2179                         break;
2180
2181 #endif /* CONFIG_VSX */
2182                 }
2183                 break;
2184
2185         case 32:        /* lwz */
2186         case 33:        /* lwzu */
2187                 op->type = MKOP(LOAD, u, 4);
2188                 op->ea = dform_ea(instr, regs);
2189                 break;
2190
2191         case 34:        /* lbz */
2192         case 35:        /* lbzu */
2193                 op->type = MKOP(LOAD, u, 1);
2194                 op->ea = dform_ea(instr, regs);
2195                 break;
2196
2197         case 36:        /* stw */
2198         case 37:        /* stwu */
2199                 op->type = MKOP(STORE, u, 4);
2200                 op->ea = dform_ea(instr, regs);
2201                 break;
2202
2203         case 38:        /* stb */
2204         case 39:        /* stbu */
2205                 op->type = MKOP(STORE, u, 1);
2206                 op->ea = dform_ea(instr, regs);
2207                 break;
2208
2209         case 40:        /* lhz */
2210         case 41:        /* lhzu */
2211                 op->type = MKOP(LOAD, u, 2);
2212                 op->ea = dform_ea(instr, regs);
2213                 break;
2214
2215         case 42:        /* lha */
2216         case 43:        /* lhau */
2217                 op->type = MKOP(LOAD, SIGNEXT | u, 2);
2218                 op->ea = dform_ea(instr, regs);
2219                 break;
2220
2221         case 44:        /* sth */
2222         case 45:        /* sthu */
2223                 op->type = MKOP(STORE, u, 2);
2224                 op->ea = dform_ea(instr, regs);
2225                 break;
2226
2227         case 46:        /* lmw */
2228                 if (ra >= rd)
2229                         break;          /* invalid form, ra in range to load */
2230                 op->type = MKOP(LOAD_MULTI, 0, 4 * (32 - rd));
2231                 op->ea = dform_ea(instr, regs);
2232                 break;
2233
2234         case 47:        /* stmw */
2235                 op->type = MKOP(STORE_MULTI, 0, 4 * (32 - rd));
2236                 op->ea = dform_ea(instr, regs);
2237                 break;
2238
2239 #ifdef CONFIG_PPC_FPU
2240         case 48:        /* lfs */
2241         case 49:        /* lfsu */
2242                 op->type = MKOP(LOAD_FP, u, 4);
2243                 op->ea = dform_ea(instr, regs);
2244                 break;
2245
2246         case 50:        /* lfd */
2247         case 51:        /* lfdu */
2248                 op->type = MKOP(LOAD_FP, u, 8);
2249                 op->ea = dform_ea(instr, regs);
2250                 break;
2251
2252         case 52:        /* stfs */
2253         case 53:        /* stfsu */
2254                 op->type = MKOP(STORE_FP, u, 4);
2255                 op->ea = dform_ea(instr, regs);
2256                 break;
2257
2258         case 54:        /* stfd */
2259         case 55:        /* stfdu */
2260                 op->type = MKOP(STORE_FP, u, 8);
2261                 op->ea = dform_ea(instr, regs);
2262                 break;
2263 #endif
2264
2265 #ifdef __powerpc64__
2266         case 56:        /* lq */
2267                 if (!((rd & 1) || (rd == ra)))
2268                         op->type = MKOP(LOAD, 0, 16);
2269                 op->ea = dqform_ea(instr, regs);
2270                 break;
2271 #endif
2272
2273 #ifdef CONFIG_VSX
2274         case 57:        /* lfdp, lxsd, lxssp */
2275                 op->ea = dsform_ea(instr, regs);
2276                 switch (instr & 3) {
2277                 case 0:         /* lfdp */
2278                         if (rd & 1)
2279                                 break;          /* reg must be even */
2280                         op->type = MKOP(LOAD_FP, 0, 16);
2281                         break;
2282                 case 2:         /* lxsd */
2283                         op->reg = rd + 32;
2284                         op->type = MKOP(LOAD_VSX, 0, 8);
2285                         op->element_size = 8;
2286                         op->vsx_flags = VSX_CHECK_VEC;
2287                         break;
2288                 case 3:         /* lxssp */
2289                         op->reg = rd + 32;
2290                         op->type = MKOP(LOAD_VSX, 0, 4);
2291                         op->element_size = 8;
2292                         op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
2293                         break;
2294                 }
2295                 break;
2296 #endif /* CONFIG_VSX */
2297
2298 #ifdef __powerpc64__
2299         case 58:        /* ld[u], lwa */
2300                 op->ea = dsform_ea(instr, regs);
2301                 switch (instr & 3) {
2302                 case 0:         /* ld */
2303                         op->type = MKOP(LOAD, 0, 8);
2304                         break;
2305                 case 1:         /* ldu */
2306                         op->type = MKOP(LOAD, UPDATE, 8);
2307                         break;
2308                 case 2:         /* lwa */
2309                         op->type = MKOP(LOAD, SIGNEXT, 4);
2310                         break;
2311                 }
2312                 break;
2313 #endif
2314
2315 #ifdef CONFIG_VSX
2316         case 61:        /* stfdp, lxv, stxsd, stxssp, stxv */
2317                 switch (instr & 7) {
2318                 case 0:         /* stfdp with LSB of DS field = 0 */
2319                 case 4:         /* stfdp with LSB of DS field = 1 */
2320                         op->ea = dsform_ea(instr, regs);
2321                         op->type = MKOP(STORE_FP, 0, 16);
2322                         break;
2323
2324                 case 1:         /* lxv */
2325                         op->ea = dqform_ea(instr, regs);
2326                         if (instr & 8)
2327                                 op->reg = rd + 32;
2328                         op->type = MKOP(LOAD_VSX, 0, 16);
2329                         op->element_size = 16;
2330                         op->vsx_flags = VSX_CHECK_VEC;
2331                         break;
2332
2333                 case 2:         /* stxsd with LSB of DS field = 0 */
2334                 case 6:         /* stxsd with LSB of DS field = 1 */
2335                         op->ea = dsform_ea(instr, regs);
2336                         op->reg = rd + 32;
2337                         op->type = MKOP(STORE_VSX, 0, 8);
2338                         op->element_size = 8;
2339                         op->vsx_flags = VSX_CHECK_VEC;
2340                         break;
2341
2342                 case 3:         /* stxssp with LSB of DS field = 0 */
2343                 case 7:         /* stxssp with LSB of DS field = 1 */
2344                         op->ea = dsform_ea(instr, regs);
2345                         op->reg = rd + 32;
2346                         op->type = MKOP(STORE_VSX, 0, 4);
2347                         op->element_size = 8;
2348                         op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
2349                         break;
2350
2351                 case 5:         /* stxv */
2352                         op->ea = dqform_ea(instr, regs);
2353                         if (instr & 8)
2354                                 op->reg = rd + 32;
2355                         op->type = MKOP(STORE_VSX, 0, 16);
2356                         op->element_size = 16;
2357                         op->vsx_flags = VSX_CHECK_VEC;
2358                         break;
2359                 }
2360                 break;
2361 #endif /* CONFIG_VSX */
2362
2363 #ifdef __powerpc64__
2364         case 62:        /* std[u] */
2365                 op->ea = dsform_ea(instr, regs);
2366                 switch (instr & 3) {
2367                 case 0:         /* std */
2368                         op->type = MKOP(STORE, 0, 8);
2369                         break;
2370                 case 1:         /* stdu */
2371                         op->type = MKOP(STORE, UPDATE, 8);
2372                         break;
2373                 case 2:         /* stq */
2374                         if (!(rd & 1))
2375                                 op->type = MKOP(STORE, 0, 16);
2376                         break;
2377                 }
2378                 break;
2379 #endif /* __powerpc64__ */
2380
2381         }
2382         return 0;
2383
2384  logical_done:
2385         if (instr & 1)
2386                 set_cr0(regs, op, ra);
2387  logical_done_nocc:
2388         op->reg = ra;
2389         op->type |= SETREG;
2390         return 1;
2391
2392  arith_done:
2393         if (instr & 1)
2394                 set_cr0(regs, op, rd);
2395  compute_done:
2396         op->reg = rd;
2397         op->type |= SETREG;
2398         return 1;
2399
2400  priv:
2401         op->type = INTERRUPT | 0x700;
2402         op->val = SRR1_PROGPRIV;
2403         return 0;
2404
2405  trap:
2406         op->type = INTERRUPT | 0x700;
2407         op->val = SRR1_PROGTRAP;
2408         return 0;
2409 }
2410 EXPORT_SYMBOL_GPL(analyse_instr);
2411 NOKPROBE_SYMBOL(analyse_instr);
2412
2413 /*
2414  * For PPC32 we always use stwu with r1 to change the stack pointer,
2415  * so this emulated store could corrupt the exception frame.  Instead
2416  * of doing the store here, we rely on the exception frame trampoline
2417  * that is pushed below the kprobed function's stack: we only update
2418  * gpr[1] and set a flag, and the real store is done safely by the
2419  * exception return code when it sees that flag.
2420  */
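/*
 * Concretely: a kprobed PPC32 function prologue usually starts with
 * something like "stwu r1,-16(r1)".  When that stwu is emulated here, only
 * gpr[1] is updated and TIF_EMULATE_STACK_STORE is set; the back-chain
 * store itself is performed later by the exception return path, once it is
 * safe to write to the stack.
 */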
2421 static nokprobe_inline int handle_stack_update(unsigned long ea, struct pt_regs *regs)
2422 {
2423 #ifdef CONFIG_PPC32
2424         /*
2425          * Check whether emulating this store would overflow the kernel stack
2426          */
2427         if (ea - STACK_INT_FRAME_SIZE <= current->thread.ksp_limit) {
2428                 printk(KERN_CRIT "Can't kprobe this since kernel stack would overflow.\n");
2429                 return -EINVAL;
2430         }
2431 #endif /* CONFIG_PPC32 */
2432         /*
2433          * Warn if the flag is already set, since that would mean we
2434          * lose the previously recorded stack update.
2435          */
2436         WARN_ON(test_thread_flag(TIF_EMULATE_STACK_STORE));
2437         set_thread_flag(TIF_EMULATE_STACK_STORE);
2438         return 0;
2439 }
2440
2441 static nokprobe_inline void do_signext(unsigned long *valp, int size)
2442 {
2443         switch (size) {
2444         case 2:
2445                 *valp = (signed short) *valp;
2446                 break;
2447         case 4:
2448                 *valp = (signed int) *valp;
2449                 break;
2450         }
2451 }
2452
2453 static nokprobe_inline void do_byterev(unsigned long *valp, int size)
2454 {
2455         switch (size) {
2456         case 2:
2457                 *valp = byterev_2(*valp);
2458                 break;
2459         case 4:
2460                 *valp = byterev_4(*valp);
2461                 break;
2462 #ifdef __powerpc64__
2463         case 8:
2464                 *valp = byterev_8(*valp);
2465                 break;
2466 #endif
2467         }
2468 }
2469
2470 /*
2471  * Emulate an instruction that can be executed just by updating
2472  * fields in *regs.
2473  */
2474 void emulate_update_regs(struct pt_regs *regs, struct instruction_op *op)
2475 {
2476         unsigned long next_pc;
2477
2478         next_pc = truncate_if_32bit(regs->msr, regs->nip + 4);
2479         switch (op->type & INSTR_TYPE_MASK) {
2480         case COMPUTE:
2481                 if (op->type & SETREG)
2482                         regs->gpr[op->reg] = op->val;
2483                 if (op->type & SETCC)
2484                         regs->ccr = op->ccval;
2485                 if (op->type & SETXER)
2486                         regs->xer = op->xerval;
2487                 break;
2488
2489         case BRANCH:
2490                 if (op->type & SETLK)
2491                         regs->link = next_pc;
2492                 if (op->type & BRTAKEN)
2493                         next_pc = op->val;
2494                 if (op->type & DECCTR)
2495                         --regs->ctr;
2496                 break;
2497
2498         case BARRIER:
2499                 switch (op->type & BARRIER_MASK) {
2500                 case BARRIER_SYNC:
2501                         mb();
2502                         break;
2503                 case BARRIER_ISYNC:
2504                         isync();
2505                         break;
2506                 case BARRIER_EIEIO:
2507                         eieio();
2508                         break;
2509                 case BARRIER_LWSYNC:
2510                         asm volatile("lwsync" : : : "memory");
2511                         break;
2512                 case BARRIER_PTESYNC:
2513                         asm volatile("ptesync" : : : "memory");
2514                         break;
2515                 }
2516                 break;
2517
2518         case MFSPR:
2519                 switch (op->spr) {
2520                 case SPRN_XER:
2521                         regs->gpr[op->reg] = regs->xer & 0xffffffffUL;
2522                         break;
2523                 case SPRN_LR:
2524                         regs->gpr[op->reg] = regs->link;
2525                         break;
2526                 case SPRN_CTR:
2527                         regs->gpr[op->reg] = regs->ctr;
2528                         break;
2529                 default:
2530                         WARN_ON_ONCE(1);
2531                 }
2532                 break;
2533
2534         case MTSPR:
2535                 switch (op->spr) {
2536                 case SPRN_XER:
2537                         regs->xer = op->val & 0xffffffffUL;
2538                         break;
2539                 case SPRN_LR:
2540                         regs->link = op->val;
2541                         break;
2542                 case SPRN_CTR:
2543                         regs->ctr = op->val;
2544                         break;
2545                 default:
2546                         WARN_ON_ONCE(1);
2547                 }
2548                 break;
2549
2550         default:
2551                 WARN_ON_ONCE(1);
2552         }
2553         regs->nip = next_pc;
2554 }
2555
2556 /*
2557  * Emulate instructions that cause a transfer of control,
2558  * loads and stores, and a few other instructions.
2559  * Returns 1 if the step was emulated, 0 if not,
2560  * or -1 if the instruction is one that should not be stepped,
2561  * such as an rfid, or a mtmsrd that would clear MSR_RI.
2562  */
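/*
 * Typical use (sketch): the kprobe single-step path hands the probed
 * instruction and the trapping regs to this function, e.g.
 *
 *	if (emulate_step(regs, instr) > 0)
 *		return;		(instruction emulated, nip already advanced)
 *
 * and only falls back to hardware single-stepping when it returns 0.
 */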
2563 int emulate_step(struct pt_regs *regs, unsigned int instr)
2564 {
2565         struct instruction_op op;
2566         int r, err, size, type;
2567         unsigned long val;
2568         unsigned int cr;
2569         int i, rd, nb;
2570         unsigned long ea;
2571
2572         r = analyse_instr(&op, regs, instr);
2573         if (r < 0)
2574                 return r;
2575         if (r > 0) {
2576                 emulate_update_regs(regs, &op);
2577                 return 1;
2578         }
2579
2580         err = 0;
2581         size = GETSIZE(op.type);
2582         type = op.type & INSTR_TYPE_MASK;
2583
2584         ea = op.ea;
2585         if (OP_IS_LOAD_STORE(type) || type == CACHEOP)
2586                 ea = truncate_if_32bit(regs->msr, op.ea);
2587
2588         switch (type) {
2589         case CACHEOP:
2590                 if (!address_ok(regs, ea, 8))
2591                         return 0;
2592                 switch (op.type & CACHEOP_MASK) {
2593                 case DCBST:
2594                         __cacheop_user_asmx(ea, err, "dcbst");
2595                         break;
2596                 case DCBF:
2597                         __cacheop_user_asmx(ea, err, "dcbf");
2598                         break;
2599                 case DCBTST:
2600                         if (op.reg == 0)
2601                                 prefetchw((void *) ea);
2602                         break;
2603                 case DCBT:
2604                         if (op.reg == 0)
2605                                 prefetch((void *) ea);
2606                         break;
2607                 case ICBI:
2608                         __cacheop_user_asmx(ea, err, "icbi");
2609                         break;
2610                 }
2611                 if (err)
2612                         return 0;
2613                 goto instr_done;
2614
2615         case LARX:
2616                 if (ea & (size - 1))
2617                         break;          /* can't handle misaligned */
2618                 if (!address_ok(regs, ea, size))
2619                         return 0;
2620                 err = 0;
2621                 switch (size) {
2622 #ifdef __powerpc64__
2623                 case 1:
2624                         __get_user_asmx(val, ea, err, "lbarx");
2625                         break;
2626                 case 2:
2627                         __get_user_asmx(val, ea, err, "lharx");
2628                         break;
2629 #endif
2630                 case 4:
2631                         __get_user_asmx(val, ea, err, "lwarx");
2632                         break;
2633 #ifdef __powerpc64__
2634                 case 8:
2635                         __get_user_asmx(val, ea, err, "ldarx");
2636                         break;
2637                 case 16:
2638                         err = do_lqarx(ea, &regs->gpr[op.reg]);
2639                         goto ldst_done;
2640 #endif
2641                 default:
2642                         return 0;
2643                 }
2644                 if (!err)
2645                         regs->gpr[op.reg] = val;
2646                 goto ldst_done;
2647
2648         case STCX:
2649                 if (ea & (size - 1))
2650                         break;          /* can't handle misaligned */
2651                 if (!address_ok(regs, ea, size))
2652                         return 0;
2653                 err = 0;
2654                 switch (size) {
2655 #ifdef __powerpc64__
2656                 case 1:
2657                         __put_user_asmx(op.val, ea, err, "stbcx.", cr);
2658                         break;
2659                 case 2:
2660                         __put_user_asmx(op.val, ea, err, "sthcx.", cr);
2661                         break;
2662 #endif
2663                 case 4:
2664                         __put_user_asmx(op.val, ea, err, "stwcx.", cr);
2665                         break;
2666 #ifdef __powerpc64__
2667                 case 8:
2668                         __put_user_asmx(op.val, ea, err, "stdcx.", cr);
2669                         break;
2670                 case 16:
2671                         err = do_stqcx(ea, regs->gpr[op.reg],
2672                                        regs->gpr[op.reg + 1], &cr);
2673                         break;
2674 #endif
2675                 default:
2676                         return 0;
2677                 }
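                /*
                 * Merge the conditional store's outcome into CR0: LT/GT/EQ
                 * come from the CR value produced by the st*cx. above
                 * (cr & 0xe0000000), and SO is copied from XER[SO]
                 * (0x80000000 >> 3 == 0x10000000), as the real instruction
                 * would do for CR field 0.
                 */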
2678                 if (!err)
2679                         regs->ccr = (regs->ccr & 0x0fffffff) |
2680                                 (cr & 0xe0000000) |
2681                                 ((regs->xer >> 3) & 0x10000000);
2682                 goto ldst_done;
2683
2684         case LOAD:
2685 #ifdef __powerpc64__
2686                 if (size == 16) {
2687                         err = emulate_lq(regs, ea, op.reg);
2688                         goto ldst_done;
2689                 }
2690 #endif
2691                 err = read_mem(&regs->gpr[op.reg], ea, size, regs);
2692                 if (!err) {
2693                         if (op.type & SIGNEXT)
2694                                 do_signext(&regs->gpr[op.reg], size);
2695                         if (op.type & BYTEREV)
2696                                 do_byterev(&regs->gpr[op.reg], size);
2697                 }
2698                 goto ldst_done;
2699
2700 #ifdef CONFIG_PPC_FPU
2701         case LOAD_FP:
2702                 /*
2703                  * If the instruction is in userspace, we can emulate it even
2704                  * if the FP/VMX/VSX state is not live, because we have the state
2705                  * stored in the thread_struct.  If the instruction is in
2706                  * the kernel, we must not touch the state in the thread_struct.
2707                  */
2708                 if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_FP))
2709                         return 0;
2710                 err = do_fp_load(op.reg, ea, size, regs);
2711                 goto ldst_done;
2712 #endif
2713 #ifdef CONFIG_ALTIVEC
2714         case LOAD_VMX:
2715                 if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_VEC))
2716                         return 0;
2717                 err = do_vec_load(op.reg, ea, size, regs);
2718                 goto ldst_done;
2719 #endif
2720 #ifdef CONFIG_VSX
2721         case LOAD_VSX: {
2722                 unsigned long msrbit = MSR_VSX;
2723
2724                 /*
2725                  * Some VSX instructions check the MSR_VEC bit rather than MSR_VSX
2726                  * when the target of the instruction is a vector register.
2727                  */
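                 /*
                  * Background: VSRs 32-63 overlay the VMX registers, so an
                  * instruction such as lxsd (which targets VSR rd + 32, see
                  * analyse_instr above) is gated on the vector facility
                  * rather than on VSX, hence the MSR_VEC test.
                  */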
2728                 if (op.reg >= 32 && (op.vsx_flags & VSX_CHECK_VEC))
2729                         msrbit = MSR_VEC;
2730                 if (!(regs->msr & MSR_PR) && !(regs->msr & msrbit))
2731                         return 0;
2732                 err = do_vsx_load(&op, ea, regs);
2733                 goto ldst_done;
2734         }
2735 #endif
2736         case LOAD_MULTI:
2737                 if (regs->msr & MSR_LE)
2738                         return 0;
2739                 rd = op.reg;
2740                 for (i = 0; i < size; i += 4) {
2741                         nb = size - i;
2742                         if (nb > 4)
2743                                 nb = 4;
2744                         err = read_mem(&regs->gpr[rd], ea, nb, regs);
2745                         if (err)
2746                                 return 0;
2747                         if (nb < 4)     /* left-justify last bytes */
2748                                 regs->gpr[rd] <<= 32 - 8 * nb;
2749                         ea += 4;
2750                         ++rd;
2751                 }
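                /*
                 * For example, a string load of length 6 fills the first
                 * register with 4 bytes and the next with the remaining 2,
                 * left-justified within its low 32 bits by the shift above,
                 * matching big-endian string-instruction semantics.
                 */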
2752                 goto instr_done;
2753
2754         case STORE:
2755 #ifdef __powerpc64__
2756                 if (size == 16) {
2757                         err = emulate_stq(regs, ea, op.reg);
2758                         goto ldst_done;
2759                 }
2760 #endif
2761                 if ((op.type & UPDATE) && size == sizeof(long) &&
2762                     op.reg == 1 && op.update_reg == 1 &&
2763                     !(regs->msr & MSR_PR) &&
2764                     ea >= regs->gpr[1] - STACK_INT_FRAME_SIZE) {
2765                         err = handle_stack_update(ea, regs);
2766                         goto ldst_done;
2767                 }
2768                 err = write_mem(op.val, ea, size, regs);
2769                 goto ldst_done;
2770
2771 #ifdef CONFIG_PPC_FPU
2772         case STORE_FP:
2773                 if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_FP))
2774                         return 0;
2775                 err = do_fp_store(op.reg, ea, size, regs);
2776                 goto ldst_done;
2777 #endif
2778 #ifdef CONFIG_ALTIVEC
2779         case STORE_VMX:
2780                 if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_VEC))
2781                         return 0;
2782                 err = do_vec_store(op.reg, ea, size, regs);
2783                 goto ldst_done;
2784 #endif
2785 #ifdef CONFIG_VSX
2786         case STORE_VSX: {
2787                 unsigned long msrbit = MSR_VSX;
2788
2789                 /*
2790                  * Some VSX instructions check the MSR_VEC bit rather than MSR_VSX
2791                  * when the target of the instruction is a vector register.
2792                  */
2793                 if (op.reg >= 32 && (op.vsx_flags & VSX_CHECK_VEC))
2794                         msrbit = MSR_VEC;
2795                 if (!(regs->msr & MSR_PR) && !(regs->msr & msrbit))
2796                         return 0;
2797                 err = do_vsx_store(&op, ea, regs);
2798                 goto ldst_done;
2799         }
2800 #endif
2801         case STORE_MULTI:
2802                 if (regs->msr & MSR_LE)
2803                         return 0;
2804                 rd = op.reg;
2805                 for (i = 0; i < size; i += 4) {
2806                         val = regs->gpr[rd];
2807                         nb = size - i;
2808                         if (nb > 4)
2809                                 nb = 4;
2810                         else
2811                                 val >>= 32 - 8 * nb;
2812                         err = write_mem(val, ea, nb, regs);
2813                         if (err)
2814                                 return 0;
2815                         ea += 4;
2816                         ++rd;
2817                 }
2818                 goto instr_done;
2819
2820         case MFMSR:
2821                 regs->gpr[op.reg] = regs->msr & MSR_MASK;
2822                 goto instr_done;
2823
2824         case MTMSR:
2825                 val = regs->gpr[op.reg];
2826                 if ((val & MSR_RI) == 0)
2827                         /* can't step mtmsr[d] that would clear MSR_RI */
2828                         return -1;
2829                 /* here op.val is the mask of bits to change */
2830                 regs->msr = (regs->msr & ~op.val) | (val & op.val);
2831                 goto instr_done;
2832
2833 #ifdef CONFIG_PPC64
2834         case SYSCALL:   /* sc */
2835                 /*
2836                  * N.B. this uses knowledge about how the syscall
2837                  * entry code works.  If that is changed, this will
2838                  * need to be changed also.
2839                  */
2840                 if (regs->gpr[0] == 0x1ebe &&
2841                     cpu_has_feature(CPU_FTR_REAL_LE)) {
2842                         regs->msr ^= MSR_LE;
2843                         goto instr_done;
2844                 }
2845                 regs->gpr[9] = regs->gpr[13];
2846                 regs->gpr[10] = MSR_KERNEL;
2847                 regs->gpr[11] = regs->nip + 4;
2848                 regs->gpr[12] = regs->msr & MSR_MASK;
2849                 regs->gpr[13] = (unsigned long) get_paca();
2850                 regs->nip = (unsigned long) &system_call_common;
2851                 regs->msr = MSR_KERNEL;
2852                 return 1;
2853
2854         case RFI:
2855                 return -1;
2856 #endif
2857         }
2858         return 0;
2859
2860  ldst_done:
2861         if (err)
2862                 return 0;
2863         if (op.type & UPDATE)
2864                 regs->gpr[op.update_reg] = op.ea;
2865
2866  instr_done:
2867         regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4);
2868         return 1;
2869 }
2870 NOKPROBE_SYMBOL(emulate_step);