4 * Copyright (C) 2004 Paul Mackerras <paulus@au.ibm.com>, IBM
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
11 #include <linux/kernel.h>
12 #include <linux/kprobes.h>
13 #include <linux/ptrace.h>
14 #include <linux/prefetch.h>
15 #include <asm/sstep.h>
16 #include <asm/processor.h>
17 #include <asm/uaccess.h>
18 #include <asm/cputable.h>
20 extern char system_call_common[];
23 /* Bits in SRR1 that are copied from MSR */
24 #define MSR_MASK 0xffffffff87c0ffffUL
26 #define MSR_MASK 0x87c0ffff
30 #define XER_SO 0x80000000U
31 #define XER_OV 0x40000000U
32 #define XER_CA 0x20000000U
36 * Functions in ldstfp.S
38 extern int do_lfs(int rn, unsigned long ea);
39 extern int do_lfd(int rn, unsigned long ea);
40 extern int do_stfs(int rn, unsigned long ea);
41 extern int do_stfd(int rn, unsigned long ea);
42 extern int do_lvx(int rn, unsigned long ea);
43 extern int do_stvx(int rn, unsigned long ea);
44 extern int do_lxvd2x(int rn, unsigned long ea);
45 extern int do_stxvd2x(int rn, unsigned long ea);
49 * Emulate the truncation of 64 bit values in 32-bit mode.
/*
 * truncate_if_32bit() - emulate 32-bit mode address/value truncation:
 * when MSR_64BIT is clear in msr, the result is masked to 32 bits.
 * NOTE(review): interior lines are elided in this view; presumably the
 * function returns val (masked or unchanged) - confirm in full source.
 */
51 static unsigned long truncate_if_32bit(unsigned long msr, unsigned long val)
54 	if ((msr & MSR_64BIT) == 0)
61 * Determine whether a conditional branch instruction would branch.
/*
 * branch_taken() - evaluate the BO/BI fields of a conditional branch
 * instruction against CTR and CR in *regs.
 * Returns nonzero when the branch condition is satisfied.
 */
63 static int __kprobes branch_taken(unsigned int instr, struct pt_regs *regs)
65 	unsigned int bo = (instr >> 21) & 0x1f;	/* BO field: branch options */
69 	/* decrement counter */
71 		if (((bo >> 1) & 1) ^ (regs->ctr == 0))
74 	if ((bo & 0x10) == 0) {
75 		/* check bit from CR */
76 		bi = (instr >> 16) & 0x1f;	/* BI field: which CR bit to test */
77 		if (((regs->ccr >> (31 - bi)) & 1) != ((bo >> 3) & 1))
/*
 * address_ok() - check whether [ea, ea+nb) may be accessed for this
 * emulation; delegates to __access_ok() with USER_DS.
 * NOTE(review): lines elided here - the full source likely short-circuits
 * for kernel-mode regs before this check; confirm.
 */
84 static long __kprobes address_ok(struct pt_regs *regs, unsigned long ea, int nb)
88 	return __access_ok(ea, nb, USER_DS);
92 * Calculate effective address for a D-form instruction
/*
 * dform_ea() - effective address for a D-form instruction:
 * sign-extended 16-bit displacement, plus (elided here) GPR[ra] when
 * ra != 0; truncated to 32 bits in 32-bit mode.
 */
94 static unsigned long __kprobes dform_ea(unsigned int instr, struct pt_regs *regs)
99 	ra = (instr >> 16) & 0x1f;
100 	ea = (signed short) instr;		/* sign-extend */
104 	return truncate_if_32bit(regs->msr, ea);
109 * Calculate effective address for a DS-form instruction
/*
 * dsform_ea() - effective address for a DS-form instruction: like D-form
 * but the low 2 bits of the displacement are opcode extension bits and
 * are masked off before sign-extension.
 */
111 static unsigned long __kprobes dsform_ea(unsigned int instr, struct pt_regs *regs)
116 	ra = (instr >> 16) & 0x1f;
117 	ea = (signed short) (instr & ~3);	/* sign-extend */
121 	return truncate_if_32bit(regs->msr, ea);
123 #endif /* __powerpc64 */
126 * Calculate effective address for an X-form instruction
/*
 * xform_ea() - effective address for an X-form instruction:
 * GPR[rb] plus (elided here) GPR[ra] when ra != 0.
 */
128 static unsigned long __kprobes xform_ea(unsigned int instr,
129 				      struct pt_regs *regs)
134 	ra = (instr >> 16) & 0x1f;
135 	rb = (instr >> 11) & 0x1f;
140 	return truncate_if_32bit(regs->msr, ea);
144 * Return the largest power of 2, not greater than sizeof(unsigned long),
145 * such that x is a multiple of it.
/*
 * max_align() - largest power of 2 (capped at sizeof(unsigned long))
 * that divides x; used to pick the widest aligned access size.
 */
147 static inline unsigned long max_align(unsigned long x)
149 	x |= sizeof(unsigned long);	/* caps result at word size */
150 	return x & -x;		/* isolates rightmost bit */
/* byterev_2() - byte-swap the low 16 bits of x. */
154 static inline unsigned long byterev_2(unsigned long x)
156 	return ((x >> 8) & 0xff) | ((x & 0xff) << 8);
/* byterev_4() - byte-swap the low 32 bits of x. */
159 static inline unsigned long byterev_4(unsigned long x)
161 	return ((x >> 24) & 0xff) | ((x >> 8) & 0xff00) |
162 		((x & 0xff00) << 8) | ((x & 0xff) << 24);
/* byterev_8() - byte-swap a 64-bit value as two swapped 32-bit halves. */
166 static inline unsigned long byterev_8(unsigned long x)
168 	return (byterev_4(x) << 32) | byterev_4(x >> 32);
/*
 * read_mem_aligned() - naturally-aligned load of 1/2/4/8 bytes from ea
 * into *dest via __get_user() (the size switch's case labels are elided
 * in this view). Returns 0 on success, nonzero __get_user() error code
 * on fault.
 */
172 static int __kprobes read_mem_aligned(unsigned long *dest, unsigned long ea,
180 		err = __get_user(x, (unsigned char __user *) ea);
183 		err = __get_user(x, (unsigned short __user *) ea);
186 		err = __get_user(x, (unsigned int __user *) ea);
190 		err = __get_user(x, (unsigned long __user *) ea);
/*
 * read_mem_unaligned() - read nb bytes from a possibly-misaligned ea by
 * splitting it into maximally-aligned pieces (each read via
 * read_mem_aligned()) and accumulating them big-endian-style into x;
 * on little-endian kernels the assembled value is byte-reversed at the
 * end using the saved original length.
 */
199 static int __kprobes read_mem_unaligned(unsigned long *dest, unsigned long ea,
200 				      int nb, struct pt_regs *regs)
203 	unsigned long x, b, c;
204 #ifdef __LITTLE_ENDIAN__
205 	int len = nb; /* save a copy of the length for byte reversal */
208 	/* unaligned, do this in pieces */
210 	for (; nb > 0; nb -= c) {
211 #ifdef __LITTLE_ENDIAN__
214 #ifdef __BIG_ENDIAN__
219 		err = read_mem_aligned(&b, ea, c);
222 		x = (x << (8 * c)) + b;	/* append piece at the low end */
225 #ifdef __LITTLE_ENDIAN__
228 		*dest = byterev_2(x);
231 		*dest = byterev_4(x);
235 		*dest = byterev_8(x);
240 #ifdef __BIG_ENDIAN__
247 * Read memory at address ea for nb bytes, return 0 for success
248 * or -EFAULT if an error occurred.
/*
 * read_mem() - read nb bytes at ea into *dest, after an address_ok()
 * check; dispatches to the aligned fast path when ea is naturally
 * aligned for nb, otherwise to the piecewise unaligned path.
 */
250 static int __kprobes read_mem(unsigned long *dest, unsigned long ea, int nb,
251 			      struct pt_regs *regs)
253 	if (!address_ok(regs, ea, nb))
255 	if ((ea & (nb - 1)) == 0)
256 		return read_mem_aligned(dest, ea, nb);
257 	return read_mem_unaligned(dest, ea, nb, regs);
/*
 * write_mem_aligned() - naturally-aligned store of 1/2/4/8 bytes of val
 * to ea via __put_user() (size switch labels elided in this view).
 * Returns 0 on success, nonzero __put_user() error code on fault.
 */
260 static int __kprobes write_mem_aligned(unsigned long val, unsigned long ea,
267 		err = __put_user(val, (unsigned char __user *) ea);
270 		err = __put_user(val, (unsigned short __user *) ea);
273 		err = __put_user(val, (unsigned int __user *) ea);
277 		err = __put_user(val, (unsigned long __user *) ea);
/*
 * write_mem_unaligned() - write nb bytes of val to a possibly-misaligned
 * ea in maximally-aligned pieces; on little-endian kernels val is
 * byte-reversed first so the pieces can be emitted from the high end
 * (val >> (nb - c) * 8) exactly as on big-endian.
 */
284 static int __kprobes write_mem_unaligned(unsigned long val, unsigned long ea,
285 				       int nb, struct pt_regs *regs)
290 #ifdef __LITTLE_ENDIAN__
293 		val = byterev_2(val);
296 		val = byterev_4(val);
300 		val = byterev_8(val);
305 	/* unaligned or little-endian, do this in pieces */
306 	for (; nb > 0; nb -= c) {
307 #ifdef __LITTLE_ENDIAN__
310 #ifdef __BIG_ENDIAN__
315 		err = write_mem_aligned(val >> (nb - c) * 8, ea, c);
324 * Write memory at address ea for nb bytes, return 0 for success
325 * or -EFAULT if an error occurred.
/*
 * write_mem() - write nb bytes of val at ea, after an address_ok()
 * check; aligned fast path when possible, piecewise otherwise.
 */
327 static int __kprobes write_mem(unsigned long val, unsigned long ea, int nb,
328 			       struct pt_regs *regs)
330 	if (!address_ok(regs, ea, nb))
332 	if ((ea & (nb - 1)) == 0)
333 		return write_mem_aligned(val, ea, nb);
334 	return write_mem_unaligned(val, ea, nb, regs);
337 #ifdef CONFIG_PPC_FPU
339 * Check the address and alignment, and call func to do the actual
/*
 * do_fp_load() - load nb bytes at ea into FP register rn via *func
 * (do_lfs/do_lfd from ldstfp.S). For a well-aligned ea the asm helper
 * accesses memory directly; otherwise the data is staged through a
 * local union buffer with read_mem_unaligned() and the helper is given
 * the buffer's address. A 64-bit read on a 32-bit kernel is done as two
 * 4-byte reads.
 */
342 static int __kprobes do_fp_load(int rn, int (*func)(int, unsigned long),
343 				unsigned long ea, int nb,
344 				struct pt_regs *regs)
351 #ifdef __BIG_ENDIAN__
355 #ifdef __LITTLE_ENDIAN__
363 	if (!address_ok(regs, ea, nb))
366 		return (*func)(rn, ea);	/* aligned: access memory directly */
367 	ptr = (unsigned long) &data.ul;
368 	if (sizeof(unsigned long) == 8 || nb == 4) {
369 		err = read_mem_unaligned(&data.ul[0], ea, nb, regs);
371 			ptr = (unsigned long)&(data.single.word);
373 		/* reading a double on 32-bit */
374 		err = read_mem_unaligned(&data.ul[0], ea, 4, regs);
376 			err = read_mem_unaligned(&data.ul[1], ea + 4, 4, regs);
380 	return (*func)(rn, ptr);
/*
 * do_fp_store() - store nb bytes of FP register rn to ea via *func
 * (do_stfs/do_stfd from ldstfp.S). Mirror of do_fp_load(): aligned
 * stores go straight to memory; otherwise *func writes into a local
 * union buffer which is then flushed with write_mem_unaligned(),
 * in two 4-byte halves for a double on a 32-bit kernel.
 */
383 static int __kprobes do_fp_store(int rn, int (*func)(int, unsigned long),
384 				 unsigned long ea, int nb,
385 				 struct pt_regs *regs)
392 #ifdef __BIG_ENDIAN__
396 #ifdef __LITTLE_ENDIAN__
404 	if (!address_ok(regs, ea, nb))
407 		return (*func)(rn, ea);	/* aligned: access memory directly */
408 	ptr = (unsigned long) &data.ul[0];
409 	if (sizeof(unsigned long) == 8 || nb == 4) {
411 			ptr = (unsigned long)&(data.single.word);
412 		err = (*func)(rn, ptr);
415 		err = write_mem_unaligned(data.ul[0], ea, nb, regs);
417 		/* writing a double on 32-bit */
418 		err = (*func)(rn, ptr);
421 		err = write_mem_unaligned(data.ul[0], ea, 4, regs);
423 			err = write_mem_unaligned(data.ul[1], ea + 4, 4, regs);
429 #ifdef CONFIG_ALTIVEC
430 /* For Altivec/VMX, no need to worry about alignment */
/*
 * do_vec_load() - Altivec/VMX load via *func (do_lvx); hardware ignores
 * the low 4 address bits, so only the 16-byte-aligned block is checked.
 */
431 static int __kprobes do_vec_load(int rn, int (*func)(int, unsigned long),
432 				 unsigned long ea, struct pt_regs *regs)
434 	if (!address_ok(regs, ea & ~0xfUL, 16))
436 	return (*func)(rn, ea);
/*
 * do_vec_store() - Altivec/VMX store via *func (do_stvx); same implicit
 * 16-byte alignment handling as do_vec_load().
 */
439 static int __kprobes do_vec_store(int rn, int (*func)(int, unsigned long),
440 				  unsigned long ea, struct pt_regs *regs)
442 	if (!address_ok(regs, ea & ~0xfUL, 16))
444 	return (*func)(rn, ea);
446 #endif /* CONFIG_ALTIVEC */
/*
 * do_vsx_load() - 16-byte VSX load via *func (do_lxvd2x). Aligned
 * accesses (branch elided here) go direct; otherwise the two 8-byte
 * halves are staged through val[] with read_mem_unaligned() and the
 * helper reads from the buffer.
 */
449 static int __kprobes do_vsx_load(int rn, int (*func)(int, unsigned long),
450 				 unsigned long ea, struct pt_regs *regs)
453 	unsigned long val[2];
455 	if (!address_ok(regs, ea, 16))
458 		return (*func)(rn, ea);
459 	err = read_mem_unaligned(&val[0], ea, 8, regs);
461 		err = read_mem_unaligned(&val[1], ea + 8, 8, regs);
463 		err = (*func)(rn, (unsigned long) &val[0]);
/*
 * do_vsx_store() - 16-byte VSX store via *func (do_stxvd2x); mirror of
 * do_vsx_load(): the helper fills val[] first, then the two 8-byte
 * halves are flushed with write_mem_unaligned().
 */
467 static int __kprobes do_vsx_store(int rn, int (*func)(int, unsigned long),
468 				  unsigned long ea, struct pt_regs *regs)
471 	unsigned long val[2];
473 	if (!address_ok(regs, ea, 16))
476 		return (*func)(rn, ea);
477 	err = (*func)(rn, (unsigned long) &val[0]);
480 	err = write_mem_unaligned(val[0], ea, 8, regs);
482 		err = write_mem_unaligned(val[1], ea + 8, 8, regs);
485 #endif /* CONFIG_VSX */
/*
 * __put_user_asmx() - inline-asm user store using an arbitrary indexed
 * store opcode (e.g. stwcx./stdcx.), with an __ex_table fixup entry so a
 * fault sets err = -EFAULT instead of oopsing; cr captures the condition
 * register so the caller can see whether a store-conditional succeeded.
 */
487 #define __put_user_asmx(x, addr, err, op, cr)		\
488 	__asm__ __volatile__(				\
489 		"1:	" op " %2,0,%3\n"		\
492 		".section .fixup,\"ax\"\n"		\
496 		".section __ex_table,\"a\"\n"		\
497 			PPC_LONG_ALIGN "\n"		\
500 		: "=r" (err), "=r" (cr)			\
501 		: "r" (x), "r" (addr), "i" (-EFAULT), "0" (err))
/*
 * __get_user_asmx() - load counterpart of the above (e.g. lwarx/ldarx),
 * with the same exception-table fault handling.
 */
503 #define __get_user_asmx(x, addr, err, op)		\
504 	__asm__ __volatile__(				\
505 		"1:	"op" %1,0,%2\n"			\
507 		".section .fixup,\"ax\"\n"		\
511 		".section __ex_table,\"a\"\n"		\
512 			PPC_LONG_ALIGN "\n"		\
515 		: "=r" (err), "=r" (x)			\
516 		: "r" (addr), "i" (-EFAULT), "0" (err))
/*
 * __cacheop_user_asmx() - cache maintenance op (dcbst/dcbf/icbi) on a
 * user address, again faulting into err = -EFAULT via __ex_table.
 */
518 #define __cacheop_user_asmx(addr, err, op)		\
519 	__asm__ __volatile__(				\
522 		".section .fixup,\"ax\"\n"		\
526 		".section __ex_table,\"a\"\n"		\
527 			PPC_LONG_ALIGN "\n"		\
531 		: "r" (addr), "i" (-EFAULT), "0" (err))
/*
 * set_cr0() - emulate the Rc=1 update of CR field 0 from the signed
 * result in GPR[rd]: LT/GT/EQ from the comparison with zero, SO copied
 * from XER[SO] (shifted into bit 28). In 32-bit mode the value is
 * truncated before comparison (truncation line elided in this view).
 */
533 static void __kprobes set_cr0(struct pt_regs *regs, int rd)
535 	long val = regs->gpr[rd];
537 	regs->ccr = (regs->ccr & 0x0fffffff) | ((regs->xer >> 3) & 0x10000000);
539 	if (!(regs->msr & MSR_64BIT))
543 		regs->ccr |= 0x80000000;	/* LT */
545 		regs->ccr |= 0x40000000;	/* GT */
547 		regs->ccr |= 0x20000000;	/* EQ */
/*
 * add_with_carry() - emulate addc/adde/subfc-family arithmetic:
 * GPR[rd] = val1 + val2 (+1 when carry_in, line elided here), and set
 * or clear XER[CA] from unsigned overflow. Carry detection uses 32-bit
 * truncated operands in 32-bit mode.
 */
550 static void __kprobes add_with_carry(struct pt_regs *regs, int rd,
551 				     unsigned long val1, unsigned long val2,
552 				     unsigned long carry_in)
554 	unsigned long val = val1 + val2;
560 	if (!(regs->msr & MSR_64BIT)) {
561 		val = (unsigned int) val;
562 		val1 = (unsigned int) val1;
565 	if (val < val1 || (carry_in && val == val1))
568 		regs->xer &= ~XER_CA;
/*
 * do_cmp_signed() - emulate cmp/cmpi: build a 4-bit LT/GT/EQ/SO result
 * from the signed comparison of v1 and v2 (comparison branches elided
 * in this view) and write it into CR field crfld.
 */
571 static void __kprobes do_cmp_signed(struct pt_regs *regs, long v1, long v2,
574 	unsigned int crval, shift;
576 	crval = (regs->xer >> 31) & 1;		/* get SO bit */
583 	shift = (7 - crfld) * 4;
584 	regs->ccr = (regs->ccr & ~(0xf << shift)) | (crval << shift);
/*
 * do_cmp_unsigned() - emulate cmpl/cmpli: unsigned counterpart of
 * do_cmp_signed(), writing LT/GT/EQ/SO into CR field crfld.
 */
587 static void __kprobes do_cmp_unsigned(struct pt_regs *regs, unsigned long v1,
588 				      unsigned long v2, int crfld)
590 	unsigned int crval, shift;
592 	crval = (regs->xer >> 31) & 1;		/* get SO bit */
599 	shift = (7 - crfld) * 4;
600 	regs->ccr = (regs->ccr & ~(0xf << shift)) | (crval << shift);
/*
 * trap_compare() - build the TO-field condition mask for tw/td trap
 * instructions by comparing v1 and v2 both signed (lines elided here)
 * and unsigned; the caller ANDs the result with the instruction's TO
 * bits to decide whether the trap fires.
 */
603 static int __kprobes trap_compare(long v1, long v2)
613 	if ((unsigned long)v1 < (unsigned long)v2)
615 	else if ((unsigned long)v1 > (unsigned long)v2)
621 * Elements of 32-bit rotate and mask instructions.
/* Build the 32-bit rotate-and-mask (mb..me, wrapping) for rlwinm etc. */
623 #define MASK32(mb, me)		((0xffffffffUL >> (mb)) + \
624 				 ((signed long)-0x80000000L >> (me)) + ((me) >= (mb)))
/* 64-bit masks for the rldic* family: left-cleared, right-cleared, both. */
626 #define MASK64_L(mb)		(~0UL >> (mb))
627 #define MASK64_R(me)		((signed long)-0x8000000000000000L >> (me))
628 #define MASK64(mb, me)		(MASK64_L(mb) + MASK64_R(me) + ((me) >= (mb)))
/* DATA32: duplicate the low 32 bits into the high half (64-bit build). */
629 #define DATA32(x)	(((x) & 0xffffffffUL) | (((x) & 0xffffffffUL) << 32))
/* 32-bit build (the surrounding #else is elided): identity. */
631 #define DATA32(x)	(x)
/* Rotate left by n, guarding the UB shift when n == 0. */
633 #define ROTATE(x, n)	((n) ? (((x) << (n)) | ((x) >> (8 * sizeof(long) - (n)))) : (x))
636 * Decode an instruction, and execute it if that can be done just by
637 * modifying *regs (i.e. integer arithmetic and logical instructions,
638 * branches, and barrier instructions).
639 * Returns 1 if the instruction has been executed, or 0 if not.
640 * Sets *op to indicate what the instruction does.
/*
 * analyse_instr() - decode one instruction and either execute it in
 * place by updating *regs (branches, CR/XER/SPR moves, integer
 * arithmetic, logical, rotate and shift instructions, barriers) or
 * describe it in *op (type, ea, reg, val) for the caller to emulate
 * (loads, stores, cache ops, privileged state changes, syscalls).
 * Returns 1 if the instruction has been executed, 0 if *op was set.
 *
 * Fix: case 278 (dcbt) previously emitted MKOP(CACHEOP, DCBTST, 0),
 * identical to case 246 (dcbtst); that made emulate_step()'s DCBT /
 * prefetch() branch unreachable and emulated dcbt as prefetch-for-write.
 * It now emits DCBT.
 */
642 int __kprobes analyse_instr(struct instruction_op *op, struct pt_regs *regs,
645 	unsigned int opcode, ra, rb, rd, spr, u;
646 	unsigned long int imm;
647 	unsigned long int val, val2;
648 	unsigned int mb, me, sh;
653 	opcode = instr >> 26;
657 		imm = (signed short)(instr & 0xfffc);
658 		if ((instr & 2) == 0)
661 		regs->nip = truncate_if_32bit(regs->msr, regs->nip);
663 			regs->link = regs->nip;
664 		if (branch_taken(instr, regs))
665 			regs->nip = truncate_if_32bit(regs->msr, imm);
669 		if ((instr & 0xfe2) == 2)
677 		imm = instr & 0x03fffffc;
678 		if (imm & 0x02000000)
680 		if ((instr & 2) == 0)
683 			regs->link = truncate_if_32bit(regs->msr, regs->nip + 4);
684 		imm = truncate_if_32bit(regs->msr, imm);
688 		switch ((instr >> 1) & 0x3ff) {
690 			rd = 7 - ((instr >> 23) & 0x7);
691 			ra = 7 - ((instr >> 18) & 0x7);
694 			val = (regs->ccr >> ra) & 0xf;
695 			regs->ccr = (regs->ccr & ~(0xfUL << rd)) | (val << rd);
699 		case 528:	/* bcctr */
701 			imm = (instr & 0x400)? regs->ctr: regs->link;
702 			regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4);
703 			imm = truncate_if_32bit(regs->msr, imm);
705 				regs->link = regs->nip;
706 			if (branch_taken(instr, regs))
710 		case 18:	/* rfid, scary */
711 			if (regs->msr & MSR_PR)
716 		case 150:	/* isync */
			/* CR logical ops: extract source CR bits, index into the op's truth table */
722 		case 129:	/* crandc */
723 		case 193:	/* crxor */
724 		case 225:	/* crnand */
725 		case 257:	/* crand */
726 		case 289:	/* creqv */
727 		case 417:	/* crorc */
729 			ra = (instr >> 16) & 0x1f;
730 			rb = (instr >> 11) & 0x1f;
731 			rd = (instr >> 21) & 0x1f;
732 			ra = (regs->ccr >> (31 - ra)) & 1;
733 			rb = (regs->ccr >> (31 - rb)) & 1;
734 			val = (instr >> (6 + ra * 2 + rb)) & 1;
735 			regs->ccr = (regs->ccr & ~(1UL << (31 - rd))) |
741 		switch ((instr >> 1) & 0x3ff) {
745 			switch ((instr >> 21) & 3) {
747 				asm volatile("lwsync" : : : "memory");
749 			case 2:		/* ptesync */
750 				asm volatile("ptesync" : : : "memory");
757 		case 854:	/* eieio */
765 	/* Following cases refer to regs->gpr[], so we need all regs */
766 	if (!FULL_REGS(regs))
769 	rd = (instr >> 21) & 0x1f;
770 	ra = (instr >> 16) & 0x1f;
771 	rb = (instr >> 11) & 0x1f;
776 		if (rd & trap_compare(regs->gpr[ra], (short) instr))
781 		if (rd & trap_compare((int)regs->gpr[ra], (short) instr))
786 		regs->gpr[rd] = regs->gpr[ra] * (short) instr;
791 		add_with_carry(regs, rd, ~regs->gpr[ra], imm, 1);
795 		imm = (unsigned short) instr;
799 			val = (unsigned int) val;
801 		do_cmp_unsigned(regs, val, imm, rd >> 2);
811 		do_cmp_signed(regs, val, imm, rd >> 2);
816 		add_with_carry(regs, rd, regs->gpr[ra], imm, 0);
819 	case 13:	/* addic. */
821 		add_with_carry(regs, rd, regs->gpr[ra], imm, 0);
828 			imm += regs->gpr[ra];
833 		imm = ((short) instr) << 16;
835 			imm += regs->gpr[ra];
839 	case 20:	/* rlwimi */
840 		mb = (instr >> 6) & 0x1f;
841 		me = (instr >> 1) & 0x1f;
842 		val = DATA32(regs->gpr[rd]);
843 		imm = MASK32(mb, me);
844 		regs->gpr[ra] = (regs->gpr[ra] & ~imm) | (ROTATE(val, rb) & imm);
847 	case 21:	/* rlwinm */
848 		mb = (instr >> 6) & 0x1f;
849 		me = (instr >> 1) & 0x1f;
850 		val = DATA32(regs->gpr[rd]);
851 		regs->gpr[ra] = ROTATE(val, rb) & MASK32(mb, me);
855 		mb = (instr >> 6) & 0x1f;
856 		me = (instr >> 1) & 0x1f;
857 		rb = regs->gpr[rb] & 0x1f;
858 		val = DATA32(regs->gpr[rd]);
859 		regs->gpr[ra] = ROTATE(val, rb) & MASK32(mb, me);
863 		imm = (unsigned short) instr;
864 		regs->gpr[ra] = regs->gpr[rd] | imm;
868 		imm = (unsigned short) instr;
869 		regs->gpr[ra] = regs->gpr[rd] | (imm << 16);
873 		imm = (unsigned short) instr;
874 		regs->gpr[ra] = regs->gpr[rd] ^ imm;
878 		imm = (unsigned short) instr;
879 		regs->gpr[ra] = regs->gpr[rd] ^ (imm << 16);
883 		imm = (unsigned short) instr;
884 		regs->gpr[ra] = regs->gpr[rd] & imm;
888 	case 29:	/* andis. */
889 		imm = (unsigned short) instr;
890 		regs->gpr[ra] = regs->gpr[rd] & (imm << 16);
		/* rldic* family: MB and SH each get a 6th bit from elsewhere in instr */
896 		mb = ((instr >> 6) & 0x1f) | (instr & 0x20);
898 		if ((instr & 0x10) == 0) {
899 			sh = rb | ((instr & 2) << 4);
900 			val = ROTATE(val, sh);
901 			switch ((instr >> 2) & 3) {
903 				regs->gpr[ra] = val & MASK64_L(mb);
906 				regs->gpr[ra] = val & MASK64_R(mb);
909 				regs->gpr[ra] = val & MASK64(mb, 63 - sh);
912 				imm = MASK64(mb, 63 - sh);
913 				regs->gpr[ra] = (regs->gpr[ra] & ~imm) |
918 			sh = regs->gpr[rb] & 0x3f;
919 			val = ROTATE(val, sh);
920 			switch ((instr >> 1) & 7) {
922 				regs->gpr[ra] = val & MASK64_L(mb);
925 				regs->gpr[ra] = val & MASK64_R(mb);
932 		switch ((instr >> 1) & 0x3ff) {
935 			    (rd & trap_compare((int)regs->gpr[ra],
936 					       (int)regs->gpr[rb])))
941 			if (rd & trap_compare(regs->gpr[ra], regs->gpr[rb]))
946 			if (regs->msr & MSR_PR)
951 		case 146:	/* mtmsr */
952 			if (regs->msr & MSR_PR)
956 			op->val = 0xffffffff & ~(MSR_ME | MSR_LE);
959 		case 178:	/* mtmsrd */
960 			if (regs->msr & MSR_PR)
964 			/* only MSR_EE and MSR_RI get changed if bit 15 set */
965 			/* mtmsrd doesn't change MSR_HV, MSR_ME or MSR_LE */
966 			imm = (instr & 0x10000)? 0x8002: 0xefffffffffffeffeUL;
972 			if ((instr >> 20) & 1) {
974 				for (sh = 0; sh < 8; ++sh) {
975 					if (instr & (0x80000 >> sh)) {
976 						regs->gpr[rd] = regs->ccr & imm;
985 			regs->gpr[rd] = regs->ccr;
986 			regs->gpr[rd] &= 0xffffffffUL;
989 		case 144:	/* mtcrf */
992 			for (sh = 0; sh < 8; ++sh) {
993 				if (instr & (0x80000 >> sh))
994 					regs->ccr = (regs->ccr & ~imm) |
1000 		case 339:	/* mfspr */
1001 			spr = ((instr >> 16) & 0x1f) | ((instr >> 6) & 0x3e0);
1003 			case SPRN_XER:	/* mfxer */
1004 				regs->gpr[rd] = regs->xer;
1005 				regs->gpr[rd] &= 0xffffffffUL;
1007 			case SPRN_LR:	/* mflr */
1008 				regs->gpr[rd] = regs->link;
1010 			case SPRN_CTR:	/* mfctr */
1011 				regs->gpr[rd] = regs->ctr;
1021 		case 467:	/* mtspr */
1022 			spr = ((instr >> 16) & 0x1f) | ((instr >> 6) & 0x3e0);
1024 			case SPRN_XER:	/* mtxer */
1025 				regs->xer = (regs->gpr[rd] & 0xffffffffUL);
1027 			case SPRN_LR:	/* mtlr */
1028 				regs->link = regs->gpr[rd];
1030 			case SPRN_CTR:	/* mtctr */
1031 				regs->ctr = regs->gpr[rd];
1035 				op->val = regs->gpr[rd];
1042 		 * Compare instructions
1045 			val = regs->gpr[ra];
1046 			val2 = regs->gpr[rb];
1047 #ifdef __powerpc64__
1048 			if ((rd & 1) == 0) {
1049 				/* word (32-bit) compare */
1054 			do_cmp_signed(regs, val, val2, rd >> 2);
1058 			val = regs->gpr[ra];
1059 			val2 = regs->gpr[rb];
1060 #ifdef __powerpc64__
1061 			if ((rd & 1) == 0) {
1062 				/* word (32-bit) compare */
1063 				val = (unsigned int) val;
1064 				val2 = (unsigned int) val2;
1067 			do_cmp_unsigned(regs, val, val2, rd >> 2);
1071  * Arithmetic instructions
1074 			add_with_carry(regs, rd, ~regs->gpr[ra],
1077 #ifdef __powerpc64__
1078 		case 9:	/* mulhdu */
1079 			asm("mulhdu %0,%1,%2" : "=r" (regs->gpr[rd]) :
1080 			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1084 			add_with_carry(regs, rd, regs->gpr[ra],
1088 		case 11:	/* mulhwu */
1089 			asm("mulhwu %0,%1,%2" : "=r" (regs->gpr[rd]) :
1090 			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1094 			regs->gpr[rd] = regs->gpr[rb] - regs->gpr[ra];
1096 #ifdef __powerpc64__
1097 		case 73:	/* mulhd */
1098 			asm("mulhd %0,%1,%2" : "=r" (regs->gpr[rd]) :
1099 			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1102 		case 75:	/* mulhw */
1103 			asm("mulhw %0,%1,%2" : "=r" (regs->gpr[rd]) :
1104 			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1108 			regs->gpr[rd] = -regs->gpr[ra];
1111 		case 136:	/* subfe */
1112 			add_with_carry(regs, rd, ~regs->gpr[ra], regs->gpr[rb],
1113 				       regs->xer & XER_CA);
1116 		case 138:	/* adde */
1117 			add_with_carry(regs, rd, regs->gpr[ra], regs->gpr[rb],
1118 				       regs->xer & XER_CA);
1121 		case 200:	/* subfze */
1122 			add_with_carry(regs, rd, ~regs->gpr[ra], 0L,
1123 				       regs->xer & XER_CA);
1126 		case 202:	/* addze */
1127 			add_with_carry(regs, rd, regs->gpr[ra], 0L,
1128 				       regs->xer & XER_CA);
1131 		case 232:	/* subfme */
1132 			add_with_carry(regs, rd, ~regs->gpr[ra], -1L,
1133 				       regs->xer & XER_CA);
1135 #ifdef __powerpc64__
1136 		case 233:	/* mulld */
1137 			regs->gpr[rd] = regs->gpr[ra] * regs->gpr[rb];
1140 		case 234:	/* addme */
1141 			add_with_carry(regs, rd, regs->gpr[ra], -1L,
1142 				       regs->xer & XER_CA);
1145 		case 235:	/* mullw */
1146 			regs->gpr[rd] = (unsigned int) regs->gpr[ra] *
1147 				(unsigned int) regs->gpr[rb];
1151 			regs->gpr[rd] = regs->gpr[ra] + regs->gpr[rb];
1153 #ifdef __powerpc64__
1154 		case 457:	/* divdu */
1155 			regs->gpr[rd] = regs->gpr[ra] / regs->gpr[rb];
1158 		case 459:	/* divwu */
1159 			regs->gpr[rd] = (unsigned int) regs->gpr[ra] /
1160 				(unsigned int) regs->gpr[rb];
1162 #ifdef __powerpc64__
1163 		case 489:	/* divd */
1164 			regs->gpr[rd] = (long int) regs->gpr[ra] /
1165 				(long int) regs->gpr[rb];
1168 		case 491:	/* divw */
1169 			regs->gpr[rd] = (int) regs->gpr[ra] /
1170 				(int) regs->gpr[rb];
1175  * Logical instructions
1177 		case 26:	/* cntlzw */
1178 			asm("cntlzw %0,%1" : "=r" (regs->gpr[ra]) :
1179 			    "r" (regs->gpr[rd]));
1181 #ifdef __powerpc64__
1182 		case 58:	/* cntlzd */
1183 			asm("cntlzd %0,%1" : "=r" (regs->gpr[ra]) :
1184 			    "r" (regs->gpr[rd]));
1188 			regs->gpr[ra] = regs->gpr[rd] & regs->gpr[rb];
1192 			regs->gpr[ra] = regs->gpr[rd] & ~regs->gpr[rb];
1196 			regs->gpr[ra] = ~(regs->gpr[rd] | regs->gpr[rb]);
1200 			regs->gpr[ra] = ~(regs->gpr[rd] ^ regs->gpr[rb]);
1204 			regs->gpr[ra] = regs->gpr[rd] ^ regs->gpr[rb];
1208 			regs->gpr[ra] = regs->gpr[rd] | ~regs->gpr[rb];
1212 			regs->gpr[ra] = regs->gpr[rd] | regs->gpr[rb];
1215 		case 476:	/* nand */
1216 			regs->gpr[ra] = ~(regs->gpr[rd] & regs->gpr[rb]);
1219 		case 922:	/* extsh */
1220 			regs->gpr[ra] = (signed short) regs->gpr[rd];
1223 		case 954:	/* extsb */
1224 			regs->gpr[ra] = (signed char) regs->gpr[rd];
1226 #ifdef __powerpc64__
1227 		case 986:	/* extsw */
1228 			regs->gpr[ra] = (signed int) regs->gpr[rd];
1233  * Shift instructions
1236 			sh = regs->gpr[rb] & 0x3f;
1238 				regs->gpr[ra] = (regs->gpr[rd] << sh) & 0xffffffffUL;
1244 			sh = regs->gpr[rb] & 0x3f;
1246 				regs->gpr[ra] = (regs->gpr[rd] & 0xffffffffUL) >> sh;
1251 		case 792:	/* sraw */
1252 			sh = regs->gpr[rb] & 0x3f;
1253 			ival = (signed int) regs->gpr[rd];
1254 			regs->gpr[ra] = ival >> (sh < 32 ? sh : 31);
1255 			if (ival < 0 && (sh >= 32 || (ival & ((1ul << sh) - 1)) != 0))
1256 				regs->xer |= XER_CA;
1258 				regs->xer &= ~XER_CA;
1261 		case 824:	/* srawi */
1263 			ival = (signed int) regs->gpr[rd];
1264 			regs->gpr[ra] = ival >> sh;
1265 			if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
1266 				regs->xer |= XER_CA;
1268 				regs->xer &= ~XER_CA;
1271 #ifdef __powerpc64__
1273 			sh = regs->gpr[rb] & 0x7f;
1275 				regs->gpr[ra] = regs->gpr[rd] << sh;
1281 			sh = regs->gpr[rb] & 0x7f;
1283 				regs->gpr[ra] = regs->gpr[rd] >> sh;
1288 		case 794:	/* srad */
1289 			sh = regs->gpr[rb] & 0x7f;
1290 			ival = (signed long int) regs->gpr[rd];
1291 			regs->gpr[ra] = ival >> (sh < 64 ? sh : 63);
1292 			if (ival < 0 && (sh >= 64 || (ival & ((1ul << sh) - 1)) != 0))
1293 				regs->xer |= XER_CA;
1295 				regs->xer &= ~XER_CA;
1298 		case 826:	/* sradi with sh_5 = 0 */
1299 		case 827:	/* sradi with sh_5 = 1 */
1300 			sh = rb | ((instr & 2) << 4);
1301 			ival = (signed long int) regs->gpr[rd];
1302 			regs->gpr[ra] = ival >> sh;
1303 			if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
1304 				regs->xer |= XER_CA;
1306 				regs->xer &= ~XER_CA;
1308 #endif /* __powerpc64__ */
1311  * Cache instructions
1313 		case 54:	/* dcbst */
1314 			op->type = MKOP(CACHEOP, DCBST, 0);
1315 			op->ea = xform_ea(instr, regs);
1319 			op->type = MKOP(CACHEOP, DCBF, 0);
1320 			op->ea = xform_ea(instr, regs);
1323 		case 246:	/* dcbtst */
1324 			op->type = MKOP(CACHEOP, DCBTST, 0);
1325 			op->ea = xform_ea(instr, regs);
1329 		case 278:	/* dcbt */
			/* was DCBTST, which aliased dcbt to dcbtst and left
			 * emulate_step()'s DCBT/prefetch() branch unreachable */
1330 			op->type = MKOP(CACHEOP, DCBT, 0);
1331 			op->ea = xform_ea(instr, regs);
1335 		case 982:	/* icbi */
1336 			op->type = MKOP(CACHEOP, ICBI, 0);
1337 			op->ea = xform_ea(instr, regs);
1347 	op->update_reg = ra;
1349 	op->val = regs->gpr[rd];
1350 	u = (instr >> 20) & UPDATE;
1355 		op->ea = xform_ea(instr, regs);
1356 		switch ((instr >> 1) & 0x3ff) {
1357 		case 20:	/* lwarx */
1358 			op->type = MKOP(LARX, 0, 4);
1361 		case 150:	/* stwcx. */
1362 			op->type = MKOP(STCX, 0, 4);
1365 #ifdef __powerpc64__
1366 		case 84:	/* ldarx */
1367 			op->type = MKOP(LARX, 0, 8);
1370 		case 214:	/* stdcx. */
1371 			op->type = MKOP(STCX, 0, 8);
1376 			op->type = MKOP(LOAD, u, 8);
1381 		case 55:	/* lwzux */
1382 			op->type = MKOP(LOAD, u, 4);
1386 		case 119:	/* lbzux */
1387 			op->type = MKOP(LOAD, u, 1);
1390 #ifdef CONFIG_ALTIVEC
1392 		case 359:	/* lvxl */
1393 			if (!(regs->msr & MSR_VEC))
1395 			op->type = MKOP(LOAD_VMX, 0, 16);
1398 		case 231:	/* stvx */
1399 		case 487:	/* stvxl */
1400 			if (!(regs->msr & MSR_VEC))
1402 			op->type = MKOP(STORE_VMX, 0, 16);
1404 #endif /* CONFIG_ALTIVEC */
1406 #ifdef __powerpc64__
1407 		case 149:	/* stdx */
1408 		case 181:	/* stdux */
1409 			op->type = MKOP(STORE, u, 8);
1413 		case 151:	/* stwx */
1414 		case 183:	/* stwux */
1415 			op->type = MKOP(STORE, u, 4);
1418 		case 215:	/* stbx */
1419 		case 247:	/* stbux */
1420 			op->type = MKOP(STORE, u, 1);
1423 		case 279:	/* lhzx */
1424 		case 311:	/* lhzux */
1425 			op->type = MKOP(LOAD, u, 2);
1428 #ifdef __powerpc64__
1429 		case 341:	/* lwax */
1430 		case 373:	/* lwaux */
1431 			op->type = MKOP(LOAD, SIGNEXT | u, 4);
1435 		case 343:	/* lhax */
1436 		case 375:	/* lhaux */
1437 			op->type = MKOP(LOAD, SIGNEXT | u, 2);
1440 		case 407:	/* sthx */
1441 		case 439:	/* sthux */
1442 			op->type = MKOP(STORE, u, 2);
1445 #ifdef __powerpc64__
1446 		case 532:	/* ldbrx */
1447 			op->type = MKOP(LOAD, BYTEREV, 8);
1451 		case 533:	/* lswx */
1452 			op->type = MKOP(LOAD_MULTI, 0, regs->xer & 0x7f);
1455 		case 534:	/* lwbrx */
1456 			op->type = MKOP(LOAD, BYTEREV, 4);
1459 		case 597:	/* lswi */
1461 				rb = 32;	/* # bytes to load */
1462 			op->type = MKOP(LOAD_MULTI, 0, rb);
1465 				op->ea = truncate_if_32bit(regs->msr,
1469 #ifdef CONFIG_PPC_FPU
1470 		case 535:	/* lfsx */
1471 		case 567:	/* lfsux */
1472 			if (!(regs->msr & MSR_FP))
1474 			op->type = MKOP(LOAD_FP, u, 4);
1477 		case 599:	/* lfdx */
1478 		case 631:	/* lfdux */
1479 			if (!(regs->msr & MSR_FP))
1481 			op->type = MKOP(LOAD_FP, u, 8);
1484 		case 663:	/* stfsx */
1485 		case 695:	/* stfsux */
1486 			if (!(regs->msr & MSR_FP))
1488 			op->type = MKOP(STORE_FP, u, 4);
1491 		case 727:	/* stfdx */
1492 		case 759:	/* stfdux */
1493 			if (!(regs->msr & MSR_FP))
1495 			op->type = MKOP(STORE_FP, u, 8);
1499 #ifdef __powerpc64__
1500 		case 660:	/* stdbrx */
1501 			op->type = MKOP(STORE, BYTEREV, 8);
1502 			op->val = byterev_8(regs->gpr[rd]);
1506 		case 661:	/* stswx */
1507 			op->type = MKOP(STORE_MULTI, 0, regs->xer & 0x7f);
1510 		case 662:	/* stwbrx */
1511 			op->type = MKOP(STORE, BYTEREV, 4);
1512 			op->val = byterev_4(regs->gpr[rd]);
1517 				rb = 32;	/* # bytes to store */
1518 			op->type = MKOP(STORE_MULTI, 0, rb);
1521 				op->ea = truncate_if_32bit(regs->msr,
1525 		case 790:	/* lhbrx */
1526 			op->type = MKOP(LOAD, BYTEREV, 2);
1529 		case 918:	/* sthbrx */
1530 			op->type = MKOP(STORE, BYTEREV, 2);
1531 			op->val = byterev_2(regs->gpr[rd]);
1535 		case 844:	/* lxvd2x */
1536 		case 876:	/* lxvd2ux */
1537 			if (!(regs->msr & MSR_VSX))
1539 			op->reg = rd | ((instr & 1) << 5);
1540 			op->type = MKOP(LOAD_VSX, u, 16);
1543 		case 972:	/* stxvd2x */
1544 		case 1004:	/* stxvd2ux */
1545 			if (!(regs->msr & MSR_VSX))
1547 			op->reg = rd | ((instr & 1) << 5);
1548 			op->type = MKOP(STORE_VSX, u, 16);
1551 #endif /* CONFIG_VSX */
1557 		op->type = MKOP(LOAD, u, 4);
1558 		op->ea = dform_ea(instr, regs);
1563 		op->type = MKOP(LOAD, u, 1);
1564 		op->ea = dform_ea(instr, regs);
1569 		op->type = MKOP(STORE, u, 4);
1570 		op->ea = dform_ea(instr, regs);
1575 		op->type = MKOP(STORE, u, 1);
1576 		op->ea = dform_ea(instr, regs);
1581 		op->type = MKOP(LOAD, u, 2);
1582 		op->ea = dform_ea(instr, regs);
1587 		op->type = MKOP(LOAD, SIGNEXT | u, 2);
1588 		op->ea = dform_ea(instr, regs);
1593 		op->type = MKOP(STORE, u, 2);
1594 		op->ea = dform_ea(instr, regs);
1599 			break;		/* invalid form, ra in range to load */
1600 		op->type = MKOP(LOAD_MULTI, 0, 4 * (32 - rd));
1601 		op->ea = dform_ea(instr, regs);
1605 		op->type = MKOP(STORE_MULTI, 0, 4 * (32 - rd));
1606 		op->ea = dform_ea(instr, regs);
1609 #ifdef CONFIG_PPC_FPU
1612 		if (!(regs->msr & MSR_FP))
1614 		op->type = MKOP(LOAD_FP, u, 4);
1615 		op->ea = dform_ea(instr, regs);
1620 		if (!(regs->msr & MSR_FP))
1622 		op->type = MKOP(LOAD_FP, u, 8);
1623 		op->ea = dform_ea(instr, regs);
1627 	case 53:	/* stfsu */
1628 		if (!(regs->msr & MSR_FP))
1630 		op->type = MKOP(STORE_FP, u, 4);
1631 		op->ea = dform_ea(instr, regs);
1635 	case 55:	/* stfdu */
1636 		if (!(regs->msr & MSR_FP))
1638 		op->type = MKOP(STORE_FP, u, 8);
1639 		op->ea = dform_ea(instr, regs);
1643 #ifdef __powerpc64__
1644 	case 58:	/* ld[u], lwa */
1645 		op->ea = dsform_ea(instr, regs);
1646 		switch (instr & 3) {
1648 			op->type = MKOP(LOAD, 0, 8);
1651 			op->type = MKOP(LOAD, UPDATE, 8);
1654 			op->type = MKOP(LOAD, SIGNEXT, 4);
1659 	case 62:	/* std[u] */
1660 		op->ea = dsform_ea(instr, regs);
1661 		switch (instr & 3) {
1663 			op->type = MKOP(STORE, 0, 8);
1666 			op->type = MKOP(STORE, UPDATE, 8);
1670 #endif /* __powerpc64__ */
1685 	regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4);
1689 	op->type = INTERRUPT | 0x700;
1690 	op->val = SRR1_PROGPRIV;
1694 	op->type = INTERRUPT | 0x700;
1695 	op->val = SRR1_PROGTRAP;
1698 #ifdef CONFIG_PPC_FPU
1700 	op->type = INTERRUPT | 0x800;
1704 #ifdef CONFIG_ALTIVEC
1706 	op->type = INTERRUPT | 0xf20;
1712 	op->type = INTERRUPT | 0xf40;
1716 EXPORT_SYMBOL_GPL(analyse_instr);
1719 * For PPC32 we always use stwu with r1 to change the stack pointer.
1720 * So this emulated store may corrupt the exception frame, now we
1721 * have to provide the exception frame trampoline, which is pushed
1722 * below the kprobed function stack. So we only update gpr[1] but
1723 * don't emulate the real store operation. We will do real store
1724 * operation safely in exception return code by checking this flag.
/*
 * handle_stack_update() - PPC32 stwu-on-r1 special case: instead of
 * performing the store (which would clobber the kprobe exception
 * frame), refuse on imminent kernel-stack overflow and otherwise set
 * TIF_EMULATE_STACK_STORE so the exception-return code performs the
 * real store safely.
 */
1726 static __kprobes int handle_stack_update(unsigned long ea, struct pt_regs *regs)
1730 	 * Check if we will touch kernel stack overflow
1732 	if (ea - STACK_INT_FRAME_SIZE <= current->thread.ksp_limit) {
1733 		printk(KERN_CRIT "Can't kprobe this since kernel stack would overflow.\n");
1736 #endif /* CONFIG_PPC32 */
1738 	 * Check if we already set since that means we'll
1739 	 * lose the previous value.
1741 	WARN_ON(test_thread_flag(TIF_EMULATE_STACK_STORE));
1742 	set_thread_flag(TIF_EMULATE_STACK_STORE);
/*
 * do_signext() - sign-extend *valp in place from a 2- or 4-byte loaded
 * value (size switch labels elided in this view).
 */
1746 static __kprobes void do_signext(unsigned long *valp, int size)
1750 		*valp = (signed short) *valp;
1753 		*valp = (signed int) *valp;
/*
 * do_byterev() - byte-reverse *valp in place for 2-, 4- or (64-bit
 * only) 8-byte byte-reversed load/store emulation.
 */
1758 static __kprobes void do_byterev(unsigned long *valp, int size)
1762 		*valp = byterev_2(*valp);
1765 		*valp = byterev_4(*valp);
1767 #ifdef __powerpc64__
1769 		*valp = byterev_8(*valp);
1776 * Emulate instructions that cause a transfer of control,
1777 * loads and stores, and a few other instructions.
1778 * Returns 1 if the step was emulated, 0 if not,
1779 * or -1 if the instruction is one that should not be stepped,
1780 * such as an rfid, or a mtmsrd that would clear MSR_RI.
/*
 * emulate_step() - single-step emulation driver: calls analyse_instr()
 * and then carries out the described operation (cache ops, larx/stcx,
 * loads/stores of all flavours, mfmsr/mtmsr[d], sc), finally applying
 * any update-form register writeback and advancing nip.
 * Returns 1 if the step was emulated, 0 if not, or -1 for instructions
 * that must not be stepped (rfid, mtmsr[d] that would clear MSR_RI).
 *
 * Fix: the read_mem()/do_signext()/do_byterev() calls below contained
 * the mojibake sequence "(registered-sign)s" - an HTML "&reg;" mis-decode of
 * "&regs" - which did not compile; restored to "&regs".
 */
1782 int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
1784 	struct instruction_op op;
1790 	r = analyse_instr(&op, regs, instr);
1795 	size = GETSIZE(op.type);
1796 	switch (op.type & INSTR_TYPE_MASK) {
1798 		if (!address_ok(regs, op.ea, 8))
1800 		switch (op.type & CACHEOP_MASK) {
1802 			__cacheop_user_asmx(op.ea, err, "dcbst");
1805 			__cacheop_user_asmx(op.ea, err, "dcbf");
1809 			prefetchw((void *) op.ea);
1813 			prefetch((void *) op.ea);
1816 			__cacheop_user_asmx(op.ea, err, "icbi");
1824 		if (op.ea & (size - 1))
1825 			break;		/* can't handle misaligned */
1827 		if (!address_ok(regs, op.ea, size))
1832 			__get_user_asmx(val, op.ea, err, "lwarx");
1835 			__get_user_asmx(val, op.ea, err, "ldarx");
1841 			regs->gpr[op.reg] = val;
1845 		if (op.ea & (size - 1))
1846 			break;		/* can't handle misaligned */
1848 		if (!address_ok(regs, op.ea, size))
1853 			__put_user_asmx(op.val, op.ea, err, "stwcx.", cr);
1856 			__put_user_asmx(op.val, op.ea, err, "stdcx.", cr);
1862 		regs->ccr = (regs->ccr & 0x0fffffff) |
1864 			((regs->xer >> 3) & 0x10000000);
1868 		err = read_mem(&regs->gpr[op.reg], op.ea, size, regs);
1870 			if (op.type & SIGNEXT)
1871 				do_signext(&regs->gpr[op.reg], size);
1872 			if (op.type & BYTEREV)
1873 				do_byterev(&regs->gpr[op.reg], size);
1877 #ifdef CONFIG_PPC_FPU
1880 			err = do_fp_load(op.reg, do_lfs, op.ea, size, regs);
1882 			err = do_fp_load(op.reg, do_lfd, op.ea, size, regs);
1885 #ifdef CONFIG_ALTIVEC
1887 		err = do_vec_load(op.reg, do_lvx, op.ea & ~0xfUL, regs);
1892 		err = do_vsx_load(op.reg, do_lxvd2x, op.ea, regs);
1896 		if (regs->msr & MSR_LE)
1899 		for (i = 0; i < size; i += 4) {
1903 			err = read_mem(&regs->gpr[rd], op.ea, nb, regs);
1906 			if (nb < 4)	/* left-justify last bytes */
1907 				regs->gpr[rd] <<= 32 - 8 * nb;
		/* kernel stwu/stdu on r1 near the frame: defer the store (see
		 * handle_stack_update()) instead of clobbering the frame */
1914 		if ((op.type & UPDATE) && size == sizeof(long) &&
1915 		    op.reg == 1 && op.update_reg == 1 &&
1916 		    !(regs->msr & MSR_PR) &&
1917 		    op.ea >= regs->gpr[1] - STACK_INT_FRAME_SIZE) {
1918 			err = handle_stack_update(op.ea, regs);
1921 		err = write_mem(op.val, op.ea, size, regs);
1924 #ifdef CONFIG_PPC_FPU
1927 			err = do_fp_store(op.reg, do_stfs, op.ea, size, regs);
1929 			err = do_fp_store(op.reg, do_stfd, op.ea, size, regs);
1932 #ifdef CONFIG_ALTIVEC
1934 		err = do_vec_store(op.reg, do_stvx, op.ea & ~0xfUL, regs);
1939 		err = do_vsx_store(op.reg, do_stxvd2x, op.ea, regs);
1943 		if (regs->msr & MSR_LE)
1946 		for (i = 0; i < size; i += 4) {
1947 			val = regs->gpr[rd];
1952 				val >>= 32 - 8 * nb;
1953 			err = write_mem(val, op.ea, nb, regs);
1962 		regs->gpr[op.reg] = regs->msr & MSR_MASK;
1966 		val = regs->gpr[op.reg];
1967 		if ((val & MSR_RI) == 0)
1968 			/* can't step mtmsr[d] that would clear MSR_RI */
1970 		/* here op.val is the mask of bits to change */
1971 		regs->msr = (regs->msr & ~op.val) | (val & op.val);
1975 	case SYSCALL:	/* sc */
1977 		 * N.B. this uses knowledge about how the syscall
1978 		 * entry code works.  If that is changed, this will
1979 		 * need to be changed also.
1981 		if (regs->gpr[0] == 0x1ebe &&
1982 		    cpu_has_feature(CPU_FTR_REAL_LE)) {
1983 			regs->msr ^= MSR_LE;
1986 		regs->gpr[9] = regs->gpr[13];
1987 		regs->gpr[10] = MSR_KERNEL;
1988 		regs->gpr[11] = regs->nip + 4;
1989 		regs->gpr[12] = regs->msr & MSR_MASK;
1990 		regs->gpr[13] = (unsigned long) get_paca();
1991 		regs->nip = (unsigned long) &system_call_common;
1992 		regs->msr = MSR_KERNEL;
2004 	if (op.type & UPDATE)
2005 		regs->gpr[op.update_reg] = op.ea;
2008 	regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4);